author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 21:55:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 21:55:10 -0400
commit		5375871d432ae9fc581014ac117b96aaee3cd0c7 (patch)
tree		be98e8255b0f927fb920fb532a598b93fa140dbe /drivers
parent		b57cb7231b2ce52d3dda14a7b417ae125fb2eb97 (diff)
parent		dfbc2d75c1bd47c3186fa91f1655ea2f3825b0ec (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc merge from Benjamin Herrenschmidt:
 "Here's the powerpc batch for this merge window.  It is going to be a
  bit more nasty than usual as in touching things outside of
  arch/powerpc mostly due to the big iSeriesectomy :-) We finally got
  rid of the bugger (legacy iSeries support) which was a PITA to
  maintain and that nobody really used anymore.

  Here are some of the highlights:

   - Legacy iSeries is gone.  Thanks Stephen !  There's still some bits
     and pieces remaining if you do a grep -ir series arch/powerpc but
     they are harmless and will be removed in the next few weeks
     hopefully.

   - The 'fadump' functionality (Firmware Assisted Dump) replaces the
     previous (equivalent) "pHyp assisted dump"... it's a rewrite of a
     mechanism to get the hypervisor to do crash dumps on pSeries, the
     new implementation hopefully being much more reliable.  Thanks
     Mahesh Salgaonkar.

   - The "EEH" code (pSeries PCI error handling & recovery) got a big
     spring cleaning, motivated by the need to be able to implement a
     new backend for it on top of some new different type of firmware.

     The work isn't complete yet, but a good chunk of the cleanups is
     there.  Note that this adds a field to struct device_node which is
     not very nice and which Grant objects to.  I will have a patch soon
     that moves that to a powerpc private data structure (hopefully
     before rc1) and we'll improve things further later on (hopefully
     getting rid of the need for that pointer completely).  Thanks Gavin
     Shan.

   - I dug into our exception & interrupt handling code to improve the
     way we do lazy interrupt handling (and make it work properly with
     "edge" triggered interrupt sources), and while at it found & fixed
     a wagon of issues in those areas, including adding support for
     page fault retry & fatal signals on page faults.

   - Your usual random batch of small fixes & updates, including a bunch
     of new embedded boards, both Freescale and APM based ones, etc..."

I fixed up some conflicts with the generalized irq-domain changes from
Grant Likely, hopefully correctly.

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (141 commits)
  powerpc/ps3: Do not adjust the wrapper load address
  powerpc: Remove the rest of the legacy iSeries include files
  powerpc: Remove the remaining CONFIG_PPC_ISERIES pieces
  init: Remove CONFIG_PPC_ISERIES
  powerpc: Remove FW_FEATURE ISERIES from arch code
  tty/hvc_vio: FW_FEATURE_ISERIES is no longer selectable
  powerpc/spufs: Fix double unlocks
  powerpc/5200: convert mpc5200 to use of_platform_populate()
  powerpc/mpc5200: add options to mpc5200_defconfig
  powerpc/mpc52xx: add a4m072 board support
  powerpc/mpc5200: update mpc5200_defconfig to fit for charon board
  Documentation/powerpc/mpc52xx.txt: Checkpatch cleanup
  powerpc/44x: Add additional device support for APM821xx SoC and Bluestone board
  powerpc/44x: Add support PCI-E for APM821xx SoC and Bluestone board
  MAINTAINERS: Update PowerPC 4xx tree
  powerpc/44x: The bug fixed support for APM821xx SoC and Bluestone board
  powerpc: document the FSL MPIC message register binding
  powerpc: add support for MPIC message register API
  powerpc/fsl: Added aliased MSIIR register address to MSI node in dts
  powerpc/85xx: mpc8548cds - add 36-bit dts
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/base/driver.c			30
-rw-r--r--	drivers/block/viodasd.c			809
-rw-r--r--	drivers/cdrom/viocd.c			739
-rw-r--r--	drivers/char/viotape.c			1041
-rw-r--r--	drivers/gpio/Kconfig			11
-rw-r--r--	drivers/gpio/Makefile			1
-rw-r--r--	drivers/gpio/gpio-ge.c			199
-rw-r--r--	drivers/misc/carma/carma-fpga.c		114
-rw-r--r--	drivers/mtd/nand/Kconfig		10
-rw-r--r--	drivers/mtd/nand/Makefile		1
-rw-r--r--	drivers/mtd/nand/fsl_ifc_nand.c		1072
-rw-r--r--	drivers/scsi/Kconfig			3
-rw-r--r--	drivers/scsi/ibmvscsi/Makefile		1
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.c	12
-rw-r--r--	drivers/scsi/ibmvscsi/ibmvscsi.h	1
-rw-r--r--	drivers/scsi/ibmvscsi/iseries_vscsi.c	173
-rw-r--r--	drivers/tty/hvc/Kconfig			14
-rw-r--r--	drivers/tty/hvc/Makefile		1
-rw-r--r--	drivers/tty/hvc/hvc_iseries.c		599
-rw-r--r--	drivers/tty/hvc/hvc_udbg.c		8
-rw-r--r--	drivers/tty/hvc/hvc_vio.c		4
-rw-r--r--	drivers/tty/serial/Kconfig		2
-rw-r--r--	drivers/watchdog/Kconfig		2
23 files changed, 1382 insertions, 3465 deletions
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 60e4f77ca66..3ec3896c83a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -123,36 +123,6 @@ void driver_remove_file(struct device_driver *drv,
 }
 EXPORT_SYMBOL_GPL(driver_remove_file);
 
-/**
- * driver_add_kobj - add a kobject below the specified driver
- * @drv: requesting device driver
- * @kobj: kobject to add below this driver
- * @fmt: format string that names the kobject
- *
- * You really don't want to do this, this is only here due to one looney
- * iseries driver, go poke those developers if you are annoyed about
- * this...
- */
-int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
-		    const char *fmt, ...)
-{
-	va_list args;
-	char *name;
-	int ret;
-
-	va_start(args, fmt);
-	name = kvasprintf(GFP_KERNEL, fmt, args);
-	va_end(args);
-
-	if (!name)
-		return -ENOMEM;
-
-	ret = kobject_add(kobj, &drv->p->kobj, "%s", name);
-	kfree(name);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(driver_add_kobj);
-
 static int driver_add_groups(struct device_driver *drv,
 			     const struct attribute_group **groups)
 {
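For context, driver_add_kobj() placed a caller-initialized kobject under the
driver's sysfs directory.  Below is a minimal, hypothetical sketch of a caller
against a pre-removal kernel, built only from the signature shown in the hunk
above; the ktype, names, and sysfs path are illustrative, not taken from this
commit:

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/slab.h>

static void example_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type example_ktype = {
	.release	= example_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

/* Would have created /sys/bus/<bus>/drivers/<drv>/unit<n>. */
static int example_add_unit_dir(struct device_driver *drv, int unit)
{
	struct kobject *kobj;
	int ret;

	kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	if (!kobj)
		return -ENOMEM;
	kobject_init(kobj, &example_ktype);
	ret = driver_add_kobj(drv, kobj, "unit%d", unit);
	if (ret)
		kobject_put(kobj);	/* release() frees the allocation */
	return ret;
}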
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
deleted file mode 100644
index 9a5b2a2d616..00000000000
--- a/drivers/block/viodasd.c
+++ /dev/null
@@ -1,809 +0,0 @@
-/* -*- linux-c -*-
- * viodasd.c
- *  Authors: Dave Boutcher <boutcher@us.ibm.com>
- *           Ryan Arnold <ryanarn@us.ibm.com>
- *           Colin Devilbiss <devilbis@us.ibm.com>
- *           Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to disk space (termed "DASD" in historical
- * IBM terms) owned and managed by an OS/400 partition running on the
- * same box as this Linux partition.
- *
- * All disk operations are performed by sending messages back and forth to
- * the OS/400 partition.
- */
-
-#define pr_fmt(fmt) "viod: " fmt
-
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/dma-mapping.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/scatterlist.h>
-
-#include <asm/uaccess.h>
-#include <asm/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/hv_lp_config.h>
-#include <asm/iseries/vio.h>
-#include <asm/firmware.h>
-
-MODULE_DESCRIPTION("iSeries Virtual DASD");
-MODULE_AUTHOR("Dave Boutcher");
-MODULE_LICENSE("GPL");
-
-/*
- * We only support 7 partitions per physical disk....so with minor
- * numbers 0-255 we get a maximum of 32 disks.
- */
-#define VIOD_GENHD_NAME		"iseries/vd"
-
-#define VIOD_VERS		"1.64"
-
-enum {
-	PARTITION_SHIFT = 3,
-	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
-	MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
-};
-
-static DEFINE_MUTEX(viodasd_mutex);
-static DEFINE_SPINLOCK(viodasd_spinlock);
-
-#define VIOMAXREQ		16
-
-#define DEVICE_NO(cell)	((struct viodasd_device *)(cell) - &viodasd_devices[0])
-
-struct viodasd_waitevent {
-	struct completion	com;
-	int			rc;
-	u16			sub_result;
-	int			max_disk;	/* open */
-};
-
-static const struct vio_error_entry viodasd_err_table[] = {
-	{ 0x0201, EINVAL, "Invalid Range" },
-	{ 0x0202, EINVAL, "Invalid Token" },
-	{ 0x0203, EIO, "DMA Error" },
-	{ 0x0204, EIO, "Use Error" },
-	{ 0x0205, EIO, "Release Error" },
-	{ 0x0206, EINVAL, "Invalid Disk" },
-	{ 0x0207, EBUSY, "Can't Lock" },
-	{ 0x0208, EIO, "Already Locked" },
-	{ 0x0209, EIO, "Already Unlocked" },
-	{ 0x020A, EIO, "Invalid Arg" },
-	{ 0x020B, EIO, "Bad IFS File" },
-	{ 0x020C, EROFS, "Read Only Device" },
-	{ 0x02FF, EIO, "Internal Error" },
-	{ 0x0000, 0, NULL },
-};
-
-/*
- * Figure out the biggest I/O request (in sectors) we can accept
- */
-#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
-
-/*
- * Number of disk I/O requests we've sent to OS/400
- */
-static int num_req_outstanding;
-
-/*
- * This is our internal structure for keeping track of disk devices
- */
-struct viodasd_device {
-	u16		cylinders;
-	u16		tracks;
-	u16		sectors;
-	u16		bytes_per_sector;
-	u64		size;
-	int		read_only;
-	spinlock_t	q_lock;
-	struct gendisk	*disk;
-	struct device	*dev;
-} viodasd_devices[MAX_DISKNO];
-
-/*
- * External open entry point.
- */
-static int viodasd_open(struct block_device *bdev, fmode_t mode)
-{
-	struct viodasd_device *d = bdev->bd_disk->private_data;
-	HvLpEvent_Rc hvrc;
-	struct viodasd_waitevent we;
-	u16 flags = 0;
-
-	if (d->read_only) {
-		if (mode & FMODE_WRITE)
-			return -EROFS;
-		flags = vioblockflags_ro;
-	}
-
-	init_completion(&we.com);
-
-	/* Send the open event to OS/400 */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_blockio | vioblockopen,
-			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)(unsigned long)&we, VIOVERSION << 16,
-			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
-			0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("HV open failed %d\n", (int)hvrc);
-		return -EIO;
-	}
-
-	wait_for_completion(&we.com);
-
-	/* Check the return code */
-	if (we.rc != 0) {
-		const struct vio_error_entry *err =
-			vio_lookup_rc(viodasd_err_table, we.sub_result);
-
-		pr_warning("bad rc opening disk: %d:0x%04x (%s)\n",
-			   (int)we.rc, we.sub_result, err->msg);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
-	int ret;
-
-	mutex_lock(&viodasd_mutex);
-	ret = viodasd_open(bdev, mode);
-	mutex_unlock(&viodasd_mutex);
-
-	return ret;
-}
-
-
-/*
- * External release entry point.
- */
-static int viodasd_release(struct gendisk *disk, fmode_t mode)
-{
-	struct viodasd_device *d = disk->private_data;
-	HvLpEvent_Rc hvrc;
-
-	mutex_lock(&viodasd_mutex);
-	/* Send the event to OS/400.  We DON'T expect a response */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_blockio | vioblockclose,
-			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			0, VIOVERSION << 16,
-			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
-			0, 0, 0);
-	if (hvrc != 0)
-		pr_warning("HV close call failed %d\n", (int)hvrc);
-
-	mutex_unlock(&viodasd_mutex);
-
-	return 0;
-}
-
-
-/* External ioctl entry point.
- */
-static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-	struct gendisk *disk = bdev->bd_disk;
-	struct viodasd_device *d = disk->private_data;
-
-	geo->sectors = d->sectors ? d->sectors : 32;
-	geo->heads = d->tracks ? d->tracks : 64;
-	geo->cylinders = d->cylinders ? d->cylinders :
-		get_capacity(disk) / (geo->sectors * geo->heads);
-
-	return 0;
-}
-
-/*
- * Our file operations table
- */
-static const struct block_device_operations viodasd_fops = {
-	.owner = THIS_MODULE,
-	.open = viodasd_unlocked_open,
-	.release = viodasd_release,
-	.getgeo = viodasd_getgeo,
-};
-
-/*
- * End a request
- */
-static void viodasd_end_request(struct request *req, int error,
-		int num_sectors)
-{
-	__blk_end_request(req, error, num_sectors << 9);
-}
-
-/*
- * Send an actual I/O request to OS/400
- */
-static int send_request(struct request *req)
-{
-	u64 start;
-	int direction;
-	int nsg;
-	u16 viocmd;
-	HvLpEvent_Rc hvrc;
-	struct vioblocklpevent *bevent;
-	struct HvLpEvent *hev;
-	struct scatterlist sg[VIOMAXBLOCKDMA];
-	int sgindex;
-	struct viodasd_device *d;
-	unsigned long flags;
-
-	start = (u64)blk_rq_pos(req) << 9;
-
-	if (rq_data_dir(req) == READ) {
-		direction = DMA_FROM_DEVICE;
-		viocmd = viomajorsubtype_blockio | vioblockread;
-	} else {
-		direction = DMA_TO_DEVICE;
-		viocmd = viomajorsubtype_blockio | vioblockwrite;
-	}
-
-	d = req->rq_disk->private_data;
-
-	/* Now build the scatter-gather list */
-	sg_init_table(sg, VIOMAXBLOCKDMA);
-	nsg = blk_rq_map_sg(req->q, req, sg);
-	nsg = dma_map_sg(d->dev, sg, nsg, direction);
-
-	spin_lock_irqsave(&viodasd_spinlock, flags);
-	num_req_outstanding++;
-
-	/* This optimization handles a single DMA block */
-	if (nsg == 1)
-		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-				HvLpEvent_Type_VirtualIo, viocmd,
-				HvLpEvent_AckInd_DoAck,
-				HvLpEvent_AckType_ImmediateAck,
-				viopath_sourceinst(viopath_hostLp),
-				viopath_targetinst(viopath_hostLp),
-				(u64)(unsigned long)req, VIOVERSION << 16,
-				((u64)DEVICE_NO(d) << 48), start,
-				((u64)sg_dma_address(&sg[0])) << 32,
-				sg_dma_len(&sg[0]));
-	else {
-		bevent = (struct vioblocklpevent *)
-			vio_get_event_buffer(viomajorsubtype_blockio);
-		if (bevent == NULL) {
-			pr_warning("error allocating disk event buffer\n");
-			goto error_ret;
-		}
-
-		/*
-		 * Now build up the actual request.  Note that we store
-		 * the pointer to the request in the correlation
-		 * token so we can match the response up later
-		 */
-		memset(bevent, 0, sizeof(struct vioblocklpevent));
-		hev = &bevent->event;
-		hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
-			HV_LP_EVENT_INT;
-		hev->xType = HvLpEvent_Type_VirtualIo;
-		hev->xSubtype = viocmd;
-		hev->xSourceLp = HvLpConfig_getLpIndex();
-		hev->xTargetLp = viopath_hostLp;
-		hev->xSizeMinus1 =
-			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
-			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
-		hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
-		hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
-		hev->xCorrelationToken = (u64)req;
-		bevent->version = VIOVERSION;
-		bevent->disk = DEVICE_NO(d);
-		bevent->u.rw_data.offset = start;
-
-		/*
-		 * Copy just the dma information from the sg list
-		 * into the request
-		 */
-		for (sgindex = 0; sgindex < nsg; sgindex++) {
-			bevent->u.rw_data.dma_info[sgindex].token =
-				sg_dma_address(&sg[sgindex]);
-			bevent->u.rw_data.dma_info[sgindex].len =
-				sg_dma_len(&sg[sgindex]);
-		}
-
-		/* Send the request */
-		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
-		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
-	}
-
-	if (hvrc != HvLpEvent_Rc_Good) {
-		pr_warning("error sending disk event to OS/400 (rc %d)\n",
-			   (int)hvrc);
-		goto error_ret;
-	}
-	spin_unlock_irqrestore(&viodasd_spinlock, flags);
-	return 0;
-
-error_ret:
-	num_req_outstanding--;
-	spin_unlock_irqrestore(&viodasd_spinlock, flags);
-	dma_unmap_sg(d->dev, sg, nsg, direction);
-	return -1;
-}
-
-/*
- * This is the external request processing routine
- */
-static void do_viodasd_request(struct request_queue *q)
-{
-	struct request *req;
-
-	/*
-	 * If we already have the maximum number of requests
-	 * outstanding to OS/400 just bail out. We'll come
-	 * back later.
-	 */
-	while (num_req_outstanding < VIOMAXREQ) {
-		req = blk_fetch_request(q);
-		if (req == NULL)
-			return;
-		/* check that request contains a valid command */
-		if (req->cmd_type != REQ_TYPE_FS) {
-			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
-			continue;
-		}
-		/* Try sending the request */
-		if (send_request(req) != 0)
-			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
-	}
-}
-
-/*
- * Probe a single disk and fill in the viodasd_device structure
- * for it.
- */
-static int probe_disk(struct viodasd_device *d)
-{
-	HvLpEvent_Rc hvrc;
-	struct viodasd_waitevent we;
-	int dev_no = DEVICE_NO(d);
-	struct gendisk *g;
-	struct request_queue *q;
-	u16 flags = 0;
-
-retry:
-	init_completion(&we.com);
-
-	/* Send the open event to OS/400 */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_blockio | vioblockopen,
-			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)(unsigned long)&we, VIOVERSION << 16,
-			((u64)dev_no << 48) | ((u64)flags << 32),
-			0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("bad rc on HV open %d\n", (int)hvrc);
-		return 0;
-	}
-
-	wait_for_completion(&we.com);
-
-	if (we.rc != 0) {
-		if (flags != 0)
-			return 0;
-		/* try again with read only flag set */
-		flags = vioblockflags_ro;
-		goto retry;
-	}
-	if (we.max_disk > (MAX_DISKNO - 1)) {
-		printk_once(KERN_INFO pr_fmt("Only examining the first %d of %d disks connected\n"),
-			    MAX_DISKNO, we.max_disk + 1);
-	}
-
-	/* Send the close event to OS/400.  We DON'T expect a response */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_blockio | vioblockclose,
-			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			0, VIOVERSION << 16,
-			((u64)dev_no << 48) | ((u64)flags << 32),
-			0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("bad rc sending event to OS/400 %d\n", (int)hvrc);
-		return 0;
-	}
-
-	if (d->dev == NULL) {
-		/* this is when we reprobe for new disks */
-		if (vio_create_viodasd(dev_no) == NULL) {
-			pr_warning("cannot allocate virtual device for disk %d\n",
-				   dev_no);
-			return 0;
-		}
-		/*
-		 * The vio_create_viodasd will have recursed into this
-		 * routine with d->dev set to the new vio device and
-		 * will finish the setup of the disk below.
-		 */
-		return 1;
-	}
-
-	/* create the request queue for the disk */
-	spin_lock_init(&d->q_lock);
-	q = blk_init_queue(do_viodasd_request, &d->q_lock);
-	if (q == NULL) {
-		pr_warning("cannot allocate queue for disk %d\n", dev_no);
-		return 0;
-	}
-	g = alloc_disk(1 << PARTITION_SHIFT);
-	if (g == NULL) {
-		pr_warning("cannot allocate disk structure for disk %d\n",
-			   dev_no);
-		blk_cleanup_queue(q);
-		return 0;
-	}
-
-	d->disk = g;
-	blk_queue_max_segments(q, VIOMAXBLOCKDMA);
-	blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS);
-	g->major = VIODASD_MAJOR;
-	g->first_minor = dev_no << PARTITION_SHIFT;
-	if (dev_no >= 26)
-		snprintf(g->disk_name, sizeof(g->disk_name),
-				VIOD_GENHD_NAME "%c%c",
-				'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
-	else
-		snprintf(g->disk_name, sizeof(g->disk_name),
-				VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
-	g->fops = &viodasd_fops;
-	g->queue = q;
-	g->private_data = d;
-	g->driverfs_dev = d->dev;
-	set_capacity(g, d->size >> 9);
-
-	pr_info("disk %d: %lu sectors (%lu MB) CHS=%d/%d/%d sector size %d%s\n",
-		dev_no, (unsigned long)(d->size >> 9),
-		(unsigned long)(d->size >> 20),
-		(int)d->cylinders, (int)d->tracks,
-		(int)d->sectors, (int)d->bytes_per_sector,
-		d->read_only ? " (RO)" : "");
-
-	/* register us in the global list */
-	add_disk(g);
-	return 1;
-}
-
-/* returns the total number of scatterlist elements converted */
-static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
-		struct scatterlist *sg, int *total_len)
-{
-	int i, numsg;
-	const struct rw_data *rw_data = &bevent->u.rw_data;
-	static const int offset =
-		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
-	static const int element_size = sizeof(rw_data->dma_info[0]);
-
-	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
-	if (numsg > VIOMAXBLOCKDMA)
-		numsg = VIOMAXBLOCKDMA;
-
-	*total_len = 0;
-	sg_init_table(sg, VIOMAXBLOCKDMA);
-	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
-		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
-		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
-		*total_len += rw_data->dma_info[i].len;
-	}
-	return i;
-}
-
-/*
- * Restart all queues, starting with the one _after_ the disk given,
- * thus reducing the chance of starvation of higher numbered disks.
- */
-static void viodasd_restart_all_queues_starting_from(int first_index)
-{
-	int i;
-
-	for (i = first_index + 1; i < MAX_DISKNO; ++i)
-		if (viodasd_devices[i].disk)
-			blk_run_queue(viodasd_devices[i].disk->queue);
-	for (i = 0; i <= first_index; ++i)
-		if (viodasd_devices[i].disk)
-			blk_run_queue(viodasd_devices[i].disk->queue);
-}
-
-/*
- * For read and write requests, decrement the number of outstanding requests,
- * Free the DMA buffers we allocated.
- */
-static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
-{
-	int num_sg, num_sect, pci_direction, total_len;
-	struct request *req;
-	struct scatterlist sg[VIOMAXBLOCKDMA];
-	struct HvLpEvent *event = &bevent->event;
-	unsigned long irq_flags;
-	struct viodasd_device *d;
-	int error;
-	spinlock_t *qlock;
-
-	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
-	num_sect = total_len >> 9;
-	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
-		pci_direction = DMA_FROM_DEVICE;
-	else
-		pci_direction = DMA_TO_DEVICE;
-	req = (struct request *)bevent->event.xCorrelationToken;
-	d = req->rq_disk->private_data;
-
-	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
-
-	/*
-	 * Since this is running in interrupt mode, we need to make sure
-	 * we're not stepping on any global I/O operations
-	 */
-	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
-	num_req_outstanding--;
-	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
-
-	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
-	if (error) {
-		const struct vio_error_entry *err;
-		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
-		pr_warning("read/write error %d:0x%04x (%s)\n",
-			   event->xRc, bevent->sub_result, err->msg);
-		num_sect = blk_rq_sectors(req);
-	}
-	qlock = req->q->queue_lock;
-	spin_lock_irqsave(qlock, irq_flags);
-	viodasd_end_request(req, error, num_sect);
-	spin_unlock_irqrestore(qlock, irq_flags);
-
-	/* Finally, try to get more requests off of this device's queue */
-	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
-
-	return 0;
-}
-
-/* This routine handles incoming block LP events */
-static void handle_block_event(struct HvLpEvent *event)
-{
-	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
-	struct viodasd_waitevent *pwe;
-
-	if (event == NULL)
-		/* Notification that a partition went away! */
-		return;
-	/* First, we should NEVER get an int here...only acks */
-	if (hvlpevent_is_int(event)) {
-		pr_warning("Yikes! got an int in viodasd event handler!\n");
-		if (hvlpevent_need_ack(event)) {
-			event->xRc = HvLpEvent_Rc_InvalidSubtype;
-			HvCallEvent_ackLpEvent(event);
-		}
-	}
-
-	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
-	case vioblockopen:
-		/*
-		 * Handle a response to an open request.  We get all the
-		 * disk information in the response, so update it.  The
-		 * correlation token contains a pointer to a waitevent
-		 * structure that has a completion in it.  update the
-		 * return code in the waitevent structure and post the
-		 * completion to wake up the guy who sent the request
-		 */
-		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
-		pwe->rc = event->xRc;
-		pwe->sub_result = bevent->sub_result;
-		if (event->xRc == HvLpEvent_Rc_Good) {
-			const struct open_data *data = &bevent->u.open_data;
-			struct viodasd_device *device =
-				&viodasd_devices[bevent->disk];
-			device->read_only =
-				bevent->flags & vioblockflags_ro;
-			device->size = data->disk_size;
-			device->cylinders = data->cylinders;
-			device->tracks = data->tracks;
-			device->sectors = data->sectors;
-			device->bytes_per_sector = data->bytes_per_sector;
-			pwe->max_disk = data->max_disk;
-		}
-		complete(&pwe->com);
-		break;
-	case vioblockclose:
-		break;
-	case vioblockread:
-	case vioblockwrite:
-		viodasd_handle_read_write(bevent);
-		break;
-
-	default:
-		pr_warning("invalid subtype!");
-		if (hvlpevent_need_ack(event)) {
-			event->xRc = HvLpEvent_Rc_InvalidSubtype;
-			HvCallEvent_ackLpEvent(event);
-		}
-	}
-}
-
-/*
- * Get the driver to reprobe for more disks.
- */
-static ssize_t probe_disks(struct device_driver *drv, const char *buf,
-		size_t count)
-{
-	struct viodasd_device *d;
-
-	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
-		if (d->disk == NULL)
-			probe_disk(d);
-	}
-	return count;
-}
-static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
-
-static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
-	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
-
-	d->dev = &vdev->dev;
-	if (!probe_disk(d))
-		return -ENODEV;
-	return 0;
-}
-
-static int viodasd_remove(struct vio_dev *vdev)
-{
-	struct viodasd_device *d;
-
-	d = &viodasd_devices[vdev->unit_address];
-	if (d->disk) {
-		del_gendisk(d->disk);
-		blk_cleanup_queue(d->disk->queue);
-		put_disk(d->disk);
-		d->disk = NULL;
-	}
-	d->dev = NULL;
-	return 0;
-}
-
-/**
- * viodasd_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viodasd_device_table[] __devinitdata = {
-	{ "block", "IBM,iSeries-viodasd" },
-	{ "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viodasd_device_table);
-
-static struct vio_driver viodasd_driver = {
-	.id_table = viodasd_device_table,
-	.probe = viodasd_probe,
-	.remove = viodasd_remove,
-	.driver = {
-		.name = "viodasd",
-		.owner = THIS_MODULE,
-	}
-};
-
-static int need_delete_probe;
-
-/*
- * Initialize the whole device driver.  Handle module and non-module
- * versions
- */
-static int __init viodasd_init(void)
-{
-	int rc;
-
-	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
-		rc = -ENODEV;
-		goto early_fail;
-	}
-
-	/* Try to open to our host lp */
-	if (viopath_hostLp == HvLpIndexInvalid)
-		vio_set_hostlp();
-
-	if (viopath_hostLp == HvLpIndexInvalid) {
-		pr_warning("invalid hosting partition\n");
-		rc = -EIO;
-		goto early_fail;
-	}
-
-	pr_info("vers " VIOD_VERS ", hosting partition %d\n", viopath_hostLp);
-
-	/* register the block device */
-	rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-	if (rc) {
-		pr_warning("Unable to get major number %d for %s\n",
-			   VIODASD_MAJOR, VIOD_GENHD_NAME);
-		goto early_fail;
-	}
-	/* Actually open the path to the hosting partition */
-	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
-			  VIOMAXREQ + 2);
-	if (rc) {
-		pr_warning("error opening path to host partition %d\n",
-			   viopath_hostLp);
-		goto unregister_blk;
-	}
-
-	/* Initialize our request handler */
-	vio_setHandler(viomajorsubtype_blockio, handle_block_event);
-
-	rc = vio_register_driver(&viodasd_driver);
-	if (rc) {
-		pr_warning("vio_register_driver failed\n");
-		goto unset_handler;
-	}
-
-	/*
-	 * If this call fails, it just means that we cannot dynamically
-	 * add virtual disks, but the driver will still work fine for
-	 * all existing disk, so ignore the failure.
-	 */
-	if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
-		need_delete_probe = 1;
-
-	return 0;
-
-unset_handler:
-	vio_clearHandler(viomajorsubtype_blockio);
-	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
-unregister_blk:
-	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-early_fail:
-	return rc;
-}
-module_init(viodasd_init);
-
-void __exit viodasd_exit(void)
-{
-	if (need_delete_probe)
-		driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
-	vio_unregister_driver(&viodasd_driver);
-	vio_clearHandler(viomajorsubtype_blockio);
-	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
-	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-}
-module_exit(viodasd_exit);
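A small aside on the minor-number layout the deleted driver used: with
PARTITION_SHIFT = 3 there are 8 minors per disk (the whole disk plus 7
partitions), so minors 0-255 cover the 32 disks mentioned in the comment near
the top of viodasd.c, and probe_disk() sets first_minor = dev_no <<
PARTITION_SHIFT accordingly.  A standalone userspace sketch of the decoding,
for illustration only:

#include <stdio.h>

#define PARTITION_SHIFT 3	/* from the deleted driver: 8 minors per disk */

int main(void)
{
	/* 256 minors >> 3 == 32 disks, matching the driver's comment. */
	int minor = 42;
	int disk_no = minor >> PARTITION_SHIFT;			/* which disk */
	int part = minor & ((1 << PARTITION_SHIFT) - 1);	/* 0 = whole disk */

	printf("minor %d -> disk %d, partition %d\n", minor, disk_no, part);
	return 0;
}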
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
deleted file mode 100644
index 7878da89d29..00000000000
--- a/drivers/cdrom/viocd.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/* -*- linux-c -*-
- * drivers/cdrom/viocd.c
- *
- * iSeries Virtual CD Rom
- *
- * Authors: Dave Boutcher <boutcher@us.ibm.com>
- *          Ryan Arnold <ryanarn@us.ibm.com>
- *          Colin Devilbiss <devilbis@us.ibm.com>
- *          Stephen Rothwell
- *
- * (C) Copyright 2000-2004 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This routine provides access to CD ROM drives owned and managed by an
- * OS/400 partition running on the same box as this Linux partition.
- *
- * All operations are performed by sending messages back and forth to
- * the OS/400 partition.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/major.h>
-#include <linux/blkdev.h>
-#include <linux/cdrom.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/completion.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/seq_file.h>
-#include <linux/scatterlist.h>
-
-#include <asm/vio.h>
-#include <asm/iseries/hv_types.h>
-#include <asm/iseries/hv_lp_event.h>
-#include <asm/iseries/vio.h>
-#include <asm/firmware.h>
-
-#define VIOCD_DEVICE		"iseries/vcd"
-
-#define VIOCD_VERS		"1.06"
-
-/*
- * Should probably make this a module parameter....sigh
- */
-#define VIOCD_MAX_CD	HVMAXARCHITECTEDVIRTUALCDROMS
-
-static DEFINE_MUTEX(viocd_mutex);
-static const struct vio_error_entry viocd_err_table[] = {
-	{0x0201, EINVAL, "Invalid Range"},
-	{0x0202, EINVAL, "Invalid Token"},
-	{0x0203, EIO, "DMA Error"},
-	{0x0204, EIO, "Use Error"},
-	{0x0205, EIO, "Release Error"},
-	{0x0206, EINVAL, "Invalid CD"},
-	{0x020C, EROFS, "Read Only Device"},
-	{0x020D, ENOMEDIUM, "Changed or Missing Volume (or Varied Off?)"},
-	{0x020E, EIO, "Optical System Error (Varied Off?)"},
-	{0x02FF, EIO, "Internal Error"},
-	{0x3010, EIO, "Changed Volume"},
-	{0xC100, EIO, "Optical System Error"},
-	{0x0000, 0, NULL},
-};
-
-/*
- * This is the structure we use to exchange info between driver and interrupt
- * handler
- */
-struct viocd_waitevent {
-	struct completion	com;
-	int			rc;
-	u16			sub_result;
-	int			changed;
-};
-
-/* this is a lookup table for the true capabilities of a device */
-struct capability_entry {
-	char	*type;
-	int	capability;
-};
-
-static struct capability_entry capability_table[] __initdata = {
-	{ "6330", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
-	{ "6331", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
-	{ "6333", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
-	{ "632A", CDC_LOCK | CDC_DVD_RAM | CDC_RAM },
-	{ "6321", CDC_LOCK },
-	{ "632B", 0 },
-	{ NULL  , CDC_LOCK },
-};
-
-/* These are our internal structures for keeping track of devices */
-static int viocd_numdev;
-
-struct disk_info {
-	struct gendisk			*viocd_disk;
-	struct cdrom_device_info	viocd_info;
-	struct device			*dev;
-	const char			*rsrcname;
-	const char			*type;
-	const char			*model;
-};
-static struct disk_info viocd_diskinfo[VIOCD_MAX_CD];
-
-#define DEVICE_NR(di)	((di) - &viocd_diskinfo[0])
-
-static spinlock_t viocd_reqlock;
-
-#define MAX_CD_REQ	1
-
-/* procfs support */
-static int proc_viocd_show(struct seq_file *m, void *v)
-{
-	int i;
-
-	for (i = 0; i < viocd_numdev; i++) {
-		seq_printf(m, "viocd device %d is iSeries resource %10.10s "
-			   "type %4.4s, model %3.3s\n",
-			   i, viocd_diskinfo[i].rsrcname,
-			   viocd_diskinfo[i].type,
-			   viocd_diskinfo[i].model);
-	}
-	return 0;
-}
-
-static int proc_viocd_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, proc_viocd_show, NULL);
-}
-
-static const struct file_operations proc_viocd_operations = {
-	.owner		= THIS_MODULE,
-	.open		= proc_viocd_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
-{
-	struct disk_info *di = bdev->bd_disk->private_data;
-	int ret;
-
-	mutex_lock(&viocd_mutex);
-	ret = cdrom_open(&di->viocd_info, bdev, mode);
-	mutex_unlock(&viocd_mutex);
-
-	return ret;
-}
-
-static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
-{
-	struct disk_info *di = disk->private_data;
-
-	mutex_lock(&viocd_mutex);
-	cdrom_release(&di->viocd_info, mode);
-	mutex_unlock(&viocd_mutex);
-	return 0;
-}
-
-static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
-		unsigned cmd, unsigned long arg)
-{
-	struct disk_info *di = bdev->bd_disk->private_data;
-	int ret;
-
-	mutex_lock(&viocd_mutex);
-	ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
-	mutex_unlock(&viocd_mutex);
-
-	return ret;
-}
-
-static unsigned int viocd_blk_check_events(struct gendisk *disk,
-					   unsigned int clearing)
-{
-	struct disk_info *di = disk->private_data;
-
-	return cdrom_check_events(&di->viocd_info, clearing);
-}
-
-static const struct block_device_operations viocd_fops = {
-	.owner		= THIS_MODULE,
-	.open		= viocd_blk_open,
-	.release	= viocd_blk_release,
-	.ioctl		= viocd_blk_ioctl,
-	.check_events	= viocd_blk_check_events,
-};
-
-static int viocd_open(struct cdrom_device_info *cdi, int purpose)
-{
-	struct disk_info *diskinfo = cdi->handle;
-	int device_no = DEVICE_NR(diskinfo);
-	HvLpEvent_Rc hvrc;
-	struct viocd_waitevent we;
-
-	init_completion(&we.com);
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_cdio | viocdopen,
-			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
-			0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
-			   (int)hvrc);
-		return -EIO;
-	}
-
-	wait_for_completion(&we.com);
-
-	if (we.rc) {
-		const struct vio_error_entry *err =
-			vio_lookup_rc(viocd_err_table, we.sub_result);
-		pr_warning("bad rc %d:0x%04X on open: %s\n",
-			   we.rc, we.sub_result, err->msg);
-		return -err->errno;
-	}
-
-	return 0;
-}
-
-static void viocd_release(struct cdrom_device_info *cdi)
-{
-	int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
-	HvLpEvent_Rc hvrc;
-
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_cdio | viocdclose,
-			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp), 0,
-			VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
-	if (hvrc != 0)
-		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
-			   (int)hvrc);
-}
-
-/* Send a read or write request to OS/400 */
-static int send_request(struct request *req)
-{
-	HvLpEvent_Rc hvrc;
-	struct disk_info *diskinfo = req->rq_disk->private_data;
-	u64 len;
-	dma_addr_t dmaaddr;
-	int direction;
-	u16 cmd;
-	struct scatterlist sg;
-
-	BUG_ON(req->nr_phys_segments > 1);
-
-	if (rq_data_dir(req) == READ) {
-		direction = DMA_FROM_DEVICE;
-		cmd = viomajorsubtype_cdio | viocdread;
-	} else {
-		direction = DMA_TO_DEVICE;
-		cmd = viomajorsubtype_cdio | viocdwrite;
-	}
-
-	sg_init_table(&sg, 1);
-	if (blk_rq_map_sg(req->q, req, &sg) == 0) {
-		pr_warning("error setting up scatter/gather list\n");
-		return -1;
-	}
-
-	if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
-		pr_warning("error allocating sg tce\n");
-		return -1;
-	}
-	dmaaddr = sg_dma_address(&sg);
-	len = sg_dma_len(&sg);
-
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo, cmd,
-			HvLpEvent_AckInd_DoAck,
-			HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)req, VIOVERSION << 16,
-			((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
-			(u64)blk_rq_pos(req) * 512, len, 0);
-	if (hvrc != HvLpEvent_Rc_Good) {
-		pr_warning("hv error on op %d\n", (int)hvrc);
-		return -1;
-	}
-
-	return 0;
-}
-
-static int rwreq;
-
-static void do_viocd_request(struct request_queue *q)
-{
-	struct request *req;
-
-	while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
-		if (req->cmd_type != REQ_TYPE_FS)
-			__blk_end_request_all(req, -EIO);
-		else if (send_request(req) < 0) {
-			pr_warning("unable to send message to OS/400!\n");
-			__blk_end_request_all(req, -EIO);
-		} else
-			rwreq++;
-	}
-}
-
-static unsigned int viocd_check_events(struct cdrom_device_info *cdi,
-				       unsigned int clearing, int disc_nr)
-{
-	struct viocd_waitevent we;
-	HvLpEvent_Rc hvrc;
-	int device_no = DEVICE_NR((struct disk_info *)cdi->handle);
-
-	init_completion(&we.com);
-
-	/* Send the open event to OS/400 */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_cdio | viocdcheck,
-			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
-			0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
-			   (int)hvrc);
-		return 0;
-	}
-
-	wait_for_completion(&we.com);
-
-	/* Check the return code.  If bad, assume no change */
-	if (we.rc) {
-		const struct vio_error_entry *err =
-			vio_lookup_rc(viocd_err_table, we.sub_result);
-		pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
-			   we.rc, we.sub_result, err->msg);
-		return 0;
-	}
-
-	return we.changed ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
-{
-	HvLpEvent_Rc hvrc;
-	u64 device_no = DEVICE_NR((struct disk_info *)cdi->handle);
-	/* NOTE: flags is 1 or 0 so it won't overwrite the device_no */
-	u64 flags = !!locking;
-	struct viocd_waitevent we;
-
-	init_completion(&we.com);
-
-	/* Send the lockdoor event to OS/400 */
-	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
-			HvLpEvent_Type_VirtualIo,
-			viomajorsubtype_cdio | viocdlockdoor,
-			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
-			viopath_sourceinst(viopath_hostLp),
-			viopath_targetinst(viopath_hostLp),
-			(u64)&we, VIOVERSION << 16,
-			(device_no << 48) | (flags << 32), 0, 0, 0);
-	if (hvrc != 0) {
-		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
-			   (int)hvrc);
-		return -EIO;
-	}
-
-	wait_for_completion(&we.com);
-
-	if (we.rc != 0)
-		return -EIO;
-	return 0;
-}
-
-static int viocd_packet(struct cdrom_device_info *cdi,
-		struct packet_command *cgc)
-{
-	unsigned int buflen = cgc->buflen;
-	int ret = -EIO;
-
-	switch (cgc->cmd[0]) {
-	case GPCMD_READ_DISC_INFO:
-	{
-		disc_information *di = (disc_information *)cgc->buffer;
-
-		if (buflen >= 2) {
-			di->disc_information_length = cpu_to_be16(1);
-			ret = 0;
-		}
-		if (buflen >= 3)
-			di->erasable =
-				(cdi->ops->capability & ~cdi->mask
-				 & (CDC_DVD_RAM | CDC_RAM)) != 0;
-	}
-	break;
-	case GPCMD_GET_CONFIGURATION:
-		if (cgc->cmd[3] == CDF_RWRT) {
-			struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)
-				(cgc->buffer + sizeof(struct feature_header));
-
-			if ((buflen >=
-			     (sizeof(struct feature_header) + sizeof(*rfd))) &&
-			    (cdi->ops->capability & ~cdi->mask
-			     & (CDC_DVD_RAM | CDC_RAM))) {
-				rfd->feature_code = cpu_to_be16(CDF_RWRT);
-				rfd->curr = 1;
-				ret = 0;
-			}
-		}
-		break;
-	default:
-		if (cgc->sense) {
-			/* indicate Unknown code */
-			cgc->sense->sense_key = 0x05;
-			cgc->sense->asc = 0x20;
-			cgc->sense->ascq = 0x00;
-		}
-		break;
-	}
-
-	cgc->stat = ret;
-	return ret;
-}
-
-static void restart_all_queues(int first_index)
-{
-	int i;
-
-	for (i = first_index + 1; i < viocd_numdev; i++)
-		if (viocd_diskinfo[i].viocd_disk)
-			blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
-	for (i = 0; i <= first_index; i++)
-		if (viocd_diskinfo[i].viocd_disk)
-			blk_run_queue(viocd_diskinfo[i].viocd_disk->queue);
-}
-
-/* This routine handles incoming CD LP events */
-static void vio_handle_cd_event(struct HvLpEvent *event)
-{
-	struct viocdlpevent *bevent;
-	struct viocd_waitevent *pwe;
-	struct disk_info *di;
-	unsigned long flags;
-	struct request *req;
-
-
-	if (event == NULL)
-		/* Notification that a partition went away! */
-		return;
-	/* First, we should NEVER get an int here...only acks */
-	if (hvlpevent_is_int(event)) {
-		pr_warning("Yikes! got an int in viocd event handler!\n");
-		if (hvlpevent_need_ack(event)) {
-			event->xRc = HvLpEvent_Rc_InvalidSubtype;
-			HvCallEvent_ackLpEvent(event);
-		}
-	}
-
-	bevent = (struct viocdlpevent *)event;
-
-	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
-	case viocdopen:
-		if (event->xRc == 0) {
-			di = &viocd_diskinfo[bevent->disk];
-			blk_queue_logical_block_size(di->viocd_disk->queue,
-						     bevent->block_size);
-			set_capacity(di->viocd_disk,
-				     bevent->media_size *
-				     bevent->block_size / 512);
-		}
-		/* FALLTHROUGH !! */
-	case viocdlockdoor:
-		pwe = (struct viocd_waitevent *)event->xCorrelationToken;
-return_complete:
-		pwe->rc = event->xRc;
-		pwe->sub_result = bevent->sub_result;
-		complete(&pwe->com);
-		break;
-
-	case viocdcheck:
-		pwe = (struct viocd_waitevent *)event->xCorrelationToken;
-		pwe->changed = bevent->flags;
-		goto return_complete;
-
-	case viocdclose:
-		break;
-
-	case viocdwrite:
-	case viocdread:
-		/*
-		 * Since this is running in interrupt mode, we need to
-		 * make sure we're not stepping on any global I/O operations
-		 */
-		di = &viocd_diskinfo[bevent->disk];
-		spin_lock_irqsave(&viocd_reqlock, flags);
-		dma_unmap_single(di->dev, bevent->token, bevent->len,
-				((event->xSubtype & VIOMINOR_SUBTYPE_MASK) == viocdread)
-				? DMA_FROM_DEVICE : DMA_TO_DEVICE);
-		req = (struct request *)bevent->event.xCorrelationToken;
-		rwreq--;
-
-		if (event->xRc != HvLpEvent_Rc_Good) {
-			const struct vio_error_entry *err =
-				vio_lookup_rc(viocd_err_table,
-					      bevent->sub_result);
-			pr_warning("request %p failed with rc %d:0x%04X: %s\n",
-				   req, event->xRc,
-				   bevent->sub_result, err->msg);
-			__blk_end_request_all(req, -EIO);
-		} else
-			__blk_end_request_all(req, 0);
-
-		/* restart handling of incoming requests */
-		spin_unlock_irqrestore(&viocd_reqlock, flags);
-		restart_all_queues(bevent->disk);
-		break;
-
-	default:
-		pr_warning("message with invalid subtype 0x%04X!\n",
-			   event->xSubtype & VIOMINOR_SUBTYPE_MASK);
-		if (hvlpevent_need_ack(event)) {
-			event->xRc = HvLpEvent_Rc_InvalidSubtype;
-			HvCallEvent_ackLpEvent(event);
-		}
-	}
-}
-
-static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
-			     void *arg)
-{
-	return -EINVAL;
-}
-
-static struct cdrom_device_ops viocd_dops = {
-	.open = viocd_open,
-	.release = viocd_release,
-	.check_events = viocd_check_events,
-	.lock_door = viocd_lock_door,
-	.generic_packet = viocd_packet,
-	.audio_ioctl = viocd_audio_ioctl,
-	.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
-		      CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION |
-		      CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO |
-		      CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET |
-		      CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R |
-		      CDC_DVD_RAM | CDC_RAM
-};
-
-static int find_capability(const char *type)
-{
-	struct capability_entry *entry;
-
-	for (entry = capability_table; entry->type; ++entry)
-		if (!strncmp(entry->type, type, 4))
-			break;
-	return entry->capability;
-}
-
-static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
-{
-	struct gendisk *gendisk;
-	int deviceno;
-	struct disk_info *d;
-	struct cdrom_device_info *c;
-	struct request_queue *q;
-	struct device_node *node = vdev->dev.of_node;
-
-	deviceno = vdev->unit_address;
-	if (deviceno >= VIOCD_MAX_CD)
-		return -ENODEV;
-	if (!node)
-		return -ENODEV;
-
-	if (deviceno >= viocd_numdev)
-		viocd_numdev = deviceno + 1;
-
-	d = &viocd_diskinfo[deviceno];
-	d->rsrcname = of_get_property(node, "linux,vio_rsrcname", NULL);
-	d->type = of_get_property(node, "linux,vio_type", NULL);
-	d->model = of_get_property(node, "linux,vio_model", NULL);
-
-	c = &d->viocd_info;
-
-	c->ops = &viocd_dops;
-	c->speed = 4;
-	c->capacity = 1;
-	c->handle = d;
-	c->mask = ~find_capability(d->type);
-	sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
-
-	if (register_cdrom(c) != 0) {
-		pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
-		goto out;
-	}
-	pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
-		c->name, d->rsrcname, d->type, d->model);
-	q = blk_init_queue(do_viocd_request, &viocd_reqlock);
-	if (q == NULL) {
-		pr_warning("Cannot allocate queue for %s!\n", c->name);
-		goto out_unregister_cdrom;
-	}
-	gendisk = alloc_disk(1);
-	if (gendisk == NULL) {
-		pr_warning("Cannot create gendisk for %s!\n", c->name);
-		goto out_cleanup_queue;
-	}
-	gendisk->major = VIOCD_MAJOR;
-	gendisk->first_minor = deviceno;
-	strncpy(gendisk->disk_name, c->name,
-		sizeof(gendisk->disk_name));
-	blk_queue_max_segments(q, 1);
-	blk_queue_max_hw_sectors(q, 4096 / 512);
-	gendisk->queue = q;
-	gendisk->fops = &viocd_fops;
-	gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
-			 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
-	set_capacity(gendisk, 0);
-	gendisk->private_data = d;
-	d->viocd_disk = gendisk;
-	d->dev = &vdev->dev;
-	gendisk->driverfs_dev = d->dev;
-	add_disk(gendisk);
-	return 0;
-
-out_cleanup_queue:
-	blk_cleanup_queue(q);
-out_unregister_cdrom:
-	unregister_cdrom(c);
-out:
-	return -ENODEV;
-}
-
-static int viocd_remove(struct vio_dev *vdev)
-{
-	struct disk_info *d = &viocd_diskinfo[vdev->unit_address];
-
-	unregister_cdrom(&d->viocd_info);
-	del_gendisk(d->viocd_disk);
-	blk_cleanup_queue(d->viocd_disk->queue);
-	put_disk(d->viocd_disk);
-	return 0;
-}
-
-/**
- * viocd_device_table: Used by vio.c to match devices that we
- * support.
- */
-static struct vio_device_id viocd_device_table[] __devinitdata = {
-	{ "block", "IBM,iSeries-viocd" },
-	{ "", "" }
-};
-MODULE_DEVICE_TABLE(vio, viocd_device_table);
-
-static struct vio_driver viocd_driver = {
-	.id_table = viocd_device_table,
-	.probe = viocd_probe,
-	.remove = viocd_remove,
-	.driver = {
-		.name = "viocd",
-		.owner = THIS_MODULE,
-	}
-};
-
-static int __init viocd_init(void)
-{
-	int ret = 0;
-
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		return -ENODEV;
-
-	if (viopath_hostLp == HvLpIndexInvalid) {
-		vio_set_hostlp();
-		/* If we don't have a host, bail out */
-		if (viopath_hostLp == HvLpIndexInvalid)
-			return -ENODEV;
-	}
-
-	pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);
-
-	if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
-		pr_warning("Unable to get major %d for %s\n",
-			   VIOCD_MAJOR, VIOCD_DEVICE);
-		return -EIO;
-	}
-
-	ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
-			   MAX_CD_REQ + 2);
-	if (ret) {
-		pr_warning("error opening path to host partition %d\n",
-			   viopath_hostLp);
-		goto out_unregister;
-	}
-
-	/* Initialize our request handler */
-	vio_setHandler(viomajorsubtype_cdio, vio_handle_cd_event);
-
-	spin_lock_init(&viocd_reqlock);
-
-	ret = vio_register_driver(&viocd_driver);
-	if (ret)
-		goto out_free_info;
-
-	proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
-		    &proc_viocd_operations);
-	return 0;
-
-out_free_info:
-	vio_clearHandler(viomajorsubtype_cdio);
-	viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
-out_unregister:
-	unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
-	return ret;
-}
-
-static void __exit viocd_exit(void)
-{
-	remove_proc_entry("iSeries/viocd", NULL);
-	vio_unregister_driver(&viocd_driver);
-	viopath_close(viopath_hostLp, viomajorsubtype_cdio, MAX_CD_REQ + 2);
-	vio_clearHandler(viomajorsubtype_cdio);
-	unregister_blkdev(VIOCD_MAJOR, VIOCD_DEVICE);
-}
-
-module_init(viocd_init);
-module_exit(viocd_exit);
-MODULE_LICENSE("GPL");
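The capability_table/find_capability() pair in the file above maps the iSeries
device type string to the drive's real capabilities, with the NULL-typed final
entry acting as a catch-all default (CDC_LOCK); the mask handed to the cdrom
layer is then the complement of that value, i.e. c->mask =
~find_capability(d->type).  A minimal userspace sketch of the same
first-match-with-default lookup, with stand-in capability bits rather than the
real CDC_* flags:

#include <stdio.h>
#include <string.h>

/* Stand-in capability bits; the real driver uses CDC_* from <linux/cdrom.h>. */
#define CAP_LOCK	0x1
#define CAP_DVD_RAM	0x2
#define CAP_RAM		0x4

struct capability_entry {
	const char *type;
	int capability;
};

/* Mirrors the shape of capability_table: the NULL type terminates the scan
 * and supplies the default for unknown models. */
static const struct capability_entry table[] = {
	{ "6330", CAP_LOCK | CAP_DVD_RAM | CAP_RAM },
	{ "632B", 0 },
	{ NULL,   CAP_LOCK },
};

static int find_capability(const char *type)
{
	const struct capability_entry *entry;

	/* Only the first 4 bytes of the type string are compared, as in the driver. */
	for (entry = table; entry->type; ++entry)
		if (!strncmp(entry->type, type, 4))
			break;
	return entry->capability;	/* falls through to the NULL default */
}

int main(void)
{
	printf("6330 -> %#x\n", find_capability("6330"));	/* 0x7 */
	printf("9999 -> %#x\n", find_capability("9999"));	/* 0x1 (default) */
	return 0;
}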
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
deleted file mode 100644
index 8b34c65511e..00000000000
--- a/drivers/char/viotape.c
+++ /dev/null
@@ -1,1041 +0,0 @@
1/* -*- linux-c -*-
2 * drivers/char/viotape.c
3 *
4 * iSeries Virtual Tape
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 * Stephen Rothwell
10 *
11 * (C) Copyright 2000-2004 IBM Corporation
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; either version 2 of the
16 * License, or (at your option) anyu later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software Foundation,
25 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * This routine provides access to tape drives owned and managed by an OS/400
28 * partition running on the same box as this Linux partition.
29 *
30 * All tape operations are performed by sending messages back and forth to
31 * the OS/400 partition. The format of the messages is defined in
32 * iseries/vio.h
33 */
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/errno.h>
37#include <linux/init.h>
38#include <linux/wait.h>
39#include <linux/spinlock.h>
40#include <linux/mtio.h>
41#include <linux/device.h>
42#include <linux/dma-mapping.h>
43#include <linux/fs.h>
44#include <linux/cdev.h>
45#include <linux/major.h>
46#include <linux/completion.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/mutex.h>
50#include <linux/slab.h>
51
52#include <asm/uaccess.h>
53#include <asm/ioctls.h>
54#include <asm/firmware.h>
55#include <asm/vio.h>
56#include <asm/iseries/vio.h>
57#include <asm/iseries/hv_lp_event.h>
58#include <asm/iseries/hv_call_event.h>
59#include <asm/iseries/hv_lp_config.h>
60
61#define VIOTAPE_VERSION "1.2"
62#define VIOTAPE_MAXREQ 1
63
64#define VIOTAPE_KERN_WARN KERN_WARNING "viotape: "
65#define VIOTAPE_KERN_INFO KERN_INFO "viotape: "
66
67static DEFINE_MUTEX(proc_viotape_mutex);
68static int viotape_numdev;
69
70/*
71 * The minor number follows the conventions of the SCSI tape drives. The
72 * rewind and mode are encoded in the minor #. We use this struct to break
73 * them out
74 */
75struct viot_devinfo_struct {
76 int devno;
77 int mode;
78 int rewind;
79};
80
81#define VIOTAPOP_RESET 0
82#define VIOTAPOP_FSF 1
83#define VIOTAPOP_BSF 2
84#define VIOTAPOP_FSR 3
85#define VIOTAPOP_BSR 4
86#define VIOTAPOP_WEOF 5
87#define VIOTAPOP_REW 6
88#define VIOTAPOP_NOP 7
89#define VIOTAPOP_EOM 8
90#define VIOTAPOP_ERASE 9
91#define VIOTAPOP_SETBLK 10
92#define VIOTAPOP_SETDENSITY 11
93#define VIOTAPOP_SETPOS 12
94#define VIOTAPOP_GETPOS 13
95#define VIOTAPOP_SETPART 14
96#define VIOTAPOP_UNLOAD 15
97
98enum viotaperc {
99 viotape_InvalidRange = 0x0601,
100 viotape_InvalidToken = 0x0602,
101 viotape_DMAError = 0x0603,
102 viotape_UseError = 0x0604,
103 viotape_ReleaseError = 0x0605,
104 viotape_InvalidTape = 0x0606,
105 viotape_InvalidOp = 0x0607,
106 viotape_TapeErr = 0x0608,
107
108 viotape_AllocTimedOut = 0x0640,
109 viotape_BOTEnc = 0x0641,
110 viotape_BlankTape = 0x0642,
111 viotape_BufferEmpty = 0x0643,
112 viotape_CleanCartFound = 0x0644,
113 viotape_CmdNotAllowed = 0x0645,
114 viotape_CmdNotSupported = 0x0646,
115 viotape_DataCheck = 0x0647,
116 viotape_DecompressErr = 0x0648,
117 viotape_DeviceTimeout = 0x0649,
118 viotape_DeviceUnavail = 0x064a,
119 viotape_DeviceBusy = 0x064b,
120 viotape_EndOfMedia = 0x064c,
121 viotape_EndOfTape = 0x064d,
122 viotape_EquipCheck = 0x064e,
123 viotape_InsufficientRs = 0x064f,
124 viotape_InvalidLogBlk = 0x0650,
125 viotape_LengthError = 0x0651,
126 viotape_LibDoorOpen = 0x0652,
127 viotape_LoadFailure = 0x0653,
128 viotape_NotCapable = 0x0654,
129 viotape_NotOperational = 0x0655,
130 viotape_NotReady = 0x0656,
131 viotape_OpCancelled = 0x0657,
132 viotape_PhyLinkErr = 0x0658,
133 viotape_RdyNotBOT = 0x0659,
134 viotape_TapeMark = 0x065a,
135 viotape_WriteProt = 0x065b
136};
137
138static const struct vio_error_entry viotape_err_table[] = {
139 { viotape_InvalidRange, EIO, "Internal error" },
140 { viotape_InvalidToken, EIO, "Internal error" },
141 { viotape_DMAError, EIO, "DMA error" },
142 { viotape_UseError, EIO, "Internal error" },
143 { viotape_ReleaseError, EIO, "Internal error" },
144 { viotape_InvalidTape, EIO, "Invalid tape device" },
145 { viotape_InvalidOp, EIO, "Invalid operation" },
146 { viotape_TapeErr, EIO, "Tape error" },
147 { viotape_AllocTimedOut, EBUSY, "Allocate timed out" },
148 { viotape_BOTEnc, EIO, "Beginning of tape encountered" },
149 { viotape_BlankTape, EIO, "Blank tape" },
150 { viotape_BufferEmpty, EIO, "Buffer empty" },
151 { viotape_CleanCartFound, ENOMEDIUM, "Cleaning cartridge found" },
152 { viotape_CmdNotAllowed, EIO, "Command not allowed" },
153 { viotape_CmdNotSupported, EIO, "Command not supported" },
154 { viotape_DataCheck, EIO, "Data check" },
155 { viotape_DecompressErr, EIO, "Decompression error" },
156 { viotape_DeviceTimeout, EBUSY, "Device timeout" },
157 { viotape_DeviceUnavail, EIO, "Device unavailable" },
158 { viotape_DeviceBusy, EBUSY, "Device busy" },
159 { viotape_EndOfMedia, ENOSPC, "End of media" },
160 { viotape_EndOfTape, ENOSPC, "End of tape" },
161 { viotape_EquipCheck, EIO, "Equipment check" },
162 { viotape_InsufficientRs, EOVERFLOW, "Insufficient tape resources" },
163 { viotape_InvalidLogBlk, EIO, "Invalid logical block location" },
164 { viotape_LengthError, EOVERFLOW, "Length error" },
165 { viotape_LibDoorOpen, EBUSY, "Door open" },
166 { viotape_LoadFailure, ENOMEDIUM, "Load failure" },
167 { viotape_NotCapable, EIO, "Not capable" },
168 { viotape_NotOperational, EIO, "Not operational" },
169 { viotape_NotReady, EIO, "Not ready" },
170 { viotape_OpCancelled, EIO, "Operation cancelled" },
171 { viotape_PhyLinkErr, EIO, "Physical link error" },
172 { viotape_RdyNotBOT, EIO, "Ready but not beginning of tape" },
173 { viotape_TapeMark, EIO, "Tape mark" },
174 { viotape_WriteProt, EROFS, "Write protection error" },
175 { 0, 0, NULL },
176};
177
178/* Maximum number of tapes we support */
179#define VIOTAPE_MAX_TAPE HVMAXARCHITECTEDVIRTUALTAPES
180#define MAX_PARTITIONS 4
181
182/* defines for current tape state */
183#define VIOT_IDLE 0
184#define VIOT_READING 1
185#define VIOT_WRITING 2
186
187/* Our info on the tapes */
188static struct {
189 const char *rsrcname;
190 const char *type;
191 const char *model;
192} viotape_unitinfo[VIOTAPE_MAX_TAPE];
193
194static struct mtget viomtget[VIOTAPE_MAX_TAPE];
195
196static struct class *tape_class;
197
198static struct device *tape_device[VIOTAPE_MAX_TAPE];
199
200/*
201 * maintain the current state of each tape (and partition)
202 * so that we know when to write EOF marks.
203 */
204static struct {
205 unsigned char cur_part;
206 unsigned char part_stat_rwi[MAX_PARTITIONS];
207} state[VIOTAPE_MAX_TAPE];
208
209/* We single-thread */
210static struct semaphore reqSem;
211
212/*
213 * When we send a request, we use this struct to get the response back
214 * from the interrupt handler
215 */
216struct op_struct {
217 void *buffer;
218 dma_addr_t dmaaddr;
219 size_t count;
220 int rc;
221 int non_blocking;
222 struct completion com;
223 struct device *dev;
224 struct op_struct *next;
225};
226
227static spinlock_t op_struct_list_lock;
228static struct op_struct *op_struct_list;
229
230/* forward declaration to resolve interdependence */
231static int chg_state(int index, unsigned char new_state, struct file *file);
232
233/* procfs support */
234static int proc_viotape_show(struct seq_file *m, void *v)
235{
236 int i;
237
238 seq_printf(m, "viotape driver version " VIOTAPE_VERSION "\n");
239 for (i = 0; i < viotape_numdev; i++) {
240 seq_printf(m, "viotape device %d is iSeries resource %10.10s"
241 "type %4.4s, model %3.3s\n",
242 i, viotape_unitinfo[i].rsrcname,
243 viotape_unitinfo[i].type,
244 viotape_unitinfo[i].model);
245 }
246 return 0;
247}
248
249static int proc_viotape_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, proc_viotape_show, NULL);
252}
253
254static const struct file_operations proc_viotape_operations = {
255 .owner = THIS_MODULE,
256 .open = proc_viotape_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260};
261
262/* Decode the device minor number into its parts */
263void get_dev_info(struct inode *ino, struct viot_devinfo_struct *devi)
264{
265 devi->devno = iminor(ino) & 0x1F;
266 devi->mode = (iminor(ino) & 0x60) >> 5;
267 /* if bit is set in the minor, do _not_ rewind automatically */
268 devi->rewind = (iminor(ino) & 0x80) == 0;
269}
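/*
 * Worked example (illustrative, not part of the original source): a
 * minor number of 0x85 (binary 1000 0101) decodes to devno = 5 and
 * mode = 0, and because bit 7 is set, rewind = 0 -- i.e. the
 * non-rewinding node iseries/nvt5 created by viotape_probe() below.
 */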
270
271/* This is called only from the exit and init paths, so no need for locking */
272static void clear_op_struct_pool(void)
273{
274 while (op_struct_list) {
275 struct op_struct *toFree = op_struct_list;
276 op_struct_list = op_struct_list->next;
277 kfree(toFree);
278 }
279}
280
281/* Likewise, this is only called from the init path */
282static int add_op_structs(int structs)
283{
284 int i;
285
286 for (i = 0; i < structs; ++i) {
287 struct op_struct *new_struct =
288 kmalloc(sizeof(*new_struct), GFP_KERNEL);
289 if (!new_struct) {
290 clear_op_struct_pool();
291 return -ENOMEM;
292 }
293 new_struct->next = op_struct_list;
294 op_struct_list = new_struct;
295 }
296 return 0;
297}
298
299/* Allocate an op structure from our pool */
300static struct op_struct *get_op_struct(void)
301{
302 struct op_struct *retval;
303 unsigned long flags;
304
305 spin_lock_irqsave(&op_struct_list_lock, flags);
306 retval = op_struct_list;
307 if (retval)
308 op_struct_list = retval->next;
309 spin_unlock_irqrestore(&op_struct_list_lock, flags);
310 if (retval) {
311 memset(retval, 0, sizeof(*retval));
312 init_completion(&retval->com);
313 }
314
315 return retval;
316}
317
318/* Return an op structure to our pool */
319static void free_op_struct(struct op_struct *op_struct)
320{
321 unsigned long flags;
322
323 spin_lock_irqsave(&op_struct_list_lock, flags);
324 op_struct->next = op_struct_list;
325 op_struct_list = op_struct;
326 spin_unlock_irqrestore(&op_struct_list_lock, flags);
327}
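/*
 * Request lifecycle sketch (assembled from the callers below, not a
 * new code path): every entry point follows the same pattern around
 * this pool.
 *
 *	struct op_struct *op = get_op_struct();
 *	if (op == NULL)
 *		return -ENOMEM;
 *	init_completion(&op->com);
 *	... signal the LP event with op as the correlation token ...
 *	wait_for_completion(&op->com);
 *	ret = tape_rc_to_errno(op->rc, "operation", devi.devno);
 *	free_op_struct(op);
 */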
328
329/* Map our tape return codes to errno values */
330int tape_rc_to_errno(int tape_rc, char *operation, int tapeno)
331{
332 const struct vio_error_entry *err;
333
334 if (tape_rc == 0)
335 return 0;
336
337 err = vio_lookup_rc(viotape_err_table, tape_rc);
338 printk(VIOTAPE_KERN_WARN "error(%s) 0x%04x on Device %d (%-10s): %s\n",
339 operation, tape_rc, tapeno,
340 viotape_unitinfo[tapeno].rsrcname, err->msg);
341 return -err->errno;
342}
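/*
 * For example, a tape_rc of 0x065b (viotape_WriteProt) is found in
 * viotape_err_table above, logs "Write protection error" and returns
 * -EROFS to the caller.
 */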
343
344/* Write */
345static ssize_t viotap_write(struct file *file, const char *buf,
346 size_t count, loff_t * ppos)
347{
348 HvLpEvent_Rc hvrc;
349 unsigned short flags = file->f_flags;
350 int noblock = ((flags & O_NONBLOCK) != 0);
351 ssize_t ret;
352 struct viot_devinfo_struct devi;
353 struct op_struct *op = get_op_struct();
354
355 if (op == NULL)
356 return -ENOMEM;
357
358 get_dev_info(file->f_path.dentry->d_inode, &devi);
359
360 /*
361 * We need to make sure we can send a request. We use
362 * a semaphore to keep track of # requests in use. If
363 * we are non-blocking, make sure we don't block on the
364 * semaphore
365 */
366 if (noblock) {
367 if (down_trylock(&reqSem)) {
368 ret = -EWOULDBLOCK;
369 goto free_op;
370 }
371 } else
372 down(&reqSem);
373
374 /* Allocate a DMA buffer */
375 op->dev = tape_device[devi.devno];
376 op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
377 GFP_ATOMIC);
378
379 if (op->buffer == NULL) {
380 printk(VIOTAPE_KERN_WARN
381 "error allocating dma buffer for len %ld\n",
382 count);
383 ret = -EFAULT;
384 goto up_sem;
385 }
386
387 /* Copy the data into the buffer */
388 if (copy_from_user(op->buffer, buf, count)) {
389 printk(VIOTAPE_KERN_WARN "tape: error on copy from user\n");
390 ret = -EFAULT;
391 goto free_dma;
392 }
393
394 op->non_blocking = noblock;
395 init_completion(&op->com);
396 op->count = count;
397
398 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
399 HvLpEvent_Type_VirtualIo,
400 viomajorsubtype_tape | viotapewrite,
401 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
402 viopath_sourceinst(viopath_hostLp),
403 viopath_targetinst(viopath_hostLp),
404 (u64)(unsigned long)op, VIOVERSION << 16,
405 ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
406 if (hvrc != HvLpEvent_Rc_Good) {
407 printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
408 (int)hvrc);
409 ret = -EIO;
410 goto free_dma;
411 }
412
413 if (noblock)
414 return count;
415
416 wait_for_completion(&op->com);
417
418 if (op->rc)
419 ret = tape_rc_to_errno(op->rc, "write", devi.devno);
420 else {
421 chg_state(devi.devno, VIOT_WRITING, file);
422 ret = op->count;
423 }
424
425free_dma:
426 dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
427up_sem:
428 up(&reqSem);
429free_op:
430 free_op_struct(op);
431 return ret;
432}
433
434/* read */
435static ssize_t viotap_read(struct file *file, char *buf, size_t count,
436 loff_t *ptr)
437{
438 HvLpEvent_Rc hvrc;
439 unsigned short flags = file->f_flags;
440 struct op_struct *op = get_op_struct();
441 int noblock = ((flags & O_NONBLOCK) != 0);
442 ssize_t ret;
443 struct viot_devinfo_struct devi;
444
445 if (op == NULL)
446 return -ENOMEM;
447
448 get_dev_info(file->f_path.dentry->d_inode, &devi);
449
450 /*
451 * We need to make sure we can send a request. We use
452 * a semaphore to keep track of # requests in use. If
453 * we are non-blocking, make sure we don't block on the
454 * semaphore
455 */
456 if (noblock) {
457 if (down_trylock(&reqSem)) {
458 ret = -EWOULDBLOCK;
459 goto free_op;
460 }
461 } else
462 down(&reqSem);
463
464 chg_state(devi.devno, VIOT_READING, file);
465
466 /* Allocate a DMA buffer */
467 op->dev = tape_device[devi.devno];
468 op->buffer = dma_alloc_coherent(op->dev, count, &op->dmaaddr,
469 GFP_ATOMIC);
470 if (op->buffer == NULL) {
471 ret = -EFAULT;
472 goto up_sem;
473 }
474
475 op->count = count;
476 init_completion(&op->com);
477
478 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
479 HvLpEvent_Type_VirtualIo,
480 viomajorsubtype_tape | viotaperead,
481 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
482 viopath_sourceinst(viopath_hostLp),
483 viopath_targetinst(viopath_hostLp),
484 (u64)(unsigned long)op, VIOVERSION << 16,
485 ((u64)devi.devno << 48) | op->dmaaddr, count, 0, 0);
486 if (hvrc != HvLpEvent_Rc_Good) {
487 printk(VIOTAPE_KERN_WARN "tape hv error on op %d\n",
488 (int)hvrc);
489 ret = -EIO;
490 goto free_dma;
491 }
492
493 wait_for_completion(&op->com);
494
495 if (op->rc)
496 ret = tape_rc_to_errno(op->rc, "read", devi.devno);
497 else {
498 ret = op->count;
499 if (ret && copy_to_user(buf, op->buffer, ret)) {
500 printk(VIOTAPE_KERN_WARN "error on copy_to_user\n");
501 ret = -EFAULT;
502 }
503 }
504
505free_dma:
506 dma_free_coherent(op->dev, count, op->buffer, op->dmaaddr);
507up_sem:
508 up(&reqSem);
509free_op:
510 free_op_struct(op);
511 return ret;
512}
513
514/* ioctl */
515static int viotap_ioctl(struct inode *inode, struct file *file,
516 unsigned int cmd, unsigned long arg)
517{
518 HvLpEvent_Rc hvrc;
519 int ret;
520 struct viot_devinfo_struct devi;
521 struct mtop mtc;
522 u32 myOp;
523 struct op_struct *op = get_op_struct();
524
525 if (op == NULL)
526 return -ENOMEM;
527
528 get_dev_info(file->f_path.dentry->d_inode, &devi);
529
530 down(&reqSem);
531
532 ret = -EINVAL;
533
534 switch (cmd) {
535 case MTIOCTOP:
536 ret = -EFAULT;
537 /*
538 * inode is null if and only if we (the kernel)
539 * made the request
540 */
541 if (inode == NULL)
542 memcpy(&mtc, (void *) arg, sizeof(struct mtop));
543 else if (copy_from_user((char *)&mtc, (char *)arg,
544 sizeof(struct mtop)))
545 goto free_op;
546
547 ret = -EIO;
548 switch (mtc.mt_op) {
549 case MTRESET:
550 myOp = VIOTAPOP_RESET;
551 break;
552 case MTFSF:
553 myOp = VIOTAPOP_FSF;
554 break;
555 case MTBSF:
556 myOp = VIOTAPOP_BSF;
557 break;
558 case MTFSR:
559 myOp = VIOTAPOP_FSR;
560 break;
561 case MTBSR:
562 myOp = VIOTAPOP_BSR;
563 break;
564 case MTWEOF:
565 myOp = VIOTAPOP_WEOF;
566 break;
567 case MTREW:
568 myOp = VIOTAPOP_REW;
569 break;
570 case MTNOP:
571 myOp = VIOTAPOP_NOP;
572 break;
573 case MTEOM:
574 myOp = VIOTAPOP_EOM;
575 break;
576 case MTERASE:
577 myOp = VIOTAPOP_ERASE;
578 break;
579 case MTSETBLK:
580 myOp = VIOTAPOP_SETBLK;
581 break;
582 case MTSETDENSITY:
583 myOp = VIOTAPOP_SETDENSITY;
584 break;
585 case MTTELL:
586 myOp = VIOTAPOP_GETPOS;
587 break;
588 case MTSEEK:
589 myOp = VIOTAPOP_SETPOS;
590 break;
591 case MTSETPART:
592 myOp = VIOTAPOP_SETPART;
593 break;
594 case MTOFFL:
595 myOp = VIOTAPOP_UNLOAD;
596 break;
597 default:
598 printk(VIOTAPE_KERN_WARN "MTIOCTOP called "
599 "with invalid op 0x%x\n", mtc.mt_op);
600 goto free_op;
601 }
602
603 /*
604 * if we moved the head, we are no longer
605 * reading or writing
606 */
607 switch (mtc.mt_op) {
608 case MTFSF:
609 case MTBSF:
610 case MTFSR:
611 case MTBSR:
612 case MTTELL:
613 case MTSEEK:
614 case MTREW:
615 chg_state(devi.devno, VIOT_IDLE, file);
616 }
617
618 init_completion(&op->com);
619 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
620 HvLpEvent_Type_VirtualIo,
621 viomajorsubtype_tape | viotapeop,
622 HvLpEvent_AckInd_DoAck,
623 HvLpEvent_AckType_ImmediateAck,
624 viopath_sourceinst(viopath_hostLp),
625 viopath_targetinst(viopath_hostLp),
626 (u64)(unsigned long)op,
627 VIOVERSION << 16,
628 ((u64)devi.devno << 48), 0,
629 (((u64)myOp) << 32) | mtc.mt_count, 0);
630 if (hvrc != HvLpEvent_Rc_Good) {
631 printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
632 (int)hvrc);
633 goto free_op;
634 }
635 wait_for_completion(&op->com);
636 ret = tape_rc_to_errno(op->rc, "tape operation", devi.devno);
637 goto free_op;
638
639 case MTIOCGET:
640 ret = -EIO;
641 init_completion(&op->com);
642 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
643 HvLpEvent_Type_VirtualIo,
644 viomajorsubtype_tape | viotapegetstatus,
645 HvLpEvent_AckInd_DoAck,
646 HvLpEvent_AckType_ImmediateAck,
647 viopath_sourceinst(viopath_hostLp),
648 viopath_targetinst(viopath_hostLp),
649 (u64)(unsigned long)op, VIOVERSION << 16,
650 ((u64)devi.devno << 48), 0, 0, 0);
651 if (hvrc != HvLpEvent_Rc_Good) {
652 printk(VIOTAPE_KERN_WARN "hv error on op %d\n",
653 (int)hvrc);
654 goto free_op;
655 }
656 wait_for_completion(&op->com);
657
658 /* Operation is complete - grab the error code */
659 ret = tape_rc_to_errno(op->rc, "get status", devi.devno);
660 free_op_struct(op);
661 up(&reqSem);
662
663 if ((ret == 0) && copy_to_user((void *)arg,
664 &viomtget[devi.devno],
665 sizeof(viomtget[0])))
666 ret = -EFAULT;
667 return ret;
668 case MTIOCPOS:
669 printk(VIOTAPE_KERN_WARN "Got an (unsupported) MTIOCPOS\n");
670 break;
671 default:
672 printk(VIOTAPE_KERN_WARN "got an unsupported ioctl 0x%0x\n",
673 cmd);
674 break;
675 }
676
677free_op:
678 free_op_struct(op);
679 up(&reqSem);
680 return ret;
681}
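/*
 * Usage example (illustrative): a userspace "mt -f /dev/iseries/vt0
 * rewind" arrives here as MTIOCTOP with mt_op = MTREW; the switch
 * above maps it to VIOTAPOP_REW, marks the partition VIOT_IDLE (the
 * head moved), and ships ((u64)VIOTAPOP_REW << 32) | mt_count to the
 * hypervisor in the event payload.
 */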
682
683static long viotap_unlocked_ioctl(struct file *file,
684 unsigned int cmd, unsigned long arg)
685{
686 long rc;
687
688 mutex_lock(&proc_viotape_mutex);
689 rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
690 mutex_unlock(&proc_viotape_mutex);
691 return rc;
692}
693
694static int viotap_open(struct inode *inode, struct file *file)
695{
696 HvLpEvent_Rc hvrc;
697 struct viot_devinfo_struct devi;
698 int ret;
699 struct op_struct *op = get_op_struct();
700
701 if (op == NULL)
702 return -ENOMEM;
703
704 mutex_lock(&proc_viotape_mutex);
705 get_dev_info(file->f_path.dentry->d_inode, &devi);
706
707 /* Note: We currently only support one mode! */
708 if ((devi.devno >= viotape_numdev) || (devi.mode)) {
709 ret = -ENODEV;
710 goto free_op;
711 }
712
713 init_completion(&op->com);
714
715 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
716 HvLpEvent_Type_VirtualIo,
717 viomajorsubtype_tape | viotapeopen,
718 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
719 viopath_sourceinst(viopath_hostLp),
720 viopath_targetinst(viopath_hostLp),
721 (u64)(unsigned long)op, VIOVERSION << 16,
722 ((u64)devi.devno << 48), 0, 0, 0);
723 if (hvrc != 0) {
724 printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
725 (int) hvrc);
726 ret = -EIO;
727 goto free_op;
728 }
729
730 wait_for_completion(&op->com);
731 ret = tape_rc_to_errno(op->rc, "open", devi.devno);
732
733free_op:
734 free_op_struct(op);
735 mutex_unlock(&proc_viotape_mutex);
736 return ret;
737}
738
739
740static int viotap_release(struct inode *inode, struct file *file)
741{
742 HvLpEvent_Rc hvrc;
743 struct viot_devinfo_struct devi;
744 int ret = 0;
745 struct op_struct *op = get_op_struct();
746
747 if (op == NULL)
748 return -ENOMEM;
749 init_completion(&op->com);
750
751 get_dev_info(file->f_path.dentry->d_inode, &devi);
752
753 if (devi.devno >= viotape_numdev) {
754 ret = -ENODEV;
755 goto free_op;
756 }
757
758 chg_state(devi.devno, VIOT_IDLE, file);
759
760 if (devi.rewind) {
761 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
762 HvLpEvent_Type_VirtualIo,
763 viomajorsubtype_tape | viotapeop,
764 HvLpEvent_AckInd_DoAck,
765 HvLpEvent_AckType_ImmediateAck,
766 viopath_sourceinst(viopath_hostLp),
767 viopath_targetinst(viopath_hostLp),
768 (u64)(unsigned long)op, VIOVERSION << 16,
769 ((u64)devi.devno << 48), 0,
770 ((u64)VIOTAPOP_REW) << 32, 0);
771 wait_for_completion(&op->com);
772
773 tape_rc_to_errno(op->rc, "rewind", devi.devno);
774 }
775
776 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
777 HvLpEvent_Type_VirtualIo,
778 viomajorsubtype_tape | viotapeclose,
779 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
780 viopath_sourceinst(viopath_hostLp),
781 viopath_targetinst(viopath_hostLp),
782 (u64)(unsigned long)op, VIOVERSION << 16,
783 ((u64)devi.devno << 48), 0, 0, 0);
784 if (hvrc != 0) {
785 printk(VIOTAPE_KERN_WARN "bad rc on signalLpEvent %d\n",
786 (int) hvrc);
787 ret = -EIO;
788 goto free_op;
789 }
790
791 wait_for_completion(&op->com);
792
793 if (op->rc)
794 printk(VIOTAPE_KERN_WARN "close failed\n");
795
796free_op:
797 free_op_struct(op);
798 return ret;
799}
800
801const struct file_operations viotap_fops = {
802 .owner = THIS_MODULE,
803 .read = viotap_read,
804 .write = viotap_write,
805 .unlocked_ioctl = viotap_unlocked_ioctl,
806 .open = viotap_open,
807 .release = viotap_release,
808 .llseek = noop_llseek,
809};
810
811/* Handle interrupt events for tape */
812static void vioHandleTapeEvent(struct HvLpEvent *event)
813{
814 int tapeminor;
815 struct op_struct *op;
816 struct viotapelpevent *tevent = (struct viotapelpevent *)event;
817
818 if (event == NULL) {
819 /* Notification that a partition went away! */
820 if (!viopath_isactive(viopath_hostLp)) {
821 /* TODO! Clean up */
822 }
823 return;
824 }
825
826 tapeminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
827 op = (struct op_struct *)event->xCorrelationToken;
828 switch (tapeminor) {
829 case viotapeopen:
830 case viotapeclose:
831 op->rc = tevent->sub_type_result;
832 complete(&op->com);
833 break;
834 case viotaperead:
835 op->rc = tevent->sub_type_result;
836 op->count = tevent->len;
837 complete(&op->com);
838 break;
839 case viotapewrite:
840 if (op->non_blocking) {
841 dma_free_coherent(op->dev, op->count,
842 op->buffer, op->dmaaddr);
843 free_op_struct(op);
844 up(&reqSem);
845 } else {
846 op->rc = tevent->sub_type_result;
847 op->count = tevent->len;
848 complete(&op->com);
849 }
850 break;
851 case viotapeop:
852 case viotapegetpos:
853 case viotapesetpos:
854 case viotapegetstatus:
855 if (op) {
856 op->count = tevent->u.op.count;
857 op->rc = tevent->sub_type_result;
858 if (!op->non_blocking)
859 complete(&op->com);
860 }
861 break;
862 default:
863 printk(VIOTAPE_KERN_WARN "weird ack\n");
864 }
865}
866
867static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
868{
869 int i = vdev->unit_address;
870 int j;
871 struct device_node *node = vdev->dev.of_node;
872
873 if (i >= VIOTAPE_MAX_TAPE)
874 return -ENODEV;
875 if (!node)
876 return -ENODEV;
877
878 if (i >= viotape_numdev)
879 viotape_numdev = i + 1;
880
881 tape_device[i] = &vdev->dev;
882 viotape_unitinfo[i].rsrcname = of_get_property(node,
883 "linux,vio_rsrcname", NULL);
884 viotape_unitinfo[i].type = of_get_property(node, "linux,vio_type",
885 NULL);
886 viotape_unitinfo[i].model = of_get_property(node, "linux,vio_model",
887 NULL);
888
889 state[i].cur_part = 0;
890 for (j = 0; j < MAX_PARTITIONS; ++j)
891 state[i].part_stat_rwi[j] = VIOT_IDLE;
892 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i), NULL,
893 "iseries!vt%d", i);
894 device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), NULL,
895 "iseries!nvt%d", i);
896 printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
897 "resource %10.10s type %4.4s, model %3.3s\n",
898 i, viotape_unitinfo[i].rsrcname,
899 viotape_unitinfo[i].type, viotape_unitinfo[i].model);
900 return 0;
901}
902
903static int viotape_remove(struct vio_dev *vdev)
904{
905 int i = vdev->unit_address;
906
907 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i | 0x80));
908 device_destroy(tape_class, MKDEV(VIOTAPE_MAJOR, i));
909 return 0;
910}
911
912/**
913 * viotape_device_table: Used by vio.c to match devices that we
914 * support.
915 */
916static struct vio_device_id viotape_device_table[] __devinitdata = {
917 { "byte", "IBM,iSeries-viotape" },
918 { "", "" }
919};
920MODULE_DEVICE_TABLE(vio, viotape_device_table);
921
922static struct vio_driver viotape_driver = {
923 .id_table = viotape_device_table,
924 .probe = viotape_probe,
925 .remove = viotape_remove,
926 .driver = {
927 .name = "viotape",
928 .owner = THIS_MODULE,
929 }
930};
931
932
933int __init viotap_init(void)
934{
935 int ret;
936
937 if (!firmware_has_feature(FW_FEATURE_ISERIES))
938 return -ENODEV;
939
940 op_struct_list = NULL;
941 if ((ret = add_op_structs(VIOTAPE_MAXREQ)) < 0) {
942 printk(VIOTAPE_KERN_WARN "couldn't allocate op structs\n");
943 return ret;
944 }
945 spin_lock_init(&op_struct_list_lock);
946
947 sema_init(&reqSem, VIOTAPE_MAXREQ);
948
949 if (viopath_hostLp == HvLpIndexInvalid) {
950 vio_set_hostlp();
951 if (viopath_hostLp == HvLpIndexInvalid) {
952 ret = -ENODEV;
953 goto clear_op;
954 }
955 }
956
957 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape,
958 VIOTAPE_MAXREQ + 2);
959 if (ret) {
960 printk(VIOTAPE_KERN_WARN
961 "error on viopath_open to hostlp %d\n", ret);
962 ret = -EIO;
963 goto clear_op;
964 }
965
966 printk(VIOTAPE_KERN_INFO "vers " VIOTAPE_VERSION
967 ", hosting partition %d\n", viopath_hostLp);
968
969 vio_setHandler(viomajorsubtype_tape, vioHandleTapeEvent);
970
971 ret = register_chrdev(VIOTAPE_MAJOR, "viotape", &viotap_fops);
972 if (ret < 0) {
973 printk(VIOTAPE_KERN_WARN "Error registering viotape device\n");
974 goto clear_handler;
975 }
976
977 tape_class = class_create(THIS_MODULE, "tape");
978 if (IS_ERR(tape_class)) {
979 printk(VIOTAPE_KERN_WARN "Unable to allocate class\n");
980 ret = PTR_ERR(tape_class);
981 goto unreg_chrdev;
982 }
983
984 ret = vio_register_driver(&viotape_driver);
985 if (ret)
986 goto unreg_class;
987
988 proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
989 &proc_viotape_operations);
990
991 return 0;
992
993unreg_class:
994 class_destroy(tape_class);
995unreg_chrdev:
996 unregister_chrdev(VIOTAPE_MAJOR, "viotape");
997clear_handler:
998 vio_clearHandler(viomajorsubtype_tape);
999 viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
1000clear_op:
1001 clear_op_struct_pool();
1002 return ret;
1003}
1004
1005/* Give a new state to the tape object */
1006static int chg_state(int index, unsigned char new_state, struct file *file)
1007{
1008 unsigned char *cur_state =
1009 &state[index].part_stat_rwi[state[index].cur_part];
1010 int rc = 0;
1011
1012 /* if the same state, don't bother */
1013 if (*cur_state == new_state)
1014 return 0;
1015
1016 /* write an EOF if changing from writing to some other state */
1017 if (*cur_state == VIOT_WRITING) {
1018 struct mtop write_eof = { MTWEOF, 1 };
1019
1020 rc = viotap_ioctl(NULL, file, MTIOCTOP,
1021 (unsigned long)&write_eof);
1022 }
1023 *cur_state = new_state;
1024 return rc;
1025}
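/*
 * For example, releasing a device whose current partition is in
 * VIOT_WRITING makes chg_state() issue MTIOCTOP/MTWEOF with a count
 * of one before recording VIOT_IDLE, so a written tape always ends
 * with a filemark.
 */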
1026
1027/* Cleanup */
1028static void __exit viotap_exit(void)
1029{
1030 remove_proc_entry("iSeries/viotape", NULL);
1031 vio_unregister_driver(&viotape_driver);
1032 class_destroy(tape_class);
1033 unregister_chrdev(VIOTAPE_MAJOR, "viotape");
1034 viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
1035 vio_clearHandler(viomajorsubtype_tape);
1036 clear_op_struct_pool();
1037}
1038
1039MODULE_LICENSE("GPL");
1040module_init(viotap_init);
1041module_exit(viotap_exit);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d0c41188d4e..0409cf35add 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -190,6 +190,17 @@ config GPIO_VX855
190 additional drivers must be enabled in order to use the 190 additional drivers must be enabled in order to use the
191 functionality of the device. 191 functionality of the device.
192 192
193config GPIO_GE_FPGA
194 bool "GE FPGA based GPIO"
195 depends on GE_FPGA
196 help
197 Support for common GPIO functionality provided on some GE Single Board
198 Computers.
199
200 This driver provides basic support (configure as input or output, read
201 and write pin state) for GPIO implemented in a number of GE single
202 board computers.
203
193comment "I2C GPIO expanders:" 204comment "I2C GPIO expanders:"
194 205
195config GPIO_MAX7300 206config GPIO_MAX7300
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fa10df604c0..9a8fb54ae46 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
16obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o 16obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
17obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o 17obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
18obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o 18obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
19obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
19obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o 20obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
20obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o 21obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
21obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o 22obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
new file mode 100644
index 00000000000..7b95a4a8318
--- /dev/null
+++ b/drivers/gpio/gpio-ge.c
@@ -0,0 +1,199 @@
1/*
2 * Driver for GE FPGA based GPIO
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 *
6 * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13/* TODO
14 *
15 * Configuration of output modes (totem-pole/open-drain)
16 * Interrupt configuration - interrupts are always generated; the FPGA relies
17 * on the I/O interrupt controller's mask to stop them propagating
18 */
19
20#include <linux/kernel.h>
21#include <linux/compiler.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_platform.h>
27#include <linux/of_gpio.h>
28#include <linux/gpio.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31
32#define GEF_GPIO_DIRECT 0x00
33#define GEF_GPIO_IN 0x04
34#define GEF_GPIO_OUT 0x08
35#define GEF_GPIO_TRIG 0x0C
36#define GEF_GPIO_POLAR_A 0x10
37#define GEF_GPIO_POLAR_B 0x14
38#define GEF_GPIO_INT_STAT 0x18
39#define GEF_GPIO_OVERRUN 0x1C
40#define GEF_GPIO_MODE 0x20
41
42static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
43{
44 unsigned int data;
45
46 data = ioread32be(reg);
47 /* value: 0=low; 1=high */
48 if (value & 0x1)
49 data = data | (0x1 << offset);
50 else
51 data = data & ~(0x1 << offset);
52
53 iowrite32be(data, reg);
54}
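/*
 * Worked example (illustrative): if the register currently reads
 * 0x00000005, _gef_gpio_set(reg, 1, 1) writes back 0x00000007 and
 * _gef_gpio_set(reg, 2, 0) writes back 0x00000001 -- a plain
 * read-modify-write of one bit per call.
 */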
55
56
57static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
58{
59 unsigned int data;
60 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
61
62 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
63 data = data | (0x1 << offset);
64 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
65
66 return 0;
67}
68
69static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
70{
71 unsigned int data;
72 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
73
74 /* Set the output value before switching the pin to output */
75 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
76
77 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
78 data = data & ~(0x1 << offset);
79 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
80
81 return 0;
82}
83
84static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
85{
86 unsigned int data;
87 int state = 0;
88 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
89
90 data = ioread32be(mmchip->regs + GEF_GPIO_IN);
91 state = (int)((data >> offset) & 0x1);
92
93 return state;
94}
95
96static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
97{
98 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
99
100 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
101}
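/*
 * Consumer sketch (illustrative only, not part of the original file;
 * the GPIO number and label are hypothetical): once one of the chips
 * below is registered, its pins are driven through the generic
 * gpiolib calls, which land in the callbacks above.
 */
static int __maybe_unused example_drive_pin(void)
{
	int gpio = 240;		/* hypothetical global GPIO number */
	int ret;

	ret = gpio_request(gpio, "example");	/* reserve the pin */
	if (ret)
		return ret;
	gpio_direction_output(gpio, 0);		/* -> gef_gpio_dir_out() */
	gpio_set_value(gpio, 1);		/* -> gef_gpio_set() */
	gpio_free(gpio);
	return 0;
}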
102
103static int __init gef_gpio_init(void)
104{
105 struct device_node *np;
106 int retval;
107 struct of_mm_gpio_chip *gef_gpio_chip;
108
109 for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
110
111 pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
112
113 /* Allocate chip structure */
114 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
115 if (!gef_gpio_chip) {
116 pr_err("%s: Unable to allocate structure\n",
117 np->full_name);
118 continue;
119 }
120
121 /* Setup pointers to chip functions */
122 gef_gpio_chip->gc.of_gpio_n_cells = 2;
123 gef_gpio_chip->gc.ngpio = 19;
124 gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
125 gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
126 gef_gpio_chip->gc.get = gef_gpio_get;
127 gef_gpio_chip->gc.set = gef_gpio_set;
128
129 /* This function adds a memory mapped GPIO chip */
130 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
131 if (retval) {
132 kfree(gef_gpio_chip);
133 pr_err("%s: Unable to add GPIO\n", np->full_name);
134 }
135 }
136
137 for_each_compatible_node(np, NULL, "gef,sbc310-gpio") {
138
139 pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
140
141 /* Allocate chip structure */
142 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
143 if (!gef_gpio_chip) {
144 pr_err("%s: Unable to allocate structure\n",
145 np->full_name);
146 continue;
147 }
148
149 /* Setup pointers to chip functions */
150 gef_gpio_chip->gc.of_gpio_n_cells = 2;
151 gef_gpio_chip->gc.ngpio = 6;
152 gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
153 gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
154 gef_gpio_chip->gc.get = gef_gpio_get;
155 gef_gpio_chip->gc.set = gef_gpio_set;
156
157 /* This function adds a memory mapped GPIO chip */
158 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
159 if (retval) {
160 kfree(gef_gpio_chip);
161 pr_err("%s: Unable to add GPIO\n", np->full_name);
162 }
163 }
164
165 for_each_compatible_node(np, NULL, "ge,imp3a-gpio") {
166
167 pr_debug("%s: Initialising GE GPIO\n", np->full_name);
168
169 /* Allocate chip structure */
170 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
171 if (!gef_gpio_chip) {
172 pr_err("%s: Unable to allocate structure\n",
173 np->full_name);
174 continue;
175 }
176
177 /* Setup pointers to chip functions */
178 gef_gpio_chip->gc.of_gpio_n_cells = 2;
179 gef_gpio_chip->gc.ngpio = 16;
180 gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
181 gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
182 gef_gpio_chip->gc.get = gef_gpio_get;
183 gef_gpio_chip->gc.set = gef_gpio_set;
184
185 /* This function adds a memory mapped GPIO chip */
186 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
187 if (retval) {
188 kfree(gef_gpio_chip);
189 pr_err("%s: Unable to add GPIO\n", np->full_name);
190 }
191 }
192
193 return 0;
194};
195arch_initcall(gef_gpio_init);
196
197MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
198MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
199MODULE_LICENSE("GPL");
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index 366bc156e34..8c279da0741 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -560,6 +560,9 @@ static void data_enable_interrupts(struct fpga_device *priv)
560 560
561 /* flush the writes */ 561 /* flush the writes */
562 fpga_read_reg(priv, 0, MMAP_REG_STATUS); 562 fpga_read_reg(priv, 0, MMAP_REG_STATUS);
563 fpga_read_reg(priv, 1, MMAP_REG_STATUS);
564 fpga_read_reg(priv, 2, MMAP_REG_STATUS);
565 fpga_read_reg(priv, 3, MMAP_REG_STATUS);
563 566
564 /* switch back to the external interrupt source */ 567 /* switch back to the external interrupt source */
565 iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL); 568 iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL);
@@ -591,8 +594,12 @@ static void data_dma_cb(void *data)
591 list_move_tail(&priv->inflight->entry, &priv->used); 594 list_move_tail(&priv->inflight->entry, &priv->used);
592 priv->inflight = NULL; 595 priv->inflight = NULL;
593 596
594 /* clear the FPGA status and re-enable interrupts */ 597 /*
595 data_enable_interrupts(priv); 598 * If data dumping is still enabled, then clear the FPGA
599 * status registers and re-enable FPGA interrupts
600 */
601 if (priv->enabled)
602 data_enable_interrupts(priv);
596 603
597 spin_unlock_irqrestore(&priv->lock, flags); 604 spin_unlock_irqrestore(&priv->lock, flags);
598 605
@@ -708,6 +715,15 @@ static irqreturn_t data_irq(int irq, void *dev_id)
708 715
709 spin_lock(&priv->lock); 716 spin_lock(&priv->lock);
710 717
718 /*
719 * This is an error case that should never happen.
720 *
721 * If this driver has a bug and manages to re-enable interrupts while
722 * a DMA is in progress, then we will hit this statement and should
723 * start paying attention immediately.
724 */
725 BUG_ON(priv->inflight != NULL);
726
711 /* hide the interrupt by switching the IRQ driver to GPIO */ 727 /* hide the interrupt by switching the IRQ driver to GPIO */
712 data_disable_interrupts(priv); 728 data_disable_interrupts(priv);
713 729
@@ -762,11 +778,15 @@ out:
762 */ 778 */
763static int data_device_enable(struct fpga_device *priv) 779static int data_device_enable(struct fpga_device *priv)
764{ 780{
781 bool enabled;
765 u32 val; 782 u32 val;
766 int ret; 783 int ret;
767 784
768 /* multiple enables are safe: they do nothing */ 785 /* multiple enables are safe: they do nothing */
769 if (priv->enabled) 786 spin_lock_irq(&priv->lock);
787 enabled = priv->enabled;
788 spin_unlock_irq(&priv->lock);
789 if (enabled)
770 return 0; 790 return 0;
771 791
772 /* check that the FPGAs are programmed */ 792 /* check that the FPGAs are programmed */
@@ -797,6 +817,9 @@ static int data_device_enable(struct fpga_device *priv)
797 goto out_error; 817 goto out_error;
798 } 818 }
799 819
820 /* prevent the FPGAs from generating interrupts */
821 data_disable_interrupts(priv);
822
800 /* hookup the irq handler */ 823 /* hookup the irq handler */
801 ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv); 824 ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
802 if (ret) { 825 if (ret) {
@@ -804,11 +827,13 @@ static int data_device_enable(struct fpga_device *priv)
804 goto out_error; 827 goto out_error;
805 } 828 }
806 829
807 /* switch to the external FPGA IRQ line */ 830 /* allow the DMA callback to re-enable FPGA interrupts */
808 data_enable_interrupts(priv); 831 spin_lock_irq(&priv->lock);
809
810 /* success, we're enabled */
811 priv->enabled = true; 832 priv->enabled = true;
833 spin_unlock_irq(&priv->lock);
834
835 /* allow the FPGAs to generate interrupts */
836 data_enable_interrupts(priv);
812 return 0; 837 return 0;
813 838
814out_error: 839out_error:
@@ -834,41 +859,40 @@ out_error:
834 */ 859 */
835static int data_device_disable(struct fpga_device *priv) 860static int data_device_disable(struct fpga_device *priv)
836{ 861{
837 int ret; 862 spin_lock_irq(&priv->lock);
838 863
839 /* allow multiple disable */ 864 /* allow multiple disable */
840 if (!priv->enabled) 865 if (!priv->enabled) {
866 spin_unlock_irq(&priv->lock);
841 return 0; 867 return 0;
868 }
869
870 /*
871 * Mark the device disabled
872 *
873 * This stops DMA callbacks from re-enabling interrupts
874 */
875 priv->enabled = false;
842 876
843 /* switch to the internal GPIO IRQ line */ 877 /* prevent the FPGAs from generating interrupts */
844 data_disable_interrupts(priv); 878 data_disable_interrupts(priv);
845 879
880 /* wait until all ongoing DMA has finished */
881 while (priv->inflight != NULL) {
882 spin_unlock_irq(&priv->lock);
883 wait_event(priv->wait, priv->inflight == NULL);
884 spin_lock_irq(&priv->lock);
885 }
886
887 spin_unlock_irq(&priv->lock);
888
846 /* unhook the irq handler */ 889 /* unhook the irq handler */
847 free_irq(priv->irq, priv); 890 free_irq(priv->irq, priv);
848 891
849 /*
850 * wait for all outstanding DMA to complete
851 *
852 * Device interrupts are disabled, therefore another buffer cannot
853 * be marked inflight.
854 */
855 ret = wait_event_interruptible(priv->wait, priv->inflight == NULL);
856 if (ret)
857 return ret;
858
859 /* free the correlation table */ 892 /* free the correlation table */
860 sg_free_table(&priv->corl_table); 893 sg_free_table(&priv->corl_table);
861 priv->corl_nents = 0; 894 priv->corl_nents = 0;
862 895
863 /*
864 * We are taking the spinlock not to protect priv->enabled, but instead
865 * to make sure that there are no readers in the process of altering
866 * the free or used lists while we are setting this flag.
867 */
868 spin_lock_irq(&priv->lock);
869 priv->enabled = false;
870 spin_unlock_irq(&priv->lock);
871
872 /* free all buffers: the free and used lists are not being changed */ 896 /* free all buffers: the free and used lists are not being changed */
873 data_free_buffers(priv); 897 data_free_buffers(priv);
874 return 0; 898 return 0;
@@ -896,15 +920,6 @@ static unsigned int list_num_entries(struct list_head *list)
896static int data_debug_show(struct seq_file *f, void *offset) 920static int data_debug_show(struct seq_file *f, void *offset)
897{ 921{
898 struct fpga_device *priv = f->private; 922 struct fpga_device *priv = f->private;
899 int ret;
900
901 /*
902 * Lock the mutex first, so that we get an accurate value for enable
903 * Lock the spinlock next, to get accurate list counts
904 */
905 ret = mutex_lock_interruptible(&priv->mutex);
906 if (ret)
907 return ret;
908 923
909 spin_lock_irq(&priv->lock); 924 spin_lock_irq(&priv->lock);
910 925
@@ -917,7 +932,6 @@ static int data_debug_show(struct seq_file *f, void *offset)
917 seq_printf(f, "num_dropped: %d\n", priv->num_dropped); 932 seq_printf(f, "num_dropped: %d\n", priv->num_dropped);
918 933
919 spin_unlock_irq(&priv->lock); 934 spin_unlock_irq(&priv->lock);
920 mutex_unlock(&priv->mutex);
921 return 0; 935 return 0;
922} 936}
923 937
@@ -970,7 +984,13 @@ static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
970 char *buf) 984 char *buf)
971{ 985{
972 struct fpga_device *priv = dev_get_drvdata(dev); 986 struct fpga_device *priv = dev_get_drvdata(dev);
973 return snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled); 987 int ret;
988
989 spin_lock_irq(&priv->lock);
990 ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
991 spin_unlock_irq(&priv->lock);
992
993 return ret;
974} 994}
975 995
976static ssize_t data_en_set(struct device *dev, struct device_attribute *attr, 996static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
@@ -986,6 +1006,7 @@ static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
986 return -EINVAL; 1006 return -EINVAL;
987 } 1007 }
988 1008
1009 /* protect against concurrent enable/disable */
989 ret = mutex_lock_interruptible(&priv->mutex); 1010 ret = mutex_lock_interruptible(&priv->mutex);
990 if (ret) 1011 if (ret)
991 return ret; 1012 return ret;
@@ -1079,6 +1100,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
1079 struct fpga_reader *reader = filp->private_data; 1100 struct fpga_reader *reader = filp->private_data;
1080 struct fpga_device *priv = reader->priv; 1101 struct fpga_device *priv = reader->priv;
1081 struct list_head *used = &priv->used; 1102 struct list_head *used = &priv->used;
1103 bool drop_buffer = false;
1082 struct data_buf *dbuf; 1104 struct data_buf *dbuf;
1083 size_t avail; 1105 size_t avail;
1084 void *data; 1106 void *data;
@@ -1166,10 +1188,12 @@ have_buffer:
1166 * One of two things has happened, the device is disabled, or the 1188 * One of two things has happened, the device is disabled, or the
1167 * device has been reconfigured underneath us. In either case, we 1189 * device has been reconfigured underneath us. In either case, we
1168 * should just throw away the buffer. 1190 * should just throw away the buffer.
1191 *
1192 * Lockdep complains if this is done under the spinlock, so we
1193 * handle it during the unlock path.
1169 */ 1194 */
1170 if (!priv->enabled || dbuf->size != priv->bufsize) { 1195 if (!priv->enabled || dbuf->size != priv->bufsize) {
1171 videobuf_dma_unmap(priv->dev, &dbuf->vb); 1196 drop_buffer = true;
1172 data_free_buffer(dbuf);
1173 goto out_unlock; 1197 goto out_unlock;
1174 } 1198 }
1175 1199
@@ -1178,6 +1202,12 @@ have_buffer:
1178 1202
1179out_unlock: 1203out_unlock:
1180 spin_unlock_irq(&priv->lock); 1204 spin_unlock_irq(&priv->lock);
1205
1206 if (drop_buffer) {
1207 videobuf_dma_unmap(priv->dev, &dbuf->vb);
1208 data_free_buffer(dbuf);
1209 }
1210
1181 return count; 1211 return count;
1182} 1212}
1183 1213
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 31b034b7eba..3b1d6da874e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -462,6 +462,16 @@ config MTD_NAND_FSL_ELBC
462 Enabling this option will enable you to use this to control 462 Enabling this option will enable you to use this to control
463 external NAND devices. 463 external NAND devices.
464 464
465config MTD_NAND_FSL_IFC
466 tristate "NAND support for Freescale IFC controller"
467 depends on MTD_NAND && FSL_SOC
468 select FSL_IFC
469 help
470 Various Freescale chips, e.g. the P1010, include a NAND Flash machine
471 with built-in hardware ECC capabilities.
472 Enabling this option will enable you to use this to control
473 external NAND devices.
474
465config MTD_NAND_FSL_UPM 475config MTD_NAND_FSL_UPM
466 tristate "Support for NAND on Freescale UPM" 476 tristate "Support for NAND on Freescale UPM"
467 depends on PPC_83xx || PPC_85xx 477 depends on PPC_83xx || PPC_85xx
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 618f4ba2369..19bc8cb1d18 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_MTD_ALAUDA) += alauda.o
37obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 37obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
38obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 38obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
39obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 39obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
40obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
40obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o 41obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
41obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o 42obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
42obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 43obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
new file mode 100644
index 00000000000..c30ac7b83d2
--- /dev/null
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -0,0 +1,1072 @@
1/*
2 * Freescale Integrated Flash Controller NAND driver
3 *
4 * Copyright 2011-2012 Freescale Semiconductor, Inc
5 *
6 * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/mtd/mtd.h>
29#include <linux/mtd/nand.h>
30#include <linux/mtd/partitions.h>
31#include <linux/mtd/nand_ecc.h>
32#include <asm/fsl_ifc.h>
33
34#define ERR_BYTE 0xFF /* Value returned for read
35 bytes when read failed */
36#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
37 for IFC NAND Machine */
38
39struct fsl_ifc_ctrl;
40
41/* mtd information per set */
42struct fsl_ifc_mtd {
43 struct mtd_info mtd;
44 struct nand_chip chip;
45 struct fsl_ifc_ctrl *ctrl;
46
47 struct device *dev;
48 int bank; /* Chip select bank number */
49 unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
50 u8 __iomem *vbase; /* Chip select base virtual address */
51};
52
53/* overview of the fsl ifc controller */
54struct fsl_ifc_nand_ctrl {
55 struct nand_hw_control controller;
56 struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
57
58 u8 __iomem *addr; /* Address of assigned IFC buffer */
59 unsigned int page; /* Last page written to / read from */
60 unsigned int read_bytes;/* Number of bytes read during command */
61 unsigned int column; /* Saved column from SEQIN */
62 unsigned int index; /* Pointer to next byte to 'read' */
63 unsigned int oob; /* Non zero if operating on OOB data */
64 unsigned int eccread; /* Non zero for a full-page ECC read */
65 unsigned int counter; /* counter for the initializations */
66};
67
68static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
69
70/* 512-byte page with 4-bit ECC, 8-bit */
71static struct nand_ecclayout oob_512_8bit_ecc4 = {
72 .eccbytes = 8,
73 .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
74 .oobfree = { {0, 5}, {6, 2} },
75};
76
77/* 512-byte page with 4-bit ECC, 16-bit */
78static struct nand_ecclayout oob_512_16bit_ecc4 = {
79 .eccbytes = 8,
80 .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
81 .oobfree = { {2, 6}, },
82};
83
84/* 2048-byte page size with 4-bit ECC */
85static struct nand_ecclayout oob_2048_ecc4 = {
86 .eccbytes = 32,
87 .eccpos = {
88 8, 9, 10, 11, 12, 13, 14, 15,
89 16, 17, 18, 19, 20, 21, 22, 23,
90 24, 25, 26, 27, 28, 29, 30, 31,
91 32, 33, 34, 35, 36, 37, 38, 39,
92 },
93 .oobfree = { {2, 6}, {40, 24} },
94};
95
96/* 4096-byte page size with 4-bit ECC */
97static struct nand_ecclayout oob_4096_ecc4 = {
98 .eccbytes = 64,
99 .eccpos = {
100 8, 9, 10, 11, 12, 13, 14, 15,
101 16, 17, 18, 19, 20, 21, 22, 23,
102 24, 25, 26, 27, 28, 29, 30, 31,
103 32, 33, 34, 35, 36, 37, 38, 39,
104 40, 41, 42, 43, 44, 45, 46, 47,
105 48, 49, 50, 51, 52, 53, 54, 55,
106 56, 57, 58, 59, 60, 61, 62, 63,
107 64, 65, 66, 67, 68, 69, 70, 71,
108 },
109 .oobfree = { {2, 6}, {72, 56} },
110};
111
112/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
113static struct nand_ecclayout oob_4096_ecc8 = {
114 .eccbytes = 128,
115 .eccpos = {
116 8, 9, 10, 11, 12, 13, 14, 15,
117 16, 17, 18, 19, 20, 21, 22, 23,
118 24, 25, 26, 27, 28, 29, 30, 31,
119 32, 33, 34, 35, 36, 37, 38, 39,
120 40, 41, 42, 43, 44, 45, 46, 47,
121 48, 49, 50, 51, 52, 53, 54, 55,
122 56, 57, 58, 59, 60, 61, 62, 63,
123 64, 65, 66, 67, 68, 69, 70, 71,
124 72, 73, 74, 75, 76, 77, 78, 79,
125 80, 81, 82, 83, 84, 85, 86, 87,
126 88, 89, 90, 91, 92, 93, 94, 95,
127 96, 97, 98, 99, 100, 101, 102, 103,
128 104, 105, 106, 107, 108, 109, 110, 111,
129 112, 113, 114, 115, 116, 117, 118, 119,
130 120, 121, 122, 123, 124, 125, 126, 127,
131 128, 129, 130, 131, 132, 133, 134, 135,
132 },
133 .oobfree = { {2, 6}, {136, 82} },
134};
135
136
137/*
138 * Generic flash bbt descriptors
139 */
140static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
141static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
142
143static struct nand_bbt_descr bbt_main_descr = {
144 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
145 NAND_BBT_2BIT | NAND_BBT_VERSION,
146 .offs = 2, /* 0 on 8-bit small page */
147 .len = 4,
148 .veroffs = 6,
149 .maxblocks = 4,
150 .pattern = bbt_pattern,
151};
152
153static struct nand_bbt_descr bbt_mirror_descr = {
154 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
155 NAND_BBT_2BIT | NAND_BBT_VERSION,
156 .offs = 2, /* 0 on 8-bit small page */
157 .len = 4,
158 .veroffs = 6,
159 .maxblocks = 4,
160 .pattern = mirror_pattern,
161};
162
163/*
164 * Set up the IFC hardware block and page address fields, and the ifc nand
165 * structure addr field to point to the correct IFC buffer in memory
166 */
167static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
168{
169 struct nand_chip *chip = mtd->priv;
170 struct fsl_ifc_mtd *priv = chip->priv;
171 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
172 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
173 int buf_num;
174
175 ifc_nand_ctrl->page = page_addr;
176 /* Program ROW0/COL0 */
177 out_be32(&ifc->ifc_nand.row0, page_addr);
178 out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
179
180 buf_num = page_addr & priv->bufnum_mask;
181
182 ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
183 ifc_nand_ctrl->index = column;
184
185 /* for OOB data point to the second half of the buffer */
186 if (oob)
187 ifc_nand_ctrl->index += mtd->writesize;
188}
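/*
 * Worked example (illustrative; the mask value is hypothetical): with
 * a 2048-byte page and a bufnum_mask of 3, page_addr 7 selects buffer
 * 3, so addr points at vbase + 3 * 4096 and an OOB access starts at
 * index 2048, the second half of that buffer.
 */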
189
190static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
191{
192 struct nand_chip *chip = mtd->priv;
193 struct fsl_ifc_mtd *priv = chip->priv;
194 u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
195 u32 __iomem *mainarea = (u32 *)addr;
196 u8 __iomem *oob = addr + mtd->writesize;
197 int i;
198
199 for (i = 0; i < mtd->writesize / 4; i++) {
200 if (__raw_readl(&mainarea[i]) != 0xffffffff)
201 return 0;
202 }
203
204 for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
205 int pos = chip->ecc.layout->eccpos[i];
206
207 if (__raw_readb(&oob[pos]) != 0xff)
208 return 0;
209 }
210
211 return 1;
212}
213
214/* returns nonzero if entire page is blank */
215static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
216 u32 *eccstat, unsigned int bufnum)
217{
218 u32 reg = eccstat[bufnum / 4];
219 int errors;
220
221 errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
222
223 return errors;
224}
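/*
 * Example (illustrative): for bufnum = 5 the count lives in
 * eccstat[1]; since 5 % 4 == 1, the byte at bits 23:16 is selected
 * and its low nibble returned -- a value of 15 flags an
 * uncorrectable error, as handled in fsl_ifc_run_command() below.
 */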
225
226/*
227 * execute IFC NAND command and wait for it to complete
228 */
229static void fsl_ifc_run_command(struct mtd_info *mtd)
230{
231 struct nand_chip *chip = mtd->priv;
232 struct fsl_ifc_mtd *priv = chip->priv;
233 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
234 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
235 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
236 u32 eccstat[4];
237 int i;
238
239 /* set the chip select for NAND Transaction */
240 out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
241
242 dev_vdbg(priv->dev,
243 "%s: fir0=%08x fcr0=%08x\n",
244 __func__,
245 in_be32(&ifc->ifc_nand.nand_fir0),
246 in_be32(&ifc->ifc_nand.nand_fcr0));
247
248 ctrl->nand_stat = 0;
249
250 /* start read/write seq */
251 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
252
253 /* wait for command complete flag or timeout */
254 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
255 IFC_TIMEOUT_MSECS * HZ/1000);
256
257 /* ctrl->nand_stat will be updated from IRQ context */
258 if (!ctrl->nand_stat)
259 dev_err(priv->dev, "Controller is not responding\n");
260 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
261 dev_err(priv->dev, "NAND Flash Timeout Error\n");
262 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
263 dev_err(priv->dev, "NAND Flash Write Protect Error\n");
264
265 if (nctrl->eccread) {
266 int errors;
267 int bufnum = nctrl->page & priv->bufnum_mask;
268 int sector = bufnum * chip->ecc.steps;
269 int sector_end = sector + chip->ecc.steps - 1;
270
271 for (i = sector / 4; i <= sector_end / 4; i++)
272 eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
273
274 for (i = sector; i <= sector_end; i++) {
275 errors = check_read_ecc(mtd, ctrl, eccstat, i);
276
277 if (errors == 15) {
278 /*
279 * Uncorrectable error.
280 * OK only if the whole page is blank.
281 *
282 * We disable ECCER reporting due to...
283 * erratum IFC-A002770 -- so report it now if we
284 * see an uncorrectable error in ECCSTAT.
285 */
286 if (!is_blank(mtd, bufnum))
287 ctrl->nand_stat |=
288 IFC_NAND_EVTER_STAT_ECCER;
289 break;
290 }
291
292 mtd->ecc_stats.corrected += errors;
293 }
294
295 nctrl->eccread = 0;
296 }
297}
298
299static void fsl_ifc_do_read(struct nand_chip *chip,
300 int oob,
301 struct mtd_info *mtd)
302{
303 struct fsl_ifc_mtd *priv = chip->priv;
304 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
305 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
306
307 /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
308 if (mtd->writesize > 512) {
309 out_be32(&ifc->ifc_nand.nand_fir0,
310 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
311 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
312 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
313 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
314 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
315 out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
316
317 out_be32(&ifc->ifc_nand.nand_fcr0,
318 (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
319 (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
320 } else {
321 out_be32(&ifc->ifc_nand.nand_fir0,
322 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
323 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
324 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
325 (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
326 out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
327
328 if (oob)
329 out_be32(&ifc->ifc_nand.nand_fcr0,
330 NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
331 else
332 out_be32(&ifc->ifc_nand.nand_fcr0,
333 NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
334 }
335}
336
337/* cmdfunc sends commands to the IFC NAND Machine */
338static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
339 int column, int page_addr) {
340 struct nand_chip *chip = mtd->priv;
341 struct fsl_ifc_mtd *priv = chip->priv;
342 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
343 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
344
345 /* clear the read buffer */
346 ifc_nand_ctrl->read_bytes = 0;
347 if (command != NAND_CMD_PAGEPROG)
348 ifc_nand_ctrl->index = 0;
349
350 switch (command) {
351 /* READ0 reads the entire buffer to use hardware ECC. */
352 case NAND_CMD_READ0:
353 out_be32(&ifc->ifc_nand.nand_fbcr, 0);
354 set_addr(mtd, 0, page_addr, 0);
355
356 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
357 ifc_nand_ctrl->index += column;
358
359 if (chip->ecc.mode == NAND_ECC_HW)
360 ifc_nand_ctrl->eccread = 1;
361
362 fsl_ifc_do_read(chip, 0, mtd);
363 fsl_ifc_run_command(mtd);
364 return;
365
366 /* READOOB reads only the OOB because no ECC is performed. */
367 case NAND_CMD_READOOB:
368 out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
369 set_addr(mtd, column, page_addr, 1);
370
371 ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
372
373 fsl_ifc_do_read(chip, 1, mtd);
374 fsl_ifc_run_command(mtd);
375
376 return;
377
378 /* READID must read all 8 possible bytes */
379 case NAND_CMD_READID:
380 out_be32(&ifc->ifc_nand.nand_fir0,
381 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
382 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
383 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
384 out_be32(&ifc->ifc_nand.nand_fcr0,
385 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
386 /* 8 bytes for manuf, device and exts */
387 out_be32(&ifc->ifc_nand.nand_fbcr, 8);
388 ifc_nand_ctrl->read_bytes = 8;
389
390 set_addr(mtd, 0, 0, 0);
391 fsl_ifc_run_command(mtd);
392 return;
393
394 /* ERASE1 stores the block and page address */
395 case NAND_CMD_ERASE1:
396 set_addr(mtd, 0, page_addr, 0);
397 return;
398
399 /* ERASE2 uses the block and page address from ERASE1 */
400 case NAND_CMD_ERASE2:
401 out_be32(&ifc->ifc_nand.nand_fir0,
402 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
403 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
404 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
405
406 out_be32(&ifc->ifc_nand.nand_fcr0,
407 (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
408 (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
409
410 out_be32(&ifc->ifc_nand.nand_fbcr, 0);
411 ifc_nand_ctrl->read_bytes = 0;
412 fsl_ifc_run_command(mtd);
413 return;
414
415 /* SEQIN sets up the addr buffer and all registers except the length */
416 case NAND_CMD_SEQIN: {
417 u32 nand_fcr0;
418 ifc_nand_ctrl->column = column;
419 ifc_nand_ctrl->oob = 0;
420
421 if (mtd->writesize > 512) {
422 nand_fcr0 =
423 (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
424 (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
425
426 out_be32(&ifc->ifc_nand.nand_fir0,
427 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
428 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
429 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
430 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
431 (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
432 } else {
433 nand_fcr0 = ((NAND_CMD_PAGEPROG <<
434 IFC_NAND_FCR0_CMD1_SHIFT) |
435 (NAND_CMD_SEQIN <<
436 IFC_NAND_FCR0_CMD2_SHIFT));
437
438 out_be32(&ifc->ifc_nand.nand_fir0,
439 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
440 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
441 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
442 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
443 (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
444 out_be32(&ifc->ifc_nand.nand_fir1,
445 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
446
447 if (column >= mtd->writesize)
448 nand_fcr0 |=
449 NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
450 else
451 nand_fcr0 |=
452 NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
453 }
454
455 if (column >= mtd->writesize) {
456 /* OOB area --> READOOB */
457 column -= mtd->writesize;
458 ifc_nand_ctrl->oob = 1;
459 }
460 out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
461 set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
462 return;
463 }
464
465 /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
466 case NAND_CMD_PAGEPROG: {
467 if (ifc_nand_ctrl->oob) {
468 out_be32(&ifc->ifc_nand.nand_fbcr,
469 ifc_nand_ctrl->index - ifc_nand_ctrl->column);
470 } else {
471 out_be32(&ifc->ifc_nand.nand_fbcr, 0);
472 }
473
474 fsl_ifc_run_command(mtd);
475 return;
476 }
477
478 case NAND_CMD_STATUS:
479 out_be32(&ifc->ifc_nand.nand_fir0,
480 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
481 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
482 out_be32(&ifc->ifc_nand.nand_fcr0,
483 NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
484 out_be32(&ifc->ifc_nand.nand_fbcr, 1);
485 set_addr(mtd, 0, 0, 0);
486 ifc_nand_ctrl->read_bytes = 1;
487
488 fsl_ifc_run_command(mtd);
489
490 /*
491 * The chip always seems to report that it is
492 * write-protected, even when it is not.
493 */
494 setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
495 return;
496
497 case NAND_CMD_RESET:
498 out_be32(&ifc->ifc_nand.nand_fir0,
499 IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
500 out_be32(&ifc->ifc_nand.nand_fcr0,
501 NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
502 fsl_ifc_run_command(mtd);
503 return;
504
505 default:
506 dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
507 __func__, command);
508 }
509}
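/*
 * Illustrative sketch, not part of the merged driver: roughly how the
 * generic NAND core drives the cmdfunc() hook above for a block erase.
 * example_erase_block() is a hypothetical name; cmdfunc and waitfunc are
 * the stock nand_chip hooks wired up in fsl_ifc_chip_init() below.
 */
#if 0
static void example_erase_block(struct mtd_info *mtd, int page_addr)
{
	struct nand_chip *chip = mtd->priv;

	/* ERASE1 only latches the row address in the driver state... */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page_addr);
	/* ...ERASE2 programs FIR/FCR and launches the operation */
	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
	/* fsl_ifc_wait() then reads back the flash status register */
	chip->waitfunc(mtd, chip);
}
#endif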
510
511static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
512{
513 /* The hardware does not seem to support multiple
514 * chips per bank.
515 */
516}
517
518/*
519 * Write buf to the IFC NAND Controller Data Buffer
520 */
521static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
522{
523 struct nand_chip *chip = mtd->priv;
524 struct fsl_ifc_mtd *priv = chip->priv;
525 unsigned int bufsize = mtd->writesize + mtd->oobsize;
526
527 if (len <= 0) {
528		dev_err(priv->dev, "%s: invalid len %d\n", __func__, len);
529 return;
530 }
531
532 if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
533 dev_err(priv->dev,
534 "%s: beyond end of buffer (%d requested, %u available)\n",
535 __func__, len, bufsize - ifc_nand_ctrl->index);
536 len = bufsize - ifc_nand_ctrl->index;
537 }
538
539 memcpy_toio(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index], buf, len);
540 ifc_nand_ctrl->index += len;
541}
542
543/*
544 * Read a byte from the IFC hardware buffer;
545 * read function for 8-bit buswidth.
546 */
547static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
548{
549 struct nand_chip *chip = mtd->priv;
550 struct fsl_ifc_mtd *priv = chip->priv;
551
552 /*
553 * If there are still bytes in the IFC buffer, then use the
554 * next byte.
555 */
556 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes)
557 return in_8(&ifc_nand_ctrl->addr[ifc_nand_ctrl->index++]);
558
559 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
560 return ERR_BYTE;
561}
562
563/*
564 * Read two bytes from the IFC hardware buffer;
565 * read function for 16-bit buswidth.
566 */
567static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
568{
569 struct nand_chip *chip = mtd->priv;
570 struct fsl_ifc_mtd *priv = chip->priv;
571 uint16_t data;
572
573 /*
574 * If there are still bytes in the IFC buffer, then use the
575 * next byte.
576 */
577 if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
578 data = in_be16((uint16_t *)&ifc_nand_ctrl->
579 addr[ifc_nand_ctrl->index]);
580 ifc_nand_ctrl->index += 2;
581 return (uint8_t) data;
582 }
583
584 dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
585 return ERR_BYTE;
586}
587
588/*
589 * Read from the IFC Controller Data Buffer
590 */
591static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
592{
593 struct nand_chip *chip = mtd->priv;
594 struct fsl_ifc_mtd *priv = chip->priv;
595 int avail;
596
597 if (len < 0) {
598		dev_err(priv->dev, "%s: invalid len %d\n", __func__, len);
599 return;
600 }
601
602 avail = min((unsigned int)len,
603 ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
604 memcpy_fromio(buf, &ifc_nand_ctrl->addr[ifc_nand_ctrl->index], avail);
605 ifc_nand_ctrl->index += avail;
606
607 if (len > avail)
608 dev_err(priv->dev,
609 "%s: beyond end of buffer (%d requested, %d available)\n",
610 __func__, len, avail);
611}
612
613/*
614 * Verify buffer against the IFC Controller Data Buffer
615 */
616static int fsl_ifc_verify_buf(struct mtd_info *mtd,
617 const u_char *buf, int len)
618{
619 struct nand_chip *chip = mtd->priv;
620 struct fsl_ifc_mtd *priv = chip->priv;
621 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
622 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
623 int i;
624
625 if (len < 0) {
626		dev_err(priv->dev, "%s: invalid len %d\n", __func__, len);
627 return -EINVAL;
628 }
629
630 if ((unsigned int)len > nctrl->read_bytes - nctrl->index) {
631 dev_err(priv->dev,
632 "%s: beyond end of buffer (%d requested, %u available)\n",
633 __func__, len, nctrl->read_bytes - nctrl->index);
634
635 nctrl->index = nctrl->read_bytes;
636 return -EINVAL;
637 }
638
639 for (i = 0; i < len; i++)
640 if (in_8(&nctrl->addr[nctrl->index + i]) != buf[i])
641 break;
642
643 nctrl->index += len;
644
645 if (i != len)
646 return -EIO;
647 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
648 return -EIO;
649
650 return 0;
651}
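/*
 * Illustrative sketch (an assumption, mirroring the generic NAND core
 * with CONFIG_MTD_NAND_VERIFY_WRITE): the core re-reads a freshly
 * programmed page and hands the original data to the verify_buf() hook
 * above. buf and page below come from the calling context.
 */
#if 0
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	if (chip->verify_buf(mtd, buf, mtd->writesize))
		return -EIO;	/* mismatch, or the command itself failed */
#endif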
652
653/*
654 * This function is called after program and erase operations to
655 * check for success or failure.
656 */
657static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
658{
659 struct fsl_ifc_mtd *priv = chip->priv;
660 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
661 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
662 u32 nand_fsr;
663
664 /* Use READ_STATUS command, but wait for the device to be ready */
665 out_be32(&ifc->ifc_nand.nand_fir0,
666 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
667 (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
668 out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
669 IFC_NAND_FCR0_CMD0_SHIFT);
670 out_be32(&ifc->ifc_nand.nand_fbcr, 1);
671 set_addr(mtd, 0, 0, 0);
672 ifc_nand_ctrl->read_bytes = 1;
673
674 fsl_ifc_run_command(mtd);
675
676 nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
677
678 /*
679 * The chip always seems to report that it is
680 * write-protected, even when it is not.
681 */
682 return nand_fsr | NAND_STATUS_WP;
683}
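/*
 * Illustrative sketch, not driver code: how a caller typically decodes
 * the waitfunc() result above. NAND_STATUS_FAIL is the standard
 * pass/fail bit; NAND_STATUS_WP is forced on by fsl_ifc_wait() as noted.
 */
#if 0
	int status = chip->waitfunc(mtd, chip);

	if (status & NAND_STATUS_FAIL)
		return -EIO;	/* the program/erase operation failed */
#endif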
684
685static int fsl_ifc_read_page(struct mtd_info *mtd,
686 struct nand_chip *chip,
687 uint8_t *buf, int page)
688{
689 struct fsl_ifc_mtd *priv = chip->priv;
690 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
691
692 fsl_ifc_read_buf(mtd, buf, mtd->writesize);
693 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
694
695 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
696 dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
697
698 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
699 mtd->ecc_stats.failed++;
700
701 return 0;
702}
703
704/* ECC will be calculated automatically, and errors will be detected in
705 * waitfunc.
706 */
707static void fsl_ifc_write_page(struct mtd_info *mtd,
708 struct nand_chip *chip,
709 const uint8_t *buf)
710{
711 fsl_ifc_write_buf(mtd, buf, mtd->writesize);
712 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
713}
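/*
 * Illustrative note (assumed flow, following the generic NAND core):
 * a full page program as seen by this driver is
 *
 *	cmdfunc(NAND_CMD_SEQIN)    - latch column/row, choose the opcode set
 *	ecc.write_page()           - fill the IFC SRAM buffer via write_buf()
 *	cmdfunc(NAND_CMD_PAGEPROG) - set the byte count and run the command
 *	waitfunc()                 - fetch and decode the flash status
 *
 * with the IFC injecting hardware ECC while the buffer is flushed.
 */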
714
715static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
716{
717 struct nand_chip *chip = mtd->priv;
718 struct fsl_ifc_mtd *priv = chip->priv;
719
720 dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
721 chip->numchips);
722 dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
723 chip->chipsize);
724 dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
725 chip->pagemask);
726 dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
727 chip->chip_delay);
728 dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
729 chip->badblockpos);
730 dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
731 chip->chip_shift);
732 dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
733 chip->page_shift);
734 dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
735 chip->phys_erase_shift);
736 dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
737 chip->ecclayout);
738 dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
739 chip->ecc.mode);
740 dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
741 chip->ecc.steps);
742 dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
743 chip->ecc.bytes);
744 dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
745 chip->ecc.total);
746 dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
747 chip->ecc.layout);
748 dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
749 dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
750 dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
751 mtd->erasesize);
752 dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
753 mtd->writesize);
754 dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
755 mtd->oobsize);
756
757 return 0;
758}
759
760static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
761{
762 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
763 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
764 struct nand_chip *chip = &priv->chip;
765 struct nand_ecclayout *layout;
766 u32 csor;
767
768 /* Fill in fsl_ifc_mtd structure */
769 priv->mtd.priv = chip;
770 priv->mtd.owner = THIS_MODULE;
771
772 /* fill in nand_chip structure */
773 /* set up function call table */
779 chip->write_buf = fsl_ifc_write_buf;
780 chip->read_buf = fsl_ifc_read_buf;
781 chip->verify_buf = fsl_ifc_verify_buf;
782 chip->select_chip = fsl_ifc_select_chip;
783 chip->cmdfunc = fsl_ifc_cmdfunc;
784 chip->waitfunc = fsl_ifc_wait;
785
786 chip->bbt_td = &bbt_main_descr;
787 chip->bbt_md = &bbt_mirror_descr;
788
789 out_be32(&ifc->ifc_nand.ncfgr, 0x0);
790
791 /* set up nand options */
792 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
793 chip->bbt_options = NAND_BBT_USE_FLASH;
794
796 if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
797 chip->read_byte = fsl_ifc_read_byte16;
798 chip->options |= NAND_BUSWIDTH_16;
799 } else {
800 chip->read_byte = fsl_ifc_read_byte;
801 }
802
803 chip->controller = &ifc_nand_ctrl->controller;
804 chip->priv = priv;
805
806 chip->ecc.read_page = fsl_ifc_read_page;
807 chip->ecc.write_page = fsl_ifc_write_page;
808
809 csor = in_be32(&ifc->csor_cs[priv->bank].csor);
810
811	/* Hardware generates ECC per 512 bytes */
812 chip->ecc.size = 512;
813 chip->ecc.bytes = 8;
814
815 switch (csor & CSOR_NAND_PGS_MASK) {
816 case CSOR_NAND_PGS_512:
817 if (chip->options & NAND_BUSWIDTH_16) {
818 layout = &oob_512_16bit_ecc4;
819 } else {
820 layout = &oob_512_8bit_ecc4;
821
822 /* Avoid conflict with bad block marker */
823 bbt_main_descr.offs = 0;
824 bbt_mirror_descr.offs = 0;
825 }
826
827 priv->bufnum_mask = 15;
828 break;
829
830 case CSOR_NAND_PGS_2K:
831 layout = &oob_2048_ecc4;
832 priv->bufnum_mask = 3;
833 break;
834
835 case CSOR_NAND_PGS_4K:
836 if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
837 CSOR_NAND_ECC_MODE_4) {
838 layout = &oob_4096_ecc4;
839 } else {
840 layout = &oob_4096_ecc8;
841 chip->ecc.bytes = 16;
842 }
843
844 priv->bufnum_mask = 1;
845 break;
846
847 default:
848 dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
849 return -ENODEV;
850 }
851
852 /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
853 if (csor & CSOR_NAND_ECC_DEC_EN) {
854 chip->ecc.mode = NAND_ECC_HW;
855 chip->ecc.layout = layout;
856 } else {
857 chip->ecc.mode = NAND_ECC_SOFT;
858 }
859
860 return 0;
861}
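/*
 * Illustrative worked example (not driver code): with the 2K page
 * configuration above, ecc.size = 512 and ecc.bytes = 8, so
 * nand_scan_tail() derives ecc.steps = 2048 / 512 = 4 and
 * ecc.total = 4 * 8 = 32 ECC bytes per page, which is what the
 * oob_2048_ecc4 layout reserves in the OOB area.
 */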
862
863static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
864{
865 nand_release(&priv->mtd);
866
867 kfree(priv->mtd.name);
868
869 if (priv->vbase)
870 iounmap(priv->vbase);
871
872 ifc_nand_ctrl->chips[priv->bank] = NULL;
873 dev_set_drvdata(priv->dev, NULL);
874 kfree(priv);
875
876 return 0;
877}
878
879static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
880 phys_addr_t addr)
881{
882 u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
883
884 if (!(cspr & CSPR_V))
885 return 0;
886 if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
887 return 0;
888
889 return (cspr & CSPR_BA) == convert_ifc_address(addr);
890}
891
892static DEFINE_MUTEX(fsl_ifc_nand_mutex);
893
894static int __devinit fsl_ifc_nand_probe(struct platform_device *dev)
895{
896 struct fsl_ifc_regs __iomem *ifc;
897 struct fsl_ifc_mtd *priv;
898 struct resource res;
899 static const char *part_probe_types[]
900 = { "cmdlinepart", "RedBoot", "ofpart", NULL };
901 int ret;
902 int bank;
903 struct device_node *node = dev->dev.of_node;
904 struct mtd_part_parser_data ppdata;
905
906 ppdata.of_node = dev->dev.of_node;
907 if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
908 return -ENODEV;
909 ifc = fsl_ifc_ctrl_dev->regs;
910
911 /* get, allocate and map the memory resource */
912 ret = of_address_to_resource(node, 0, &res);
913 if (ret) {
914 dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
915 return ret;
916 }
917
918 /* find which chip select it is connected to */
919 for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) {
920 if (match_bank(ifc, bank, res.start))
921 break;
922 }
923
924 if (bank >= FSL_IFC_BANK_COUNT) {
925 dev_err(&dev->dev, "%s: address did not match any chip selects\n",
926 __func__);
927 return -ENODEV;
928 }
929
930 priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
931 if (!priv)
932 return -ENOMEM;
933
934 mutex_lock(&fsl_ifc_nand_mutex);
935 if (!fsl_ifc_ctrl_dev->nand) {
936 ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
937 if (!ifc_nand_ctrl) {
938 dev_err(&dev->dev, "failed to allocate memory\n");
939 mutex_unlock(&fsl_ifc_nand_mutex);
940 return -ENOMEM;
941 }
942
943 ifc_nand_ctrl->read_bytes = 0;
944 ifc_nand_ctrl->index = 0;
945 ifc_nand_ctrl->addr = NULL;
946 fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
947
948 spin_lock_init(&ifc_nand_ctrl->controller.lock);
949 init_waitqueue_head(&ifc_nand_ctrl->controller.wq);
950 } else {
951 ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
952 }
953 mutex_unlock(&fsl_ifc_nand_mutex);
954
955 ifc_nand_ctrl->chips[bank] = priv;
956 priv->bank = bank;
957 priv->ctrl = fsl_ifc_ctrl_dev;
958 priv->dev = &dev->dev;
959
960 priv->vbase = ioremap(res.start, resource_size(&res));
961 if (!priv->vbase) {
962 dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
963 ret = -ENOMEM;
964 goto err;
965 }
966
967 dev_set_drvdata(priv->dev, priv);
968
969 out_be32(&ifc->ifc_nand.nand_evter_en,
970 IFC_NAND_EVTER_EN_OPC_EN |
971 IFC_NAND_EVTER_EN_FTOER_EN |
972 IFC_NAND_EVTER_EN_WPER_EN);
973
974 /* enable NAND Machine Interrupts */
975 out_be32(&ifc->ifc_nand.nand_evter_intr_en,
976 IFC_NAND_EVTER_INTR_OPCIR_EN |
977 IFC_NAND_EVTER_INTR_FTOERIR_EN |
978 IFC_NAND_EVTER_INTR_WPERIR_EN);
979
980 priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
981 if (!priv->mtd.name) {
982 ret = -ENOMEM;
983 goto err;
984 }
985
986 ret = fsl_ifc_chip_init(priv);
987 if (ret)
988 goto err;
989
990 ret = nand_scan_ident(&priv->mtd, 1, NULL);
991 if (ret)
992 goto err;
993
994 ret = fsl_ifc_chip_init_tail(&priv->mtd);
995 if (ret)
996 goto err;
997
998 ret = nand_scan_tail(&priv->mtd);
999 if (ret)
1000 goto err;
1001
1002	/* First look for RedBoot table or partitions on the command
1003	 * line; these take precedence over device tree information. */
1004 mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
1005 NULL, 0);
1006
1007 dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
1008 (unsigned long long)res.start, priv->bank);
1009 return 0;
1010
1011err:
1012 fsl_ifc_chip_remove(priv);
1013 return ret;
1014}
1015
1016static int fsl_ifc_nand_remove(struct platform_device *dev)
1017{
1018 struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
1019
1020 fsl_ifc_chip_remove(priv);
1021
1022 mutex_lock(&fsl_ifc_nand_mutex);
1023 ifc_nand_ctrl->counter--;
1024 if (!ifc_nand_ctrl->counter) {
1025 fsl_ifc_ctrl_dev->nand = NULL;
1026 kfree(ifc_nand_ctrl);
1027 }
1028 mutex_unlock(&fsl_ifc_nand_mutex);
1029
1030 return 0;
1031}
1032
1033static const struct of_device_id fsl_ifc_nand_match[] = {
1034 {
1035 .compatible = "fsl,ifc-nand",
1036 },
1037 {}
1038};
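/*
 * Illustrative example (assumed, not taken from this patch): a matching
 * node sits under the IFC controller in the device tree, e.g.
 *
 *	flash@0,0 {
 *		compatible = "fsl,ifc-nand";
 *		reg = <0x0 0x0 0x10000>;
 *	};
 *
 * of_address_to_resource() in probe() turns the reg entry into the
 * chip-select window that match_bank() compares against CSPR_BA.
 */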
1039
1040static struct platform_driver fsl_ifc_nand_driver = {
1041 .driver = {
1042 .name = "fsl,ifc-nand",
1043 .owner = THIS_MODULE,
1044 .of_match_table = fsl_ifc_nand_match,
1045 },
1046 .probe = fsl_ifc_nand_probe,
1047 .remove = fsl_ifc_nand_remove,
1048};
1049
1050static int __init fsl_ifc_nand_init(void)
1051{
1052 int ret;
1053
1054 ret = platform_driver_register(&fsl_ifc_nand_driver);
1055 if (ret)
1056		printk(KERN_ERR "fsl-ifc: Failed to register platform "
1057 "driver\n");
1058
1059 return ret;
1060}
1061
1062static void __exit fsl_ifc_nand_exit(void)
1063{
1064 platform_driver_unregister(&fsl_ifc_nand_driver);
1065}
1066
1067module_init(fsl_ifc_nand_init);
1068module_exit(fsl_ifc_nand_exit);
1069
1070MODULE_LICENSE("GPL");
1071MODULE_AUTHOR("Freescale");
1072MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d3d18e89cb5..4e89103204d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -974,9 +974,8 @@ config SCSI_IPS
 
 config SCSI_IBMVSCSI
 	tristate "IBM Virtual SCSI support"
-	depends on PPC_PSERIES || PPC_ISERIES
+	depends on PPC_PSERIES
 	select SCSI_SRP_ATTRS
-	select VIOPATH if PPC_ISERIES
 	help
 	  This is the IBM POWER Virtual SCSI Client
 
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index a423d963362..ff5b5c5538e 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,6 @@
 obj-$(CONFIG_SCSI_IBMVSCSI)	+= ibmvscsic.o
 
 ibmvscsic-y			+= ibmvscsi.o
-ibmvscsic-$(CONFIG_PPC_ISERIES)	+= iseries_vscsi.o
 ibmvscsic-$(CONFIG_PPC_PSERIES)	+= rpa_vscsi.o
 
 obj-$(CONFIG_SCSI_IBMVSCSIS)	+= ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3d391dc3f11..e984951baeb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -55,13 +55,7 @@
  * and sends a CRQ message back to inform the client that the request has
  * completed.
  *
- * Note that some of the underlying infrastructure is different between
- * machines conforming to the "RS/6000 Platform Architecture" (RPA) and
- * the older iSeries hypervisor models. To support both, some low level
- * routines have been broken out into rpa_vscsi.c and iseries_vscsi.c.
- * The Makefile should pick one, not two, not zero, of these.
- *
- * TODO: This is currently pretty tied to the IBM i/pSeries hypervisor
+ * TODO: This is currently pretty tied to the IBM pSeries hypervisor
  * interfaces. It would be really nice to abstract this above an RDMA
  * layer.
  */
@@ -2085,9 +2079,7 @@ int __init ibmvscsi_module_init(void)
 	driver_template.can_queue = max_requests;
 	max_events = max_requests + 2;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		ibmvscsi_ops = &iseriesvscsi_ops;
-	else if (firmware_has_feature(FW_FEATURE_VIO))
+	if (firmware_has_feature(FW_FEATURE_VIO))
 		ibmvscsi_ops = &rpavscsi_ops;
 	else
 		return -ENODEV;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 02197a2b22b..c503e177601 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -127,7 +127,6 @@ struct ibmvscsi_ops {
 	int (*resume) (struct ibmvscsi_host_data *hostdata);
 };
 
-extern struct ibmvscsi_ops iseriesvscsi_ops;
 extern struct ibmvscsi_ops rpavscsi_ops;
 
 #endif /* IBMVSCSI_H */
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
deleted file mode 100644
index f4776451a75..00000000000
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/* ------------------------------------------------------------
2 * iSeries_vscsi.c
3 * (C) Copyright IBM Corporation 1994, 2003
4 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
5 * Santiago Leon (santil@us.ibm.com)
6 * Dave Boutcher (sleddog@us.ibm.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22 *
23 * ------------------------------------------------------------
24 * iSeries-specific functions of the SCSI host adapter for Virtual I/O devices
25 *
26 * This driver allows the Linux SCSI peripheral drivers to directly
27 * access devices in the hosting partition, either on an iSeries
28 * hypervisor system or a converged hypervisor system.
29 */
30
31#include <asm/iseries/vio.h>
32#include <asm/iseries/hv_lp_event.h>
33#include <asm/iseries/hv_types.h>
34#include <asm/iseries/hv_lp_config.h>
35#include <asm/vio.h>
36#include <linux/device.h>
37#include "ibmvscsi.h"
38
39/* global variables */
40static struct ibmvscsi_host_data *single_host_data;
41
42/* ------------------------------------------------------------
43 * Routines for direct interpartition interaction
44 */
45struct srp_lp_event {
46 struct HvLpEvent lpevt; /* 0x00-0x17 */
47 u32 reserved1; /* 0x18-0x1B; unused */
48 u16 version; /* 0x1C-0x1D; unused */
49 u16 subtype_rc; /* 0x1E-0x1F; unused */
50 struct viosrp_crq crq; /* 0x20-0x3F */
51};
52
53/**
54 * standard interface for handling logical partition events.
55 */
56static void iseriesvscsi_handle_event(struct HvLpEvent *lpevt)
57{
58 struct srp_lp_event *evt = (struct srp_lp_event *)lpevt;
59
60 if (!evt) {
61 printk(KERN_ERR "ibmvscsi: received null event\n");
62 return;
63 }
64
65 if (single_host_data == NULL) {
66 printk(KERN_ERR
67 "ibmvscsi: received event, no adapter present\n");
68 return;
69 }
70
71 ibmvscsi_handle_crq(&evt->crq, single_host_data);
72}
73
74/* ------------------------------------------------------------
75 * Routines for driver initialization
76 */
77static int iseriesvscsi_init_crq_queue(struct crq_queue *queue,
78 struct ibmvscsi_host_data *hostdata,
79 int max_requests)
80{
81 int rc;
82
83 single_host_data = hostdata;
84 rc = viopath_open(viopath_hostLp, viomajorsubtype_scsi, max_requests);
85 if (rc < 0) {
86 printk("viopath_open failed with rc %d in open_event_path\n",
87 rc);
88 goto viopath_open_failed;
89 }
90
91 rc = vio_setHandler(viomajorsubtype_scsi, iseriesvscsi_handle_event);
92 if (rc < 0) {
93 printk("vio_setHandler failed with rc %d in open_event_path\n",
94 rc);
95 goto vio_setHandler_failed;
96 }
97 return 0;
98
99 vio_setHandler_failed:
100 viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
101 viopath_open_failed:
102 return -1;
103}
104
105static void iseriesvscsi_release_crq_queue(struct crq_queue *queue,
106 struct ibmvscsi_host_data *hostdata,
107 int max_requests)
108{
109 vio_clearHandler(viomajorsubtype_scsi);
110 viopath_close(viopath_hostLp, viomajorsubtype_scsi, max_requests);
111}
112
113/**
114 * reset_crq_queue: - resets a crq after a failure
115 * @queue: crq_queue to initialize and register
116 * @hostdata: ibmvscsi_host_data of host
117 *
118 * no-op for iSeries
119 */
120static int iseriesvscsi_reset_crq_queue(struct crq_queue *queue,
121 struct ibmvscsi_host_data *hostdata)
122{
123 return 0;
124}
125
126/**
127 * reenable_crq_queue: - reenables a crq after a failure
128 * @queue: crq_queue to initialize and register
129 * @hostdata: ibmvscsi_host_data of host
130 *
131 * no-op for iSeries
132 */
133static int iseriesvscsi_reenable_crq_queue(struct crq_queue *queue,
134 struct ibmvscsi_host_data *hostdata)
135{
136 return 0;
137}
138
139/**
140 * iseriesvscsi_send_crq: - Send a CRQ
141 * @hostdata: the adapter
142 * @word1: the first 64 bits of the data
143 * @word2: the second 64 bits of the data
144 */
145static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
146 u64 word1, u64 word2)
147{
148 single_host_data = hostdata;
149 return HvCallEvent_signalLpEventFast(viopath_hostLp,
150 HvLpEvent_Type_VirtualIo,
151 viomajorsubtype_scsi,
152 HvLpEvent_AckInd_NoAck,
153 HvLpEvent_AckType_ImmediateAck,
154 viopath_sourceinst(viopath_hostLp),
155 viopath_targetinst(viopath_hostLp),
156 0,
157 VIOVERSION << 16, word1, word2, 0,
158 0);
159}
160
161static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
162{
163 return 0;
164}
165
166struct ibmvscsi_ops iseriesvscsi_ops = {
167 .init_crq_queue = iseriesvscsi_init_crq_queue,
168 .release_crq_queue = iseriesvscsi_release_crq_queue,
169 .reset_crq_queue = iseriesvscsi_reset_crq_queue,
170 .reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
171 .send_crq = iseriesvscsi_send_crq,
172 .resume = iseriesvscsi_resume,
173};
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4222035acfb..48cb8d3d175 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -24,16 +24,6 @@ config HVC_OLD_HVSI
 	depends on HVC_CONSOLE
 	default n
 
-config HVC_ISERIES
-	bool "iSeries Hypervisor Virtual Console support"
-	depends on PPC_ISERIES
-	default y
-	select HVC_DRIVER
-	select HVC_IRQ
-	select VIOPATH
-	help
-	  iSeries machines support a hypervisor virtual console.
-
 config HVC_OPAL
 	bool "OPAL Console support"
 	depends on PPC_POWERNV
@@ -81,6 +71,10 @@ config HVC_UDBG
 	depends on PPC && EXPERIMENTAL
 	select HVC_DRIVER
 	default n
+	help
+	  This is meant to be used during HW bring-up or debugging when
+	  no other console mechanism exists but udbg, to get you a quick
+	  console for userspace. Do NOT enable in production kernels.
 
 config HVC_DCC
 	bool "ARM JTAG DCC console"
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index 89abf40bc73..4ca3723b0a3 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -1,7 +1,6 @@
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi_lib.o
 obj-$(CONFIG_HVC_OPAL)		+= hvc_opal.o hvsi_lib.o
 obj-$(CONFIG_HVC_OLD_HVSI)	+= hvsi.o
-obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
 obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
diff --git a/drivers/tty/hvc/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
deleted file mode 100644
index 3f4a897bf4d..00000000000
--- a/drivers/tty/hvc/hvc_iseries.c
+++ /dev/null
@@ -1,599 +0,0 @@
1/*
2 * iSeries vio driver interface to hvc_console.c
3 *
4 * This code is based heavily on hvc_vio.c and viocons.c
5 *
6 * Copyright (C) 2006 Stephen Rothwell, IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <linux/types.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/spinlock.h>
28#include <linux/console.h>
29
30#include <asm/hvconsole.h>
31#include <asm/vio.h>
32#include <asm/prom.h>
33#include <asm/firmware.h>
34#include <asm/iseries/vio.h>
35#include <asm/iseries/hv_call.h>
36#include <asm/iseries/hv_lp_config.h>
37#include <asm/iseries/hv_lp_event.h>
38
39#include "hvc_console.h"
40
41#define VTTY_PORTS 10
42
43static DEFINE_SPINLOCK(consolelock);
44static DEFINE_SPINLOCK(consoleloglock);
45
46static const char hvc_driver_name[] = "hvc_console";
47
48#define IN_BUF_SIZE 200
49
50/*
51 * Our port information.
52 */
53static struct port_info {
54 HvLpIndex lp;
55 u64 seq; /* sequence number of last HV send */
56 u64 ack; /* last ack from HV */
57 struct hvc_struct *hp;
58 int in_start;
59 int in_end;
60 unsigned char in_buf[IN_BUF_SIZE];
61} port_info[VTTY_PORTS] = {
62 [ 0 ... VTTY_PORTS - 1 ] = {
63 .lp = HvLpIndexInvalid
64 }
65};
66
67#define viochar_is_console(pi) ((pi) == &port_info[0])
68
69static struct vio_device_id hvc_driver_table[] __devinitdata = {
70 {"serial", "IBM,iSeries-vty"},
71 { "", "" }
72};
73MODULE_DEVICE_TABLE(vio, hvc_driver_table);
74
75static void hvlog(char *fmt, ...)
76{
77 int i;
78 unsigned long flags;
79 va_list args;
80 static char buf[256];
81
82 spin_lock_irqsave(&consoleloglock, flags);
83 va_start(args, fmt);
84 i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
85 va_end(args);
86 buf[i++] = '\r';
87 HvCall_writeLogBuffer(buf, i);
88 spin_unlock_irqrestore(&consoleloglock, flags);
89}
90
91/*
92 * Initialize the common fields in a charLpEvent
93 */
94static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp)
95{
96 struct HvLpEvent *hev = &viochar->event;
97
98 memset(viochar, 0, sizeof(struct viocharlpevent));
99
100 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
101 HV_LP_EVENT_INT;
102 hev->xType = HvLpEvent_Type_VirtualIo;
103 hev->xSubtype = viomajorsubtype_chario | viochardata;
104 hev->xSourceLp = HvLpConfig_getLpIndex();
105 hev->xTargetLp = lp;
106 hev->xSizeMinus1 = sizeof(struct viocharlpevent);
107 hev->xSourceInstanceId = viopath_sourceinst(lp);
108 hev->xTargetInstanceId = viopath_targetinst(lp);
109}
110
111static int get_chars(uint32_t vtermno, char *buf, int count)
112{
113 struct port_info *pi;
114 int n = 0;
115 unsigned long flags;
116
117 if (vtermno >= VTTY_PORTS)
118 return -EINVAL;
119 if (count == 0)
120 return 0;
121
122 pi = &port_info[vtermno];
123 spin_lock_irqsave(&consolelock, flags);
124
125 if (pi->in_end == 0)
126 goto done;
127
128 n = pi->in_end - pi->in_start;
129 if (n > count)
130 n = count;
131 memcpy(buf, &pi->in_buf[pi->in_start], n);
132 pi->in_start += n;
133 if (pi->in_start == pi->in_end) {
134 pi->in_start = 0;
135 pi->in_end = 0;
136 }
137done:
138 spin_unlock_irqrestore(&consolelock, flags);
139 return n;
140}
141
142static int put_chars(uint32_t vtermno, const char *buf, int count)
143{
144 struct viocharlpevent *viochar;
145 struct port_info *pi;
146 HvLpEvent_Rc hvrc;
147 unsigned long flags;
148 int sent = 0;
149
150 if (vtermno >= VTTY_PORTS)
151 return -EINVAL;
152
153 pi = &port_info[vtermno];
154
155 spin_lock_irqsave(&consolelock, flags);
156
157 if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) {
158 HvCall_writeLogBuffer(buf, count);
159 sent = count;
160 goto done;
161 }
162
163 viochar = vio_get_event_buffer(viomajorsubtype_chario);
164 if (viochar == NULL) {
165 hvlog("\n\rviocons: Can't get viochar buffer.");
166 goto done;
167 }
168
169 while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
170 int len;
171
172 len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count;
173
174 if (viochar_is_console(pi))
175 HvCall_writeLogBuffer(buf, len);
176
177 init_data_event(viochar, pi->lp);
178
179 viochar->len = len;
180 viochar->event.xCorrelationToken = pi->seq++;
181 viochar->event.xSizeMinus1 =
182 offsetof(struct viocharlpevent, data) + len;
183
184 memcpy(viochar->data, buf, len);
185
186 hvrc = HvCallEvent_signalLpEvent(&viochar->event);
187 if (hvrc)
188 hvlog("\n\rerror sending event! return code %d\n\r",
189 (int)hvrc);
190 sent += len;
191 count -= len;
192 buf += len;
193 }
194
195 vio_free_event_buffer(viomajorsubtype_chario, viochar);
196done:
197 spin_unlock_irqrestore(&consolelock, flags);
198 return sent;
199}
200
201static const struct hv_ops hvc_get_put_ops = {
202 .get_chars = get_chars,
203 .put_chars = put_chars,
204 .notifier_add = notifier_add_irq,
205 .notifier_del = notifier_del_irq,
206 .notifier_hangup = notifier_hangup_irq,
207};
208
209static int __devinit hvc_vio_probe(struct vio_dev *vdev,
210 const struct vio_device_id *id)
211{
212 struct hvc_struct *hp;
213 struct port_info *pi;
214
215 /* probed with invalid parameters. */
216 if (!vdev || !id)
217 return -EPERM;
218
219 if (vdev->unit_address >= VTTY_PORTS)
220 return -ENODEV;
221
222 pi = &port_info[vdev->unit_address];
223
224 hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
225 VIOCHAR_MAX_DATA);
226 if (IS_ERR(hp))
227 return PTR_ERR(hp);
228 pi->hp = hp;
229 dev_set_drvdata(&vdev->dev, pi);
230
231 return 0;
232}
233
234static int __devexit hvc_vio_remove(struct vio_dev *vdev)
235{
236 struct port_info *pi = dev_get_drvdata(&vdev->dev);
237 struct hvc_struct *hp = pi->hp;
238
239 return hvc_remove(hp);
240}
241
242static struct vio_driver hvc_vio_driver = {
243 .id_table = hvc_driver_table,
244 .probe = hvc_vio_probe,
245 .remove = __devexit_p(hvc_vio_remove),
246 .driver = {
247 .name = hvc_driver_name,
248 .owner = THIS_MODULE,
249 }
250};
251
252static void hvc_open_event(struct HvLpEvent *event)
253{
254 unsigned long flags;
255 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
256 u8 port = cevent->virtual_device;
257 struct port_info *pi;
258 int reject = 0;
259
260 if (hvlpevent_is_ack(event)) {
261 if (port >= VTTY_PORTS)
262 return;
263
264 spin_lock_irqsave(&consolelock, flags);
265
266 pi = &port_info[port];
267 if (event->xRc == HvLpEvent_Rc_Good) {
268 pi->seq = pi->ack = 0;
269 /*
270 * This line allows connections from the primary
271 * partition but once one is connected from the
272 * primary partition nothing short of a reboot
273 * of linux will allow access from the hosting
274 * partition again without a required iSeries fix.
275 */
276 pi->lp = event->xTargetLp;
277 }
278
279 spin_unlock_irqrestore(&consolelock, flags);
280 if (event->xRc != HvLpEvent_Rc_Good)
281 printk(KERN_WARNING
282 "hvc: handle_open_event: event->xRc == (%d).\n",
283 event->xRc);
284
285 if (event->xCorrelationToken != 0) {
286 atomic_t *aptr= (atomic_t *)event->xCorrelationToken;
287 atomic_set(aptr, 1);
288 } else
289 printk(KERN_WARNING
290 "hvc: weird...got open ack without atomic\n");
291 return;
292 }
293
294 /* This had better require an ack, otherwise complain */
295 if (!hvlpevent_need_ack(event)) {
296 printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
297 return;
298 }
299
300 spin_lock_irqsave(&consolelock, flags);
301
302 /* Make sure this is a good virtual tty */
303 if (port >= VTTY_PORTS) {
304 event->xRc = HvLpEvent_Rc_SubtypeError;
305 cevent->subtype_result_code = viorc_openRejected;
306 /*
307 * Flag state here since we can't printk while holding
308 * the consolelock spinlock.
309 */
310 reject = 1;
311 } else {
312 pi = &port_info[port];
313 if ((pi->lp != HvLpIndexInvalid) &&
314 (pi->lp != event->xSourceLp)) {
315 /*
316			 * If this tty is already connected to a different
317 * partition, fail.
318 */
319 event->xRc = HvLpEvent_Rc_SubtypeError;
320 cevent->subtype_result_code = viorc_openRejected;
321 reject = 2;
322 } else {
323 pi->lp = event->xSourceLp;
324 event->xRc = HvLpEvent_Rc_Good;
325 cevent->subtype_result_code = viorc_good;
326 pi->seq = pi->ack = 0;
327 }
328 }
329
330 spin_unlock_irqrestore(&consolelock, flags);
331
332 if (reject == 1)
333 printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
334 else if (reject == 2)
335 printk(KERN_WARNING "hvc: open rejected: console in exclusive "
336 "use by another partition.\n");
337
338 /* Return the acknowledgement */
339 HvCallEvent_ackLpEvent(event);
340}
341
342/*
343 * Handle a close charLpEvent. This should ONLY be an Interrupt because the
344 * virtual console should never actually issue a close event to the hypervisor
345 * because the virtual console never goes away. A close event coming from the
346 * hypervisor simply means that there are no client consoles connected to the
347 * virtual console.
348 */
349static void hvc_close_event(struct HvLpEvent *event)
350{
351 unsigned long flags;
352 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
353 u8 port = cevent->virtual_device;
354
355 if (!hvlpevent_is_int(event)) {
356 printk(KERN_WARNING
357 "hvc: got unexpected close acknowledgement\n");
358 return;
359 }
360
361 if (port >= VTTY_PORTS) {
362 printk(KERN_WARNING
363 "hvc: close message from invalid virtual device.\n");
364 return;
365 }
366
367 /* For closes, just mark the console partition invalid */
368 spin_lock_irqsave(&consolelock, flags);
369
370 if (port_info[port].lp == event->xSourceLp)
371 port_info[port].lp = HvLpIndexInvalid;
372
373 spin_unlock_irqrestore(&consolelock, flags);
374}
375
376static void hvc_data_event(struct HvLpEvent *event)
377{
378 unsigned long flags;
379 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
380 struct port_info *pi;
381 int n;
382 u8 port = cevent->virtual_device;
383
384 if (port >= VTTY_PORTS) {
385 printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
386 port);
387 return;
388 }
389 if (cevent->len == 0)
390 return;
391
392 /*
393 * Change 05/01/2003 - Ryan Arnold: If a partition other than
394 * the current exclusive partition tries to send us data
395 * events then just drop them on the floor because we don't
396 * want his stinking data. He isn't authorized to receive
397 * data because he wasn't the first one to get the console,
398 * therefore he shouldn't be allowed to send data either.
399 * This will work without an iSeries fix.
400 */
401 pi = &port_info[port];
402 if (pi->lp != event->xSourceLp)
403 return;
404
405 spin_lock_irqsave(&consolelock, flags);
406
407 n = IN_BUF_SIZE - pi->in_end;
408 if (n > cevent->len)
409 n = cevent->len;
410 if (n > 0) {
411 memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
412 pi->in_end += n;
413 }
414 spin_unlock_irqrestore(&consolelock, flags);
415 if (n == 0)
416 printk(KERN_WARNING "hvc: input buffer overflow\n");
417}
418
419static void hvc_ack_event(struct HvLpEvent *event)
420{
421 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
422 unsigned long flags;
423 u8 port = cevent->virtual_device;
424
425 if (port >= VTTY_PORTS) {
426 printk(KERN_WARNING "hvc: data on invalid virtual device\n");
427 return;
428 }
429
430 spin_lock_irqsave(&consolelock, flags);
431 port_info[port].ack = event->xCorrelationToken;
432 spin_unlock_irqrestore(&consolelock, flags);
433}
434
435static void hvc_config_event(struct HvLpEvent *event)
436{
437 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
438
439 if (cevent->data[0] == 0x01)
440 printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
441 cevent->data[1], cevent->data[2],
442 cevent->data[3], cevent->data[4]);
443 else
444 printk(KERN_WARNING "hvc: unknown config event\n");
445}
446
447static void hvc_handle_event(struct HvLpEvent *event)
448{
449 int charminor;
450
451 if (event == NULL)
452 return;
453
454 charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
455 switch (charminor) {
456 case viocharopen:
457 hvc_open_event(event);
458 break;
459 case viocharclose:
460 hvc_close_event(event);
461 break;
462 case viochardata:
463 hvc_data_event(event);
464 break;
465 case viocharack:
466 hvc_ack_event(event);
467 break;
468 case viocharconfig:
469 hvc_config_event(event);
470 break;
471 default:
472 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
473 event->xRc = HvLpEvent_Rc_InvalidSubtype;
474 HvCallEvent_ackLpEvent(event);
475 }
476 }
477}
478
479static int __init send_open(HvLpIndex remoteLp, void *sem)
480{
481 return HvCallEvent_signalLpEventFast(remoteLp,
482 HvLpEvent_Type_VirtualIo,
483 viomajorsubtype_chario | viocharopen,
484 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
485 viopath_sourceinst(remoteLp),
486 viopath_targetinst(remoteLp),
487 (u64)(unsigned long)sem, VIOVERSION << 16,
488 0, 0, 0, 0);
489}
490
491static int __init hvc_vio_init(void)
492{
493 atomic_t wait_flag;
494 int rc;
495
496 if (!firmware_has_feature(FW_FEATURE_ISERIES))
497 return -EIO;
498
499 /* +2 for fudge */
500 rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
501 viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
502 if (rc)
503 printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
504
505 if (viopath_hostLp == HvLpIndexInvalid)
506 vio_set_hostlp();
507
508 /*
509 * And if the primary is not the same as the hosting LP, open to the
510 * hosting lp
511 */
512 if ((viopath_hostLp != HvLpIndexInvalid) &&
513 (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
514 printk(KERN_INFO "hvc: open path to hosting (%d)\n",
515 viopath_hostLp);
516 rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
517 VIOCHAR_WINDOW + 2); /* +2 for fudge */
518 if (rc)
519 printk(KERN_WARNING
520 "error opening to partition %d: %d\n",
521 viopath_hostLp, rc);
522 }
523
524 if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
525 printk(KERN_WARNING
526		       "hvc: error setting handler for console events!\n");
527
528 /*
529 * First, try to open the console to the hosting lp.
530 * Wait on a semaphore for the response.
531 */
532 atomic_set(&wait_flag, 0);
533 if ((viopath_isactive(viopath_hostLp)) &&
534 (send_open(viopath_hostLp, &wait_flag) == 0)) {
535 printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp);
536 while (atomic_read(&wait_flag) == 0)
537 mb();
538 atomic_set(&wait_flag, 0);
539 }
540
541 /*
542 * If we don't have an active console, try the primary
543 */
544 if ((!viopath_isactive(port_info[0].lp)) &&
545 (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
546 (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) {
547 printk(KERN_INFO "hvc: opening console to primary partition\n");
548 while (atomic_read(&wait_flag) == 0)
549 mb();
550 }
551
552 /* Register as a vio device to receive callbacks */
553 rc = vio_register_driver(&hvc_vio_driver);
554
555 return rc;
556}
557module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
558
559static void __exit hvc_vio_exit(void)
560{
561 vio_unregister_driver(&hvc_vio_driver);
562}
563module_exit(hvc_vio_exit);
564
565/* the device tree order defines our numbering */
566static int __init hvc_find_vtys(void)
567{
568 struct device_node *vty;
569 int num_found = 0;
570
571 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
572 vty = of_find_node_by_name(vty, "vty")) {
573 const uint32_t *vtermno;
574
575 /* We have statically defined space for only a certain number
576 * of console adapters.
577 */
578 if ((num_found >= MAX_NR_HVC_CONSOLES) ||
579 (num_found >= VTTY_PORTS)) {
580 of_node_put(vty);
581 break;
582 }
583
584 vtermno = of_get_property(vty, "reg", NULL);
585 if (!vtermno)
586 continue;
587
588 if (!of_device_is_compatible(vty, "IBM,iSeries-vty"))
589 continue;
590
591 if (num_found == 0)
592 add_preferred_console("hvc", 0, NULL);
593 hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
594 ++num_found;
595 }
596
597 return num_found;
598}
599console_initcall(hvc_find_vtys);
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 4c9b13e7748..72228276fe3 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -36,7 +36,7 @@ static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
 {
 	int i;
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count && udbg_putc; i++)
 		udbg_putc(buf[i]);
 
 	return i;
@@ -67,6 +67,9 @@ static int __init hvc_udbg_init(void)
 {
 	struct hvc_struct *hp;
 
+	if (!udbg_putc)
+		return -ENODEV;
+
 	BUG_ON(hvc_udbg_dev);
 
 	hp = hvc_alloc(0, 0, &hvc_udbg_ops, 16);
@@ -88,6 +91,9 @@ module_exit(hvc_udbg_exit);
 
 static int __init hvc_udbg_console_init(void)
 {
+	if (!udbg_putc)
+		return -ENODEV;
+
 	hvc_instantiate(0, 0, &hvc_udbg_ops);
 	add_preferred_console("hvc", 0, NULL);
 
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index fc3c3ad6c07..3a0d53d6368 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -46,7 +46,6 @@
 #include <asm/hvconsole.h>
 #include <asm/vio.h>
 #include <asm/prom.h>
-#include <asm/firmware.h>
 #include <asm/hvsi.h>
 #include <asm/udbg.h>
 
@@ -322,9 +321,6 @@ static int __init hvc_vio_init(void)
 {
 	int rc;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return -EIO;
-
 	/* Register as a vio device to receive callbacks */
 	rc = vio_register_driver(&hvc_vio_driver);
 
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e7764488e..665beb68f67 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -853,7 +853,7 @@ config SERIAL_MPC52xx_CONSOLE_BAUD
 
 config SERIAL_ICOM
 	tristate "IBM Multiport Serial Adapter"
-	depends on PCI && (PPC_ISERIES || PPC_PSERIES)
+	depends on PCI && PPC_PSERIES
 	select SERIAL_CORE
 	select FW_LOADER
 	help
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index df9e8f0e327..7e9e8f4d8f0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1039,7 +1039,7 @@ config LANTIQ_WDT
 
 config GEF_WDT
 	tristate "GE Watchdog Timer"
-	depends on GEF_SBC610 || GEF_SBC310 || GEF_PPC9A
+	depends on GE_FPGA
 	---help---
 	  Watchdog timer found in a number of GE single board computers.
 