author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/message/i2o/i2o_block.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/message/i2o/i2o_block.c')
-rw-r--r--  drivers/message/i2o/i2o_block.c  1247
1 file changed, 1247 insertions(+), 0 deletions(-)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
new file mode 100644
index 000000000000..7b74c87b569e
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.c
@@ -0,0 +1,1247 @@
/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as a gzipped tar and not one requiring a proprietary or patent
 * encumbered tool to unpack.
 *
 * Fixes/additions:
 *      Steve Ralston:
 *              Multiple device handling error fixes,
 *              Added a queue depth.
 *      Alan Cox:
 *              FC920 has an RMW bug. Don't OR in the end marker.
 *              Removed queue walk, fixed for 64-bitness.
 *              Rewrote much of the code over time
 *              Added indirect block lists
 *              Handle 64K limits on many controllers
 *              Don't use indirects on the Promise (breaks)
 *              Heavily chop down the queue depths
 *      Deepak Saxena:
 *              Independent queues per IOP
 *              Support for dynamic device creation/deletion
 *              Code cleanup
 *              Support for larger I/Os through merge* functions
 *              (taken from DAC960 driver)
 *      Boji T Kannanthanam:
 *              Set the I2O Block devices to be detected in increasing
 *              order of TIDs during boot.
 *              Search and set the I2O block device that we boot off
 *              from as the first device to be claimed (as /dev/i2o/hda).
 *              Properly attach/detach I2O gendisk structure from the
 *              system gendisk list. The I2O block devices now appear in
 *              /proc/partitions.
 *      Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *              Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "i2o_block.h"

#define OSM_NAME        "block-osm"
#define OSM_VERSION     "$Rev$"
#define OSM_DESCRIPTION "I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
        {I2O_CLASS_RANDOM_BLOCK_STORAGE},
        {I2O_CLASS_END}
};

/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
        blk_cleanup_queue(dev->gd->queue);

        put_disk(dev->gd);

        kfree(dev);
};

/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

        osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

        del_gendisk(i2o_blk_dev->gd);

        dev_set_drvdata(dev, NULL);

        i2o_device_claim_release(i2o_dev);

        i2o_block_device_free(i2o_blk_dev);

        return 0;
};

/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(60 << 16, &msg->body[0]);
        osm_debug("Flushing...\n");

        return i2o_msg_post_wait(dev->iop, m, 60);
};

/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Loads the media into the drive. The identifier should be set to -1,
 * because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        writel(0, &msg->body[1]);
        osm_debug("Mounting...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        osm_debug("Locking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message __iomem *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(media_id, &msg->body[0]);
        osm_debug("Unlocking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation which should be sent
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
        struct i2o_device *i2o_dev = dev->i2o_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct i2o_message __iomem *msg;
        u32 m;
        int rc;

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
               i2o_dev->lct_data.tid, &msg->u.head[1]);
        writel(op << 24, &msg->body[0]);
        osm_debug("Power...\n");

        rc = i2o_msg_post_wait(c, m, 60);
        if (!rc)
                dev->power = op;

        return rc;
};

/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
        struct i2o_block_request *ireq;

        ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
        if (!ireq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ireq->queue);

        return ireq;
};

/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
        mempool_free(ireq, i2o_blk_req_pool.pool);
};

/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @ireq: I2O block request
 *
 * Builds the SG list and maps it so that it is accessible by the
 * controller.
 *
 * Returns the number of elements in the SG list or 0 on failure.
 */
static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
        int nents;

        nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

        if (rq_data_dir(ireq->req) == READ)
                ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
        else
                ireq->sg_dma_direction = PCI_DMA_TODEVICE;

        ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
                                    ireq->sg_dma_direction);

        return ireq->sg_nents;
};

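A subtlety in i2o_block_sglist_alloc() above: it stores the PCI-flavoured direction constants (PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE) but hands that value to the generic dma_map_sg(), which takes an enum dma_data_direction. This only works because the PCI constants alias the generic ones numerically. A minimal stand-alone sketch of that assumption, with the values copied from the 2.6-era headers so the sketch is self-contained:

#include <assert.h>

/* Values as defined in the 2.6-era kernel headers; in the kernel they
 * come from <linux/dma-mapping.h> and <linux/pci.h>. */
enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
        DMA_NONE = 3,
};

#define PCI_DMA_BIDIRECTIONAL   0
#define PCI_DMA_TODEVICE        1
#define PCI_DMA_FROMDEVICE      2

int main(void)
{
        /* The mix-and-match in i2o_block_sglist_alloc()/_free() is only
         * safe because these pairs are numerically identical. */
        assert(PCI_DMA_TODEVICE == DMA_TO_DEVICE);
        assert(PCI_DMA_FROMDEVICE == DMA_FROM_DEVICE);
        return 0;
}
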
/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;

        dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
                     ireq->sg_dma_direction);
};

/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
        struct i2o_block_device *i2o_blk_dev = q->queuedata;
        struct i2o_block_request *ireq;

        /* request is already processed by us, so return */
        if (req->flags & REQ_SPECIAL) {
                osm_debug("REQ_SPECIAL already set!\n");
                req->flags |= REQ_DONTPREP;
                return BLKPREP_OK;
        }

        /* connect the i2o_block_request to the request */
        if (!req->special) {
                ireq = i2o_block_request_alloc();
                if (unlikely(IS_ERR(ireq))) {
                        osm_debug("unable to allocate i2o_block_request!\n");
                        return BLKPREP_DEFER;
                }

                ireq->i2o_blk_dev = i2o_blk_dev;
                req->special = ireq;
                ireq->req = req;
        } else
                ireq = req->special;

        /* do not come back here */
        req->flags |= REQ_DONTPREP | REQ_SPECIAL;

        return BLKPREP_OK;
};

/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @delayed_request: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
        struct i2o_block_delayed_request *dreq = delayed_request;
        struct request_queue *q = dreq->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        kfree(dreq);
};

/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
                           struct i2o_message *msg)
{
        struct i2o_block_request *ireq;
        struct request *req;
        struct i2o_block_device *dev;
        struct request_queue *q;
        u8 st;
        unsigned long flags;

        /* FAILed message */
        if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
                struct i2o_message *pmsg;
                u32 pm;

                /*
                 * FAILed message from controller
                 * We increment the error count and abort it
                 *
                 * In theory this will never happen. The I2O block class
                 * specification states that block devices never return
                 * FAILs but instead use the REQ status field...but
                 * better be on the safe side since no one really follows
                 * the spec to the letter :)
                 */
                pm = le32_to_cpu(msg->body[3]);
                pmsg = i2o_msg_in_to_virt(c, pm);

                req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
                if (unlikely(!req)) {
                        osm_err("NULL reply received!\n");
                        return -1;
                }

                ireq = req->special;
                dev = ireq->i2o_blk_dev;
                q = dev->gd->queue;

                req->errors++;

                spin_lock_irqsave(q->queue_lock, flags);

                while (end_that_request_chunk(req, !req->errors,
                                              le32_to_cpu(pmsg->body[1]))) ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                /* Now flush the message by making it a NOP */
                i2o_msg_nop(c, pm);

                return -1;
        }

        req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
        if (unlikely(!req)) {
                osm_err("NULL reply received!\n");
                return -1;
        }

        ireq = req->special;
        dev = ireq->i2o_blk_dev;
        q = dev->gd->queue;

        if (unlikely(!dev->i2o_dev)) {
                /*
                 * This is a HACK, but Intel Integrated RAID allows users
                 * to delete a volume that is claimed, locked, and in use
                 * by the OS. We have to check for a reply from a
                 * non-existent device and flag it as an error or the system
                 * goes kaput...
                 */
                req->errors++;
                osm_warn("Data transfer to deleted device!\n");
                spin_lock_irqsave(q->queue_lock, flags);
                while (end_that_request_chunk
                       (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);
                return -1;
        }

        /*
         * Let's see what is cooking. We stuffed the
         * request in the context.
         */

        st = le32_to_cpu(msg->body[0]) >> 24;

        if (st != 0) {
                int err;
                char *bsa_errors[] = {
                        "Success",
                        "Media Error",
                        "Failure communicating to device",
                        "Device Failure",
                        "Device is not ready",
                        "Media not present",
                        "Media is locked by another user",
                        "Media has failed",
                        "Failure communicating to device",
                        "Device bus failure",
                        "Device is locked by another user",
                        "Device is write protected",
                        "Device has reset",
                        "Volume has changed, waiting for acknowledgement"
                };

                err = le32_to_cpu(msg->body[0]) & 0xffff;

                /*
                 * Device not ready means two things. One is that the
                 * device went offline (but not removable media).
                 *
                 * The second is that you have a SuperTrak 100 and the
                 * firmware got constipated. Unlike standard i2o card
                 * setups the supertrak returns an error rather than
                 * blocking for the timeout in these cases.
                 *
                 * Don't stick a supertrak100 into cache aggressive modes
                 */

                osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
                        err < ARRAY_SIZE(bsa_errors) ? bsa_errors[err] :
                        "Unknown error");
                if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
                        printk(KERN_ERR " - DDM attempted %d retries",
                               (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
                printk(KERN_ERR ".\n");
                req->errors++;
        } else
                req->errors = 0;

        if (!end_that_request_chunk
            (req, !req->errors, le32_to_cpu(msg->body[1]))) {
                add_disk_randomness(req->rq_disk);
                spin_lock_irqsave(q->queue_lock, flags);

                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                i2o_block_sglist_free(ireq);
                i2o_block_request_free(ireq);
        } else
                osm_err("still remaining chunks\n");

        return 1;
};

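i2o_block_reply() above unpacks everything it needs about a completed transfer from the single 32-bit word in msg->body[0]: the status in bits 31-24, a DDM retry count in bits 23-16 and a detailed error code in bits 15-0. A minimal stand-alone sketch of that layout, exactly as the code reads it (the field names are descriptive assumptions, not names from the I2O spec, and the reply word in main() is made up):

#include <stdint.h>
#include <stdio.h>

/* Decodes a BSA status word the way i2o_block_reply() does. */
struct bsa_status {
        uint8_t status;         /* body[0] >> 24 */
        uint8_t retries;        /* (body[0] >> 16) & 0xff */
        uint16_t error_code;    /* body[0] & 0xffff */
};

static struct bsa_status bsa_decode(uint32_t body0)
{
        struct bsa_status s = {
                .status = body0 >> 24,
                .retries = (body0 >> 16) & 0xff,
                .error_code = body0 & 0xffff,
        };
        return s;
}

int main(void)
{
        struct bsa_status s = bsa_decode(0x04030004);   /* made-up reply */

        printf("status %u, %u retries, error code %u\n",
               s.status, s.retries, s.error_code);
        return 0;
}
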
static void i2o_block_event(struct i2o_event *evt)
{
        osm_info("block-osm: event received\n");
};

/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 * perhaps genhd?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size        | Sectors | Heads | Cylinders
 * -----------------+---------+-------+--------------------
 *       X <= 528M  |   63    |  16   | X/(63 * 16 * 512)
 * 528M < X <= 1G   |   63    |  32   | X/(63 * 32 * 512)
 *   1G < X <= 21G  |   63    |  64   | X/(63 * 64 * 512)
 *  21G < X <= 42G  |   63    |  128  | X/(63 * 128 * 512)
 *  42G < X         |   63    |  255  | X/(63 * 255 * 512)
 *
 * (The BLOCK_SIZE_* constants below are in 512-byte sectors; note that
 * 4403200 and 8806400 sectors work out to roughly 2.1G and 4.2G, so the
 * "21G"/"42G" names appear to have lost a decimal point.)
 */
#define BLOCK_SIZE_528M         1081344
#define BLOCK_SIZE_1G           2097152
#define BLOCK_SIZE_21G          4403200
#define BLOCK_SIZE_42G          8806400
#define BLOCK_SIZE_84G          17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
                                unsigned char *hds, unsigned char *secs)
{
        unsigned long heads, sectors, cylinders;

        sectors = 63L;          /* Maximize sectors per track */
        if (capacity <= BLOCK_SIZE_528M)
                heads = 16;
        else if (capacity <= BLOCK_SIZE_1G)
                heads = 32;
        else if (capacity <= BLOCK_SIZE_21G)
                heads = 64;
        else if (capacity <= BLOCK_SIZE_42G)
                heads = 128;
        else
                heads = 255;

        cylinders = (unsigned long)capacity / (heads * sectors);

        *cyls = (unsigned short)cylinders;      /* Stuff return values */
        *secs = (unsigned char)sectors;
        *hds = (unsigned char)heads;
}

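A worked example of the mapping, as a stand-alone user-space sketch (the thresholds are copied from the BLOCK_SIZE_* constants above; the 1 GiB disk is hypothetical): get_capacity() reports 2097152 sectors for it, which falls in the 32-head bracket and yields 2097152 / (32 * 63) = 1040 cylinders.

#include <stdio.h>

/* Same bracket selection as i2o_block_biosparam(); capacity is in
 * 512-byte sectors, matching what get_capacity() hands the driver. */
static void chs_example(unsigned long capacity)
{
        unsigned long heads, sectors = 63, cylinders;

        if (capacity <= 1081344)        /* BLOCK_SIZE_528M */
                heads = 16;
        else if (capacity <= 2097152)   /* BLOCK_SIZE_1G */
                heads = 32;
        else if (capacity <= 4403200)   /* BLOCK_SIZE_21G */
                heads = 64;
        else if (capacity <= 8806400)   /* BLOCK_SIZE_42G */
                heads = 128;
        else
                heads = 255;

        cylinders = capacity / (heads * sectors);
        printf("%lu sectors -> C/H/S = %lu/%lu/%lu\n",
               capacity, cylinders, heads, sectors);
}

int main(void)
{
        chs_example(2097152);   /* 1 GiB disk -> 1040/32/63 */
        return 0;
}
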
/**
 * i2o_block_open - Open the block device
 *
 * Power up the device, mount and lock the media. This function is
 * called when the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct inode *inode, struct file *file)
{
        struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;

        if (!dev->i2o_dev)
                return -ENODEV;

        if (dev->power > 0x1f)
                i2o_block_device_power(dev, 0x02);

        i2o_block_device_mount(dev->i2o_dev, -1);

        i2o_block_device_lock(dev->i2o_dev, -1);

        osm_debug("Ready.\n");

        return 0;
};

/**
 * i2o_block_release - Release the I2O block device
 *
 * Unlock and unmount the media, and power down the device. Gets called
 * when the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct inode *inode, struct file *file)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        u8 operation;

        /*
         * This is to deal with the case of an application
         * opening a device and then the device disappears while
         * it's in use, and then the application tries to release
         * it. ex: Unmounting a deleted RAID volume at reboot.
         * If we send messages, it will just cause FAILs since
         * the TID no longer exists.
         */
        if (!dev->i2o_dev)
                return 0;

        i2o_block_device_flush(dev->i2o_dev);

        i2o_block_device_unlock(dev->i2o_dev, -1);

        if (dev->flags & (1 << 3 | 1 << 4))     /* Removable */
                operation = 0x21;
        else
                operation = 0x24;

        i2o_block_device_power(dev, operation);

        return 0;
}

/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        void __user *argp = (void __user *)arg;

        /* Anyone capable of this syscall can do *real bad* things */

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case HDIO_GETGEO:
                {
                        struct hd_geometry g;
                        i2o_block_biosparam(get_capacity(disk),
                                            &g.cylinders, &g.heads,
                                            &g.sectors);
                        g.start = get_start_sect(inode->i_bdev);
                        return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
                }

        case BLKI2OGRSTRAT:
                return put_user(dev->rcache, (int __user *)arg);
        case BLKI2OGWSTRAT:
                return put_user(dev->wcache, (int __user *)arg);
        case BLKI2OSRSTRAT:
                if (arg > CACHE_SMARTFETCH)
                        return -EINVAL;
                dev->rcache = arg;
                break;
        case BLKI2OSWSTRAT:
                if (arg != 0
                    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
                        return -EINVAL;
                dev->wcache = arg;
                break;
        default:
                return -ENOTTY;
        }
        return 0;
};

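For illustration, a hedged user-space sketch of calling the cache-strategy ioctls handled above. It assumes the BLKI2O* ioctl numbers are the ones exported through <linux/i2o-dev.h> and uses a hypothetical device node; since the handler requires CAP_SYS_ADMIN, it must run as root.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2o-dev.h>      /* BLKI2OGRSTRAT and friends (assumed) */

int main(void)
{
        int strategy;
        int fd = open("/dev/i2o/hda", O_RDONLY);        /* hypothetical node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Read back the current read-cache strategy. */
        if (ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
                printf("read cache strategy: %d\n", strategy);
        else
                perror("BLKI2OGRSTRAT");

        close(fd);
        return 0;
}
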
/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
        struct i2o_block_device *p = disk->private_data;

        if (p->media_change_flag) {
                p->media_change_flag = 0;
                return 1;
        }
        return 0;
}

/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and, after everything is set up, the message
 * is posted to the I2O controller. No cleanup is done by this function.
 * It is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
        struct i2o_block_device *dev = req->rq_disk->private_data;
        struct i2o_controller *c = dev->i2o_dev->iop;
        int tid = dev->i2o_dev->lct_data.tid;
        struct i2o_message __iomem *msg;
        void __iomem *mptr;
        struct i2o_block_request *ireq = req->special;
        struct scatterlist *sg;
        int sgnum;
        int i;
        u32 m;
        u32 tcntxt;
        u32 sg_flags;
        int rc;

        m = i2o_msg_get(c, &msg);
        if (m == I2O_QUEUE_EMPTY) {
                rc = -EBUSY;
                goto exit;
        }

        tcntxt = i2o_cntxt_list_add(c, req);
        if (!tcntxt) {
                rc = -ENOMEM;
                goto nop_msg;
        }

        if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
                rc = -ENOMEM;
                goto context_remove;
        }

        /* Build the message based on the request. */
        writel(i2o_block_driver.context, &msg->u.s.icntxt);
        writel(tcntxt, &msg->u.s.tcntxt);
        writel(req->nr_sectors << 9, &msg->body[1]);

        writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
        writel(req->sector >> 23, &msg->body[3]);

        mptr = &msg->body[4];

        sg = ireq->sg_table;

        if (rq_data_dir(req) == READ) {
                writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x10000000;
                switch (dev->rcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_PREFETCH:
                        writel(0x201F0008, &msg->body[0]);
                        break;
                case CACHE_SMARTFETCH:
                        if (req->nr_sectors > 16)
                                writel(0x201F0008, &msg->body[0]);
                        else
                                writel(0x001F0000, &msg->body[0]);
                        break;
                }
        } else {
                writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x14000000;
                switch (dev->wcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_WRITETHROUGH:
                        writel(0x001F0008, &msg->body[0]);
                        break;
                case CACHE_WRITEBACK:
                        writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTBACK:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTTHROUGH:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                }
        }

        for (i = sgnum; i > 0; i--) {
                if (i == 1)
                        sg_flags |= 0x80000000;
                writel(sg_flags | sg_dma_len(sg), mptr);
                writel(sg_dma_address(sg), mptr + 4);
                mptr += 8;
                sg++;
        }

        writel(I2O_MESSAGE_SIZE
               (((unsigned long)mptr -
                 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
               &msg->u.head[0]);

        list_add_tail(&ireq->queue, &dev->open_queue);
        dev->open_queue_depth++;

        i2o_msg_post(c, m);

        return 0;

      context_remove:
        i2o_cntxt_list_remove(c, req);

      nop_msg:
        i2o_msg_nop(c, m);

      exit:
        return rc;
};

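The two writel() calls filling msg->body[2] and msg->body[3] above split the 64-bit byte offset of the request into 32-bit halves; the slightly cryptic req->sector >> 23 is just ((u64)req->sector << 9) >> 32, the high word of sector * 512. A stand-alone sketch of the arithmetic (the sector value is an arbitrary example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t sector = 0x12345678ULL;        /* arbitrary example LBA */
        uint64_t offset = sector << 9;          /* byte offset, 512 B sectors */

        /* Same values the driver writes to msg->body[2] and msg->body[3]. */
        uint32_t low = offset & 0xffffffff;
        uint32_t high = sector >> 23;

        assert((((uint64_t)high << 32) | low) == offset);
        return 0;
}
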
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs requeue the request.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
        struct request *req;

        while (!blk_queue_plugged(q)) {
                req = elv_next_request(q);
                if (!req)
                        break;

                if (blk_fs_request(req)) {
                        struct i2o_block_delayed_request *dreq;
                        struct i2o_block_request *ireq = req->special;
                        unsigned int queue_depth;

                        queue_depth = ireq->i2o_blk_dev->open_queue_depth;

                        if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
                                if (!i2o_block_transfer(req)) {
                                        blkdev_dequeue_request(req);
                                        continue;
                                }

                        if (queue_depth)
                                break;

                        /* stop the queue and retry later */
                        dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
                        if (!dreq)
                                continue;

                        dreq->queue = q;
                        INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
                                  dreq);

                        osm_info("transfer error\n");
                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
                                                I2O_BLOCK_RETRY_TIME))
                                kfree(dreq);
                        else {
                                blk_stop_queue(q);
                                break;
                        }
                } else
                        end_request(req, 0);
        }
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
        .owner = THIS_MODULE,
        .open = i2o_block_open,
        .release = i2o_block_release,
        .ioctl = i2o_block_ioctl,
        .media_changed = i2o_block_media_changed
};

/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue, and initialize them as far as possible without additional
 * information.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
        struct i2o_block_device *dev;
        struct gendisk *gd;
        struct request_queue *queue;
        int rc;

        dev = kmalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                osm_err("Insufficient memory to allocate I2O Block disk.\n");
                rc = -ENOMEM;
                goto exit;
        }
        memset(dev, 0, sizeof(*dev));

        INIT_LIST_HEAD(&dev->open_queue);
        spin_lock_init(&dev->lock);
        dev->rcache = CACHE_PREFETCH;
        dev->wcache = CACHE_WRITEBACK;

        /* allocate a gendisk with 16 partitions */
        gd = alloc_disk(16);
        if (!gd) {
                osm_err("Insufficient memory to allocate gendisk.\n");
                rc = -ENOMEM;
                goto cleanup_dev;
        }

        /* initialize the request queue */
        queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
        if (!queue) {
                osm_err("Insufficient memory to allocate request queue.\n");
                rc = -ENOMEM;
                goto cleanup_queue;
        }

        blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

        gd->major = I2O_MAJOR;
        gd->queue = queue;
        gd->fops = &i2o_block_fops;
        gd->private_data = dev;

        dev->gd = gd;

        return dev;

      cleanup_queue:
        put_disk(gd);

      cleanup_dev:
        kfree(dev);

      exit:
        return ERR_PTR(rc);
};

/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify that the user_tid of the device is 0xfff and then
 * install the device. Otherwise it is used by some other device
 * (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct gendisk *gd;
        struct request_queue *queue;
        static int unit = 0;
        int rc;
        u64 size;
        u32 blocksize;
        u16 power;
        u32 flags, status;
        int segments;

        /* skip devices which are used by IOP */
        if (i2o_dev->lct_data.user_tid != 0xfff) {
                osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
                return -ENODEV;
        }

        osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);

        if (i2o_device_claim(i2o_dev)) {
                osm_warn("Unable to claim device. Installation aborted\n");
                rc = -EFAULT;
                goto exit;
        }

        i2o_blk_dev = i2o_block_device_alloc();
        if (IS_ERR(i2o_blk_dev)) {
                osm_err("could not alloc a new I2O block device");
                rc = PTR_ERR(i2o_blk_dev);
                goto claim_release;
        }

        i2o_blk_dev->i2o_dev = i2o_dev;
        dev_set_drvdata(dev, i2o_blk_dev);

        /* setup gendisk */
        gd = i2o_blk_dev->gd;
        gd->first_minor = unit << 4;
        sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
        sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
        gd->driverfs_dev = &i2o_dev->device;

        /* setup request queue */
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;

        blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
        blk_queue_max_sectors(queue, I2O_MAX_SECTORS);

        if (c->short_req)
                segments = 8;
        else {
                i2o_status_block *sb;

                sb = c->status_block.virt;

                segments = (sb->inbound_frame_size -
                            sizeof(struct i2o_message) / 4 - 4) / 2;
        }

        blk_queue_max_hw_segments(queue, segments);

        osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
        osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
        osm_debug("hw segments = %d\n", segments);

        /*
         * Ask for the current media data. If that isn't supported
         * then we ask for the device capacity data
         */
        if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
            || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
                i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
                i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
        }
        osm_debug("blocksize = %d\n", blocksize);

        if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
                power = 0;
        i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
        i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);

        set_capacity(gd, size >> 9);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

        add_disk(gd);

        unit++;

        return 0;

      claim_release:
        i2o_device_claim_release(i2o_dev);

      exit:
        return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
        .name = OSM_NAME,
        .event = i2o_block_event,
        .reply = i2o_block_reply,
        .classes = i2o_block_class_id,
        .driver = {
                   .probe = i2o_block_probe,
                   .remove = i2o_block_remove,
                   },
};

/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocate the slab and mempool for request structs, register i2o_block
 * as a block device and finally register the Block OSM with the I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
        int rc;
        int size;

        printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

        /* Allocate request mempool and slab */
        size = sizeof(struct i2o_block_request);
        i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
                                                  SLAB_HWCACHE_ALIGN, NULL,
                                                  NULL);
        if (!i2o_blk_req_pool.slab) {
                osm_err("can't init request slab\n");
                rc = -ENOMEM;
                goto exit;
        }

        i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
                                               mempool_alloc_slab,
                                               mempool_free_slab,
                                               i2o_blk_req_pool.slab);
        if (!i2o_blk_req_pool.pool) {
                osm_err("can't init request mempool\n");
                rc = -ENOMEM;
                goto free_slab;
        }

        /* Register the block device interfaces */
        rc = register_blkdev(I2O_MAJOR, "i2o_block");
        if (rc) {
                osm_err("unable to register block device\n");
                goto free_mempool;
        }
#ifdef MODULE
        osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

        /* Register Block OSM into I2O core */
        rc = i2o_driver_register(&i2o_block_driver);
        if (rc) {
                osm_err("Could not register Block driver\n");
                goto unregister_blkdev;
        }

        return 0;

      unregister_blkdev:
        unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
        mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
        kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
        return rc;
};

/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
        /* Unregister I2O Block OSM from I2O core */
        i2o_driver_unregister(&i2o_block_driver);

        /* Unregister block device */
        unregister_blkdev(I2O_MAJOR, "i2o_block");

        /* Free request mempool and slab */
        mempool_destroy(i2o_blk_req_pool.pool);
        kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);