Diffstat (limited to 'drivers/message/i2o/i2o_block.c')

 -rw-r--r--  drivers/message/i2o/i2o_block.c | 277
 1 file changed, 175 insertions(+), 102 deletions(-)
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e69421e36ac5..1dd2b9dad50e 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -147,6 +147,29 @@ static int i2o_block_device_flush(struct i2o_device *dev)
 };
 
 /**
+ * i2o_block_issue_flush - device-flush interface for block-layer
+ * @queue: the request queue of the device which should be flushed
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Helper function to provide flush functionality to block-layer.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+
+static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
+				 sector_t * error_sector)
+{
+	struct i2o_block_device *i2o_blk_dev = queue->queuedata;
+	int rc = -ENODEV;
+
+	if (likely(i2o_blk_dev))
+		rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
+
+	return rc;
+}
+
+/**
  * i2o_block_device_mount - Mount (load) the media of device dev
  * @dev: I2O device which should receive the mount request
  * @media_id: Media Identifier
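The hunk above adds a flush hook that only delegates to i2o_block_device_flush() while the queue still has a driver-private device attached, and returns -ENODEV otherwise. As a rough standalone illustration of that guard-and-delegate pattern (plain C with made-up stand-ins for the queue and device types, not the real block-layer API):

    #include <stdio.h>

    #define ENODEV 19

    struct fake_device { const char *name; };
    struct fake_queue { struct fake_device *queuedata; };

    /* Stand-in for i2o_block_device_flush(): pretend the flush always succeeds. */
    static int fake_device_flush(struct fake_device *dev)
    {
            printf("flushing %s\n", dev->name);
            return 0;
    }

    /* Guard-and-delegate: refuse with -ENODEV once the device is gone. */
    static int fake_issue_flush(struct fake_queue *q)
    {
            int rc = -ENODEV;

            if (q->queuedata)
                    rc = fake_device_flush(q->queuedata);

            return rc;
    }

    int main(void)
    {
            struct fake_device dev = { "i2o/hda" };
            struct fake_queue q = { &dev };

            printf("attached: rc=%d\n", fake_issue_flush(&q));
            q.queuedata = NULL;     /* device hot-removed */
            printf("removed:  rc=%d\n", fake_issue_flush(&q));
            return 0;
    }
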
@@ -299,28 +322,31 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
 
 /**
  * i2o_block_sglist_alloc - Allocate the SG list and map it
+ * @c: I2O controller to which the request belongs
  * @ireq: I2O block request
  *
- * Builds the SG list and map it into to be accessable by the controller.
+ * Builds the SG list and map it to be accessable by the controller.
  *
- * Returns the number of elements in the SG list or 0 on failure.
+ * Returns 0 on failure or 1 on success.
  */
-static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
+static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
+					 struct i2o_block_request *ireq,
+					 u32 __iomem ** mptr)
 {
-	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
 	int nents;
+	enum dma_data_direction direction;
 
+	ireq->dev = &c->pdev->dev;
 	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
 
 	if (rq_data_dir(ireq->req) == READ)
-		ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
+		direction = PCI_DMA_FROMDEVICE;
 	else
-		ireq->sg_dma_direction = PCI_DMA_TODEVICE;
+		direction = PCI_DMA_TODEVICE;
 
-	ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
-				    ireq->sg_dma_direction);
+	ireq->sg_nents = nents;
 
-	return ireq->sg_nents;
+	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
 };
 
 /**
@@ -331,10 +357,14 @@ static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
  */
 static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
 {
-	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
+	enum dma_data_direction direction;
 
-	dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
-		     ireq->sg_dma_direction);
+	if (rq_data_dir(ireq->req) == READ)
+		direction = PCI_DMA_FROMDEVICE;
+	else
+		direction = PCI_DMA_TODEVICE;
+
+	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
 };
 
 /**
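In the two hunks above the request no longer caches a DMA direction; both the map and the unmap path recompute it from the request's data direction, so the two calls stay symmetric by construction. A minimal sketch of that idea, using plain C stand-ins instead of the real dma_map_sg()/dma_unmap_sg() calls:

    #include <stdio.h>
    #include <stdbool.h>

    enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE };

    struct fake_request { bool is_read; };

    /* Derive the direction from the request each time it is needed,
     * instead of storing it in the per-request bookkeeping. */
    static enum dma_dir req_dma_dir(const struct fake_request *req)
    {
            return req->is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
    }

    static void map_sg(enum dma_dir dir)
    {
            printf("map   dir=%d\n", dir);
    }

    static void unmap_sg(enum dma_dir dir)
    {
            printf("unmap dir=%d\n", dir);
    }

    int main(void)
    {
            struct fake_request read_req = { .is_read = true };

            /* Both calls derive the same value, so they can never disagree. */
            map_sg(req_dma_dir(&read_req));
            unmap_sg(req_dma_dir(&read_req));
            return 0;
    }
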
@@ -352,6 +382,11 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
 	struct i2o_block_device *i2o_blk_dev = q->queuedata;
 	struct i2o_block_request *ireq;
 
+	if (unlikely(!i2o_blk_dev)) {
+		osm_err("block device already removed\n");
+		return BLKPREP_KILL;
+	}
+
 	/* request is already processed by us, so return */
 	if (req->flags & REQ_SPECIAL) {
 		osm_debug("REQ_SPECIAL already set!\n");
@@ -414,11 +449,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
 {
 	struct i2o_block_request *ireq = req->special;
 	struct i2o_block_device *dev = ireq->i2o_blk_dev;
-	request_queue_t *q = dev->gd->queue;
+	request_queue_t *q = req->q;
 	unsigned long flags;
 
 	if (end_that_request_chunk(req, uptodate, nr_bytes)) {
-		int leftover = (req->hard_nr_sectors << 9);
+		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
 
 		if (blk_pc_request(req))
 			leftover = req->data_len;
@@ -432,8 +467,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	end_that_request_last(req);
-	dev->open_queue_depth--;
-	list_del(&ireq->queue);
+
+	if (likely(dev)) {
+		dev->open_queue_depth--;
+		list_del(&ireq->queue);
+	}
 
 	blk_start_queue(q);
 
@@ -483,8 +521,8 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
 	 * Don't stick a supertrak100 into cache aggressive modes
 	 */
 
-	osm_err("%03x error status: %02x, detailed status: %04x\n",
-		(le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
+	osm_err("TID %03x error status: 0x%02x, detailed status: "
+		"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
 		status >> 24, status & 0xffff);
 
 	req->errors++;
@@ -705,18 +743,25 @@ static int i2o_block_media_changed(struct gendisk *disk)
 static int i2o_block_transfer(struct request *req)
 {
 	struct i2o_block_device *dev = req->rq_disk->private_data;
-	struct i2o_controller *c = dev->i2o_dev->iop;
+	struct i2o_controller *c;
 	int tid = dev->i2o_dev->lct_data.tid;
 	struct i2o_message __iomem *msg;
-	void __iomem *mptr;
+	u32 __iomem *mptr;
 	struct i2o_block_request *ireq = req->special;
-	struct scatterlist *sg;
-	int sgnum;
-	int i;
 	u32 m;
 	u32 tcntxt;
-	u32 sg_flags;
+	u32 sgl_offset = SGL_OFFSET_8;
+	u32 ctl_flags = 0x00000000;
 	int rc;
+	u32 cmd;
+
+	if (unlikely(!dev->i2o_dev)) {
+		osm_err("transfer to removed drive\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	c = dev->i2o_dev->iop;
 
 	m = i2o_msg_get(c, &msg);
 	if (m == I2O_QUEUE_EMPTY) {
@@ -730,80 +775,109 @@ static int i2o_block_transfer(struct request *req)
 		goto nop_msg;
 	}
 
-	if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
-		rc = -ENOMEM;
-		goto context_remove;
-	}
-
-	/* Build the message based on the request. */
 	writel(i2o_block_driver.context, &msg->u.s.icntxt);
 	writel(tcntxt, &msg->u.s.tcntxt);
-	writel(req->nr_sectors << 9, &msg->body[1]);
 
-	writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
-	writel(req->sector >> 23, &msg->body[3]);
-
-	mptr = &msg->body[4];
-
-	sg = ireq->sg_table;
+	mptr = &msg->body[0];
 
 	if (rq_data_dir(req) == READ) {
-		writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
-		       &msg->u.head[1]);
-		sg_flags = 0x10000000;
+		cmd = I2O_CMD_BLOCK_READ << 24;
+
 		switch (dev->rcache) {
-		case CACHE_NULL:
-			writel(0, &msg->body[0]);
-			break;
 		case CACHE_PREFETCH:
-			writel(0x201F0008, &msg->body[0]);
+			ctl_flags = 0x201F0008;
 			break;
+
 		case CACHE_SMARTFETCH:
 			if (req->nr_sectors > 16)
-				writel(0x201F0008, &msg->body[0]);
+				ctl_flags = 0x201F0008;
 			else
-				writel(0x001F0000, &msg->body[0]);
+				ctl_flags = 0x001F0000;
+			break;
+
+		default:
 			break;
 		}
 	} else {
-		writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
-		       &msg->u.head[1]);
-		sg_flags = 0x14000000;
+		cmd = I2O_CMD_BLOCK_WRITE << 24;
+
 		switch (dev->wcache) {
-		case CACHE_NULL:
-			writel(0, &msg->body[0]);
-			break;
 		case CACHE_WRITETHROUGH:
-			writel(0x001F0008, &msg->body[0]);
+			ctl_flags = 0x001F0008;
 			break;
 		case CACHE_WRITEBACK:
-			writel(0x001F0010, &msg->body[0]);
+			ctl_flags = 0x001F0010;
 			break;
 		case CACHE_SMARTBACK:
 			if (req->nr_sectors > 16)
-				writel(0x001F0004, &msg->body[0]);
+				ctl_flags = 0x001F0004;
 			else
-				writel(0x001F0010, &msg->body[0]);
+				ctl_flags = 0x001F0010;
 			break;
 		case CACHE_SMARTTHROUGH:
 			if (req->nr_sectors > 16)
-				writel(0x001F0004, &msg->body[0]);
+				ctl_flags = 0x001F0004;
 			else
-				writel(0x001F0010, &msg->body[0]);
+				ctl_flags = 0x001F0010;
+		default:
+			break;
+		}
+	}
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+	if (c->adaptec) {
+		u8 cmd[10];
+		u32 scsi_flags;
+		u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+
+		memset(cmd, 0, 10);
+
+		sgl_offset = SGL_OFFSET_12;
+
+		writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
+		       &msg->u.head[1]);
+
+		writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
+		writel(tid, mptr++);
+
+		/*
+		 * ENABLE_DISCONNECT
+		 * SIMPLE_TAG
+		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
+		 */
+		if (rq_data_dir(req) == READ) {
+			cmd[0] = 0x28;
+			scsi_flags = 0x60a0000a;
+		} else {
+			cmd[0] = 0x2A;
+			scsi_flags = 0xa0a0000a;
 		}
+
+		writel(scsi_flags, mptr++);
+
+		*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
+		*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+
+		memcpy_toio(mptr, cmd, 10);
+		mptr += 4;
+		writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
+	} else
+#endif
+	{
+		writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
+		writel(ctl_flags, mptr++);
+		writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
+		writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
+		writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
 	}
 
-	for (i = sgnum; i > 0; i--) {
-		if (i == 1)
-			sg_flags |= 0x80000000;
-		writel(sg_flags | sg_dma_len(sg), mptr);
-		writel(sg_dma_address(sg), mptr + 4);
-		mptr += 8;
-		sg++;
+	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
+		rc = -ENOMEM;
+		goto context_remove;
 	}
 
-	writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8,
-	       &msg->u.head[0]);
+	writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
+	       sgl_offset, &msg->u.head[0]);
 
 	list_add_tail(&ireq->queue, &dev->open_queue);
 	dev->open_queue_depth++;
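The rewritten message builder above encodes the request's byte offset as two 32-bit words: the low word is req->sector << KERNEL_SECTOR_SHIFT and the high word is req->sector >> (32 - KERNEL_SECTOR_SHIFT). Assuming KERNEL_SECTOR_SHIFT is 9 (512-byte kernel sectors), a small host-side check of that split and its reassembly:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    #define KERNEL_SECTOR_SHIFT 9  /* assumed: 512-byte kernel sectors */

    int main(void)
    {
            uint64_t sector = 0x123456789ULL;  /* arbitrary large LBA */
            uint64_t offset = sector << KERNEL_SECTOR_SHIFT;

            /* Low and high 32-bit words, as written into the message body. */
            uint32_t lo = (uint32_t)(sector << KERNEL_SECTOR_SHIFT);
            uint32_t hi = (uint32_t)(sector >> (32 - KERNEL_SECTOR_SHIFT));

            /* Reassembling the two words gives back the 64-bit byte offset. */
            assert((((uint64_t)hi << 32) | lo) == offset);
            printf("offset=0x%llx lo=0x%08x hi=0x%08x\n",
                   (unsigned long long)offset, lo, hi);
            return 0;
    }
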
@@ -846,11 +920,13 @@ static void i2o_block_request_fn(struct request_queue *q)
 
 		queue_depth = ireq->i2o_blk_dev->open_queue_depth;
 
-		if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
+		if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
 			if (!i2o_block_transfer(req)) {
 				blkdev_dequeue_request(req);
 				continue;
-			}
+			} else
+				osm_info("transfer error\n");
+		}
 
 		if (queue_depth)
 			break;
@@ -933,6 +1009,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
 	}
 
 	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
+	blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
 
 	gd->major = I2O_MAJOR;
 	gd->queue = queue;
@@ -974,7 +1051,18 @@ static int i2o_block_probe(struct device *dev)
 	u64 size;
 	u32 blocksize;
 	u32 flags, status;
-	int segments;
+	u16 body_size = 4;
+	unsigned short max_sectors;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+	if (c->adaptec)
+		body_size = 8;
+#endif
+
+	if (c->limit_sectors)
+		max_sectors = I2O_MAX_SECTORS_LIMITED;
+	else
+		max_sectors = I2O_MAX_SECTORS;
 
 	/* skip devices which are used by IOP */
 	if (i2o_dev->lct_data.user_tid != 0xfff) {
@@ -1009,50 +1097,35 @@ static int i2o_block_probe(struct device *dev)
 	queue = gd->queue;
 	queue->queuedata = i2o_blk_dev;
 
-	blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
-	blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
-
-	if (c->short_req)
-		segments = 8;
-	else {
-		i2o_status_block *sb;
+	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
+	blk_queue_max_sectors(queue, max_sectors);
+	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
 
-		sb = c->status_block.virt;
-
-		segments = (sb->inbound_frame_size -
-			    sizeof(struct i2o_message) / 4 - 4) / 2;
-	}
-
-	blk_queue_max_hw_segments(queue, segments);
-
-	osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
-	osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
-	osm_debug("hw segments = %d\n", segments);
+	osm_debug("max sectors = %d\n", queue->max_phys_segments);
+	osm_debug("phys segments = %d\n", queue->max_sectors);
+	osm_debug("max hw segments = %d\n", queue->max_hw_segments);
 
 	/*
 	 * Ask for the current media data. If that isn't supported
 	 * then we ask for the device capacity data
 	 */
-	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8))
-		if (!i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
-			osm_warn("could not get size of %s\n", gd->disk_name);
-			size = 0;
-		}
+	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
+	    i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
+		blk_queue_hardsect_size(queue, blocksize);
+	} else
+		osm_warn("unable to get blocksize of %s\n", gd->disk_name);
 
-	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4))
-		if (!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
-			osm_warn("unable to get blocksize of %s\n",
-				 gd->disk_name);
-			blocksize = 0;
-		}
+	if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
+	    i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
+		set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
+	} else
+		osm_warn("could not get size of %s\n", gd->disk_name);
 
 	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
 		i2o_blk_dev->power = 0;
 	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
 	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
 
-	set_capacity(gd, size >> 9);
-
 	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
 
 	add_disk(gd);
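The probe changes above replace the nested "if the first query fails, try the second" blocks with a single condition that tries the media-specific parameter group first and falls back to the generic one, warning only when both fail. A rough standalone model of that fallback pattern (the parameter-group numbers and the getter are made up for illustration, not the real i2o_parm_field_get() interface):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Pretend getter: only the "generic" group 0x0000 knows the block size. */
    static bool get_param(uint16_t group, uint32_t *out)
    {
            if (group == 0x0000) {
                    *out = 512;
                    return true;
            }
            return false;   /* media-specific group not supported */
    }

    int main(void)
    {
            uint32_t blocksize;

            /* Try the preferred group first, then fall back, warn if both fail. */
            if (get_param(0x0004, &blocksize) || get_param(0x0000, &blocksize))
                    printf("blocksize = %u\n", blocksize);
            else
                    printf("unable to get blocksize\n");

            return 0;
    }
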
@@ -1109,7 +1182,7 @@ static int __init i2o_block_init(void)
 		goto exit;
 	}
 
-	i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
+	i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
 					       mempool_alloc_slab,
 					       mempool_free_slab,
 					       i2o_blk_req_pool.slab);