Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/aoe/aoeblk.c     |    4
-rw-r--r--  drivers/block/aoe/aoecmd.c     |    2
-rw-r--r--  drivers/block/aoe/aoedev.c     |    4
-rw-r--r--  drivers/block/cciss.c          |    2
-rw-r--r--  drivers/block/cpqarray.c       |    2
-rw-r--r--  drivers/block/floppy.c         |   87
-rw-r--r--  drivers/block/lguest_blk.c     |   36
-rw-r--r--  drivers/block/loop.c           |    4
-rw-r--r--  drivers/block/nbd.c            |   59
-rw-r--r--  drivers/block/pktcdvd.c        |   25
-rw-r--r--  drivers/block/ps3disk.c        |   42
-rw-r--r--  drivers/block/rd.c             |    4
-rw-r--r--  drivers/block/umem.c           |  238
-rw-r--r--  drivers/block/umem.h           |  133
-rw-r--r--  drivers/block/xen-blkfront.c   |   32
-rw-r--r--  drivers/block/xsysace.c        |  274
-rw-r--r--  drivers/ide/ide-floppy.c       |   52
-rw-r--r--  drivers/md/dm-crypt.c          |   21
-rw-r--r--  drivers/md/dm-emc.c            |   15
-rw-r--r--  drivers/md/dm-io.c             |    8
-rw-r--r--  drivers/md/dm-mpath.c          |    4
-rw-r--r--  drivers/md/dm-raid1.c          |    4
-rw-r--r--  drivers/md/dm-snap.c           |    2
-rw-r--r--  drivers/md/dm-zero.c           |    2
-rw-r--r--  drivers/md/dm.c                |   18
-rw-r--r--  drivers/md/faulty.c            |   10
-rw-r--r--  drivers/md/linear.c            |    4
-rw-r--r--  drivers/md/md.c                |   25
-rw-r--r--  drivers/md/multipath.c         |   13
-rw-r--r--  drivers/md/raid0.c             |    4
-rw-r--r--  drivers/md/raid1.c             |   30
-rw-r--r--  drivers/md/raid10.c            |   31
-rw-r--r--  drivers/md/raid5.c             |   48
-rw-r--r--  drivers/s390/block/dasd_diag.c |   37
-rw-r--r--  drivers/s390/block/dasd_eckd.c |   28
-rw-r--r--  drivers/s390/block/dasd_fba.c  |   28
-rw-r--r--  drivers/s390/block/dcssblk.c   |    4
-rw-r--r--  drivers/s390/block/xpram.c     |    6
-rw-r--r--  drivers/s390/char/tape_34xx.c  |   32
-rw-r--r--  drivers/s390/char/tape_3590.c  |   37
-rw-r--r--  drivers/scsi/scsi_lib.c        |   21
41 files changed, 776 insertions, 656 deletions
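
Note: the hunks below repeat the same two conversions in every driver: request iteration moves from the nested rq_for_each_bio()/bio_for_each_segment() loops to the single rq_for_each_segment() helper, and bio completion drops the bytes_done argument, so bio_endio()/->bi_end_io no longer handle partial completions. The following sketch is illustrative only; it is not part of the patch, the functions walk_segments_old/walk_segments_new/complete_example and the process() helper are hypothetical, and only the API shapes are taken from the hunks below.

/* Illustrative sketch, not from this patch.  process() stands in for
 * whatever a driver does with each segment. */

/* Old style: walk every bio in the request, then every segment in each bio. */
static void walk_segments_old(struct request *req)
{
	struct bio *bio;
	struct bio_vec *bvec;
	int idx;

	rq_for_each_bio(bio, req)
		bio_for_each_segment(bvec, bio, idx)
			process(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}

/* New style: one iterator walks all segments of all bios in the request. */
static void walk_segments_new(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, req, iter)
		process(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}

/* Completion: bio_endio() loses the bytes_done argument, and ->bi_end_io
 * callbacks become void and no longer check bio->bi_size for partial I/O. */
static void complete_example(struct bio *bio, int error)
{
	bio_endio(bio, error);	/* was: bio_endio(bio, bio->bi_size, error); */
}
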
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 007faaf008e7..b1d00ef6659c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -138,7 +138,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 	buf = mempool_alloc(d->bufpool, GFP_NOIO);
 	if (buf == NULL) {
 		printk(KERN_INFO "aoe: buf allocation failure\n");
-		bio_endio(bio, bio->bi_size, -ENOMEM);
+		bio_endio(bio, -ENOMEM);
 		return 0;
 	}
 	memset(buf, 0, sizeof(*buf));
@@ -159,7 +159,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 			d->aoemajor, d->aoeminor);
 		spin_unlock_irqrestore(&d->lock, flags);
 		mempool_free(buf, d->bufpool);
-		bio_endio(bio, bio->bi_size, -ENXIO);
+		bio_endio(bio, -ENXIO);
 		return 0;
 	}
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 01fbdd38e3be..5abae34ad65b 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -652,7 +652,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 		disk_stat_add(disk, sectors[rw], n_sect);
 		disk_stat_add(disk, io_ticks, duration);
 		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
-		bio_endio(buf->bio, buf->bio->bi_size, n);
+		bio_endio(buf->bio, n);
 		mempool_free(buf, d->bufpool);
 	}
 }
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 05a97197c918..51f50710e5fc 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -119,7 +119,7 @@ aoedev_downdev(struct aoedev *d)
 		bio = buf->bio;
 		if (--buf->nframesout == 0) {
 			mempool_free(buf, d->bufpool);
-			bio_endio(bio, bio->bi_size, -EIO);
+			bio_endio(bio, -EIO);
 		}
 		skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
 	}
@@ -130,7 +130,7 @@ aoedev_downdev(struct aoedev *d)
 		list_del(d->bufq.next);
 		bio = buf->bio;
 		mempool_free(buf, d->bufpool);
-		bio_endio(bio, bio->bi_size, -EIO);
+		bio_endio(bio, -EIO);
 	}
 
 	if (d->gd)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 084358a828e9..28d145756f6c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1194,7 +1194,7 @@ static inline void complete_buffers(struct bio *bio, int status)
 		int nr_sectors = bio_sectors(bio);
 
 		bio->bi_next = NULL;
-		bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
+		bio_endio(bio, status ? 0 : -EIO);
 		bio = xbh;
 	}
 }
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index eb9799acf65b..3853c9a38d6a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -987,7 +987,7 @@ static inline void complete_buffers(struct bio *bio, int ok)
 		xbh = bio->bi_next;
 		bio->bi_next = NULL;
 
-		bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
+		bio_endio(bio, ok ? 0 : -EIO);
 
 		bio = xbh;
 	}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 085b7794fb3e..80483aac4cc9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2437,22 +2437,19 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-	struct bio *bio;
 	struct bio_vec *bv;
-	int size, i;
+	int size;
+	struct req_iterator iter;
 	char *base;
 
 	base = bio_data(current_req->bio);
 	size = 0;
 
-	rq_for_each_bio(bio, current_req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (page_address(bv->bv_page) + bv->bv_offset !=
-			    base + size)
-				break;
+	rq_for_each_segment(bv, current_req, iter) {
+		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+			break;
 
-			size += bv->bv_len;
-		}
+		size += bv->bv_len;
 	}
 
 	return size >> 9;
@@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
 	int remaining; /* number of transferred 512-byte sectors */
 	struct bio_vec *bv;
-	struct bio *bio;
 	char *buffer, *dma_buffer;
-	int size, i;
+	int size;
+	struct req_iterator iter;
 
 	max_sector = transfer_size(ssize,
 				   min(max_sector, max_sector_2),
@@ -2514,43 +2511,41 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	size = current_req->current_nr_sectors << 9;
 
-	rq_for_each_bio(bio, current_req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (!remaining)
-				break;
+	rq_for_each_segment(bv, current_req, iter) {
+		if (!remaining)
+			break;
 
-			size = bv->bv_len;
-			SUPBOUND(size, remaining);
+		size = bv->bv_len;
+		SUPBOUND(size, remaining);
 
-			buffer = page_address(bv->bv_page) + bv->bv_offset;
+		buffer = page_address(bv->bv_page) + bv->bv_offset;
 #ifdef FLOPPY_SANITY_CHECK
-			if (dma_buffer + size >
-			    floppy_track_buffer + (max_buffer_sectors << 10) ||
-			    dma_buffer < floppy_track_buffer) {
-				DPRINT("buffer overrun in copy buffer %d\n",
-				       (int)((floppy_track_buffer -
-					      dma_buffer) >> 9));
-				printk("fsector_t=%d buffer_min=%d\n",
-				       fsector_t, buffer_min);
-				printk("current_count_sectors=%ld\n",
-				       current_count_sectors);
-				if (CT(COMMAND) == FD_READ)
-					printk("read\n");
-				if (CT(COMMAND) == FD_WRITE)
-					printk("write\n");
-				break;
-			}
-			if (((unsigned long)buffer) % 512)
-				DPRINT("%p buffer not aligned\n", buffer);
-#endif
-			if (CT(COMMAND) == FD_READ)
-				memcpy(buffer, dma_buffer, size);
-			else
-				memcpy(dma_buffer, buffer, size);
-
-			remaining -= size;
-			dma_buffer += size;
-		}
+		if (dma_buffer + size >
+		    floppy_track_buffer + (max_buffer_sectors << 10) ||
+		    dma_buffer < floppy_track_buffer) {
+			DPRINT("buffer overrun in copy buffer %d\n",
+			       (int)((floppy_track_buffer -
+				      dma_buffer) >> 9));
+			printk("fsector_t=%d buffer_min=%d\n",
+			       fsector_t, buffer_min);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			break;
+		}
+		if (((unsigned long)buffer) % 512)
+			DPRINT("%p buffer not aligned\n", buffer);
+#endif
+		if (CT(COMMAND) == FD_READ)
+			memcpy(buffer, dma_buffer, size);
+		else
+			memcpy(dma_buffer, buffer, size);
+
+		remaining -= size;
+		dma_buffer += size;
 	}
 #ifdef FLOPPY_SANITY_CHECK
 	if (remaining) {
@@ -3815,14 +3810,10 @@ static int check_floppy_change(struct gendisk *disk)
  * a disk in the drive, and whether that disk is writable.
  */
 
-static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
+static void floppy_rb0_complete(struct bio *bio,
 			       int err)
 {
-	if (bio->bi_size)
-		return 1;
-
 	complete((struct completion *)bio->bi_private);
-	return 0;
 }
 
 static int __floppy_read_block_0(struct block_device *bdev)
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 160cf14431ac..fa8e42341b87 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -142,25 +142,23 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
  * return the total length. */
 static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
 {
-	unsigned int i = 0, idx, len = 0;
-	struct bio *bio;
-
-	rq_for_each_bio(bio, req) {
-		struct bio_vec *bvec;
-		bio_for_each_segment(bvec, bio, idx) {
-			/* We told the block layer not to give us too many. */
-			BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
-			/* If we had a zero-length segment, it would look like
-			 * the end of the data referred to by the "struct
-			 * lguest_dma", so make sure that doesn't happen. */
-			BUG_ON(!bvec->bv_len);
-			/* Convert page & offset to a physical address */
-			dma->addr[i] = page_to_phys(bvec->bv_page)
-				+ bvec->bv_offset;
-			dma->len[i] = bvec->bv_len;
-			len += bvec->bv_len;
-			i++;
-		}
+	unsigned int i = 0, len = 0;
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+
+	rq_for_each_segment(bvec, req, iter) {
+		/* We told the block layer not to give us too many. */
+		BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+		/* If we had a zero-length segment, it would look like
+		 * the end of the data referred to by the "struct
+		 * lguest_dma", so make sure that doesn't happen. */
+		BUG_ON(!bvec->bv_len);
+		/* Convert page & offset to a physical address */
+		dma->addr[i] = page_to_phys(bvec->bv_page)
+			+ bvec->bv_offset;
+		dma->len[i] = bvec->bv_len;
+		len += bvec->bv_len;
+		i++;
 	}
 	/* If the array isn't full, we mark the end with a 0 length */
 	if (i < LGUEST_MAX_DMA_SECTIONS)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f015fce4135..b9233a06934c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -551,7 +551,7 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
 
 out:
 	spin_unlock_irq(&lo->lo_lock);
-	bio_io_error(old_bio, old_bio->bi_size);
+	bio_io_error(old_bio);
 	return 0;
 }
 
@@ -580,7 +580,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
 		bio_put(bio);
 	} else {
 		int ret = do_bio_filebacked(lo, bio);
-		bio_endio(bio, bio->bi_size, ret);
+		bio_endio(bio, ret);
 	}
 }
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index be92c658f06e..be5ec3a9b1fc 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
 
 static int nbd_send_req(struct nbd_device *lo, struct request *req)
 {
-	int result, i, flags;
+	int result, flags;
 	struct nbd_request request;
 	unsigned long size = req->nr_sectors << 9;
 	struct socket *sock = lo->sock;
@@ -205,27 +205,23 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	}
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
-		struct bio *bio;
+		struct req_iterator iter;
+		struct bio_vec *bvec;
 		/*
 		 * we are really probing at internals to determine
 		 * whether to set MSG_MORE or not...
 		 */
-		rq_for_each_bio(bio, req) {
-			struct bio_vec *bvec;
-			bio_for_each_segment(bvec, bio, i) {
-				flags = 0;
-				if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
-					flags = MSG_MORE;
-				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					lo->disk->disk_name, req,
-					bvec->bv_len);
-				result = sock_send_bvec(sock, bvec, flags);
-				if (result <= 0) {
-					printk(KERN_ERR "%s: Send data failed (result %d)\n",
-						lo->disk->disk_name,
-						result);
-					goto error_out;
-				}
-			}
+		rq_for_each_segment(bvec, req, iter) {
+			flags = 0;
+			if (!rq_iter_last(req, iter))
+				flags = MSG_MORE;
+			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
+				lo->disk->disk_name, req, bvec->bv_len);
+			result = sock_send_bvec(sock, bvec, flags);
+			if (result <= 0) {
+				printk(KERN_ERR "%s: Send data failed (result %d)\n",
+					lo->disk->disk_name, result);
+				goto error_out;
+			}
 		}
 	}
@@ -321,22 +317,19 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 	dprintk(DBG_RX, "%s: request %p: got reply\n",
 		lo->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
-		int i;
-		struct bio *bio;
-		rq_for_each_bio(bio, req) {
-			struct bio_vec *bvec;
-			bio_for_each_segment(bvec, bio, i) {
-				result = sock_recv_bvec(sock, bvec);
-				if (result <= 0) {
-					printk(KERN_ERR "%s: Receive data failed (result %d)\n",
-						lo->disk->disk_name,
-						result);
-					req->errors++;
-					return req;
-				}
-				dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-					lo->disk->disk_name, req, bvec->bv_len);
-			}
+		struct req_iterator iter;
+		struct bio_vec *bvec;
+
+		rq_for_each_segment(bvec, req, iter) {
+			result = sock_recv_bvec(sock, bvec);
+			if (result <= 0) {
+				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
+					lo->disk->disk_name, result);
+				req->errors++;
+				return req;
+			}
+			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
+				lo->disk->disk_name, req, bvec->bv_len);
 		}
 	}
 	return req;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index fadbfd880bab..540bf3676985 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1058,15 +1058,12 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
 	}
 }
 
-static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_read(struct bio *bio, int err)
 {
 	struct packet_data *pkt = bio->bi_private;
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	if (bio->bi_size)
-		return 1;
-
 	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
 		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
 
@@ -1077,19 +1074,14 @@ static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
 		wake_up(&pd->wqueue);
 	}
 	pkt_bio_finished(pd);
-
-	return 0;
 }
 
-static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_packet_write(struct bio *bio, int err)
 {
 	struct packet_data *pkt = bio->bi_private;
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	if (bio->bi_size)
-		return 1;
-
 	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
 
 	pd->stats.pkt_ended++;
@@ -1098,7 +1090,6 @@ static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int
 	atomic_dec(&pkt->io_wait);
 	atomic_inc(&pkt->run_sm);
 	wake_up(&pd->wqueue);
-	return 0;
 }
 
 /*
@@ -1470,7 +1461,7 @@ static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
 	while (bio) {
 		next = bio->bi_next;
 		bio->bi_next = NULL;
-		bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
+		bio_endio(bio, uptodate ? 0 : -EIO);
 		bio = next;
 	}
 	pkt->orig_bios = pkt->orig_bios_tail = NULL;
@@ -2462,19 +2453,15 @@ static int pkt_close(struct inode *inode, struct file *file)
 }
 
 
-static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
+static void pkt_end_io_read_cloned(struct bio *bio, int err)
 {
 	struct packet_stacked_data *psd = bio->bi_private;
 	struct pktcdvd_device *pd = psd->pd;
 
-	if (bio->bi_size)
-		return 1;
-
 	bio_put(bio);
-	bio_endio(psd->bio, psd->bio->bi_size, err);
+	bio_endio(psd->bio, err);
 	mempool_free(psd, psd_pool);
 	pkt_bio_finished(pd);
-	return 0;
 }
 
 static int pkt_make_request(struct request_queue *q, struct bio *bio)
@@ -2620,7 +2607,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
 	}
 	return 0;
 end_io:
-	bio_io_error(bio, bio->bi_size);
+	bio_io_error(bio);
 	return 0;
 }
 
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index aa8b890c80d7..06d0552cf49c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -91,30 +91,29 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 				   struct request *req, int gather)
 {
 	unsigned int offset = 0;
-	struct bio *bio;
-	sector_t sector;
+	struct req_iterator iter;
 	struct bio_vec *bvec;
-	unsigned int i = 0, j;
+	unsigned int i = 0;
 	size_t size;
 	void *buf;
 
-	rq_for_each_bio(bio, req) {
-		sector = bio->bi_sector;
+	rq_for_each_segment(bvec, req, iter) {
+		unsigned long flags;
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
-			__func__, __LINE__, i, bio_segments(bio),
-			bio_sectors(bio), sector);
-		bio_for_each_segment(bvec, bio, j) {
-			size = bvec->bv_len;
-			buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
-			if (gather)
-				memcpy(dev->bounce_buf+offset, buf, size);
-			else
-				memcpy(buf, dev->bounce_buf+offset, size);
-			offset += size;
-			flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
-			__bio_kunmap_atomic(bio, KM_IRQ0);
-		}
+			__func__, __LINE__, i, bio_segments(iter.bio),
+			bio_sectors(iter.bio),
+			(unsigned long)iter.bio->bi_sector);
+
+		size = bvec->bv_len;
+		buf = bvec_kmap_irq(bvec, &flags);
+		if (gather)
+			memcpy(dev->bounce_buf+offset, buf, size);
+		else
+			memcpy(buf, dev->bounce_buf+offset, size);
+		offset += size;
+		flush_kernel_dcache_page(bvec->bv_page);
+		bvec_kunmap_irq(bvec, &flags);
 		i++;
 	}
 }
@@ -130,12 +129,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
 	unsigned int n = 0;
-	struct bio *bio;
+	struct bio_vec *bv;
+	struct req_iterator iter;
 
-	rq_for_each_bio(bio, req)
+	rq_for_each_segment(bv, req, iter)
 		n++;
 	dev_dbg(&dev->sbd.core,
-		"%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
+		"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
 		__func__, __LINE__, op, n, req->nr_sectors,
 		req->hard_nr_sectors);
 #endif
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 65150b548f3a..701ea77f62e9 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -287,10 +287,10 @@ static int rd_make_request(struct request_queue *q, struct bio *bio)
 	if (ret)
 		goto fail;
 
-	bio_endio(bio, bio->bi_size, 0);
+	bio_endio(bio, 0);
 	return 0;
 fail:
-	bio_io_error(bio, bio->bi_size);
+	bio_io_error(bio);
 	return 0;
 }
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 6b7c02d6360d..99806f9ee4ce 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -52,7 +52,7 @@
 #include <linux/fcntl.h>        /* O_ACCMODE */
 #include <linux/hdreg.h>        /* HDIO_GETGEO */
 
-#include <linux/umem.h>
+#include "umem.h"
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -67,9 +67,10 @@
  * Version Information
  */
 
-#define DRIVER_VERSION "v2.3"
-#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown"
-#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver"
+#define DRIVER_NAME	"umem"
+#define DRIVER_VERSION	"v2.3"
+#define DRIVER_AUTHOR	"San Mehat, Johannes Erdfelt, NeilBrown"
+#define DRIVER_DESC	"Micro Memory(tm) PCI memory board block driver"
 
 static int debug;
 /* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
@@ -97,15 +98,9 @@ static int major_nr;
 #include <linux/blkpg.h>
 
 struct cardinfo {
-	int		card_number;
 	struct pci_dev	*dev;
 
-	int		irq;
-
-	unsigned long	csr_base;
 	unsigned char	__iomem *csr_remap;
-	unsigned long	csr_len;
-	unsigned int	win_size;	/* PCI window size */
 	unsigned int	mm_size;	/* size in kbytes */
 
 	unsigned int	init_size;	/* initial segment, in sectors,
@@ -113,6 +108,8 @@ struct cardinfo {
 					 * have been written
 					 */
 	struct bio	*bio, *currentbio, **biotail;
+	int		current_idx;
+	sector_t	current_sector;
 
 	struct request_queue *queue;
 
@@ -121,6 +118,7 @@ struct cardinfo {
 		struct mm_dma_desc	*desc;
 		int			cnt, headcnt;
 		struct bio		*bio, **biotail;
+		int			idx;
 	} mm_pages[2];
 #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -233,7 +231,7 @@ static void dump_regs(struct cardinfo *card)
 */
 static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
 {
-	printk(KERN_DEBUG "MM%d*: DMAstat - ", card->card_number);
+	dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
 	if (dmastat & DMASCR_ANY_ERR)
 		printk("ANY_ERR ");
 	if (dmastat & DMASCR_MBE_ERR)
@@ -295,7 +293,7 @@ static void mm_start_io(struct cardinfo *card)
 		desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
 	desc->sem_control_bits = desc->control_bits;
 
-	
+
 	if (debug & DEBUG_LED_ON_TRANSFER)
 		set_led(card, LED_REMOVE, LED_ON);
 
@@ -329,7 +327,7 @@ static int add_bio(struct cardinfo *card);
 
 static void activate(struct cardinfo *card)
 {
-	/* if No page is Active, and Ready is 
+	/* if No page is Active, and Ready is
 	 * not empty, then switch Ready page
 	 * to active and start IO.
 	 * Then add any bh's that are available to Ready
@@ -368,7 +366,7 @@ static void mm_unplug_device(struct request_queue *q)
 	spin_unlock_irqrestore(&card->lock, flags);
 }
 
-/* 
+/*
  * If there is room on Ready page, take
  * one bh off list and add it.
  * return 1 if there was room, else 0.
@@ -380,12 +378,16 @@ static int add_bio(struct cardinfo *card)
 	dma_addr_t dma_handle;
 	int offset;
 	struct bio *bio;
+	struct bio_vec *vec;
+	int idx;
 	int rw;
 	int len;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
 		card->currentbio = card->bio;
+		card->current_idx = card->bio->bi_idx;
+		card->current_sector = card->bio->bi_sector;
 		card->bio = card->bio->bi_next;
 		if (card->bio == NULL)
 			card->biotail = &card->bio;
@@ -394,15 +396,17 @@ static int add_bio(struct cardinfo *card)
 	}
 	if (!bio)
 		return 0;
+	idx = card->current_idx;
 
 	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
-	len = bio_iovec(bio)->bv_len;
-	dma_handle = pci_map_page(card->dev,
-				   bio_page(bio),
-				   bio_offset(bio),
+	vec = bio_iovec_idx(bio, idx);
+	len = vec->bv_len;
+	dma_handle = pci_map_page(card->dev,
+				   vec->bv_page,
+				   vec->bv_offset,
 				   len,
 				   (rw==READ) ?
 				   PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -410,6 +414,8 @@ static int add_bio(struct cardinfo *card)
 	p = &card->mm_pages[card->Ready];
 	desc = &p->desc[p->cnt];
 	p->cnt++;
+	if (p->bio == NULL)
+		p->idx = idx;
 	if ((p->biotail) != &bio->bi_next) {
 		*(p->biotail) = bio;
 		p->biotail = &(bio->bi_next);
@@ -419,7 +425,7 @@ static int add_bio(struct cardinfo *card)
 	desc->data_dma_handle = dma_handle;
 
 	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-	desc->local_addr= cpu_to_le64(bio->bi_sector << 9);
+	desc->local_addr = cpu_to_le64(card->current_sector << 9);
 	desc->transfer_size = cpu_to_le32(len);
 	offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc));
 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
@@ -435,10 +441,10 @@ static int add_bio(struct cardinfo *card)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
-	bio->bi_sector += (len>>9);
-	bio->bi_size -= len;
-	bio->bi_idx++;
-	if (bio->bi_idx >= bio->bi_vcnt)
+	card->current_sector += (len >> 9);
+	idx++;
+	card->current_idx = idx;
+	if (idx >= bio->bi_vcnt)
 		card->currentbio = NULL;
 
 	return 1;
@@ -461,7 +467,7 @@ static void process_page(unsigned long data)
 	if (card->Active < 0)
 		goto out_unlock;
 	page = &card->mm_pages[card->Active];
-	
+
 	while (page->headcnt < page->cnt) {
 		struct bio *bio = page->bio;
 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
@@ -471,32 +477,34 @@ static void process_page(unsigned long data)
 
 		if (!(control & DMASCR_DMA_COMPLETE)) {
 			control = dma_status;
 			last=1;
 		}
 		page->headcnt++;
-		idx = bio->bi_phys_segments;
-		bio->bi_phys_segments++;
-		if (bio->bi_phys_segments >= bio->bi_vcnt)
+		idx = page->idx;
+		page->idx++;
+		if (page->idx >= bio->bi_vcnt) {
 			page->bio = bio->bi_next;
+			page->idx = page->bio->bi_idx;
+		}
 
 		pci_unmap_page(card->dev, desc->data_dma_handle,
 			       bio_iovec_idx(bio,idx)->bv_len,
 				 (control& DMASCR_TRANSFER_READ) ?
 			       PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
 			/* error */
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
-			printk(KERN_WARNING "MM%d: I/O error on sector %d/%d\n",
-			       card->card_number,
+			dev_printk(KERN_WARNING, &card->dev->dev,
+				   "I/O error on sector %d/%d\n",
 			       le32_to_cpu(desc->local_addr)>>9,
 			       le32_to_cpu(desc->transfer_size));
 			dump_dmastat(card, control);
 		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
 			   le32_to_cpu(desc->local_addr)>>9 == card->init_size) {
 			card->init_size += le32_to_cpu(desc->transfer_size)>>9;
 			if (card->init_size>>1 >= card->mm_size) {
-				printk(KERN_INFO "MM%d: memory now initialised\n",
-				       card->card_number);
+				dev_printk(KERN_INFO, &card->dev->dev,
+					   "memory now initialised\n");
 				set_userbit(card, MEMORY_INITIALIZED, 1);
 			}
 		}
@@ -532,7 +540,7 @@ static void process_page(unsigned long data)
 
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;
-		bio_endio(bio, bio->bi_size, 0);
+		bio_endio(bio, 0);
 	}
 }
 
@@ -547,7 +555,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
 	pr_debug("mm_make_request %llu %u\n",
 		 (unsigned long long)bio->bi_sector, bio->bi_size);
 
-	bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
 	bio->bi_next = NULL;
@@ -585,7 +592,7 @@ HW_TRACE(0x30);
 	else
 		writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
 		       card->csr_remap+ DMA_STATUS_CTRL + 2);
-	
+
 	/* log errors and clear interrupt status */
 	if (dma_status & DMASCR_ANY_ERR) {
 		unsigned int data_log1, data_log2;
@@ -606,46 +613,51 @@ HW_TRACE(0x30);
 		dump_dmastat(card, dma_status);
 
 		if (stat & 0x01)
-			printk(KERN_ERR "MM%d*: Memory access error detected (err count %d)\n",
-			       card->card_number, count);
+			dev_printk(KERN_ERR, &card->dev->dev,
+				   "Memory access error detected (err count %d)\n",
+				   count);
 		if (stat & 0x02)
-			printk(KERN_ERR "MM%d*: Multi-bit EDC error\n",
-			       card->card_number);
+			dev_printk(KERN_ERR, &card->dev->dev,
+				   "Multi-bit EDC error\n");
 
-		printk(KERN_ERR "MM%d*: Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
-		       card->card_number, addr_log2, addr_log1, data_log2, data_log1);
-		printk(KERN_ERR "MM%d*: Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
-		       card->card_number, check, syndrome);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
+			   addr_log2, addr_log1, data_log2, data_log1);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
+			   check, syndrome);
 
 		writeb(0, card->csr_remap + ERROR_COUNT);
 	}
 
 	if (dma_status & DMASCR_PARITY_ERR_REP) {
-		printk(KERN_ERR "MM%d*: PARITY ERROR REPORTED\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "PARITY ERROR REPORTED\n");
 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
 	}
 
 	if (dma_status & DMASCR_PARITY_ERR_DET) {
-		printk(KERN_ERR "MM%d*: PARITY ERROR DETECTED\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "PARITY ERROR DETECTED\n");
 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
 	}
 
 	if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
-		printk(KERN_ERR "MM%d*: SYSTEM ERROR\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
 	}
 
 	if (dma_status & DMASCR_TARGET_ABT) {
-		printk(KERN_ERR "MM%d*: TARGET ABORT\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
 	}
 
 	if (dma_status & DMASCR_MASTER_ABT) {
-		printk(KERN_ERR "MM%d*: MASTER ABORT\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
 		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
 		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
 	}
@@ -656,7 +668,7 @@ HW_TRACE(0x30);
 
 HW_TRACE(0x36);
 
-	return IRQ_HANDLED; 
+	return IRQ_HANDLED;
 }
 /*
 -----------------------------------------------------------------------------------
@@ -696,20 +708,20 @@ static int check_battery(struct cardinfo *card, int battery, int status)
 		card->battery[battery].last_change = jiffies;
 
 		if (card->battery[battery].good) {
-			printk(KERN_ERR "MM%d: Battery %d now good\n",
-			       card->card_number, battery + 1);
+			dev_printk(KERN_ERR, &card->dev->dev,
+				   "Battery %d now good\n", battery + 1);
 			card->battery[battery].warned = 0;
 		} else
-			printk(KERN_ERR "MM%d: Battery %d now FAILED\n",
-			       card->card_number, battery + 1);
+			dev_printk(KERN_ERR, &card->dev->dev,
+				   "Battery %d now FAILED\n", battery + 1);
 
 		return 1;
 	} else if (!card->battery[battery].good &&
 		   !card->battery[battery].warned &&
 		   time_after_eq(jiffies, card->battery[battery].last_change +
 				 (HZ * 60 * 60 * 5))) {
-		printk(KERN_ERR "MM%d: Battery %d still FAILED after 5 hours\n",
-		       card->card_number, battery + 1);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Battery %d still FAILED after 5 hours\n", battery + 1);
 		card->battery[battery].warned = 1;
 
 		return 1;
@@ -733,8 +745,8 @@ static void check_batteries(struct cardinfo *card)
 
 	status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
 	if (debug & DEBUG_BATTERY_POLLING)
-		printk(KERN_DEBUG "MM%d: checking battery status, 1 = %s, 2 = %s\n",
-		       card->card_number,
+		dev_printk(KERN_DEBUG, &card->dev->dev,
+			   "checking battery status, 1 = %s, 2 = %s\n",
 		       (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
 		       (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
 
@@ -749,7 +761,7 @@ static void check_all_batteries(unsigned long ptr)
 {
 	int i;
 
-	for (i = 0; i < num_cards; i++) 
+	for (i = 0; i < num_cards; i++)
 		if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
 			struct cardinfo *card = &cards[i];
 			spin_lock_bh(&card->lock);
@@ -853,45 +865,56 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	unsigned char mem_present;
 	unsigned char batt_status;
 	unsigned int saved_bar, data;
+	unsigned long csr_base;
+	unsigned long csr_len;
 	int magic_number;
+	static int printed_version;
 
-	if (pci_enable_device(dev) < 0)
-		return -ENODEV;
+	if (!printed_version++)
+		printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
+
+	ret = pci_enable_device(dev);
+	if (ret)
+		return ret;
 
 	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
 	pci_set_master(dev);
 
 	card->dev = dev;
-	card->card_number = num_cards;
 
-	card->csr_base = pci_resource_start(dev, 0);
-	card->csr_len = pci_resource_len(dev, 0);
+	csr_base = pci_resource_start(dev, 0);
+	csr_len = pci_resource_len(dev, 0);
+	if (!csr_base || !csr_len)
+		return -ENODEV;
 
-	printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
-	       card->card_number, dev->bus->number, dev->devfn);
+	dev_printk(KERN_INFO, &dev->dev,
+	  "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
 
 	if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
 	    pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
-		printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards);
+		dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
 		return -ENOMEM;
 	}
-	if (!request_mem_region(card->csr_base, card->csr_len, "Micro Memory")) {
-		printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
-		ret = -ENOMEM;
 
+	ret = pci_request_regions(dev, DRIVER_NAME);
+	if (ret) {
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Unable to request memory region\n");
 		goto failed_req_csr;
 	}
 
-	card->csr_remap = ioremap_nocache(card->csr_base, card->csr_len);
+	card->csr_remap = ioremap_nocache(csr_base, csr_len);
 	if (!card->csr_remap) {
-		printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Unable to remap memory region\n");
 		ret = -ENOMEM;
 
 		goto failed_remap_csr;
 	}
 
-	printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
-	       card->csr_base, card->csr_remap, card->csr_len);
+	dev_printk(KERN_INFO, &card->dev->dev,
+		   "CSR 0x%08lx -> 0x%p (0x%lx)\n",
+		   csr_base, card->csr_remap, csr_len);
 
 	switch(card->dev->device) {
 	case 0x5415:
@@ -915,7 +938,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	}
 
 	if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
-		printk(KERN_ERR "MM%d: Magic number invalid\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
 		ret = -ENOMEM;
 		goto failed_magic;
 	}
@@ -928,7 +951,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 				 &card->mm_pages[1].page_dma);
 	if (card->mm_pages[0].desc == NULL ||
 	    card->mm_pages[1].desc == NULL) {
-		printk(KERN_ERR "MM%d: alloc failed\n", card->card_number);
+		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
 		goto failed_alloc;
 	}
 	reset_page(&card->mm_pages[0]);
@@ -949,7 +972,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
 	card->check_batteries = 0;
-	
+
 	mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
 	switch (mem_present) {
 	case MEM_128_MB:
@@ -982,12 +1005,13 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
 	card->battery[0].last_change = card->battery[1].last_change = jiffies;
 
 	if (card->flags & UM_FLAG_NO_BATT)
-		printk(KERN_INFO "MM%d: Size %d KB\n",
-		       card->card_number, card->mm_size);
+		dev_printk(KERN_INFO, &card->dev->dev,
+			   "Size %d KB\n", card->mm_size);
 	else {
-		printk(KERN_INFO "MM%d: Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
-		       card->card_number, card->mm_size,
+		dev_printk(KERN_INFO, &card->dev->dev,
+			   "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
+			   card->mm_size,
 		       (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"),
 		       card->battery[0].good ? "OK" : "FAILURE",
 		       (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"),
@@ -1005,19 +1029,16 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 		data = ~data;
 		data += 1;
 
-	card->win_size = data;
-
-
-	if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, "pci-umem", card)) {
-		printk(KERN_ERR "MM%d: Unable to allocate IRQ\n", card->card_number);
+	if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, card)) {
+		dev_printk(KERN_ERR, &card->dev->dev,
+			   "Unable to allocate IRQ\n");
 		ret = -ENODEV;
 
 		goto failed_req_irq;
 	}
 
-	card->irq = dev->irq;
-	printk(KERN_INFO "MM%d: Window size %d bytes, IRQ %d\n", card->card_number,
-	       card->win_size, card->irq);
+	dev_printk(KERN_INFO, &card->dev->dev,
+		   "Window size %d bytes, IRQ %d\n", data, dev->irq);
 
 	spin_lock_init(&card->lock);
 
@@ -1037,10 +1058,12 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
 	num_cards++;
 
 	if (!get_userbit(card, MEMORY_INITIALIZED)) {
-		printk(KERN_INFO "MM%d: memory NOT initialized. Consider over-writing whole device.\n", card->card_number);
+		dev_printk(KERN_INFO, &card->dev->dev,
+			   "memory NOT initialized. Consider over-writing whole device.\n");
 		card->init_size = 0;
 	} else {
-		printk(KERN_INFO "MM%d: memory already initialized\n", card->card_number);
+		dev_printk(KERN_INFO, &card->dev->dev,
+			   "memory already initialized\n");
 		card->init_size = card->mm_size;
 	}
 
@@ -1062,7 +1085,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
  failed_magic:
 	iounmap(card->csr_remap);
  failed_remap_csr:
-	release_mem_region(card->csr_base, card->csr_len);
+	pci_release_regions(dev);
  failed_req_csr:
 
 	return ret;
@@ -1077,9 +1100,8 @@ static void mm_pci_remove(struct pci_dev *dev)
 	struct cardinfo *card = pci_get_drvdata(dev);
 
 	tasklet_kill(&card->tasklet);
+	free_irq(dev->irq, card);
 	iounmap(card->csr_remap);
-	release_mem_region(card->csr_base, card->csr_len);
-	free_irq(card->irq, card);
 
 	if (card->mm_pages[0].desc)
 		pci_free_consistent(card->dev, PAGE_SIZE*2,
@@ -1090,6 +1112,9 @@ static void mm_pci_remove(struct pci_dev *dev)
 				    card->mm_pages[1].desc,
 				    card->mm_pages[1].page_dma);
 	blk_cleanup_queue(card->queue);
+
+	pci_release_regions(dev);
+	pci_disable_device(dev);
 }
 
 static const struct pci_device_id mm_pci_ids[] = {
@@ -1109,11 +1134,12 @@ static const struct pci_device_id mm_pci_ids[] = {
 MODULE_DEVICE_TABLE(pci, mm_pci_ids);
 
 static struct pci_driver mm_pci_driver = {
-	.name =		"umem",
+	.name =		DRIVER_NAME,
 	.id_table =	mm_pci_ids,
 	.probe =	mm_pci_probe,
 	.remove =	mm_pci_remove,
 };
+
 /*
 -----------------------------------------------------------------------------------
 -- mm_init
@@ -1125,13 +1151,11 @@ static int __init mm_init(void)
 	int retval, i;
 	int err;
 
-	printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
-
 	retval = pci_register_driver(&mm_pci_driver);
 	if (retval)
 		return -ENOMEM;
 
-	err = major_nr = register_blkdev(0, "umem");
+	err = major_nr = register_blkdev(0, DRIVER_NAME);
 	if (err < 0) {
 		pci_unregister_driver(&mm_pci_driver);
 		return -EIO;
@@ -1157,13 +1181,13 @@ static int __init mm_init(void)
 	}
 
 	init_battery_timer();
-	printk("MM: desc_per_page = %ld\n", DESC_PER_PAGE);
+	printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
 /*	printk("mm_init: Done. 10-19-01 9:00\n"); */
 	return 0;
 
 out:
 	pci_unregister_driver(&mm_pci_driver);
-	unregister_blkdev(major_nr, "umem");
+	unregister_blkdev(major_nr, DRIVER_NAME);
 	while (i--)
 		put_disk(mm_gendisk[i]);
 	return -ENOMEM;
@@ -1186,7 +1210,7 @@ static void __exit mm_cleanup(void)
 
 	pci_unregister_driver(&mm_pci_driver);
 
-	unregister_blkdev(major_nr, "umem");
+	unregister_blkdev(major_nr, DRIVER_NAME);
 }
 
 module_init(mm_init);
diff --git a/drivers/block/umem.h b/drivers/block/umem.h
new file mode 100644
index 000000000000..375c68974c9a
--- /dev/null
+++ b/drivers/block/umem.h
@@ -0,0 +1,133 @@
1
2/*
3 * This file contains defines for the
4 * Micro Memory MM5415
5 * family PCI Memory Module with Battery Backup.
6 *
7 * Copyright Micro Memory INC 2001. All rights reserved.
8 * Release under the terms of the GNU GENERAL PUBLIC LICENSE version 2.
9 * See the file COPYING.
10 */
11
12#ifndef _DRIVERS_BLOCK_MM_H
13#define _DRIVERS_BLOCK_MM_H
14
15
16#define IRQ_TIMEOUT (1 * HZ)
17
18/* CSR register definition */
19#define MEMCTRLSTATUS_MAGIC 0x00
20#define MM_MAGIC_VALUE (unsigned char)0x59
21
22#define MEMCTRLSTATUS_BATTERY 0x04
23#define BATTERY_1_DISABLED 0x01
24#define BATTERY_1_FAILURE 0x02
25#define BATTERY_2_DISABLED 0x04
26#define BATTERY_2_FAILURE 0x08
27
28#define MEMCTRLSTATUS_MEMORY 0x07
29#define MEM_128_MB 0xfe
30#define MEM_256_MB 0xfc
31#define MEM_512_MB 0xf8
32#define MEM_1_GB 0xf0
33#define MEM_2_GB 0xe0
34
35#define MEMCTRLCMD_LEDCTRL 0x08
36#define LED_REMOVE 2
37#define LED_FAULT 4
38#define LED_POWER 6
39#define LED_FLIP 255
40#define LED_OFF 0x00
41#define LED_ON 0x01
42#define LED_FLASH_3_5 0x02
43#define LED_FLASH_7_0 0x03
44#define LED_POWER_ON 0x00
45#define LED_POWER_OFF 0x01
46#define USER_BIT1 0x01
47#define USER_BIT2 0x02
48
49#define MEMORY_INITIALIZED USER_BIT1
50
51#define MEMCTRLCMD_ERRCTRL 0x0C
52#define EDC_NONE_DEFAULT 0x00
53#define EDC_NONE 0x01
54#define EDC_STORE_READ 0x02
55#define EDC_STORE_CORRECT 0x03
56
57#define MEMCTRLCMD_ERRCNT 0x0D
58#define MEMCTRLCMD_ERRSTATUS 0x0E
59
60#define ERROR_DATA_LOG 0x20
61#define ERROR_ADDR_LOG 0x28
62#define ERROR_COUNT 0x3D
63#define ERROR_SYNDROME 0x3E
64#define ERROR_CHECK 0x3F
65
66#define DMA_PCI_ADDR 0x40
67#define DMA_LOCAL_ADDR 0x48
68#define DMA_TRANSFER_SIZE 0x50
69#define DMA_DESCRIPTOR_ADDR 0x58
70#define DMA_SEMAPHORE_ADDR 0x60
71#define DMA_STATUS_CTRL 0x68
72#define DMASCR_GO 0x00001
73#define DMASCR_TRANSFER_READ 0x00002
74#define DMASCR_CHAIN_EN 0x00004
75#define DMASCR_SEM_EN 0x00010
76#define DMASCR_DMA_COMP_EN 0x00020
77#define DMASCR_CHAIN_COMP_EN 0x00040
78#define DMASCR_ERR_INT_EN 0x00080
79#define DMASCR_PARITY_INT_EN 0x00100
80#define DMASCR_ANY_ERR 0x00800
81#define DMASCR_MBE_ERR 0x01000
82#define DMASCR_PARITY_ERR_REP 0x02000
83#define DMASCR_PARITY_ERR_DET 0x04000
84#define DMASCR_SYSTEM_ERR_SIG 0x08000
85#define DMASCR_TARGET_ABT 0x10000
86#define DMASCR_MASTER_ABT 0x20000
87#define DMASCR_DMA_COMPLETE 0x40000
88#define DMASCR_CHAIN_COMPLETE 0x80000
89
90/*
913.SOME PCs HAVE HOST BRIDGES WHICH APPARENTLY DO NOT CORRECTLY HANDLE
92READ-LINE (0xE) OR READ-MULTIPLE (0xC) PCI COMMAND CODES DURING DMA
93TRANSFERS. IN OTHER SYSTEMS THESE COMMAND CODES WILL CAUSE THE HOST BRIDGE
94TO ALLOW LONGER BURSTS DURING DMA READ OPERATIONS. THE UPPER FOUR BITS
95(31..28) OF THE DMA CSR HAVE BEEN MADE PROGRAMMABLE, SO THAT EITHER A 0x6,
96AN 0xE OR A 0xC CAN BE WRITTEN TO THEM TO SET THE COMMAND CODE USED DURING
97DMA READ OPERATIONS.
98*/
99#define DMASCR_READ 0x60000000
100#define DMASCR_READLINE 0xE0000000
101#define DMASCR_READMULTI 0xC0000000
102
103
104#define DMASCR_ERROR_MASK (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR | DMASCR_ANY_ERR)
105#define DMASCR_HARD_ERROR (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR)
106
107#define WINDOWMAP_WINNUM 0x7B
108
109#define DMA_READ_FROM_HOST 0
110#define DMA_WRITE_TO_HOST 1
111
112struct mm_dma_desc {
113 __le64 pci_addr;
114 __le64 local_addr;
115 __le32 transfer_size;
116 u32 zero1;
117 __le64 next_desc_addr;
118 __le64 sem_addr;
119 __le32 control_bits;
120 u32 zero2;
121
122 dma_addr_t data_dma_handle;
123
124 /* Copy of the bits */
125 __le64 sem_control_bits;
126} __attribute__((aligned(8)));
127
128/* bits for card->flags */
129#define UM_FLAG_DMA_IN_REGS 1
130#define UM_FLAG_NO_BYTE_STATUS 2
131#define UM_FLAG_NO_BATTREG 4
132#define UM_FLAG_NO_BATT 8
133#endif
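A quick orientation for the register map above, as a hedged sketch rather than driver code: the DMASCR_* bits are what a driver would OR together into the DMA_STATUS_CTRL register (or into mm_dma_desc.control_bits for chained transfers). The helper name and parameters below are illustrative only; the bit definitions come from this header.

/* Sketch only: assumes this header plus <linux/types.h>. */
#include <linux/types.h>

static u32 mm_build_dmascr(int transfer_read, u32 read_cmd)
{
	/* read_cmd is one of DMASCR_READ / DMASCR_READLINE / DMASCR_READMULTI;
	 * it programs bits 31..28, the PCI command code used for DMA reads. */
	u32 cr = DMASCR_GO | DMASCR_CHAIN_EN | DMASCR_SEM_EN |
		 DMASCR_DMA_COMP_EN | DMASCR_CHAIN_COMP_EN | DMASCR_ERR_INT_EN;

	if (transfer_read)	/* direction bit; see DMASCR_TRANSFER_READ above */
		cr |= DMASCR_TRANSFER_READ;

	return cr | read_cmd;
}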
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 964e51634f2d..2bdebcb3ff16 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -150,9 +150,8 @@ static int blkif_queue_request(struct request *req)
150 struct blkfront_info *info = req->rq_disk->private_data; 150 struct blkfront_info *info = req->rq_disk->private_data;
151 unsigned long buffer_mfn; 151 unsigned long buffer_mfn;
152 struct blkif_request *ring_req; 152 struct blkif_request *ring_req;
153 struct bio *bio; 153 struct req_iterator iter;
154 struct bio_vec *bvec; 154 struct bio_vec *bvec;
155 int idx;
156 unsigned long id; 155 unsigned long id;
157 unsigned int fsect, lsect; 156 unsigned int fsect, lsect;
158 int ref; 157 int ref;
@@ -186,34 +185,31 @@ static int blkif_queue_request(struct request *req)
186 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 185 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
187 186
188 ring_req->nr_segments = 0; 187 ring_req->nr_segments = 0;
189 rq_for_each_bio (bio, req) { 188 rq_for_each_segment(bvec, req, iter) {
190 bio_for_each_segment (bvec, bio, idx) { 189 BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
191 BUG_ON(ring_req->nr_segments 190 buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
192 == BLKIF_MAX_SEGMENTS_PER_REQUEST); 191 fsect = bvec->bv_offset >> 9;
193 buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); 192 lsect = fsect + (bvec->bv_len >> 9) - 1;
194 fsect = bvec->bv_offset >> 9; 193 /* install a grant reference. */
195 lsect = fsect + (bvec->bv_len >> 9) - 1; 194 ref = gnttab_claim_grant_reference(&gref_head);
196 /* install a grant reference. */ 195 BUG_ON(ref == -ENOSPC);
197 ref = gnttab_claim_grant_reference(&gref_head); 196
198 BUG_ON(ref == -ENOSPC); 197 gnttab_grant_foreign_access_ref(
199
200 gnttab_grant_foreign_access_ref(
201 ref, 198 ref,
202 info->xbdev->otherend_id, 199 info->xbdev->otherend_id,
203 buffer_mfn, 200 buffer_mfn,
204 rq_data_dir(req) ); 201 rq_data_dir(req) );
205 202
206 info->shadow[id].frame[ring_req->nr_segments] = 203 info->shadow[id].frame[ring_req->nr_segments] =
207 mfn_to_pfn(buffer_mfn); 204 mfn_to_pfn(buffer_mfn);
208 205
209 ring_req->seg[ring_req->nr_segments] = 206 ring_req->seg[ring_req->nr_segments] =
210 (struct blkif_request_segment) { 207 (struct blkif_request_segment) {
211 .gref = ref, 208 .gref = ref,
212 .first_sect = fsect, 209 .first_sect = fsect,
213 .last_sect = lsect }; 210 .last_sect = lsect };
214 211
215 ring_req->nr_segments++; 212 ring_req->nr_segments++;
216 }
217 } 213 }
218 214
219 info->ring.req_prod_pvt++; 215 info->ring.req_prod_pvt++;
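The xen-blkfront hunk above is representative of the iterator conversion running through this series: the nested rq_for_each_bio()/bio_for_each_segment() walk becomes a single rq_for_each_segment() loop that carries its own struct req_iterator cursor. A minimal stand-alone sketch against the same 2.6.23-era block API (the function itself is illustrative, not from the patch):

#include <linux/blkdev.h>
#include <linux/bio.h>

static unsigned int count_rq_segments(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int n = 0;

	/* one loop now visits every bio_vec of every bio in the request */
	rq_for_each_segment(bvec, req, iter)
		n++;

	return n;
}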
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 3ede0b63da13..9e7652dcde6c 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -91,6 +91,10 @@
91#include <linux/blkdev.h> 91#include <linux/blkdev.h>
92#include <linux/hdreg.h> 92#include <linux/hdreg.h>
93#include <linux/platform_device.h> 93#include <linux/platform_device.h>
94#if defined(CONFIG_OF)
95#include <linux/of_device.h>
96#include <linux/of_platform.h>
97#endif
94 98
95MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); 99MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
96MODULE_DESCRIPTION("Xilinx SystemACE device driver"); 100MODULE_DESCRIPTION("Xilinx SystemACE device driver");
@@ -158,6 +162,9 @@ MODULE_LICENSE("GPL");
158#define ACE_FIFO_SIZE (32) 162#define ACE_FIFO_SIZE (32)
159#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE) 163#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
160 164
165#define ACE_BUS_WIDTH_8 0
166#define ACE_BUS_WIDTH_16 1
167
161struct ace_reg_ops; 168struct ace_reg_ops;
162 169
163struct ace_device { 170struct ace_device {
@@ -188,7 +195,7 @@ struct ace_device {
188 195
189 /* Details of hardware device */ 196 /* Details of hardware device */
190 unsigned long physaddr; 197 unsigned long physaddr;
191 void *baseaddr; 198 void __iomem *baseaddr;
192 int irq; 199 int irq;
193 int bus_width; /* 0 := 8 bit; 1 := 16 bit */ 200 int bus_width; /* 0 := 8 bit; 1 := 16 bit */
194 struct ace_reg_ops *reg_ops; 201 struct ace_reg_ops *reg_ops;
@@ -220,20 +227,20 @@ struct ace_reg_ops {
220/* 8 Bit bus width */ 227/* 8 Bit bus width */
221static u16 ace_in_8(struct ace_device *ace, int reg) 228static u16 ace_in_8(struct ace_device *ace, int reg)
222{ 229{
223 void *r = ace->baseaddr + reg; 230 void __iomem *r = ace->baseaddr + reg;
224 return in_8(r) | (in_8(r + 1) << 8); 231 return in_8(r) | (in_8(r + 1) << 8);
225} 232}
226 233
227static void ace_out_8(struct ace_device *ace, int reg, u16 val) 234static void ace_out_8(struct ace_device *ace, int reg, u16 val)
228{ 235{
229 void *r = ace->baseaddr + reg; 236 void __iomem *r = ace->baseaddr + reg;
230 out_8(r, val); 237 out_8(r, val);
231 out_8(r + 1, val >> 8); 238 out_8(r + 1, val >> 8);
232} 239}
233 240
234static void ace_datain_8(struct ace_device *ace) 241static void ace_datain_8(struct ace_device *ace)
235{ 242{
236 void *r = ace->baseaddr + 0x40; 243 void __iomem *r = ace->baseaddr + 0x40;
237 u8 *dst = ace->data_ptr; 244 u8 *dst = ace->data_ptr;
238 int i = ACE_FIFO_SIZE; 245 int i = ACE_FIFO_SIZE;
239 while (i--) 246 while (i--)
@@ -243,7 +250,7 @@ static void ace_datain_8(struct ace_device *ace)
243 250
244static void ace_dataout_8(struct ace_device *ace) 251static void ace_dataout_8(struct ace_device *ace)
245{ 252{
246 void *r = ace->baseaddr + 0x40; 253 void __iomem *r = ace->baseaddr + 0x40;
247 u8 *src = ace->data_ptr; 254 u8 *src = ace->data_ptr;
248 int i = ACE_FIFO_SIZE; 255 int i = ACE_FIFO_SIZE;
249 while (i--) 256 while (i--)
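The accessor changes above are purely an annotation fix: the MMIO cookie gains __iomem so sparse can warn about plain dereferences and about mixing it with ordinary kernel pointers. A hedged stand-alone sketch of the same idea using the generic ioread8() helper (the driver itself uses the powerpc in_8()/out_8() accessors):

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative 16-bit read assembled from two byte-wide MMIO registers. */
static u16 demo_in16(void __iomem *base, int reg)
{
	void __iomem *r = base + reg;

	return ioread8(r) | (ioread8(r + 1) << 8);
}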
@@ -931,9 +938,11 @@ static int __devinit ace_setup(struct ace_device *ace)
931{ 938{
932 u16 version; 939 u16 version;
933 u16 val; 940 u16 val;
934
935 int rc; 941 int rc;
936 942
943 dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
944 dev_dbg(ace->dev, "physaddr=0x%lx irq=%i\n", ace->physaddr, ace->irq);
945
937 spin_lock_init(&ace->lock); 946 spin_lock_init(&ace->lock);
938 init_completion(&ace->id_completion); 947 init_completion(&ace->id_completion);
939 948
@@ -944,15 +953,6 @@ static int __devinit ace_setup(struct ace_device *ace)
944 if (!ace->baseaddr) 953 if (!ace->baseaddr)
945 goto err_ioremap; 954 goto err_ioremap;
946 955
947 if (ace->irq != NO_IRQ) {
948 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
949 if (rc) {
950 /* Failure - fall back to polled mode */
951 dev_err(ace->dev, "request_irq failed\n");
952 ace->irq = NO_IRQ;
953 }
954 }
955
956 /* 956 /*
957 * Initialize the state machine tasklet and stall timer 957 * Initialize the state machine tasklet and stall timer
958 */ 958 */
@@ -982,7 +982,7 @@ static int __devinit ace_setup(struct ace_device *ace)
982 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); 982 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
983 983
984 /* set bus width */ 984 /* set bus width */
985 if (ace->bus_width == 1) { 985 if (ace->bus_width == ACE_BUS_WIDTH_16) {
986 /* 0x0101 should work regardless of endianness */ 986 /* 0x0101 should work regardless of endianness */
987 ace_out_le16(ace, ACE_BUSMODE, 0x0101); 987 ace_out_le16(ace, ACE_BUSMODE, 0x0101);
988 988
@@ -1005,6 +1005,16 @@ static int __devinit ace_setup(struct ace_device *ace)
1005 ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | 1005 ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
1006 ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); 1006 ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
1007 1007
1008 /* Now we can hook up the irq handler */
1009 if (ace->irq != NO_IRQ) {
1010 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
1011 if (rc) {
1012 /* Failure - fall back to polled mode */
1013 dev_err(ace->dev, "request_irq failed\n");
1014 ace->irq = NO_IRQ;
1015 }
1016 }
1017
1008 /* Enable interrupts */ 1018 /* Enable interrupts */
1009 val = ace_in(ace, ACE_CTRL); 1019 val = ace_in(ace, ACE_CTRL);
1010 val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; 1020 val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
@@ -1024,16 +1034,14 @@ static int __devinit ace_setup(struct ace_device *ace)
1024 1034
1025 return 0; 1035 return 0;
1026 1036
1027 err_read: 1037err_read:
1028 put_disk(ace->gd); 1038 put_disk(ace->gd);
1029 err_alloc_disk: 1039err_alloc_disk:
1030 blk_cleanup_queue(ace->queue); 1040 blk_cleanup_queue(ace->queue);
1031 err_blk_initq: 1041err_blk_initq:
1032 iounmap(ace->baseaddr); 1042 iounmap(ace->baseaddr);
1033 if (ace->irq != NO_IRQ) 1043err_ioremap:
1034 free_irq(ace->irq, ace); 1044 dev_info(ace->dev, "xsysace: error initializing device at 0x%lx\n",
1035 err_ioremap:
1036 printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
1037 ace->physaddr); 1045 ace->physaddr);
1038 return -ENOMEM; 1046 return -ENOMEM;
1039} 1047}
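With request_irq() moved until after the hardware is configured (earlier hunks above), the unwind path here no longer has to free an IRQ that may never have been requested, and the labels keep the usual reverse-order-of-acquisition shape. A generic sketch of that goto-unwind idiom, with entirely hypothetical demo_* helpers:

struct demo_dev;
int demo_map_registers(struct demo_dev *d);	/* hypothetical */
void demo_unmap_registers(struct demo_dev *d);	/* hypothetical */
int demo_start_queue(struct demo_dev *d);	/* hypothetical */

static int demo_setup(struct demo_dev *d)
{
	int rc;

	rc = demo_map_registers(d);
	if (rc)
		goto err_map;

	rc = demo_start_queue(d);
	if (rc)
		goto err_queue;

	return 0;

err_queue:
	demo_unmap_registers(d);	/* undo only what already succeeded */
err_map:
	return rc;
}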
@@ -1056,98 +1064,222 @@ static void __devexit ace_teardown(struct ace_device *ace)
1056 iounmap(ace->baseaddr); 1064 iounmap(ace->baseaddr);
1057} 1065}
1058 1066
1059/* --------------------------------------------------------------------- 1067static int __devinit
1060 * Platform Bus Support 1068ace_alloc(struct device *dev, int id, unsigned long physaddr,
1061 */ 1069 int irq, int bus_width)
1062
1063static int __devinit ace_probe(struct device *device)
1064{ 1070{
1065 struct platform_device *dev = to_platform_device(device);
1066 struct ace_device *ace; 1071 struct ace_device *ace;
1067 int i; 1072 int rc;
1073 dev_dbg(dev, "ace_alloc(%p)\n", dev);
1068 1074
1069 dev_dbg(device, "ace_probe(%p)\n", device); 1075 if (!physaddr) {
1076 rc = -ENODEV;
1077 goto err_noreg;
1078 }
1070 1079
1071 /* 1080 /* Allocate and initialize the ace device structure */
1072 * Allocate the ace device structure
1073 */
1074 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); 1081 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
1075 if (!ace) 1082 if (!ace) {
1083 rc = -ENOMEM;
1076 goto err_alloc; 1084 goto err_alloc;
1077
1078 ace->dev = device;
1079 ace->id = dev->id;
1080 ace->irq = NO_IRQ;
1081
1082 for (i = 0; i < dev->num_resources; i++) {
1083 if (dev->resource[i].flags & IORESOURCE_MEM)
1084 ace->physaddr = dev->resource[i].start;
1085 if (dev->resource[i].flags & IORESOURCE_IRQ)
1086 ace->irq = dev->resource[i].start;
1087 } 1085 }
1088 1086
1089 /* FIXME: Should get bus_width from the platform_device struct */ 1087 ace->dev = dev;
1090 ace->bus_width = 1; 1088 ace->id = id;
1091 1089 ace->physaddr = physaddr;
1092 dev_set_drvdata(&dev->dev, ace); 1090 ace->irq = irq;
1091 ace->bus_width = bus_width;
1093 1092
1094 /* Call the bus-independant setup code */ 1093 /* Call the setup code */
1095 if (ace_setup(ace) != 0) 1094 rc = ace_setup(ace);
1095 if (rc)
1096 goto err_setup; 1096 goto err_setup;
1097 1097
1098 dev_set_drvdata(dev, ace);
1098 return 0; 1099 return 0;
1099 1100
1100 err_setup: 1101err_setup:
1101 dev_set_drvdata(&dev->dev, NULL); 1102 dev_set_drvdata(dev, NULL);
1102 kfree(ace); 1103 kfree(ace);
1103 err_alloc: 1104err_alloc:
1104 printk(KERN_ERR "xsysace: could not initialize device\n"); 1105err_noreg:
1105 return -ENOMEM; 1106 dev_err(dev, "could not initialize device, err=%i\n", rc);
1107 return rc;
1106} 1108}
1107 1109
1108/* 1110static void __devexit ace_free(struct device *dev)
1109 * Platform bus remove() method
1110 */
1111static int __devexit ace_remove(struct device *device)
1112{ 1111{
1113 struct ace_device *ace = dev_get_drvdata(device); 1112 struct ace_device *ace = dev_get_drvdata(dev);
1114 1113 dev_dbg(dev, "ace_free(%p)\n", dev);
1115 dev_dbg(device, "ace_remove(%p)\n", device);
1116 1114
1117 if (ace) { 1115 if (ace) {
1118 ace_teardown(ace); 1116 ace_teardown(ace);
1117 dev_set_drvdata(dev, NULL);
1119 kfree(ace); 1118 kfree(ace);
1120 } 1119 }
1120}
1121
1122/* ---------------------------------------------------------------------
1123 * Platform Bus Support
1124 */
1125
1126static int __devinit ace_probe(struct platform_device *dev)
1127{
1128 unsigned long physaddr = 0;
1129 int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
1130 int id = dev->id;
1131 int irq = NO_IRQ;
1132 int i;
1121 1133
1134 dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
1135
1136 for (i = 0; i < dev->num_resources; i++) {
1137 if (dev->resource[i].flags & IORESOURCE_MEM)
1138 physaddr = dev->resource[i].start;
1139 if (dev->resource[i].flags & IORESOURCE_IRQ)
1140 irq = dev->resource[i].start;
1141 }
1142
1143 /* Call the bus-independent setup code */
1143 /* Call the bus-independent setup code */
1144 return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
1145}
1146
1147/*
1148 * Platform bus remove() method
1149 */
1150static int __devexit ace_remove(struct platform_device *dev)
1151{
1152 ace_free(&dev->dev);
1122 return 0; 1153 return 0;
1123} 1154}
1124 1155
1125static struct device_driver ace_driver = { 1156static struct platform_driver ace_platform_driver = {
1126 .name = "xsysace",
1127 .bus = &platform_bus_type,
1128 .probe = ace_probe, 1157 .probe = ace_probe,
1129 .remove = __devexit_p(ace_remove), 1158 .remove = __devexit_p(ace_remove),
1159 .driver = {
1160 .owner = THIS_MODULE,
1161 .name = "xsysace",
1162 },
1163};
1164
1165/* ---------------------------------------------------------------------
1166 * OF_Platform Bus Support
1167 */
1168
1169#if defined(CONFIG_OF)
1170static int __devinit
1171ace_of_probe(struct of_device *op, const struct of_device_id *match)
1172{
1173 struct resource res;
1174 unsigned long physaddr;
1175 const u32 *id;
1176 int irq, bus_width, rc;
1177
1178 dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
1179
1180 /* device id */
1181 id = of_get_property(op->node, "port-number", NULL);
1182
1183 /* physaddr */
1184 rc = of_address_to_resource(op->node, 0, &res);
1185 if (rc) {
1186 dev_err(&op->dev, "invalid address\n");
1187 return rc;
1188 }
1189 physaddr = res.start;
1190
1191 /* irq */
1192 irq = irq_of_parse_and_map(op->node, 0);
1193
1194 /* bus width */
1195 bus_width = ACE_BUS_WIDTH_16;
1196 if (of_find_property(op->node, "8-bit", NULL))
1197 bus_width = ACE_BUS_WIDTH_8;
1198
1199 /* Call the bus-independent setup code */
1199 /* Call the bus-independent setup code */
1200 return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
1201}
1202
1203static int __devexit ace_of_remove(struct of_device *op)
1204{
1205 ace_free(&op->dev);
1206 return 0;
1207}
1208
1209/* Match table for of_platform binding */
1210static struct of_device_id __devinit ace_of_match[] = {
1211 { .compatible = "xilinx,xsysace", },
1212 {},
1213};
1214MODULE_DEVICE_TABLE(of, ace_of_match);
1215
1216static struct of_platform_driver ace_of_driver = {
1217 .owner = THIS_MODULE,
1218 .name = "xsysace",
1219 .match_table = ace_of_match,
1220 .probe = ace_of_probe,
1221 .remove = __devexit_p(ace_of_remove),
1222 .driver = {
1223 .name = "xsysace",
1224 },
1130}; 1225};
1131 1226
1227/* Registration helpers to keep the number of #ifdefs to a minimum */
1228static inline int __init ace_of_register(void)
1229{
1230 pr_debug("xsysace: registering OF binding\n");
1231 return of_register_platform_driver(&ace_of_driver);
1232}
1233
1234static inline void __exit ace_of_unregister(void)
1235{
1236 of_unregister_platform_driver(&ace_of_driver);
1237}
1238#else /* CONFIG_OF */
1239/* CONFIG_OF not enabled; do nothing helpers */
1240static inline int __init ace_of_register(void) { return 0; }
1241static inline void __exit ace_of_unregister(void) { }
1242#endif /* CONFIG_OF */
1243
1132/* --------------------------------------------------------------------- 1244/* ---------------------------------------------------------------------
1133 * Module init/exit routines 1245 * Module init/exit routines
1134 */ 1246 */
1135static int __init ace_init(void) 1247static int __init ace_init(void)
1136{ 1248{
1249 int rc;
1250
1137 ace_major = register_blkdev(ace_major, "xsysace"); 1251 ace_major = register_blkdev(ace_major, "xsysace");
1138 if (ace_major <= 0) { 1252 if (ace_major <= 0) {
1139 printk(KERN_WARNING "xsysace: register_blkdev() failed\n"); 1253 rc = -ENOMEM;
1140 return ace_major; 1254 goto err_blk;
1141 } 1255 }
1142 1256
1143 pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major); 1257 rc = ace_of_register();
1144 return driver_register(&ace_driver); 1258 if (rc)
1259 goto err_of;
1260
1261 pr_debug("xsysace: registering platform binding\n");
1262 rc = platform_driver_register(&ace_platform_driver);
1263 if (rc)
1264 goto err_plat;
1265
1266 pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
1267 return 0;
1268
1269err_plat:
1270 ace_of_unregister();
1271err_of:
1272 unregister_blkdev(ace_major, "xsysace");
1273err_blk:
1274 printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
1275 return rc;
1145} 1276}
1146 1277
1147static void __exit ace_exit(void) 1278static void __exit ace_exit(void)
1148{ 1279{
1149 pr_debug("Unregistering Xilinx SystemACE driver\n"); 1280 pr_debug("Unregistering Xilinx SystemACE driver\n");
1150 driver_unregister(&ace_driver); 1281 platform_driver_unregister(&ace_platform_driver);
1282 ace_of_unregister();
1151 unregister_blkdev(ace_major, "xsysace"); 1283 unregister_blkdev(ace_major, "xsysace");
1152} 1284}
1153 1285
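For the new OF binding, ace_of_probe() takes everything from the device node: the first "reg" entry for the register window, the first interrupt specifier, an optional "port-number" property for the device id, and the mere presence of an "8-bit" property to select the narrow bus. A hypothetical node that would match the "xilinx,xsysace" table is sketched below inside a C comment; the address, size and interrupt specifier are invented for illustration:

/*
 *	sysace@83600000 {
 *		compatible = "xilinx,xsysace";
 *		reg = <0x83600000 0x10000>;	// invented address/size
 *		interrupts = <2 0>;		// invented specifier
 *		port-number = <0>;		// read into ace->id
 *		8-bit;				// omit for a 16-bit attachment
 *	};
 */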
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index ae8e1a64b8ad..04a357808f2e 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -606,26 +606,24 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
606{ 606{
607 struct request *rq = pc->rq; 607 struct request *rq = pc->rq;
608 struct bio_vec *bvec; 608 struct bio_vec *bvec;
609 struct bio *bio; 609 struct req_iterator iter;
610 unsigned long flags; 610 unsigned long flags;
611 char *data; 611 char *data;
612 int count, i, done = 0; 612 int count, done = 0;
613 613
614 rq_for_each_bio(bio, rq) { 614 rq_for_each_segment(bvec, rq, iter) {
615 bio_for_each_segment(bvec, bio, i) { 615 if (!bcount)
616 if (!bcount) 616 break;
617 break;
618 617
619 count = min(bvec->bv_len, bcount); 618 count = min(bvec->bv_len, bcount);
620 619
621 data = bvec_kmap_irq(bvec, &flags); 620 data = bvec_kmap_irq(bvec, &flags);
622 drive->hwif->atapi_input_bytes(drive, data, count); 621 drive->hwif->atapi_input_bytes(drive, data, count);
623 bvec_kunmap_irq(data, &flags); 622 bvec_kunmap_irq(data, &flags);
624 623
625 bcount -= count; 624 bcount -= count;
626 pc->b_count += count; 625 pc->b_count += count;
627 done += count; 626 done += count;
628 }
629 } 627 }
630 628
631 idefloppy_do_end_request(drive, 1, done >> 9); 629 idefloppy_do_end_request(drive, 1, done >> 9);
@@ -639,27 +637,25 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
639static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount) 637static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount)
640{ 638{
641 struct request *rq = pc->rq; 639 struct request *rq = pc->rq;
642 struct bio *bio; 640 struct req_iterator iter;
643 struct bio_vec *bvec; 641 struct bio_vec *bvec;
644 unsigned long flags; 642 unsigned long flags;
645 int count, i, done = 0; 643 int count, done = 0;
646 char *data; 644 char *data;
647 645
648 rq_for_each_bio(bio, rq) { 646 rq_for_each_segment(bvec, rq, iter) {
649 bio_for_each_segment(bvec, bio, i) { 647 if (!bcount)
650 if (!bcount) 648 break;
651 break;
652 649
653 count = min(bvec->bv_len, bcount); 650 count = min(bvec->bv_len, bcount);
654 651
655 data = bvec_kmap_irq(bvec, &flags); 652 data = bvec_kmap_irq(bvec, &flags);
656 drive->hwif->atapi_output_bytes(drive, data, count); 653 drive->hwif->atapi_output_bytes(drive, data, count);
657 bvec_kunmap_irq(data, &flags); 654 bvec_kunmap_irq(data, &flags);
658 655
659 bcount -= count; 656 bcount -= count;
660 pc->b_count += count; 657 pc->b_count += count;
661 done += count; 658 done += count;
662 }
663 } 659 }
664 660
665 idefloppy_do_end_request(drive, 1, done >> 9); 661 idefloppy_do_end_request(drive, 1, done >> 9);
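The same rq_for_each_segment() conversion as in xen-blkfront, here combined with the atomic bvec_kmap_irq()/bvec_kunmap_irq() pair so highmem pages can be touched with interrupts off. A reduced stand-alone sketch of the pattern (copying into a flat buffer instead of the ATAPI FIFO; the function and buffer are illustrative):

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/string.h>

static unsigned int demo_copy_rq_data(struct request *rq, char *dst,
				      unsigned int bcount)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned long flags;
	unsigned int done = 0;

	rq_for_each_segment(bvec, rq, iter) {
		unsigned int count = min(bvec->bv_len, bcount - done);
		char *data;

		if (!count)
			break;

		data = bvec_kmap_irq(bvec, &flags);	/* atomic kmap of the page */
		memcpy(dst + done, data, count);
		bvec_kunmap_irq(data, &flags);

		done += count;
	}

	return done;
}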
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bdc52d6922b7..8216a6f75be5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
489 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
490 return; 490 return;
491 491
492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->error);
493 493
494 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
495} 495}
@@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
509 queue_work(_kcryptd_workqueue, &io->work); 509 queue_work(_kcryptd_workqueue, &io->work);
510} 510}
511 511
512static int crypt_endio(struct bio *clone, unsigned int done, int error) 512static void crypt_endio(struct bio *clone, int error)
513{ 513{
514 struct dm_crypt_io *io = clone->bi_private; 514 struct dm_crypt_io *io = clone->bi_private;
515 struct crypt_config *cc = io->target->private; 515 struct crypt_config *cc = io->target->private;
516 unsigned read_io = bio_data_dir(clone) == READ; 516 unsigned read_io = bio_data_dir(clone) == READ;
517 517
518 /* 518 /*
519 * free the processed pages, even if 519 * free the processed pages
520 * it's only a partially completed write
521 */ 520 */
522 if (!read_io) 521 if (!read_io) {
523 crypt_free_buffer_pages(cc, clone, done); 522 crypt_free_buffer_pages(cc, clone, clone->bi_size);
524
525 /* keep going - not finished yet */
526 if (unlikely(clone->bi_size))
527 return 1;
528
529 if (!read_io)
530 goto out; 523 goto out;
524 }
531 525
532 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { 526 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
533 error = -EIO; 527 error = -EIO;
@@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
537 bio_put(clone); 531 bio_put(clone);
538 io->post_process = 1; 532 io->post_process = 1;
539 kcryptd_queue_io(io); 533 kcryptd_queue_io(io);
540 return 0; 534 return;
541 535
542out: 536out:
543 bio_put(clone); 537 bio_put(clone);
544 dec_pending(io, error); 538 dec_pending(io, error);
545 return error;
546} 539}
547 540
548static void clone_init(struct dm_crypt_io *io, struct bio *clone) 541static void clone_init(struct dm_crypt_io *io, struct bio *clone)
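The crypt_endio() rewrite shows the core rule of this whole series: a ->bi_end_io() handler no longer receives a bytes_done count, never sees partial completions (so the old "return 1 to keep going" path is gone), and returns nothing; bio_endio(bio, error) always completes the whole bio. A minimal stand-alone completion callback in the new style (struct demo_io and the handler name are illustrative):

#include <linux/bio.h>
#include <linux/completion.h>

struct demo_io {
	struct completion done;
	int error;
};

static void demo_endio(struct bio *bio, int error)
{
	struct demo_io *io = bio->bi_private;

	/* older handlers also checked BIO_UPTODATE; keep that convention */
	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = -EIO;

	io->error = error;
	complete(&io->done);
	bio_put(bio);
}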
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 265c467854da..a2191a4fcf77 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
38 bio_put(bio); 38 bio_put(bio);
39} 39}
40 40
41static int emc_endio(struct bio *bio, unsigned int bytes_done, int error) 41static void emc_endio(struct bio *bio, int error)
42{ 42{
43 struct dm_path *path = bio->bi_private; 43 struct dm_path *path = bio->bi_private;
44 44
45 if (bio->bi_size)
46 return 1;
47
48 /* We also need to look at the sense keys here whether or not to 45 /* We also need to look at the sense keys here whether or not to
49 * switch to the next PG etc. 46 * switch to the next PG etc.
50 * 47 *
@@ -109,15 +106,7 @@ static struct request *get_failover_req(struct emc_handler *h,
109 return NULL; 106 return NULL;
110 } 107 }
111 108
112 rq->bio = rq->biotail = bio; 109 blk_rq_append_bio(q, rq, bio);
113 blk_rq_bio_prep(q, rq, bio);
114
115 rq->rq_disk = bdev->bd_contains->bd_disk;
116
117 /* bio backed don't set data */
118 rq->buffer = rq->data = NULL;
119 /* rq data_len used for pc cmd's request_bufflen */
120 rq->data_len = bio->bi_size;
121 110
122 rq->sense = h->sense; 111 rq->sense = h->sense;
123 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 112 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
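Besides the endio conversion, dm-emc now uses blk_rq_append_bio() instead of hand-initializing rq->bio, rq->biotail, rq->buffer and rq->data_len. A hedged sketch of the replacement pattern (error handling trimmed; the helper name is from this series, the wrapper function is illustrative):

#include <linux/blkdev.h>
#include <linux/bio.h>

static struct request *demo_bio_backed_rq(struct request_queue *q,
					   struct bio *bio)
{
	struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);

	if (!rq)
		return NULL;

	/* the block layer fills in bio/biotail and the data length from the bio */
	blk_rq_append_bio(q, rq, bio);

	return rq;
}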
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f3a772486437..b8e342fe7586 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
124 } 124 }
125} 125}
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static void endio(struct bio *bio, int error)
128{ 128{
129 struct io *io; 129 struct io *io;
130 unsigned region; 130 unsigned region;
131 131
132 /* keep going until we've finished */
133 if (bio->bi_size)
134 return 1;
135
136 if (error && bio_data_dir(bio) == READ) 132 if (error && bio_data_dir(bio) == READ)
137 zero_fill_bio(bio); 133 zero_fill_bio(bio);
138 134
@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
146 bio_put(bio); 142 bio_put(bio);
147 143
148 dec_count(io, region, error); 144 dec_count(io, region, error);
149
150 return 0;
151} 145}
152 146
153/*----------------------------------------------------------------- 147/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6ca9d0a6fd1..31056abca89d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
390 390
391 r = map_io(m, bio, mpio, 1); 391 r = map_io(m, bio, mpio, 1);
392 if (r < 0) 392 if (r < 0)
393 bio_endio(bio, bio->bi_size, r); 393 bio_endio(bio, r);
394 else if (r == DM_MAPIO_REMAPPED) 394 else if (r == DM_MAPIO_REMAPPED)
395 generic_make_request(bio); 395 generic_make_request(bio);
396 else if (r == DM_MAPIO_REQUEUE) 396 else if (r == DM_MAPIO_REQUEUE)
397 bio_endio(bio, bio->bi_size, -EIO); 397 bio_endio(bio, -EIO);
398 398
399 bio = next; 399 bio = next;
400 } 400 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 144071e70a93..d09ff15490a5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
820 break; 820 break;
821 } 821 }
822 } 822 }
823 bio_endio(bio, bio->bi_size, 0); 823 bio_endio(bio, 0);
824} 824}
825 825
826static void do_write(struct mirror_set *ms, struct bio *bio) 826static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
900 */ 900 */
901 if (unlikely(ms->log_failure)) 901 if (unlikely(ms->log_failure))
902 while ((bio = bio_list_pop(&sync))) 902 while ((bio = bio_list_pop(&sync)))
903 bio_endio(bio, bio->bi_size, -EIO); 903 bio_endio(bio, -EIO);
904 else while ((bio = bio_list_pop(&sync))) 904 else while ((bio = bio_list_pop(&sync)))
905 do_write(ms, bio); 905 do_write(ms, bio);
906 906
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 83ddbfe6b8a4..98a633f3d6b0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
636 while (bio) { 636 while (bio) {
637 n = bio->bi_next; 637 n = bio->bi_next;
638 bio->bi_next = NULL; 638 bio->bi_next = NULL;
639 bio_io_error(bio, bio->bi_size); 639 bio_io_error(bio);
640 bio = n; 640 bio = n;
641 } 641 }
642} 642}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index f314d7dc9c26..bdec206c404b 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
43 break; 43 break;
44 } 44 }
45 45
46 bio_endio(bio, bio->bi_size, 0); 46 bio_endio(bio, 0);
47 47
48 /* accepted bio, don't make new request */ 48 /* accepted bio, don't make new request */
49 return DM_MAPIO_SUBMITTED; 49 return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2120155929a6..167765c47747 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
484 blk_add_trace_bio(io->md->queue, io->bio, 484 blk_add_trace_bio(io->md->queue, io->bio,
485 BLK_TA_COMPLETE); 485 BLK_TA_COMPLETE);
486 486
487 bio_endio(io->bio, io->bio->bi_size, io->error); 487 bio_endio(io->bio, io->error);
488 } 488 }
489 489
490 free_io(io->md, io); 490 free_io(io->md, io);
491 } 491 }
492} 492}
493 493
494static int clone_endio(struct bio *bio, unsigned int done, int error) 494static void clone_endio(struct bio *bio, int error)
495{ 495{
496 int r = 0; 496 int r = 0;
497 struct dm_target_io *tio = bio->bi_private; 497 struct dm_target_io *tio = bio->bi_private;
498 struct mapped_device *md = tio->io->md; 498 struct mapped_device *md = tio->io->md;
499 dm_endio_fn endio = tio->ti->type->end_io; 499 dm_endio_fn endio = tio->ti->type->end_io;
500 500
501 if (bio->bi_size)
502 return 1;
503
504 if (!bio_flagged(bio, BIO_UPTODATE) && !error) 501 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
505 error = -EIO; 502 error = -EIO;
506 503
@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
514 error = r; 511 error = r;
515 else if (r == DM_ENDIO_INCOMPLETE) 512 else if (r == DM_ENDIO_INCOMPLETE)
516 /* The target will handle the io */ 513 /* The target will handle the io */
517 return 1; 514 return;
518 else if (r) { 515 else if (r) {
519 DMWARN("unimplemented target endio return value: %d", r); 516 DMWARN("unimplemented target endio return value: %d", r);
520 BUG(); 517 BUG();
@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
530 527
531 bio_put(bio); 528 bio_put(bio);
532 free_tio(md, tio); 529 free_tio(md, tio);
533 return r;
534} 530}
535 531
536static sector_t max_io_len(struct mapped_device *md, 532static sector_t max_io_len(struct mapped_device *md,
@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
761 757
762 ci.map = dm_get_table(md); 758 ci.map = dm_get_table(md);
763 if (!ci.map) { 759 if (!ci.map) {
764 bio_io_error(bio, bio->bi_size); 760 bio_io_error(bio);
765 return; 761 return;
766 } 762 }
767 763
@@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
803 * guarantee it is (or can be) handled by the targets correctly. 799 * guarantee it is (or can be) handled by the targets correctly.
804 */ 800 */
805 if (unlikely(bio_barrier(bio))) { 801 if (unlikely(bio_barrier(bio))) {
806 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 802 bio_endio(bio, -EOPNOTSUPP);
807 return 0; 803 return 0;
808 } 804 }
809 805
@@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
820 up_read(&md->io_lock); 816 up_read(&md->io_lock);
821 817
822 if (bio_rw(bio) == READA) { 818 if (bio_rw(bio) == READA) {
823 bio_io_error(bio, bio->bi_size); 819 bio_io_error(bio);
824 return 0; 820 return 0;
825 } 821 }
826 822
827 r = queue_io(md, bio); 823 r = queue_io(md, bio);
828 if (r < 0) { 824 if (r < 0) {
829 bio_io_error(bio, bio->bi_size); 825 bio_io_error(bio);
830 return 0; 826 return 0;
831 827
832 } else if (r == 0) 828 } else if (r == 0)
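clone_endio() still consults the target's optional end_io hook; under the new void-returning callback, DM_ENDIO_INCOMPLETE simply turns into an early return instead of "return 1". A hedged sketch of what such a hook looks like from the target's side (2.6.23-era dm_endio_fn signature assumed; the condition and names are illustrative):

#include <linux/bio.h>
#include <linux/device-mapper.h>

static int demo_target_end_io(struct dm_target *ti, struct bio *bio,
			      int error, union map_info *map_context)
{
	if (error == -EOPNOTSUPP)		/* illustrative condition only */
		return DM_ENDIO_INCOMPLETE;	/* target keeps ownership of the bio */

	return error;				/* pass the result to dec_pending() */
}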
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cb059cf14c2e..cf2ddce34118 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -65,18 +65,16 @@
65#include <linux/raid/md.h> 65#include <linux/raid/md.h>
66 66
67 67
68static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error) 68static void faulty_fail(struct bio *bio, int error)
69{ 69{
70 struct bio *b = bio->bi_private; 70 struct bio *b = bio->bi_private;
71 71
72 b->bi_size = bio->bi_size; 72 b->bi_size = bio->bi_size;
73 b->bi_sector = bio->bi_sector; 73 b->bi_sector = bio->bi_sector;
74 74
75 if (bio->bi_size == 0) 75 bio_put(bio);
76 bio_put(bio);
77 76
78 clear_bit(BIO_UPTODATE, &b->bi_flags); 77 bio_io_error(b);
79 return (b->bi_end_io)(b, bytes_done, -EIO);
80} 78}
81 79
82typedef struct faulty_conf { 80typedef struct faulty_conf {
@@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
179 /* special case - don't decrement, don't generic_make_request, 177 /* special case - don't decrement, don't generic_make_request,
180 * just fail immediately 178 * just fail immediately
181 */ 179 */
182 bio_endio(bio, bio->bi_size, -EIO); 180 bio_endio(bio, -EIO);
183 return 0; 181 return 0;
184 } 182 }
185 183
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 17f795c3e0ab..550148770bb2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -338,7 +338,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
338 sector_t block; 338 sector_t block;
339 339
340 if (unlikely(bio_barrier(bio))) { 340 if (unlikely(bio_barrier(bio))) {
341 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 341 bio_endio(bio, -EOPNOTSUPP);
342 return 0; 342 return 0;
343 } 343 }
344 344
@@ -358,7 +358,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
358 bdevname(tmp_dev->rdev->bdev, b), 358 bdevname(tmp_dev->rdev->bdev, b),
359 (unsigned long long)tmp_dev->size, 359 (unsigned long long)tmp_dev->size,
360 (unsigned long long)tmp_dev->offset); 360 (unsigned long long)tmp_dev->offset);
361 bio_io_error(bio, bio->bi_size); 361 bio_io_error(bio);
362 return 0; 362 return 0;
363 } 363 }
364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > 364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f883b7e37f3d..e8f102ea9b03 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,7 +213,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
213 213
214static int md_fail_request (struct request_queue *q, struct bio *bio) 214static int md_fail_request (struct request_queue *q, struct bio *bio)
215{ 215{
216 bio_io_error(bio, bio->bi_size); 216 bio_io_error(bio);
217 return 0; 217 return 0;
218} 218}
219 219
@@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
384} 384}
385 385
386 386
387static int super_written(struct bio *bio, unsigned int bytes_done, int error) 387static void super_written(struct bio *bio, int error)
388{ 388{
389 mdk_rdev_t *rdev = bio->bi_private; 389 mdk_rdev_t *rdev = bio->bi_private;
390 mddev_t *mddev = rdev->mddev; 390 mddev_t *mddev = rdev->mddev;
391 if (bio->bi_size)
392 return 1;
393 391
394 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 392 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
395 printk("md: super_written gets error=%d, uptodate=%d\n", 393 printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
401 if (atomic_dec_and_test(&mddev->pending_writes)) 399 if (atomic_dec_and_test(&mddev->pending_writes))
402 wake_up(&mddev->sb_wait); 400 wake_up(&mddev->sb_wait);
403 bio_put(bio); 401 bio_put(bio);
404 return 0;
405} 402}
406 403
407static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 404static void super_written_barrier(struct bio *bio, int error)
408{ 405{
409 struct bio *bio2 = bio->bi_private; 406 struct bio *bio2 = bio->bi_private;
410 mdk_rdev_t *rdev = bio2->bi_private; 407 mdk_rdev_t *rdev = bio2->bi_private;
411 mddev_t *mddev = rdev->mddev; 408 mddev_t *mddev = rdev->mddev;
412 if (bio->bi_size)
413 return 1;
414 409
415 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 410 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
416 error == -EOPNOTSUPP) { 411 error == -EOPNOTSUPP) {
@@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
424 spin_unlock_irqrestore(&mddev->write_lock, flags); 419 spin_unlock_irqrestore(&mddev->write_lock, flags);
425 wake_up(&mddev->sb_wait); 420 wake_up(&mddev->sb_wait);
426 bio_put(bio); 421 bio_put(bio);
427 return 0; 422 } else {
423 bio_put(bio2);
424 bio->bi_private = rdev;
425 super_written(bio, error);
428 } 426 }
429 bio_put(bio2);
430 bio->bi_private = rdev;
431 return super_written(bio, bytes_done, error);
432} 427}
433 428
434void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 429void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
489 finish_wait(&mddev->sb_wait, &wq); 484 finish_wait(&mddev->sb_wait, &wq);
490} 485}
491 486
492static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 487static void bi_complete(struct bio *bio, int error)
493{ 488{
494 if (bio->bi_size)
495 return 1;
496
497 complete((struct completion*)bio->bi_private); 489 complete((struct completion*)bio->bi_private);
498 return 0;
499} 490}
500 491
501int sync_page_io(struct block_device *bdev, sector_t sector, int size, 492int sync_page_io(struct block_device *bdev, sector_t sector, int size,
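bi_complete() exists to support sync_page_io(): submit one bio, sleep on a completion, then read BIO_UPTODATE. A reduced stand-alone sketch of that synchronous pattern against the same era of the API (the wrapper function is illustrative):

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void demo_bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

static int demo_sync_read(struct block_device *bdev, sector_t sector,
			  int size, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int uptodate;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = demo_bi_complete;

	submit_bio(READ, bio);
	wait_for_completion(&event);

	uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);

	return uptodate ? 0 : -EIO;
}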
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1e2af43a73b9..f2a63f394ad9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
82 struct bio *bio = mp_bh->master_bio; 82 struct bio *bio = mp_bh->master_bio;
83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
84 84
85 bio_endio(bio, bio->bi_size, err); 85 bio_endio(bio, err);
86 mempool_free(mp_bh, conf->pool); 86 mempool_free(mp_bh, conf->pool);
87} 87}
88 88
89static int multipath_end_request(struct bio *bio, unsigned int bytes_done, 89static void multipath_end_request(struct bio *bio, int error)
90 int error)
91{ 90{
92 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 91 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
93 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); 92 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
94 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 93 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
95 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; 94 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
96 95
97 if (bio->bi_size)
98 return 1;
99
100 if (uptodate) 96 if (uptodate)
101 multipath_end_bh_io(mp_bh, 0); 97 multipath_end_bh_io(mp_bh, 0);
102 else if (!bio_rw_ahead(bio)) { 98 else if (!bio_rw_ahead(bio)) {
@@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
112 } else 108 } else
113 multipath_end_bh_io(mp_bh, error); 109 multipath_end_bh_io(mp_bh, error);
114 rdev_dec_pending(rdev, conf->mddev); 110 rdev_dec_pending(rdev, conf->mddev);
115 return 0;
116} 111}
117 112
118static void unplug_slaves(mddev_t *mddev) 113static void unplug_slaves(mddev_t *mddev)
@@ -155,7 +150,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
155 const int rw = bio_data_dir(bio); 150 const int rw = bio_data_dir(bio);
156 151
157 if (unlikely(bio_barrier(bio))) { 152 if (unlikely(bio_barrier(bio))) {
158 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 153 bio_endio(bio, -EOPNOTSUPP);
159 return 0; 154 return 0;
160 } 155 }
161 156
@@ -169,7 +164,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
169 164
170 mp_bh->path = multipath_map(conf); 165 mp_bh->path = multipath_map(conf);
171 if (mp_bh->path < 0) { 166 if (mp_bh->path < 0) {
172 bio_endio(bio, bio->bi_size, -EIO); 167 bio_endio(bio, -EIO);
173 mempool_free(mp_bh, conf->pool); 168 mempool_free(mp_bh, conf->pool);
174 return 0; 169 return 0;
175 } 170 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b8216bc6db45..ef0da2d84959 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -420,7 +420,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
420 const int rw = bio_data_dir(bio); 420 const int rw = bio_data_dir(bio);
421 421
422 if (unlikely(bio_barrier(bio))) { 422 if (unlikely(bio_barrier(bio))) {
423 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 423 bio_endio(bio, -EOPNOTSUPP);
424 return 0; 424 return 0;
425 } 425 }
426 426
@@ -490,7 +490,7 @@ bad_map:
490 " or bigger than %dk %llu %d\n", chunk_size, 490 " or bigger than %dk %llu %d\n", chunk_size,
491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
492 492
493 bio_io_error(bio, bio->bi_size); 493 bio_io_error(bio);
494 return 0; 494 return 0;
495} 495}
496 496
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f33a729960ca..6d03bea6fa58 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
238 (unsigned long long) bio->bi_sector + 238 (unsigned long long) bio->bi_sector +
239 (bio->bi_size >> 9) - 1); 239 (bio->bi_size >> 9) - 1);
240 240
241 bio_endio(bio, bio->bi_size, 241 bio_endio(bio,
242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); 242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
243 } 243 }
244 free_r1bio(r1_bio); 244 free_r1bio(r1_bio);
@@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
255 r1_bio->sector + (r1_bio->sectors); 255 r1_bio->sector + (r1_bio->sectors);
256} 256}
257 257
258static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 258static void raid1_end_read_request(struct bio *bio, int error)
259{ 259{
260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
262 int mirror; 262 int mirror;
263 conf_t *conf = mddev_to_conf(r1_bio->mddev); 263 conf_t *conf = mddev_to_conf(r1_bio->mddev);
264 264
265 if (bio->bi_size)
266 return 1;
267
268 mirror = r1_bio->read_disk; 265 mirror = r1_bio->read_disk;
269 /* 266 /*
270 * this branch is our 'one mirror IO has finished' event handler: 267 * this branch is our 'one mirror IO has finished' event handler:
@@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
301 } 298 }
302 299
303 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 300 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
304 return 0;
305} 301}
306 302
307static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 303static void raid1_end_write_request(struct bio *bio, int error)
308{ 304{
309 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 305 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
310 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 306 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
312 conf_t *conf = mddev_to_conf(r1_bio->mddev); 308 conf_t *conf = mddev_to_conf(r1_bio->mddev);
313 struct bio *to_put = NULL; 309 struct bio *to_put = NULL;
314 310
315 if (bio->bi_size)
316 return 1;
317 311
318 for (mirror = 0; mirror < conf->raid_disks; mirror++) 312 for (mirror = 0; mirror < conf->raid_disks; mirror++)
319 if (r1_bio->bios[mirror] == bio) 313 if (r1_bio->bios[mirror] == bio)
@@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
366 (unsigned long long) mbio->bi_sector, 360 (unsigned long long) mbio->bi_sector,
367 (unsigned long long) mbio->bi_sector + 361 (unsigned long long) mbio->bi_sector +
368 (mbio->bi_size >> 9) - 1); 362 (mbio->bi_size >> 9) - 1);
369 bio_endio(mbio, mbio->bi_size, 0); 363 bio_endio(mbio, 0);
370 } 364 }
371 } 365 }
372 } 366 }
@@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
400 394
401 if (to_put) 395 if (to_put)
402 bio_put(to_put); 396 bio_put(to_put);
403
404 return 0;
405} 397}
406 398
407 399
@@ -796,7 +788,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
796 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { 788 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
797 if (rw == WRITE) 789 if (rw == WRITE)
798 md_write_end(mddev); 790 md_write_end(mddev);
799 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 791 bio_endio(bio, -EOPNOTSUPP);
800 return 0; 792 return 0;
801 } 793 }
802 794
@@ -1137,14 +1129,11 @@ abort:
1137} 1129}
1138 1130
1139 1131
1140static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1132static void end_sync_read(struct bio *bio, int error)
1141{ 1133{
1142 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1134 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1143 int i; 1135 int i;
1144 1136
1145 if (bio->bi_size)
1146 return 1;
1147
1148 for (i=r1_bio->mddev->raid_disks; i--; ) 1137 for (i=r1_bio->mddev->raid_disks; i--; )
1149 if (r1_bio->bios[i] == bio) 1138 if (r1_bio->bios[i] == bio)
1150 break; 1139 break;
@@ -1160,10 +1149,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1160 1149
1161 if (atomic_dec_and_test(&r1_bio->remaining)) 1150 if (atomic_dec_and_test(&r1_bio->remaining))
1162 reschedule_retry(r1_bio); 1151 reschedule_retry(r1_bio);
1163 return 0;
1164} 1152}
1165 1153
1166static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1154static void end_sync_write(struct bio *bio, int error)
1167{ 1155{
1168 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1156 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1169 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1157 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -1172,9 +1160,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1172 int i; 1160 int i;
1173 int mirror=0; 1161 int mirror=0;
1174 1162
1175 if (bio->bi_size)
1176 return 1;
1177
1178 for (i = 0; i < conf->raid_disks; i++) 1163 for (i = 0; i < conf->raid_disks; i++)
1179 if (r1_bio->bios[i] == bio) { 1164 if (r1_bio->bios[i] == bio) {
1180 mirror = i; 1165 mirror = i;
@@ -1200,7 +1185,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1200 md_done_sync(mddev, r1_bio->sectors, uptodate); 1185 md_done_sync(mddev, r1_bio->sectors, uptodate);
1201 put_buf(r1_bio); 1186 put_buf(r1_bio);
1202 } 1187 }
1203 return 0;
1204} 1188}
1205 1189
1206static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1190static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4e53792aa520..25a96c42bdb0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
227{ 227{
228 struct bio *bio = r10_bio->master_bio; 228 struct bio *bio = r10_bio->master_bio;
229 229
230 bio_endio(bio, bio->bi_size, 230 bio_endio(bio,
231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO); 231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
232 free_r10bio(r10_bio); 232 free_r10bio(r10_bio);
233} 233}
@@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
243 r10_bio->devs[slot].addr + (r10_bio->sectors); 243 r10_bio->devs[slot].addr + (r10_bio->sectors);
244} 244}
245 245
246static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 246static void raid10_end_read_request(struct bio *bio, int error)
247{ 247{
248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
250 int slot, dev; 250 int slot, dev;
251 conf_t *conf = mddev_to_conf(r10_bio->mddev); 251 conf_t *conf = mddev_to_conf(r10_bio->mddev);
252 252
253 if (bio->bi_size)
254 return 1;
255 253
256 slot = r10_bio->read_slot; 254 slot = r10_bio->read_slot;
257 dev = r10_bio->devs[slot].devnum; 255 dev = r10_bio->devs[slot].devnum;
@@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
284 } 282 }
285 283
286 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 284 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
287 return 0;
288} 285}
289 286
290static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 287static void raid10_end_write_request(struct bio *bio, int error)
291{ 288{
292 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
293 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 290 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
294 int slot, dev; 291 int slot, dev;
295 conf_t *conf = mddev_to_conf(r10_bio->mddev); 292 conf_t *conf = mddev_to_conf(r10_bio->mddev);
296 293
297 if (bio->bi_size)
298 return 1;
299
300 for (slot = 0; slot < conf->copies; slot++) 294 for (slot = 0; slot < conf->copies; slot++)
301 if (r10_bio->devs[slot].bio == bio) 295 if (r10_bio->devs[slot].bio == bio)
302 break; 296 break;
@@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
339 } 333 }
340 334
341 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 335 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
342 return 0;
343} 336}
344 337
345 338
@@ -787,7 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
787 unsigned long flags; 780 unsigned long flags;
788 781
789 if (unlikely(bio_barrier(bio))) { 782 if (unlikely(bio_barrier(bio))) {
790 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 783 bio_endio(bio, -EOPNOTSUPP);
791 return 0; 784 return 0;
792 } 785 }
793 786
@@ -819,7 +812,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
819 " or bigger than %dk %llu %d\n", chunk_sects/2, 812 " or bigger than %dk %llu %d\n", chunk_sects/2,
820 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 813 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
821 814
822 bio_io_error(bio, bio->bi_size); 815 bio_io_error(bio);
823 return 0; 816 return 0;
824 } 817 }
825 818
@@ -1155,15 +1148,12 @@ abort:
1155} 1148}
1156 1149
1157 1150
1158static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1151static void end_sync_read(struct bio *bio, int error)
1159{ 1152{
1160 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1153 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1161 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1154 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1162 int i,d; 1155 int i,d;
1163 1156
1164 if (bio->bi_size)
1165 return 1;
1166
1167 for (i=0; i<conf->copies; i++) 1157 for (i=0; i<conf->copies; i++)
1168 if (r10_bio->devs[i].bio == bio) 1158 if (r10_bio->devs[i].bio == bio)
1169 break; 1159 break;
@@ -1192,10 +1182,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1192 reschedule_retry(r10_bio); 1182 reschedule_retry(r10_bio);
1193 } 1183 }
1194 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1184 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1195 return 0;
1196} 1185}
1197 1186
1198static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1187static void end_sync_write(struct bio *bio, int error)
1199{ 1188{
1200 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1189 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1201 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1190 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@@ -1203,9 +1192,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1203 conf_t *conf = mddev_to_conf(mddev); 1192 conf_t *conf = mddev_to_conf(mddev);
1204 int i,d; 1193 int i,d;
1205 1194
1206 if (bio->bi_size)
1207 return 1;
1208
1209 for (i = 0; i < conf->copies; i++) 1195 for (i = 0; i < conf->copies; i++)
1210 if (r10_bio->devs[i].bio == bio) 1196 if (r10_bio->devs[i].bio == bio)
1211 break; 1197 break;
@@ -1228,7 +1214,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1228 } 1214 }
1229 } 1215 }
1230 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1216 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1231 return 0;
1232} 1217}
1233 1218
1234/* 1219/*
@@ -1374,7 +1359,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1374 if (test_bit(R10BIO_Uptodate, &r10_bio->state)) 1359 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1375 generic_make_request(wbio); 1360 generic_make_request(wbio);
1376 else 1361 else
1377 bio_endio(wbio, wbio->bi_size, -EIO); 1362 bio_endio(wbio, -EIO);
1378} 1363}
1379 1364
1380 1365
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f96dea975fa5..caaca9e178bc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
108{ 108{
109 struct bio *bi = return_bi; 109 struct bio *bi = return_bi;
110 while (bi) { 110 while (bi) {
111 int bytes = bi->bi_size;
112 111
113 return_bi = bi->bi_next; 112 return_bi = bi->bi_next;
114 bi->bi_next = NULL; 113 bi->bi_next = NULL;
115 bi->bi_size = 0; 114 bi->bi_size = 0;
116 bi->bi_end_io(bi, bytes, 115 bi->bi_end_io(bi,
117 test_bit(BIO_UPTODATE, &bi->bi_flags) 116 test_bit(BIO_UPTODATE, &bi->bi_flags)
118 ? 0 : -EIO); 117 ? 0 : -EIO);
119 bi = return_bi; 118 bi = return_bi;
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
382 return pending; 381 return pending;
383} 382}
384 383
385static int 384static void
386raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error); 385raid5_end_read_request(struct bio *bi, int error);
387static int 386static void
388raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error); 387raid5_end_write_request(struct bio *bi, int error);
389 388
390static void ops_run_io(struct stripe_head *sh) 389static void ops_run_io(struct stripe_head *sh)
391{ 390{
@@ -1110,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
1110 conf->slab_cache = NULL; 1109 conf->slab_cache = NULL;
1111} 1110}
1112 1111
1113static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 1112static void raid5_end_read_request(struct bio * bi, int error)
1114 int error)
1115{ 1113{
1116 struct stripe_head *sh = bi->bi_private; 1114 struct stripe_head *sh = bi->bi_private;
1117 raid5_conf_t *conf = sh->raid_conf; 1115 raid5_conf_t *conf = sh->raid_conf;
@@ -1120,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1120 char b[BDEVNAME_SIZE]; 1118 char b[BDEVNAME_SIZE];
1121 mdk_rdev_t *rdev; 1119 mdk_rdev_t *rdev;
1122 1120
1123 if (bi->bi_size)
1124 return 1;
1125 1121
1126 for (i=0 ; i<disks; i++) 1122 for (i=0 ; i<disks; i++)
1127 if (bi == &sh->dev[i].req) 1123 if (bi == &sh->dev[i].req)
@@ -1132,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1132 uptodate); 1128 uptodate);
1133 if (i == disks) { 1129 if (i == disks) {
1134 BUG(); 1130 BUG();
1135 return 0; 1131 return;
1136 } 1132 }
1137 1133
1138 if (uptodate) { 1134 if (uptodate) {
@@ -1185,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1185 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1181 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1186 set_bit(STRIPE_HANDLE, &sh->state); 1182 set_bit(STRIPE_HANDLE, &sh->state);
1187 release_stripe(sh); 1183 release_stripe(sh);
1188 return 0;
1189} 1184}
1190 1185
1191static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 1186static void raid5_end_write_request (struct bio *bi, int error)
1192 int error)
1193{ 1187{
1194 struct stripe_head *sh = bi->bi_private; 1188 struct stripe_head *sh = bi->bi_private;
1195 raid5_conf_t *conf = sh->raid_conf; 1189 raid5_conf_t *conf = sh->raid_conf;
1196 int disks = sh->disks, i; 1190 int disks = sh->disks, i;
1197 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1191 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1198 1192
1199 if (bi->bi_size)
1200 return 1;
1201
1202 for (i=0 ; i<disks; i++) 1193 for (i=0 ; i<disks; i++)
1203 if (bi == &sh->dev[i].req) 1194 if (bi == &sh->dev[i].req)
1204 break; 1195 break;
@@ -1208,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1208 uptodate); 1199 uptodate);
1209 if (i == disks) { 1200 if (i == disks) {
1210 BUG(); 1201 BUG();
1211 return 0; 1202 return;
1212 } 1203 }
1213 1204
1214 if (!uptodate) 1205 if (!uptodate)
@@ -1219,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1219 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1210 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1220 set_bit(STRIPE_HANDLE, &sh->state); 1211 set_bit(STRIPE_HANDLE, &sh->state);
1221 release_stripe(sh); 1212 release_stripe(sh);
1222 return 0;
1223} 1213}
1224 1214
1225 1215
@@ -3340,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3340 * first). 3330 * first).
3341 * If the read failed.. 3331 * If the read failed..
3342 */ 3332 */
3343static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error) 3333static void raid5_align_endio(struct bio *bi, int error)
3344{ 3334{
3345 struct bio* raid_bi = bi->bi_private; 3335 struct bio* raid_bi = bi->bi_private;
3346 mddev_t *mddev; 3336 mddev_t *mddev;
@@ -3348,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3348 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3338 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3349 mdk_rdev_t *rdev; 3339 mdk_rdev_t *rdev;
3350 3340
3351 if (bi->bi_size)
3352 return 1;
3353 bio_put(bi); 3341 bio_put(bi);
3354 3342
3355 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3343 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3360,17 +3348,16 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3360 rdev_dec_pending(rdev, conf->mddev); 3348 rdev_dec_pending(rdev, conf->mddev);
3361 3349
3362 if (!error && uptodate) { 3350 if (!error && uptodate) {
3363 bio_endio(raid_bi, bytes, 0); 3351 bio_endio(raid_bi, 0);
3364 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3352 if (atomic_dec_and_test(&conf->active_aligned_reads))
3365 wake_up(&conf->wait_for_stripe); 3353 wake_up(&conf->wait_for_stripe);
3366 return 0; 3354 return;
3367 } 3355 }
3368 3356
3369 3357
3370 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3358 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3371 3359
3372 add_bio_to_retry(raid_bi, conf); 3360 add_bio_to_retry(raid_bi, conf);
3373 return 0;
3374} 3361}
3375 3362
3376static int bio_fits_rdev(struct bio *bi) 3363static int bio_fits_rdev(struct bio *bi)
@@ -3476,7 +3463,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3476 int remaining; 3463 int remaining;
3477 3464
3478 if (unlikely(bio_barrier(bi))) { 3465 if (unlikely(bio_barrier(bi))) {
3479 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 3466 bio_endio(bi, -EOPNOTSUPP);
3480 return 0; 3467 return 0;
3481 } 3468 }
3482 3469
@@ -3592,12 +3579,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
3592 remaining = --bi->bi_phys_segments; 3579 remaining = --bi->bi_phys_segments;
3593 spin_unlock_irq(&conf->device_lock); 3580 spin_unlock_irq(&conf->device_lock);
3594 if (remaining == 0) { 3581 if (remaining == 0) {
3595 int bytes = bi->bi_size;
3596 3582
3597 if ( rw == WRITE ) 3583 if ( rw == WRITE )
3598 md_write_end(mddev); 3584 md_write_end(mddev);
3599 bi->bi_size = 0; 3585
3600 bi->bi_end_io(bi, bytes, 3586 bi->bi_end_io(bi,
3601 test_bit(BIO_UPTODATE, &bi->bi_flags) 3587 test_bit(BIO_UPTODATE, &bi->bi_flags)
3602 ? 0 : -EIO); 3588 ? 0 : -EIO);
3603 } 3589 }
@@ -3875,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3875 remaining = --raid_bio->bi_phys_segments; 3861 remaining = --raid_bio->bi_phys_segments;
3876 spin_unlock_irq(&conf->device_lock); 3862 spin_unlock_irq(&conf->device_lock);
3877 if (remaining == 0) { 3863 if (remaining == 0) {
3878 int bytes = raid_bio->bi_size;
3879 3864
3880 raid_bio->bi_size = 0; 3865 raid_bio->bi_end_io(raid_bio,
3881 raid_bio->bi_end_io(raid_bio, bytes,
3882 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3866 test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
3883 ? 0 : -EIO); 3867 ? 0 : -EIO);
3884 } 3868 }
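The raid5.c hunks above illustrate the new completion contract for stacked drivers: a bi_end_io handler now returns void, receives only the bio and an error code, and is called exactly once when the whole bio has completed, so the old "if (bi->bi_size) return 1;" partial-completion guard and the trailing "return 0;" disappear, and a parent bio is finished with bio_endio(bio, error) rather than bio_endio(bio, bytes, error). The following is a minimal sketch of a handler written against that contract; it is not part of the patch, and the my_io structure and my_child_end_io name are invented for illustration.

#include <linux/bio.h>
#include <linux/slab.h>

struct my_io {                          /* illustrative per-request bookkeeping */
        struct bio      *parent;        /* original bio handed to the driver */
        atomic_t        pending;        /* child bios still in flight */
        int             error;          /* first error seen, 0 if none */
};

static void my_child_end_io(struct bio *bio, int error)
{
        struct my_io *io = bio->bi_private;

        /* Partial completions are gone: no bi_size check, no return value. */
        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
                io->error = error ? error : -EIO;
        bio_put(bio);

        if (atomic_dec_and_test(&io->pending)) {
                bio_endio(io->parent, io->error);  /* finish the parent exactly once */
                kfree(io);
        }
}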
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index d32c60dbdd82..571320ab9e1a 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -472,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
472 struct dasd_ccw_req *cqr; 472 struct dasd_ccw_req *cqr;
473 struct dasd_diag_req *dreq; 473 struct dasd_diag_req *dreq;
474 struct dasd_diag_bio *dbio; 474 struct dasd_diag_bio *dbio;
475 struct bio *bio; 475 struct req_iterator iter;
476 struct bio_vec *bv; 476 struct bio_vec *bv;
477 char *dst; 477 char *dst;
478 unsigned int count, datasize; 478 unsigned int count, datasize;
479 sector_t recid, first_rec, last_rec; 479 sector_t recid, first_rec, last_rec;
480 unsigned int blksize, off; 480 unsigned int blksize, off;
481 unsigned char rw_cmd; 481 unsigned char rw_cmd;
482 int i;
483 482
484 if (rq_data_dir(req) == READ) 483 if (rq_data_dir(req) == READ)
485 rw_cmd = MDSK_READ_REQ; 484 rw_cmd = MDSK_READ_REQ;
@@ -493,13 +492,11 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
493 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 492 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
494 /* Check struct bio and count the number of blocks for the request. */ 493 /* Check struct bio and count the number of blocks for the request. */
495 count = 0; 494 count = 0;
496 rq_for_each_bio(bio, req) { 495 rq_for_each_segment(bv, req, iter) {
497 bio_for_each_segment(bv, bio, i) { 496 if (bv->bv_len & (blksize - 1))
498 if (bv->bv_len & (blksize - 1)) 497 /* Fba can only do full blocks. */
499 /* Fba can only do full blocks. */ 498 return ERR_PTR(-EINVAL);
500 return ERR_PTR(-EINVAL); 499 count += bv->bv_len >> (device->s2b_shift + 9);
501 count += bv->bv_len >> (device->s2b_shift + 9);
502 }
503 } 500 }
504 /* Paranoia. */ 501 /* Paranoia. */
505 if (count != last_rec - first_rec + 1) 502 if (count != last_rec - first_rec + 1)
@@ -516,18 +513,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
516 dreq->block_count = count; 513 dreq->block_count = count;
517 dbio = dreq->bio; 514 dbio = dreq->bio;
518 recid = first_rec; 515 recid = first_rec;
519 rq_for_each_bio(bio, req) { 516 rq_for_each_segment(bv, req, iter) {
520 bio_for_each_segment(bv, bio, i) { 517 dst = page_address(bv->bv_page) + bv->bv_offset;
521 dst = page_address(bv->bv_page) + bv->bv_offset; 518 for (off = 0; off < bv->bv_len; off += blksize) {
522 for (off = 0; off < bv->bv_len; off += blksize) { 519 memset(dbio, 0, sizeof (struct dasd_diag_bio));
523 memset(dbio, 0, sizeof (struct dasd_diag_bio)); 520 dbio->type = rw_cmd;
524 dbio->type = rw_cmd; 521 dbio->block_number = recid + 1;
525 dbio->block_number = recid + 1; 522 dbio->buffer = dst;
526 dbio->buffer = dst; 523 dbio++;
527 dbio++; 524 dst += blksize;
528 dst += blksize; 525 recid++;
529 recid++;
530 }
531 } 526 }
532 } 527 }
533 cqr->retries = DIAG_MAX_RETRIES; 528 cqr->retries = DIAG_MAX_RETRIES;
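This dasd_diag.c conversion, like the dasd_eckd.c, dasd_fba.c and tape hunks that follow, makes the same change on the request-walking side: the nested rq_for_each_bio()/bio_for_each_segment() loops collapse into a single rq_for_each_segment() loop driven by a struct req_iterator, which also removes the per-bio segment index i. A small sketch of the counting idiom under the new iterator, with an invented function name rather than the driver's own:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Count the data blocks in a request, walking it one segment at a time. */
static int example_count_blocks(struct request *req, unsigned int blksize)
{
        struct req_iterator iter;
        struct bio_vec *bv;
        int count = 0;

        rq_for_each_segment(bv, req, iter) {
                if (bv->bv_len & (blksize - 1))
                        return -EINVAL;         /* only whole blocks are accepted */
                count += bv->bv_len / blksize;
        }
        return count;
}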
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ea63ba7828f9..44adf8496bda 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1176 struct LO_eckd_data *LO_data; 1176 struct LO_eckd_data *LO_data;
1177 struct dasd_ccw_req *cqr; 1177 struct dasd_ccw_req *cqr;
1178 struct ccw1 *ccw; 1178 struct ccw1 *ccw;
1179 struct bio *bio; 1179 struct req_iterator iter;
1180 struct bio_vec *bv; 1180 struct bio_vec *bv;
1181 char *dst; 1181 char *dst;
1182 unsigned int blksize, blk_per_trk, off; 1182 unsigned int blksize, blk_per_trk, off;
@@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1185 sector_t first_trk, last_trk; 1185 sector_t first_trk, last_trk;
1186 unsigned int first_offs, last_offs; 1186 unsigned int first_offs, last_offs;
1187 unsigned char cmd, rcmd; 1187 unsigned char cmd, rcmd;
1188 int i;
1189 1188
1190 private = (struct dasd_eckd_private *) device->private; 1189 private = (struct dasd_eckd_private *) device->private;
1191 if (rq_data_dir(req) == READ) 1190 if (rq_data_dir(req) == READ)
@@ -1206,18 +1205,15 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1206 /* Check struct bio and count the number of blocks for the request. */ 1205 /* Check struct bio and count the number of blocks for the request. */
1207 count = 0; 1206 count = 0;
1208 cidaw = 0; 1207 cidaw = 0;
1209 rq_for_each_bio(bio, req) { 1208 rq_for_each_segment(bv, req, iter) {
1210 bio_for_each_segment(bv, bio, i) { 1209 if (bv->bv_len & (blksize - 1))
1211 if (bv->bv_len & (blksize - 1)) 1210 /* Eckd can only do full blocks. */
1212 /* Eckd can only do full blocks. */ 1211 return ERR_PTR(-EINVAL);
1213 return ERR_PTR(-EINVAL); 1212 count += bv->bv_len >> (device->s2b_shift + 9);
1214 count += bv->bv_len >> (device->s2b_shift + 9);
1215#if defined(CONFIG_64BIT) 1213#if defined(CONFIG_64BIT)
1216 if (idal_is_needed (page_address(bv->bv_page), 1214 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1217 bv->bv_len)) 1215 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1218 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1219#endif 1216#endif
1220 }
1221 } 1217 }
1222 /* Paranoia. */ 1218 /* Paranoia. */
1223 if (count != last_rec - first_rec + 1) 1219 if (count != last_rec - first_rec + 1)
@@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1257 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 1253 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1258 last_rec - recid + 1, cmd, device, blksize); 1254 last_rec - recid + 1, cmd, device, blksize);
1259 } 1255 }
1260 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 1256 rq_for_each_segment(bv, req, iter) {
1261 dst = page_address(bv->bv_page) + bv->bv_offset; 1257 dst = page_address(bv->bv_page) + bv->bv_offset;
1262 if (dasd_page_cache) { 1258 if (dasd_page_cache) {
1263 char *copy = kmem_cache_alloc(dasd_page_cache, 1259 char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1328{ 1324{
1329 struct dasd_eckd_private *private; 1325 struct dasd_eckd_private *private;
1330 struct ccw1 *ccw; 1326 struct ccw1 *ccw;
1331 struct bio *bio; 1327 struct req_iterator iter;
1332 struct bio_vec *bv; 1328 struct bio_vec *bv;
1333 char *dst, *cda; 1329 char *dst, *cda;
1334 unsigned int blksize, blk_per_trk, off; 1330 unsigned int blksize, blk_per_trk, off;
1335 sector_t recid; 1331 sector_t recid;
1336 int i, status; 1332 int status;
1337 1333
1338 if (!dasd_page_cache) 1334 if (!dasd_page_cache)
1339 goto out; 1335 goto out;
@@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1346 ccw++; 1342 ccw++;
1347 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 1343 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1348 ccw++; 1344 ccw++;
1349 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 1345 rq_for_each_segment(bv, req, iter) {
1350 dst = page_address(bv->bv_page) + bv->bv_offset; 1346 dst = page_address(bv->bv_page) + bv->bv_offset;
1351 for (off = 0; off < bv->bv_len; off += blksize) { 1347 for (off = 0; off < bv->bv_len; off += blksize) {
1352 /* Skip locate record. */ 1348 /* Skip locate record. */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index da16ead8aff2..1d95822e0b8e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
234 struct LO_fba_data *LO_data; 234 struct LO_fba_data *LO_data;
235 struct dasd_ccw_req *cqr; 235 struct dasd_ccw_req *cqr;
236 struct ccw1 *ccw; 236 struct ccw1 *ccw;
237 struct bio *bio; 237 struct req_iterator iter;
238 struct bio_vec *bv; 238 struct bio_vec *bv;
239 char *dst; 239 char *dst;
240 int count, cidaw, cplength, datasize; 240 int count, cidaw, cplength, datasize;
241 sector_t recid, first_rec, last_rec; 241 sector_t recid, first_rec, last_rec;
242 unsigned int blksize, off; 242 unsigned int blksize, off;
243 unsigned char cmd; 243 unsigned char cmd;
244 int i;
245 244
246 private = (struct dasd_fba_private *) device->private; 245 private = (struct dasd_fba_private *) device->private;
247 if (rq_data_dir(req) == READ) { 246 if (rq_data_dir(req) == READ) {
@@ -257,18 +256,15 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
257 /* Check struct bio and count the number of blocks for the request. */ 256 /* Check struct bio and count the number of blocks for the request. */
258 count = 0; 257 count = 0;
259 cidaw = 0; 258 cidaw = 0;
260 rq_for_each_bio(bio, req) { 259 rq_for_each_segment(bv, req, iter) {
261 bio_for_each_segment(bv, bio, i) { 260 if (bv->bv_len & (blksize - 1))
262 if (bv->bv_len & (blksize - 1)) 261 /* Fba can only do full blocks. */
263 /* Fba can only do full blocks. */ 262 return ERR_PTR(-EINVAL);
264 return ERR_PTR(-EINVAL); 263 count += bv->bv_len >> (device->s2b_shift + 9);
265 count += bv->bv_len >> (device->s2b_shift + 9);
266#if defined(CONFIG_64BIT) 264#if defined(CONFIG_64BIT)
267 if (idal_is_needed (page_address(bv->bv_page), 265 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
268 bv->bv_len)) 266 cidaw += bv->bv_len / blksize;
269 cidaw += bv->bv_len / blksize;
270#endif 267#endif
271 }
272 } 268 }
273 /* Paranoia. */ 269 /* Paranoia. */
274 if (count != last_rec - first_rec + 1) 270 if (count != last_rec - first_rec + 1)
@@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
304 locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count); 300 locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
305 } 301 }
306 recid = first_rec; 302 recid = first_rec;
307 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 303 rq_for_each_segment(bv, req, iter) {
308 dst = page_address(bv->bv_page) + bv->bv_offset; 304 dst = page_address(bv->bv_page) + bv->bv_offset;
309 if (dasd_page_cache) { 305 if (dasd_page_cache) {
310 char *copy = kmem_cache_alloc(dasd_page_cache, 306 char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
359{ 355{
360 struct dasd_fba_private *private; 356 struct dasd_fba_private *private;
361 struct ccw1 *ccw; 357 struct ccw1 *ccw;
362 struct bio *bio; 358 struct req_iterator iter;
363 struct bio_vec *bv; 359 struct bio_vec *bv;
364 char *dst, *cda; 360 char *dst, *cda;
365 unsigned int blksize, off; 361 unsigned int blksize, off;
366 int i, status; 362 int status;
367 363
368 if (!dasd_page_cache) 364 if (!dasd_page_cache)
369 goto out; 365 goto out;
@@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
374 ccw++; 370 ccw++;
375 if (private->rdc_data.mode.bits.data_chain != 0) 371 if (private->rdc_data.mode.bits.data_chain != 0)
376 ccw++; 372 ccw++;
377 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 373 rq_for_each_segment(bv, req, iter) {
378 dst = page_address(bv->bv_page) + bv->bv_offset; 374 dst = page_address(bv->bv_page) + bv->bv_offset;
379 for (off = 0; off < bv->bv_len; off += blksize) { 375 for (off = 0; off < bv->bv_len; off += blksize) {
380 /* Skip locate record. */ 376 /* Skip locate record. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8798bacf97..859f870552e3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -674,10 +674,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
674 } 674 }
675 bytes_done += bvec->bv_len; 675 bytes_done += bvec->bv_len;
676 } 676 }
677 bio_endio(bio, bytes_done, 0); 677 bio_endio(bio, 0);
678 return 0; 678 return 0;
679fail: 679fail:
680 bio_io_error(bio, bio->bi_size); 680 bio_io_error(bio);
681 return 0; 681 return 0;
682} 682}
683 683
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 354a060e5bec..0fbacc8b1063 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -230,12 +230,10 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
230 } 230 }
231 } 231 }
232 set_bit(BIO_UPTODATE, &bio->bi_flags); 232 set_bit(BIO_UPTODATE, &bio->bi_flags);
233 bytes = bio->bi_size; 233 bio_endio(bio, 0);
234 bio->bi_size = 0;
235 bio->bi_end_io(bio, bytes, 0);
236 return 0; 234 return 0;
237fail: 235fail:
238 bio_io_error(bio, bio->bi_size); 236 bio_io_error(bio);
239 return 0; 237 return 0;
240} 238}
241 239
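The dcssblk.c and xpram.c hunks show the same interface change from the submitter's side: a make_request function now completes a bio with bio_endio(bio, 0) on success and bio_io_error(bio) on failure, with no byte count and no need to clear bi_size or call bi_end_io by hand. A hedged sketch of that idiom (the function and the example_transfer() helper are hypothetical; only the completion calls mirror the patch):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_transfer(struct bio *bio);   /* hypothetical data-copy helper */

static int example_make_request(struct request_queue *q, struct bio *bio)
{
        if (example_transfer(bio) != 0)
                goto fail;
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);              /* was: bio->bi_end_io(bio, bytes, 0) */
        return 0;
fail:
        bio_io_error(bio);              /* was: bio_io_error(bio, bio->bi_size) */
        return 0;
}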
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 80e7a537e7d2..5b47e9cce75f 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134{ 1134{
1135 struct tape_request *request; 1135 struct tape_request *request;
1136 struct ccw1 *ccw; 1136 struct ccw1 *ccw;
1137 int count = 0, i; 1137 int count = 0;
1138 unsigned off; 1138 unsigned off;
1139 char *dst; 1139 char *dst;
1140 struct bio_vec *bv; 1140 struct bio_vec *bv;
1141 struct bio *bio; 1141 struct req_iterator iter;
1142 struct tape_34xx_block_id * start_block; 1142 struct tape_34xx_block_id * start_block;
1143 1143
1144 DBF_EVENT(6, "xBREDid:"); 1144 DBF_EVENT(6, "xBREDid:");
1145 1145
1146 /* Count the number of blocks for the request. */ 1146 /* Count the number of blocks for the request. */
1147 rq_for_each_bio(bio, req) { 1147 rq_for_each_segment(bv, req, iter)
1148 bio_for_each_segment(bv, bio, i) { 1148 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1149 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1150 }
1151 }
1152 1149
1153 /* Allocate the ccw request. */ 1150 /* Allocate the ccw request. */
1154 request = tape_alloc_request(3+count+1, 8); 1151 request = tape_alloc_request(3+count+1, 8);
@@ -1175,18 +1172,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1175 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 1172 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1176 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 1173 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1177 1174
1178 rq_for_each_bio(bio, req) { 1175 rq_for_each_segment(bv, req, iter) {
1179 bio_for_each_segment(bv, bio, i) { 1176 dst = kmap(bv->bv_page) + bv->bv_offset;
1180 dst = kmap(bv->bv_page) + bv->bv_offset; 1177 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
1181 for (off = 0; off < bv->bv_len; 1178 ccw->flags = CCW_FLAG_CC;
1182 off += TAPEBLOCK_HSEC_SIZE) { 1179 ccw->cmd_code = READ_FORWARD;
1183 ccw->flags = CCW_FLAG_CC; 1180 ccw->count = TAPEBLOCK_HSEC_SIZE;
1184 ccw->cmd_code = READ_FORWARD; 1181 set_normalized_cda(ccw, (void*) __pa(dst));
1185 ccw->count = TAPEBLOCK_HSEC_SIZE; 1182 ccw++;
1186 set_normalized_cda(ccw, (void*) __pa(dst)); 1183 dst += TAPEBLOCK_HSEC_SIZE;
1187 ccw++;
1188 dst += TAPEBLOCK_HSEC_SIZE;
1189 }
1190 } 1184 }
1191 } 1185 }
1192 1186
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 7e2b2ab49264..9f244c591eeb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
623{ 623{
624 struct tape_request *request; 624 struct tape_request *request;
625 struct ccw1 *ccw; 625 struct ccw1 *ccw;
626 int count = 0, start_block, i; 626 int count = 0, start_block;
627 unsigned off; 627 unsigned off;
628 char *dst; 628 char *dst;
629 struct bio_vec *bv; 629 struct bio_vec *bv;
630 struct bio *bio; 630 struct req_iterator iter;
631 631
632 DBF_EVENT(6, "xBREDid:"); 632 DBF_EVENT(6, "xBREDid:");
633 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 633 start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
634 DBF_EVENT(6, "start_block = %i\n", start_block); 634 DBF_EVENT(6, "start_block = %i\n", start_block);
635 635
636 rq_for_each_bio(bio, req) { 636 rq_for_each_segment(bv, req, iter)
637 bio_for_each_segment(bv, bio, i) { 637 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
638 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); 638
639 }
640 }
641 request = tape_alloc_request(2 + count + 1, 4); 639 request = tape_alloc_request(2 + count + 1, 4);
642 if (IS_ERR(request)) 640 if (IS_ERR(request))
643 return request; 641 return request;
@@ -653,21 +651,18 @@ tape_3590_bread(struct tape_device *device, struct request *req)
653 */ 651 */
654 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 652 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
655 653
656 rq_for_each_bio(bio, req) { 654 rq_for_each_segment(bv, req, iter) {
657 bio_for_each_segment(bv, bio, i) { 655 dst = page_address(bv->bv_page) + bv->bv_offset;
658 dst = page_address(bv->bv_page) + bv->bv_offset; 656 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
659 for (off = 0; off < bv->bv_len; 657 ccw->flags = CCW_FLAG_CC;
660 off += TAPEBLOCK_HSEC_SIZE) { 658 ccw->cmd_code = READ_FORWARD;
661 ccw->flags = CCW_FLAG_CC; 659 ccw->count = TAPEBLOCK_HSEC_SIZE;
662 ccw->cmd_code = READ_FORWARD; 660 set_normalized_cda(ccw, (void *) __pa(dst));
663 ccw->count = TAPEBLOCK_HSEC_SIZE; 661 ccw++;
664 set_normalized_cda(ccw, (void *) __pa(dst)); 662 dst += TAPEBLOCK_HSEC_SIZE;
665 ccw++;
666 dst += TAPEBLOCK_HSEC_SIZE;
667 }
668 if (off > bv->bv_len)
669 BUG();
670 } 663 }
664 if (off > bv->bv_len)
665 BUG();
671 } 666 }
672 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 667 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
673 DBF_EVENT(6, "xBREDccwg\n"); 668 DBF_EVENT(6, "xBREDccwg\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a417a6ff9f97..604f4d717933 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -263,25 +263,12 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
263 bio->bi_rw |= (1 << BIO_RW); 263 bio->bi_rw |= (1 << BIO_RW);
264 blk_queue_bounce(q, &bio); 264 blk_queue_bounce(q, &bio);
265 265
266 if (!rq->bio) 266 return blk_rq_append_bio(q, rq, bio);
267 blk_rq_bio_prep(q, rq, bio);
268 else if (!ll_back_merge_fn(q, rq, bio))
269 return -EINVAL;
270 else {
271 rq->biotail->bi_next = bio;
272 rq->biotail = bio;
273 }
274
275 return 0;
276} 267}
277 268
278static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error) 269static void scsi_bi_endio(struct bio *bio, int error)
279{ 270{
280 if (bio->bi_size)
281 return 1;
282
283 bio_put(bio); 271 bio_put(bio);
284 return 0;
285} 272}
286 273
287/** 274/**
@@ -337,7 +324,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
337 if (bio->bi_vcnt >= nr_vecs) { 324 if (bio->bi_vcnt >= nr_vecs) {
338 err = scsi_merge_bio(rq, bio); 325 err = scsi_merge_bio(rq, bio);
339 if (err) { 326 if (err) {
340 bio_endio(bio, bio->bi_size, 0); 327 bio_endio(bio, 0);
341 goto free_bios; 328 goto free_bios;
342 } 329 }
343 bio = NULL; 330 bio = NULL;
@@ -359,7 +346,7 @@ free_bios:
359 /* 346 /*
360 * call endio instead of bio_put in case it was bounced 347 * call endio instead of bio_put in case it was bounced
361 */ 348 */
362 bio_endio(bio, bio->bi_size, 0); 349 bio_endio(bio, 0);
363 } 350 }
364 351
365 return err; 352 return err;
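Finally, the scsi_lib.c hunk replaces the open-coded blk_rq_bio_prep()/ll_back_merge_fn()/biotail chaining with the block-layer helper blk_rq_append_bio(q, rq, bio), and scsi_bi_endio() shrinks to a bare bio_put() under the new prototype. A caller-side sketch of the helper, with an invented wrapper name and the same "end the bio rather than bio_put it, in case it was bounced" error handling used above:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_map_bio(struct request_queue *q, struct request *rq,
                           struct bio *bio)
{
        int err;

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);    /* mark the bio as a write */
        blk_queue_bounce(q, &bio);              /* may swap in a bounce bio */

        err = blk_rq_append_bio(q, rq, bio);    /* prep rq or back-merge onto it */
        if (err)
                bio_endio(bio, 0);              /* unwind a possibly bounced bio */
        return err;
}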