-rw-r--r--  Documentation/block/biodoc.txt  |  20
-rw-r--r--  Documentation/block/ioprio.txt  |  11
-rw-r--r--  MAINTAINERS  |  7
-rw-r--r--  block/Makefile  |  1
-rw-r--r--  block/blktrace.c  |  54
-rw-r--r--  block/compat_ioctl.c  |  814
-rw-r--r--  block/ioctl.c  |  21
-rw-r--r--  block/ll_rw_blk.c  |  291
-rw-r--r--  drivers/block/aoe/aoeblk.c  |  4
-rw-r--r--  drivers/block/aoe/aoecmd.c  |  2
-rw-r--r--  drivers/block/aoe/aoedev.c  |  4
-rw-r--r--  drivers/block/cciss.c  |  2
-rw-r--r--  drivers/block/cpqarray.c  |  2
-rw-r--r--  drivers/block/floppy.c  |  87
-rw-r--r--  drivers/block/lguest_blk.c  |  36
-rw-r--r--  drivers/block/loop.c  |  4
-rw-r--r--  drivers/block/nbd.c  |  59
-rw-r--r--  drivers/block/pktcdvd.c  |  25
-rw-r--r--  drivers/block/ps3disk.c  |  42
-rw-r--r--  drivers/block/rd.c  |  4
-rw-r--r--  drivers/block/umem.c  |  238
-rw-r--r--  drivers/block/umem.h (renamed from include/linux/umem.h)  |  19
-rw-r--r--  drivers/block/xen-blkfront.c  |  32
-rw-r--r--  drivers/block/xsysace.c  |  274
-rw-r--r--  drivers/ide/ide-floppy.c  |  52
-rw-r--r--  drivers/md/dm-crypt.c  |  21
-rw-r--r--  drivers/md/dm-emc.c  |  15
-rw-r--r--  drivers/md/dm-io.c  |  8
-rw-r--r--  drivers/md/dm-mpath.c  |  4
-rw-r--r--  drivers/md/dm-raid1.c  |  4
-rw-r--r--  drivers/md/dm-snap.c  |  2
-rw-r--r--  drivers/md/dm-zero.c  |  2
-rw-r--r--  drivers/md/dm.c  |  18
-rw-r--r--  drivers/md/faulty.c  |  10
-rw-r--r--  drivers/md/linear.c  |  4
-rw-r--r--  drivers/md/md.c  |  25
-rw-r--r--  drivers/md/multipath.c  |  13
-rw-r--r--  drivers/md/raid0.c  |  4
-rw-r--r--  drivers/md/raid1.c  |  30
-rw-r--r--  drivers/md/raid10.c  |  31
-rw-r--r--  drivers/md/raid5.c  |  48
-rw-r--r--  drivers/s390/block/dasd_diag.c  |  37
-rw-r--r--  drivers/s390/block/dasd_eckd.c  |  28
-rw-r--r--  drivers/s390/block/dasd_fba.c  |  28
-rw-r--r--  drivers/s390/block/dcssblk.c  |  4
-rw-r--r--  drivers/s390/block/xpram.c  |  6
-rw-r--r--  drivers/s390/char/tape_34xx.c  |  32
-rw-r--r--  drivers/s390/char/tape_3590.c  |  37
-rw-r--r--  drivers/scsi/scsi_lib.c  |  21
-rw-r--r--  fs/bio.c  |  50
-rw-r--r--  fs/block_dev.c  |  2
-rw-r--r--  fs/buffer.c  |  6
-rw-r--r--  fs/compat_ioctl.c  |  671
-rw-r--r--  fs/direct-io.c  |  13
-rw-r--r--  fs/fs-writeback.c  |  1
-rw-r--r--  fs/gfs2/super.c  |  4
-rw-r--r--  fs/jfs/jfs_logmgr.c  |  5
-rw-r--r--  fs/jfs/jfs_metapage.c  |  12
-rw-r--r--  fs/mpage.c  |  12
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c  |  4
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  |  4
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  |  4
-rw-r--r--  include/linux/bio.h  |  6
-rw-r--r--  include/linux/blkdev.h  |  25
-rw-r--r--  include/linux/blktrace_api.h  |  12
-rw-r--r--  include/linux/pci_ids.h  |  5
-rw-r--r--  include/linux/swap.h  |  2
-rw-r--r--  include/linux/writeback.h  |  1
-rw-r--r--  kernel/sched.c  |  1
-rw-r--r--  mm/bounce.c  |  25
-rw-r--r--  mm/page_io.c  |  12
-rw-r--r--  mm/readahead.c  |  1
72 files changed, 1728 insertions(+), 1687 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8af392fc6ef0..dc3f49e3e539 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -477,9 +477,9 @@ With this multipage bio design:
 the same bi_io_vec array, but with the index and size accordingly modified)
 - A linked list of bios is used as before for unrelated merges (*) - this
   avoids reallocs and makes independent completions easier to handle.
-- Code that traverses the req list needs to make a distinction between
-  segments of a request (bio_for_each_segment) and the distinct completion
-  units/bios (rq_for_each_bio).
+- Code that traverses the req list can find all the segments of a bio
+  by using rq_for_each_segment. This handles the fact that a request
+  has multiple bios, each of which can have multiple segments.
 - Drivers which can't process a large bio in one shot can use the bi_idx
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
@@ -664,14 +664,14 @@ in lvm or md.
 
 3.2.1 Traversing segments and completion units in a request
 
-The macros bio_for_each_segment() and rq_for_each_bio() should be used for
-traversing the bios in the request list (drivers should avoid directly
-trying to do it themselves). Using these helpers should also make it easier
-to cope with block changes in the future.
+The macro rq_for_each_segment() should be used for traversing the bios
+in the request list (drivers should avoid directly trying to do it
+themselves). Using these helpers should also make it easier to cope
+with block changes in the future.
 
-	rq_for_each_bio(bio, rq)
-		bio_for_each_segment(bio_vec, bio, i)
+	struct req_iterator iter;
+	rq_for_each_segment(bio_vec, rq, iter)
 		/* bio_vec is now current segment */
 
 I/O completion callbacks are per-bio rather than per-segment, so drivers
 that traverse bio chains on completion need to keep that in mind. Drivers
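
In a driver, the iterator documented above typically replaces the old nested
rq_for_each_bio()/bio_for_each_segment() pair with a single loop. A minimal
sketch (illustrative only; transfer_segment() is a made-up helper, and
page_address() assumes a lowmem page, as in the floppy example later in this
series):

	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter) {
		/* one contiguous chunk of the request's data */
		char *buf = page_address(bvec->bv_page) + bvec->bv_offset;

		transfer_segment(buf, bvec->bv_len);
	}
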
diff --git a/Documentation/block/ioprio.txt b/Documentation/block/ioprio.txt
index 1b930ef5a079..35e516b0b8a9 100644
--- a/Documentation/block/ioprio.txt
+++ b/Documentation/block/ioprio.txt
@@ -86,8 +86,15 @@ extern int sys_ioprio_get(int, int);
 #error "Unsupported arch"
 #endif
 
-_syscall3(int, ioprio_set, int, which, int, who, int, ioprio);
-_syscall2(int, ioprio_get, int, which, int, who);
+static inline int ioprio_set(int which, int who, int ioprio)
+{
+	return syscall(__NR_ioprio_set, which, who, ioprio);
+}
+
+static inline int ioprio_get(int which, int who)
+{
+	return syscall(__NR_ioprio_get, which, who);
+}
 
 enum {
 	IOPRIO_CLASS_NONE,
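
With the wrappers above, a process can change its own I/O priority without any
library support. A small usage sketch, assuming the IOPRIO_CLASS_*,
IOPRIO_WHO_PROCESS and IOPRIO_CLASS_SHIFT definitions that appear elsewhere in
this document's sample tool (best-effort class, priority level 4):

	int ioprio = 4 | (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT);

	if (ioprio_set(IOPRIO_WHO_PROCESS, 0, ioprio) == -1)
		perror("ioprio_set");

	/* read it back; who == 0 means "the calling process" */
	printf("ioprio now 0x%x\n", ioprio_get(IOPRIO_WHO_PROCESS, 0));
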
diff --git a/MAINTAINERS b/MAINTAINERS
index 22497de381da..60162706716f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4156,6 +4156,13 @@ W: http://oss.sgi.com/projects/xfs
 T:	git git://oss.sgi.com:8090/xfs/xfs-2.6.git
 S:	Supported
 
+XILINX SYSTEMACE DRIVER
+P:	Grant Likely
+M:	grant.likely@secretlab.ca
+W:	http://www.secretlab.ca/
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+
 XILINX UARTLITE SERIAL DRIVER
 P:	Peter Korsgaard
 M:	jacmet@sunsite.dk
diff --git a/block/Makefile b/block/Makefile
index 959feeb253be..3cfe7cebaa6a 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
 
 obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
+obj-$(CONFIG_COMPAT)		+= compat_ioctl.o
diff --git a/block/blktrace.c b/block/blktrace.c
index 20fa034ea4a2..775471ef84a5 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -312,33 +312,26 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
-			   char __user *arg)
+int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+		       struct blk_user_trace_setup *buts)
 {
-	struct blk_user_trace_setup buts;
 	struct blk_trace *old_bt, *bt = NULL;
 	struct dentry *dir = NULL;
 	char b[BDEVNAME_SIZE];
 	int ret, i;
 
-	if (copy_from_user(&buts, arg, sizeof(buts)))
-		return -EFAULT;
-
-	if (!buts.buf_size || !buts.buf_nr)
+	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
 
-	strcpy(buts.name, bdevname(bdev, b));
+	strcpy(buts->name, bdevname(bdev, b));
 
 	/*
 	 * some device names have larger paths - convert the slashes
 	 * to underscores for this to work as expected
 	 */
-	for (i = 0; i < strlen(buts.name); i++)
-		if (buts.name[i] == '/')
-			buts.name[i] = '_';
-
-	if (copy_to_user(arg, &buts, sizeof(buts)))
-		return -EFAULT;
+	for (i = 0; i < strlen(buts->name); i++)
+		if (buts->name[i] == '/')
+			buts->name[i] = '_';
 
 	ret = -ENOMEM;
 	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
@@ -350,7 +343,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 		goto err;
 
 	ret = -ENOENT;
-	dir = blk_create_tree(buts.name);
+	dir = blk_create_tree(buts->name);
 	if (!dir)
 		goto err;
 
@@ -363,20 +356,21 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 	if (!bt->dropped_file)
 		goto err;
 
-	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt);
+	bt->rchan = relay_open("trace", dir, buts->buf_size,
+				buts->buf_nr, &blk_relay_callbacks, bt);
 	if (!bt->rchan)
 		goto err;
 
-	bt->act_mask = buts.act_mask;
+	bt->act_mask = buts->act_mask;
 	if (!bt->act_mask)
 		bt->act_mask = (u16) -1;
 
-	bt->start_lba = buts.start_lba;
-	bt->end_lba = buts.end_lba;
+	bt->start_lba = buts->start_lba;
+	bt->end_lba = buts->end_lba;
 	if (!bt->end_lba)
 		bt->end_lba = -1ULL;
 
-	bt->pid = buts.pid;
+	bt->pid = buts->pid;
 	bt->trace_state = Blktrace_setup;
 
 	ret = -EBUSY;
@@ -401,6 +395,26 @@ err:
 	return ret;
 }
 
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+			   char __user *arg)
+{
+	struct blk_user_trace_setup buts;
+	int ret;
+
+	ret = copy_from_user(&buts, arg, sizeof(buts));
+	if (ret)
+		return -EFAULT;
+
+	ret = do_blk_trace_setup(q, bdev, &buts);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
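
For reference, the setup path above is normally driven from user space through
the BLKTRACESETUP/BLKTRACESTART ioctls on the block device node. A rough sketch
of that side (error handling omitted; the ioctl numbers come from linux/fs.h
and the structure from the blktrace API header, so treat the exact includes as
an assumption):

	struct blk_user_trace_setup buts = {
		.buf_size = 512 * 1024,	/* bytes per relay sub-buffer */
		.buf_nr   = 4,		/* number of sub-buffers */
		/* act_mask left 0: the kernel treats that as "trace everything" */
	};
	int fd = open("/dev/sda", O_RDONLY);

	ioctl(fd, BLKTRACESETUP, &buts);	/* kernel fills in buts.name */
	ioctl(fd, BLKTRACESTART);
	/* ... consume events from the relay channel, then ... */
	ioctl(fd, BLKTRACESTOP);
	ioctl(fd, BLKTRACETEARDOWN);
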
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
new file mode 100644
index 000000000000..f84093b97f70
--- /dev/null
+++ b/block/compat_ioctl.c
@@ -0,0 +1,814 @@
1#include <linux/blkdev.h>
2#include <linux/blkpg.h>
3#include <linux/blktrace_api.h>
4#include <linux/cdrom.h>
5#include <linux/compat.h>
6#include <linux/elevator.h>
7#include <linux/fd.h>
8#include <linux/hdreg.h>
9#include <linux/syscalls.h>
10#include <linux/smp_lock.h>
11#include <linux/types.h>
12#include <linux/uaccess.h>
13
14static int compat_put_ushort(unsigned long arg, unsigned short val)
15{
16 return put_user(val, (unsigned short __user *)compat_ptr(arg));
17}
18
19static int compat_put_int(unsigned long arg, int val)
20{
21 return put_user(val, (compat_int_t __user *)compat_ptr(arg));
22}
23
24static int compat_put_long(unsigned long arg, long val)
25{
26 return put_user(val, (compat_long_t __user *)compat_ptr(arg));
27}
28
29static int compat_put_ulong(unsigned long arg, compat_ulong_t val)
30{
31 return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
32}
33
34static int compat_put_u64(unsigned long arg, u64 val)
35{
36 return put_user(val, (compat_u64 __user *)compat_ptr(arg));
37}
38
39struct compat_hd_geometry {
40 unsigned char heads;
41 unsigned char sectors;
42 unsigned short cylinders;
43 u32 start;
44};
45
46static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
47 struct compat_hd_geometry __user *ugeo)
48{
49 struct hd_geometry geo;
50 int ret;
51
52 if (!ugeo)
53 return -EINVAL;
54 if (!disk->fops->getgeo)
55 return -ENOTTY;
56
57 /*
58 * We need to set the startsect first, the driver may
59 * want to override it.
60 */
61 geo.start = get_start_sect(bdev);
62 ret = disk->fops->getgeo(bdev, &geo);
63 if (ret)
64 return ret;
65
66 ret = copy_to_user(ugeo, &geo, 4);
67 ret |= __put_user(geo.start, &ugeo->start);
68 if (ret)
69 ret = -EFAULT;
70
71 return ret;
72}
73
74static int compat_hdio_ioctl(struct inode *inode, struct file *file,
75 struct gendisk *disk, unsigned int cmd, unsigned long arg)
76{
77 mm_segment_t old_fs = get_fs();
78 unsigned long kval;
79 unsigned int __user *uvp;
80 int error;
81
82 set_fs(KERNEL_DS);
83 error = blkdev_driver_ioctl(inode, file, disk,
84 cmd, (unsigned long)(&kval));
85 set_fs(old_fs);
86
87 if (error == 0) {
88 uvp = compat_ptr(arg);
89 if (put_user(kval, uvp))
90 error = -EFAULT;
91 }
92 return error;
93}
94
95struct compat_cdrom_read_audio {
96 union cdrom_addr addr;
97 u8 addr_format;
98 compat_int_t nframes;
99 compat_caddr_t buf;
100};
101
102struct compat_cdrom_generic_command {
103 unsigned char cmd[CDROM_PACKET_SIZE];
104 compat_caddr_t buffer;
105 compat_uint_t buflen;
106 compat_int_t stat;
107 compat_caddr_t sense;
108 unsigned char data_direction;
109 compat_int_t quiet;
110 compat_int_t timeout;
111 compat_caddr_t reserved[1];
112};
113
114static int compat_cdrom_read_audio(struct inode *inode, struct file *file,
115 struct gendisk *disk, unsigned int cmd, unsigned long arg)
116{
117 struct cdrom_read_audio __user *cdread_audio;
118 struct compat_cdrom_read_audio __user *cdread_audio32;
119 __u32 data;
120 void __user *datap;
121
122 cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
123 cdread_audio32 = compat_ptr(arg);
124
125 if (copy_in_user(&cdread_audio->addr,
126 &cdread_audio32->addr,
127 (sizeof(*cdread_audio32) -
128 sizeof(compat_caddr_t))))
129 return -EFAULT;
130
131 if (get_user(data, &cdread_audio32->buf))
132 return -EFAULT;
133 datap = compat_ptr(data);
134 if (put_user(datap, &cdread_audio->buf))
135 return -EFAULT;
136
137 return blkdev_driver_ioctl(inode, file, disk, cmd,
138 (unsigned long)cdread_audio);
139}
140
141static int compat_cdrom_generic_command(struct inode *inode, struct file *file,
142 struct gendisk *disk, unsigned int cmd, unsigned long arg)
143{
144 struct cdrom_generic_command __user *cgc;
145 struct compat_cdrom_generic_command __user *cgc32;
146 u32 data;
147 unsigned char dir;
148 int itmp;
149
150 cgc = compat_alloc_user_space(sizeof(*cgc));
151 cgc32 = compat_ptr(arg);
152
153 if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
154 get_user(data, &cgc32->buffer) ||
155 put_user(compat_ptr(data), &cgc->buffer) ||
156 copy_in_user(&cgc->buflen, &cgc32->buflen,
157 (sizeof(unsigned int) + sizeof(int))) ||
158 get_user(data, &cgc32->sense) ||
159 put_user(compat_ptr(data), &cgc->sense) ||
160 get_user(dir, &cgc32->data_direction) ||
161 put_user(dir, &cgc->data_direction) ||
162 get_user(itmp, &cgc32->quiet) ||
163 put_user(itmp, &cgc->quiet) ||
164 get_user(itmp, &cgc32->timeout) ||
165 put_user(itmp, &cgc->timeout) ||
166 get_user(data, &cgc32->reserved[0]) ||
167 put_user(compat_ptr(data), &cgc->reserved[0]))
168 return -EFAULT;
169
170 return blkdev_driver_ioctl(inode, file, disk, cmd, (unsigned long)cgc);
171}
172
173struct compat_blkpg_ioctl_arg {
174 compat_int_t op;
175 compat_int_t flags;
176 compat_int_t datalen;
177 compat_caddr_t data;
178};
179
180static int compat_blkpg_ioctl(struct inode *inode, struct file *file,
181 unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32)
182{
183 struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
184 compat_caddr_t udata;
185 compat_int_t n;
186 int err;
187
188 err = get_user(n, &ua32->op);
189 err |= put_user(n, &a->op);
190 err |= get_user(n, &ua32->flags);
191 err |= put_user(n, &a->flags);
192 err |= get_user(n, &ua32->datalen);
193 err |= put_user(n, &a->datalen);
194 err |= get_user(udata, &ua32->data);
195 err |= put_user(compat_ptr(udata), &a->data);
196 if (err)
197 return err;
198
199 return blkdev_ioctl(inode, file, cmd, (unsigned long)a);
200}
201
202#define BLKBSZGET_32 _IOR(0x12, 112, int)
203#define BLKBSZSET_32 _IOW(0x12, 113, int)
204#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
205
206struct compat_floppy_struct {
207 compat_uint_t size;
208 compat_uint_t sect;
209 compat_uint_t head;
210 compat_uint_t track;
211 compat_uint_t stretch;
212 unsigned char gap;
213 unsigned char rate;
214 unsigned char spec1;
215 unsigned char fmt_gap;
216 const compat_caddr_t name;
217};
218
219struct compat_floppy_drive_params {
220 char cmos;
221 compat_ulong_t max_dtr;
222 compat_ulong_t hlt;
223 compat_ulong_t hut;
224 compat_ulong_t srt;
225 compat_ulong_t spinup;
226 compat_ulong_t spindown;
227 unsigned char spindown_offset;
228 unsigned char select_delay;
229 unsigned char rps;
230 unsigned char tracks;
231 compat_ulong_t timeout;
232 unsigned char interleave_sect;
233 struct floppy_max_errors max_errors;
234 char flags;
235 char read_track;
236 short autodetect[8];
237 compat_int_t checkfreq;
238 compat_int_t native_format;
239};
240
241struct compat_floppy_drive_struct {
242 signed char flags;
243 compat_ulong_t spinup_date;
244 compat_ulong_t select_date;
245 compat_ulong_t first_read_date;
246 short probed_format;
247 short track;
248 short maxblock;
249 short maxtrack;
250 compat_int_t generation;
251 compat_int_t keep_data;
252 compat_int_t fd_ref;
253 compat_int_t fd_device;
254 compat_int_t last_checked;
255 compat_caddr_t dmabuf;
256 compat_int_t bufblocks;
257};
258
259struct compat_floppy_fdc_state {
260 compat_int_t spec1;
261 compat_int_t spec2;
262 compat_int_t dtr;
263 unsigned char version;
264 unsigned char dor;
265 compat_ulong_t address;
266 unsigned int rawcmd:2;
267 unsigned int reset:1;
268 unsigned int need_configure:1;
269 unsigned int perp_mode:2;
270 unsigned int has_fifo:1;
271 unsigned int driver_version;
272 unsigned char track[4];
273};
274
275struct compat_floppy_write_errors {
276 unsigned int write_errors;
277 compat_ulong_t first_error_sector;
278 compat_int_t first_error_generation;
279 compat_ulong_t last_error_sector;
280 compat_int_t last_error_generation;
281 compat_uint_t badness;
282};
283
284#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
285#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
286#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct)
287#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
288#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
289#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
290#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
291#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
292#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
293
294static struct {
295 unsigned int cmd32;
296 unsigned int cmd;
297} fd_ioctl_trans_table[] = {
298 { FDSETPRM32, FDSETPRM },
299 { FDDEFPRM32, FDDEFPRM },
300 { FDGETPRM32, FDGETPRM },
301 { FDSETDRVPRM32, FDSETDRVPRM },
302 { FDGETDRVPRM32, FDGETDRVPRM },
303 { FDGETDRVSTAT32, FDGETDRVSTAT },
304 { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
305 { FDGETFDCSTAT32, FDGETFDCSTAT },
306 { FDWERRORGET32, FDWERRORGET }
307};
308
309#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
310
311static int compat_fd_ioctl(struct inode *inode, struct file *file,
312 struct gendisk *disk, unsigned int cmd, unsigned long arg)
313{
314 mm_segment_t old_fs = get_fs();
315 void *karg = NULL;
316 unsigned int kcmd = 0;
317 int i, err;
318
319 for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
320 if (cmd == fd_ioctl_trans_table[i].cmd32) {
321 kcmd = fd_ioctl_trans_table[i].cmd;
322 break;
323 }
324 if (!kcmd)
325 return -EINVAL;
326
327 switch (cmd) {
328 case FDSETPRM32:
329 case FDDEFPRM32:
330 case FDGETPRM32:
331 {
332 compat_uptr_t name;
333 struct compat_floppy_struct __user *uf;
334 struct floppy_struct *f;
335
336 uf = compat_ptr(arg);
337 f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
338 if (!karg)
339 return -ENOMEM;
340 if (cmd == FDGETPRM32)
341 break;
342 err = __get_user(f->size, &uf->size);
343 err |= __get_user(f->sect, &uf->sect);
344 err |= __get_user(f->head, &uf->head);
345 err |= __get_user(f->track, &uf->track);
346 err |= __get_user(f->stretch, &uf->stretch);
347 err |= __get_user(f->gap, &uf->gap);
348 err |= __get_user(f->rate, &uf->rate);
349 err |= __get_user(f->spec1, &uf->spec1);
350 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
351 err |= __get_user(name, &uf->name);
352 f->name = compat_ptr(name);
353 if (err) {
354 err = -EFAULT;
355 goto out;
356 }
357 break;
358 }
359 case FDSETDRVPRM32:
360 case FDGETDRVPRM32:
361 {
362 struct compat_floppy_drive_params __user *uf;
363 struct floppy_drive_params *f;
364
365 uf = compat_ptr(arg);
366 f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
367 if (!karg)
368 return -ENOMEM;
369 if (cmd == FDGETDRVPRM32)
370 break;
371 err = __get_user(f->cmos, &uf->cmos);
372 err |= __get_user(f->max_dtr, &uf->max_dtr);
373 err |= __get_user(f->hlt, &uf->hlt);
374 err |= __get_user(f->hut, &uf->hut);
375 err |= __get_user(f->srt, &uf->srt);
376 err |= __get_user(f->spinup, &uf->spinup);
377 err |= __get_user(f->spindown, &uf->spindown);
378 err |= __get_user(f->spindown_offset, &uf->spindown_offset);
379 err |= __get_user(f->select_delay, &uf->select_delay);
380 err |= __get_user(f->rps, &uf->rps);
381 err |= __get_user(f->tracks, &uf->tracks);
382 err |= __get_user(f->timeout, &uf->timeout);
383 err |= __get_user(f->interleave_sect, &uf->interleave_sect);
384 err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
385 err |= __get_user(f->flags, &uf->flags);
386 err |= __get_user(f->read_track, &uf->read_track);
387 err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
388 err |= __get_user(f->checkfreq, &uf->checkfreq);
389 err |= __get_user(f->native_format, &uf->native_format);
390 if (err) {
391 err = -EFAULT;
392 goto out;
393 }
394 break;
395 }
396 case FDGETDRVSTAT32:
397 case FDPOLLDRVSTAT32:
398 karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
399 if (!karg)
400 return -ENOMEM;
401 break;
402 case FDGETFDCSTAT32:
403 karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
404 if (!karg)
405 return -ENOMEM;
406 break;
407 case FDWERRORGET32:
408 karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
409 if (!karg)
410 return -ENOMEM;
411 break;
412 default:
413 return -EINVAL;
414 }
415 set_fs(KERNEL_DS);
416 err = blkdev_driver_ioctl(inode, file, disk, kcmd, (unsigned long)karg);
417 set_fs(old_fs);
418 if (err)
419 goto out;
420 switch (cmd) {
421 case FDGETPRM32:
422 {
423 struct floppy_struct *f = karg;
424 struct compat_floppy_struct __user *uf = compat_ptr(arg);
425
426 err = __put_user(f->size, &uf->size);
427 err |= __put_user(f->sect, &uf->sect);
428 err |= __put_user(f->head, &uf->head);
429 err |= __put_user(f->track, &uf->track);
430 err |= __put_user(f->stretch, &uf->stretch);
431 err |= __put_user(f->gap, &uf->gap);
432 err |= __put_user(f->rate, &uf->rate);
433 err |= __put_user(f->spec1, &uf->spec1);
434 err |= __put_user(f->fmt_gap, &uf->fmt_gap);
435 err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
436 break;
437 }
438 case FDGETDRVPRM32:
439 {
440 struct compat_floppy_drive_params __user *uf;
441 struct floppy_drive_params *f = karg;
442
443 uf = compat_ptr(arg);
444 err = __put_user(f->cmos, &uf->cmos);
445 err |= __put_user(f->max_dtr, &uf->max_dtr);
446 err |= __put_user(f->hlt, &uf->hlt);
447 err |= __put_user(f->hut, &uf->hut);
448 err |= __put_user(f->srt, &uf->srt);
449 err |= __put_user(f->spinup, &uf->spinup);
450 err |= __put_user(f->spindown, &uf->spindown);
451 err |= __put_user(f->spindown_offset, &uf->spindown_offset);
452 err |= __put_user(f->select_delay, &uf->select_delay);
453 err |= __put_user(f->rps, &uf->rps);
454 err |= __put_user(f->tracks, &uf->tracks);
455 err |= __put_user(f->timeout, &uf->timeout);
456 err |= __put_user(f->interleave_sect, &uf->interleave_sect);
457 err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
458 err |= __put_user(f->flags, &uf->flags);
459 err |= __put_user(f->read_track, &uf->read_track);
460 err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
461 err |= __put_user(f->checkfreq, &uf->checkfreq);
462 err |= __put_user(f->native_format, &uf->native_format);
463 break;
464 }
465 case FDGETDRVSTAT32:
466 case FDPOLLDRVSTAT32:
467 {
468 struct compat_floppy_drive_struct __user *uf;
469 struct floppy_drive_struct *f = karg;
470
471 uf = compat_ptr(arg);
472 err = __put_user(f->flags, &uf->flags);
473 err |= __put_user(f->spinup_date, &uf->spinup_date);
474 err |= __put_user(f->select_date, &uf->select_date);
475 err |= __put_user(f->first_read_date, &uf->first_read_date);
476 err |= __put_user(f->probed_format, &uf->probed_format);
477 err |= __put_user(f->track, &uf->track);
478 err |= __put_user(f->maxblock, &uf->maxblock);
479 err |= __put_user(f->maxtrack, &uf->maxtrack);
480 err |= __put_user(f->generation, &uf->generation);
481 err |= __put_user(f->keep_data, &uf->keep_data);
482 err |= __put_user(f->fd_ref, &uf->fd_ref);
483 err |= __put_user(f->fd_device, &uf->fd_device);
484 err |= __put_user(f->last_checked, &uf->last_checked);
485 err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
486 err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
487 break;
488 }
489 case FDGETFDCSTAT32:
490 {
491 struct compat_floppy_fdc_state __user *uf;
492 struct floppy_fdc_state *f = karg;
493
494 uf = compat_ptr(arg);
495 err = __put_user(f->spec1, &uf->spec1);
496 err |= __put_user(f->spec2, &uf->spec2);
497 err |= __put_user(f->dtr, &uf->dtr);
498 err |= __put_user(f->version, &uf->version);
499 err |= __put_user(f->dor, &uf->dor);
500 err |= __put_user(f->address, &uf->address);
501 err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
502 (char *)&f->address + sizeof(f->address), sizeof(int));
503 err |= __put_user(f->driver_version, &uf->driver_version);
504 err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
505 break;
506 }
507 case FDWERRORGET32:
508 {
509 struct compat_floppy_write_errors __user *uf;
510 struct floppy_write_errors *f = karg;
511
512 uf = compat_ptr(arg);
513 err = __put_user(f->write_errors, &uf->write_errors);
514 err |= __put_user(f->first_error_sector, &uf->first_error_sector);
515 err |= __put_user(f->first_error_generation, &uf->first_error_generation);
516 err |= __put_user(f->last_error_sector, &uf->last_error_sector);
517 err |= __put_user(f->last_error_generation, &uf->last_error_generation);
518 err |= __put_user(f->badness, &uf->badness);
519 break;
520 }
521 default:
522 break;
523 }
524 if (err)
525 err = -EFAULT;
526
527out:
528 kfree(karg);
529 return err;
530}
531
532struct compat_blk_user_trace_setup {
533 char name[32];
534 u16 act_mask;
535 u32 buf_size;
536 u32 buf_nr;
537 compat_u64 start_lba;
538 compat_u64 end_lba;
539 u32 pid;
540};
541#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
542
543static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
544{
545 struct blk_user_trace_setup buts;
546 struct compat_blk_user_trace_setup cbuts;
547 struct request_queue *q;
548 int ret;
549
550 q = bdev_get_queue(bdev);
551 if (!q)
552 return -ENXIO;
553
554 if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
555 return -EFAULT;
556
557 buts = (struct blk_user_trace_setup) {
558 .act_mask = cbuts.act_mask,
559 .buf_size = cbuts.buf_size,
560 .buf_nr = cbuts.buf_nr,
561 .start_lba = cbuts.start_lba,
562 .end_lba = cbuts.end_lba,
563 .pid = cbuts.pid,
564 };
565 memcpy(&buts.name, &cbuts.name, 32);
566
567 mutex_lock(&bdev->bd_mutex);
568 ret = do_blk_trace_setup(q, bdev, &buts);
569 mutex_unlock(&bdev->bd_mutex);
570 if (ret)
571 return ret;
572
573 if (copy_to_user(arg, &buts.name, 32))
574 return -EFAULT;
575
576 return 0;
577}
578
579static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
580 struct gendisk *disk, unsigned cmd, unsigned long arg)
581{
582 int ret;
583
584 switch (arg) {
585 case HDIO_GET_UNMASKINTR:
586 case HDIO_GET_MULTCOUNT:
587 case HDIO_GET_KEEPSETTINGS:
588 case HDIO_GET_32BIT:
589 case HDIO_GET_NOWERR:
590 case HDIO_GET_DMA:
591 case HDIO_GET_NICE:
592 case HDIO_GET_WCACHE:
593 case HDIO_GET_ACOUSTIC:
594 case HDIO_GET_ADDRESS:
595 case HDIO_GET_BUSSTATE:
596 return compat_hdio_ioctl(inode, file, disk, cmd, arg);
597 case FDSETPRM32:
598 case FDDEFPRM32:
599 case FDGETPRM32:
600 case FDSETDRVPRM32:
601 case FDGETDRVPRM32:
602 case FDGETDRVSTAT32:
603 case FDPOLLDRVSTAT32:
604 case FDGETFDCSTAT32:
605 case FDWERRORGET32:
606 return compat_fd_ioctl(inode, file, disk, cmd, arg);
607 case CDROMREADAUDIO:
608 return compat_cdrom_read_audio(inode, file, disk, cmd, arg);
609 case CDROM_SEND_PACKET:
610 return compat_cdrom_generic_command(inode, file, disk, cmd, arg);
611
612 /*
613 * No handler required for the ones below, we just need to
614 * convert arg to a 64 bit pointer.
615 */
616 case BLKSECTSET:
617 /*
618 * 0x03 -- HD/IDE ioctl's used by hdparm and friends.
619 * Some need translations, these do not.
620 */
621 case HDIO_GET_IDENTITY:
622 case HDIO_DRIVE_TASK:
623 case HDIO_DRIVE_CMD:
624 case HDIO_SCAN_HWIF:
625 /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
626 case 0x330:
627 /* 0x02 -- Floppy ioctls */
628 case FDMSGON:
629 case FDMSGOFF:
630 case FDSETEMSGTRESH:
631 case FDFLUSH:
632 case FDWERRORCLR:
633 case FDSETMAXERRS:
634 case FDGETMAXERRS:
635 case FDGETDRVTYP:
636 case FDEJECT:
637 case FDCLRPRM:
638 case FDFMTBEG:
639 case FDFMTEND:
640 case FDRESET:
641 case FDTWADDLE:
642 case FDFMTTRK:
643 case FDRAWCMD:
644 /* CDROM stuff */
645 case CDROMPAUSE:
646 case CDROMRESUME:
647 case CDROMPLAYMSF:
648 case CDROMPLAYTRKIND:
649 case CDROMREADTOCHDR:
650 case CDROMREADTOCENTRY:
651 case CDROMSTOP:
652 case CDROMSTART:
653 case CDROMEJECT:
654 case CDROMVOLCTRL:
655 case CDROMSUBCHNL:
656 case CDROMMULTISESSION:
657 case CDROM_GET_MCN:
658 case CDROMRESET:
659 case CDROMVOLREAD:
660 case CDROMSEEK:
661 case CDROMPLAYBLK:
662 case CDROMCLOSETRAY:
663 case CDROM_DISC_STATUS:
664 case CDROM_CHANGER_NSLOTS:
665 case CDROM_GET_CAPABILITY:
666 /* Ignore cdrom.h about these next 5 ioctls, they absolutely do
667 * not take a struct cdrom_read, instead they take a struct cdrom_msf
668 * which is compatible.
669 */
670 case CDROMREADMODE2:
671 case CDROMREADMODE1:
672 case CDROMREADRAW:
673 case CDROMREADCOOKED:
674 case CDROMREADALL:
675 /* DVD ioctls */
676 case DVD_READ_STRUCT:
677 case DVD_WRITE_STRUCT:
678 case DVD_AUTH:
679 arg = (unsigned long)compat_ptr(arg);
680 /* These intepret arg as an unsigned long, not as a pointer,
681 * so we must not do compat_ptr() conversion. */
682 case HDIO_SET_MULTCOUNT:
683 case HDIO_SET_UNMASKINTR:
684 case HDIO_SET_KEEPSETTINGS:
685 case HDIO_SET_32BIT:
686 case HDIO_SET_NOWERR:
687 case HDIO_SET_DMA:
688 case HDIO_SET_PIO_MODE:
689 case HDIO_SET_NICE:
690 case HDIO_SET_WCACHE:
691 case HDIO_SET_ACOUSTIC:
692 case HDIO_SET_BUSSTATE:
693 case HDIO_SET_ADDRESS:
694 case CDROMEJECT_SW:
695 case CDROM_SET_OPTIONS:
696 case CDROM_CLEAR_OPTIONS:
697 case CDROM_SELECT_SPEED:
698 case CDROM_SELECT_DISC:
699 case CDROM_MEDIA_CHANGED:
700 case CDROM_DRIVE_STATUS:
701 case CDROM_LOCKDOOR:
702 case CDROM_DEBUG:
703 break;
704 default:
705 /* unknown ioctl number */
706 return -ENOIOCTLCMD;
707 }
708
709 if (disk->fops->unlocked_ioctl)
710 return disk->fops->unlocked_ioctl(file, cmd, arg);
711
712 if (disk->fops->ioctl) {
713 lock_kernel();
714 ret = disk->fops->ioctl(inode, file, cmd, arg);
715 unlock_kernel();
716 return ret;
717 }
718
719 return -ENOTTY;
720}
721
722static int compat_blkdev_locked_ioctl(struct inode *inode, struct file *file,
723 struct block_device *bdev,
724 unsigned cmd, unsigned long arg)
725{
726 struct backing_dev_info *bdi;
727
728 switch (cmd) {
729 case BLKRAGET:
730 case BLKFRAGET:
731 if (!arg)
732 return -EINVAL;
733 bdi = blk_get_backing_dev_info(bdev);
734 if (bdi == NULL)
735 return -ENOTTY;
736 return compat_put_long(arg,
737 (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
738 case BLKROGET: /* compatible */
739 return compat_put_int(arg, bdev_read_only(bdev) != 0);
740 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
741 return compat_put_int(arg, block_size(bdev));
742 case BLKSSZGET: /* get block device hardware sector size */
743 return compat_put_int(arg, bdev_hardsect_size(bdev));
744 case BLKSECTGET:
745 return compat_put_ushort(arg,
746 bdev_get_queue(bdev)->max_sectors);
747 case BLKRASET: /* compatible, but no compat_ptr (!) */
748 case BLKFRASET:
749 if (!capable(CAP_SYS_ADMIN))
750 return -EACCES;
751 bdi = blk_get_backing_dev_info(bdev);
752 if (bdi == NULL)
753 return -ENOTTY;
754 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
755 return 0;
756 case BLKGETSIZE:
757 if ((bdev->bd_inode->i_size >> 9) > ~0UL)
758 return -EFBIG;
759 return compat_put_ulong(arg, bdev->bd_inode->i_size >> 9);
760
761 case BLKGETSIZE64_32:
762 return compat_put_u64(arg, bdev->bd_inode->i_size);
763
764 case BLKTRACESETUP32:
765 return compat_blk_trace_setup(bdev, compat_ptr(arg));
766 case BLKTRACESTART: /* compatible */
767 case BLKTRACESTOP: /* compatible */
768 case BLKTRACETEARDOWN: /* compatible */
769 return blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
770 }
771 return -ENOIOCTLCMD;
772}
773
774/* Most of the generic ioctls are handled in the normal fallback path.
775 This assumes the blkdev's low level compat_ioctl always returns
776 ENOIOCTLCMD for unknown ioctls. */
777long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
778{
779 int ret = -ENOIOCTLCMD;
780 struct inode *inode = file->f_mapping->host;
781 struct block_device *bdev = inode->i_bdev;
782 struct gendisk *disk = bdev->bd_disk;
783
784 switch (cmd) {
785 case HDIO_GETGEO:
786 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
787 case BLKFLSBUF:
788 case BLKROSET:
789 /*
790 * the ones below are implemented in blkdev_locked_ioctl,
791 * but we call blkdev_ioctl, which gets the lock for us
792 */
793 case BLKRRPART:
794 return blkdev_ioctl(inode, file, cmd,
795 (unsigned long)compat_ptr(arg));
796 case BLKBSZSET_32:
797 return blkdev_ioctl(inode, file, BLKBSZSET,
798 (unsigned long)compat_ptr(arg));
799 case BLKPG:
800 return compat_blkpg_ioctl(inode, file, cmd, compat_ptr(arg));
801 }
802
803 lock_kernel();
804 ret = compat_blkdev_locked_ioctl(inode, file, bdev, cmd, arg);
805 /* FIXME: why do we assume -> compat_ioctl needs the BKL? */
806 if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
807 ret = disk->fops->compat_ioctl(file, cmd, arg);
808 unlock_kernel();
809
810 if (ret != -ENOIOCTLCMD)
811 return ret;
812
813 return compat_blkdev_driver_ioctl(inode, file, disk, cmd, arg);
814}
diff --git a/block/ioctl.c b/block/ioctl.c
index f7e3e8abf887..52d6385216ad 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -217,6 +217,10 @@ int blkdev_driver_ioctl(struct inode *inode, struct file *file,
 }
 EXPORT_SYMBOL_GPL(blkdev_driver_ioctl);
 
+/*
+ * always keep this in sync with compat_blkdev_ioctl() and
+ * compat_blkdev_locked_ioctl()
+ */
 int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 			unsigned long arg)
 {
@@ -284,21 +288,4 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 
 	return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
 }
-
-/* Most of the generic ioctls are handled in the normal fallback path.
-   This assumes the blkdev's low level compat_ioctl always returns
-   ENOIOCTLCMD for unknown ioctls. */
-long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
-{
-	struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
-	struct gendisk *disk = bdev->bd_disk;
-	int ret = -ENOIOCTLCMD;
-	if (disk->fops->compat_ioctl) {
-		lock_kernel();
-		ret = disk->fops->compat_ioctl(file, cmd, arg);
-		unlock_kernel();
-	}
-	return ret;
-}
-
 EXPORT_SYMBOL_GPL(blkdev_ioctl);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ed39313c4085..cd9d2c5d91ae 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -42,6 +42,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+			    struct bio *bio);
 
 /*
  * For the allocated request tables
@@ -428,7 +431,6 @@ static void queue_flush(struct request_queue *q, unsigned which)
 static inline struct request *start_ordered(struct request_queue *q,
 					    struct request *rq)
 {
-	q->bi_size = 0;
 	q->orderr = 0;
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
@@ -525,56 +527,36 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	return 1;
 }
 
-static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
-{
-	struct request_queue *q = bio->bi_private;
-
-	/*
-	 * This is dry run, restore bio_sector and size. We'll finish
-	 * this request again with the original bi_end_io after an
-	 * error occurs or post flush is complete.
-	 */
-	q->bi_size += bytes;
-
-	if (bio->bi_size)
-		return 1;
-
-	/* Reset bio */
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio->bi_size = q->bi_size;
-	bio->bi_sector -= (q->bi_size >> 9);
-	q->bi_size = 0;
-
-	return 0;
-}
-
-static int ordered_bio_endio(struct request *rq, struct bio *bio,
-			     unsigned int nbytes, int error)
+static void req_bio_endio(struct request *rq, struct bio *bio,
+			  unsigned int nbytes, int error)
 {
 	struct request_queue *q = rq->q;
-	bio_end_io_t *endio;
-	void *private;
 
-	if (&q->bar_rq != rq)
-		return 0;
-
-	/*
-	 * Okay, this is the barrier request in progress, dry finish it.
-	 */
-	if (error && !q->orderr)
-		q->orderr = error;
+	if (&q->bar_rq != rq) {
+		if (error)
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+			error = -EIO;
 
-	endio = bio->bi_end_io;
-	private = bio->bi_private;
-	bio->bi_end_io = flush_dry_bio_endio;
-	bio->bi_private = q;
-
-	bio_endio(bio, nbytes, error);
+		if (unlikely(nbytes > bio->bi_size)) {
+			printk("%s: want %u bytes done, only %u left\n",
+			       __FUNCTION__, nbytes, bio->bi_size);
+			nbytes = bio->bi_size;
+		}
 
-	bio->bi_end_io = endio;
-	bio->bi_private = private;
+		bio->bi_size -= nbytes;
+		bio->bi_sector += (nbytes >> 9);
+		if (bio->bi_size == 0)
+			bio_endio(bio, error);
+	} else {
 
-	return 1;
+		/*
+		 * Okay, this is the barrier request in progress, just
+		 * record the error;
+		 */
+		if (error && !q->orderr)
+			q->orderr = error;
+	}
 }
 
 /**
@@ -1220,16 +1202,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
+	struct request rq;
+	struct bio *nxt = bio->bi_next;
+	rq.q = q;
+	rq.bio = rq.biotail = bio;
+	bio->bi_next = NULL;
+	blk_recalc_rq_segments(&rq);
+	bio->bi_next = nxt;
+	bio->bi_phys_segments = rq.nr_phys_segments;
+	bio->bi_hw_segments = rq.nr_hw_segments;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+	int nr_phys_segs;
+	int nr_hw_segs;
+	unsigned int phys_size;
+	unsigned int hw_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+	int seg_size;
+	int hw_seg_size;
+	int cluster;
+	struct req_iterator iter;
 	int high, highprv = 1;
+	struct request_queue *q = rq->q;
 
-	if (unlikely(!bio->bi_io_vec))
+	if (!rq->bio)
 		return;
 
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
-	bio_for_each_segment(bv, bio, i) {
+	hw_seg_size = seg_size = 0;
+	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	rq_for_each_segment(bv, rq, iter) {
 		/*
 		 * the trick here is making sure that a high page is never
 		 * considered part of another segment, since that might
@@ -1255,12 +1261,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 		}
 new_segment:
 		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
 			hw_seg_size += bv->bv_len;
-		} else {
+		else {
 new_hw_segment:
-			if (hw_seg_size > bio->bi_hw_front_size)
-				bio->bi_hw_front_size = hw_seg_size;
+			if (nr_hw_segs == 1 &&
+			    hw_seg_size > rq->bio->bi_hw_front_size)
+				rq->bio->bi_hw_front_size = hw_seg_size;
 			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
 			nr_hw_segs++;
 		}
@@ -1270,15 +1277,15 @@ new_hw_segment:
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
-	if (hw_seg_size > bio->bi_hw_back_size)
-		bio->bi_hw_back_size = hw_seg_size;
-	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-		bio->bi_hw_front_size = hw_seg_size;
-	bio->bi_phys_segments = nr_phys_segs;
-	bio->bi_hw_segments = nr_hw_segs;
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+	if (nr_hw_segs == 1 &&
+	    hw_seg_size > rq->bio->bi_hw_front_size)
+		rq->bio->bi_hw_front_size = hw_seg_size;
+	if (hw_seg_size > rq->biotail->bi_hw_back_size)
+		rq->biotail->bi_hw_back_size = hw_seg_size;
+	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_hw_segments = nr_hw_segs;
 }
-EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
@@ -1325,8 +1332,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		  struct scatterlist *sg)
 {
 	struct bio_vec *bvec, *bvprv;
-	struct bio *bio;
-	int nsegs, i, cluster;
+	struct req_iterator iter;
+	int nsegs, cluster;
 
 	nsegs = 0;
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
@@ -1335,35 +1342,30 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * for each bio in rq
 	 */
 	bvprv = NULL;
-	rq_for_each_bio(bio, rq) {
-		/*
-		 * for each segment in bio
-		 */
-		bio_for_each_segment(bvec, bio, i) {
-			int nbytes = bvec->bv_len;
+	rq_for_each_segment(bvec, rq, iter) {
+		int nbytes = bvec->bv_len;
 
 		if (bvprv && cluster) {
 			if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
 				goto new_segment;
 
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 				goto new_segment;
 			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 				goto new_segment;
 
 			sg[nsegs - 1].length += nbytes;
 		} else {
 new_segment:
 			memset(&sg[nsegs],0,sizeof(struct scatterlist));
 			sg[nsegs].page = bvec->bv_page;
 			sg[nsegs].length = nbytes;
 			sg[nsegs].offset = bvec->bv_offset;
 
 			nsegs++;
 		}
 		bvprv = bvec;
-		} /* segments in bio */
-	} /* bios in rq */
+	} /* segments in rq */
 
 	return nsegs;
 }
@@ -1420,7 +1422,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	return 1;
 }
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
+static int ll_back_merge_fn(struct request_queue *q, struct request *req,
+			    struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1456,7 +1459,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *b
 
 	return ll_new_hw_segment(q, req, bio);
 }
-EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(struct request_queue *q, struct request *req,
 			     struct bio *bio)
@@ -2346,6 +2348,23 @@ static int __blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
 
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+		      struct bio *bio)
+{
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!ll_back_merge_fn(q, rq, bio))
+		return -EINVAL;
+	else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+
+		rq->data_len += bio->bi_size;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(blk_rq_append_bio);
+
 static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 			     void __user *ubuf, unsigned int len)
 {
@@ -2377,23 +2396,12 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 */
 	bio_get(bio);
 
-	if (!rq->bio)
-		blk_rq_bio_prep(q, rq, bio);
-	else if (!ll_back_merge_fn(q, rq, bio)) {
-		ret = -EINVAL;
-		goto unmap_bio;
-	} else {
-		rq->biotail->bi_next = bio;
-		rq->biotail = bio;
-
-		rq->data_len += bio->bi_size;
-	}
-
-	return bio->bi_size;
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (!ret)
+		return bio->bi_size;
 
-unmap_bio:
 	/* if it was boucned we must call the end io function */
-	bio_endio(bio, bio->bi_size, 0);
+	bio_endio(bio, 0);
 	__blk_rq_unmap_user(orig_bio);
 	bio_put(bio);
 	return ret;
@@ -2502,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return PTR_ERR(bio);
 
 	if (bio->bi_size != len) {
-		bio_endio(bio, bio->bi_size, 0);
+		bio_endio(bio, 0);
 		bio_unmap_user(bio);
 		return -EINVAL;
 	}
@@ -2912,15 +2920,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
-	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
-	req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
-	req->nr_phys_segments = bio_phys_segments(req->q, bio);
-	req->nr_hw_segments = bio_hw_segments(req->q, bio);
-	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->bio = req->biotail = bio;
 	req->ioprio = bio_prio(bio);
-	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
+	blk_rq_bio_prep(req->q, req, bio);
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -3038,7 +3040,7 @@ out:
 	return 0;
 
 end_io:
-	bio_endio(bio, nr_sectors << 9, err);
+	bio_endio(bio, err);
 	return 0;
 }
 
@@ -3185,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio)
 				bdevname(bio->bi_bdev, b),
 				(long long) bio->bi_sector);
 end_io:
-			bio_endio(bio, bio->bi_size, -EIO);
+			bio_endio(bio, -EIO);
 			break;
 		}
 
@@ -3329,48 +3331,6 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-	struct bio *bio, *prevbio = NULL;
-	int nr_phys_segs, nr_hw_segs;
-	unsigned int phys_size, hw_size;
-	struct request_queue *q = rq->q;
-
-	if (!rq->bio)
-		return;
-
-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-	rq_for_each_bio(bio, rq) {
-		/* Force bio hw/phys segs to be recalculated. */
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-		nr_phys_segs += bio_phys_segments(q, bio);
-		nr_hw_segs += bio_hw_segments(q, bio);
-		if (prevbio) {
-			int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-			int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-			if (blk_phys_contig_segment(q, prevbio, bio) &&
-			    pseg <= q->max_segment_size) {
-				nr_phys_segs--;
-				phys_size += prevbio->bi_size + bio->bi_size;
-			} else
-				phys_size = 0;
-
-			if (blk_hw_contig_segment(q, prevbio, bio) &&
-			    hseg <= q->max_segment_size) {
-				nr_hw_segs--;
-				hw_size += prevbio->bi_size + bio->bi_size;
-			} else
-				hw_size = 0;
-		}
-		prevbio = bio;
-	}
-
-	rq->nr_phys_segments = nr_phys_segs;
-	rq->nr_hw_segments = nr_hw_segs;
-}
-
 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
 	if (blk_fs_request(rq)) {
@@ -3442,8 +3402,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
-			if (!ordered_bio_endio(req, bio, nbytes, error))
-				bio_endio(bio, nbytes, error);
+			req_bio_endio(req, bio, nbytes, error);
 			next_idx = 0;
 			bio_nbytes = 0;
 		} else {
@@ -3498,8 +3457,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	 * if the request wasn't completed, update state
 	 */
 	if (bio_nbytes) {
-		if (!ordered_bio_endio(req, bio, bio_nbytes, error))
-			bio_endio(bio, bio_nbytes, error);
+		req_bio_endio(req, bio, bio_nbytes, error);
 		bio->bi_idx += next_idx;
 		bio_iovec(bio)->bv_offset += nr_bytes;
 		bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3574,7 +3532,7 @@ static void blk_done_softirq(struct softirq_action *h)
 	}
 }
 
-static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
 			  void *hcpu)
 {
 	/*
@@ -3595,7 +3553,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block __devinitdata blk_cpu_notifier = {
+static struct notifier_block blk_cpu_notifier __cpuinitdata = {
 	.notifier_call	= blk_cpu_notify,
 };
 
@@ -3680,8 +3638,8 @@ void end_request(struct request *req, int uptodate)
 
 EXPORT_SYMBOL(end_request);
 
-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
 	rq->cmd_flags |= (bio->bi_rw & 3);
@@ -3695,9 +3653,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
-}
 
-EXPORT_SYMBOL(blk_rq_bio_prep);
+	if (bio->bi_bdev)
+		rq->rq_disk = bio->bi_bdev->bd_disk;
+}
 
 int kblockd_schedule_work(struct work_struct *work)
 {
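
The newly exported blk_rq_append_bio() bundles the "prep or back-merge" logic
that __blk_rq_map_user() used to open-code, so other request builders can share
it. A hedged sketch of a caller (the function name is illustrative and not part
of the patch):

	static int attach_bio(struct request_queue *q, struct request *rq,
			      struct bio *bio)
	{
		int ret;

		ret = blk_rq_append_bio(q, rq, bio);
		if (ret) {
			/* could not merge: complete the bio with the error,
			 * using the new two-argument bio_endio() */
			bio_endio(bio, ret);
		}
		return ret;
	}
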
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 007faaf008e7..b1d00ef6659c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -138,7 +138,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 	buf = mempool_alloc(d->bufpool, GFP_NOIO);
 	if (buf == NULL) {
 		printk(KERN_INFO "aoe: buf allocation failure\n");
-		bio_endio(bio, bio->bi_size, -ENOMEM);
+		bio_endio(bio, -ENOMEM);
 		return 0;
 	}
 	memset(buf, 0, sizeof(*buf));
@@ -159,7 +159,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 			d->aoemajor, d->aoeminor);
 		spin_unlock_irqrestore(&d->lock, flags);
 		mempool_free(buf, d->bufpool);
-		bio_endio(bio, bio->bi_size, -ENXIO);
+		bio_endio(bio, -ENXIO);
 		return 0;
 	}
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 01fbdd38e3be..5abae34ad65b 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -652,7 +652,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 		disk_stat_add(disk, sectors[rw], n_sect);
 		disk_stat_add(disk, io_ticks, duration);
 		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
-		bio_endio(buf->bio, buf->bio->bi_size, n);
+		bio_endio(buf->bio, n);
 		mempool_free(buf, d->bufpool);
 	}
 	}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 05a97197c918..51f50710e5fc 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -119,7 +119,7 @@ aoedev_downdev(struct aoedev *d)
119 bio = buf->bio; 119 bio = buf->bio;
120 if (--buf->nframesout == 0) { 120 if (--buf->nframesout == 0) {
121 mempool_free(buf, d->bufpool); 121 mempool_free(buf, d->bufpool);
122 bio_endio(bio, bio->bi_size, -EIO); 122 bio_endio(bio, -EIO);
123 } 123 }
124 skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0; 124 skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
125 } 125 }
@@ -130,7 +130,7 @@ aoedev_downdev(struct aoedev *d)
130 list_del(d->bufq.next); 130 list_del(d->bufq.next);
131 bio = buf->bio; 131 bio = buf->bio;
132 mempool_free(buf, d->bufpool); 132 mempool_free(buf, d->bufpool);
133 bio_endio(bio, bio->bi_size, -EIO); 133 bio_endio(bio, -EIO);
134 } 134 }
135 135
136 if (d->gd) 136 if (d->gd)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 084358a828e9..28d145756f6c 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1194,7 +1194,7 @@ static inline void complete_buffers(struct bio *bio, int status)
1194 int nr_sectors = bio_sectors(bio); 1194 int nr_sectors = bio_sectors(bio);
1195 1195
1196 bio->bi_next = NULL; 1196 bio->bi_next = NULL;
1197 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO); 1197 bio_endio(bio, status ? 0 : -EIO);
1198 bio = xbh; 1198 bio = xbh;
1199 } 1199 }
1200} 1200}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index eb9799acf65b..3853c9a38d6a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -987,7 +987,7 @@ static inline void complete_buffers(struct bio *bio, int ok)
987 xbh = bio->bi_next; 987 xbh = bio->bi_next;
988 bio->bi_next = NULL; 988 bio->bi_next = NULL;
989 989
990 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO); 990 bio_endio(bio, ok ? 0 : -EIO);
991 991
992 bio = xbh; 992 bio = xbh;
993 } 993 }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 085b7794fb3e..80483aac4cc9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2437,22 +2437,19 @@ static void rw_interrupt(void)
2437/* Compute maximal contiguous buffer size. */ 2437/* Compute maximal contiguous buffer size. */
2438static int buffer_chain_size(void) 2438static int buffer_chain_size(void)
2439{ 2439{
2440 struct bio *bio;
2441 struct bio_vec *bv; 2440 struct bio_vec *bv;
2442 int size, i; 2441 int size;
2442 struct req_iterator iter;
2443 char *base; 2443 char *base;
2444 2444
2445 base = bio_data(current_req->bio); 2445 base = bio_data(current_req->bio);
2446 size = 0; 2446 size = 0;
2447 2447
2448 rq_for_each_bio(bio, current_req) { 2448 rq_for_each_segment(bv, current_req, iter) {
2449 bio_for_each_segment(bv, bio, i) { 2449 if (page_address(bv->bv_page) + bv->bv_offset != base + size)
2450 if (page_address(bv->bv_page) + bv->bv_offset != 2450 break;
2451 base + size)
2452 break;
2453 2451
2454 size += bv->bv_len; 2452 size += bv->bv_len;
2455 }
2456 } 2453 }
2457 2454
2458 return size >> 9; 2455 return size >> 9;
@@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2479{ 2476{
2480 int remaining; /* number of transferred 512-byte sectors */ 2477 int remaining; /* number of transferred 512-byte sectors */
2481 struct bio_vec *bv; 2478 struct bio_vec *bv;
2482 struct bio *bio;
2483 char *buffer, *dma_buffer; 2479 char *buffer, *dma_buffer;
2484 int size, i; 2480 int size;
2481 struct req_iterator iter;
2485 2482
2486 max_sector = transfer_size(ssize, 2483 max_sector = transfer_size(ssize,
2487 min(max_sector, max_sector_2), 2484 min(max_sector, max_sector_2),
@@ -2514,43 +2511,41 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2514 2511
2515 size = current_req->current_nr_sectors << 9; 2512 size = current_req->current_nr_sectors << 9;
2516 2513
2517 rq_for_each_bio(bio, current_req) { 2514 rq_for_each_segment(bv, current_req, iter) {
2518 bio_for_each_segment(bv, bio, i) { 2515 if (!remaining)
2519 if (!remaining) 2516 break;
2520 break;
2521 2517
2522 size = bv->bv_len; 2518 size = bv->bv_len;
2523 SUPBOUND(size, remaining); 2519 SUPBOUND(size, remaining);
2524 2520
2525 buffer = page_address(bv->bv_page) + bv->bv_offset; 2521 buffer = page_address(bv->bv_page) + bv->bv_offset;
2526#ifdef FLOPPY_SANITY_CHECK 2522#ifdef FLOPPY_SANITY_CHECK
2527 if (dma_buffer + size > 2523 if (dma_buffer + size >
2528 floppy_track_buffer + (max_buffer_sectors << 10) || 2524 floppy_track_buffer + (max_buffer_sectors << 10) ||
2529 dma_buffer < floppy_track_buffer) { 2525 dma_buffer < floppy_track_buffer) {
2530 DPRINT("buffer overrun in copy buffer %d\n", 2526 DPRINT("buffer overrun in copy buffer %d\n",
2531 (int)((floppy_track_buffer - 2527 (int)((floppy_track_buffer -
2532 dma_buffer) >> 9)); 2528 dma_buffer) >> 9));
2533 printk("fsector_t=%d buffer_min=%d\n", 2529 printk("fsector_t=%d buffer_min=%d\n",
2534 fsector_t, buffer_min); 2530 fsector_t, buffer_min);
2535 printk("current_count_sectors=%ld\n", 2531 printk("current_count_sectors=%ld\n",
2536 current_count_sectors); 2532 current_count_sectors);
2537 if (CT(COMMAND) == FD_READ)
2538 printk("read\n");
2539 if (CT(COMMAND) == FD_WRITE)
2540 printk("write\n");
2541 break;
2542 }
2543 if (((unsigned long)buffer) % 512)
2544 DPRINT("%p buffer not aligned\n", buffer);
2545#endif
2546 if (CT(COMMAND) == FD_READ) 2533 if (CT(COMMAND) == FD_READ)
2547 memcpy(buffer, dma_buffer, size); 2534 printk("read\n");
2548 else 2535 if (CT(COMMAND) == FD_WRITE)
2549 memcpy(dma_buffer, buffer, size); 2536 printk("write\n");
2550 2537 break;
2551 remaining -= size;
2552 dma_buffer += size;
2553 } 2538 }
2539 if (((unsigned long)buffer) % 512)
2540 DPRINT("%p buffer not aligned\n", buffer);
2541#endif
2542 if (CT(COMMAND) == FD_READ)
2543 memcpy(buffer, dma_buffer, size);
2544 else
2545 memcpy(dma_buffer, buffer, size);
2546
2547 remaining -= size;
2548 dma_buffer += size;
2554 } 2549 }
2555#ifdef FLOPPY_SANITY_CHECK 2550#ifdef FLOPPY_SANITY_CHECK
2556 if (remaining) { 2551 if (remaining) {
@@ -3815,14 +3810,10 @@ static int check_floppy_change(struct gendisk *disk)
3815 * a disk in the drive, and whether that disk is writable. 3810 * a disk in the drive, and whether that disk is writable.
3816 */ 3811 */
3817 3812
3818static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done, 3813static void floppy_rb0_complete(struct bio *bio,
3819 int err) 3814 int err)
3820{ 3815{
3821 if (bio->bi_size)
3822 return 1;
3823
3824 complete((struct completion *)bio->bi_private); 3816 complete((struct completion *)bio->bi_private);
3825 return 0;
3826} 3817}
3827 3818
3828static int __floppy_read_block_0(struct block_device *bdev) 3819static int __floppy_read_block_0(struct block_device *bdev)
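
The floppy.c conversion shows the general pattern of this series: the nested rq_for_each_bio()/bio_for_each_segment() loops collapse into a single rq_for_each_segment() walk driven by a struct req_iterator. A minimal sketch of the new iteration, with an invented example_count_bytes() helper:

#include <linux/blkdev.h>
#include <linux/bio.h>

static unsigned int example_count_bytes(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	/* visits every segment of every bio in the request, in order */
	rq_for_each_segment(bvec, req, iter) {
		/* iter.bio points at the bio the current segment belongs to */
		bytes += bvec->bv_len;
	}
	return bytes;
}
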
diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
index 160cf14431ac..fa8e42341b87 100644
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c
@@ -142,25 +142,23 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
142 * return the total length. */ 142 * return the total length. */
143static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma) 143static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
144{ 144{
145 unsigned int i = 0, idx, len = 0; 145 unsigned int i = 0, len = 0;
146 struct bio *bio; 146 struct req_iterator iter;
147 147 struct bio_vec *bvec;
148 rq_for_each_bio(bio, req) { 148
149 struct bio_vec *bvec; 149 rq_for_each_segment(bvec, req, iter) {
150 bio_for_each_segment(bvec, bio, idx) { 150 /* We told the block layer not to give us too many. */
151 /* We told the block layer not to give us too many. */ 151 BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
152 BUG_ON(i == LGUEST_MAX_DMA_SECTIONS); 152 /* If we had a zero-length segment, it would look like
153 /* If we had a zero-length segment, it would look like 153 * the end of the data referred to by the "struct
154 * the end of the data referred to by the "struct 154 * lguest_dma", so make sure that doesn't happen. */
155 * lguest_dma", so make sure that doesn't happen. */ 155 BUG_ON(!bvec->bv_len);
156 BUG_ON(!bvec->bv_len); 156 /* Convert page & offset to a physical address */
157 /* Convert page & offset to a physical address */ 157 dma->addr[i] = page_to_phys(bvec->bv_page)
158 dma->addr[i] = page_to_phys(bvec->bv_page) 158 + bvec->bv_offset;
159 + bvec->bv_offset; 159 dma->len[i] = bvec->bv_len;
160 dma->len[i] = bvec->bv_len; 160 len += bvec->bv_len;
161 len += bvec->bv_len; 161 i++;
162 i++;
163 }
164 } 162 }
165 /* If the array isn't full, we mark the end with a 0 length */ 163 /* If the array isn't full, we mark the end with a 0 length */
166 if (i < LGUEST_MAX_DMA_SECTIONS) 164 if (i < LGUEST_MAX_DMA_SECTIONS)
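
req_to_dma() above is a typical consumer of the new iterator: each bio_vec becomes one DMA descriptor entry. A reduced sketch of that address/length computation (struct example_sg and example_fill_sg are invented, and the lguest-specific limits and zero-length checks are left out):

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <asm/io.h>	/* page_to_phys() */

struct example_sg {
	unsigned long addr;
	unsigned int len;
};

static unsigned int example_fill_sg(struct request *req,
				    struct example_sg *sg, unsigned int max)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int n = 0, total = 0;

	rq_for_each_segment(bvec, req, iter) {
		if (n == max)
			break;
		/* a segment is a contiguous page range: page + offset + len */
		sg[n].addr = page_to_phys(bvec->bv_page) + bvec->bv_offset;
		sg[n].len = bvec->bv_len;
		total += bvec->bv_len;
		n++;
	}
	return total;
}
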
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f015fce4135..b9233a06934c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -551,7 +551,7 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio)
551 551
552out: 552out:
553 spin_unlock_irq(&lo->lo_lock); 553 spin_unlock_irq(&lo->lo_lock);
554 bio_io_error(old_bio, old_bio->bi_size); 554 bio_io_error(old_bio);
555 return 0; 555 return 0;
556} 556}
557 557
@@ -580,7 +580,7 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
580 bio_put(bio); 580 bio_put(bio);
581 } else { 581 } else {
582 int ret = do_bio_filebacked(lo, bio); 582 int ret = do_bio_filebacked(lo, bio);
583 bio_endio(bio, bio->bi_size, ret); 583 bio_endio(bio, ret);
584 } 584 }
585} 585}
586 586
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index be92c658f06e..be5ec3a9b1fc 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
180 180
181static int nbd_send_req(struct nbd_device *lo, struct request *req) 181static int nbd_send_req(struct nbd_device *lo, struct request *req)
182{ 182{
183 int result, i, flags; 183 int result, flags;
184 struct nbd_request request; 184 struct nbd_request request;
185 unsigned long size = req->nr_sectors << 9; 185 unsigned long size = req->nr_sectors << 9;
186 struct socket *sock = lo->sock; 186 struct socket *sock = lo->sock;
@@ -205,27 +205,23 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
205 } 205 }
206 206
207 if (nbd_cmd(req) == NBD_CMD_WRITE) { 207 if (nbd_cmd(req) == NBD_CMD_WRITE) {
208 struct bio *bio; 208 struct req_iterator iter;
209 struct bio_vec *bvec;
209 /* 210 /*
210 * we are really probing at internals to determine 211 * we are really probing at internals to determine
211 * whether to set MSG_MORE or not... 212 * whether to set MSG_MORE or not...
212 */ 213 */
213 rq_for_each_bio(bio, req) { 214 rq_for_each_segment(bvec, req, iter) {
214 struct bio_vec *bvec; 215 flags = 0;
215 bio_for_each_segment(bvec, bio, i) { 216 if (!rq_iter_last(req, iter))
216 flags = 0; 217 flags = MSG_MORE;
217 if ((i < (bio->bi_vcnt - 1)) || bio->bi_next) 218 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
218 flags = MSG_MORE; 219 lo->disk->disk_name, req, bvec->bv_len);
219 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 220 result = sock_send_bvec(sock, bvec, flags);
220 lo->disk->disk_name, req, 221 if (result <= 0) {
221 bvec->bv_len); 222 printk(KERN_ERR "%s: Send data failed (result %d)\n",
222 result = sock_send_bvec(sock, bvec, flags); 223 lo->disk->disk_name, result);
223 if (result <= 0) { 224 goto error_out;
224 printk(KERN_ERR "%s: Send data failed (result %d)\n",
225 lo->disk->disk_name,
226 result);
227 goto error_out;
228 }
229 } 225 }
230 } 226 }
231 } 227 }
@@ -321,22 +317,19 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
321 dprintk(DBG_RX, "%s: request %p: got reply\n", 317 dprintk(DBG_RX, "%s: request %p: got reply\n",
322 lo->disk->disk_name, req); 318 lo->disk->disk_name, req);
323 if (nbd_cmd(req) == NBD_CMD_READ) { 319 if (nbd_cmd(req) == NBD_CMD_READ) {
324 int i; 320 struct req_iterator iter;
325 struct bio *bio; 321 struct bio_vec *bvec;
326 rq_for_each_bio(bio, req) { 322
327 struct bio_vec *bvec; 323 rq_for_each_segment(bvec, req, iter) {
328 bio_for_each_segment(bvec, bio, i) { 324 result = sock_recv_bvec(sock, bvec);
329 result = sock_recv_bvec(sock, bvec); 325 if (result <= 0) {
330 if (result <= 0) { 326 printk(KERN_ERR "%s: Receive data failed (result %d)\n",
331 printk(KERN_ERR "%s: Receive data failed (result %d)\n", 327 lo->disk->disk_name, result);
332 lo->disk->disk_name, 328 req->errors++;
333 result); 329 return req;
334 req->errors++;
335 return req;
336 }
337 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
338 lo->disk->disk_name, req, bvec->bv_len);
339 } 330 }
331 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
332 lo->disk->disk_name, req, bvec->bv_len);
340 } 333 }
341 } 334 }
342 return req; 335 return req;
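
The nbd send path also relies on rq_iter_last() to tell whether more data follows, and hence whether to set MSG_MORE; with the old nested loops that meant peeking at bi_vcnt and bi_next by hand. Sketch of the pattern (example_send_segment() is an assumed stand-in for sock_send_bvec()):

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/socket.h>	/* MSG_MORE */

int example_send_segment(struct bio_vec *bvec, int flags);	/* assumed helper */

static int example_send_request(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	int flags, result;

	rq_for_each_segment(bvec, req, iter) {
		/* rq_iter_last() is true only for the final segment of the
		 * final bio, so every earlier segment is sent with MSG_MORE */
		flags = rq_iter_last(req, iter) ? 0 : MSG_MORE;
		result = example_send_segment(bvec, flags);
		if (result <= 0)
			return result;
	}
	return 0;
}
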
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index fadbfd880bab..540bf3676985 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1058,15 +1058,12 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
1058 } 1058 }
1059} 1059}
1060 1060
1061static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 1061static void pkt_end_io_read(struct bio *bio, int err)
1062{ 1062{
1063 struct packet_data *pkt = bio->bi_private; 1063 struct packet_data *pkt = bio->bi_private;
1064 struct pktcdvd_device *pd = pkt->pd; 1064 struct pktcdvd_device *pd = pkt->pd;
1065 BUG_ON(!pd); 1065 BUG_ON(!pd);
1066 1066
1067 if (bio->bi_size)
1068 return 1;
1069
1070 VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio, 1067 VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
1071 (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err); 1068 (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
1072 1069
@@ -1077,19 +1074,14 @@ static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
1077 wake_up(&pd->wqueue); 1074 wake_up(&pd->wqueue);
1078 } 1075 }
1079 pkt_bio_finished(pd); 1076 pkt_bio_finished(pd);
1080
1081 return 0;
1082} 1077}
1083 1078
1084static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err) 1079static void pkt_end_io_packet_write(struct bio *bio, int err)
1085{ 1080{
1086 struct packet_data *pkt = bio->bi_private; 1081 struct packet_data *pkt = bio->bi_private;
1087 struct pktcdvd_device *pd = pkt->pd; 1082 struct pktcdvd_device *pd = pkt->pd;
1088 BUG_ON(!pd); 1083 BUG_ON(!pd);
1089 1084
1090 if (bio->bi_size)
1091 return 1;
1092
1093 VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err); 1085 VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
1094 1086
1095 pd->stats.pkt_ended++; 1087 pd->stats.pkt_ended++;
@@ -1098,7 +1090,6 @@ static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int
1098 atomic_dec(&pkt->io_wait); 1090 atomic_dec(&pkt->io_wait);
1099 atomic_inc(&pkt->run_sm); 1091 atomic_inc(&pkt->run_sm);
1100 wake_up(&pd->wqueue); 1092 wake_up(&pd->wqueue);
1101 return 0;
1102} 1093}
1103 1094
1104/* 1095/*
@@ -1470,7 +1461,7 @@ static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
1470 while (bio) { 1461 while (bio) {
1471 next = bio->bi_next; 1462 next = bio->bi_next;
1472 bio->bi_next = NULL; 1463 bio->bi_next = NULL;
1473 bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO); 1464 bio_endio(bio, uptodate ? 0 : -EIO);
1474 bio = next; 1465 bio = next;
1475 } 1466 }
1476 pkt->orig_bios = pkt->orig_bios_tail = NULL; 1467 pkt->orig_bios = pkt->orig_bios_tail = NULL;
@@ -2462,19 +2453,15 @@ static int pkt_close(struct inode *inode, struct file *file)
2462} 2453}
2463 2454
2464 2455
2465static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) 2456static void pkt_end_io_read_cloned(struct bio *bio, int err)
2466{ 2457{
2467 struct packet_stacked_data *psd = bio->bi_private; 2458 struct packet_stacked_data *psd = bio->bi_private;
2468 struct pktcdvd_device *pd = psd->pd; 2459 struct pktcdvd_device *pd = psd->pd;
2469 2460
2470 if (bio->bi_size)
2471 return 1;
2472
2473 bio_put(bio); 2461 bio_put(bio);
2474 bio_endio(psd->bio, psd->bio->bi_size, err); 2462 bio_endio(psd->bio, err);
2475 mempool_free(psd, psd_pool); 2463 mempool_free(psd, psd_pool);
2476 pkt_bio_finished(pd); 2464 pkt_bio_finished(pd);
2477 return 0;
2478} 2465}
2479 2466
2480static int pkt_make_request(struct request_queue *q, struct bio *bio) 2467static int pkt_make_request(struct request_queue *q, struct bio *bio)
@@ -2620,7 +2607,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio)
2620 } 2607 }
2621 return 0; 2608 return 0;
2622end_io: 2609end_io:
2623 bio_io_error(bio, bio->bi_size); 2610 bio_io_error(bio);
2624 return 0; 2611 return 0;
2625} 2612}
2626 2613
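
pktcdvd's handlers show the other half of the bio_endio() change: a bi_end_io callback is now void, runs exactly once for the fully-completed bio, and no longer needs the "if (bio->bi_size) return 1;" partial-completion guard. Roughly (example_end_io() and its use of bi_private are illustrative only):

#include <linux/bio.h>
#include <linux/completion.h>

/*
 * old: static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
 *      {
 *              if (bio->bi_size)
 *                      return 1;	(more to come, ignore this call)
 *              ...
 *              return 0;
 *      }
 */
static void example_end_io(struct bio *bio, int err)
{
	/* the bio is fully completed by the time this runs */
	complete((struct completion *)bio->bi_private);
}
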
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index aa8b890c80d7..06d0552cf49c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -91,30 +91,29 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
91 struct request *req, int gather) 91 struct request *req, int gather)
92{ 92{
93 unsigned int offset = 0; 93 unsigned int offset = 0;
94 struct bio *bio; 94 struct req_iterator iter;
95 sector_t sector;
96 struct bio_vec *bvec; 95 struct bio_vec *bvec;
97 unsigned int i = 0, j; 96 unsigned int i = 0;
98 size_t size; 97 size_t size;
99 void *buf; 98 void *buf;
100 99
101 rq_for_each_bio(bio, req) { 100 rq_for_each_segment(bvec, req, iter) {
102 sector = bio->bi_sector; 101 unsigned long flags;
103 dev_dbg(&dev->sbd.core, 102 dev_dbg(&dev->sbd.core,
104 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 103 "%s:%u: bio %u: %u segs %u sectors from %lu\n",
105 __func__, __LINE__, i, bio_segments(bio), 104 __func__, __LINE__, i, bio_segments(iter.bio),
106 bio_sectors(bio), sector); 105 bio_sectors(iter.bio),
107 bio_for_each_segment(bvec, bio, j) { 106 (unsigned long)iter.bio->bi_sector);
108 size = bvec->bv_len; 107
109 buf = __bio_kmap_atomic(bio, j, KM_IRQ0); 108 size = bvec->bv_len;
110 if (gather) 109 buf = bvec_kmap_irq(bvec, &flags);
111 memcpy(dev->bounce_buf+offset, buf, size); 110 if (gather)
112 else 111 memcpy(dev->bounce_buf+offset, buf, size);
113 memcpy(buf, dev->bounce_buf+offset, size); 112 else
114 offset += size; 113 memcpy(buf, dev->bounce_buf+offset, size);
115 flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page); 114 offset += size;
116 __bio_kunmap_atomic(bio, KM_IRQ0); 115 flush_kernel_dcache_page(bvec->bv_page);
117 } 116 bvec_kunmap_irq(bvec, &flags);
118 i++; 117 i++;
119 } 118 }
120} 119}
@@ -130,12 +129,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
130 129
131#ifdef DEBUG 130#ifdef DEBUG
132 unsigned int n = 0; 131 unsigned int n = 0;
133 struct bio *bio; 132 struct bio_vec *bv;
133 struct req_iterator iter;
134 134
135 rq_for_each_bio(bio, req) 135 rq_for_each_segment(bv, req, iter)
136 n++; 136 n++;
137 dev_dbg(&dev->sbd.core, 137 dev_dbg(&dev->sbd.core,
138 "%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n", 138 "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
139 __func__, __LINE__, op, n, req->nr_sectors, 139 __func__, __LINE__, op, n, req->nr_sectors,
140 req->hard_nr_sectors); 140 req->hard_nr_sectors);
141#endif 141#endif
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 65150b548f3a..701ea77f62e9 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -287,10 +287,10 @@ static int rd_make_request(struct request_queue *q, struct bio *bio)
287 if (ret) 287 if (ret)
288 goto fail; 288 goto fail;
289 289
290 bio_endio(bio, bio->bi_size, 0); 290 bio_endio(bio, 0);
291 return 0; 291 return 0;
292fail: 292fail:
293 bio_io_error(bio, bio->bi_size); 293 bio_io_error(bio);
294 return 0; 294 return 0;
295} 295}
296 296
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 6b7c02d6360d..99806f9ee4ce 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -52,7 +52,7 @@
52#include <linux/fcntl.h> /* O_ACCMODE */ 52#include <linux/fcntl.h> /* O_ACCMODE */
53#include <linux/hdreg.h> /* HDIO_GETGEO */ 53#include <linux/hdreg.h> /* HDIO_GETGEO */
54 54
55#include <linux/umem.h> 55#include "umem.h"
56 56
57#include <asm/uaccess.h> 57#include <asm/uaccess.h>
58#include <asm/io.h> 58#include <asm/io.h>
@@ -67,9 +67,10 @@
67 * Version Information 67 * Version Information
68 */ 68 */
69 69
70#define DRIVER_VERSION "v2.3" 70#define DRIVER_NAME "umem"
71#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown" 71#define DRIVER_VERSION "v2.3"
72#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver" 72#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown"
73#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver"
73 74
74static int debug; 75static int debug;
75/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */ 76/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
@@ -97,15 +98,9 @@ static int major_nr;
97#include <linux/blkpg.h> 98#include <linux/blkpg.h>
98 99
99struct cardinfo { 100struct cardinfo {
100 int card_number;
101 struct pci_dev *dev; 101 struct pci_dev *dev;
102 102
103 int irq;
104
105 unsigned long csr_base;
106 unsigned char __iomem *csr_remap; 103 unsigned char __iomem *csr_remap;
107 unsigned long csr_len;
108 unsigned int win_size; /* PCI window size */
109 unsigned int mm_size; /* size in kbytes */ 104 unsigned int mm_size; /* size in kbytes */
110 105
111 unsigned int init_size; /* initial segment, in sectors, 106 unsigned int init_size; /* initial segment, in sectors,
@@ -113,6 +108,8 @@ struct cardinfo {
113 * have been written 108 * have been written
114 */ 109 */
115 struct bio *bio, *currentbio, **biotail; 110 struct bio *bio, *currentbio, **biotail;
111 int current_idx;
112 sector_t current_sector;
116 113
117 struct request_queue *queue; 114 struct request_queue *queue;
118 115
@@ -121,6 +118,7 @@ struct cardinfo {
121 struct mm_dma_desc *desc; 118 struct mm_dma_desc *desc;
122 int cnt, headcnt; 119 int cnt, headcnt;
123 struct bio *bio, **biotail; 120 struct bio *bio, **biotail;
121 int idx;
124 } mm_pages[2]; 122 } mm_pages[2];
125#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) 123#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
126 124
@@ -233,7 +231,7 @@ static void dump_regs(struct cardinfo *card)
233*/ 231*/
234static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) 232static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
235{ 233{
236 printk(KERN_DEBUG "MM%d*: DMAstat - ", card->card_number); 234 dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
237 if (dmastat & DMASCR_ANY_ERR) 235 if (dmastat & DMASCR_ANY_ERR)
238 printk("ANY_ERR "); 236 printk("ANY_ERR ");
239 if (dmastat & DMASCR_MBE_ERR) 237 if (dmastat & DMASCR_MBE_ERR)
@@ -295,7 +293,7 @@ static void mm_start_io(struct cardinfo *card)
295 desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN); 293 desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
296 desc->sem_control_bits = desc->control_bits; 294 desc->sem_control_bits = desc->control_bits;
297 295
298 296
299 if (debug & DEBUG_LED_ON_TRANSFER) 297 if (debug & DEBUG_LED_ON_TRANSFER)
300 set_led(card, LED_REMOVE, LED_ON); 298 set_led(card, LED_REMOVE, LED_ON);
301 299
@@ -329,7 +327,7 @@ static int add_bio(struct cardinfo *card);
329 327
330static void activate(struct cardinfo *card) 328static void activate(struct cardinfo *card)
331{ 329{
332 /* if No page is Active, and Ready is 330 /* if No page is Active, and Ready is
333 * not empty, then switch Ready page 331 * not empty, then switch Ready page
334 * to active and start IO. 332 * to active and start IO.
335 * Then add any bh's that are available to Ready 333 * Then add any bh's that are available to Ready
@@ -368,7 +366,7 @@ static void mm_unplug_device(struct request_queue *q)
368 spin_unlock_irqrestore(&card->lock, flags); 366 spin_unlock_irqrestore(&card->lock, flags);
369} 367}
370 368
371/* 369/*
372 * If there is room on Ready page, take 370 * If there is room on Ready page, take
373 * one bh off list and add it. 371 * one bh off list and add it.
374 * return 1 if there was room, else 0. 372 * return 1 if there was room, else 0.
@@ -380,12 +378,16 @@ static int add_bio(struct cardinfo *card)
380 dma_addr_t dma_handle; 378 dma_addr_t dma_handle;
381 int offset; 379 int offset;
382 struct bio *bio; 380 struct bio *bio;
381 struct bio_vec *vec;
382 int idx;
383 int rw; 383 int rw;
384 int len; 384 int len;
385 385
386 bio = card->currentbio; 386 bio = card->currentbio;
387 if (!bio && card->bio) { 387 if (!bio && card->bio) {
388 card->currentbio = card->bio; 388 card->currentbio = card->bio;
389 card->current_idx = card->bio->bi_idx;
390 card->current_sector = card->bio->bi_sector;
389 card->bio = card->bio->bi_next; 391 card->bio = card->bio->bi_next;
390 if (card->bio == NULL) 392 if (card->bio == NULL)
391 card->biotail = &card->bio; 393 card->biotail = &card->bio;
@@ -394,15 +396,17 @@ static int add_bio(struct cardinfo *card)
394 } 396 }
395 if (!bio) 397 if (!bio)
396 return 0; 398 return 0;
399 idx = card->current_idx;
397 400
398 rw = bio_rw(bio); 401 rw = bio_rw(bio);
399 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) 402 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
400 return 0; 403 return 0;
401 404
402 len = bio_iovec(bio)->bv_len; 405 vec = bio_iovec_idx(bio, idx);
403 dma_handle = pci_map_page(card->dev, 406 len = vec->bv_len;
404 bio_page(bio), 407 dma_handle = pci_map_page(card->dev,
405 bio_offset(bio), 408 vec->bv_page,
409 vec->bv_offset,
406 len, 410 len,
407 (rw==READ) ? 411 (rw==READ) ?
408 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 412 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
@@ -410,6 +414,8 @@ static int add_bio(struct cardinfo *card)
410 p = &card->mm_pages[card->Ready]; 414 p = &card->mm_pages[card->Ready];
411 desc = &p->desc[p->cnt]; 415 desc = &p->desc[p->cnt];
412 p->cnt++; 416 p->cnt++;
417 if (p->bio == NULL)
418 p->idx = idx;
413 if ((p->biotail) != &bio->bi_next) { 419 if ((p->biotail) != &bio->bi_next) {
414 *(p->biotail) = bio; 420 *(p->biotail) = bio;
415 p->biotail = &(bio->bi_next); 421 p->biotail = &(bio->bi_next);
@@ -419,7 +425,7 @@ static int add_bio(struct cardinfo *card)
419 desc->data_dma_handle = dma_handle; 425 desc->data_dma_handle = dma_handle;
420 426
421 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); 427 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
422 desc->local_addr= cpu_to_le64(bio->bi_sector << 9); 428 desc->local_addr = cpu_to_le64(card->current_sector << 9);
423 desc->transfer_size = cpu_to_le32(len); 429 desc->transfer_size = cpu_to_le32(len);
424 offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc)); 430 offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc));
425 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); 431 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
@@ -435,10 +441,10 @@ static int add_bio(struct cardinfo *card)
435 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); 441 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
436 desc->sem_control_bits = desc->control_bits; 442 desc->sem_control_bits = desc->control_bits;
437 443
438 bio->bi_sector += (len>>9); 444 card->current_sector += (len >> 9);
439 bio->bi_size -= len; 445 idx++;
440 bio->bi_idx++; 446 card->current_idx = idx;
441 if (bio->bi_idx >= bio->bi_vcnt) 447 if (idx >= bio->bi_vcnt)
442 card->currentbio = NULL; 448 card->currentbio = NULL;
443 449
444 return 1; 450 return 1;
@@ -461,7 +467,7 @@ static void process_page(unsigned long data)
461 if (card->Active < 0) 467 if (card->Active < 0)
462 goto out_unlock; 468 goto out_unlock;
463 page = &card->mm_pages[card->Active]; 469 page = &card->mm_pages[card->Active];
464 470
465 while (page->headcnt < page->cnt) { 471 while (page->headcnt < page->cnt) {
466 struct bio *bio = page->bio; 472 struct bio *bio = page->bio;
467 struct mm_dma_desc *desc = &page->desc[page->headcnt]; 473 struct mm_dma_desc *desc = &page->desc[page->headcnt];
@@ -471,32 +477,34 @@ static void process_page(unsigned long data)
471 477
472 if (!(control & DMASCR_DMA_COMPLETE)) { 478 if (!(control & DMASCR_DMA_COMPLETE)) {
473 control = dma_status; 479 control = dma_status;
474 last=1; 480 last=1;
475 } 481 }
476 page->headcnt++; 482 page->headcnt++;
477 idx = bio->bi_phys_segments; 483 idx = page->idx;
478 bio->bi_phys_segments++; 484 page->idx++;
479 if (bio->bi_phys_segments >= bio->bi_vcnt) 485 if (page->idx >= bio->bi_vcnt) {
480 page->bio = bio->bi_next; 486 page->bio = bio->bi_next;
487 page->idx = page->bio->bi_idx;
488 }
481 489
482 pci_unmap_page(card->dev, desc->data_dma_handle, 490 pci_unmap_page(card->dev, desc->data_dma_handle,
483 bio_iovec_idx(bio,idx)->bv_len, 491 bio_iovec_idx(bio,idx)->bv_len,
484 (control& DMASCR_TRANSFER_READ) ? 492 (control& DMASCR_TRANSFER_READ) ?
485 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 493 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
486 if (control & DMASCR_HARD_ERROR) { 494 if (control & DMASCR_HARD_ERROR) {
487 /* error */ 495 /* error */
488 clear_bit(BIO_UPTODATE, &bio->bi_flags); 496 clear_bit(BIO_UPTODATE, &bio->bi_flags);
489 printk(KERN_WARNING "MM%d: I/O error on sector %d/%d\n", 497 dev_printk(KERN_WARNING, &card->dev->dev,
490 card->card_number, 498 "I/O error on sector %d/%d\n",
491 le32_to_cpu(desc->local_addr)>>9, 499 le32_to_cpu(desc->local_addr)>>9,
492 le32_to_cpu(desc->transfer_size)); 500 le32_to_cpu(desc->transfer_size));
493 dump_dmastat(card, control); 501 dump_dmastat(card, control);
494 } else if (test_bit(BIO_RW, &bio->bi_rw) && 502 } else if (test_bit(BIO_RW, &bio->bi_rw) &&
495 le32_to_cpu(desc->local_addr)>>9 == card->init_size) { 503 le32_to_cpu(desc->local_addr)>>9 == card->init_size) {
496 card->init_size += le32_to_cpu(desc->transfer_size)>>9; 504 card->init_size += le32_to_cpu(desc->transfer_size)>>9;
497 if (card->init_size>>1 >= card->mm_size) { 505 if (card->init_size>>1 >= card->mm_size) {
498 printk(KERN_INFO "MM%d: memory now initialised\n", 506 dev_printk(KERN_INFO, &card->dev->dev,
499 card->card_number); 507 "memory now initialised\n");
500 set_userbit(card, MEMORY_INITIALIZED, 1); 508 set_userbit(card, MEMORY_INITIALIZED, 1);
501 } 509 }
502 } 510 }
@@ -532,7 +540,7 @@ static void process_page(unsigned long data)
532 540
533 return_bio = bio->bi_next; 541 return_bio = bio->bi_next;
534 bio->bi_next = NULL; 542 bio->bi_next = NULL;
535 bio_endio(bio, bio->bi_size, 0); 543 bio_endio(bio, 0);
536 } 544 }
537} 545}
538 546
@@ -547,7 +555,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
547 pr_debug("mm_make_request %llu %u\n", 555 pr_debug("mm_make_request %llu %u\n",
548 (unsigned long long)bio->bi_sector, bio->bi_size); 556 (unsigned long long)bio->bi_sector, bio->bi_size);
549 557
550 bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
551 spin_lock_irq(&card->lock); 558 spin_lock_irq(&card->lock);
552 *card->biotail = bio; 559 *card->biotail = bio;
553 bio->bi_next = NULL; 560 bio->bi_next = NULL;
@@ -585,7 +592,7 @@ HW_TRACE(0x30);
585 else 592 else
586 writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, 593 writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
587 card->csr_remap+ DMA_STATUS_CTRL + 2); 594 card->csr_remap+ DMA_STATUS_CTRL + 2);
588 595
589 /* log errors and clear interrupt status */ 596 /* log errors and clear interrupt status */
590 if (dma_status & DMASCR_ANY_ERR) { 597 if (dma_status & DMASCR_ANY_ERR) {
591 unsigned int data_log1, data_log2; 598 unsigned int data_log1, data_log2;
@@ -606,46 +613,51 @@ HW_TRACE(0x30);
606 dump_dmastat(card, dma_status); 613 dump_dmastat(card, dma_status);
607 614
608 if (stat & 0x01) 615 if (stat & 0x01)
609 printk(KERN_ERR "MM%d*: Memory access error detected (err count %d)\n", 616 dev_printk(KERN_ERR, &card->dev->dev,
610 card->card_number, count); 617 "Memory access error detected (err count %d)\n",
618 count);
611 if (stat & 0x02) 619 if (stat & 0x02)
612 printk(KERN_ERR "MM%d*: Multi-bit EDC error\n", 620 dev_printk(KERN_ERR, &card->dev->dev,
613 card->card_number); 621 "Multi-bit EDC error\n");
614 622
615 printk(KERN_ERR "MM%d*: Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n", 623 dev_printk(KERN_ERR, &card->dev->dev,
616 card->card_number, addr_log2, addr_log1, data_log2, data_log1); 624 "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
617 printk(KERN_ERR "MM%d*: Fault Check 0x%02x, Fault Syndrome 0x%02x\n", 625 addr_log2, addr_log1, data_log2, data_log1);
618 card->card_number, check, syndrome); 626 dev_printk(KERN_ERR, &card->dev->dev,
627 "Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
628 check, syndrome);
619 629
620 writeb(0, card->csr_remap + ERROR_COUNT); 630 writeb(0, card->csr_remap + ERROR_COUNT);
621 } 631 }
622 632
623 if (dma_status & DMASCR_PARITY_ERR_REP) { 633 if (dma_status & DMASCR_PARITY_ERR_REP) {
624 printk(KERN_ERR "MM%d*: PARITY ERROR REPORTED\n", card->card_number); 634 dev_printk(KERN_ERR, &card->dev->dev,
635 "PARITY ERROR REPORTED\n");
625 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); 636 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
626 pci_write_config_word(card->dev, PCI_STATUS, cfg_status); 637 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
627 } 638 }
628 639
629 if (dma_status & DMASCR_PARITY_ERR_DET) { 640 if (dma_status & DMASCR_PARITY_ERR_DET) {
630 printk(KERN_ERR "MM%d*: PARITY ERROR DETECTED\n", card->card_number); 641 dev_printk(KERN_ERR, &card->dev->dev,
642 "PARITY ERROR DETECTED\n");
631 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); 643 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
632 pci_write_config_word(card->dev, PCI_STATUS, cfg_status); 644 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
633 } 645 }
634 646
635 if (dma_status & DMASCR_SYSTEM_ERR_SIG) { 647 if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
636 printk(KERN_ERR "MM%d*: SYSTEM ERROR\n", card->card_number); 648 dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n");
637 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); 649 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
638 pci_write_config_word(card->dev, PCI_STATUS, cfg_status); 650 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
639 } 651 }
640 652
641 if (dma_status & DMASCR_TARGET_ABT) { 653 if (dma_status & DMASCR_TARGET_ABT) {
642 printk(KERN_ERR "MM%d*: TARGET ABORT\n", card->card_number); 654 dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n");
643 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); 655 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
644 pci_write_config_word(card->dev, PCI_STATUS, cfg_status); 656 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
645 } 657 }
646 658
647 if (dma_status & DMASCR_MASTER_ABT) { 659 if (dma_status & DMASCR_MASTER_ABT) {
648 printk(KERN_ERR "MM%d*: MASTER ABORT\n", card->card_number); 660 dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n");
649 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); 661 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
650 pci_write_config_word(card->dev, PCI_STATUS, cfg_status); 662 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
651 } 663 }
@@ -656,7 +668,7 @@ HW_TRACE(0x30);
656 668
657HW_TRACE(0x36); 669HW_TRACE(0x36);
658 670
659 return IRQ_HANDLED; 671 return IRQ_HANDLED;
660} 672}
661/* 673/*
662----------------------------------------------------------------------------------- 674-----------------------------------------------------------------------------------
@@ -696,20 +708,20 @@ static int check_battery(struct cardinfo *card, int battery, int status)
696 card->battery[battery].last_change = jiffies; 708 card->battery[battery].last_change = jiffies;
697 709
698 if (card->battery[battery].good) { 710 if (card->battery[battery].good) {
699 printk(KERN_ERR "MM%d: Battery %d now good\n", 711 dev_printk(KERN_ERR, &card->dev->dev,
700 card->card_number, battery + 1); 712 "Battery %d now good\n", battery + 1);
701 card->battery[battery].warned = 0; 713 card->battery[battery].warned = 0;
702 } else 714 } else
703 printk(KERN_ERR "MM%d: Battery %d now FAILED\n", 715 dev_printk(KERN_ERR, &card->dev->dev,
704 card->card_number, battery + 1); 716 "Battery %d now FAILED\n", battery + 1);
705 717
706 return 1; 718 return 1;
707 } else if (!card->battery[battery].good && 719 } else if (!card->battery[battery].good &&
708 !card->battery[battery].warned && 720 !card->battery[battery].warned &&
709 time_after_eq(jiffies, card->battery[battery].last_change + 721 time_after_eq(jiffies, card->battery[battery].last_change +
710 (HZ * 60 * 60 * 5))) { 722 (HZ * 60 * 60 * 5))) {
711 printk(KERN_ERR "MM%d: Battery %d still FAILED after 5 hours\n", 723 dev_printk(KERN_ERR, &card->dev->dev,
712 card->card_number, battery + 1); 724 "Battery %d still FAILED after 5 hours\n", battery + 1);
713 card->battery[battery].warned = 1; 725 card->battery[battery].warned = 1;
714 726
715 return 1; 727 return 1;
@@ -733,8 +745,8 @@ static void check_batteries(struct cardinfo *card)
733 745
734 status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); 746 status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
735 if (debug & DEBUG_BATTERY_POLLING) 747 if (debug & DEBUG_BATTERY_POLLING)
736 printk(KERN_DEBUG "MM%d: checking battery status, 1 = %s, 2 = %s\n", 748 dev_printk(KERN_DEBUG, &card->dev->dev,
737 card->card_number, 749 "checking battery status, 1 = %s, 2 = %s\n",
738 (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK", 750 (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
739 (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK"); 751 (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
740 752
@@ -749,7 +761,7 @@ static void check_all_batteries(unsigned long ptr)
749{ 761{
750 int i; 762 int i;
751 763
752 for (i = 0; i < num_cards; i++) 764 for (i = 0; i < num_cards; i++)
753 if (!(cards[i].flags & UM_FLAG_NO_BATT)) { 765 if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
754 struct cardinfo *card = &cards[i]; 766 struct cardinfo *card = &cards[i];
755 spin_lock_bh(&card->lock); 767 spin_lock_bh(&card->lock);
@@ -853,45 +865,56 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
853 unsigned char mem_present; 865 unsigned char mem_present;
854 unsigned char batt_status; 866 unsigned char batt_status;
855 unsigned int saved_bar, data; 867 unsigned int saved_bar, data;
868 unsigned long csr_base;
869 unsigned long csr_len;
856 int magic_number; 870 int magic_number;
871 static int printed_version;
857 872
858 if (pci_enable_device(dev) < 0) 873 if (!printed_version++)
859 return -ENODEV; 874 printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
875
876 ret = pci_enable_device(dev);
877 if (ret)
878 return ret;
860 879
861 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8); 880 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
862 pci_set_master(dev); 881 pci_set_master(dev);
863 882
864 card->dev = dev; 883 card->dev = dev;
865 card->card_number = num_cards;
866 884
867 card->csr_base = pci_resource_start(dev, 0); 885 csr_base = pci_resource_start(dev, 0);
868 card->csr_len = pci_resource_len(dev, 0); 886 csr_len = pci_resource_len(dev, 0);
887 if (!csr_base || !csr_len)
888 return -ENODEV;
869 889
870 printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n", 890 dev_printk(KERN_INFO, &dev->dev,
871 card->card_number, dev->bus->number, dev->devfn); 891 "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
872 892
873 if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && 893 if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
874 pci_set_dma_mask(dev, DMA_32BIT_MASK)) { 894 pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
875 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards); 895 dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
876 return -ENOMEM; 896 return -ENOMEM;
877 } 897 }
878 if (!request_mem_region(card->csr_base, card->csr_len, "Micro Memory")) {
879 printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
880 ret = -ENOMEM;
881 898
899 ret = pci_request_regions(dev, DRIVER_NAME);
900 if (ret) {
901 dev_printk(KERN_ERR, &card->dev->dev,
902 "Unable to request memory region\n");
882 goto failed_req_csr; 903 goto failed_req_csr;
883 } 904 }
884 905
885 card->csr_remap = ioremap_nocache(card->csr_base, card->csr_len); 906 card->csr_remap = ioremap_nocache(csr_base, csr_len);
886 if (!card->csr_remap) { 907 if (!card->csr_remap) {
887 printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number); 908 dev_printk(KERN_ERR, &card->dev->dev,
909 "Unable to remap memory region\n");
888 ret = -ENOMEM; 910 ret = -ENOMEM;
889 911
890 goto failed_remap_csr; 912 goto failed_remap_csr;
891 } 913 }
892 914
893 printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number, 915 dev_printk(KERN_INFO, &card->dev->dev,
894 card->csr_base, card->csr_remap, card->csr_len); 916 "CSR 0x%08lx -> 0x%p (0x%lx)\n",
917 csr_base, card->csr_remap, csr_len);
895 918
896 switch(card->dev->device) { 919 switch(card->dev->device) {
897 case 0x5415: 920 case 0x5415:
@@ -915,7 +938,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
915 } 938 }
916 939
917 if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) { 940 if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
918 printk(KERN_ERR "MM%d: Magic number invalid\n", card->card_number); 941 dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n");
919 ret = -ENOMEM; 942 ret = -ENOMEM;
920 goto failed_magic; 943 goto failed_magic;
921 } 944 }
@@ -928,7 +951,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
928 &card->mm_pages[1].page_dma); 951 &card->mm_pages[1].page_dma);
929 if (card->mm_pages[0].desc == NULL || 952 if (card->mm_pages[0].desc == NULL ||
930 card->mm_pages[1].desc == NULL) { 953 card->mm_pages[1].desc == NULL) {
931 printk(KERN_ERR "MM%d: alloc failed\n", card->card_number); 954 dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
932 goto failed_alloc; 955 goto failed_alloc;
933 } 956 }
934 reset_page(&card->mm_pages[0]); 957 reset_page(&card->mm_pages[0]);
@@ -949,7 +972,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
949 tasklet_init(&card->tasklet, process_page, (unsigned long)card); 972 tasklet_init(&card->tasklet, process_page, (unsigned long)card);
950 973
951 card->check_batteries = 0; 974 card->check_batteries = 0;
952 975
953 mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY); 976 mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
954 switch (mem_present) { 977 switch (mem_present) {
955 case MEM_128_MB: 978 case MEM_128_MB:
@@ -982,12 +1005,13 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
982 card->battery[1].good = !(batt_status & BATTERY_2_FAILURE); 1005 card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
983 card->battery[0].last_change = card->battery[1].last_change = jiffies; 1006 card->battery[0].last_change = card->battery[1].last_change = jiffies;
984 1007
985 if (card->flags & UM_FLAG_NO_BATT) 1008 if (card->flags & UM_FLAG_NO_BATT)
986 printk(KERN_INFO "MM%d: Size %d KB\n", 1009 dev_printk(KERN_INFO, &card->dev->dev,
987 card->card_number, card->mm_size); 1010 "Size %d KB\n", card->mm_size);
988 else { 1011 else {
989 printk(KERN_INFO "MM%d: Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", 1012 dev_printk(KERN_INFO, &card->dev->dev,
990 card->card_number, card->mm_size, 1013 "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
1014 card->mm_size,
991 (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"), 1015 (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"),
992 card->battery[0].good ? "OK" : "FAILURE", 1016 card->battery[0].good ? "OK" : "FAILURE",
993 (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"), 1017 (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"),
@@ -1005,19 +1029,16 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
1005 data = ~data; 1029 data = ~data;
1006 data += 1; 1030 data += 1;
1007 1031
1008 card->win_size = data; 1032 if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, card)) {
1009 1033 dev_printk(KERN_ERR, &card->dev->dev,
1010 1034 "Unable to allocate IRQ\n");
1011 if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, "pci-umem", card)) {
1012 printk(KERN_ERR "MM%d: Unable to allocate IRQ\n", card->card_number);
1013 ret = -ENODEV; 1035 ret = -ENODEV;
1014 1036
1015 goto failed_req_irq; 1037 goto failed_req_irq;
1016 } 1038 }
1017 1039
1018 card->irq = dev->irq; 1040 dev_printk(KERN_INFO, &card->dev->dev,
1019 printk(KERN_INFO "MM%d: Window size %d bytes, IRQ %d\n", card->card_number, 1041 "Window size %d bytes, IRQ %d\n", data, dev->irq);
1020 card->win_size, card->irq);
1021 1042
1022 spin_lock_init(&card->lock); 1043 spin_lock_init(&card->lock);
1023 1044
@@ -1037,10 +1058,12 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
1037 num_cards++; 1058 num_cards++;
1038 1059
1039 if (!get_userbit(card, MEMORY_INITIALIZED)) { 1060 if (!get_userbit(card, MEMORY_INITIALIZED)) {
1040 printk(KERN_INFO "MM%d: memory NOT initialized. Consider over-writing whole device.\n", card->card_number); 1061 dev_printk(KERN_INFO, &card->dev->dev,
1062 "memory NOT initialized. Consider over-writing whole device.\n");
1041 card->init_size = 0; 1063 card->init_size = 0;
1042 } else { 1064 } else {
1043 printk(KERN_INFO "MM%d: memory already initialized\n", card->card_number); 1065 dev_printk(KERN_INFO, &card->dev->dev,
1066 "memory already initialized\n");
1044 card->init_size = card->mm_size; 1067 card->init_size = card->mm_size;
1045 } 1068 }
1046 1069
@@ -1062,7 +1085,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
1062 failed_magic: 1085 failed_magic:
1063 iounmap(card->csr_remap); 1086 iounmap(card->csr_remap);
1064 failed_remap_csr: 1087 failed_remap_csr:
1065 release_mem_region(card->csr_base, card->csr_len); 1088 pci_release_regions(dev);
1066 failed_req_csr: 1089 failed_req_csr:
1067 1090
1068 return ret; 1091 return ret;
@@ -1077,9 +1100,8 @@ static void mm_pci_remove(struct pci_dev *dev)
1077 struct cardinfo *card = pci_get_drvdata(dev); 1100 struct cardinfo *card = pci_get_drvdata(dev);
1078 1101
1079 tasklet_kill(&card->tasklet); 1102 tasklet_kill(&card->tasklet);
1103 free_irq(dev->irq, card);
1080 iounmap(card->csr_remap); 1104 iounmap(card->csr_remap);
1081 release_mem_region(card->csr_base, card->csr_len);
1082 free_irq(card->irq, card);
1083 1105
1084 if (card->mm_pages[0].desc) 1106 if (card->mm_pages[0].desc)
1085 pci_free_consistent(card->dev, PAGE_SIZE*2, 1107 pci_free_consistent(card->dev, PAGE_SIZE*2,
@@ -1090,6 +1112,9 @@ static void mm_pci_remove(struct pci_dev *dev)
1090 card->mm_pages[1].desc, 1112 card->mm_pages[1].desc,
1091 card->mm_pages[1].page_dma); 1113 card->mm_pages[1].page_dma);
1092 blk_cleanup_queue(card->queue); 1114 blk_cleanup_queue(card->queue);
1115
1116 pci_release_regions(dev);
1117 pci_disable_device(dev);
1093} 1118}
1094 1119
1095static const struct pci_device_id mm_pci_ids[] = { 1120static const struct pci_device_id mm_pci_ids[] = {
@@ -1109,11 +1134,12 @@ static const struct pci_device_id mm_pci_ids[] = {
1109MODULE_DEVICE_TABLE(pci, mm_pci_ids); 1134MODULE_DEVICE_TABLE(pci, mm_pci_ids);
1110 1135
1111static struct pci_driver mm_pci_driver = { 1136static struct pci_driver mm_pci_driver = {
1112 .name = "umem", 1137 .name = DRIVER_NAME,
1113 .id_table = mm_pci_ids, 1138 .id_table = mm_pci_ids,
1114 .probe = mm_pci_probe, 1139 .probe = mm_pci_probe,
1115 .remove = mm_pci_remove, 1140 .remove = mm_pci_remove,
1116}; 1141};
1142
1117/* 1143/*
1118----------------------------------------------------------------------------------- 1144-----------------------------------------------------------------------------------
1119-- mm_init 1145-- mm_init
@@ -1125,13 +1151,11 @@ static int __init mm_init(void)
1125 int retval, i; 1151 int retval, i;
1126 int err; 1152 int err;
1127 1153
1128 printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
1129
1130 retval = pci_register_driver(&mm_pci_driver); 1154 retval = pci_register_driver(&mm_pci_driver);
1131 if (retval) 1155 if (retval)
1132 return -ENOMEM; 1156 return -ENOMEM;
1133 1157
1134 err = major_nr = register_blkdev(0, "umem"); 1158 err = major_nr = register_blkdev(0, DRIVER_NAME);
1135 if (err < 0) { 1159 if (err < 0) {
1136 pci_unregister_driver(&mm_pci_driver); 1160 pci_unregister_driver(&mm_pci_driver);
1137 return -EIO; 1161 return -EIO;
@@ -1157,13 +1181,13 @@ static int __init mm_init(void)
1157 } 1181 }
1158 1182
1159 init_battery_timer(); 1183 init_battery_timer();
1160 printk("MM: desc_per_page = %ld\n", DESC_PER_PAGE); 1184 printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE);
1161/* printk("mm_init: Done. 10-19-01 9:00\n"); */ 1185/* printk("mm_init: Done. 10-19-01 9:00\n"); */
1162 return 0; 1186 return 0;
1163 1187
1164out: 1188out:
1165 pci_unregister_driver(&mm_pci_driver); 1189 pci_unregister_driver(&mm_pci_driver);
1166 unregister_blkdev(major_nr, "umem"); 1190 unregister_blkdev(major_nr, DRIVER_NAME);
1167 while (i--) 1191 while (i--)
1168 put_disk(mm_gendisk[i]); 1192 put_disk(mm_gendisk[i]);
1169 return -ENOMEM; 1193 return -ENOMEM;
@@ -1186,7 +1210,7 @@ static void __exit mm_cleanup(void)
1186 1210
1187 pci_unregister_driver(&mm_pci_driver); 1211 pci_unregister_driver(&mm_pci_driver);
1188 1212
1189 unregister_blkdev(major_nr, "umem"); 1213 unregister_blkdev(major_nr, DRIVER_NAME);
1190} 1214}
1191 1215
1192module_init(mm_init); 1216module_init(mm_init);
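
Beyond the bio and iterator changes, the umem conversion replaces request_mem_region()/release_mem_region() with pci_request_regions()/pci_release_regions(), drops the cached csr_base/csr_len/irq/card_number fields, and switches the MM%d printk()s to dev_printk() against the PCI device. A pared-down probe of that shape (the example_ names are placeholders and all device-specific setup is omitted):

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/io.h>

#define EXAMPLE_NAME "example"

static int __devinit example_probe(struct pci_dev *dev,
				   const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;
	pci_set_master(dev);

	ret = pci_request_regions(dev, EXAMPLE_NAME);	/* claims all BARs at once */
	if (ret) {
		dev_printk(KERN_ERR, &dev->dev, "unable to request regions\n");
		goto out_disable;
	}

	regs = ioremap_nocache(pci_resource_start(dev, 0),
			       pci_resource_len(dev, 0));
	if (!regs) {
		dev_printk(KERN_ERR, &dev->dev, "unable to remap BAR 0\n");
		ret = -ENOMEM;
		goto out_release;
	}

	/* device-specific setup would go here */
	dev_printk(KERN_INFO, &dev->dev, "probe complete\n");
	return 0;

out_release:
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
	return ret;
}
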
diff --git a/include/linux/umem.h b/drivers/block/umem.h
index f36ebfc32bf6..375c68974c9a 100644
--- a/include/linux/umem.h
+++ b/drivers/block/umem.h
@@ -87,13 +87,13 @@
87#define DMASCR_DMA_COMPLETE 0x40000 87#define DMASCR_DMA_COMPLETE 0x40000
88#define DMASCR_CHAIN_COMPLETE 0x80000 88#define DMASCR_CHAIN_COMPLETE 0x80000
89 89
90/* 90/*
913.SOME PCs HAVE HOST BRIDGES WHICH APPARENTLY DO NOT CORRECTLY HANDLE 913.SOME PCs HAVE HOST BRIDGES WHICH APPARENTLY DO NOT CORRECTLY HANDLE
92READ-LINE (0xE) OR READ-MULTIPLE (0xC) PCI COMMAND CODES DURING DMA 92READ-LINE (0xE) OR READ-MULTIPLE (0xC) PCI COMMAND CODES DURING DMA
93TRANSFERS. IN OTHER SYSTEMS THESE COMMAND CODES WILL CAUSE THE HOST BRIDGE 93TRANSFERS. IN OTHER SYSTEMS THESE COMMAND CODES WILL CAUSE THE HOST BRIDGE
94TO ALLOW LONGER BURSTS DURING DMA READ OPERATIONS. THE UPPER FOUR BITS 94TO ALLOW LONGER BURSTS DURING DMA READ OPERATIONS. THE UPPER FOUR BITS
95(31..28) OF THE DMA CSR HAVE BEEN MADE PROGRAMMABLE, SO THAT EITHER A 0x6, 95(31..28) OF THE DMA CSR HAVE BEEN MADE PROGRAMMABLE, SO THAT EITHER A 0x6,
96AN 0xE OR A 0xC CAN BE WRITTEN TO THEM TO SET THE COMMAND CODE USED DURING 96AN 0xE OR A 0xC CAN BE WRITTEN TO THEM TO SET THE COMMAND CODE USED DURING
97DMA READ OPERATIONS. 97DMA READ OPERATIONS.
98*/ 98*/
99#define DMASCR_READ 0x60000000 99#define DMASCR_READ 0x60000000
@@ -125,11 +125,6 @@ struct mm_dma_desc {
125 __le64 sem_control_bits; 125 __le64 sem_control_bits;
126} __attribute__((aligned(8))); 126} __attribute__((aligned(8)));
127 127
128#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
129#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415
130#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425
131#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155
132
133/* bits for card->flags */ 128/* bits for card->flags */
134#define UM_FLAG_DMA_IN_REGS 1 129#define UM_FLAG_DMA_IN_REGS 1
135#define UM_FLAG_NO_BYTE_STATUS 2 130#define UM_FLAG_NO_BYTE_STATUS 2
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 964e51634f2d..2bdebcb3ff16 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -150,9 +150,8 @@ static int blkif_queue_request(struct request *req)
150 struct blkfront_info *info = req->rq_disk->private_data; 150 struct blkfront_info *info = req->rq_disk->private_data;
151 unsigned long buffer_mfn; 151 unsigned long buffer_mfn;
152 struct blkif_request *ring_req; 152 struct blkif_request *ring_req;
153 struct bio *bio; 153 struct req_iterator iter;
154 struct bio_vec *bvec; 154 struct bio_vec *bvec;
155 int idx;
156 unsigned long id; 155 unsigned long id;
157 unsigned int fsect, lsect; 156 unsigned int fsect, lsect;
158 int ref; 157 int ref;
@@ -186,34 +185,31 @@ static int blkif_queue_request(struct request *req)
186 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 185 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
187 186
188 ring_req->nr_segments = 0; 187 ring_req->nr_segments = 0;
189 rq_for_each_bio (bio, req) { 188 rq_for_each_segment(bvec, req, iter) {
190 bio_for_each_segment (bvec, bio, idx) { 189 BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
191 BUG_ON(ring_req->nr_segments 190 buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
192 == BLKIF_MAX_SEGMENTS_PER_REQUEST); 191 fsect = bvec->bv_offset >> 9;
193 buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); 192 lsect = fsect + (bvec->bv_len >> 9) - 1;
194 fsect = bvec->bv_offset >> 9; 193 /* install a grant reference. */
195 lsect = fsect + (bvec->bv_len >> 9) - 1; 194 ref = gnttab_claim_grant_reference(&gref_head);
196 /* install a grant reference. */ 195 BUG_ON(ref == -ENOSPC);
197 ref = gnttab_claim_grant_reference(&gref_head); 196
198 BUG_ON(ref == -ENOSPC); 197 gnttab_grant_foreign_access_ref(
199
200 gnttab_grant_foreign_access_ref(
201 ref, 198 ref,
202 info->xbdev->otherend_id, 199 info->xbdev->otherend_id,
203 buffer_mfn, 200 buffer_mfn,
204 rq_data_dir(req) ); 201 rq_data_dir(req) );
205 202
206 info->shadow[id].frame[ring_req->nr_segments] = 203 info->shadow[id].frame[ring_req->nr_segments] =
207 mfn_to_pfn(buffer_mfn); 204 mfn_to_pfn(buffer_mfn);
208 205
209 ring_req->seg[ring_req->nr_segments] = 206 ring_req->seg[ring_req->nr_segments] =
210 (struct blkif_request_segment) { 207 (struct blkif_request_segment) {
211 .gref = ref, 208 .gref = ref,
212 .first_sect = fsect, 209 .first_sect = fsect,
213 .last_sect = lsect }; 210 .last_sect = lsect };
214 211
215 ring_req->nr_segments++; 212 ring_req->nr_segments++;
216 }
217 } 213 }
218 214
219 info->ring.req_prod_pvt++; 215 info->ring.req_prod_pvt++;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 3ede0b63da13..9e7652dcde6c 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -91,6 +91,10 @@
91#include <linux/blkdev.h> 91#include <linux/blkdev.h>
92#include <linux/hdreg.h> 92#include <linux/hdreg.h>
93#include <linux/platform_device.h> 93#include <linux/platform_device.h>
94#if defined(CONFIG_OF)
95#include <linux/of_device.h>
96#include <linux/of_platform.h>
97#endif
94 98
95MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); 99MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
96MODULE_DESCRIPTION("Xilinx SystemACE device driver"); 100MODULE_DESCRIPTION("Xilinx SystemACE device driver");
@@ -158,6 +162,9 @@ MODULE_LICENSE("GPL");
158#define ACE_FIFO_SIZE (32) 162#define ACE_FIFO_SIZE (32)
159#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE) 163#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
160 164
165#define ACE_BUS_WIDTH_8 0
166#define ACE_BUS_WIDTH_16 1
167
161struct ace_reg_ops; 168struct ace_reg_ops;
162 169
163struct ace_device { 170struct ace_device {
@@ -188,7 +195,7 @@ struct ace_device {
188 195
189 /* Details of hardware device */ 196 /* Details of hardware device */
190 unsigned long physaddr; 197 unsigned long physaddr;
191 void *baseaddr; 198 void __iomem *baseaddr;
192 int irq; 199 int irq;
193 int bus_width; /* 0 := 8 bit; 1 := 16 bit */ 200 int bus_width; /* 0 := 8 bit; 1 := 16 bit */
194 struct ace_reg_ops *reg_ops; 201 struct ace_reg_ops *reg_ops;
@@ -220,20 +227,20 @@ struct ace_reg_ops {
220/* 8 Bit bus width */ 227/* 8 Bit bus width */
221static u16 ace_in_8(struct ace_device *ace, int reg) 228static u16 ace_in_8(struct ace_device *ace, int reg)
222{ 229{
223 void *r = ace->baseaddr + reg; 230 void __iomem *r = ace->baseaddr + reg;
224 return in_8(r) | (in_8(r + 1) << 8); 231 return in_8(r) | (in_8(r + 1) << 8);
225} 232}
226 233
227static void ace_out_8(struct ace_device *ace, int reg, u16 val) 234static void ace_out_8(struct ace_device *ace, int reg, u16 val)
228{ 235{
229 void *r = ace->baseaddr + reg; 236 void __iomem *r = ace->baseaddr + reg;
230 out_8(r, val); 237 out_8(r, val);
231 out_8(r + 1, val >> 8); 238 out_8(r + 1, val >> 8);
232} 239}
233 240
234static void ace_datain_8(struct ace_device *ace) 241static void ace_datain_8(struct ace_device *ace)
235{ 242{
236 void *r = ace->baseaddr + 0x40; 243 void __iomem *r = ace->baseaddr + 0x40;
237 u8 *dst = ace->data_ptr; 244 u8 *dst = ace->data_ptr;
238 int i = ACE_FIFO_SIZE; 245 int i = ACE_FIFO_SIZE;
239 while (i--) 246 while (i--)
@@ -243,7 +250,7 @@ static void ace_datain_8(struct ace_device *ace)
243 250
244static void ace_dataout_8(struct ace_device *ace) 251static void ace_dataout_8(struct ace_device *ace)
245{ 252{
246 void *r = ace->baseaddr + 0x40; 253 void __iomem *r = ace->baseaddr + 0x40;
247 u8 *src = ace->data_ptr; 254 u8 *src = ace->data_ptr;
248 int i = ACE_FIFO_SIZE; 255 int i = ACE_FIFO_SIZE;
249 while (i--) 256 while (i--)
@@ -931,9 +938,11 @@ static int __devinit ace_setup(struct ace_device *ace)
931{ 938{
932 u16 version; 939 u16 version;
933 u16 val; 940 u16 val;
934
935 int rc; 941 int rc;
936 942
943 dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
944 dev_dbg(ace->dev, "physaddr=0x%lx irq=%i\n", ace->physaddr, ace->irq);
945
937 spin_lock_init(&ace->lock); 946 spin_lock_init(&ace->lock);
938 init_completion(&ace->id_completion); 947 init_completion(&ace->id_completion);
939 948
@@ -944,15 +953,6 @@ static int __devinit ace_setup(struct ace_device *ace)
944 if (!ace->baseaddr) 953 if (!ace->baseaddr)
945 goto err_ioremap; 954 goto err_ioremap;
946 955
947 if (ace->irq != NO_IRQ) {
948 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
949 if (rc) {
950 /* Failure - fall back to polled mode */
951 dev_err(ace->dev, "request_irq failed\n");
952 ace->irq = NO_IRQ;
953 }
954 }
955
956 /* 956 /*
957 * Initialize the state machine tasklet and stall timer 957 * Initialize the state machine tasklet and stall timer
958 */ 958 */
@@ -982,7 +982,7 @@ static int __devinit ace_setup(struct ace_device *ace)
982 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); 982 snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
983 983
984 /* set bus width */ 984 /* set bus width */
985 if (ace->bus_width == 1) { 985 if (ace->bus_width == ACE_BUS_WIDTH_16) {
986 /* 0x0101 should work regardless of endianess */ 986 /* 0x0101 should work regardless of endianess */
987 ace_out_le16(ace, ACE_BUSMODE, 0x0101); 987 ace_out_le16(ace, ACE_BUSMODE, 0x0101);
988 988
@@ -1005,6 +1005,16 @@ static int __devinit ace_setup(struct ace_device *ace)
1005 ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | 1005 ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
1006 ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); 1006 ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
1007 1007
1008 /* Now we can hook up the irq handler */
1009 if (ace->irq != NO_IRQ) {
1010 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
1011 if (rc) {
1012 /* Failure - fall back to polled mode */
1013 dev_err(ace->dev, "request_irq failed\n");
1014 ace->irq = NO_IRQ;
1015 }
1016 }
1017
1008 /* Enable interrupts */ 1018 /* Enable interrupts */
1009 val = ace_in(ace, ACE_CTRL); 1019 val = ace_in(ace, ACE_CTRL);
1010 val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; 1020 val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
@@ -1024,16 +1034,14 @@ static int __devinit ace_setup(struct ace_device *ace)
1024 1034
1025 return 0; 1035 return 0;
1026 1036
1027 err_read: 1037err_read:
1028 put_disk(ace->gd); 1038 put_disk(ace->gd);
1029 err_alloc_disk: 1039err_alloc_disk:
1030 blk_cleanup_queue(ace->queue); 1040 blk_cleanup_queue(ace->queue);
1031 err_blk_initq: 1041err_blk_initq:
1032 iounmap(ace->baseaddr); 1042 iounmap(ace->baseaddr);
1033 if (ace->irq != NO_IRQ) 1043err_ioremap:
1034 free_irq(ace->irq, ace); 1044 dev_info(ace->dev, "xsysace: error initializing device at 0x%lx\n",
1035 err_ioremap:
1036 printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
1037 ace->physaddr); 1045 ace->physaddr);
1038 return -ENOMEM; 1046 return -ENOMEM;
1039} 1047}
@@ -1056,98 +1064,222 @@ static void __devexit ace_teardown(struct ace_device *ace)
1056 iounmap(ace->baseaddr); 1064 iounmap(ace->baseaddr);
1057} 1065}
1058 1066
1059/* --------------------------------------------------------------------- 1067static int __devinit
1060 * Platform Bus Support 1068ace_alloc(struct device *dev, int id, unsigned long physaddr,
1061 */ 1069 int irq, int bus_width)
1062
1063static int __devinit ace_probe(struct device *device)
1064{ 1070{
1065 struct platform_device *dev = to_platform_device(device);
1066 struct ace_device *ace; 1071 struct ace_device *ace;
1067 int i; 1072 int rc;
1073 dev_dbg(dev, "ace_alloc(%p)\n", dev);
1068 1074
1069 dev_dbg(device, "ace_probe(%p)\n", device); 1075 if (!physaddr) {
1076 rc = -ENODEV;
1077 goto err_noreg;
1078 }
1070 1079
1071 /* 1080 /* Allocate and initialize the ace device structure */
1072 * Allocate the ace device structure
1073 */
1074 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); 1081 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
1075 if (!ace) 1082 if (!ace) {
1083 rc = -ENOMEM;
1076 goto err_alloc; 1084 goto err_alloc;
1077
1078 ace->dev = device;
1079 ace->id = dev->id;
1080 ace->irq = NO_IRQ;
1081
1082 for (i = 0; i < dev->num_resources; i++) {
1083 if (dev->resource[i].flags & IORESOURCE_MEM)
1084 ace->physaddr = dev->resource[i].start;
1085 if (dev->resource[i].flags & IORESOURCE_IRQ)
1086 ace->irq = dev->resource[i].start;
1087 } 1085 }
1088 1086
1089 /* FIXME: Should get bus_width from the platform_device struct */ 1087 ace->dev = dev;
1090 ace->bus_width = 1; 1088 ace->id = id;
1091 1089 ace->physaddr = physaddr;
1092 dev_set_drvdata(&dev->dev, ace); 1090 ace->irq = irq;
1091 ace->bus_width = bus_width;
1093 1092
1094 /* Call the bus-independant setup code */ 1093 /* Call the setup code */
1095 if (ace_setup(ace) != 0) 1094 rc = ace_setup(ace);
1095 if (rc)
1096 goto err_setup; 1096 goto err_setup;
1097 1097
1098 dev_set_drvdata(dev, ace);
1098 return 0; 1099 return 0;
1099 1100
1100 err_setup: 1101err_setup:
1101 dev_set_drvdata(&dev->dev, NULL); 1102 dev_set_drvdata(dev, NULL);
1102 kfree(ace); 1103 kfree(ace);
1103 err_alloc: 1104err_alloc:
1104 printk(KERN_ERR "xsysace: could not initialize device\n"); 1105err_noreg:
1105 return -ENOMEM; 1106 dev_err(dev, "could not initialize device, err=%i\n", rc);
1107 return rc;
1106} 1108}
1107 1109
1108/* 1110static void __devexit ace_free(struct device *dev)
1109 * Platform bus remove() method
1110 */
1111static int __devexit ace_remove(struct device *device)
1112{ 1111{
1113 struct ace_device *ace = dev_get_drvdata(device); 1112 struct ace_device *ace = dev_get_drvdata(dev);
1114 1113 dev_dbg(dev, "ace_free(%p)\n", dev);
1115 dev_dbg(device, "ace_remove(%p)\n", device);
1116 1114
1117 if (ace) { 1115 if (ace) {
1118 ace_teardown(ace); 1116 ace_teardown(ace);
1117 dev_set_drvdata(dev, NULL);
1119 kfree(ace); 1118 kfree(ace);
1120 } 1119 }
1120}
1121
1122/* ---------------------------------------------------------------------
1123 * Platform Bus Support
1124 */
1125
1126static int __devinit ace_probe(struct platform_device *dev)
1127{
1128 unsigned long physaddr = 0;
1129 int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
1130 int id = dev->id;
1131 int irq = NO_IRQ;
1132 int i;
1121 1133
1134 dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
1135
1136 for (i = 0; i < dev->num_resources; i++) {
1137 if (dev->resource[i].flags & IORESOURCE_MEM)
1138 physaddr = dev->resource[i].start;
1139 if (dev->resource[i].flags & IORESOURCE_IRQ)
1140 irq = dev->resource[i].start;
1141 }
1142
 1143 /* Call the bus-independent setup code */
1144 return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
1145}
1146
1147/*
1148 * Platform bus remove() method
1149 */
1150static int __devexit ace_remove(struct platform_device *dev)
1151{
1152 ace_free(&dev->dev);
1122 return 0; 1153 return 0;
1123} 1154}
1124 1155
1125static struct device_driver ace_driver = { 1156static struct platform_driver ace_platform_driver = {
1126 .name = "xsysace",
1127 .bus = &platform_bus_type,
1128 .probe = ace_probe, 1157 .probe = ace_probe,
1129 .remove = __devexit_p(ace_remove), 1158 .remove = __devexit_p(ace_remove),
1159 .driver = {
1160 .owner = THIS_MODULE,
1161 .name = "xsysace",
1162 },
1163};
1164
1165/* ---------------------------------------------------------------------
1166 * OF_Platform Bus Support
1167 */
1168
1169#if defined(CONFIG_OF)
1170static int __devinit
1171ace_of_probe(struct of_device *op, const struct of_device_id *match)
1172{
1173 struct resource res;
1174 unsigned long physaddr;
1175 const u32 *id;
1176 int irq, bus_width, rc;
1177
1178 dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
1179
1180 /* device id */
1181 id = of_get_property(op->node, "port-number", NULL);
1182
1183 /* physaddr */
1184 rc = of_address_to_resource(op->node, 0, &res);
1185 if (rc) {
1186 dev_err(&op->dev, "invalid address\n");
1187 return rc;
1188 }
1189 physaddr = res.start;
1190
1191 /* irq */
1192 irq = irq_of_parse_and_map(op->node, 0);
1193
1194 /* bus width */
1195 bus_width = ACE_BUS_WIDTH_16;
1196 if (of_find_property(op->node, "8-bit", NULL))
1197 bus_width = ACE_BUS_WIDTH_8;
1198
 1199 /* Call the bus-independent setup code */
1200 return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
1201}
1202
1203static int __devexit ace_of_remove(struct of_device *op)
1204{
1205 ace_free(&op->dev);
1206 return 0;
1207}
1208
1209/* Match table for of_platform binding */
1210static struct of_device_id __devinit ace_of_match[] = {
1211 { .compatible = "xilinx,xsysace", },
1212 {},
1213};
1214MODULE_DEVICE_TABLE(of, ace_of_match);
1215
1216static struct of_platform_driver ace_of_driver = {
1217 .owner = THIS_MODULE,
1218 .name = "xsysace",
1219 .match_table = ace_of_match,
1220 .probe = ace_of_probe,
1221 .remove = __devexit_p(ace_of_remove),
1222 .driver = {
1223 .name = "xsysace",
1224 },
1130}; 1225};
1131 1226
1227/* Registration helpers to keep the number of #ifdefs to a minimum */
1228static inline int __init ace_of_register(void)
1229{
1230 pr_debug("xsysace: registering OF binding\n");
1231 return of_register_platform_driver(&ace_of_driver);
1232}
1233
1234static inline void __exit ace_of_unregister(void)
1235{
1236 of_unregister_platform_driver(&ace_of_driver);
1237}
1238#else /* CONFIG_OF */
1239/* CONFIG_OF not enabled; do nothing helpers */
1240static inline int __init ace_of_register(void) { return 0; }
1241static inline void __exit ace_of_unregister(void) { }
1242#endif /* CONFIG_OF */
1243
1132/* --------------------------------------------------------------------- 1244/* ---------------------------------------------------------------------
1133 * Module init/exit routines 1245 * Module init/exit routines
1134 */ 1246 */
1135static int __init ace_init(void) 1247static int __init ace_init(void)
1136{ 1248{
1249 int rc;
1250
1137 ace_major = register_blkdev(ace_major, "xsysace"); 1251 ace_major = register_blkdev(ace_major, "xsysace");
1138 if (ace_major <= 0) { 1252 if (ace_major <= 0) {
1139 printk(KERN_WARNING "xsysace: register_blkdev() failed\n"); 1253 rc = -ENOMEM;
1140 return ace_major; 1254 goto err_blk;
1141 } 1255 }
1142 1256
1143 pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major); 1257 rc = ace_of_register();
1144 return driver_register(&ace_driver); 1258 if (rc)
1259 goto err_of;
1260
1261 pr_debug("xsysace: registering platform binding\n");
1262 rc = platform_driver_register(&ace_platform_driver);
1263 if (rc)
1264 goto err_plat;
1265
1266 pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
1267 return 0;
1268
1269err_plat:
1270 ace_of_unregister();
1271err_of:
1272 unregister_blkdev(ace_major, "xsysace");
1273err_blk:
1274 printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
1275 return rc;
1145} 1276}
1146 1277
1147static void __exit ace_exit(void) 1278static void __exit ace_exit(void)
1148{ 1279{
1149 pr_debug("Unregistering Xilinx SystemACE driver\n"); 1280 pr_debug("Unregistering Xilinx SystemACE driver\n");
1150 driver_unregister(&ace_driver); 1281 platform_driver_unregister(&ace_platform_driver);
1282 ace_of_unregister();
1151 unregister_blkdev(ace_major, "xsysace"); 1283 unregister_blkdev(ace_major, "xsysace");
1152} 1284}
1153 1285
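
The xsysace changes above split device setup into bus-independent ace_alloc()/ace_free() helpers and register the driver on both the platform bus and, when CONFIG_OF is enabled, the of_platform bus, with stub helpers keeping the module init path free of #ifdefs. A sketch of the same registration and unwind idiom for a hypothetical driver "foo", assuming the platform/of_platform APIs used in this tree (probe/remove methods omitted for brevity; the compatible string is made up):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #if defined(CONFIG_OF)
    #include <linux/of_device.h>
    #include <linux/of_platform.h>
    #endif

    static struct platform_driver foo_platform_driver = {
            /* .probe and .remove omitted from this sketch */
            .driver = {
                    .owner = THIS_MODULE,
                    .name  = "foo",
            },
    };

    #if defined(CONFIG_OF)
    static struct of_device_id foo_of_match[] = {
            { .compatible = "acme,foo", },  /* made-up compatible string */
            {},
    };

    static struct of_platform_driver foo_of_driver = {
            .owner       = THIS_MODULE,
            .name        = "foo",
            .match_table = foo_of_match,
            /* .probe and .remove omitted from this sketch */
            .driver      = {
                    .name = "foo",
            },
    };

    static inline int __init foo_of_register(void)
    {
            return of_register_platform_driver(&foo_of_driver);
    }
    static inline void __exit foo_of_unregister(void)
    {
            of_unregister_platform_driver(&foo_of_driver);
    }
    #else /* CONFIG_OF */
    /* CONFIG_OF disabled: do-nothing stubs keep foo_init() ifdef-free. */
    static inline int __init foo_of_register(void) { return 0; }
    static inline void __exit foo_of_unregister(void) { }
    #endif /* CONFIG_OF */

    static int __init foo_init(void)
    {
            int rc;

            rc = foo_of_register();
            if (rc)
                    return rc;

            rc = platform_driver_register(&foo_platform_driver);
            if (rc)
                    foo_of_unregister();    /* unwind in reverse order on failure */
            return rc;
    }

    static void __exit foo_exit(void)
    {
            platform_driver_unregister(&foo_platform_driver);
            foo_of_unregister();
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");

Unwinding in the reverse order of registration mirrors the err_plat/err_of/err_blk labels that ace_init() gains above.
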
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index ae8e1a64b8ad..04a357808f2e 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -606,26 +606,24 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
606{ 606{
607 struct request *rq = pc->rq; 607 struct request *rq = pc->rq;
608 struct bio_vec *bvec; 608 struct bio_vec *bvec;
609 struct bio *bio; 609 struct req_iterator iter;
610 unsigned long flags; 610 unsigned long flags;
611 char *data; 611 char *data;
612 int count, i, done = 0; 612 int count, done = 0;
613 613
614 rq_for_each_bio(bio, rq) { 614 rq_for_each_segment(bvec, rq, iter) {
615 bio_for_each_segment(bvec, bio, i) { 615 if (!bcount)
616 if (!bcount) 616 break;
617 break;
618 617
619 count = min(bvec->bv_len, bcount); 618 count = min(bvec->bv_len, bcount);
620 619
621 data = bvec_kmap_irq(bvec, &flags); 620 data = bvec_kmap_irq(bvec, &flags);
622 drive->hwif->atapi_input_bytes(drive, data, count); 621 drive->hwif->atapi_input_bytes(drive, data, count);
623 bvec_kunmap_irq(data, &flags); 622 bvec_kunmap_irq(data, &flags);
624 623
625 bcount -= count; 624 bcount -= count;
626 pc->b_count += count; 625 pc->b_count += count;
627 done += count; 626 done += count;
628 }
629 } 627 }
630 628
631 idefloppy_do_end_request(drive, 1, done >> 9); 629 idefloppy_do_end_request(drive, 1, done >> 9);
@@ -639,27 +637,25 @@ static void idefloppy_input_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, uns
639static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount) 637static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount)
640{ 638{
641 struct request *rq = pc->rq; 639 struct request *rq = pc->rq;
642 struct bio *bio; 640 struct req_iterator iter;
643 struct bio_vec *bvec; 641 struct bio_vec *bvec;
644 unsigned long flags; 642 unsigned long flags;
645 int count, i, done = 0; 643 int count, done = 0;
646 char *data; 644 char *data;
647 645
648 rq_for_each_bio(bio, rq) { 646 rq_for_each_segment(bvec, rq, iter) {
649 bio_for_each_segment(bvec, bio, i) { 647 if (!bcount)
650 if (!bcount) 648 break;
651 break;
652 649
653 count = min(bvec->bv_len, bcount); 650 count = min(bvec->bv_len, bcount);
654 651
655 data = bvec_kmap_irq(bvec, &flags); 652 data = bvec_kmap_irq(bvec, &flags);
656 drive->hwif->atapi_output_bytes(drive, data, count); 653 drive->hwif->atapi_output_bytes(drive, data, count);
657 bvec_kunmap_irq(data, &flags); 654 bvec_kunmap_irq(data, &flags);
658 655
659 bcount -= count; 656 bcount -= count;
660 pc->b_count += count; 657 pc->b_count += count;
661 done += count; 658 done += count;
662 }
663 } 659 }
664 660
665 idefloppy_do_end_request(drive, 1, done >> 9); 661 idefloppy_do_end_request(drive, 1, done >> 9);
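
The ide-floppy hunks show the data-transfer side of the same conversion: per-segment copies now run in one flat rq_for_each_segment() loop, mapping each bio_vec with bvec_kmap_irq(). A condensed sketch of that shape, with a hypothetical xfer_bytes() callback standing in for the ATAPI input/output routine:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hypothetical: feed up to bcount bytes of a request's data to xfer_bytes(). */
    static unsigned int transfer_request_data(struct request *rq, unsigned int bcount,
                                              void (*xfer_bytes)(char *data, unsigned int count))
    {
            struct req_iterator iter;
            struct bio_vec *bvec;
            unsigned long flags;
            unsigned int count, done = 0;
            char *data;

            rq_for_each_segment(bvec, rq, iter) {
                    if (!bcount)
                            break;

                    count = min(bvec->bv_len, bcount);

                    /* Atomic kmap of the segment, as the driver does. */
                    data = bvec_kmap_irq(bvec, &flags);
                    xfer_bytes(data, count);
                    bvec_kunmap_irq(data, &flags);

                    bcount -= count;
                    done += count;
            }
            return done;
    }
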
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index bdc52d6922b7..8216a6f75be5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -489,7 +489,7 @@ static void dec_pending(struct dm_crypt_io *io, int error)
489 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
490 return; 490 return;
491 491
492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->error);
493 493
494 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
495} 495}
@@ -509,25 +509,19 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
509 queue_work(_kcryptd_workqueue, &io->work); 509 queue_work(_kcryptd_workqueue, &io->work);
510} 510}
511 511
512static int crypt_endio(struct bio *clone, unsigned int done, int error) 512static void crypt_endio(struct bio *clone, int error)
513{ 513{
514 struct dm_crypt_io *io = clone->bi_private; 514 struct dm_crypt_io *io = clone->bi_private;
515 struct crypt_config *cc = io->target->private; 515 struct crypt_config *cc = io->target->private;
516 unsigned read_io = bio_data_dir(clone) == READ; 516 unsigned read_io = bio_data_dir(clone) == READ;
517 517
518 /* 518 /*
519 * free the processed pages, even if 519 * free the processed pages
520 * it's only a partially completed write
521 */ 520 */
522 if (!read_io) 521 if (!read_io) {
523 crypt_free_buffer_pages(cc, clone, done); 522 crypt_free_buffer_pages(cc, clone, clone->bi_size);
524
525 /* keep going - not finished yet */
526 if (unlikely(clone->bi_size))
527 return 1;
528
529 if (!read_io)
530 goto out; 523 goto out;
524 }
531 525
532 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) { 526 if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
533 error = -EIO; 527 error = -EIO;
@@ -537,12 +531,11 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
537 bio_put(clone); 531 bio_put(clone);
538 io->post_process = 1; 532 io->post_process = 1;
539 kcryptd_queue_io(io); 533 kcryptd_queue_io(io);
540 return 0; 534 return;
541 535
542out: 536out:
543 bio_put(clone); 537 bio_put(clone);
544 dec_pending(io, error); 538 dec_pending(io, error);
545 return error;
546} 539}
547 540
548static void clone_init(struct dm_crypt_io *io, struct bio *clone) 541static void clone_init(struct dm_crypt_io *io, struct bio *clone)
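
The dm-crypt conversion is representative of the whole series: bi_end_io handlers lose the bytes_done argument and the int return because partial completions no longer reach them, and bio_endio() is called exactly once per bio. A minimal sketch of a new-style completion handler that tracks a set of clone bios, using hypothetical my_io/my_clone_endio names:

    #include <linux/bio.h>
    #include <linux/errno.h>
    #include <asm/atomic.h>

    struct my_io {
            struct bio *master;     /* bio handed down by the upper layer */
            atomic_t    pending;    /* clones still in flight */
            int         error;      /* first error seen, if any */
    };

    /* New-style end_io: void return, no bytes_done, called once per clone. */
    static void my_clone_endio(struct bio *clone, int error)
    {
            struct my_io *io = clone->bi_private;

            if (!error && !bio_flagged(clone, BIO_UPTODATE))
                    error = -EIO;
            if (error)
                    io->error = error;

            bio_put(clone);

            /* Finish the master bio once, when the last clone completes. */
            if (atomic_dec_and_test(&io->pending))
                    bio_endio(io->master, io->error);
    }

dm-crypt's crypt_endio()/dec_pending() pair above follows the same shape, with the extra step of freeing the clone's pages on writes.
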
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 265c467854da..a2191a4fcf77 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -38,13 +38,10 @@ static inline void free_bio(struct bio *bio)
38 bio_put(bio); 38 bio_put(bio);
39} 39}
40 40
41static int emc_endio(struct bio *bio, unsigned int bytes_done, int error) 41static void emc_endio(struct bio *bio, int error)
42{ 42{
43 struct dm_path *path = bio->bi_private; 43 struct dm_path *path = bio->bi_private;
44 44
45 if (bio->bi_size)
46 return 1;
47
48 /* We also need to look at the sense keys here whether or not to 45 /* We also need to look at the sense keys here whether or not to
49 * switch to the next PG etc. 46 * switch to the next PG etc.
50 * 47 *
@@ -109,15 +106,7 @@ static struct request *get_failover_req(struct emc_handler *h,
109 return NULL; 106 return NULL;
110 } 107 }
111 108
112 rq->bio = rq->biotail = bio; 109 blk_rq_append_bio(q, rq, bio);
113 blk_rq_bio_prep(q, rq, bio);
114
115 rq->rq_disk = bdev->bd_contains->bd_disk;
116
117 /* bio backed don't set data */
118 rq->buffer = rq->data = NULL;
119 /* rq data_len used for pc cmd's request_bufflen */
120 rq->data_len = bio->bi_size;
121 110
122 rq->sense = h->sense; 111 rq->sense = h->sense;
123 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 112 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
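
dm-emc now hands the pre-built bio to blk_rq_append_bio() instead of open-coding rq->bio, rq->buffer and rq->data_len; the block layer derives the request fields from the bio. A sketch of preparing such a bio-backed request, assuming the three-argument blk_rq_append_bio() used above; the helper name and the packet-command type are illustrative:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hypothetical: wrap a ready-made bio in a freshly allocated request. */
    static struct request *make_bio_backed_request(struct request_queue *q,
                                                   struct bio *bio, int rw)
    {
            struct request *rq = blk_get_request(q, rw, GFP_NOIO);

            if (!rq)
                    return NULL;

            /* Sector, length and segment counts are filled in from the bio. */
            blk_rq_append_bio(q, rq, bio);

            rq->cmd_type = REQ_TYPE_BLOCK_PC;       /* illustrative: packet command */
            return rq;
    }
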
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f3a772486437..b8e342fe7586 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -124,15 +124,11 @@ static void dec_count(struct io *io, unsigned int region, int error)
124 } 124 }
125} 125}
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static void endio(struct bio *bio, int error)
128{ 128{
129 struct io *io; 129 struct io *io;
130 unsigned region; 130 unsigned region;
131 131
132 /* keep going until we've finished */
133 if (bio->bi_size)
134 return 1;
135
136 if (error && bio_data_dir(bio) == READ) 132 if (error && bio_data_dir(bio) == READ)
137 zero_fill_bio(bio); 133 zero_fill_bio(bio);
138 134
@@ -146,8 +142,6 @@ static int endio(struct bio *bio, unsigned int done, int error)
146 bio_put(bio); 142 bio_put(bio);
147 143
148 dec_count(io, region, error); 144 dec_count(io, region, error);
149
150 return 0;
151} 145}
152 146
153/*----------------------------------------------------------------- 147/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d6ca9d0a6fd1..31056abca89d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -390,11 +390,11 @@ static void dispatch_queued_ios(struct multipath *m)
390 390
391 r = map_io(m, bio, mpio, 1); 391 r = map_io(m, bio, mpio, 1);
392 if (r < 0) 392 if (r < 0)
393 bio_endio(bio, bio->bi_size, r); 393 bio_endio(bio, r);
394 else if (r == DM_MAPIO_REMAPPED) 394 else if (r == DM_MAPIO_REMAPPED)
395 generic_make_request(bio); 395 generic_make_request(bio);
396 else if (r == DM_MAPIO_REQUEUE) 396 else if (r == DM_MAPIO_REQUEUE)
397 bio_endio(bio, bio->bi_size, -EIO); 397 bio_endio(bio, -EIO);
398 398
399 bio = next; 399 bio = next;
400 } 400 }
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 144071e70a93..d09ff15490a5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -820,7 +820,7 @@ static void write_callback(unsigned long error, void *context)
820 break; 820 break;
821 } 821 }
822 } 822 }
823 bio_endio(bio, bio->bi_size, 0); 823 bio_endio(bio, 0);
824} 824}
825 825
826static void do_write(struct mirror_set *ms, struct bio *bio) 826static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -900,7 +900,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
900 */ 900 */
901 if (unlikely(ms->log_failure)) 901 if (unlikely(ms->log_failure))
902 while ((bio = bio_list_pop(&sync))) 902 while ((bio = bio_list_pop(&sync)))
903 bio_endio(bio, bio->bi_size, -EIO); 903 bio_endio(bio, -EIO);
904 else while ((bio = bio_list_pop(&sync))) 904 else while ((bio = bio_list_pop(&sync)))
905 do_write(ms, bio); 905 do_write(ms, bio);
906 906
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 83ddbfe6b8a4..98a633f3d6b0 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -636,7 +636,7 @@ static void error_bios(struct bio *bio)
636 while (bio) { 636 while (bio) {
637 n = bio->bi_next; 637 n = bio->bi_next;
638 bio->bi_next = NULL; 638 bio->bi_next = NULL;
639 bio_io_error(bio, bio->bi_size); 639 bio_io_error(bio);
640 bio = n; 640 bio = n;
641 } 641 }
642} 642}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index f314d7dc9c26..bdec206c404b 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -43,7 +43,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio,
43 break; 43 break;
44 } 44 }
45 45
46 bio_endio(bio, bio->bi_size, 0); 46 bio_endio(bio, 0);
47 47
48 /* accepted bio, don't make new request */ 48 /* accepted bio, don't make new request */
49 return DM_MAPIO_SUBMITTED; 49 return DM_MAPIO_SUBMITTED;
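
dm-zero shows the simplest caller-side change: a target that finishes the bio itself calls the two-argument bio_endio() and returns DM_MAPIO_SUBMITTED so device-mapper does not remap the bio. A sketch of such a map method for a hypothetical "discard everything" target, assuming the dm target interface used in this tree:

    #include <linux/device-mapper.h>
    #include <linux/bio.h>

    /* Hypothetical map method: complete every bio immediately. */
    static int discard_map(struct dm_target *ti, struct bio *bio,
                           union map_info *map_context)
    {
            if (bio_data_dir(bio) == READ)
                    bio_endio(bio, -EIO);   /* nothing to read back */
            else
                    bio_endio(bio, 0);      /* pretend the write succeeded */

            /* The bio was completed here; do not submit a new request. */
            return DM_MAPIO_SUBMITTED;
    }
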
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2120155929a6..167765c47747 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -484,23 +484,20 @@ static void dec_pending(struct dm_io *io, int error)
484 blk_add_trace_bio(io->md->queue, io->bio, 484 blk_add_trace_bio(io->md->queue, io->bio,
485 BLK_TA_COMPLETE); 485 BLK_TA_COMPLETE);
486 486
487 bio_endio(io->bio, io->bio->bi_size, io->error); 487 bio_endio(io->bio, io->error);
488 } 488 }
489 489
490 free_io(io->md, io); 490 free_io(io->md, io);
491 } 491 }
492} 492}
493 493
494static int clone_endio(struct bio *bio, unsigned int done, int error) 494static void clone_endio(struct bio *bio, int error)
495{ 495{
496 int r = 0; 496 int r = 0;
497 struct dm_target_io *tio = bio->bi_private; 497 struct dm_target_io *tio = bio->bi_private;
498 struct mapped_device *md = tio->io->md; 498 struct mapped_device *md = tio->io->md;
499 dm_endio_fn endio = tio->ti->type->end_io; 499 dm_endio_fn endio = tio->ti->type->end_io;
500 500
501 if (bio->bi_size)
502 return 1;
503
504 if (!bio_flagged(bio, BIO_UPTODATE) && !error) 501 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
505 error = -EIO; 502 error = -EIO;
506 503
@@ -514,7 +511,7 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
514 error = r; 511 error = r;
515 else if (r == DM_ENDIO_INCOMPLETE) 512 else if (r == DM_ENDIO_INCOMPLETE)
516 /* The target will handle the io */ 513 /* The target will handle the io */
517 return 1; 514 return;
518 else if (r) { 515 else if (r) {
519 DMWARN("unimplemented target endio return value: %d", r); 516 DMWARN("unimplemented target endio return value: %d", r);
520 BUG(); 517 BUG();
@@ -530,7 +527,6 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
530 527
531 bio_put(bio); 528 bio_put(bio);
532 free_tio(md, tio); 529 free_tio(md, tio);
533 return r;
534} 530}
535 531
536static sector_t max_io_len(struct mapped_device *md, 532static sector_t max_io_len(struct mapped_device *md,
@@ -761,7 +757,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
761 757
762 ci.map = dm_get_table(md); 758 ci.map = dm_get_table(md);
763 if (!ci.map) { 759 if (!ci.map) {
764 bio_io_error(bio, bio->bi_size); 760 bio_io_error(bio);
765 return; 761 return;
766 } 762 }
767 763
@@ -803,7 +799,7 @@ static int dm_request(struct request_queue *q, struct bio *bio)
803 * guarantee it is (or can be) handled by the targets correctly. 799 * guarantee it is (or can be) handled by the targets correctly.
804 */ 800 */
805 if (unlikely(bio_barrier(bio))) { 801 if (unlikely(bio_barrier(bio))) {
806 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 802 bio_endio(bio, -EOPNOTSUPP);
807 return 0; 803 return 0;
808 } 804 }
809 805
@@ -820,13 +816,13 @@ static int dm_request(struct request_queue *q, struct bio *bio)
820 up_read(&md->io_lock); 816 up_read(&md->io_lock);
821 817
822 if (bio_rw(bio) == READA) { 818 if (bio_rw(bio) == READA) {
823 bio_io_error(bio, bio->bi_size); 819 bio_io_error(bio);
824 return 0; 820 return 0;
825 } 821 }
826 822
827 r = queue_io(md, bio); 823 r = queue_io(md, bio);
828 if (r < 0) { 824 if (r < 0) {
829 bio_io_error(bio, bio->bi_size); 825 bio_io_error(bio);
830 return 0; 826 return 0;
831 827
832 } else if (r == 0) 828 } else if (r == 0)
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cb059cf14c2e..cf2ddce34118 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -65,18 +65,16 @@
65#include <linux/raid/md.h> 65#include <linux/raid/md.h>
66 66
67 67
68static int faulty_fail(struct bio *bio, unsigned int bytes_done, int error) 68static void faulty_fail(struct bio *bio, int error)
69{ 69{
70 struct bio *b = bio->bi_private; 70 struct bio *b = bio->bi_private;
71 71
72 b->bi_size = bio->bi_size; 72 b->bi_size = bio->bi_size;
73 b->bi_sector = bio->bi_sector; 73 b->bi_sector = bio->bi_sector;
74 74
75 if (bio->bi_size == 0) 75 bio_put(bio);
76 bio_put(bio);
77 76
78 clear_bit(BIO_UPTODATE, &b->bi_flags); 77 bio_io_error(b);
79 return (b->bi_end_io)(b, bytes_done, -EIO);
80} 78}
81 79
82typedef struct faulty_conf { 80typedef struct faulty_conf {
@@ -179,7 +177,7 @@ static int make_request(struct request_queue *q, struct bio *bio)
179 /* special case - don't decrement, don't generic_make_request, 177 /* special case - don't decrement, don't generic_make_request,
180 * just fail immediately 178 * just fail immediately
181 */ 179 */
182 bio_endio(bio, bio->bi_size, -EIO); 180 bio_endio(bio, -EIO);
183 return 0; 181 return 0;
184 } 182 }
185 183
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 17f795c3e0ab..550148770bb2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -338,7 +338,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
338 sector_t block; 338 sector_t block;
339 339
340 if (unlikely(bio_barrier(bio))) { 340 if (unlikely(bio_barrier(bio))) {
341 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 341 bio_endio(bio, -EOPNOTSUPP);
342 return 0; 342 return 0;
343 } 343 }
344 344
@@ -358,7 +358,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
358 bdevname(tmp_dev->rdev->bdev, b), 358 bdevname(tmp_dev->rdev->bdev, b),
359 (unsigned long long)tmp_dev->size, 359 (unsigned long long)tmp_dev->size,
360 (unsigned long long)tmp_dev->offset); 360 (unsigned long long)tmp_dev->offset);
361 bio_io_error(bio, bio->bi_size); 361 bio_io_error(bio);
362 return 0; 362 return 0;
363 } 363 }
364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > 364 if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f883b7e37f3d..e8f102ea9b03 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,7 +213,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
213 213
214static int md_fail_request (struct request_queue *q, struct bio *bio) 214static int md_fail_request (struct request_queue *q, struct bio *bio)
215{ 215{
216 bio_io_error(bio, bio->bi_size); 216 bio_io_error(bio);
217 return 0; 217 return 0;
218} 218}
219 219
@@ -384,12 +384,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
384} 384}
385 385
386 386
387static int super_written(struct bio *bio, unsigned int bytes_done, int error) 387static void super_written(struct bio *bio, int error)
388{ 388{
389 mdk_rdev_t *rdev = bio->bi_private; 389 mdk_rdev_t *rdev = bio->bi_private;
390 mddev_t *mddev = rdev->mddev; 390 mddev_t *mddev = rdev->mddev;
391 if (bio->bi_size)
392 return 1;
393 391
394 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 392 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
395 printk("md: super_written gets error=%d, uptodate=%d\n", 393 printk("md: super_written gets error=%d, uptodate=%d\n",
@@ -401,16 +399,13 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
401 if (atomic_dec_and_test(&mddev->pending_writes)) 399 if (atomic_dec_and_test(&mddev->pending_writes))
402 wake_up(&mddev->sb_wait); 400 wake_up(&mddev->sb_wait);
403 bio_put(bio); 401 bio_put(bio);
404 return 0;
405} 402}
406 403
407static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 404static void super_written_barrier(struct bio *bio, int error)
408{ 405{
409 struct bio *bio2 = bio->bi_private; 406 struct bio *bio2 = bio->bi_private;
410 mdk_rdev_t *rdev = bio2->bi_private; 407 mdk_rdev_t *rdev = bio2->bi_private;
411 mddev_t *mddev = rdev->mddev; 408 mddev_t *mddev = rdev->mddev;
412 if (bio->bi_size)
413 return 1;
414 409
415 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 410 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
416 error == -EOPNOTSUPP) { 411 error == -EOPNOTSUPP) {
@@ -424,11 +419,11 @@ static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int e
424 spin_unlock_irqrestore(&mddev->write_lock, flags); 419 spin_unlock_irqrestore(&mddev->write_lock, flags);
425 wake_up(&mddev->sb_wait); 420 wake_up(&mddev->sb_wait);
426 bio_put(bio); 421 bio_put(bio);
427 return 0; 422 } else {
423 bio_put(bio2);
424 bio->bi_private = rdev;
425 super_written(bio, error);
428 } 426 }
429 bio_put(bio2);
430 bio->bi_private = rdev;
431 return super_written(bio, bytes_done, error);
432} 427}
433 428
434void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 429void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
@@ -489,13 +484,9 @@ void md_super_wait(mddev_t *mddev)
489 finish_wait(&mddev->sb_wait, &wq); 484 finish_wait(&mddev->sb_wait, &wq);
490} 485}
491 486
492static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 487static void bi_complete(struct bio *bio, int error)
493{ 488{
494 if (bio->bi_size)
495 return 1;
496
497 complete((struct completion*)bio->bi_private); 489 complete((struct completion*)bio->bi_private);
498 return 0;
499} 490}
500 491
501int sync_page_io(struct block_device *bdev, sector_t sector, int size, 492int sync_page_io(struct block_device *bdev, sector_t sector, int size,
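
md's sync_page_io() keeps its synchronous behaviour with the new interface: bi_complete() just signals a completion, and the old partial-completion check is gone. A sketch of a synchronous single-page read built the same way, assuming the bio API of this tree (allocation-failure handling trimmed for brevity):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>
    #include <linux/fs.h>

    static void sync_read_endio(struct bio *bio, int error)
    {
            /* Just wake the submitter; the bio is inspected after the wait. */
            complete((struct completion *)bio->bi_private);
    }

    /* Hypothetical: read 'size' bytes of one page synchronously from bdev. */
    static int sync_read_page(struct block_device *bdev, sector_t sector,
                              struct page *page, int size)
    {
            DECLARE_COMPLETION_ONSTACK(event);
            struct bio *bio = bio_alloc(GFP_NOIO, 1);
            int ok;

            bio->bi_bdev    = bdev;
            bio->bi_sector  = sector;
            bio_add_page(bio, page, size, 0);
            bio->bi_private = &event;
            bio->bi_end_io  = sync_read_endio;

            submit_bio(READ, bio);
            wait_for_completion(&event);

            ok = test_bit(BIO_UPTODATE, &bio->bi_flags);
            bio_put(bio);
            return ok ? 0 : -EIO;
    }
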
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1e2af43a73b9..f2a63f394ad9 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -82,21 +82,17 @@ static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
82 struct bio *bio = mp_bh->master_bio; 82 struct bio *bio = mp_bh->master_bio;
83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 83 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
84 84
85 bio_endio(bio, bio->bi_size, err); 85 bio_endio(bio, err);
86 mempool_free(mp_bh, conf->pool); 86 mempool_free(mp_bh, conf->pool);
87} 87}
88 88
89static int multipath_end_request(struct bio *bio, unsigned int bytes_done, 89static void multipath_end_request(struct bio *bio, int error)
90 int error)
91{ 90{
92 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 91 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
93 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); 92 struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
94 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); 93 multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
95 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; 94 mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
96 95
97 if (bio->bi_size)
98 return 1;
99
100 if (uptodate) 96 if (uptodate)
101 multipath_end_bh_io(mp_bh, 0); 97 multipath_end_bh_io(mp_bh, 0);
102 else if (!bio_rw_ahead(bio)) { 98 else if (!bio_rw_ahead(bio)) {
@@ -112,7 +108,6 @@ static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
112 } else 108 } else
113 multipath_end_bh_io(mp_bh, error); 109 multipath_end_bh_io(mp_bh, error);
114 rdev_dec_pending(rdev, conf->mddev); 110 rdev_dec_pending(rdev, conf->mddev);
115 return 0;
116} 111}
117 112
118static void unplug_slaves(mddev_t *mddev) 113static void unplug_slaves(mddev_t *mddev)
@@ -155,7 +150,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
155 const int rw = bio_data_dir(bio); 150 const int rw = bio_data_dir(bio);
156 151
157 if (unlikely(bio_barrier(bio))) { 152 if (unlikely(bio_barrier(bio))) {
158 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 153 bio_endio(bio, -EOPNOTSUPP);
159 return 0; 154 return 0;
160 } 155 }
161 156
@@ -169,7 +164,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
169 164
170 mp_bh->path = multipath_map(conf); 165 mp_bh->path = multipath_map(conf);
171 if (mp_bh->path < 0) { 166 if (mp_bh->path < 0) {
172 bio_endio(bio, bio->bi_size, -EIO); 167 bio_endio(bio, -EIO);
173 mempool_free(mp_bh, conf->pool); 168 mempool_free(mp_bh, conf->pool);
174 return 0; 169 return 0;
175 } 170 }
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index b8216bc6db45..ef0da2d84959 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -420,7 +420,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
420 const int rw = bio_data_dir(bio); 420 const int rw = bio_data_dir(bio);
421 421
422 if (unlikely(bio_barrier(bio))) { 422 if (unlikely(bio_barrier(bio))) {
423 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 423 bio_endio(bio, -EOPNOTSUPP);
424 return 0; 424 return 0;
425 } 425 }
426 426
@@ -490,7 +490,7 @@ bad_map:
490 " or bigger than %dk %llu %d\n", chunk_size, 490 " or bigger than %dk %llu %d\n", chunk_size,
491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 491 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
492 492
493 bio_io_error(bio, bio->bi_size); 493 bio_io_error(bio);
494 return 0; 494 return 0;
495} 495}
496 496
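
The same early bio_barrier() rejection recurs across raid0, raid1, raid10, raid5, linear, multipath and dm, now written with the short bio_endio()/bio_io_error() calls. For reference, the common shape as a hypothetical make_request stub modelled on md_fail_request():

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Hypothetical: refuse barriers explicitly, fail everything else. */
    static int my_fail_request(struct request_queue *q, struct bio *bio)
    {
            if (unlikely(bio_barrier(bio))) {
                    bio_endio(bio, -EOPNOTSUPP);    /* barriers not supported */
                    return 0;
            }

            bio_io_error(bio);      /* equivalent to bio_endio(bio, -EIO) */
            return 0;
    }
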
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f33a729960ca..6d03bea6fa58 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -238,7 +238,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
238 (unsigned long long) bio->bi_sector + 238 (unsigned long long) bio->bi_sector +
239 (bio->bi_size >> 9) - 1); 239 (bio->bi_size >> 9) - 1);
240 240
241 bio_endio(bio, bio->bi_size, 241 bio_endio(bio,
242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO); 242 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
243 } 243 }
244 free_r1bio(r1_bio); 244 free_r1bio(r1_bio);
@@ -255,16 +255,13 @@ static inline void update_head_pos(int disk, r1bio_t *r1_bio)
255 r1_bio->sector + (r1_bio->sectors); 255 r1_bio->sector + (r1_bio->sectors);
256} 256}
257 257
258static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 258static void raid1_end_read_request(struct bio *bio, int error)
259{ 259{
260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 260 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 261 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
262 int mirror; 262 int mirror;
263 conf_t *conf = mddev_to_conf(r1_bio->mddev); 263 conf_t *conf = mddev_to_conf(r1_bio->mddev);
264 264
265 if (bio->bi_size)
266 return 1;
267
268 mirror = r1_bio->read_disk; 265 mirror = r1_bio->read_disk;
269 /* 266 /*
270 * this branch is our 'one mirror IO has finished' event handler: 267 * this branch is our 'one mirror IO has finished' event handler:
@@ -301,10 +298,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
301 } 298 }
302 299
303 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 300 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
304 return 0;
305} 301}
306 302
307static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 303static void raid1_end_write_request(struct bio *bio, int error)
308{ 304{
309 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 305 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
310 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 306 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -312,8 +308,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
312 conf_t *conf = mddev_to_conf(r1_bio->mddev); 308 conf_t *conf = mddev_to_conf(r1_bio->mddev);
313 struct bio *to_put = NULL; 309 struct bio *to_put = NULL;
314 310
315 if (bio->bi_size)
316 return 1;
317 311
318 for (mirror = 0; mirror < conf->raid_disks; mirror++) 312 for (mirror = 0; mirror < conf->raid_disks; mirror++)
319 if (r1_bio->bios[mirror] == bio) 313 if (r1_bio->bios[mirror] == bio)
@@ -366,7 +360,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
366 (unsigned long long) mbio->bi_sector, 360 (unsigned long long) mbio->bi_sector,
367 (unsigned long long) mbio->bi_sector + 361 (unsigned long long) mbio->bi_sector +
368 (mbio->bi_size >> 9) - 1); 362 (mbio->bi_size >> 9) - 1);
369 bio_endio(mbio, mbio->bi_size, 0); 363 bio_endio(mbio, 0);
370 } 364 }
371 } 365 }
372 } 366 }
@@ -400,8 +394,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
400 394
401 if (to_put) 395 if (to_put)
402 bio_put(to_put); 396 bio_put(to_put);
403
404 return 0;
405} 397}
406 398
407 399
@@ -796,7 +788,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
796 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) { 788 if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
797 if (rw == WRITE) 789 if (rw == WRITE)
798 md_write_end(mddev); 790 md_write_end(mddev);
799 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 791 bio_endio(bio, -EOPNOTSUPP);
800 return 0; 792 return 0;
801 } 793 }
802 794
@@ -1137,14 +1129,11 @@ abort:
1137} 1129}
1138 1130
1139 1131
1140static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1132static void end_sync_read(struct bio *bio, int error)
1141{ 1133{
1142 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1134 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
1143 int i; 1135 int i;
1144 1136
1145 if (bio->bi_size)
1146 return 1;
1147
1148 for (i=r1_bio->mddev->raid_disks; i--; ) 1137 for (i=r1_bio->mddev->raid_disks; i--; )
1149 if (r1_bio->bios[i] == bio) 1138 if (r1_bio->bios[i] == bio)
1150 break; 1139 break;
@@ -1160,10 +1149,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1160 1149
1161 if (atomic_dec_and_test(&r1_bio->remaining)) 1150 if (atomic_dec_and_test(&r1_bio->remaining))
1162 reschedule_retry(r1_bio); 1151 reschedule_retry(r1_bio);
1163 return 0;
1164} 1152}
1165 1153
1166static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1154static void end_sync_write(struct bio *bio, int error)
1167{ 1155{
1168 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1156 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1169 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); 1157 r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
@@ -1172,9 +1160,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1172 int i; 1160 int i;
1173 int mirror=0; 1161 int mirror=0;
1174 1162
1175 if (bio->bi_size)
1176 return 1;
1177
1178 for (i = 0; i < conf->raid_disks; i++) 1163 for (i = 0; i < conf->raid_disks; i++)
1179 if (r1_bio->bios[i] == bio) { 1164 if (r1_bio->bios[i] == bio) {
1180 mirror = i; 1165 mirror = i;
@@ -1200,7 +1185,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1200 md_done_sync(mddev, r1_bio->sectors, uptodate); 1185 md_done_sync(mddev, r1_bio->sectors, uptodate);
1201 put_buf(r1_bio); 1186 put_buf(r1_bio);
1202 } 1187 }
1203 return 0;
1204} 1188}
1205 1189
1206static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1190static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4e53792aa520..25a96c42bdb0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -227,7 +227,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
227{ 227{
228 struct bio *bio = r10_bio->master_bio; 228 struct bio *bio = r10_bio->master_bio;
229 229
230 bio_endio(bio, bio->bi_size, 230 bio_endio(bio,
231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO); 231 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
232 free_r10bio(r10_bio); 232 free_r10bio(r10_bio);
233} 233}
@@ -243,15 +243,13 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
243 r10_bio->devs[slot].addr + (r10_bio->sectors); 243 r10_bio->devs[slot].addr + (r10_bio->sectors);
244} 244}
245 245
246static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int error) 246static void raid10_end_read_request(struct bio *bio, int error)
247{ 247{
248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 248 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 249 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
250 int slot, dev; 250 int slot, dev;
251 conf_t *conf = mddev_to_conf(r10_bio->mddev); 251 conf_t *conf = mddev_to_conf(r10_bio->mddev);
252 252
253 if (bio->bi_size)
254 return 1;
255 253
256 slot = r10_bio->read_slot; 254 slot = r10_bio->read_slot;
257 dev = r10_bio->devs[slot].devnum; 255 dev = r10_bio->devs[slot].devnum;
@@ -284,19 +282,15 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
284 } 282 }
285 283
286 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 284 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
287 return 0;
288} 285}
289 286
290static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, int error) 287static void raid10_end_write_request(struct bio *bio, int error)
291{ 288{
292 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 289 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
293 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 290 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
294 int slot, dev; 291 int slot, dev;
295 conf_t *conf = mddev_to_conf(r10_bio->mddev); 292 conf_t *conf = mddev_to_conf(r10_bio->mddev);
296 293
297 if (bio->bi_size)
298 return 1;
299
300 for (slot = 0; slot < conf->copies; slot++) 294 for (slot = 0; slot < conf->copies; slot++)
301 if (r10_bio->devs[slot].bio == bio) 295 if (r10_bio->devs[slot].bio == bio)
302 break; 296 break;
@@ -339,7 +333,6 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
339 } 333 }
340 334
341 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev); 335 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
342 return 0;
343} 336}
344 337
345 338
@@ -787,7 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
787 unsigned long flags; 780 unsigned long flags;
788 781
789 if (unlikely(bio_barrier(bio))) { 782 if (unlikely(bio_barrier(bio))) {
790 bio_endio(bio, bio->bi_size, -EOPNOTSUPP); 783 bio_endio(bio, -EOPNOTSUPP);
791 return 0; 784 return 0;
792 } 785 }
793 786
@@ -819,7 +812,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
819 " or bigger than %dk %llu %d\n", chunk_sects/2, 812 " or bigger than %dk %llu %d\n", chunk_sects/2,
820 (unsigned long long)bio->bi_sector, bio->bi_size >> 10); 813 (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
821 814
822 bio_io_error(bio, bio->bi_size); 815 bio_io_error(bio);
823 return 0; 816 return 0;
824 } 817 }
825 818
@@ -1155,15 +1148,12 @@ abort:
1155} 1148}
1156 1149
1157 1150
1158static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error) 1151static void end_sync_read(struct bio *bio, int error)
1159{ 1152{
1160 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1153 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1161 conf_t *conf = mddev_to_conf(r10_bio->mddev); 1154 conf_t *conf = mddev_to_conf(r10_bio->mddev);
1162 int i,d; 1155 int i,d;
1163 1156
1164 if (bio->bi_size)
1165 return 1;
1166
1167 for (i=0; i<conf->copies; i++) 1157 for (i=0; i<conf->copies; i++)
1168 if (r10_bio->devs[i].bio == bio) 1158 if (r10_bio->devs[i].bio == bio)
1169 break; 1159 break;
@@ -1192,10 +1182,9 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
1192 reschedule_retry(r10_bio); 1182 reschedule_retry(r10_bio);
1193 } 1183 }
1194 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); 1184 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1195 return 0;
1196} 1185}
1197 1186
1198static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error) 1187static void end_sync_write(struct bio *bio, int error)
1199{ 1188{
1200 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1189 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1201 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); 1190 r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
@@ -1203,9 +1192,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1203 conf_t *conf = mddev_to_conf(mddev); 1192 conf_t *conf = mddev_to_conf(mddev);
1204 int i,d; 1193 int i,d;
1205 1194
1206 if (bio->bi_size)
1207 return 1;
1208
1209 for (i = 0; i < conf->copies; i++) 1195 for (i = 0; i < conf->copies; i++)
1210 if (r10_bio->devs[i].bio == bio) 1196 if (r10_bio->devs[i].bio == bio)
1211 break; 1197 break;
@@ -1228,7 +1214,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
1228 } 1214 }
1229 } 1215 }
1230 rdev_dec_pending(conf->mirrors[d].rdev, mddev); 1216 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1231 return 0;
1232} 1217}
1233 1218
1234/* 1219/*
@@ -1374,7 +1359,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1374 if (test_bit(R10BIO_Uptodate, &r10_bio->state)) 1359 if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1375 generic_make_request(wbio); 1360 generic_make_request(wbio);
1376 else 1361 else
1377 bio_endio(wbio, wbio->bi_size, -EIO); 1362 bio_endio(wbio, -EIO);
1378} 1363}
1379 1364
1380 1365
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f96dea975fa5..caaca9e178bc 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -108,12 +108,11 @@ static void return_io(struct bio *return_bi)
108{ 108{
109 struct bio *bi = return_bi; 109 struct bio *bi = return_bi;
110 while (bi) { 110 while (bi) {
111 int bytes = bi->bi_size;
112 111
113 return_bi = bi->bi_next; 112 return_bi = bi->bi_next;
114 bi->bi_next = NULL; 113 bi->bi_next = NULL;
115 bi->bi_size = 0; 114 bi->bi_size = 0;
116 bi->bi_end_io(bi, bytes, 115 bi->bi_end_io(bi,
117 test_bit(BIO_UPTODATE, &bi->bi_flags) 116 test_bit(BIO_UPTODATE, &bi->bi_flags)
118 ? 0 : -EIO); 117 ? 0 : -EIO);
119 bi = return_bi; 118 bi = return_bi;
@@ -382,10 +381,10 @@ static unsigned long get_stripe_work(struct stripe_head *sh)
382 return pending; 381 return pending;
383} 382}
384 383
385static int 384static void
386raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error); 385raid5_end_read_request(struct bio *bi, int error);
387static int 386static void
388raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error); 387raid5_end_write_request(struct bio *bi, int error);
389 388
390static void ops_run_io(struct stripe_head *sh) 389static void ops_run_io(struct stripe_head *sh)
391{ 390{
@@ -1110,8 +1109,7 @@ static void shrink_stripes(raid5_conf_t *conf)
1110 conf->slab_cache = NULL; 1109 conf->slab_cache = NULL;
1111} 1110}
1112 1111
1113static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 1112static void raid5_end_read_request(struct bio * bi, int error)
1114 int error)
1115{ 1113{
1116 struct stripe_head *sh = bi->bi_private; 1114 struct stripe_head *sh = bi->bi_private;
1117 raid5_conf_t *conf = sh->raid_conf; 1115 raid5_conf_t *conf = sh->raid_conf;
@@ -1120,8 +1118,6 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1120 char b[BDEVNAME_SIZE]; 1118 char b[BDEVNAME_SIZE];
1121 mdk_rdev_t *rdev; 1119 mdk_rdev_t *rdev;
1122 1120
1123 if (bi->bi_size)
1124 return 1;
1125 1121
1126 for (i=0 ; i<disks; i++) 1122 for (i=0 ; i<disks; i++)
1127 if (bi == &sh->dev[i].req) 1123 if (bi == &sh->dev[i].req)
@@ -1132,7 +1128,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1132 uptodate); 1128 uptodate);
1133 if (i == disks) { 1129 if (i == disks) {
1134 BUG(); 1130 BUG();
1135 return 0; 1131 return;
1136 } 1132 }
1137 1133
1138 if (uptodate) { 1134 if (uptodate) {
@@ -1185,20 +1181,15 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
1185 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1181 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1186 set_bit(STRIPE_HANDLE, &sh->state); 1182 set_bit(STRIPE_HANDLE, &sh->state);
1187 release_stripe(sh); 1183 release_stripe(sh);
1188 return 0;
1189} 1184}
1190 1185
1191static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 1186static void raid5_end_write_request (struct bio *bi, int error)
1192 int error)
1193{ 1187{
1194 struct stripe_head *sh = bi->bi_private; 1188 struct stripe_head *sh = bi->bi_private;
1195 raid5_conf_t *conf = sh->raid_conf; 1189 raid5_conf_t *conf = sh->raid_conf;
1196 int disks = sh->disks, i; 1190 int disks = sh->disks, i;
1197 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1191 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1198 1192
1199 if (bi->bi_size)
1200 return 1;
1201
1202 for (i=0 ; i<disks; i++) 1193 for (i=0 ; i<disks; i++)
1203 if (bi == &sh->dev[i].req) 1194 if (bi == &sh->dev[i].req)
1204 break; 1195 break;
@@ -1208,7 +1199,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1208 uptodate); 1199 uptodate);
1209 if (i == disks) { 1200 if (i == disks) {
1210 BUG(); 1201 BUG();
1211 return 0; 1202 return;
1212 } 1203 }
1213 1204
1214 if (!uptodate) 1205 if (!uptodate)
@@ -1219,7 +1210,6 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
1219 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1210 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1220 set_bit(STRIPE_HANDLE, &sh->state); 1211 set_bit(STRIPE_HANDLE, &sh->state);
1221 release_stripe(sh); 1212 release_stripe(sh);
1222 return 0;
1223} 1213}
1224 1214
1225 1215
@@ -3340,7 +3330,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3340 * first). 3330 * first).
3341 * If the read failed.. 3331 * If the read failed..
3342 */ 3332 */
3343static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error) 3333static void raid5_align_endio(struct bio *bi, int error)
3344{ 3334{
3345 struct bio* raid_bi = bi->bi_private; 3335 struct bio* raid_bi = bi->bi_private;
3346 mddev_t *mddev; 3336 mddev_t *mddev;
@@ -3348,8 +3338,6 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3348 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3338 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3349 mdk_rdev_t *rdev; 3339 mdk_rdev_t *rdev;
3350 3340
3351 if (bi->bi_size)
3352 return 1;
3353 bio_put(bi); 3341 bio_put(bi);
3354 3342
3355 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3343 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
@@ -3360,17 +3348,16 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
3360 rdev_dec_pending(rdev, conf->mddev); 3348 rdev_dec_pending(rdev, conf->mddev);
3361 3349
3362 if (!error && uptodate) { 3350 if (!error && uptodate) {
3363 bio_endio(raid_bi, bytes, 0); 3351 bio_endio(raid_bi, 0);
3364 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3352 if (atomic_dec_and_test(&conf->active_aligned_reads))
3365 wake_up(&conf->wait_for_stripe); 3353 wake_up(&conf->wait_for_stripe);
3366 return 0; 3354 return;
3367 } 3355 }
3368 3356
3369 3357
3370 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3358 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3371 3359
3372 add_bio_to_retry(raid_bi, conf); 3360 add_bio_to_retry(raid_bi, conf);
3373 return 0;
3374} 3361}
3375 3362
3376static int bio_fits_rdev(struct bio *bi) 3363static int bio_fits_rdev(struct bio *bi)
@@ -3476,7 +3463,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
3476 int remaining; 3463 int remaining;
3477 3464
3478 if (unlikely(bio_barrier(bi))) { 3465 if (unlikely(bio_barrier(bi))) {
3479 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 3466 bio_endio(bi, -EOPNOTSUPP);
3480 return 0; 3467 return 0;
3481 } 3468 }
3482 3469
@@ -3592,12 +3579,11 @@ static int make_request(struct request_queue *q, struct bio * bi)
3592 remaining = --bi->bi_phys_segments; 3579 remaining = --bi->bi_phys_segments;
3593 spin_unlock_irq(&conf->device_lock); 3580 spin_unlock_irq(&conf->device_lock);
3594 if (remaining == 0) { 3581 if (remaining == 0) {
3595 int bytes = bi->bi_size;
3596 3582
3597 if ( rw == WRITE ) 3583 if ( rw == WRITE )
3598 md_write_end(mddev); 3584 md_write_end(mddev);
3599 bi->bi_size = 0; 3585
3600 bi->bi_end_io(bi, bytes, 3586 bi->bi_end_io(bi,
3601 test_bit(BIO_UPTODATE, &bi->bi_flags) 3587 test_bit(BIO_UPTODATE, &bi->bi_flags)
3602 ? 0 : -EIO); 3588 ? 0 : -EIO);
3603 } 3589 }
@@ -3875,10 +3861,8 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3875 remaining = --raid_bio->bi_phys_segments; 3861 remaining = --raid_bio->bi_phys_segments;
3876 spin_unlock_irq(&conf->device_lock); 3862 spin_unlock_irq(&conf->device_lock);
3877 if (remaining == 0) { 3863 if (remaining == 0) {
3878 int bytes = raid_bio->bi_size;
3879 3864
3880 raid_bio->bi_size = 0; 3865 raid_bio->bi_end_io(raid_bio,
3881 raid_bio->bi_end_io(raid_bio, bytes,
3882 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3866 test_bit(BIO_UPTODATE, &raid_bio->bi_flags)
3883 ? 0 : -EIO); 3867 ? 0 : -EIO);
3884 } 3868 }
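
raid5 completes the original bio by calling ->bi_end_io directly once its private use of bi_phys_segments as a reference count drops to zero; with the new prototype the call takes only the bio and an error code, so the saved byte count disappears. A small sketch of that count-then-complete step, assuming a device_lock-style spinlock guards the counter:

    #include <linux/bio.h>
    #include <linux/spinlock.h>

    /* Drop one reference on the master bio; finish it when the last one goes. */
    static void put_master_bio(struct bio *bi, spinlock_t *lock)
    {
            int remaining;

            spin_lock_irq(lock);
            remaining = --bi->bi_phys_segments;     /* borrowed as a refcount, as raid5 does */
            spin_unlock_irq(lock);

            if (remaining == 0)
                    bi->bi_end_io(bi,
                            test_bit(BIO_UPTODATE, &bi->bi_flags) ? 0 : -EIO);
    }
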
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index d32c60dbdd82..571320ab9e1a 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -472,14 +472,13 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
472 struct dasd_ccw_req *cqr; 472 struct dasd_ccw_req *cqr;
473 struct dasd_diag_req *dreq; 473 struct dasd_diag_req *dreq;
474 struct dasd_diag_bio *dbio; 474 struct dasd_diag_bio *dbio;
475 struct bio *bio; 475 struct req_iterator iter;
476 struct bio_vec *bv; 476 struct bio_vec *bv;
477 char *dst; 477 char *dst;
478 unsigned int count, datasize; 478 unsigned int count, datasize;
479 sector_t recid, first_rec, last_rec; 479 sector_t recid, first_rec, last_rec;
480 unsigned int blksize, off; 480 unsigned int blksize, off;
481 unsigned char rw_cmd; 481 unsigned char rw_cmd;
482 int i;
483 482
484 if (rq_data_dir(req) == READ) 483 if (rq_data_dir(req) == READ)
485 rw_cmd = MDSK_READ_REQ; 484 rw_cmd = MDSK_READ_REQ;
@@ -493,13 +492,11 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
493 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 492 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
494 /* Check struct bio and count the number of blocks for the request. */ 493 /* Check struct bio and count the number of blocks for the request. */
495 count = 0; 494 count = 0;
496 rq_for_each_bio(bio, req) { 495 rq_for_each_segment(bv, req, iter) {
497 bio_for_each_segment(bv, bio, i) { 496 if (bv->bv_len & (blksize - 1))
498 if (bv->bv_len & (blksize - 1)) 497 /* Fba can only do full blocks. */
499 /* Fba can only do full blocks. */ 498 return ERR_PTR(-EINVAL);
500 return ERR_PTR(-EINVAL); 499 count += bv->bv_len >> (device->s2b_shift + 9);
501 count += bv->bv_len >> (device->s2b_shift + 9);
502 }
503 } 500 }
504 /* Paranoia. */ 501 /* Paranoia. */
505 if (count != last_rec - first_rec + 1) 502 if (count != last_rec - first_rec + 1)
@@ -516,18 +513,16 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
516 dreq->block_count = count; 513 dreq->block_count = count;
517 dbio = dreq->bio; 514 dbio = dreq->bio;
518 recid = first_rec; 515 recid = first_rec;
519 rq_for_each_bio(bio, req) { 516 rq_for_each_segment(bv, req, iter) {
520 bio_for_each_segment(bv, bio, i) { 517 dst = page_address(bv->bv_page) + bv->bv_offset;
521 dst = page_address(bv->bv_page) + bv->bv_offset; 518 for (off = 0; off < bv->bv_len; off += blksize) {
522 for (off = 0; off < bv->bv_len; off += blksize) { 519 memset(dbio, 0, sizeof (struct dasd_diag_bio));
523 memset(dbio, 0, sizeof (struct dasd_diag_bio)); 520 dbio->type = rw_cmd;
524 dbio->type = rw_cmd; 521 dbio->block_number = recid + 1;
525 dbio->block_number = recid + 1; 522 dbio->buffer = dst;
526 dbio->buffer = dst; 523 dbio++;
527 dbio++; 524 dst += blksize;
528 dst += blksize; 525 recid++;
529 recid++;
530 }
531 } 526 }
532 } 527 }
533 cqr->retries = DIAG_MAX_RETRIES; 528 cqr->retries = DIAG_MAX_RETRIES;
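The dasd_diag hunk above is the template for every driver conversion in this series: the nested rq_for_each_bio()/bio_for_each_segment() walk, together with the struct bio pointer and the int index it needed, collapses into a single rq_for_each_segment() driven by a struct req_iterator. A minimal sketch of the old and new idioms for contrast; handle_segment() is a hypothetical helper standing in for whatever per-segment work a driver does, and the old-style loop is shown only for comparison (it no longer exists after this series):

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/mm.h>           /* page_address() */

static void handle_segment(char *dst, unsigned int len);   /* illustration only */

/* Old idiom: walk the bios of the request, then the segments of each bio. */
static void walk_request_old(struct request *req)
{
        struct bio *bio;
        struct bio_vec *bv;
        int i;

        rq_for_each_bio(bio, req)
                bio_for_each_segment(bv, bio, i)
                        handle_segment(page_address(bv->bv_page) + bv->bv_offset,
                                       bv->bv_len);
}

/* New idiom: one iterator object carries both the current bio and the index. */
static void walk_request_new(struct request *req)
{
        struct req_iterator iter;
        struct bio_vec *bv;

        rq_for_each_segment(bv, req, iter)
                handle_segment(page_address(bv->bv_page) + bv->bv_offset,
                               bv->bv_len);
}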
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ea63ba7828f9..44adf8496bda 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1176,7 +1176,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1176 struct LO_eckd_data *LO_data; 1176 struct LO_eckd_data *LO_data;
1177 struct dasd_ccw_req *cqr; 1177 struct dasd_ccw_req *cqr;
1178 struct ccw1 *ccw; 1178 struct ccw1 *ccw;
1179 struct bio *bio; 1179 struct req_iterator iter;
1180 struct bio_vec *bv; 1180 struct bio_vec *bv;
1181 char *dst; 1181 char *dst;
1182 unsigned int blksize, blk_per_trk, off; 1182 unsigned int blksize, blk_per_trk, off;
@@ -1185,7 +1185,6 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1185 sector_t first_trk, last_trk; 1185 sector_t first_trk, last_trk;
1186 unsigned int first_offs, last_offs; 1186 unsigned int first_offs, last_offs;
1187 unsigned char cmd, rcmd; 1187 unsigned char cmd, rcmd;
1188 int i;
1189 1188
1190 private = (struct dasd_eckd_private *) device->private; 1189 private = (struct dasd_eckd_private *) device->private;
1191 if (rq_data_dir(req) == READ) 1190 if (rq_data_dir(req) == READ)
@@ -1206,18 +1205,15 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1206 /* Check struct bio and count the number of blocks for the request. */ 1205 /* Check struct bio and count the number of blocks for the request. */
1207 count = 0; 1206 count = 0;
1208 cidaw = 0; 1207 cidaw = 0;
1209 rq_for_each_bio(bio, req) { 1208 rq_for_each_segment(bv, req, iter) {
1210 bio_for_each_segment(bv, bio, i) { 1209 if (bv->bv_len & (blksize - 1))
1211 if (bv->bv_len & (blksize - 1)) 1210 /* Eckd can only do full blocks. */
1212 /* Eckd can only do full blocks. */ 1211 return ERR_PTR(-EINVAL);
1213 return ERR_PTR(-EINVAL); 1212 count += bv->bv_len >> (device->s2b_shift + 9);
1214 count += bv->bv_len >> (device->s2b_shift + 9);
1215#if defined(CONFIG_64BIT) 1213#if defined(CONFIG_64BIT)
1216 if (idal_is_needed (page_address(bv->bv_page), 1214 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1217 bv->bv_len)) 1215 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1218 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1219#endif 1216#endif
1220 }
1221 } 1217 }
1222 /* Paranoia. */ 1218 /* Paranoia. */
1223 if (count != last_rec - first_rec + 1) 1219 if (count != last_rec - first_rec + 1)
@@ -1257,7 +1253,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1257 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 1253 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1258 last_rec - recid + 1, cmd, device, blksize); 1254 last_rec - recid + 1, cmd, device, blksize);
1259 } 1255 }
1260 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 1256 rq_for_each_segment(bv, req, iter) {
1261 dst = page_address(bv->bv_page) + bv->bv_offset; 1257 dst = page_address(bv->bv_page) + bv->bv_offset;
1262 if (dasd_page_cache) { 1258 if (dasd_page_cache) {
1263 char *copy = kmem_cache_alloc(dasd_page_cache, 1259 char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -1328,12 +1324,12 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1328{ 1324{
1329 struct dasd_eckd_private *private; 1325 struct dasd_eckd_private *private;
1330 struct ccw1 *ccw; 1326 struct ccw1 *ccw;
1331 struct bio *bio; 1327 struct req_iterator iter;
1332 struct bio_vec *bv; 1328 struct bio_vec *bv;
1333 char *dst, *cda; 1329 char *dst, *cda;
1334 unsigned int blksize, blk_per_trk, off; 1330 unsigned int blksize, blk_per_trk, off;
1335 sector_t recid; 1331 sector_t recid;
1336 int i, status; 1332 int status;
1337 1333
1338 if (!dasd_page_cache) 1334 if (!dasd_page_cache)
1339 goto out; 1335 goto out;
@@ -1346,7 +1342,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1346 ccw++; 1342 ccw++;
1347 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 1343 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1348 ccw++; 1344 ccw++;
1349 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 1345 rq_for_each_segment(bv, req, iter) {
1350 dst = page_address(bv->bv_page) + bv->bv_offset; 1346 dst = page_address(bv->bv_page) + bv->bv_offset;
1351 for (off = 0; off < bv->bv_len; off += blksize) { 1347 for (off = 0; off < bv->bv_len; off += blksize) {
1352 /* Skip locate record. */ 1348 /* Skip locate record. */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index da16ead8aff2..1d95822e0b8e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -234,14 +234,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
234 struct LO_fba_data *LO_data; 234 struct LO_fba_data *LO_data;
235 struct dasd_ccw_req *cqr; 235 struct dasd_ccw_req *cqr;
236 struct ccw1 *ccw; 236 struct ccw1 *ccw;
237 struct bio *bio; 237 struct req_iterator iter;
238 struct bio_vec *bv; 238 struct bio_vec *bv;
239 char *dst; 239 char *dst;
240 int count, cidaw, cplength, datasize; 240 int count, cidaw, cplength, datasize;
241 sector_t recid, first_rec, last_rec; 241 sector_t recid, first_rec, last_rec;
242 unsigned int blksize, off; 242 unsigned int blksize, off;
243 unsigned char cmd; 243 unsigned char cmd;
244 int i;
245 244
246 private = (struct dasd_fba_private *) device->private; 245 private = (struct dasd_fba_private *) device->private;
247 if (rq_data_dir(req) == READ) { 246 if (rq_data_dir(req) == READ) {
@@ -257,18 +256,15 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
257 /* Check struct bio and count the number of blocks for the request. */ 256 /* Check struct bio and count the number of blocks for the request. */
258 count = 0; 257 count = 0;
259 cidaw = 0; 258 cidaw = 0;
260 rq_for_each_bio(bio, req) { 259 rq_for_each_segment(bv, req, iter) {
261 bio_for_each_segment(bv, bio, i) { 260 if (bv->bv_len & (blksize - 1))
262 if (bv->bv_len & (blksize - 1)) 261 /* Fba can only do full blocks. */
263 /* Fba can only do full blocks. */ 262 return ERR_PTR(-EINVAL);
264 return ERR_PTR(-EINVAL); 263 count += bv->bv_len >> (device->s2b_shift + 9);
265 count += bv->bv_len >> (device->s2b_shift + 9);
266#if defined(CONFIG_64BIT) 264#if defined(CONFIG_64BIT)
267 if (idal_is_needed (page_address(bv->bv_page), 265 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
268 bv->bv_len)) 266 cidaw += bv->bv_len / blksize;
269 cidaw += bv->bv_len / blksize;
270#endif 267#endif
271 }
272 } 268 }
273 /* Paranoia. */ 269 /* Paranoia. */
274 if (count != last_rec - first_rec + 1) 270 if (count != last_rec - first_rec + 1)
@@ -304,7 +300,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
304 locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count); 300 locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
305 } 301 }
306 recid = first_rec; 302 recid = first_rec;
307 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 303 rq_for_each_segment(bv, req, iter) {
308 dst = page_address(bv->bv_page) + bv->bv_offset; 304 dst = page_address(bv->bv_page) + bv->bv_offset;
309 if (dasd_page_cache) { 305 if (dasd_page_cache) {
310 char *copy = kmem_cache_alloc(dasd_page_cache, 306 char *copy = kmem_cache_alloc(dasd_page_cache,
@@ -359,11 +355,11 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
359{ 355{
360 struct dasd_fba_private *private; 356 struct dasd_fba_private *private;
361 struct ccw1 *ccw; 357 struct ccw1 *ccw;
362 struct bio *bio; 358 struct req_iterator iter;
363 struct bio_vec *bv; 359 struct bio_vec *bv;
364 char *dst, *cda; 360 char *dst, *cda;
365 unsigned int blksize, off; 361 unsigned int blksize, off;
366 int i, status; 362 int status;
367 363
368 if (!dasd_page_cache) 364 if (!dasd_page_cache)
369 goto out; 365 goto out;
@@ -374,7 +370,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
374 ccw++; 370 ccw++;
375 if (private->rdc_data.mode.bits.data_chain != 0) 371 if (private->rdc_data.mode.bits.data_chain != 0)
376 ccw++; 372 ccw++;
377 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) { 373 rq_for_each_segment(bv, req, iter) {
378 dst = page_address(bv->bv_page) + bv->bv_offset; 374 dst = page_address(bv->bv_page) + bv->bv_offset;
379 for (off = 0; off < bv->bv_len; off += blksize) { 375 for (off = 0; off < bv->bv_len; off += blksize) {
380 /* Skip locate record. */ 376 /* Skip locate record. */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4d8798bacf97..859f870552e3 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -674,10 +674,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
674 } 674 }
675 bytes_done += bvec->bv_len; 675 bytes_done += bvec->bv_len;
676 } 676 }
677 bio_endio(bio, bytes_done, 0); 677 bio_endio(bio, 0);
678 return 0; 678 return 0;
679fail: 679fail:
680 bio_io_error(bio, bio->bi_size); 680 bio_io_error(bio);
681 return 0; 681 return 0;
682} 682}
683 683
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 354a060e5bec..0fbacc8b1063 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -230,12 +230,10 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio)
230 } 230 }
231 } 231 }
232 set_bit(BIO_UPTODATE, &bio->bi_flags); 232 set_bit(BIO_UPTODATE, &bio->bi_flags);
 233 bytes = bio->bi_size; 233 bio_endio(bio, 0);
 234 bio->bi_size = 0;
 235 bio->bi_end_io(bio, bytes, 0);
236 return 0; 234 return 0;
237fail: 235fail:
238 bio_io_error(bio, bio->bi_size); 236 bio_io_error(bio);
239 return 0; 237 return 0;
240} 238}
241 239
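For make_request-style drivers such as dcssblk and xpram above, the change is purely mechanical: the "save bi_size, zero it, call bi_end_io(bio, bytes, 0)" sequence becomes a single bio_endio() call, and bio_io_error() loses its size argument (after this series it is a one-argument helper that fails the whole bio with -EIO). A minimal sketch of the resulting submission path, assuming a hypothetical do_transfer() worker that stands in for the driver's actual copy loop:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int do_transfer(struct bio *bio);        /* illustration only */

static int my_make_request(struct request_queue *q, struct bio *bio)
{
        if (do_transfer(bio)) {
                bio_io_error(bio);      /* fails the whole bio, no byte count */
                return 0;
        }
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);              /* completes the whole bio, no byte count */
        return 0;
}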
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 80e7a537e7d2..5b47e9cce75f 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1134,21 +1134,18 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1134{ 1134{
1135 struct tape_request *request; 1135 struct tape_request *request;
1136 struct ccw1 *ccw; 1136 struct ccw1 *ccw;
1137 int count = 0, i; 1137 int count = 0;
1138 unsigned off; 1138 unsigned off;
1139 char *dst; 1139 char *dst;
1140 struct bio_vec *bv; 1140 struct bio_vec *bv;
1141 struct bio *bio; 1141 struct req_iterator iter;
1142 struct tape_34xx_block_id * start_block; 1142 struct tape_34xx_block_id * start_block;
1143 1143
1144 DBF_EVENT(6, "xBREDid:"); 1144 DBF_EVENT(6, "xBREDid:");
1145 1145
1146 /* Count the number of blocks for the request. */ 1146 /* Count the number of blocks for the request. */
1147 rq_for_each_bio(bio, req) { 1147 rq_for_each_segment(bv, req, iter)
1148 bio_for_each_segment(bv, bio, i) { 1148 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1149 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1150 }
1151 }
1152 1149
1153 /* Allocate the ccw request. */ 1150 /* Allocate the ccw request. */
1154 request = tape_alloc_request(3+count+1, 8); 1151 request = tape_alloc_request(3+count+1, 8);
@@ -1175,18 +1172,15 @@ tape_34xx_bread(struct tape_device *device, struct request *req)
1175 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 1172 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1176 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 1173 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1177 1174
1178 rq_for_each_bio(bio, req) { 1175 rq_for_each_segment(bv, req, iter) {
1179 bio_for_each_segment(bv, bio, i) { 1176 dst = kmap(bv->bv_page) + bv->bv_offset;
1180 dst = kmap(bv->bv_page) + bv->bv_offset; 1177 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
1181 for (off = 0; off < bv->bv_len; 1178 ccw->flags = CCW_FLAG_CC;
1182 off += TAPEBLOCK_HSEC_SIZE) { 1179 ccw->cmd_code = READ_FORWARD;
1183 ccw->flags = CCW_FLAG_CC; 1180 ccw->count = TAPEBLOCK_HSEC_SIZE;
1184 ccw->cmd_code = READ_FORWARD; 1181 set_normalized_cda(ccw, (void*) __pa(dst));
1185 ccw->count = TAPEBLOCK_HSEC_SIZE; 1182 ccw++;
1186 set_normalized_cda(ccw, (void*) __pa(dst)); 1183 dst += TAPEBLOCK_HSEC_SIZE;
1187 ccw++;
1188 dst += TAPEBLOCK_HSEC_SIZE;
1189 }
1190 } 1184 }
1191 } 1185 }
1192 1186
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 7e2b2ab49264..9f244c591eeb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -623,21 +623,19 @@ tape_3590_bread(struct tape_device *device, struct request *req)
623{ 623{
624 struct tape_request *request; 624 struct tape_request *request;
625 struct ccw1 *ccw; 625 struct ccw1 *ccw;
626 int count = 0, start_block, i; 626 int count = 0, start_block;
627 unsigned off; 627 unsigned off;
628 char *dst; 628 char *dst;
629 struct bio_vec *bv; 629 struct bio_vec *bv;
630 struct bio *bio; 630 struct req_iterator iter;
631 631
632 DBF_EVENT(6, "xBREDid:"); 632 DBF_EVENT(6, "xBREDid:");
633 start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 633 start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
634 DBF_EVENT(6, "start_block = %i\n", start_block); 634 DBF_EVENT(6, "start_block = %i\n", start_block);
635 635
636 rq_for_each_bio(bio, req) { 636 rq_for_each_segment(bv, req, iter)
637 bio_for_each_segment(bv, bio, i) { 637 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
638 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); 638
639 }
640 }
641 request = tape_alloc_request(2 + count + 1, 4); 639 request = tape_alloc_request(2 + count + 1, 4);
642 if (IS_ERR(request)) 640 if (IS_ERR(request))
643 return request; 641 return request;
@@ -653,21 +651,18 @@ tape_3590_bread(struct tape_device *device, struct request *req)
653 */ 651 */
654 ccw = tape_ccw_cc(ccw, NOP, 0, NULL); 652 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
655 653
656 rq_for_each_bio(bio, req) { 654 rq_for_each_segment(bv, req, iter) {
657 bio_for_each_segment(bv, bio, i) { 655 dst = page_address(bv->bv_page) + bv->bv_offset;
658 dst = page_address(bv->bv_page) + bv->bv_offset; 656 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
659 for (off = 0; off < bv->bv_len; 657 ccw->flags = CCW_FLAG_CC;
660 off += TAPEBLOCK_HSEC_SIZE) { 658 ccw->cmd_code = READ_FORWARD;
661 ccw->flags = CCW_FLAG_CC; 659 ccw->count = TAPEBLOCK_HSEC_SIZE;
662 ccw->cmd_code = READ_FORWARD; 660 set_normalized_cda(ccw, (void *) __pa(dst));
663 ccw->count = TAPEBLOCK_HSEC_SIZE; 661 ccw++;
664 set_normalized_cda(ccw, (void *) __pa(dst)); 662 dst += TAPEBLOCK_HSEC_SIZE;
665 ccw++;
666 dst += TAPEBLOCK_HSEC_SIZE;
667 }
668 if (off > bv->bv_len)
669 BUG();
670 } 663 }
664 if (off > bv->bv_len)
665 BUG();
671 } 666 }
672 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 667 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
673 DBF_EVENT(6, "xBREDccwg\n"); 668 DBF_EVENT(6, "xBREDccwg\n");
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a417a6ff9f97..604f4d717933 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -263,25 +263,12 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
263 bio->bi_rw |= (1 << BIO_RW); 263 bio->bi_rw |= (1 << BIO_RW);
264 blk_queue_bounce(q, &bio); 264 blk_queue_bounce(q, &bio);
265 265
266 if (!rq->bio) 266 return blk_rq_append_bio(q, rq, bio);
267 blk_rq_bio_prep(q, rq, bio);
268 else if (!ll_back_merge_fn(q, rq, bio))
269 return -EINVAL;
270 else {
271 rq->biotail->bi_next = bio;
272 rq->biotail = bio;
273 }
274
275 return 0;
276} 267}
277 268
278static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error) 269static void scsi_bi_endio(struct bio *bio, int error)
279{ 270{
280 if (bio->bi_size)
281 return 1;
282
283 bio_put(bio); 271 bio_put(bio);
284 return 0;
285} 272}
286 273
287/** 274/**
@@ -337,7 +324,7 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
337 if (bio->bi_vcnt >= nr_vecs) { 324 if (bio->bi_vcnt >= nr_vecs) {
338 err = scsi_merge_bio(rq, bio); 325 err = scsi_merge_bio(rq, bio);
339 if (err) { 326 if (err) {
340 bio_endio(bio, bio->bi_size, 0); 327 bio_endio(bio, 0);
341 goto free_bios; 328 goto free_bios;
342 } 329 }
343 bio = NULL; 330 bio = NULL;
@@ -359,7 +346,7 @@ free_bios:
359 /* 346 /*
 360 * call endio instead of bio_put in case it was bounced 347 * call endio instead of bio_put in case it was bounced
361 */ 348 */
362 bio_endio(bio, bio->bi_size, 0); 349 bio_endio(bio, 0);
363 } 350 }
364 351
365 return err; 352 return err;
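The scsi_merge_bio() hunk shows the other half of the series: the open-coded merge (blk_rq_bio_prep(), ll_back_merge_fn(), manual biotail chaining) is replaced by the new block-layer helper blk_rq_append_bio(), and the partial-completion guard disappears from scsi_bi_endio(). A hedged sketch of the resulting pattern for folding a mapped bio into a request; queue_mapped_bio() is an invented name, and the error path mirrors the scsi_req_map_sg() hunk above, which completes rather than drops a possibly-bounced bio:

#include <linux/blkdev.h>
#include <linux/bio.h>

static int queue_mapped_bio(struct request_queue *q, struct request *rq,
                            struct bio *bio)
{
        int err;

        blk_queue_bounce(q, &bio);              /* may substitute a bounce bio */
        err = blk_rq_append_bio(q, rq, bio);    /* preps rq or back-merges bio */
        if (err)
                bio_endio(bio, 0);              /* unwind instead of bio_put() */
        return err;
}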
diff --git a/fs/bio.c b/fs/bio.c
index 29a44c1b64c6..5f604f269dfa 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -798,13 +798,9 @@ void bio_unmap_user(struct bio *bio)
798 bio_put(bio); 798 bio_put(bio);
799} 799}
800 800
801static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err) 801static void bio_map_kern_endio(struct bio *bio, int err)
802{ 802{
803 if (bio->bi_size)
804 return 1;
805
806 bio_put(bio); 803 bio_put(bio);
807 return 0;
808} 804}
809 805
810 806
@@ -1002,34 +998,26 @@ void bio_check_pages_dirty(struct bio *bio)
1002/** 998/**
1003 * bio_endio - end I/O on a bio 999 * bio_endio - end I/O on a bio
1004 * @bio: bio 1000 * @bio: bio
1005 * @bytes_done: number of bytes completed
1006 * @error: error, if any 1001 * @error: error, if any
1007 * 1002 *
1008 * Description: 1003 * Description:
1009 * bio_endio() will end I/O on @bytes_done number of bytes. This may be 1004 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1010 * just a partial part of the bio, or it may be the whole bio. bio_endio() 1005 * preferred way to end I/O on a bio, it takes care of clearing
1011 * is the preferred way to end I/O on a bio, it takes care of decrementing 1006 * BIO_UPTODATE on error. @error is 0 on success, and and one of the
1012 * bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, 1006 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1013 * and one of the established -Exxxx (-EIO, for instance) error values in 1007 * established -Exxxx (-EIO, for instance) error values in case
1014 * case something went wrong. No one should call bi_end_io() directly on 1008 * something went wrong. No one should call bi_end_io() directly on a
1015 * a bio unless they own it and thus know that it has an end_io function. 1010 * function.
1016 **/ 1011 **/
1017void bio_endio(struct bio *bio, unsigned int bytes_done, int error) 1012void bio_endio(struct bio *bio, int error)
1018{ 1013{
1019 if (error) 1014 if (error)
1020 clear_bit(BIO_UPTODATE, &bio->bi_flags); 1015 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1021 1016 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1022 if (unlikely(bytes_done > bio->bi_size)) { 1017 error = -EIO;
1023 printk("%s: want %u bytes done, only %u left\n", __FUNCTION__,
1024 bytes_done, bio->bi_size);
1025 bytes_done = bio->bi_size;
1026 }
1027
1028 bio->bi_size -= bytes_done;
1029 bio->bi_sector += (bytes_done >> 9);
1030 1018
1031 if (bio->bi_end_io) 1019 if (bio->bi_end_io)
1032 bio->bi_end_io(bio, bytes_done, error); 1020 bio->bi_end_io(bio, error);
1033} 1021}
1034 1022
1035void bio_pair_release(struct bio_pair *bp) 1023void bio_pair_release(struct bio_pair *bp)
@@ -1037,37 +1025,29 @@ void bio_pair_release(struct bio_pair *bp)
1037 if (atomic_dec_and_test(&bp->cnt)) { 1025 if (atomic_dec_and_test(&bp->cnt)) {
1038 struct bio *master = bp->bio1.bi_private; 1026 struct bio *master = bp->bio1.bi_private;
1039 1027
1040 bio_endio(master, master->bi_size, bp->error); 1028 bio_endio(master, bp->error);
1041 mempool_free(bp, bp->bio2.bi_private); 1029 mempool_free(bp, bp->bio2.bi_private);
1042 } 1030 }
1043} 1031}
1044 1032
1045static int bio_pair_end_1(struct bio * bi, unsigned int done, int err) 1033static void bio_pair_end_1(struct bio *bi, int err)
1046{ 1034{
1047 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1); 1035 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
1048 1036
1049 if (err) 1037 if (err)
1050 bp->error = err; 1038 bp->error = err;
1051 1039
1052 if (bi->bi_size)
1053 return 1;
1054
1055 bio_pair_release(bp); 1040 bio_pair_release(bp);
1056 return 0;
1057} 1041}
1058 1042
1059static int bio_pair_end_2(struct bio * bi, unsigned int done, int err) 1043static void bio_pair_end_2(struct bio *bi, int err)
1060{ 1044{
1061 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2); 1045 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
1062 1046
1063 if (err) 1047 if (err)
1064 bp->error = err; 1048 bp->error = err;
1065 1049
1066 if (bi->bi_size)
1067 return 1;
1068
1069 bio_pair_release(bp); 1050 bio_pair_release(bp);
1070 return 0;
1071} 1051}
1072 1052
1073/* 1053/*
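The fs/bio.c hunk above defines the contract that the rest of this patch propagates: bio_endio() now always completes the entire bio, bi_size is no longer decremented, and every ->bi_end_io handler becomes a void function of (bio, error) with no "if (bio->bi_size) return 1;" partial-completion guard. A minimal sketch of a handler written against the new prototype; struct my_io and my_end_io() are hypothetical and not taken from the patch:

#include <linux/bio.h>
#include <linux/completion.h>

/* Hypothetical per-I/O state handed to the bio via bi_private. */
struct my_io {
        struct completion done;
        int error;
};

/* Old prototype: int fn(struct bio *, unsigned int bytes_done, int error),
 * returning 1 until bi_size reached zero.  New prototype: */
static void my_end_io(struct bio *bio, int error)
{
        struct my_io *io = bio->bi_private;

        /* The whole bio is finished by the time this runs; bio_endio()
         * has already cleared BIO_UPTODATE if error is non-zero. */
        io->error = error;
        complete(&io->done);
        bio_put(bio);
}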
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2980eabe5779..6339a30879b7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -172,7 +172,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
172} 172}
173 173
174#if 0 174#if 0
175static int blk_end_aio(struct bio *bio, unsigned int bytes_done, int error) 175static void blk_end_aio(struct bio *bio, int error)
176{ 176{
177 struct kiocb *iocb = bio->bi_private; 177 struct kiocb *iocb = bio->bi_private;
178 atomic_t *bio_count = &iocb->ki_bio_count; 178 atomic_t *bio_count = &iocb->ki_bio_count;
diff --git a/fs/buffer.c b/fs/buffer.c
index 0e5ec371ce72..75b51dfa5e03 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2634,13 +2634,10 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2634 return tmp.b_blocknr; 2634 return tmp.b_blocknr;
2635} 2635}
2636 2636
2637static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err) 2637static void end_bio_bh_io_sync(struct bio *bio, int err)
2638{ 2638{
2639 struct buffer_head *bh = bio->bi_private; 2639 struct buffer_head *bh = bio->bi_private;
2640 2640
2641 if (bio->bi_size)
2642 return 1;
2643
2644 if (err == -EOPNOTSUPP) { 2641 if (err == -EOPNOTSUPP) {
2645 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 2642 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2646 set_bit(BH_Eopnotsupp, &bh->b_state); 2643 set_bit(BH_Eopnotsupp, &bh->b_state);
@@ -2648,7 +2645,6 @@ static int end_bio_bh_io_sync(struct bio *bio, unsigned int bytes_done, int err)
2648 2645
2649 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); 2646 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2650 bio_put(bio); 2647 bio_put(bio);
2651 return 0;
2652} 2648}
2653 2649
2654int submit_bh(int rw, struct buffer_head * bh) 2650int submit_bh(int rw, struct buffer_head * bh)
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 37310b0e8107..b9e3357bcc2e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -21,7 +21,6 @@
21#include <linux/if.h> 21#include <linux/if.h>
22#include <linux/if_bridge.h> 22#include <linux/if_bridge.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/hdreg.h>
25#include <linux/raid/md.h> 24#include <linux/raid/md.h>
26#include <linux/kd.h> 25#include <linux/kd.h>
27#include <linux/dirent.h> 26#include <linux/dirent.h>
@@ -33,12 +32,10 @@
33#include <linux/vt.h> 32#include <linux/vt.h>
34#include <linux/fs.h> 33#include <linux/fs.h>
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/fd.h>
37#include <linux/ppp_defs.h> 35#include <linux/ppp_defs.h>
38#include <linux/if_ppp.h> 36#include <linux/if_ppp.h>
39#include <linux/if_pppox.h> 37#include <linux/if_pppox.h>
40#include <linux/mtio.h> 38#include <linux/mtio.h>
41#include <linux/cdrom.h>
42#include <linux/auto_fs.h> 39#include <linux/auto_fs.h>
43#include <linux/auto_fs4.h> 40#include <linux/auto_fs4.h>
44#include <linux/tty.h> 41#include <linux/tty.h>
@@ -48,7 +45,6 @@
48#include <linux/netdevice.h> 45#include <linux/netdevice.h>
49#include <linux/raw.h> 46#include <linux/raw.h>
50#include <linux/smb_fs.h> 47#include <linux/smb_fs.h>
51#include <linux/blkpg.h>
52#include <linux/blkdev.h> 48#include <linux/blkdev.h>
53#include <linux/elevator.h> 49#include <linux/elevator.h>
54#include <linux/rtc.h> 50#include <linux/rtc.h>
@@ -62,7 +58,6 @@
62#include <linux/i2c-dev.h> 58#include <linux/i2c-dev.h>
63#include <linux/wireless.h> 59#include <linux/wireless.h>
64#include <linux/atalk.h> 60#include <linux/atalk.h>
65#include <linux/blktrace_api.h>
66#include <linux/loop.h> 61#include <linux/loop.h>
67 62
68#include <net/bluetooth/bluetooth.h> 63#include <net/bluetooth/bluetooth.h>
@@ -668,53 +663,6 @@ out:
668#endif 663#endif
669 664
670#ifdef CONFIG_BLOCK 665#ifdef CONFIG_BLOCK
671struct hd_geometry32 {
672 unsigned char heads;
673 unsigned char sectors;
674 unsigned short cylinders;
675 u32 start;
676};
677
678static int hdio_getgeo(unsigned int fd, unsigned int cmd, unsigned long arg)
679{
680 mm_segment_t old_fs = get_fs();
681 struct hd_geometry geo;
682 struct hd_geometry32 __user *ugeo;
683 int err;
684
685 set_fs (KERNEL_DS);
686 err = sys_ioctl(fd, HDIO_GETGEO, (unsigned long)&geo);
687 set_fs (old_fs);
688 ugeo = compat_ptr(arg);
689 if (!err) {
690 err = copy_to_user (ugeo, &geo, 4);
691 err |= __put_user (geo.start, &ugeo->start);
692 if (err)
693 err = -EFAULT;
694 }
695 return err;
696}
697
698static int hdio_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
699{
700 mm_segment_t old_fs = get_fs();
701 unsigned long kval;
702 unsigned int __user *uvp;
703 int error;
704
705 set_fs(KERNEL_DS);
706 error = sys_ioctl(fd, cmd, (long)&kval);
707 set_fs(old_fs);
708
709 if(error == 0) {
710 uvp = compat_ptr(arg);
711 if(put_user(kval, uvp))
712 error = -EFAULT;
713 }
714 return error;
715}
716
717
718typedef struct sg_io_hdr32 { 666typedef struct sg_io_hdr32 {
719 compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */ 667 compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
720 compat_int_t dxfer_direction; /* [i] data transfer direction */ 668 compat_int_t dxfer_direction; /* [i] data transfer direction */
@@ -1089,108 +1037,6 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
1089 return err ? -EFAULT: 0; 1037 return err ? -EFAULT: 0;
1090} 1038}
1091 1039
1092struct cdrom_read_audio32 {
1093 union cdrom_addr addr;
1094 u8 addr_format;
1095 compat_int_t nframes;
1096 compat_caddr_t buf;
1097};
1098
1099struct cdrom_generic_command32 {
1100 unsigned char cmd[CDROM_PACKET_SIZE];
1101 compat_caddr_t buffer;
1102 compat_uint_t buflen;
1103 compat_int_t stat;
1104 compat_caddr_t sense;
1105 unsigned char data_direction;
1106 compat_int_t quiet;
1107 compat_int_t timeout;
1108 compat_caddr_t reserved[1];
1109};
1110
1111static int cdrom_do_read_audio(unsigned int fd, unsigned int cmd, unsigned long arg)
1112{
1113 struct cdrom_read_audio __user *cdread_audio;
1114 struct cdrom_read_audio32 __user *cdread_audio32;
1115 __u32 data;
1116 void __user *datap;
1117
1118 cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio));
1119 cdread_audio32 = compat_ptr(arg);
1120
1121 if (copy_in_user(&cdread_audio->addr,
1122 &cdread_audio32->addr,
1123 (sizeof(*cdread_audio32) -
1124 sizeof(compat_caddr_t))))
1125 return -EFAULT;
1126
1127 if (get_user(data, &cdread_audio32->buf))
1128 return -EFAULT;
1129 datap = compat_ptr(data);
1130 if (put_user(datap, &cdread_audio->buf))
1131 return -EFAULT;
1132
1133 return sys_ioctl(fd, cmd, (unsigned long) cdread_audio);
1134}
1135
1136static int cdrom_do_generic_command(unsigned int fd, unsigned int cmd, unsigned long arg)
1137{
1138 struct cdrom_generic_command __user *cgc;
1139 struct cdrom_generic_command32 __user *cgc32;
1140 u32 data;
1141 unsigned char dir;
1142 int itmp;
1143
1144 cgc = compat_alloc_user_space(sizeof(*cgc));
1145 cgc32 = compat_ptr(arg);
1146
1147 if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
1148 get_user(data, &cgc32->buffer) ||
1149 put_user(compat_ptr(data), &cgc->buffer) ||
1150 copy_in_user(&cgc->buflen, &cgc32->buflen,
1151 (sizeof(unsigned int) + sizeof(int))) ||
1152 get_user(data, &cgc32->sense) ||
1153 put_user(compat_ptr(data), &cgc->sense) ||
1154 get_user(dir, &cgc32->data_direction) ||
1155 put_user(dir, &cgc->data_direction) ||
1156 get_user(itmp, &cgc32->quiet) ||
1157 put_user(itmp, &cgc->quiet) ||
1158 get_user(itmp, &cgc32->timeout) ||
1159 put_user(itmp, &cgc->timeout) ||
1160 get_user(data, &cgc32->reserved[0]) ||
1161 put_user(compat_ptr(data), &cgc->reserved[0]))
1162 return -EFAULT;
1163
1164 return sys_ioctl(fd, cmd, (unsigned long) cgc);
1165}
1166
1167static int cdrom_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
1168{
1169 int err;
1170
1171 switch(cmd) {
1172 case CDROMREADAUDIO:
1173 err = cdrom_do_read_audio(fd, cmd, arg);
1174 break;
1175
1176 case CDROM_SEND_PACKET:
1177 err = cdrom_do_generic_command(fd, cmd, arg);
1178 break;
1179
1180 default:
1181 do {
1182 static int count;
1183 if (++count <= 20)
1184 printk("cdrom_ioctl: Unknown cmd fd(%d) "
1185 "cmd(%08x) arg(%08x)\n",
1186 (int)fd, (unsigned int)cmd, (unsigned int)arg);
1187 } while(0);
1188 err = -EINVAL;
1189 break;
1190 };
1191
1192 return err;
1193}
1194#endif /* CONFIG_BLOCK */ 1040#endif /* CONFIG_BLOCK */
1195 1041
1196#ifdef CONFIG_VT 1042#ifdef CONFIG_VT
@@ -1536,71 +1382,11 @@ ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
1536 return -EINVAL; 1382 return -EINVAL;
1537} 1383}
1538 1384
1539#ifdef CONFIG_BLOCK
1540static int broken_blkgetsize(unsigned int fd, unsigned int cmd, unsigned long arg)
1541{
1542 /* The mkswap binary hard codes it to Intel value :-((( */
1543 return w_long(fd, BLKGETSIZE, arg);
1544}
1545
1546struct blkpg_ioctl_arg32 {
1547 compat_int_t op;
1548 compat_int_t flags;
1549 compat_int_t datalen;
1550 compat_caddr_t data;
1551};
1552
1553static int blkpg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
1554{
1555 struct blkpg_ioctl_arg32 __user *ua32 = compat_ptr(arg);
1556 struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a));
1557 compat_caddr_t udata;
1558 compat_int_t n;
1559 int err;
1560
1561 err = get_user(n, &ua32->op);
1562 err |= put_user(n, &a->op);
1563 err |= get_user(n, &ua32->flags);
1564 err |= put_user(n, &a->flags);
1565 err |= get_user(n, &ua32->datalen);
1566 err |= put_user(n, &a->datalen);
1567 err |= get_user(udata, &ua32->data);
1568 err |= put_user(compat_ptr(udata), &a->data);
1569 if (err)
1570 return err;
1571
1572 return sys_ioctl(fd, cmd, (unsigned long)a);
1573}
1574#endif
1575
1576static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg) 1385static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
1577{ 1386{
1578 return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg); 1387 return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
1579} 1388}
1580 1389
1581#ifdef CONFIG_BLOCK
1582/* Fix sizeof(sizeof()) breakage */
1583#define BLKBSZGET_32 _IOR(0x12,112,int)
1584#define BLKBSZSET_32 _IOW(0x12,113,int)
1585#define BLKGETSIZE64_32 _IOR(0x12,114,int)
1586
1587static int do_blkbszget(unsigned int fd, unsigned int cmd, unsigned long arg)
1588{
1589 return sys_ioctl(fd, BLKBSZGET, (unsigned long)compat_ptr(arg));
1590}
1591
1592static int do_blkbszset(unsigned int fd, unsigned int cmd, unsigned long arg)
1593{
1594 return sys_ioctl(fd, BLKBSZSET, (unsigned long)compat_ptr(arg));
1595}
1596
1597static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
1598 unsigned long arg)
1599{
1600 return sys_ioctl(fd, BLKGETSIZE64, (unsigned long)compat_ptr(arg));
1601}
1602#endif
1603
1604/* Bluetooth ioctls */ 1390/* Bluetooth ioctls */
1605#define HCIUARTSETPROTO _IOW('U', 200, int) 1391#define HCIUARTSETPROTO _IOW('U', 200, int)
1606#define HCIUARTGETPROTO _IOR('U', 201, int) 1392#define HCIUARTGETPROTO _IOR('U', 201, int)
@@ -1620,333 +1406,6 @@ static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
1620#define HIDPGETCONNLIST _IOR('H', 210, int) 1406#define HIDPGETCONNLIST _IOR('H', 210, int)
1621#define HIDPGETCONNINFO _IOR('H', 211, int) 1407#define HIDPGETCONNINFO _IOR('H', 211, int)
1622 1408
1623#ifdef CONFIG_BLOCK
1624struct floppy_struct32 {
1625 compat_uint_t size;
1626 compat_uint_t sect;
1627 compat_uint_t head;
1628 compat_uint_t track;
1629 compat_uint_t stretch;
1630 unsigned char gap;
1631 unsigned char rate;
1632 unsigned char spec1;
1633 unsigned char fmt_gap;
1634 const compat_caddr_t name;
1635};
1636
1637struct floppy_drive_params32 {
1638 char cmos;
1639 compat_ulong_t max_dtr;
1640 compat_ulong_t hlt;
1641 compat_ulong_t hut;
1642 compat_ulong_t srt;
1643 compat_ulong_t spinup;
1644 compat_ulong_t spindown;
1645 unsigned char spindown_offset;
1646 unsigned char select_delay;
1647 unsigned char rps;
1648 unsigned char tracks;
1649 compat_ulong_t timeout;
1650 unsigned char interleave_sect;
1651 struct floppy_max_errors max_errors;
1652 char flags;
1653 char read_track;
1654 short autodetect[8];
1655 compat_int_t checkfreq;
1656 compat_int_t native_format;
1657};
1658
1659struct floppy_drive_struct32 {
1660 signed char flags;
1661 compat_ulong_t spinup_date;
1662 compat_ulong_t select_date;
1663 compat_ulong_t first_read_date;
1664 short probed_format;
1665 short track;
1666 short maxblock;
1667 short maxtrack;
1668 compat_int_t generation;
1669 compat_int_t keep_data;
1670 compat_int_t fd_ref;
1671 compat_int_t fd_device;
1672 compat_int_t last_checked;
1673 compat_caddr_t dmabuf;
1674 compat_int_t bufblocks;
1675};
1676
1677struct floppy_fdc_state32 {
1678 compat_int_t spec1;
1679 compat_int_t spec2;
1680 compat_int_t dtr;
1681 unsigned char version;
1682 unsigned char dor;
1683 compat_ulong_t address;
1684 unsigned int rawcmd:2;
1685 unsigned int reset:1;
1686 unsigned int need_configure:1;
1687 unsigned int perp_mode:2;
1688 unsigned int has_fifo:1;
1689 unsigned int driver_version;
1690 unsigned char track[4];
1691};
1692
1693struct floppy_write_errors32 {
1694 unsigned int write_errors;
1695 compat_ulong_t first_error_sector;
1696 compat_int_t first_error_generation;
1697 compat_ulong_t last_error_sector;
1698 compat_int_t last_error_generation;
1699 compat_uint_t badness;
1700};
1701
1702#define FDSETPRM32 _IOW(2, 0x42, struct floppy_struct32)
1703#define FDDEFPRM32 _IOW(2, 0x43, struct floppy_struct32)
1704#define FDGETPRM32 _IOR(2, 0x04, struct floppy_struct32)
1705#define FDSETDRVPRM32 _IOW(2, 0x90, struct floppy_drive_params32)
1706#define FDGETDRVPRM32 _IOR(2, 0x11, struct floppy_drive_params32)
1707#define FDGETDRVSTAT32 _IOR(2, 0x12, struct floppy_drive_struct32)
1708#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct floppy_drive_struct32)
1709#define FDGETFDCSTAT32 _IOR(2, 0x15, struct floppy_fdc_state32)
1710#define FDWERRORGET32 _IOR(2, 0x17, struct floppy_write_errors32)
1711
1712static struct {
1713 unsigned int cmd32;
1714 unsigned int cmd;
1715} fd_ioctl_trans_table[] = {
1716 { FDSETPRM32, FDSETPRM },
1717 { FDDEFPRM32, FDDEFPRM },
1718 { FDGETPRM32, FDGETPRM },
1719 { FDSETDRVPRM32, FDSETDRVPRM },
1720 { FDGETDRVPRM32, FDGETDRVPRM },
1721 { FDGETDRVSTAT32, FDGETDRVSTAT },
1722 { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
1723 { FDGETFDCSTAT32, FDGETFDCSTAT },
1724 { FDWERRORGET32, FDWERRORGET }
1725};
1726
1727#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
1728
1729static int fd_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
1730{
1731 mm_segment_t old_fs = get_fs();
1732 void *karg = NULL;
1733 unsigned int kcmd = 0;
1734 int i, err;
1735
1736 for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
1737 if (cmd == fd_ioctl_trans_table[i].cmd32) {
1738 kcmd = fd_ioctl_trans_table[i].cmd;
1739 break;
1740 }
1741 if (!kcmd)
1742 return -EINVAL;
1743
1744 switch (cmd) {
1745 case FDSETPRM32:
1746 case FDDEFPRM32:
1747 case FDGETPRM32:
1748 {
1749 compat_uptr_t name;
1750 struct floppy_struct32 __user *uf;
1751 struct floppy_struct *f;
1752
1753 uf = compat_ptr(arg);
1754 f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
1755 if (!karg)
1756 return -ENOMEM;
1757 if (cmd == FDGETPRM32)
1758 break;
1759 err = __get_user(f->size, &uf->size);
1760 err |= __get_user(f->sect, &uf->sect);
1761 err |= __get_user(f->head, &uf->head);
1762 err |= __get_user(f->track, &uf->track);
1763 err |= __get_user(f->stretch, &uf->stretch);
1764 err |= __get_user(f->gap, &uf->gap);
1765 err |= __get_user(f->rate, &uf->rate);
1766 err |= __get_user(f->spec1, &uf->spec1);
1767 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
1768 err |= __get_user(name, &uf->name);
1769 f->name = compat_ptr(name);
1770 if (err) {
1771 err = -EFAULT;
1772 goto out;
1773 }
1774 break;
1775 }
1776 case FDSETDRVPRM32:
1777 case FDGETDRVPRM32:
1778 {
1779 struct floppy_drive_params32 __user *uf;
1780 struct floppy_drive_params *f;
1781
1782 uf = compat_ptr(arg);
1783 f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
1784 if (!karg)
1785 return -ENOMEM;
1786 if (cmd == FDGETDRVPRM32)
1787 break;
1788 err = __get_user(f->cmos, &uf->cmos);
1789 err |= __get_user(f->max_dtr, &uf->max_dtr);
1790 err |= __get_user(f->hlt, &uf->hlt);
1791 err |= __get_user(f->hut, &uf->hut);
1792 err |= __get_user(f->srt, &uf->srt);
1793 err |= __get_user(f->spinup, &uf->spinup);
1794 err |= __get_user(f->spindown, &uf->spindown);
1795 err |= __get_user(f->spindown_offset, &uf->spindown_offset);
1796 err |= __get_user(f->select_delay, &uf->select_delay);
1797 err |= __get_user(f->rps, &uf->rps);
1798 err |= __get_user(f->tracks, &uf->tracks);
1799 err |= __get_user(f->timeout, &uf->timeout);
1800 err |= __get_user(f->interleave_sect, &uf->interleave_sect);
1801 err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
1802 err |= __get_user(f->flags, &uf->flags);
1803 err |= __get_user(f->read_track, &uf->read_track);
1804 err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
1805 err |= __get_user(f->checkfreq, &uf->checkfreq);
1806 err |= __get_user(f->native_format, &uf->native_format);
1807 if (err) {
1808 err = -EFAULT;
1809 goto out;
1810 }
1811 break;
1812 }
1813 case FDGETDRVSTAT32:
1814 case FDPOLLDRVSTAT32:
1815 karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
1816 if (!karg)
1817 return -ENOMEM;
1818 break;
1819 case FDGETFDCSTAT32:
1820 karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
1821 if (!karg)
1822 return -ENOMEM;
1823 break;
1824 case FDWERRORGET32:
1825 karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
1826 if (!karg)
1827 return -ENOMEM;
1828 break;
1829 default:
1830 return -EINVAL;
1831 }
1832 set_fs (KERNEL_DS);
1833 err = sys_ioctl (fd, kcmd, (unsigned long)karg);
1834 set_fs (old_fs);
1835 if (err)
1836 goto out;
1837 switch (cmd) {
1838 case FDGETPRM32:
1839 {
1840 struct floppy_struct *f = karg;
1841 struct floppy_struct32 __user *uf = compat_ptr(arg);
1842
1843 err = __put_user(f->size, &uf->size);
1844 err |= __put_user(f->sect, &uf->sect);
1845 err |= __put_user(f->head, &uf->head);
1846 err |= __put_user(f->track, &uf->track);
1847 err |= __put_user(f->stretch, &uf->stretch);
1848 err |= __put_user(f->gap, &uf->gap);
1849 err |= __put_user(f->rate, &uf->rate);
1850 err |= __put_user(f->spec1, &uf->spec1);
1851 err |= __put_user(f->fmt_gap, &uf->fmt_gap);
1852 err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
1853 break;
1854 }
1855 case FDGETDRVPRM32:
1856 {
1857 struct floppy_drive_params32 __user *uf;
1858 struct floppy_drive_params *f = karg;
1859
1860 uf = compat_ptr(arg);
1861 err = __put_user(f->cmos, &uf->cmos);
1862 err |= __put_user(f->max_dtr, &uf->max_dtr);
1863 err |= __put_user(f->hlt, &uf->hlt);
1864 err |= __put_user(f->hut, &uf->hut);
1865 err |= __put_user(f->srt, &uf->srt);
1866 err |= __put_user(f->spinup, &uf->spinup);
1867 err |= __put_user(f->spindown, &uf->spindown);
1868 err |= __put_user(f->spindown_offset, &uf->spindown_offset);
1869 err |= __put_user(f->select_delay, &uf->select_delay);
1870 err |= __put_user(f->rps, &uf->rps);
1871 err |= __put_user(f->tracks, &uf->tracks);
1872 err |= __put_user(f->timeout, &uf->timeout);
1873 err |= __put_user(f->interleave_sect, &uf->interleave_sect);
1874 err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
1875 err |= __put_user(f->flags, &uf->flags);
1876 err |= __put_user(f->read_track, &uf->read_track);
1877 err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
1878 err |= __put_user(f->checkfreq, &uf->checkfreq);
1879 err |= __put_user(f->native_format, &uf->native_format);
1880 break;
1881 }
1882 case FDGETDRVSTAT32:
1883 case FDPOLLDRVSTAT32:
1884 {
1885 struct floppy_drive_struct32 __user *uf;
1886 struct floppy_drive_struct *f = karg;
1887
1888 uf = compat_ptr(arg);
1889 err = __put_user(f->flags, &uf->flags);
1890 err |= __put_user(f->spinup_date, &uf->spinup_date);
1891 err |= __put_user(f->select_date, &uf->select_date);
1892 err |= __put_user(f->first_read_date, &uf->first_read_date);
1893 err |= __put_user(f->probed_format, &uf->probed_format);
1894 err |= __put_user(f->track, &uf->track);
1895 err |= __put_user(f->maxblock, &uf->maxblock);
1896 err |= __put_user(f->maxtrack, &uf->maxtrack);
1897 err |= __put_user(f->generation, &uf->generation);
1898 err |= __put_user(f->keep_data, &uf->keep_data);
1899 err |= __put_user(f->fd_ref, &uf->fd_ref);
1900 err |= __put_user(f->fd_device, &uf->fd_device);
1901 err |= __put_user(f->last_checked, &uf->last_checked);
1902 err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
1903 err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
1904 break;
1905 }
1906 case FDGETFDCSTAT32:
1907 {
1908 struct floppy_fdc_state32 __user *uf;
1909 struct floppy_fdc_state *f = karg;
1910
1911 uf = compat_ptr(arg);
1912 err = __put_user(f->spec1, &uf->spec1);
1913 err |= __put_user(f->spec2, &uf->spec2);
1914 err |= __put_user(f->dtr, &uf->dtr);
1915 err |= __put_user(f->version, &uf->version);
1916 err |= __put_user(f->dor, &uf->dor);
1917 err |= __put_user(f->address, &uf->address);
1918 err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
1919 (char *)&f->address + sizeof(f->address), sizeof(int));
1920 err |= __put_user(f->driver_version, &uf->driver_version);
1921 err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
1922 break;
1923 }
1924 case FDWERRORGET32:
1925 {
1926 struct floppy_write_errors32 __user *uf;
1927 struct floppy_write_errors *f = karg;
1928
1929 uf = compat_ptr(arg);
1930 err = __put_user(f->write_errors, &uf->write_errors);
1931 err |= __put_user(f->first_error_sector, &uf->first_error_sector);
1932 err |= __put_user(f->first_error_generation, &uf->first_error_generation);
1933 err |= __put_user(f->last_error_sector, &uf->last_error_sector);
1934 err |= __put_user(f->last_error_generation, &uf->last_error_generation);
1935 err |= __put_user(f->badness, &uf->badness);
1936 break;
1937 }
1938 default:
1939 break;
1940 }
1941 if (err)
1942 err = -EFAULT;
1943
1944out:
1945 kfree(karg);
1946 return err;
1947}
1948#endif
1949
1950struct mtd_oob_buf32 { 1409struct mtd_oob_buf32 {
1951 u_int32_t start; 1410 u_int32_t start;
1952 u_int32_t length; 1411 u_int32_t length;
@@ -2506,60 +1965,6 @@ COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
2506/* 0x00 */ 1965/* 0x00 */
2507COMPATIBLE_IOCTL(FIBMAP) 1966COMPATIBLE_IOCTL(FIBMAP)
2508COMPATIBLE_IOCTL(FIGETBSZ) 1967COMPATIBLE_IOCTL(FIGETBSZ)
2509/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
2510 * Some need translations, these do not.
2511 */
2512COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
2513COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
2514COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
2515ULONG_IOCTL(HDIO_SET_MULTCOUNT)
2516ULONG_IOCTL(HDIO_SET_UNMASKINTR)
2517ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
2518ULONG_IOCTL(HDIO_SET_32BIT)
2519ULONG_IOCTL(HDIO_SET_NOWERR)
2520ULONG_IOCTL(HDIO_SET_DMA)
2521ULONG_IOCTL(HDIO_SET_PIO_MODE)
2522ULONG_IOCTL(HDIO_SET_NICE)
2523ULONG_IOCTL(HDIO_SET_WCACHE)
2524ULONG_IOCTL(HDIO_SET_ACOUSTIC)
2525ULONG_IOCTL(HDIO_SET_BUSSTATE)
2526ULONG_IOCTL(HDIO_SET_ADDRESS)
2527COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
2528/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
2529COMPATIBLE_IOCTL(0x330)
2530/* 0x02 -- Floppy ioctls */
2531COMPATIBLE_IOCTL(FDMSGON)
2532COMPATIBLE_IOCTL(FDMSGOFF)
2533COMPATIBLE_IOCTL(FDSETEMSGTRESH)
2534COMPATIBLE_IOCTL(FDFLUSH)
2535COMPATIBLE_IOCTL(FDWERRORCLR)
2536COMPATIBLE_IOCTL(FDSETMAXERRS)
2537COMPATIBLE_IOCTL(FDGETMAXERRS)
2538COMPATIBLE_IOCTL(FDGETDRVTYP)
2539COMPATIBLE_IOCTL(FDEJECT)
2540COMPATIBLE_IOCTL(FDCLRPRM)
2541COMPATIBLE_IOCTL(FDFMTBEG)
2542COMPATIBLE_IOCTL(FDFMTEND)
2543COMPATIBLE_IOCTL(FDRESET)
2544COMPATIBLE_IOCTL(FDTWADDLE)
2545COMPATIBLE_IOCTL(FDFMTTRK)
2546COMPATIBLE_IOCTL(FDRAWCMD)
2547/* 0x12 */
2548#ifdef CONFIG_BLOCK
2549COMPATIBLE_IOCTL(BLKRASET)
2550COMPATIBLE_IOCTL(BLKROSET)
2551COMPATIBLE_IOCTL(BLKROGET)
2552COMPATIBLE_IOCTL(BLKRRPART)
2553COMPATIBLE_IOCTL(BLKFLSBUF)
2554COMPATIBLE_IOCTL(BLKSECTSET)
2555COMPATIBLE_IOCTL(BLKSSZGET)
2556COMPATIBLE_IOCTL(BLKTRACESTART)
2557COMPATIBLE_IOCTL(BLKTRACESTOP)
2558COMPATIBLE_IOCTL(BLKTRACESETUP)
2559COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
2560ULONG_IOCTL(BLKRASET)
2561ULONG_IOCTL(BLKFRASET)
2562#endif
2563/* RAID */ 1968/* RAID */
2564COMPATIBLE_IOCTL(RAID_VERSION) 1969COMPATIBLE_IOCTL(RAID_VERSION)
2565COMPATIBLE_IOCTL(GET_ARRAY_INFO) 1970COMPATIBLE_IOCTL(GET_ARRAY_INFO)
@@ -2807,50 +2212,6 @@ COMPATIBLE_IOCTL(PPGETMODE)
2807COMPATIBLE_IOCTL(PPGETPHASE) 2212COMPATIBLE_IOCTL(PPGETPHASE)
2808COMPATIBLE_IOCTL(PPGETFLAGS) 2213COMPATIBLE_IOCTL(PPGETFLAGS)
2809COMPATIBLE_IOCTL(PPSETFLAGS) 2214COMPATIBLE_IOCTL(PPSETFLAGS)
2810/* CDROM stuff */
2811COMPATIBLE_IOCTL(CDROMPAUSE)
2812COMPATIBLE_IOCTL(CDROMRESUME)
2813COMPATIBLE_IOCTL(CDROMPLAYMSF)
2814COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
2815COMPATIBLE_IOCTL(CDROMREADTOCHDR)
2816COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
2817COMPATIBLE_IOCTL(CDROMSTOP)
2818COMPATIBLE_IOCTL(CDROMSTART)
2819COMPATIBLE_IOCTL(CDROMEJECT)
2820COMPATIBLE_IOCTL(CDROMVOLCTRL)
2821COMPATIBLE_IOCTL(CDROMSUBCHNL)
2822ULONG_IOCTL(CDROMEJECT_SW)
2823COMPATIBLE_IOCTL(CDROMMULTISESSION)
2824COMPATIBLE_IOCTL(CDROM_GET_MCN)
2825COMPATIBLE_IOCTL(CDROMRESET)
2826COMPATIBLE_IOCTL(CDROMVOLREAD)
2827COMPATIBLE_IOCTL(CDROMSEEK)
2828COMPATIBLE_IOCTL(CDROMPLAYBLK)
2829COMPATIBLE_IOCTL(CDROMCLOSETRAY)
2830ULONG_IOCTL(CDROM_SET_OPTIONS)
2831ULONG_IOCTL(CDROM_CLEAR_OPTIONS)
2832ULONG_IOCTL(CDROM_SELECT_SPEED)
2833ULONG_IOCTL(CDROM_SELECT_DISC)
2834ULONG_IOCTL(CDROM_MEDIA_CHANGED)
2835ULONG_IOCTL(CDROM_DRIVE_STATUS)
2836COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
2837COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
2838ULONG_IOCTL(CDROM_LOCKDOOR)
2839ULONG_IOCTL(CDROM_DEBUG)
2840COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
2841/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
2842 * not take a struct cdrom_read, instead they take a struct cdrom_msf
2843 * which is compatible.
2844 */
2845COMPATIBLE_IOCTL(CDROMREADMODE2)
2846COMPATIBLE_IOCTL(CDROMREADMODE1)
2847COMPATIBLE_IOCTL(CDROMREADRAW)
2848COMPATIBLE_IOCTL(CDROMREADCOOKED)
2849COMPATIBLE_IOCTL(CDROMREADALL)
2850/* DVD ioctls */
2851COMPATIBLE_IOCTL(DVD_READ_STRUCT)
2852COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
2853COMPATIBLE_IOCTL(DVD_AUTH)
2854/* pktcdvd */ 2215/* pktcdvd */
2855COMPATIBLE_IOCTL(PACKET_CTRL_CMD) 2216COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
2856/* Big A */ 2217/* Big A */
@@ -3336,33 +2697,6 @@ HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
3336HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns) 2697HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
3337#endif 2698#endif
3338#ifdef CONFIG_BLOCK 2699#ifdef CONFIG_BLOCK
3339HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
3340HANDLE_IOCTL(BLKRAGET, w_long)
3341HANDLE_IOCTL(BLKGETSIZE, w_long)
3342HANDLE_IOCTL(0x1260, broken_blkgetsize)
3343HANDLE_IOCTL(BLKFRAGET, w_long)
3344HANDLE_IOCTL(BLKSECTGET, w_long)
3345HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
3346HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
3347HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
3348HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
3349HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
3350HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
3351HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
3352HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
3353HANDLE_IOCTL(HDIO_GET_WCACHE, hdio_ioctl_trans)
3354HANDLE_IOCTL(HDIO_GET_ACOUSTIC, hdio_ioctl_trans)
3355HANDLE_IOCTL(HDIO_GET_ADDRESS, hdio_ioctl_trans)
3356HANDLE_IOCTL(HDIO_GET_BUSSTATE, hdio_ioctl_trans)
3357HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
3358HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
3359HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
3360HANDLE_IOCTL(FDSETDRVPRM32, fd_ioctl_trans)
3361HANDLE_IOCTL(FDGETDRVPRM32, fd_ioctl_trans)
3362HANDLE_IOCTL(FDGETDRVSTAT32, fd_ioctl_trans)
3363HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
3364HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
3365HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
3366HANDLE_IOCTL(SG_IO,sg_ioctl_trans) 2700HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
3367HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans) 2701HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
3368#endif 2702#endif
@@ -3373,8 +2707,6 @@ HANDLE_IOCTL(PPPIOCSACTIVE32, ppp_sock_fprog_ioctl_trans)
3373#ifdef CONFIG_BLOCK 2707#ifdef CONFIG_BLOCK
3374HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans) 2708HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
3375HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans) 2709HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
3376HANDLE_IOCTL(CDROMREADAUDIO, cdrom_ioctl_trans)
3377HANDLE_IOCTL(CDROM_SEND_PACKET, cdrom_ioctl_trans)
3378#endif 2710#endif
3379#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int) 2711#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
3380HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout) 2712HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
@@ -3415,9 +2747,6 @@ HANDLE_IOCTL(SONET_GETFRAMING, do_atm_ioctl)
3415HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl) 2747HANDLE_IOCTL(SONET_GETFRSENSE, do_atm_ioctl)
3416/* block stuff */ 2748/* block stuff */
3417#ifdef CONFIG_BLOCK 2749#ifdef CONFIG_BLOCK
3418HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
3419HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
3420HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
3421/* Raw devices */ 2750/* Raw devices */
3422HANDLE_IOCTL(RAW_SETBIND, raw_ioctl) 2751HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
3423HANDLE_IOCTL(RAW_GETBIND, raw_ioctl) 2752HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 901dc55e9f54..b5928a7b6a5a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -264,15 +264,12 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio);
264/* 264/*
265 * Asynchronous IO callback. 265 * Asynchronous IO callback.
266 */ 266 */
267static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error) 267static void dio_bio_end_aio(struct bio *bio, int error)
268{ 268{
269 struct dio *dio = bio->bi_private; 269 struct dio *dio = bio->bi_private;
270 unsigned long remaining; 270 unsigned long remaining;
271 unsigned long flags; 271 unsigned long flags;
272 272
273 if (bio->bi_size)
274 return 1;
275
276 /* cleanup the bio */ 273 /* cleanup the bio */
277 dio_bio_complete(dio, bio); 274 dio_bio_complete(dio, bio);
278 275
@@ -287,8 +284,6 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
287 aio_complete(dio->iocb, ret, 0); 284 aio_complete(dio->iocb, ret, 0);
288 kfree(dio); 285 kfree(dio);
289 } 286 }
290
291 return 0;
292} 287}
293 288
294/* 289/*
@@ -298,21 +293,17 @@ static int dio_bio_end_aio(struct bio *bio, unsigned int bytes_done, int error)
298 * During I/O bi_private points at the dio. After I/O, bi_private is used to 293 * During I/O bi_private points at the dio. After I/O, bi_private is used to
299 * implement a singly-linked list of completed BIOs, at dio->bio_list. 294 * implement a singly-linked list of completed BIOs, at dio->bio_list.
300 */ 295 */
301static int dio_bio_end_io(struct bio *bio, unsigned int bytes_done, int error) 296static void dio_bio_end_io(struct bio *bio, int error)
302{ 297{
303 struct dio *dio = bio->bi_private; 298 struct dio *dio = bio->bi_private;
304 unsigned long flags; 299 unsigned long flags;
305 300
306 if (bio->bi_size)
307 return 1;
308
309 spin_lock_irqsave(&dio->bio_lock, flags); 301 spin_lock_irqsave(&dio->bio_lock, flags);
310 bio->bi_private = dio->bio_list; 302 bio->bi_private = dio->bio_list;
311 dio->bio_list = bio; 303 dio->bio_list = bio;
312 if (--dio->refcount == 1 && dio->waiter) 304 if (--dio->refcount == 1 && dio->waiter)
313 wake_up_process(dio->waiter); 305 wake_up_process(dio->waiter);
314 spin_unlock_irqrestore(&dio->bio_lock, flags); 306 spin_unlock_irqrestore(&dio->bio_lock, flags);
315 return 0;
316} 307}
317 308
318static int 309static int
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a4b142a6a2c7..8d23b0b38717 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
18#include <linux/sched.h> 19#include <linux/sched.h>
19#include <linux/fs.h> 20#include <linux/fs.h>
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index f916b9740c75..2473e2a86d1b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -160,11 +160,9 @@ int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
160} 160}
161 161
162 162
163static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error) 163static void end_bio_io_page(struct bio *bio, int error)
164{ 164{
165 struct page *page = bio->bi_private; 165 struct page *page = bio->bi_private;
166 if (bio->bi_size)
167 return 1;
168 166
169 if (!error) 167 if (!error)
170 SetPageUptodate(page); 168 SetPageUptodate(page);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index de3e4a506dbc..57c3b8ac36bf 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2200,16 +2200,13 @@ static int lbmIOWait(struct lbuf * bp, int flag)
2200 * 2200 *
2201 * executed at INTIODONE level 2201 * executed at INTIODONE level
2202 */ 2202 */
2203static int lbmIODone(struct bio *bio, unsigned int bytes_done, int error) 2203static void lbmIODone(struct bio *bio, int error)
2204{ 2204{
2205 struct lbuf *bp = bio->bi_private; 2205 struct lbuf *bp = bio->bi_private;
2206 struct lbuf *nextbp, *tail; 2206 struct lbuf *nextbp, *tail;
2207 struct jfs_log *log; 2207 struct jfs_log *log;
2208 unsigned long flags; 2208 unsigned long flags;
2209 2209
2210 if (bio->bi_size)
2211 return 1;
2212
2213 /* 2210 /*
2214 * get back jfs buffer bound to the i/o buffer 2211 * get back jfs buffer bound to the i/o buffer
2215 */ 2212 */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 62e96be02acf..1332adc0b9fa 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -280,14 +280,10 @@ static void last_read_complete(struct page *page)
280 unlock_page(page); 280 unlock_page(page);
281} 281}
282 282
283static int metapage_read_end_io(struct bio *bio, unsigned int bytes_done, 283static void metapage_read_end_io(struct bio *bio, int err)
284 int err)
285{ 284{
286 struct page *page = bio->bi_private; 285 struct page *page = bio->bi_private;
287 286
288 if (bio->bi_size)
289 return 1;
290
291 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 287 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
292 printk(KERN_ERR "metapage_read_end_io: I/O error\n"); 288 printk(KERN_ERR "metapage_read_end_io: I/O error\n");
293 SetPageError(page); 289 SetPageError(page);
@@ -341,16 +337,12 @@ static void last_write_complete(struct page *page)
341 end_page_writeback(page); 337 end_page_writeback(page);
342} 338}
343 339
344static int metapage_write_end_io(struct bio *bio, unsigned int bytes_done, 340static void metapage_write_end_io(struct bio *bio, int err)
345 int err)
346{ 341{
347 struct page *page = bio->bi_private; 342 struct page *page = bio->bi_private;
348 343
349 BUG_ON(!PagePrivate(page)); 344 BUG_ON(!PagePrivate(page));
350 345
351 if (bio->bi_size)
352 return 1;
353
354 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { 346 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
355 printk(KERN_ERR "metapage_write_end_io: I/O error\n"); 347 printk(KERN_ERR "metapage_write_end_io: I/O error\n");
356 SetPageError(page); 348 SetPageError(page);
diff --git a/fs/mpage.c b/fs/mpage.c
index c1698f2291aa..b1c3e5890508 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -39,14 +39,11 @@
39 * status of that page is hard. See end_buffer_async_read() for the details. 39 * status of that page is hard. See end_buffer_async_read() for the details.
40 * There is no point in duplicating all that complexity. 40 * There is no point in duplicating all that complexity.
41 */ 41 */
42static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 42static void mpage_end_io_read(struct bio *bio, int err)
43{ 43{
44 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 44 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
45 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 45 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
46 46
47 if (bio->bi_size)
48 return 1;
49
50 do { 47 do {
51 struct page *page = bvec->bv_page; 48 struct page *page = bvec->bv_page;
52 49
@@ -62,17 +59,13 @@ static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
62 unlock_page(page); 59 unlock_page(page);
63 } while (bvec >= bio->bi_io_vec); 60 } while (bvec >= bio->bi_io_vec);
64 bio_put(bio); 61 bio_put(bio);
65 return 0;
66} 62}
67 63
68static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err) 64static void mpage_end_io_write(struct bio *bio, int err)
69{ 65{
70 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 66 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
71 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 67 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
72 68
73 if (bio->bi_size)
74 return 1;
75
76 do { 69 do {
77 struct page *page = bvec->bv_page; 70 struct page *page = bvec->bv_page;
78 71
@@ -87,7 +80,6 @@ static int mpage_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
87 end_page_writeback(page); 80 end_page_writeback(page);
88 } while (bvec >= bio->bi_io_vec); 81 } while (bvec >= bio->bi_io_vec);
89 bio_put(bio); 82 bio_put(bio);
90 return 0;
91} 83}
92 84
93static struct bio *mpage_bio_submit(int rw, struct bio *bio) 85static struct bio *mpage_bio_submit(int rw, struct bio *bio)
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 2bd7f788cf34..da2c2b442b49 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -217,7 +217,6 @@ static void o2hb_wait_on_io(struct o2hb_region *reg,
217} 217}
218 218
219static int o2hb_bio_end_io(struct bio *bio, 219static int o2hb_bio_end_io(struct bio *bio,
220 unsigned int bytes_done,
221 int error) 220 int error)
222{ 221{
223 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; 222 struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
@@ -227,9 +226,6 @@ static int o2hb_bio_end_io(struct bio *bio,
227 wc->wc_error = error; 226 wc->wc_error = error;
228 } 227 }
229 228
230 if (bio->bi_size)
231 return 1;
232
233 o2hb_bio_wait_dec(wc, 1); 229 o2hb_bio_wait_dec(wc, 1);
234 bio_put(bio); 230 bio_put(bio);
235 return 0; 231 return 0;
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 5f152f60d74d..3f13519436af 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -326,14 +326,10 @@ xfs_iomap_valid(
326STATIC int 326STATIC int
327xfs_end_bio( 327xfs_end_bio(
328 struct bio *bio, 328 struct bio *bio,
329 unsigned int bytes_done,
330 int error) 329 int error)
331{ 330{
332 xfs_ioend_t *ioend = bio->bi_private; 331 xfs_ioend_t *ioend = bio->bi_private;
333 332
334 if (bio->bi_size)
335 return 1;
336
337 ASSERT(atomic_read(&bio->bi_cnt) >= 1); 333 ASSERT(atomic_read(&bio->bi_cnt) >= 1);
338 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; 334 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
339 335
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b0f0e58866de..6a75f4d984a1 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1106,16 +1106,12 @@ _xfs_buf_ioend(
1106STATIC int 1106STATIC int
1107xfs_buf_bio_end_io( 1107xfs_buf_bio_end_io(
1108 struct bio *bio, 1108 struct bio *bio,
1109 unsigned int bytes_done,
1110 int error) 1109 int error)
1111{ 1110{
1112 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; 1111 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1113 unsigned int blocksize = bp->b_target->bt_bsize; 1112 unsigned int blocksize = bp->b_target->bt_bsize;
1114 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 1113 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1115 1114
1116 if (bio->bi_size)
1117 return 1;
1118
1119 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1115 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1120 bp->b_error = EIO; 1116 bp->b_error = EIO;
1121 1117
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1ddef34f43c3..089a8bc55dd4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -64,7 +64,7 @@ struct bio_vec {
64 64
65struct bio_set; 65struct bio_set;
66struct bio; 66struct bio;
67typedef int (bio_end_io_t) (struct bio *, unsigned int, int); 67typedef void (bio_end_io_t) (struct bio *, int);
68typedef void (bio_destructor_t) (struct bio *); 68typedef void (bio_destructor_t) (struct bio *);
69 69
70/* 70/*
@@ -226,7 +226,7 @@ struct bio {
226#define BIO_SEG_BOUNDARY(q, b1, b2) \ 226#define BIO_SEG_BOUNDARY(q, b1, b2) \
227 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) 227 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
228 228
229#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO) 229#define bio_io_error(bio) bio_endio((bio), -EIO)
230 230
231/* 231/*
232 * drivers should not use the __ version unless they _really_ want to 232 * drivers should not use the __ version unless they _really_ want to
@@ -286,7 +286,7 @@ extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
286extern void bio_put(struct bio *); 286extern void bio_put(struct bio *);
287extern void bio_free(struct bio *, struct bio_set *); 287extern void bio_free(struct bio *, struct bio_set *);
288 288
289extern void bio_endio(struct bio *, unsigned int, int); 289extern void bio_endio(struct bio *, int);
290struct request_queue; 290struct request_queue;
291extern int bio_phys_segments(struct request_queue *, struct bio *); 291extern int bio_phys_segments(struct request_queue *, struct bio *);
292extern int bio_hw_segments(struct request_queue *, struct bio *); 292extern int bio_hw_segments(struct request_queue *, struct bio *);
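Editor's note: the bio.h hunk above is the core interface change in this diff: bi_end_io callbacks now return void and no longer receive a bytes_done count, and bio_io_error() drops its size argument. Below is a minimal sketch of a completion handler written against the new prototype, mirroring the converted callers elsewhere in this diff (gfs2, mpage, page_io); the function name and its page-based bi_private payload are illustrative assumptions, not part of the patch.

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical completion handler for the new bio_end_io_t prototype.
 * It runs exactly once, when the whole bio has completed, so the old
 * "if (bio->bi_size) return 1;" partial-completion check and the int
 * return value are gone.
 */
static void example_end_io(struct bio *bio, int error)
{
        struct page *page = bio->bi_private;    /* assumed: set when the bio was built */

        if (!error)
                SetPageUptodate(page);
        else
                SetPageError(page);

        unlock_page(page);
        bio_put(bio);
}

A submitter would point bio->bi_end_io at such a function before calling submit_bio(); error paths likewise call the two-argument bio_endio() or the single-argument bio_io_error() shown in the hunk above.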
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b126c6f68e27..95be0ac57e76 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_BLKDEV_H 1#ifndef _LINUX_BLKDEV_H
2#define _LINUX_BLKDEV_H 2#define _LINUX_BLKDEV_H
3 3
4#ifdef CONFIG_BLOCK
5
4#include <linux/sched.h> 6#include <linux/sched.h>
5#include <linux/major.h> 7#include <linux/major.h>
6#include <linux/genhd.h> 8#include <linux/genhd.h>
@@ -32,8 +34,6 @@
32) 34)
33#endif 35#endif
34 36
35#ifdef CONFIG_BLOCK
36
37struct scsi_ioctl_command; 37struct scsi_ioctl_command;
38 38
39struct request_queue; 39struct request_queue;
@@ -471,7 +471,6 @@ struct request_queue
471 int orderr, ordcolor; 471 int orderr, ordcolor;
472 struct request pre_flush_rq, bar_rq, post_flush_rq; 472 struct request pre_flush_rq, bar_rq, post_flush_rq;
473 struct request *orig_bar_rq; 473 struct request *orig_bar_rq;
474 unsigned int bi_size;
475 474
476 struct mutex sysfs_lock; 475 struct mutex sysfs_lock;
477 476
@@ -637,10 +636,23 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
637} 636}
638#endif /* CONFIG_MMU */ 637#endif /* CONFIG_MMU */
639 638
640#define rq_for_each_bio(_bio, rq) \ 639struct req_iterator {
640 int i;
641 struct bio *bio;
642};
643
644/* This should not be used directly - use rq_for_each_segment */
645#define __rq_for_each_bio(_bio, rq) \
641 if ((rq->bio)) \ 646 if ((rq->bio)) \
642 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) 647 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
643 648
649#define rq_for_each_segment(bvl, _rq, _iter) \
650 __rq_for_each_bio(_iter.bio, _rq) \
651 bio_for_each_segment(bvl, _iter.bio, _iter.i)
652
653#define rq_iter_last(rq, _iter) \
654 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
655
644extern int blk_register_queue(struct gendisk *disk); 656extern int blk_register_queue(struct gendisk *disk);
645extern void blk_unregister_queue(struct gendisk *disk); 657extern void blk_unregister_queue(struct gendisk *disk);
646extern void register_disk(struct gendisk *dev); 658extern void register_disk(struct gendisk *dev);
@@ -662,8 +674,8 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
662/* 674/*
663 * Temporary export, until SCSI gets fixed up. 675 * Temporary export, until SCSI gets fixed up.
664 */ 676 */
665extern int ll_back_merge_fn(struct request_queue *, struct request *, 677extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
666 struct bio *); 678 struct bio *bio);
667 679
668/* 680/*
669 * A queue has just exitted congestion. Note this in the global counter of 681 * A queue has just exitted congestion. Note this in the global counter of
@@ -810,7 +822,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
810 return bqt->tag_index[tag]; 822 return bqt->tag_index[tag];
811} 823}
812 824
813extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
814extern int blkdev_issue_flush(struct block_device *, sector_t *); 825extern int blkdev_issue_flush(struct block_device *, sector_t *);
815 826
816#define MAX_PHYS_SEGMENTS 128 827#define MAX_PHYS_SEGMENTS 128
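Editor's note: the blkdev.h hunk above replaces the exported rq_for_each_bio() with the req_iterator / rq_for_each_segment() helpers, so drivers walk a request one segment at a time instead of one bio at a time. A minimal sketch of the new iteration pattern follows; the helper name and the byte counting it does are assumptions for illustration only.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical helper: sum the data bytes of every segment in a request.
 * rq_for_each_segment() expands to __rq_for_each_bio() over the request's
 * bios and bio_for_each_segment() over each bio, so the caller sees one
 * flat sequence of bio_vecs.
 */
static unsigned int example_request_bytes(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec *bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter)
                bytes += bvec->bv_len;  /* one contiguous chunk of one page */

        return bytes;
}

rq_iter_last(rq, iter) can be tested inside such a loop when the final segment of the final bio needs special handling.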
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7b5d56b82b59..2e105a12fe29 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -142,10 +142,14 @@ struct blk_user_trace_setup {
142 u32 pid; 142 u32 pid;
143}; 143};
144 144
145#ifdef __KERNEL__
145#if defined(CONFIG_BLK_DEV_IO_TRACE) 146#if defined(CONFIG_BLK_DEV_IO_TRACE)
146extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 147extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
147extern void blk_trace_shutdown(struct request_queue *); 148extern void blk_trace_shutdown(struct request_queue *);
148extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); 149extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
150extern int do_blk_trace_setup(struct request_queue *q,
151 struct block_device *bdev, struct blk_user_trace_setup *buts);
152
149 153
150/** 154/**
151 * blk_add_trace_rq - Add a trace for a request oriented action 155 * blk_add_trace_rq - Add a trace for a request oriented action
@@ -286,6 +290,12 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
286#define blk_add_trace_generic(q, rq, rw, what) do { } while (0) 290#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
287#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) 291#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
288#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) 292#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
293static inline int do_blk_trace_setup(struct request_queue *q,
294 struct block_device *bdev,
295 struct blk_user_trace_setup *buts)
296{
297 return 0;
298}
289#endif /* CONFIG_BLK_DEV_IO_TRACE */ 299#endif /* CONFIG_BLK_DEV_IO_TRACE */
290 300#endif /* __KERNEL__ */
291#endif 301#endif
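Editor's note: the blktrace_api.h hunk above exports do_blk_trace_setup() to kernel code, adds a stub for kernels built without CONFIG_BLK_DEV_IO_TRACE, and wraps the kernel-only declarations in __KERNEL__. A hedged sketch of how an ioctl-style caller might use it; the function name and the user-space copy-in/copy-out flow are assumptions for illustration, not taken from the patch.

#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical BLKTRACESETUP-style path: copy a blk_user_trace_setup from
 * user space, let do_blk_trace_setup() start tracing on the queue, then
 * copy the (possibly updated) setup back.  With tracing compiled out, the
 * inline stub added above makes this path collapse to "return 0".
 */
static int example_trace_setup(struct request_queue *q,
                               struct block_device *bdev,
                               char __user *arg)
{
        struct blk_user_trace_setup buts;
        int ret;

        if (copy_from_user(&buts, arg, sizeof(buts)))
                return -EFAULT;

        ret = do_blk_trace_setup(q, bdev, &buts);
        if (ret)
                return ret;

        if (copy_to_user(arg, &buts, sizeof(buts)))
                return -EFAULT;

        return 0;
}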
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c8636bb3e3a7..cfee06bca2d3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1738,6 +1738,11 @@
1738 1738
1739#define PCI_VENDOR_ID_RADISYS 0x1331 1739#define PCI_VENDOR_ID_RADISYS 0x1331
1740 1740
1741#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
1742#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415
1743#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425
1744#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155
1745
1741#define PCI_VENDOR_ID_DOMEX 0x134a 1746#define PCI_VENDOR_ID_DOMEX 0x134a
1742#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001 1747#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
1743 1748
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 665f85f2a3af..edf681a7fd8f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -221,7 +221,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
221/* linux/mm/page_io.c */ 221/* linux/mm/page_io.c */
222extern int swap_readpage(struct file *, struct page *); 222extern int swap_readpage(struct file *, struct page *);
223extern int swap_writepage(struct page *page, struct writeback_control *wbc); 223extern int swap_writepage(struct page *page, struct writeback_control *wbc);
224extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err); 224extern void end_swap_bio_read(struct bio *bio, int err);
225 225
226/* linux/mm/swap_state.c */ 226/* linux/mm/swap_state.c */
227extern struct address_space swapper_space; 227extern struct address_space swapper_space;
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b4af6bcb7b7a..c7c3337c3a88 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -5,6 +5,7 @@
5#define WRITEBACK_H 5#define WRITEBACK_H
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/fs.h>
8 9
9struct backing_dev_info; 10struct backing_dev_info;
10 11
diff --git a/kernel/sched.c b/kernel/sched.c
index 6107a0cd6325..6c10fa796ca0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -61,6 +61,7 @@
61#include <linux/delayacct.h> 61#include <linux/delayacct.h>
62#include <linux/reciprocal_div.h> 62#include <linux/reciprocal_div.h>
63#include <linux/unistd.h> 63#include <linux/unistd.h>
64#include <linux/pagemap.h>
64 65
65#include <asm/tlb.h> 66#include <asm/tlb.h>
66 67
diff --git a/mm/bounce.c b/mm/bounce.c
index 179fe38a2416..3b549bf31f7d 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -140,26 +140,19 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
140 mempool_free(bvec->bv_page, pool); 140 mempool_free(bvec->bv_page, pool);
141 } 141 }
142 142
143 bio_endio(bio_orig, bio_orig->bi_size, err); 143 bio_endio(bio_orig, err);
144 bio_put(bio); 144 bio_put(bio);
145} 145}
146 146
147static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err) 147static void bounce_end_io_write(struct bio *bio, int err)
148{ 148{
149 if (bio->bi_size)
150 return 1;
151
152 bounce_end_io(bio, page_pool, err); 149 bounce_end_io(bio, page_pool, err);
153 return 0;
154} 150}
155 151
156static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int err) 152static void bounce_end_io_write_isa(struct bio *bio, int err)
157{ 153{
158 if (bio->bi_size)
159 return 1;
160 154
161 bounce_end_io(bio, isa_page_pool, err); 155 bounce_end_io(bio, isa_page_pool, err);
162 return 0;
163} 156}
164 157
165static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err) 158static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
@@ -172,22 +165,14 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
172 bounce_end_io(bio, pool, err); 165 bounce_end_io(bio, pool, err);
173} 166}
174 167
175static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err) 168static void bounce_end_io_read(struct bio *bio, int err)
176{ 169{
177 if (bio->bi_size)
178 return 1;
179
180 __bounce_end_io_read(bio, page_pool, err); 170 __bounce_end_io_read(bio, page_pool, err);
181 return 0;
182} 171}
183 172
184static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int err) 173static void bounce_end_io_read_isa(struct bio *bio, int err)
185{ 174{
186 if (bio->bi_size)
187 return 1;
188
189 __bounce_end_io_read(bio, isa_page_pool, err); 175 __bounce_end_io_read(bio, isa_page_pool, err);
190 return 0;
191} 176}
192 177
193static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, 178static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
diff --git a/mm/page_io.c b/mm/page_io.c
index dbffec0d78c9..3b97f6850273 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -44,14 +44,11 @@ static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
44 return bio; 44 return bio;
45} 45}
46 46
47static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err) 47static void end_swap_bio_write(struct bio *bio, int err)
48{ 48{
49 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 49 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
50 struct page *page = bio->bi_io_vec[0].bv_page; 50 struct page *page = bio->bi_io_vec[0].bv_page;
51 51
52 if (bio->bi_size)
53 return 1;
54
55 if (!uptodate) { 52 if (!uptodate) {
56 SetPageError(page); 53 SetPageError(page);
57 /* 54 /*
@@ -71,17 +68,13 @@ static int end_swap_bio_write(struct bio *bio, unsigned int bytes_done, int err)
71 } 68 }
72 end_page_writeback(page); 69 end_page_writeback(page);
73 bio_put(bio); 70 bio_put(bio);
74 return 0;
75} 71}
76 72
77int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err) 73void end_swap_bio_read(struct bio *bio, int err)
78{ 74{
79 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 75 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
80 struct page *page = bio->bi_io_vec[0].bv_page; 76 struct page *page = bio->bi_io_vec[0].bv_page;
81 77
82 if (bio->bi_size)
83 return 1;
84
85 if (!uptodate) { 78 if (!uptodate) {
86 SetPageError(page); 79 SetPageError(page);
87 ClearPageUptodate(page); 80 ClearPageUptodate(page);
@@ -94,7 +87,6 @@ int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err)
94 } 87 }
95 unlock_page(page); 88 unlock_page(page);
96 bio_put(bio); 89 bio_put(bio);
97 return 0;
98} 90}
99 91
100/* 92/*
diff --git a/mm/readahead.c b/mm/readahead.c
index 39bf45d43320..be20c9d699d3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -15,6 +15,7 @@
15#include <linux/backing-dev.h> 15#include <linux/backing-dev.h>
16#include <linux/task_io_accounting_ops.h> 16#include <linux/task_io_accounting_ops.h>
17#include <linux/pagevec.h> 17#include <linux/pagevec.h>
18#include <linux/pagemap.h>
18 19
19void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) 20void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
20{ 21{