author     David Woodhouse <dwmw2@infradead.org>   2007-10-13 09:58:23 -0400
committer  David Woodhouse <dwmw2@infradead.org>   2007-10-13 09:58:23 -0400
commit     ebf8889bd1fe3615991ff4494635d237280652a2 (patch)
tree       10fb735717122bbb86474339eac07f26e7ccdf40 /block
parent     b160292cc216a50fd0cd386b0bda2cd48352c73b (diff)
parent     752097cec53eea111d087c545179b421e2bde98a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'block')
-rw-r--r--   block/Kconfig          5
-rw-r--r--   block/Makefile         1
-rw-r--r--   block/blktrace.c      54
-rw-r--r--   block/bsg.c            5
-rw-r--r--   block/compat_ioctl.c 814
-rw-r--r--   block/elevator.c       2
-rw-r--r--   block/genhd.c         35
-rw-r--r--   block/ioctl.c         21
-rw-r--r--   block/ll_rw_blk.c    293
9 files changed, 994 insertions, 236 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 2484e0e9d89c..9bda7bc80307 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -64,4 +64,9 @@ config BLK_DEV_BSG | |||
64 | 64 | ||
65 | endif # BLOCK | 65 | endif # BLOCK |
66 | 66 | ||
67 | config BLOCK_COMPAT | ||
68 | bool | ||
69 | depends on BLOCK && COMPAT | ||
70 | default y | ||
71 | |||
67 | source block/Kconfig.iosched | 72 | source block/Kconfig.iosched |
diff --git a/block/Makefile b/block/Makefile
index 959feeb253be..826108190f00 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o | |||
11 | obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o | 11 | obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o |
12 | 12 | ||
13 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | 13 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
14 | obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o | ||
diff --git a/block/blktrace.c b/block/blktrace.c
index 20fa034ea4a2..775471ef84a5 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -312,33 +312,26 @@ static struct rchan_callbacks blk_relay_callbacks = { | |||
312 | /* | 312 | /* |
313 | * Setup everything required to start tracing | 313 | * Setup everything required to start tracing |
314 | */ | 314 | */ |
315 | static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | 315 | int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev, |
316 | char __user *arg) | 316 | struct blk_user_trace_setup *buts) |
317 | { | 317 | { |
318 | struct blk_user_trace_setup buts; | ||
319 | struct blk_trace *old_bt, *bt = NULL; | 318 | struct blk_trace *old_bt, *bt = NULL; |
320 | struct dentry *dir = NULL; | 319 | struct dentry *dir = NULL; |
321 | char b[BDEVNAME_SIZE]; | 320 | char b[BDEVNAME_SIZE]; |
322 | int ret, i; | 321 | int ret, i; |
323 | 322 | ||
324 | if (copy_from_user(&buts, arg, sizeof(buts))) | 323 | if (!buts->buf_size || !buts->buf_nr) |
325 | return -EFAULT; | ||
326 | |||
327 | if (!buts.buf_size || !buts.buf_nr) | ||
328 | return -EINVAL; | 324 | return -EINVAL; |
329 | 325 | ||
330 | strcpy(buts.name, bdevname(bdev, b)); | 326 | strcpy(buts->name, bdevname(bdev, b)); |
331 | 327 | ||
332 | /* | 328 | /* |
333 | * some device names have larger paths - convert the slashes | 329 | * some device names have larger paths - convert the slashes |
334 | * to underscores for this to work as expected | 330 | * to underscores for this to work as expected |
335 | */ | 331 | */ |
336 | for (i = 0; i < strlen(buts.name); i++) | 332 | for (i = 0; i < strlen(buts->name); i++) |
337 | if (buts.name[i] == '/') | 333 | if (buts->name[i] == '/') |
338 | buts.name[i] = '_'; | 334 | buts->name[i] = '_'; |
339 | |||
340 | if (copy_to_user(arg, &buts, sizeof(buts))) | ||
341 | return -EFAULT; | ||
342 | 335 | ||
343 | ret = -ENOMEM; | 336 | ret = -ENOMEM; |
344 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); | 337 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); |
@@ -350,7 +343,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | |||
350 | goto err; | 343 | goto err; |
351 | 344 | ||
352 | ret = -ENOENT; | 345 | ret = -ENOENT; |
353 | dir = blk_create_tree(buts.name); | 346 | dir = blk_create_tree(buts->name); |
354 | if (!dir) | 347 | if (!dir) |
355 | goto err; | 348 | goto err; |
356 | 349 | ||
@@ -363,20 +356,21 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | |||
363 | if (!bt->dropped_file) | 356 | if (!bt->dropped_file) |
364 | goto err; | 357 | goto err; |
365 | 358 | ||
366 | bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt); | 359 | bt->rchan = relay_open("trace", dir, buts->buf_size, |
360 | buts->buf_nr, &blk_relay_callbacks, bt); | ||
367 | if (!bt->rchan) | 361 | if (!bt->rchan) |
368 | goto err; | 362 | goto err; |
369 | 363 | ||
370 | bt->act_mask = buts.act_mask; | 364 | bt->act_mask = buts->act_mask; |
371 | if (!bt->act_mask) | 365 | if (!bt->act_mask) |
372 | bt->act_mask = (u16) -1; | 366 | bt->act_mask = (u16) -1; |
373 | 367 | ||
374 | bt->start_lba = buts.start_lba; | 368 | bt->start_lba = buts->start_lba; |
375 | bt->end_lba = buts.end_lba; | 369 | bt->end_lba = buts->end_lba; |
376 | if (!bt->end_lba) | 370 | if (!bt->end_lba) |
377 | bt->end_lba = -1ULL; | 371 | bt->end_lba = -1ULL; |
378 | 372 | ||
379 | bt->pid = buts.pid; | 373 | bt->pid = buts->pid; |
380 | bt->trace_state = Blktrace_setup; | 374 | bt->trace_state = Blktrace_setup; |
381 | 375 | ||
382 | ret = -EBUSY; | 376 | ret = -EBUSY; |
@@ -401,6 +395,26 @@ err: | |||
401 | return ret; | 395 | return ret; |
402 | } | 396 | } |
403 | 397 | ||
398 | static int blk_trace_setup(struct request_queue *q, struct block_device *bdev, | ||
399 | char __user *arg) | ||
400 | { | ||
401 | struct blk_user_trace_setup buts; | ||
402 | int ret; | ||
403 | |||
404 | ret = copy_from_user(&buts, arg, sizeof(buts)); | ||
405 | if (ret) | ||
406 | return -EFAULT; | ||
407 | |||
408 | ret = do_blk_trace_setup(q, bdev, &buts); | ||
409 | if (ret) | ||
410 | return ret; | ||
411 | |||
412 | if (copy_to_user(arg, &buts, sizeof(buts))) | ||
413 | return -EFAULT; | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
404 | static int blk_trace_startstop(struct request_queue *q, int start) | 418 | static int blk_trace_startstop(struct request_queue *q, int start) |
405 | { | 419 | { |
406 | struct blk_trace *bt; | 420 | struct blk_trace *bt; |
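
The net effect of the blktrace.c change above is to split the ioctl handler in two: do_blk_trace_setup() operates on a kernel-space struct blk_user_trace_setup, and blk_trace_setup() shrinks to a thin copy_from_user()/copy_to_user() wrapper, so the 32-bit compat path added below can fill in the struct itself and reuse the core. A minimal sketch of that pattern, with hypothetical example_* names rather than the kernel's own:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_params {
	u32 buf_size;
	u32 buf_nr;
};

/* core: takes an already-copied kernel struct, so any entry point
 * (native ioctl, compat ioctl) can reuse it */
int do_example_setup(struct example_params *p)
{
	if (!p->buf_size || !p->buf_nr)
		return -EINVAL;
	/* ... allocate buffers and record the settings from *p ... */
	return 0;
}

/* wrapper: only responsible for moving the struct across the
 * user/kernel boundary */
static int example_setup_ioctl(char __user *arg)
{
	struct example_params p;
	int ret;

	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	ret = do_example_setup(&p);
	if (ret)
		return ret;

	/* hand results (e.g. a normalized name) back to user space */
	if (copy_to_user(arg, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}
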
diff --git a/block/bsg.c b/block/bsg.c
index ed2646827234..b8ddfc66f210 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1010,10 +1010,7 @@ unlock: | |||
1010 | } | 1010 | } |
1011 | EXPORT_SYMBOL_GPL(bsg_register_queue); | 1011 | EXPORT_SYMBOL_GPL(bsg_register_queue); |
1012 | 1012 | ||
1013 | static struct cdev bsg_cdev = { | 1013 | static struct cdev bsg_cdev; |
1014 | .kobj = {.name = "bsg", }, | ||
1015 | .owner = THIS_MODULE, | ||
1016 | }; | ||
1017 | 1014 | ||
1018 | static int __init bsg_init(void) | 1015 | static int __init bsg_init(void) |
1019 | { | 1016 | { |
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
new file mode 100644
index 000000000000..f84093b97f70
--- /dev/null
+++ b/block/compat_ioctl.c
@@ -0,0 +1,814 @@ | |||
1 | #include <linux/blkdev.h> | ||
2 | #include <linux/blkpg.h> | ||
3 | #include <linux/blktrace_api.h> | ||
4 | #include <linux/cdrom.h> | ||
5 | #include <linux/compat.h> | ||
6 | #include <linux/elevator.h> | ||
7 | #include <linux/fd.h> | ||
8 | #include <linux/hdreg.h> | ||
9 | #include <linux/syscalls.h> | ||
10 | #include <linux/smp_lock.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/uaccess.h> | ||
13 | |||
14 | static int compat_put_ushort(unsigned long arg, unsigned short val) | ||
15 | { | ||
16 | return put_user(val, (unsigned short __user *)compat_ptr(arg)); | ||
17 | } | ||
18 | |||
19 | static int compat_put_int(unsigned long arg, int val) | ||
20 | { | ||
21 | return put_user(val, (compat_int_t __user *)compat_ptr(arg)); | ||
22 | } | ||
23 | |||
24 | static int compat_put_long(unsigned long arg, long val) | ||
25 | { | ||
26 | return put_user(val, (compat_long_t __user *)compat_ptr(arg)); | ||
27 | } | ||
28 | |||
29 | static int compat_put_ulong(unsigned long arg, compat_ulong_t val) | ||
30 | { | ||
31 | return put_user(val, (compat_ulong_t __user *)compat_ptr(arg)); | ||
32 | } | ||
33 | |||
34 | static int compat_put_u64(unsigned long arg, u64 val) | ||
35 | { | ||
36 | return put_user(val, (compat_u64 __user *)compat_ptr(arg)); | ||
37 | } | ||
38 | |||
39 | struct compat_hd_geometry { | ||
40 | unsigned char heads; | ||
41 | unsigned char sectors; | ||
42 | unsigned short cylinders; | ||
43 | u32 start; | ||
44 | }; | ||
45 | |||
46 | static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev, | ||
47 | struct compat_hd_geometry __user *ugeo) | ||
48 | { | ||
49 | struct hd_geometry geo; | ||
50 | int ret; | ||
51 | |||
52 | if (!ugeo) | ||
53 | return -EINVAL; | ||
54 | if (!disk->fops->getgeo) | ||
55 | return -ENOTTY; | ||
56 | |||
57 | /* | ||
58 | * We need to set the startsect first, the driver may | ||
59 | * want to override it. | ||
60 | */ | ||
61 | geo.start = get_start_sect(bdev); | ||
62 | ret = disk->fops->getgeo(bdev, &geo); | ||
63 | if (ret) | ||
64 | return ret; | ||
65 | |||
66 | ret = copy_to_user(ugeo, &geo, 4); | ||
67 | ret |= __put_user(geo.start, &ugeo->start); | ||
68 | if (ret) | ||
69 | ret = -EFAULT; | ||
70 | |||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | static int compat_hdio_ioctl(struct inode *inode, struct file *file, | ||
75 | struct gendisk *disk, unsigned int cmd, unsigned long arg) | ||
76 | { | ||
77 | mm_segment_t old_fs = get_fs(); | ||
78 | unsigned long kval; | ||
79 | unsigned int __user *uvp; | ||
80 | int error; | ||
81 | |||
82 | set_fs(KERNEL_DS); | ||
83 | error = blkdev_driver_ioctl(inode, file, disk, | ||
84 | cmd, (unsigned long)(&kval)); | ||
85 | set_fs(old_fs); | ||
86 | |||
87 | if (error == 0) { | ||
88 | uvp = compat_ptr(arg); | ||
89 | if (put_user(kval, uvp)) | ||
90 | error = -EFAULT; | ||
91 | } | ||
92 | return error; | ||
93 | } | ||
94 | |||
95 | struct compat_cdrom_read_audio { | ||
96 | union cdrom_addr addr; | ||
97 | u8 addr_format; | ||
98 | compat_int_t nframes; | ||
99 | compat_caddr_t buf; | ||
100 | }; | ||
101 | |||
102 | struct compat_cdrom_generic_command { | ||
103 | unsigned char cmd[CDROM_PACKET_SIZE]; | ||
104 | compat_caddr_t buffer; | ||
105 | compat_uint_t buflen; | ||
106 | compat_int_t stat; | ||
107 | compat_caddr_t sense; | ||
108 | unsigned char data_direction; | ||
109 | compat_int_t quiet; | ||
110 | compat_int_t timeout; | ||
111 | compat_caddr_t reserved[1]; | ||
112 | }; | ||
113 | |||
114 | static int compat_cdrom_read_audio(struct inode *inode, struct file *file, | ||
115 | struct gendisk *disk, unsigned int cmd, unsigned long arg) | ||
116 | { | ||
117 | struct cdrom_read_audio __user *cdread_audio; | ||
118 | struct compat_cdrom_read_audio __user *cdread_audio32; | ||
119 | __u32 data; | ||
120 | void __user *datap; | ||
121 | |||
122 | cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio)); | ||
123 | cdread_audio32 = compat_ptr(arg); | ||
124 | |||
125 | if (copy_in_user(&cdread_audio->addr, | ||
126 | &cdread_audio32->addr, | ||
127 | (sizeof(*cdread_audio32) - | ||
128 | sizeof(compat_caddr_t)))) | ||
129 | return -EFAULT; | ||
130 | |||
131 | if (get_user(data, &cdread_audio32->buf)) | ||
132 | return -EFAULT; | ||
133 | datap = compat_ptr(data); | ||
134 | if (put_user(datap, &cdread_audio->buf)) | ||
135 | return -EFAULT; | ||
136 | |||
137 | return blkdev_driver_ioctl(inode, file, disk, cmd, | ||
138 | (unsigned long)cdread_audio); | ||
139 | } | ||
140 | |||
141 | static int compat_cdrom_generic_command(struct inode *inode, struct file *file, | ||
142 | struct gendisk *disk, unsigned int cmd, unsigned long arg) | ||
143 | { | ||
144 | struct cdrom_generic_command __user *cgc; | ||
145 | struct compat_cdrom_generic_command __user *cgc32; | ||
146 | u32 data; | ||
147 | unsigned char dir; | ||
148 | int itmp; | ||
149 | |||
150 | cgc = compat_alloc_user_space(sizeof(*cgc)); | ||
151 | cgc32 = compat_ptr(arg); | ||
152 | |||
153 | if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) || | ||
154 | get_user(data, &cgc32->buffer) || | ||
155 | put_user(compat_ptr(data), &cgc->buffer) || | ||
156 | copy_in_user(&cgc->buflen, &cgc32->buflen, | ||
157 | (sizeof(unsigned int) + sizeof(int))) || | ||
158 | get_user(data, &cgc32->sense) || | ||
159 | put_user(compat_ptr(data), &cgc->sense) || | ||
160 | get_user(dir, &cgc32->data_direction) || | ||
161 | put_user(dir, &cgc->data_direction) || | ||
162 | get_user(itmp, &cgc32->quiet) || | ||
163 | put_user(itmp, &cgc->quiet) || | ||
164 | get_user(itmp, &cgc32->timeout) || | ||
165 | put_user(itmp, &cgc->timeout) || | ||
166 | get_user(data, &cgc32->reserved[0]) || | ||
167 | put_user(compat_ptr(data), &cgc->reserved[0])) | ||
168 | return -EFAULT; | ||
169 | |||
170 | return blkdev_driver_ioctl(inode, file, disk, cmd, (unsigned long)cgc); | ||
171 | } | ||
172 | |||
173 | struct compat_blkpg_ioctl_arg { | ||
174 | compat_int_t op; | ||
175 | compat_int_t flags; | ||
176 | compat_int_t datalen; | ||
177 | compat_caddr_t data; | ||
178 | }; | ||
179 | |||
180 | static int compat_blkpg_ioctl(struct inode *inode, struct file *file, | ||
181 | unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32) | ||
182 | { | ||
183 | struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a)); | ||
184 | compat_caddr_t udata; | ||
185 | compat_int_t n; | ||
186 | int err; | ||
187 | |||
188 | err = get_user(n, &ua32->op); | ||
189 | err |= put_user(n, &a->op); | ||
190 | err |= get_user(n, &ua32->flags); | ||
191 | err |= put_user(n, &a->flags); | ||
192 | err |= get_user(n, &ua32->datalen); | ||
193 | err |= put_user(n, &a->datalen); | ||
194 | err |= get_user(udata, &ua32->data); | ||
195 | err |= put_user(compat_ptr(udata), &a->data); | ||
196 | if (err) | ||
197 | return err; | ||
198 | |||
199 | return blkdev_ioctl(inode, file, cmd, (unsigned long)a); | ||
200 | } | ||
201 | |||
202 | #define BLKBSZGET_32 _IOR(0x12, 112, int) | ||
203 | #define BLKBSZSET_32 _IOW(0x12, 113, int) | ||
204 | #define BLKGETSIZE64_32 _IOR(0x12, 114, int) | ||
205 | |||
206 | struct compat_floppy_struct { | ||
207 | compat_uint_t size; | ||
208 | compat_uint_t sect; | ||
209 | compat_uint_t head; | ||
210 | compat_uint_t track; | ||
211 | compat_uint_t stretch; | ||
212 | unsigned char gap; | ||
213 | unsigned char rate; | ||
214 | unsigned char spec1; | ||
215 | unsigned char fmt_gap; | ||
216 | const compat_caddr_t name; | ||
217 | }; | ||
218 | |||
219 | struct compat_floppy_drive_params { | ||
220 | char cmos; | ||
221 | compat_ulong_t max_dtr; | ||
222 | compat_ulong_t hlt; | ||
223 | compat_ulong_t hut; | ||
224 | compat_ulong_t srt; | ||
225 | compat_ulong_t spinup; | ||
226 | compat_ulong_t spindown; | ||
227 | unsigned char spindown_offset; | ||
228 | unsigned char select_delay; | ||
229 | unsigned char rps; | ||
230 | unsigned char tracks; | ||
231 | compat_ulong_t timeout; | ||
232 | unsigned char interleave_sect; | ||
233 | struct floppy_max_errors max_errors; | ||
234 | char flags; | ||
235 | char read_track; | ||
236 | short autodetect[8]; | ||
237 | compat_int_t checkfreq; | ||
238 | compat_int_t native_format; | ||
239 | }; | ||
240 | |||
241 | struct compat_floppy_drive_struct { | ||
242 | signed char flags; | ||
243 | compat_ulong_t spinup_date; | ||
244 | compat_ulong_t select_date; | ||
245 | compat_ulong_t first_read_date; | ||
246 | short probed_format; | ||
247 | short track; | ||
248 | short maxblock; | ||
249 | short maxtrack; | ||
250 | compat_int_t generation; | ||
251 | compat_int_t keep_data; | ||
252 | compat_int_t fd_ref; | ||
253 | compat_int_t fd_device; | ||
254 | compat_int_t last_checked; | ||
255 | compat_caddr_t dmabuf; | ||
256 | compat_int_t bufblocks; | ||
257 | }; | ||
258 | |||
259 | struct compat_floppy_fdc_state { | ||
260 | compat_int_t spec1; | ||
261 | compat_int_t spec2; | ||
262 | compat_int_t dtr; | ||
263 | unsigned char version; | ||
264 | unsigned char dor; | ||
265 | compat_ulong_t address; | ||
266 | unsigned int rawcmd:2; | ||
267 | unsigned int reset:1; | ||
268 | unsigned int need_configure:1; | ||
269 | unsigned int perp_mode:2; | ||
270 | unsigned int has_fifo:1; | ||
271 | unsigned int driver_version; | ||
272 | unsigned char track[4]; | ||
273 | }; | ||
274 | |||
275 | struct compat_floppy_write_errors { | ||
276 | unsigned int write_errors; | ||
277 | compat_ulong_t first_error_sector; | ||
278 | compat_int_t first_error_generation; | ||
279 | compat_ulong_t last_error_sector; | ||
280 | compat_int_t last_error_generation; | ||
281 | compat_uint_t badness; | ||
282 | }; | ||
283 | |||
284 | #define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct) | ||
285 | #define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct) | ||
286 | #define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) | ||
287 | #define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params) | ||
288 | #define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params) | ||
289 | #define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct) | ||
290 | #define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct) | ||
291 | #define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state) | ||
292 | #define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors) | ||
293 | |||
294 | static struct { | ||
295 | unsigned int cmd32; | ||
296 | unsigned int cmd; | ||
297 | } fd_ioctl_trans_table[] = { | ||
298 | { FDSETPRM32, FDSETPRM }, | ||
299 | { FDDEFPRM32, FDDEFPRM }, | ||
300 | { FDGETPRM32, FDGETPRM }, | ||
301 | { FDSETDRVPRM32, FDSETDRVPRM }, | ||
302 | { FDGETDRVPRM32, FDGETDRVPRM }, | ||
303 | { FDGETDRVSTAT32, FDGETDRVSTAT }, | ||
304 | { FDPOLLDRVSTAT32, FDPOLLDRVSTAT }, | ||
305 | { FDGETFDCSTAT32, FDGETFDCSTAT }, | ||
306 | { FDWERRORGET32, FDWERRORGET } | ||
307 | }; | ||
308 | |||
309 | #define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table) | ||
310 | |||
311 | static int compat_fd_ioctl(struct inode *inode, struct file *file, | ||
312 | struct gendisk *disk, unsigned int cmd, unsigned long arg) | ||
313 | { | ||
314 | mm_segment_t old_fs = get_fs(); | ||
315 | void *karg = NULL; | ||
316 | unsigned int kcmd = 0; | ||
317 | int i, err; | ||
318 | |||
319 | for (i = 0; i < NR_FD_IOCTL_TRANS; i++) | ||
320 | if (cmd == fd_ioctl_trans_table[i].cmd32) { | ||
321 | kcmd = fd_ioctl_trans_table[i].cmd; | ||
322 | break; | ||
323 | } | ||
324 | if (!kcmd) | ||
325 | return -EINVAL; | ||
326 | |||
327 | switch (cmd) { | ||
328 | case FDSETPRM32: | ||
329 | case FDDEFPRM32: | ||
330 | case FDGETPRM32: | ||
331 | { | ||
332 | compat_uptr_t name; | ||
333 | struct compat_floppy_struct __user *uf; | ||
334 | struct floppy_struct *f; | ||
335 | |||
336 | uf = compat_ptr(arg); | ||
337 | f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL); | ||
338 | if (!karg) | ||
339 | return -ENOMEM; | ||
340 | if (cmd == FDGETPRM32) | ||
341 | break; | ||
342 | err = __get_user(f->size, &uf->size); | ||
343 | err |= __get_user(f->sect, &uf->sect); | ||
344 | err |= __get_user(f->head, &uf->head); | ||
345 | err |= __get_user(f->track, &uf->track); | ||
346 | err |= __get_user(f->stretch, &uf->stretch); | ||
347 | err |= __get_user(f->gap, &uf->gap); | ||
348 | err |= __get_user(f->rate, &uf->rate); | ||
349 | err |= __get_user(f->spec1, &uf->spec1); | ||
350 | err |= __get_user(f->fmt_gap, &uf->fmt_gap); | ||
351 | err |= __get_user(name, &uf->name); | ||
352 | f->name = compat_ptr(name); | ||
353 | if (err) { | ||
354 | err = -EFAULT; | ||
355 | goto out; | ||
356 | } | ||
357 | break; | ||
358 | } | ||
359 | case FDSETDRVPRM32: | ||
360 | case FDGETDRVPRM32: | ||
361 | { | ||
362 | struct compat_floppy_drive_params __user *uf; | ||
363 | struct floppy_drive_params *f; | ||
364 | |||
365 | uf = compat_ptr(arg); | ||
366 | f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL); | ||
367 | if (!karg) | ||
368 | return -ENOMEM; | ||
369 | if (cmd == FDGETDRVPRM32) | ||
370 | break; | ||
371 | err = __get_user(f->cmos, &uf->cmos); | ||
372 | err |= __get_user(f->max_dtr, &uf->max_dtr); | ||
373 | err |= __get_user(f->hlt, &uf->hlt); | ||
374 | err |= __get_user(f->hut, &uf->hut); | ||
375 | err |= __get_user(f->srt, &uf->srt); | ||
376 | err |= __get_user(f->spinup, &uf->spinup); | ||
377 | err |= __get_user(f->spindown, &uf->spindown); | ||
378 | err |= __get_user(f->spindown_offset, &uf->spindown_offset); | ||
379 | err |= __get_user(f->select_delay, &uf->select_delay); | ||
380 | err |= __get_user(f->rps, &uf->rps); | ||
381 | err |= __get_user(f->tracks, &uf->tracks); | ||
382 | err |= __get_user(f->timeout, &uf->timeout); | ||
383 | err |= __get_user(f->interleave_sect, &uf->interleave_sect); | ||
384 | err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors)); | ||
385 | err |= __get_user(f->flags, &uf->flags); | ||
386 | err |= __get_user(f->read_track, &uf->read_track); | ||
387 | err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect)); | ||
388 | err |= __get_user(f->checkfreq, &uf->checkfreq); | ||
389 | err |= __get_user(f->native_format, &uf->native_format); | ||
390 | if (err) { | ||
391 | err = -EFAULT; | ||
392 | goto out; | ||
393 | } | ||
394 | break; | ||
395 | } | ||
396 | case FDGETDRVSTAT32: | ||
397 | case FDPOLLDRVSTAT32: | ||
398 | karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL); | ||
399 | if (!karg) | ||
400 | return -ENOMEM; | ||
401 | break; | ||
402 | case FDGETFDCSTAT32: | ||
403 | karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL); | ||
404 | if (!karg) | ||
405 | return -ENOMEM; | ||
406 | break; | ||
407 | case FDWERRORGET32: | ||
408 | karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL); | ||
409 | if (!karg) | ||
410 | return -ENOMEM; | ||
411 | break; | ||
412 | default: | ||
413 | return -EINVAL; | ||
414 | } | ||
415 | set_fs(KERNEL_DS); | ||
416 | err = blkdev_driver_ioctl(inode, file, disk, kcmd, (unsigned long)karg); | ||
417 | set_fs(old_fs); | ||
418 | if (err) | ||
419 | goto out; | ||
420 | switch (cmd) { | ||
421 | case FDGETPRM32: | ||
422 | { | ||
423 | struct floppy_struct *f = karg; | ||
424 | struct compat_floppy_struct __user *uf = compat_ptr(arg); | ||
425 | |||
426 | err = __put_user(f->size, &uf->size); | ||
427 | err |= __put_user(f->sect, &uf->sect); | ||
428 | err |= __put_user(f->head, &uf->head); | ||
429 | err |= __put_user(f->track, &uf->track); | ||
430 | err |= __put_user(f->stretch, &uf->stretch); | ||
431 | err |= __put_user(f->gap, &uf->gap); | ||
432 | err |= __put_user(f->rate, &uf->rate); | ||
433 | err |= __put_user(f->spec1, &uf->spec1); | ||
434 | err |= __put_user(f->fmt_gap, &uf->fmt_gap); | ||
435 | err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name); | ||
436 | break; | ||
437 | } | ||
438 | case FDGETDRVPRM32: | ||
439 | { | ||
440 | struct compat_floppy_drive_params __user *uf; | ||
441 | struct floppy_drive_params *f = karg; | ||
442 | |||
443 | uf = compat_ptr(arg); | ||
444 | err = __put_user(f->cmos, &uf->cmos); | ||
445 | err |= __put_user(f->max_dtr, &uf->max_dtr); | ||
446 | err |= __put_user(f->hlt, &uf->hlt); | ||
447 | err |= __put_user(f->hut, &uf->hut); | ||
448 | err |= __put_user(f->srt, &uf->srt); | ||
449 | err |= __put_user(f->spinup, &uf->spinup); | ||
450 | err |= __put_user(f->spindown, &uf->spindown); | ||
451 | err |= __put_user(f->spindown_offset, &uf->spindown_offset); | ||
452 | err |= __put_user(f->select_delay, &uf->select_delay); | ||
453 | err |= __put_user(f->rps, &uf->rps); | ||
454 | err |= __put_user(f->tracks, &uf->tracks); | ||
455 | err |= __put_user(f->timeout, &uf->timeout); | ||
456 | err |= __put_user(f->interleave_sect, &uf->interleave_sect); | ||
457 | err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors)); | ||
458 | err |= __put_user(f->flags, &uf->flags); | ||
459 | err |= __put_user(f->read_track, &uf->read_track); | ||
460 | err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect)); | ||
461 | err |= __put_user(f->checkfreq, &uf->checkfreq); | ||
462 | err |= __put_user(f->native_format, &uf->native_format); | ||
463 | break; | ||
464 | } | ||
465 | case FDGETDRVSTAT32: | ||
466 | case FDPOLLDRVSTAT32: | ||
467 | { | ||
468 | struct compat_floppy_drive_struct __user *uf; | ||
469 | struct floppy_drive_struct *f = karg; | ||
470 | |||
471 | uf = compat_ptr(arg); | ||
472 | err = __put_user(f->flags, &uf->flags); | ||
473 | err |= __put_user(f->spinup_date, &uf->spinup_date); | ||
474 | err |= __put_user(f->select_date, &uf->select_date); | ||
475 | err |= __put_user(f->first_read_date, &uf->first_read_date); | ||
476 | err |= __put_user(f->probed_format, &uf->probed_format); | ||
477 | err |= __put_user(f->track, &uf->track); | ||
478 | err |= __put_user(f->maxblock, &uf->maxblock); | ||
479 | err |= __put_user(f->maxtrack, &uf->maxtrack); | ||
480 | err |= __put_user(f->generation, &uf->generation); | ||
481 | err |= __put_user(f->keep_data, &uf->keep_data); | ||
482 | err |= __put_user(f->fd_ref, &uf->fd_ref); | ||
483 | err |= __put_user(f->fd_device, &uf->fd_device); | ||
484 | err |= __put_user(f->last_checked, &uf->last_checked); | ||
485 | err |= __put_user((u64)f->dmabuf, &uf->dmabuf); | ||
486 | err |= __put_user((u64)f->bufblocks, &uf->bufblocks); | ||
487 | break; | ||
488 | } | ||
489 | case FDGETFDCSTAT32: | ||
490 | { | ||
491 | struct compat_floppy_fdc_state __user *uf; | ||
492 | struct floppy_fdc_state *f = karg; | ||
493 | |||
494 | uf = compat_ptr(arg); | ||
495 | err = __put_user(f->spec1, &uf->spec1); | ||
496 | err |= __put_user(f->spec2, &uf->spec2); | ||
497 | err |= __put_user(f->dtr, &uf->dtr); | ||
498 | err |= __put_user(f->version, &uf->version); | ||
499 | err |= __put_user(f->dor, &uf->dor); | ||
500 | err |= __put_user(f->address, &uf->address); | ||
501 | err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address), | ||
502 | (char *)&f->address + sizeof(f->address), sizeof(int)); | ||
503 | err |= __put_user(f->driver_version, &uf->driver_version); | ||
504 | err |= __copy_to_user(uf->track, f->track, sizeof(f->track)); | ||
505 | break; | ||
506 | } | ||
507 | case FDWERRORGET32: | ||
508 | { | ||
509 | struct compat_floppy_write_errors __user *uf; | ||
510 | struct floppy_write_errors *f = karg; | ||
511 | |||
512 | uf = compat_ptr(arg); | ||
513 | err = __put_user(f->write_errors, &uf->write_errors); | ||
514 | err |= __put_user(f->first_error_sector, &uf->first_error_sector); | ||
515 | err |= __put_user(f->first_error_generation, &uf->first_error_generation); | ||
516 | err |= __put_user(f->last_error_sector, &uf->last_error_sector); | ||
517 | err |= __put_user(f->last_error_generation, &uf->last_error_generation); | ||
518 | err |= __put_user(f->badness, &uf->badness); | ||
519 | break; | ||
520 | } | ||
521 | default: | ||
522 | break; | ||
523 | } | ||
524 | if (err) | ||
525 | err = -EFAULT; | ||
526 | |||
527 | out: | ||
528 | kfree(karg); | ||
529 | return err; | ||
530 | } | ||
531 | |||
532 | struct compat_blk_user_trace_setup { | ||
533 | char name[32]; | ||
534 | u16 act_mask; | ||
535 | u32 buf_size; | ||
536 | u32 buf_nr; | ||
537 | compat_u64 start_lba; | ||
538 | compat_u64 end_lba; | ||
539 | u32 pid; | ||
540 | }; | ||
541 | #define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) | ||
542 | |||
543 | static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) | ||
544 | { | ||
545 | struct blk_user_trace_setup buts; | ||
546 | struct compat_blk_user_trace_setup cbuts; | ||
547 | struct request_queue *q; | ||
548 | int ret; | ||
549 | |||
550 | q = bdev_get_queue(bdev); | ||
551 | if (!q) | ||
552 | return -ENXIO; | ||
553 | |||
554 | if (copy_from_user(&cbuts, arg, sizeof(cbuts))) | ||
555 | return -EFAULT; | ||
556 | |||
557 | buts = (struct blk_user_trace_setup) { | ||
558 | .act_mask = cbuts.act_mask, | ||
559 | .buf_size = cbuts.buf_size, | ||
560 | .buf_nr = cbuts.buf_nr, | ||
561 | .start_lba = cbuts.start_lba, | ||
562 | .end_lba = cbuts.end_lba, | ||
563 | .pid = cbuts.pid, | ||
564 | }; | ||
565 | memcpy(&buts.name, &cbuts.name, 32); | ||
566 | |||
567 | mutex_lock(&bdev->bd_mutex); | ||
568 | ret = do_blk_trace_setup(q, bdev, &buts); | ||
569 | mutex_unlock(&bdev->bd_mutex); | ||
570 | if (ret) | ||
571 | return ret; | ||
572 | |||
573 | if (copy_to_user(arg, &buts.name, 32)) | ||
574 | return -EFAULT; | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file, | ||
580 | struct gendisk *disk, unsigned cmd, unsigned long arg) | ||
581 | { | ||
582 | int ret; | ||
583 | |||
584 | switch (arg) { | ||
585 | case HDIO_GET_UNMASKINTR: | ||
586 | case HDIO_GET_MULTCOUNT: | ||
587 | case HDIO_GET_KEEPSETTINGS: | ||
588 | case HDIO_GET_32BIT: | ||
589 | case HDIO_GET_NOWERR: | ||
590 | case HDIO_GET_DMA: | ||
591 | case HDIO_GET_NICE: | ||
592 | case HDIO_GET_WCACHE: | ||
593 | case HDIO_GET_ACOUSTIC: | ||
594 | case HDIO_GET_ADDRESS: | ||
595 | case HDIO_GET_BUSSTATE: | ||
596 | return compat_hdio_ioctl(inode, file, disk, cmd, arg); | ||
597 | case FDSETPRM32: | ||
598 | case FDDEFPRM32: | ||
599 | case FDGETPRM32: | ||
600 | case FDSETDRVPRM32: | ||
601 | case FDGETDRVPRM32: | ||
602 | case FDGETDRVSTAT32: | ||
603 | case FDPOLLDRVSTAT32: | ||
604 | case FDGETFDCSTAT32: | ||
605 | case FDWERRORGET32: | ||
606 | return compat_fd_ioctl(inode, file, disk, cmd, arg); | ||
607 | case CDROMREADAUDIO: | ||
608 | return compat_cdrom_read_audio(inode, file, disk, cmd, arg); | ||
609 | case CDROM_SEND_PACKET: | ||
610 | return compat_cdrom_generic_command(inode, file, disk, cmd, arg); | ||
611 | |||
612 | /* | ||
613 | * No handler required for the ones below, we just need to | ||
614 | * convert arg to a 64 bit pointer. | ||
615 | */ | ||
616 | case BLKSECTSET: | ||
617 | /* | ||
618 | * 0x03 -- HD/IDE ioctl's used by hdparm and friends. | ||
619 | * Some need translations, these do not. | ||
620 | */ | ||
621 | case HDIO_GET_IDENTITY: | ||
622 | case HDIO_DRIVE_TASK: | ||
623 | case HDIO_DRIVE_CMD: | ||
624 | case HDIO_SCAN_HWIF: | ||
625 | /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */ | ||
626 | case 0x330: | ||
627 | /* 0x02 -- Floppy ioctls */ | ||
628 | case FDMSGON: | ||
629 | case FDMSGOFF: | ||
630 | case FDSETEMSGTRESH: | ||
631 | case FDFLUSH: | ||
632 | case FDWERRORCLR: | ||
633 | case FDSETMAXERRS: | ||
634 | case FDGETMAXERRS: | ||
635 | case FDGETDRVTYP: | ||
636 | case FDEJECT: | ||
637 | case FDCLRPRM: | ||
638 | case FDFMTBEG: | ||
639 | case FDFMTEND: | ||
640 | case FDRESET: | ||
641 | case FDTWADDLE: | ||
642 | case FDFMTTRK: | ||
643 | case FDRAWCMD: | ||
644 | /* CDROM stuff */ | ||
645 | case CDROMPAUSE: | ||
646 | case CDROMRESUME: | ||
647 | case CDROMPLAYMSF: | ||
648 | case CDROMPLAYTRKIND: | ||
649 | case CDROMREADTOCHDR: | ||
650 | case CDROMREADTOCENTRY: | ||
651 | case CDROMSTOP: | ||
652 | case CDROMSTART: | ||
653 | case CDROMEJECT: | ||
654 | case CDROMVOLCTRL: | ||
655 | case CDROMSUBCHNL: | ||
656 | case CDROMMULTISESSION: | ||
657 | case CDROM_GET_MCN: | ||
658 | case CDROMRESET: | ||
659 | case CDROMVOLREAD: | ||
660 | case CDROMSEEK: | ||
661 | case CDROMPLAYBLK: | ||
662 | case CDROMCLOSETRAY: | ||
663 | case CDROM_DISC_STATUS: | ||
664 | case CDROM_CHANGER_NSLOTS: | ||
665 | case CDROM_GET_CAPABILITY: | ||
666 | /* Ignore cdrom.h about these next 5 ioctls, they absolutely do | ||
667 | * not take a struct cdrom_read, instead they take a struct cdrom_msf | ||
668 | * which is compatible. | ||
669 | */ | ||
670 | case CDROMREADMODE2: | ||
671 | case CDROMREADMODE1: | ||
672 | case CDROMREADRAW: | ||
673 | case CDROMREADCOOKED: | ||
674 | case CDROMREADALL: | ||
675 | /* DVD ioctls */ | ||
676 | case DVD_READ_STRUCT: | ||
677 | case DVD_WRITE_STRUCT: | ||
678 | case DVD_AUTH: | ||
679 | arg = (unsigned long)compat_ptr(arg); | ||
680 | /* These interpret arg as an unsigned long, not as a pointer, | ||
681 | * so we must not do compat_ptr() conversion. */ | ||
682 | case HDIO_SET_MULTCOUNT: | ||
683 | case HDIO_SET_UNMASKINTR: | ||
684 | case HDIO_SET_KEEPSETTINGS: | ||
685 | case HDIO_SET_32BIT: | ||
686 | case HDIO_SET_NOWERR: | ||
687 | case HDIO_SET_DMA: | ||
688 | case HDIO_SET_PIO_MODE: | ||
689 | case HDIO_SET_NICE: | ||
690 | case HDIO_SET_WCACHE: | ||
691 | case HDIO_SET_ACOUSTIC: | ||
692 | case HDIO_SET_BUSSTATE: | ||
693 | case HDIO_SET_ADDRESS: | ||
694 | case CDROMEJECT_SW: | ||
695 | case CDROM_SET_OPTIONS: | ||
696 | case CDROM_CLEAR_OPTIONS: | ||
697 | case CDROM_SELECT_SPEED: | ||
698 | case CDROM_SELECT_DISC: | ||
699 | case CDROM_MEDIA_CHANGED: | ||
700 | case CDROM_DRIVE_STATUS: | ||
701 | case CDROM_LOCKDOOR: | ||
702 | case CDROM_DEBUG: | ||
703 | break; | ||
704 | default: | ||
705 | /* unknown ioctl number */ | ||
706 | return -ENOIOCTLCMD; | ||
707 | } | ||
708 | |||
709 | if (disk->fops->unlocked_ioctl) | ||
710 | return disk->fops->unlocked_ioctl(file, cmd, arg); | ||
711 | |||
712 | if (disk->fops->ioctl) { | ||
713 | lock_kernel(); | ||
714 | ret = disk->fops->ioctl(inode, file, cmd, arg); | ||
715 | unlock_kernel(); | ||
716 | return ret; | ||
717 | } | ||
718 | |||
719 | return -ENOTTY; | ||
720 | } | ||
721 | |||
722 | static int compat_blkdev_locked_ioctl(struct inode *inode, struct file *file, | ||
723 | struct block_device *bdev, | ||
724 | unsigned cmd, unsigned long arg) | ||
725 | { | ||
726 | struct backing_dev_info *bdi; | ||
727 | |||
728 | switch (cmd) { | ||
729 | case BLKRAGET: | ||
730 | case BLKFRAGET: | ||
731 | if (!arg) | ||
732 | return -EINVAL; | ||
733 | bdi = blk_get_backing_dev_info(bdev); | ||
734 | if (bdi == NULL) | ||
735 | return -ENOTTY; | ||
736 | return compat_put_long(arg, | ||
737 | (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); | ||
738 | case BLKROGET: /* compatible */ | ||
739 | return compat_put_int(arg, bdev_read_only(bdev) != 0); | ||
740 | case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ | ||
741 | return compat_put_int(arg, block_size(bdev)); | ||
742 | case BLKSSZGET: /* get block device hardware sector size */ | ||
743 | return compat_put_int(arg, bdev_hardsect_size(bdev)); | ||
744 | case BLKSECTGET: | ||
745 | return compat_put_ushort(arg, | ||
746 | bdev_get_queue(bdev)->max_sectors); | ||
747 | case BLKRASET: /* compatible, but no compat_ptr (!) */ | ||
748 | case BLKFRASET: | ||
749 | if (!capable(CAP_SYS_ADMIN)) | ||
750 | return -EACCES; | ||
751 | bdi = blk_get_backing_dev_info(bdev); | ||
752 | if (bdi == NULL) | ||
753 | return -ENOTTY; | ||
754 | bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; | ||
755 | return 0; | ||
756 | case BLKGETSIZE: | ||
757 | if ((bdev->bd_inode->i_size >> 9) > ~0UL) | ||
758 | return -EFBIG; | ||
759 | return compat_put_ulong(arg, bdev->bd_inode->i_size >> 9); | ||
760 | |||
761 | case BLKGETSIZE64_32: | ||
762 | return compat_put_u64(arg, bdev->bd_inode->i_size); | ||
763 | |||
764 | case BLKTRACESETUP32: | ||
765 | return compat_blk_trace_setup(bdev, compat_ptr(arg)); | ||
766 | case BLKTRACESTART: /* compatible */ | ||
767 | case BLKTRACESTOP: /* compatible */ | ||
768 | case BLKTRACETEARDOWN: /* compatible */ | ||
769 | return blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); | ||
770 | } | ||
771 | return -ENOIOCTLCMD; | ||
772 | } | ||
773 | |||
774 | /* Most of the generic ioctls are handled in the normal fallback path. | ||
775 | This assumes the blkdev's low level compat_ioctl always returns | ||
776 | ENOIOCTLCMD for unknown ioctls. */ | ||
777 | long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | ||
778 | { | ||
779 | int ret = -ENOIOCTLCMD; | ||
780 | struct inode *inode = file->f_mapping->host; | ||
781 | struct block_device *bdev = inode->i_bdev; | ||
782 | struct gendisk *disk = bdev->bd_disk; | ||
783 | |||
784 | switch (cmd) { | ||
785 | case HDIO_GETGEO: | ||
786 | return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); | ||
787 | case BLKFLSBUF: | ||
788 | case BLKROSET: | ||
789 | /* | ||
790 | * the ones below are implemented in blkdev_locked_ioctl, | ||
791 | * but we call blkdev_ioctl, which gets the lock for us | ||
792 | */ | ||
793 | case BLKRRPART: | ||
794 | return blkdev_ioctl(inode, file, cmd, | ||
795 | (unsigned long)compat_ptr(arg)); | ||
796 | case BLKBSZSET_32: | ||
797 | return blkdev_ioctl(inode, file, BLKBSZSET, | ||
798 | (unsigned long)compat_ptr(arg)); | ||
799 | case BLKPG: | ||
800 | return compat_blkpg_ioctl(inode, file, cmd, compat_ptr(arg)); | ||
801 | } | ||
802 | |||
803 | lock_kernel(); | ||
804 | ret = compat_blkdev_locked_ioctl(inode, file, bdev, cmd, arg); | ||
805 | /* FIXME: why do we assume -> compat_ioctl needs the BKL? */ | ||
806 | if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl) | ||
807 | ret = disk->fops->compat_ioctl(file, cmd, arg); | ||
808 | unlock_kernel(); | ||
809 | |||
810 | if (ret != -ENOIOCTLCMD) | ||
811 | return ret; | ||
812 | |||
813 | return compat_blkdev_driver_ioctl(inode, file, disk, cmd, arg); | ||
814 | } | ||
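
Nearly every handler in the new compat_ioctl.c above follows one translation recipe: declare the 32-bit layout of the ioctl structure, widen its members one by one, convert 32-bit pointers with compat_ptr(), and build a native-layout copy with compat_alloc_user_space() before handing it to the regular ioctl path. A sketch of that recipe under assumed names (struct example_args and do_example_ioctl are hypothetical, not part of this commit):

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* native (64-bit) layout, as the regular ioctl handler expects it */
struct example_args {
	int flags;
	void __user *buf;
};

/* the same structure as a 32-bit process lays it out */
struct compat_example_args {
	compat_int_t flags;
	compat_caddr_t buf;
};

/* hypothetical native handler */
int do_example_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static int compat_example_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct compat_example_args __user *uarg32 = compat_ptr(arg);
	struct example_args __user *uarg =
		compat_alloc_user_space(sizeof(*uarg));
	compat_caddr_t buf32;
	compat_int_t flags;

	/* widen each member; pointers go through compat_ptr() */
	if (get_user(flags, &uarg32->flags) ||
	    put_user(flags, &uarg->flags) ||
	    get_user(buf32, &uarg32->buf) ||
	    put_user(compat_ptr(buf32), &uarg->buf))
		return -EFAULT;

	/* hand the rebuilt native layout to the normal ioctl path */
	return do_example_ioctl(file, cmd, (unsigned long)uarg);
}
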
diff --git a/block/elevator.c b/block/elevator.c
index c6d153de9fd6..b9c518afe1f8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -186,7 +186,7 @@ static elevator_t *elevator_alloc(struct request_queue *q, | |||
186 | eq->ops = &e->ops; | 186 | eq->ops = &e->ops; |
187 | eq->elevator_type = e; | 187 | eq->elevator_type = e; |
188 | kobject_init(&eq->kobj); | 188 | kobject_init(&eq->kobj); |
189 | snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | 189 | kobject_set_name(&eq->kobj, "%s", "iosched"); |
190 | eq->kobj.ktype = &elv_ktype; | 190 | eq->kobj.ktype = &elv_ktype; |
191 | mutex_init(&eq->sysfs_lock); | 191 | mutex_init(&eq->sysfs_lock); |
192 | 192 | ||
diff --git a/block/genhd.c b/block/genhd.c
index 3af1e7a378d4..e609996f2e76 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -540,61 +540,42 @@ static int block_uevent_filter(struct kset *kset, struct kobject *kobj) | |||
540 | return ((ktype == &ktype_block) || (ktype == &ktype_part)); | 540 | return ((ktype == &ktype_block) || (ktype == &ktype_part)); |
541 | } | 541 | } |
542 | 542 | ||
543 | static int block_uevent(struct kset *kset, struct kobject *kobj, char **envp, | 543 | static int block_uevent(struct kset *kset, struct kobject *kobj, |
544 | int num_envp, char *buffer, int buffer_size) | 544 | struct kobj_uevent_env *env) |
545 | { | 545 | { |
546 | struct kobj_type *ktype = get_ktype(kobj); | 546 | struct kobj_type *ktype = get_ktype(kobj); |
547 | struct device *physdev; | 547 | struct device *physdev; |
548 | struct gendisk *disk; | 548 | struct gendisk *disk; |
549 | struct hd_struct *part; | 549 | struct hd_struct *part; |
550 | int length = 0; | ||
551 | int i = 0; | ||
552 | 550 | ||
553 | if (ktype == &ktype_block) { | 551 | if (ktype == &ktype_block) { |
554 | disk = container_of(kobj, struct gendisk, kobj); | 552 | disk = container_of(kobj, struct gendisk, kobj); |
555 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, | 553 | add_uevent_var(env, "MINOR=%u", disk->first_minor); |
556 | &length, "MINOR=%u", disk->first_minor); | ||
557 | } else if (ktype == &ktype_part) { | 554 | } else if (ktype == &ktype_part) { |
558 | disk = container_of(kobj->parent, struct gendisk, kobj); | 555 | disk = container_of(kobj->parent, struct gendisk, kobj); |
559 | part = container_of(kobj, struct hd_struct, kobj); | 556 | part = container_of(kobj, struct hd_struct, kobj); |
560 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, | 557 | add_uevent_var(env, "MINOR=%u", |
561 | &length, "MINOR=%u", | ||
562 | disk->first_minor + part->partno); | 558 | disk->first_minor + part->partno); |
563 | } else | 559 | } else |
564 | return 0; | 560 | return 0; |
565 | 561 | ||
566 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length, | 562 | add_uevent_var(env, "MAJOR=%u", disk->major); |
567 | "MAJOR=%u", disk->major); | ||
568 | 563 | ||
569 | /* add physical device, backing this device */ | 564 | /* add physical device, backing this device */ |
570 | physdev = disk->driverfs_dev; | 565 | physdev = disk->driverfs_dev; |
571 | if (physdev) { | 566 | if (physdev) { |
572 | char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL); | 567 | char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL); |
573 | 568 | ||
574 | add_uevent_var(envp, num_envp, &i, buffer, buffer_size, | 569 | add_uevent_var(env, "PHYSDEVPATH=%s", path); |
575 | &length, "PHYSDEVPATH=%s", path); | ||
576 | kfree(path); | 570 | kfree(path); |
577 | 571 | ||
578 | if (physdev->bus) | 572 | if (physdev->bus) |
579 | add_uevent_var(envp, num_envp, &i, | 573 | add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name); |
580 | buffer, buffer_size, &length, | ||
581 | "PHYSDEVBUS=%s", | ||
582 | physdev->bus->name); | ||
583 | 574 | ||
584 | if (physdev->driver) | 575 | if (physdev->driver) |
585 | add_uevent_var(envp, num_envp, &i, | 576 | add_uevent_var(env, "PHYSDEVDRIVER=%s", physdev->driver->name); | ||
586 | buffer, buffer_size, &length, | ||
587 | "PHYSDEVDRIVER=%s", | ||
588 | physdev->driver->name); | ||
589 | } | 577 | } |
590 | 578 | ||
591 | /* terminate, set to next free slot, shrink available space */ | ||
592 | envp[i] = NULL; | ||
593 | envp = &envp[i]; | ||
594 | num_envp -= i; | ||
595 | buffer = &buffer[length]; | ||
596 | buffer_size -= length; | ||
597 | |||
598 | return 0; | 579 | return 0; |
599 | } | 580 | } |
600 | 581 | ||
diff --git a/block/ioctl.c b/block/ioctl.c
index f7e3e8abf887..52d6385216ad 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -217,6 +217,10 @@ int blkdev_driver_ioctl(struct inode *inode, struct file *file, | |||
217 | } | 217 | } |
218 | EXPORT_SYMBOL_GPL(blkdev_driver_ioctl); | 218 | EXPORT_SYMBOL_GPL(blkdev_driver_ioctl); |
219 | 219 | ||
220 | /* | ||
221 | * always keep this in sync with compat_blkdev_ioctl() and | ||
222 | * compat_blkdev_locked_ioctl() | ||
223 | */ | ||
220 | int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, | 224 | int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, |
221 | unsigned long arg) | 225 | unsigned long arg) |
222 | { | 226 | { |
@@ -284,21 +288,4 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd, | |||
284 | 288 | ||
285 | return blkdev_driver_ioctl(inode, file, disk, cmd, arg); | 289 | return blkdev_driver_ioctl(inode, file, disk, cmd, arg); |
286 | } | 290 | } |
287 | |||
288 | /* Most of the generic ioctls are handled in the normal fallback path. | ||
289 | This assumes the blkdev's low level compat_ioctl always returns | ||
290 | ENOIOCTLCMD for unknown ioctls. */ | ||
291 | long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | ||
292 | { | ||
293 | struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev; | ||
294 | struct gendisk *disk = bdev->bd_disk; | ||
295 | int ret = -ENOIOCTLCMD; | ||
296 | if (disk->fops->compat_ioctl) { | ||
297 | lock_kernel(); | ||
298 | ret = disk->fops->compat_ioctl(file, cmd, arg); | ||
299 | unlock_kernel(); | ||
300 | } | ||
301 | return ret; | ||
302 | } | ||
303 | |||
304 | EXPORT_SYMBOL_GPL(blkdev_ioctl); | 291 | EXPORT_SYMBOL_GPL(blkdev_ioctl); |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ed39313c4085..d875673e76cd 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -42,6 +42,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | |||
42 | static void init_request_from_bio(struct request *req, struct bio *bio); | 42 | static void init_request_from_bio(struct request *req, struct bio *bio); |
43 | static int __make_request(struct request_queue *q, struct bio *bio); | 43 | static int __make_request(struct request_queue *q, struct bio *bio); |
44 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); | 44 | static struct io_context *current_io_context(gfp_t gfp_flags, int node); |
45 | static void blk_recalc_rq_segments(struct request *rq); | ||
46 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | ||
47 | struct bio *bio); | ||
45 | 48 | ||
46 | /* | 49 | /* |
47 | * For the allocated request tables | 50 | * For the allocated request tables |
@@ -428,7 +431,6 @@ static void queue_flush(struct request_queue *q, unsigned which) | |||
428 | static inline struct request *start_ordered(struct request_queue *q, | 431 | static inline struct request *start_ordered(struct request_queue *q, |
429 | struct request *rq) | 432 | struct request *rq) |
430 | { | 433 | { |
431 | q->bi_size = 0; | ||
432 | q->orderr = 0; | 434 | q->orderr = 0; |
433 | q->ordered = q->next_ordered; | 435 | q->ordered = q->next_ordered; |
434 | q->ordseq |= QUEUE_ORDSEQ_STARTED; | 436 | q->ordseq |= QUEUE_ORDSEQ_STARTED; |
@@ -525,56 +527,36 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp) | |||
525 | return 1; | 527 | return 1; |
526 | } | 528 | } |
527 | 529 | ||
528 | static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) | 530 | static void req_bio_endio(struct request *rq, struct bio *bio, |
529 | { | 531 | unsigned int nbytes, int error) |
530 | struct request_queue *q = bio->bi_private; | ||
531 | |||
532 | /* | ||
533 | * This is dry run, restore bio_sector and size. We'll finish | ||
534 | * this request again with the original bi_end_io after an | ||
535 | * error occurs or post flush is complete. | ||
536 | */ | ||
537 | q->bi_size += bytes; | ||
538 | |||
539 | if (bio->bi_size) | ||
540 | return 1; | ||
541 | |||
542 | /* Reset bio */ | ||
543 | set_bit(BIO_UPTODATE, &bio->bi_flags); | ||
544 | bio->bi_size = q->bi_size; | ||
545 | bio->bi_sector -= (q->bi_size >> 9); | ||
546 | q->bi_size = 0; | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static int ordered_bio_endio(struct request *rq, struct bio *bio, | ||
552 | unsigned int nbytes, int error) | ||
553 | { | 532 | { |
554 | struct request_queue *q = rq->q; | 533 | struct request_queue *q = rq->q; |
555 | bio_end_io_t *endio; | ||
556 | void *private; | ||
557 | 534 | ||
558 | if (&q->bar_rq != rq) | 535 | if (&q->bar_rq != rq) { |
559 | return 0; | 536 | if (error) |
560 | 537 | clear_bit(BIO_UPTODATE, &bio->bi_flags); | |
561 | /* | 538 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
562 | * Okay, this is the barrier request in progress, dry finish it. | 539 | error = -EIO; |
563 | */ | ||
564 | if (error && !q->orderr) | ||
565 | q->orderr = error; | ||
566 | 540 | ||
567 | endio = bio->bi_end_io; | 541 | if (unlikely(nbytes > bio->bi_size)) { |
568 | private = bio->bi_private; | 542 | printk("%s: want %u bytes done, only %u left\n", |
569 | bio->bi_end_io = flush_dry_bio_endio; | 543 | __FUNCTION__, nbytes, bio->bi_size); |
570 | bio->bi_private = q; | 544 | nbytes = bio->bi_size; |
571 | 545 | } | |
572 | bio_endio(bio, nbytes, error); | ||
573 | 546 | ||
574 | bio->bi_end_io = endio; | 547 | bio->bi_size -= nbytes; |
575 | bio->bi_private = private; | 548 | bio->bi_sector += (nbytes >> 9); |
549 | if (bio->bi_size == 0) | ||
550 | bio_endio(bio, error); | ||
551 | } else { | ||
576 | 552 | ||
577 | return 1; | 553 | /* |
554 | * Okay, this is the barrier request in progress, just | ||
555 | * record the error; | ||
556 | */ | ||
557 | if (error && !q->orderr) | ||
558 | q->orderr = error; | ||
559 | } | ||
578 | } | 560 | } |
579 | 561 | ||
580 | /** | 562 | /** |
@@ -1220,16 +1202,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags); | |||
1220 | 1202 | ||
1221 | void blk_recount_segments(struct request_queue *q, struct bio *bio) | 1203 | void blk_recount_segments(struct request_queue *q, struct bio *bio) |
1222 | { | 1204 | { |
1205 | struct request rq; | ||
1206 | struct bio *nxt = bio->bi_next; | ||
1207 | rq.q = q; | ||
1208 | rq.bio = rq.biotail = bio; | ||
1209 | bio->bi_next = NULL; | ||
1210 | blk_recalc_rq_segments(&rq); | ||
1211 | bio->bi_next = nxt; | ||
1212 | bio->bi_phys_segments = rq.nr_phys_segments; | ||
1213 | bio->bi_hw_segments = rq.nr_hw_segments; | ||
1214 | bio->bi_flags |= (1 << BIO_SEG_VALID); | ||
1215 | } | ||
1216 | EXPORT_SYMBOL(blk_recount_segments); | ||
1217 | |||
1218 | static void blk_recalc_rq_segments(struct request *rq) | ||
1219 | { | ||
1220 | int nr_phys_segs; | ||
1221 | int nr_hw_segs; | ||
1222 | unsigned int phys_size; | ||
1223 | unsigned int hw_size; | ||
1223 | struct bio_vec *bv, *bvprv = NULL; | 1224 | struct bio_vec *bv, *bvprv = NULL; |
1224 | int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; | 1225 | int seg_size; |
1226 | int hw_seg_size; | ||
1227 | int cluster; | ||
1228 | struct req_iterator iter; | ||
1225 | int high, highprv = 1; | 1229 | int high, highprv = 1; |
1230 | struct request_queue *q = rq->q; | ||
1226 | 1231 | ||
1227 | if (unlikely(!bio->bi_io_vec)) | 1232 | if (!rq->bio) |
1228 | return; | 1233 | return; |
1229 | 1234 | ||
1230 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | 1235 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); |
1231 | hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0; | 1236 | hw_seg_size = seg_size = 0; |
1232 | bio_for_each_segment(bv, bio, i) { | 1237 | phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; |
1238 | rq_for_each_segment(bv, rq, iter) { | ||
1233 | /* | 1239 | /* |
1234 | * the trick here is making sure that a high page is never | 1240 | * the trick here is making sure that a high page is never |
1235 | * considered part of another segment, since that might | 1241 | * considered part of another segment, since that might |
@@ -1255,12 +1261,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio) | |||
1255 | } | 1261 | } |
1256 | new_segment: | 1262 | new_segment: |
1257 | if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && | 1263 | if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) && |
1258 | !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) { | 1264 | !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) |
1259 | hw_seg_size += bv->bv_len; | 1265 | hw_seg_size += bv->bv_len; |
1260 | } else { | 1266 | else { |
1261 | new_hw_segment: | 1267 | new_hw_segment: |
1262 | if (hw_seg_size > bio->bi_hw_front_size) | 1268 | if (nr_hw_segs == 1 && |
1263 | bio->bi_hw_front_size = hw_seg_size; | 1269 | hw_seg_size > rq->bio->bi_hw_front_size) |
1270 | rq->bio->bi_hw_front_size = hw_seg_size; | ||
1264 | hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len; | 1271 | hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len; |
1265 | nr_hw_segs++; | 1272 | nr_hw_segs++; |
1266 | } | 1273 | } |
@@ -1270,15 +1277,15 @@ new_hw_segment: | |||
1270 | seg_size = bv->bv_len; | 1277 | seg_size = bv->bv_len; |
1271 | highprv = high; | 1278 | highprv = high; |
1272 | } | 1279 | } |
1273 | if (hw_seg_size > bio->bi_hw_back_size) | 1280 | |
1274 | bio->bi_hw_back_size = hw_seg_size; | 1281 | if (nr_hw_segs == 1 && |
1275 | if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size) | 1282 | hw_seg_size > rq->bio->bi_hw_front_size) |
1276 | bio->bi_hw_front_size = hw_seg_size; | 1283 | rq->bio->bi_hw_front_size = hw_seg_size; |
1277 | bio->bi_phys_segments = nr_phys_segs; | 1284 | if (hw_seg_size > rq->biotail->bi_hw_back_size) |
1278 | bio->bi_hw_segments = nr_hw_segs; | 1285 | rq->biotail->bi_hw_back_size = hw_seg_size; |
1279 | bio->bi_flags |= (1 << BIO_SEG_VALID); | 1286 | rq->nr_phys_segments = nr_phys_segs; |
1287 | rq->nr_hw_segments = nr_hw_segs; | ||
1280 | } | 1288 | } |
1281 | EXPORT_SYMBOL(blk_recount_segments); | ||
1282 | 1289 | ||
1283 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, | 1290 | static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, |
1284 | struct bio *nxt) | 1291 | struct bio *nxt) |
@@ -1325,8 +1332,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
1325 | struct scatterlist *sg) | 1332 | struct scatterlist *sg) |
1326 | { | 1333 | { |
1327 | struct bio_vec *bvec, *bvprv; | 1334 | struct bio_vec *bvec, *bvprv; |
1328 | struct bio *bio; | 1335 | struct req_iterator iter; |
1329 | int nsegs, i, cluster; | 1336 | int nsegs, cluster; |
1330 | 1337 | ||
1331 | nsegs = 0; | 1338 | nsegs = 0; |
1332 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); | 1339 | cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); |
@@ -1335,35 +1342,30 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
1335 | * for each bio in rq | 1342 | * for each bio in rq |
1336 | */ | 1343 | */ |
1337 | bvprv = NULL; | 1344 | bvprv = NULL; |
1338 | rq_for_each_bio(bio, rq) { | 1345 | rq_for_each_segment(bvec, rq, iter) { |
1339 | /* | 1346 | int nbytes = bvec->bv_len; |
1340 | * for each segment in bio | ||
1341 | */ | ||
1342 | bio_for_each_segment(bvec, bio, i) { | ||
1343 | int nbytes = bvec->bv_len; | ||
1344 | 1347 | ||
1345 | if (bvprv && cluster) { | 1348 | if (bvprv && cluster) { |
1346 | if (sg[nsegs - 1].length + nbytes > q->max_segment_size) | 1349 | if (sg[nsegs - 1].length + nbytes > q->max_segment_size) |
1347 | goto new_segment; | 1350 | goto new_segment; |
1348 | 1351 | ||
1349 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) | 1352 | if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) |
1350 | goto new_segment; | 1353 | goto new_segment; |
1351 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) | 1354 | if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) |
1352 | goto new_segment; | 1355 | goto new_segment; |
1353 | 1356 | ||
1354 | sg[nsegs - 1].length += nbytes; | 1357 | sg[nsegs - 1].length += nbytes; |
1355 | } else { | 1358 | } else { |
1356 | new_segment: | 1359 | new_segment: |
1357 | memset(&sg[nsegs],0,sizeof(struct scatterlist)); | 1360 | memset(&sg[nsegs],0,sizeof(struct scatterlist)); |
1358 | sg[nsegs].page = bvec->bv_page; | 1361 | sg[nsegs].page = bvec->bv_page; |
1359 | sg[nsegs].length = nbytes; | 1362 | sg[nsegs].length = nbytes; |
1360 | sg[nsegs].offset = bvec->bv_offset; | 1363 | sg[nsegs].offset = bvec->bv_offset; |
1361 | 1364 | ||
1362 | nsegs++; | 1365 | nsegs++; |
1363 | } | 1366 | } |
1364 | bvprv = bvec; | 1367 | bvprv = bvec; |
1365 | } /* segments in bio */ | 1368 | } /* segments in rq */ |
1366 | } /* bios in rq */ | ||
1367 | 1369 | ||
1368 | return nsegs; | 1370 | return nsegs; |
1369 | } | 1371 | } |
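
The blk_rq_map_sg() rewrite just above leans on the then-new rq_for_each_segment() iterator, which visits every bio_vec of every bio in a request and so collapses the old nested rq_for_each_bio()/bio_for_each_segment() loops into one. A small illustrative helper (not from this commit) using the same iterator:

#include <linux/blkdev.h>

/* hypothetical helper: total payload bytes carried by a request */
static unsigned int example_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec->bv_len;

	return bytes;
}
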
@@ -1420,7 +1422,8 @@ static inline int ll_new_hw_segment(struct request_queue *q, | |||
1420 | return 1; | 1422 | return 1; |
1421 | } | 1423 | } |
1422 | 1424 | ||
1423 | int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) | 1425 | static int ll_back_merge_fn(struct request_queue *q, struct request *req, |
1426 | struct bio *bio) | ||
1424 | { | 1427 | { |
1425 | unsigned short max_sectors; | 1428 | unsigned short max_sectors; |
1426 | int len; | 1429 | int len; |
@@ -1456,7 +1459,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *b | |||
1456 | 1459 | ||
1457 | return ll_new_hw_segment(q, req, bio); | 1460 | return ll_new_hw_segment(q, req, bio); |
1458 | } | 1461 | } |
1459 | EXPORT_SYMBOL(ll_back_merge_fn); | ||
1460 | 1462 | ||
1461 | static int ll_front_merge_fn(struct request_queue *q, struct request *req, | 1463 | static int ll_front_merge_fn(struct request_queue *q, struct request *req, |
1462 | struct bio *bio) | 1464 | struct bio *bio) |
@@ -1852,7 +1854,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
1852 | 1854 | ||
1853 | init_timer(&q->unplug_timer); | 1855 | init_timer(&q->unplug_timer); |
1854 | 1856 | ||
1855 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | 1857 | kobject_set_name(&q->kobj, "%s", "queue"); |
1856 | q->kobj.ktype = &queue_ktype; | 1858 | q->kobj.ktype = &queue_ktype; |
1857 | kobject_init(&q->kobj); | 1859 | kobject_init(&q->kobj); |
1858 | 1860 | ||
@@ -2346,6 +2348,23 @@ static int __blk_rq_unmap_user(struct bio *bio) | |||
2346 | return ret; | 2348 | return ret; |
2347 | } | 2349 | } |
2348 | 2350 | ||
2351 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
2352 | struct bio *bio) | ||
2353 | { | ||
2354 | if (!rq->bio) | ||
2355 | blk_rq_bio_prep(q, rq, bio); | ||
2356 | else if (!ll_back_merge_fn(q, rq, bio)) | ||
2357 | return -EINVAL; | ||
2358 | else { | ||
2359 | rq->biotail->bi_next = bio; | ||
2360 | rq->biotail = bio; | ||
2361 | |||
2362 | rq->data_len += bio->bi_size; | ||
2363 | } | ||
2364 | return 0; | ||
2365 | } | ||
2366 | EXPORT_SYMBOL(blk_rq_append_bio); | ||
2367 | |||
2349 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | 2368 | static int __blk_rq_map_user(struct request_queue *q, struct request *rq, |
2350 | void __user *ubuf, unsigned int len) | 2369 | void __user *ubuf, unsigned int len) |
2351 | { | 2370 | { |
@@ -2377,23 +2396,12 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, | |||
2377 | */ | 2396 | */ |
2378 | bio_get(bio); | 2397 | bio_get(bio); |
2379 | 2398 | ||
2380 | if (!rq->bio) | 2399 | ret = blk_rq_append_bio(q, rq, bio); |
2381 | blk_rq_bio_prep(q, rq, bio); | 2400 | if (!ret) |
2382 | else if (!ll_back_merge_fn(q, rq, bio)) { | 2401 | return bio->bi_size; |
2383 | ret = -EINVAL; | ||
2384 | goto unmap_bio; | ||
2385 | } else { | ||
2386 | rq->biotail->bi_next = bio; | ||
2387 | rq->biotail = bio; | ||
2388 | |||
2389 | rq->data_len += bio->bi_size; | ||
2390 | } | ||
2391 | |||
2392 | return bio->bi_size; | ||
2393 | 2402 | ||
2394 | unmap_bio: | ||
2395 | /* if it was bounced we must call the end io function */ | 2403 |
2396 | bio_endio(bio, bio->bi_size, 0); | 2404 | bio_endio(bio, 0); |
2397 | __blk_rq_unmap_user(orig_bio); | 2405 | __blk_rq_unmap_user(orig_bio); |
2398 | bio_put(bio); | 2406 | bio_put(bio); |
2399 | return ret; | 2407 | return ret; |
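This hunk, like the bio_endio() callers further down, drops the bytes-done argument: bio_endio() now completes the whole bio, and partial-completion bookkeeping stays with the request code (see the req_bio_endio() hunks below). A hedged sketch of the new call convention:

#include <linux/bio.h>

static void example_finish_bio(struct bio *bio, int error)
{
	/* error is 0 on success or a negative errno; no byte count argument */
	bio_endio(bio, error);
}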
@@ -2502,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, | |||
2502 | return PTR_ERR(bio); | 2510 | return PTR_ERR(bio); |
2503 | 2511 | ||
2504 | if (bio->bi_size != len) { | 2512 | if (bio->bi_size != len) { |
2505 | bio_endio(bio, bio->bi_size, 0); | 2513 | bio_endio(bio, 0); |
2506 | bio_unmap_user(bio); | 2514 | bio_unmap_user(bio); |
2507 | return -EINVAL; | 2515 | return -EINVAL; |
2508 | } | 2516 | } |
@@ -2912,15 +2920,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio) | |||
2912 | 2920 | ||
2913 | req->errors = 0; | 2921 | req->errors = 0; |
2914 | req->hard_sector = req->sector = bio->bi_sector; | 2922 | req->hard_sector = req->sector = bio->bi_sector; |
2915 | req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio); | ||
2916 | req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio); | ||
2917 | req->nr_phys_segments = bio_phys_segments(req->q, bio); | ||
2918 | req->nr_hw_segments = bio_hw_segments(req->q, bio); | ||
2919 | req->buffer = bio_data(bio); /* see ->buffer comment above */ | ||
2920 | req->bio = req->biotail = bio; | ||
2921 | req->ioprio = bio_prio(bio); | 2923 | req->ioprio = bio_prio(bio); |
2922 | req->rq_disk = bio->bi_bdev->bd_disk; | ||
2923 | req->start_time = jiffies; | 2924 | req->start_time = jiffies; |
2925 | blk_rq_bio_prep(req->q, req, bio); | ||
2924 | } | 2926 | } |
2925 | 2927 | ||
2926 | static int __make_request(struct request_queue *q, struct bio *bio) | 2928 | static int __make_request(struct request_queue *q, struct bio *bio) |
@@ -3038,7 +3040,7 @@ out: | |||
3038 | return 0; | 3040 | return 0; |
3039 | 3041 | ||
3040 | end_io: | 3042 | end_io: |
3041 | bio_endio(bio, nr_sectors << 9, err); | 3043 | bio_endio(bio, err); |
3042 | return 0; | 3044 | return 0; |
3043 | } | 3045 | } |
3044 | 3046 | ||
@@ -3185,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio) | |||
3185 | bdevname(bio->bi_bdev, b), | 3187 | bdevname(bio->bi_bdev, b), |
3186 | (long long) bio->bi_sector); | 3188 | (long long) bio->bi_sector); |
3187 | end_io: | 3189 | end_io: |
3188 | bio_endio(bio, bio->bi_size, -EIO); | 3190 | bio_endio(bio, -EIO); |
3189 | break; | 3191 | break; |
3190 | } | 3192 | } |
3191 | 3193 | ||
@@ -3329,48 +3331,6 @@ void submit_bio(int rw, struct bio *bio) | |||
3329 | 3331 | ||
3330 | EXPORT_SYMBOL(submit_bio); | 3332 | EXPORT_SYMBOL(submit_bio); |
3331 | 3333 | ||
3332 | static void blk_recalc_rq_segments(struct request *rq) | ||
3333 | { | ||
3334 | struct bio *bio, *prevbio = NULL; | ||
3335 | int nr_phys_segs, nr_hw_segs; | ||
3336 | unsigned int phys_size, hw_size; | ||
3337 | struct request_queue *q = rq->q; | ||
3338 | |||
3339 | if (!rq->bio) | ||
3340 | return; | ||
3341 | |||
3342 | phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; | ||
3343 | rq_for_each_bio(bio, rq) { | ||
3344 | /* Force bio hw/phys segs to be recalculated. */ | ||
3345 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | ||
3346 | |||
3347 | nr_phys_segs += bio_phys_segments(q, bio); | ||
3348 | nr_hw_segs += bio_hw_segments(q, bio); | ||
3349 | if (prevbio) { | ||
3350 | int pseg = phys_size + prevbio->bi_size + bio->bi_size; | ||
3351 | int hseg = hw_size + prevbio->bi_size + bio->bi_size; | ||
3352 | |||
3353 | if (blk_phys_contig_segment(q, prevbio, bio) && | ||
3354 | pseg <= q->max_segment_size) { | ||
3355 | nr_phys_segs--; | ||
3356 | phys_size += prevbio->bi_size + bio->bi_size; | ||
3357 | } else | ||
3358 | phys_size = 0; | ||
3359 | |||
3360 | if (blk_hw_contig_segment(q, prevbio, bio) && | ||
3361 | hseg <= q->max_segment_size) { | ||
3362 | nr_hw_segs--; | ||
3363 | hw_size += prevbio->bi_size + bio->bi_size; | ||
3364 | } else | ||
3365 | hw_size = 0; | ||
3366 | } | ||
3367 | prevbio = bio; | ||
3368 | } | ||
3369 | |||
3370 | rq->nr_phys_segments = nr_phys_segs; | ||
3371 | rq->nr_hw_segments = nr_hw_segs; | ||
3372 | } | ||
3373 | |||
3374 | static void blk_recalc_rq_sectors(struct request *rq, int nsect) | 3334 | static void blk_recalc_rq_sectors(struct request *rq, int nsect) |
3375 | { | 3335 | { |
3376 | if (blk_fs_request(rq)) { | 3336 | if (blk_fs_request(rq)) { |
@@ -3442,8 +3402,7 @@ static int __end_that_request_first(struct request *req, int uptodate, | |||
3442 | if (nr_bytes >= bio->bi_size) { | 3402 | if (nr_bytes >= bio->bi_size) { |
3443 | req->bio = bio->bi_next; | 3403 | req->bio = bio->bi_next; |
3444 | nbytes = bio->bi_size; | 3404 | nbytes = bio->bi_size; |
3445 | if (!ordered_bio_endio(req, bio, nbytes, error)) | 3405 | req_bio_endio(req, bio, nbytes, error); |
3446 | bio_endio(bio, nbytes, error); | ||
3447 | next_idx = 0; | 3406 | next_idx = 0; |
3448 | bio_nbytes = 0; | 3407 | bio_nbytes = 0; |
3449 | } else { | 3408 | } else { |
@@ -3498,8 +3457,7 @@ static int __end_that_request_first(struct request *req, int uptodate, | |||
3498 | * if the request wasn't completed, update state | 3457 | * if the request wasn't completed, update state |
3499 | */ | 3458 | */ |
3500 | if (bio_nbytes) { | 3459 | if (bio_nbytes) { |
3501 | if (!ordered_bio_endio(req, bio, bio_nbytes, error)) | 3460 | req_bio_endio(req, bio, bio_nbytes, error); |
3502 | bio_endio(bio, bio_nbytes, error); | ||
3503 | bio->bi_idx += next_idx; | 3461 | bio->bi_idx += next_idx; |
3504 | bio_iovec(bio)->bv_offset += nr_bytes; | 3462 | bio_iovec(bio)->bv_offset += nr_bytes; |
3505 | bio_iovec(bio)->bv_len -= nr_bytes; | 3463 | bio_iovec(bio)->bv_len -= nr_bytes; |
@@ -3574,7 +3532,7 @@ static void blk_done_softirq(struct softirq_action *h) | |||
3574 | } | 3532 | } |
3575 | } | 3533 | } |
3576 | 3534 | ||
3577 | static int blk_cpu_notify(struct notifier_block *self, unsigned long action, | 3535 | static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action, |
3578 | void *hcpu) | 3536 | void *hcpu) |
3579 | { | 3537 | { |
3580 | /* | 3538 | /* |
@@ -3595,7 +3553,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action, | |||
3595 | } | 3553 | } |
3596 | 3554 | ||
3597 | 3555 | ||
3598 | static struct notifier_block __devinitdata blk_cpu_notifier = { | 3556 | static struct notifier_block blk_cpu_notifier __cpuinitdata = { |
3599 | .notifier_call = blk_cpu_notify, | 3557 | .notifier_call = blk_cpu_notify, |
3600 | }; | 3558 | }; |
3601 | 3559 | ||
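The notifier above is now marked __cpuinit/__cpuinitdata instead of __devinitdata, so it can be discarded when CPU hotplug support is compiled out. A hedged sketch of how such a notifier is typically registered at init time (the function name is illustrative; this file's actual init path may differ slightly):

#include <linux/cpu.h>
#include <linux/init.h>

static int __init example_register_notifier(void)
{
	/* a no-op stub when CONFIG_HOTPLUG_CPU is not set */
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}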
@@ -3680,8 +3638,8 @@ void end_request(struct request *req, int uptodate) | |||
3680 | 3638 | ||
3681 | EXPORT_SYMBOL(end_request); | 3639 | EXPORT_SYMBOL(end_request); |
3682 | 3640 | ||
3683 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 3641 | static void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
3684 | struct bio *bio) | 3642 | struct bio *bio) |
3685 | { | 3643 | { |
3686 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ | 3644 | /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ |
3687 | rq->cmd_flags |= (bio->bi_rw & 3); | 3645 | rq->cmd_flags |= (bio->bi_rw & 3); |
@@ -3695,9 +3653,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | |||
3695 | rq->data_len = bio->bi_size; | 3653 | rq->data_len = bio->bi_size; |
3696 | 3654 | ||
3697 | rq->bio = rq->biotail = bio; | 3655 | rq->bio = rq->biotail = bio; |
3698 | } | ||
3699 | 3656 | ||
3700 | EXPORT_SYMBOL(blk_rq_bio_prep); | 3657 | if (bio->bi_bdev) |
3658 | rq->rq_disk = bio->bi_bdev->bd_disk; | ||
3659 | } | ||
3701 | 3660 | ||
3702 | int kblockd_schedule_work(struct work_struct *work) | 3661 | int kblockd_schedule_work(struct work_struct *work) |
3703 | { | 3662 | { |