-rw-r--r--   block/as-iosched.c               2
-rw-r--r--   block/blktrace.c                57
-rw-r--r--   block/cfq-iosched.c              9
-rw-r--r--   block/deadline-iosched.c         2
-rw-r--r--   block/elevator.c                 4
-rw-r--r--   block/ll_rw_blk.c              166
-rw-r--r--   block/noop-iosched.c             2
-rw-r--r--   block/scsi_ioctl.c              53
-rw-r--r--   drivers/cdrom/cdrom.c            6
-rw-r--r--   fs/bio.c                        23
-rw-r--r--   include/linux/blkdev.h           7
-rw-r--r--   include/linux/blktrace_api.h    12
-rw-r--r--   include/linux/elevator.h         4
13 files changed, 230 insertions, 117 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 50b95e4c142..00242111a45 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1317,7 +1317,7 @@ static void as_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (as_data).
  */
-static void *as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q)
 {
 	struct as_data *ad;
 
diff --git a/block/blktrace.c b/block/blktrace.c
index 135593c8e45..562ca7cbf85 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -22,30 +22,61 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/debugfs.h>
+#include <linux/time.h>
 #include <asm/uaccess.h>
 
 static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
 static unsigned int blktrace_seq __read_mostly = 1;
 
 /*
+ * Send out a notify message.
+ */
+static inline unsigned int trace_note(struct blk_trace *bt,
+		pid_t pid, int action,
+		const void *data, size_t len)
+{
+	struct blk_io_trace *t;
+	int cpu = smp_processor_id();
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + len);
+	if (t == NULL)
+		return 0;
+
+	t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+	t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+	t->device = bt->dev;
+	t->action = action;
+	t->pid = pid;
+	t->cpu = cpu;
+	t->pdu_len = len;
+	memcpy((void *) t + sizeof(*t), data, len);
+	return blktrace_seq;
+}
+
+/*
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
 {
-	struct blk_io_trace *t;
-
-	t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
-	if (t) {
-		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-		t->device = bt->dev;
-		t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
-		t->pid = tsk->pid;
-		t->cpu = smp_processor_id();
-		t->pdu_len = sizeof(tsk->comm);
-		memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
-		tsk->btrace_seq = blktrace_seq;
-	}
+	tsk->btrace_seq = trace_note(bt, tsk->pid,
+			BLK_TN_PROCESS,
+			tsk->comm, sizeof(tsk->comm));
+}
+
+static void trace_note_time(struct blk_trace *bt)
+{
+	struct timespec now;
+	unsigned long flags;
+	u32 words[2];
+
+	getnstimeofday(&now);
+	words[0] = now.tv_sec;
+	words[1] = now.tv_nsec;
+
+	local_irq_save(flags);
+	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+	local_irq_restore(flags);
 }
 
 static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
@@ -394,6 +425,8 @@ static int blk_trace_startstop(request_queue_t *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
+
+			trace_note_time(bt);
 			ret = 0;
 		}
 	} else {
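
The hunks above factor the relay-record setup into a trace_note() helper and add a BLK_TN_TIMESTAMP notify whose payload is two u32 words (tv_sec, then tv_nsec), emitted once when tracing starts. The following is a rough, non-authoritative sketch of how a userspace trace consumer might decode that payload; print_timestamp_note() is a hypothetical helper, and the header size is passed in rather than assuming the exact struct blk_io_trace layout. It is not part of the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative only: given a pointer to a trace record and the size of its
 * fixed header, interpret a BLK_TN_TIMESTAMP payload as written by
 * trace_note_time() above.
 */
static void print_timestamp_note(const void *record, size_t header_size,
                                 uint16_t pdu_len)
{
	uint32_t words[2];	/* words[0] = tv_sec, words[1] = tv_nsec */

	if (pdu_len < sizeof(words))
		return;
	memcpy(words, (const char *)record + header_size, sizeof(words));
	printf("trace wall-clock start: %u.%09u\n", words[0], words[1]);
}
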
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1d9c3c70a9a..e9019ed39b7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1464,8 +1464,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 }
 
 static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
-		       struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
 {
 	sector_t sdist;
 	u64 total;
@@ -1617,7 +1616,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	}
 
 	cfq_update_io_thinktime(cfqd, cic);
-	cfq_update_io_seektime(cfqd, cic, rq);
+	cfq_update_io_seektime(cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_queue = jiffies;
@@ -1770,7 +1769,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 /*
  * queue lock held here
  */
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
@@ -1951,7 +1950,7 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }
 
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
 	int i;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b7c5b34cb7b..6d673e938d3 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q)
 {
 	struct deadline_data *dd;
 
diff --git a/block/elevator.c b/block/elevator.c
index 8ccd163254b..c0063f345c5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -129,7 +129,7 @@ static struct elevator_type *elevator_get(const char *name)
 
 static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-	return eq->ops->elevator_init_fn(q, eq);
+	return eq->ops->elevator_init_fn(q);
 }
 
 static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
@@ -810,7 +810,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
 	elevator_t *e = q->elevator;
 
 	if (e->ops->elevator_put_req_fn)
-		e->ops->elevator_put_req_fn(q, rq);
+		e->ops->elevator_put_req_fn(rq);
 }
 
 int elv_may_queue(request_queue_t *q, int rw)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 9eaee664053..0f82e12f7b6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+	int ret = 0;
+
+	if (bio) {
+		if (bio_flagged(bio, BIO_USER_MAPPED))
+			bio_unmap_user(bio);
+		else
+			ret = bio_uncopy_user(bio);
+	}
+
+	return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+			     void __user *ubuf, unsigned int len)
+{
+	unsigned long uaddr;
+	struct bio *bio, *orig_bio;
+	int reading, ret;
+
+	reading = rq_data_dir(rq) == READ;
+
+	/*
+	 * if alignment requirement is satisfied, map in user pages for
+	 * direct dma. else, set up kernel bounce buffers
+	 */
+	uaddr = (unsigned long) ubuf;
+	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+		bio = bio_map_user(q, NULL, uaddr, len, reading);
+	else
+		bio = bio_copy_user(q, uaddr, len, reading);
+
+	if (IS_ERR(bio)) {
+		return PTR_ERR(bio);
+	}
+
+	orig_bio = bio;
+	blk_queue_bounce(q, &bio);
+	/*
+	 * We link the bounce buffer in and could have to traverse it
+	 * later so we have to get a ref to prevent it from being freed
+	 */
+	bio_get(bio);
+
+	/*
+	 * for most (all? don't know of any) queues we could
+	 * skip grabbing the queue lock here. only drivers with
+	 * funky private ->back_merge_fn() function could be
+	 * problematic.
+	 */
+	spin_lock_irq(q->queue_lock);
+	if (!rq->bio)
+		blk_rq_bio_prep(q, rq, bio);
+	else if (!q->back_merge_fn(q, rq, bio)) {
+		ret = -EINVAL;
+		spin_unlock_irq(q->queue_lock);
+		goto unmap_bio;
+	} else {
+		rq->biotail->bi_next = bio;
+		rq->biotail = bio;
+
+		rq->nr_sectors += bio_sectors(bio);
+		rq->hard_nr_sectors = rq->nr_sectors;
+		rq->data_len += bio->bi_size;
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	return bio->bi_size;
+
+unmap_bio:
+	/* if it was bounced we must call the end io function */
+	bio_endio(bio, bio->bi_size, 0);
+	__blk_rq_unmap_user(orig_bio);
+	bio_put(bio);
+	return ret;
+}
+
 /**
  * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
  * @q:		request queue where request should be inserted
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
  * unmapping.
  */
 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
-		    unsigned int len)
+		    unsigned long len)
 {
-	unsigned long uaddr;
-	struct bio *bio;
-	int reading;
+	unsigned long bytes_read = 0;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
 
-	reading = rq_data_dir(rq) == READ;
+	while (bytes_read != len) {
+		unsigned long map_len, end, start;
 
-	/*
-	 * if alignment requirement is satisfied, map in user pages for
-	 * direct dma. else, set up kernel bounce buffers
-	 */
-	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
-		bio = bio_map_user(q, NULL, uaddr, len, reading);
-	else
-		bio = bio_copy_user(q, uaddr, len, reading);
+		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+							>> PAGE_SHIFT;
+		start = (unsigned long)ubuf >> PAGE_SHIFT;
 
-	if (!IS_ERR(bio)) {
-		rq->bio = rq->biotail = bio;
-		blk_rq_bio_prep(q, rq, bio);
+		/*
+		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
+		 * pages. If this happens we just lower the requested
+		 * mapping len by a page so that we can fit
+		 */
+		if (end - start > BIO_MAX_PAGES)
+			map_len -= PAGE_SIZE;
 
-		rq->buffer = rq->data = NULL;
-		rq->data_len = len;
-		return 0;
+		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+		if (ret < 0)
+			goto unmap_rq;
+		bytes_read += ret;
+		ubuf += ret;
 	}
 
-	/*
-	 * bio is the err-ptr
-	 */
-	return PTR_ERR(bio);
+	rq->buffer = rq->data = NULL;
+	return 0;
+unmap_rq:
+	blk_rq_unmap_user(rq);
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_map_user);
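
blk_rq_map_user() now walks the user buffer in chunks of at most BIO_MAX_SIZE, shrinking a chunk by one page whenever a misaligned start address would otherwise need BIO_MAX_PAGES + 1 pages. Below is a self-contained sketch of that length calculation only, with hypothetical SKETCH_* stand-ins for the kernel's PAGE_SIZE/PAGE_SHIFT/BIO_MAX_PAGES values; it is illustrative and not part of the patch.

#include <stddef.h>

/* Stand-in values for the kernel constants used above (illustrative only). */
#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_SIZE	(1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_BIO_MAX_PAGES	256UL
#define SKETCH_BIO_MAX_SIZE	(SKETCH_BIO_MAX_PAGES << SKETCH_PAGE_SHIFT)

/*
 * Mirror of the chunk-length logic in blk_rq_map_user() above: clamp the
 * request to BIO_MAX_SIZE, then drop one page if the buffer's start offset
 * would make the mapping span BIO_MAX_PAGES + 1 pages.
 */
static unsigned long map_chunk_len(unsigned long ubuf, unsigned long remaining)
{
	unsigned long map_len, end, start;

	map_len = remaining < SKETCH_BIO_MAX_SIZE ? remaining : SKETCH_BIO_MAX_SIZE;
	end = (ubuf + map_len + SKETCH_PAGE_SIZE - 1) >> SKETCH_PAGE_SHIFT;
	start = ubuf >> SKETCH_PAGE_SHIFT;

	if (end - start > SKETCH_BIO_MAX_PAGES)
		map_len -= SKETCH_PAGE_SIZE;

	return map_len;
}
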
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * unmapping.
  */
 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
-			struct sg_iovec *iov, int iov_count)
+			struct sg_iovec *iov, int iov_count, unsigned int len)
 {
 	struct bio *bio;
 
@@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	rq->bio = rq->biotail = bio;
+	if (bio->bi_size != len) {
+		bio_endio(bio, bio->bi_size, 0);
+		bio_unmap_user(bio);
+		return -EINVAL;
+	}
+
+	bio_get(bio);
 	blk_rq_bio_prep(q, rq, bio);
 	rq->buffer = rq->data = NULL;
-	rq->data_len = bio->bi_size;
 	return 0;
 }
 
@@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @bio:	bio to be unmapped
- * @ulen:	length of user buffer
+ * @rq:		rq to be unmapped
  *
  * Description:
- *    Unmap a bio previously mapped by blk_rq_map_user().
+ *    Unmap a rq previously mapped by blk_rq_map_user().
+ *    rq->bio must be set to the original head of the request.
  */
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
 {
-	int ret = 0;
+	struct bio *bio, *mapped_bio;
 
-	if (bio) {
-		if (bio_flagged(bio, BIO_USER_MAPPED))
-			bio_unmap_user(bio);
+	while ((bio = rq->bio)) {
+		if (bio_flagged(bio, BIO_BOUNCED))
+			mapped_bio = bio->bi_private;
 		else
-			ret = bio_uncopy_user(bio);
-	}
+			mapped_bio = bio;
 
+		__blk_rq_unmap_user(mapped_bio);
+		rq->bio = bio->bi_next;
+		bio_put(bio);
+	}
 	return 0;
 }
 
@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
-	rq->bio = rq->biotail = bio;
 	blk_rq_bio_prep(q, rq, bio);
-
 	rq->buffer = rq->data = NULL;
-	rq->data_len = len;
 	return 0;
 }
 
@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
 }
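
With the mapping state now carried by the request (possibly as a chain of bios), callers pair blk_rq_map_user() with blk_rq_unmap_user(rq) instead of holding on to a single bio, restoring rq->bio to the original head before unmapping if the request has been executed; the cdrom and sg_io changes below follow this pattern. The following is a minimal sketch of that caller side against this 2.6.19-era API; sketch_issue_user_cmd() is a hypothetical helper with error handling trimmed, not part of the patch.

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper, for illustration only: issue a packet command on a
 * user buffer using the reworked map/unmap-by-request interface.
 */
static int sketch_issue_user_cmd(request_queue_t *q, struct gendisk *disk,
				 void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user(q, rq, ubuf, len);	/* may build several bios */
	if (ret)
		goto out;

	bio = rq->bio;					/* remember the original head */
	if (blk_execute_rq(q, disk, rq, 0))
		ret = -EIO;

	rq->bio = bio;					/* required before unmapping */
	if (blk_rq_unmap_user(rq))
		ret = -EFAULT;
out:
	blk_put_request(rq);
	return ret;
}
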
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 79af4317942..1c3de2b9a6b 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }
 
-static void *noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q)
 {
 	struct noop_data *nd;
 
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e55a7562143..5493c2fbbab 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -226,7 +226,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	unsigned long start_time;
 	int writing = 0, ret = 0;
 	struct request *rq;
-	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	unsigned char cmd[BLK_MAX_CDB];
 
@@ -258,30 +257,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq)
 		return -ENOMEM;
 
-	if (hdr->iovec_count) {
-		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
-		struct sg_iovec *iov;
-
-		iov = kmalloc(size, GFP_KERNEL);
-		if (!iov) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		if (copy_from_user(iov, hdr->dxferp, size)) {
-			kfree(iov);
-			ret = -EFAULT;
-			goto out;
-		}
-
-		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
-		kfree(iov);
-	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-
-	if (ret)
-		goto out;
-
 	/*
 	 * fill in request structure
 	 */
@@ -294,7 +269,6 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense_len = 0;
 
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	bio = rq->bio;
 
 	/*
 	 * bounce this after holding a reference to the original bio, it's
@@ -309,6 +283,31 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_TIMEOUT;
 
+	if (hdr->iovec_count) {
+		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+		struct sg_iovec *iov;
+
+		iov = kmalloc(size, GFP_KERNEL);
+		if (!iov) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(iov, hdr->dxferp, size)) {
+			kfree(iov);
+			ret = -EFAULT;
+			goto out;
+		}
+
+		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+					  hdr->dxfer_len);
+		kfree(iov);
+	} else if (hdr->dxfer_len)
+		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+	if (ret)
+		goto out;
+
 	rq->retries = 0;
 
 	start_time = jiffies;
@@ -339,7 +338,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+	if (blk_rq_unmap_user(rq))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 7ea0f48f8fa..2df5cf4ec74 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2133,16 +2133,14 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	rq->timeout = 60 * HZ;
 	bio = rq->bio;
 
-	if (rq->bio)
-		blk_queue_bounce(q, &rq->bio);
-
 	if (blk_execute_rq(q, cdi->disk, rq, 0)) {
 		struct request_sense *s = rq->sense;
 		ret = -EIO;
 		cdi->last_sense = s->sense_key;
 	}
 
-	if (blk_rq_unmap_user(bio, len))
+	rq->bio = bio;
+	if (blk_rq_unmap_user(rq))
 		ret = -EFAULT;
 
 	if (ret)
diff --git a/fs/bio.c b/fs/bio.c
index f95c8749499..aa4d09bd4e7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -560,10 +560,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
-			ret = -EINVAL;
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
 			break;
-		}
 
 		len -= bytes;
 	}
@@ -622,10 +620,9 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 		nr_pages += end - start;
 		/*
-		 * transfer and buffer must be aligned to at least hardsector
-		 * size for now, in the future we can relax this restriction
+		 * buffer must be aligned to at least hardsector size for now
 		 */
-		if ((uaddr & queue_dma_alignment(q)) || (len & queue_dma_alignment(q)))
+		if (uaddr & queue_dma_alignment(q))
 			return ERR_PTR(-EINVAL);
 	}
 
@@ -751,7 +748,6 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 			 int write_to_vm)
 {
 	struct bio *bio;
-	int len = 0, i;
 
 	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm);
 
@@ -766,18 +762,7 @@ struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
 	 */
 	bio_get(bio);
 
-	for (i = 0; i < iov_count; i++)
-		len += iov[i].iov_len;
-
-	if (bio->bi_size == len)
-		return bio;
-
-	/*
-	 * don't support partial mappings
-	 */
-	bio_endio(bio, bio->bi_size, 0);
-	bio_unmap_user(bio);
-	return ERR_PTR(-EINVAL);
+	return bio;
 }
 
 static void __bio_unmap_user(struct bio *bio)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7bfcde2d557..e1c7286165f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -678,10 +678,11 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
-extern int blk_rq_unmap_user(struct bio *, unsigned int);
+extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern int blk_rq_unmap_user(struct request *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
+extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+			struct sg_iovec *, int, unsigned int);
 extern int blk_execute_rq(request_queue_t *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b99a714fcac..3680ff9a30e 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -50,6 +50,15 @@ enum blktrace_act {
 };
 
 /*
+ * Notify events.
+ */
+enum blktrace_notify {
+	__BLK_TN_PROCESS = 0,		/* establish pid/name mapping */
+	__BLK_TN_TIMESTAMP,		/* include system clock */
+};
+
+
+/*
  * Trace actions in full. Additionally, read or write is masked
  */
 #define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
@@ -68,6 +77,9 @@ enum blktrace_act {
 #define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
 #define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
 
+#define BLK_TN_PROCESS		(__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
+#define BLK_TN_TIMESTAMP	(__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
+
 #define BLK_IO_TRACE_MAGIC	0x65617400
 #define BLK_IO_TRACE_VERSION	0x07
 
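
The BLK_TN_* values layer the new __BLK_TN_* indices on top of BLK_TC_ACT(BLK_TC_NOTIFY), so a consumer can first test the notify bit and then match the full action value. A short sketch of that dispatch follows; sketch_classify_action() is hypothetical and not part of the patch.

#include <linux/blktrace_api.h>

/* Sketch only: classify the action word of a blk_io_trace record. */
static int sketch_classify_action(u32 action)
{
	if (!(action & BLK_TC_ACT(BLK_TC_NOTIFY)))
		return 0;			/* ordinary BLK_TA_* event */

	switch (action) {
	case BLK_TN_PROCESS:			/* payload: task comm */
		return 1;
	case BLK_TN_TIMESTAMP:			/* payload: u32 tv_sec, u32 tv_nsec */
		return 2;
	default:
		return -1;			/* unknown notify type */
	}
}
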
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2fa9f114422..a24931d2440 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,11 +21,11 @@ typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_may_queue_fn) (request_queue_t *, int);
 
 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
-typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
-typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *);
 typedef void (elevator_exit_fn) (elevator_t *);
 
 struct elevator_ops
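
Finally, the elevator hook prototypes lose arguments the schedulers never used: elevator_init_fn() drops the elevator_t pointer and elevator_put_req_fn() drops the queue. Below is a minimal sketch of a hypothetical scheduler's hooks under the new signatures, modeled on the noop changes above; sketch_init_queue() and sketch_put_request() are illustrative and not part of the patch.

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Private data for a hypothetical noop-style scheduler (sketch only). */
struct sketch_data {
	struct list_head queue;
};

/* New prototype: no elevator_t argument. */
static void *sketch_init_queue(request_queue_t *q)
{
	struct sketch_data *sd;

	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return NULL;
	INIT_LIST_HEAD(&sd->queue);
	return sd;		/* returned pointer becomes the elevator's private data */
}

/* New prototype: the request queue is no longer passed in. */
static void sketch_put_request(struct request *rq)
{
	/* nothing per-request to release in this sketch */
}
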