 block/blktrace.c             | 24
 block/compat_ioctl.c         |  5
 block/elevator.c             | 26
 block/ll_rw_blk.c            | 49
 drivers/scsi/sg.c            | 12
 fs/bio.c                     |  8
 include/linux/blkdev.h       | 11
 include/linux/blktrace_api.h | 12
 8 files changed, 123 insertions(+), 24 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 9b4da4ae3c7d..568588cd16b2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -235,7 +235,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	kfree(bt);
 }
 
-static int blk_trace_remove(struct request_queue *q)
+int blk_trace_remove(struct request_queue *q)
 {
 	struct blk_trace *bt;
 
@@ -249,6 +249,7 @@ static int blk_trace_remove(struct request_queue *q)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_trace_remove);
 
 static int blk_dropped_open(struct inode *inode, struct file *filp)
 {
@@ -316,18 +317,17 @@ static struct rchan_callbacks blk_relay_callbacks = {
 /*
  * Setup everything required to start tracing
  */
-int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
+int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			struct blk_user_trace_setup *buts)
 {
 	struct blk_trace *old_bt, *bt = NULL;
 	struct dentry *dir = NULL;
-	char b[BDEVNAME_SIZE];
 	int ret, i;
 
 	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
 
-	strcpy(buts->name, bdevname(bdev, b));
+	strcpy(buts->name, name);
 
 	/*
 	 * some device names have larger paths - convert the slashes
@@ -352,7 +352,7 @@ int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 		goto err;
 
 	bt->dir = dir;
-	bt->dev = bdev->bd_dev;
+	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
 
 	ret = -EIO;
@@ -399,8 +399,8 @@ err:
 	return ret;
 }
 
-static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
-			   char __user *arg)
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+		    char __user *arg)
 {
 	struct blk_user_trace_setup buts;
 	int ret;
@@ -409,7 +409,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 	if (ret)
 		return -EFAULT;
 
-	ret = do_blk_trace_setup(q, bdev, &buts);
+	ret = do_blk_trace_setup(q, name, dev, &buts);
 	if (ret)
 		return ret;
 
@@ -418,8 +418,9 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_trace_setup);
 
-static int blk_trace_startstop(struct request_queue *q, int start)
+int blk_trace_startstop(struct request_queue *q, int start)
 {
 	struct blk_trace *bt;
 	int ret;
@@ -452,6 +453,7 @@ static int blk_trace_startstop(struct request_queue *q, int start)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(blk_trace_startstop);
 
 /**
  * blk_trace_ioctl: - handle the ioctls associated with tracing
@@ -464,6 +466,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 {
 	struct request_queue *q;
 	int ret, start = 0;
+	char b[BDEVNAME_SIZE];
 
 	q = bdev_get_queue(bdev);
 	if (!q)
@@ -473,7 +476,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 
 	switch (cmd) {
 	case BLKTRACESETUP:
-		ret = blk_trace_setup(q, bdev, arg);
+		strcpy(b, bdevname(bdev, b));
+		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
 		break;
 	case BLKTRACESTART:
 		start = 1;
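Taken together, the blktrace.c changes above decouple trace setup from struct block_device: the caller now passes a device name and a dev_t, and the setup/start/stop/remove entry points are exported. A minimal sketch of how a driver that owns a request queue but no block_device could use the exported API (the my_* names are placeholders; only the blk_trace_* calls come from this patch):

static int my_trace_ioctl(struct request_queue *q, char *name, dev_t devt,
			  unsigned int cmd, char __user *arg)
{
	switch (cmd) {
	case BLKTRACESETUP:
		return blk_trace_setup(q, name, devt, arg);	/* exported above */
	case BLKTRACESTART:
		return blk_trace_startstop(q, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(q, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(q);
	}
	return -ENOTTY;
}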
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index cae0a852619e..b73373216b0e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	struct blk_user_trace_setup buts;
 	struct compat_blk_user_trace_setup cbuts;
 	struct request_queue *q;
+	char b[BDEVNAME_SIZE];
 	int ret;
 
 	q = bdev_get_queue(bdev);
@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
 		return -EFAULT;
 
+	strcpy(b, bdevname(bdev, b));
+
 	buts = (struct blk_user_trace_setup) {
 		.act_mask = cbuts.act_mask,
 		.buf_size = cbuts.buf_size,
@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
 	memcpy(&buts.name, &cbuts.name, 32);
 
 	mutex_lock(&bdev->bd_mutex);
-	ret = do_blk_trace_setup(q, bdev, &buts);
+	ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
 	mutex_unlock(&bdev->bd_mutex);
 	if (ret)
 		return ret;
diff --git a/block/elevator.c b/block/elevator.c
index f9736fbdab03..8cd5775acd7a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears; we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+			rq->nr_hw_segments++;
+		}
+
+		if (!q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock. REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+				--rq->nr_hw_segments;
+			}
+
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c16fdfed8c62..1932a56f5e4b 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -721,6 +721,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ *
+ * @q:  the request queue for the device
+ * @buf:  physically contiguous buffer
+ * @size:  size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support; otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+		unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
+/**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q:  the request queue for the device
  * @mask:  the memory boundary mask
@@ -1374,6 +1413,16 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);
 
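The drain buffer added above works in two halves: blk_queue_dma_drain() reserves one segment at queue-setup time, and blk_rq_map_sg() appends the drain page as the final scatterlist entry. A minimal driver-side sketch, assuming a hypothetical MY_DRAIN_SIZE bound and a kmalloc'd buffer the driver keeps for the queue's lifetime:

#define MY_DRAIN_SIZE	256	/* assumed worst-case excess transfer */

static int my_setup_drain(struct request_queue *q)
{
	/* physically contiguous, as the API above requires */
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* returns -EINVAL unless the queue allows at least 2 segments */
	return blk_queue_dma_drain(q, buf, MY_DRAIN_SIZE);
}

Per the note in the docbook comment, any later blk_queue_max_hw_segments() or blk_queue_max_phys_segments() call must pass one fewer than the device supports, or the reserved slot is lost.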
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 17216b76efdc..aba28f335b88 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -48,6 +48,7 @@ static int sg_version_num = 30534;	/* 2 digits for each component */
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
+#include <linux/blktrace_api.h>
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -1067,6 +1068,17 @@ sg_ioctl(struct inode *inode, struct file *filp,
 	case BLKSECTGET:
 		return put_user(sdp->device->request_queue->max_sectors * 512,
 				ip);
+	case BLKTRACESETUP:
+		return blk_trace_setup(sdp->device->request_queue,
+				       sdp->disk->disk_name,
+				       sdp->device->sdev_gendev.devt,
+				       (char *)arg);
+	case BLKTRACESTART:
+		return blk_trace_startstop(sdp->device->request_queue, 1);
+	case BLKTRACESTOP:
+		return blk_trace_startstop(sdp->device->request_queue, 0);
+	case BLKTRACETEARDOWN:
+		return blk_trace_remove(sdp->device->request_queue);
 	default:
 		if (read_only)
 			return -EPERM;	/* don't know so take safe approach */
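With these cases wired up, the standard blktrace ioctls work on /dev/sg nodes as well. A hedged userspace sketch (device path, buffer sizing, and act_mask value are illustrative; struct blk_user_trace_setup and the BLKTRACE* ioctl numbers are the existing UAPI):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* BLKTRACESETUP and friends */
#include <linux/blktrace_api.h>		/* struct blk_user_trace_setup */

static int sg_trace_start(const char *path)	/* e.g. "/dev/sg0" */
{
	struct blk_user_trace_setup buts;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */
	buts.act_mask = 0xffff;		/* trace all action types */
	if (ioctl(fd, BLKTRACESETUP, &buts) < 0)
		return -1;
	return ioctl(fd, BLKTRACESTART);	/* begin tracing */
}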
diff --git a/fs/bio.c b/fs/bio.c
index d59ddbf79626..242e409dab4b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -248,11 +248,13 @@ inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
-
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
 
+	/*
+	 * most users will be overriding ->bi_bdev with a new target,
+	 * so we don't set nor calculate new physical/hw segment counts here
+	 */
 	bio->bi_sector = bio_src->bi_sector;
 	bio->bi_bdev = bio_src->bi_bdev;
 	bio->bi_flags |= 1 << BIO_CLONED;
@@ -260,8 +262,6 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
 	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_size = bio_src->bi_size;
 	bio->bi_idx = bio_src->bi_idx;
-	bio_phys_segments(q, bio);
-	bio_hw_segments(q, bio);
 }
 
 /**
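Dropping the segment recalculation matches how cloned bios are actually used: a stacking driver clones the bio, points ->bi_bdev at its destination device, and only then do the segment counts against that queue matter. A sketch of the pattern the new comment describes (target is a placeholder; bio_clone() is the existing allocating wrapper around __bio_clone()):

static struct bio *clone_and_retarget(struct bio *bio,
				      struct block_device *target)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);

	if (clone)
		clone->bi_bdev = target;	/* override the cloned device */
	return clone;
}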
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b71c3900810d..71e7a847dffc 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -356,6 +356,8 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -692,6 +694,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			   unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
@@ -768,12 +772,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 
 static inline int queue_dma_alignment(struct request_queue *q)
 {
-	int retval = 511;
-
-	if (q && q->dma_alignment)
-		retval = q->dma_alignment;
-
-	return retval;
+	return q ? q->dma_alignment : 511;
 }
 
 /* assumes size > 256 */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7e11d23ac36a..06dadba349ac 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -148,7 +148,7 @@ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
 extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
 extern int do_blk_trace_setup(struct request_queue *q,
-	struct block_device *bdev, struct blk_user_trace_setup *buts);
+	char *name, dev_t dev, struct blk_user_trace_setup *buts);
 
 
 /**
@@ -282,6 +282,11 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
+extern int blk_trace_setup(request_queue_t *q, char *name, dev_t dev,
+			   char __user *arg);
+extern int blk_trace_startstop(request_queue_t *q, int start);
+extern int blk_trace_remove(request_queue_t *q);
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
@@ -290,7 +295,10 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 #define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
 #define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
 #define blk_add_trace_remap(q, bio, dev, f, t)	do {} while (0)
-#define do_blk_trace_setup(q, bdev, buts)	(-ENOTTY)
+#define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
+#define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
+#define blk_trace_startstop(q, start)		(-ENOTTY)
+#define blk_trace_remove(q)			(-ENOTTY)
 #endif /* CONFIG_BLK_DEV_IO_TRACE */
 #endif /* __KERNEL__ */
 #endif