 block/blk-merge.c       |  6 +++---
 block/blk-settings.c    | 25 ++-----------------------
 block/blk-sysfs.c       |  2 +-
 drivers/md/dm-table.c   |  5 -----
 drivers/md/md.c         |  3 ---
 drivers/scsi/scsi_lib.c |  3 +--
 include/linux/blkdev.h  |  9 ++++++---
 7 files changed, 13 insertions(+), 40 deletions(-)
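
In short: this patch replaces the per-queue QUEUE_FLAG_CLUSTER flag with a cluster field in struct queue_limits. Clustering of adjacent segments is now enabled by default in blk_set_default_limits() and propagated when stacking via t->cluster &= b->cluster in blk_stack_limits(), so the open-coded flag copying in blk_queue_stack_limits(), disk_stack_limits() and dm_table_set_restrictions() can be dropped; readers switch from test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags) to the new blk_queue_cluster() helper.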
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26df6b5..74bc4a768f32 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859fb9647..e55f5fc4ca22 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -464,15 +464,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +536,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
@@ -641,7 +632,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +642,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 013457f47fdc..41fb69150b4d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 90267f8d64ee..e2da1912a2cb 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1131,11 +1131,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 	else
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84c46a161927..52694d29663d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4296,9 +4296,6 @@ static int md_alloc(dev_t dev, char *name)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);
 
 	disk = alloc_disk(1 << shift);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index eafeeda6e194..9d7ba07dc5ef 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1642,9 +1642,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4f..95aeeeb49e8b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP) |		\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
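
Usage note (not part of the patch): after this change a driver opts out of clustering by clearing the limit rather than a queue flag, and no queue_lock juggling is needed because queue_limits is configured at queue setup time. A minimal sketch in C; my_driver_configure_queue() and can_cluster are hypothetical names, while q->limits.cluster and blk_queue_cluster() are the field and helper introduced above:

	#include <linux/blkdev.h>

	/* Hypothetical driver hook, modeled on the scsi_lib.c hunk above. */
	static void my_driver_configure_queue(struct request_queue *q,
					      bool can_cluster)
	{
		/*
		 * blk_set_default_limits() now enables clustering by
		 * default (lim->cluster = 1), so only the opt-out
		 * needs code.
		 */
		if (!can_cluster)
			q->limits.cluster = 0;
	}

Because cluster lives in struct queue_limits, blk_stack_limits() propagates it automatically (t->cluster &= b->cluster): a stacked device permits clustering only if every underlying device does, which is what the removed open-coded QUEUE_FLAG_CLUSTER propagation was approximating.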