-rw-r--r--   block/blk-map.c                      |  5
-rw-r--r--   block/blk-merge.c                    |  6
-rw-r--r--   block/blk-settings.c                 | 51
-rw-r--r--   block/blk-sysfs.c                    |  2
-rw-r--r--   block/blk-throttle.c                 | 39
-rw-r--r--   drivers/block/cciss.c                |  2
-rw-r--r--   drivers/block/drbd/drbd_receiver.c   | 14
-rw-r--r--   drivers/block/drbd/drbd_req.h        |  3
-rw-r--r--   drivers/block/drbd/drbd_worker.c     | 10
-rw-r--r--   drivers/md/dm-table.c                | 10
-rw-r--r--   drivers/md/md.c                      |  3
-rw-r--r--   drivers/scsi/scsi_lib.c              |  3
-rw-r--r--   include/linux/blkdev.h               | 10
13 files changed, 85 insertions(+), 73 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 5d5dbe47c22..e663ac2d8e6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
         for (i = 0; i < iov_count; i++) {
                 unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+                if (!iov[i].iov_len)
+                        return -EINVAL;
+
                 if (uaddr & queue_dma_alignment(q)) {
                         unaligned = 1;
                         break;
                 }
-                if (!iov[i].iov_len)
-                        return -EINVAL;
         }
 
         if (unaligned || (q->dma_pad_mask & len) || map_data)
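
The blk_rq_map_user_iov() hunk above moves the zero-length check ahead of the DMA-alignment test. A plausible reading, not stated in the patch itself: with the old ordering, an element that was both misaligned and zero length made the loop break at the alignment test before its length was ever examined, so it slipped into the bounce/copy path instead of being rejected. A hypothetical user-space vector illustrating the case the reordered check now catches (buf is an arbitrary user buffer):

#include <sys/uio.h>

/* Hypothetical example, not from the patch: the single element is both
 * misaligned and zero length.  The reordered check returns -EINVAL for it
 * up front instead of falling through to the copy path. */
static void fill_iov(struct iovec *iov, char *buf)
{
        iov[0].iov_base = buf + 1;      /* violates queue_dma_alignment() */
        iov[0].iov_len  = 0;            /* invalid: zero length */
}
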
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 77b7c26df6b..74bc4a768f3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                 return 0;
 
         fbio = bio;
-        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+        cluster = blk_queue_cluster(q);
         seg_size = 0;
         nr_phys_segs = 0;
         for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                    struct bio *nxt)
 {
-        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+        if (!blk_queue_cluster(q))
                 return 0;
 
         if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
         int nsegs, cluster;
 
         nsegs = 0;
-        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+        cluster = blk_queue_cluster(q);
 
         /*
          * for each bio in rq
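
All three call sites in blk-merge.c now read the cluster setting through blk_queue_cluster(), the helper added to include/linux/blkdev.h at the end of this patch, instead of testing QUEUE_FLAG_CLUSTER. A simplified sketch of the kind of decision this setting gates; the real merge code also checks physical contiguity and segment boundaries, which are omitted here, and can_merge_segments() is a made-up name:

#include <linux/blkdev.h>

/* Simplified sketch, not the kernel's actual merge logic: clustering allows
 * two adjacent bio segments to be collapsed into one scatter-gather entry,
 * provided the combined size still fits the queue's segment-size limit. */
static int can_merge_segments(struct request_queue *q,
                              unsigned int seg_size, unsigned int next_len)
{
        if (!blk_queue_cluster(q))      /* formerly test_bit(QUEUE_FLAG_CLUSTER, ...) */
                return 0;
        if (seg_size + next_len > queue_max_segment_size(q))
                return 0;
        return 1;
}
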
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 701859fb964..36c8c1f2af1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
         lim->alignment_offset = 0;
         lim->io_opt = 0;
         lim->misaligned = 0;
-        lim->no_cluster = 0;
+        lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q: the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors: max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
         if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
                        __func__, max_hw_sectors);
         }
 
-        q->limits.max_hw_sectors = max_hw_sectors;
-        q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-                                      BLK_DEF_MAX_SECTORS);
+        limits->max_hw_sectors = max_hw_sectors;
+        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+                                    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -464,15 +478,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
         blk_stack_limits(&t->limits, &b->limits, 0);
-
-        if (!t->queue_lock)
-                WARN_ON_ONCE(1);
-        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-                unsigned long flags;
-                spin_lock_irqsave(t->queue_lock, flags);
-                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-                spin_unlock_irqrestore(t->queue_lock, flags);
-        }
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
         t->io_min = max(t->io_min, b->io_min);
         t->io_opt = lcm(t->io_opt, b->io_opt);
 
-        t->no_cluster |= b->no_cluster;
+        t->cluster &= b->cluster;
         t->discard_zeroes_data &= b->discard_zeroes_data;
 
         /* Physical block size a multiple of the logical block size? */
@@ -641,7 +646,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                        sector_t offset)
 {
         struct request_queue *t = disk->queue;
-        struct request_queue *b = bdev_get_queue(bdev);
 
         if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +656,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                 printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                        top, bottom);
         }
-
-        if (!t->queue_lock)
-                WARN_ON_ONCE(1);
-        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-                unsigned long flags;
-
-                spin_lock_irqsave(t->queue_lock, flags);
-                if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-                        queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-                spin_unlock_irqrestore(t->queue_lock, flags);
-        }
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
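
The blk-settings.c changes split the old blk_queue_max_hw_sectors() into blk_limits_max_hw_sectors(), which operates on a bare queue_limits, plus a thin wrapper, so callers that only hold a limits structure (as dm_set_device_limits() does later in this patch) no longer poke max_sectors by hand. A minimal usage sketch; my_cap_limits() and my_cap_queue() are made-up names and the sector values are arbitrary:

#include <linux/blkdev.h>

/* Sketch only: cap the hardware limit on a bare queue_limits structure,
 * without needing a request_queue or any queue-flag locking. */
static void my_cap_limits(struct queue_limits *limits)
{
        blk_set_default_limits(limits);
        blk_limits_max_hw_sectors(limits, (unsigned int)(PAGE_SIZE >> 9));
}

/* The old entry point keeps working when a full queue is at hand. */
static void my_cap_queue(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 128);       /* 128 x 512b = 64 KiB; arbitrary */
}
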
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 013457f47fd..41fb69150b4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-        if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+        if (blk_queue_cluster(q))
                 return queue_var_show(queue_max_segment_size(q), (page));
 
         return queue_var_show(PAGE_CACHE_SIZE, (page));
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 004be80fd89..381b09bb562 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
                         tg->slice_end[rw], jiffies);
 }
 
+static inline void throtl_set_slice_end(struct throtl_data *td,
+                struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
 static inline void throtl_extend_slice(struct throtl_data *td,
                 struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 {
@@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
         if (throtl_slice_used(td, tg, rw))
                 return;
 
+        /*
+         * A bio has been dispatched. Also adjust slice_end. It might happen
+         * that initially cgroup limit was very low resulting in high
+         * slice_end, but later limit was bumped up and bio was dispatched
+         * sooner, then we need to reduce slice_end. A high bogus slice_end
+         * is bad because it does not allow new slice to start.
+         */
+
+        throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
         time_elapsed = jiffies - tg->slice_start[rw];
 
         nr_slices = time_elapsed / throtl_slice;
@@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td)
         struct throtl_grp *tg;
         struct hlist_node *pos, *n;
 
-        /*
-         * Make sure atomic_inc() effects from
-         * throtl_update_blkio_group_read_bps(), group of functions are
-         * visible.
-         * Is this required or smp_mb__after_atomic_inc() was suffcient
-         * after the atomic_inc().
-         */
-        smp_rmb();
         if (!atomic_read(&td->limits_changed))
                 return;
 
         throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
 
-        hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-                /*
-                 * Do I need an smp_rmb() here to make sure tg->limits_changed
-                 * update is visible. I am relying on smp_rmb() at the
-                 * beginning of function and not putting a new one here.
-                 */
+        /*
+         * Make sure updates from throtl_update_blkio_group_read_bps() group
+         * of functions to tg->limits_changed are visible. We do not
+         * want update td->limits_changed to be visible but update to
+         * tg->limits_changed not being visible yet on this cpu. Hence
+         * the read barrier.
+         */
+        smp_rmb();
 
+        hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
                 if (throtl_tg_on_rr(tg) && tg->limits_changed) {
                         throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
                                       " riops=%u wiops=%u", tg->bps[READ],
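
The new throtl_set_slice_end() rounds the requested end time up to a slice boundary, and the comment added to throtl_trim_slice() gives the reason: a slice_end computed under an old, very low limit can sit far in the future and keep a fresh slice from starting. A small worked illustration, assuming HZ = 1000 so the default 100 ms throttle slice is 100 jiffies:

#include <linux/kernel.h>       /* roundup() */

/* Illustration only: a dispatch at now = 1030 trims slice_end to the boundary
 * just past "now + one slice" (1130 rounds up to 1200), no matter how far
 * into the future the old limit had pushed it. */
static unsigned long trimmed_slice_end(unsigned long now)
{
        return roundup(now + 100, 100);
}
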
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index f291587d753..233e06c29ff 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2834,6 +2834,8 @@ static int cciss_revalidate(struct gendisk *disk)
         InquiryData_struct *inq_buff = NULL;
 
         for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+                if (!h->drv[logvol])
+                        continue;
                 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
                         sizeof(drv->LunID)) == 0) {
                         FOUND = 1;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 89d8a7cc405..24487d4fb20 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3627,17 +3627,19 @@ static void drbdd(struct drbd_conf *mdev)
         }
 
         shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
-        rv = drbd_recv(mdev, &header->h80.payload, shs);
-        if (unlikely(rv != shs)) {
-                dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
-                goto err_out;
-        }
-
         if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
                 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
                 goto err_out;
         }
 
+        if (shs) {
+                rv = drbd_recv(mdev, &header->h80.payload, shs);
+                if (unlikely(rv != shs)) {
+                        dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
+                        goto err_out;
+                }
+        }
+
         rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
 
         if (unlikely(!rv)) {
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 181ea036482..ab2bd09d54b 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -339,7 +339,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 }
 
 /* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self! */
+ * If you need it irqsave, do it your self!
+ * Which means: don't use from bio endio callback. */
 static inline int req_mod(struct drbd_request *req,
                 enum drbd_req_event what)
 {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 47d223c2409..34f224b018b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -193,8 +193,10 @@ void drbd_endio_sec(struct bio *bio, int error)
  */
 void drbd_endio_pri(struct bio *bio, int error)
 {
+        unsigned long flags;
         struct drbd_request *req = bio->bi_private;
         struct drbd_conf *mdev = req->mdev;
+        struct bio_and_error m;
         enum drbd_req_event what;
         int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
@@ -220,7 +222,13 @@ void drbd_endio_pri(struct bio *bio, int error)
         bio_put(req->private_bio);
         req->private_bio = ERR_PTR(error);
 
-        req_mod(req, what);
+        /* not req_mod(), we need irqsave here! */
+        spin_lock_irqsave(&mdev->req_lock, flags);
+        __req_mod(req, what, &m);
+        spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+        if (m.bio)
+                complete_master_bio(mdev, &m);
 }
 
 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 90267f8d64e..4d705cea0f8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
          */
 
         if (q->merge_bvec_fn && !ti->type->merge)
-                limits->max_sectors =
-                        min_not_zero(limits->max_sectors,
-                                     (unsigned int) (PAGE_SIZE >> 9));
+                blk_limits_max_hw_sectors(limits,
+                                          (unsigned int) (PAGE_SIZE >> 9));
         return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -1131,11 +1130,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
          */
         q->limits = *limits;
 
-        if (limits->no_cluster)
-                queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-        else
-                queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
         if (!dm_table_supports_discards(t))
                 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
         else
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e71c5fa527f..175c424f201 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4295,9 +4295,6 @@ static int md_alloc(dev_t dev, char *name)
                 goto abort;
         mddev->queue->queuedata = mddev;
 
-        /* Can be unlocked because the queue is new: no concurrency */
-        queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
         blk_queue_make_request(mddev->queue, md_make_request);
 
         disk = alloc_disk(1 << shift);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 5b6bbaea59f..4a3842212c5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1637,9 +1637,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
         blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-        /* New queue, no concurrency on queue_flags */
         if (!shost->use_clustering)
-                queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+                q->limits.cluster = 0;
 
         /*
          * set a reasonable default alignment on word boundaries: the
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4..36ab42c9bb9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
         unsigned char           misaligned;
         unsigned char           discard_misaligned;
-        unsigned char           no_cluster;
+        unsigned char           cluster;
         signed char             discard_zeroes_data;
 };
 
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER      0       /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD   19      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
-                                 (1 << QUEUE_FLAG_CLUSTER) |            \
                                  (1 << QUEUE_FLAG_STACKABLE) |          \
                                  (1 << QUEUE_FLAG_SAME_COMP) |          \
                                  (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)         ((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+        return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
@@ -805,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
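
Taken together, the header changes replace the per-queue QUEUE_FLAG_CLUSTER bit with a queue_limits field: readers use the new blk_queue_cluster() helper, writers set q->limits.cluster directly (as __scsi_alloc_queue() now does), and stacked devices inherit the setting because blk_stack_limits() ANDs it (t->cluster &= b->cluster) and device-mapper copies the whole limits structure, which is why the locked flag fix-ups in blk_queue_stack_limits(), disk_stack_limits() and dm_table_set_restrictions() could be deleted. A minimal sketch of the driver-side idiom after this patch; my_host_init_queue() is a made-up name:

#include <linux/blkdev.h>

/* Sketch only: disabling segment clustering is now a plain limits update at
 * queue-setup time; no queue_flag_clear_unlocked() or queue_lock needed. */
static void my_host_init_queue(struct request_queue *q, bool can_cluster)
{
        if (!can_cluster)
                q->limits.cluster = 0;  /* default is 1, see blk_set_default_limits() */

        /* readers, e.g. the merge code, test it through the new helper */
        if (!blk_queue_cluster(q))
                pr_debug("clustering disabled for this queue\n");
}
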