Diffstat (limited to 'block')
 block/blk-barrier.c  |   2
 block/blk-ioc.c      |   5
 block/blk-settings.c | 140
 block/cfq-iosched.c  |  73
 block/genhd.c        |   2
 5 files changed, 131 insertions(+), 91 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 8873b9b439ff..8618d8996fea 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -402,7 +402,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * our current implementations need. If we'll ever need
                 * more the interface will need revisiting.
                 */
-               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               page = alloc_page(gfp_mask | __GFP_ZERO);
                if (!page)
                        goto out_free_bio;
                if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
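
The one-line change above makes the discard payload allocation honor the gfp_mask that blkdev_issue_discard() already receives from its caller, instead of hard-coding GFP_KERNEL. That matters for callers that must constrain memory reclaim. A minimal sketch of such a call site, assuming this kernel's blkdev_issue_discard() signature and DISCARD_FL_* flags (the wrapper function itself is hypothetical):

    #include <linux/blkdev.h>

    /* Hypothetical example: a filesystem discarding a freed extent from
     * a context that must not recurse into the FS during reclaim.  With
     * the fix, the payload page allocation inherits GFP_NOFS instead of
     * silently using GFP_KERNEL.
     */
    static int example_discard_extent(struct block_device *bdev,
                                      sector_t sector, sector_t nr_sects)
    {
            return blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
                                        DISCARD_FL_WAIT);
    }
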
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index cbdabb0dd6d7..98e6bf61b0ac 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                rcu_read_lock();
-               if (ioc->aic && ioc->aic->dtor)
-                       ioc->aic->dtor(ioc->aic);
                cfq_dtor(ioc);
                rcu_read_unlock();
 
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
        task_unlock(task);
 
        if (atomic_dec_and_test(&ioc->nr_tasks)) {
-               if (ioc->aic && ioc->aic->exit)
-                       ioc->aic->exit(ioc->aic);
                cfq_exit(ioc);
 
        }
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
                ret->ioprio = 0;
                ret->last_waited = jiffies; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
-               ret->aic = NULL;
                INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
                INIT_HLIST_HEAD(&ret->cic_list);
                ret->ioc_data = NULL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6ae118d6e193..5eeb9e0d256e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -505,21 +505,30 @@ static unsigned int lcm(unsigned int a, unsigned int b)
 
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
- * @t:  the stacking driver limits (top)
- * @b:  the underlying queue limits (bottom)
+ * @t:  the stacking driver limits (top device)
+ * @b:  the underlying queue limits (bottom, component device)
  * @offset:  offset to beginning of data within component device
  *
  * Description:
- *    Merges two queue_limit structs.  Returns 0 if alignment didn't
- *    change.  Returns -1 if adding the bottom device caused
- *    misalignment.
+ *    This function is used by stacking drivers like MD and DM to ensure
+ *    that all component devices have compatible block sizes and
+ *    alignments.  The stacking driver must provide a queue_limits
+ *    struct (top) and then iteratively call the stacking function for
+ *    all component (bottom) devices.  The stacking function will
+ *    attempt to combine the values and ensure proper alignment.
+ *
+ *    Returns 0 if the top and bottom queue_limits are compatible.  The
+ *    top device's block sizes and alignment offsets may be adjusted to
+ *    ensure alignment with the bottom device.  If no compatible sizes
+ *    and alignments exist, -1 is returned and the resulting top
+ *    queue_limits will have the misaligned flag set to indicate that
+ *    the alignment_offset is undefined.
  */
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                      sector_t offset)
 {
-       int ret;
-
-       ret = 0;
+       sector_t alignment;
+       unsigned int top, bottom, ret = 0;
 
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -537,6 +546,26 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
 
+       t->misaligned |= b->misaligned;
+
+       alignment = queue_limit_alignment_offset(b, offset);
+
+       /* Bottom device has different alignment.  Check that it is
+        * compatible with the current top alignment.
+        */
+       if (t->alignment_offset != alignment) {
+
+               top = max(t->physical_block_size, t->io_min)
+                       + t->alignment_offset;
+               bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+               /* Verify that top and bottom intervals line up */
+               if (max(top, bottom) & (min(top, bottom) - 1)) {
+                       t->misaligned = 1;
+                       ret = -1;
+               }
+       }
+
        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);
 
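
The interval check added above is worth a worked example: top and bottom are each the effective chunk size (the larger of physical block size and io_min) plus that side's alignment offset, and when the smaller value is a power of two, max(top, bottom) & (min(top, bottom) - 1) is zero exactly when the larger is a multiple of the smaller. A standalone userspace sketch with hypothetical values:

    #include <stdio.h>

    /* Sketch of the interval-compatibility test used above.  Block
     * sizes are powers of two, so "big & (small - 1)" is zero exactly
     * when big is a multiple of small.
     */
    static int intervals_line_up(unsigned int top, unsigned int bottom)
    {
            unsigned int big = top > bottom ? top : bottom;
            unsigned int small = top > bottom ? bottom : top;

            return (big & (small - 1)) == 0;
    }

    int main(void)
    {
            /* 4KiB chunk at offset 0 vs 512B chunk at offset 0: fine */
            printf("%d\n", intervals_line_up(4096 + 0, 512 + 0));   /* 1 */
            /* 4KiB at offset 0 vs 4KiB at offset 512: misaligned */
            printf("%d\n", intervals_line_up(4096 + 0, 4096 + 512)); /* 0 */
            return 0;
    }
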
@@ -544,58 +573,95 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
+       t->io_opt = lcm(t->io_opt, b->io_opt);
+
        t->no_cluster |= b->no_cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
 
-       /* Bottom device offset aligned? */
-       if (offset &&
-           (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+       /* Physical block size a multiple of the logical block size? */
+       if (t->physical_block_size & (t->logical_block_size - 1)) {
+               t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }
 
-       /*
-        * Temporarily disable discard granularity. It's currently buggy
-        * since we default to 0 for discard_granularity, hence this
-        * "failure" will always trigger for non-zero offsets.
-        */
-#if 0
-       if (offset &&
-           (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
-               t->discard_misaligned = 1;
+       /* Minimum I/O a multiple of the physical block size? */
+       if (t->io_min & (t->physical_block_size - 1)) {
+               t->io_min = t->physical_block_size;
+               t->misaligned = 1;
                ret = -1;
        }
-#endif
 
-       /* If top has no alignment offset, inherit from bottom */
-       if (!t->alignment_offset)
-               t->alignment_offset =
-                       b->alignment_offset & (b->physical_block_size - 1);
+       /* Optimal I/O a multiple of the physical block size? */
+       if (t->io_opt & (t->physical_block_size - 1)) {
+               t->io_opt = 0;
+               t->misaligned = 1;
+               ret = -1;
+       }
 
-       if (!t->discard_alignment)
-               t->discard_alignment =
-                       b->discard_alignment & (b->discard_granularity - 1);
+       /* Find lowest common alignment_offset */
+       t->alignment_offset = lcm(t->alignment_offset, alignment)
+               & (max(t->physical_block_size, t->io_min) - 1);
 
-       /* Top device aligned on logical block boundary? */
+       /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }
 
-       /* Find lcm() of optimal I/O size and granularity */
-       t->io_opt = lcm(t->io_opt, b->io_opt);
-       t->discard_granularity = lcm(t->discard_granularity,
-                                    b->discard_granularity);
+       /* Discard alignment and granularity */
+       if (b->discard_granularity) {
+               unsigned int granularity = b->discard_granularity;
+               offset &= granularity - 1;
 
-       /* Verify that optimal I/O size is a multiple of io_min */
-       if (t->io_min && t->io_opt % t->io_min)
-               ret = -1;
+               alignment = (granularity + b->discard_alignment - offset)
+                       & (granularity - 1);
+
+               if (t->discard_granularity != 0 &&
+                   t->discard_alignment != alignment) {
+                       top = t->discard_granularity + t->discard_alignment;
+                       bottom = b->discard_granularity + alignment;
+
+                       /* Verify that top and bottom intervals line up */
+                       if (max(top, bottom) & (min(top, bottom) - 1))
+                               t->discard_misaligned = 1;
+               }
+
+               t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
+                                                     b->max_discard_sectors);
+               t->discard_granularity = max(t->discard_granularity,
+                                            b->discard_granularity);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) &
+                       (t->discard_granularity - 1);
+       }
 
        return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
 /**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:  the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+                      sector_t start)
+{
+       struct request_queue *bq = bdev_get_queue(bdev);
+
+       start += get_start_sect(bdev);
+
+       return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
  * disk_stack_limits - adjust queue limits for stacked drivers
  * @disk:  MD/DM gendisk (top)
  * @bdev:  the underlying block device (bottom)
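
The new bdev_stack_limits() wrapper exists so a stacking driver can hand over a partition and have the partition offset (get_start_sect()) folded into the data offset automatically, rather than every caller remembering to add it. A hedged sketch of the intended calling pattern; the loop and names are illustrative, not actual MD/DM code:

    /* Illustrative only: fold each component device's limits into the
     * top-level queue_limits.  data_start is the first data sector
     * within each component, in 512-byte sectors; the helper adds the
     * partition offset internally before calling blk_stack_limits().
     */
    static int stack_all_components(struct queue_limits *limits,
                                    struct block_device **parts, int n,
                                    sector_t data_start)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (bdev_stack_limits(limits, parts[i], data_start) < 0)
                            return -1; /* limits->misaligned is now set */

            return 0;
    }
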
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e2f80463ed0d..ee130f14d1fc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -208,8 +208,6 @@ struct cfq_data {
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group root_group;
-       /* Number of active cfq groups on group service tree */
-       int nr_groups;
 
        /*
         * The priority currently being served
@@ -294,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
                                             enum wl_prio_t prio,
-                                            enum wl_type_t type,
-                                            struct cfq_data *cfqd)
+                                            enum wl_type_t type)
 {
        if (!cfqg)
                return NULL;
@@ -842,7 +839,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
        __cfq_group_service_tree_add(st, cfqg);
        cfqg->on_st = true;
-       cfqd->nr_groups++;
        st->total_weight += cfqg->weight;
 }
 
@@ -863,7 +859,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfqg->on_st = false;
-       cfqd->nr_groups--;
        st->total_weight -= cfqg->weight;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
@@ -1150,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 #endif
 
        service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-                                               cfqq_type(cfqq), cfqd);
+                                               cfqq_type(cfqq));
        if (cfq_class_idle(cfqq)) {
                rb_key = CFQ_IDLE_DELAY;
                parent = rb_last(&service_tree->rb);
@@ -1513,9 +1508,6 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
 
-       /* Deny merge if bio and rq don't belong to same cfq group */
-       if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0))
-               return false;
        /*
         * Disallow merge of a sync bio into an async request.
         */
@@ -1616,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
        struct cfq_rb_root *service_tree =
                service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-                                       cfqd->serving_type, cfqd);
+                                       cfqd->serving_type);
 
        if (!cfqd->rq_queued)
                return NULL;
@@ -1675,13 +1667,17 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 #define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                              struct request *rq)
+                              struct request *rq, bool for_preempt)
 {
        sector_t sdist = cfqq->seek_mean;
 
        if (!sample_valid(cfqq->seek_samples))
                sdist = CFQQ_SEEK_THR;
 
+       /* if seek_mean is big, using it as close criteria is meaningless */
+       if (sdist > CFQQ_SEEK_THR && !for_preempt)
+               sdist = CFQQ_SEEK_THR;
+
        return cfq_dist_from_last(cfqd, rq) <= sdist;
 }
 
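
The new for_preempt flag changes what "close" means: for ordinary co-operator detection, a queue with a very large mean seek distance would otherwise classify almost any request as close, so the threshold is capped; preemption checks keep the raw mean. A standalone sketch of the effective-threshold logic, with CFQQ_SEEK_THR assumed to be 8 * 1024 sectors as in this kernel and plain comparisons standing in for the kernel helpers:

    #include <stdio.h>

    #define CFQQ_SEEK_THR (8 * 1024) /* assumed value, in sectors */

    /* Mirror of the capping logic above, in userspace for illustration */
    static unsigned long effective_sdist(unsigned long seek_mean,
                                         int samples_valid, int for_preempt)
    {
            unsigned long sdist = seek_mean;

            if (!samples_valid)
                    sdist = CFQQ_SEEK_THR;
            /* cap a very seeky queue's threshold unless preempting */
            if (sdist > CFQQ_SEEK_THR && !for_preempt)
                    sdist = CFQQ_SEEK_THR;

            return sdist;
    }

    int main(void)
    {
            printf("%lu\n", effective_sdist(1000000, 1, 0)); /* 8192 */
            printf("%lu\n", effective_sdist(1000000, 1, 1)); /* 1000000 */
            return 0;
    }
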
@@ -1709,7 +1705,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
         * will contain the closest sector.
         */
        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
                return __cfqq;
 
        if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1720,7 +1716,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
                return NULL;
 
        __cfqq = rb_entry(node, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
                return __cfqq;
 
        return NULL;
@@ -1963,8 +1959,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-                               struct cfq_group *cfqg, enum wl_prio_t prio,
-                               bool prio_changed)
+                               struct cfq_group *cfqg, enum wl_prio_t prio)
 {
        struct cfq_queue *queue;
        int i;
@@ -1972,24 +1967,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
        unsigned long lowest_key = 0;
        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
-       if (prio_changed) {
-               /*
-                * When priorities switched, we prefer starting
-                * from SYNC_NOIDLE (first choice), or just SYNC
-                * over ASYNC
-                */
-               if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-                       return cur_best;
-               cur_best = SYNC_WORKLOAD;
-               if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-                       return cur_best;
-
-               return ASYNC_WORKLOAD;
-       }
-
-       for (i = 0; i < 3; ++i) {
-               /* otherwise, select the one with lowest rb_key */
-               queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
+       for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+               /* select the one with lowest rb_key */
+               queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
                if (queue &&
                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
                        lowest_key = queue->rb_key;
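
The rewritten loop replaces the magic constant 3 with the enum bound SYNC_WORKLOAD, making explicit that it scans every workload type and keeps the one whose first queued request carries the earliest rb_key. A userspace sketch of that selection; the enum ordering is assumed from this kernel's cfq-iosched.c and a plain < stands in for the wraparound-safe time_before():

    #include <stdio.h>
    #include <stdbool.h>

    /* Assumed ordering from cfq-iosched.c */
    enum wl_type_t { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };

    int main(void)
    {
            /* hypothetical rb_key of each service tree's head request */
            unsigned long first_key[] = { 300, 100, 200 };
            bool key_valid = false;
            unsigned long lowest_key = 0;
            enum wl_type_t i, cur_best = SYNC_NOIDLE_WORKLOAD;

            for (i = 0; i <= SYNC_WORKLOAD; ++i) {
                    if (!key_valid || first_key[i] < lowest_key) {
                            lowest_key = first_key[i];
                            cur_best = i;
                            key_valid = true;
                    }
            }
            printf("chosen workload type: %d\n", cur_best); /* 1 */
            return 0;
    }
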
@@ -2003,8 +1983,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-       enum wl_prio_t previous_prio = cfqd->serving_prio;
-       bool prio_changed;
        unsigned slice;
        unsigned count;
        struct cfq_rb_root *st;
@@ -2032,24 +2010,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
         * expiration time
         */
-       prio_changed = (cfqd->serving_prio != previous_prio);
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-                               cfqd);
+       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
        count = st->count;
 
        /*
-        * If priority didn't change, check workload expiration,
-        * and that we still have other queues ready
+        * check workload expiration, and that we still have other queues ready
         */
-       if (!prio_changed && count &&
-           !time_after(jiffies, cfqd->workload_expires))
+       if (count && !time_after(jiffies, cfqd->workload_expires))
                return;
 
        /* otherwise select new workload type */
        cfqd->serving_type =
-               cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-                               cfqd);
+               cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
        count = st->count;
 
        /*
@@ -3104,6 +3077,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                return true;
 
        /*
+        * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+        */
+       if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+               return false;
+
+       /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
@@ -3143,7 +3122,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
-       if (cfq_rq_close(cfqd, cfqq, rq))
+       if (cfq_rq_close(cfqd, cfqq, rq, true))
                return true;
 
        return false;
diff --git a/block/genhd.c b/block/genhd.c
index b11a4ad7d571..d13ba76a169c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
        struct gendisk *disk = dev_to_disk(dev);
 
-       return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+       return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
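
The %u to %d switch is not cosmetic: with the discard-alignment rework earlier in this series, the discard alignment reported for a misaligned queue can be negative, and %u would render -1 as 4294967295 in sysfs. A self-contained sketch of the presumed behavior motivating the change; the struct and helper names here are hypothetical stand-ins, not quoted from the kernel:

    #include <stdio.h>

    /* Hypothetical mirror of the accessor's presumed shape after this
     * series: a queue flagged discard_misaligned reports -1, so the
     * sysfs show routine must format a signed value.
     */
    struct queue_limits_ex {
            int discard_misaligned;
            int discard_alignment;
    };

    static int example_discard_alignment(const struct queue_limits_ex *l)
    {
            if (l->discard_misaligned)
                    return -1;

            return l->discard_alignment;
    }

    int main(void)
    {
            struct queue_limits_ex misaligned = { 1, 0 };

            printf("%d\n", example_discard_alignment(&misaligned));  /* -1 */
            printf("%u\n", (unsigned int)example_discard_alignment(&misaligned));
            /* 4294967295: what the old %u format would have shown */
            return 0;
    }
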