author    Linus Torvalds <torvalds@linux-foundation.org>  2017-06-02 14:44:46 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-06-02 14:44:46 -0400
commit    bb329859effa06736c39adf6ce622e86e38fc274 (patch)
tree      bdac02a67489ac72de09916cfede54c8f85f1bbc
parent    46356945fc2bc932dd224af60c12ece2672479be (diff)
parent    b425e50492583b10cceb388af36ef0bd3bdf842a (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A set of fixes that should go into the next -rc. This contains:

   - A use-after-free in the request_list exit for the legacy IO path,
     from Bart.

   - A fix for CFQ, fixing a recent regression with the conversion to
     higher resolution timing for iops mode. From Hou Tao.

   - A single fix for nbd, split in two patches, fixing a leak of a
     data structure.

   - A regression fix from Keith, ensuring that callers of
     blk_mq_update_nr_hw_queues() hold the right lock"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Avoid that blk_exit_rl() triggers a use-after-free
  cfq-iosched: fix the delay of cfq_group's vdisktime under iops mode
  blk-mq: Take tagset lock when updating hw queues
  nbd: don't leak nbd_config
  nbd: nbd_reset() call in nbd_dev_add() is redundant
 block/blk-cgroup.c  |  2 +-
 block/blk-core.c    | 10 ++++++++--
 block/blk-mq.c      | 10 +++++++++-
 block/blk-sysfs.c   |  2 +-
 block/blk.h         |  2 +-
 block/cfq-iosched.c | 17 +++++++++++++++--
 drivers/block/nbd.c | 15 +++++----------
 7 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7c2947128f58..0480892e97e5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);
diff --git a/block/blk-core.c b/block/blk-core.c
index c7068520794b..a7421b772d0e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
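
The use-after-free fix pairs a reference acquire in blk_init_rl() with a release in blk_exit_rl(): every non-root request_list now pins the queue it points into, so the queue cannot be freed while the list is still live. Below is a minimal userspace C sketch of that pattern; the struct and function names (queue, rlist, queue_get/queue_put) are illustrative stand-ins, not the kernel's actual types.

	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace stand-ins for request_queue / request_list. */
	struct queue {
		int refcount;
	};

	struct rlist {
		struct queue *q;	/* owner whose lifetime must stay pinned */
	};

	static void queue_get(struct queue *q)
	{
		q->refcount++;
	}

	static void queue_put(struct queue *q)
	{
		if (--q->refcount == 0) {
			printf("queue freed\n");
			free(q);
		}
	}

	/* Like the fixed blk_init_rl(): take a reference on the queue ... */
	static void rlist_init(struct rlist *rl, struct queue *q)
	{
		rl->q = q;
		queue_get(q);
	}

	/* ... and like the fixed blk_exit_rl(): drop it only once the list is
	 * done touching the queue, so a use-after-free is impossible. */
	static void rlist_exit(struct rlist *rl)
	{
		queue_put(rl->q);
		rl->q = NULL;
	}

	int main(void)
	{
		struct queue *q = calloc(1, sizeof(*q));
		struct rlist rl;

		q->refcount = 1;	/* creator's reference */
		rlist_init(&rl, q);
		queue_put(q);		/* creator lets go; rl still pins the queue */
		rlist_exit(&rl);	/* last reference dropped, freed here */
		return 0;
	}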
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2224ffd225d..1bcccedcc74f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+							int nr_hw_queues)
 {
 	struct request_queue *q;
 
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
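
Keith's fix uses a common kernel idiom: split the function into a public wrapper that takes the lock and a double-underscore helper that assumes the caller already holds it, so internal paths that already hold set->tag_list_lock can call the helper directly. A small userspace sketch of the same idiom, with hypothetical names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t tag_list_lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_hw_queues;

	/* Double-underscore variant: caller must already hold tag_list_lock. */
	static void __update_nr_hw_queues(int nr)
	{
		nr_hw_queues = nr;	/* stands in for the real queue-remap work */
	}

	/* Public variant takes the lock itself, mirroring the fixed
	 * blk_mq_update_nr_hw_queues(). */
	static void update_nr_hw_queues(int nr)
	{
		pthread_mutex_lock(&tag_list_lock);
		__update_nr_hw_queues(nr);
		pthread_mutex_unlock(&tag_list_lock);
	}

	int main(void)
	{
		update_nr_hw_queues(4);
		printf("nr_hw_queues = %d\n", nr_hw_queues);
		return 0;
	}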
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 712b018e9f54..283da7fbe034 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
diff --git a/block/blk.h b/block/blk.h
index 2ed70228e44f..83c8e1100525 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index da69b079725f..b7e9c7feeab2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);
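
The regression fixed here is a units mismatch: the conversion of CFQ to nanosecond timing turned CFQ_IDLE_DELAY into NSEC_PER_SEC / 5, but under iops mode a group's vdisktime is accounted in jiffy-scaled units rather than nanoseconds, so a newly added group was pushed far past the end of the service tree. A quick demo of the magnitude, assuming HZ = 250 (a common config; the exact value varies by build):

	#include <stdio.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define HZ		250	/* assumed config value for this demo */

	int main(void)
	{
		unsigned long long slice_delay = NSEC_PER_SEC / 5;	/* ns-based */
		unsigned long long iops_delay = HZ / 5;			/* jiffy-based */

		printf("slice mode delay: %llu\n", slice_delay);	/* 200000000 */
		printf("iops mode delay:  %llu\n", iops_delay);		/* 50 */
		printf("overshoot factor: %llu\n", slice_delay / iops_delay);
		return 0;
	}

With these numbers the nanosecond constant overshoots by a factor of four million, which is why the fix reverts to the pre-conversion HZ / 5 value whenever iops_mode() is active.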
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c29447..f3f191ba8ca4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-	nbd->config = NULL;
-	nbd->tag_set.timeout = 0;
-	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
 		}
 		kfree(config->socks);
 	}
-	nbd_reset(nbd);
+	kfree(nbd->config);
+	nbd->config = NULL;
+
+	nbd->tag_set.timeout = 0;
+	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 	mutex_unlock(&nbd->config_lock);
 	nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
 	disk->fops = &nbd_fops;
 	disk->private_data = nbd;
 	sprintf(disk->disk_name, "nbd%d", index);
-	nbd_reset(nbd);
 	add_disk(disk);
 	nbd_total_devices++;
 	return index;
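
The leak came from nbd_reset() clearing nbd->config without freeing it; the fix folds the reset into nbd_config_put() and adds the missing kfree(), and the second patch drops the now-redundant nbd_reset() call in nbd_dev_add() (the freshly allocated device is already in that state). A minimal userspace sketch of the free-and-clear pattern, with illustrative names:

	#include <stdlib.h>

	struct config {
		int flags;		/* placeholder payload */
	};

	struct device {
		struct config *config;
		int timeout;
	};

	/* Mirrors the fixed nbd_config_put(): the old nbd_reset() only NULLed
	 * the pointer, leaking the allocation; the fix frees it first, then
	 * resets the per-device state inline. */
	static void device_config_put(struct device *dev)
	{
		free(dev->config);
		dev->config = NULL;	/* no dangling pointer left behind */
		dev->timeout = 0;
	}

	int main(void)
	{
		struct device dev = { .config = calloc(1, sizeof(struct config)) };

		device_config_put(&dev);	/* safe to call again: free(NULL) is a no-op */
		return 0;
	}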