author    Linus Torvalds <torvalds@linux-foundation.org>    2014-08-29 14:21:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-08-29 14:21:49 -0400
commit    522a15db959f934ac096673e0c4600db0af5b337
tree      b26d918fcccfd9523d7577c23dc7e14782f79534
parent    9e36c633951acb33e250e75b209b04217ce7b86c
parent    7b5af5cffce569298b1d03af1ddf1dc43570ab42
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "A smaller collection of fixes that have come up since the initial
  merge window pull request. This contains:

   - error handling cleanup and support for larger than 16 byte cdbs in
     sg_io() from Christoph. The latter just matches what bsg and
     friends support, sg_io() got left out in the merge.

   - an option for brd to expose partitions in /proc/partitions. They
     are hidden by default for compat reasons. From Dmitry Monakhov.

   - a few blk-mq fixes from me - killing a dead/unused flag, fix for
     merging happening even if turned off, and correction of a few
     comments.

   - removal of unnecessary ->owner setting in systemace. From Michal
     Simek.

   - two related fixes for a problem with nesting freezing of queues in
     blk-mq. One from Ming Lei removing an unnecessary freeze
     operation, and another from Tejun fixing the nesting regression
     introduced in the merge window.

   - fix for a BUG_ON() at bio_endio time when protection info is
     attached and the IO has an error. From Sagi Grimberg.

   - two scsi_ioctl bug fixes for regressions with scsi-mq from Tony
     Battersby.

   - a cfq weight update fix and subsequent comment update from
     Toshiaki Makita"

* 'for-linus' of git://git.kernel.dk/linux-block:
  cfq-iosched: Add comments on update timing of weight
  cfq-iosched: Fix wrong children_weight calculation
  block: fix error handling in sg_io
  fix regression in SCSI_IOCTL_SEND_COMMAND
  scsi-mq: fix requests that use a separate CDB buffer
  block: support > 16 byte CDBs for SG_IO
  block: cleanup error handling in sg_io
  brd: add ram disk visibility option
  block: systemace: Remove .owner field for driver
  blk-mq: blk_mq_freeze_queue() should allow nesting
  blk-mq: correct a few wrong/bad comments
  block: Fix BUG_ON when pi errors occur
  blk-mq: don't allow merges if turned off for the queue
  blk-mq: get rid of unused BLK_MQ_F_SHOULD_SORT flag
  blk-mq: fix WARNING "percpu_ref_kill() called more than once!"
-rw-r--r--  block/bio-integrity.c    |  2
-rw-r--r--  block/blk-core.c         |  1
-rw-r--r--  block/blk-mq.c           | 36
-rw-r--r--  block/cfq-iosched.c      | 19
-rw-r--r--  block/scsi_ioctl.c       | 40
-rw-r--r--  drivers/block/brd.c      |  6
-rw-r--r--  drivers/block/xsysace.c  |  1
-rw-r--r--  drivers/scsi/scsi_lib.c  |  1
-rw-r--r--  include/linux/blk-mq.h   |  7
9 files changed, 74 insertions(+), 39 deletions(-)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index bc423f7b02da..f14b4abbebd8 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error)
 	 */
 	if (error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio(bio, error);
+		bio_endio_nodec(bio, error);
 
 		return;
 	}
diff --git a/block/blk-core.c b/block/blk-core.c
index c359d72e9d76..bf930f481d43 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1252,7 +1252,6 @@ void blk_rq_set_block_pc(struct request *rq)
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
 	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-	rq->cmd = rq->__cmd;
 }
 EXPORT_SYMBOL(blk_rq_set_block_pc);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5189cb1e478a..4aac82615a46 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
  */
 void blk_mq_freeze_queue(struct request_queue *q)
 {
+	bool freeze;
+
 	spin_lock_irq(q->queue_lock);
-	q->mq_freeze_depth++;
+	freeze = !q->mq_freeze_depth++;
 	spin_unlock_irq(q->queue_lock);
 
-	percpu_ref_kill(&q->mq_usage_counter);
-	blk_mq_run_queues(q, false);
+	if (freeze) {
+		percpu_ref_kill(&q->mq_usage_counter);
+		blk_mq_run_queues(q, false);
+	}
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	bool wake = false;
+	bool wake;
 
 	spin_lock_irq(q->queue_lock);
 	wake = !--q->mq_freeze_depth;
@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	/* tag was already set */
 	rq->errors = 0;
 
+	rq->cmd = rq->__cmd;
+
 	rq->extra_len = 0;
 	rq->sense_len = 0;
 	rq->resid_len = 0;
@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 	blk_account_io_start(rq, 1);
 }
 
+static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
+{
+	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
+		!blk_queue_nomerges(hctx->queue);
+}
+
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 					 struct blk_mq_ctx *ctx,
 					 struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = hctx->queue;
-
-	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
+	if (!hctx_allow_merges(hctx)) {
 		blk_mq_bio_to_request(rq, bio);
 		spin_lock(&ctx->lock);
 insert_rq:
@@ -1082,6 +1092,8 @@ insert_rq:
 		spin_unlock(&ctx->lock);
 		return false;
 	} else {
+		struct request_queue *q = hctx->queue;
+
 		spin_lock(&ctx->lock);
 		if (!blk_mq_attempt_merge(q, ctx, bio)) {
 			blk_mq_bio_to_request(rq, bio);
@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
 		hctx->tags = set->tags[i];
 
 		/*
-		 * Allocate space for all possible cpus to avoid allocation in
+		 * Allocate space for all possible cpus to avoid allocation at
 		 * runtime
 		 */
 		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		/*
-		 * If not software queues are mapped to this hardware queue,
-		 * disable it and free the request entries
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
 			struct blk_mq_tag_set *set = q->tag_set;
@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
 
-	blk_mq_freeze_queue(q);
-
 	mutex_lock(&set->tag_list_lock);
 	list_del_init(&q->tag_set_list);
 	blk_mq_update_tag_set_depth(set);
 	mutex_unlock(&set->tag_list_lock);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
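
For illustration, the freeze/unfreeze change above reduces to a small stand-alone model: only the outermost blk_mq_freeze_queue() call kills the usage counter, and only the matching outermost unfreeze revives it, so freeze/unfreeze pairs may nest. The sketch below is user-space C, not kernel code; a pthread mutex stands in for the queue lock, and the ref_kill()/ref_reinit() stubs stand in for percpu_ref_kill() and the wakeup path.

/* freeze_depth_sketch.c -- toy model of nested queue freezing */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int freeze_depth;

static void ref_kill(void)   { printf("kill usage counter\n");   } /* stand-in for percpu_ref_kill() */
static void ref_reinit(void) { printf("reinit usage counter\n"); } /* stand-in for reviving the counter */

static void freeze_queue(void)
{
	bool freeze;

	pthread_mutex_lock(&lock);
	freeze = !freeze_depth++;	/* true only for the outermost freeze */
	pthread_mutex_unlock(&lock);

	if (freeze)
		ref_kill();
	/* the real code then waits for in-flight requests to drain */
}

static void unfreeze_queue(void)
{
	bool wake;

	pthread_mutex_lock(&lock);
	wake = !--freeze_depth;		/* true only when the last freeze is undone */
	pthread_mutex_unlock(&lock);

	if (wake)
		ref_reinit();
}

int main(void)
{
	freeze_queue();		/* outer freeze: kills the counter */
	freeze_queue();		/* nested freeze: only bumps the depth */
	unfreeze_queue();	/* nested unfreeze: only drops the depth */
	unfreeze_queue();	/* outer unfreeze: revives the counter */
	return 0;
}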
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cadc37841744..3f31cf9508e6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	rb_insert_color(&cfqg->rb_node, &st->rb);
 }
 
+/*
+ * This has to be called only on activation of cfqg
+ */
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
 	if (cfqg->new_weight) {
 		cfqg->weight = cfqg->new_weight;
 		cfqg->new_weight = 0;
 	}
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
 	if (cfqg->new_leaf_weight) {
 		cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	/* add to the service tree */
 	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
 
-	cfq_update_group_weight(cfqg);
+	/*
+	 * Update leaf_weight. We cannot update weight at this point
+	 * because cfqg might already have been activated and is
+	 * contributing its current weight to the parent's child_weight.
+	 */
+	cfq_update_group_leaf_weight(cfqg);
 	__cfq_group_service_tree_add(st, cfqg);
 
 	/*
@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	 */
 	while ((parent = cfqg_parent(pos))) {
 		if (propagate) {
+			cfq_update_group_weight(pos);
 			propagate = !parent->nr_active++;
 			parent->children_weight += pos->weight;
 		}
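
For illustration, the comment added above can be backed by a toy model of the accounting it protects: a group's weight feeds the parent's children_weight when the group is activated, so rewriting the weight while the group is still contributing makes the later subtraction disagree with the earlier addition. The sketch below is plain C, not cfq code, and simplifies the real sequence.

/* children_weight_sketch.c -- toy model of the accounting mismatch */
#include <stdio.h>

struct group { int weight; int new_weight; };

int main(void)
{
	struct group child = { .weight = 100, .new_weight = 0 };
	int children_weight = 0;

	children_weight += child.weight;	/* activation: parent accounts 100 */

	child.new_weight = 500;			/* user changes the weight ... */
	child.weight = child.new_weight;	/* ... and it used to be applied while active */
	child.new_weight = 0;

	children_weight -= child.weight;	/* deactivation: parent subtracts 500 */

	/* prints -400 instead of 0 -- the mismatch the fix avoids */
	printf("children_weight = %d\n", children_weight);
	return 0;
}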
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 51bf5155ee75..9b8eaeca6a79 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	r = blk_rq_unmap_user(bio);
 	if (!ret)
 		ret = r;
-	blk_put_request(rq);
 
 	return ret;
 }
@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	if (hdr->interface_id != 'S')
 		return -EINVAL;
-	if (hdr->cmd_len > BLK_MAX_CDB)
-		return -EINVAL;
 
 	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
 		at_head = 1;
 
+	ret = -ENOMEM;
 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
 	if (!rq)
-		return -ENOMEM;
+		goto out;
 	blk_rq_set_block_pc(rq);
 
-	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
-		blk_put_request(rq);
-		return -EFAULT;
+	if (hdr->cmd_len > BLK_MAX_CDB) {
+		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
+		if (!rq->cmd)
+			goto out_put_request;
 	}
 
+	ret = -EFAULT;
+	if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+		goto out_free_cdb;
+
+	ret = 0;
 	if (hdr->iovec_count) {
 		size_t iov_data_len;
 		struct iovec *iov = NULL;
@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 					   0, NULL, &iov);
 		if (ret < 0) {
 			kfree(iov);
-			goto out;
+			goto out_free_cdb;
 		}
 
 		iov_data_len = ret;
@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 				      GFP_KERNEL);
 
 	if (ret)
-		goto out;
+		goto out_free_cdb;
 
 	bio = rq->bio;
 	memset(sense, 0, sizeof(sense));
@@ -376,9 +380,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 
 	hdr->duration = jiffies_to_msecs(jiffies - start_time);
 
-	return blk_complete_sghdr_rq(rq, hdr, bio);
-out:
+	ret = blk_complete_sghdr_rq(rq, hdr, bio);
+
+out_free_cdb:
+	if (rq->cmd != rq->__cmd)
+		kfree(rq->cmd);
+out_put_request:
 	blk_put_request(rq);
+out:
 	return ret;
 }
 
@@ -448,6 +457,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	}
 
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+	if (!rq) {
+		err = -ENOMEM;
+		goto error;
+	}
+	blk_rq_set_block_pc(rq);
 
 	cmdlen = COMMAND_SIZE(opcode);
 
@@ -501,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	blk_rq_set_block_pc(rq);
 
 	blk_execute_rq(q, disk, rq, 0);
 
@@ -521,7 +534,8 @@ out:
 
 error:
 	kfree(buffer);
-	blk_put_request(rq);
+	if (rq)
+		blk_put_request(rq);
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
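
For illustration, with the up-front BLK_MAX_CDB check replaced by a kzalloc()'d command buffer, sg_io() now accepts cmd_len values larger than 16 bytes, matching what bsg already allowed. The user-space sketch below shows such a call; the device node and the mostly-zeroed 32-byte variable-length CDB are placeholders, not a meaningful command.

/* sg_io_long_cdb_sketch.c -- issue a >16 byte CDB through SG_IO */
#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char cdb[32] = { 0x7f };	/* variable-length CDB opcode, rest zeroed */
	unsigned char sense[64];
	struct sg_io_hdr hdr;

	int fd = open("/dev/sda", O_RDONLY);	/* hypothetical device node */
	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id    = 'S';
	hdr.cmd_len         = sizeof(cdb);	/* 32 bytes: previously rejected with -EINVAL */
	hdr.cmdp            = cdb;
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data transfer in this sketch */
	hdr.sbp             = sense;
	hdr.mx_sb_len       = sizeof(sense);
	hdr.timeout         = 10000;		/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");

	close(fd);
	return 0;
}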
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c7d138eca731..3598110d2cef 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -442,12 +442,15 @@ static int rd_nr;
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
 static int max_part;
 static int part_shift;
+static int part_show = 0;
 module_param(rd_nr, int, S_IRUGO);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
 module_param(rd_size, int, S_IRUGO);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
 module_param(max_part, int, S_IRUGO);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
+module_param(part_show, int, S_IRUGO);
+MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
 MODULE_ALIAS("rd");
@@ -501,7 +504,8 @@ static struct brd_device *brd_alloc(int i)
 	disk->fops = &brd_fops;
 	disk->private_data = brd;
 	disk->queue = brd->brd_queue;
-	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+	if (!part_show)
+		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 	sprintf(disk->disk_name, "ram%d", i);
 	set_capacity(disk, rd_size * 2);
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index ab3ea62e5dfc..c4328d9d9981 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1203,7 +1203,6 @@ static struct platform_driver ace_platform_driver = {
 	.probe = ace_probe,
 	.remove = ace_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "xsysace",
 		.of_match_table = ace_of_match,
 	},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ce62e8798cc8..d837dc180522 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1808,7 +1808,6 @@ static int scsi_mq_prep_fn(struct request *req)
 
 	cmd->tag = req->tag;
 
-	req->cmd = req->__cmd;
 	cmd->cmnd = req->cmd;
 	cmd->prot_op = SCSI_PROT_NORMAL;
 
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index eb726b9c5762..a1e31f274fcd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -127,10 +127,9 @@ enum {
 	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */
 
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
-	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
-	BLK_MQ_F_TAG_SHARED	= 1 << 2,
-	BLK_MQ_F_SG_MERGE	= 1 << 3,
-	BLK_MQ_F_SYSFS_UP	= 1 << 4,
+	BLK_MQ_F_TAG_SHARED	= 1 << 1,
+	BLK_MQ_F_SG_MERGE	= 1 << 2,
+	BLK_MQ_F_SYSFS_UP	= 1 << 3,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,