summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2019-08-27 07:01:48 -0400
committerJens Axboe <axboe@kernel.dk>2019-08-27 12:40:20 -0400
commitcecf5d87ff2035127bb5a9ee054d0023a4a7cad3 (patch)
treee3376c4d945685f9507862908402f6d4f81ed343
parent58c898ba370e68d39470cd0d932b524682c1f9be (diff)
block: split .sysfs_lock into two locks
The kernfs built-in lock of 'kn->count' is held in the sysfs .show/.store path. Meanwhile, inside block's .show/.store callback, q->sysfs_lock is required. However, when mq & iosched kobjects are removed via blk_mq_unregister_dev() & elv_unregister_queue(), q->sysfs_lock is held too. This causes an AB-BA deadlock because the kernfs built-in lock of 'kn->count' is required inside kobject_del() too; see the lockdep warning[1]. On the other hand, it isn't necessary to acquire q->sysfs_lock for both blk_mq_unregister_dev() & elv_unregister_queue() because clearing the REGISTERED flag prevents stores to 'queue/scheduler' from happening. Also sysfs write(store) is exclusive, so it is not necessary to hold the lock for elv_unregister_queue() when it is called in the elevator-switching path. So split .sysfs_lock into two: one is still named .sysfs_lock and covers synchronizing .store, the other is named .sysfs_dir_lock and covers kobjects and related status changes. sysfs itself can handle the race between adding/removing kobjects and showing/storing attributes under kobjects. For switching the scheduler via storing to 'queue/scheduler', we use the queue flag QUEUE_FLAG_REGISTERED together with .sysfs_lock to avoid the race, so we can avoid holding .sysfs_lock during removing/adding kobjects. [1] lockdep warning ====================================================== WARNING: possible circular locking dependency detected 5.3.0-rc3-00044-g73277fc75ea0 #1380 Not tainted ------------------------------------------------------ rmmod/777 is trying to acquire lock: 00000000ac50e981 (kn->count#202){++++}, at: kernfs_remove_by_name_ns+0x59/0x72 but task is already holding lock: 00000000fb16ae21 (&q->sysfs_lock){+.+.}, at: blk_unregister_queue+0x78/0x10b which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&q->sysfs_lock){+.+.}: __lock_acquire+0x95f/0xa2f lock_acquire+0x1b4/0x1e8 __mutex_lock+0x14a/0xa9b blk_mq_hw_sysfs_show+0x63/0xb6 sysfs_kf_seq_show+0x11f/0x196 seq_read+0x2cd/0x5f2 vfs_read+0xc7/0x18c ksys_read+0xc4/0x13e do_syscall_64+0xa7/0x295 entry_SYSCALL_64_after_hwframe+0x49/0xbe -> #0 (kn->count#202){++++}: check_prev_add+0x5d2/0xc45 validate_chain+0xed3/0xf94 __lock_acquire+0x95f/0xa2f lock_acquire+0x1b4/0x1e8 __kernfs_remove+0x237/0x40b kernfs_remove_by_name_ns+0x59/0x72 remove_files+0x61/0x96 sysfs_remove_group+0x81/0xa4 sysfs_remove_groups+0x3b/0x44 kobject_del+0x44/0x94 blk_mq_unregister_dev+0x83/0xdd blk_unregister_queue+0xa0/0x10b del_gendisk+0x259/0x3fa null_del_dev+0x8b/0x1c3 [null_blk] null_exit+0x5c/0x95 [null_blk] __se_sys_delete_module+0x204/0x337 do_syscall_64+0xa7/0x295 entry_SYSCALL_64_after_hwframe+0x49/0xbe other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&q->sysfs_lock); lock(kn->count#202); lock(&q->sysfs_lock); lock(kn->count#202); *** DEADLOCK *** 2 locks held by rmmod/777: #0: 00000000e69bd9de (&lock){+.+.}, at: null_exit+0x2e/0x95 [null_blk] #1: 00000000fb16ae21 (&q->sysfs_lock){+.+.}, at: blk_unregister_queue+0x78/0x10b stack backtrace: CPU: 0 PID: 777 Comm: rmmod Not tainted 5.3.0-rc3-00044-g73277fc75ea0 #1380 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS ?-20180724_192412-buildhw-07.phx4 Call Trace: dump_stack+0x9a/0xe6 check_noncircular+0x207/0x251 ? print_circular_bug+0x32a/0x32a ? find_usage_backwards+0x84/0xb0 check_prev_add+0x5d2/0xc45 validate_chain+0xed3/0xf94 ? check_prev_add+0xc45/0xc45 ? mark_lock+0x11b/0x804 ? check_usage_forwards+0x1ca/0x1ca __lock_acquire+0x95f/0xa2f lock_acquire+0x1b4/0x1e8 ? kernfs_remove_by_name_ns+0x59/0x72 __kernfs_remove+0x237/0x40b ? kernfs_remove_by_name_ns+0x59/0x72 ? kernfs_next_descendant_post+0x7d/0x7d ? strlen+0x10/0x23 ? 
strcmp+0x22/0x44 kernfs_remove_by_name_ns+0x59/0x72 remove_files+0x61/0x96 sysfs_remove_group+0x81/0xa4 sysfs_remove_groups+0x3b/0x44 kobject_del+0x44/0x94 blk_mq_unregister_dev+0x83/0xdd blk_unregister_queue+0xa0/0x10b del_gendisk+0x259/0x3fa ? disk_events_poll_msecs_store+0x12b/0x12b ? check_flags+0x1ea/0x204 ? mark_held_locks+0x1f/0x7a null_del_dev+0x8b/0x1c3 [null_blk] null_exit+0x5c/0x95 [null_blk] __se_sys_delete_module+0x204/0x337 ? free_module+0x39f/0x39f ? blkcg_maybe_throttle_current+0x8a/0x718 ? rwlock_bug+0x62/0x62 ? __blkcg_punt_bio_submit+0xd0/0xd0 ? trace_hardirqs_on_thunk+0x1a/0x20 ? mark_held_locks+0x1f/0x7a ? do_syscall_64+0x4c/0x295 do_syscall_64+0xa7/0x295 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x7fb696cdbe6b Code: 73 01 c3 48 8b 0d 1d 20 0c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 008 RSP: 002b:00007ffec9588788 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0 RAX: ffffffffffffffda RBX: 0000559e589137c0 RCX: 00007fb696cdbe6b RDX: 000000000000000a RSI: 0000000000000800 RDI: 0000559e58913828 RBP: 0000000000000000 R08: 00007ffec9587701 R09: 0000000000000000 R10: 00007fb696d4eae0 R11: 0000000000000206 R12: 00007ffec95889b0 R13: 00007ffec95896b3 R14: 0000559e58913260 R15: 0000559e589137c0 Cc: Christoph Hellwig <hch@infradead.org> Cc: Hannes Reinecke <hare@suse.com> Cc: Greg KH <gregkh@linuxfoundation.org> Cc: Mike Snitzer <snitzer@redhat.com> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Signed-off-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-mq-sysfs.c12
-rw-r--r--block/blk-sysfs.c46
-rw-r--r--block/blk.h2
-rw-r--r--block/elevator.c55
-rw-r--r--include/linux/blkdev.h1
6 files changed, 84 insertions, 33 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 5d0d7441a443..77807a5d7f9e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -520,6 +520,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
520 mutex_init(&q->blk_trace_mutex); 520 mutex_init(&q->blk_trace_mutex);
521#endif 521#endif
522 mutex_init(&q->sysfs_lock); 522 mutex_init(&q->sysfs_lock);
523 mutex_init(&q->sysfs_dir_lock);
523 spin_lock_init(&q->queue_lock); 524 spin_lock_init(&q->queue_lock);
524 525
525 init_waitqueue_head(&q->mq_freeze_wq); 526 init_waitqueue_head(&q->mq_freeze_wq);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 6ddde3774ebe..a0d3ce30fa08 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -270,7 +270,7 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
270 struct blk_mq_hw_ctx *hctx; 270 struct blk_mq_hw_ctx *hctx;
271 int i; 271 int i;
272 272
273 lockdep_assert_held(&q->sysfs_lock); 273 lockdep_assert_held(&q->sysfs_dir_lock);
274 274
275 queue_for_each_hw_ctx(q, hctx, i) 275 queue_for_each_hw_ctx(q, hctx, i)
276 blk_mq_unregister_hctx(hctx); 276 blk_mq_unregister_hctx(hctx);
@@ -320,7 +320,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
320 int ret, i; 320 int ret, i;
321 321
322 WARN_ON_ONCE(!q->kobj.parent); 322 WARN_ON_ONCE(!q->kobj.parent);
323 lockdep_assert_held(&q->sysfs_lock); 323 lockdep_assert_held(&q->sysfs_dir_lock);
324 324
325 ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); 325 ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
326 if (ret < 0) 326 if (ret < 0)
@@ -354,7 +354,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
354 struct blk_mq_hw_ctx *hctx; 354 struct blk_mq_hw_ctx *hctx;
355 int i; 355 int i;
356 356
357 mutex_lock(&q->sysfs_lock); 357 mutex_lock(&q->sysfs_dir_lock);
358 if (!q->mq_sysfs_init_done) 358 if (!q->mq_sysfs_init_done)
359 goto unlock; 359 goto unlock;
360 360
@@ -362,7 +362,7 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
362 blk_mq_unregister_hctx(hctx); 362 blk_mq_unregister_hctx(hctx);
363 363
364unlock: 364unlock:
365 mutex_unlock(&q->sysfs_lock); 365 mutex_unlock(&q->sysfs_dir_lock);
366} 366}
367 367
368int blk_mq_sysfs_register(struct request_queue *q) 368int blk_mq_sysfs_register(struct request_queue *q)
@@ -370,7 +370,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
370 struct blk_mq_hw_ctx *hctx; 370 struct blk_mq_hw_ctx *hctx;
371 int i, ret = 0; 371 int i, ret = 0;
372 372
373 mutex_lock(&q->sysfs_lock); 373 mutex_lock(&q->sysfs_dir_lock);
374 if (!q->mq_sysfs_init_done) 374 if (!q->mq_sysfs_init_done)
375 goto unlock; 375 goto unlock;
376 376
@@ -381,7 +381,7 @@ int blk_mq_sysfs_register(struct request_queue *q)
381 } 381 }
382 382
383unlock: 383unlock:
384 mutex_unlock(&q->sysfs_lock); 384 mutex_unlock(&q->sysfs_dir_lock);
385 385
386 return ret; 386 return ret;
387} 387}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 5b0b5224cfd4..107513495220 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -938,6 +938,7 @@ int blk_register_queue(struct gendisk *disk)
938 int ret; 938 int ret;
939 struct device *dev = disk_to_dev(disk); 939 struct device *dev = disk_to_dev(disk);
940 struct request_queue *q = disk->queue; 940 struct request_queue *q = disk->queue;
941 bool has_elevator = false;
941 942
942 if (WARN_ON(!q)) 943 if (WARN_ON(!q))
943 return -ENXIO; 944 return -ENXIO;
@@ -945,7 +946,6 @@ int blk_register_queue(struct gendisk *disk)
945 WARN_ONCE(blk_queue_registered(q), 946 WARN_ONCE(blk_queue_registered(q),
946 "%s is registering an already registered queue\n", 947 "%s is registering an already registered queue\n",
947 kobject_name(&dev->kobj)); 948 kobject_name(&dev->kobj));
948 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
949 949
950 /* 950 /*
951 * SCSI probing may synchronously create and destroy a lot of 951 * SCSI probing may synchronously create and destroy a lot of
@@ -965,8 +965,7 @@ int blk_register_queue(struct gendisk *disk)
965 if (ret) 965 if (ret)
966 return ret; 966 return ret;
967 967
968 /* Prevent changes through sysfs until registration is completed. */ 968 mutex_lock(&q->sysfs_dir_lock);
969 mutex_lock(&q->sysfs_lock);
970 969
971 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); 970 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
972 if (ret < 0) { 971 if (ret < 0) {
@@ -987,26 +986,36 @@ int blk_register_queue(struct gendisk *disk)
987 blk_mq_debugfs_register(q); 986 blk_mq_debugfs_register(q);
988 } 987 }
989 988
990 kobject_uevent(&q->kobj, KOBJ_ADD); 989 /*
991 990 * The flag of QUEUE_FLAG_REGISTERED isn't set yet, so elevator
992 wbt_enable_default(q); 991 * switch won't happen at all.
993 992 */
994 blk_throtl_register_queue(q);
995
996 if (q->elevator) { 993 if (q->elevator) {
997 ret = elv_register_queue(q); 994 ret = elv_register_queue(q, false);
998 if (ret) { 995 if (ret) {
999 mutex_unlock(&q->sysfs_lock); 996 mutex_unlock(&q->sysfs_dir_lock);
1000 kobject_uevent(&q->kobj, KOBJ_REMOVE);
1001 kobject_del(&q->kobj); 997 kobject_del(&q->kobj);
1002 blk_trace_remove_sysfs(dev); 998 blk_trace_remove_sysfs(dev);
1003 kobject_put(&dev->kobj); 999 kobject_put(&dev->kobj);
1004 return ret; 1000 return ret;
1005 } 1001 }
1002 has_elevator = true;
1006 } 1003 }
1004
1005 mutex_lock(&q->sysfs_lock);
1006 blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
1007 wbt_enable_default(q);
1008 blk_throtl_register_queue(q);
1009
1010 /* Now everything is ready and send out KOBJ_ADD uevent */
1011 kobject_uevent(&q->kobj, KOBJ_ADD);
1012 if (has_elevator)
1013 kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
1014 mutex_unlock(&q->sysfs_lock);
1015
1007 ret = 0; 1016 ret = 0;
1008unlock: 1017unlock:
1009 mutex_unlock(&q->sysfs_lock); 1018 mutex_unlock(&q->sysfs_dir_lock);
1010 return ret; 1019 return ret;
1011} 1020}
1012EXPORT_SYMBOL_GPL(blk_register_queue); 1021EXPORT_SYMBOL_GPL(blk_register_queue);
@@ -1021,6 +1030,7 @@ EXPORT_SYMBOL_GPL(blk_register_queue);
1021void blk_unregister_queue(struct gendisk *disk) 1030void blk_unregister_queue(struct gendisk *disk)
1022{ 1031{
1023 struct request_queue *q = disk->queue; 1032 struct request_queue *q = disk->queue;
1033 bool has_elevator;
1024 1034
1025 if (WARN_ON(!q)) 1035 if (WARN_ON(!q))
1026 return; 1036 return;
@@ -1035,25 +1045,25 @@ void blk_unregister_queue(struct gendisk *disk)
1035 * concurrent elv_iosched_store() calls. 1045 * concurrent elv_iosched_store() calls.
1036 */ 1046 */
1037 mutex_lock(&q->sysfs_lock); 1047 mutex_lock(&q->sysfs_lock);
1038
1039 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); 1048 blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
1049 has_elevator = !!q->elevator;
1050 mutex_unlock(&q->sysfs_lock);
1040 1051
1052 mutex_lock(&q->sysfs_dir_lock);
1041 /* 1053 /*
1042 * Remove the sysfs attributes before unregistering the queue data 1054 * Remove the sysfs attributes before unregistering the queue data
1043 * structures that can be modified through sysfs. 1055 * structures that can be modified through sysfs.
1044 */ 1056 */
1045 if (queue_is_mq(q)) 1057 if (queue_is_mq(q))
1046 blk_mq_unregister_dev(disk_to_dev(disk), q); 1058 blk_mq_unregister_dev(disk_to_dev(disk), q);
1047 mutex_unlock(&q->sysfs_lock);
1048 1059
1049 kobject_uevent(&q->kobj, KOBJ_REMOVE); 1060 kobject_uevent(&q->kobj, KOBJ_REMOVE);
1050 kobject_del(&q->kobj); 1061 kobject_del(&q->kobj);
1051 blk_trace_remove_sysfs(disk_to_dev(disk)); 1062 blk_trace_remove_sysfs(disk_to_dev(disk));
1052 1063
1053 mutex_lock(&q->sysfs_lock); 1064 if (has_elevator)
1054 if (q->elevator)
1055 elv_unregister_queue(q); 1065 elv_unregister_queue(q);
1056 mutex_unlock(&q->sysfs_lock); 1066 mutex_unlock(&q->sysfs_dir_lock);
1057 1067
1058 kobject_put(&disk_to_dev(disk)->kobj); 1068 kobject_put(&disk_to_dev(disk)->kobj);
1059} 1069}
diff --git a/block/blk.h b/block/blk.h
index de6b2e146d6e..e4619fc5c99a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -188,7 +188,7 @@ int elevator_init_mq(struct request_queue *q);
188int elevator_switch_mq(struct request_queue *q, 188int elevator_switch_mq(struct request_queue *q,
189 struct elevator_type *new_e); 189 struct elevator_type *new_e);
190void __elevator_exit(struct request_queue *, struct elevator_queue *); 190void __elevator_exit(struct request_queue *, struct elevator_queue *);
191int elv_register_queue(struct request_queue *q); 191int elv_register_queue(struct request_queue *q, bool uevent);
192void elv_unregister_queue(struct request_queue *q); 192void elv_unregister_queue(struct request_queue *q);
193 193
194static inline void elevator_exit(struct request_queue *q, 194static inline void elevator_exit(struct request_queue *q,
diff --git a/block/elevator.c b/block/elevator.c
index 03d923196569..4781c4205a5d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -470,13 +470,16 @@ static struct kobj_type elv_ktype = {
470 .release = elevator_release, 470 .release = elevator_release,
471}; 471};
472 472
473int elv_register_queue(struct request_queue *q) 473/*
474 * elv_register_queue is called from either blk_register_queue or
475 * elevator_switch, elevator switch is prevented from being happen
476 * in the two paths, so it is safe to not hold q->sysfs_lock.
477 */
478int elv_register_queue(struct request_queue *q, bool uevent)
474{ 479{
475 struct elevator_queue *e = q->elevator; 480 struct elevator_queue *e = q->elevator;
476 int error; 481 int error;
477 482
478 lockdep_assert_held(&q->sysfs_lock);
479
480 error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); 483 error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
481 if (!error) { 484 if (!error) {
482 struct elv_fs_entry *attr = e->type->elevator_attrs; 485 struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -487,24 +490,34 @@ int elv_register_queue(struct request_queue *q)
487 attr++; 490 attr++;
488 } 491 }
489 } 492 }
490 kobject_uevent(&e->kobj, KOBJ_ADD); 493 if (uevent)
494 kobject_uevent(&e->kobj, KOBJ_ADD);
495
496 mutex_lock(&q->sysfs_lock);
491 e->registered = 1; 497 e->registered = 1;
498 mutex_unlock(&q->sysfs_lock);
492 } 499 }
493 return error; 500 return error;
494} 501}
495 502
503/*
504 * elv_unregister_queue is called from either blk_unregister_queue or
505 * elevator_switch, elevator switch is prevented from being happen
506 * in the two paths, so it is safe to not hold q->sysfs_lock.
507 */
496void elv_unregister_queue(struct request_queue *q) 508void elv_unregister_queue(struct request_queue *q)
497{ 509{
498 lockdep_assert_held(&q->sysfs_lock);
499
500 if (q) { 510 if (q) {
501 struct elevator_queue *e = q->elevator; 511 struct elevator_queue *e = q->elevator;
502 512
503 kobject_uevent(&e->kobj, KOBJ_REMOVE); 513 kobject_uevent(&e->kobj, KOBJ_REMOVE);
504 kobject_del(&e->kobj); 514 kobject_del(&e->kobj);
515
516 mutex_lock(&q->sysfs_lock);
505 e->registered = 0; 517 e->registered = 0;
506 /* Re-enable throttling in case elevator disabled it */ 518 /* Re-enable throttling in case elevator disabled it */
507 wbt_enable_default(q); 519 wbt_enable_default(q);
520 mutex_unlock(&q->sysfs_lock);
508 } 521 }
509} 522}
510 523
@@ -567,10 +580,32 @@ int elevator_switch_mq(struct request_queue *q,
567 lockdep_assert_held(&q->sysfs_lock); 580 lockdep_assert_held(&q->sysfs_lock);
568 581
569 if (q->elevator) { 582 if (q->elevator) {
570 if (q->elevator->registered) 583 if (q->elevator->registered) {
584 mutex_unlock(&q->sysfs_lock);
585
586 /*
587 * Concurrent elevator switch can't happen becasue
588 * sysfs write is always exclusively on same file.
589 *
590 * Also the elevator queue won't be freed after
591 * sysfs_lock is released becasue kobject_del() in
592 * blk_unregister_queue() waits for completion of
593 * .store & .show on its attributes.
594 */
571 elv_unregister_queue(q); 595 elv_unregister_queue(q);
596
597 mutex_lock(&q->sysfs_lock);
598 }
572 ioc_clear_queue(q); 599 ioc_clear_queue(q);
573 elevator_exit(q, q->elevator); 600 elevator_exit(q, q->elevator);
601
602 /*
603 * sysfs_lock may be dropped, so re-check if queue is
604 * unregistered. If yes, don't switch to new elevator
605 * any more
606 */
607 if (!blk_queue_registered(q))
608 return 0;
574 } 609 }
575 610
576 ret = blk_mq_init_sched(q, new_e); 611 ret = blk_mq_init_sched(q, new_e);
@@ -578,7 +613,11 @@ int elevator_switch_mq(struct request_queue *q,
578 goto out; 613 goto out;
579 614
580 if (new_e) { 615 if (new_e) {
581 ret = elv_register_queue(q); 616 mutex_unlock(&q->sysfs_lock);
617
618 ret = elv_register_queue(q, true);
619
620 mutex_lock(&q->sysfs_lock);
582 if (ret) { 621 if (ret) {
583 elevator_exit(q, q->elevator); 622 elevator_exit(q, q->elevator);
584 goto out; 623 goto out;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d5077f3fdfd6..1ac790178787 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -535,6 +535,7 @@ struct request_queue {
535 struct delayed_work requeue_work; 535 struct delayed_work requeue_work;
536 536
537 struct mutex sysfs_lock; 537 struct mutex sysfs_lock;
538 struct mutex sysfs_dir_lock;
538 539
539 /* 540 /*
540 * for reusing dead hctx instance in case of updating 541 * for reusing dead hctx instance in case of updating