author		Bob Liu <bob.liu@oracle.com>			2015-11-13 22:12:13 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2016-01-04 12:21:01 -0500
commit		11659569f7202d0cb6553e81f9b8aa04dfeb94ce (patch)
tree		2fa7cecbf25a733fe2a2d0f15d29e6a0120d2726 /drivers/block/xen-blkfront.c
parent		3df0e5059908b8fdba351c4b5dd77caadd95a949 (diff)
xen/blkfront: split per device io_lock
After patch "xen/blkfront: separate per ring information out of device
info", per-ring data is still protected by a single per-device lock
('io_lock'). That lock limits scalability, so introduce a per-ring lock
('ring_lock').

The old 'io_lock' is renamed to 'dev_lock'; it protects the ->grants list
and ->persistent_gnts_c, which are shared by all rings.

Note that in 'blkfront_probe' the 'blkfront_info' is allocated with
kzalloc, so setting ->persistent_gnts_c to zero explicitly is not needed.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
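
In outline, the patch replaces the single per-device lock with a two-level
scheme: a ring_lock in each blkfront_ring_info for the hot path, and a
dev_lock in blkfront_info for the grant state shared by all rings. The
following is a minimal sketch distilled from the hunks below, not the
driver source; struct bodies are trimmed to the lock-relevant members, and
ring_io() is a hypothetical stand-in for hot-path users such as
blkif_queue_rq():

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct blkfront_ring_info {
		spinlock_t ring_lock;		/* protects this ring's data only */
		/* ... shared ring, shadow state, evtchn/irq ... */
	};

	struct blkfront_info {
		spinlock_t dev_lock;		/* protects ->grants, ->persistent_gnts_c */
		struct list_head grants;	/* grant list shared by all rings */
		unsigned int persistent_gnts_c;
		struct blkfront_ring_info *rinfo;	/* nr_rings entries */
		unsigned int nr_rings;
	};

	/* Hot path: each ring takes only its own lock, so rings no
	 * longer serialize against one another. */
	static void ring_io(struct blkfront_ring_info *rinfo)
	{
		unsigned long flags;

		spin_lock_irqsave(&rinfo->ring_lock, flags);
		/* produce requests / consume responses on this ring */
		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
	}

Accesses to the shared grant list (get_free_grant(), blkif_completion())
take dev_lock only around the list and counter updates, keeping those
critical sections short.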
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--	drivers/block/xen-blkfront.c	73
1 file changed, 47 insertions(+), 26 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0638b1722a40..a9058bbdaa6b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -125,6 +125,8 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
  * depending on how many hardware queues/rings to be used.
  */
 struct blkfront_ring_info {
+	/* Lock to protect data in every ring buffer. */
+	spinlock_t ring_lock;
 	struct blkif_front_ring ring;
 	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
 	unsigned int evtchn, irq;
@@ -143,7 +145,6 @@ struct blkfront_ring_info {
  */
 struct blkfront_info
 {
-	spinlock_t io_lock;
 	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
@@ -153,6 +154,11 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
+	/*
+	 * Lock to protect info->grants list and persistent_gnts_c shared by all
+	 * rings.
+	 */
+	spinlock_t dev_lock;
 	struct list_head grants;
 	unsigned int persistent_gnts_c;
 	unsigned int feature_flush;
@@ -258,7 +264,9 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 		}

 		gnt_list_entry->gref = GRANT_INVALID_REF;
+		spin_lock_irq(&info->dev_lock);
 		list_add(&gnt_list_entry->node, &info->grants);
+		spin_unlock_irq(&info->dev_lock);
 		i++;
 	}

@@ -267,7 +275,9 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
 				 &info->grants, node) {
+		spin_lock_irq(&info->dev_lock);
 		list_del(&gnt_list_entry->node);
+		spin_unlock_irq(&info->dev_lock);
 		if (info->feature_persistent)
 			__free_page(gnt_list_entry->page);
 		kfree(gnt_list_entry);
@@ -280,7 +290,9 @@ out_of_memory:
 static struct grant *get_free_grant(struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
+	unsigned long flags;

+	spin_lock_irqsave(&info->dev_lock, flags);
 	BUG_ON(list_empty(&info->grants));
 	gnt_list_entry = list_first_entry(&info->grants, struct grant,
 					  node);
@@ -288,6 +300,7 @@ static struct grant *get_free_grant(struct blkfront_info *info)

 	if (gnt_list_entry->gref != GRANT_INVALID_REF)
 		info->persistent_gnts_c--;
+	spin_unlock_irqrestore(&info->dev_lock, flags);

 	return gnt_list_entry;
 }
@@ -757,11 +770,11 @@ static inline bool blkif_request_flush_invalid(struct request *req,
 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
+	unsigned long flags;
 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
-	struct blkfront_info *info = rinfo->dev_info;

 	blk_mq_start_request(qd->rq);
-	spin_lock_irq(&info->io_lock);
+	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
 		goto out_busy;

@@ -772,15 +785,15 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 		goto out_busy;

 	flush_requests(rinfo);
-	spin_unlock_irq(&info->io_lock);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	return BLK_MQ_RQ_QUEUE_OK;

 out_err:
-	spin_unlock_irq(&info->io_lock);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	return BLK_MQ_RQ_QUEUE_ERROR;

 out_busy:
-	spin_unlock_irq(&info->io_lock);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	blk_mq_stop_hw_queue(hctx);
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
@@ -1082,21 +1095,28 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	info->gd = NULL;
 }

-/* Must be called with io_lock holded */
-static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
+/* Already hold rinfo->ring_lock. */
+static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
 {
 	if (!RING_FULL(&rinfo->ring))
 		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
 }

+static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rinfo->ring_lock, flags);
+	kick_pending_request_queues_locked(rinfo);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+}
+
 static void blkif_restart_queue(struct work_struct *work)
 {
 	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

-	spin_lock_irq(&rinfo->dev_info->io_lock);
 	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
 		kick_pending_request_queues(rinfo);
-	spin_unlock_irq(&rinfo->dev_info->io_lock);
 }

 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
@@ -1188,7 +1208,6 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	unsigned int i;

 	/* Prevent new requests being issued until we fix things up. */
-	spin_lock_irq(&info->io_lock);
 	info->connected = suspend ?
 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
 	/* No more blkif_request(). */
@@ -1196,6 +1215,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		blk_mq_stop_hw_queues(info->rq);

 	/* Remove all persistent grants */
+	spin_lock_irq(&info->dev_lock);
 	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
 					 &info->grants, node) {
@@ -1211,6 +1231,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		}
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
+	spin_unlock_irq(&info->dev_lock);

 	for (i = 0; i < info->nr_rings; i++)
 		blkif_free_ring(&info->rinfo[i]);
@@ -1218,7 +1239,6 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	kfree(info->rinfo);
 	info->rinfo = NULL;
 	info->nr_rings = 0;
-	spin_unlock_irq(&info->io_lock);
 }

 struct copy_from_grant {
@@ -1253,6 +1273,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 	int i = 0;
 	struct scatterlist *sg;
 	int num_sg, num_grant;
+	unsigned long flags;
 	struct blkfront_info *info = rinfo->dev_info;
 	struct copy_from_grant data = {
 		.s = s,
@@ -1291,8 +1312,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 			if (!info->feature_persistent)
 				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
 						     s->grants_used[i]->gref);
+			spin_lock_irqsave(&info->dev_lock, flags);
 			list_add(&s->grants_used[i]->node, &info->grants);
 			info->persistent_gnts_c++;
+			spin_unlock_irqrestore(&info->dev_lock, flags);
 		} else {
 			/*
 			 * If the grant is not mapped by the backend we end the
@@ -1302,7 +1325,9 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 			 */
 			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
 			s->grants_used[i]->gref = GRANT_INVALID_REF;
+			spin_lock_irqsave(&info->dev_lock, flags);
 			list_add_tail(&s->grants_used[i]->node, &info->grants);
+			spin_unlock_irqrestore(&info->dev_lock, flags);
 		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
@@ -1311,8 +1336,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 			if (!info->feature_persistent)
 				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
 						     s->indirect_grants[i]->gref);
+			spin_lock_irqsave(&info->dev_lock, flags);
 			list_add(&s->indirect_grants[i]->node, &info->grants);
 			info->persistent_gnts_c++;
+			spin_unlock_irqrestore(&info->dev_lock, flags);
 		} else {
 			struct page *indirect_page;

@@ -1326,7 +1353,9 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *ri
 				list_add(&indirect_page->lru, &rinfo->indirect_pages);
 			}
 			s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+			spin_lock_irqsave(&info->dev_lock, flags);
 			list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+			spin_unlock_irqrestore(&info->dev_lock, flags);
 		}
 	}
 }
@@ -1342,13 +1371,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	struct blkfront_info *info = rinfo->dev_info;
 	int error;

-	spin_lock_irqsave(&info->io_lock, flags);
-
-	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
-		spin_unlock_irqrestore(&info->io_lock, flags);
+	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return IRQ_HANDLED;
-	}

+	spin_lock_irqsave(&rinfo->ring_lock, flags);
 again:
 	rp = rinfo->ring.sring->rsp_prod;
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
@@ -1439,9 +1465,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	} else
 		rinfo->ring.sring->rsp_event = i + 1;

-	kick_pending_request_queues(rinfo);
+	kick_pending_request_queues_locked(rinfo);

-	spin_unlock_irqrestore(&info->io_lock, flags);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

 	return IRQ_HANDLED;
 }
@@ -1690,14 +1716,14 @@ static int blkfront_probe(struct xenbus_device *dev,
 		INIT_LIST_HEAD(&rinfo->indirect_pages);
 		rinfo->dev_info = info;
 		INIT_WORK(&rinfo->work, blkif_restart_queue);
+		spin_lock_init(&rinfo->ring_lock);
 	}

 	mutex_init(&info->mutex);
-	spin_lock_init(&info->io_lock);
+	spin_lock_init(&info->dev_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
 	INIT_LIST_HEAD(&info->grants);
-	info->persistent_gnts_c = 0;
 	info->connected = BLKIF_STATE_DISCONNECTED;

 	/* Front end dir is a number, which is used as the id. */
@@ -1790,8 +1816,6 @@ static int blkif_recover(struct blkfront_info *info)
 	}
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);

-	spin_lock_irq(&info->io_lock);
-
 	/* Now safe for us to use the shared ring */
 	info->connected = BLKIF_STATE_CONNECTED;

@@ -1809,7 +1833,6 @@ static int blkif_recover(struct blkfront_info *info)
 		BUG_ON(req->nr_phys_segments > segs);
 		blk_mq_requeue_request(req);
 	}
-	spin_unlock_irq(&info->io_lock);
 	blk_mq_kick_requeue_list(info->rq);

 	while ((bio = bio_list_pop(&bio_list)) != NULL) {
@@ -2158,11 +2181,9 @@ static void blkfront_connect(struct blkfront_info *info)
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);

 	/* Kick pending requests. */
-	spin_lock_irq(&info->io_lock);
 	info->connected = BLKIF_STATE_CONNECTED;
 	for (i = 0; i < info->nr_rings; i++)
 		kick_pending_request_queues(&info->rinfo[i]);
-	spin_unlock_irq(&info->io_lock);

 	add_disk(info->gd);

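
One lock-ordering consequence, inferred from the hunks above rather than
stated in the commit message: blkif_interrupt() now holds ring_lock across
the response loop, and blkif_completion() takes dev_lock inside it, so
dev_lock nests inside ring_lock. A minimal sketch of that nesting (the
helper name is hypothetical; the lock names are from the patch):

	static void complete_one_response(struct blkfront_ring_info *rinfo,
					  struct grant *gnt)
	{
		struct blkfront_info *info = rinfo->dev_info;
		unsigned long rflags, dflags;

		spin_lock_irqsave(&rinfo->ring_lock, rflags);	/* outer: per-ring */
		/* ... consume one response from rinfo->ring ... */
		spin_lock_irqsave(&info->dev_lock, dflags);	/* inner: shared state */
		list_add(&gnt->node, &info->grants);
		info->persistent_gnts_c++;
		spin_unlock_irqrestore(&info->dev_lock, dflags);
		spin_unlock_irqrestore(&rinfo->ring_lock, rflags);
	}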