author    Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 13:16:28 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-02-02 13:16:28 -0500
commit    c8864cb70fd13beb57211982bf3b78b6629f1a68 (patch)
tree      b2f94166c842a3e8c32a34a8b141816084fae3b7
parent    3cde55ee7921609331178c84cca485491c97df2a (diff)
parent    9a6d5488002fdca7134a0e59b0ae252f61042810 (diff)
Merge tag 'for-linus-20190202' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release. This contains:

   - MD pull request from Song, fixing a recovery OOM issue (Alexei)

   - Fix for a sync related stall (Jianchao)

   - Dummy callback for timeouts (Tetsuo)

   - IDE atapi sense ordering fix (me)"

* tag 'for-linus-20190202' of git://git.kernel.dk/linux-block:
  ide: ensure atapi sense request aren't preempted
  blk-mq: fix a hung issue when fsync
  block: pass no-op callback to INIT_WORK().
  md/raid5: fix 'out of memory' during raid cache recovery
-rw-r--r--  block/blk-core.c          |  6
-rw-r--r--  block/blk-flush.c         |  2
-rw-r--r--  drivers/ide/ide-atapi.c   |  9
-rw-r--r--  drivers/ide/ide-io.c      | 61
-rw-r--r--  drivers/ide/ide-park.c    |  2
-rw-r--r--  drivers/ide/ide-probe.c   | 23
-rw-r--r--  drivers/md/raid5-cache.c  | 33
-rw-r--r--  drivers/md/raid5.c        |  8
-rw-r--r--  include/linux/ide.h       |  2

9 files changed, 93 insertions(+), 53 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c5f61ceeb67..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
 	kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-	INIT_WORK(&q->timeout_work, NULL);
+	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
 	INIT_LIST_HEAD(&q->blkg_list);
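The hunk above addresses the "dummy callback for timeouts" item from the pull message: q->timeout_work used to be initialized with a NULL callback, and the workqueue core invokes work->func unconditionally once an item is scheduled, so scheduling the timeout work on a legacy queue would oops. A minimal, self-contained sketch (a hypothetical demo module, not part of the patch) of the safe pattern:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct work_struct demo_work;

    /* a named no-op is a valid callback; NULL would oops on first run */
    static void demo_noop_work(struct work_struct *work)
    {
    }

    static int __init demo_init(void)
    {
            INIT_WORK(&demo_work, demo_noop_work);
            schedule_work(&demo_work);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            flush_work(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");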
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-	blk_mq_run_hw_queue(hctx, true);
+	blk_mq_sched_restart(hctx);
 }
 
 /**
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-	struct request *sense_rq = drive->sense_rq;
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *sense_rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwif->lock, flags);
 
 	/* deferred failure from ide_prep_sense() */
 	if (!drive->sense_rq_armed) {
 		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
 		       drive->name);
+		spin_unlock_irqrestore(&hwif->lock, flags);
 		return -ENOMEM;
 	}
 
+	sense_rq = drive->sense_rq;
 	ide_req(sense_rq)->special = special;
 	drive->sense_rq_armed = false;
 
 	drive->hwif->rq = NULL;
 
 	ide_insert_request_head(drive, sense_rq);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
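The rewritten ide_queue_sense_rq() closes a check-then-act race: the sense_rq_armed test and the consumption of drive->sense_rq previously ran without hwif->lock, so a concurrent submission could preempt the sense request. A rough userspace analogy (pthreads, hypothetical names, not the driver code) of the pattern the hunk enforces:

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int armed;               /* models drive->sense_rq_armed */
    static void *pending_rq;        /* models drive->sense_rq */

    static int queue_sense(void *special)
    {
            void *rq;

            pthread_mutex_lock(&lock);
            if (!armed) {                   /* deferred allocation failure */
                    pthread_mutex_unlock(&lock);
                    return -1;              /* -ENOMEM in the driver */
            }
            rq = pending_rq;                /* consume under the same lock */
            armed = 0;
            /* attach 'special' and queue rq at the list head, still locked */
            (void)special;
            (void)rq;
            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            armed = 1;
            pending_rq = &armed;            /* stand-in request object */
            return queue_sense(NULL) == 0 ? 0 : 1;
    }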
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	}
 
 	if (!blk_update_request(rq, error, nr_bytes)) {
-		if (rq == drive->sense_rq)
+		if (rq == drive->sense_rq) {
 			drive->sense_rq = NULL;
+			drive->sense_rq_active = false;
+		}
 
 		__blk_mq_end_request(rq, error);
 		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 	blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-			  const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+			  bool local_requeue)
 {
-	ide_drive_t *drive = hctx->queue->queuedata;
-	ide_hwif_t *hwif = drive->hwif;
+	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	struct request *rq = bd->rq;
 	ide_startstop_t startstop;
 
 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ide_lock_host(host, hwif))
 		return BLK_STS_DEV_RESOURCE;
 
-	blk_mq_start_request(rq);
-
 	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
 	drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
 	/*
-	 * we know that the queue isn't empty, but this can happen
-	 * if ->prep_rq() decides to kill a request
-	 */
-	if (!rq) {
-		rq = bd->rq;
-		if (!rq) {
-			ide_unlock_port(hwif);
-			goto out;
-		}
-	}
-
-	/*
 	 * Sanity: don't accept a request that isn't a PM request
 	 * if we are currently power managed. This is very important as
 	 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
 		}
 	} else {
 plug_device:
+		if (local_requeue)
+			list_add(&rq->queuelist, &drive->rq_list);
 		spin_unlock_irq(&hwif->lock);
 		ide_unlock_host(host);
-		ide_requeue_and_plug(drive, rq);
+		if (!local_requeue)
+			ide_requeue_and_plug(drive, rq);
 		return BLK_STS_OK;
 	}
 
@@ -573,6 +559,26 @@ out:
 	return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
+{
+	ide_drive_t *drive = hctx->queue->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+
+	spin_lock_irq(&hwif->lock);
+	if (drive->sense_rq_active) {
+		spin_unlock_irq(&hwif->lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_start_request(bd->rq);
+	return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hwif->lock, flags);
+	drive->sense_rq_active = true;
 	list_add_tail(&rq->queuelist, &drive->rq_list);
-	spin_unlock_irqrestore(&hwif->lock, flags);
-
 	kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
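The new ide_queue_rq() wrapper gates normal submissions: while drive->sense_rq_active is set it returns BLK_STS_DEV_RESOURCE, so blk-mq requeues the request and retries after the sense request completes instead of letting it jump ahead. A condensed userspace model of that gate (an analogy under assumed names, not the driver code):

    #include <pthread.h>
    #include <stddef.h>

    enum sts { STS_OK, STS_DEV_RESOURCE };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int sense_active;        /* models drive->sense_rq_active */

    static enum sts queue_rq(void *rq)
    {
            pthread_mutex_lock(&lock);
            if (sense_active) {
                    pthread_mutex_unlock(&lock);
                    return STS_DEV_RESOURCE;    /* blk-mq retries later */
            }
            pthread_mutex_unlock(&lock);

            (void)rq;                           /* start and issue rq here */
            return STS_OK;
    }

    int main(void)
    {
            sense_active = 1;
            return queue_rq(NULL) == STS_DEV_RESOURCE ? 0 : 1;
    }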
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
+	spin_lock_irq(&hwif->lock);
 	ide_insert_request_head(drive, rq);
+	spin_unlock_irq(&hwif->lock);
 
 out:
 	return;
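With the locking hoisted out of ide_insert_request_head(), each caller picks the spinlock variant its context requires: issue_park_cmd() runs in process context with interrupts enabled, so plain spin_lock_irq() suffices, while the reworked ide_queue_sense_rq() uses spin_lock_irqsave() because it can be reached with interrupts already disabled. A small illustration of the two variants (demo fragment with hypothetical names, not from the patch):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void caller_in_process_context(void)
    {
            /* irqs known to be enabled on entry, as in issue_park_cmd() */
            spin_lock_irq(&demo_lock);
            /* list_add_tail(...); */
            spin_unlock_irq(&demo_lock);
    }

    static void caller_in_unknown_context(void)
    {
            unsigned long flags;

            /* preserves the caller's irq state, as in ide_queue_sense_rq() */
            spin_lock_irqsave(&demo_lock, flags);
            /* list_add_tail(...); */
            spin_unlock_irqrestore(&demo_lock, flags);
    }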
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
 	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
+	blk_status_t ret;
 	LIST_HEAD(list);
 
-	spin_lock_irq(&hwif->lock);
-	if (!list_empty(&drive->rq_list))
-		list_splice_init(&drive->rq_list, &list);
-	spin_unlock_irq(&hwif->lock);
+	blk_mq_quiesce_queue(drive->queue);
 
-	while (!list_empty(&list)) {
-		rq = list_first_entry(&list, struct request, queuelist);
+	ret = BLK_STS_OK;
+	spin_lock_irq(&hwif->lock);
+	while (!list_empty(&drive->rq_list)) {
+		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+		spin_unlock_irq(&hwif->lock);
+		ret = ide_issue_rq(drive, rq, true);
+		spin_lock_irq(&hwif->lock);
 	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_unquiesce_queue(drive->queue);
+
+	if (ret != BLK_STS_OK)
+		kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
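drive_rq_insert_work() now re-issues deferred requests itself through ide_issue_rq() instead of blk_execute_rq_nowait(), draining drive->rq_list under hwif->lock but dropping the lock around each potentially sleeping issue. A generic sketch of that drop-the-lock-around-the-blocking-call loop (userspace pthreads analogy, hypothetical names):

    #include <pthread.h>
    #include <stddef.h>

    struct req { struct req *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct req *rq_list;     /* models drive->rq_list */

    static int issue(struct req *rq)
    {
            (void)rq;               /* may sleep; must run unlocked */
            return 0;
    }

    static int rq_insert_work(void)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            while (rq_list) {
                    struct req *rq = rq_list;

                    rq_list = rq->next;             /* unlink while locked */

                    pthread_mutex_unlock(&lock);    /* drop for the sleep */
                    ret = issue(rq);
                    pthread_mutex_lock(&lock);
            }
            pthread_mutex_unlock(&lock);

            /* the kernel version reschedules itself on failure here,
             * via kblockd_schedule_work(&drive->rq_work) */
            return ret;
    }

    int main(void)
    {
            struct req a = { NULL }, b = { &a };

            rq_list = &b;
            return rq_insert_work() == 0 && rq_list == NULL ? 0 : 1;
    }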
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 					  stripe_sect);
 
 	if (!sh) {
-		sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+		sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 		/*
 		 * cannot get stripe from raid5_get_active_stripe
 		 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 			r5c_recovery_replay_stripes(
 				cached_stripe_list, ctx);
 			sh = r5c_recovery_alloc_stripe(
-				conf, stripe_sect);
+				conf, stripe_sect, 1);
 		}
 		if (!sh) {
+			int new_size = conf->min_nr_stripes * 2;
 			pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 				 mdname(mddev),
-				 conf->min_nr_stripes * 2);
-			raid5_set_cache_size(mddev,
-					     conf->min_nr_stripes * 2);
-			sh = r5c_recovery_alloc_stripe(conf,
-						       stripe_sect);
+				 new_size);
+			ret = raid5_set_cache_size(mddev, new_size);
+			if (conf->min_nr_stripes <= new_size / 2) {
+				pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+				       mdname(mddev),
+				       ret,
+				       new_size,
+				       conf->min_nr_stripes,
+				       conf->max_nr_stripes);
+				return -ENOMEM;
+			}
+			sh = r5c_recovery_alloc_stripe(
+				conf, stripe_sect, 0);
 		}
 		if (!sh) {
 			pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
 			       mdname(mddev));
 			return -ENOMEM;
 		}
 		list_add_tail(&sh->lru, cached_stripe_list);
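The recovery OOM fix above has two parts: allocation is first attempted non-blocking (noblock=1), and after asking raid5_set_cache_size() to double the cache it verifies that min_nr_stripes actually grew before making a blocking attempt; otherwise it bails with -ENOMEM rather than sleeping forever on stripes that can never appear. A compact standalone model of that grow-and-verify retry (simplified types and stub allocators, not the md code):

    #include <stddef.h>
    #include <stdlib.h>

    struct cache { int min_nr, max_nr; };

    /* stub allocator: NULL models "no stripe available right now" */
    static void *alloc_stripe(struct cache *c, int noblock)
    {
            (void)noblock;
            return c->max_nr > 0 ? malloc(64) : NULL;
    }

    /* stub grow: under real memory pressure this can fail to reach new_size */
    static int grow_cache(struct cache *c, int new_size)
    {
            c->min_nr = new_size;
            c->max_nr = new_size;
            return 0;
    }

    static void *get_stripe_for_recovery(struct cache *c)
    {
            void *sh = alloc_stripe(c, 1);        /* non-blocking first */

            if (!sh) {
                    int new_size = c->min_nr * 2;
                    int ret = grow_cache(c, new_size);

                    /* growth did not stick: a blocking wait would hang */
                    if (c->min_nr <= new_size / 2)
                            return NULL;          /* caller reports -ENOMEM */
                    (void)ret;
                    sh = alloc_stripe(c, 0);      /* now safe to block */
            }
            return sh;
    }

    int main(void)
    {
            struct cache c = { 256, 256 };

            return get_stripe_for_recovery(&c) ? 0 : 1;
    }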
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
 	/* current sense rq and buffer */
 	bool sense_rq_armed;
+	bool sense_rq_active;
 	struct request *sense_rq;
 	struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);