summary refs log tree commit diff stats
path: root/drivers/md/dm-kcopyd.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/dm-kcopyd.c')
-rw-r--r-- drivers/md/dm-kcopyd.c | 65
1 file changed, 63 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index f85846741d50..cf2c67e35eaf 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -356,6 +356,7 @@ struct kcopyd_job {
356 struct mutex lock; 356 struct mutex lock;
357 atomic_t sub_jobs; 357 atomic_t sub_jobs;
358 sector_t progress; 358 sector_t progress;
359 sector_t write_offset;
359 360
360 struct kcopyd_job *master_job; 361 struct kcopyd_job *master_job;
361}; 362};
@@ -386,6 +387,31 @@ void dm_kcopyd_exit(void)
386 * Functions to push and pop a job onto the head of a given job 387 * Functions to push and pop a job onto the head of a given job
387 * list. 388 * list.
388 */ 389 */
390static struct kcopyd_job *pop_io_job(struct list_head *jobs,
391 struct dm_kcopyd_client *kc)
392{
393 struct kcopyd_job *job;
394
395 /*
396 * For I/O jobs, pop any read, any write without sequential write
397 * constraint and sequential writes that are at the right position.
398 */
399 list_for_each_entry(job, jobs, list) {
400 if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
401 list_del(&job->list);
402 return job;
403 }
404
405 if (job->write_offset == job->master_job->write_offset) {
406 job->master_job->write_offset += job->source.count;
407 list_del(&job->list);
408 return job;
409 }
410 }
411
412 return NULL;
413}
414
389static struct kcopyd_job *pop(struct list_head *jobs, 415static struct kcopyd_job *pop(struct list_head *jobs,
390 struct dm_kcopyd_client *kc) 416 struct dm_kcopyd_client *kc)
391{ 417{
@@ -395,8 +421,12 @@ static struct kcopyd_job *pop(struct list_head *jobs,
395 spin_lock_irqsave(&kc->job_lock, flags); 421 spin_lock_irqsave(&kc->job_lock, flags);
396 422
397 if (!list_empty(jobs)) { 423 if (!list_empty(jobs)) {
398 job = list_entry(jobs->next, struct kcopyd_job, list); 424 if (jobs == &kc->io_jobs)
399 list_del(&job->list); 425 job = pop_io_job(jobs, kc);
426 else {
427 job = list_entry(jobs->next, struct kcopyd_job, list);
428 list_del(&job->list);
429 }
400 } 430 }
401 spin_unlock_irqrestore(&kc->job_lock, flags); 431 spin_unlock_irqrestore(&kc->job_lock, flags);
402 432
@@ -506,6 +536,14 @@ static int run_io_job(struct kcopyd_job *job)
506 .client = job->kc->io_client, 536 .client = job->kc->io_client,
507 }; 537 };
508 538
539 /*
540 * If we need to write sequentially and some reads or writes failed,
541 * no point in continuing.
542 */
543 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
544 job->master_job->write_err)
545 return -EIO;
546
509 io_job_start(job->kc->throttle); 547 io_job_start(job->kc->throttle);
510 548
511 if (job->rw == READ) 549 if (job->rw == READ)
@@ -655,6 +693,7 @@ static void segment_complete(int read_err, unsigned long write_err,
655 int i; 693 int i;
656 694
657 *sub_job = *job; 695 *sub_job = *job;
696 sub_job->write_offset = progress;
658 sub_job->source.sector += progress; 697 sub_job->source.sector += progress;
659 sub_job->source.count = count; 698 sub_job->source.count = count;
660 699
@@ -723,6 +762,27 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
723 job->num_dests = num_dests; 762 job->num_dests = num_dests;
724 memcpy(&job->dests, dests, sizeof(*dests) * num_dests); 763 memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
725 764
765 /*
 766	 * If one of the destinations is a host-managed zoned block device,
 767	 * we need to write sequentially. If one of the destinations is a
 768	 * host-aware device, then leave it to the caller to choose what to do.
769 */
770 if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
771 for (i = 0; i < job->num_dests; i++) {
772 if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
773 set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
774 break;
775 }
776 }
777 }
778
779 /*
780 * If we need to write sequentially, errors cannot be ignored.
781 */
782 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
783 test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
784 clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);
785
726 if (from) { 786 if (from) {
727 job->source = *from; 787 job->source = *from;
728 job->pages = NULL; 788 job->pages = NULL;
@@ -746,6 +806,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
746 job->fn = fn; 806 job->fn = fn;
747 job->context = context; 807 job->context = context;
748 job->master_job = job; 808 job->master_job = job;
809 job->write_offset = 0;
749 810
750 if (job->source.count <= SUB_JOB_SIZE) 811 if (job->source.count <= SUB_JOB_SIZE)
751 dispatch_job(job); 812 dispatch_job(job);