aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-05-03 13:34:03 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-03 13:34:03 -0400
commit7b66f13207e60e7c550af730986e77e38a0c69a3 (patch)
treec2dad63d3ef3513a6656b39fb3ed0f974a6fff97
parentd35a878ae1c50977b55e352fd46e36e35add72a0 (diff)
parent412445acb6cad4cef026daae37c4765fb9942c60 (diff)
Merge tag 'for-4.12/dm-post-merge-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull additional device mapper updates from Mike Snitzer: "Here are some changes from Christoph that needed to be rebased on top of changes that were already merged into the device mapper tree. In addition, these changes depend on the 'for-4.12/block' changes that you've already merged. - Cleanups to request-based DM and DM multipath from Christoph that prepare for his block core error code type checking improvements" * tag 'for-4.12/dm-post-merge-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: dm: introduce a new DM_MAPIO_KILL return value dm rq: change ->rq_end_io calling conventions dm mpath: merge do_end_io into multipath_end_io
-rw-r--r--drivers/md/dm-mpath.c54
-rw-r--r--drivers/md/dm-rq.c29
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--include/linux/device-mapper.h2
4 files changed, 39 insertions, 48 deletions
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 52cd3f1608b3..926a6bcb32c8 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1464,12 +1464,13 @@ static int noretry_error(int error)
1464 return 0; 1464 return 0;
1465} 1465}
1466 1466
1467/* 1467static int multipath_end_io(struct dm_target *ti, struct request *clone,
1468 * end_io handling 1468 int error, union map_info *map_context)
1469 */
1470static int do_end_io(struct multipath *m, struct request *clone,
1471 int error, struct dm_mpath_io *mpio)
1472{ 1469{
1470 struct dm_mpath_io *mpio = get_mpio(map_context);
1471 struct pgpath *pgpath = mpio->pgpath;
1472 int r = DM_ENDIO_DONE;
1473
1473 /* 1474 /*
1474 * We don't queue any clone request inside the multipath target 1475 * We don't queue any clone request inside the multipath target
1475 * during end I/O handling, since those clone requests don't have 1476 * during end I/O handling, since those clone requests don't have
@@ -1481,39 +1482,26 @@ static int do_end_io(struct multipath *m, struct request *clone,
1481 * request into dm core, which will remake a clone request and 1482 * request into dm core, which will remake a clone request and
1482 * clone bios for it and resubmit it later. 1483 * clone bios for it and resubmit it later.
1483 */ 1484 */
1484 int r = DM_ENDIO_REQUEUE; 1485 if (error && !noretry_error(error)) {
1485 1486 struct multipath *m = ti->private;
1486 if (!error)
1487 return 0; /* I/O complete */
1488 1487
1489 if (noretry_error(error)) 1488 r = DM_ENDIO_REQUEUE;
1490 return error;
1491
1492 if (mpio->pgpath)
1493 fail_path(mpio->pgpath);
1494 1489
1495 if (atomic_read(&m->nr_valid_paths) == 0 && 1490 if (pgpath)
1496 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 1491 fail_path(pgpath);
1497 r = dm_report_EIO(m);
1498
1499 return r;
1500}
1501
1502static int multipath_end_io(struct dm_target *ti, struct request *clone,
1503 int error, union map_info *map_context)
1504{
1505 struct multipath *m = ti->private;
1506 struct dm_mpath_io *mpio = get_mpio(map_context);
1507 struct pgpath *pgpath;
1508 struct path_selector *ps;
1509 int r;
1510 1492
1511 BUG_ON(!mpio); 1493 if (atomic_read(&m->nr_valid_paths) == 0 &&
1494 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1495 if (error == -EIO)
1496 error = dm_report_EIO(m);
1497 /* complete with the original error */
1498 r = DM_ENDIO_DONE;
1499 }
1500 }
1512 1501
1513 r = do_end_io(m, clone, error, mpio);
1514 pgpath = mpio->pgpath;
1515 if (pgpath) { 1502 if (pgpath) {
1516 ps = &pgpath->pg->ps; 1503 struct path_selector *ps = &pgpath->pg->ps;
1504
1517 if (ps->type->end_io) 1505 if (ps->type->end_io)
1518 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); 1506 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
1519 } 1507 }
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index d445b712970b..a48130b90157 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -287,7 +287,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
287 287
288static void dm_done(struct request *clone, int error, bool mapped) 288static void dm_done(struct request *clone, int error, bool mapped)
289{ 289{
290 int r = error; 290 int r = DM_ENDIO_DONE;
291 struct dm_rq_target_io *tio = clone->end_io_data; 291 struct dm_rq_target_io *tio = clone->end_io_data;
292 dm_request_endio_fn rq_end_io = NULL; 292 dm_request_endio_fn rq_end_io = NULL;
293 293
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
298 r = rq_end_io(tio->ti, clone, error, &tio->info); 298 r = rq_end_io(tio->ti, clone, error, &tio->info);
299 } 299 }
300 300
301 if (unlikely(r == -EREMOTEIO)) { 301 if (unlikely(error == -EREMOTEIO)) {
302 if (req_op(clone) == REQ_OP_WRITE_SAME && 302 if (req_op(clone) == REQ_OP_WRITE_SAME &&
303 !clone->q->limits.max_write_same_sectors) 303 !clone->q->limits.max_write_same_sectors)
304 disable_write_same(tio->md); 304 disable_write_same(tio->md);
@@ -307,16 +307,19 @@ static void dm_done(struct request *clone, int error, bool mapped)
307 disable_write_zeroes(tio->md); 307 disable_write_zeroes(tio->md);
308 } 308 }
309 309
310 if (r <= 0) 310 switch (r) {
311 case DM_ENDIO_DONE:
311 /* The target wants to complete the I/O */ 312 /* The target wants to complete the I/O */
312 dm_end_request(clone, r); 313 dm_end_request(clone, error);
313 else if (r == DM_ENDIO_INCOMPLETE) 314 break;
315 case DM_ENDIO_INCOMPLETE:
314 /* The target will handle the I/O */ 316 /* The target will handle the I/O */
315 return; 317 return;
316 else if (r == DM_ENDIO_REQUEUE) 318 case DM_ENDIO_REQUEUE:
317 /* The target wants to requeue the I/O */ 319 /* The target wants to requeue the I/O */
318 dm_requeue_original_request(tio, false); 320 dm_requeue_original_request(tio, false);
319 else { 321 break;
322 default:
320 DMWARN("unimplemented target endio return value: %d", r); 323 DMWARN("unimplemented target endio return value: %d", r);
321 BUG(); 324 BUG();
322 } 325 }
@@ -501,14 +504,12 @@ static int map_request(struct dm_rq_target_io *tio)
501 /* The target wants to requeue the I/O after a delay */ 504 /* The target wants to requeue the I/O after a delay */
502 dm_requeue_original_request(tio, true); 505 dm_requeue_original_request(tio, true);
503 break; 506 break;
504 default: 507 case DM_MAPIO_KILL:
505 if (r > 0) {
506 DMWARN("unimplemented target map return value: %d", r);
507 BUG();
508 }
509
510 /* The target wants to complete the I/O */ 508 /* The target wants to complete the I/O */
511 dm_kill_unmapped_request(rq, r); 509 dm_kill_unmapped_request(rq, -EIO);
510 default:
511 DMWARN("unimplemented target map return value: %d", r);
512 BUG();
512 } 513 }
513 514
514 return r; 515 return r;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 43d3445b121d..6264ff00dcf0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -135,7 +135,7 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
135 union map_info *map_context, 135 union map_info *map_context,
136 struct request **clone) 136 struct request **clone)
137{ 137{
138 return -EIO; 138 return DM_MAPIO_KILL;
139} 139}
140 140
141static void io_err_release_clone_rq(struct request *clone) 141static void io_err_release_clone_rq(struct request *clone)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 925b63cdef52..78ad0624cdae 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -593,6 +593,7 @@ extern struct ratelimit_state dm_ratelimit_state;
593/* 593/*
594 * Definitions of return values from target end_io function. 594 * Definitions of return values from target end_io function.
595 */ 595 */
596#define DM_ENDIO_DONE 0
596#define DM_ENDIO_INCOMPLETE 1 597#define DM_ENDIO_INCOMPLETE 1
597#define DM_ENDIO_REQUEUE 2 598#define DM_ENDIO_REQUEUE 2
598 599
@@ -603,6 +604,7 @@ extern struct ratelimit_state dm_ratelimit_state;
603#define DM_MAPIO_REMAPPED 1 604#define DM_MAPIO_REMAPPED 1
604#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE 605#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
605#define DM_MAPIO_DELAY_REQUEUE 3 606#define DM_MAPIO_DELAY_REQUEUE 3
607#define DM_MAPIO_KILL 4
606 608
607#define dm_sector_div64(x, y)( \ 609#define dm_sector_div64(x, y)( \
608{ \ 610{ \