author		Tejun Heo <tj@kernel.org>		2010-09-03 05:56:18 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-09-10 06:35:38 -0400
commit		e9c7469bb4f502dafc092166201bea1ad5fc0fbf (patch)
tree		04202b0bb88623d3005c909eaafcb280778902da /drivers/md/md.h
parent		7bc9fddab074d6bb630344e1969e28d20b140621 (diff)
md: implement REQ_FLUSH/FUA support
This patch converts md to support REQ_FLUSH/FUA instead of the now
deprecated REQ_HARDBARRIER.  In the core part (md.c), the following
changes are notable.
* Unlike REQ_HARDBARRIER, REQ_FLUSH/FUA don't interfere with the
  processing of other requests, so there is no reason to mark the
  queue congested while a FLUSH/FUA is in progress.
* REQ_FLUSH/FUA failures are final, so their users don't need retry
  logic; it has been removed.
* A preflush needs to be issued to all member devices, but FUA writes
  can be handled the same way as other writes - their processing can
  be deferred to the request_queues of the member devices.
  md_barrier_request() is renamed to md_flush_request() and simplified
  accordingly (see the sketch after this list).
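
The flush flow is short enough to sketch.  The following is a minimal,
hedged sketch using only the flush_bio, flush_pending and flush_work
fields and the md_flush_request() name that appear in the diff below;
md_end_flush(), md_submit_flush_data(), and the locking and error
handling are simplified assumptions, not the exact md.c code:

	/* Sketch: a preflush is sent to every member device; completion
	 * of the last one schedules a worker that resubmits the original
	 * bio with REQ_FLUSH stripped. */
	static void md_end_flush(struct bio *bio, int err)
	{
		mddev_t *mddev = bio->bi_private;

		if (atomic_dec_and_test(&mddev->flush_pending))
			schedule_work(&mddev->flush_work); /* last preflush done */
		bio_put(bio);
	}

	void md_flush_request(mddev_t *mddev, struct bio *bio)
	{
		mdk_rdev_t *rdev;

		mddev->flush_bio = bio;			/* one flush in flight at a time */
		atomic_set(&mddev->flush_pending, 1);	/* bias against early completion */

		list_for_each_entry(rdev, &mddev->disks, same_set) {
			struct bio *fb = bio_alloc(GFP_NOIO, 0); /* empty flush bio */

			fb->bi_bdev = rdev->bdev;
			fb->bi_end_io = md_end_flush;
			fb->bi_private = mddev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, fb);
		}

		if (atomic_dec_and_test(&mddev->flush_pending))	/* drop the bias */
			schedule_work(&mddev->flush_work);
	}

	static void md_submit_flush_data(struct work_struct *ws)
	{
		mddev_t *mddev = container_of(ws, mddev_t, flush_work);
		struct bio *bio = mddev->flush_bio;

		if (bio->bi_size == 0)
			bio_endio(bio, 0);	/* empty flush - all done */
		else {
			bio->bi_rw &= ~REQ_FLUSH; /* preflush done; submit data part */
			if (mddev->pers->make_request(mddev, bio))
				generic_make_request(bio);
		}
		mddev->flush_bio = NULL;
	}

The atomic bias (flush_pending set to 1 before the loop, dropped after
it) is the usual pattern for avoiding a completion race while preflush
bios are still being issued.
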
For linear, raid0 and multipath, the core changes are enough.  raid1,
raid5 and raid10 need the following conversions.
* raid1: Handling of FLUSH/FUA bios can simply be deferred to the
  request_queues of the member devices.  Barrier-related logic is
  removed.
* raid5: Queue-draining logic is dropped.  The FUA bit is propagated
  through biodrain and stripe reconstruction such that all the updated
  parts of the stripe are written out with FUA writes if any of the
  dirtying writes was FUA.  preread_active_stripes handling in
  make_request() is updated as suggested by Neil Brown.
* raid10: The FUA bit needs to be propagated to write clones (sketched
  below).
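
A minimal sketch of that propagation; only REQ_FUA, bio_clone() and
generic_make_request() are real kernel names here, the surrounding
function and its arguments are illustrative:

	/* Sketch: preserve REQ_FUA on every per-mirror clone so each
	 * member device's request_queue sees the same durability hint. */
	static void raid10_write_clones(struct bio *bio,
					struct block_device **mirrors, int copies)
	{
		const unsigned long do_fua = bio->bi_rw & REQ_FUA;
		int i;

		for (i = 0; i < copies; i++) {
			struct bio *mbio = bio_clone(bio, GFP_NOIO);

			mbio->bi_bdev = mirrors[i];
			mbio->bi_rw = WRITE | do_fua; /* FUA carried to the clone */
			generic_make_request(mbio);
		}
	}
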
linear, raid0, 1, 5 and 10 tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Neil Brown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'drivers/md/md.h')
-rw-r--r--	drivers/md/md.h	23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a953fe2808ae..d8e2ab25103b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -87,7 +87,6 @@ struct mdk_rdev_s
 #define Faulty		1	/* device is known to have a fault */
 #define In_sync		2	/* device is in_sync with rest of array */
 #define WriteMostly	4	/* Avoid reading if at all possible */
-#define BarriersNotsupp	5	/* REQ_HARDBARRIER is not supported */
 #define AllReserved	6	/* If whole device is reserved for
 				 * one array */
 #define AutoDetected	7	/* added by auto-detect */
@@ -273,13 +272,6 @@ struct mddev_s
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
-	int				barriers_work;	/* initialised to true, cleared as soon
-							 * as a barrier request to slave
-							 * fails.  Only supported
-							 */
-	struct bio			*biolist;	/* bios that need to be retried
-							 * because REQ_HARDBARRIER is not supported
-							 */
 
 	atomic_t			recovery_active; /* blocks scheduled, but not written */
 	wait_queue_head_t		recovery_wait;
@@ -339,16 +331,13 @@ struct mddev_s
 	struct attribute_group		*to_remove;
 	struct plug_handle		*plug; /* if used by personality */
 
-	/* Generic barrier handling.
-	 * If there is a pending barrier request, all other
-	 * writes are blocked while the devices are flushed.
-	 * The last to finish a flush schedules a worker to
-	 * submit the barrier request (without the barrier flag),
-	 * then submit more flush requests.
+	/* Generic flush handling.
+	 * The last to finish preflush schedules a worker to submit
+	 * the rest of the request (without the REQ_FLUSH flag).
 	 */
-	struct bio *barrier;
+	struct bio *flush_bio;
 	atomic_t flush_pending;
-	struct work_struct barrier_work;
+	struct work_struct flush_work;
 	struct work_struct event_work;	/* used by dm to report failure event */
 };
 
@@ -502,7 +491,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
 extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
 
 extern int mddev_congested(mddev_t *mddev, int bits);
-extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
+extern void md_flush_request(mddev_t *mddev, struct bio *bio);
 extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 			sector_t sector, int size, struct page *page);
 extern void md_super_wait(mddev_t *mddev);
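
For a personality that needs no special handling (linear, raid0,
multipath), the call site is simply a hand-off to the core.  A hedged
sketch; only md_flush_request() comes from the declaration above, the
surrounding function is illustrative:

	/* Sketch: a personality's make_request() hands flush bios to the
	 * core and lets REQ_FUA ride through to the member device. */
	static int example_make_request(mddev_t *mddev, struct bio *bio)
	{
		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
			md_flush_request(mddev, bio); /* core issues the preflush */
			return 0;
		}
		/* ... normal read/write path; REQ_FUA stays on the bio and
		 * is honored by the member device's request_queue ... */
		return 0;
	}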