aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/md.h
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.de>2009-12-13 20:49:49 -0500
committerNeilBrown <neilb@suse.de>2009-12-13 20:49:49 -0500
commita2826aa92e2e14db372eda01d333267258944033 (patch)
tree9cdd3329205bf480a4782705a3db1738e3faae44 /drivers/md/md.h
parentefa593390e70b0e3c39f6b2dca8876b6b1461e41 (diff)
md: support barrier requests on all personalities.
Previously barriers were only supported on RAID1. This is because other
levels require synchronisation across all devices and so needed a
different approach. Here is that approach.

When a barrier arrives, we send a zero-length barrier to every active
device. When that completes - and if the original request was not
empty - we submit the barrier request itself (with the barrier flag
cleared) and then submit a fresh load of zero-length barriers.

The barrier request itself is asynchronous, but any subsequent request
will block until the barrier completes.

The reason for clearing the barrier flag is that a barrier request is
allowed to fail. If we pass a non-empty barrier through a striping raid
level it is conceivable that part of it could succeed and part could
fail. That would be way too hard to deal with. So if the first run of
zero-length barriers succeeds, we assume all is sufficiently well that
we send the request and ignore errors in the second run of barriers.

RAID5 needs extra care as write requests may not have been submitted to
the underlying devices yet. So we flush the stripe cache before
proceeding with the barrier.

Note that the second set of zero-length barriers is submitted
immediately after the original request is submitted. Thus when a
personality finds mddev->barrier to be set during make_request, it
should not return from make_request until the corresponding per-device
request(s) have been queued. That will be done in later patches.

Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Andre Noll <maan@systemlinux.org>
Diffstat (limited to 'drivers/md/md.h')
-rw-r--r--drivers/md/md.h12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 87430fea2875..cb036868a9e9 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -292,6 +292,17 @@ struct mddev_s
 	struct mutex			bitmap_mutex;
 
 	struct list_head		all_mddevs;
+
+	/* Generic barrier handling.
+	 * If there is a pending barrier request, all other
+	 * writes are blocked while the devices are flushed.
+	 * The last to finish a flush schedules a worker to
+	 * submit the barrier request (without the barrier flag),
+	 * then submit more flush requests.
+	 */
+	struct bio			*barrier;
+	atomic_t			flush_pending;
+	struct work_struct		barrier_work;
 };
 
 
@@ -432,6 +443,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
 extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
 
 extern int mddev_congested(mddev_t *mddev, int bits);
+extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
 extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 			   sector_t sector, int size, struct page *page);
 extern void md_super_wait(mddev_t *mddev);