diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/md/md.h | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/md/md.h')
-rw-r--r-- | drivers/md/md.h | 78 |
1 files changed, 32 insertions, 46 deletions
diff --git a/drivers/md/md.h b/drivers/md/md.h index 3931299788dc..1c26c7a08ae6 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -29,26 +29,6 @@ | |||
29 | typedef struct mddev_s mddev_t; | 29 | typedef struct mddev_s mddev_t; |
30 | typedef struct mdk_rdev_s mdk_rdev_t; | 30 | typedef struct mdk_rdev_s mdk_rdev_t; |
31 | 31 | ||
32 | /* generic plugging support - like that provided with request_queue, | ||
33 | * but does not require a request_queue | ||
34 | */ | ||
35 | struct plug_handle { | ||
36 | void (*unplug_fn)(struct plug_handle *); | ||
37 | struct timer_list unplug_timer; | ||
38 | struct work_struct unplug_work; | ||
39 | unsigned long unplug_flag; | ||
40 | }; | ||
41 | #define PLUGGED_FLAG 1 | ||
42 | void plugger_init(struct plug_handle *plug, | ||
43 | void (*unplug_fn)(struct plug_handle *)); | ||
44 | void plugger_set_plug(struct plug_handle *plug); | ||
45 | int plugger_remove_plug(struct plug_handle *plug); | ||
46 | static inline void plugger_flush(struct plug_handle *plug) | ||
47 | { | ||
48 | del_timer_sync(&plug->unplug_timer); | ||
49 | cancel_work_sync(&plug->unplug_work); | ||
50 | } | ||
51 | |||
52 | /* | 32 | /* |
53 | * MD's 'extended' device | 33 | * MD's 'extended' device |
54 | */ | 34 | */ |
@@ -60,6 +40,12 @@ struct mdk_rdev_s | |||
60 | mddev_t *mddev; /* RAID array if running */ | 40 | mddev_t *mddev; /* RAID array if running */ |
61 | int last_events; /* IO event timestamp */ | 41 | int last_events; /* IO event timestamp */ |
62 | 42 | ||
43 | /* | ||
44 | * If meta_bdev is non-NULL, it means that a separate device is | ||
45 | * being used to store the metadata (superblock/bitmap) which | ||
46 | * would otherwise be contained on the same device as the data (bdev). | ||
47 | */ | ||
48 | struct block_device *meta_bdev; | ||
63 | struct block_device *bdev; /* block device handle */ | 49 | struct block_device *bdev; /* block device handle */ |
64 | 50 | ||
65 | struct page *sb_page; | 51 | struct page *sb_page; |
@@ -87,11 +73,8 @@ struct mdk_rdev_s | |||
87 | #define Faulty 1 /* device is known to have a fault */ | 73 | #define Faulty 1 /* device is known to have a fault */ |
88 | #define In_sync 2 /* device is in_sync with rest of array */ | 74 | #define In_sync 2 /* device is in_sync with rest of array */ |
89 | #define WriteMostly 4 /* Avoid reading if at all possible */ | 75 | #define WriteMostly 4 /* Avoid reading if at all possible */ |
90 | #define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */ | ||
91 | #define AllReserved 6 /* If whole device is reserved for | ||
92 | * one array */ | ||
93 | #define AutoDetected 7 /* added by auto-detect */ | 76 | #define AutoDetected 7 /* added by auto-detect */ |
94 | #define Blocked 8 /* An error occured on an externally | 77 | #define Blocked 8 /* An error occurred on an externally |
95 | * managed array, don't allow writes | 78 | * managed array, don't allow writes |
96 | * until it is cleared */ | 79 | * until it is cleared */ |
97 | wait_queue_head_t blocked_wait; | 80 | wait_queue_head_t blocked_wait; |
@@ -141,6 +124,7 @@ struct mddev_s | |||
141 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | 124 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ |
142 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | 125 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ |
143 | #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ | 126 | #define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ |
127 | #define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */ | ||
144 | 128 | ||
145 | int suspended; | 129 | int suspended; |
146 | atomic_t active_io; | 130 | atomic_t active_io; |
@@ -149,7 +133,8 @@ struct mddev_s | |||
149 | * are happening, so run/ | 133 | * are happening, so run/ |
150 | * takeover/stop are not safe | 134 | * takeover/stop are not safe |
151 | */ | 135 | */ |
152 | 136 | int ready; /* See when safe to pass | |
137 | * IO requests down */ | ||
153 | struct gendisk *gendisk; | 138 | struct gendisk *gendisk; |
154 | 139 | ||
155 | struct kobject kobj; | 140 | struct kobject kobj; |
@@ -195,6 +180,9 @@ struct mddev_s | |||
195 | int delta_disks, new_level, new_layout; | 180 | int delta_disks, new_level, new_layout; |
196 | int new_chunk_sectors; | 181 | int new_chunk_sectors; |
197 | 182 | ||
183 | atomic_t plug_cnt; /* If device is expecting | ||
184 | * more bios soon. | ||
185 | */ | ||
198 | struct mdk_thread_s *thread; /* management thread */ | 186 | struct mdk_thread_s *thread; /* management thread */ |
199 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | 187 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ |
200 | sector_t curr_resync; /* last block scheduled */ | 188 | sector_t curr_resync; /* last block scheduled */ |
@@ -270,16 +258,11 @@ struct mddev_s | |||
270 | atomic_t active; /* general refcount */ | 258 | atomic_t active; /* general refcount */ |
271 | atomic_t openers; /* number of active opens */ | 259 | atomic_t openers; /* number of active opens */ |
272 | 260 | ||
261 | int changed; /* True if we might need to | ||
262 | * reread partition info */ | ||
273 | int degraded; /* whether md should consider | 263 | int degraded; /* whether md should consider |
274 | * adding a spare | 264 | * adding a spare |
275 | */ | 265 | */ |
276 | int barriers_work; /* initialised to true, cleared as soon | ||
277 | * as a barrier request to slave | ||
278 | * fails. Only supported | ||
279 | */ | ||
280 | struct bio *biolist; /* bios that need to be retried | ||
281 | * because REQ_HARDBARRIER is not supported | ||
282 | */ | ||
283 | 266 | ||
284 | atomic_t recovery_active; /* blocks scheduled, but not written */ | 267 | atomic_t recovery_active; /* blocks scheduled, but not written */ |
285 | wait_queue_head_t recovery_wait; | 268 | wait_queue_head_t recovery_wait; |
@@ -337,19 +320,18 @@ struct mddev_s | |||
337 | struct list_head all_mddevs; | 320 | struct list_head all_mddevs; |
338 | 321 | ||
339 | struct attribute_group *to_remove; | 322 | struct attribute_group *to_remove; |
340 | struct plug_handle *plug; /* if used by personality */ | 323 | |
341 | 324 | struct bio_set *bio_set; | |
342 | /* Generic barrier handling. | 325 | |
343 | * If there is a pending barrier request, all other | 326 | /* Generic flush handling. |
344 | * writes are blocked while the devices are flushed. | 327 | * The last to finish preflush schedules a worker to submit |
345 | * The last to finish a flush schedules a worker to | 328 | * the rest of the request (without the REQ_FLUSH flag). |
346 | * submit the barrier request (without the barrier flag), | ||
347 | * then submit more flush requests. | ||
348 | */ | 329 | */ |
349 | struct bio *barrier; | 330 | struct bio *flush_bio; |
350 | atomic_t flush_pending; | 331 | atomic_t flush_pending; |
351 | struct work_struct barrier_work; | 332 | struct work_struct flush_work; |
352 | struct work_struct event_work; /* used by dm to report failure event */ | 333 | struct work_struct event_work; /* used by dm to report failure event */ |
334 | void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); | ||
353 | }; | 335 | }; |
354 | 336 | ||
355 | 337 | ||
@@ -502,12 +484,12 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | |||
502 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | 484 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); |
503 | 485 | ||
504 | extern int mddev_congested(mddev_t *mddev, int bits); | 486 | extern int mddev_congested(mddev_t *mddev, int bits); |
505 | extern void md_barrier_request(mddev_t *mddev, struct bio *bio); | 487 | extern void md_flush_request(mddev_t *mddev, struct bio *bio); |
506 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | 488 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, |
507 | sector_t sector, int size, struct page *page); | 489 | sector_t sector, int size, struct page *page); |
508 | extern void md_super_wait(mddev_t *mddev); | 490 | extern void md_super_wait(mddev_t *mddev); |
509 | extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, | 491 | extern int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, |
510 | struct page *page, int rw); | 492 | struct page *page, int rw, bool metadata_op); |
511 | extern void md_do_sync(mddev_t *mddev); | 493 | extern void md_do_sync(mddev_t *mddev); |
512 | extern void md_new_event(mddev_t *mddev); | 494 | extern void md_new_event(mddev_t *mddev); |
513 | extern int md_allow_write(mddev_t *mddev); | 495 | extern int md_allow_write(mddev_t *mddev); |
@@ -518,7 +500,6 @@ extern int md_integrity_register(mddev_t *mddev); | |||
518 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | 500 | extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev); |
519 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); | 501 | extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); |
520 | extern void restore_bitmap_write_access(struct file *file); | 502 | extern void restore_bitmap_write_access(struct file *file); |
521 | extern void md_unplug(mddev_t *mddev); | ||
522 | 503 | ||
523 | extern void mddev_init(mddev_t *mddev); | 504 | extern void mddev_init(mddev_t *mddev); |
524 | extern int md_run(mddev_t *mddev); | 505 | extern int md_run(mddev_t *mddev); |
@@ -528,4 +509,9 @@ extern void md_rdev_init(mdk_rdev_t *rdev); | |||
528 | 509 | ||
529 | extern void mddev_suspend(mddev_t *mddev); | 510 | extern void mddev_suspend(mddev_t *mddev); |
530 | extern void mddev_resume(mddev_t *mddev); | 511 | extern void mddev_resume(mddev_t *mddev); |
512 | extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, | ||
513 | mddev_t *mddev); | ||
514 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | ||
515 | mddev_t *mddev); | ||
516 | extern int mddev_check_plugged(mddev_t *mddev); | ||
531 | #endif /* _MD_MD_H */ | 517 | #endif /* _MD_MD_H */ |