Diffstat (limited to 'include/linux/raid')
-rw-r--r--  include/linux/raid/md.h     |  4
-rw-r--r--  include/linux/raid/md_k.h   | 80
-rw-r--r--  include/linux/raid/raid1.h  | 14
-rw-r--r--  include/linux/raid/raid10.h | 22
-rw-r--r--  include/linux/raid/raid5.h  |  7
5 files changed, 62 insertions, 65 deletions
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 13e7c4b62367..b6e0bcad84e1 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -71,8 +71,8 @@
  */
 #define MD_PATCHLEVEL_VERSION 3
 
-extern int register_md_personality (int p_num, mdk_personality_t *p);
-extern int unregister_md_personality (int p_num);
+extern int register_md_personality (struct mdk_personality *p);
+extern int unregister_md_personality (struct mdk_personality *p);
 extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
 				mddev_t *mddev, const char *name);
 extern void md_unregister_thread (mdk_thread_t *thread);
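For context, a hedged sketch of what the new pointer-based registration looks like from a personality module's side. The "demo" module, its stub functions, and the level number are invented for illustration and are not part of this patch; only register_md_personality()/unregister_md_personality() and the struct mdk_personality fields used below come from the headers being changed.

#include <linux/module.h>
#include <linux/raid/md.h>

static int demo_make_request(request_queue_t *q, struct bio *bio)
{
	/* stub: a real personality would remap and resubmit the bio here */
	return 0;
}

static int demo_run(mddev_t *mddev)
{
	return -EINVAL;			/* stub: refuse to start the array */
}

static struct mdk_personality demo_personality = {
	.name		= "demo",
	.level		= 11,		/* hypothetical level number */
	.owner		= THIS_MODULE,
	.make_request	= demo_make_request,
	.run		= demo_run,
};

static int __init demo_init(void)
{
	/* register by descriptor rather than by a fixed personality slot */
	return register_md_personality(&demo_personality);
}

static void __exit demo_exit(void)
{
	unregister_md_personality(&demo_personality);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");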
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 46629a275ba9..617b9506c760 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -18,62 +18,19 @@
 /* and dm-bio-list.h is not under include/linux because.... ??? */
 #include "../../../drivers/md/dm-bio-list.h"
 
-#define MD_RESERVED       0UL
-#define LINEAR            1UL
-#define RAID0             2UL
-#define RAID1             3UL
-#define RAID5             4UL
-#define TRANSLUCENT       5UL
-#define HSM               6UL
-#define MULTIPATH         7UL
-#define RAID6             8UL
-#define RAID10            9UL
-#define FAULTY            10UL
-#define MAX_PERSONALITY   11UL
-
 #define LEVEL_MULTIPATH (-4)
 #define LEVEL_LINEAR (-1)
 #define LEVEL_FAULTY (-5)
 
+/* we need a value for 'no level specified' and 0
+ * means 'raid0', so we need something else.  This is
+ * for internal use only
+ */
+#define LEVEL_NONE (-1000000)
+
 #define MaxSector (~(sector_t)0)
 #define MD_THREAD_NAME_MAX 14
 
-static inline int pers_to_level (int pers)
-{
-	switch (pers) {
-	case FAULTY: return LEVEL_FAULTY;
-	case MULTIPATH: return LEVEL_MULTIPATH;
-	case HSM: return -3;
-	case TRANSLUCENT: return -2;
-	case LINEAR: return LEVEL_LINEAR;
-	case RAID0: return 0;
-	case RAID1: return 1;
-	case RAID5: return 5;
-	case RAID6: return 6;
-	case RAID10: return 10;
-	}
-	BUG();
-	return MD_RESERVED;
-}
-
-static inline int level_to_pers (int level)
-{
-	switch (level) {
-	case LEVEL_FAULTY: return FAULTY;
-	case LEVEL_MULTIPATH: return MULTIPATH;
-	case -3: return HSM;
-	case -2: return TRANSLUCENT;
-	case LEVEL_LINEAR: return LINEAR;
-	case 0: return RAID0;
-	case 1: return RAID1;
-	case 4:
-	case 5: return RAID5;
-	case 6: return RAID6;
-	case 10: return RAID10;
-	}
-	return MD_RESERVED;
-}
-
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
@@ -138,14 +95,16 @@ struct mdk_rdev_s
 	atomic_t read_errors;	/* number of consecutive read errors that
 				 * we have tried to ignore.
 				 */
+	atomic_t corrected_errors; /* number of corrected read errors,
+				    * for reporting to userspace and storing
+				    * in superblock.
+				    */
 };
 
-typedef struct mdk_personality_s mdk_personality_t;
-
 struct mddev_s
 {
 	void *private;
-	mdk_personality_t *pers;
+	struct mdk_personality *pers;
 	dev_t unit;
 	int md_minor;
 	struct list_head disks;
@@ -164,6 +123,7 @@ struct mddev_s
 	int chunk_size;
 	time_t ctime, utime;
 	int level, layout;
+	char clevel[16];
 	int raid_disks;
 	int max_disks;
 	sector_t size; /* used size of component devices */
@@ -183,6 +143,11 @@ struct mddev_s
 	sector_t resync_mismatches; /* count of sectors where
 				     * parity/replica mismatch found
 				     */
+	/* if zero, use the system-wide default */
+	int sync_speed_min;
+	int sync_speed_max;
+
+	int ok_start_degraded;
 	/* recovery/resync flags
 	 * NEEDED: we might need to start a resync/recover
 	 * RUNNING: a thread is running, or about to be started
@@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
 	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
 }
 
-struct mdk_personality_s
+struct mdk_personality
 {
 	char *name;
+	int level;
+	struct list_head list;
 	struct module *owner;
 	int (*make_request)(request_queue_t *q, struct bio *bio);
 	int (*run)(mddev_t *mddev);
@@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev)
 	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
 }
 
-extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
-
 /*
  * iterates through some rdev ringlist. It's safe to remove the
  * current 'rdev'. Dont touch 'tmp' though.
@@ -366,5 +331,10 @@ do { \
 	__wait_event_lock_irq(wq, condition, lock, cmd); \
 } while (0)
 
+static inline void safe_put_page(struct page *p)
+{
+	if (p) put_page(p);
+}
+
 #endif
 
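The new sync_speed_min/sync_speed_max fields are documented as "if zero, use the system-wide default". A minimal sketch of that fallback, assuming the global limits are visible as sysctl_speed_limit_min/sysctl_speed_limit_max (those names are not introduced by this patch):

static inline int speed_min(mddev_t *mddev)
{
	/* per-array value wins; zero means fall back to the global limit */
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}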
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 292b98f2b408..9d5494aaac0f 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -45,6 +45,8 @@ struct r1_private_data_s {
 
 	spinlock_t resync_lock;
 	int nr_pending;
+	int nr_waiting;
+	int nr_queued;
 	int barrier;
 	sector_t next_resync;
 	int fullsync;	/* set to 1 if a full sync is needed,
@@ -52,11 +54,12 @@ struct r1_private_data_s {
 			 * Cleared when a sync completes.
 			 */
 
-	wait_queue_head_t wait_idle;
-	wait_queue_head_t wait_resume;
+	wait_queue_head_t wait_barrier;
 
 	struct pool_info *poolinfo;
 
+	struct page *tmppage;
+
 	mempool_t *r1bio_pool;
 	mempool_t *r1buf_pool;
 };
@@ -106,6 +109,13 @@ struct r1bio_s {
 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
 };
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
 /* bits for r1bio.state */
 #define R1BIO_Uptodate 0
 #define R1BIO_IsSync 1
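A short sketch of the kind of per-device test a read path could make against the new IO_BLOCKED marker. The helper below is illustrative only and is not the driver's actual read_balance(); it uses the r1bio_t/conf_t types defined in this header.

static int first_usable_disk(r1bio_t *r1_bio, conf_t *conf)
{
	int disk;

	for (disk = 0; disk < conf->raid_disks; disk++) {
		/* IO_BLOCKED records "this device already failed this bio" */
		if (r1_bio->bios[disk] == IO_BLOCKED)
			continue;
		if (conf->mirrors[disk].rdev == NULL)
			continue;
		return disk;
	}
	return -1;
}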
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index 60708789c8f9..b1103298a8c2 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -35,18 +35,26 @@ struct r10_private_data_s {
 	sector_t chunk_mask;
 
 	struct list_head retry_list;
-	/* for use when syncing mirrors: */
+	/* queue pending writes and submit them on unplug */
+	struct bio_list pending_bio_list;
+
 
 	spinlock_t resync_lock;
 	int nr_pending;
+	int nr_waiting;
+	int nr_queued;
 	int barrier;
 	sector_t next_resync;
+	int fullsync;	/* set to 1 if a full sync is needed,
+			 * (fresh device added).
+			 * Cleared when a sync completes.
+			 */
 
-	wait_queue_head_t wait_idle;
-	wait_queue_head_t wait_resume;
+	wait_queue_head_t wait_barrier;
 
 	mempool_t *r10bio_pool;
 	mempool_t *r10buf_pool;
+	struct page *tmppage;
 };
 
 typedef struct r10_private_data_s conf_t;
@@ -96,8 +104,16 @@ struct r10bio_s {
 	} devs[0];
 };
 
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error.  To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
 /* bits for r10bio.state */
 #define R10BIO_Uptodate 0
 #define R10BIO_IsSync 1
 #define R10BIO_IsRecover 2
+#define R10BIO_Degraded 3
 #endif
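A sketch of the counter discipline implied by the new nr_waiting/nr_pending/wait_barrier fields, written against the wait_event_lock_irq macro from md_k.h. The function names are illustrative; the real driver also unplugs the request queue before sleeping, which this sketch omits.

static void sketch_wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		/* resync holds the barrier; sleep until it is dropped */
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    do { } while (0) /* no unplug in sketch */);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void sketch_allow_barrier(conf_t *conf)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}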
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f025ba6fb14c..394da8207b34 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -126,7 +126,7 @@
  */
 
 struct stripe_head {
-	struct stripe_head *hash_next, **hash_pprev; /* hash pointers */
+	struct hlist_node hash;
 	struct list_head lru; /* inactive_list or handle_list */
 	struct raid5_private_data *raid_conf;
 	sector_t sector; /* sector of this row */
@@ -152,7 +152,6 @@ struct stripe_head {
 #define R5_Insync 3 /* rdev && rdev->in_sync at start */
 #define R5_Wantread 4 /* want to schedule a read */
 #define R5_Wantwrite 5
-#define R5_Syncio 6 /* this io need to be accounted as resync io */
 #define R5_Overlap 7 /* There is a pending overlapping request on this block */
 #define R5_ReadError 8 /* seen a read error here recently */
 #define R5_ReWrite 9 /* have tried to over-write the readerror */
@@ -205,7 +204,7 @@ struct disk_info {
 };
 
 struct raid5_private_data {
-	struct stripe_head **stripe_hashtbl;
+	struct hlist_head *stripe_hashtbl;
 	mddev_t *mddev;
 	struct disk_info *spare;
 	int chunk_size, level, algorithm;
@@ -228,6 +227,8 @@ struct raid5_private_data {
 	 * Cleared when a sync completes.
 	 */
 
+	struct page *spare_page; /* Used when checking P/Q in raid6 */
+
 	/*
 	 * Free stripes pool
 	 */
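A sketch of what the hlist conversion means for stripe hash maintenance. The bucket count and hash function here are placeholders; raid5.c keeps its own stripe_hash()/insert_hash()/remove_hash() helpers, which are not part of this header diff.

#define SKETCH_NR_HASH	256		/* placeholder bucket count */

static struct hlist_head *sketch_hash_bucket(raid5_conf_t *conf, sector_t sector)
{
	/* conf->stripe_hashtbl is now an array of hlist_head buckets */
	return &conf->stripe_hashtbl[(unsigned long)sector % SKETCH_NR_HASH];
}

static void sketch_insert_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	hlist_add_head(&sh->hash, sketch_hash_bucket(conf, sh->sector));
}

static void sketch_remove_stripe(struct stripe_head *sh)
{
	/* hlist_del_init() leaves the node safe to re-hash later */
	hlist_del_init(&sh->hash);
}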