Diffstat (limited to 'include/linux/raid/md_k.h')
-rw-r--r--	include/linux/raid/md_k.h	369
1 file changed, 369 insertions, 0 deletions
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
new file mode 100644
index 000000000000..c9a0d4013be7
--- /dev/null
+++ b/include/linux/raid/md_k.h
@@ -0,0 +1,369 @@
/*
   md_k.h : kernel internal structure of the Linux MD driver
            Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_K_H
#define _MD_K_H

#define MD_RESERVED       0UL
#define LINEAR            1UL
#define RAID0             2UL
#define RAID1             3UL
#define RAID5             4UL
#define TRANSLUCENT       5UL
#define HSM               6UL
#define MULTIPATH         7UL
#define RAID6             8UL
#define RAID10            9UL
#define FAULTY           10UL
#define MAX_PERSONALITY  11UL

#define LEVEL_MULTIPATH  (-4)
#define LEVEL_LINEAR     (-1)
#define LEVEL_FAULTY     (-5)

#define MaxSector (~(sector_t)0)
#define MD_THREAD_NAME_MAX 14

static inline int pers_to_level (int pers)
{
	switch (pers) {
	case FAULTY:		return LEVEL_FAULTY;
	case MULTIPATH:		return LEVEL_MULTIPATH;
	case HSM:		return -3;
	case TRANSLUCENT:	return -2;
	case LINEAR:		return LEVEL_LINEAR;
	case RAID0:		return 0;
	case RAID1:		return 1;
	case RAID5:		return 5;
	case RAID6:		return 6;
	case RAID10:		return 10;
	}
	BUG();
	return MD_RESERVED;
}

static inline int level_to_pers (int level)
{
	switch (level) {
	case LEVEL_FAULTY:	return FAULTY;
	case LEVEL_MULTIPATH:	return MULTIPATH;
	case -3:		return HSM;
	case -2:		return TRANSLUCENT;
	case LEVEL_LINEAR:	return LINEAR;
	case 0:			return RAID0;
	case 1:			return RAID1;
	case 4:
	case 5:			return RAID5;
	case 6:			return RAID6;
	case 10:		return RAID10;
	}
	return MD_RESERVED;
}
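
/*
 * Note that these two mappings are not inverses: there is no separate
 * RAID4 personality, so level-4 arrays run under the RAID5 personality,
 * and the original level cannot be recovered from the personality
 * number alone:
 *
 *	level_to_pers(4) == RAID5
 *	pers_to_level(RAID5) == 5
 */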

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

#define MAX_MD_DEVS  256	/* Max number of md devs */

/*
 * options passed in raidrun:
 */

#define MAX_CHUNK_SIZE (4096*1024)

/*
 * default readahead
 */

static inline int disk_faulty(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_FAULTY);
}

static inline int disk_active(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_ACTIVE);
}

static inline int disk_sync(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_SYNC);
}

static inline int disk_spare(mdp_disk_t * d)
{
	return !disk_sync(d) && !disk_active(d) && !disk_faulty(d);
}

static inline int disk_removed(mdp_disk_t * d)
{
	return d->state & (1 << MD_DISK_REMOVED);
}

static inline void mark_disk_faulty(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_FAULTY);
}

static inline void mark_disk_active(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_ACTIVE);
}

static inline void mark_disk_sync(mdp_disk_t * d)
{
	d->state |= (1 << MD_DISK_SYNC);
}

static inline void mark_disk_spare(mdp_disk_t * d)
{
	d->state = 0;
}

static inline void mark_disk_removed(mdp_disk_t * d)
{
	d->state = (1 << MD_DISK_FAULTY) | (1 << MD_DISK_REMOVED);
}

static inline void mark_disk_inactive(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_ACTIVE);
}

static inline void mark_disk_nonsync(mdp_disk_t * d)
{
	d->state &= ~(1 << MD_DISK_SYNC);
}
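
/*
 * A minimal usage sketch (hypothetical helper, not part of the original
 * header): promoting a spare combines the setters above -- the disk is
 * first marked active, then marked in-sync once reconstruction is done.
 */
static inline void example_promote_spare(mdp_disk_t *d)
{
	mark_disk_active(d);	/* participates in IO from now on */
	mark_disk_sync(d);	/* counts as a full member of the array */
}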

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t size;			/* Device size (in blocks) */
	mddev_t *mddev;			/* RAID array if running */
	unsigned long last_events;	/* IO event timestamp */

	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page;
	int		sb_loaded;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_offset;
	int		preferred_minor;	/* autorun support */

	/* A device can be in one of three states based on two flags:
	 * Not working:		faulty==1 in_sync==0
	 * Fully working:	faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array:	faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */
	int faulty;			/* if faulty do not issue IO requests */
	int in_sync;			/* device is a full member of the array */

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */

	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
};
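
/*
 * Illustrative helper (hypothetical): because faulty==1 && in_sync==1
 * can never occur, the two flags above collapse into exactly three
 * states, and two tests classify any device.
 */
static inline const char *example_rdev_state(mdk_rdev_t *rdev)
{
	if (rdev->faulty)
		return "failed";	/* faulty==1, in_sync==0 */
	if (rdev->in_sync)
		return "in_sync";	/* faulty==0, in_sync==1 */
	return "recovering";		/* working, not yet in sync */
}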

typedef struct mdk_personality_s mdk_personality_t;

struct mddev_s
{
	void			*private;
	mdk_personality_t	*pers;
	dev_t			unit;
	int			md_minor;
	struct list_head	disks;
	int			sb_dirty;
	int			ro;

	struct gendisk		*gendisk;

	/* Superblock information */
	int			major_version,
				minor_version,
				patch_version;
	int			persistent;
	int			chunk_size;
	time_t			ctime, utime;
	int			level, layout;
	int			raid_disks;
	int			max_disks;
	sector_t		size;		/* used size of component devices */
	sector_t		array_size;	/* exported array size */
	__u64			events;

	char			uuid[16];

	struct mdk_thread_s	*thread;	/* management thread */
	struct mdk_thread_s	*sync_thread;	/* doing resync or reconstruct */
	sector_t		curr_resync;	/* blocks scheduled */
	unsigned long		resync_mark;	/* a recent timestamp */
	sector_t		resync_mark_cnt;/* blocks written at resync_mark */

	sector_t		resync_max_sectors;	/* may be set by personality */
	/* recovery/resync flags
	 * NEEDED:  we might need to start a resync/recover
	 * RUNNING: a thread is running, or about to be started
	 * SYNC:    actually doing a resync, not a recovery
	 * ERR:     an IO error was detected - abort the resync/recovery
	 * INTR:    someone requested a (clean) early abort.
	 * DONE:    thread is done and is waiting to be reaped
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_ERR		2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
	unsigned long		recovery;

	int			in_sync;	/* known to not need resync */
	struct semaphore	reconfig_sem;
	atomic_t		active;

	int			changed;	/* true if we might need to reread partition info */
	int			degraded;	/* whether md should consider
						 * adding a spare
						 */

	atomic_t		recovery_active;	/* blocks scheduled, but not written */
	wait_queue_head_t	recovery_wait;
	sector_t		recovery_cp;
	unsigned int		safemode;	/* if set, update "clean" superblock
						 * when no writes pending.
						 */
	unsigned int		safemode_delay;
	struct timer_list	safemode_timer;
	atomic_t		writes_pending;
	request_queue_t		*queue;		/* for plugging ... */

	struct list_head	all_mddevs;
};
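
/*
 * A minimal sketch of how the MD_RECOVERY_* bits are meant to be driven
 * (hypothetical caller; md_wakeup_thread() is declared in md.h):
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 *
 * The management thread then notices the NEEDED bit, sets RUNNING (and
 * SYNC for a resync rather than a recovery) and spawns ->sync_thread;
 * ERR, INTR and DONE report how that thread ended.
 */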

/*
 * Drop a reference on an rdev. 'faulty' is sampled before the atomic
 * decrement because once nr_pending reaches zero the rdev may be
 * removed and must not be dereferenced afterwards; a final reference
 * dropped on a faulty device requests a recovery pass so the device
 * can be retired from the array.
 */
static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = rdev->faulty;
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

/*
 * Account resync IO against the underlying (whole) device; md uses this
 * to distinguish resync traffic from other activity when deciding
 * whether to throttle.
 */
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality_s
{
	char *name;
	struct module *owner;
	int (*make_request)(request_queue_t *q, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	int (*reshape) (mddev_t *mddev, int raid_disks);
	int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
};
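
/*
 * A skeleton personality, as it might appear in a personality's own .c
 * file (hypothetical names; register_md_personality() is declared in
 * md.h and takes the personality number plus the ops table, where pnum
 * is one of the personality numbers defined at the top of this file):
 *
 *	static mdk_personality_t example_personality = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *		.error_handler	= example_error,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(pnum, &example_personality);
 *	}
 */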


static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define ITERATE_RDEV_GENERIC(head,rdev,tmp)				\
									\
	for ((tmp) = (head).next;					\
		(rdev) = (list_entry((tmp), mdk_rdev_t, same_set)),	\
		(tmp) = (tmp)->next, (tmp)->prev != &(head)		\
		; )
/*
 * iterates through the 'same array disks' ringlist
 */
#define ITERATE_RDEV(mddev,rdev,tmp)					\
	ITERATE_RDEV_GENERIC((mddev)->disks,rdev,tmp)

/*
 * Iterates through 'pending RAID disks'
 */
#define ITERATE_RDEV_PENDING(rdev,tmp)					\
	ITERATE_RDEV_GENERIC(pending_raid_disks,rdev,tmp)
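
/*
 * Usage sketch (hypothetical helper): count the fully in-sync members
 * of an array. ITERATE_RDEV supplies 'rdev' on each pass; 'tmp' is the
 * iterator's scratch cursor and must not be touched.
 */
static inline int example_count_in_sync(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int cnt = 0;

	ITERATE_RDEV(mddev, rdev, tmp)
		if (rdev->in_sync && !rdev->faulty)
			cnt++;
	return cnt;
}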

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct completion	*event;
	struct task_struct	*tsk;
	const char		*name;
} mdk_thread_t;
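
/*
 * Lifecycle sketch (hypothetical; md_register_thread(),
 * md_wakeup_thread() and md_unregister_thread() are declared in md.h):
 *
 *	mddev->thread = md_register_thread(example_run, mddev, "%s_example");
 *	...
 *	md_wakeup_thread(mddev->thread);   -- sets THREAD_WAKEUP, wakes wqueue
 *	...
 *	md_unregister_thread(mddev->thread);
 */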

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd) 		\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd) 			\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
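
/*
 * Usage sketch (hypothetical helper): sleep until *idle becomes true
 * while holding a spinlock. The macro drops the lock around each
 * schedule() and runs 'cmd' just before sleeping; real callers usually
 * pass a queue-unplug call there, a no-op suffices for illustration.
 */
static inline void example_wait_until_idle(wait_queue_head_t *wqp,
					   spinlock_t *lockp,
					   volatile int *idle)
{
	spin_lock_irq(lockp);
	wait_event_lock_irq(*wqp, *idle, *lockp, (void)0);
	spin_unlock_irq(lockp);
}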

#endif
