Diffstat (limited to 'drivers/md/raid5.h')
 drivers/md/raid5.h | 474
 1 file changed, 474 insertions, 0 deletions
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
new file mode 100644
index 000000000000..52ba99954dec
--- /dev/null
+++ b/drivers/md/raid5.h
@@ -0,0 +1,474 @@
#ifndef _RAID5_H
#define _RAID5_H

#include <linux/raid/xor.h>

/*
 *
 * Each stripe contains one buffer per disc.  Each buffer can be in
 * one of a number of states stored in "flags".  Changes between
 * these states happen *almost* exclusively under a per-stripe
 * spinlock.  Some very specific changes can happen in bi_end_io, and
 * these are not protected by the spin lock.
 *
 * The flag bits that are used to represent these states are:
 *    R5_UPTODATE and R5_LOCKED
 *
 * State Empty == !UPTODATE, !LOCK
 *        We have no data, and there is no active request
 * State Want == !UPTODATE, LOCK
 *        A read request is being submitted for this block
 * State Dirty == UPTODATE, LOCK
 *        Some new data is in this buffer, and it is being written out
 * State Clean == UPTODATE, !LOCK
 *        We have valid data which is the same as on disc
 * (a hedged sketch decoding these states from the flag bits appears
 *  after the R5_* flag definitions below)
 *
 * The possible state transitions are:
 *
 * Empty -> Want   - on read or write to get old data for parity calc
 * Empty -> Dirty  - on compute_parity to satisfy write/sync request (RECONSTRUCT_WRITE)
 * Empty -> Clean  - on compute_block when computing a block for failed drive
 * Want  -> Empty  - on failed read
 * Want  -> Clean  - on successful completion of read request
 * Dirty -> Clean  - on successful completion of write request
 * Dirty -> Clean  - on failed write
 * Clean -> Dirty  - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
 *
 * The Want->Empty, Want->Clean and Dirty->Clean transitions
 * all happen in b_end_io at interrupt time.
 * Each sets the Uptodate bit before releasing the Lock bit.
 * This leaves one multi-stage transition:
 *    Want->Dirty->Clean
 * This is safe because thinking that a Clean buffer is actually dirty
 * will at worst delay some action, and the stripe will be scheduled
 * for attention after the transition is complete.
 *
 * There is one possibility that is not covered by these states.  That
 * is if one drive has failed and there is a spare being rebuilt.  We
 * can't distinguish between a clean block that has been generated
 * from parity calculations, and a clean block that has been
 * successfully written to the spare (or to parity when resyncing).
 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
 * is set whenever a write is scheduled to the spare, or to the parity
 * disc if there is no spare.  A sync request clears this bit, and
 * when we find it set with no buffers locked, we know the sync is
 * complete.
 *
 * Buffers for the md device that arrive via make_request are attached
 * to the appropriate stripe in one of two lists linked on b_reqnext.
 * One list (bh_read) for read requests, one (bh_write) for write.
 * There should never be more than one buffer on the two lists
 * together, but there is no guarantee of that, so we allow for more.
 *
 * If a buffer is on the read list when the associated cache buffer is
 * Uptodate, the data is copied into the read buffer and its b_end_io
 * routine is called.  This may happen in the end_request routine only
 * if the buffer has just successfully been read.  end_request should
 * remove the buffers from the list and then set the Uptodate bit on
 * the buffer.  Other threads may do this only if they first check
 * that the Uptodate bit is set.  Once they have checked that they may
 * take buffers off the read queue.
 *
 * When a buffer on the write list is committed for write it is copied
 * into the cache buffer, which is then marked dirty, and moved onto a
 * third list, the written list (bh_written).  Once both the parity
 * block and the cached buffer are successfully written, any buffer on
 * a written list can be returned with b_end_io.
 *
 * The write list and read list both act as FIFOs.  The read list is
 * protected by the device_lock.  The write and written lists are
 * protected by the stripe lock.  The device_lock, which can be
 * claimed while the stripe lock is held, is only for list
 * manipulations and will only be held for a very short time.  It can
 * be claimed from interrupts.
 *
 *
 * Stripes in the stripe cache can be on one of two lists (or on
 * neither).  The "inactive_list" contains stripes which are not
 * currently being used for any request.  They can freely be reused
 * for another stripe.  The "handle_list" contains stripes that need
 * to be handled in some way.  Both of these are FIFO queues.  Each
 * stripe is also (potentially) linked to a hash bucket in the hash
 * table so that it can be found by sector number.  Stripes that are
 * not hashed must be on the inactive_list, and will normally be at
 * the front.  All stripes start life this way.
 *
 * The inactive_list, handle_list and hash bucket lists are all protected by the
 * device_lock.
 *  - stripes on the inactive_list never have their stripe_lock held.
 *  - stripes have a reference counter.  If count==0, they are on a list.
 *  - If a stripe might need handling, STRIPE_HANDLE is set.
 *  - When the refcount reaches zero, the stripe is put on the
 *    handle_list if STRIPE_HANDLE is set, else on the inactive_list.
 *
 * This, combined with the fact that STRIPE_HANDLE is only ever
 * cleared while a stripe has a non-zero count, means that if the
 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set,
 * then the stripe is on the inactive_list.
 *
 * The possible transitions are:
 *  activate an unhashed/inactive stripe (get_active_stripe())
 *     lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
 *  activate a hashed, possibly active stripe (get_active_stripe())
 *     lockdev check-hash if(!cnt++)unlink-stripe unlockdev
 *  attach a request to an active stripe (add_stripe_bh())
 *     lockdev attach-buffer unlockdev
 *  handle a stripe (handle_stripe())
 *     lockstripe clrSTRIPE_HANDLE ...
 *        (lockdev check-buffers unlockdev) ..
 *        change-state ..
 *        record io/ops needed unlockstripe schedule io/ops
 *  release an active stripe (release_stripe())
 *     lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
 *     (see the hedged example_release_stripe() sketch later in this file)
 *
 * The refcount counts each thread that has activated the stripe,
 * plus raid5d if it is handling it, plus one for each active request
 * on a cached buffer, and plus one if the stripe is undergoing stripe
 * operations.
 *
 * Stripe operations are performed outside the stripe lock.
 * The stripe operations are:
 *  -copying data between the stripe cache and user application buffers
 *  -computing blocks to save a disk access, or to recover a missing block
 *  -updating the parity on a write operation (reconstruct write and
 *   read-modify-write)
 *  -checking parity correctness
 *  -running i/o to disk
 * These operations are carried out by raid5_run_ops which uses the async_tx
 * api to (optionally) offload operations to dedicated hardware engines.
 * When requesting an operation handle_stripe sets the pending bit for the
 * operation and increments the count.  raid5_run_ops is then run whenever
 * the count is non-zero (see the hedged example_request_biofill() sketch
 * after the STRIPE_OP_* definitions below).
 * There are some critical dependencies between the operations that prevent some
 * from being requested while another is in flight.
 *  1/ Parity check operations destroy the in-cache version of the parity block,
 *     so we prevent parity dependent operations like writes and compute_blocks
 *     from starting while a check is in progress.  Some dma engines can perform
 *     the check without damaging the parity block; in these cases the parity
 *     block is re-marked up to date (assuming the check was successful) and is
 *     not re-read from disk.
 *  2/ When a write operation is requested we immediately lock the affected
 *     blocks, and mark them as not up to date.  This causes new read requests
 *     to be held off, as well as parity checks and compute block operations.
 *  3/ Once a compute block operation has been requested handle_stripe treats
 *     that block as if it is up to date.  raid5_run_ops guarantees that any
 *     operation that is dependent on the compute block result is initiated after
 *     the compute block completes.
 */

/*
 * Operations state - intermediate states that are visible outside of sh->lock
 * In general _idle indicates nothing is running, _run indicates a data
 * processing operation is active, and _result means the data processing result
 * is stable and can be acted upon.  For simple operations like biofill and
 * compute, which only have an _idle and _run state, this is indicated with
 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
 */
/**
 * enum check_states - handles syncing / repairing a stripe
 * @check_state_idle - check operations are quiesced
 * @check_state_run - check operation is running
 * @check_state_check_result - set outside lock when check result is valid
 * @check_state_compute_run - check failed and we are repairing
 * @check_state_compute_result - set outside lock when compute result is valid
 */
enum check_states {
        check_state_idle = 0,
        check_state_run,                /* parity check */
        check_state_check_result,
        check_state_compute_run,        /* parity repair */
        check_state_compute_result,
};

/**
 * enum reconstruct_states - handles writing or expanding a stripe
 */
enum reconstruct_states {
        reconstruct_state_idle = 0,
        reconstruct_state_prexor_drain_run,     /* prexor-write */
        reconstruct_state_drain_run,            /* write */
        reconstruct_state_run,                  /* expand */
        reconstruct_state_prexor_drain_result,
        reconstruct_state_drain_result,
        reconstruct_state_result,
};

struct stripe_head {
        struct hlist_node       hash;
        struct list_head        lru;            /* inactive_list or handle_list */
        struct raid5_private_data *raid_conf;
        short                   generation;     /* increments with every reshape */
        sector_t                sector;         /* sector of this row */
        short                   pd_idx;         /* parity disk index */
        short                   qd_idx;         /* 'Q' disk index for raid6 */
        short                   ddf_layout;     /* use DDF ordering to calculate Q */
        unsigned long           state;          /* state flags */
        atomic_t                count;          /* nr of active thread/requests */
        spinlock_t              lock;
        int                     bm_seq;         /* sequence number for bitmap flushes */
        int                     disks;          /* disks in stripe */
        enum check_states       check_state;
        enum reconstruct_states reconstruct_state;
        /* stripe_operations
         * @target - STRIPE_OP_COMPUTE_BLK target
         */
        struct stripe_operations {
                int             target;
                u32             zero_sum_result;
        } ops;
        struct r5dev {
                struct bio      req;
                struct bio_vec  vec;
                struct page     *page;
                struct bio      *toread, *read, *towrite, *written;
                sector_t        sector;         /* sector of this page */
                unsigned long   flags;
        } dev[1];       /* allocated with extra space depending on RAID geometry */
};
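
/*
 * Editor's sketch (hypothetical helper, not part of the driver): dev[] is
 * declared with a single element, and each stripe_head is allocated with
 * extra space so that 'devs' r5dev entries fit.  The real sizing lives in
 * raid5.c's cache grow/shrink code; this only illustrates the arithmetic.
 */
static inline size_t stripe_head_size_example(int devs)
{
        /* one r5dev is already included in sizeof(struct stripe_head) */
        return sizeof(struct stripe_head) + (devs - 1) * sizeof(struct r5dev);
}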

/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
 * for handle_stripe.  It is only valid under spin_lock(sh->lock);
 */
struct stripe_head_state {
        int syncing, expanding, expanded;
        int locked, uptodate, to_read, to_write, failed, written;
        int to_fill, compute, req_compute, non_overwrite;
        int failed_num;
        unsigned long ops_request;
};

/* r6_state - extra state data only relevant to r6 */
struct r6_state {
        int p_failed, q_failed, failed_num[2];
};

/* Flags */
#define R5_UPTODATE     0       /* page contains current data */
#define R5_LOCKED       1       /* IO has been submitted on "req" */
#define R5_OVERWRITE    2       /* towrite covers whole page */
/* and some that are internal to handle_stripe */
#define R5_Insync       3       /* rdev && rdev->in_sync at start */
#define R5_Wantread     4       /* want to schedule a read */
#define R5_Wantwrite    5
#define R5_Overlap      7       /* There is a pending overlapping request on this block */
#define R5_ReadError    8       /* seen a read error here recently */
#define R5_ReWrite      9       /* have tried to over-write the readerror */

#define R5_Expanded     10      /* This block now has post-expand data */
#define R5_Wantcompute  11      /* compute_block in progress treat as
                                 * uptodate
                                 */
#define R5_Wantfill     12      /* dev->toread contains a bio that needs
                                 * filling
                                 */
#define R5_Wantdrain    13      /* dev->towrite needs to be drained */
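
/*
 * Editor's sketch (hypothetical helper, not used by the driver): recover
 * the Empty/Want/Dirty/Clean buffer state described at the top of this
 * file from the R5_UPTODATE and R5_LOCKED bits.  Assumes test_bit() is
 * available through the existing includes.
 */
enum r5dev_example_state { EX_EMPTY, EX_WANT, EX_DIRTY, EX_CLEAN };

static inline enum r5dev_example_state r5dev_state_example(struct r5dev *dev)
{
        int uptodate = test_bit(R5_UPTODATE, &dev->flags);
        int locked = test_bit(R5_LOCKED, &dev->flags);

        if (uptodate)
                return locked ? EX_DIRTY : EX_CLEAN;
        return locked ? EX_WANT : EX_EMPTY;
}
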
/*
 * Write method
 */
#define RECONSTRUCT_WRITE       1
#define READ_MODIFY_WRITE       2
/* not a write method, but a compute_parity mode */
#define CHECK_PARITY            3
/* Additional compute_parity mode -- updates the parity w/o LOCKING */
#define UPDATE_PARITY           4

/*
 * Stripe state
 */
#define STRIPE_HANDLE           2
#define STRIPE_SYNCING          3
#define STRIPE_INSYNC           4
#define STRIPE_PREREAD_ACTIVE   5
#define STRIPE_DELAYED          6
#define STRIPE_DEGRADED         7
#define STRIPE_BIT_DELAY        8
#define STRIPE_EXPANDING        9
#define STRIPE_EXPAND_SOURCE    10
#define STRIPE_EXPAND_READY     11
#define STRIPE_IO_STARTED       12      /* do not count towards 'bypass_count' */
#define STRIPE_FULL_WRITE       13      /* all blocks are set to be overwritten */
#define STRIPE_BIOFILL_RUN      14
#define STRIPE_COMPUTE_RUN      15
/*
 * Operation request flags
 */
#define STRIPE_OP_BIOFILL       0
#define STRIPE_OP_COMPUTE_BLK   1
#define STRIPE_OP_PREXOR        2
#define STRIPE_OP_BIODRAIN      3
#define STRIPE_OP_POSTXOR       4
#define STRIPE_OP_CHECK         5
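
/*
 * Editor's sketch of how an operation is requested, per the comment near
 * the top of this file: handle_stripe records the request bit in
 * stripe_head_state.ops_request, and raid5_run_ops later acts on the set
 * bits.  example_request_biofill() is a hypothetical name; the driver
 * sets these bits inline in handle_stripe.
 */
static inline void example_request_biofill(struct stripe_head_state *s)
{
        set_bit(STRIPE_OP_BIOFILL, &s->ops_request);
}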
/*
 * Plugging:
 *
 * To improve write throughput, we need to delay the handling of some
 * stripes until there has been a chance that several write requests
 * for the one stripe have all been collected.
 * In particular, any write request that would require pre-reading
 * is put on a "delayed" queue until there are no stripes currently
 * in a pre-read phase.  Further, if the "delayed" queue is empty when
 * a stripe is put on it then we "plug" the queue and do not process it
 * until an unplug call is made (i.e. until unplug_io_fn() is called).
 *
 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and
 * increment the count of prereading stripes.
 * When write is initiated, or the stripe refcnt == 0 (just in case) we
 * clear the PREREAD_ACTIVE flag and decrement the count.
 * Whenever the 'handle' queue is empty and the device is not plugged, we
 * move any stripes from delayed to handle and clear the DELAYED flag and set
 * PREREAD_ACTIVE (see the example_activate_delayed() sketch after the
 * raid5_private_data definition below).
 * In handle_stripe, if we find pre-reading is necessary, we do it if
 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 * STRIPE_HANDLE gets cleared if handle_stripe leaves nothing locked.
 */


struct disk_info {
        mdk_rdev_t      *rdev;
};

struct raid5_private_data {
        struct hlist_head       *stripe_hashtbl;
        mddev_t                 *mddev;
        struct disk_info        *spare;
        int                     chunk_size, level, algorithm;
        int                     max_degraded;
        int                     raid_disks;
        int                     max_nr_stripes;

        /* reshape_progress is the leading edge of a 'reshape'.
         * It has value MaxSector when no reshape is happening.
         * If delta_disks < 0, it is the last sector we started work on,
         * else it is the next sector to work on.
         */
        sector_t                reshape_progress;
        /* reshape_safe is the trailing edge of a reshape.  We know that
         * before (or after) this address, all reshape has completed.
         */
        sector_t                reshape_safe;
        int                     previous_raid_disks;
        int                     prev_chunk, prev_algo;
        short                   generation;     /* increments with every reshape */
        unsigned long           reshape_checkpoint;     /* Time we last updated
                                                         * metadata */

        struct list_head        handle_list;    /* stripes needing handling */
        struct list_head        hold_list;      /* preread ready stripes */
        struct list_head        delayed_list;   /* stripes that have plugged requests */
        struct list_head        bitmap_list;    /* stripes delayed awaiting bitmap update */
        struct bio              *retry_read_aligned;    /* currently retrying aligned bios */
        struct bio              *retry_read_aligned_list; /* aligned bios retry list */
        atomic_t                preread_active_stripes; /* stripes with scheduled io */
        atomic_t                active_aligned_reads;
        atomic_t                pending_full_writes;    /* full write backlog */
        int                     bypass_count;           /* bypassed prereads */
        int                     bypass_threshold;       /* preread nice */
        struct list_head        *last_hold;             /* detect hold_list promotions */

        atomic_t                reshape_stripes; /* stripes with pending writes for reshape */
        /* unfortunately we need two cache names as we temporarily have
         * two caches.
         */
        int                     active_name;
        char                    cache_name[2][20];
        struct kmem_cache       *slab_cache;    /* for allocating stripes */

        int                     seq_flush, seq_write;
        int                     quiesce;

        int                     fullsync;       /* set to 1 if a full sync is needed,
                                                 * (fresh device added).
                                                 * Cleared when a sync completes.
                                                 */

        struct page             *spare_page;    /* Used when checking P/Q in raid6 */

        /*
         * Free stripes pool
         */
        atomic_t                active_stripes;
        struct list_head        inactive_list;
        wait_queue_head_t       wait_for_stripe;
        wait_queue_head_t       wait_for_overlap;
        int                     inactive_blocked;       /* release of inactive stripes blocked,
                                                         * waiting for 25% to be free
                                                         */
        int                     pool_size;      /* number of disks in stripeheads in pool */
        spinlock_t              device_lock;
        struct disk_info        *disks;

        /* When taking over an array from a different personality, we store
         * the new thread here until we fully activate the array.
         */
        struct mdk_thread_s     *thread;
};

typedef struct raid5_private_data raid5_conf_t;
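
/*
 * Editor's sketch of the "release an active stripe" transition documented
 * near the top of this file.  The real release_stripe() in raid5.c also
 * handles wakeups, bitmap flushing and irq-safe locking, all omitted here.
 */
static inline void example_release_stripe(raid5_conf_t *conf,
                                          struct stripe_head *sh)
{
        /* "lockdev if (!--cnt) { ... } unlockdev" */
        if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
                if (test_bit(STRIPE_HANDLE, &sh->state))
                        list_add_tail(&sh->lru, &conf->handle_list);
                else
                        list_add_tail(&sh->lru, &conf->inactive_list);
                spin_unlock(&conf->device_lock);
        }
}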

#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
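
/*
 * Editor's sketch of the delayed-queue promotion described in the
 * "Plugging" comment above: when the handle queue drains and the device
 * is unplugged, delayed stripes move to the handle_list and preread
 * becomes active.  Simplified from the driver (the preread threshold and
 * bypass accounting are omitted); device_lock must be held.
 */
static inline void example_activate_delayed(raid5_conf_t *conf)
{
        while (!list_empty(&conf->delayed_list)) {
                struct stripe_head *sh = list_entry(conf->delayed_list.next,
                                                    struct stripe_head, lru);

                list_del_init(&sh->lru);
                clear_bit(STRIPE_DELAYED, &sh->state);
                if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                        atomic_inc(&conf->preread_active_stripes);
                list_add_tail(&sh->lru, &conf->handle_list);
        }
}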

/*
 * Our supported algorithms
 */
#define ALGORITHM_LEFT_ASYMMETRIC       0 /* Rotating Parity N with Data Restart */
#define ALGORITHM_RIGHT_ASYMMETRIC      1 /* Rotating Parity 0 with Data Restart */
#define ALGORITHM_LEFT_SYMMETRIC        2 /* Rotating Parity N with Data Continuation */
#define ALGORITHM_RIGHT_SYMMETRIC       3 /* Rotating Parity 0 with Data Continuation */

/* Define non-rotating (raid4) algorithms.  These allow
 * conversion of raid4 to raid5.
 */
#define ALGORITHM_PARITY_0              4 /* P or P,Q are initial devices */
#define ALGORITHM_PARITY_N              5 /* P or P,Q are final devices. */

/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
 * Firstly, the exact positioning of the parity block is slightly
 * different between the 'LEFT_*' modes of md and the "_N_*" modes
 * of DDF.
 * Secondly, the order of data blocks over which the Q syndrome is computed
 * is different.
 * Consequently we have different layouts for DDF/raid6 than md/raid6.
 * These layouts are from the DDFv1.2 spec.
 * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
 * leaves RLQ=3 as 'Vendor Specific'.
 */

#define ALGORITHM_ROTATING_ZERO_RESTART 8  /* DDF PRL=6 RLQ=1 */
#define ALGORITHM_ROTATING_N_RESTART    9  /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE   10 /* DDF PRL=6 RLQ=3 */

/* For every RAID5 algorithm we define a RAID6 algorithm
 * with exactly the same layout for data and parity, and
 * with the Q block always on the last device (N-1).
 * This allows trivial conversion from RAID5 to RAID6.
 */
#define ALGORITHM_LEFT_ASYMMETRIC_6     16
#define ALGORITHM_RIGHT_ASYMMETRIC_6    17
#define ALGORITHM_LEFT_SYMMETRIC_6      18
#define ALGORITHM_RIGHT_SYMMETRIC_6     19
#define ALGORITHM_PARITY_0_6            20
#define ALGORITHM_PARITY_N_6            ALGORITHM_PARITY_N
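
/*
 * Editor's sketch (hypothetical helper): the RAID6 layout that preserves
 * a given RAID5 layout, per the comment above.  The _6 variants of the
 * four rotating layouts and PARITY_0 are simply offset by 16, while
 * PARITY_N keeps its value by definition.
 */
static inline int example_raid5_layout_to_raid6(int layout5)
{
        if (layout5 == ALGORITHM_PARITY_N)
                return ALGORITHM_PARITY_N_6;    /* same value by definition */
        return layout5 + 16;    /* e.g. LEFT_SYMMETRIC -> LEFT_SYMMETRIC_6 */
}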

static inline int algorithm_valid_raid5(int layout)
{
        return (layout >= 0) &&
               (layout <= 5);
}

static inline int algorithm_valid_raid6(int layout)
{
        /* 8..10 covers all three DDF rotating layouts defined above */
        return (layout >= 0 && layout <= 5)
                ||
                (layout >= 8 && layout <= 10)
                ||
                (layout >= 16 && layout <= 20);
}

static inline int algorithm_is_DDF(int layout)
{
        return layout >= 8 && layout <= 10;
}

#endif /* _RAID5_H */