author		Philipp Reisner <philipp.reisner@linbit.com>	2009-09-25 19:07:19 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-10-01 15:17:49 -0400
commit		b411b3637fa71fce9cf2acf0639009500f5892fe
tree		6b88e5202e0f137fef50e95b0441bcafdbf91990 /drivers/block/drbd/drbd_actlog.c
parent		1a35e0f6443f4266dad4c569c55c57a9032596fa
The DRBD driver
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_actlog.c')
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	1484
1 file changed, 1484 insertions, 0 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
new file mode 100644
index 00000000000..74b4835d310
--- /dev/null
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -0,0 +1,1484 @@
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_wrappers.h"

/* We maintain a trivial checksum in our on disk activity log.
 * With that we can ensure correct operation even when the storage
 * device might do a partial (last) sector write while losing power.
 */
struct __packed al_transaction {
        u32 magic;
        u32 tr_number;
        struct __packed {
                u32 pos;
                u32 extent; } updates[1 + AL_EXTENTS_PT];
        u32 xor_sum;
};

struct update_odbm_work {
        struct drbd_work w;
        unsigned int enr;
};

struct update_al_work {
        struct drbd_work w;
        struct lc_element *al_ext;
        struct completion event;
        unsigned int enr;
        /* if old_enr != LC_FREE, write corresponding bitmap sector, too */
        unsigned int old_enr;
};

struct drbd_atodb_wait {
        atomic_t count;
        struct completion io_done;
        struct drbd_conf *mdev;
        int error;
};


int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);

/* The actual tracepoint needs to have a constant number of known arguments...
 */
void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        trace__drbd_resync(mdev, level, fmt, ap);
        va_end(ap);
}

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
                                 struct drbd_backing_dev *bdev,
                                 struct page *page, sector_t sector,
                                 int rw, int size)
{
        struct bio *bio;
        struct drbd_md_io md_io;
        int ok;

        md_io.mdev = mdev;
        init_completion(&md_io.event);
        md_io.error = 0;

        if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
                rw |= (1 << BIO_RW_BARRIER);
        rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));

 retry:
        bio = bio_alloc(GFP_NOIO, 1);
        bio->bi_bdev = bdev->md_bdev;
        bio->bi_sector = sector;
        ok = (bio_add_page(bio, page, size, 0) == size);
        if (!ok)
                goto out;
        bio->bi_private = &md_io;
        bio->bi_end_io = drbd_md_io_complete;
        bio->bi_rw = rw;

        trace_drbd_bio(mdev, "Md", bio, 0, NULL);

        if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
                bio_endio(bio, -EIO);
        else
                submit_bio(rw, bio);
        wait_for_completion(&md_io.event);
        ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

        /* check for unsupported barrier op.
         * would rather check on EOPNOTSUPP, but that is not reliable.
         * don't try again for ANY return value != 0 */
        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
                /* Try again with no barrier */
                dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
                set_bit(MD_NO_BARRIER, &mdev->flags);
                rw &= ~(1 << BIO_RW_BARRIER);
                bio_put(bio);
                goto retry;
        }
 out:
        bio_put(bio);
        return ok;
}

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
                         sector_t sector, int rw)
{
        int logical_block_size, mask, ok;
        int offset = 0;
        struct page *iop = mdev->md_io_page;

        D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

        BUG_ON(!bdev->md_bdev);

        logical_block_size = bdev_logical_block_size(bdev->md_bdev);
        if (logical_block_size == 0)
                logical_block_size = MD_SECTOR_SIZE;

        /* in case logical_block_size != 512 [ s390 only? ] */
        if (logical_block_size != MD_SECTOR_SIZE) {
                mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
                D_ASSERT(mask == 1 || mask == 3 || mask == 7);
                D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
                offset = sector & mask;
                sector = sector & ~mask;
                iop = mdev->md_io_tmpp;

                if (rw & WRITE) {
                        /* these are GFP_KERNEL pages, pre-allocated
                         * on device initialization */
                        void *p = page_address(mdev->md_io_page);
                        void *hp = page_address(mdev->md_io_tmpp);

                        ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
                                        READ, logical_block_size);

                        if (unlikely(!ok)) {
                                dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
                                    "READ [logical_block_size!=512]) failed!\n",
                                    (unsigned long long)sector);
                                return 0;
                        }

                        memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
                }
        }

        if (sector < drbd_md_first_sector(bdev) ||
            sector > drbd_md_last_sector(bdev))
                dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
                     current->comm, current->pid, __func__,
                     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

        ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
        if (unlikely(!ok)) {
                dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
                    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
                return 0;
        }

        if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
                void *p = page_address(mdev->md_io_page);
                void *hp = page_address(mdev->md_io_tmpp);

                memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
        }

        return ok;
}

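/* Try to grab (and pin) the activity log extent for @enr.
 * Returns NULL if the corresponding resync extent is currently locked
 * against writes (BME_NO_WRITES), or if the LRU cannot deliver a usable
 * element right now; the caller then waits on mdev->al_wait and retries. */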
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
        struct lc_element *al_ext;
        struct lc_element *tmp;
        unsigned long al_flags = 0;

        spin_lock_irq(&mdev->al_lock);
        tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
        if (unlikely(tmp != NULL)) {
                struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
                if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
                        spin_unlock_irq(&mdev->al_lock);
                        return NULL;
                }
        }
        al_ext = lc_get(mdev->act_log, enr);
        al_flags = mdev->act_log->flags;
        spin_unlock_irq(&mdev->al_lock);

        /*
        if (!al_ext) {
                if (al_flags & LC_STARVING)
                        dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
                if (al_flags & LC_DIRTY)
                        dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
        }
        */

        return al_ext;
}

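/**
 * drbd_al_begin_io() - Gets (and pins) the AL extent covering @sector
 * @mdev: DRBD device.
 * @sector: The sector of an application write about to be submitted.
 *
 * Sleeps on al_wait until the extent is in the active set. If a cold
 * extent gets activated, the on disk transaction write is delegated to
 * the worker thread (see the comment in the function body).
 */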
void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
        unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
        struct lc_element *al_ext;
        struct update_al_work al_work;

        D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

        trace_drbd_actlog(mdev, sector, "al_begin_io");

        wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

        if (al_ext->lc_number != enr) {
                /* drbd_al_write_transaction(mdev,al_ext,enr);
                 * recurses into generic_make_request(), which
                 * disallows recursion, bios being serialized on the
                 * current->bio_tail list now.
                 * we have to delegate updates to the activity log
                 * to the worker thread. */
                init_completion(&al_work.event);
                al_work.al_ext = al_ext;
                al_work.enr = enr;
                al_work.old_enr = al_ext->lc_number;
                al_work.w.cb = w_al_write_transaction;
                drbd_queue_work_front(&mdev->data.work, &al_work.w);
                wait_for_completion(&al_work.event);

                mdev->al_writ_cnt++;

                spin_lock_irq(&mdev->al_lock);
                lc_changed(mdev->act_log, al_ext);
                spin_unlock_irq(&mdev->al_lock);
                wake_up(&mdev->al_wait);
        }
}

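/**
 * drbd_al_complete_io() - Releases the reference on the AL extent covering @sector
 * @mdev: DRBD device.
 * @sector: The sector of the completed application write.
 *
 * Counterpart of drbd_al_begin_io().
 */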
void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
        unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
        struct lc_element *extent;
        unsigned long flags;

        trace_drbd_actlog(mdev, sector, "al_complete_io");

        spin_lock_irqsave(&mdev->al_lock, flags);

        extent = lc_find(mdev->act_log, enr);

        if (!extent) {
                spin_unlock_irqrestore(&mdev->al_lock, flags);
                dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
                return;
        }

        if (lc_put(mdev->act_log, extent) == 0)
                wake_up(&mdev->al_wait);

        spin_unlock_irqrestore(&mdev->al_lock, flags);
}

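/* Worker callback: write one transaction (the newly activated slot plus
 * the next AL_EXTENTS_PT slots of the cyclic dump) to the on disk
 * activity log, then complete the drbd_al_begin_io() that queued us. */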
int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct update_al_work *aw = container_of(w, struct update_al_work, w);
        struct lc_element *updated = aw->al_ext;
        const unsigned int new_enr = aw->enr;
        const unsigned int evicted = aw->old_enr;
        struct al_transaction *buffer;
        sector_t sector;
        int i, n, mx;
        unsigned int extent_nr;
        u32 xor_sum = 0;

        if (!get_ldev(mdev)) {
                dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
                complete(&((struct update_al_work *)w)->event);
                return 1;
        }
        /* do we have to do a bitmap write, first?
         * TODO reduce maximum latency:
         * submit both bios, then wait for both,
         * instead of doing two synchronous sector writes. */
        if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
                drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);

        mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
        buffer = (struct al_transaction *)page_address(mdev->md_io_page);

        buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
        buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

        n = lc_index_of(mdev->act_log, updated);

        buffer->updates[0].pos = cpu_to_be32(n);
        buffer->updates[0].extent = cpu_to_be32(new_enr);

        xor_sum ^= new_enr;

        mx = min_t(int, AL_EXTENTS_PT,
                   mdev->act_log->nr_elements - mdev->al_tr_cycle);
        for (i = 0; i < mx; i++) {
                unsigned idx = mdev->al_tr_cycle + i;
                extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
                buffer->updates[i+1].pos = cpu_to_be32(idx);
                buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
                xor_sum ^= extent_nr;
        }
        for (; i < AL_EXTENTS_PT; i++) {
                buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
                buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
                xor_sum ^= LC_FREE;
        }
        mdev->al_tr_cycle += AL_EXTENTS_PT;
        if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
                mdev->al_tr_cycle = 0;

        buffer->xor_sum = cpu_to_be32(xor_sum);

        sector =  mdev->ldev->md.md_offset
                + mdev->ldev->md.al_offset + mdev->al_tr_pos;

        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
                drbd_chk_io_error(mdev, 1, TRUE);

        if (++mdev->al_tr_pos >
            div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
                mdev->al_tr_pos = 0;

        D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
        mdev->al_tr_number++;

        mutex_unlock(&mdev->md_io_mutex);

        complete(&((struct update_al_work *)w)->event);
        put_ldev(mdev);

        return 1;
}

/**
 * drbd_al_read_tr() - Read a single transaction from the on disk activity log
 * @mdev: DRBD device.
 * @bdev: Block device to read from.
 * @b: pointer to an al_transaction.
 * @index: On disk slot of the transaction to read.
 *
 * Returns -1 on IO error, 0 on checksum error and 1 upon success.
 */
static int drbd_al_read_tr(struct drbd_conf *mdev,
                           struct drbd_backing_dev *bdev,
                           struct al_transaction *b,
                           int index)
{
        sector_t sector;
        int rv, i;
        u32 xor_sum = 0;

        sector = bdev->md.md_offset + bdev->md.al_offset + index;

        /* Don't process errors normally,
         * as this is done before disk is attached! */
        if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
                return -1;

        rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);

        for (i = 0; i < AL_EXTENTS_PT + 1; i++)
                xor_sum ^= be32_to_cpu(b->updates[i].extent);
        rv &= (xor_sum == be32_to_cpu(b->xor_sum));

        return rv;
}

/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev: DRBD device.
 * @bdev: Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
        struct al_transaction *buffer;
        int i;
        int rv;
        int mx;
        int active_extents = 0;
        int transactions = 0;
        int found_valid = 0;
        int from = 0;
        int to = 0;
        u32 from_tnr = 0;
        u32 to_tnr = 0;
        u32 cnr;

        mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

        /* lock out all other meta data io for now,
         * and make sure the page is mapped.
         */
        mutex_lock(&mdev->md_io_mutex);
        buffer = page_address(mdev->md_io_page);

        /* Find the valid transaction in the log */
        for (i = 0; i <= mx; i++) {
                rv = drbd_al_read_tr(mdev, bdev, buffer, i);
                if (rv == 0)
                        continue;
                if (rv == -1) {
                        mutex_unlock(&mdev->md_io_mutex);
                        return 0;
                }
                cnr = be32_to_cpu(buffer->tr_number);

                if (++found_valid == 1) {
                        from = i;
                        to = i;
                        from_tnr = cnr;
                        to_tnr = cnr;
                        continue;
                }
                if ((int)cnr - (int)from_tnr < 0) {
                        D_ASSERT(from_tnr - cnr + i - from == mx+1);
                        from = i;
                        from_tnr = cnr;
                }
                if ((int)cnr - (int)to_tnr > 0) {
                        D_ASSERT(cnr - to_tnr == i - to);
                        to = i;
                        to_tnr = cnr;
                }
        }

        if (!found_valid) {
                dev_warn(DEV, "No usable activity log found.\n");
                mutex_unlock(&mdev->md_io_mutex);
                return 1;
        }

        /* Read the valid transactions.
         * dev_info(DEV, "Reading from %d to %d.\n", from, to); */
        i = from;
        while (1) {
                int j, pos;
                unsigned int extent_nr;
                unsigned int trn;

                rv = drbd_al_read_tr(mdev, bdev, buffer, i);
                ERR_IF(rv == 0) goto cancel;
                if (rv == -1) {
                        mutex_unlock(&mdev->md_io_mutex);
                        return 0;
                }

                trn = be32_to_cpu(buffer->tr_number);

                spin_lock_irq(&mdev->al_lock);

                /* This loop runs backwards because in the cyclic
                   elements there might be an old version of the
                   updated element (in slot 0). So the element in slot 0
                   can overwrite old versions. */
                for (j = AL_EXTENTS_PT; j >= 0; j--) {
                        pos = be32_to_cpu(buffer->updates[j].pos);
                        extent_nr = be32_to_cpu(buffer->updates[j].extent);

                        if (extent_nr == LC_FREE)
                                continue;

                        lc_set(mdev->act_log, extent_nr, pos);
                        active_extents++;
                }
                spin_unlock_irq(&mdev->al_lock);

                transactions++;

cancel:
                if (i == to)
                        break;
                i++;
                if (i > mx)
                        i = 0;
        }

        mdev->al_tr_number = to_tnr+1;
        mdev->al_tr_pos = to;
        if (++mdev->al_tr_pos >
            div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
                mdev->al_tr_pos = 0;

        /* ok, we are done with it */
        mutex_unlock(&mdev->md_io_mutex);

        dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
             transactions, active_extents);

        return 1;
}

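/* Completion handler for the bitmap page writes submitted by
 * drbd_al_to_on_disk_bm(): records the first error seen, and completes
 * wc->io_done once the last bio of the batch has finished. */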
static void atodb_endio(struct bio *bio, int error)
{
        struct drbd_atodb_wait *wc = bio->bi_private;
        struct drbd_conf *mdev = wc->mdev;
        struct page *page;
        int uptodate = bio_flagged(bio, BIO_UPTODATE);

        /* strange behavior of some lower level drivers...
         * fail the request by clearing the uptodate flag,
         * but do not return any error?! */
        if (!error && !uptodate)
                error = -EIO;

        drbd_chk_io_error(mdev, error, TRUE);
        if (error && wc->error == 0)
                wc->error = error;

        if (atomic_dec_and_test(&wc->count))
                complete(&wc->io_done);

        page = bio->bi_io_vec[0].bv_page;
        put_page(page);
        bio_put(bio);
        mdev->bm_writ_cnt++;
        put_ldev(mdev);
}

#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* activity log to on disk bitmap -- prepare bio unless that sector
 * is already covered by previously prepared bios */
static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
                                        struct bio **bios,
                                        unsigned int enr,
                                        struct drbd_atodb_wait *wc) __must_hold(local)
{
        struct bio *bio;
        struct page *page;
        sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
                                      + mdev->ldev->md.bm_offset;
        unsigned int page_offset = PAGE_SIZE;
        int offset;
        int i = 0;
        int err = -ENOMEM;

        /* Check if that enr is already covered by an already created bio.
         * Caution, bios[] is not NULL terminated,
         * but only initialized to all NULL.
         * For completely scattered activity log,
         * the last invocation iterates over all bios,
         * and finds the last NULL entry.
         */
        while ((bio = bios[i])) {
                if (bio->bi_sector == on_disk_sector)
                        return 0;
                i++;
        }
        /* bios[i] == NULL, the next not yet used slot */

        /* GFP_KERNEL, we are not in the write-out path */
        bio = bio_alloc(GFP_KERNEL, 1);
        if (bio == NULL)
                return -ENOMEM;

        if (i > 0) {
                const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
                page_offset = prev_bv->bv_offset + prev_bv->bv_len;
                page = prev_bv->bv_page;
        }
        if (page_offset == PAGE_SIZE) {
                page = alloc_page(__GFP_HIGHMEM);
                if (page == NULL)
                        goto out_bio_put;
                page_offset = 0;
        } else {
                get_page(page);
        }

        offset = S2W(enr);
        drbd_bm_get_lel(mdev, offset,
                        min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
                        kmap(page) + page_offset);
        kunmap(page);

        bio->bi_private = wc;
        bio->bi_end_io = atodb_endio;
        bio->bi_bdev = mdev->ldev->md_bdev;
        bio->bi_sector = on_disk_sector;

        if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
                goto out_put_page;

        atomic_inc(&wc->count);
        /* we already know that we may do this...
         * get_ldev_if_state(mdev,D_ATTACHING);
         * just get the extra reference, so that the local_cnt reflects
         * the number of pending IO requests DRBD has at its backing device.
         */
        atomic_inc(&mdev->local_cnt);

        bios[i] = bio;

        return 0;

out_put_page:
        err = -EINVAL;
        put_page(page);
out_bio_put:
        bio_put(bio);
        return err;
}

/**
 * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
 * @mdev: DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
        int i, nr_elements;
        unsigned int enr;
        struct bio **bios;
        struct drbd_atodb_wait wc;

        ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
                return; /* sorry, I don't have any act_log etc... */

        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        nr_elements = mdev->act_log->nr_elements;

        /* GFP_KERNEL, we are not in anyone's write-out path */
        bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
        if (!bios)
                goto submit_one_by_one;

        atomic_set(&wc.count, 0);
        init_completion(&wc.io_done);
        wc.mdev = mdev;
        wc.error = 0;

        for (i = 0; i < nr_elements; i++) {
                enr = lc_element_by_index(mdev->act_log, i)->lc_number;
                if (enr == LC_FREE)
                        continue;
                /* next statement also does atomic_inc wc.count and local_cnt */
                if (atodb_prepare_unless_covered(mdev, bios,
                                                enr/AL_EXT_PER_BM_SECT,
                                                &wc))
                        goto free_bios_submit_one_by_one;
        }

        /* unnecessary optimization? */
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);

        /* all prepared, submit them */
        for (i = 0; i < nr_elements; i++) {
                if (bios[i] == NULL)
                        break;
                if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
                        bios[i]->bi_rw = WRITE;
                        bio_endio(bios[i], -EIO);
                } else {
                        submit_bio(WRITE, bios[i]);
                }
        }

        drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));

        /* always (try to) flush bitmap to stable storage */
        drbd_md_flush(mdev);

        /* In case we did not submit a single IO do not wait for
         * them to complete. (Because we would wait forever here.)
         *
         * In case we had IOs and they are already complete, there
         * is no point in waiting anyway.
         * Therefore this if () ... */
        if (atomic_read(&wc.count))
                wait_for_completion(&wc.io_done);

        put_ldev(mdev);

        kfree(bios);
        return;

free_bios_submit_one_by_one:
        /* free everything by calling the endio callback directly. */
        for (i = 0; i < nr_elements && bios[i]; i++)
                bio_endio(bios[i], 0);

        kfree(bios);

submit_one_by_one:
        dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

        for (i = 0; i < mdev->act_log->nr_elements; i++) {
                enr = lc_element_by_index(mdev->act_log, i)->lc_number;
                if (enr == LC_FREE)
                        continue;
                /* Really slow: if we have al-extents 16..19 active,
                 * sector 4 will be written four times! Synchronous! */
                drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
        }

        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        put_ldev(mdev);
}

/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
 * @mdev: DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
        unsigned int enr;
        unsigned long add = 0;
        char ppb[10];
        int i;

        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        for (i = 0; i < mdev->act_log->nr_elements; i++) {
                enr = lc_element_by_index(mdev->act_log, i)->lc_number;
                if (enr == LC_FREE)
                        continue;
                add += drbd_bm_ALe_set_all(mdev, enr);
        }

        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);

        dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
                 ppsize(ppb, Bit2KB(add)));
}

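/* Remove @al_ext from the activity log, but only if no one still holds
 * a reference on it. Returns 1 on success, 0 if the caller has to wait
 * on al_wait and retry. */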
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
        int rv;

        spin_lock_irq(&mdev->al_lock);
        rv = (al_ext->refcnt == 0);
        if (likely(rv))
                lc_del(mdev->act_log, al_ext);
        spin_unlock_irq(&mdev->al_lock);

        return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev: DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
        struct lc_element *al_ext;
        int i;

        D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));

        for (i = 0; i < mdev->act_log->nr_elements; i++) {
                al_ext = lc_element_by_index(mdev->act_log, i);
                if (al_ext->lc_number == LC_FREE)
                        continue;
                wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
        }

        wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);

        if (!get_ldev(mdev)) {
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
                kfree(udw);
                return 1;
        }

        drbd_bm_write_sect(mdev, udw->enr);
        put_ldev(mdev);

        kfree(udw);

        if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
                switch (mdev->state.conn) {
                case C_SYNC_SOURCE:  case C_SYNC_TARGET:
                case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
                        drbd_resync_finished(mdev);
                default:
                        /* nothing to do */
                        break;
                }
        }
        drbd_bcast_sync_progress(mdev);

        return 1;
}


/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
                                      int count, int success)
{
        struct lc_element *e;
        struct update_odbm_work *udw;

        unsigned int enr;

        D_ASSERT(atomic_read(&mdev->local_cnt));

        /* I simply assume that a sector/size pair never crosses
         * a 16 MB extent border. (Currently this is true...) */
        enr = BM_SECT_TO_EXT(sector);

        e = lc_get(mdev->resync, enr);
        if (e) {
                struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
                if (ext->lce.lc_number == enr) {
                        if (success)
                                ext->rs_left -= count;
                        else
                                ext->rs_failed += count;
                        if (ext->rs_left < ext->rs_failed) {
                                dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
                                    "rs_failed=%d count=%d\n",
                                     (unsigned long long)sector,
                                     ext->lce.lc_number, ext->rs_left,
                                     ext->rs_failed, count);
                                dump_stack();

                                lc_put(mdev->resync, &ext->lce);
                                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                                return;
                        }
                } else {
                        /* Normally this element should be in the cache,
                         * since drbd_rs_begin_io() pulled it already in.
                         *
                         * But maybe an application write finished, and we set
                         * something outside the resync lru_cache in sync.
                         */
                        int rs_left = drbd_bm_e_weight(mdev, enr);
                        if (ext->flags != 0) {
                                dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
                                     " -> %d[%u;00]\n",
                                     ext->lce.lc_number, ext->rs_left,
                                     ext->flags, enr, rs_left);
                                ext->flags = 0;
                        }
                        if (ext->rs_failed) {
                                dev_warn(DEV, "Kicking resync_lru element enr=%u "
                                     "out with rs_failed=%d\n",
                                     ext->lce.lc_number, ext->rs_failed);
                                set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
                        }
                        ext->rs_left = rs_left;
                        ext->rs_failed = success ? 0 : count;
                        lc_changed(mdev->resync, &ext->lce);
                }
                lc_put(mdev->resync, &ext->lce);
                /* no race, we are within the al_lock! */

                if (ext->rs_left == ext->rs_failed) {
                        ext->rs_failed = 0;

                        udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
                        if (udw) {
                                udw->enr = ext->lce.lc_number;
                                udw->w.cb = w_update_odbm;
                                drbd_queue_work_front(&mdev->data.work, &udw->w);
                        } else {
                                dev_warn(DEV, "Could not kmalloc an udw\n");
                                set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
                        }
                }
        } else {
                dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
                    mdev->resync_locked,
                    mdev->resync->nr_elements,
                    mdev->resync->flags);
        }
}

/* clear the bit corresponding to the piece of storage in question:
 * size byte of data starting from sector.  Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
                       const char *file, const unsigned int line)
{
        /* Is called from worker and receiver context _only_ */
        unsigned long sbnr, ebnr, lbnr;
        unsigned long count = 0;
        sector_t esector, nr_sectors;
        int wake_up = 0;
        unsigned long flags;

        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
                dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
                                (unsigned long long)sector, size);
                return;
        }
        nr_sectors = drbd_get_capacity(mdev->this_bdev);
        esector = sector + (size >> 9) - 1;

        ERR_IF(sector >= nr_sectors) return;
        ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

        lbnr = BM_SECT_TO_BIT(nr_sectors-1);

        /* we clear it (in sync).
         * round up start sector, round down end sector.  we make sure we only
         * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
        if (unlikely(esector < BM_SECT_PER_BIT-1))
                return;
        if (unlikely(esector == (nr_sectors-1)))
                ebnr = lbnr;
        else
                ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
        sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

        trace_drbd_resync(mdev, TRACE_LVL_METRICS,
                          "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
                          (unsigned long long)sector, size, sbnr, ebnr);

        if (sbnr > ebnr)
                return;

        /*
         * ok, (capacity & 7) != 0 sometimes, but who cares...
         * we count rs_{total,left} in bits, not sectors.
         */
        spin_lock_irqsave(&mdev->al_lock, flags);
        count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
        if (count) {
                /* we need the lock for drbd_try_clear_on_disk_bm */
                if (jiffies - mdev->rs_mark_time > HZ*10) {
                        /* should be rolling marks,
                         * but we estimate only anyway. */
                        if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
                            mdev->state.conn != C_PAUSED_SYNC_T &&
                            mdev->state.conn != C_PAUSED_SYNC_S) {
                                mdev->rs_mark_time = jiffies;
                                mdev->rs_mark_left = drbd_bm_total_weight(mdev);
                        }
                }
                if (get_ldev(mdev)) {
                        drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
                        put_ldev(mdev);
                }
                /* just wake_up unconditional now, various lc_changed(),
                 * lc_put() in drbd_try_clear_on_disk_bm(). */
                wake_up = 1;
        }
        spin_unlock_irqrestore(&mdev->al_lock, flags);
        if (wake_up)
                wake_up(&mdev->al_wait);
}

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
                            const char *file, const unsigned int line)
{
        unsigned long sbnr, ebnr, lbnr, flags;
        sector_t esector, nr_sectors;
        unsigned int enr, count;
        struct lc_element *e;

        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
                dev_err(DEV, "sector: %llus, size: %d\n",
                        (unsigned long long)sector, size);
                return;
        }

        if (!get_ldev(mdev))
                return; /* no disk, no metadata, no bitmap to set bits in */

        nr_sectors = drbd_get_capacity(mdev->this_bdev);
        esector = sector + (size >> 9) - 1;

        ERR_IF(sector >= nr_sectors)
                goto out;
        ERR_IF(esector >= nr_sectors)
                esector = (nr_sectors-1);

        lbnr = BM_SECT_TO_BIT(nr_sectors-1);

        /* we set it out of sync,
         * we do not need to round anything here */
        sbnr = BM_SECT_TO_BIT(sector);
        ebnr = BM_SECT_TO_BIT(esector);

        trace_drbd_resync(mdev, TRACE_LVL_METRICS,
                          "drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
                          (unsigned long long)sector, size, sbnr, ebnr);

        /* ok, (capacity & 7) != 0 sometimes, but who cares...
         * we count rs_{total,left} in bits, not sectors. */
        spin_lock_irqsave(&mdev->al_lock, flags);
        count = drbd_bm_set_bits(mdev, sbnr, ebnr);

        enr = BM_SECT_TO_EXT(sector);
        e = lc_find(mdev->resync, enr);
        if (e)
                lc_entry(e, struct bm_extent, lce)->rs_left += count;
        spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
        put_ldev(mdev);
}

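/* Get (and pin) the resync extent for @enr and mark it BME_NO_WRITES.
 * Returns NULL if more than half of the resync LRU is already locked,
 * or if the LRU cannot deliver an element right now; the caller then
 * sleeps on al_wait and retries. */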
static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
        struct lc_element *e;
        struct bm_extent *bm_ext;
        int wakeup = 0;
        unsigned long rs_flags;

        spin_lock_irq(&mdev->al_lock);
        if (mdev->resync_locked > mdev->resync->nr_elements/2) {
                spin_unlock_irq(&mdev->al_lock);
                return NULL;
        }
        e = lc_get(mdev->resync, enr);
        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
        if (bm_ext) {
                if (bm_ext->lce.lc_number != enr) {
                        bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
                        bm_ext->rs_failed = 0;
                        lc_changed(mdev->resync, &bm_ext->lce);
                        wakeup = 1;
                }
                if (bm_ext->lce.refcnt == 1)
                        mdev->resync_locked++;
                set_bit(BME_NO_WRITES, &bm_ext->flags);
        }
        rs_flags = mdev->resync->flags;
        spin_unlock_irq(&mdev->al_lock);
        if (wakeup)
                wake_up(&mdev->al_wait);

        if (!bm_ext) {
                if (rs_flags & LC_STARVING)
                        dev_warn(DEV, "Have to wait for element"
                             " (resync LRU too small?)\n");
                BUG_ON(rs_flags & LC_DIRTY);
        }

        return bm_ext;
}

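/* Returns 1 if AL extent @enr is currently active, or is just about to
 * become active (act_log->new_number), i.e. application IO on it may
 * still be in flight. */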
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
        struct lc_element *al_ext;
        int rv = 0;

        spin_lock_irq(&mdev->al_lock);
        if (unlikely(enr == mdev->act_log->new_number))
                rv = 1;
        else {
                al_ext = lc_find(mdev->act_log, enr);
                if (al_ext) {
                        if (al_ext->refcnt)
                                rv = 1;
                }
        }
        spin_unlock_irq(&mdev->al_lock);

        /*
        if (unlikely(rv)) {
                dev_info(DEV, "Delaying sync read until app's write is done\n");
        }
        */
        return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev: DRBD device.
 * @sector: The sector number.
 *
 * This function sleeps on al_wait. Returns 1 on success, 0 if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
        unsigned int enr = BM_SECT_TO_EXT(sector);
        struct bm_extent *bm_ext;
        int i, sig;

        trace_drbd_resync(mdev, TRACE_LVL_ALL,
                          "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
                          (unsigned long long)sector, enr);

        sig = wait_event_interruptible(mdev->al_wait,
                        (bm_ext = _bme_get(mdev, enr)));
        if (sig)
                return 0;

        if (test_bit(BME_LOCKED, &bm_ext->flags))
                return 1;

        for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
                sig = wait_event_interruptible(mdev->al_wait,
                                !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
                if (sig) {
                        spin_lock_irq(&mdev->al_lock);
                        if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
                                clear_bit(BME_NO_WRITES, &bm_ext->flags);
                                mdev->resync_locked--;
                                wake_up(&mdev->al_wait);
                        }
                        spin_unlock_irq(&mdev->al_lock);
                        return 0;
                }
        }

        set_bit(BME_LOCKED, &bm_ext->flags);

        return 1;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev: DRBD device.
 * @sector: The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
        unsigned int enr = BM_SECT_TO_EXT(sector);
        const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
        struct lc_element *e;
        struct bm_extent *bm_ext;
        int i;

        trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
                          (unsigned long long)sector);

        spin_lock_irq(&mdev->al_lock);
        if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
                /* in case you have very heavy scattered io, it may
                 * stall the syncer for an undefined time if we give up
                 * the ref count when we try again and requeue.
                 *
                 * if we don't give up the refcount, but the next time
                 * we are scheduled this extent has been "synced" by new
                 * application writes, we'd miss the lc_put on the
                 * extent we keep the refcount on.
                 * so we remembered which extent we had to try again, and
                 * if the next requested one is something else, we do
                 * the lc_put here...
                 * we also have to wake_up
                 */

                trace_drbd_resync(mdev, TRACE_LVL_ALL,
                                  "dropping %u, apparently got 'synced' by application io\n",
                                  mdev->resync_wenr);

                e = lc_find(mdev->resync, mdev->resync_wenr);
                bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
                if (bm_ext) {
                        D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
                        D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
                        clear_bit(BME_NO_WRITES, &bm_ext->flags);
                        mdev->resync_wenr = LC_FREE;
                        if (lc_put(mdev->resync, &bm_ext->lce) == 0)
                                mdev->resync_locked--;
                        wake_up(&mdev->al_wait);
                } else {
                        dev_alert(DEV, "LOGIC BUG\n");
                }
        }
        /* TRY. */
        e = lc_try_get(mdev->resync, enr);
        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
        if (bm_ext) {
                if (test_bit(BME_LOCKED, &bm_ext->flags))
                        goto proceed;
                if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
                        mdev->resync_locked++;
                } else {
                        /* we did set the BME_NO_WRITES,
                         * but then could not set BME_LOCKED,
                         * so we tried again.
                         * drop the extra reference. */
                        trace_drbd_resync(mdev, TRACE_LVL_ALL,
                                          "dropping extra reference on %u\n", enr);

                        bm_ext->lce.refcnt--;
                        D_ASSERT(bm_ext->lce.refcnt > 0);
                }
                goto check_al;
        } else {
                /* do we rather want to try later? */
                if (mdev->resync_locked > mdev->resync->nr_elements-3) {
                        trace_drbd_resync(mdev, TRACE_LVL_ALL,
                                          "resync_locked = %u!\n", mdev->resync_locked);

                        goto try_again;
                }
                /* Do or do not. There is no try. -- Yoda */
                e = lc_get(mdev->resync, enr);
                bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
                if (!bm_ext) {
                        const unsigned long rs_flags = mdev->resync->flags;
                        if (rs_flags & LC_STARVING)
                                dev_warn(DEV, "Have to wait for element"
                                     " (resync LRU too small?)\n");
                        BUG_ON(rs_flags & LC_DIRTY);
                        goto try_again;
                }
                if (bm_ext->lce.lc_number != enr) {
                        bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
                        bm_ext->rs_failed = 0;
                        lc_changed(mdev->resync, &bm_ext->lce);
                        wake_up(&mdev->al_wait);
                        D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
                }
                set_bit(BME_NO_WRITES, &bm_ext->flags);
                D_ASSERT(bm_ext->lce.refcnt == 1);
                mdev->resync_locked++;
                goto check_al;
        }
check_al:
        trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);

        for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
                if (unlikely(al_enr+i == mdev->act_log->new_number))
                        goto try_again;
                if (lc_is_used(mdev->act_log, al_enr+i))
                        goto try_again;
        }
        set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
        mdev->resync_wenr = LC_FREE;
        spin_unlock_irq(&mdev->al_lock);
        return 0;

try_again:
        trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
        if (bm_ext)
                mdev->resync_wenr = enr;
        spin_unlock_irq(&mdev->al_lock);
        return -EAGAIN;
}

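/**
 * drbd_rs_complete_io() - Releases the resync extent reference for @sector
 * @mdev: DRBD device.
 * @sector: The sector number.
 *
 * Counterpart of drbd_rs_begin_io() and drbd_try_rs_begin_io(); clears
 * BME_LOCKED and BME_NO_WRITES once the last reference is dropped.
 */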
void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
        unsigned int enr = BM_SECT_TO_EXT(sector);
        struct lc_element *e;
        struct bm_extent *bm_ext;
        unsigned long flags;

        trace_drbd_resync(mdev, TRACE_LVL_ALL,
                          "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
                          (long long)sector, enr);

        spin_lock_irqsave(&mdev->al_lock, flags);
        e = lc_find(mdev->resync, enr);
        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
        if (!bm_ext) {
                spin_unlock_irqrestore(&mdev->al_lock, flags);
                if (__ratelimit(&drbd_ratelimit_state))
                        dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
                return;
        }

        if (bm_ext->lce.refcnt == 0) {
                spin_unlock_irqrestore(&mdev->al_lock, flags);
                dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
                    "but refcnt is 0!?\n",
                    (unsigned long long)sector, enr);
                return;
        }

        if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
                clear_bit(BME_LOCKED, &bm_ext->flags);
                clear_bit(BME_NO_WRITES, &bm_ext->flags);
                mdev->resync_locked--;
                wake_up(&mdev->al_wait);
        }

        spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev: DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
        trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");

        spin_lock_irq(&mdev->al_lock);

        if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
                lc_reset(mdev->resync);
                put_ldev(mdev);
        }
        mdev->resync_locked = 0;
        mdev->resync_wenr = LC_FREE;
        spin_unlock_irq(&mdev->al_lock);
        wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev: DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
        struct lc_element *e;
        struct bm_extent *bm_ext;
        int i;

        trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");

        spin_lock_irq(&mdev->al_lock);

        if (get_ldev_if_state(mdev, D_FAILED)) {
                /* ok, ->resync is there. */
                for (i = 0; i < mdev->resync->nr_elements; i++) {
                        e = lc_element_by_index(mdev->resync, i);
                        bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
                        if (bm_ext->lce.lc_number == LC_FREE)
                                continue;
                        if (bm_ext->lce.lc_number == mdev->resync_wenr) {
                                dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
                                     " got 'synced' by application io\n",
                                     mdev->resync_wenr);
                                D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
                                D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
                                clear_bit(BME_NO_WRITES, &bm_ext->flags);
                                mdev->resync_wenr = LC_FREE;
                                lc_put(mdev->resync, &bm_ext->lce);
                        }
                        if (bm_ext->lce.refcnt != 0) {
                                dev_info(DEV, "Retrying drbd_rs_del_all() later. "
                                     "refcnt=%d\n", bm_ext->lce.refcnt);
                                put_ldev(mdev);
                                spin_unlock_irq(&mdev->al_lock);
                                return -EAGAIN;
                        }
                        D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
                        D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
                        lc_del(mdev->resync, &bm_ext->lce);
                }
                D_ASSERT(mdev->resync->used == 0);
                put_ldev(mdev);
        }
        spin_unlock_irq(&mdev->al_lock);

        return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev: DRBD device.
 * @sector: The sector number.
 * @size: Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
        /* Is called from worker and receiver context _only_ */
        unsigned long sbnr, ebnr, lbnr;
        unsigned long count;
        sector_t esector, nr_sectors;
        int wake_up = 0;

        trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
                          "drbd_rs_failed_io: sector=%llus, size=%u\n",
                          (unsigned long long)sector, size);

        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
                dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
                                (unsigned long long)sector, size);
                return;
        }
        nr_sectors = drbd_get_capacity(mdev->this_bdev);
        esector = sector + (size >> 9) - 1;

        ERR_IF(sector >= nr_sectors) return;
        ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

        lbnr = BM_SECT_TO_BIT(nr_sectors-1);

        /*
         * round up start sector, round down end sector.  we make sure we only
         * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
        if (unlikely(esector < BM_SECT_PER_BIT-1))
                return;
        if (unlikely(esector == (nr_sectors-1)))
                ebnr = lbnr;
        else
                ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
        sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

        if (sbnr > ebnr)
                return;

        /*
         * ok, (capacity & 7) != 0 sometimes, but who cares...
         * we count rs_{total,left} in bits, not sectors.
         */
        spin_lock_irq(&mdev->al_lock);
        count = drbd_bm_count_bits(mdev, sbnr, ebnr);
        if (count) {
                mdev->rs_failed += count;

                if (get_ldev(mdev)) {
                        drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
                        put_ldev(mdev);
                }

                /* just wake_up unconditional now, various lc_changed(),
                 * lc_put() in drbd_try_clear_on_disk_bm(). */
                wake_up = 1;
        }
        spin_unlock_irq(&mdev->al_lock);
        if (wake_up)
                wake_up(&mdev->al_wait);
}