Diffstat (limited to 'drivers/block/drbd/drbd_actlog.c')
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	1424
1 file changed, 1424 insertions, 0 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
new file mode 100644
index 000000000000..17956ff6a08d
--- /dev/null
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -0,0 +1,1424 @@
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
/* We maintain a trivial checksum in our on disk activity log.
 * With that we can ensure correct operation even when the storage
 * device might do a partial (last) sector write while losing power.
 */
struct __packed al_transaction {
	u32 magic;
	u32 tr_number;
	struct __packed {
		u32 pos;
		u32 extent;
	} updates[1 + AL_EXTENTS_PT];
	u32 xor_sum;
};
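
/*
 * A minimal userspace sketch of the checksum rule above, for clarity:
 * the stored xor_sum must equal the XOR of all extent numbers in
 * updates[].  The sketch_* names and the AL_EXTENTS_PT value of 61 are
 * illustrative assumptions only, and byte-order conversion (the real
 * on-disk fields are big-endian) is deliberately left out.
 */
#include <stdint.h>

#define SKETCH_AL_EXTENTS_PT 61	/* assumed value, for illustration */

struct sketch_al_transaction {
	uint32_t magic;
	uint32_t tr_number;
	struct {
		uint32_t pos;
		uint32_t extent;
	} updates[1 + SKETCH_AL_EXTENTS_PT];
	uint32_t xor_sum;
};

/* returns 1 if the checksum is consistent; a torn (partial) sector
 * write is very likely to make this fail, so the transaction is
 * rejected instead of being applied half-way */
static int sketch_al_checksum_ok(const struct sketch_al_transaction *t)
{
	uint32_t xor_sum = 0;
	int i;

	for (i = 0; i < 1 + SKETCH_AL_EXTENTS_PT; i++)
		xor_sum ^= t->updates[i].extent;
	return xor_sum == t->xor_sum;
}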

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct lc_element *al_ext;
	struct completion event;
	unsigned int enr;
	/* if old_enr != LC_FREE, write corresponding bitmap sector, too */
	unsigned int old_enr;
};

struct drbd_atodb_wait {
	atomic_t count;
	struct completion io_done;
	struct drbd_conf *mdev;
	int error;
};


int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	struct drbd_md_io md_io;
	int ok;

	md_io.mdev = mdev;
	init_completion(&md_io.event);
	md_io.error = 0;

	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
		rw |= (1 << BIO_RW_BARRIER);
	rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));

 retry:
	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	ok = (bio_add_page(bio, page, size, 0) == size);
	if (!ok)
		goto out;
	bio->bi_private = &md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_for_completion(&md_io.event);
	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;

	/* check for unsupported barrier op.
	 * would rather check on EOPNOTSUPP, but that is not reliable.
	 * don't try again for ANY return value != 0 */
	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
		/* Try again with no barrier */
		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
		set_bit(MD_NO_BARRIER, &mdev->flags);
		rw &= ~(1 << BIO_RW_BARRIER);
		bio_put(bio);
		goto retry;
	}
 out:
	bio_put(bio);
	return ok;
}
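
/*
 * The retry above is a latch-and-fall-back pattern: attempt the write
 * once with a barrier, and on failure record MD_NO_BARRIER and reissue
 * the plain write (all later writes then skip the barrier up front).
 * A self-contained sketch of just that control flow;
 * sketch_submit_write() is a made-up stand-in for the bio submission,
 * not a kernel API:
 */
static int sketch_submit_write(int use_barrier)
{
	/* stub: pretend the device rejects barrier writes */
	return use_barrier ? -1 : 0;
}

static int sketch_write_md_sector(int *md_no_barrier)
{
	int use_barrier = !*md_no_barrier;

	if (sketch_submit_write(use_barrier) == 0)
		return 0;
	if (use_barrier) {
		/* assume the failure was the unsupported barrier;
		 * latch the flag and retry without it */
		*md_no_barrier = 1;
		return sketch_submit_write(0);
	}
	return -1;
}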

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int logical_block_size, mask, ok;
	int offset = 0;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));

	BUG_ON(!bdev->md_bdev);

	logical_block_size = bdev_logical_block_size(bdev->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	/* in case logical_block_size != 512 [ s390 only? ] */
	if (logical_block_size != MD_SECTOR_SIZE) {
		mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
		D_ASSERT(mask == 1 || mask == 3 || mask == 7);
		D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
		offset = sector & mask;
		sector = sector & ~mask;
		iop = mdev->md_io_tmpp;

		if (rw & WRITE) {
			/* these are GFP_KERNEL pages, pre-allocated
			 * on device initialization */
			void *p = page_address(mdev->md_io_page);
			void *hp = page_address(mdev->md_io_tmpp);

			ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
					READ, logical_block_size);

			if (unlikely(!ok)) {
				dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
				    "READ [logical_block_size!=512]) failed!\n",
				    (unsigned long long)sector);
				return 0;
			}

			memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
		}
	}

	if (sector < drbd_md_first_sector(bdev) ||
	    sector > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
	if (unlikely(!ok)) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
		return 0;
	}

	if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
		void *p = page_address(mdev->md_io_page);
		void *hp = page_address(mdev->md_io_tmpp);

		memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
	}

	return ok;
}
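
/*
 * Sketch of the sub-sector addressing arithmetic used above, assuming a
 * 4096 byte logical block size and 512 byte MD_SECTOR_SIZE.  The mask
 * splits a 512-byte sector number into the aligned 4k block start and
 * the slot within that block; the numbers in the asserts are just a
 * worked example.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_MD_SECTOR_SIZE 512

static void sketch_sub_sector_example(void)
{
	unsigned int logical_block_size = 4096;	/* e.g. s390 DASD */
	unsigned int mask = logical_block_size / SKETCH_MD_SECTOR_SIZE - 1; /* 7 */
	uint64_t sector = 21;

	unsigned int offset = sector & mask;		/* 5 */
	uint64_t aligned = sector & ~(uint64_t)mask;	/* 16 */

	/* sector 21 is the sixth 512-byte slot of the 4k block at 16 */
	assert(offset == 5 && aligned == 16);
}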

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	unsigned long al_flags = 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return NULL;
		}
	}
	al_ext = lc_get(mdev->act_log, enr);
	al_flags = mdev->act_log->flags;
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (!al_ext) {
		if (al_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
		if (al_flags & LC_DIRTY)
			dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
	}
	*/

	return al_ext;
}

void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *al_ext;
	struct update_al_work al_work;

	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));

	if (al_ext->lc_number != enr) {
		/* drbd_al_write_transaction(mdev,al_ext,enr);
		 * recurses into generic_make_request(), which
		 * disallows recursion, bios being serialized on the
		 * current->bio_tail list now.
		 * we have to delegate updates to the activity log
		 * to the worker thread. */
		init_completion(&al_work.event);
		al_work.al_ext = al_ext;
		al_work.enr = enr;
		al_work.old_enr = al_ext->lc_number;
		al_work.w.cb = w_al_write_transaction;
		drbd_queue_work_front(&mdev->data.work, &al_work.w);
		wait_for_completion(&al_work.event);

		mdev->al_writ_cnt++;

		spin_lock_irq(&mdev->al_lock);
		lc_changed(mdev->act_log, al_ext);
		spin_unlock_irq(&mdev->al_lock);
		wake_up(&mdev->al_wait);
	}
}

void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
	struct lc_element *extent;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);

	extent = lc_find(mdev->act_log, enr);

	if (!extent) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
		return;
	}

	if (lc_put(mdev->act_log, extent) == 0)
		wake_up(&mdev->al_wait);

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

int
w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct lc_element *updated = aw->al_ext;
	const unsigned int new_enr = aw->enr;
	const unsigned int evicted = aw->old_enr;
	struct al_transaction *buffer;
	sector_t sector;
	int i, n, mx;
	unsigned int extent_nr;
	u32 xor_sum = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
		complete(&((struct update_al_work *)w)->event);
		return 1;
	}
	/* do we have to do a bitmap write, first?
	 * TODO reduce maximum latency:
	 * submit both bios, then wait for both,
	 * instead of doing two synchronous sector writes. */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);

	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
	buffer = (struct al_transaction *)page_address(mdev->md_io_page);

	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	n = lc_index_of(mdev->act_log, updated);

	buffer->updates[0].pos = cpu_to_be32(n);
	buffer->updates[0].extent = cpu_to_be32(new_enr);

	xor_sum ^= new_enr;

	mx = min_t(int, AL_EXTENTS_PT,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->updates[i+1].pos = cpu_to_be32(idx);
		buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
		xor_sum ^= extent_nr;
	}
	for (; i < AL_EXTENTS_PT; i++) {
		buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
		buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
		xor_sum ^= LC_FREE;
	}
	mdev->al_tr_cycle += AL_EXTENTS_PT;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	buffer->xor_sum = cpu_to_be32(xor_sum);

	sector = mdev->ldev->md.md_offset
	       + mdev->ldev->md.al_offset + mdev->al_tr_pos;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
		drbd_chk_io_error(mdev, 1, TRUE);

	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
	mdev->al_tr_number++;

	mutex_unlock(&mdev->md_io_mutex);

	complete(&((struct update_al_work *)w)->event);
	put_ldev(mdev);

	return 1;
}
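
/*
 * Sketch of the two cyclic counters used above.  al_tr_cycle walks the
 * in-core AL in steps of AL_EXTENTS_PT, so each transaction re-spools
 * the next batch of cached extents and a full cycle rewrites them all;
 * al_tr_pos walks the on-disk transaction slots, of which there are
 * div_ceil(nr_elements, AL_EXTENTS_PT) + 1 (note the "> div_ceil(...)"
 * wrap above, not ">=").  Values here are illustrative assumptions,
 * not taken from the drbd headers.
 */
#define SKETCH_AL_EXTENTS_PT 61

static unsigned sketch_div_ceil(unsigned a, unsigned b)
{
	return (a + b - 1) / b;
}

static void sketch_advance_al_positions(unsigned nr_elements,
					unsigned *al_tr_cycle,
					unsigned *al_tr_pos)
{
	/* next transaction spools the next AL_EXTENTS_PT cached extents */
	*al_tr_cycle += SKETCH_AL_EXTENTS_PT;
	if (*al_tr_cycle >= nr_elements)
		*al_tr_cycle = 0;
	/* and goes to the next on-disk slot, wrapping after the last one */
	if (++(*al_tr_pos) > sketch_div_ceil(nr_elements, SKETCH_AL_EXTENTS_PT))
		*al_tr_pos = 0;
}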

/**
 * drbd_al_read_tr() - Read a single transaction from the on disk activity log
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 * @b:		pointer to an al_transaction.
 * @index:	On disk slot of the transaction to read.
 *
 * Returns -1 on IO error, 0 on checksum error and 1 upon success.
 */
static int drbd_al_read_tr(struct drbd_conf *mdev,
			   struct drbd_backing_dev *bdev,
			   struct al_transaction *b,
			   int index)
{
	sector_t sector;
	int rv, i;
	u32 xor_sum = 0;

	sector = bdev->md.md_offset + bdev->md.al_offset + index;

	/* Don't process error normally,
	 * as this is done before disk is attached! */
	if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
		return -1;

	rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);

	for (i = 0; i < AL_EXTENTS_PT + 1; i++)
		xor_sum ^= be32_to_cpu(b->updates[i].extent);
	rv &= (xor_sum == be32_to_cpu(b->xor_sum));

	return rv;
}

/**
 * drbd_al_read_log() - Restores the activity log from its on disk representation.
 * @mdev:	DRBD device.
 * @bdev:	Block device to read from.
 *
 * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
 */
int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct al_transaction *buffer;
	int i;
	int rv;
	int mx;
	int active_extents = 0;
	int transactions = 0;
	int found_valid = 0;
	int from = 0;
	int to = 0;
	u32 from_tnr = 0;
	u32 to_tnr = 0;
	u32 cnr;

	mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);

	/* lock out all other meta data io for now,
	 * and make sure the page is mapped.
	 */
	mutex_lock(&mdev->md_io_mutex);
	buffer = page_address(mdev->md_io_page);

	/* Find the valid transaction in the log */
	for (i = 0; i <= mx; i++) {
		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		if (rv == 0)
			continue;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}
		cnr = be32_to_cpu(buffer->tr_number);

		if (++found_valid == 1) {
			from = i;
			to = i;
			from_tnr = cnr;
			to_tnr = cnr;
			continue;
		}
		if ((int)cnr - (int)from_tnr < 0) {
			D_ASSERT(from_tnr - cnr + i - from == mx+1);
			from = i;
			from_tnr = cnr;
		}
		if ((int)cnr - (int)to_tnr > 0) {
			D_ASSERT(cnr - to_tnr == i - to);
			to = i;
			to_tnr = cnr;
		}
	}

	if (!found_valid) {
		dev_warn(DEV, "No usable activity log found.\n");
		mutex_unlock(&mdev->md_io_mutex);
		return 1;
	}

	/* Read the valid transactions.
	 * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
	i = from;
	while (1) {
		int j, pos;
		unsigned int extent_nr;
		unsigned int trn;

		rv = drbd_al_read_tr(mdev, bdev, buffer, i);
		ERR_IF(rv == 0) goto cancel;
		if (rv == -1) {
			mutex_unlock(&mdev->md_io_mutex);
			return 0;
		}

		trn = be32_to_cpu(buffer->tr_number);

		spin_lock_irq(&mdev->al_lock);

		/* This loop runs backwards because in the cyclic
		   elements there might be an old version of the
		   updated element (in slot 0). So the element in slot 0
		   can overwrite old versions. */
		for (j = AL_EXTENTS_PT; j >= 0; j--) {
			pos = be32_to_cpu(buffer->updates[j].pos);
			extent_nr = be32_to_cpu(buffer->updates[j].extent);

			if (extent_nr == LC_FREE)
				continue;

			lc_set(mdev->act_log, extent_nr, pos);
			active_extents++;
		}
		spin_unlock_irq(&mdev->al_lock);

		transactions++;

cancel:
		if (i == to)
			break;
		i++;
		if (i > mx)
			i = 0;
	}

	mdev->al_tr_number = to_tnr+1;
	mdev->al_tr_pos = to;
	if (++mdev->al_tr_pos >
	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
		mdev->al_tr_pos = 0;

	/* ok, we are done with it */
	mutex_unlock(&mdev->md_io_mutex);

	dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
	     transactions, active_extents);

	return 1;
}
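
/*
 * Sketch of the from/to scan above: transaction numbers grow
 * monotonically, so in a circular log the oldest valid slot has the
 * smallest tr_number and the newest the largest; the signed difference
 * keeps the comparison correct even across u32 wraparound.  This sketch
 * assumes every slot holds a valid transaction, while the code above
 * also skips slots that fail the checksum.
 */
#include <stdint.h>

static void sketch_find_log_range(const uint32_t *tnr, int slots,
				  int *from, int *to)
{
	uint32_t from_tnr = tnr[0], to_tnr = tnr[0];
	int i;

	*from = *to = 0;
	for (i = 1; i < slots; i++) {
		if ((int32_t)(tnr[i] - from_tnr) < 0) {
			/* older than anything seen so far */
			*from = i;
			from_tnr = tnr[i];
		}
		if ((int32_t)(tnr[i] - to_tnr) > 0) {
			/* newer than anything seen so far */
			*to = i;
			to_tnr = tnr[i];
		}
	}
}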

static void atodb_endio(struct bio *bio, int error)
{
	struct drbd_atodb_wait *wc = bio->bi_private;
	struct drbd_conf *mdev = wc->mdev;
	struct page *page;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	/* strange behavior of some lower level drivers...
	 * fail the request by clearing the uptodate flag,
	 * but do not return any error?! */
	if (!error && !uptodate)
		error = -EIO;

	drbd_chk_io_error(mdev, error, TRUE);
	if (error && wc->error == 0)
		wc->error = error;

	if (atomic_dec_and_test(&wc->count))
		complete(&wc->io_done);

	page = bio->bi_io_vec[0].bv_page;
	put_page(page);
	bio_put(bio);
	mdev->bm_writ_cnt++;
	put_ldev(mdev);
}

#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* activity log to on disk bitmap -- prepare bio unless that sector
 * is already covered by previously prepared bios */
static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
					struct bio **bios,
					unsigned int enr,
					struct drbd_atodb_wait *wc) __must_hold(local)
{
	struct bio *bio;
	struct page *page;
	sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
				      + mdev->ldev->md.bm_offset;
	unsigned int page_offset = PAGE_SIZE;
	int offset;
	int i = 0;
	int err = -ENOMEM;

	/* Check if that enr is already covered by an already created bio.
	 * Caution, bios[] is not NULL terminated,
	 * but only initialized to all NULL.
	 * For a completely scattered activity log,
	 * the last invocation iterates over all bios,
	 * and finds the last NULL entry.
	 */
	while ((bio = bios[i])) {
		if (bio->bi_sector == on_disk_sector)
			return 0;
		i++;
	}
	/* bios[i] == NULL, the next not yet used slot */

	/* GFP_KERNEL, we are not in the write-out path */
	bio = bio_alloc(GFP_KERNEL, 1);
	if (bio == NULL)
		return -ENOMEM;

	if (i > 0) {
		const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
		page_offset = prev_bv->bv_offset + prev_bv->bv_len;
		page = prev_bv->bv_page;
	}
	if (page_offset == PAGE_SIZE) {
		page = alloc_page(__GFP_HIGHMEM);
		if (page == NULL)
			goto out_bio_put;
		page_offset = 0;
	} else {
		get_page(page);
	}

	offset = S2W(enr);
	drbd_bm_get_lel(mdev, offset,
			min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
			kmap(page) + page_offset);
	kunmap(page);

	bio->bi_private = wc;
	bio->bi_end_io = atodb_endio;
	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_sector = on_disk_sector;

	if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
		goto out_put_page;

	atomic_inc(&wc->count);
	/* we already know that we may do this...
	 * get_ldev_if_state(mdev,D_ATTACHING);
	 * just get the extra reference, so that the local_cnt reflects
	 * the number of pending IO requests DRBD has at its backing device.
	 */
	atomic_inc(&mdev->local_cnt);

	bios[i] = bio;

	return 0;

out_put_page:
	err = -EINVAL;
	put_page(page);
out_bio_put:
	bio_put(bio);
	return err;
}

/**
 * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
 * @mdev:	DRBD device.
 *
 * Called when we detach (unconfigure) local storage,
 * or when we go from R_PRIMARY to R_SECONDARY role.
 */
void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
{
	int i, nr_elements;
	unsigned int enr;
	struct bio **bios;
	struct drbd_atodb_wait wc;

	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
		return; /* sorry, I don't have any act_log etc... */

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	nr_elements = mdev->act_log->nr_elements;

	/* GFP_KERNEL, we are not in anyone's write-out path */
	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
	if (!bios)
		goto submit_one_by_one;

	atomic_set(&wc.count, 0);
	init_completion(&wc.io_done);
	wc.mdev = mdev;
	wc.error = 0;

	for (i = 0; i < nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* next statement also does atomic_inc wc.count and local_cnt */
		if (atodb_prepare_unless_covered(mdev, bios,
						enr/AL_EXT_PER_BM_SECT,
						&wc))
			goto free_bios_submit_one_by_one;
	}

	/* unnecessary optimization? */
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	/* all prepared, submit them */
	for (i = 0; i < nr_elements; i++) {
		if (bios[i] == NULL)
			break;
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
			bios[i]->bi_rw = WRITE;
			bio_endio(bios[i], -EIO);
		} else {
			submit_bio(WRITE, bios[i]);
		}
	}

	drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));

	/* always (try to) flush bitmap to stable storage */
	drbd_md_flush(mdev);

	/* In case we did not submit a single IO, do not wait for
	 * them to complete (because we would wait forever here).
	 *
	 * In case we had IOs and they are already complete, there
	 * is no point in waiting anyway.
	 * Therefore this if () ... */
	if (atomic_read(&wc.count))
		wait_for_completion(&wc.io_done);

	put_ldev(mdev);

	kfree(bios);
	return;

free_bios_submit_one_by_one:
	/* free everything by calling the endio callback directly. */
	for (i = 0; i < nr_elements && bios[i]; i++)
		bio_endio(bios[i], 0);

	kfree(bios);

submit_one_by_one:
	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		/* Really slow: if we have al-extents 16..19 active,
		 * sector 4 will be written four times! Synchronous! */
		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	put_ldev(mdev);
}

/**
 * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
 * @mdev:	DRBD device.
 */
void drbd_al_apply_to_bm(struct drbd_conf *mdev)
{
	unsigned int enr;
	unsigned long add = 0;
	char ppb[10];
	int i;

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
		if (enr == LC_FREE)
			continue;
		add += drbd_bm_ALe_set_all(mdev, enr);
	}

	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
	     ppsize(ppb, Bit2KB(add)));
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry has dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 1;
	}

	drbd_bm_write_sect(mdev, udw->enr);
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_sync_progress(mdev);

	return 1;
}


/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;

	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count);
				dump_stack();

				lc_put(mdev->resync, &ext->lce);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			lc_changed(mdev->resync, &ext->lce);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				drbd_queue_work_front(&mdev->data.work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc an udw\n");
				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
			}
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    mdev->resync_locked,
		    mdev->resync->nr_elements,
		    mdev->resync->flags);
	}
}

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector. Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector. we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count) {
		/* we need the lock for drbd_try_clear_on_disk_bm */
		if (jiffies - mdev->rs_mark_time > HZ*10) {
			/* should be rolling marks,
			 * but we estimate only anyways. */
			if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
			    mdev->state.conn != C_PAUSED_SYNC_T &&
			    mdev->state.conn != C_PAUSED_SYNC_S) {
				mdev->rs_mark_time = jiffies;
				mdev->rs_mark_left = drbd_bm_total_weight(mdev);
			}
		}
		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
			put_ldev(mdev);
		}
		/* just wake_up unconditionally now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irqrestore(&mdev->al_lock, flags);
	if (wake_up)
		wake_up(&mdev->al_wait);
}
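
/*
 * Worked example of the rounding above, with BM_SECT_PER_BIT assumed to
 * be 8 (a 4k bitmap block covers eight 512-byte sectors).  Only bitmap
 * bits whose 4k block lies completely inside the request may be
 * cleared, hence the start is rounded up and the end rounded down.
 */
#include <assert.h>

#define SKETCH_SECT_PER_BIT 8UL

static void sketch_in_sync_rounding(void)
{
	unsigned long sector = 10, esector = 29;	/* a 10 KiB request */
	unsigned long sbnr = (sector + SKETCH_SECT_PER_BIT - 1) / SKETCH_SECT_PER_BIT;
	unsigned long ebnr = (esector - (SKETCH_SECT_PER_BIT - 1)) / SKETCH_SECT_PER_BIT;

	/* only bit 2 (sectors 16..23) lies fully inside sectors 10..29 */
	assert(sbnr == 2 && ebnr == 2);
}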

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, lbnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count;
	struct lc_element *e;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return;
	}

	if (!get_ldev(mdev))
		return; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors)
		goto out;
	ERR_IF(esector >= nr_sectors)
		esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_DIRTY);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	int rv = 0;

	spin_lock_irq(&mdev->al_lock);
	if (unlikely(enr == mdev->act_log->new_number))
		rv = 1;
	else {
		al_ext = lc_find(mdev->act_log, enr);
		if (al_ext) {
			if (al_ext->refcnt)
				rv = 1;
		}
	}
	spin_unlock_irq(&mdev->al_lock);

	/*
	if (unlikely(rv)) {
		dev_info(DEV, "Delaying sync read until app's write is done\n");
	}
	*/
	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 1 on success, 0 if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;

	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return 0;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 1;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
		if (sig) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
	}

	set_bit(BME_LOCKED, &bm_ext->flags);

	return 1;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer indefinitely if we give up the refcount
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remember which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = mdev->resync->flags;
			if (rs_flags & LC_STARVING)
				dev_warn(DEV, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_DIRTY);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_changed(mdev->resync, &bm_ext->lce);
			wake_up(&mdev->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		mdev->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (unlikely(al_enr+i == mdev->act_log->new_number))
			goto try_again;
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		clear_bit(BME_LOCKED, &bm_ext->flags);
		clear_bit(BME_NO_WRITES, &bm_ext->flags);
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @size:	Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	ERR_IF(sector >= nr_sectors) return;
	ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector. we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
			put_ldev(mdev);
		}

		/* just wake_up unconditionally now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}