Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r--  drivers/block/drbd/drbd_main.c | 3700
1 files changed, 3700 insertions, 0 deletions
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
new file mode 100644
index 000000000000..11d8ff6016ac
--- /dev/null
+++ b/drivers/block/drbd/drbd_main.c
@@ -0,0 +1,3700 @@
1 | /* | ||
2 | drbd.c | ||
3 | |||
4 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. | ||
5 | |||
6 | Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. | ||
7 | Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. | ||
8 | Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. | ||
9 | |||
10 | Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev | ||
11 | from Logicworks, Inc. for making SDP replication support possible. | ||
12 | |||
13 | drbd is free software; you can redistribute it and/or modify | ||
14 | it under the terms of the GNU General Public License as published by | ||
15 | the Free Software Foundation; either version 2, or (at your option) | ||
16 | any later version. | ||
17 | |||
18 | drbd is distributed in the hope that it will be useful, | ||
19 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | GNU General Public License for more details. | ||
22 | |||
23 | You should have received a copy of the GNU General Public License | ||
24 | along with drbd; see the file COPYING. If not, write to | ||
25 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
26 | |||
27 | */ | ||
28 | |||
29 | #include <linux/module.h> | ||
30 | #include <linux/version.h> | ||
31 | #include <linux/drbd.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include <asm/types.h> | ||
34 | #include <net/sock.h> | ||
35 | #include <linux/ctype.h> | ||
36 | #include <linux/smp_lock.h> | ||
37 | #include <linux/fs.h> | ||
38 | #include <linux/file.h> | ||
39 | #include <linux/proc_fs.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/memcontrol.h> | ||
43 | #include <linux/mm_inline.h> | ||
44 | #include <linux/slab.h> | ||
45 | #include <linux/random.h> | ||
46 | #include <linux/reboot.h> | ||
47 | #include <linux/notifier.h> | ||
48 | #include <linux/kthread.h> | ||
49 | |||
50 | #define __KERNEL_SYSCALLS__ | ||
51 | #include <linux/unistd.h> | ||
52 | #include <linux/vmalloc.h> | ||
53 | |||
54 | #include <linux/drbd_limits.h> | ||
55 | #include "drbd_int.h" | ||
56 | #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */ | ||
57 | |||
58 | #include "drbd_vli.h" | ||
59 | |||
60 | struct after_state_chg_work { | ||
61 | struct drbd_work w; | ||
62 | union drbd_state os; | ||
63 | union drbd_state ns; | ||
64 | enum chg_state_flags flags; | ||
65 | struct completion *done; | ||
66 | }; | ||
67 | |||
68 | int drbdd_init(struct drbd_thread *); | ||
69 | int drbd_worker(struct drbd_thread *); | ||
70 | int drbd_asender(struct drbd_thread *); | ||
71 | |||
72 | int drbd_init(void); | ||
73 | static int drbd_open(struct block_device *bdev, fmode_t mode); | ||
74 | static int drbd_release(struct gendisk *gd, fmode_t mode); | ||
75 | static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused); | ||
76 | static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | ||
77 | union drbd_state ns, enum chg_state_flags flags); | ||
78 | static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused); | ||
79 | static void md_sync_timer_fn(unsigned long data); | ||
80 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); | ||
81 | |||
82 | MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " | ||
83 | "Lars Ellenberg <lars@linbit.com>"); | ||
84 | MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION); | ||
85 | MODULE_VERSION(REL_VERSION); | ||
86 | MODULE_LICENSE("GPL"); | ||
87 | MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)"); | ||
88 | MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR); | ||
89 | |||
90 | #include <linux/moduleparam.h> | ||
91 | /* allow_open_on_secondary */ | ||
92 | MODULE_PARM_DESC(allow_oos, "DONT USE!"); | ||
93 | /* thanks to these macros, if compiled into the kernel (not-module), | ||
94 | * this becomes the boot parameter drbd.minor_count */ | ||
95 | module_param(minor_count, uint, 0444); | ||
96 | module_param(disable_sendpage, bool, 0644); | ||
97 | module_param(allow_oos, bool, 0); | ||
98 | module_param(cn_idx, uint, 0444); | ||
99 | module_param(proc_details, int, 0644); | ||
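As the comment above notes, when DRBD is compiled into the kernel these parameters turn into boot parameters prefixed with the module name (for example drbd.minor_count=8 on the kernel command line); built as a module they are simply passed to modprobe instead, e.g. modprobe drbd minor_count=8.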
100 | |||
101 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
102 | int enable_faults; | ||
103 | int fault_rate; | ||
104 | static int fault_count; | ||
105 | int fault_devs; | ||
106 | /* bitmap of enabled faults */ | ||
107 | module_param(enable_faults, int, 0664); | ||
108 | /* fault rate % value - applies to all enabled faults */ | ||
109 | module_param(fault_rate, int, 0664); | ||
110 | /* count of faults inserted */ | ||
111 | module_param(fault_count, int, 0664); | ||
112 | /* bitmap of devices to insert faults on */ | ||
113 | module_param(fault_devs, int, 0644); | ||
114 | #endif | ||
115 | |||
116 | /* module parameter, defined */ | ||
117 | unsigned int minor_count = 32; | ||
118 | int disable_sendpage; | ||
119 | int allow_oos; | ||
120 | unsigned int cn_idx = CN_IDX_DRBD; | ||
121 | int proc_details; /* Detail level in proc drbd*/ | ||
122 | |||
123 | /* Module parameter for setting the user mode helper program | ||
124 | * to run. Default is /sbin/drbdadm */ | ||
125 | char usermode_helper[80] = "/sbin/drbdadm"; | ||
126 | |||
127 | module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644); | ||
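The helper path configured here is intended for the drbd_khelper() callouts used further down in this file (events such as "pri-on-incon-degr" and "local-io-error"); because the parameter is registered with mode 0644 it can also be changed at runtime through /sys/module/drbd/parameters/usermode_helper.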
128 | |||
129 | /* in 2.6.x, our device mapping and config info contains our virtual gendisks | ||
130 | * as member "struct gendisk *vdisk;" | ||
131 | */ | ||
132 | struct drbd_conf **minor_table; | ||
133 | |||
134 | struct kmem_cache *drbd_request_cache; | ||
135 | struct kmem_cache *drbd_ee_cache; /* epoch entries */ | ||
136 | struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */ | ||
137 | struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ | ||
138 | mempool_t *drbd_request_mempool; | ||
139 | mempool_t *drbd_ee_mempool; | ||
140 | |||
141 | /* I do not use a standard mempool, because: | ||
142 | 1) I want to hand out the pre-allocated objects first. | ||
143 | 2) I want to be able to interrupt sleeping allocation with a signal. | ||
144 | Note: This is a single linked list, the next pointer is the private | ||
145 | member of struct page. | ||
146 | */ | ||
147 | struct page *drbd_pp_pool; | ||
148 | spinlock_t drbd_pp_lock; | ||
149 | int drbd_pp_vacant; | ||
150 | wait_queue_head_t drbd_pp_wait; | ||
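The comment above describes a hand-rolled pool: free pages are chained through the otherwise unused private field of struct page. A minimal sketch of popping one page off that chain might look like the following (illustrative only; the real allocation path elsewhere in the driver additionally handles pre-allocation, sleeping and signal interruption via drbd_pp_wait):

	struct page *page = NULL;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		set_page_private(page, 0);
		drbd_pp_vacant--;
	}
	spin_unlock(&drbd_pp_lock);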
151 | |||
152 | DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5); | ||
153 | |||
154 | static struct block_device_operations drbd_ops = { | ||
155 | .owner = THIS_MODULE, | ||
156 | .open = drbd_open, | ||
157 | .release = drbd_release, | ||
158 | }; | ||
159 | |||
160 | #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0])) | ||
161 | |||
162 | #ifdef __CHECKER__ | ||
163 | /* When checking with sparse, and this is an inline function, sparse will | ||
164 | give tons of false positives. When this is a real function, sparse works. | ||
165 | */ | ||
166 | int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins) | ||
167 | { | ||
168 | int io_allowed; | ||
169 | |||
170 | atomic_inc(&mdev->local_cnt); | ||
171 | io_allowed = (mdev->state.disk >= mins); | ||
172 | if (!io_allowed) { | ||
173 | if (atomic_dec_and_test(&mdev->local_cnt)) | ||
174 | wake_up(&mdev->misc_wait); | ||
175 | } | ||
176 | return io_allowed; | ||
177 | } | ||
178 | |||
179 | #endif | ||
180 | |||
181 | /** | ||
182 | * DOC: The transfer log | ||
183 | * | ||
184 | * The transfer log is a single linked list of &struct drbd_tl_epoch objects. | ||
185 | * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail | ||
186 | * of the list. There is always at least one &struct drbd_tl_epoch object. | ||
187 | * | ||
188 | * Each &struct drbd_tl_epoch has a circular double linked list of requests | ||
189 | * attached. | ||
190 | */ | ||
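To make the shape described above concrete, a small walk over the transfer log could look like this sketch (illustrative only, not part of this file; it assumes mdev->req_lock is held so the list cannot change underneath it):

	struct drbd_tl_epoch *b;
	struct drbd_request *req;
	int n;

	for (b = mdev->oldest_tle; b != NULL; b = b->next) {
		n = 0;
		list_for_each_entry(req, &b->requests, tl_requests)
			n++;
		/* epoch b->br_number currently holds n requests;
		 * b->next == NULL means b == mdev->newest_tle */
	}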
191 | static int tl_init(struct drbd_conf *mdev) | ||
192 | { | ||
193 | struct drbd_tl_epoch *b; | ||
194 | |||
195 | /* during device minor initialization, we may well use GFP_KERNEL */ | ||
196 | b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL); | ||
197 | if (!b) | ||
198 | return 0; | ||
199 | INIT_LIST_HEAD(&b->requests); | ||
200 | INIT_LIST_HEAD(&b->w.list); | ||
201 | b->next = NULL; | ||
202 | b->br_number = 4711; | ||
203 | b->n_req = 0; | ||
204 | b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ | ||
205 | |||
206 | mdev->oldest_tle = b; | ||
207 | mdev->newest_tle = b; | ||
208 | INIT_LIST_HEAD(&mdev->out_of_sequence_requests); | ||
209 | |||
210 | mdev->tl_hash = NULL; | ||
211 | mdev->tl_hash_s = 0; | ||
212 | |||
213 | return 1; | ||
214 | } | ||
215 | |||
216 | static void tl_cleanup(struct drbd_conf *mdev) | ||
217 | { | ||
218 | D_ASSERT(mdev->oldest_tle == mdev->newest_tle); | ||
219 | D_ASSERT(list_empty(&mdev->out_of_sequence_requests)); | ||
220 | kfree(mdev->oldest_tle); | ||
221 | mdev->oldest_tle = NULL; | ||
222 | kfree(mdev->unused_spare_tle); | ||
223 | mdev->unused_spare_tle = NULL; | ||
224 | kfree(mdev->tl_hash); | ||
225 | mdev->tl_hash = NULL; | ||
226 | mdev->tl_hash_s = 0; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * _tl_add_barrier() - Adds a barrier to the transfer log | ||
231 | * @mdev: DRBD device. | ||
232 | * @new: Barrier to be added before the current head of the TL. | ||
233 | * | ||
234 | * The caller must hold the req_lock. | ||
235 | */ | ||
236 | void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new) | ||
237 | { | ||
238 | struct drbd_tl_epoch *newest_before; | ||
239 | |||
240 | INIT_LIST_HEAD(&new->requests); | ||
241 | INIT_LIST_HEAD(&new->w.list); | ||
242 | new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ | ||
243 | new->next = NULL; | ||
244 | new->n_req = 0; | ||
245 | |||
246 | newest_before = mdev->newest_tle; | ||
247 | /* never send a barrier number == 0, because that is special-cased | ||
248 | * when using TCQ for our write ordering code */ | ||
249 | new->br_number = (newest_before->br_number+1) ?: 1; | ||
250 | if (mdev->newest_tle != new) { | ||
251 | mdev->newest_tle->next = new; | ||
252 | mdev->newest_tle = new; | ||
253 | } | ||
254 | } | ||
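The ?: in the br_number assignment above is only there to skip 0 on wrap-around; written out, it is equivalent to:

	new->br_number = newest_before->br_number + 1;
	if (new->br_number == 0)
		/* 0 is special-cased by the write ordering code, never send it */
		new->br_number = 1;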
255 | |||
256 | /** | ||
257 | * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL | ||
258 | * @mdev: DRBD device. | ||
259 | * @barrier_nr: Expected identifier of the DRBD write barrier packet. | ||
260 | * @set_size: Expected number of requests before that barrier. | ||
261 | * | ||
262 | * In case the passed barrier_nr or set_size does not match the oldest | ||
263 | * &struct drbd_tl_epoch objects this function will cause a termination | ||
264 | * of the connection. | ||
265 | */ | ||
266 | void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, | ||
267 | unsigned int set_size) | ||
268 | { | ||
269 | struct drbd_tl_epoch *b, *nob; /* next old barrier */ | ||
270 | struct list_head *le, *tle; | ||
271 | struct drbd_request *r; | ||
272 | |||
273 | spin_lock_irq(&mdev->req_lock); | ||
274 | |||
275 | b = mdev->oldest_tle; | ||
276 | |||
277 | /* first some paranoia code */ | ||
278 | if (b == NULL) { | ||
279 | dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n", | ||
280 | barrier_nr); | ||
281 | goto bail; | ||
282 | } | ||
283 | if (b->br_number != barrier_nr) { | ||
284 | dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n", | ||
285 | barrier_nr, b->br_number); | ||
286 | goto bail; | ||
287 | } | ||
288 | if (b->n_req != set_size) { | ||
289 | dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n", | ||
290 | barrier_nr, set_size, b->n_req); | ||
291 | goto bail; | ||
292 | } | ||
293 | |||
294 | /* Clean up list of requests processed during current epoch */ | ||
295 | list_for_each_safe(le, tle, &b->requests) { | ||
296 | r = list_entry(le, struct drbd_request, tl_requests); | ||
297 | _req_mod(r, barrier_acked); | ||
298 | } | ||
299 | /* There could be requests on the list waiting for completion | ||
300 | of the write to the local disk. To avoid corruptions of | ||
301 | slab's data structures we have to remove the list's head. | ||
302 | |||
303 | Also there could have been a barrier ack out of sequence, overtaking | ||
304 | the write acks - which would be a bug and violating write ordering. | ||
305 | To not deadlock in case we lose connection while such requests are | ||
306 | still pending, we need some way to find them for the | ||
307 | _req_mod(connection_lost_while_pending). | ||
308 | |||
309 | These have been list_move'd to the out_of_sequence_requests list in | ||
310 | _req_mod(, barrier_acked) above. | ||
311 | */ | ||
312 | list_del_init(&b->requests); | ||
313 | |||
314 | nob = b->next; | ||
315 | if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { | ||
316 | _tl_add_barrier(mdev, b); | ||
317 | if (nob) | ||
318 | mdev->oldest_tle = nob; | ||
319 | /* if nob == NULL b was the only barrier, and becomes the new | ||
320 | barrier. Therefore mdev->oldest_tle points already to b */ | ||
321 | } else { | ||
322 | D_ASSERT(nob != NULL); | ||
323 | mdev->oldest_tle = nob; | ||
324 | kfree(b); | ||
325 | } | ||
326 | |||
327 | spin_unlock_irq(&mdev->req_lock); | ||
328 | dec_ap_pending(mdev); | ||
329 | |||
330 | return; | ||
331 | |||
332 | bail: | ||
333 | spin_unlock_irq(&mdev->req_lock); | ||
334 | drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); | ||
335 | } | ||
336 | |||
337 | |||
338 | /** | ||
339 | * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL | ||
340 | * @mdev: DRBD device. | ||
341 | * | ||
342 | * This is called after the connection to the peer was lost. The storage covered | ||
343 | by the requests on the transfer log gets marked as out of sync. Called from the | ||
344 | * receiver thread and the worker thread. | ||
345 | */ | ||
346 | void tl_clear(struct drbd_conf *mdev) | ||
347 | { | ||
348 | struct drbd_tl_epoch *b, *tmp; | ||
349 | struct list_head *le, *tle; | ||
350 | struct drbd_request *r; | ||
351 | int new_initial_bnr = net_random(); | ||
352 | |||
353 | spin_lock_irq(&mdev->req_lock); | ||
354 | |||
355 | b = mdev->oldest_tle; | ||
356 | while (b) { | ||
357 | list_for_each_safe(le, tle, &b->requests) { | ||
358 | r = list_entry(le, struct drbd_request, tl_requests); | ||
359 | /* It would be nice to complete outside of spinlock. | ||
360 | * But this is easier for now. */ | ||
361 | _req_mod(r, connection_lost_while_pending); | ||
362 | } | ||
363 | tmp = b->next; | ||
364 | |||
365 | /* there could still be requests on that ring list, | ||
366 | * in case local io is still pending */ | ||
367 | list_del(&b->requests); | ||
368 | |||
369 | /* dec_ap_pending corresponding to queue_barrier. | ||
370 | * the newest barrier may not have been queued yet, | ||
371 | * in which case w.cb is still NULL. */ | ||
372 | if (b->w.cb != NULL) | ||
373 | dec_ap_pending(mdev); | ||
374 | |||
375 | if (b == mdev->newest_tle) { | ||
376 | /* recycle, but reinit! */ | ||
377 | D_ASSERT(tmp == NULL); | ||
378 | INIT_LIST_HEAD(&b->requests); | ||
379 | INIT_LIST_HEAD(&b->w.list); | ||
380 | b->w.cb = NULL; | ||
381 | b->br_number = new_initial_bnr; | ||
382 | b->n_req = 0; | ||
383 | |||
384 | mdev->oldest_tle = b; | ||
385 | break; | ||
386 | } | ||
387 | kfree(b); | ||
388 | b = tmp; | ||
389 | } | ||
390 | |||
391 | /* we expect this list to be empty. */ | ||
392 | D_ASSERT(list_empty(&mdev->out_of_sequence_requests)); | ||
393 | |||
394 | /* but just in case, clean it up anyways! */ | ||
395 | list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) { | ||
396 | r = list_entry(le, struct drbd_request, tl_requests); | ||
397 | /* It would be nice to complete outside of spinlock. | ||
398 | * But this is easier for now. */ | ||
399 | _req_mod(r, connection_lost_while_pending); | ||
400 | } | ||
401 | |||
402 | /* ensure bit indicating barrier is required is clear */ | ||
403 | clear_bit(CREATE_BARRIER, &mdev->flags); | ||
404 | |||
405 | spin_unlock_irq(&mdev->req_lock); | ||
406 | } | ||
407 | |||
408 | /** | ||
409 | * cl_wide_st_chg() - TRUE if the state change is a cluster wide one | ||
410 | * @mdev: DRBD device. | ||
411 | * @os: old (current) state. | ||
412 | * @ns: new (wanted) state. | ||
413 | */ | ||
414 | static int cl_wide_st_chg(struct drbd_conf *mdev, | ||
415 | union drbd_state os, union drbd_state ns) | ||
416 | { | ||
417 | return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED && | ||
418 | ((os.role != R_PRIMARY && ns.role == R_PRIMARY) || | ||
419 | (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || | ||
420 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) || | ||
421 | (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) || | ||
422 | (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) || | ||
423 | (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S); | ||
424 | } | ||
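In plain words: promoting to Primary, kicking off a resync handshake (StartingSyncS/StartingSyncT), detaching to Diskless, or starting an online verify all count as cluster-wide changes as long as the connection is (and stays) established, and so does moving from an established connection to Disconnecting; the same role or disk change on a node that is not connected is handled purely locally.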
425 | |||
426 | int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f, | ||
427 | union drbd_state mask, union drbd_state val) | ||
428 | { | ||
429 | unsigned long flags; | ||
430 | union drbd_state os, ns; | ||
431 | int rv; | ||
432 | |||
433 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
434 | os = mdev->state; | ||
435 | ns.i = (os.i & ~mask.i) | val.i; | ||
436 | rv = _drbd_set_state(mdev, ns, f, NULL); | ||
437 | ns = mdev->state; | ||
438 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
439 | |||
440 | return rv; | ||
441 | } | ||
442 | |||
443 | /** | ||
444 | * drbd_force_state() - Impose a change which happens outside our control on our state | ||
445 | * @mdev: DRBD device. | ||
446 | * @mask: mask of state bits to change. | ||
447 | * @val: value of new state bits. | ||
448 | */ | ||
449 | void drbd_force_state(struct drbd_conf *mdev, | ||
450 | union drbd_state mask, union drbd_state val) | ||
451 | { | ||
452 | drbd_change_state(mdev, CS_HARD, mask, val); | ||
453 | } | ||
454 | |||
455 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); | ||
456 | static int is_valid_state_transition(struct drbd_conf *, | ||
457 | union drbd_state, union drbd_state); | ||
458 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, | ||
459 | union drbd_state ns, int *warn_sync_abort); | ||
460 | int drbd_send_state_req(struct drbd_conf *, | ||
461 | union drbd_state, union drbd_state); | ||
462 | |||
463 | static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev, | ||
464 | union drbd_state mask, union drbd_state val) | ||
465 | { | ||
466 | union drbd_state os, ns; | ||
467 | unsigned long flags; | ||
468 | int rv; | ||
469 | |||
470 | if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags)) | ||
471 | return SS_CW_SUCCESS; | ||
472 | |||
473 | if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags)) | ||
474 | return SS_CW_FAILED_BY_PEER; | ||
475 | |||
476 | rv = 0; | ||
477 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
478 | os = mdev->state; | ||
479 | ns.i = (os.i & ~mask.i) | val.i; | ||
480 | ns = sanitize_state(mdev, os, ns, NULL); | ||
481 | |||
482 | if (!cl_wide_st_chg(mdev, os, ns)) | ||
483 | rv = SS_CW_NO_NEED; | ||
484 | if (!rv) { | ||
485 | rv = is_valid_state(mdev, ns); | ||
486 | if (rv == SS_SUCCESS) { | ||
487 | rv = is_valid_state_transition(mdev, ns, os); | ||
488 | if (rv == SS_SUCCESS) | ||
489 | rv = 0; /* cont waiting, otherwise fail. */ | ||
490 | } | ||
491 | } | ||
492 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
493 | |||
494 | return rv; | ||
495 | } | ||
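Note the return value convention here: 0 means "keep waiting", so the wait_event() in drbd_req_state() below keeps sleeping until the peer's reply sets CL_ST_CHG_SUCCESS or CL_ST_CHG_FAIL, the change turns out not to be cluster-wide after all, or the prospective state stops being valid locally.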
496 | |||
497 | /** | ||
498 | * drbd_req_state() - Perform an eventually cluster wide state change | ||
499 | * @mdev: DRBD device. | ||
500 | * @mask: mask of state bits to change. | ||
501 | * @val: value of new state bits. | ||
502 | * @f: flags | ||
503 | * | ||
504 | * Should not be called directly, use drbd_request_state() or | ||
505 | * _drbd_request_state(). | ||
506 | */ | ||
507 | static int drbd_req_state(struct drbd_conf *mdev, | ||
508 | union drbd_state mask, union drbd_state val, | ||
509 | enum chg_state_flags f) | ||
510 | { | ||
511 | struct completion done; | ||
512 | unsigned long flags; | ||
513 | union drbd_state os, ns; | ||
514 | int rv; | ||
515 | |||
516 | init_completion(&done); | ||
517 | |||
518 | if (f & CS_SERIALIZE) | ||
519 | mutex_lock(&mdev->state_mutex); | ||
520 | |||
521 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
522 | os = mdev->state; | ||
523 | ns.i = (os.i & ~mask.i) | val.i; | ||
524 | ns = sanitize_state(mdev, os, ns, NULL); | ||
525 | |||
526 | if (cl_wide_st_chg(mdev, os, ns)) { | ||
527 | rv = is_valid_state(mdev, ns); | ||
528 | if (rv == SS_SUCCESS) | ||
529 | rv = is_valid_state_transition(mdev, ns, os); | ||
530 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
531 | |||
532 | if (rv < SS_SUCCESS) { | ||
533 | if (f & CS_VERBOSE) | ||
534 | print_st_err(mdev, os, ns, rv); | ||
535 | goto abort; | ||
536 | } | ||
537 | |||
538 | drbd_state_lock(mdev); | ||
539 | if (!drbd_send_state_req(mdev, mask, val)) { | ||
540 | drbd_state_unlock(mdev); | ||
541 | rv = SS_CW_FAILED_BY_PEER; | ||
542 | if (f & CS_VERBOSE) | ||
543 | print_st_err(mdev, os, ns, rv); | ||
544 | goto abort; | ||
545 | } | ||
546 | |||
547 | wait_event(mdev->state_wait, | ||
548 | (rv = _req_st_cond(mdev, mask, val))); | ||
549 | |||
550 | if (rv < SS_SUCCESS) { | ||
551 | drbd_state_unlock(mdev); | ||
552 | if (f & CS_VERBOSE) | ||
553 | print_st_err(mdev, os, ns, rv); | ||
554 | goto abort; | ||
555 | } | ||
556 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
557 | os = mdev->state; | ||
558 | ns.i = (os.i & ~mask.i) | val.i; | ||
559 | rv = _drbd_set_state(mdev, ns, f, &done); | ||
560 | drbd_state_unlock(mdev); | ||
561 | } else { | ||
562 | rv = _drbd_set_state(mdev, ns, f, &done); | ||
563 | } | ||
564 | |||
565 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
566 | |||
567 | if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { | ||
568 | D_ASSERT(current != mdev->worker.task); | ||
569 | wait_for_completion(&done); | ||
570 | } | ||
571 | |||
572 | abort: | ||
573 | if (f & CS_SERIALIZE) | ||
574 | mutex_unlock(&mdev->state_mutex); | ||
575 | |||
576 | return rv; | ||
577 | } | ||
578 | |||
579 | /** | ||
580 | * _drbd_request_state() - Request a state change (with flags) | ||
581 | * @mdev: DRBD device. | ||
582 | * @mask: mask of state bits to change. | ||
583 | * @val: value of new state bits. | ||
584 | * @f: flags | ||
585 | * | ||
586 | * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE | ||
587 | * flag, or when logging of failed state change requests is not desired. | ||
588 | */ | ||
589 | int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask, | ||
590 | union drbd_state val, enum chg_state_flags f) | ||
591 | { | ||
592 | int rv; | ||
593 | |||
594 | wait_event(mdev->state_wait, | ||
595 | (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE); | ||
596 | |||
597 | return rv; | ||
598 | } | ||
599 | |||
600 | static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) | ||
601 | { | ||
602 | dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n", | ||
603 | name, | ||
604 | drbd_conn_str(ns.conn), | ||
605 | drbd_role_str(ns.role), | ||
606 | drbd_role_str(ns.peer), | ||
607 | drbd_disk_str(ns.disk), | ||
608 | drbd_disk_str(ns.pdsk), | ||
609 | ns.susp ? 's' : 'r', | ||
610 | ns.aftr_isp ? 'a' : '-', | ||
611 | ns.peer_isp ? 'p' : '-', | ||
612 | ns.user_isp ? 'u' : '-' | ||
613 | ); | ||
614 | } | ||
615 | |||
616 | void print_st_err(struct drbd_conf *mdev, | ||
617 | union drbd_state os, union drbd_state ns, int err) | ||
618 | { | ||
619 | if (err == SS_IN_TRANSIENT_STATE) | ||
620 | return; | ||
621 | dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err)); | ||
622 | print_st(mdev, " state", os); | ||
623 | print_st(mdev, "wanted", ns); | ||
624 | } | ||
625 | |||
626 | |||
627 | #define drbd_peer_str drbd_role_str | ||
628 | #define drbd_pdsk_str drbd_disk_str | ||
629 | |||
630 | #define drbd_susp_str(A) ((A) ? "1" : "0") | ||
631 | #define drbd_aftr_isp_str(A) ((A) ? "1" : "0") | ||
632 | #define drbd_peer_isp_str(A) ((A) ? "1" : "0") | ||
633 | #define drbd_user_isp_str(A) ((A) ? "1" : "0") | ||
634 | |||
635 | #define PSC(A) \ | ||
636 | ({ if (ns.A != os.A) { \ | ||
637 | pbp += sprintf(pbp, #A "( %s -> %s ) ", \ | ||
638 | drbd_##A##_str(os.A), \ | ||
639 | drbd_##A##_str(ns.A)); \ | ||
640 | } }) | ||
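For reference, PSC(role) expands to roughly the following, appending one "field( old -> new )" chunk to the buffer only when that particular field actually changed:

	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));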
641 | |||
642 | /** | ||
643 | * is_valid_state() - Returns an SS_ error code if ns is not valid | ||
644 | * @mdev: DRBD device. | ||
645 | * @ns: State to consider. | ||
646 | */ | ||
647 | static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns) | ||
648 | { | ||
649 | /* See drbd_state_sw_errors in drbd_strings.c */ | ||
650 | |||
651 | enum drbd_fencing_p fp; | ||
652 | int rv = SS_SUCCESS; | ||
653 | |||
654 | fp = FP_DONT_CARE; | ||
655 | if (get_ldev(mdev)) { | ||
656 | fp = mdev->ldev->dc.fencing; | ||
657 | put_ldev(mdev); | ||
658 | } | ||
659 | |||
660 | if (get_net_conf(mdev)) { | ||
661 | if (!mdev->net_conf->two_primaries && | ||
662 | ns.role == R_PRIMARY && ns.peer == R_PRIMARY) | ||
663 | rv = SS_TWO_PRIMARIES; | ||
664 | put_net_conf(mdev); | ||
665 | } | ||
666 | |||
667 | if (rv <= 0) | ||
668 | /* already found a reason to abort */; | ||
669 | else if (ns.role == R_SECONDARY && mdev->open_cnt) | ||
670 | rv = SS_DEVICE_IN_USE; | ||
671 | |||
672 | else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE) | ||
673 | rv = SS_NO_UP_TO_DATE_DISK; | ||
674 | |||
675 | else if (fp >= FP_RESOURCE && | ||
676 | ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN) | ||
677 | rv = SS_PRIMARY_NOP; | ||
678 | |||
679 | else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT) | ||
680 | rv = SS_NO_UP_TO_DATE_DISK; | ||
681 | |||
682 | else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT) | ||
683 | rv = SS_NO_LOCAL_DISK; | ||
684 | |||
685 | else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT) | ||
686 | rv = SS_NO_REMOTE_DISK; | ||
687 | |||
688 | else if ((ns.conn == C_CONNECTED || | ||
689 | ns.conn == C_WF_BITMAP_S || | ||
690 | ns.conn == C_SYNC_SOURCE || | ||
691 | ns.conn == C_PAUSED_SYNC_S) && | ||
692 | ns.disk == D_OUTDATED) | ||
693 | rv = SS_CONNECTED_OUTDATES; | ||
694 | |||
695 | else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && | ||
696 | (mdev->sync_conf.verify_alg[0] == 0)) | ||
697 | rv = SS_NO_VERIFY_ALG; | ||
698 | |||
699 | else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && | ||
700 | mdev->agreed_pro_version < 88) | ||
701 | rv = SS_NOT_SUPPORTED; | ||
702 | |||
703 | return rv; | ||
704 | } | ||
705 | |||
706 | /** | ||
707 | * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible | ||
708 | * @mdev: DRBD device. | ||
709 | * @ns: new state. | ||
710 | * @os: old state. | ||
711 | */ | ||
712 | static int is_valid_state_transition(struct drbd_conf *mdev, | ||
713 | union drbd_state ns, union drbd_state os) | ||
714 | { | ||
715 | int rv = SS_SUCCESS; | ||
716 | |||
717 | if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) && | ||
718 | os.conn > C_CONNECTED) | ||
719 | rv = SS_RESYNC_RUNNING; | ||
720 | |||
721 | if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE) | ||
722 | rv = SS_ALREADY_STANDALONE; | ||
723 | |||
724 | if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS) | ||
725 | rv = SS_IS_DISKLESS; | ||
726 | |||
727 | if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED) | ||
728 | rv = SS_NO_NET_CONFIG; | ||
729 | |||
730 | if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING) | ||
731 | rv = SS_LOWER_THAN_OUTDATED; | ||
732 | |||
733 | if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED) | ||
734 | rv = SS_IN_TRANSIENT_STATE; | ||
735 | |||
736 | if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS) | ||
737 | rv = SS_IN_TRANSIENT_STATE; | ||
738 | |||
739 | if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED) | ||
740 | rv = SS_NEED_CONNECTION; | ||
741 | |||
742 | if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && | ||
743 | ns.conn != os.conn && os.conn > C_CONNECTED) | ||
744 | rv = SS_RESYNC_RUNNING; | ||
745 | |||
746 | if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) && | ||
747 | os.conn < C_CONNECTED) | ||
748 | rv = SS_NEED_CONNECTION; | ||
749 | |||
750 | return rv; | ||
751 | } | ||
752 | |||
753 | /** | ||
754 | * sanitize_state() - Resolves implicitly necessary additional changes to a state transition | ||
755 | * @mdev: DRBD device. | ||
756 | * @os: old state. | ||
757 | * @ns: new state. | ||
758 | * @warn_sync_abort: | ||
759 | * | ||
760 | * When we lose connection, we have to set the state of the peer's disk (pdsk) | ||
761 | * to D_UNKNOWN. This rule and many more along those lines are in this function. | ||
762 | */ | ||
763 | static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, | ||
764 | union drbd_state ns, int *warn_sync_abort) | ||
765 | { | ||
766 | enum drbd_fencing_p fp; | ||
767 | |||
768 | fp = FP_DONT_CARE; | ||
769 | if (get_ldev(mdev)) { | ||
770 | fp = mdev->ldev->dc.fencing; | ||
771 | put_ldev(mdev); | ||
772 | } | ||
773 | |||
774 | /* Disallow Network errors to configure a device's network part */ | ||
775 | if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) && | ||
776 | os.conn <= C_DISCONNECTING) | ||
777 | ns.conn = os.conn; | ||
778 | |||
779 | /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */ | ||
780 | if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN && | ||
781 | ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING) | ||
782 | ns.conn = os.conn; | ||
783 | |||
784 | /* After C_DISCONNECTING only C_STANDALONE may follow */ | ||
785 | if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) | ||
786 | ns.conn = os.conn; | ||
787 | |||
788 | if (ns.conn < C_CONNECTED) { | ||
789 | ns.peer_isp = 0; | ||
790 | ns.peer = R_UNKNOWN; | ||
791 | if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT) | ||
792 | ns.pdsk = D_UNKNOWN; | ||
793 | } | ||
794 | |||
795 | /* Clear the aftr_isp when becoming unconfigured */ | ||
796 | if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY) | ||
797 | ns.aftr_isp = 0; | ||
798 | |||
799 | if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS) | ||
800 | ns.pdsk = D_UNKNOWN; | ||
801 | |||
802 | /* Abort resync if a disk fails/detaches */ | ||
803 | if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED && | ||
804 | (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) { | ||
805 | if (warn_sync_abort) | ||
806 | *warn_sync_abort = 1; | ||
807 | ns.conn = C_CONNECTED; | ||
808 | } | ||
809 | |||
810 | if (ns.conn >= C_CONNECTED && | ||
811 | ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) || | ||
812 | (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) { | ||
813 | switch (ns.conn) { | ||
814 | case C_WF_BITMAP_T: | ||
815 | case C_PAUSED_SYNC_T: | ||
816 | ns.disk = D_OUTDATED; | ||
817 | break; | ||
818 | case C_CONNECTED: | ||
819 | case C_WF_BITMAP_S: | ||
820 | case C_SYNC_SOURCE: | ||
821 | case C_PAUSED_SYNC_S: | ||
822 | ns.disk = D_UP_TO_DATE; | ||
823 | break; | ||
824 | case C_SYNC_TARGET: | ||
825 | ns.disk = D_INCONSISTENT; | ||
826 | dev_warn(DEV, "Implicitly set disk state Inconsistent!\n"); | ||
827 | break; | ||
828 | } | ||
829 | if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE) | ||
830 | dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n"); | ||
831 | } | ||
832 | |||
833 | if (ns.conn >= C_CONNECTED && | ||
834 | (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) { | ||
835 | switch (ns.conn) { | ||
836 | case C_CONNECTED: | ||
837 | case C_WF_BITMAP_T: | ||
838 | case C_PAUSED_SYNC_T: | ||
839 | case C_SYNC_TARGET: | ||
840 | ns.pdsk = D_UP_TO_DATE; | ||
841 | break; | ||
842 | case C_WF_BITMAP_S: | ||
843 | case C_PAUSED_SYNC_S: | ||
844 | ns.pdsk = D_OUTDATED; | ||
845 | break; | ||
846 | case C_SYNC_SOURCE: | ||
847 | ns.pdsk = D_INCONSISTENT; | ||
848 | dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n"); | ||
849 | break; | ||
850 | } | ||
851 | if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE) | ||
852 | dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n"); | ||
853 | } | ||
854 | |||
855 | /* Connection breaks down before we finished "Negotiating" */ | ||
856 | if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING && | ||
857 | get_ldev_if_state(mdev, D_NEGOTIATING)) { | ||
858 | if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) { | ||
859 | ns.disk = mdev->new_state_tmp.disk; | ||
860 | ns.pdsk = mdev->new_state_tmp.pdsk; | ||
861 | } else { | ||
862 | dev_alert(DEV, "Connection lost while negotiating, no data!\n"); | ||
863 | ns.disk = D_DISKLESS; | ||
864 | ns.pdsk = D_UNKNOWN; | ||
865 | } | ||
866 | put_ldev(mdev); | ||
867 | } | ||
868 | |||
869 | if (fp == FP_STONITH && | ||
870 | (ns.role == R_PRIMARY && | ||
871 | ns.conn < C_CONNECTED && | ||
872 | ns.pdsk > D_OUTDATED)) | ||
873 | ns.susp = 1; | ||
874 | |||
875 | if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { | ||
876 | if (ns.conn == C_SYNC_SOURCE) | ||
877 | ns.conn = C_PAUSED_SYNC_S; | ||
878 | if (ns.conn == C_SYNC_TARGET) | ||
879 | ns.conn = C_PAUSED_SYNC_T; | ||
880 | } else { | ||
881 | if (ns.conn == C_PAUSED_SYNC_S) | ||
882 | ns.conn = C_SYNC_SOURCE; | ||
883 | if (ns.conn == C_PAUSED_SYNC_T) | ||
884 | ns.conn = C_SYNC_TARGET; | ||
885 | } | ||
886 | |||
887 | return ns; | ||
888 | } | ||
889 | |||
890 | /* helper for __drbd_set_state */ | ||
891 | static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) | ||
892 | { | ||
893 | if (cs == C_VERIFY_T) { | ||
894 | /* starting online verify from an arbitrary position | ||
895 | * does not fit well into the existing protocol. | ||
896 | * on C_VERIFY_T, we initialize ov_left and friends | ||
897 | * implicitly in receive_DataRequest once the | ||
898 | * first P_OV_REQUEST is received */ | ||
899 | mdev->ov_start_sector = ~(sector_t)0; | ||
900 | } else { | ||
901 | unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector); | ||
902 | if (bit >= mdev->rs_total) | ||
903 | mdev->ov_start_sector = | ||
904 | BM_BIT_TO_SECT(mdev->rs_total - 1); | ||
905 | mdev->ov_position = mdev->ov_start_sector; | ||
906 | } | ||
907 | } | ||
908 | |||
909 | /** | ||
910 | * __drbd_set_state() - Set a new DRBD state | ||
911 | * @mdev: DRBD device. | ||
912 | * @ns: new state. | ||
913 | * @flags: Flags | ||
914 | * @done: Optional completion, that will get completed after the after_state_ch() finished | ||
915 | * | ||
916 | * Caller needs to hold req_lock, and global_state_lock. Do not call directly. | ||
917 | */ | ||
918 | int __drbd_set_state(struct drbd_conf *mdev, | ||
919 | union drbd_state ns, enum chg_state_flags flags, | ||
920 | struct completion *done) | ||
921 | { | ||
922 | union drbd_state os; | ||
923 | int rv = SS_SUCCESS; | ||
924 | int warn_sync_abort = 0; | ||
925 | struct after_state_chg_work *ascw; | ||
926 | |||
927 | os = mdev->state; | ||
928 | |||
929 | ns = sanitize_state(mdev, os, ns, &warn_sync_abort); | ||
930 | |||
931 | if (ns.i == os.i) | ||
932 | return SS_NOTHING_TO_DO; | ||
933 | |||
934 | if (!(flags & CS_HARD)) { | ||
935 | /* pre-state-change checks ; only look at ns */ | ||
936 | /* See drbd_state_sw_errors in drbd_strings.c */ | ||
937 | |||
938 | rv = is_valid_state(mdev, ns); | ||
939 | if (rv < SS_SUCCESS) { | ||
940 | /* If the old state was illegal as well, then let | ||
941 | this happen...*/ | ||
942 | |||
943 | if (is_valid_state(mdev, os) == rv) { | ||
944 | dev_err(DEV, "Considering state change from bad state. " | ||
945 | "Error would be: '%s'\n", | ||
946 | drbd_set_st_err_str(rv)); | ||
947 | print_st(mdev, "old", os); | ||
948 | print_st(mdev, "new", ns); | ||
949 | rv = is_valid_state_transition(mdev, ns, os); | ||
950 | } | ||
951 | } else | ||
952 | rv = is_valid_state_transition(mdev, ns, os); | ||
953 | } | ||
954 | |||
955 | if (rv < SS_SUCCESS) { | ||
956 | if (flags & CS_VERBOSE) | ||
957 | print_st_err(mdev, os, ns, rv); | ||
958 | return rv; | ||
959 | } | ||
960 | |||
961 | if (warn_sync_abort) | ||
962 | dev_warn(DEV, "Resync aborted.\n"); | ||
963 | |||
964 | { | ||
965 | char *pbp, pb[300]; | ||
966 | pbp = pb; | ||
967 | *pbp = 0; | ||
968 | PSC(role); | ||
969 | PSC(peer); | ||
970 | PSC(conn); | ||
971 | PSC(disk); | ||
972 | PSC(pdsk); | ||
973 | PSC(susp); | ||
974 | PSC(aftr_isp); | ||
975 | PSC(peer_isp); | ||
976 | PSC(user_isp); | ||
977 | dev_info(DEV, "%s\n", pb); | ||
978 | } | ||
979 | |||
980 | /* solve the race between becoming unconfigured, | ||
981 | * worker doing the cleanup, and | ||
982 | * admin reconfiguring us: | ||
983 | * on (re)configure, first set CONFIG_PENDING, | ||
984 | * then wait for a potentially exiting worker, | ||
985 | * start the worker, and schedule one no_op. | ||
986 | * then proceed with configuration. | ||
987 | */ | ||
988 | if (ns.disk == D_DISKLESS && | ||
989 | ns.conn == C_STANDALONE && | ||
990 | ns.role == R_SECONDARY && | ||
991 | !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) | ||
992 | set_bit(DEVICE_DYING, &mdev->flags); | ||
993 | |||
994 | mdev->state.i = ns.i; | ||
995 | wake_up(&mdev->misc_wait); | ||
996 | wake_up(&mdev->state_wait); | ||
997 | |||
998 | /* post-state-change actions */ | ||
999 | if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) { | ||
1000 | set_bit(STOP_SYNC_TIMER, &mdev->flags); | ||
1001 | mod_timer(&mdev->resync_timer, jiffies); | ||
1002 | } | ||
1003 | |||
1004 | /* aborted verify run. log the last position */ | ||
1005 | if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && | ||
1006 | ns.conn < C_CONNECTED) { | ||
1007 | mdev->ov_start_sector = | ||
1008 | BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left); | ||
1009 | dev_info(DEV, "Online Verify reached sector %llu\n", | ||
1010 | (unsigned long long)mdev->ov_start_sector); | ||
1011 | } | ||
1012 | |||
1013 | if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) && | ||
1014 | (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) { | ||
1015 | dev_info(DEV, "Syncer continues.\n"); | ||
1016 | mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time; | ||
1017 | if (ns.conn == C_SYNC_TARGET) { | ||
1018 | if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags)) | ||
1019 | mod_timer(&mdev->resync_timer, jiffies); | ||
1020 | /* This if (!test_bit) is only needed for the case | ||
1021 | that a device that has ceased to use its timer, | ||
1022 | i.e. it is already in drbd_resync_finished() gets | ||
1023 | paused and resumed. */ | ||
1024 | } | ||
1025 | } | ||
1026 | |||
1027 | if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) && | ||
1028 | (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) { | ||
1029 | dev_info(DEV, "Resync suspended\n"); | ||
1030 | mdev->rs_mark_time = jiffies; | ||
1031 | if (ns.conn == C_PAUSED_SYNC_T) | ||
1032 | set_bit(STOP_SYNC_TIMER, &mdev->flags); | ||
1033 | } | ||
1034 | |||
1035 | if (os.conn == C_CONNECTED && | ||
1036 | (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) { | ||
1037 | mdev->ov_position = 0; | ||
1038 | mdev->rs_total = | ||
1039 | mdev->rs_mark_left = drbd_bm_bits(mdev); | ||
1040 | if (mdev->agreed_pro_version >= 90) | ||
1041 | set_ov_position(mdev, ns.conn); | ||
1042 | else | ||
1043 | mdev->ov_start_sector = 0; | ||
1044 | mdev->ov_left = mdev->rs_total | ||
1045 | - BM_SECT_TO_BIT(mdev->ov_position); | ||
1046 | mdev->rs_start = | ||
1047 | mdev->rs_mark_time = jiffies; | ||
1048 | mdev->ov_last_oos_size = 0; | ||
1049 | mdev->ov_last_oos_start = 0; | ||
1050 | |||
1051 | if (ns.conn == C_VERIFY_S) { | ||
1052 | dev_info(DEV, "Starting Online Verify from sector %llu\n", | ||
1053 | (unsigned long long)mdev->ov_position); | ||
1054 | mod_timer(&mdev->resync_timer, jiffies); | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | if (get_ldev(mdev)) { | ||
1059 | u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND| | ||
1060 | MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE| | ||
1061 | MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY); | ||
1062 | |||
1063 | if (test_bit(CRASHED_PRIMARY, &mdev->flags)) | ||
1064 | mdf |= MDF_CRASHED_PRIMARY; | ||
1065 | if (mdev->state.role == R_PRIMARY || | ||
1066 | (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY)) | ||
1067 | mdf |= MDF_PRIMARY_IND; | ||
1068 | if (mdev->state.conn > C_WF_REPORT_PARAMS) | ||
1069 | mdf |= MDF_CONNECTED_IND; | ||
1070 | if (mdev->state.disk > D_INCONSISTENT) | ||
1071 | mdf |= MDF_CONSISTENT; | ||
1072 | if (mdev->state.disk > D_OUTDATED) | ||
1073 | mdf |= MDF_WAS_UP_TO_DATE; | ||
1074 | if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT) | ||
1075 | mdf |= MDF_PEER_OUT_DATED; | ||
1076 | if (mdf != mdev->ldev->md.flags) { | ||
1077 | mdev->ldev->md.flags = mdf; | ||
1078 | drbd_md_mark_dirty(mdev); | ||
1079 | } | ||
1080 | if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT) | ||
1081 | drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]); | ||
1082 | put_ldev(mdev); | ||
1083 | } | ||
1084 | |||
1085 | /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */ | ||
1086 | if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT && | ||
1087 | os.peer == R_SECONDARY && ns.peer == R_PRIMARY) | ||
1088 | set_bit(CONSIDER_RESYNC, &mdev->flags); | ||
1089 | |||
1090 | /* Receiver should clean up itself */ | ||
1091 | if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) | ||
1092 | drbd_thread_stop_nowait(&mdev->receiver); | ||
1093 | |||
1094 | /* Now the receiver finished cleaning up itself, it should die */ | ||
1095 | if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) | ||
1096 | drbd_thread_stop_nowait(&mdev->receiver); | ||
1097 | |||
1098 | /* Upon network failure, we need to restart the receiver. */ | ||
1099 | if (os.conn > C_TEAR_DOWN && | ||
1100 | ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) | ||
1101 | drbd_thread_restart_nowait(&mdev->receiver); | ||
1102 | |||
1103 | ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); | ||
1104 | if (ascw) { | ||
1105 | ascw->os = os; | ||
1106 | ascw->ns = ns; | ||
1107 | ascw->flags = flags; | ||
1108 | ascw->w.cb = w_after_state_ch; | ||
1109 | ascw->done = done; | ||
1110 | drbd_queue_work(&mdev->data.work, &ascw->w); | ||
1111 | } else { | ||
1112 | dev_warn(DEV, "Could not kmalloc an ascw\n"); | ||
1113 | } | ||
1114 | |||
1115 | return rv; | ||
1116 | } | ||
1117 | |||
1118 | static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused) | ||
1119 | { | ||
1120 | struct after_state_chg_work *ascw = | ||
1121 | container_of(w, struct after_state_chg_work, w); | ||
1122 | after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags); | ||
1123 | if (ascw->flags & CS_WAIT_COMPLETE) { | ||
1124 | D_ASSERT(ascw->done != NULL); | ||
1125 | complete(ascw->done); | ||
1126 | } | ||
1127 | kfree(ascw); | ||
1128 | |||
1129 | return 1; | ||
1130 | } | ||
1131 | |||
1132 | static void abw_start_sync(struct drbd_conf *mdev, int rv) | ||
1133 | { | ||
1134 | if (rv) { | ||
1135 | dev_err(DEV, "Writing the bitmap failed not starting resync.\n"); | ||
1136 | _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE); | ||
1137 | return; | ||
1138 | } | ||
1139 | |||
1140 | switch (mdev->state.conn) { | ||
1141 | case C_STARTING_SYNC_T: | ||
1142 | _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); | ||
1143 | break; | ||
1144 | case C_STARTING_SYNC_S: | ||
1145 | drbd_start_resync(mdev, C_SYNC_SOURCE); | ||
1146 | break; | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | /** | ||
1151 | * after_state_ch() - Perform after state change actions that may sleep | ||
1152 | * @mdev: DRBD device. | ||
1153 | * @os: old state. | ||
1154 | * @ns: new state. | ||
1155 | * @flags: Flags | ||
1156 | */ | ||
1157 | static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | ||
1158 | union drbd_state ns, enum chg_state_flags flags) | ||
1159 | { | ||
1160 | enum drbd_fencing_p fp; | ||
1161 | |||
1162 | if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { | ||
1163 | clear_bit(CRASHED_PRIMARY, &mdev->flags); | ||
1164 | if (mdev->p_uuid) | ||
1165 | mdev->p_uuid[UI_FLAGS] &= ~((u64)2); | ||
1166 | } | ||
1167 | |||
1168 | fp = FP_DONT_CARE; | ||
1169 | if (get_ldev(mdev)) { | ||
1170 | fp = mdev->ldev->dc.fencing; | ||
1171 | put_ldev(mdev); | ||
1172 | } | ||
1173 | |||
1174 | /* Inform userspace about the change... */ | ||
1175 | drbd_bcast_state(mdev, ns); | ||
1176 | |||
1177 | if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) && | ||
1178 | (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) | ||
1179 | drbd_khelper(mdev, "pri-on-incon-degr"); | ||
1180 | |||
1181 | /* Here we have the actions that are performed after a | ||
1182 | state change. This function might sleep */ | ||
1183 | |||
1184 | if (fp == FP_STONITH && ns.susp) { | ||
1185 | /* case1: The outdate peer handler is successful: | ||
1186 | * case2: The connection was established again: */ | ||
1187 | if ((os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) || | ||
1188 | (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) { | ||
1189 | tl_clear(mdev); | ||
1190 | spin_lock_irq(&mdev->req_lock); | ||
1191 | _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL); | ||
1192 | spin_unlock_irq(&mdev->req_lock); | ||
1193 | } | ||
1194 | } | ||
1195 | /* Do not change the order of the if above and the two below... */ | ||
1196 | if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ | ||
1197 | drbd_send_uuids(mdev); | ||
1198 | drbd_send_state(mdev); | ||
1199 | } | ||
1200 | if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S) | ||
1201 | drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)"); | ||
1202 | |||
1203 | /* Lost contact to peer's copy of the data */ | ||
1204 | if ((os.pdsk >= D_INCONSISTENT && | ||
1205 | os.pdsk != D_UNKNOWN && | ||
1206 | os.pdsk != D_OUTDATED) | ||
1207 | && (ns.pdsk < D_INCONSISTENT || | ||
1208 | ns.pdsk == D_UNKNOWN || | ||
1209 | ns.pdsk == D_OUTDATED)) { | ||
1210 | kfree(mdev->p_uuid); | ||
1211 | mdev->p_uuid = NULL; | ||
1212 | if (get_ldev(mdev)) { | ||
1213 | if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && | ||
1214 | mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { | ||
1215 | drbd_uuid_new_current(mdev); | ||
1216 | drbd_send_uuids(mdev); | ||
1217 | } | ||
1218 | put_ldev(mdev); | ||
1219 | } | ||
1220 | } | ||
1221 | |||
1222 | if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) { | ||
1223 | if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) | ||
1224 | drbd_uuid_new_current(mdev); | ||
1225 | |||
1226 | /* D_DISKLESS Peer becomes secondary */ | ||
1227 | if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) | ||
1228 | drbd_al_to_on_disk_bm(mdev); | ||
1229 | put_ldev(mdev); | ||
1230 | } | ||
1231 | |||
1232 | /* Last part of the attaching process ... */ | ||
1233 | if (ns.conn >= C_CONNECTED && | ||
1234 | os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) { | ||
1235 | kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */ | ||
1236 | mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */ | ||
1237 | drbd_send_sizes(mdev, 0); /* to start sync... */ | ||
1238 | drbd_send_uuids(mdev); | ||
1239 | drbd_send_state(mdev); | ||
1240 | } | ||
1241 | |||
1242 | /* We want to pause/continue resync, tell peer. */ | ||
1243 | if (ns.conn >= C_CONNECTED && | ||
1244 | ((os.aftr_isp != ns.aftr_isp) || | ||
1245 | (os.user_isp != ns.user_isp))) | ||
1246 | drbd_send_state(mdev); | ||
1247 | |||
1248 | /* In case one of the isp bits got set, suspend other devices. */ | ||
1249 | if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) && | ||
1250 | (ns.aftr_isp || ns.peer_isp || ns.user_isp)) | ||
1251 | suspend_other_sg(mdev); | ||
1252 | |||
1253 | /* Make sure the peer gets informed about eventual state | ||
1254 | changes (ISP bits) while we were in WFReportParams. */ | ||
1255 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED) | ||
1256 | drbd_send_state(mdev); | ||
1257 | |||
1258 | /* We are in the progress to start a full sync... */ | ||
1259 | if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) || | ||
1260 | (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S)) | ||
1261 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync"); | ||
1262 | |||
1263 | /* We are invalidating our self... */ | ||
1264 | if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED && | ||
1265 | os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) | ||
1266 | drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); | ||
1267 | |||
1268 | if (os.disk > D_FAILED && ns.disk == D_FAILED) { | ||
1269 | enum drbd_io_error_p eh; | ||
1270 | |||
1271 | eh = EP_PASS_ON; | ||
1272 | if (get_ldev_if_state(mdev, D_FAILED)) { | ||
1273 | eh = mdev->ldev->dc.on_io_error; | ||
1274 | put_ldev(mdev); | ||
1275 | } | ||
1276 | |||
1277 | drbd_rs_cancel_all(mdev); | ||
1278 | /* since get_ldev() only works as long as disk>=D_INCONSISTENT, | ||
1279 | and it is D_DISKLESS here, local_cnt can only go down, it can | ||
1280 | not increase... It will reach zero */ | ||
1281 | wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); | ||
1282 | mdev->rs_total = 0; | ||
1283 | mdev->rs_failed = 0; | ||
1284 | atomic_set(&mdev->rs_pending_cnt, 0); | ||
1285 | |||
1286 | spin_lock_irq(&mdev->req_lock); | ||
1287 | _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL); | ||
1288 | spin_unlock_irq(&mdev->req_lock); | ||
1289 | |||
1290 | if (eh == EP_CALL_HELPER) | ||
1291 | drbd_khelper(mdev, "local-io-error"); | ||
1292 | } | ||
1293 | |||
1294 | if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) { | ||
1295 | |||
1296 | if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ { | ||
1297 | if (drbd_send_state(mdev)) | ||
1298 | dev_warn(DEV, "Notified peer that my disk is broken.\n"); | ||
1299 | else | ||
1300 | dev_err(DEV, "Sending state in drbd_io_error() failed\n"); | ||
1301 | } | ||
1302 | |||
1303 | lc_destroy(mdev->resync); | ||
1304 | mdev->resync = NULL; | ||
1305 | lc_destroy(mdev->act_log); | ||
1306 | mdev->act_log = NULL; | ||
1307 | __no_warn(local, | ||
1308 | drbd_free_bc(mdev->ldev); | ||
1309 | mdev->ldev = NULL;); | ||
1310 | |||
1311 | if (mdev->md_io_tmpp) | ||
1312 | __free_page(mdev->md_io_tmpp); | ||
1313 | } | ||
1314 | |||
1315 | /* Disks got bigger while they were detached */ | ||
1316 | if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && | ||
1317 | test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) { | ||
1318 | if (ns.conn == C_CONNECTED) | ||
1319 | resync_after_online_grow(mdev); | ||
1320 | } | ||
1321 | |||
1322 | /* A resync finished or aborted, wake paused devices... */ | ||
1323 | if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) || | ||
1324 | (os.peer_isp && !ns.peer_isp) || | ||
1325 | (os.user_isp && !ns.user_isp)) | ||
1326 | resume_next_sg(mdev); | ||
1327 | |||
1328 | /* Upon network connection, we need to start the receiver */ | ||
1329 | if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED) | ||
1330 | drbd_thread_start(&mdev->receiver); | ||
1331 | |||
1332 | /* Terminate worker thread if we are unconfigured - it will be | ||
1333 | restarted as needed... */ | ||
1334 | if (ns.disk == D_DISKLESS && | ||
1335 | ns.conn == C_STANDALONE && | ||
1336 | ns.role == R_SECONDARY) { | ||
1337 | if (os.aftr_isp != ns.aftr_isp) | ||
1338 | resume_next_sg(mdev); | ||
1339 | /* set in __drbd_set_state, unless CONFIG_PENDING was set */ | ||
1340 | if (test_bit(DEVICE_DYING, &mdev->flags)) | ||
1341 | drbd_thread_stop_nowait(&mdev->worker); | ||
1342 | } | ||
1343 | |||
1344 | drbd_md_sync(mdev); | ||
1345 | } | ||
1346 | |||
1347 | |||
1348 | static int drbd_thread_setup(void *arg) | ||
1349 | { | ||
1350 | struct drbd_thread *thi = (struct drbd_thread *) arg; | ||
1351 | struct drbd_conf *mdev = thi->mdev; | ||
1352 | unsigned long flags; | ||
1353 | int retval; | ||
1354 | |||
1355 | restart: | ||
1356 | retval = thi->function(thi); | ||
1357 | |||
1358 | spin_lock_irqsave(&thi->t_lock, flags); | ||
1359 | |||
1360 | /* if the receiver has been "Exiting", the last thing it did | ||
1361 | * was set the conn state to "StandAlone", | ||
1362 | * if now a re-connect request comes in, conn state goes C_UNCONNECTED, | ||
1363 | * and receiver thread will be "started". | ||
1364 | * drbd_thread_start needs to set "Restarting" in that case. | ||
1365 | * t_state check and assignment needs to be within the same spinlock, | ||
1366 | * so either thread_start sees Exiting, and can remap to Restarting, | ||
1367 | * or thread_start see None, and can proceed as normal. | ||
1368 | */ | ||
1369 | |||
1370 | if (thi->t_state == Restarting) { | ||
1371 | dev_info(DEV, "Restarting %s\n", current->comm); | ||
1372 | thi->t_state = Running; | ||
1373 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1374 | goto restart; | ||
1375 | } | ||
1376 | |||
1377 | thi->task = NULL; | ||
1378 | thi->t_state = None; | ||
1379 | smp_mb(); | ||
1380 | complete(&thi->stop); | ||
1381 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1382 | |||
1383 | dev_info(DEV, "Terminating %s\n", current->comm); | ||
1384 | |||
1385 | /* Release mod reference taken when thread was started */ | ||
1386 | module_put(THIS_MODULE); | ||
1387 | return retval; | ||
1388 | } | ||
1389 | |||
1390 | static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi, | ||
1391 | int (*func) (struct drbd_thread *)) | ||
1392 | { | ||
1393 | spin_lock_init(&thi->t_lock); | ||
1394 | thi->task = NULL; | ||
1395 | thi->t_state = None; | ||
1396 | thi->function = func; | ||
1397 | thi->mdev = mdev; | ||
1398 | } | ||
1399 | |||
1400 | int drbd_thread_start(struct drbd_thread *thi) | ||
1401 | { | ||
1402 | struct drbd_conf *mdev = thi->mdev; | ||
1403 | struct task_struct *nt; | ||
1404 | unsigned long flags; | ||
1405 | |||
1406 | const char *me = | ||
1407 | thi == &mdev->receiver ? "receiver" : | ||
1408 | thi == &mdev->asender ? "asender" : | ||
1409 | thi == &mdev->worker ? "worker" : "NONSENSE"; | ||
1410 | |||
1411 | /* is used from state engine doing drbd_thread_stop_nowait, | ||
1412 | * while holding the req lock irqsave */ | ||
1413 | spin_lock_irqsave(&thi->t_lock, flags); | ||
1414 | |||
1415 | switch (thi->t_state) { | ||
1416 | case None: | ||
1417 | dev_info(DEV, "Starting %s thread (from %s [%d])\n", | ||
1418 | me, current->comm, current->pid); | ||
1419 | |||
1420 | /* Get ref on module for thread - this is released when thread exits */ | ||
1421 | if (!try_module_get(THIS_MODULE)) { | ||
1422 | dev_err(DEV, "Failed to get module reference in drbd_thread_start\n"); | ||
1423 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1424 | return FALSE; | ||
1425 | } | ||
1426 | |||
1427 | init_completion(&thi->stop); | ||
1428 | D_ASSERT(thi->task == NULL); | ||
1429 | thi->reset_cpu_mask = 1; | ||
1430 | thi->t_state = Running; | ||
1431 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1432 | flush_signals(current); /* otherw. may get -ERESTARTNOINTR */ | ||
1433 | |||
1434 | nt = kthread_create(drbd_thread_setup, (void *) thi, | ||
1435 | "drbd%d_%s", mdev_to_minor(mdev), me); | ||
1436 | |||
1437 | if (IS_ERR(nt)) { | ||
1438 | dev_err(DEV, "Couldn't start thread\n"); | ||
1439 | |||
1440 | module_put(THIS_MODULE); | ||
1441 | return FALSE; | ||
1442 | } | ||
1443 | spin_lock_irqsave(&thi->t_lock, flags); | ||
1444 | thi->task = nt; | ||
1445 | thi->t_state = Running; | ||
1446 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1447 | wake_up_process(nt); | ||
1448 | break; | ||
1449 | case Exiting: | ||
1450 | thi->t_state = Restarting; | ||
1451 | dev_info(DEV, "Restarting %s thread (from %s [%d])\n", | ||
1452 | me, current->comm, current->pid); | ||
1453 | /* fall through */ | ||
1454 | case Running: | ||
1455 | case Restarting: | ||
1456 | default: | ||
1457 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1458 | break; | ||
1459 | } | ||
1460 | |||
1461 | return TRUE; | ||
1462 | } | ||
1463 | |||
1464 | |||
1465 | void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait) | ||
1466 | { | ||
1467 | unsigned long flags; | ||
1468 | |||
1469 | enum drbd_thread_state ns = restart ? Restarting : Exiting; | ||
1470 | |||
1471 | /* may be called from state engine, holding the req lock irqsave */ | ||
1472 | spin_lock_irqsave(&thi->t_lock, flags); | ||
1473 | |||
1474 | if (thi->t_state == None) { | ||
1475 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1476 | if (restart) | ||
1477 | drbd_thread_start(thi); | ||
1478 | return; | ||
1479 | } | ||
1480 | |||
1481 | if (thi->t_state != ns) { | ||
1482 | if (thi->task == NULL) { | ||
1483 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1484 | return; | ||
1485 | } | ||
1486 | |||
1487 | thi->t_state = ns; | ||
1488 | smp_mb(); | ||
1489 | init_completion(&thi->stop); | ||
1490 | if (thi->task != current) | ||
1491 | force_sig(DRBD_SIGKILL, thi->task); | ||
1492 | |||
1493 | } | ||
1494 | |||
1495 | spin_unlock_irqrestore(&thi->t_lock, flags); | ||
1496 | |||
1497 | if (wait) | ||
1498 | wait_for_completion(&thi->stop); | ||
1499 | } | ||
1500 | |||
1501 | #ifdef CONFIG_SMP | ||
1502 | /** | ||
1503 | * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs | ||
1504 | * @mdev: DRBD device. | ||
1505 | * | ||
1506 | * Forces all threads of a device onto the same CPU. This is beneficial for | ||
1507 | * DRBD's performance. May be overridden by the user's configuration. | ||
1508 | */ | ||
1509 | void drbd_calc_cpu_mask(struct drbd_conf *mdev) | ||
1510 | { | ||
1511 | int ord, cpu; | ||
1512 | |||
1513 | /* user override. */ | ||
1514 | if (cpumask_weight(mdev->cpu_mask)) | ||
1515 | return; | ||
1516 | |||
1517 | ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask); | ||
1518 | for_each_online_cpu(cpu) { | ||
1519 | if (ord-- == 0) { | ||
1520 | cpumask_set_cpu(cpu, mdev->cpu_mask); | ||
1521 | return; | ||
1522 | } | ||
1523 | } | ||
1524 | /* should not be reached */ | ||
1525 | cpumask_setall(mdev->cpu_mask); | ||
1526 | } | ||
1527 | |||
1528 | /** | ||
1529 | * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread | ||
1530 | * @mdev: DRBD device. | ||
1531 | * | ||
1532 | * Call this in the "main loop" of _all_ threads; no mutex is needed, since | ||
1533 | * current won't die prematurely. | ||
1534 | */ | ||
1535 | void drbd_thread_current_set_cpu(struct drbd_conf *mdev) | ||
1536 | { | ||
1537 | struct task_struct *p = current; | ||
1538 | struct drbd_thread *thi = | ||
1539 | p == mdev->asender.task ? &mdev->asender : | ||
1540 | p == mdev->receiver.task ? &mdev->receiver : | ||
1541 | p == mdev->worker.task ? &mdev->worker : | ||
1542 | NULL; | ||
1543 | ERR_IF(thi == NULL) | ||
1544 | return; | ||
1545 | if (!thi->reset_cpu_mask) | ||
1546 | return; | ||
1547 | thi->reset_cpu_mask = 0; | ||
1548 | set_cpus_allowed_ptr(p, mdev->cpu_mask); | ||
1549 | } | ||
1550 | #endif | ||
1551 | |||
1552 | /* the appropriate socket mutex must be held already */ | ||
1553 | int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, | ||
1554 | enum drbd_packets cmd, struct p_header *h, | ||
1555 | size_t size, unsigned msg_flags) | ||
1556 | { | ||
1557 | int sent, ok; | ||
1558 | |||
1559 | ERR_IF(!h) return FALSE; | ||
1560 | ERR_IF(!size) return FALSE; | ||
1561 | |||
1562 | h->magic = BE_DRBD_MAGIC; | ||
1563 | h->command = cpu_to_be16(cmd); | ||
1564 | h->length = cpu_to_be16(size-sizeof(struct p_header)); | ||
1565 | |||
1566 | sent = drbd_send(mdev, sock, h, size, msg_flags); | ||
1567 | |||
1568 | ok = (sent == size); | ||
1569 | if (!ok) | ||
1570 | dev_err(DEV, "short sent %s size=%d sent=%d\n", | ||
1571 | cmdname(cmd), (int)size, sent); | ||
1572 | return ok; | ||
1573 | } | ||
1574 | |||
1575 | /* don't pass the socket. we may only look at it | ||
1576 | * when we hold the appropriate socket mutex. | ||
1577 | */ | ||
1578 | int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, | ||
1579 | enum drbd_packets cmd, struct p_header *h, size_t size) | ||
1580 | { | ||
1581 | int ok = 0; | ||
1582 | struct socket *sock; | ||
1583 | |||
1584 | if (use_data_socket) { | ||
1585 | mutex_lock(&mdev->data.mutex); | ||
1586 | sock = mdev->data.socket; | ||
1587 | } else { | ||
1588 | mutex_lock(&mdev->meta.mutex); | ||
1589 | sock = mdev->meta.socket; | ||
1590 | } | ||
1591 | |||
1592 | /* drbd_disconnect() could have called drbd_free_sock() | ||
1593 | * while we were waiting in down()... */ | ||
1594 | if (likely(sock != NULL)) | ||
1595 | ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0); | ||
1596 | |||
1597 | if (use_data_socket) | ||
1598 | mutex_unlock(&mdev->data.mutex); | ||
1599 | else | ||
1600 | mutex_unlock(&mdev->meta.mutex); | ||
1601 | return ok; | ||
1602 | } | ||
1603 | |||
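| /* like drbd_send_cmd(), but the payload lives in a separate buffer and is | ||
| * sent right after the header, over the data socket */ | ||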
1604 | int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data, | ||
1605 | size_t size) | ||
1606 | { | ||
1607 | struct p_header h; | ||
1608 | int ok; | ||
1609 | |||
1610 | h.magic = BE_DRBD_MAGIC; | ||
1611 | h.command = cpu_to_be16(cmd); | ||
1612 | h.length = cpu_to_be16(size); | ||
1613 | |||
1614 | if (!drbd_get_data_sock(mdev)) | ||
1615 | return 0; | ||
1616 | |||
1617 | ok = (sizeof(h) == | ||
1618 | drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0)); | ||
1619 | ok = ok && (size == | ||
1620 | drbd_send(mdev, mdev->data.socket, data, size, 0)); | ||
1621 | |||
1622 | drbd_put_data_sock(mdev); | ||
1623 | |||
1624 | return ok; | ||
1625 | } | ||
1626 | |||
1627 | int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) | ||
1628 | { | ||
1629 | struct p_rs_param_89 *p; | ||
1630 | struct socket *sock; | ||
1631 | int size, rv; | ||
1632 | const int apv = mdev->agreed_pro_version; | ||
1633 | |||
1634 | size = apv <= 87 ? sizeof(struct p_rs_param) | ||
1635 | : apv == 88 ? sizeof(struct p_rs_param) | ||
1636 | + strlen(mdev->sync_conf.verify_alg) + 1 | ||
1637 | : /* 89 */ sizeof(struct p_rs_param_89); | ||
1638 | |||
1639 | /* used from admin command context and receiver/worker context. | ||
1640 | * to avoid kmalloc, grab the socket right here, | ||
1641 | * then use the pre-allocated sbuf there */ | ||
1642 | mutex_lock(&mdev->data.mutex); | ||
1643 | sock = mdev->data.socket; | ||
1644 | |||
1645 | if (likely(sock != NULL)) { | ||
1646 | enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM; | ||
1647 | |||
1648 | p = &mdev->data.sbuf.rs_param_89; | ||
1649 | |||
1650 | /* initialize verify_alg and csums_alg */ | ||
1651 | memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); | ||
1652 | |||
1653 | p->rate = cpu_to_be32(sc->rate); | ||
1654 | |||
1655 | if (apv >= 88) | ||
1656 | strcpy(p->verify_alg, mdev->sync_conf.verify_alg); | ||
1657 | if (apv >= 89) | ||
1658 | strcpy(p->csums_alg, mdev->sync_conf.csums_alg); | ||
1659 | |||
1660 | rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0); | ||
1661 | } else | ||
1662 | rv = 0; /* not ok */ | ||
1663 | |||
1664 | mutex_unlock(&mdev->data.mutex); | ||
1665 | |||
1666 | return rv; | ||
1667 | } | ||
1668 | |||
1669 | int drbd_send_protocol(struct drbd_conf *mdev) | ||
1670 | { | ||
1671 | struct p_protocol *p; | ||
1672 | int size, rv; | ||
1673 | |||
1674 | size = sizeof(struct p_protocol); | ||
1675 | |||
1676 | if (mdev->agreed_pro_version >= 87) | ||
1677 | size += strlen(mdev->net_conf->integrity_alg) + 1; | ||
1678 | |||
1679 | /* we must not recurse into our own queue, | ||
1680 | * as that is blocked during handshake */ | ||
1681 | p = kmalloc(size, GFP_NOIO); | ||
1682 | if (p == NULL) | ||
1683 | return 0; | ||
1684 | |||
1685 | p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol); | ||
1686 | p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p); | ||
1687 | p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p); | ||
1688 | p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p); | ||
1689 | p->want_lose = cpu_to_be32(mdev->net_conf->want_lose); | ||
1690 | p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries); | ||
1691 | |||
1692 | if (mdev->agreed_pro_version >= 87) | ||
1693 | strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); | ||
1694 | |||
1695 | rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL, | ||
1696 | (struct p_header *)p, size); | ||
1697 | kfree(p); | ||
1698 | return rv; | ||
1699 | } | ||
1700 | |||
1701 | int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags) | ||
1702 | { | ||
1703 | struct p_uuids p; | ||
1704 | int i; | ||
1705 | |||
1706 | if (!get_ldev_if_state(mdev, D_NEGOTIATING)) | ||
1707 | return 1; | ||
1708 | |||
1709 | for (i = UI_CURRENT; i < UI_SIZE; i++) | ||
1710 | p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0; | ||
1711 | |||
1712 | mdev->comm_bm_set = drbd_bm_total_weight(mdev); | ||
1713 | p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set); | ||
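| /* flags sent along with the UUIDs: 1 = want_lose, 2 = CRASHED_PRIMARY was | ||
| * set, 4 = the disk will be D_INCONSISTENT after the pending state change */ | ||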
1714 | uuid_flags |= mdev->net_conf->want_lose ? 1 : 0; | ||
1715 | uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0; | ||
1716 | uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0; | ||
1717 | p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags); | ||
1718 | |||
1719 | put_ldev(mdev); | ||
1720 | |||
1721 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, | ||
1722 | (struct p_header *)&p, sizeof(p)); | ||
1723 | } | ||
1724 | |||
1725 | int drbd_send_uuids(struct drbd_conf *mdev) | ||
1726 | { | ||
1727 | return _drbd_send_uuids(mdev, 0); | ||
1728 | } | ||
1729 | |||
1730 | int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev) | ||
1731 | { | ||
1732 | return _drbd_send_uuids(mdev, 8); | ||
1733 | } | ||
1734 | |||
1735 | |||
1736 | int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) | ||
1737 | { | ||
1738 | struct p_rs_uuid p; | ||
1739 | |||
1740 | p.uuid = cpu_to_be64(val); | ||
1741 | |||
1742 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, | ||
1743 | (struct p_header *)&p, sizeof(p)); | ||
1744 | } | ||
1745 | |||
1746 | int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply) | ||
1747 | { | ||
1748 | struct p_sizes p; | ||
1749 | sector_t d_size, u_size; | ||
1750 | int q_order_type; | ||
1751 | int ok; | ||
1752 | |||
1753 | if (get_ldev_if_state(mdev, D_NEGOTIATING)) { | ||
1754 | D_ASSERT(mdev->ldev->backing_bdev); | ||
1755 | d_size = drbd_get_max_capacity(mdev->ldev); | ||
1756 | u_size = mdev->ldev->dc.disk_size; | ||
1757 | q_order_type = drbd_queue_order_type(mdev); | ||
1758 | p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev)); | ||
1759 | put_ldev(mdev); | ||
1760 | } else { | ||
1761 | d_size = 0; | ||
1762 | u_size = 0; | ||
1763 | q_order_type = QUEUE_ORDERED_NONE; | ||
1764 | } | ||
1765 | |||
1766 | p.d_size = cpu_to_be64(d_size); | ||
1767 | p.u_size = cpu_to_be64(u_size); | ||
1768 | p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); | ||
1769 | p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue)); | ||
1770 | p.queue_order_type = cpu_to_be32(q_order_type); | ||
1771 | |||
1772 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, | ||
1773 | (struct p_header *)&p, sizeof(p)); | ||
1774 | return ok; | ||
1775 | } | ||
1776 | |||
1777 | /** | ||
1778 | * drbd_send_state() - Sends the drbd state to the peer | ||
1779 | * @mdev: DRBD device. | ||
1780 | */ | ||
1781 | int drbd_send_state(struct drbd_conf *mdev) | ||
1782 | { | ||
1783 | struct socket *sock; | ||
1784 | struct p_state p; | ||
1785 | int ok = 0; | ||
1786 | |||
1787 | /* Grab state lock so we won't send state if we're in the middle | ||
1788 | * of a cluster-wide state change on another thread */ | ||
1789 | drbd_state_lock(mdev); | ||
1790 | |||
1791 | mutex_lock(&mdev->data.mutex); | ||
1792 | |||
1793 | p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */ | ||
1794 | sock = mdev->data.socket; | ||
1795 | |||
1796 | if (likely(sock != NULL)) { | ||
1797 | ok = _drbd_send_cmd(mdev, sock, P_STATE, | ||
1798 | (struct p_header *)&p, sizeof(p), 0); | ||
1799 | } | ||
1800 | |||
1801 | mutex_unlock(&mdev->data.mutex); | ||
1802 | |||
1803 | drbd_state_unlock(mdev); | ||
1804 | return ok; | ||
1805 | } | ||
1806 | |||
1807 | int drbd_send_state_req(struct drbd_conf *mdev, | ||
1808 | union drbd_state mask, union drbd_state val) | ||
1809 | { | ||
1810 | struct p_req_state p; | ||
1811 | |||
1812 | p.mask = cpu_to_be32(mask.i); | ||
1813 | p.val = cpu_to_be32(val.i); | ||
1814 | |||
1815 | return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, | ||
1816 | (struct p_header *)&p, sizeof(p)); | ||
1817 | } | ||
1818 | |||
1819 | int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) | ||
1820 | { | ||
1821 | struct p_req_state_reply p; | ||
1822 | |||
1823 | p.retcode = cpu_to_be32(retcode); | ||
1824 | |||
1825 | return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, | ||
1826 | (struct p_header *)&p, sizeof(p)); | ||
1827 | } | ||
1828 | |||
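| /* run-length encode one chunk of the bitmap into p->code. | ||
| * Returns the number of code bytes written, 0 if the plain bitmap should be | ||
| * sent instead (feature unusable, nothing to do, or not compressible), | ||
| * or a negative value on a fatal error. */ | ||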
1829 | int fill_bitmap_rle_bits(struct drbd_conf *mdev, | ||
1830 | struct p_compressed_bm *p, | ||
1831 | struct bm_xfer_ctx *c) | ||
1832 | { | ||
1833 | struct bitstream bs; | ||
1834 | unsigned long plain_bits; | ||
1835 | unsigned long tmp; | ||
1836 | unsigned long rl; | ||
1837 | unsigned len; | ||
1838 | unsigned toggle; | ||
1839 | int bits; | ||
1840 | |||
1841 | /* may we use this feature? */ | ||
1842 | if ((mdev->sync_conf.use_rle == 0) || | ||
1843 | (mdev->agreed_pro_version < 90)) | ||
1844 | return 0; | ||
1845 | |||
1846 | if (c->bit_offset >= c->bm_bits) | ||
1847 | return 0; /* nothing to do. */ | ||
1848 | |||
1849 | /* use at most this many bytes */ | ||
1850 | bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0); | ||
1851 | memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX); | ||
1852 | /* plain bits covered in this code string */ | ||
1853 | plain_bits = 0; | ||
1854 | |||
1855 | /* p->encoding & 0x80 stores whether the first run is of set bits. | ||
1856 | * bit offset is implicit. | ||
1857 | * start with toggle == 2 to be able to tell the first iteration */ | ||
1858 | toggle = 2; | ||
1859 | |||
1860 | /* see how many plain bits we can stuff into one packet | ||
1861 | * using RLE and VLI. */ | ||
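| /* e.g. (hypothetical bitmap) 0 0 0 1 1 1 0 0 1 ... is sent as the VLI codes | ||
| * for the run lengths 3, 3, 2, ...; had the first bit been set, only | ||
| * DCBP_set_start(p, 1) would record that, the zero-length run is skipped */ | ||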
1862 | do { | ||
1863 | tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset) | ||
1864 | : _drbd_bm_find_next(mdev, c->bit_offset); | ||
1865 | if (tmp == -1UL) | ||
1866 | tmp = c->bm_bits; | ||
1867 | rl = tmp - c->bit_offset; | ||
1868 | |||
1869 | if (toggle == 2) { /* first iteration */ | ||
1870 | if (rl == 0) { | ||
1871 | /* the first checked bit was set, | ||
1872 | * store start value, */ | ||
1873 | DCBP_set_start(p, 1); | ||
1874 | /* but skip encoding of zero run length */ | ||
1875 | toggle = !toggle; | ||
1876 | continue; | ||
1877 | } | ||
1878 | DCBP_set_start(p, 0); | ||
1879 | } | ||
1880 | |||
1881 | /* paranoia: catch zero runlength. | ||
1882 | * can only happen if bitmap is modified while we scan it. */ | ||
1883 | if (rl == 0) { | ||
1884 | dev_err(DEV, "unexpected zero runlength while encoding bitmap " | ||
1885 | "t:%u bo:%lu\n", toggle, c->bit_offset); | ||
1886 | return -1; | ||
1887 | } | ||
1888 | |||
1889 | bits = vli_encode_bits(&bs, rl); | ||
1890 | if (bits == -ENOBUFS) /* buffer full */ | ||
1891 | break; | ||
1892 | if (bits <= 0) { | ||
1893 | dev_err(DEV, "error while encoding bitmap: %d\n", bits); | ||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
1897 | toggle = !toggle; | ||
1898 | plain_bits += rl; | ||
1899 | c->bit_offset = tmp; | ||
1900 | } while (c->bit_offset < c->bm_bits); | ||
1901 | |||
1902 | len = bs.cur.b - p->code + !!bs.cur.bit; | ||
1903 | |||
1904 | if (plain_bits < (len << 3)) { | ||
1905 | /* incompressible with this method. | ||
1906 | * we need to rewind both word and bit position. */ | ||
1907 | c->bit_offset -= plain_bits; | ||
1908 | bm_xfer_ctx_bit_to_word_offset(c); | ||
1909 | c->bit_offset = c->word_offset * BITS_PER_LONG; | ||
1910 | return 0; | ||
1911 | } | ||
1912 | |||
1913 | /* RLE + VLI was able to compress it just fine. | ||
1914 | * update c->word_offset. */ | ||
1915 | bm_xfer_ctx_bit_to_word_offset(c); | ||
1916 | |||
1917 | /* store pad_bits */ | ||
1918 | DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7); | ||
1919 | |||
1920 | return len; | ||
1921 | } | ||
1922 | |||
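| /* send one P_COMPRESSED_BITMAP or P_BITMAP packet. | ||
| * Returns OK to continue with the next packet, DONE once the last bits have | ||
| * been transferred, FAILED if sending went wrong. */ | ||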
1923 | enum { OK, FAILED, DONE } | ||
1924 | send_bitmap_rle_or_plain(struct drbd_conf *mdev, | ||
1925 | struct p_header *h, struct bm_xfer_ctx *c) | ||
1926 | { | ||
1927 | struct p_compressed_bm *p = (void*)h; | ||
1928 | unsigned long num_words; | ||
1929 | int len; | ||
1930 | int ok; | ||
1931 | |||
1932 | len = fill_bitmap_rle_bits(mdev, p, c); | ||
1933 | |||
1934 | if (len < 0) | ||
1935 | return FAILED; | ||
1936 | |||
1937 | if (len) { | ||
1938 | DCBP_set_code(p, RLE_VLI_Bits); | ||
1939 | ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h, | ||
1940 | sizeof(*p) + len, 0); | ||
1941 | |||
1942 | c->packets[0]++; | ||
1943 | c->bytes[0] += sizeof(*p) + len; | ||
1944 | |||
1945 | if (c->bit_offset >= c->bm_bits) | ||
1946 | len = 0; /* DONE */ | ||
1947 | } else { | ||
1948 | /* was not compressible. | ||
1949 | * send a buffer full of plain text bits instead. */ | ||
1950 | num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); | ||
1951 | len = num_words * sizeof(long); | ||
1952 | if (len) | ||
1953 | drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload); | ||
1954 | ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP, | ||
1955 | h, sizeof(struct p_header) + len, 0); | ||
1956 | c->word_offset += num_words; | ||
1957 | c->bit_offset = c->word_offset * BITS_PER_LONG; | ||
1958 | |||
1959 | c->packets[1]++; | ||
1960 | c->bytes[1] += sizeof(struct p_header) + len; | ||
1961 | |||
1962 | if (c->bit_offset > c->bm_bits) | ||
1963 | c->bit_offset = c->bm_bits; | ||
1964 | } | ||
1965 | ok = ok ? ((len == 0) ? DONE : OK) : FAILED; | ||
1966 | |||
1967 | if (ok == DONE) | ||
1968 | INFO_bm_xfer_stats(mdev, "send", c); | ||
1969 | return ok; | ||
1970 | } | ||
1971 | |||
1972 | /* See the comment at receive_bitmap() */ | ||
1973 | int _drbd_send_bitmap(struct drbd_conf *mdev) | ||
1974 | { | ||
1975 | struct bm_xfer_ctx c; | ||
1976 | struct p_header *p; | ||
1977 | int ret; | ||
1978 | |||
1979 | ERR_IF(!mdev->bitmap) return FALSE; | ||
1980 | |||
1981 | /* maybe we should use some per thread scratch page, | ||
1982 | * and allocate that during initial device creation? */ | ||
1983 | p = (struct p_header *) __get_free_page(GFP_NOIO); | ||
1984 | if (!p) { | ||
1985 | dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); | ||
1986 | return FALSE; | ||
1987 | } | ||
1988 | |||
1989 | if (get_ldev(mdev)) { | ||
1990 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { | ||
1991 | dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n"); | ||
1992 | drbd_bm_set_all(mdev); | ||
1993 | if (drbd_bm_write(mdev)) { | ||
1994 | /* write_bm did fail! Leave full sync flag set in Meta P_DATA | ||
1995 | * but otherwise process as per normal - need to tell other | ||
1996 | * side that a full resync is required! */ | ||
1997 | dev_err(DEV, "Failed to write bitmap to disk!\n"); | ||
1998 | } else { | ||
1999 | drbd_md_clear_flag(mdev, MDF_FULL_SYNC); | ||
2000 | drbd_md_sync(mdev); | ||
2001 | } | ||
2002 | } | ||
2003 | put_ldev(mdev); | ||
2004 | } | ||
2005 | |||
2006 | c = (struct bm_xfer_ctx) { | ||
2007 | .bm_bits = drbd_bm_bits(mdev), | ||
2008 | .bm_words = drbd_bm_words(mdev), | ||
2009 | }; | ||
2010 | |||
2011 | do { | ||
2012 | ret = send_bitmap_rle_or_plain(mdev, p, &c); | ||
2013 | } while (ret == OK); | ||
2014 | |||
2015 | free_page((unsigned long) p); | ||
2016 | return (ret == DONE); | ||
2017 | } | ||
2018 | |||
2019 | int drbd_send_bitmap(struct drbd_conf *mdev) | ||
2020 | { | ||
2021 | int err; | ||
2022 | |||
2023 | if (!drbd_get_data_sock(mdev)) | ||
2024 | return -1; | ||
2025 | err = !_drbd_send_bitmap(mdev); | ||
2026 | drbd_put_data_sock(mdev); | ||
2027 | return err; | ||
2028 | } | ||
2029 | |||
2030 | int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size) | ||
2031 | { | ||
2032 | int ok; | ||
2033 | struct p_barrier_ack p; | ||
2034 | |||
2035 | p.barrier = barrier_nr; | ||
2036 | p.set_size = cpu_to_be32(set_size); | ||
2037 | |||
2038 | if (mdev->state.conn < C_CONNECTED) | ||
2039 | return FALSE; | ||
2040 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, | ||
2041 | (struct p_header *)&p, sizeof(p)); | ||
2042 | return ok; | ||
2043 | } | ||
2044 | |||
2045 | /** | ||
2046 | * _drbd_send_ack() - Sends an ack packet | ||
2047 | * @mdev: DRBD device. | ||
2048 | * @cmd: Packet command code. | ||
2049 | * @sector: sector, needs to be in big endian byte order | ||
2050 | * @blksize: size in byte, needs to be in big endian byte order | ||
2051 | * @block_id: Id, big endian byte order | ||
2052 | */ | ||
2053 | static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, | ||
2054 | u64 sector, | ||
2055 | u32 blksize, | ||
2056 | u64 block_id) | ||
2057 | { | ||
2058 | int ok; | ||
2059 | struct p_block_ack p; | ||
2060 | |||
2061 | p.sector = sector; | ||
2062 | p.block_id = block_id; | ||
2063 | p.blksize = blksize; | ||
2064 | p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); | ||
2065 | |||
2066 | if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) | ||
2067 | return FALSE; | ||
2068 | ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, | ||
2069 | (struct p_header *)&p, sizeof(p)); | ||
2070 | return ok; | ||
2071 | } | ||
2072 | |||
2073 | int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, | ||
2074 | struct p_data *dp) | ||
2075 | { | ||
2076 | const int header_size = sizeof(struct p_data) | ||
2077 | - sizeof(struct p_header); | ||
2078 | int data_size = ((struct p_header *)dp)->length - header_size; | ||
2079 | |||
2080 | return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size), | ||
2081 | dp->block_id); | ||
2082 | } | ||
2083 | |||
2084 | int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd, | ||
2085 | struct p_block_req *rp) | ||
2086 | { | ||
2087 | return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id); | ||
2088 | } | ||
2089 | |||
2090 | /** | ||
2091 | * drbd_send_ack() - Sends an ack packet | ||
2092 | * @mdev: DRBD device. | ||
2093 | * @cmd: Packet command code. | ||
2094 | * @e: Epoch entry. | ||
2095 | */ | ||
2096 | int drbd_send_ack(struct drbd_conf *mdev, | ||
2097 | enum drbd_packets cmd, struct drbd_epoch_entry *e) | ||
2098 | { | ||
2099 | return _drbd_send_ack(mdev, cmd, | ||
2100 | cpu_to_be64(e->sector), | ||
2101 | cpu_to_be32(e->size), | ||
2102 | e->block_id); | ||
2103 | } | ||
2104 | |||
2105 | /* This function misuses the block_id field to signal if the blocks | ||
2106 | * are in sync or not. */ | ||
2107 | int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, | ||
2108 | sector_t sector, int blksize, u64 block_id) | ||
2109 | { | ||
2110 | return _drbd_send_ack(mdev, cmd, | ||
2111 | cpu_to_be64(sector), | ||
2112 | cpu_to_be32(blksize), | ||
2113 | cpu_to_be64(block_id)); | ||
2114 | } | ||
2115 | |||
2116 | int drbd_send_drequest(struct drbd_conf *mdev, int cmd, | ||
2117 | sector_t sector, int size, u64 block_id) | ||
2118 | { | ||
2119 | int ok; | ||
2120 | struct p_block_req p; | ||
2121 | |||
2122 | p.sector = cpu_to_be64(sector); | ||
2123 | p.block_id = block_id; | ||
2124 | p.blksize = cpu_to_be32(size); | ||
2125 | |||
2126 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, | ||
2127 | (struct p_header *)&p, sizeof(p)); | ||
2128 | return ok; | ||
2129 | } | ||
2130 | |||
2131 | int drbd_send_drequest_csum(struct drbd_conf *mdev, | ||
2132 | sector_t sector, int size, | ||
2133 | void *digest, int digest_size, | ||
2134 | enum drbd_packets cmd) | ||
2135 | { | ||
2136 | int ok; | ||
2137 | struct p_block_req p; | ||
2138 | |||
2139 | p.sector = cpu_to_be64(sector); | ||
2140 | p.block_id = BE_DRBD_MAGIC + 0xbeef; | ||
2141 | p.blksize = cpu_to_be32(size); | ||
2142 | |||
2143 | p.head.magic = BE_DRBD_MAGIC; | ||
2144 | p.head.command = cpu_to_be16(cmd); | ||
2145 | p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size); | ||
2146 | |||
2147 | mutex_lock(&mdev->data.mutex); | ||
2148 | |||
2149 | ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0)); | ||
2150 | ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0)); | ||
2151 | |||
2152 | mutex_unlock(&mdev->data.mutex); | ||
2153 | |||
2154 | return ok; | ||
2155 | } | ||
2156 | |||
2157 | int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) | ||
2158 | { | ||
2159 | int ok; | ||
2160 | struct p_block_req p; | ||
2161 | |||
2162 | p.sector = cpu_to_be64(sector); | ||
2163 | p.block_id = BE_DRBD_MAGIC + 0xbabe; | ||
2164 | p.blksize = cpu_to_be32(size); | ||
2165 | |||
2166 | ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, | ||
2167 | (struct p_header *)&p, sizeof(p)); | ||
2168 | return ok; | ||
2169 | } | ||
2170 | |||
2171 | /* called on sndtimeo | ||
2172 | * returns FALSE if we should retry, | ||
2173 | * TRUE if we think connection is dead | ||
2174 | */ | ||
2175 | static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock) | ||
2176 | { | ||
2177 | int drop_it; | ||
2178 | /* long elapsed = (long)(jiffies - mdev->last_received); */ | ||
2179 | |||
2180 | drop_it = mdev->meta.socket == sock | ||
2181 | || !mdev->asender.task | ||
2182 | || get_t_state(&mdev->asender) != Running | ||
2183 | || mdev->state.conn < C_CONNECTED; | ||
2184 | |||
2185 | if (drop_it) | ||
2186 | return TRUE; | ||
2187 | |||
2188 | drop_it = !--mdev->ko_count; | ||
2189 | if (!drop_it) { | ||
2190 | dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n", | ||
2191 | current->comm, current->pid, mdev->ko_count); | ||
2192 | request_ping(mdev); | ||
2193 | } | ||
2194 | |||
2195 | return drop_it; /* && (mdev->state == R_PRIMARY) */; | ||
2196 | } | ||
2197 | |||
2198 | /* The idea of sendpage seems to be to put some kind of reference | ||
2199 | * to the page into the skb, and to hand it over to the NIC. In | ||
2200 | * this process get_page() gets called. | ||
2201 | * | ||
2202 | * As soon as the page was really sent over the network put_page() | ||
2203 | * gets called by some part of the network layer. [ NIC driver? ] | ||
2204 | * | ||
2205 | * [ get_page() / put_page() increment/decrement the count. If count | ||
2206 | * reaches 0 the page will be freed. ] | ||
2207 | * | ||
2208 | * This works nicely with pages from FSs. | ||
2209 | * But this means that in protocol A we might signal IO completion too early! | ||
2210 | * | ||
2211 | * In order not to corrupt data during a resync we must make sure | ||
2212 | * that we do not reuse our own buffer pages (EEs) too early; therefore | ||
2213 | * we have the net_ee list. | ||
2214 | * | ||
2215 | * XFS seems to have problems, still, it submits pages with page_count == 0! | ||
2216 | * As a workaround, we disable sendpage on pages | ||
2217 | * with page_count == 0 or PageSlab. | ||
2218 | */ | ||
2219 | static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page, | ||
2220 | int offset, size_t size) | ||
2221 | { | ||
2222 | int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0); | ||
2223 | kunmap(page); | ||
2224 | if (sent == size) | ||
2225 | mdev->send_cnt += size>>9; | ||
2226 | return sent == size; | ||
2227 | } | ||
2228 | |||
2229 | static int _drbd_send_page(struct drbd_conf *mdev, struct page *page, | ||
2230 | int offset, size_t size) | ||
2231 | { | ||
2232 | mm_segment_t oldfs = get_fs(); | ||
2233 | int sent, ok; | ||
2234 | int len = size; | ||
2235 | |||
2236 | /* e.g. XFS meta- & log-data is in slab pages, which have a | ||
2237 | * page_count of 0 and/or have PageSlab() set. | ||
2238 | * we cannot use send_page for those, as that does get_page(); | ||
2239 | * put_page(); and would cause either a VM_BUG directly, or | ||
2240 | * __page_cache_release a page that would actually still be referenced | ||
2241 | * by someone, leading to some obscure delayed Oops somewhere else. */ | ||
2242 | if (disable_sendpage || (page_count(page) < 1) || PageSlab(page)) | ||
2243 | return _drbd_no_send_page(mdev, page, offset, size); | ||
2244 | |||
2245 | drbd_update_congested(mdev); | ||
2246 | set_fs(KERNEL_DS); | ||
2247 | do { | ||
2248 | sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page, | ||
2249 | offset, len, | ||
2250 | MSG_NOSIGNAL); | ||
2251 | if (sent == -EAGAIN) { | ||
2252 | if (we_should_drop_the_connection(mdev, | ||
2253 | mdev->data.socket)) | ||
2254 | break; | ||
2255 | else | ||
2256 | continue; | ||
2257 | } | ||
2258 | if (sent <= 0) { | ||
2259 | dev_warn(DEV, "%s: size=%d len=%d sent=%d\n", | ||
2260 | __func__, (int)size, len, sent); | ||
2261 | break; | ||
2262 | } | ||
2263 | len -= sent; | ||
2264 | offset += sent; | ||
2265 | } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/); | ||
2266 | set_fs(oldfs); | ||
2267 | clear_bit(NET_CONGESTED, &mdev->flags); | ||
2268 | |||
2269 | ok = (len == 0); | ||
2270 | if (likely(ok)) | ||
2271 | mdev->send_cnt += size>>9; | ||
2272 | return ok; | ||
2273 | } | ||
2274 | |||
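| /* _drbd_send_bio() copies the bio payload through kmap (no zero-copy). | ||
| * drbd_send_dblock() uses it for protocol A, where zero-copy sendpage could | ||
| * let us signal IO completion too early (see the comment above). */ | ||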
2275 | static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) | ||
2276 | { | ||
2277 | struct bio_vec *bvec; | ||
2278 | int i; | ||
2279 | __bio_for_each_segment(bvec, bio, i, 0) { | ||
2280 | if (!_drbd_no_send_page(mdev, bvec->bv_page, | ||
2281 | bvec->bv_offset, bvec->bv_len)) | ||
2282 | return 0; | ||
2283 | } | ||
2284 | return 1; | ||
2285 | } | ||
2286 | |||
2287 | static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) | ||
2288 | { | ||
2289 | struct bio_vec *bvec; | ||
2290 | int i; | ||
2291 | __bio_for_each_segment(bvec, bio, i, 0) { | ||
2292 | if (!_drbd_send_page(mdev, bvec->bv_page, | ||
2293 | bvec->bv_offset, bvec->bv_len)) | ||
2294 | return 0; | ||
2295 | } | ||
2296 | |||
2297 | return 1; | ||
2298 | } | ||
2299 | |||
2300 | /* Used to send write requests | ||
2301 | * R_PRIMARY -> Peer (P_DATA) | ||
2302 | */ | ||
2303 | int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) | ||
2304 | { | ||
2305 | int ok = 1; | ||
2306 | struct p_data p; | ||
2307 | unsigned int dp_flags = 0; | ||
2308 | void *dgb; | ||
2309 | int dgs; | ||
2310 | |||
2311 | if (!drbd_get_data_sock(mdev)) | ||
2312 | return 0; | ||
2313 | |||
2314 | dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? | ||
2315 | crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; | ||
2316 | |||
2317 | p.head.magic = BE_DRBD_MAGIC; | ||
2318 | p.head.command = cpu_to_be16(P_DATA); | ||
2319 | p.head.length = | ||
2320 | cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size); | ||
2321 | |||
2322 | p.sector = cpu_to_be64(req->sector); | ||
2323 | p.block_id = (unsigned long)req; | ||
2324 | p.seq_num = cpu_to_be32(req->seq_num = | ||
2325 | atomic_add_return(1, &mdev->packet_seq)); | ||
2326 | dp_flags = 0; | ||
2327 | |||
2328 | /* NOTE: no need to check if barriers supported here as we would | ||
2329 | * not pass the test in make_request_common in that case | ||
2330 | */ | ||
2331 | if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) { | ||
2332 | dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); | ||
2333 | /* dp_flags |= DP_HARDBARRIER; */ | ||
2334 | } | ||
2335 | if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO)) | ||
2336 | dp_flags |= DP_RW_SYNC; | ||
2337 | /* for now handle SYNCIO and UNPLUG | ||
2338 | * as if they still were one and the same flag */ | ||
2339 | if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG)) | ||
2340 | dp_flags |= DP_RW_SYNC; | ||
2341 | if (mdev->state.conn >= C_SYNC_SOURCE && | ||
2342 | mdev->state.conn <= C_PAUSED_SYNC_T) | ||
2343 | dp_flags |= DP_MAY_SET_IN_SYNC; | ||
2344 | |||
2345 | p.dp_flags = cpu_to_be32(dp_flags); | ||
2346 | set_bit(UNPLUG_REMOTE, &mdev->flags); | ||
2347 | ok = (sizeof(p) == | ||
2348 | drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE)); | ||
2349 | if (ok && dgs) { | ||
2350 | dgb = mdev->int_dig_out; | ||
2351 | drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb); | ||
2352 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE); | ||
2353 | } | ||
2354 | if (ok) { | ||
2355 | if (mdev->net_conf->wire_protocol == DRBD_PROT_A) | ||
2356 | ok = _drbd_send_bio(mdev, req->master_bio); | ||
2357 | else | ||
2358 | ok = _drbd_send_zc_bio(mdev, req->master_bio); | ||
2359 | } | ||
2360 | |||
2361 | drbd_put_data_sock(mdev); | ||
2362 | return ok; | ||
2363 | } | ||
2364 | |||
2365 | /* answer packet, used to send data back for read requests: | ||
2366 | * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY) | ||
2367 | * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY) | ||
2368 | */ | ||
2369 | int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, | ||
2370 | struct drbd_epoch_entry *e) | ||
2371 | { | ||
2372 | int ok; | ||
2373 | struct p_data p; | ||
2374 | void *dgb; | ||
2375 | int dgs; | ||
2376 | |||
2377 | dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? | ||
2378 | crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; | ||
2379 | |||
2380 | p.head.magic = BE_DRBD_MAGIC; | ||
2381 | p.head.command = cpu_to_be16(cmd); | ||
2382 | p.head.length = | ||
2383 | cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size); | ||
2384 | |||
2385 | p.sector = cpu_to_be64(e->sector); | ||
2386 | p.block_id = e->block_id; | ||
2387 | /* p.seq_num = 0; No sequence numbers here.. */ | ||
2388 | |||
2389 | /* Only called by our kernel thread. | ||
2390 | * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL | ||
2391 | * in response to admin command or module unload. | ||
2392 | */ | ||
2393 | if (!drbd_get_data_sock(mdev)) | ||
2394 | return 0; | ||
2395 | |||
2396 | ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, | ||
2397 | sizeof(p), MSG_MORE); | ||
2398 | if (ok && dgs) { | ||
2399 | dgb = mdev->int_dig_out; | ||
2400 | drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb); | ||
2401 | ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE); | ||
2402 | } | ||
2403 | if (ok) | ||
2404 | ok = _drbd_send_zc_bio(mdev, e->private_bio); | ||
2405 | |||
2406 | drbd_put_data_sock(mdev); | ||
2407 | return ok; | ||
2408 | } | ||
2409 | |||
2410 | /* | ||
2411 | drbd_send distinguishes two cases: | ||
2412 | |||
2413 | Packets sent via the data socket "sock" | ||
2414 | and packets sent via the meta data socket "msock" | ||
2415 | |||
2416 | sock msock | ||
2417 | -----------------+-------------------------+------------------------------ | ||
2418 | timeout conf.timeout / 2 conf.timeout / 2 | ||
2419 | timeout action send a ping via msock Abort communication | ||
2420 | and close all sockets | ||
2421 | */ | ||
2422 | |||
2423 | /* | ||
2424 | * you must have down()ed the appropriate [m]sock_mutex elsewhere! | ||
2425 | */ | ||
2426 | int drbd_send(struct drbd_conf *mdev, struct socket *sock, | ||
2427 | void *buf, size_t size, unsigned msg_flags) | ||
2428 | { | ||
2429 | struct kvec iov; | ||
2430 | struct msghdr msg; | ||
2431 | int rv, sent = 0; | ||
2432 | |||
2433 | if (!sock) | ||
2434 | return -1000; | ||
2435 | |||
2436 | /* THINK if (signal_pending) return ... ? */ | ||
2437 | |||
2438 | iov.iov_base = buf; | ||
2439 | iov.iov_len = size; | ||
2440 | |||
2441 | msg.msg_name = NULL; | ||
2442 | msg.msg_namelen = 0; | ||
2443 | msg.msg_control = NULL; | ||
2444 | msg.msg_controllen = 0; | ||
2445 | msg.msg_flags = msg_flags | MSG_NOSIGNAL; | ||
2446 | |||
2447 | if (sock == mdev->data.socket) { | ||
2448 | mdev->ko_count = mdev->net_conf->ko_count; | ||
2449 | drbd_update_congested(mdev); | ||
2450 | } | ||
2451 | do { | ||
2452 | /* STRANGE | ||
2453 | * tcp_sendmsg does _not_ use its size parameter at all ? | ||
2454 | * | ||
2455 | * -EAGAIN on timeout, -EINTR on signal. | ||
2456 | */ | ||
2457 | /* THINK | ||
2458 | * do we need to block DRBD_SIG if sock == &meta.socket ?? | ||
2459 | * otherwise wake_asender() might interrupt some send_*Ack ! | ||
2460 | */ | ||
2461 | rv = kernel_sendmsg(sock, &msg, &iov, 1, size); | ||
2462 | if (rv == -EAGAIN) { | ||
2463 | if (we_should_drop_the_connection(mdev, sock)) | ||
2464 | break; | ||
2465 | else | ||
2466 | continue; | ||
2467 | } | ||
2468 | D_ASSERT(rv != 0); | ||
2469 | if (rv == -EINTR) { | ||
2470 | flush_signals(current); | ||
2471 | rv = 0; | ||
2472 | } | ||
2473 | if (rv < 0) | ||
2474 | break; | ||
2475 | sent += rv; | ||
2476 | iov.iov_base += rv; | ||
2477 | iov.iov_len -= rv; | ||
2478 | } while (sent < size); | ||
2479 | |||
2480 | if (sock == mdev->data.socket) | ||
2481 | clear_bit(NET_CONGESTED, &mdev->flags); | ||
2482 | |||
2483 | if (rv <= 0) { | ||
2484 | if (rv != -EAGAIN) { | ||
2485 | dev_err(DEV, "%s_sendmsg returned %d\n", | ||
2486 | sock == mdev->meta.socket ? "msock" : "sock", | ||
2487 | rv); | ||
2488 | drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE)); | ||
2489 | } else | ||
2490 | drbd_force_state(mdev, NS(conn, C_TIMEOUT)); | ||
2491 | } | ||
2492 | |||
2493 | return sent; | ||
2494 | } | ||
2495 | |||
2496 | static int drbd_open(struct block_device *bdev, fmode_t mode) | ||
2497 | { | ||
2498 | struct drbd_conf *mdev = bdev->bd_disk->private_data; | ||
2499 | unsigned long flags; | ||
2500 | int rv = 0; | ||
2501 | |||
2502 | spin_lock_irqsave(&mdev->req_lock, flags); | ||
2503 | /* to have a stable mdev->state.role | ||
2504 | * and no race with updating open_cnt */ | ||
2505 | |||
2506 | if (mdev->state.role != R_PRIMARY) { | ||
2507 | if (mode & FMODE_WRITE) | ||
2508 | rv = -EROFS; | ||
2509 | else if (!allow_oos) | ||
2510 | rv = -EMEDIUMTYPE; | ||
2511 | } | ||
2512 | |||
2513 | if (!rv) | ||
2514 | mdev->open_cnt++; | ||
2515 | spin_unlock_irqrestore(&mdev->req_lock, flags); | ||
2516 | |||
2517 | return rv; | ||
2518 | } | ||
2519 | |||
2520 | static int drbd_release(struct gendisk *gd, fmode_t mode) | ||
2521 | { | ||
2522 | struct drbd_conf *mdev = gd->private_data; | ||
2523 | mdev->open_cnt--; | ||
2524 | return 0; | ||
2525 | } | ||
2526 | |||
2527 | static void drbd_unplug_fn(struct request_queue *q) | ||
2528 | { | ||
2529 | struct drbd_conf *mdev = q->queuedata; | ||
2530 | |||
2531 | /* unplug FIRST */ | ||
2532 | spin_lock_irq(q->queue_lock); | ||
2533 | blk_remove_plug(q); | ||
2534 | spin_unlock_irq(q->queue_lock); | ||
2535 | |||
2536 | /* only if connected */ | ||
2537 | spin_lock_irq(&mdev->req_lock); | ||
2538 | if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) { | ||
2539 | D_ASSERT(mdev->state.role == R_PRIMARY); | ||
2540 | if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) { | ||
2541 | /* add to the data.work queue, | ||
2542 | * unless already queued. | ||
2543 | * XXX this might be a good addition to drbd_queue_work | ||
2544 | * anyways, to detect "double queuing" ... */ | ||
2545 | if (list_empty(&mdev->unplug_work.list)) | ||
2546 | drbd_queue_work(&mdev->data.work, | ||
2547 | &mdev->unplug_work); | ||
2548 | } | ||
2549 | } | ||
2550 | spin_unlock_irq(&mdev->req_lock); | ||
2551 | |||
2552 | if (mdev->state.disk >= D_INCONSISTENT) | ||
2553 | drbd_kick_lo(mdev); | ||
2554 | } | ||
2555 | |||
2556 | static void drbd_set_defaults(struct drbd_conf *mdev) | ||
2557 | { | ||
2558 | mdev->sync_conf.after = DRBD_AFTER_DEF; | ||
2559 | mdev->sync_conf.rate = DRBD_RATE_DEF; | ||
2560 | mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF; | ||
2561 | mdev->state = (union drbd_state) { | ||
2562 | { .role = R_SECONDARY, | ||
2563 | .peer = R_UNKNOWN, | ||
2564 | .conn = C_STANDALONE, | ||
2565 | .disk = D_DISKLESS, | ||
2566 | .pdsk = D_UNKNOWN, | ||
2567 | .susp = 0 | ||
2568 | } }; | ||
2569 | } | ||
2570 | |||
2571 | void drbd_init_set_defaults(struct drbd_conf *mdev) | ||
2572 | { | ||
2573 | /* the memset(,0,) did most of this. | ||
2574 | * note: only assignments, no allocation in here */ | ||
2575 | |||
2576 | drbd_set_defaults(mdev); | ||
2577 | |||
2578 | /* for now, we do NOT yet support it, | ||
2579 | * even though we start some framework | ||
2580 | * to eventually support barriers */ | ||
2581 | set_bit(NO_BARRIER_SUPP, &mdev->flags); | ||
2582 | |||
2583 | atomic_set(&mdev->ap_bio_cnt, 0); | ||
2584 | atomic_set(&mdev->ap_pending_cnt, 0); | ||
2585 | atomic_set(&mdev->rs_pending_cnt, 0); | ||
2586 | atomic_set(&mdev->unacked_cnt, 0); | ||
2587 | atomic_set(&mdev->local_cnt, 0); | ||
2588 | atomic_set(&mdev->net_cnt, 0); | ||
2589 | atomic_set(&mdev->packet_seq, 0); | ||
2590 | atomic_set(&mdev->pp_in_use, 0); | ||
2591 | |||
2592 | mutex_init(&mdev->md_io_mutex); | ||
2593 | mutex_init(&mdev->data.mutex); | ||
2594 | mutex_init(&mdev->meta.mutex); | ||
2595 | sema_init(&mdev->data.work.s, 0); | ||
2596 | sema_init(&mdev->meta.work.s, 0); | ||
2597 | mutex_init(&mdev->state_mutex); | ||
2598 | |||
2599 | spin_lock_init(&mdev->data.work.q_lock); | ||
2600 | spin_lock_init(&mdev->meta.work.q_lock); | ||
2601 | |||
2602 | spin_lock_init(&mdev->al_lock); | ||
2603 | spin_lock_init(&mdev->req_lock); | ||
2604 | spin_lock_init(&mdev->peer_seq_lock); | ||
2605 | spin_lock_init(&mdev->epoch_lock); | ||
2606 | |||
2607 | INIT_LIST_HEAD(&mdev->active_ee); | ||
2608 | INIT_LIST_HEAD(&mdev->sync_ee); | ||
2609 | INIT_LIST_HEAD(&mdev->done_ee); | ||
2610 | INIT_LIST_HEAD(&mdev->read_ee); | ||
2611 | INIT_LIST_HEAD(&mdev->net_ee); | ||
2612 | INIT_LIST_HEAD(&mdev->resync_reads); | ||
2613 | INIT_LIST_HEAD(&mdev->data.work.q); | ||
2614 | INIT_LIST_HEAD(&mdev->meta.work.q); | ||
2615 | INIT_LIST_HEAD(&mdev->resync_work.list); | ||
2616 | INIT_LIST_HEAD(&mdev->unplug_work.list); | ||
2617 | INIT_LIST_HEAD(&mdev->md_sync_work.list); | ||
2618 | INIT_LIST_HEAD(&mdev->bm_io_work.w.list); | ||
2619 | mdev->resync_work.cb = w_resync_inactive; | ||
2620 | mdev->unplug_work.cb = w_send_write_hint; | ||
2621 | mdev->md_sync_work.cb = w_md_sync; | ||
2622 | mdev->bm_io_work.w.cb = w_bitmap_io; | ||
2623 | init_timer(&mdev->resync_timer); | ||
2624 | init_timer(&mdev->md_sync_timer); | ||
2625 | mdev->resync_timer.function = resync_timer_fn; | ||
2626 | mdev->resync_timer.data = (unsigned long) mdev; | ||
2627 | mdev->md_sync_timer.function = md_sync_timer_fn; | ||
2628 | mdev->md_sync_timer.data = (unsigned long) mdev; | ||
2629 | |||
2630 | init_waitqueue_head(&mdev->misc_wait); | ||
2631 | init_waitqueue_head(&mdev->state_wait); | ||
2632 | init_waitqueue_head(&mdev->ee_wait); | ||
2633 | init_waitqueue_head(&mdev->al_wait); | ||
2634 | init_waitqueue_head(&mdev->seq_wait); | ||
2635 | |||
2636 | drbd_thread_init(mdev, &mdev->receiver, drbdd_init); | ||
2637 | drbd_thread_init(mdev, &mdev->worker, drbd_worker); | ||
2638 | drbd_thread_init(mdev, &mdev->asender, drbd_asender); | ||
2639 | |||
2640 | mdev->agreed_pro_version = PRO_VERSION_MAX; | ||
2641 | mdev->write_ordering = WO_bio_barrier; | ||
2642 | mdev->resync_wenr = LC_FREE; | ||
2643 | } | ||
2644 | |||
2645 | void drbd_mdev_cleanup(struct drbd_conf *mdev) | ||
2646 | { | ||
2647 | if (mdev->receiver.t_state != None) | ||
2648 | dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", | ||
2649 | mdev->receiver.t_state); | ||
2650 | |||
2651 | /* no need to lock it, I'm the only thread alive */ | ||
2652 | if (atomic_read(&mdev->current_epoch->epoch_size) != 0) | ||
2653 | dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); | ||
2654 | mdev->al_writ_cnt = | ||
2655 | mdev->bm_writ_cnt = | ||
2656 | mdev->read_cnt = | ||
2657 | mdev->recv_cnt = | ||
2658 | mdev->send_cnt = | ||
2659 | mdev->writ_cnt = | ||
2660 | mdev->p_size = | ||
2661 | mdev->rs_start = | ||
2662 | mdev->rs_total = | ||
2663 | mdev->rs_failed = | ||
2664 | mdev->rs_mark_left = | ||
2665 | mdev->rs_mark_time = 0; | ||
2666 | D_ASSERT(mdev->net_conf == NULL); | ||
2667 | |||
2668 | drbd_set_my_capacity(mdev, 0); | ||
2669 | if (mdev->bitmap) { | ||
2670 | /* maybe never allocated. */ | ||
2671 | drbd_bm_resize(mdev, 0); | ||
2672 | drbd_bm_cleanup(mdev); | ||
2673 | } | ||
2674 | |||
2675 | drbd_free_resources(mdev); | ||
2676 | |||
2677 | /* | ||
2678 | * currently we call drbd_init_ee only on module load, so | ||
2679 | * we may call drbd_release_ee only on module unload! | ||
2680 | */ | ||
2681 | D_ASSERT(list_empty(&mdev->active_ee)); | ||
2682 | D_ASSERT(list_empty(&mdev->sync_ee)); | ||
2683 | D_ASSERT(list_empty(&mdev->done_ee)); | ||
2684 | D_ASSERT(list_empty(&mdev->read_ee)); | ||
2685 | D_ASSERT(list_empty(&mdev->net_ee)); | ||
2686 | D_ASSERT(list_empty(&mdev->resync_reads)); | ||
2687 | D_ASSERT(list_empty(&mdev->data.work.q)); | ||
2688 | D_ASSERT(list_empty(&mdev->meta.work.q)); | ||
2689 | D_ASSERT(list_empty(&mdev->resync_work.list)); | ||
2690 | D_ASSERT(list_empty(&mdev->unplug_work.list)); | ||
2691 | |||
2692 | } | ||
2693 | |||
2694 | |||
2695 | static void drbd_destroy_mempools(void) | ||
2696 | { | ||
2697 | struct page *page; | ||
2698 | |||
2699 | while (drbd_pp_pool) { | ||
2700 | page = drbd_pp_pool; | ||
2701 | drbd_pp_pool = (struct page *)page_private(page); | ||
2702 | __free_page(page); | ||
2703 | drbd_pp_vacant--; | ||
2704 | } | ||
2705 | |||
2706 | /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ | ||
2707 | |||
2708 | if (drbd_ee_mempool) | ||
2709 | mempool_destroy(drbd_ee_mempool); | ||
2710 | if (drbd_request_mempool) | ||
2711 | mempool_destroy(drbd_request_mempool); | ||
2712 | if (drbd_ee_cache) | ||
2713 | kmem_cache_destroy(drbd_ee_cache); | ||
2714 | if (drbd_request_cache) | ||
2715 | kmem_cache_destroy(drbd_request_cache); | ||
2716 | if (drbd_bm_ext_cache) | ||
2717 | kmem_cache_destroy(drbd_bm_ext_cache); | ||
2718 | if (drbd_al_ext_cache) | ||
2719 | kmem_cache_destroy(drbd_al_ext_cache); | ||
2720 | |||
2721 | drbd_ee_mempool = NULL; | ||
2722 | drbd_request_mempool = NULL; | ||
2723 | drbd_ee_cache = NULL; | ||
2724 | drbd_request_cache = NULL; | ||
2725 | drbd_bm_ext_cache = NULL; | ||
2726 | drbd_al_ext_cache = NULL; | ||
2727 | |||
2728 | return; | ||
2729 | } | ||
2730 | |||
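| /* set up slab caches, mempools and the private page pool; "number" provides | ||
| * one DRBD_MAX_SEGMENT_SIZE worth of pages per configured minor */ | ||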
2731 | static int drbd_create_mempools(void) | ||
2732 | { | ||
2733 | struct page *page; | ||
2734 | const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count; | ||
2735 | int i; | ||
2736 | |||
2737 | /* prepare our caches and mempools */ | ||
2738 | drbd_request_mempool = NULL; | ||
2739 | drbd_ee_cache = NULL; | ||
2740 | drbd_request_cache = NULL; | ||
2741 | drbd_bm_ext_cache = NULL; | ||
2742 | drbd_al_ext_cache = NULL; | ||
2743 | drbd_pp_pool = NULL; | ||
2744 | |||
2745 | /* caches */ | ||
2746 | drbd_request_cache = kmem_cache_create( | ||
2747 | "drbd_req", sizeof(struct drbd_request), 0, 0, NULL); | ||
2748 | if (drbd_request_cache == NULL) | ||
2749 | goto Enomem; | ||
2750 | |||
2751 | drbd_ee_cache = kmem_cache_create( | ||
2752 | "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL); | ||
2753 | if (drbd_ee_cache == NULL) | ||
2754 | goto Enomem; | ||
2755 | |||
2756 | drbd_bm_ext_cache = kmem_cache_create( | ||
2757 | "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL); | ||
2758 | if (drbd_bm_ext_cache == NULL) | ||
2759 | goto Enomem; | ||
2760 | |||
2761 | drbd_al_ext_cache = kmem_cache_create( | ||
2762 | "drbd_al", sizeof(struct lc_element), 0, 0, NULL); | ||
2763 | if (drbd_al_ext_cache == NULL) | ||
2764 | goto Enomem; | ||
2765 | |||
2766 | /* mempools */ | ||
2767 | drbd_request_mempool = mempool_create(number, | ||
2768 | mempool_alloc_slab, mempool_free_slab, drbd_request_cache); | ||
2769 | if (drbd_request_mempool == NULL) | ||
2770 | goto Enomem; | ||
2771 | |||
2772 | drbd_ee_mempool = mempool_create(number, | ||
2773 | mempool_alloc_slab, mempool_free_slab, drbd_ee_cache); | ||
2774 | if (drbd_ee_mempool == NULL) | ||
2775 | goto Enomem; | ||
2776 | |||
2777 | /* drbd's page pool */ | ||
2778 | spin_lock_init(&drbd_pp_lock); | ||
2779 | |||
2780 | for (i = 0; i < number; i++) { | ||
2781 | page = alloc_page(GFP_HIGHUSER); | ||
2782 | if (!page) | ||
2783 | goto Enomem; | ||
2784 | set_page_private(page, (unsigned long)drbd_pp_pool); | ||
2785 | drbd_pp_pool = page; | ||
2786 | } | ||
2787 | drbd_pp_vacant = number; | ||
2788 | |||
2789 | return 0; | ||
2790 | |||
2791 | Enomem: | ||
2792 | drbd_destroy_mempools(); /* in case we allocated some */ | ||
2793 | return -ENOMEM; | ||
2794 | } | ||
2795 | |||
2796 | static int drbd_notify_sys(struct notifier_block *this, unsigned long code, | ||
2797 | void *unused) | ||
2798 | { | ||
2799 | /* just so we have it. you never know what interesting things we | ||
2800 | * might want to do here some day... | ||
2801 | */ | ||
2802 | |||
2803 | return NOTIFY_DONE; | ||
2804 | } | ||
2805 | |||
2806 | static struct notifier_block drbd_notifier = { | ||
2807 | .notifier_call = drbd_notify_sys, | ||
2808 | }; | ||
2809 | |||
2810 | static void drbd_release_ee_lists(struct drbd_conf *mdev) | ||
2811 | { | ||
2812 | int rr; | ||
2813 | |||
2814 | rr = drbd_release_ee(mdev, &mdev->active_ee); | ||
2815 | if (rr) | ||
2816 | dev_err(DEV, "%d EEs in active list found!\n", rr); | ||
2817 | |||
2818 | rr = drbd_release_ee(mdev, &mdev->sync_ee); | ||
2819 | if (rr) | ||
2820 | dev_err(DEV, "%d EEs in sync list found!\n", rr); | ||
2821 | |||
2822 | rr = drbd_release_ee(mdev, &mdev->read_ee); | ||
2823 | if (rr) | ||
2824 | dev_err(DEV, "%d EEs in read list found!\n", rr); | ||
2825 | |||
2826 | rr = drbd_release_ee(mdev, &mdev->done_ee); | ||
2827 | if (rr) | ||
2828 | dev_err(DEV, "%d EEs in done list found!\n", rr); | ||
2829 | |||
2830 | rr = drbd_release_ee(mdev, &mdev->net_ee); | ||
2831 | if (rr) | ||
2832 | dev_err(DEV, "%d EEs in net list found!\n", rr); | ||
2833 | } | ||
2834 | |||
2835 | /* caution. no locking. | ||
2836 | * currently only used from module cleanup code. */ | ||
2837 | static void drbd_delete_device(unsigned int minor) | ||
2838 | { | ||
2839 | struct drbd_conf *mdev = minor_to_mdev(minor); | ||
2840 | |||
2841 | if (!mdev) | ||
2842 | return; | ||
2843 | |||
2844 | /* paranoia asserts */ | ||
2845 | if (mdev->open_cnt != 0) | ||
2846 | dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt, | ||
2847 | __FILE__ , __LINE__); | ||
2848 | |||
2849 | ERR_IF (!list_empty(&mdev->data.work.q)) { | ||
2850 | struct list_head *lp; | ||
2851 | list_for_each(lp, &mdev->data.work.q) { | ||
2852 | dev_err(DEV, "lp = %p\n", lp); | ||
2853 | } | ||
2854 | }; | ||
2855 | /* end paranoia asserts */ | ||
2856 | |||
2857 | del_gendisk(mdev->vdisk); | ||
2858 | |||
2859 | /* cleanup stuff that may have been allocated during | ||
2860 | * device (re-)configuration or state changes */ | ||
2861 | |||
2862 | if (mdev->this_bdev) | ||
2863 | bdput(mdev->this_bdev); | ||
2864 | |||
2865 | drbd_free_resources(mdev); | ||
2866 | |||
2867 | drbd_release_ee_lists(mdev); | ||
2868 | |||
2869 | /* should be free'd on disconnect? */ | ||
2870 | kfree(mdev->ee_hash); | ||
2871 | /* | ||
2872 | mdev->ee_hash_s = 0; | ||
2873 | mdev->ee_hash = NULL; | ||
2874 | */ | ||
2875 | |||
2876 | lc_destroy(mdev->act_log); | ||
2877 | lc_destroy(mdev->resync); | ||
2878 | |||
2879 | kfree(mdev->p_uuid); | ||
2880 | /* mdev->p_uuid = NULL; */ | ||
2881 | |||
2882 | kfree(mdev->int_dig_out); | ||
2883 | kfree(mdev->int_dig_in); | ||
2884 | kfree(mdev->int_dig_vv); | ||
2885 | |||
2886 | /* cleanup the rest that has been | ||
2887 | * allocated from drbd_new_device | ||
2888 | * and actually free the mdev itself */ | ||
2889 | drbd_free_mdev(mdev); | ||
2890 | } | ||
2891 | |||
2892 | static void drbd_cleanup(void) | ||
2893 | { | ||
2894 | unsigned int i; | ||
2895 | |||
2896 | unregister_reboot_notifier(&drbd_notifier); | ||
2897 | |||
2898 | drbd_nl_cleanup(); | ||
2899 | |||
2900 | if (minor_table) { | ||
2901 | if (drbd_proc) | ||
2902 | remove_proc_entry("drbd", NULL); | ||
2903 | i = minor_count; | ||
2904 | while (i--) | ||
2905 | drbd_delete_device(i); | ||
2906 | drbd_destroy_mempools(); | ||
2907 | } | ||
2908 | |||
2909 | kfree(minor_table); | ||
2910 | |||
2911 | unregister_blkdev(DRBD_MAJOR, "drbd"); | ||
2912 | |||
2913 | printk(KERN_INFO "drbd: module cleanup done.\n"); | ||
2914 | } | ||
2915 | |||
2916 | /** | ||
2917 | * drbd_congested() - Callback for pdflush | ||
2918 | * @congested_data: User data | ||
2919 | * @bdi_bits: Bits pdflush is currently interested in | ||
2920 | * | ||
2921 | * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. | ||
2922 | */ | ||
2923 | static int drbd_congested(void *congested_data, int bdi_bits) | ||
2924 | { | ||
2925 | struct drbd_conf *mdev = congested_data; | ||
2926 | struct request_queue *q; | ||
2927 | char reason = '-'; | ||
2928 | int r = 0; | ||
2929 | |||
2930 | if (!__inc_ap_bio_cond(mdev)) { | ||
2931 | /* DRBD has frozen IO */ | ||
2932 | r = bdi_bits; | ||
2933 | reason = 'd'; | ||
2934 | goto out; | ||
2935 | } | ||
2936 | |||
2937 | if (get_ldev(mdev)) { | ||
2938 | q = bdev_get_queue(mdev->ldev->backing_bdev); | ||
2939 | r = bdi_congested(&q->backing_dev_info, bdi_bits); | ||
2940 | put_ldev(mdev); | ||
2941 | if (r) | ||
2942 | reason = 'b'; | ||
2943 | } | ||
2944 | |||
2945 | if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) { | ||
2946 | r |= (1 << BDI_async_congested); | ||
2947 | reason = reason == 'b' ? 'a' : 'n'; | ||
2948 | } | ||
2949 | |||
2950 | out: | ||
2951 | mdev->congestion_reason = reason; | ||
2952 | return r; | ||
2953 | } | ||
2954 | |||
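| /* allocate and initialize the per-minor state: the drbd_conf itself, its | ||
| * request queue, gendisk, md_io page, bitmap and transfer log. | ||
| * Returns NULL (after rolling back partial allocations) on failure. */ | ||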
2955 | struct drbd_conf *drbd_new_device(unsigned int minor) | ||
2956 | { | ||
2957 | struct drbd_conf *mdev; | ||
2958 | struct gendisk *disk; | ||
2959 | struct request_queue *q; | ||
2960 | |||
2961 | /* GFP_KERNEL, we are outside of all write-out paths */ | ||
2962 | mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL); | ||
2963 | if (!mdev) | ||
2964 | return NULL; | ||
2965 | if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL)) | ||
2966 | goto out_no_cpumask; | ||
2967 | |||
2968 | mdev->minor = minor; | ||
2969 | |||
2970 | drbd_init_set_defaults(mdev); | ||
2971 | |||
2972 | q = blk_alloc_queue(GFP_KERNEL); | ||
2973 | if (!q) | ||
2974 | goto out_no_q; | ||
2975 | mdev->rq_queue = q; | ||
2976 | q->queuedata = mdev; | ||
2977 | blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE); | ||
2978 | |||
2979 | disk = alloc_disk(1); | ||
2980 | if (!disk) | ||
2981 | goto out_no_disk; | ||
2982 | mdev->vdisk = disk; | ||
2983 | |||
2984 | set_disk_ro(disk, TRUE); | ||
2985 | |||
2986 | disk->queue = q; | ||
2987 | disk->major = DRBD_MAJOR; | ||
2988 | disk->first_minor = minor; | ||
2989 | disk->fops = &drbd_ops; | ||
2990 | sprintf(disk->disk_name, "drbd%d", minor); | ||
2991 | disk->private_data = mdev; | ||
2992 | |||
2993 | mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); | ||
2994 | /* we have no partitions. we contain only ourselves. */ | ||
2995 | mdev->this_bdev->bd_contains = mdev->this_bdev; | ||
2996 | |||
2997 | q->backing_dev_info.congested_fn = drbd_congested; | ||
2998 | q->backing_dev_info.congested_data = mdev; | ||
2999 | |||
3000 | blk_queue_make_request(q, drbd_make_request_26); | ||
3001 | blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); | ||
3002 | blk_queue_merge_bvec(q, drbd_merge_bvec); | ||
3003 | q->queue_lock = &mdev->req_lock; /* needed since we use */ | ||
3004 | /* plugging on a queue, that actually has no requests! */ | ||
3005 | q->unplug_fn = drbd_unplug_fn; | ||
3006 | |||
3007 | mdev->md_io_page = alloc_page(GFP_KERNEL); | ||
3008 | if (!mdev->md_io_page) | ||
3009 | goto out_no_io_page; | ||
3010 | |||
3011 | if (drbd_bm_init(mdev)) | ||
3012 | goto out_no_bitmap; | ||
3013 | /* no need to lock access, we are still initializing this minor device. */ | ||
3014 | if (!tl_init(mdev)) | ||
3015 | goto out_no_tl; | ||
3016 | |||
3017 | mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL); | ||
3018 | if (!mdev->app_reads_hash) | ||
3019 | goto out_no_app_reads; | ||
3020 | |||
3021 | mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); | ||
3022 | if (!mdev->current_epoch) | ||
3023 | goto out_no_epoch; | ||
3024 | |||
3025 | INIT_LIST_HEAD(&mdev->current_epoch->list); | ||
3026 | mdev->epochs = 1; | ||
3027 | |||
3028 | return mdev; | ||
3029 | |||
3030 | /* out_whatever_else: | ||
3031 | kfree(mdev->current_epoch); */ | ||
3032 | out_no_epoch: | ||
3033 | kfree(mdev->app_reads_hash); | ||
3034 | out_no_app_reads: | ||
3035 | tl_cleanup(mdev); | ||
3036 | out_no_tl: | ||
3037 | drbd_bm_cleanup(mdev); | ||
3038 | out_no_bitmap: | ||
3039 | __free_page(mdev->md_io_page); | ||
3040 | out_no_io_page: | ||
3041 | put_disk(disk); | ||
3042 | out_no_disk: | ||
3043 | blk_cleanup_queue(q); | ||
3044 | out_no_q: | ||
3045 | free_cpumask_var(mdev->cpu_mask); | ||
3046 | out_no_cpumask: | ||
3047 | kfree(mdev); | ||
3048 | return NULL; | ||
3049 | } | ||
3050 | |||
3051 | /* counterpart of drbd_new_device. | ||
3052 | * last part of drbd_delete_device. */ | ||
3053 | void drbd_free_mdev(struct drbd_conf *mdev) | ||
3054 | { | ||
3055 | kfree(mdev->current_epoch); | ||
3056 | kfree(mdev->app_reads_hash); | ||
3057 | tl_cleanup(mdev); | ||
3058 | if (mdev->bitmap) /* should no longer be there. */ | ||
3059 | drbd_bm_cleanup(mdev); | ||
3060 | __free_page(mdev->md_io_page); | ||
3061 | put_disk(mdev->vdisk); | ||
3062 | blk_cleanup_queue(mdev->rq_queue); | ||
3063 | free_cpumask_var(mdev->cpu_mask); | ||
3064 | kfree(mdev); | ||
3065 | } | ||
3066 | |||
3067 | |||
3068 | int __init drbd_init(void) | ||
3069 | { | ||
3070 | int err; | ||
3071 | |||
3072 | if (sizeof(struct p_handshake) != 80) { | ||
3073 | printk(KERN_ERR | ||
3074 | "drbd: never change the size or layout " | ||
3075 | "of the HandShake packet.\n"); | ||
3076 | return -EINVAL; | ||
3077 | } | ||
3078 | |||
3079 | if (1 > minor_count || minor_count > 255) { | ||
3080 | printk(KERN_ERR | ||
3081 | "drbd: invalid minor_count (%d)\n", minor_count); | ||
3082 | #ifdef MODULE | ||
3083 | return -EINVAL; | ||
3084 | #else | ||
3085 | minor_count = 8; | ||
3086 | #endif | ||
3087 | } | ||
3088 | |||
3089 | err = drbd_nl_init(); | ||
3090 | if (err) | ||
3091 | return err; | ||
3092 | |||
3093 | err = register_blkdev(DRBD_MAJOR, "drbd"); | ||
3094 | if (err) { | ||
3095 | printk(KERN_ERR | ||
3096 | "drbd: unable to register block device major %d\n", | ||
3097 | DRBD_MAJOR); | ||
3098 | return err; | ||
3099 | } | ||
3100 | |||
3101 | register_reboot_notifier(&drbd_notifier); | ||
3102 | |||
3103 | /* | ||
3104 | * allocate all necessary structs | ||
3105 | */ | ||
3106 | err = -ENOMEM; | ||
3107 | |||
3108 | init_waitqueue_head(&drbd_pp_wait); | ||
3109 | |||
3110 | drbd_proc = NULL; /* play safe for drbd_cleanup */ | ||
3111 | minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count, | ||
3112 | GFP_KERNEL); | ||
3113 | if (!minor_table) | ||
3114 | goto Enomem; | ||
3115 | |||
3116 | err = drbd_create_mempools(); | ||
3117 | if (err) | ||
3118 | goto Enomem; | ||
3119 | |||
3120 | drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops); | ||
3121 | if (!drbd_proc) { | ||
3122 | printk(KERN_ERR "drbd: unable to register proc file\n"); | ||
3123 | goto Enomem; | ||
3124 | } | ||
3125 | |||
3126 | rwlock_init(&global_state_lock); | ||
3127 | |||
3128 | printk(KERN_INFO "drbd: initialized. " | ||
3129 | "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n", | ||
3130 | API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX); | ||
3131 | printk(KERN_INFO "drbd: %s\n", drbd_buildtag()); | ||
3132 | printk(KERN_INFO "drbd: registered as block device major %d\n", | ||
3133 | DRBD_MAJOR); | ||
3134 | printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table); | ||
3135 | |||
3136 | return 0; /* Success! */ | ||
3137 | |||
3138 | Enomem: | ||
3139 | drbd_cleanup(); | ||
3140 | if (err == -ENOMEM) | ||
3141 | /* currently always the case */ | ||
3142 | printk(KERN_ERR "drbd: ran out of memory\n"); | ||
3143 | else | ||
3144 | printk(KERN_ERR "drbd: initialization failure\n"); | ||
3145 | return err; | ||
3146 | } | ||
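
The sizeof() check at the top of drbd_init() protects the on-the-wire handshake layout at module load time. As an editorial sketch only (assuming BUILD_BUG_ON() from <linux/kernel.h> is acceptable in this code base), the same invariant could be enforced at compile time:

	/* Sketch: fail the build, rather than module load, if the handshake
	 * packet layout ever changes. */
	static inline void drbd_check_handshake_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct p_handshake) != 80);
	}
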
3147 | |||
3148 | void drbd_free_bc(struct drbd_backing_dev *ldev) | ||
3149 | { | ||
3150 | if (ldev == NULL) | ||
3151 | return; | ||
3152 | |||
3153 | bd_release(ldev->backing_bdev); | ||
3154 | bd_release(ldev->md_bdev); | ||
3155 | |||
3156 | fput(ldev->lo_file); | ||
3157 | fput(ldev->md_file); | ||
3158 | |||
3159 | kfree(ldev); | ||
3160 | } | ||
3161 | |||
3162 | void drbd_free_sock(struct drbd_conf *mdev) | ||
3163 | { | ||
3164 | if (mdev->data.socket) { | ||
3165 | kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR); | ||
3166 | sock_release(mdev->data.socket); | ||
3167 | mdev->data.socket = NULL; | ||
3168 | } | ||
3169 | if (mdev->meta.socket) { | ||
3170 | kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR); | ||
3171 | sock_release(mdev->meta.socket); | ||
3172 | mdev->meta.socket = NULL; | ||
3173 | } | ||
3174 | } | ||
3175 | |||
3176 | |||
3177 | void drbd_free_resources(struct drbd_conf *mdev) | ||
3178 | { | ||
3179 | crypto_free_hash(mdev->csums_tfm); | ||
3180 | mdev->csums_tfm = NULL; | ||
3181 | crypto_free_hash(mdev->verify_tfm); | ||
3182 | mdev->verify_tfm = NULL; | ||
3183 | crypto_free_hash(mdev->cram_hmac_tfm); | ||
3184 | mdev->cram_hmac_tfm = NULL; | ||
3185 | crypto_free_hash(mdev->integrity_w_tfm); | ||
3186 | mdev->integrity_w_tfm = NULL; | ||
3187 | crypto_free_hash(mdev->integrity_r_tfm); | ||
3188 | mdev->integrity_r_tfm = NULL; | ||
3189 | |||
3190 | drbd_free_sock(mdev); | ||
3191 | |||
3192 | __no_warn(local, | ||
3193 | drbd_free_bc(mdev->ldev); | ||
3194 | mdev->ldev = NULL;); | ||
3195 | } | ||
3196 | |||
3197 | /* meta data management */ | ||
3198 | |||
3199 | struct meta_data_on_disk { | ||
3200 | u64 la_size; /* last agreed size. */ | ||
3201 | u64 uuid[UI_SIZE]; /* UUIDs. */ | ||
3202 | u64 device_uuid; | ||
3203 | u64 reserved_u64_1; | ||
3204 | u32 flags; /* MDF */ | ||
3205 | u32 magic; | ||
3206 | u32 md_size_sect; | ||
3207 | u32 al_offset; /* offset to this block */ | ||
3208 | u32 al_nr_extents; /* important for restoring the AL */ | ||
3209 | /* `-- act_log->nr_elements <-- sync_conf.al_extents */ | ||
3210 | u32 bm_offset; /* offset to the bitmap, from here */ | ||
3211 | u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ | ||
3212 | u32 reserved_u32[4]; | ||
3213 | |||
3214 | } __packed; | ||
3215 | |||
3216 | /** | ||
3217 | * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set | ||
3218 | * @mdev: DRBD device. | ||
3219 | */ | ||
3220 | void drbd_md_sync(struct drbd_conf *mdev) | ||
3221 | { | ||
3222 | struct meta_data_on_disk *buffer; | ||
3223 | sector_t sector; | ||
3224 | int i; | ||
3225 | |||
3226 | if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) | ||
3227 | return; | ||
3228 | del_timer(&mdev->md_sync_timer); | ||
3229 | |||
3230 | /* We use D_FAILED here, not D_ATTACHING, because we try to write | ||
3231 | * metadata even if we detach due to a disk failure! */ | ||
3232 | if (!get_ldev_if_state(mdev, D_FAILED)) | ||
3233 | return; | ||
3234 | |||
3235 | mutex_lock(&mdev->md_io_mutex); | ||
3236 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | ||
3237 | memset(buffer, 0, 512); | ||
3238 | |||
3239 | buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev)); | ||
3240 | for (i = UI_CURRENT; i < UI_SIZE; i++) | ||
3241 | buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]); | ||
3242 | buffer->flags = cpu_to_be32(mdev->ldev->md.flags); | ||
3243 | buffer->magic = cpu_to_be32(DRBD_MD_MAGIC); | ||
3244 | |||
3245 | buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect); | ||
3246 | buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset); | ||
3247 | buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements); | ||
3248 | buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE); | ||
3249 | buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); | ||
3250 | |||
3251 | buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); | ||
3252 | |||
3253 | D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); | ||
3254 | sector = mdev->ldev->md.md_offset; | ||
3255 | |||
3256 | if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { | ||
3257 | clear_bit(MD_DIRTY, &mdev->flags); | ||
3258 | } else { | ||
3259 | /* this was only an attempt anyway ... */ | ||
3260 | dev_err(DEV, "meta data update failed!\n"); | ||
3261 | |||
3262 | drbd_chk_io_error(mdev, 1, TRUE); | ||
3263 | } | ||
3264 | |||
3265 | /* Update mdev->ldev->md.la_size_sect, | ||
3266 | * since we just wrote it to the on-disk metadata. */ | ||
3267 | mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev); | ||
3268 | |||
3269 | mutex_unlock(&mdev->md_io_mutex); | ||
3270 | put_ldev(mdev); | ||
3271 | } | ||
3272 | |||
3273 | /** | ||
3274 | * drbd_md_read() - Reads in the meta data super block | ||
3275 | * @mdev: DRBD device. | ||
3276 | * @bdev: Device from which the meta data should be read in. | ||
3277 | * | ||
3278 | * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case | ||
3279 | * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID. | ||
3280 | */ | ||
3281 | int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | ||
3282 | { | ||
3283 | struct meta_data_on_disk *buffer; | ||
3284 | int i, rv = NO_ERROR; | ||
3285 | |||
3286 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | ||
3287 | return ERR_IO_MD_DISK; | ||
3288 | |||
3289 | mutex_lock(&mdev->md_io_mutex); | ||
3290 | buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page); | ||
3291 | |||
3292 | if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) { | ||
3293 | /* NOTE: can't do normal error processing here as this is | ||
3294 | called BEFORE disk is attached */ | ||
3295 | dev_err(DEV, "Error while reading metadata.\n"); | ||
3296 | rv = ERR_IO_MD_DISK; | ||
3297 | goto err; | ||
3298 | } | ||
3299 | |||
3300 | if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) { | ||
3301 | dev_err(DEV, "Error while reading metadata, magic not found.\n"); | ||
3302 | rv = ERR_MD_INVALID; | ||
3303 | goto err; | ||
3304 | } | ||
3305 | if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) { | ||
3306 | dev_err(DEV, "unexpected al_offset: %d (expected %d)\n", | ||
3307 | be32_to_cpu(buffer->al_offset), bdev->md.al_offset); | ||
3308 | rv = ERR_MD_INVALID; | ||
3309 | goto err; | ||
3310 | } | ||
3311 | if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) { | ||
3312 | dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n", | ||
3313 | be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset); | ||
3314 | rv = ERR_MD_INVALID; | ||
3315 | goto err; | ||
3316 | } | ||
3317 | if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) { | ||
3318 | dev_err(DEV, "unexpected md_size: %u (expected %u)\n", | ||
3319 | be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect); | ||
3320 | rv = ERR_MD_INVALID; | ||
3321 | goto err; | ||
3322 | } | ||
3323 | |||
3324 | if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) { | ||
3325 | dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n", | ||
3326 | be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE); | ||
3327 | rv = ERR_MD_INVALID; | ||
3328 | goto err; | ||
3329 | } | ||
3330 | |||
3331 | bdev->md.la_size_sect = be64_to_cpu(buffer->la_size); | ||
3332 | for (i = UI_CURRENT; i < UI_SIZE; i++) | ||
3333 | bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]); | ||
3334 | bdev->md.flags = be32_to_cpu(buffer->flags); | ||
3335 | mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); | ||
3336 | bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); | ||
3337 | |||
3338 | if (mdev->sync_conf.al_extents < 7) | ||
3339 | mdev->sync_conf.al_extents = 127; | ||
3340 | |||
3341 | err: | ||
3342 | mutex_unlock(&mdev->md_io_mutex); | ||
3343 | put_ldev(mdev); | ||
3344 | |||
3345 | return rv; | ||
3346 | } | ||
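
Every field of the super block is stored big-endian on disk, which is why drbd_md_sync() converts with cpu_to_be32()/cpu_to_be64() and drbd_md_read() converts back with be32_to_cpu()/be64_to_cpu(). A stand-alone user-space sketch of the same conversion (illustration only, with placeholder values; not part of DRBD) looks like this:

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Pretend these were read straight from the super block sector,
		 * i.e. they are big-endian regardless of the host byte order. */
		uint32_t magic_on_disk = htobe32(0x12345678u);   /* placeholder magic */
		uint64_t la_size_on_disk = htobe64(1048576ULL);  /* placeholder size */

		/* Same direction of conversion as be32_to_cpu()/be64_to_cpu()
		 * in drbd_md_read() above. */
		printf("magic:   0x%08x\n", (unsigned int)be32toh(magic_on_disk));
		printf("la_size: %llu sectors\n",
		       (unsigned long long)be64toh(la_size_on_disk));
		return 0;
	}
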
3347 | |||
3348 | /** | ||
3349 | * drbd_md_mark_dirty() - Mark meta data super block as dirty | ||
3350 | * @mdev: DRBD device. | ||
3351 | * | ||
3352 | * Call this function if you change anything that should be written to | ||
3353 | * the meta-data super block. This function sets MD_DIRTY and starts a | ||
3354 | * timer that ensures drbd_md_sync() gets called within five seconds. | ||
3355 | */ | ||
3356 | void drbd_md_mark_dirty(struct drbd_conf *mdev) | ||
3357 | { | ||
3358 | set_bit(MD_DIRTY, &mdev->flags); | ||
3359 | mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); | ||
3360 | } | ||
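
A sketch of the intended calling pattern (the helper and the updated field are chosen here only for illustration): change the in-core copy first, then mark it dirty; the actual write happens later in drbd_md_sync(), at the latest when md_sync_timer fires and w_md_sync() runs on the worker.

	/* Hypothetical caller, illustration only. */
	static void example_set_device_uuid(struct drbd_conf *mdev, u64 uuid)
	{
		mdev->ldev->md.device_uuid = uuid;  /* update the in-core value */
		drbd_md_mark_dirty(mdev);           /* on-disk update within 5s */
	}
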
3361 | |||
3362 | |||
3363 | static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) | ||
3364 | { | ||
3365 | int i; | ||
3366 | |||
3367 | for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) | ||
3368 | mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; | ||
3369 | } | ||
3370 | |||
3371 | void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | ||
3372 | { | ||
3373 | if (idx == UI_CURRENT) { | ||
3374 | if (mdev->state.role == R_PRIMARY) | ||
3375 | val |= 1; | ||
3376 | else | ||
3377 | val &= ~((u64)1); | ||
3378 | |||
3379 | drbd_set_ed_uuid(mdev, val); | ||
3380 | } | ||
3381 | |||
3382 | mdev->ldev->md.uuid[idx] = val; | ||
3383 | drbd_md_mark_dirty(mdev); | ||
3384 | } | ||
3385 | |||
3386 | |||
3387 | void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) | ||
3388 | { | ||
3389 | if (mdev->ldev->md.uuid[idx]) { | ||
3390 | drbd_uuid_move_history(mdev); | ||
3391 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; | ||
3392 | } | ||
3393 | _drbd_uuid_set(mdev, idx, val); | ||
3394 | } | ||
3395 | |||
3396 | /** | ||
3397 | * drbd_uuid_new_current() - Creates a new current UUID | ||
3398 | * @mdev: DRBD device. | ||
3399 | * | ||
3400 | * Creates a new current UUID, and rotates the old current UUID into | ||
3401 | * the bitmap slot. Causes an incremental resync upon next connect. | ||
3402 | */ | ||
3403 | void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) | ||
3404 | { | ||
3405 | u64 val; | ||
3406 | |||
3407 | dev_info(DEV, "Creating new current UUID\n"); | ||
3408 | D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); | ||
3409 | mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; | ||
3410 | |||
3411 | get_random_bytes(&val, sizeof(u64)); | ||
3412 | _drbd_uuid_set(mdev, UI_CURRENT, val); | ||
3413 | } | ||
3414 | |||
3415 | void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) | ||
3416 | { | ||
3417 | if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) | ||
3418 | return; | ||
3419 | |||
3420 | if (val == 0) { | ||
3421 | drbd_uuid_move_history(mdev); | ||
3422 | mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; | ||
3423 | mdev->ldev->md.uuid[UI_BITMAP] = 0; | ||
3424 | } else { | ||
3425 | if (mdev->ldev->md.uuid[UI_BITMAP]) | ||
3426 | dev_warn(DEV, "bm UUID already set\n"); | ||
3427 | |||
3428 | mdev->ldev->md.uuid[UI_BITMAP] = val; | ||
3429 | mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); | ||
3430 | |||
3431 | } | ||
3432 | drbd_md_mark_dirty(mdev); | ||
3433 | } | ||
3434 | |||
3435 | /** | ||
3436 | * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | ||
3437 | * @mdev: DRBD device. | ||
3438 | * | ||
3439 | * Sets all bits in the bitmap and writes the whole bitmap to stable storage. | ||
3440 | */ | ||
3441 | int drbd_bmio_set_n_write(struct drbd_conf *mdev) | ||
3442 | { | ||
3443 | int rv = -EIO; | ||
3444 | |||
3445 | if (get_ldev_if_state(mdev, D_ATTACHING)) { | ||
3446 | drbd_md_set_flag(mdev, MDF_FULL_SYNC); | ||
3447 | drbd_md_sync(mdev); | ||
3448 | drbd_bm_set_all(mdev); | ||
3449 | |||
3450 | rv = drbd_bm_write(mdev); | ||
3451 | |||
3452 | if (!rv) { | ||
3453 | drbd_md_clear_flag(mdev, MDF_FULL_SYNC); | ||
3454 | drbd_md_sync(mdev); | ||
3455 | } | ||
3456 | |||
3457 | put_ldev(mdev); | ||
3458 | } | ||
3459 | |||
3460 | return rv; | ||
3461 | } | ||
3462 | |||
3463 | /** | ||
3464 | * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io() | ||
3465 | * @mdev: DRBD device. | ||
3466 | * | ||
3467 | * Clears all bits in the bitmap and writes the whole bitmap to stable storage. | ||
3468 | */ | ||
3469 | int drbd_bmio_clear_n_write(struct drbd_conf *mdev) | ||
3470 | { | ||
3471 | int rv = -EIO; | ||
3472 | |||
3473 | if (get_ldev_if_state(mdev, D_ATTACHING)) { | ||
3474 | drbd_bm_clear_all(mdev); | ||
3475 | rv = drbd_bm_write(mdev); | ||
3476 | put_ldev(mdev); | ||
3477 | } | ||
3478 | |||
3479 | return rv; | ||
3480 | } | ||
3481 | |||
3482 | static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) | ||
3483 | { | ||
3484 | struct bm_io_work *work = container_of(w, struct bm_io_work, w); | ||
3485 | int rv; | ||
3486 | |||
3487 | D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0); | ||
3488 | |||
3489 | drbd_bm_lock(mdev, work->why); | ||
3490 | rv = work->io_fn(mdev); | ||
3491 | drbd_bm_unlock(mdev); | ||
3492 | |||
3493 | clear_bit(BITMAP_IO, &mdev->flags); | ||
3494 | wake_up(&mdev->misc_wait); | ||
3495 | |||
3496 | if (work->done) | ||
3497 | work->done(mdev, rv); | ||
3498 | |||
3499 | clear_bit(BITMAP_IO_QUEUED, &mdev->flags); | ||
3500 | work->why = NULL; | ||
3501 | |||
3502 | return 1; | ||
3503 | } | ||
3504 | |||
3505 | /** | ||
3506 | * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap | ||
3507 | * @mdev: DRBD device. | ||
3508 | * @io_fn: IO callback to be called when bitmap IO is possible | ||
3509 | * @done: callback to be called after the bitmap IO was performed | ||
3510 | * @why: Descriptive text of the reason for doing the IO | ||
3511 | * | ||
3512 | * While IO on the bitmap is in progress, application IO is frozen; this | ||
3513 | * ensures that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be | ||
3514 | * called from worker context. It MUST NOT be used while a previous such | ||
3515 | * work is still pending! | ||
3516 | */ | ||
3517 | void drbd_queue_bitmap_io(struct drbd_conf *mdev, | ||
3518 | int (*io_fn)(struct drbd_conf *), | ||
3519 | void (*done)(struct drbd_conf *, int), | ||
3520 | char *why) | ||
3521 | { | ||
3522 | D_ASSERT(current == mdev->worker.task); | ||
3523 | |||
3524 | D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags)); | ||
3525 | D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags)); | ||
3526 | D_ASSERT(list_empty(&mdev->bm_io_work.w.list)); | ||
3527 | if (mdev->bm_io_work.why) | ||
3528 | dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n", | ||
3529 | why, mdev->bm_io_work.why); | ||
3530 | |||
3531 | mdev->bm_io_work.io_fn = io_fn; | ||
3532 | mdev->bm_io_work.done = done; | ||
3533 | mdev->bm_io_work.why = why; | ||
3534 | |||
3535 | set_bit(BITMAP_IO, &mdev->flags); | ||
3536 | if (atomic_read(&mdev->ap_bio_cnt) == 0) { | ||
3537 | if (list_empty(&mdev->bm_io_work.w.list)) { | ||
3538 | set_bit(BITMAP_IO_QUEUED, &mdev->flags); | ||
3539 | drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w); | ||
3540 | } else | ||
3541 | dev_err(DEV, "FIXME avoided double queuing bm_io_work\n"); | ||
3542 | } | ||
3543 | } | ||
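
A sketch of how a worker-context caller might combine this with drbd_bmio_set_n_write() from above; the completion callback, the wrapper and the reason string are hypothetical:

	/* Hypothetical completion callback, illustration only. */
	static void example_bitmap_written(struct drbd_conf *mdev, int rv)
	{
		if (rv)
			dev_err(DEV, "example: writing the whole bitmap failed\n");
	}

	static void example_queue_full_sync(struct drbd_conf *mdev)
	{
		/* must run on the worker, with no bitmap IO already queued */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
				     &example_bitmap_written, "example full sync");
	}
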
3544 | |||
3545 | /** | ||
3546 | * drbd_bitmap_io() - Does an IO operation on the whole bitmap | ||
3547 | * @mdev: DRBD device. | ||
3548 | * @io_fn: IO callback to be called when bitmap IO is possible | ||
3549 | * @why: Descriptive text of the reason for doing the IO | ||
3550 | * | ||
3551 | * Freezes application IO while the actual IO operation runs. This | ||
3552 | * function MAY NOT be called from worker context. | ||
3553 | */ | ||
3554 | int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why) | ||
3555 | { | ||
3556 | int rv; | ||
3557 | |||
3558 | D_ASSERT(current != mdev->worker.task); | ||
3559 | |||
3560 | drbd_suspend_io(mdev); | ||
3561 | |||
3562 | drbd_bm_lock(mdev, why); | ||
3563 | rv = io_fn(mdev); | ||
3564 | drbd_bm_unlock(mdev); | ||
3565 | |||
3566 | drbd_resume_io(mdev); | ||
3567 | |||
3568 | return rv; | ||
3569 | } | ||
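
The synchronous counterpart for non-worker contexts, again only a sketch with a hypothetical wrapper and reason string:

	/* Hypothetical caller, illustration only; must not run on the worker. */
	static int example_clear_bitmap(struct drbd_conf *mdev)
	{
		return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
				      "example clear bitmap");
	}
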
3570 | |||
3571 | void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local) | ||
3572 | { | ||
3573 | if ((mdev->ldev->md.flags & flag) != flag) { | ||
3574 | drbd_md_mark_dirty(mdev); | ||
3575 | mdev->ldev->md.flags |= flag; | ||
3576 | } | ||
3577 | } | ||
3578 | |||
3579 | void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local) | ||
3580 | { | ||
3581 | if ((mdev->ldev->md.flags & flag) != 0) { | ||
3582 | drbd_md_mark_dirty(mdev); | ||
3583 | mdev->ldev->md.flags &= ~flag; | ||
3584 | } | ||
3585 | } | ||
3586 | int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) | ||
3587 | { | ||
3588 | return (bdev->md.flags & flag) != 0; | ||
3589 | } | ||
3590 | |||
3591 | static void md_sync_timer_fn(unsigned long data) | ||
3592 | { | ||
3593 | struct drbd_conf *mdev = (struct drbd_conf *) data; | ||
3594 | |||
3595 | drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work); | ||
3596 | } | ||
3597 | |||
3598 | static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused) | ||
3599 | { | ||
3600 | dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); | ||
3601 | drbd_md_sync(mdev); | ||
3602 | |||
3603 | return 1; | ||
3604 | } | ||
3605 | |||
3606 | #ifdef CONFIG_DRBD_FAULT_INJECTION | ||
3607 | /* Fault insertion support including random number generator shamelessly | ||
3608 | * stolen from kernel/rcutorture.c */ | ||
3609 | struct fault_random_state { | ||
3610 | unsigned long state; | ||
3611 | unsigned long count; | ||
3612 | }; | ||
3613 | |||
3614 | #define FAULT_RANDOM_MULT 39916801 /* prime */ | ||
3615 | #define FAULT_RANDOM_ADD 479001701 /* prime */ | ||
3616 | #define FAULT_RANDOM_REFRESH 10000 | ||
3617 | |||
3618 | /* | ||
3619 | * Crude but fast random-number generator. Uses a linear congruential | ||
3620 | * generator, with occasional help from get_random_bytes(). | ||
3621 | */ | ||
3622 | static unsigned long | ||
3623 | _drbd_fault_random(struct fault_random_state *rsp) | ||
3624 | { | ||
3625 | long refresh; | ||
3626 | |||
3627 | if (--rsp->count < 0) { | ||
3628 | get_random_bytes(&refresh, sizeof(refresh)); | ||
3629 | rsp->state += refresh; | ||
3630 | rsp->count = FAULT_RANDOM_REFRESH; | ||
3631 | } | ||
3632 | rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD; | ||
3633 | return swahw32(rsp->state); | ||
3634 | } | ||
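
For reference, the same recurrence as stand-alone user-space C (illustration only; the constants mirror FAULT_RANDOM_MULT/FAULT_RANDOM_ADD above, while the in-kernel version additionally reseeds from get_random_bytes() and half-word-swaps the result):

	#include <stdio.h>

	static unsigned long lcg_state = 1;	/* arbitrary non-zero seed */

	static unsigned long lcg_next(void)
	{
		lcg_state = lcg_state * 39916801UL + 479001701UL;
		return lcg_state;
	}

	int main(void)
	{
		int i;

		/* 1..100, the range compared against fault_rate below */
		for (i = 0; i < 5; i++)
			printf("%lu\n", lcg_next() % 100 + 1);
		return 0;
	}
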
3635 | |||
3636 | static char * | ||
3637 | _drbd_fault_str(unsigned int type) { | ||
3638 | static char *_faults[] = { | ||
3639 | [DRBD_FAULT_MD_WR] = "Meta-data write", | ||
3640 | [DRBD_FAULT_MD_RD] = "Meta-data read", | ||
3641 | [DRBD_FAULT_RS_WR] = "Resync write", | ||
3642 | [DRBD_FAULT_RS_RD] = "Resync read", | ||
3643 | [DRBD_FAULT_DT_WR] = "Data write", | ||
3644 | [DRBD_FAULT_DT_RD] = "Data read", | ||
3645 | [DRBD_FAULT_DT_RA] = "Data read ahead", | ||
3646 | [DRBD_FAULT_BM_ALLOC] = "BM allocation", | ||
3647 | [DRBD_FAULT_AL_EE] = "EE allocation" | ||
3648 | }; | ||
3649 | |||
3650 | return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**"; | ||
3651 | } | ||
3652 | |||
3653 | unsigned int | ||
3654 | _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) | ||
3655 | { | ||
3656 | static struct fault_random_state rrs = {0, 0}; | ||
3657 | |||
3658 | unsigned int ret = ( | ||
3659 | (fault_devs == 0 || | ||
3660 | ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) && | ||
3661 | (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate)); | ||
3662 | |||
3663 | if (ret) { | ||
3664 | fault_count++; | ||
3665 | |||
3666 | if (printk_ratelimit()) | ||
3667 | dev_warn(DEV, "***Simulating %s failure\n", | ||
3668 | _drbd_fault_str(type)); | ||
3669 | } | ||
3670 | |||
3671 | return ret; | ||
3672 | } | ||
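
A sketch of how an IO path might consult the fault injector; the wrapper below is hypothetical, real call sites use their own guards and fault types:

	/* Hypothetical wrapper, illustration only. */
	static int example_fault_md_read(struct drbd_conf *mdev)
	{
		if (_drbd_insert_fault(mdev, DRBD_FAULT_MD_RD))
			return -EIO;	/* pretend the meta-data read failed */
		return 0;
	}
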
3673 | #endif | ||
3674 | |||
3675 | const char *drbd_buildtag(void) | ||
3676 | { | ||
3677 | /* DRBD built from external sources carries a reference to the git | ||
3678 | hash of the source code here. */ | ||
3679 | |||
3680 | static char buildtag[38] = "\0uilt-in"; | ||
3681 | |||
3682 | if (buildtag[0] == 0) { | ||
3683 | #ifdef CONFIG_MODULES | ||
3684 | if (THIS_MODULE != NULL) | ||
3685 | sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion); | ||
3686 | else | ||
3687 | #endif | ||
3688 | buildtag[0] = 'b'; | ||
3689 | } | ||
3690 | |||
3691 | return buildtag; | ||
3692 | } | ||
3693 | |||
3694 | module_init(drbd_init) | ||
3695 | module_exit(drbd_cleanup) | ||
3696 | |||
3697 | EXPORT_SYMBOL(drbd_conn_str); | ||
3698 | EXPORT_SYMBOL(drbd_role_str); | ||
3699 | EXPORT_SYMBOL(drbd_disk_str); | ||
3700 | EXPORT_SYMBOL(drbd_set_st_err_str); | ||