Diffstat (limited to 'drivers/block/drbd/drbd_nl.c')
-rw-r--r-- | drivers/block/drbd/drbd_nl.c | 2364
1 files changed, 2364 insertions, 0 deletions
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
new file mode 100644
index 000000000000..436a090b532b
--- /dev/null
+++ b/drivers/block/drbd/drbd_nl.c
@@ -0,0 +1,2364 @@
1 | /* | ||
2 | drbd_nl.c | ||
3 | |||
4 | This file is part of DRBD by Philipp Reisner and Lars Ellenberg. | ||
5 | |||
6 | Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. | ||
7 | Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. | ||
8 | Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. | ||
9 | |||
10 | drbd is free software; you can redistribute it and/or modify | ||
11 | it under the terms of the GNU General Public License as published by | ||
12 | the Free Software Foundation; either version 2, or (at your option) | ||
13 | any later version. | ||
14 | |||
15 | drbd is distributed in the hope that it will be useful, | ||
16 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | GNU General Public License for more details. | ||
19 | |||
20 | You should have received a copy of the GNU General Public License | ||
21 | along with drbd; see the file COPYING. If not, write to | ||
22 | the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | |||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/drbd.h> | ||
28 | #include <linux/in.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/file.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/connector.h> | ||
33 | #include <linux/blkpg.h> | ||
34 | #include <linux/cpumask.h> | ||
35 | #include "drbd_int.h" | ||
36 | #include "drbd_wrappers.h" | ||
37 | #include <asm/unaligned.h> | ||
38 | #include <linux/drbd_tag_magic.h> | ||
39 | #include <linux/drbd_limits.h> | ||
40 | |||
41 | static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); | ||
42 | static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); | ||
43 | static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *); | ||
44 | |||
45 | /* see get_sb_bdev and bd_claim */ | ||
46 | static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; | ||
47 | |||
48 | /* Generate the tag_list to struct functions */ | ||
49 | #define NL_PACKET(name, number, fields) \ | ||
50 | static int name ## _from_tags(struct drbd_conf *mdev, \ | ||
51 | unsigned short *tags, struct name *arg) __attribute__ ((unused)); \ | ||
52 | static int name ## _from_tags(struct drbd_conf *mdev, \ | ||
53 | unsigned short *tags, struct name *arg) \ | ||
54 | { \ | ||
55 | int tag; \ | ||
56 | int dlen; \ | ||
57 | \ | ||
58 | while ((tag = get_unaligned(tags++)) != TT_END) { \ | ||
59 | dlen = get_unaligned(tags++); \ | ||
60 | switch (tag_number(tag)) { \ | ||
61 | fields \ | ||
62 | default: \ | ||
63 | if (tag & T_MANDATORY) { \ | ||
64 | dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \ | ||
65 | return 0; \ | ||
66 | } \ | ||
67 | } \ | ||
68 | tags = (unsigned short *)((char *)tags + dlen); \ | ||
69 | } \ | ||
70 | return 1; \ | ||
71 | } | ||
72 | #define NL_INTEGER(pn, pr, member) \ | ||
73 | case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \ | ||
74 | arg->member = get_unaligned((int *)(tags)); \ | ||
75 | break; | ||
76 | #define NL_INT64(pn, pr, member) \ | ||
77 | case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \ | ||
78 | arg->member = get_unaligned((u64 *)(tags)); \ | ||
79 | break; | ||
80 | #define NL_BIT(pn, pr, member) \ | ||
81 | case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \ | ||
82 | arg->member = *(char *)(tags) ? 1 : 0; \ | ||
83 | break; | ||
84 | #define NL_STRING(pn, pr, member, len) \ | ||
85 | case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \ | ||
86 | if (dlen > len) { \ | ||
87 | dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \ | ||
88 | #member, dlen, (unsigned int)len); \ | ||
89 | return 0; \ | ||
90 | } \ | ||
91 | arg->member ## _len = dlen; \ | ||
92 | memcpy(arg->member, tags, min_t(size_t, dlen, len)); \ | ||
93 | break; | ||
94 | #include "linux/drbd_nl.h" | ||
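/*
 * Illustrative only: the generated *_from_tags() parsers above walk a
 * packed stream of (tag, dlen, payload) triples, e.g.
 *
 *   u16 tag   = pn | pr | TT_INTEGER   (tag number, flags, type)
 *   u16 dlen  = sizeof(int)
 *   payload   = the int value, possibly unaligned
 *   ...repeated..., terminated by a u16 TT_END
 *
 * get_unaligned() is used because string payloads of odd length leave
 * the following tag on an odd byte boundary.
 */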
95 | |||
96 | /* Generate the struct to tag_list functions */ | ||
97 | #define NL_PACKET(name, number, fields) \ | ||
98 | static unsigned short* \ | ||
99 | name ## _to_tags(struct drbd_conf *mdev, \ | ||
100 | struct name *arg, unsigned short *tags) __attribute__ ((unused)); \ | ||
101 | static unsigned short* \ | ||
102 | name ## _to_tags(struct drbd_conf *mdev, \ | ||
103 | struct name *arg, unsigned short *tags) \ | ||
104 | { \ | ||
105 | fields \ | ||
106 | return tags; \ | ||
107 | } | ||
108 | |||
109 | #define NL_INTEGER(pn, pr, member) \ | ||
110 | put_unaligned(pn | pr | TT_INTEGER, tags++); \ | ||
111 | put_unaligned(sizeof(int), tags++); \ | ||
112 | put_unaligned(arg->member, (int *)tags); \ | ||
113 | tags = (unsigned short *)((char *)tags+sizeof(int)); | ||
114 | #define NL_INT64(pn, pr, member) \ | ||
115 | put_unaligned(pn | pr | TT_INT64, tags++); \ | ||
116 | put_unaligned(sizeof(u64), tags++); \ | ||
117 | put_unaligned(arg->member, (u64 *)tags); \ | ||
118 | tags = (unsigned short *)((char *)tags+sizeof(u64)); | ||
119 | #define NL_BIT(pn, pr, member) \ | ||
120 | put_unaligned(pn | pr | TT_BIT, tags++); \ | ||
121 | put_unaligned(sizeof(char), tags++); \ | ||
122 | *(char *)tags = arg->member; \ | ||
123 | tags = (unsigned short *)((char *)tags+sizeof(char)); | ||
124 | #define NL_STRING(pn, pr, member, len) \ | ||
125 | put_unaligned(pn | pr | TT_STRING, tags++); \ | ||
126 | put_unaligned(arg->member ## _len, tags++); \ | ||
127 | memcpy(tags, arg->member, arg->member ## _len); \ | ||
128 | tags = (unsigned short *)((char *)tags + arg->member ## _len); | ||
129 | #include "linux/drbd_nl.h" | ||
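/*
 * Usage sketch (illustrative): a reply tag list is built by chaining
 * the generated serializers and terminating the stream, roughly:
 *
 *   unsigned short *tl = reply->tag_list;
 *   tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
 *   put_unaligned(TT_END, tl++);
 */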
130 | |||
131 | void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name); | ||
132 | void drbd_nl_send_reply(struct cn_msg *, int); | ||
133 | |||
134 | int drbd_khelper(struct drbd_conf *mdev, char *cmd) | ||
135 | { | ||
136 | char *envp[] = { "HOME=/", | ||
137 | "TERM=linux", | ||
138 | "PATH=/sbin:/usr/sbin:/bin:/usr/bin", | ||
139 | NULL, /* Will be set to address family */ | ||
140 | NULL, /* Will be set to address */ | ||
141 | NULL }; | ||
142 | |||
143 | char mb[12], af[20], ad[60], *afs; | ||
144 | char *argv[] = {usermode_helper, cmd, mb, NULL }; | ||
145 | int ret; | ||
146 | |||
147 | snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev)); | ||
148 | |||
149 | if (get_net_conf(mdev)) { | ||
150 | switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) { | ||
151 | case AF_INET6: | ||
152 | afs = "ipv6"; | ||
153 | snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6", | ||
154 | &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr); | ||
155 | break; | ||
156 | case AF_INET: | ||
157 | afs = "ipv4"; | ||
158 | snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", | ||
159 | &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); | ||
160 | break; | ||
161 | default: | ||
162 | afs = "ssocks"; | ||
163 | snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4", | ||
164 | &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr); | ||
165 | } | ||
166 | snprintf(af, 20, "DRBD_PEER_AF=%s", afs); | ||
167 | envp[3] = af; | ||
168 | envp[4] = ad; | ||
169 | put_net_conf(mdev); | ||
170 | } | ||
171 | |||
172 | dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); | ||
173 | |||
174 | drbd_bcast_ev_helper(mdev, cmd); | ||
175 | ret = call_usermodehelper(usermode_helper, argv, envp, 1); | ||
176 | if (ret) | ||
177 | dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", | ||
178 | usermode_helper, cmd, mb, | ||
179 | (ret >> 8) & 0xff, ret); | ||
180 | else | ||
181 | dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n", | ||
182 | usermode_helper, cmd, mb, | ||
183 | (ret >> 8) & 0xff, ret); | ||
184 | |||
185 | if (ret < 0) /* Ignore any ERRNOs we got. */ | ||
186 | ret = 0; | ||
187 | |||
188 | return ret; | ||
189 | } | ||
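/*
 * Note: call_usermodehelper() returns a wait()-style status, so the
 * logging above extracts the helper's exit code as (ret >> 8) & 0xff;
 * a helper ending with "exit 5" is reported as "exit code 5 (0x500)".
 */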
190 | |||
191 | enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) | ||
192 | { | ||
193 | char *ex_to_string; | ||
194 | int r; | ||
195 | enum drbd_disk_state nps; | ||
196 | enum drbd_fencing_p fp; | ||
197 | |||
198 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); | ||
199 | |||
200 | if (get_ldev_if_state(mdev, D_CONSISTENT)) { | ||
201 | fp = mdev->ldev->dc.fencing; | ||
202 | put_ldev(mdev); | ||
203 | } else { | ||
204 | dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); | ||
205 | return mdev->state.pdsk; | ||
206 | } | ||
207 | |||
208 | if (fp == FP_STONITH) | ||
209 | _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE); | ||
210 | |||
211 | r = drbd_khelper(mdev, "fence-peer"); | ||
212 | |||
213 | switch ((r>>8) & 0xff) { | ||
214 | case 3: /* peer is inconsistent */ | ||
215 | ex_to_string = "peer is inconsistent or worse"; | ||
216 | nps = D_INCONSISTENT; | ||
217 | break; | ||
218 | case 4: /* peer got outdated, or was already outdated */ | ||
219 | ex_to_string = "peer was fenced"; | ||
220 | nps = D_OUTDATED; | ||
221 | break; | ||
222 | case 5: /* peer was down */ | ||
223 | if (mdev->state.disk == D_UP_TO_DATE) { | ||
224 | /* we will create (or already have created) a new UUID anyway... */ | ||
225 | ex_to_string = "peer is unreachable, assumed to be dead"; | ||
226 | nps = D_OUTDATED; | ||
227 | } else { | ||
228 | ex_to_string = "peer unreachable, doing nothing since disk != UpToDate"; | ||
229 | nps = mdev->state.pdsk; | ||
230 | } | ||
231 | break; | ||
232 | case 6: /* Peer is primary, voluntarily outdate myself. | ||
233 | * This is useful when an unconnected R_SECONDARY is asked to | ||
234 | * become R_PRIMARY, but finds the other peer being active. */ | ||
235 | ex_to_string = "peer is active"; | ||
236 | dev_warn(DEV, "Peer is primary, outdating myself.\n"); | ||
237 | nps = D_UNKNOWN; | ||
238 | _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE); | ||
239 | break; | ||
240 | case 7: | ||
241 | if (fp != FP_STONITH) | ||
242 | dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n"); | ||
243 | ex_to_string = "peer was stonithed"; | ||
244 | nps = D_OUTDATED; | ||
245 | break; | ||
246 | default: | ||
247 | /* The script is broken ... */ | ||
248 | nps = D_UNKNOWN; | ||
249 | dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); | ||
250 | return nps; | ||
251 | } | ||
252 | |||
253 | dev_info(DEV, "fence-peer helper returned %d (%s)\n", | ||
254 | (r>>8) & 0xff, ex_to_string); | ||
255 | return nps; | ||
256 | } | ||
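/*
 * Summary of the fence-peer helper exit codes handled above:
 * 3 = peer inconsistent, 4 = peer outdated, 5 = peer down/unreachable,
 * 6 = peer is primary (outdate myself), 7 = peer was stonithed;
 * anything else is treated as a broken helper script.
 */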
257 | |||
258 | |||
259 | int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) | ||
260 | { | ||
261 | const int max_tries = 4; | ||
262 | int r = 0; | ||
263 | int try = 0; | ||
264 | int forced = 0; | ||
265 | union drbd_state mask, val; | ||
266 | enum drbd_disk_state nps; | ||
267 | |||
268 | if (new_role == R_PRIMARY) | ||
269 | request_ping(mdev); /* Detect a dead peer ASAP */ | ||
270 | |||
271 | mutex_lock(&mdev->state_mutex); | ||
272 | |||
273 | mask.i = 0; mask.role = R_MASK; | ||
274 | val.i = 0; val.role = new_role; | ||
275 | |||
276 | while (try++ < max_tries) { | ||
277 | r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); | ||
278 | |||
279 | /* in case we first succeeded to outdate, | ||
280 | * but now suddenly could establish a connection */ | ||
281 | if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) { | ||
282 | val.pdsk = 0; | ||
283 | mask.pdsk = 0; | ||
284 | continue; | ||
285 | } | ||
286 | |||
287 | if (r == SS_NO_UP_TO_DATE_DISK && force && | ||
288 | (mdev->state.disk == D_INCONSISTENT || | ||
289 | mdev->state.disk == D_OUTDATED)) { | ||
290 | mask.disk = D_MASK; | ||
291 | val.disk = D_UP_TO_DATE; | ||
292 | forced = 1; | ||
293 | continue; | ||
294 | } | ||
295 | |||
296 | if (r == SS_NO_UP_TO_DATE_DISK && | ||
297 | mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) { | ||
298 | D_ASSERT(mdev->state.pdsk == D_UNKNOWN); | ||
299 | nps = drbd_try_outdate_peer(mdev); | ||
300 | |||
301 | if (nps == D_OUTDATED || nps == D_INCONSISTENT) { | ||
302 | val.disk = D_UP_TO_DATE; | ||
303 | mask.disk = D_MASK; | ||
304 | } | ||
305 | |||
306 | val.pdsk = nps; | ||
307 | mask.pdsk = D_MASK; | ||
308 | |||
309 | continue; | ||
310 | } | ||
311 | |||
312 | if (r == SS_NOTHING_TO_DO) | ||
313 | goto fail; | ||
314 | if (r == SS_PRIMARY_NOP && mask.pdsk == 0) { | ||
315 | nps = drbd_try_outdate_peer(mdev); | ||
316 | |||
317 | if (force && nps > D_OUTDATED) { | ||
318 | dev_warn(DEV, "Forced into split brain situation!\n"); | ||
319 | nps = D_OUTDATED; | ||
320 | } | ||
321 | |||
322 | mask.pdsk = D_MASK; | ||
323 | val.pdsk = nps; | ||
324 | |||
325 | continue; | ||
326 | } | ||
327 | if (r == SS_TWO_PRIMARIES) { | ||
328 | /* Maybe the peer is detected as dead very soon... | ||
329 | retry at most once more in this case. */ | ||
330 | __set_current_state(TASK_INTERRUPTIBLE); | ||
331 | schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10); | ||
332 | if (try < max_tries) | ||
333 | try = max_tries - 1; | ||
334 | continue; | ||
335 | } | ||
336 | if (r < SS_SUCCESS) { | ||
337 | r = _drbd_request_state(mdev, mask, val, | ||
338 | CS_VERBOSE + CS_WAIT_COMPLETE); | ||
339 | if (r < SS_SUCCESS) | ||
340 | goto fail; | ||
341 | } | ||
342 | break; | ||
343 | } | ||
344 | |||
345 | if (r < SS_SUCCESS) | ||
346 | goto fail; | ||
347 | |||
348 | if (forced) | ||
349 | dev_warn(DEV, "Forced to consider local data as UpToDate!\n"); | ||
350 | |||
351 | /* Wait until nothing is on the fly :) */ | ||
352 | wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0); | ||
353 | |||
354 | if (new_role == R_SECONDARY) { | ||
355 | set_disk_ro(mdev->vdisk, TRUE); | ||
356 | if (get_ldev(mdev)) { | ||
357 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; | ||
358 | put_ldev(mdev); | ||
359 | } | ||
360 | } else { | ||
361 | if (get_net_conf(mdev)) { | ||
362 | mdev->net_conf->want_lose = 0; | ||
363 | put_net_conf(mdev); | ||
364 | } | ||
365 | set_disk_ro(mdev->vdisk, FALSE); | ||
366 | if (get_ldev(mdev)) { | ||
367 | if (((mdev->state.conn < C_CONNECTED || | ||
368 | mdev->state.pdsk <= D_FAILED) | ||
369 | && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced) | ||
370 | drbd_uuid_new_current(mdev); | ||
371 | |||
372 | mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; | ||
373 | put_ldev(mdev); | ||
374 | } | ||
375 | } | ||
376 | |||
377 | if ((new_role == R_SECONDARY) && get_ldev(mdev)) { | ||
378 | drbd_al_to_on_disk_bm(mdev); | ||
379 | put_ldev(mdev); | ||
380 | } | ||
381 | |||
382 | if (mdev->state.conn >= C_WF_REPORT_PARAMS) { | ||
383 | /* if this was forced, we should consider sync */ | ||
384 | if (forced) | ||
385 | drbd_send_uuids(mdev); | ||
386 | drbd_send_state(mdev); | ||
387 | } | ||
388 | |||
389 | drbd_md_sync(mdev); | ||
390 | |||
391 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | ||
392 | fail: | ||
393 | mutex_unlock(&mdev->state_mutex); | ||
394 | return r; | ||
395 | } | ||
396 | |||
397 | |||
398 | static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
399 | struct drbd_nl_cfg_reply *reply) | ||
400 | { | ||
401 | struct primary primary_args; | ||
402 | |||
403 | memset(&primary_args, 0, sizeof(struct primary)); | ||
404 | if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) { | ||
405 | reply->ret_code = ERR_MANDATORY_TAG; | ||
406 | return 0; | ||
407 | } | ||
408 | |||
409 | reply->ret_code = | ||
410 | drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer); | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
416 | struct drbd_nl_cfg_reply *reply) | ||
417 | { | ||
418 | reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0); | ||
419 | |||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | /* initializes the md.*_offset members, so we are able to find | ||
424 | * the on disk meta data */ | ||
425 | static void drbd_md_set_sector_offsets(struct drbd_conf *mdev, | ||
426 | struct drbd_backing_dev *bdev) | ||
427 | { | ||
428 | sector_t md_size_sect = 0; | ||
429 | switch (bdev->dc.meta_dev_idx) { | ||
430 | default: | ||
431 | /* v07 style fixed size indexed meta data */ | ||
432 | bdev->md.md_size_sect = MD_RESERVED_SECT; | ||
433 | bdev->md.md_offset = drbd_md_ss__(mdev, bdev); | ||
434 | bdev->md.al_offset = MD_AL_OFFSET; | ||
435 | bdev->md.bm_offset = MD_BM_OFFSET; | ||
436 | break; | ||
437 | case DRBD_MD_INDEX_FLEX_EXT: | ||
438 | /* just occupy the full device; unit: sectors */ | ||
439 | bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev); | ||
440 | bdev->md.md_offset = 0; | ||
441 | bdev->md.al_offset = MD_AL_OFFSET; | ||
442 | bdev->md.bm_offset = MD_BM_OFFSET; | ||
443 | break; | ||
444 | case DRBD_MD_INDEX_INTERNAL: | ||
445 | case DRBD_MD_INDEX_FLEX_INT: | ||
446 | bdev->md.md_offset = drbd_md_ss__(mdev, bdev); | ||
447 | /* al size is still fixed */ | ||
448 | bdev->md.al_offset = -MD_AL_MAX_SIZE; | ||
449 | /* we need (slightly less than) ~ this many bitmap sectors: */ | ||
450 | md_size_sect = drbd_get_capacity(bdev->backing_bdev); | ||
451 | md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT); | ||
452 | md_size_sect = BM_SECT_TO_EXT(md_size_sect); | ||
453 | md_size_sect = ALIGN(md_size_sect, 8); | ||
454 | |||
455 | /* plus the "drbd meta data super block", | ||
456 | * and the activity log; */ | ||
457 | md_size_sect += MD_BM_OFFSET; | ||
458 | |||
459 | bdev->md.md_size_sect = md_size_sect; | ||
460 | /* bitmap offset is adjusted by 'super' block size */ | ||
461 | bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET; | ||
462 | break; | ||
463 | } | ||
464 | } | ||
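/*
 * Resulting layout for internal meta data (illustrative; al_offset and
 * bm_offset are negative, i.e. relative to md_offset near the end of
 * the backing device):
 *
 *   [ ....... usable data ....... | bitmap | activity log | md super ]
 *                                 ^bm_offset ^al_offset    ^md_offset
 */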
465 | |||
466 | char *ppsize(char *buf, unsigned long long size) | ||
467 | { | ||
468 | /* Needs 9 bytes at max. */ | ||
469 | static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' }; | ||
470 | int base = 0; | ||
471 | while (size >= 10000) { | ||
472 | /* shift + round */ | ||
473 | size = (size >> 10) + !!(size & (1<<9)); | ||
474 | base++; | ||
475 | } | ||
476 | sprintf(buf, "%lu %cB", (long)size, units[base]); | ||
477 | |||
478 | return buf; | ||
479 | } | ||
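/*
 * Worked example (illustrative): ppsize(buf, 2500000), with size in KB,
 * shifts by 10 with round-to-nearest until the value drops below 10000,
 * yielding "2441 MB" (2,500,000 / 1024 = 2441.4); the
 * "+ !!(size & (1<<9))" rounds up whenever the discarded low ten bits
 * are >= 512.
 */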
480 | |||
481 | /* there is still a theoretical deadlock when called from receiver | ||
482 | * on a D_INCONSISTENT R_PRIMARY: | ||
483 | * remote READ does inc_ap_bio, receiver would need to receive answer | ||
484 | * packet from remote to dec_ap_bio again. | ||
485 | * receiver receive_sizes(), comes here, | ||
486 | * waits for ap_bio_cnt == 0. -> deadlock. | ||
487 | * but this cannot happen, actually, because: | ||
488 | * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable | ||
489 | * (not connected, or bad/no disk on peer): | ||
490 | * see drbd_fail_request_early, ap_bio_cnt is zero. | ||
491 | * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET: | ||
492 | * peer may not initiate a resize. | ||
493 | */ | ||
494 | void drbd_suspend_io(struct drbd_conf *mdev) | ||
495 | { | ||
496 | set_bit(SUSPEND_IO, &mdev->flags); | ||
497 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); | ||
498 | } | ||
499 | |||
500 | void drbd_resume_io(struct drbd_conf *mdev) | ||
501 | { | ||
502 | clear_bit(SUSPEND_IO, &mdev->flags); | ||
503 | wake_up(&mdev->misc_wait); | ||
504 | } | ||
505 | |||
506 | /** | ||
507 | * drbd_determin_dev_size() - Sets the right device size obeying all constraints | ||
508 | * @mdev: DRBD device. | ||
509 | * | ||
510 | * Returns dev_size_error on failure; otherwise unchanged, shrunk, or grew. | ||
511 | * You should call drbd_md_sync() after calling this function. | ||
512 | */ | ||
513 | enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local) | ||
514 | { | ||
515 | sector_t prev_first_sect, prev_size; /* previous meta location */ | ||
516 | sector_t la_size; | ||
517 | sector_t size; | ||
518 | char ppb[10]; | ||
519 | |||
520 | int md_moved, la_size_changed; | ||
521 | enum determine_dev_size rv = unchanged; | ||
522 | |||
523 | /* race: | ||
524 | * application request passes inc_ap_bio, | ||
525 | * but then cannot get an AL-reference. | ||
526 | * this function later may wait on ap_bio_cnt == 0. -> deadlock. | ||
527 | * | ||
528 | * to avoid that: | ||
529 | * Suspend IO right here. | ||
530 | * still lock the act_log to not trigger ASSERTs there. | ||
531 | */ | ||
532 | drbd_suspend_io(mdev); | ||
533 | |||
534 | /* no wait necessary anymore, actually we could assert that */ | ||
535 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); | ||
536 | |||
537 | prev_first_sect = drbd_md_first_sector(mdev->ldev); | ||
538 | prev_size = mdev->ldev->md.md_size_sect; | ||
539 | la_size = mdev->ldev->md.la_size_sect; | ||
540 | |||
541 | /* TODO: should only be some assert here, not (re)init... */ | ||
542 | drbd_md_set_sector_offsets(mdev, mdev->ldev); | ||
543 | |||
544 | size = drbd_new_dev_size(mdev, mdev->ldev); | ||
545 | |||
546 | if (drbd_get_capacity(mdev->this_bdev) != size || | ||
547 | drbd_bm_capacity(mdev) != size) { | ||
548 | int err; | ||
549 | err = drbd_bm_resize(mdev, size); | ||
550 | if (unlikely(err)) { | ||
551 | /* currently there is only one error: ENOMEM! */ | ||
552 | size = drbd_bm_capacity(mdev)>>1; | ||
553 | if (size == 0) { | ||
554 | dev_err(DEV, "OUT OF MEMORY! " | ||
555 | "Could not allocate bitmap!\n"); | ||
556 | } else { | ||
557 | dev_err(DEV, "BM resizing failed. " | ||
558 | "Leaving size unchanged at size = %lu KB\n", | ||
559 | (unsigned long)size); | ||
560 | } | ||
561 | rv = dev_size_error; | ||
562 | } | ||
563 | /* racy, see comments above. */ | ||
564 | drbd_set_my_capacity(mdev, size); | ||
565 | mdev->ldev->md.la_size_sect = size; | ||
566 | dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), | ||
567 | (unsigned long long)size>>1); | ||
568 | } | ||
569 | if (rv == dev_size_error) | ||
570 | goto out; | ||
571 | |||
572 | la_size_changed = (la_size != mdev->ldev->md.la_size_sect); | ||
573 | |||
574 | md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) | ||
575 | || prev_size != mdev->ldev->md.md_size_sect; | ||
576 | |||
577 | if (la_size_changed || md_moved) { | ||
578 | drbd_al_shrink(mdev); /* All extents inactive. */ | ||
579 | dev_info(DEV, "Writing the whole bitmap, %s\n", | ||
580 | la_size_changed && md_moved ? "size changed and md moved" : | ||
581 | la_size_changed ? "size changed" : "md moved"); | ||
582 | rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ | ||
583 | drbd_md_mark_dirty(mdev); | ||
584 | } | ||
585 | |||
586 | if (size > la_size) | ||
587 | rv = grew; | ||
588 | if (size < la_size) | ||
589 | rv = shrunk; | ||
590 | out: | ||
591 | lc_unlock(mdev->act_log); | ||
592 | wake_up(&mdev->al_wait); | ||
593 | drbd_resume_io(mdev); | ||
594 | |||
595 | return rv; | ||
596 | } | ||
597 | |||
598 | sector_t | ||
599 | drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) | ||
600 | { | ||
601 | sector_t p_size = mdev->p_size; /* partner's disk size. */ | ||
602 | sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ | ||
603 | sector_t m_size; /* my size */ | ||
604 | sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ | ||
605 | sector_t size = 0; | ||
606 | |||
607 | m_size = drbd_get_max_capacity(bdev); | ||
608 | |||
609 | if (p_size && m_size) { | ||
610 | size = min_t(sector_t, p_size, m_size); | ||
611 | } else { | ||
612 | if (la_size) { | ||
613 | size = la_size; | ||
614 | if (m_size && m_size < size) | ||
615 | size = m_size; | ||
616 | if (p_size && p_size < size) | ||
617 | size = p_size; | ||
618 | } else { | ||
619 | if (m_size) | ||
620 | size = m_size; | ||
621 | if (p_size) | ||
622 | size = p_size; | ||
623 | } | ||
624 | } | ||
625 | |||
626 | if (size == 0) | ||
627 | dev_err(DEV, "Both nodes diskless!\n"); | ||
628 | |||
629 | if (u_size) { | ||
630 | if (u_size > size) | ||
631 | dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", | ||
632 | (unsigned long)u_size>>1, (unsigned long)size>>1); | ||
633 | else | ||
634 | size = u_size; | ||
635 | } | ||
636 | |||
637 | return size; | ||
638 | } | ||
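/*
 * In short: with both sizes known the agreed size is min(peer, local);
 * with only one side known the last agreed size wins, clipped to
 * whatever is known; a user-requested disk_size may only select a
 * smaller value, never one beyond the computed maximum.
 */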
639 | |||
640 | /** | ||
641 | * drbd_check_al_size() - Ensures that the AL is of the right size | ||
642 | * @mdev: DRBD device. | ||
643 | * | ||
644 | * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation | ||
645 | * failed, and 0 on success. You should call drbd_md_sync() after you called | ||
646 | * this function. | ||
647 | */ | ||
648 | static int drbd_check_al_size(struct drbd_conf *mdev) | ||
649 | { | ||
650 | struct lru_cache *n, *t; | ||
651 | struct lc_element *e; | ||
652 | unsigned int in_use; | ||
653 | int i; | ||
654 | |||
655 | ERR_IF(mdev->sync_conf.al_extents < 7) | ||
656 | mdev->sync_conf.al_extents = 127; | ||
657 | |||
658 | if (mdev->act_log && | ||
659 | mdev->act_log->nr_elements == mdev->sync_conf.al_extents) | ||
660 | return 0; | ||
661 | |||
662 | in_use = 0; | ||
663 | t = mdev->act_log; | ||
664 | n = lc_create("act_log", drbd_al_ext_cache, | ||
665 | mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); | ||
666 | |||
667 | if (n == NULL) { | ||
668 | dev_err(DEV, "Cannot allocate act_log lru!\n"); | ||
669 | return -ENOMEM; | ||
670 | } | ||
671 | spin_lock_irq(&mdev->al_lock); | ||
672 | if (t) { | ||
673 | for (i = 0; i < t->nr_elements; i++) { | ||
674 | e = lc_element_by_index(t, i); | ||
675 | if (e->refcnt) | ||
676 | dev_err(DEV, "refcnt(%d)==%d\n", | ||
677 | e->lc_number, e->refcnt); | ||
678 | in_use += e->refcnt; | ||
679 | } | ||
680 | } | ||
681 | if (!in_use) | ||
682 | mdev->act_log = n; | ||
683 | spin_unlock_irq(&mdev->al_lock); | ||
684 | if (in_use) { | ||
685 | dev_err(DEV, "Activity log still in use!\n"); | ||
686 | lc_destroy(n); | ||
687 | return -EBUSY; | ||
688 | } else { | ||
689 | if (t) | ||
690 | lc_destroy(t); | ||
691 | } | ||
692 | drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */ | ||
693 | return 0; | ||
694 | } | ||
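/*
 * Note the swap pattern above: the replacement LRU is allocated up
 * front, installed under al_lock only if no old extent is still
 * referenced, and the losing cache is destroyed outside the lock.
 */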
695 | |||
696 | void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local) | ||
697 | { | ||
698 | struct request_queue * const q = mdev->rq_queue; | ||
699 | struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; | ||
700 | int max_segments = mdev->ldev->dc.max_bio_bvecs; | ||
701 | |||
702 | if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv) | ||
703 | max_seg_s = PAGE_SIZE; | ||
704 | |||
705 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | ||
706 | |||
707 | blk_queue_max_sectors(q, max_seg_s >> 9); | ||
708 | blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS); | ||
709 | blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS); | ||
710 | blk_queue_max_segment_size(q, max_seg_s); | ||
711 | blk_queue_logical_block_size(q, 512); | ||
712 | blk_queue_segment_boundary(q, PAGE_SIZE-1); | ||
713 | blk_stack_limits(&q->limits, &b->limits, 0); | ||
714 | |||
715 | if (b->merge_bvec_fn) | ||
716 | dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n", | ||
717 | b->merge_bvec_fn); | ||
718 | dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); | ||
719 | |||
720 | if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { | ||
721 | dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", | ||
722 | q->backing_dev_info.ra_pages, | ||
723 | b->backing_dev_info.ra_pages); | ||
724 | q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; | ||
725 | } | ||
726 | } | ||
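/*
 * Rationale for the PAGE_SIZE fallback above (sketch): with a lower
 * level merge_bvec_fn we cannot predict which bios the backing device
 * will accept, so unless use_bmbv overrides it we restrict ourselves
 * to single-page segments, which every block device must handle.
 */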
727 | |||
728 | /* serialize deconfig (worker exiting, doing cleanup) | ||
729 | * and reconfig (drbdsetup disk, drbdsetup net) | ||
730 | * | ||
731 | * wait for a potentially exiting worker, then restart it, | ||
732 | * or start a new one. | ||
733 | */ | ||
734 | static void drbd_reconfig_start(struct drbd_conf *mdev) | ||
735 | { | ||
736 | wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); | ||
737 | wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); | ||
738 | drbd_thread_start(&mdev->worker); | ||
739 | } | ||
740 | |||
741 | /* if still unconfigured, stops worker again. | ||
742 | * if configured now, clears CONFIG_PENDING. | ||
743 | * wakes potential waiters */ | ||
744 | static void drbd_reconfig_done(struct drbd_conf *mdev) | ||
745 | { | ||
746 | spin_lock_irq(&mdev->req_lock); | ||
747 | if (mdev->state.disk == D_DISKLESS && | ||
748 | mdev->state.conn == C_STANDALONE && | ||
749 | mdev->state.role == R_SECONDARY) { | ||
750 | set_bit(DEVICE_DYING, &mdev->flags); | ||
751 | drbd_thread_stop_nowait(&mdev->worker); | ||
752 | } else | ||
753 | clear_bit(CONFIG_PENDING, &mdev->flags); | ||
754 | spin_unlock_irq(&mdev->req_lock); | ||
755 | wake_up(&mdev->state_wait); | ||
756 | } | ||
757 | |||
758 | /* always returns 0; | ||
759 | * interesting return code is in reply->ret_code */ | ||
760 | static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
761 | struct drbd_nl_cfg_reply *reply) | ||
762 | { | ||
763 | enum drbd_ret_codes retcode; | ||
764 | enum determine_dev_size dd; | ||
765 | sector_t max_possible_sectors; | ||
766 | sector_t min_md_device_sectors; | ||
767 | struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */ | ||
768 | struct inode *inode, *inode2; | ||
769 | struct lru_cache *resync_lru = NULL; | ||
770 | union drbd_state ns, os; | ||
771 | int rv; | ||
772 | int cp_discovered = 0; | ||
773 | int logical_block_size; | ||
774 | |||
775 | drbd_reconfig_start(mdev); | ||
776 | |||
777 | /* if you want to reconfigure, please tear down first */ | ||
778 | if (mdev->state.disk > D_DISKLESS) { | ||
779 | retcode = ERR_DISK_CONFIGURED; | ||
780 | goto fail; | ||
781 | } | ||
782 | |||
783 | /* allocation not in the IO path, cqueue thread context */ | ||
784 | nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); | ||
785 | if (!nbc) { | ||
786 | retcode = ERR_NOMEM; | ||
787 | goto fail; | ||
788 | } | ||
789 | |||
790 | nbc->dc.disk_size = DRBD_DISK_SIZE_SECT_DEF; | ||
791 | nbc->dc.on_io_error = DRBD_ON_IO_ERROR_DEF; | ||
792 | nbc->dc.fencing = DRBD_FENCING_DEF; | ||
793 | nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF; | ||
794 | |||
795 | if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) { | ||
796 | retcode = ERR_MANDATORY_TAG; | ||
797 | goto fail; | ||
798 | } | ||
799 | |||
800 | if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) { | ||
801 | retcode = ERR_MD_IDX_INVALID; | ||
802 | goto fail; | ||
803 | } | ||
804 | |||
805 | nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); | ||
806 | if (IS_ERR(nbc->lo_file)) { | ||
807 | dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, | ||
808 | PTR_ERR(nbc->lo_file)); | ||
809 | nbc->lo_file = NULL; | ||
810 | retcode = ERR_OPEN_DISK; | ||
811 | goto fail; | ||
812 | } | ||
813 | |||
814 | inode = nbc->lo_file->f_dentry->d_inode; | ||
815 | |||
816 | if (!S_ISBLK(inode->i_mode)) { | ||
817 | retcode = ERR_DISK_NOT_BDEV; | ||
818 | goto fail; | ||
819 | } | ||
820 | |||
821 | nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0); | ||
822 | if (IS_ERR(nbc->md_file)) { | ||
823 | dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev, | ||
824 | PTR_ERR(nbc->md_file)); | ||
825 | nbc->md_file = NULL; | ||
826 | retcode = ERR_OPEN_MD_DISK; | ||
827 | goto fail; | ||
828 | } | ||
829 | |||
830 | inode2 = nbc->md_file->f_dentry->d_inode; | ||
831 | |||
832 | if (!S_ISBLK(inode2->i_mode)) { | ||
833 | retcode = ERR_MD_NOT_BDEV; | ||
834 | goto fail; | ||
835 | } | ||
836 | |||
837 | nbc->backing_bdev = inode->i_bdev; | ||
838 | if (bd_claim(nbc->backing_bdev, mdev)) { | ||
839 | printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n", | ||
840 | nbc->backing_bdev, mdev, | ||
841 | nbc->backing_bdev->bd_holder, | ||
842 | nbc->backing_bdev->bd_contains->bd_holder, | ||
843 | nbc->backing_bdev->bd_holders); | ||
844 | retcode = ERR_BDCLAIM_DISK; | ||
845 | goto fail; | ||
846 | } | ||
847 | |||
848 | resync_lru = lc_create("resync", drbd_bm_ext_cache, | ||
849 | 61, sizeof(struct bm_extent), | ||
850 | offsetof(struct bm_extent, lce)); | ||
851 | if (!resync_lru) { | ||
852 | retcode = ERR_NOMEM; | ||
853 | goto release_bdev_fail; | ||
854 | } | ||
855 | |||
856 | /* meta_dev_idx >= 0: external fixed size, | ||
857 | * possibly multiple drbd sharing one meta device. | ||
858 | * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is | ||
859 | * not yet used by some other drbd minor! | ||
860 | * (if you use drbd.conf + drbdadm, | ||
861 | * that should check it for you already; but if you don't, or someone | ||
862 | * fooled it, we need to double check here) */ | ||
863 | nbc->md_bdev = inode2->i_bdev; | ||
864 | if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev | ||
865 | : (void *) drbd_m_holder)) { | ||
866 | retcode = ERR_BDCLAIM_MD_DISK; | ||
867 | goto release_bdev_fail; | ||
868 | } | ||
869 | |||
870 | if ((nbc->backing_bdev == nbc->md_bdev) != | ||
871 | (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL || | ||
872 | nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) { | ||
873 | retcode = ERR_MD_IDX_INVALID; | ||
874 | goto release_bdev2_fail; | ||
875 | } | ||
876 | |||
877 | /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */ | ||
878 | drbd_md_set_sector_offsets(mdev, nbc); | ||
879 | |||
880 | if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) { | ||
881 | dev_err(DEV, "max capacity %llu smaller than disk size %llu\n", | ||
882 | (unsigned long long) drbd_get_max_capacity(nbc), | ||
883 | (unsigned long long) nbc->dc.disk_size); | ||
884 | retcode = ERR_DISK_TO_SMALL; | ||
885 | goto release_bdev2_fail; | ||
886 | } | ||
887 | |||
888 | if (nbc->dc.meta_dev_idx < 0) { | ||
889 | max_possible_sectors = DRBD_MAX_SECTORS_FLEX; | ||
890 | /* at least one MB, otherwise it does not make sense */ | ||
891 | min_md_device_sectors = (2<<10); | ||
892 | } else { | ||
893 | max_possible_sectors = DRBD_MAX_SECTORS; | ||
894 | min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1); | ||
895 | } | ||
896 | |||
897 | if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) { | ||
898 | retcode = ERR_MD_DISK_TO_SMALL; | ||
899 | dev_warn(DEV, "refusing attach: md-device too small, " | ||
900 | "at least %llu sectors needed for this meta-disk type\n", | ||
901 | (unsigned long long) min_md_device_sectors); | ||
902 | goto release_bdev2_fail; | ||
903 | } | ||
904 | |||
905 | /* Make sure the new disk is big enough | ||
906 | * (we may currently be R_PRIMARY with no local disk...) */ | ||
907 | if (drbd_get_max_capacity(nbc) < | ||
908 | drbd_get_capacity(mdev->this_bdev)) { | ||
909 | retcode = ERR_DISK_TO_SMALL; | ||
910 | goto release_bdev2_fail; | ||
911 | } | ||
912 | |||
913 | nbc->known_size = drbd_get_capacity(nbc->backing_bdev); | ||
914 | |||
915 | if (nbc->known_size > max_possible_sectors) { | ||
916 | dev_warn(DEV, "==> truncating very big lower level device " | ||
917 | "to currently maximum possible %llu sectors <==\n", | ||
918 | (unsigned long long) max_possible_sectors); | ||
919 | if (nbc->dc.meta_dev_idx >= 0) | ||
920 | dev_warn(DEV, "==>> using internal or flexible " | ||
921 | "meta data may help <<==\n"); | ||
922 | } | ||
923 | |||
924 | drbd_suspend_io(mdev); | ||
925 | /* also wait for the last barrier ack. */ | ||
926 | wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); | ||
927 | /* and for any other previously queued work */ | ||
928 | drbd_flush_workqueue(mdev); | ||
929 | |||
930 | retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE); | ||
931 | drbd_resume_io(mdev); | ||
932 | if (retcode < SS_SUCCESS) | ||
933 | goto release_bdev2_fail; | ||
934 | |||
935 | if (!get_ldev_if_state(mdev, D_ATTACHING)) | ||
936 | goto force_diskless; | ||
937 | |||
938 | drbd_md_set_sector_offsets(mdev, nbc); | ||
939 | |||
940 | if (!mdev->bitmap) { | ||
941 | if (drbd_bm_init(mdev)) { | ||
942 | retcode = ERR_NOMEM; | ||
943 | goto force_diskless_dec; | ||
944 | } | ||
945 | } | ||
946 | |||
947 | retcode = drbd_md_read(mdev, nbc); | ||
948 | if (retcode != NO_ERROR) | ||
949 | goto force_diskless_dec; | ||
950 | |||
951 | if (mdev->state.conn < C_CONNECTED && | ||
952 | mdev->state.role == R_PRIMARY && | ||
953 | (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { | ||
954 | dev_err(DEV, "Can only attach to data with current UUID=%016llX\n", | ||
955 | (unsigned long long)mdev->ed_uuid); | ||
956 | retcode = ERR_DATA_NOT_CURRENT; | ||
957 | goto force_diskless_dec; | ||
958 | } | ||
959 | |||
960 | /* Since we are diskless, fix the activity log first... */ | ||
961 | if (drbd_check_al_size(mdev)) { | ||
962 | retcode = ERR_NOMEM; | ||
963 | goto force_diskless_dec; | ||
964 | } | ||
965 | |||
966 | /* Prevent shrinking of consistent devices ! */ | ||
967 | if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && | ||
968 | drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) { | ||
969 | dev_warn(DEV, "refusing to truncate a consistent device\n"); | ||
970 | retcode = ERR_DISK_TO_SMALL; | ||
971 | goto force_diskless_dec; | ||
972 | } | ||
973 | |||
974 | if (!drbd_al_read_log(mdev, nbc)) { | ||
975 | retcode = ERR_IO_MD_DISK; | ||
976 | goto force_diskless_dec; | ||
977 | } | ||
978 | |||
979 | /* allocate a second IO page if logical_block_size != 512 */ | ||
980 | logical_block_size = bdev_logical_block_size(nbc->md_bdev); | ||
981 | if (logical_block_size == 0) | ||
982 | logical_block_size = MD_SECTOR_SIZE; | ||
983 | |||
984 | if (logical_block_size != MD_SECTOR_SIZE) { | ||
985 | if (!mdev->md_io_tmpp) { | ||
986 | struct page *page = alloc_page(GFP_NOIO); | ||
987 | if (!page) | ||
988 | goto force_diskless_dec; | ||
989 | |||
990 | dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n", | ||
991 | logical_block_size, MD_SECTOR_SIZE); | ||
992 | dev_warn(DEV, "Workaround engaged (has performance impact).\n"); | ||
993 | |||
994 | mdev->md_io_tmpp = page; | ||
995 | } | ||
996 | } | ||
997 | |||
998 | /* Reset the "barriers don't work" bits here, then force meta data to | ||
999 | * be written, to ensure we determine if barriers are supported. */ | ||
1000 | if (nbc->dc.no_md_flush) | ||
1001 | set_bit(MD_NO_BARRIER, &mdev->flags); | ||
1002 | else | ||
1003 | clear_bit(MD_NO_BARRIER, &mdev->flags); | ||
1004 | |||
1005 | /* Point of no return reached. | ||
1006 | * Devices and memory are no longer released by error cleanup below. | ||
1007 | * now mdev takes over responsibility, and the state engine should | ||
1008 | * clean it up somewhere. */ | ||
1009 | D_ASSERT(mdev->ldev == NULL); | ||
1010 | mdev->ldev = nbc; | ||
1011 | mdev->resync = resync_lru; | ||
1012 | nbc = NULL; | ||
1013 | resync_lru = NULL; | ||
1014 | |||
1015 | mdev->write_ordering = WO_bio_barrier; | ||
1016 | drbd_bump_write_ordering(mdev, WO_bio_barrier); | ||
1017 | |||
1018 | if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) | ||
1019 | set_bit(CRASHED_PRIMARY, &mdev->flags); | ||
1020 | else | ||
1021 | clear_bit(CRASHED_PRIMARY, &mdev->flags); | ||
1022 | |||
1023 | if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) { | ||
1024 | set_bit(CRASHED_PRIMARY, &mdev->flags); | ||
1025 | cp_discovered = 1; | ||
1026 | } | ||
1027 | |||
1028 | mdev->send_cnt = 0; | ||
1029 | mdev->recv_cnt = 0; | ||
1030 | mdev->read_cnt = 0; | ||
1031 | mdev->writ_cnt = 0; | ||
1032 | |||
1033 | drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); | ||
1034 | |||
1035 | /* If I am currently not R_PRIMARY, | ||
1036 | * but meta data primary indicator is set, | ||
1037 | * I just now recover from a hard crash, | ||
1038 | * and have been R_PRIMARY before that crash. | ||
1039 | * | ||
1040 | * Now, if I had no connection before that crash | ||
1041 | * (have been degraded R_PRIMARY), chances are that | ||
1042 | * I won't find my peer now either. | ||
1043 | * | ||
1044 | * In that case, and _only_ in that case, | ||
1045 | * we use the degr-wfc-timeout instead of the default, | ||
1046 | * so we can automatically recover from a crash of a | ||
1047 | * degraded but active "cluster" after a certain timeout. | ||
1048 | */ | ||
1049 | clear_bit(USE_DEGR_WFC_T, &mdev->flags); | ||
1050 | if (mdev->state.role != R_PRIMARY && | ||
1051 | drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && | ||
1052 | !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) | ||
1053 | set_bit(USE_DEGR_WFC_T, &mdev->flags); | ||
1054 | |||
1055 | dd = drbd_determin_dev_size(mdev); | ||
1056 | if (dd == dev_size_error) { | ||
1057 | retcode = ERR_NOMEM_BITMAP; | ||
1058 | goto force_diskless_dec; | ||
1059 | } else if (dd == grew) | ||
1060 | set_bit(RESYNC_AFTER_NEG, &mdev->flags); | ||
1061 | |||
1062 | if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) { | ||
1063 | dev_info(DEV, "Assuming that all blocks are out of sync " | ||
1064 | "(aka FullSync)\n"); | ||
1065 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) { | ||
1066 | retcode = ERR_IO_MD_DISK; | ||
1067 | goto force_diskless_dec; | ||
1068 | } | ||
1069 | } else { | ||
1070 | if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) { | ||
1071 | retcode = ERR_IO_MD_DISK; | ||
1072 | goto force_diskless_dec; | ||
1073 | } | ||
1074 | } | ||
1075 | |||
1076 | if (cp_discovered) { | ||
1077 | drbd_al_apply_to_bm(mdev); | ||
1078 | drbd_al_to_on_disk_bm(mdev); | ||
1079 | } | ||
1080 | |||
1081 | spin_lock_irq(&mdev->req_lock); | ||
1082 | os = mdev->state; | ||
1083 | ns.i = os.i; | ||
1084 | /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT disk state, | ||
1085 | otherwise investigate MDF_WAS_UP_TO_DATE... | ||
1086 | If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state, | ||
1087 | otherwise into D_CONSISTENT state. | ||
1088 | */ | ||
1089 | if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) { | ||
1090 | if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE)) | ||
1091 | ns.disk = D_CONSISTENT; | ||
1092 | else | ||
1093 | ns.disk = D_OUTDATED; | ||
1094 | } else { | ||
1095 | ns.disk = D_INCONSISTENT; | ||
1096 | } | ||
1097 | |||
1098 | if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED)) | ||
1099 | ns.pdsk = D_OUTDATED; | ||
1100 | |||
1101 | if (ns.disk == D_CONSISTENT && | ||
1102 | (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE)) | ||
1103 | ns.disk = D_UP_TO_DATE; | ||
1104 | |||
1105 | /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND, | ||
1106 | MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before | ||
1107 | this point, because drbd_request_state() modifies these | ||
1108 | flags. */ | ||
1109 | |||
1110 | /* In case we are C_CONNECTED postpone any decision on the new disk | ||
1111 | state after the negotiation phase. */ | ||
1112 | if (mdev->state.conn == C_CONNECTED) { | ||
1113 | mdev->new_state_tmp.i = ns.i; | ||
1114 | ns.i = os.i; | ||
1115 | ns.disk = D_NEGOTIATING; | ||
1116 | } | ||
1117 | |||
1118 | rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); | ||
1119 | ns = mdev->state; | ||
1120 | spin_unlock_irq(&mdev->req_lock); | ||
1121 | |||
1122 | if (rv < SS_SUCCESS) | ||
1123 | goto force_diskless_dec; | ||
1124 | |||
1125 | if (mdev->state.role == R_PRIMARY) | ||
1126 | mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1; | ||
1127 | else | ||
1128 | mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1; | ||
1129 | |||
1130 | drbd_md_mark_dirty(mdev); | ||
1131 | drbd_md_sync(mdev); | ||
1132 | |||
1133 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | ||
1134 | put_ldev(mdev); | ||
1135 | reply->ret_code = retcode; | ||
1136 | drbd_reconfig_done(mdev); | ||
1137 | return 0; | ||
1138 | |||
1139 | force_diskless_dec: | ||
1140 | put_ldev(mdev); | ||
1141 | force_diskless: | ||
1142 | drbd_force_state(mdev, NS(disk, D_DISKLESS)); | ||
1143 | drbd_md_sync(mdev); | ||
1144 | release_bdev2_fail: | ||
1145 | if (nbc) | ||
1146 | bd_release(nbc->md_bdev); | ||
1147 | release_bdev_fail: | ||
1148 | if (nbc) | ||
1149 | bd_release(nbc->backing_bdev); | ||
1150 | fail: | ||
1151 | if (nbc) { | ||
1152 | if (nbc->lo_file) | ||
1153 | fput(nbc->lo_file); | ||
1154 | if (nbc->md_file) | ||
1155 | fput(nbc->md_file); | ||
1156 | kfree(nbc); | ||
1157 | } | ||
1158 | lc_destroy(resync_lru); | ||
1159 | |||
1160 | reply->ret_code = retcode; | ||
1161 | drbd_reconfig_done(mdev); | ||
1162 | return 0; | ||
1163 | } | ||
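/*
 * The error ladder above unwinds in reverse order of acquisition:
 * release_bdev2_fail drops the md_bdev claim, release_bdev_fail the
 * backing_bdev claim, and fail closes the files and frees nbc and
 * resync_lru; past the "point of no return" mdev owns these objects.
 */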
1164 | |||
1165 | static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1166 | struct drbd_nl_cfg_reply *reply) | ||
1167 | { | ||
1168 | reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); | ||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1172 | static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1173 | struct drbd_nl_cfg_reply *reply) | ||
1174 | { | ||
1175 | int i, ns; | ||
1176 | enum drbd_ret_codes retcode; | ||
1177 | struct net_conf *new_conf = NULL; | ||
1178 | struct crypto_hash *tfm = NULL; | ||
1179 | struct crypto_hash *integrity_w_tfm = NULL; | ||
1180 | struct crypto_hash *integrity_r_tfm = NULL; | ||
1181 | struct hlist_head *new_tl_hash = NULL; | ||
1182 | struct hlist_head *new_ee_hash = NULL; | ||
1183 | struct drbd_conf *odev; | ||
1184 | char hmac_name[CRYPTO_MAX_ALG_NAME]; | ||
1185 | void *int_dig_out = NULL; | ||
1186 | void *int_dig_in = NULL; | ||
1187 | void *int_dig_vv = NULL; | ||
1188 | struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr; | ||
1189 | |||
1190 | drbd_reconfig_start(mdev); | ||
1191 | |||
1192 | if (mdev->state.conn > C_STANDALONE) { | ||
1193 | retcode = ERR_NET_CONFIGURED; | ||
1194 | goto fail; | ||
1195 | } | ||
1196 | |||
1197 | /* allocation not in the IO path, cqueue thread context */ | ||
1198 | new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); | ||
1199 | if (!new_conf) { | ||
1200 | retcode = ERR_NOMEM; | ||
1201 | goto fail; | ||
1202 | } | ||
1203 | |||
1204 | memset(new_conf, 0, sizeof(struct net_conf)); | ||
1205 | new_conf->timeout = DRBD_TIMEOUT_DEF; | ||
1206 | new_conf->try_connect_int = DRBD_CONNECT_INT_DEF; | ||
1207 | new_conf->ping_int = DRBD_PING_INT_DEF; | ||
1208 | new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF; | ||
1209 | new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF; | ||
1210 | new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF; | ||
1211 | new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF; | ||
1212 | new_conf->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF; | ||
1213 | new_conf->ko_count = DRBD_KO_COUNT_DEF; | ||
1214 | new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF; | ||
1215 | new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF; | ||
1216 | new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF; | ||
1217 | new_conf->want_lose = 0; | ||
1218 | new_conf->two_primaries = 0; | ||
1219 | new_conf->wire_protocol = DRBD_PROT_C; | ||
1220 | new_conf->ping_timeo = DRBD_PING_TIMEO_DEF; | ||
1221 | new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF; | ||
1222 | |||
1223 | if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) { | ||
1224 | retcode = ERR_MANDATORY_TAG; | ||
1225 | goto fail; | ||
1226 | } | ||
1227 | |||
1228 | if (new_conf->two_primaries | ||
1229 | && (new_conf->wire_protocol != DRBD_PROT_C)) { | ||
1230 | retcode = ERR_NOT_PROTO_C; | ||
1231 | goto fail; | ||
1232 | } | ||
1233 | |||
1234 | if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { | ||
1235 | retcode = ERR_DISCARD; | ||
1236 | goto fail; | ||
1237 | } | ||
1238 | |||
1239 | retcode = NO_ERROR; | ||
1240 | |||
1241 | new_my_addr = (struct sockaddr *)&new_conf->my_addr; | ||
1242 | new_peer_addr = (struct sockaddr *)&new_conf->peer_addr; | ||
1243 | for (i = 0; i < minor_count; i++) { | ||
1244 | odev = minor_to_mdev(i); | ||
1245 | if (!odev || odev == mdev) | ||
1246 | continue; | ||
1247 | if (get_net_conf(odev)) { | ||
1248 | taken_addr = (struct sockaddr *)&odev->net_conf->my_addr; | ||
1249 | if (new_conf->my_addr_len == odev->net_conf->my_addr_len && | ||
1250 | !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len)) | ||
1251 | retcode = ERR_LOCAL_ADDR; | ||
1252 | |||
1253 | taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr; | ||
1254 | if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len && | ||
1255 | !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len)) | ||
1256 | retcode = ERR_PEER_ADDR; | ||
1257 | |||
1258 | put_net_conf(odev); | ||
1259 | if (retcode != NO_ERROR) | ||
1260 | goto fail; | ||
1261 | } | ||
1262 | } | ||
1263 | |||
1264 | if (new_conf->cram_hmac_alg[0] != 0) { | ||
1265 | snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", | ||
1266 | new_conf->cram_hmac_alg); | ||
1267 | tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC); | ||
1268 | if (IS_ERR(tfm)) { | ||
1269 | tfm = NULL; | ||
1270 | retcode = ERR_AUTH_ALG; | ||
1271 | goto fail; | ||
1272 | } | ||
1273 | |||
1274 | if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) | ||
1275 | != CRYPTO_ALG_TYPE_HASH) { | ||
1276 | retcode = ERR_AUTH_ALG_ND; | ||
1277 | goto fail; | ||
1278 | } | ||
1279 | } | ||
1280 | |||
1281 | if (new_conf->integrity_alg[0]) { | ||
1282 | integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); | ||
1283 | if (IS_ERR(integrity_w_tfm)) { | ||
1284 | integrity_w_tfm = NULL; | ||
1285 | retcode = ERR_INTEGRITY_ALG; | ||
1286 | goto fail; | ||
1287 | } | ||
1288 | |||
1289 | if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) { | ||
1290 | retcode = ERR_INTEGRITY_ALG_ND; | ||
1291 | goto fail; | ||
1292 | } | ||
1293 | |||
1294 | integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC); | ||
1295 | if (IS_ERR(integrity_r_tfm)) { | ||
1296 | integrity_r_tfm = NULL; | ||
1297 | retcode = ERR_INTEGRITY_ALG; | ||
1298 | goto fail; | ||
1299 | } | ||
1300 | } | ||
1301 | |||
1302 | ns = new_conf->max_epoch_size/8; | ||
1303 | if (mdev->tl_hash_s != ns) { | ||
1304 | new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); | ||
1305 | if (!new_tl_hash) { | ||
1306 | retcode = ERR_NOMEM; | ||
1307 | goto fail; | ||
1308 | } | ||
1309 | } | ||
1310 | |||
1311 | ns = new_conf->max_buffers/8; | ||
1312 | if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) { | ||
1313 | new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL); | ||
1314 | if (!new_ee_hash) { | ||
1315 | retcode = ERR_NOMEM; | ||
1316 | goto fail; | ||
1317 | } | ||
1318 | } | ||
1319 | |||
1320 | ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; | ||
1321 | |||
1322 | if (integrity_w_tfm) { | ||
1323 | i = crypto_hash_digestsize(integrity_w_tfm); | ||
1324 | int_dig_out = kmalloc(i, GFP_KERNEL); | ||
1325 | if (!int_dig_out) { | ||
1326 | retcode = ERR_NOMEM; | ||
1327 | goto fail; | ||
1328 | } | ||
1329 | int_dig_in = kmalloc(i, GFP_KERNEL); | ||
1330 | if (!int_dig_in) { | ||
1331 | retcode = ERR_NOMEM; | ||
1332 | goto fail; | ||
1333 | } | ||
1334 | int_dig_vv = kmalloc(i, GFP_KERNEL); | ||
1335 | if (!int_dig_vv) { | ||
1336 | retcode = ERR_NOMEM; | ||
1337 | goto fail; | ||
1338 | } | ||
1339 | } | ||
1340 | |||
1341 | if (!mdev->bitmap) { | ||
1342 | if (drbd_bm_init(mdev)) { | ||
1343 | retcode = ERR_NOMEM; | ||
1344 | goto fail; | ||
1345 | } | ||
1346 | } | ||
1347 | |||
1348 | spin_lock_irq(&mdev->req_lock); | ||
1349 | if (mdev->net_conf != NULL) { | ||
1350 | retcode = ERR_NET_CONFIGURED; | ||
1351 | spin_unlock_irq(&mdev->req_lock); | ||
1352 | goto fail; | ||
1353 | } | ||
1354 | mdev->net_conf = new_conf; | ||
1355 | |||
1356 | mdev->send_cnt = 0; | ||
1357 | mdev->recv_cnt = 0; | ||
1358 | |||
1359 | if (new_tl_hash) { | ||
1360 | kfree(mdev->tl_hash); | ||
1361 | mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8; | ||
1362 | mdev->tl_hash = new_tl_hash; | ||
1363 | } | ||
1364 | |||
1365 | if (new_ee_hash) { | ||
1366 | kfree(mdev->ee_hash); | ||
1367 | mdev->ee_hash_s = mdev->net_conf->max_buffers/8; | ||
1368 | mdev->ee_hash = new_ee_hash; | ||
1369 | } | ||
1370 | |||
1371 | crypto_free_hash(mdev->cram_hmac_tfm); | ||
1372 | mdev->cram_hmac_tfm = tfm; | ||
1373 | |||
1374 | crypto_free_hash(mdev->integrity_w_tfm); | ||
1375 | mdev->integrity_w_tfm = integrity_w_tfm; | ||
1376 | |||
1377 | crypto_free_hash(mdev->integrity_r_tfm); | ||
1378 | mdev->integrity_r_tfm = integrity_r_tfm; | ||
1379 | |||
1380 | kfree(mdev->int_dig_out); | ||
1381 | kfree(mdev->int_dig_in); | ||
1382 | kfree(mdev->int_dig_vv); | ||
1383 | mdev->int_dig_out = int_dig_out; | ||
1384 | mdev->int_dig_in = int_dig_in; | ||
1385 | mdev->int_dig_vv = int_dig_vv; | ||
1386 | spin_unlock_irq(&mdev->req_lock); | ||
1387 | |||
1388 | retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE); | ||
1389 | |||
1390 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | ||
1391 | reply->ret_code = retcode; | ||
1392 | drbd_reconfig_done(mdev); | ||
1393 | return 0; | ||
1394 | |||
1395 | fail: | ||
1396 | kfree(int_dig_out); | ||
1397 | kfree(int_dig_in); | ||
1398 | kfree(int_dig_vv); | ||
1399 | crypto_free_hash(tfm); | ||
1400 | crypto_free_hash(integrity_w_tfm); | ||
1401 | crypto_free_hash(integrity_r_tfm); | ||
1402 | kfree(new_tl_hash); | ||
1403 | kfree(new_ee_hash); | ||
1404 | kfree(new_conf); | ||
1405 | |||
1406 | reply->ret_code = retcode; | ||
1407 | drbd_reconfig_done(mdev); | ||
1408 | return 0; | ||
1409 | } | ||
1410 | |||
1411 | static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1412 | struct drbd_nl_cfg_reply *reply) | ||
1413 | { | ||
1414 | int retcode; | ||
1415 | |||
1416 | retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED); | ||
1417 | |||
1418 | if (retcode == SS_NOTHING_TO_DO) | ||
1419 | goto done; | ||
1420 | else if (retcode == SS_ALREADY_STANDALONE) | ||
1421 | goto done; | ||
1422 | else if (retcode == SS_PRIMARY_NOP) { | ||
1423 | /* Our state change checking code wants to see the peer outdated. */ | ||
1424 | retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, | ||
1425 | pdsk, D_OUTDATED)); | ||
1426 | } else if (retcode == SS_CW_FAILED_BY_PEER) { | ||
1427 | /* The peer probably wants to see us outdated. */ | ||
1428 | retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING, | ||
1429 | disk, D_OUTDATED), | ||
1430 | CS_ORDERED); | ||
1431 | if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) { | ||
1432 | drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); | ||
1433 | retcode = SS_SUCCESS; | ||
1434 | } | ||
1435 | } | ||
1436 | |||
1437 | if (retcode < SS_SUCCESS) | ||
1438 | goto fail; | ||
1439 | |||
1440 | if (wait_event_interruptible(mdev->state_wait, | ||
1441 | mdev->state.conn != C_DISCONNECTING)) { | ||
1442 | /* Do not test for mdev->state.conn == C_STANDALONE, since | ||
1443 | someone else might connect us in the meantime! */ | ||
1444 | retcode = ERR_INTR; | ||
1445 | goto fail; | ||
1446 | } | ||
1447 | |||
1448 | done: | ||
1449 | retcode = NO_ERROR; | ||
1450 | fail: | ||
1451 | drbd_md_sync(mdev); | ||
1452 | reply->ret_code = retcode; | ||
1453 | return 0; | ||
1454 | } | ||
1455 | |||
1456 | void resync_after_online_grow(struct drbd_conf *mdev) | ||
1457 | { | ||
1458 | int iass; /* I am sync source */ | ||
1459 | |||
1460 | dev_info(DEV, "Resync of new storage after online grow\n"); | ||
1461 | if (mdev->state.role != mdev->state.peer) | ||
1462 | iass = (mdev->state.role == R_PRIMARY); | ||
1463 | else | ||
1464 | iass = test_bit(DISCARD_CONCURRENT, &mdev->flags); | ||
1465 | |||
1466 | if (iass) | ||
1467 | drbd_start_resync(mdev, C_SYNC_SOURCE); | ||
1468 | else | ||
1469 | _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE); | ||
1470 | } | ||
1471 | |||
1472 | static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1473 | struct drbd_nl_cfg_reply *reply) | ||
1474 | { | ||
1475 | struct resize rs; | ||
1476 | int retcode = NO_ERROR; | ||
1477 | int ldsc = 0; /* local disk size changed */ | ||
1478 | enum determine_dev_size dd; | ||
1479 | |||
1480 | memset(&rs, 0, sizeof(struct resize)); | ||
1481 | if (!resize_from_tags(mdev, nlp->tag_list, &rs)) { | ||
1482 | retcode = ERR_MANDATORY_TAG; | ||
1483 | goto fail; | ||
1484 | } | ||
1485 | |||
1486 | if (mdev->state.conn > C_CONNECTED) { | ||
1487 | retcode = ERR_RESIZE_RESYNC; | ||
1488 | goto fail; | ||
1489 | } | ||
1490 | |||
1491 | if (mdev->state.role == R_SECONDARY && | ||
1492 | mdev->state.peer == R_SECONDARY) { | ||
1493 | retcode = ERR_NO_PRIMARY; | ||
1494 | goto fail; | ||
1495 | } | ||
1496 | |||
1497 | if (!get_ldev(mdev)) { | ||
1498 | retcode = ERR_NO_DISK; | ||
1499 | goto fail; | ||
1500 | } | ||
1501 | |||
1502 | if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { | ||
1503 | mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); | ||
1504 | ldsc = 1; | ||
1505 | } | ||
1506 | |||
1507 | mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; | ||
1508 | dd = drbd_determin_dev_size(mdev); | ||
1509 | drbd_md_sync(mdev); | ||
1510 | put_ldev(mdev); | ||
1511 | if (dd == dev_size_error) { | ||
1512 | retcode = ERR_NOMEM_BITMAP; | ||
1513 | goto fail; | ||
1514 | } | ||
1515 | |||
1516 | if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) { | ||
1517 | if (dd == grew) | ||
1518 | set_bit(RESIZE_PENDING, &mdev->flags); | ||
1519 | |||
1520 | drbd_send_uuids(mdev); | ||
1521 | drbd_send_sizes(mdev, 1); | ||
1522 | } | ||
1523 | |||
1524 | fail: | ||
1525 | reply->ret_code = retcode; | ||
1526 | return 0; | ||
1527 | } | ||
1528 | |||
1529 | static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1530 | struct drbd_nl_cfg_reply *reply) | ||
1531 | { | ||
1532 | int retcode = NO_ERROR; | ||
1533 | int err; | ||
1534 | int ovr; /* online verify running */ | ||
1535 | int rsr; /* re-sync running */ | ||
1536 | struct crypto_hash *verify_tfm = NULL; | ||
1537 | struct crypto_hash *csums_tfm = NULL; | ||
1538 | struct syncer_conf sc; | ||
1539 | cpumask_var_t new_cpu_mask; | ||
1540 | |||
1541 | if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { | ||
1542 | retcode = ERR_NOMEM; | ||
1543 | goto fail; | ||
1544 | } | ||
1545 | |||
1546 | if (nlp->flags & DRBD_NL_SET_DEFAULTS) { | ||
1547 | memset(&sc, 0, sizeof(struct syncer_conf)); | ||
1548 | sc.rate = DRBD_RATE_DEF; | ||
1549 | sc.after = DRBD_AFTER_DEF; | ||
1550 | sc.al_extents = DRBD_AL_EXTENTS_DEF; | ||
1551 | } else | ||
1552 | memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); | ||
1553 | |||
1554 | if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) { | ||
1555 | retcode = ERR_MANDATORY_TAG; | ||
1556 | goto fail; | ||
1557 | } | ||
1558 | |||
1559 | /* re-sync running */ | ||
1560 | rsr = (mdev->state.conn == C_SYNC_SOURCE || | ||
1561 | mdev->state.conn == C_SYNC_TARGET || | ||
1562 | mdev->state.conn == C_PAUSED_SYNC_S || | ||
1563 | mdev->state.conn == C_PAUSED_SYNC_T); | ||
1564 | |||
1565 | if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) { | ||
1566 | retcode = ERR_CSUMS_RESYNC_RUNNING; | ||
1567 | goto fail; | ||
1568 | } | ||
1569 | |||
1570 | if (!rsr && sc.csums_alg[0]) { | ||
1571 | csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC); | ||
1572 | if (IS_ERR(csums_tfm)) { | ||
1573 | csums_tfm = NULL; | ||
1574 | retcode = ERR_CSUMS_ALG; | ||
1575 | goto fail; | ||
1576 | } | ||
1577 | |||
1578 | if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) { | ||
1579 | retcode = ERR_CSUMS_ALG_ND; | ||
1580 | goto fail; | ||
1581 | } | ||
1582 | } | ||
1583 | |||
1584 | /* online verify running */ | ||
1585 | ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T); | ||
1586 | |||
1587 | if (ovr) { | ||
1588 | if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) { | ||
1589 | retcode = ERR_VERIFY_RUNNING; | ||
1590 | goto fail; | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1594 | if (!ovr && sc.verify_alg[0]) { | ||
1595 | verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC); | ||
1596 | if (IS_ERR(verify_tfm)) { | ||
1597 | verify_tfm = NULL; | ||
1598 | retcode = ERR_VERIFY_ALG; | ||
1599 | goto fail; | ||
1600 | } | ||
1601 | |||
1602 | if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) { | ||
1603 | retcode = ERR_VERIFY_ALG_ND; | ||
1604 | goto fail; | ||
1605 | } | ||
1606 | } | ||
1607 | |||
1608 | /* silently ignore cpu mask on UP kernel */ | ||
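/* the mask is parsed as a hex string, e.g. "3" pins DRBD's threads
 * to CPUs 0 and 1 */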
1609 | if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { | ||
1610 | err = __bitmap_parse(sc.cpu_mask, 32, 0, | ||
1611 | cpumask_bits(new_cpu_mask), nr_cpu_ids); | ||
1612 | if (err) { | ||
1613 | dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); | ||
1614 | retcode = ERR_CPU_MASK_PARSE; | ||
1615 | goto fail; | ||
1616 | } | ||
1617 | } | ||
1618 | |||
1619 | ERR_IF (sc.rate < 1) sc.rate = 1; | ||
1620 | ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum of 7; fall back to the default of 127 */ | ||
1621 | #define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT) | ||
1622 | if (sc.al_extents > AL_MAX) { | ||
1623 | dev_err(DEV, "sc.al_extents > %d\n", AL_MAX); | ||
1624 | sc.al_extents = AL_MAX; | ||
1625 | } | ||
1626 | #undef AL_MAX | ||
1627 | |||
1628 | /* most sanity checks done, try to assign the new sync-after | ||
1629 | * dependency. need to hold the global lock in there, | ||
1630 | * to avoid a race in the dependency loop check. */ | ||
1631 | retcode = drbd_alter_sa(mdev, sc.after); | ||
1632 | if (retcode != NO_ERROR) | ||
1633 | goto fail; | ||
1634 | |||
1635 | /* ok, assign the rest of it as well. | ||
1636 | * lock against receive_SyncParam() */ | ||
1637 | spin_lock(&mdev->peer_seq_lock); | ||
1638 | mdev->sync_conf = sc; | ||
1639 | |||
1640 | if (!rsr) { | ||
1641 | crypto_free_hash(mdev->csums_tfm); | ||
1642 | mdev->csums_tfm = csums_tfm; | ||
1643 | csums_tfm = NULL; | ||
1644 | } | ||
1645 | |||
1646 | if (!ovr) { | ||
1647 | crypto_free_hash(mdev->verify_tfm); | ||
1648 | mdev->verify_tfm = verify_tfm; | ||
1649 | verify_tfm = NULL; | ||
1650 | } | ||
1651 | spin_unlock(&mdev->peer_seq_lock); | ||
1652 | |||
1653 | if (get_ldev(mdev)) { | ||
1654 | wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); | ||
1655 | drbd_al_shrink(mdev); | ||
1656 | err = drbd_check_al_size(mdev); | ||
1657 | lc_unlock(mdev->act_log); | ||
1658 | wake_up(&mdev->al_wait); | ||
1659 | |||
1660 | put_ldev(mdev); | ||
1661 | drbd_md_sync(mdev); | ||
1662 | |||
1663 | if (err) { | ||
1664 | retcode = ERR_NOMEM; | ||
1665 | goto fail; | ||
1666 | } | ||
1667 | } | ||
1668 | |||
1669 | if (mdev->state.conn >= C_CONNECTED) | ||
1670 | drbd_send_sync_param(mdev, &sc); | ||
1671 | |||
1672 | if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) { | ||
1673 | cpumask_copy(mdev->cpu_mask, new_cpu_mask); | ||
1674 | drbd_calc_cpu_mask(mdev); | ||
1675 | mdev->receiver.reset_cpu_mask = 1; | ||
1676 | mdev->asender.reset_cpu_mask = 1; | ||
1677 | mdev->worker.reset_cpu_mask = 1; | ||
1678 | } | ||
1679 | |||
1680 | kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); | ||
1681 | fail: | ||
1682 | free_cpumask_var(new_cpu_mask); | ||
1683 | crypto_free_hash(csums_tfm); | ||
1684 | crypto_free_hash(verify_tfm); | ||
1685 | reply->ret_code = retcode; | ||
1686 | return 0; | ||
1687 | } | ||
1688 | |||
1689 | static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1690 | struct drbd_nl_cfg_reply *reply) | ||
1691 | { | ||
1692 | int retcode; | ||
1693 | |||
1694 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED); | ||
1695 | |||
1696 | if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION) | ||
1697 | retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); | ||
1698 | |||
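/* SS_NEED_CONNECTION means the state engine refuses to start a sync
 * without a peer: if we really are disconnected, simply mark our own
 * disk Inconsistent; if a peer connected in between, retry the sync
 * request. */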
1699 | while (retcode == SS_NEED_CONNECTION) { | ||
1700 | spin_lock_irq(&mdev->req_lock); | ||
1701 | if (mdev->state.conn < C_CONNECTED) | ||
1702 | retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL); | ||
1703 | spin_unlock_irq(&mdev->req_lock); | ||
1704 | |||
1705 | if (retcode != SS_NEED_CONNECTION) | ||
1706 | break; | ||
1707 | |||
1708 | retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T)); | ||
1709 | } | ||
1710 | |||
1711 | reply->ret_code = retcode; | ||
1712 | return 0; | ||
1713 | } | ||
1714 | |||
1715 | static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1716 | struct drbd_nl_cfg_reply *reply) | ||
1717 | { | ||
1718 | |||
1719 | reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); | ||
1720 | |||
1721 | return 0; | ||
1722 | } | ||
1723 | |||
1724 | static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1725 | struct drbd_nl_cfg_reply *reply) | ||
1726 | { | ||
1727 | int retcode = NO_ERROR; | ||
1728 | |||
1729 | if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) | ||
1730 | retcode = ERR_PAUSE_IS_SET; | ||
1731 | |||
1732 | reply->ret_code = retcode; | ||
1733 | return 0; | ||
1734 | } | ||
1735 | |||
1736 | static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1737 | struct drbd_nl_cfg_reply *reply) | ||
1738 | { | ||
1739 | int retcode = NO_ERROR; | ||
1740 | |||
1741 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) | ||
1742 | retcode = ERR_PAUSE_IS_CLEAR; | ||
1743 | |||
1744 | reply->ret_code = retcode; | ||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1748 | static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1749 | struct drbd_nl_cfg_reply *reply) | ||
1750 | { | ||
1751 | reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); | ||
1752 | |||
1753 | return 0; | ||
1754 | } | ||
1755 | |||
1756 | static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1757 | struct drbd_nl_cfg_reply *reply) | ||
1758 | { | ||
1759 | reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); | ||
1760 | return 0; | ||
1761 | } | ||
1762 | |||
1763 | static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1764 | struct drbd_nl_cfg_reply *reply) | ||
1765 | { | ||
1766 | reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); | ||
1767 | return 0; | ||
1768 | } | ||
1769 | |||
1770 | static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1771 | struct drbd_nl_cfg_reply *reply) | ||
1772 | { | ||
1773 | unsigned short *tl; | ||
1774 | |||
1775 | tl = reply->tag_list; | ||
1776 | |||
1777 | if (get_ldev(mdev)) { | ||
1778 | tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl); | ||
1779 | put_ldev(mdev); | ||
1780 | } | ||
1781 | |||
1782 | if (get_net_conf(mdev)) { | ||
1783 | tl = net_conf_to_tags(mdev, mdev->net_conf, tl); | ||
1784 | put_net_conf(mdev); | ||
1785 | } | ||
1786 | tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl); | ||
1787 | |||
1788 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
1789 | |||
1790 | return (int)((char *)tl - (char *)reply->tag_list); | ||
1791 | } | ||
1792 | |||
1793 | static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1794 | struct drbd_nl_cfg_reply *reply) | ||
1795 | { | ||
1796 | unsigned short *tl = reply->tag_list; | ||
1797 | union drbd_state s = mdev->state; | ||
1798 | unsigned long rs_left; | ||
1799 | unsigned int res; | ||
1800 | |||
1801 | tl = get_state_to_tags(mdev, (struct get_state *)&s, tl); | ||
1802 | |||
1803 | /* no local ref, no bitmap, no syncer progress. */ | ||
1804 | if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { | ||
1805 | if (get_ldev(mdev)) { | ||
1806 | drbd_get_syncer_progress(mdev, &rs_left, &res); | ||
1807 | tl = tl_add_int(tl, T_sync_progress, &res); | ||
1808 | put_ldev(mdev); | ||
1809 | } | ||
1810 | } | ||
1811 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
1812 | |||
1813 | return (int)((char *)tl - (char *)reply->tag_list); | ||
1814 | } | ||
1815 | |||
1816 | static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1817 | struct drbd_nl_cfg_reply *reply) | ||
1818 | { | ||
1819 | unsigned short *tl; | ||
1820 | |||
1821 | tl = reply->tag_list; | ||
1822 | |||
1823 | if (get_ldev(mdev)) { | ||
1824 | tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); | ||
1825 | tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); | ||
1826 | put_ldev(mdev); | ||
1827 | } | ||
1828 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
1829 | |||
1830 | return (int)((char *)tl - (char *)reply->tag_list); | ||
1831 | } | ||
1832 | |||
1833 | /** | ||
1834 | * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use | ||
1835 | * @mdev: DRBD device. | ||
1836 | * @nlp: Netlink/connector packet from drbdsetup | ||
1837 | * @reply: Reply packet for drbdsetup | ||
1838 | */ | ||
1839 | static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1840 | struct drbd_nl_cfg_reply *reply) | ||
1841 | { | ||
1842 | unsigned short *tl; | ||
1843 | char rv; | ||
1844 | |||
1845 | tl = reply->tag_list; | ||
1846 | |||
1847 | rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : | ||
1848 | test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; | ||
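/* drbdsetup presumably maps these to its outdated-wfc-timeout,
 * degr-wfc-timeout and wfc-timeout options, in that order */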
1849 | |||
1850 | tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); | ||
1851 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
1852 | |||
1853 | return (int)((char *)tl - (char *)reply->tag_list); | ||
1854 | } | ||
1855 | |||
1856 | static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1857 | struct drbd_nl_cfg_reply *reply) | ||
1858 | { | ||
1859 | /* default to resume from last known position, if possible */ | ||
1860 | struct start_ov args = | ||
1861 | { .start_sector = mdev->ov_start_sector }; | ||
1862 | |||
1863 | if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) { | ||
1864 | reply->ret_code = ERR_MANDATORY_TAG; | ||
1865 | return 0; | ||
1866 | } | ||
1867 | /* w_make_ov_request expects position to be aligned */ | ||
1868 | mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1); | ||
1869 | reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S)); | ||
1870 | return 0; | ||
1871 | } | ||
1872 | |||
1873 | |||
1874 | static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
1875 | struct drbd_nl_cfg_reply *reply) | ||
1876 | { | ||
1877 | int retcode = NO_ERROR; | ||
1878 | int skip_initial_sync = 0; | ||
1879 | int err; | ||
1880 | |||
1881 | struct new_c_uuid args; | ||
1882 | |||
1883 | memset(&args, 0, sizeof(struct new_c_uuid)); | ||
1884 | if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) { | ||
1885 | reply->ret_code = ERR_MANDATORY_TAG; | ||
1886 | return 0; | ||
1887 | } | ||
1888 | |||
1889 | mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */ | ||
1890 | |||
1891 | if (!get_ldev(mdev)) { | ||
1892 | retcode = ERR_NO_DISK; | ||
1893 | goto out; | ||
1894 | } | ||
1895 | |||
1896 | /* this is "skip initial sync": assume the disks are already in sync */ | ||
1897 | if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 && | ||
1898 | mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { | ||
1899 | dev_info(DEV, "Preparing to skip initial sync\n"); | ||
1900 | skip_initial_sync = 1; | ||
1901 | } else if (mdev->state.conn != C_STANDALONE) { | ||
1902 | retcode = ERR_CONNECTED; | ||
1903 | goto out_dec; | ||
1904 | } | ||
1905 | |||
1906 | drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */ | ||
1907 | drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */ | ||
1908 | |||
1909 | if (args.clear_bm) { | ||
1910 | err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid"); | ||
1911 | if (err) { | ||
1912 | dev_err(DEV, "Writing bitmap failed with %d\n", err); | ||
1913 | retcode = ERR_IO_MD_DISK; | ||
1914 | } | ||
1915 | if (skip_initial_sync) { | ||
1916 | drbd_send_uuids_skip_initial_sync(mdev); | ||
1917 | _drbd_uuid_set(mdev, UI_BITMAP, 0); | ||
1918 | spin_lock_irq(&mdev->req_lock); | ||
1919 | _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), | ||
1920 | CS_VERBOSE, NULL); | ||
1921 | spin_unlock_irq(&mdev->req_lock); | ||
1922 | } | ||
1923 | } | ||
1924 | |||
1925 | drbd_md_sync(mdev); | ||
1926 | out_dec: | ||
1927 | put_ldev(mdev); | ||
1928 | out: | ||
1929 | mutex_unlock(&mdev->state_mutex); | ||
1930 | |||
1931 | reply->ret_code = retcode; | ||
1932 | return 0; | ||
1933 | } | ||
1934 | |||
1935 | static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp) | ||
1936 | { | ||
1937 | struct drbd_conf *mdev; | ||
1938 | |||
1939 | if (nlp->drbd_minor >= minor_count) | ||
1940 | return NULL; | ||
1941 | |||
1942 | mdev = minor_to_mdev(nlp->drbd_minor); | ||
1943 | |||
1944 | if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) { | ||
1945 | struct gendisk *disk = NULL; | ||
1946 | mdev = drbd_new_device(nlp->drbd_minor); | ||
if (!mdev)
return NULL;	/* out of memory; the caller reports ERR_MINOR_INVALID */
1947 | |||
1948 | spin_lock_irq(&drbd_pp_lock); | ||
1949 | if (minor_table[nlp->drbd_minor] == NULL) { | ||
1950 | minor_table[nlp->drbd_minor] = mdev; | ||
1951 | disk = mdev->vdisk; | ||
1952 | mdev = NULL; | ||
1953 | } /* else: we lost the race */ | ||
1954 | spin_unlock_irq(&drbd_pp_lock); | ||
1955 | |||
1956 | if (disk) /* we won the race above */ | ||
1957 | /* in case we ever add a drbd_delete_device(), | ||
1958 | * don't forget the del_gendisk! */ | ||
1959 | add_disk(disk); | ||
1960 | else /* we lost the race above */ | ||
1961 | drbd_free_mdev(mdev); | ||
1962 | |||
1963 | mdev = minor_to_mdev(nlp->drbd_minor); | ||
1964 | } | ||
1965 | |||
1966 | return mdev; | ||
1967 | } | ||
1968 | |||
1969 | struct cn_handler_struct { | ||
1970 | int (*function)(struct drbd_conf *, | ||
1971 | struct drbd_nl_cfg_req *, | ||
1972 | struct drbd_nl_cfg_reply *); | ||
1973 | int reply_body_size; | ||
1974 | }; | ||
1975 | |||
1976 | static struct cn_handler_struct cnd_table[] = { | ||
1977 | [ P_primary ] = { &drbd_nl_primary, 0 }, | ||
1978 | [ P_secondary ] = { &drbd_nl_secondary, 0 }, | ||
1979 | [ P_disk_conf ] = { &drbd_nl_disk_conf, 0 }, | ||
1980 | [ P_detach ] = { &drbd_nl_detach, 0 }, | ||
1981 | [ P_net_conf ] = { &drbd_nl_net_conf, 0 }, | ||
1982 | [ P_disconnect ] = { &drbd_nl_disconnect, 0 }, | ||
1983 | [ P_resize ] = { &drbd_nl_resize, 0 }, | ||
1984 | [ P_syncer_conf ] = { &drbd_nl_syncer_conf, 0 }, | ||
1985 | [ P_invalidate ] = { &drbd_nl_invalidate, 0 }, | ||
1986 | [ P_invalidate_peer ] = { &drbd_nl_invalidate_peer, 0 }, | ||
1987 | [ P_pause_sync ] = { &drbd_nl_pause_sync, 0 }, | ||
1988 | [ P_resume_sync ] = { &drbd_nl_resume_sync, 0 }, | ||
1989 | [ P_suspend_io ] = { &drbd_nl_suspend_io, 0 }, | ||
1990 | [ P_resume_io ] = { &drbd_nl_resume_io, 0 }, | ||
1991 | [ P_outdate ] = { &drbd_nl_outdate, 0 }, | ||
1992 | [ P_get_config ] = { &drbd_nl_get_config, | ||
1993 | sizeof(struct syncer_conf_tag_len_struct) + | ||
1994 | sizeof(struct disk_conf_tag_len_struct) + | ||
1995 | sizeof(struct net_conf_tag_len_struct) }, | ||
1996 | [ P_get_state ] = { &drbd_nl_get_state, | ||
1997 | sizeof(struct get_state_tag_len_struct) + | ||
1998 | sizeof(struct sync_progress_tag_len_struct) }, | ||
1999 | [ P_get_uuids ] = { &drbd_nl_get_uuids, | ||
2000 | sizeof(struct get_uuids_tag_len_struct) }, | ||
2001 | [ P_get_timeout_flag ] = { &drbd_nl_get_timeout_flag, | ||
2002 | sizeof(struct get_timeout_flag_tag_len_struct)}, | ||
2003 | [ P_start_ov ] = { &drbd_nl_start_ov, 0 }, | ||
2004 | [ P_new_c_uuid ] = { &drbd_nl_new_c_uuid, 0 }, | ||
2005 | }; | ||
2006 | |||
2007 | static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) | ||
2008 | { | ||
2009 | struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; | ||
2010 | struct cn_handler_struct *cm; | ||
2011 | struct cn_msg *cn_reply; | ||
2012 | struct drbd_nl_cfg_reply *reply; | ||
2013 | struct drbd_conf *mdev; | ||
2014 | int retcode, rr; | ||
2015 | int reply_size = sizeof(struct cn_msg) | ||
2016 | + sizeof(struct drbd_nl_cfg_reply) | ||
2017 | + sizeof(short int); | ||
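/* the trailing short int reserves room for the TT_END tag that
 * closes every tag list */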
2018 | |||
2019 | if (!try_module_get(THIS_MODULE)) { | ||
2020 | printk(KERN_ERR "drbd: try_module_get() failed!\n"); | ||
2021 | return; | ||
2022 | } | ||
2023 | |||
2024 | if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { | ||
2025 | retcode = ERR_PERM; | ||
2026 | goto fail; | ||
2027 | } | ||
2028 | |||
2029 | mdev = ensure_mdev(nlp); | ||
2030 | if (!mdev) { | ||
2031 | retcode = ERR_MINOR_INVALID; | ||
2032 | goto fail; | ||
2033 | } | ||
2034 | |||
2035 | if (nlp->packet_type >= P_nl_after_last_packet) { | ||
2036 | retcode = ERR_PACKET_NR; | ||
2037 | goto fail; | ||
2038 | } | ||
2039 | |||
2040 | cm = cnd_table + nlp->packet_type; | ||
2041 | |||
2042 | /* This may happen if the packet number is 0: */ | ||
2043 | if (cm->function == NULL) { | ||
2044 | retcode = ERR_PACKET_NR; | ||
2045 | goto fail; | ||
2046 | } | ||
2047 | |||
2048 | reply_size += cm->reply_body_size; | ||
2049 | |||
2050 | /* allocation not in the IO path, cqueue thread context */ | ||
2051 | cn_reply = kmalloc(reply_size, GFP_KERNEL); | ||
2052 | if (!cn_reply) { | ||
2053 | retcode = ERR_NOMEM; | ||
2054 | goto fail; | ||
2055 | } | ||
2056 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; | ||
2057 | |||
2058 | reply->packet_type = | ||
2059 | cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet; | ||
2060 | reply->minor = nlp->drbd_minor; | ||
2061 | reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */ | ||
2062 | /* reply->tag_list; might be modified by cm->function. */ | ||
2063 | |||
2064 | rr = cm->function(mdev, nlp, reply); | ||
2065 | |||
2066 | cn_reply->id = req->id; | ||
2067 | cn_reply->seq = req->seq; | ||
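/* connector convention: a reply acknowledges a request with ack + 1 */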
2068 | cn_reply->ack = req->ack + 1; | ||
2069 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; | ||
2070 | cn_reply->flags = 0; | ||
2071 | |||
2072 | rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); | ||
2073 | if (rr && rr != -ESRCH) | ||
2074 | printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); | ||
2075 | |||
2076 | kfree(cn_reply); | ||
2077 | module_put(THIS_MODULE); | ||
2078 | return; | ||
2079 | fail: | ||
2080 | drbd_nl_send_reply(req, retcode); | ||
2081 | module_put(THIS_MODULE); | ||
2082 | } | ||
2083 | |||
2084 | static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ | ||
2085 | |||
2086 | static unsigned short * | ||
2087 | __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, | ||
2088 | unsigned short len, int nul_terminated) | ||
2089 | { | ||
2090 | unsigned short l = tag_descriptions[tag_number(tag)].max_len; | ||
2091 | len = (len < l) ? len : l; | ||
2092 | put_unaligned(tag, tl++); | ||
2093 | put_unaligned(len, tl++); | ||
2094 | memcpy(tl, data, len); | ||
2095 | tl = (unsigned short *)((char *)tl + len); | ||
2096 | if (nul_terminated) | ||
2097 | *((char *)tl - 1) = 0; | ||
2098 | return tl; | ||
2099 | } | ||
2100 | |||
2101 | static unsigned short * | ||
2102 | tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) | ||
2103 | { | ||
2104 | return __tl_add_blob(tl, tag, data, len, 0); | ||
2105 | } | ||
2106 | |||
2107 | static unsigned short * | ||
2108 | tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) | ||
2109 | { | ||
2110 | return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); | ||
2111 | } | ||
2112 | |||
2113 | static unsigned short * | ||
2114 | tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) | ||
2115 | { | ||
2116 | put_unaligned(tag, tl++); | ||
2117 | switch (tag_type(tag)) { | ||
2118 | case TT_INTEGER: | ||
2119 | put_unaligned(sizeof(int), tl++); | ||
2120 | put_unaligned(*(int *)val, (int *)tl); | ||
2121 | tl = (unsigned short *)((char *)tl + sizeof(int)); | ||
2122 | break; | ||
2123 | case TT_INT64: | ||
2124 | put_unaligned(sizeof(u64), tl++); | ||
2125 | put_unaligned(*(u64 *)val, (u64 *)tl); | ||
2126 | tl = (unsigned short *)((char *)tl + sizeof(u64)); | ||
2127 | break; | ||
2128 | default: | ||
2129 | /* someone did something stupid. */ | ||
2130 | ; | ||
2131 | } | ||
2132 | return tl; | ||
2133 | } | ||
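
/*
 * Editor's sketch, not part of the driver: how a reply body is typically
 * assembled with the helpers above.  A tag list is a flat sequence of
 * [tag][length][payload] records and is closed by a single TT_END tag;
 * the byte count handed to the connector is the distance walked from the
 * start of the list.  "res" and "len" are illustrative locals; T_helper
 * and T_sync_progress are tags actually used by the broadcast functions
 * below.
 *
 *	unsigned short *tl = reply->tag_list;
 *
 *	tl = tl_add_str(tl, T_helper, "fence-peer");
 *	tl = tl_add_int(tl, T_sync_progress, &res);
 *	put_unaligned(TT_END, tl++);
 *	len = (char *)tl - (char *)reply->tag_list;
 */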
2134 | |||
2135 | void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) | ||
2136 | { | ||
2137 | char buffer[sizeof(struct cn_msg)+ | ||
2138 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2139 | sizeof(struct get_state_tag_len_struct)+ | ||
2140 | sizeof(short int)]; | ||
2141 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2142 | struct drbd_nl_cfg_reply *reply = | ||
2143 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2144 | unsigned short *tl = reply->tag_list; | ||
2145 | |||
2146 | /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ | ||
2147 | |||
2148 | tl = get_state_to_tags(mdev, (struct get_state *)&state, tl); | ||
2149 | |||
2150 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2151 | |||
2152 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2153 | cn_reply->id.val = CN_VAL_DRBD; | ||
2154 | |||
2155 | cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); | ||
2156 | cn_reply->ack = 0; /* not used here. */ | ||
2157 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2158 | (int)((char *)tl - (char *)reply->tag_list); | ||
2159 | cn_reply->flags = 0; | ||
2160 | |||
2161 | reply->packet_type = P_get_state; | ||
2162 | reply->minor = mdev_to_minor(mdev); | ||
2163 | reply->ret_code = NO_ERROR; | ||
2164 | |||
2165 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2166 | } | ||
2167 | |||
2168 | void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) | ||
2169 | { | ||
2170 | char buffer[sizeof(struct cn_msg)+ | ||
2171 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2172 | sizeof(struct call_helper_tag_len_struct)+ | ||
2173 | sizeof(short int)]; | ||
2174 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2175 | struct drbd_nl_cfg_reply *reply = | ||
2176 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2177 | unsigned short *tl = reply->tag_list; | ||
2178 | |||
2179 | /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ | ||
2180 | |||
2181 | tl = tl_add_str(tl, T_helper, helper_name); | ||
2182 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2183 | |||
2184 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2185 | cn_reply->id.val = CN_VAL_DRBD; | ||
2186 | |||
2187 | cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); | ||
2188 | cn_reply->ack = 0; /* not used here. */ | ||
2189 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2190 | (int)((char *)tl - (char *)reply->tag_list); | ||
2191 | cn_reply->flags = 0; | ||
2192 | |||
2193 | reply->packet_type = P_call_helper; | ||
2194 | reply->minor = mdev_to_minor(mdev); | ||
2195 | reply->ret_code = NO_ERROR; | ||
2196 | |||
2197 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2198 | } | ||
2199 | |||
2200 | void drbd_bcast_ee(struct drbd_conf *mdev, | ||
2201 | const char *reason, const int dgs, | ||
2202 | const char* seen_hash, const char* calc_hash, | ||
2203 | const struct drbd_epoch_entry* e) | ||
2204 | { | ||
2205 | struct cn_msg *cn_reply; | ||
2206 | struct drbd_nl_cfg_reply *reply; | ||
2207 | struct bio_vec *bvec; | ||
2208 | unsigned short *tl; | ||
2209 | int i; | ||
2210 | |||
2211 | if (!e) | ||
2212 | return; | ||
2213 | if (!reason || !reason[0]) | ||
2214 | return; | ||
2215 | |||
2216 | /* apparently we have to memcpy twice, first to prepare the data for the | ||
2217 | * struct cn_msg, then within cn_netlink_send from the cn_msg to the | ||
2218 | * netlink skb. */ | ||
2219 | /* receiver thread context, which is not in the writeout path (of this node), | ||
2220 | * but may be in the writeout path of the _other_ node. | ||
2221 | * GFP_NOIO to avoid potential "distributed deadlock". */ | ||
2222 | cn_reply = kmalloc( | ||
2223 | sizeof(struct cn_msg)+ | ||
2224 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2225 | sizeof(struct dump_ee_tag_len_struct)+ | ||
2226 | sizeof(short int), | ||
2227 | GFP_NOIO); | ||
2228 | |||
2229 | if (!cn_reply) { | ||
2230 | dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n", | ||
2231 | (unsigned long long)e->sector, e->size); | ||
2232 | return; | ||
2233 | } | ||
2234 | |||
2235 | reply = (struct drbd_nl_cfg_reply*)cn_reply->data; | ||
2236 | tl = reply->tag_list; | ||
2237 | |||
2238 | tl = tl_add_str(tl, T_dump_ee_reason, reason); | ||
2239 | tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); | ||
2240 | tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); | ||
2241 | tl = tl_add_int(tl, T_ee_sector, &e->sector); | ||
2242 | tl = tl_add_int(tl, T_ee_block_id, &e->block_id); | ||
2243 | |||
2244 | put_unaligned(T_ee_data, tl++); | ||
2245 | put_unaligned(e->size, tl++); | ||
2246 | |||
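/* inline the epoch entry's data pages as the T_ee_data payload */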
2247 | __bio_for_each_segment(bvec, e->private_bio, i, 0) { | ||
2248 | void *d = kmap(bvec->bv_page); | ||
2249 | memcpy(tl, d + bvec->bv_offset, bvec->bv_len); | ||
2250 | kunmap(bvec->bv_page); | ||
2251 | tl = (unsigned short *)((char *)tl + bvec->bv_len); | ||
2252 | } | ||
2253 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2254 | |||
2255 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2256 | cn_reply->id.val = CN_VAL_DRBD; | ||
2257 | |||
2258 | cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); | ||
2259 | cn_reply->ack = 0; /* not used here. */ | ||
2260 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2261 | (int)((char *)tl - (char *)reply->tag_list); | ||
2262 | cn_reply->flags = 0; | ||
2263 | |||
2264 | reply->packet_type = P_dump_ee; | ||
2265 | reply->minor = mdev_to_minor(mdev); | ||
2266 | reply->ret_code = NO_ERROR; | ||
2267 | |||
2268 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2269 | kfree(cn_reply); | ||
2270 | } | ||
2271 | |||
2272 | void drbd_bcast_sync_progress(struct drbd_conf *mdev) | ||
2273 | { | ||
2274 | char buffer[sizeof(struct cn_msg)+ | ||
2275 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2276 | sizeof(struct sync_progress_tag_len_struct)+ | ||
2277 | sizeof(short int)]; | ||
2278 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2279 | struct drbd_nl_cfg_reply *reply = | ||
2280 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2281 | unsigned short *tl = reply->tag_list; | ||
2282 | unsigned long rs_left; | ||
2283 | unsigned int res; | ||
2284 | |||
2285 | /* no local ref, no bitmap, no syncer progress, no broadcast. */ | ||
2286 | if (!get_ldev(mdev)) | ||
2287 | return; | ||
2288 | drbd_get_syncer_progress(mdev, &rs_left, &res); | ||
2289 | put_ldev(mdev); | ||
2290 | |||
2291 | tl = tl_add_int(tl, T_sync_progress, &res); | ||
2292 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2293 | |||
2294 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2295 | cn_reply->id.val = CN_VAL_DRBD; | ||
2296 | |||
2297 | cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); | ||
2298 | cn_reply->ack = 0; /* not used here. */ | ||
2299 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2300 | (int)((char *)tl - (char *)reply->tag_list); | ||
2301 | cn_reply->flags = 0; | ||
2302 | |||
2303 | reply->packet_type = P_sync_progress; | ||
2304 | reply->minor = mdev_to_minor(mdev); | ||
2305 | reply->ret_code = NO_ERROR; | ||
2306 | |||
2307 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2308 | } | ||
2309 | |||
2310 | int __init drbd_nl_init(void) | ||
2311 | { | ||
2312 | static struct cb_id cn_id_drbd; | ||
2313 | int err, try = 10; | ||
2314 | |||
2315 | cn_id_drbd.val = CN_VAL_DRBD; | ||
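/* if the preferred connector index is already taken, probe further
 * indices, CN_IDX_STEP apart, up to ten more times */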
2316 | do { | ||
2317 | cn_id_drbd.idx = cn_idx; | ||
2318 | err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); | ||
2319 | if (!err) | ||
2320 | break; | ||
2321 | cn_idx += CN_IDX_STEP; | ||
2322 | } while (try--); | ||
2323 | |||
2324 | if (err) { | ||
2325 | printk(KERN_ERR "drbd: cn_drbd failed to register\n"); | ||
2326 | return err; | ||
2327 | } | ||
2328 | |||
2329 | return 0; | ||
2330 | } | ||
2331 | |||
2332 | void drbd_nl_cleanup(void) | ||
2333 | { | ||
2334 | static struct cb_id cn_id_drbd; | ||
2335 | |||
2336 | cn_id_drbd.idx = cn_idx; | ||
2337 | cn_id_drbd.val = CN_VAL_DRBD; | ||
2338 | |||
2339 | cn_del_callback(&cn_id_drbd); | ||
2340 | } | ||
2341 | |||
2342 | void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | ||
2343 | { | ||
2344 | char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; | ||
2345 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2346 | struct drbd_nl_cfg_reply *reply = | ||
2347 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2348 | int rr; | ||
2349 | |||
2350 | cn_reply->id = req->id; | ||
2351 | |||
2352 | cn_reply->seq = req->seq; | ||
2353 | cn_reply->ack = req->ack + 1; | ||
2354 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); | ||
2355 | cn_reply->flags = 0; | ||
2356 | |||
2357 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; | ||
2358 | reply->ret_code = ret_code; | ||
2359 | |||
2360 | rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2361 | if (rr && rr != -ESRCH) | ||
2362 | printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); | ||
2363 | } | ||
2364 | |||