-rw-r--r--  drivers/block/drbd/drbd_actlog.c |    3
-rw-r--r--  drivers/block/drbd/drbd_int.h    |   36
-rw-r--r--  drivers/block/drbd/drbd_main.c   |   27
-rw-r--r--  drivers/block/drbd/drbd_nl.c     | 1536
-rw-r--r--  drivers/block/drbd/drbd_state.c  |    7
-rw-r--r--  include/linux/drbd.h             |   35
-rw-r--r--  include/linux/genl_magic_func.h  |    2
 7 files changed, 806 insertions(+), 840 deletions(-)
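
The patch below replaces DRBD's connector-based netlink interface with generic netlink. For orientation, the reply sequence the new drbd_nl.c code is built around (allocate a reply skb, start a genl message, finish it, answer the sender) looks roughly like the following sketch; example_genl_family and EXAMPLE_CMD_STATUS are placeholder names, not symbols from this patch, and error handling is reduced to the essentials:

/* Illustrative sketch only -- not part of the patch. */
static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	void *hdr;

	reply = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);	/* room for the answer */
	if (!reply)
		return -ENOMEM;

	hdr = genlmsg_put_reply(reply, info, &example_genl_family,
				0, EXAMPLE_CMD_STATUS);
	if (!hdr) {
		nlmsg_free(reply);
		return -EMSGSIZE;
	}

	genlmsg_end(reply, hdr);		/* finalize the netlink header */
	return genlmsg_reply(reply, info);	/* unicast back to the requester */
}

The same pattern appears below as drbd_adm_prepare()/drbd_adm_send_reply() in drbd_nl.c.
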
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 7cd78617669b..c1a90616776b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -702,6 +702,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
 {
 	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
 	struct drbd_conf *mdev = w->mdev;
+	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
 
 	if (!get_ldev(mdev)) {
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -725,7 +726,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
 			break;
 		}
 	}
-	drbd_bcast_sync_progress(mdev);
+	drbd_bcast_event(mdev, &sib);
 
 	return 1;
 }
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e68758344647..429fd8da6b71 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -44,6 +44,7 @@
 #include <net/tcp.h>
 #include <linux/lru_cache.h>
 #include <linux/prefetch.h>
+#include <linux/drbd_genl_api.h>
 #include <linux/drbd.h>
 #include "drbd_state.h"
 
@@ -65,7 +66,6 @@
 extern unsigned int minor_count;
 extern int disable_sendpage;
 extern int allow_oos;
-extern unsigned int cn_idx;
 
 #ifdef CONFIG_DRBD_FAULT_INJECTION
 extern int enable_faults;
@@ -865,14 +865,6 @@ struct drbd_md {
 	 */
 };
 
-/* for sync_conf and other types... */
-#define NL_PACKET(name, number, fields) struct name { fields };
-#define NL_INTEGER(pn,pr,member) int member;
-#define NL_INT64(pn,pr,member) __u64 member;
-#define NL_BIT(pn,pr,member) unsigned member:1;
-#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
-#include "linux/drbd_nl.h"
-
 struct drbd_backing_dev {
 	struct block_device *backing_bdev;
 	struct block_device *md_bdev;
@@ -1502,7 +1494,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 extern void drbd_free_mdev(struct drbd_conf *mdev);
 extern void drbd_delete_device(unsigned int minor);
 
-struct drbd_tconn *drbd_new_tconn(char *name);
+struct drbd_tconn *drbd_new_tconn(const char *name);
 extern void drbd_free_tconn(struct drbd_tconn *tconn);
 struct drbd_tconn *conn_by_name(const char *name);
 
@@ -1679,16 +1671,22 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
 extern void drbd_al_shrink(struct drbd_conf *mdev);
 
-
 /* drbd_nl.c */
-
-void drbd_nl_cleanup(void);
-int __init drbd_nl_init(void);
-void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
-void drbd_bcast_sync_progress(struct drbd_conf *mdev);
-void drbd_bcast_ee(struct drbd_conf *, const char *, const int, const char *,
-		   const char *, const struct drbd_peer_request *);
-
+/* state info broadcast */
+struct sib_info {
+	enum drbd_state_info_bcast_reason sib_reason;
+	union {
+		struct {
+			char *helper_name;
+			unsigned helper_exit_code;
+		};
+		struct {
+			union drbd_state os;
+			union drbd_state ns;
+		};
+	};
+};
+void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib);
 
 /*
  * inline helper functions
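
The new struct sib_info folds the separate broadcast helpers into a single drbd_bcast_event() call; callers fill only the union members that match sib_reason. A minimal usage sketch, following the drbd_khelper() conversion further down in this patch:

	/* Sketch of the new broadcast convention (see the drbd_khelper hunk below). */
	struct sib_info sib;

	sib.sib_reason = SIB_HELPER_PRE;	/* pick the broadcast reason ... */
	sib.helper_name = cmd;			/* ... and fill the matching union members */
	drbd_bcast_event(mdev, &sib);
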
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9f6db5947c65..9697ab872098 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -86,7 +86,6 @@ MODULE_PARM_DESC(allow_oos, "DONT USE!");
 module_param(minor_count, uint, 0444);
 module_param(disable_sendpage, bool, 0644);
 module_param(allow_oos, bool, 0);
-module_param(cn_idx, uint, 0444);
 module_param(proc_details, int, 0644);
 
 #ifdef CONFIG_DRBD_FAULT_INJECTION
@@ -108,7 +107,6 @@ module_param(fault_devs, int, 0644);
 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
 int disable_sendpage;
 int allow_oos;
-unsigned int cn_idx = CN_IDX_DRBD;
 int proc_details; /* Detail level in proc drbd*/
 
 /* Module parameter for setting the user mode helper program
@@ -2175,7 +2173,7 @@ static void drbd_cleanup(void)
 	if (drbd_proc)
 		remove_proc_entry("drbd", NULL);
 
-	drbd_nl_cleanup();
+	drbd_genl_unregister();
 
 	idr_for_each_entry(&minors, mdev, i)
 		drbd_delete_device(i);
@@ -2237,6 +2235,9 @@ struct drbd_tconn *conn_by_name(const char *name)
 {
 	struct drbd_tconn *tconn;
 
+	if (!name || !name[0])
+		return NULL;
+
 	write_lock_irq(&global_state_lock);
 	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
 		if (!strcmp(tconn->name, name))
@@ -2248,7 +2249,7 @@ found:
 	return tconn;
 }
 
-struct drbd_tconn *drbd_new_tconn(char *name)
+struct drbd_tconn *drbd_new_tconn(const char *name)
 {
 	struct drbd_tconn *tconn;
 
@@ -2333,6 +2334,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
 
 	mdev->tconn = tconn;
 	mdev->minor = minor;
+	mdev->vnr = vnr;
 
 	drbd_init_set_defaults(mdev);
 
@@ -2461,10 +2463,6 @@ int __init drbd_init(void)
 #endif
 	}
 
-	err = drbd_nl_init();
-	if (err)
-		return err;
-
 	err = register_blkdev(DRBD_MAJOR, "drbd");
 	if (err) {
 		printk(KERN_ERR
@@ -2473,6 +2471,13 @@ int __init drbd_init(void)
 		return err;
 	}
 
+	err = drbd_genl_register();
+	if (err) {
+		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+		goto fail;
+	}
+
+
 	register_reboot_notifier(&drbd_notifier);
 
 	/*
@@ -2487,12 +2492,12 @@ int __init drbd_init(void)
 
 	err = drbd_create_mempools();
 	if (err)
-		goto Enomem;
+		goto fail;
 
 	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
 	if (!drbd_proc) {
 		printk(KERN_ERR "drbd: unable to register proc file\n");
-		goto Enomem;
+		goto fail;
 	}
 
 	rwlock_init(&global_state_lock);
@@ -2507,7 +2512,7 @@ int __init drbd_init(void)
 
 	return 0; /* Success! */
 
-Enomem:
+fail:
 	drbd_cleanup();
 	if (err == -ENOMEM)
 		/* currently always the case */
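
With drbd_genl_register() able to fail for reasons other than memory exhaustion, drbd_init() above funnels every error through the renamed fail: label. Reduced to a sketch with placeholder calls (not the real function names), the unwinding idiom is:

/* Sketch only -- placeholder names, same goto-unwind shape as drbd_init(). */
static int __init example_init(void)
{
	int err;

	err = example_register_genl_family();	/* stands in for drbd_genl_register() */
	if (err)
		goto fail;
	err = example_create_mempools();	/* stands in for drbd_create_mempools() */
	if (err)
		goto fail;
	return 0;

fail:
	example_cleanup();	/* undoes whatever part of the setup succeeded */
	return err;
}
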
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f2739fd188a0..f9be14248e33 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -29,110 +29,225 @@
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/slab.h>
-#include <linux/connector.h>
 #include <linux/blkpg.h>
 #include <linux/cpumask.h>
 #include "drbd_int.h"
 #include "drbd_req.h"
 #include "drbd_wrappers.h"
 #include <asm/unaligned.h>
-#include <linux/drbd_tag_magic.h>
 #include <linux/drbd_limits.h>
-#include <linux/compiler.h>
 #include <linux/kthread.h>
 
-static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
-static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
-static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
-
-/* see get_sb_bdev and bd_claim */
+#include <net/genetlink.h>
+
+/* .doit */
+// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
+// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
+
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
+/* .dumpit */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
+
+#include <linux/drbd_genl_api.h>
+#include <linux/genl_magic_func.h>
+
+/* used blkdev_get_by_path, to claim our meta data device(s) */
 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
 
-/* Generate the tag_list to struct functions */
-#define NL_PACKET(name, number, fields) \
-static int name ## _from_tags( \
-	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
-static int name ## _from_tags( \
-	unsigned short *tags, struct name *arg) \
-{ \
-	int tag; \
-	int dlen; \
-	\
-	while ((tag = get_unaligned(tags++)) != TT_END) { \
-		dlen = get_unaligned(tags++); \
-		switch (tag_number(tag)) { \
-		fields \
-		default: \
-			if (tag & T_MANDATORY) { \
-				printk(KERN_ERR "drbd: Unknown tag: %d\n", tag_number(tag)); \
-				return 0; \
-			} \
-		} \
-		tags = (unsigned short *)((char *)tags + dlen); \
-	} \
-	return 1; \
+/* Configuration is strictly serialized, because generic netlink message
+ * processing is strictly serialized by the genl_lock().
+ * Which means we can use one static global drbd_config_context struct.
+ */
+static struct drbd_config_context {
+	/* assigned from drbd_genlmsghdr */
+	unsigned int minor;
+	/* assigned from request attributes, if present */
+	unsigned int volume;
+#define VOLUME_UNSPECIFIED (-1U)
+	/* pointer into the request skb,
+	 * limited lifetime! */
+	char *conn_name;
+
+	/* reply buffer */
+	struct sk_buff *reply_skb;
+	/* pointer into reply buffer */
+	struct drbd_genlmsghdr *reply_dh;
+	/* resolved from attributes, if possible */
+	struct drbd_conf *mdev;
+	struct drbd_tconn *tconn;
+} adm_ctx;
+
+static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
+{
+	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
+	if (genlmsg_reply(skb, info))
+		printk(KERN_ERR "drbd: error sending genl reply\n");
 }
-#define NL_INTEGER(pn, pr, member) \
-	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
-		arg->member = get_unaligned((int *)(tags)); \
-		break;
-#define NL_INT64(pn, pr, member) \
-	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
-		arg->member = get_unaligned((u64 *)(tags)); \
-		break;
-#define NL_BIT(pn, pr, member) \
-	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
-		arg->member = *(char *)(tags) ? 1 : 0; \
-		break;
-#define NL_STRING(pn, pr, member, len) \
-	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
-		if (dlen > len) { \
-			printk(KERN_ERR "drbd: arg too long: %s (%u wanted, max len: %u bytes)\n", \
-				#member, dlen, (unsigned int)len); \
-			return 0; \
-		} \
-		arg->member ## _len = dlen; \
-		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
-		break;
-#include "linux/drbd_nl.h"
-
-/* Generate the struct to tag_list functions */
-#define NL_PACKET(name, number, fields) \
-static unsigned short* \
-name ## _to_tags( \
-	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
-static unsigned short* \
-name ## _to_tags( \
-	struct name *arg, unsigned short *tags) \
-{ \
-	fields \
-	return tags; \
+
+/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
+ * reason it could fail was no space in skb, and there are 4k available. */
+static int drbd_msg_put_info(const char *info)
+{
+	struct sk_buff *skb = adm_ctx.reply_skb;
+	struct nlattr *nla;
+	int err = -EMSGSIZE;
+
+	if (!info || !info[0])
+		return 0;
+
+	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+	if (!nla)
+		return err;
+
+	err = nla_put_string(skb, T_info_text, info);
+	if (err) {
+		nla_nest_cancel(skb, nla);
+		return err;
+	} else
+		nla_nest_end(skb, nla);
+	return 0;
 }
 
-#define NL_INTEGER(pn, pr, member) \
-	put_unaligned(pn | pr | TT_INTEGER, tags++); \
-	put_unaligned(sizeof(int), tags++); \
-	put_unaligned(arg->member, (int *)tags); \
-	tags = (unsigned short *)((char *)tags+sizeof(int));
-#define NL_INT64(pn, pr, member) \
-	put_unaligned(pn | pr | TT_INT64, tags++); \
-	put_unaligned(sizeof(u64), tags++); \
-	put_unaligned(arg->member, (u64 *)tags); \
-	tags = (unsigned short *)((char *)tags+sizeof(u64));
-#define NL_BIT(pn, pr, member) \
-	put_unaligned(pn | pr | TT_BIT, tags++); \
-	put_unaligned(sizeof(char), tags++); \
-	*(char *)tags = arg->member; \
-	tags = (unsigned short *)((char *)tags+sizeof(char));
-#define NL_STRING(pn, pr, member, len) \
-	put_unaligned(pn | pr | TT_STRING, tags++); \
-	put_unaligned(arg->member ## _len, tags++); \
-	memcpy(tags, arg->member, arg->member ## _len); \
-	tags = (unsigned short *)((char *)tags + arg->member ## _len);
-#include "linux/drbd_nl.h"
-
-void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
-void drbd_nl_send_reply(struct cn_msg *, int);
+/* This would be a good candidate for a "pre_doit" hook,
+ * and per-family private info->pointers.
+ * But we need to stay compatible with older kernels.
+ * If it returns successfully, adm_ctx members are valid.
+ */
+#define DRBD_ADM_NEED_MINOR 1
+#define DRBD_ADM_NEED_CONN 2
+static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
+		unsigned flags)
+{
+	struct drbd_genlmsghdr *d_in = info->userhdr;
+	const u8 cmd = info->genlhdr->cmd;
+	int err;
+
+	memset(&adm_ctx, 0, sizeof(adm_ctx));
+
+	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
+	if (cmd != DRBD_ADM_GET_STATUS
+	    && security_netlink_recv(skb, CAP_SYS_ADMIN))
+		return -EPERM;
+
+	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!adm_ctx.reply_skb)
+		goto fail;
+
+	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
+					info, &drbd_genl_family, 0, cmd);
+	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
+	 * but anyways */
+	if (!adm_ctx.reply_dh)
+		goto fail;
+
+	adm_ctx.reply_dh->minor = d_in->minor;
+	adm_ctx.reply_dh->ret_code = NO_ERROR;
+
+	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
+		struct nlattr *nla;
+		/* parse and validate only */
+		err = drbd_cfg_context_from_attrs(NULL, info->attrs);
+		if (err)
+			goto fail;
+
+		/* It was present, and valid,
+		 * copy it over to the reply skb. */
+		err = nla_put_nohdr(adm_ctx.reply_skb,
+				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
+				info->attrs[DRBD_NLA_CFG_CONTEXT]);
+		if (err)
+			goto fail;
+
+		/* and assign stuff to the global adm_ctx */
+		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
+		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
+		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
+		if (nla)
+			adm_ctx.conn_name = nla_data(nla);
+	} else
+		adm_ctx.volume = VOLUME_UNSPECIFIED;
+
+	adm_ctx.minor = d_in->minor;
+	adm_ctx.mdev = minor_to_mdev(d_in->minor);
+	adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
+
+	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+		drbd_msg_put_info("unknown minor");
+		return ERR_MINOR_INVALID;
+	}
+	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
+		drbd_msg_put_info("unknown connection");
+		return ERR_INVALID_REQUEST;
+	}
+
+	/* some more paranoia, if the request was over-determined */
+	if (adm_ctx.mdev &&
+	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
+	    adm_ctx.volume != adm_ctx.mdev->vnr) {
+		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
+				adm_ctx.minor, adm_ctx.volume,
+				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+		drbd_msg_put_info("over-determined configuration context mismatch");
+		return ERR_INVALID_REQUEST;
+	}
+	if (adm_ctx.mdev && adm_ctx.tconn &&
+	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
+		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
+				adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
+		drbd_msg_put_info("over-determined configuration context mismatch");
+		return ERR_INVALID_REQUEST;
+	}
+	return NO_ERROR;
+
+fail:
+	nlmsg_free(adm_ctx.reply_skb);
+	adm_ctx.reply_skb = NULL;
+	return -ENOMEM;
+}
+
+static int drbd_adm_finish(struct genl_info *info, int retcode)
+{
+	struct nlattr *nla;
+	const char *conn_name = NULL;
+
+	if (!adm_ctx.reply_skb)
+		return -ENOMEM;
+
+	adm_ctx.reply_dh->ret_code = retcode;
+
+	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
+	if (nla) {
+		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
+		if (nla)
+			conn_name = nla_data(nla);
+	}
+
+	drbd_adm_send_reply(adm_ctx.reply_skb, info);
+	return 0;
+}
 
 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 {
@@ -142,9 +257,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 			NULL, /* Will be set to address family */
 			NULL, /* Will be set to address */
 			NULL };
-
 	char mb[12], af[20], ad[60], *afs;
 	char *argv[] = {usermode_helper, cmd, mb, NULL };
+	struct sib_info sib;
 	int ret;
 
 	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
@@ -177,8 +292,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 	drbd_md_sync(mdev);
 
 	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
-
-	drbd_bcast_ev_helper(mdev, cmd);
+	sib.sib_reason = SIB_HELPER_PRE;
+	sib.helper_name = cmd;
+	drbd_bcast_event(mdev, &sib);
 	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
 	if (ret)
 		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
@@ -188,6 +304,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
 				usermode_helper, cmd, mb,
 				(ret >> 8) & 0xff, ret);
+	sib.sib_reason = SIB_HELPER_POST;
+	sib.helper_exit_code = ret;
+	drbd_bcast_event(mdev, &sib);
 
 	if (ret < 0) /* Ignore any ERRNOs we got. */
 		ret = 0;
@@ -362,7 +481,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	}
 
 	if (rv == SS_NOTHING_TO_DO)
-		goto fail;
+		goto out;
 	if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
 		nps = drbd_try_outdate_peer(mdev);
 
@@ -388,13 +507,13 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 			rv = _drbd_request_state(mdev, mask, val,
 						CS_VERBOSE + CS_WAIT_COMPLETE);
 			if (rv < SS_SUCCESS)
-				goto fail;
+				goto out;
 		}
 		break;
 	}
 
 	if (rv < SS_SUCCESS)
-		goto fail;
+		goto out;
 
 	if (forced)
 		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
@@ -438,33 +557,46 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	drbd_md_sync(mdev);
 
 	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
-fail:
+out:
 	mutex_unlock(mdev->state_mutex);
 	return rv;
 }
 
-static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			   struct drbd_nl_cfg_reply *reply)
+static const char *from_attrs_err_to_txt(int err)
 {
-	struct primary primary_args;
-
-	memset(&primary_args, 0, sizeof(struct primary));
-	if (!primary_from_tags(nlp->tag_list, &primary_args)) {
-		reply->ret_code = ERR_MANDATORY_TAG;
-		return 0;
-	}
-
-	reply->ret_code =
-		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);
-
-	return 0;
+	return err == -ENOMSG ? "required attribute missing" :
+	       err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+	       "invalid attribute value";
 }
 
-static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			     struct drbd_nl_cfg_reply *reply)
+int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
 {
-	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);
+	struct set_role_parms parms;
+	int err;
+	enum drbd_ret_code retcode;
 
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	memset(&parms, 0, sizeof(parms));
+	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
+		err = set_role_parms_from_attrs(&parms, info->attrs);
+		if (err) {
+			retcode = ERR_MANDATORY_TAG;
+			drbd_msg_put_info(from_attrs_err_to_txt(err));
+			goto out;
+		}
+	}
+
+	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
+		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+	else
+		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+out:
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
@@ -541,6 +673,12 @@ char *ppsize(char *buf, unsigned long long size)
  * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
  *  peer may not initiate a resize.
  */
+/* Note these are not to be confused with
+ * drbd_adm_suspend_io/drbd_adm_resume_io,
+ * which are (sub) state changes triggered by admin (drbdsetup),
+ * and can be long lived.
+ * This changes an mdev->flag, is triggered by drbd internals,
+ * and should be short-lived. */
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
 	set_bit(SUSPEND_IO, &mdev->flags);
@@ -881,11 +1019,10 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 	dev_info(DEV, "Suspended AL updates\n");
 }
 
-/* does always return 0;
- * interesting return code is in reply->ret_code */
-static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			     struct drbd_nl_cfg_reply *reply)
+int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
+	struct drbd_conf *mdev;
+	int err;
 	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
 	sector_t max_possible_sectors;
@@ -897,6 +1034,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	enum drbd_state_rv rv;
 	int cp_discovered = 0;
 
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto fail;
+
+	mdev = adm_ctx.mdev;
 	conn_reconfig_start(mdev->tconn);
 
 	/* if you want to reconfigure, please tear down first */
@@ -910,7 +1054,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	 * to realize a "hot spare" feature (not that I'd recommend that) */
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
-	/* allocation not in the IO path, cqueue thread context */
+	/* allocation not in the IO path, drbdsetup context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
 	if (!nbc) {
 		retcode = ERR_NOMEM;
@@ -922,12 +1066,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	nbc->dc.fencing = DRBD_FENCING_DEF;
 	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
 
-	if (!disk_conf_from_tags(nlp->tag_list, &nbc->dc)) {
+	err = disk_conf_from_attrs(&nbc->dc, info->attrs);
+	if (err) {
 		retcode = ERR_MANDATORY_TAG;
+		drbd_msg_put_info(from_attrs_err_to_txt(err));
 		goto fail;
 	}
 
-	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
 		retcode = ERR_MD_IDX_INVALID;
 		goto fail;
 	}
@@ -961,7 +1107,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	 */
 	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
 				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-				  (nbc->dc.meta_dev_idx < 0) ?
+				  ((int)nbc->dc.meta_dev_idx < 0) ?
 				  (void *)mdev : (void *)drbd_m_holder);
 	if (IS_ERR(bdev)) {
 		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
@@ -997,7 +1143,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		goto fail;
 	}
 
-	if (nbc->dc.meta_dev_idx < 0) {
+	if ((int)nbc->dc.meta_dev_idx < 0) {
 		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
 		/* at least one MB, otherwise it does not make sense */
 		min_md_device_sectors = (2<<10);
@@ -1028,7 +1174,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		dev_warn(DEV, "==> truncating very big lower level device "
 			"to currently maximum possible %llu sectors <==\n",
 			(unsigned long long) max_possible_sectors);
-		if (nbc->dc.meta_dev_idx >= 0)
+		if ((int)nbc->dc.meta_dev_idx >= 0)
 			dev_warn(DEV, "==>> using internal or flexible "
 				"meta data may help <<==\n");
 	}
@@ -1242,8 +1388,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 	put_ldev(mdev);
-	reply->ret_code = retcode;
 	conn_reconfig_done(mdev->tconn);
+	drbd_adm_finish(info, retcode);
 	return 0;
 
 force_diskless_dec:
@@ -1251,6 +1397,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 force_diskless:
 	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
+	conn_reconfig_done(mdev->tconn);
 fail:
 	if (nbc) {
 		if (nbc->backing_bdev)
@@ -1263,8 +1410,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	}
 	lc_destroy(resync_lru);
 
-	reply->ret_code = retcode;
-	conn_reconfig_done(mdev->tconn);
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
@@ -1273,42 +1419,54 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
  * internal references as well.
  * Only then we have finally detached. */
-static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			  struct drbd_nl_cfg_reply *reply)
+int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
 {
+	struct drbd_conf *mdev;
 	enum drbd_ret_code retcode;
-	int ret;
+
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	mdev = adm_ctx.mdev;
 	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
-	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
-	/* D_FAILED will transition to DISKLESS. */
-	ret = wait_event_interruptible(mdev->misc_wait,
-			mdev->state.disk != D_FAILED);
+	retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	wait_event(mdev->misc_wait,
+			mdev->state.disk != D_DISKLESS ||
+			!atomic_read(&mdev->local_cnt));
 	drbd_resume_io(mdev);
-	if ((int)retcode == (int)SS_IS_DISKLESS)
-		retcode = SS_NOTHING_TO_DO;
-	if (ret)
-		retcode = ERR_INTR;
-	reply->ret_code = retcode;
+out:
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
-static int drbd_nl_net_conf(struct drbd_tconn *tconn, struct drbd_nl_cfg_req *nlp,
-			    struct drbd_nl_cfg_reply *reply)
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 {
-	int i;
-	enum drbd_ret_code retcode;
+	char hmac_name[CRYPTO_MAX_ALG_NAME];
+	struct drbd_conf *mdev;
 	struct net_conf *new_conf = NULL;
 	struct crypto_hash *tfm = NULL;
 	struct crypto_hash *integrity_w_tfm = NULL;
 	struct crypto_hash *integrity_r_tfm = NULL;
-	struct drbd_conf *mdev;
-	char hmac_name[CRYPTO_MAX_ALG_NAME];
 	void *int_dig_out = NULL;
 	void *int_dig_in = NULL;
 	void *int_dig_vv = NULL;
 	struct drbd_tconn *oconn;
+	struct drbd_tconn *tconn;
 	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
+	enum drbd_ret_code retcode;
+	int i;
+	int err;
 
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	tconn = adm_ctx.tconn;
 	conn_reconfig_start(tconn);
 
 	if (tconn->cstate > C_STANDALONE) {
@@ -1343,8 +1501,10 @@ static int drbd_nl_net_conf(struct drbd_tconn *tconn, struct drbd_nl_cfg_req *nl
 	new_conf->on_congestion = DRBD_ON_CONGESTION_DEF;
 	new_conf->cong_extents = DRBD_CONG_EXTENTS_DEF;
 
-	if (!net_conf_from_tags(nlp->tag_list, new_conf)) {
+	err = net_conf_from_attrs(new_conf, info->attrs);
+	if (err) {
 		retcode = ERR_MANDATORY_TAG;
+		drbd_msg_put_info(from_attrs_err_to_txt(err));
 		goto fail;
 	}
 
@@ -1495,8 +1655,8 @@ static int drbd_nl_net_conf(struct drbd_tconn *tconn, struct drbd_nl_cfg_req *nl
 		mdev->recv_cnt = 0;
 		kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 	}
-	reply->ret_code = retcode;
 	conn_reconfig_done(tconn);
+	drbd_adm_finish(info, retcode);
 	return 0;
 
 fail:
@@ -1508,24 +1668,37 @@ fail:
 	crypto_free_hash(integrity_r_tfm);
 	kfree(new_conf);
 
-	reply->ret_code = retcode;
 	conn_reconfig_done(tconn);
+out:
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
-static int drbd_nl_disconnect(struct drbd_tconn *tconn, struct drbd_nl_cfg_req *nlp,
-			      struct drbd_nl_cfg_reply *reply)
+int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
-	int retcode;
-	struct disconnect dc;
+	struct disconnect_parms parms;
+	struct drbd_tconn *tconn;
+	enum drbd_ret_code retcode;
+	int err;
 
-	memset(&dc, 0, sizeof(struct disconnect));
-	if (!disconnect_from_tags(nlp->tag_list, &dc)) {
-		retcode = ERR_MANDATORY_TAG;
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
 		goto fail;
+
+	tconn = adm_ctx.tconn;
+	memset(&parms, 0, sizeof(parms));
+	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
+		err = disconnect_parms_from_attrs(&parms, info->attrs);
+		if (err) {
+			retcode = ERR_MANDATORY_TAG;
+			drbd_msg_put_info(from_attrs_err_to_txt(err));
+			goto fail;
+		}
 	}
 
-	if (dc.force) {
+	if (parms.force_disconnect) {
 		spin_lock_irq(&tconn->req_lock);
 		if (tconn->cstate >= C_WF_CONNECTION)
 			_conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -1567,7 +1740,7 @@ static int drbd_nl_disconnect(struct drbd_tconn *tconn, struct drbd_nl_cfg_req *
 done:
 	retcode = NO_ERROR;
 fail:
-	reply->ret_code = retcode;
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
@@ -1587,20 +1760,32 @@ void resync_after_online_grow(struct drbd_conf *mdev)
 		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
 }
 
-static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			  struct drbd_nl_cfg_reply *reply)
+int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 {
-	struct resize rs;
-	int retcode = NO_ERROR;
+	struct resize_parms rs;
+	struct drbd_conf *mdev;
+	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
 	enum dds_flags ddsf;
+	int err;
 
-	memset(&rs, 0, sizeof(struct resize));
-	if (!resize_from_tags(nlp->tag_list, &rs)) {
-		retcode = ERR_MANDATORY_TAG;
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
 		goto fail;
+
+	memset(&rs, 0, sizeof(struct resize_parms));
+	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+		err = resize_parms_from_attrs(&rs, info->attrs);
+		if (err) {
+			retcode = ERR_MANDATORY_TAG;
+			drbd_msg_put_info(from_attrs_err_to_txt(err));
+			goto fail;
+		}
 	}
 
+	mdev = adm_ctx.mdev;
 	if (mdev->state.conn > C_CONNECTED) {
 		retcode = ERR_RESIZE_RESYNC;
 		goto fail;
@@ -1644,14 +1829,14 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 fail:
-	reply->ret_code = retcode;
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
-static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			       struct drbd_nl_cfg_reply *reply)
+int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
 {
-	int retcode = NO_ERROR;
+	struct drbd_conf *mdev;
+	enum drbd_ret_code retcode;
 	int err;
 	int ovr; /* online verify running */
 	int rsr; /* re-sync running */
@@ -1662,12 +1847,21 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
 	int *rs_plan_s = NULL;
 	int fifo_size;
 
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto fail;
+	mdev = adm_ctx.mdev;
+
 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
 		retcode = ERR_NOMEM;
+		drbd_msg_put_info("unable to allocate cpumask");
 		goto fail;
 	}
 
-	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
+	if (((struct drbd_genlmsghdr*)info->userhdr)->flags
+	    & DRBD_GENL_F_SET_DEFAULTS) {
 		memset(&sc, 0, sizeof(struct syncer_conf));
 		sc.rate = DRBD_RATE_DEF;
 		sc.after = DRBD_AFTER_DEF;
@@ -1681,8 +1875,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
 	} else
 		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
 
-	if (!syncer_conf_from_tags(nlp->tag_list, &sc)) {
+	err = syncer_conf_from_attrs(&sc, info->attrs);
+	if (err) {
 		retcode = ERR_MANDATORY_TAG;
+		drbd_msg_put_info(from_attrs_err_to_txt(err));
 		goto fail;
 	}
 
@@ -1832,14 +2028,23 @@ fail:
 	free_cpumask_var(new_cpu_mask);
 	crypto_free_hash(csums_tfm);
 	crypto_free_hash(verify_tfm);
-	reply->ret_code = retcode;
+
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
-static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
-			      struct drbd_nl_cfg_reply *reply)
+int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 {
-	int retcode;
+	struct drbd_conf *mdev;
+	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+
+	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+	if (!adm_ctx.reply_skb)
+		return retcode;
+	if (retcode != NO_ERROR)
+		goto out;
+
+	mdev = adm_ctx.mdev;
 
 	/* If there is still bitmap IO pending, probably because of a previous
 	 * resync just being finished, wait for it before requesting a new resync. */
@@ -1862,7 +2067,8 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
 	}
 
-	reply->ret_code = retcode;
+out:
+	drbd_adm_finish(info, retcode);
 	return 0;
 }
 
@@ -1875,56 +2081,58 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev) | |||
1875 | return rv; | 2081 | return rv; |
1876 | } | 2082 | } |
1877 | 2083 | ||
1878 | static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2084 | static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, |
1879 | struct drbd_nl_cfg_reply *reply) | 2085 | union drbd_state mask, union drbd_state val) |
1880 | { | 2086 | { |
1881 | int retcode; | 2087 | enum drbd_ret_code retcode; |
1882 | |||
1883 | /* If there is still bitmap IO pending, probably because of a previous | ||
1884 | * resync just being finished, wait for it before requesting a new resync. */ | ||
1885 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | ||
1886 | 2088 | ||
1887 | retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); | 2089 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
1888 | 2090 | if (!adm_ctx.reply_skb) | |
1889 | if (retcode < SS_SUCCESS) { | 2091 | return retcode; |
1890 | if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) { | 2092 | if (retcode != NO_ERROR) |
1891 | /* The peer will get a resync upon connect anyways. Just make that | 2093 | goto out; |
1892 | into a full resync. */ | ||
1893 | retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); | ||
1894 | if (retcode >= SS_SUCCESS) { | ||
1895 | if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, | ||
1896 | "set_n_write from invalidate_peer", | ||
1897 | BM_LOCKED_SET_ALLOWED)) | ||
1898 | retcode = ERR_IO_MD_DISK; | ||
1899 | } | ||
1900 | } else | ||
1901 | retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); | ||
1902 | } | ||
1903 | 2094 | ||
1904 | reply->ret_code = retcode; | 2095 | retcode = drbd_request_state(adm_ctx.mdev, mask, val); |
2096 | out: | ||
2097 | drbd_adm_finish(info, retcode); | ||
1905 | return 0; | 2098 | return 0; |
1906 | } | 2099 | } |
1907 | 2100 | ||
1908 | static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2101 | int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) |
1909 | struct drbd_nl_cfg_reply *reply) | ||
1910 | { | 2102 | { |
1911 | int retcode = NO_ERROR; | 2103 | return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S)); |
2104 | } | ||
1912 | 2105 | ||
1913 | if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) | 2106 | int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) |
1914 | retcode = ERR_PAUSE_IS_SET; | 2107 | { |
2108 | enum drbd_ret_code retcode; | ||
2109 | |||
2110 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); | ||
2111 | if (!adm_ctx.reply_skb) | ||
2112 | return retcode; | ||
2113 | if (retcode != NO_ERROR) | ||
2114 | goto out; | ||
1915 | 2115 | ||
1916 | reply->ret_code = retcode; | 2116 | if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO) |
2117 | retcode = ERR_PAUSE_IS_SET; | ||
2118 | out: | ||
2119 | drbd_adm_finish(info, retcode); | ||
1917 | return 0; | 2120 | return 0; |
1918 | } | 2121 | } |
1919 | 2122 | ||
1920 | static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2123 | int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) |
1921 | struct drbd_nl_cfg_reply *reply) | ||
1922 | { | 2124 | { |
1923 | int retcode = NO_ERROR; | ||
1924 | union drbd_state s; | 2125 | union drbd_state s; |
2126 | enum drbd_ret_code retcode; | ||
2127 | |||
2128 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); | ||
2129 | if (!adm_ctx.reply_skb) | ||
2130 | return retcode; | ||
2131 | if (retcode != NO_ERROR) | ||
2132 | goto out; | ||
1925 | 2133 | ||
1926 | if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { | 2134 | if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { |
1927 | s = mdev->state; | 2135 | s = adm_ctx.mdev->state; |
1928 | if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { | 2136 | if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { |
1929 | retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : | 2137 | retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP : |
1930 | s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; | 2138 | s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR; |
@@ -1933,28 +2141,35 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n | |||
1933 | } | 2141 | } |
1934 | } | 2142 | } |
1935 | 2143 | ||
1936 | reply->ret_code = retcode; | 2144 | out: |
2145 | drbd_adm_finish(info, retcode); | ||
1937 | return 0; | 2146 | return 0; |
1938 | } | 2147 | } |
1939 | 2148 | ||
1940 | static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2149 | int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info) |
1941 | struct drbd_nl_cfg_reply *reply) | ||
1942 | { | 2150 | { |
1943 | reply->ret_code = drbd_request_state(mdev, NS(susp, 1)); | 2151 | return drbd_adm_simple_request_state(skb, info, NS(susp, 1)); |
1944 | |||
1945 | return 0; | ||
1946 | } | 2152 | } |
1947 | 2153 | ||
1948 | static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2154 | int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) |
1949 | struct drbd_nl_cfg_reply *reply) | ||
1950 | { | 2155 | { |
2156 | struct drbd_conf *mdev; | ||
2157 | int retcode; /* enum drbd_ret_code or enum drbd_state_rv */ | ||
2158 | |||
2159 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); | ||
2160 | if (!adm_ctx.reply_skb) | ||
2161 | return retcode; | ||
2162 | if (retcode != NO_ERROR) | ||
2163 | goto out; | ||
2164 | |||
2165 | mdev = adm_ctx.mdev; | ||
1951 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { | 2166 | if (test_bit(NEW_CUR_UUID, &mdev->flags)) { |
1952 | drbd_uuid_new_current(mdev); | 2167 | drbd_uuid_new_current(mdev); |
1953 | clear_bit(NEW_CUR_UUID, &mdev->flags); | 2168 | clear_bit(NEW_CUR_UUID, &mdev->flags); |
1954 | } | 2169 | } |
1955 | drbd_suspend_io(mdev); | 2170 | drbd_suspend_io(mdev); |
1956 | reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); | 2171 | retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); |
1957 | if (reply->ret_code == SS_SUCCESS) { | 2172 | if (retcode == SS_SUCCESS) { |
1958 | if (mdev->state.conn < C_CONNECTED) | 2173 | if (mdev->state.conn < C_CONNECTED) |
1959 | tl_clear(mdev->tconn); | 2174 | tl_clear(mdev->tconn); |
1960 | if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) | 2175 | if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) |
@@ -1962,138 +2177,259 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp | |||
1962 | } | 2177 | } |
1963 | drbd_resume_io(mdev); | 2178 | drbd_resume_io(mdev); |
1964 | 2179 | ||
2180 | out: | ||
2181 | drbd_adm_finish(info, retcode); | ||
1965 | return 0; | 2182 | return 0; |
1966 | } | 2183 | } |
1967 | 2184 | ||
1968 | static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2185 | int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info) |
1969 | struct drbd_nl_cfg_reply *reply) | ||
1970 | { | 2186 | { |
1971 | reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED)); | 2187 | return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED)); |
1972 | return 0; | ||
1973 | } | 2188 | } |
1974 | 2189 | ||
1975 | static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2190 | int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev, |
1976 | struct drbd_nl_cfg_reply *reply) | 2191 | const struct sib_info *sib) |
1977 | { | 2192 | { |
1978 | unsigned short *tl; | 2193 | struct state_info *si = NULL; /* for sizeof(si->member); */ |
1979 | 2194 | struct nlattr *nla; | |
1980 | tl = reply->tag_list; | 2195 | int got_ldev; |
1981 | 2196 | int got_net; | |
1982 | if (get_ldev(mdev)) { | 2197 | int err = 0; |
1983 | tl = disk_conf_to_tags(&mdev->ldev->dc, tl); | 2198 | int exclude_sensitive; |
1984 | put_ldev(mdev); | 2199 | |
2200 | /* If sib != NULL, this is drbd_bcast_event, which anyone can listen | ||
2201 | * to. So we had better exclude_sensitive information. | ||
2202 | * | ||
2203 | * If sib == NULL, this is drbd_adm_get_status, executed synchronously | ||
2204 | * in the context of the requesting user process. Exclude sensitive | ||
2205 | * information, unless the requesting process has CAP_SYS_ADMIN. | ||
2206 | * | ||
2207 | * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and | ||
2208 | * relies on the current implementation of netlink_dump(), which | ||
2209 | * executes the dump callback successively from netlink_recvmsg(), | ||
2210 | * always in the context of the receiving process */ | ||
2211 | exclude_sensitive = sib || !capable(CAP_SYS_ADMIN); | ||
2212 | |||
2213 | got_ldev = get_ldev(mdev); | ||
2214 | got_net = get_net_conf(mdev->tconn); | ||
2215 | |||
2216 | /* We still need to add connection name and volume number information. | ||
2217 | * Minor number is in drbd_genlmsghdr. */ | ||
2218 | nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); | ||
2219 | if (!nla) | ||
2220 | goto nla_put_failure; | ||
2221 | NLA_PUT_U32(skb, T_ctx_volume, mdev->vnr); | ||
2222 | NLA_PUT_STRING(skb, T_ctx_conn_name, mdev->tconn->name); | ||
2223 | nla_nest_end(skb, nla); | ||
2224 | |||
2225 | if (got_ldev) | ||
2226 | if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive)) | ||
2227 | goto nla_put_failure; | ||
2228 | if (got_net) | ||
2229 | if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive)) | ||
2230 | goto nla_put_failure; | ||
2231 | |||
2232 | if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive)) | ||
2233 | goto nla_put_failure; | ||
2234 | |||
2235 | nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO); | ||
2236 | if (!nla) | ||
2237 | goto nla_put_failure; | ||
2238 | NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY); | ||
2239 | NLA_PUT_U32(skb, T_current_state, mdev->state.i); | ||
2240 | NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid); | ||
2241 | NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)); | ||
2242 | |||
2243 | if (got_ldev) { | ||
2244 | NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags); | ||
2245 | NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid); | ||
2246 | NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev)); | ||
2247 | NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev)); | ||
2248 | if (C_SYNC_SOURCE <= mdev->state.conn && | ||
2249 | C_PAUSED_SYNC_T >= mdev->state.conn) { | ||
2250 | NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total); | ||
2251 | NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed); | ||
2252 | } | ||
1985 | } | 2253 | } |
1986 | 2254 | ||
1987 | if (get_net_conf(mdev->tconn)) { | 2255 | if (sib) { |
1988 | tl = net_conf_to_tags(mdev->tconn->net_conf, tl); | 2256 | switch(sib->sib_reason) { |
1989 | put_net_conf(mdev->tconn); | 2257 | case SIB_SYNC_PROGRESS: |
2258 | case SIB_GET_STATUS_REPLY: | ||
2259 | break; | ||
2260 | case SIB_STATE_CHANGE: | ||
2261 | NLA_PUT_U32(skb, T_prev_state, sib->os.i); | ||
2262 | NLA_PUT_U32(skb, T_new_state, sib->ns.i); | ||
2263 | break; | ||
2264 | case SIB_HELPER_POST: | ||
2265 | NLA_PUT_U32(skb, | ||
2266 | T_helper_exit_code, sib->helper_exit_code); | ||
2267 | /* fall through */ | ||
2268 | case SIB_HELPER_PRE: | ||
2269 | NLA_PUT_STRING(skb, T_helper, sib->helper_name); | ||
2270 | break; | ||
2271 | } | ||
1990 | } | 2272 | } |
1991 | tl = syncer_conf_to_tags(&mdev->sync_conf, tl); | 2273 | nla_nest_end(skb, nla); |
1992 | |||
1993 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
1994 | 2274 | ||
1995 | return (int)((char *)tl - (char *)reply->tag_list); | 2275 | if (0) |
2276 | nla_put_failure: | ||
2277 | err = -EMSGSIZE; | ||
2278 | if (got_ldev) | ||
2279 | put_ldev(mdev); | ||
2280 | if (got_net) | ||
2281 | put_net_conf(mdev->tconn); | ||
2282 | return err; | ||
1996 | } | 2283 | } |
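nla_put_status_info() uses the standard netlink fill idiom of this kernel generation: the NLA_PUT_* macros jump to a local nla_put_failure label when the message runs out of room, and the "if (0)" construct lets the success path skip the error assignment while sharing the cleanup that follows. A self-contained sketch of the idiom with placeholder attribute numbers (EX_NEST, EX_A, EX_B are made up for this example, they are not DRBD attributes):

#include <net/netlink.h>

enum { EX_NEST = 1, EX_A = 1, EX_B = 2 };       /* placeholders for this sketch */

static int example_fill(struct sk_buff *skb, u32 a, u64 b)
{
        struct nlattr *nla;
        int err = 0;

        nla = nla_nest_start(skb, EX_NEST);
        if (!nla)
                goto nla_put_failure;
        NLA_PUT_U32(skb, EX_A, a);      /* jumps to nla_put_failure if it does not fit */
        NLA_PUT_U64(skb, EX_B, b);
        nla_nest_end(skb, nla);

        if (0)
nla_put_failure:
                err = -EMSGSIZE;
        return err;
}

In the real function the ldev and net_conf references taken at the top have to be dropped on success and failure alike, which is why both paths funnel through the same tail.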
1997 | 2284 | ||
1998 | static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2285 | int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) |
1999 | struct drbd_nl_cfg_reply *reply) | ||
2000 | { | 2286 | { |
2001 | unsigned short *tl = reply->tag_list; | 2287 | enum drbd_ret_code retcode; |
2002 | union drbd_state s = mdev->state; | 2288 | int err; |
2003 | unsigned long rs_left; | ||
2004 | unsigned int res; | ||
2005 | 2289 | ||
2006 | tl = get_state_to_tags((struct get_state *)&s, tl); | 2290 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
2291 | if (!adm_ctx.reply_skb) | ||
2292 | return retcode; | ||
2293 | if (retcode != NO_ERROR) | ||
2294 | goto out; | ||
2007 | 2295 | ||
2008 | /* no local ref, no bitmap, no syncer progress. */ | 2296 | err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL); |
2009 | if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) { | 2297 | if (err) { |
2010 | if (get_ldev(mdev)) { | 2298 | nlmsg_free(adm_ctx.reply_skb); |
2011 | drbd_get_syncer_progress(mdev, &rs_left, &res); | 2299 | return err; |
2012 | tl = tl_add_int(tl, T_sync_progress, &res); | ||
2013 | put_ldev(mdev); | ||
2014 | } | ||
2015 | } | 2300 | } |
2016 | put_unaligned(TT_END, tl++); /* Close the tag list */ | 2301 | out: |
2017 | 2302 | drbd_adm_finish(info, retcode); | |
2018 | return (int)((char *)tl - (char *)reply->tag_list); | 2303 | return 0; |
2019 | } | 2304 | } |
2020 | 2305 | ||
2021 | static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2306 | int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb) |
2022 | struct drbd_nl_cfg_reply *reply) | ||
2023 | { | 2307 | { |
2024 | unsigned short *tl; | 2308 | struct drbd_conf *mdev; |
2025 | 2309 | struct drbd_genlmsghdr *dh; | |
2026 | tl = reply->tag_list; | 2310 | int minor = cb->args[0]; |
2311 | |||
2312 | /* Open coded deferred single idr_for_each_entry iteration. | ||
2313 | * This may miss entries inserted after this dump started, | ||
2314 | * or entries deleted before they are reached. | ||
2315 | * But we need to make sure the mdev won't disappear while | ||
2316 | * we are looking at it. */ | ||
2317 | |||
2318 | rcu_read_lock(); | ||
2319 | mdev = idr_get_next(&minors, &minor); | ||
2320 | if (mdev) { | ||
2321 | dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, | ||
2322 | cb->nlh->nlmsg_seq, &drbd_genl_family, | ||
2323 | NLM_F_MULTI, DRBD_ADM_GET_STATUS); | ||
2324 | if (!dh) | ||
2325 | goto errout; | ||
2326 | |||
2327 | D_ASSERT(mdev->minor == minor); | ||
2328 | |||
2329 | dh->minor = minor; | ||
2330 | dh->ret_code = NO_ERROR; | ||
2331 | |||
2332 | if (nla_put_status_info(skb, mdev, NULL)) { | ||
2333 | genlmsg_cancel(skb, dh); | ||
2334 | goto errout; | ||
2335 | } | ||
2336 | genlmsg_end(skb, dh); | ||
2337 | } | ||
2027 | 2338 | ||
2028 | if (get_ldev(mdev)) { | 2339 | errout: |
2029 | tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64)); | 2340 | rcu_read_unlock(); |
2030 | tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags); | 2341 | /* where to start idr_get_next with the next iteration */ |
2031 | put_ldev(mdev); | 2342 | cb->args[0] = minor+1; |
2032 | } | ||
2033 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2034 | 2343 | ||
2035 | return (int)((char *)tl - (char *)reply->tag_list); | 2344 | /* No more minors found: empty skb. Which will terminate the dump. */ |
2345 | return skb->len; | ||
2036 | } | 2346 | } |
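drbd_adm_get_status_all() is registered as a netlink dump: the callback is invoked repeatedly, emits at most one minor per invocation, remembers where to resume in cb->args[0], and ends the dump by producing an empty skb. The control flow, reduced to a sketch (example_idr and fill_one() are placeholders, not DRBD symbols):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <net/genetlink.h>

static struct idr example_idr;          /* placeholder object registry */

static int fill_one(struct sk_buff *skb, void *obj)
{
        /* a real fill would genlmsg_put() a header and nla_put() the
         * attributes describing this object */
        return 0;
}

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        int id = cb->args[0];           /* where the previous invocation stopped */
        void *obj;

        rcu_read_lock();
        obj = idr_get_next(&example_idr, &id);
        if (obj)
                fill_one(skb, obj);     /* one object per dump call */
        rcu_read_unlock();

        cb->args[0] = id + 1;           /* resume after this id next time */

        /* a non-empty skb makes netlink call the dump again; once no object
         * is found the skb stays empty and the dump terminates */
        return skb->len;
}

As the comment in the original notes, objects created or deleted while the dump is in flight may be missed; the rcu_read_lock() around the lookup only keeps the returned entry from disappearing while it is being serialized.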
2037 | 2347 | ||
2038 | /** | 2348 | int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) |
2039 | * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use | ||
2040 | * @mdev: DRBD device. | ||
2041 | * @nlp: Netlink/connector packet from drbdsetup | ||
2042 | * @reply: Reply packet for drbdsetup | ||
2043 | */ | ||
2044 | static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | ||
2045 | struct drbd_nl_cfg_reply *reply) | ||
2046 | { | 2349 | { |
2047 | unsigned short *tl; | 2350 | enum drbd_ret_code retcode; |
2048 | char rv; | 2351 | struct timeout_parms tp; |
2049 | 2352 | int err; | |
2050 | tl = reply->tag_list; | ||
2051 | 2353 | ||
2052 | rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : | 2354 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
2053 | test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT; | 2355 | if (!adm_ctx.reply_skb) |
2356 | return retcode; | ||
2357 | if (retcode != NO_ERROR) | ||
2358 | goto out; | ||
2054 | 2359 | ||
2055 | tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv)); | 2360 | tp.timeout_type = |
2056 | put_unaligned(TT_END, tl++); /* Close the tag list */ | 2361 | adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED : |
2362 | test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED : | ||
2363 | UT_DEFAULT; | ||
2057 | 2364 | ||
2058 | return (int)((char *)tl - (char *)reply->tag_list); | 2365 | err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp); |
2366 | if (err) { | ||
2367 | nlmsg_free(adm_ctx.reply_skb); | ||
2368 | return err; | ||
2369 | } | ||
2370 | out: | ||
2371 | drbd_adm_finish(info, retcode); | ||
2372 | return 0; | ||
2059 | } | 2373 | } |
2060 | 2374 | ||
2061 | static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2375 | int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) |
2062 | struct drbd_nl_cfg_reply *reply) | ||
2063 | { | 2376 | { |
2064 | /* default to resume from last known position, if possible */ | 2377 | struct drbd_conf *mdev; |
2065 | struct start_ov args = | 2378 | enum drbd_ret_code retcode; |
2066 | { .start_sector = mdev->ov_start_sector }; | ||
2067 | 2379 | ||
2068 | if (!start_ov_from_tags(nlp->tag_list, &args)) { | 2380 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
2069 | reply->ret_code = ERR_MANDATORY_TAG; | 2381 | if (!adm_ctx.reply_skb) |
2070 | return 0; | 2382 | return retcode; |
2071 | } | 2383 | if (retcode != NO_ERROR) |
2384 | goto out; | ||
2072 | 2385 | ||
2386 | mdev = adm_ctx.mdev; | ||
2387 | if (info->attrs[DRBD_NLA_START_OV_PARMS]) { | ||
2388 | /* resume from last known position, if possible */ | ||
2389 | struct start_ov_parms parms = | ||
2390 | { .ov_start_sector = mdev->ov_start_sector }; | ||
2391 | int err = start_ov_parms_from_attrs(&parms, info->attrs); | ||
2392 | if (err) { | ||
2393 | retcode = ERR_MANDATORY_TAG; | ||
2394 | drbd_msg_put_info(from_attrs_err_to_txt(err)); | ||
2395 | goto out; | ||
2396 | } | ||
2397 | /* w_make_ov_request expects position to be aligned */ | ||
2398 | mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT; | ||
2399 | } | ||
2073 | /* If there is still bitmap IO pending, e.g. previous resync or verify | 2400 | /* If there is still bitmap IO pending, e.g. previous resync or verify |
2074 | * just being finished, wait for it before requesting a new resync. */ | 2401 | * just being finished, wait for it before requesting a new resync. */ |
2075 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); | 2402 | wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); |
2076 | 2403 | retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); | |
2077 | /* w_make_ov_request expects position to be aligned */ | 2404 | out: |
2078 | mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT; | 2405 | drbd_adm_finish(info, retcode); |
2079 | reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S)); | ||
2080 | return 0; | 2406 | return 0; |
2081 | } | 2407 | } |
2082 | 2408 | ||
2083 | 2409 | ||
2084 | static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2410 | int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) |
2085 | struct drbd_nl_cfg_reply *reply) | ||
2086 | { | 2411 | { |
2087 | int retcode = NO_ERROR; | 2412 | struct drbd_conf *mdev; |
2413 | enum drbd_ret_code retcode; | ||
2088 | int skip_initial_sync = 0; | 2414 | int skip_initial_sync = 0; |
2089 | int err; | 2415 | int err; |
2416 | struct new_c_uuid_parms args; | ||
2090 | 2417 | ||
2091 | struct new_c_uuid args; | 2418 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
2419 | if (!adm_ctx.reply_skb) | ||
2420 | return retcode; | ||
2421 | if (retcode != NO_ERROR) | ||
2422 | goto out_nolock; | ||
2092 | 2423 | ||
2093 | memset(&args, 0, sizeof(struct new_c_uuid)); | 2424 | mdev = adm_ctx.mdev; |
2094 | if (!new_c_uuid_from_tags(nlp->tag_list, &args)) { | 2425 | memset(&args, 0, sizeof(args)); |
2095 | reply->ret_code = ERR_MANDATORY_TAG; | 2426 | if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) { |
2096 | return 0; | 2427 | err = new_c_uuid_parms_from_attrs(&args, info->attrs); |
2428 | if (err) { | ||
2429 | retcode = ERR_MANDATORY_TAG; | ||
2430 | drbd_msg_put_info(from_attrs_err_to_txt(err)); | ||
2431 | goto out_nolock; | ||
2432 | } | ||
2097 | } | 2433 | } |
2098 | 2434 | ||
2099 | mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */ | 2435 | mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */ |
@@ -2139,510 +2475,164 @@ out_dec: | |||
2139 | put_ldev(mdev); | 2475 | put_ldev(mdev); |
2140 | out: | 2476 | out: |
2141 | mutex_unlock(mdev->state_mutex); | 2477 | mutex_unlock(mdev->state_mutex); |
2142 | 2478 | out_nolock: | |
2143 | reply->ret_code = retcode; | 2479 | drbd_adm_finish(info, retcode); |
2144 | return 0; | ||
2145 | } | ||
2146 | |||
2147 | static int drbd_nl_new_conn(struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) | ||
2148 | { | ||
2149 | struct new_connection args; | ||
2150 | |||
2151 | if (!new_connection_from_tags(nlp->tag_list, &args)) { | ||
2152 | reply->ret_code = ERR_MANDATORY_TAG; | ||
2153 | return 0; | ||
2154 | } | ||
2155 | |||
2156 | reply->ret_code = NO_ERROR; | ||
2157 | if (!drbd_new_tconn(args.name)) | ||
2158 | reply->ret_code = ERR_NOMEM; | ||
2159 | |||
2160 | return 0; | ||
2161 | } | ||
2162 | |||
2163 | static int drbd_nl_new_minor(struct drbd_tconn *tconn, | ||
2164 | struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) | ||
2165 | { | ||
2166 | struct new_minor args; | ||
2167 | |||
2168 | args.vol_nr = 0; | ||
2169 | args.minor = 0; | ||
2170 | |||
2171 | if (!new_minor_from_tags(nlp->tag_list, &args)) { | ||
2172 | reply->ret_code = ERR_MANDATORY_TAG; | ||
2173 | return 0; | ||
2174 | } | ||
2175 | |||
2176 | reply->ret_code = conn_new_minor(tconn, args.minor, args.vol_nr); | ||
2177 | |||
2178 | return 0; | 2480 | return 0; |
2179 | } | 2481 | } |
2180 | 2482 | ||
2181 | static int drbd_nl_del_minor(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, | 2483 | static enum drbd_ret_code |
2182 | struct drbd_nl_cfg_reply *reply) | 2484 | drbd_check_conn_name(const char *name) |
2183 | { | 2485 | { |
2184 | if (mdev->state.disk == D_DISKLESS && | 2486 | if (!name || !name[0]) { |
2185 | mdev->state.conn == C_STANDALONE && | 2487 | drbd_msg_put_info("connection name missing"); |
2186 | mdev->state.role == R_SECONDARY) { | 2488 | return ERR_MANDATORY_TAG; |
2187 | drbd_delete_device(mdev_to_minor(mdev)); | ||
2188 | reply->ret_code = NO_ERROR; | ||
2189 | } else { | ||
2190 | reply->ret_code = ERR_MINOR_CONFIGURED; | ||
2191 | } | 2489 | } |
2192 | return 0; | 2490 | /* if we want to use these in sysfs/configfs/debugfs some day, |
2193 | } | 2491 | * we must not allow slashes */ |
2194 | 2492 | if (strchr(name, '/')) { | |
2195 | static int drbd_nl_del_conn(struct drbd_tconn *tconn, | 2493 | drbd_msg_put_info("invalid connection name"); |
2196 | struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) | 2494 | return ERR_INVALID_REQUEST; |
2197 | { | ||
2198 | if (conn_lowest_minor(tconn) < 0) { | ||
2199 | drbd_free_tconn(tconn); | ||
2200 | reply->ret_code = NO_ERROR; | ||
2201 | } else { | ||
2202 | reply->ret_code = ERR_CONN_IN_USE; | ||
2203 | } | 2495 | } |
2204 | 2496 | return NO_ERROR; | |
2205 | return 0; | ||
2206 | } | 2497 | } |
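drbd_check_conn_name() keeps connection names usable as future sysfs/configfs/debugfs directory entries. Illustrative calls (the names are invented for the example):

drbd_check_conn_name(NULL);             /* ERR_MANDATORY_TAG, "connection name missing" */
drbd_check_conn_name("");               /* ERR_MANDATORY_TAG */
drbd_check_conn_name("r0/0");           /* ERR_INVALID_REQUEST, slashes are rejected */
drbd_check_conn_name("resource0");      /* NO_ERROR */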
2207 | 2498 | ||
2208 | enum cn_handler_type { | 2499 | int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info) |
2209 | CHT_MINOR, | ||
2210 | CHT_CONN, | ||
2211 | CHT_CTOR, | ||
2212 | /* CHT_RES, later */ | ||
2213 | }; | ||
2214 | struct cn_handler_struct { | ||
2215 | enum cn_handler_type type; | ||
2216 | union { | ||
2217 | int (*minor_based)(struct drbd_conf *, | ||
2218 | struct drbd_nl_cfg_req *, | ||
2219 | struct drbd_nl_cfg_reply *); | ||
2220 | int (*conn_based)(struct drbd_tconn *, | ||
2221 | struct drbd_nl_cfg_req *, | ||
2222 | struct drbd_nl_cfg_reply *); | ||
2223 | int (*constructor)(struct drbd_nl_cfg_req *, | ||
2224 | struct drbd_nl_cfg_reply *); | ||
2225 | }; | ||
2226 | int reply_body_size; | ||
2227 | }; | ||
2228 | |||
2229 | static struct cn_handler_struct cnd_table[] = { | ||
2230 | [ P_primary ] = { CHT_MINOR, { &drbd_nl_primary }, 0 }, | ||
2231 | [ P_secondary ] = { CHT_MINOR, { &drbd_nl_secondary }, 0 }, | ||
2232 | [ P_disk_conf ] = { CHT_MINOR, { &drbd_nl_disk_conf }, 0 }, | ||
2233 | [ P_detach ] = { CHT_MINOR, { &drbd_nl_detach }, 0 }, | ||
2234 | [ P_net_conf ] = { CHT_CONN, { .conn_based = &drbd_nl_net_conf }, 0 }, | ||
2235 | [ P_disconnect ] = { CHT_CONN, { .conn_based = &drbd_nl_disconnect }, 0 }, | ||
2236 | [ P_resize ] = { CHT_MINOR, { &drbd_nl_resize }, 0 }, | ||
2237 | [ P_syncer_conf ] = { CHT_MINOR, { &drbd_nl_syncer_conf },0 }, | ||
2238 | [ P_invalidate ] = { CHT_MINOR, { &drbd_nl_invalidate }, 0 }, | ||
2239 | [ P_invalidate_peer ] = { CHT_MINOR, { &drbd_nl_invalidate_peer },0 }, | ||
2240 | [ P_pause_sync ] = { CHT_MINOR, { &drbd_nl_pause_sync }, 0 }, | ||
2241 | [ P_resume_sync ] = { CHT_MINOR, { &drbd_nl_resume_sync },0 }, | ||
2242 | [ P_suspend_io ] = { CHT_MINOR, { &drbd_nl_suspend_io }, 0 }, | ||
2243 | [ P_resume_io ] = { CHT_MINOR, { &drbd_nl_resume_io }, 0 }, | ||
2244 | [ P_outdate ] = { CHT_MINOR, { &drbd_nl_outdate }, 0 }, | ||
2245 | [ P_get_config ] = { CHT_MINOR, { &drbd_nl_get_config }, | ||
2246 | sizeof(struct syncer_conf_tag_len_struct) + | ||
2247 | sizeof(struct disk_conf_tag_len_struct) + | ||
2248 | sizeof(struct net_conf_tag_len_struct) }, | ||
2249 | [ P_get_state ] = { CHT_MINOR, { &drbd_nl_get_state }, | ||
2250 | sizeof(struct get_state_tag_len_struct) + | ||
2251 | sizeof(struct sync_progress_tag_len_struct) }, | ||
2252 | [ P_get_uuids ] = { CHT_MINOR, { &drbd_nl_get_uuids }, | ||
2253 | sizeof(struct get_uuids_tag_len_struct) }, | ||
2254 | [ P_get_timeout_flag ] = { CHT_MINOR, { &drbd_nl_get_timeout_flag }, | ||
2255 | sizeof(struct get_timeout_flag_tag_len_struct)}, | ||
2256 | [ P_start_ov ] = { CHT_MINOR, { &drbd_nl_start_ov }, 0 }, | ||
2257 | [ P_new_c_uuid ] = { CHT_MINOR, { &drbd_nl_new_c_uuid }, 0 }, | ||
2258 | [ P_new_connection ] = { CHT_CTOR, { .constructor = &drbd_nl_new_conn }, 0 }, | ||
2259 | [ P_new_minor ] = { CHT_CONN, { .conn_based = &drbd_nl_new_minor }, 0 }, | ||
2260 | [ P_del_minor ] = { CHT_MINOR, { &drbd_nl_del_minor }, 0 }, | ||
2261 | [ P_del_connection ] = { CHT_CONN, { .conn_based = &drbd_nl_del_conn }, 0 }, | ||
2262 | }; | ||
2263 | |||
2264 | static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp) | ||
2265 | { | 2500 | { |
2266 | struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data; | 2501 | enum drbd_ret_code retcode; |
2267 | struct cn_handler_struct *cm; | ||
2268 | struct cn_msg *cn_reply; | ||
2269 | struct drbd_nl_cfg_reply *reply; | ||
2270 | struct drbd_conf *mdev; | ||
2271 | struct drbd_tconn *tconn; | ||
2272 | int retcode, rr; | ||
2273 | int reply_size = sizeof(struct cn_msg) | ||
2274 | + sizeof(struct drbd_nl_cfg_reply) | ||
2275 | + sizeof(short int); | ||
2276 | |||
2277 | if (!try_module_get(THIS_MODULE)) { | ||
2278 | printk(KERN_ERR "drbd: try_module_get() failed!\n"); | ||
2279 | return; | ||
2280 | } | ||
2281 | |||
2282 | if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { | ||
2283 | retcode = ERR_PERM; | ||
2284 | goto fail; | ||
2285 | } | ||
2286 | 2502 | ||
2287 | if (nlp->packet_type >= P_nl_after_last_packet || | 2503 | retcode = drbd_adm_prepare(skb, info, 0); |
2288 | nlp->packet_type == P_return_code_only) { | 2504 | if (!adm_ctx.reply_skb) |
2289 | retcode = ERR_PACKET_NR; | 2505 | return retcode; |
2290 | goto fail; | 2506 | if (retcode != NO_ERROR) |
2291 | } | 2507 | goto out; |
2292 | 2508 | ||
2293 | cm = cnd_table + nlp->packet_type; | 2509 | retcode = drbd_check_conn_name(adm_ctx.conn_name); |
2510 | if (retcode != NO_ERROR) | ||
2511 | goto out; | ||
2294 | 2512 | ||
2295 | /* This may happen if packet number is 0: */ | 2513 | if (adm_ctx.tconn) { |
2296 | if (cm->minor_based == NULL) { | 2514 | retcode = ERR_INVALID_REQUEST; |
2297 | retcode = ERR_PACKET_NR; | 2515 | drbd_msg_put_info("connection exists"); |
2298 | goto fail; | 2516 | goto out; |
2299 | } | 2517 | } |
2300 | 2518 | ||
2301 | reply_size += cm->reply_body_size; | 2519 | if (!drbd_new_tconn(adm_ctx.conn_name)) |
2302 | |||
2303 | /* allocation not in the IO path, cqueue thread context */ | ||
2304 | cn_reply = kzalloc(reply_size, GFP_KERNEL); | ||
2305 | if (!cn_reply) { | ||
2306 | retcode = ERR_NOMEM; | 2520 | retcode = ERR_NOMEM; |
2307 | goto fail; | 2521 | out: |
2308 | } | 2522 | drbd_adm_finish(info, retcode); |
2309 | reply = (struct drbd_nl_cfg_reply *) cn_reply->data; | 2523 | return 0; |
2310 | |||
2311 | reply->packet_type = | ||
2312 | cm->reply_body_size ? nlp->packet_type : P_return_code_only; | ||
2313 | reply->minor = nlp->drbd_minor; | ||
2314 | reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */ | ||
2315 | /* reply->tag_list; might be modified by cm->function. */ | ||
2316 | |||
2317 | retcode = ERR_MINOR_INVALID; | ||
2318 | rr = 0; | ||
2319 | switch (cm->type) { | ||
2320 | case CHT_MINOR: | ||
2321 | mdev = minor_to_mdev(nlp->drbd_minor); | ||
2322 | if (!mdev) | ||
2323 | goto fail; | ||
2324 | rr = cm->minor_based(mdev, nlp, reply); | ||
2325 | break; | ||
2326 | case CHT_CONN: | ||
2327 | tconn = conn_by_name(nlp->obj_name); | ||
2328 | if (!tconn) { | ||
2329 | retcode = ERR_CONN_NOT_KNOWN; | ||
2330 | goto fail; | ||
2331 | } | ||
2332 | rr = cm->conn_based(tconn, nlp, reply); | ||
2333 | break; | ||
2334 | case CHT_CTOR: | ||
2335 | rr = cm->constructor(nlp, reply); | ||
2336 | break; | ||
2337 | /* case CHT_RES: */ | ||
2338 | } | ||
2339 | |||
2340 | cn_reply->id = req->id; | ||
2341 | cn_reply->seq = req->seq; | ||
2342 | cn_reply->ack = req->ack + 1; | ||
2343 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr; | ||
2344 | cn_reply->flags = 0; | ||
2345 | |||
2346 | rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL); | ||
2347 | if (rr && rr != -ESRCH) | ||
2348 | printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); | ||
2349 | |||
2350 | kfree(cn_reply); | ||
2351 | module_put(THIS_MODULE); | ||
2352 | return; | ||
2353 | fail: | ||
2354 | drbd_nl_send_reply(req, retcode); | ||
2355 | module_put(THIS_MODULE); | ||
2356 | } | ||
2357 | |||
2358 | static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ | ||
2359 | |||
2360 | static unsigned short * | ||
2361 | __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, | ||
2362 | unsigned short len, int nul_terminated) | ||
2363 | { | ||
2364 | unsigned short l = tag_descriptions[tag_number(tag)].max_len; | ||
2365 | len = (len < l) ? len : l; | ||
2366 | put_unaligned(tag, tl++); | ||
2367 | put_unaligned(len, tl++); | ||
2368 | memcpy(tl, data, len); | ||
2369 | tl = (unsigned short*)((char*)tl + len); | ||
2370 | if (nul_terminated) | ||
2371 | *((char*)tl - 1) = 0; | ||
2372 | return tl; | ||
2373 | } | ||
2374 | |||
2375 | static unsigned short * | ||
2376 | tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len) | ||
2377 | { | ||
2378 | return __tl_add_blob(tl, tag, data, len, 0); | ||
2379 | } | ||
2380 | |||
2381 | static unsigned short * | ||
2382 | tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str) | ||
2383 | { | ||
2384 | return __tl_add_blob(tl, tag, str, strlen(str)+1, 0); | ||
2385 | } | ||
2386 | |||
2387 | static unsigned short * | ||
2388 | tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val) | ||
2389 | { | ||
2390 | put_unaligned(tag, tl++); | ||
2391 | switch(tag_type(tag)) { | ||
2392 | case TT_INTEGER: | ||
2393 | put_unaligned(sizeof(int), tl++); | ||
2394 | put_unaligned(*(int *)val, (int *)tl); | ||
2395 | tl = (unsigned short*)((char*)tl+sizeof(int)); | ||
2396 | break; | ||
2397 | case TT_INT64: | ||
2398 | put_unaligned(sizeof(u64), tl++); | ||
2399 | put_unaligned(*(u64 *)val, (u64 *)tl); | ||
2400 | tl = (unsigned short*)((char*)tl+sizeof(u64)); | ||
2401 | break; | ||
2402 | default: | ||
2403 | /* someone did something stupid. */ | ||
2404 | ; | ||
2405 | } | ||
2406 | return tl; | ||
2407 | } | ||
2408 | |||
2409 | void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state) | ||
2410 | { | ||
2411 | char buffer[sizeof(struct cn_msg)+ | ||
2412 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2413 | sizeof(struct get_state_tag_len_struct)+ | ||
2414 | sizeof(short int)]; | ||
2415 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2416 | struct drbd_nl_cfg_reply *reply = | ||
2417 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2418 | unsigned short *tl = reply->tag_list; | ||
2419 | |||
2420 | /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ | ||
2421 | |||
2422 | tl = get_state_to_tags((struct get_state *)&state, tl); | ||
2423 | |||
2424 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2425 | |||
2426 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2427 | cn_reply->id.val = CN_VAL_DRBD; | ||
2428 | |||
2429 | cn_reply->seq = atomic_inc_return(&drbd_nl_seq); | ||
2430 | cn_reply->ack = 0; /* not used here. */ | ||
2431 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2432 | (int)((char *)tl - (char *)reply->tag_list); | ||
2433 | cn_reply->flags = 0; | ||
2434 | |||
2435 | reply->packet_type = P_get_state; | ||
2436 | reply->minor = mdev_to_minor(mdev); | ||
2437 | reply->ret_code = NO_ERROR; | ||
2438 | |||
2439 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2440 | } | ||
2441 | |||
2442 | void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name) | ||
2443 | { | ||
2444 | char buffer[sizeof(struct cn_msg)+ | ||
2445 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2446 | sizeof(struct call_helper_tag_len_struct)+ | ||
2447 | sizeof(short int)]; | ||
2448 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2449 | struct drbd_nl_cfg_reply *reply = | ||
2450 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2451 | unsigned short *tl = reply->tag_list; | ||
2452 | |||
2453 | /* dev_warn(DEV, "drbd_bcast_state() got called\n"); */ | ||
2454 | |||
2455 | tl = tl_add_str(tl, T_helper, helper_name); | ||
2456 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2457 | |||
2458 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2459 | cn_reply->id.val = CN_VAL_DRBD; | ||
2460 | |||
2461 | cn_reply->seq = atomic_inc_return(&drbd_nl_seq); | ||
2462 | cn_reply->ack = 0; /* not used here. */ | ||
2463 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2464 | (int)((char *)tl - (char *)reply->tag_list); | ||
2465 | cn_reply->flags = 0; | ||
2466 | |||
2467 | reply->packet_type = P_call_helper; | ||
2468 | reply->minor = mdev_to_minor(mdev); | ||
2469 | reply->ret_code = NO_ERROR; | ||
2470 | |||
2471 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2472 | } | 2524 | } |
2473 | 2525 | ||
2474 | void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs, | 2526 | int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info) |
2475 | const char *seen_hash, const char *calc_hash, | ||
2476 | const struct drbd_peer_request *peer_req) | ||
2477 | { | 2527 | { |
2478 | struct cn_msg *cn_reply; | 2528 | struct drbd_genlmsghdr *dh = info->userhdr; |
2479 | struct drbd_nl_cfg_reply *reply; | 2529 | enum drbd_ret_code retcode; |
2480 | unsigned short *tl; | ||
2481 | struct page *page; | ||
2482 | unsigned len; | ||
2483 | 2530 | ||
2484 | if (!peer_req) | 2531 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN); |
2485 | return; | 2532 | if (!adm_ctx.reply_skb) |
2486 | if (!reason || !reason[0]) | 2533 | return retcode; |
2487 | return; | 2534 | if (retcode != NO_ERROR) |
2535 | goto out; | ||
2488 | 2536 | ||
2489 | /* apparently we have to memcpy twice, first to prepare the data for the | 2537 | /* FIXME drop minor_count parameter, limit to MINORMASK */ |
2490 | * struct cn_msg, then within cn_netlink_send from the cn_msg to the | 2538 | if (dh->minor >= minor_count) { |
2491 | * netlink skb. */ | 2539 | drbd_msg_put_info("requested minor out of range"); |
2492 | /* receiver thread context, which is not in the writeout path (of this node), | 2540 | retcode = ERR_INVALID_REQUEST; |
2493 | * but may be in the writeout path of the _other_ node. | 2541 | goto out; |
2494 | * GFP_NOIO to avoid potential "distributed deadlock". */ | ||
2495 | cn_reply = kzalloc( | ||
2496 | sizeof(struct cn_msg)+ | ||
2497 | sizeof(struct drbd_nl_cfg_reply)+ | ||
2498 | sizeof(struct dump_ee_tag_len_struct)+ | ||
2499 | sizeof(short int), | ||
2500 | GFP_NOIO); | ||
2501 | |||
2502 | if (!cn_reply) { | ||
2503 | dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, " | ||
2504 | "sector %llu, size %u\n", | ||
2505 | (unsigned long long)peer_req->i.sector, | ||
2506 | peer_req->i.size); | ||
2507 | return; | ||
2508 | } | 2542 | } |
2509 | 2543 | /* FIXME we need a define here */ | |
2510 | reply = (struct drbd_nl_cfg_reply*)cn_reply->data; | 2544 | if (adm_ctx.volume >= 256) { |
2511 | tl = reply->tag_list; | 2545 | drbd_msg_put_info("requested volume id out of range"); |
2512 | 2546 | retcode = ERR_INVALID_REQUEST; | |
2513 | tl = tl_add_str(tl, T_dump_ee_reason, reason); | 2547 | goto out; |
2514 | tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs); | ||
2515 | tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs); | ||
2516 | tl = tl_add_int(tl, T_ee_sector, &peer_req->i.sector); | ||
2517 | tl = tl_add_int(tl, T_ee_block_id, &peer_req->block_id); | ||
2518 | |||
2519 | /* dump the first 32k */ | ||
2520 | len = min_t(unsigned, peer_req->i.size, 32 << 10); | ||
2521 | put_unaligned(T_ee_data, tl++); | ||
2522 | put_unaligned(len, tl++); | ||
2523 | |||
2524 | page = peer_req->pages; | ||
2525 | page_chain_for_each(page) { | ||
2526 | void *d = kmap_atomic(page, KM_USER0); | ||
2527 | unsigned l = min_t(unsigned, len, PAGE_SIZE); | ||
2528 | memcpy(tl, d, l); | ||
2529 | kunmap_atomic(d, KM_USER0); | ||
2530 | tl = (unsigned short*)((char*)tl + l); | ||
2531 | len -= l; | ||
2532 | if (len == 0) | ||
2533 | break; | ||
2534 | } | 2548 | } |
2535 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2536 | |||
2537 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2538 | cn_reply->id.val = CN_VAL_DRBD; | ||
2539 | |||
2540 | cn_reply->seq = atomic_inc_return(&drbd_nl_seq); | ||
2541 | cn_reply->ack = 0; // not used here. | ||
2542 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2543 | (int)((char*)tl - (char*)reply->tag_list); | ||
2544 | cn_reply->flags = 0; | ||
2545 | 2549 | ||
2546 | reply->packet_type = P_dump_ee; | 2550 | retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume); |
2547 | reply->minor = mdev_to_minor(mdev); | 2551 | out: |
2548 | reply->ret_code = NO_ERROR; | 2552 | drbd_adm_finish(info, retcode); |
2549 | 2553 | return 0; | |
2550 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2551 | kfree(cn_reply); | ||
2552 | } | 2554 | } |
2553 | 2555 | ||
2554 | void drbd_bcast_sync_progress(struct drbd_conf *mdev) | 2556 | int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info) |
2555 | { | 2557 | { |
2556 | char buffer[sizeof(struct cn_msg)+ | 2558 | struct drbd_conf *mdev; |
2557 | sizeof(struct drbd_nl_cfg_reply)+ | 2559 | enum drbd_ret_code retcode; |
2558 | sizeof(struct sync_progress_tag_len_struct)+ | ||
2559 | sizeof(short int)]; | ||
2560 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | ||
2561 | struct drbd_nl_cfg_reply *reply = | ||
2562 | (struct drbd_nl_cfg_reply *)cn_reply->data; | ||
2563 | unsigned short *tl = reply->tag_list; | ||
2564 | unsigned long rs_left; | ||
2565 | unsigned int res; | ||
2566 | |||
2567 | /* no local ref, no bitmap, no syncer progress, no broadcast. */ | ||
2568 | if (!get_ldev(mdev)) | ||
2569 | return; | ||
2570 | drbd_get_syncer_progress(mdev, &rs_left, &res); | ||
2571 | put_ldev(mdev); | ||
2572 | |||
2573 | tl = tl_add_int(tl, T_sync_progress, &res); | ||
2574 | put_unaligned(TT_END, tl++); /* Close the tag list */ | ||
2575 | |||
2576 | cn_reply->id.idx = CN_IDX_DRBD; | ||
2577 | cn_reply->id.val = CN_VAL_DRBD; | ||
2578 | |||
2579 | cn_reply->seq = atomic_inc_return(&drbd_nl_seq); | ||
2580 | cn_reply->ack = 0; /* not used here. */ | ||
2581 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + | ||
2582 | (int)((char *)tl - (char *)reply->tag_list); | ||
2583 | cn_reply->flags = 0; | ||
2584 | 2560 | ||
2585 | reply->packet_type = P_sync_progress; | 2561 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); |
2586 | reply->minor = mdev_to_minor(mdev); | 2562 | if (!adm_ctx.reply_skb) |
2587 | reply->ret_code = NO_ERROR; | 2563 | return retcode; |
2564 | if (retcode != NO_ERROR) | ||
2565 | goto out; | ||
2588 | 2566 | ||
2589 | cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | 2567 | mdev = adm_ctx.mdev; |
2568 | if (mdev->state.disk == D_DISKLESS && | ||
2569 | mdev->state.conn == C_STANDALONE && | ||
2570 | mdev->state.role == R_SECONDARY) { | ||
2571 | drbd_delete_device(mdev_to_minor(mdev)); | ||
2572 | retcode = NO_ERROR; | ||
2573 | } else | ||
2574 | retcode = ERR_MINOR_CONFIGURED; | ||
2575 | out: | ||
2576 | drbd_adm_finish(info, retcode); | ||
2577 | return 0; | ||
2590 | } | 2578 | } |
2591 | 2579 | ||
2592 | int __init drbd_nl_init(void) | 2580 | int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info) |
2593 | { | 2581 | { |
2594 | static struct cb_id cn_id_drbd; | 2582 | enum drbd_ret_code retcode; |
2595 | int err, try=10; | ||
2596 | |||
2597 | cn_id_drbd.val = CN_VAL_DRBD; | ||
2598 | do { | ||
2599 | cn_id_drbd.idx = cn_idx; | ||
2600 | err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback); | ||
2601 | if (!err) | ||
2602 | break; | ||
2603 | cn_idx = (cn_idx + CN_IDX_STEP); | ||
2604 | } while (try--); | ||
2605 | 2583 | ||
2606 | if (err) { | 2584 | retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN); |
2607 | printk(KERN_ERR "drbd: cn_drbd failed to register\n"); | 2585 | if (!adm_ctx.reply_skb) |
2608 | return err; | 2586 | return retcode; |
2587 | if (retcode != NO_ERROR) | ||
2588 | goto out; | ||
2589 | |||
2590 | if (conn_lowest_minor(adm_ctx.tconn) < 0) { | ||
2591 | drbd_free_tconn(adm_ctx.tconn); | ||
2592 | retcode = NO_ERROR; | ||
2593 | } else { | ||
2594 | retcode = ERR_CONN_IN_USE; | ||
2609 | } | 2595 | } |
2610 | 2596 | ||
2597 | out: | ||
2598 | drbd_adm_finish(info, retcode); | ||
2611 | return 0; | 2599 | return 0; |
2612 | } | 2600 | } |
2613 | 2601 | ||
2614 | void drbd_nl_cleanup(void) | 2602 | void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib) |
2615 | { | 2603 | { |
2616 | static struct cb_id cn_id_drbd; | 2604 | static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */ |
2617 | 2605 | struct sk_buff *msg; | |
2618 | cn_id_drbd.idx = cn_idx; | 2606 | struct drbd_genlmsghdr *d_out; |
2619 | cn_id_drbd.val = CN_VAL_DRBD; | 2607 | unsigned seq; |
2608 | int err = -ENOMEM; | ||
2609 | |||
2610 | seq = atomic_inc_return(&drbd_genl_seq); | ||
2611 | msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO); | ||
2612 | if (!msg) | ||
2613 | goto failed; | ||
2614 | |||
2615 | err = -EMSGSIZE; | ||
2616 | d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT); | ||
2617 | if (!d_out) /* cannot happen, but anyway. */ | ||
2618 | goto nla_put_failure; | ||
2619 | d_out->minor = mdev_to_minor(mdev); | ||
2620 | d_out->ret_code = 0; | ||
2621 | |||
2622 | if (nla_put_status_info(msg, mdev, sib)) | ||
2623 | goto nla_put_failure; | ||
2624 | genlmsg_end(msg, d_out); | ||
2625 | err = drbd_genl_multicast_events(msg, 0); | ||
2626 | /* msg has been consumed or freed in netlink_broadcast() */ | ||
2627 | if (err && err != -ESRCH) | ||
2628 | goto failed; | ||
2620 | 2629 | ||
2621 | cn_del_callback(&cn_id_drbd); | 2630 | return; |
2622 | } | ||
2623 | 2631 | ||
2624 | void drbd_nl_send_reply(struct cn_msg *req, int ret_code) | 2632 | nla_put_failure: |
2625 | { | 2633 | nlmsg_free(msg); |
2626 | char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)]; | 2634 | failed: |
2627 | struct cn_msg *cn_reply = (struct cn_msg *) buffer; | 2635 | dev_err(DEV, "Error %d while broadcasting event. " |
2628 | struct drbd_nl_cfg_reply *reply = | 2636 | "Event seq:%u sib_reason:%u\n", |
2629 | (struct drbd_nl_cfg_reply *)cn_reply->data; | 2637 | err, seq, sib->sib_reason); |
2630 | int rr; | ||
2631 | |||
2632 | memset(buffer, 0, sizeof(buffer)); | ||
2633 | cn_reply->id = req->id; | ||
2634 | |||
2635 | cn_reply->seq = req->seq; | ||
2636 | cn_reply->ack = req->ack + 1; | ||
2637 | cn_reply->len = sizeof(struct drbd_nl_cfg_reply); | ||
2638 | cn_reply->flags = 0; | ||
2639 | |||
2640 | reply->packet_type = P_return_code_only; | ||
2641 | reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor; | ||
2642 | reply->ret_code = ret_code; | ||
2643 | |||
2644 | rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO); | ||
2645 | if (rr && rr != -ESRCH) | ||
2646 | printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr); | ||
2647 | } | 2638 | } |
2648 | |||
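The removed drbd_bcast_state(), drbd_bcast_ev_helper() and drbd_bcast_sync_progress() all collapse into drbd_bcast_event(): callers describe what happened in a struct sib_info and the shared fill code picks the matching attributes. A hedged sketch of how a handler invocation would now be announced; the real call site (presumably drbd_khelper()) is not part of this hunk, so the field usage below is read off the SIB_HELPER_PRE/SIB_HELPER_POST cases in nla_put_status_info() above, not copied from the actual code:

struct sib_info sib;
int ret;

sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;                  /* name of the handler about to run */
drbd_bcast_event(mdev, &sib);

ret = run_the_helper();                 /* placeholder for the usermode helper call */

sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;             /* a POST event carries name and exit code */
drbd_bcast_event(mdev, &sib);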
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index ffee90d6d374..a280bc238acd 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c | |||
@@ -970,6 +970,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
970 | enum drbd_fencing_p fp; | 970 | enum drbd_fencing_p fp; |
971 | enum drbd_req_event what = NOTHING; | 971 | enum drbd_req_event what = NOTHING; |
972 | union drbd_state nsm = (union drbd_state){ .i = -1 }; | 972 | union drbd_state nsm = (union drbd_state){ .i = -1 }; |
973 | struct sib_info sib; | ||
974 | |||
975 | sib.sib_reason = SIB_STATE_CHANGE; | ||
976 | sib.os = os; | ||
977 | sib.ns = ns; | ||
973 | 978 | ||
974 | if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { | 979 | if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { |
975 | clear_bit(CRASHED_PRIMARY, &mdev->flags); | 980 | clear_bit(CRASHED_PRIMARY, &mdev->flags); |
@@ -984,7 +989,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, | |||
984 | } | 989 | } |
985 | 990 | ||
986 | /* Inform userspace about the change... */ | 991 | /* Inform userspace about the change... */ |
987 | drbd_bcast_state(mdev, ns); | 992 | drbd_bcast_event(mdev, &sib); |
988 | 993 | ||
989 | if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) && | 994 | if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) && |
990 | (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) | 995 | (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) |
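With drbd_bcast_state() gone, a state change is reported by filling a sib_info with SIB_STATE_CHANGE plus the old and new state and handing it to drbd_bcast_event(), as the hunk above shows. Read off the fill code in nla_put_status_info(), the multicast message then carries roughly this layout (attribute numbering comes from the generated genl headers):

DRBD_EVENT (drbd_genlmsghdr: minor, ret_code)
  DRBD_NLA_CFG_CONTEXT
    T_ctx_volume, T_ctx_conn_name
  disk_conf / net_conf / syncer_conf     (if configured; sensitive fields excluded)
  DRBD_NLA_STATE_INFO
    T_sib_reason = SIB_STATE_CHANGE
    T_current_state, T_ed_uuid, T_capacity
    T_disk_flags, T_uuids, T_bits_total, T_bits_oos   (only with a local disk attached)
    T_prev_state, T_new_state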
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index e192167e6145..d28fdd8fcd49 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -51,7 +51,6 @@ | |||
51 | 51 | ||
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | |||
55 | extern const char *drbd_buildtag(void); | 54 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.11" | 55 | #define REL_VERSION "8.3.11" |
57 | #define API_VERSION 88 | 56 | #define API_VERSION 88 |
@@ -159,6 +158,7 @@ enum drbd_ret_code { | |||
159 | ERR_CONN_IN_USE = 159, | 158 | ERR_CONN_IN_USE = 159, |
160 | ERR_MINOR_CONFIGURED = 160, | 159 | ERR_MINOR_CONFIGURED = 160, |
161 | ERR_MINOR_EXISTS = 161, | 160 | ERR_MINOR_EXISTS = 161, |
161 | ERR_INVALID_REQUEST = 162, | ||
162 | 162 | ||
163 | /* insert new ones above this line */ | 163 | /* insert new ones above this line */ |
164 | AFTER_LAST_ERR_CODE | 164 | AFTER_LAST_ERR_CODE |
@@ -349,37 +349,4 @@ enum drbd_timeout_flag { | |||
349 | #define DRBD_MD_INDEX_FLEX_EXT -2 | 349 | #define DRBD_MD_INDEX_FLEX_EXT -2 |
350 | #define DRBD_MD_INDEX_FLEX_INT -3 | 350 | #define DRBD_MD_INDEX_FLEX_INT -3 |
351 | 351 | ||
352 | /* Start of the new netlink/connector stuff */ | ||
353 | |||
354 | enum drbd_ncr_flags { | ||
355 | DRBD_NL_CREATE_DEVICE = 0x01, | ||
356 | DRBD_NL_SET_DEFAULTS = 0x02, | ||
357 | }; | ||
358 | #define DRBD_NL_OBJ_NAME_LEN 32 | ||
359 | |||
360 | |||
361 | /* For searching a vacant cn_idx value */ | ||
362 | #define CN_IDX_STEP 6977 | ||
363 | |||
364 | struct drbd_nl_cfg_req { | ||
365 | int packet_type; | ||
366 | union { | ||
367 | struct { | ||
368 | unsigned int drbd_minor; | ||
369 | enum drbd_ncr_flags flags; | ||
370 | }; | ||
371 | struct { | ||
372 | char obj_name[DRBD_NL_OBJ_NAME_LEN]; | ||
373 | }; | ||
374 | }; | ||
375 | unsigned short tag_list[]; | ||
376 | }; | ||
377 | |||
378 | struct drbd_nl_cfg_reply { | ||
379 | int packet_type; | ||
380 | unsigned int minor; | ||
381 | int ret_code; /* enum ret_code or set_st_err_t */ | ||
382 | unsigned short tag_list[]; /* only used with get_* calls */ | ||
383 | }; | ||
384 | |||
385 | #endif | 352 | #endif |
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 8a86f659d363..c8c67239f616 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h | |||
@@ -95,7 +95,7 @@ static struct nla_policy s_name ## _nl_policy[] __read_mostly = \ | |||
95 | #endif | 95 | #endif |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | #if 1 | 98 | #ifdef GENL_MAGIC_DEBUG |
99 | static void dprint_field(const char *dir, int nla_type, | 99 | static void dprint_field(const char *dir, int nla_type, |
100 | const char *name, void *valp) | 100 | const char *name, void *valp) |
101 | { | 101 | { |
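The genl_magic_func.h hunk turns the unconditional "#if 1" around dprint_field() into an opt-in "#ifdef GENL_MAGIC_DEBUG", so the per-field netlink debug printing compiles away by default. Presumably it is re-enabled by defining the macro before the generated code is pulled in, along these lines (an assumption about intended usage, not something this patch shows):

/* in the .c file that includes the generated genl code */
#define GENL_MAGIC_DEBUG
#include <linux/genl_magic_func.h>

or for the whole module via kbuild:

ccflags-y += -DGENL_MAGIC_DEBUG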