aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/drbd/drbd_nl.c
diff options
context:
space:
mode:
authorLars Ellenberg <lars.ellenberg@linbit.com>2014-04-28 12:43:22 -0400
committerJens Axboe <axboe@fb.com>2014-04-30 15:46:54 -0400
commit9e276872fe1665ea158f0c6f40df13008fed2908 (patch)
tree606f85fe946ba1fdc759da76c0acca9afc62df3b /drivers/block/drbd/drbd_nl.c
parenta910b12352f5ddee712c3423c31fbb8b312dde88 (diff)
drbd: allow parallel promote/demote actions
We plan to use genl_family->parallel_ops = true in the future, but need to review all possible interactions first. For now, only selectively drop genl_lock() in drbd_set_role(), instead serializing on our own internal resource->conf_update mutex. We now can be promoted/demoted on many resources in parallel, which may significantly improve cluster failover times when fencing is required. Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com> Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/drbd/drbd_nl.c')
-rw-r--r--drivers/block/drbd/drbd_nl.c100
1 file changed, 86 insertions, 14 deletions
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 118ac72f8699..bb3679263ea7 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -115,6 +115,10 @@ int drbd_msg_put_info(struct sk_buff *skb, const char *info)
115 * and per-family private info->pointers. 115 * and per-family private info->pointers.
116 * But we need to stay compatible with older kernels. 116 * But we need to stay compatible with older kernels.
117 * If it returns successfully, adm_ctx members are valid. 117 * If it returns successfully, adm_ctx members are valid.
118 *
119 * At this point, we still rely on the global genl_lock().
120 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
121 * to add additional synchronization against object destruction/modification.
118 */ 122 */
119#define DRBD_ADM_NEED_MINOR 1 123#define DRBD_ADM_NEED_MINOR 1
120#define DRBD_ADM_NEED_RESOURCE 2 124#define DRBD_ADM_NEED_RESOURCE 2
@@ -166,7 +170,7 @@ static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
166 if (err) 170 if (err)
167 goto fail; 171 goto fail;
168 172
169 /* and assign stuff to the global adm_ctx */ 173 /* and assign stuff to the adm_ctx */
170 nla = nested_attr_tb[__nla_type(T_ctx_volume)]; 174 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
171 if (nla) 175 if (nla)
172 adm_ctx->volume = nla_get_u32(nla); 176 adm_ctx->volume = nla_get_u32(nla);
@@ -186,6 +190,13 @@ static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
186 190
187 adm_ctx->minor = d_in->minor; 191 adm_ctx->minor = d_in->minor;
188 adm_ctx->device = minor_to_device(d_in->minor); 192 adm_ctx->device = minor_to_device(d_in->minor);
193
194 /* We are protected by the global genl_lock().
195 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
196 * so make sure this object stays around. */
197 if (adm_ctx->device)
198 kref_get(&adm_ctx->device->kref);
199
189 if (adm_ctx->resource_name) { 200 if (adm_ctx->resource_name) {
190 adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name); 201 adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
191 } 202 }
@@ -241,6 +252,14 @@ static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
241 return ERR_INVALID_REQUEST; 252 return ERR_INVALID_REQUEST;
242 } 253 }
243 254
255 /* still, provide adm_ctx->resource always, if possible. */
256 if (!adm_ctx->resource) {
257 adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
258 : adm_ctx->connection ? adm_ctx->connection->resource : NULL;
259 if (adm_ctx->resource)
260 kref_get(&adm_ctx->resource->kref);
261 }
262
244 return NO_ERROR; 263 return NO_ERROR;
245 264
246fail: 265fail:
@@ -252,6 +271,10 @@ fail:
252static int drbd_adm_finish(struct drbd_config_context *adm_ctx, 271static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
253 struct genl_info *info, int retcode) 272 struct genl_info *info, int retcode)
254{ 273{
274 if (adm_ctx->device) {
275 kref_put(&adm_ctx->device->kref, drbd_destroy_device);
276 adm_ctx->device = NULL;
277 }
255 if (adm_ctx->connection) { 278 if (adm_ctx->connection) {
256 kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection); 279 kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
257 adm_ctx->connection = NULL; 280 adm_ctx->connection = NULL;
@@ -635,11 +658,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
635 put_ldev(device); 658 put_ldev(device);
636 } 659 }
637 } else { 660 } else {
638 mutex_lock(&device->resource->conf_update); 661 /* Called from drbd_adm_set_role only.
662 * We are still holding the conf_update mutex. */
639 nc = first_peer_device(device)->connection->net_conf; 663 nc = first_peer_device(device)->connection->net_conf;
640 if (nc) 664 if (nc)
641 nc->discard_my_data = 0; /* without copy; single bit op is atomic */ 665 nc->discard_my_data = 0; /* without copy; single bit op is atomic */
642 mutex_unlock(&device->resource->conf_update);
643 666
644 set_disk_ro(device->vdisk, false); 667 set_disk_ro(device->vdisk, false);
645 if (get_ldev(device)) { 668 if (get_ldev(device)) {
@@ -701,11 +724,16 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
701 goto out; 724 goto out;
702 } 725 }
703 } 726 }
727 genl_unlock();
728 mutex_lock(&adm_ctx.resource->adm_mutex);
704 729
705 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) 730 if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
706 retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); 731 retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
707 else 732 else
708 retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); 733 retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
734
735 mutex_unlock(&adm_ctx.resource->adm_mutex);
736 genl_lock();
709out: 737out:
710 drbd_adm_finish(&adm_ctx, info, retcode); 738 drbd_adm_finish(&adm_ctx, info, retcode);
711 return 0; 739 return 0;
@@ -1251,9 +1279,10 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1251 if (!adm_ctx.reply_skb) 1279 if (!adm_ctx.reply_skb)
1252 return retcode; 1280 return retcode;
1253 if (retcode != NO_ERROR) 1281 if (retcode != NO_ERROR)
1254 goto out; 1282 goto finish;
1255 1283
1256 device = adm_ctx.device; 1284 device = adm_ctx.device;
1285 mutex_lock(&adm_ctx.resource->adm_mutex);
1257 1286
1258 /* we also need a disk 1287 /* we also need a disk
1259 * to change the options on */ 1288 * to change the options on */
@@ -1368,6 +1397,8 @@ fail_unlock:
1368success: 1397success:
1369 put_ldev(device); 1398 put_ldev(device);
1370 out: 1399 out:
1400 mutex_unlock(&adm_ctx.resource->adm_mutex);
1401 finish:
1371 drbd_adm_finish(&adm_ctx, info, retcode); 1402 drbd_adm_finish(&adm_ctx, info, retcode);
1372 return 0; 1403 return 0;
1373} 1404}
@@ -1397,6 +1428,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1397 goto finish; 1428 goto finish;
1398 1429
1399 device = adm_ctx.device; 1430 device = adm_ctx.device;
1431 mutex_lock(&adm_ctx.resource->adm_mutex);
1400 conn_reconfig_start(first_peer_device(device)->connection); 1432 conn_reconfig_start(first_peer_device(device)->connection);
1401 1433
1402 /* if you want to reconfigure, please tear down first */ 1434 /* if you want to reconfigure, please tear down first */
@@ -1781,6 +1813,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1781 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); 1813 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
1782 put_ldev(device); 1814 put_ldev(device);
1783 conn_reconfig_done(first_peer_device(device)->connection); 1815 conn_reconfig_done(first_peer_device(device)->connection);
1816 mutex_unlock(&adm_ctx.resource->adm_mutex);
1784 drbd_adm_finish(&adm_ctx, info, retcode); 1817 drbd_adm_finish(&adm_ctx, info, retcode);
1785 return 0; 1818 return 0;
1786 1819
@@ -1803,7 +1836,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1803 kfree(new_disk_conf); 1836 kfree(new_disk_conf);
1804 lc_destroy(resync_lru); 1837 lc_destroy(resync_lru);
1805 kfree(new_plan); 1838 kfree(new_plan);
1806 1839 mutex_unlock(&adm_ctx.resource->adm_mutex);
1807 finish: 1840 finish:
1808 drbd_adm_finish(&adm_ctx, info, retcode); 1841 drbd_adm_finish(&adm_ctx, info, retcode);
1809 return 0; 1842 return 0;
@@ -1864,7 +1897,9 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1864 } 1897 }
1865 } 1898 }
1866 1899
1900 mutex_lock(&adm_ctx.resource->adm_mutex);
1867 retcode = adm_detach(adm_ctx.device, parms.force_detach); 1901 retcode = adm_detach(adm_ctx.device, parms.force_detach);
1902 mutex_unlock(&adm_ctx.resource->adm_mutex);
1868out: 1903out:
1869 drbd_adm_finish(&adm_ctx, info, retcode); 1904 drbd_adm_finish(&adm_ctx, info, retcode);
1870 return 0; 1905 return 0;
@@ -2053,9 +2088,10 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2053 if (!adm_ctx.reply_skb) 2088 if (!adm_ctx.reply_skb)
2054 return retcode; 2089 return retcode;
2055 if (retcode != NO_ERROR) 2090 if (retcode != NO_ERROR)
2056 goto out; 2091 goto finish;
2057 2092
2058 connection = adm_ctx.connection; 2093 connection = adm_ctx.connection;
2094 mutex_lock(&adm_ctx.resource->adm_mutex);
2059 2095
2060 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); 2096 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2061 if (!new_net_conf) { 2097 if (!new_net_conf) {
@@ -2153,6 +2189,8 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2153 done: 2189 done:
2154 conn_reconfig_done(connection); 2190 conn_reconfig_done(connection);
2155 out: 2191 out:
2192 mutex_unlock(&adm_ctx.resource->adm_mutex);
2193 finish:
2156 drbd_adm_finish(&adm_ctx, info, retcode); 2194 drbd_adm_finish(&adm_ctx, info, retcode);
2157 return 0; 2195 return 0;
2158} 2196}
@@ -2202,6 +2240,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2202 } 2240 }
2203 } 2241 }
2204 2242
2243 mutex_lock(&adm_ctx.resource->adm_mutex);
2205 connection = first_connection(adm_ctx.resource); 2244 connection = first_connection(adm_ctx.resource);
2206 conn_reconfig_start(connection); 2245 conn_reconfig_start(connection);
2207 2246
@@ -2271,6 +2310,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2271 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); 2310 retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2272 2311
2273 conn_reconfig_done(connection); 2312 conn_reconfig_done(connection);
2313 mutex_unlock(&adm_ctx.resource->adm_mutex);
2274 drbd_adm_finish(&adm_ctx, info, retcode); 2314 drbd_adm_finish(&adm_ctx, info, retcode);
2275 return 0; 2315 return 0;
2276 2316
@@ -2279,6 +2319,7 @@ fail:
2279 kfree(new_net_conf); 2319 kfree(new_net_conf);
2280 2320
2281 conn_reconfig_done(connection); 2321 conn_reconfig_done(connection);
2322 mutex_unlock(&adm_ctx.resource->adm_mutex);
2282out: 2323out:
2283 drbd_adm_finish(&adm_ctx, info, retcode); 2324 drbd_adm_finish(&adm_ctx, info, retcode);
2284 return 0; 2325 return 0;
@@ -2367,11 +2408,13 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2367 } 2408 }
2368 } 2409 }
2369 2410
2411 mutex_lock(&adm_ctx.resource->adm_mutex);
2370 rv = conn_try_disconnect(connection, parms.force_disconnect); 2412 rv = conn_try_disconnect(connection, parms.force_disconnect);
2371 if (rv < SS_SUCCESS) 2413 if (rv < SS_SUCCESS)
2372 retcode = rv; /* FIXME: Type mismatch. */ 2414 retcode = rv; /* FIXME: Type mismatch. */
2373 else 2415 else
2374 retcode = NO_ERROR; 2416 retcode = NO_ERROR;
2417 mutex_unlock(&adm_ctx.resource->adm_mutex);
2375 fail: 2418 fail:
2376 drbd_adm_finish(&adm_ctx, info, retcode); 2419 drbd_adm_finish(&adm_ctx, info, retcode);
2377 return 0; 2420 return 0;
@@ -2410,8 +2453,9 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2410 if (!adm_ctx.reply_skb) 2453 if (!adm_ctx.reply_skb)
2411 return retcode; 2454 return retcode;
2412 if (retcode != NO_ERROR) 2455 if (retcode != NO_ERROR)
2413 goto fail; 2456 goto finish;
2414 2457
2458 mutex_lock(&adm_ctx.resource->adm_mutex);
2415 device = adm_ctx.device; 2459 device = adm_ctx.device;
2416 if (!get_ldev(device)) { 2460 if (!get_ldev(device)) {
2417 retcode = ERR_NO_DISK; 2461 retcode = ERR_NO_DISK;
@@ -2517,6 +2561,8 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2517 } 2561 }
2518 2562
2519 fail: 2563 fail:
2564 mutex_unlock(&adm_ctx.resource->adm_mutex);
2565 finish:
2520 drbd_adm_finish(&adm_ctx, info, retcode); 2566 drbd_adm_finish(&adm_ctx, info, retcode);
2521 return 0; 2567 return 0;
2522 2568
@@ -2549,12 +2595,14 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2549 goto fail; 2595 goto fail;
2550 } 2596 }
2551 2597
2598 mutex_lock(&adm_ctx.resource->adm_mutex);
2552 err = set_resource_options(adm_ctx.resource, &res_opts); 2599 err = set_resource_options(adm_ctx.resource, &res_opts);
2553 if (err) { 2600 if (err) {
2554 retcode = ERR_INVALID_REQUEST; 2601 retcode = ERR_INVALID_REQUEST;
2555 if (err == -ENOMEM) 2602 if (err == -ENOMEM)
2556 retcode = ERR_NOMEM; 2603 retcode = ERR_NOMEM;
2557 } 2604 }
2605 mutex_unlock(&adm_ctx.resource->adm_mutex);
2558 2606
2559fail: 2607fail:
2560 drbd_adm_finish(&adm_ctx, info, retcode); 2608 drbd_adm_finish(&adm_ctx, info, retcode);
@@ -2573,6 +2621,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2573 if (retcode != NO_ERROR) 2621 if (retcode != NO_ERROR)
2574 goto out; 2622 goto out;
2575 2623
2624 mutex_lock(&adm_ctx.resource->adm_mutex);
2576 device = adm_ctx.device; 2625 device = adm_ctx.device;
2577 2626
2578 /* If there is still bitmap IO pending, probably because of a previous 2627 /* If there is still bitmap IO pending, probably because of a previous
@@ -2596,7 +2645,7 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2596 } else 2645 } else
2597 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); 2646 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2598 drbd_resume_io(device); 2647 drbd_resume_io(device);
2599 2648 mutex_unlock(&adm_ctx.resource->adm_mutex);
2600out: 2649out:
2601 drbd_adm_finish(&adm_ctx, info, retcode); 2650 drbd_adm_finish(&adm_ctx, info, retcode);
2602 return 0; 2651 return 0;
@@ -2614,7 +2663,9 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
2614 if (retcode != NO_ERROR) 2663 if (retcode != NO_ERROR)
2615 goto out; 2664 goto out;
2616 2665
2666 mutex_lock(&adm_ctx.resource->adm_mutex);
2617 retcode = drbd_request_state(adm_ctx.device, mask, val); 2667 retcode = drbd_request_state(adm_ctx.device, mask, val);
2668 mutex_unlock(&adm_ctx.resource->adm_mutex);
2618out: 2669out:
2619 drbd_adm_finish(&adm_ctx, info, retcode); 2670 drbd_adm_finish(&adm_ctx, info, retcode);
2620 return 0; 2671 return 0;
@@ -2641,6 +2692,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2641 if (retcode != NO_ERROR) 2692 if (retcode != NO_ERROR)
2642 goto out; 2693 goto out;
2643 2694
2695 mutex_lock(&adm_ctx.resource->adm_mutex);
2644 device = adm_ctx.device; 2696 device = adm_ctx.device;
2645 2697
2646 /* If there is still bitmap IO pending, probably because of a previous 2698 /* If there is still bitmap IO pending, probably because of a previous
@@ -2667,7 +2719,7 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2667 } else 2719 } else
2668 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); 2720 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
2669 drbd_resume_io(device); 2721 drbd_resume_io(device);
2670 2722 mutex_unlock(&adm_ctx.resource->adm_mutex);
2671out: 2723out:
2672 drbd_adm_finish(&adm_ctx, info, retcode); 2724 drbd_adm_finish(&adm_ctx, info, retcode);
2673 return 0; 2725 return 0;
@@ -2684,8 +2736,10 @@ int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2684 if (retcode != NO_ERROR) 2736 if (retcode != NO_ERROR)
2685 goto out; 2737 goto out;
2686 2738
2739 mutex_lock(&adm_ctx.resource->adm_mutex);
2687 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) 2740 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2688 retcode = ERR_PAUSE_IS_SET; 2741 retcode = ERR_PAUSE_IS_SET;
2742 mutex_unlock(&adm_ctx.resource->adm_mutex);
2689out: 2743out:
2690 drbd_adm_finish(&adm_ctx, info, retcode); 2744 drbd_adm_finish(&adm_ctx, info, retcode);
2691 return 0; 2745 return 0;
@@ -2703,6 +2757,7 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2703 if (retcode != NO_ERROR) 2757 if (retcode != NO_ERROR)
2704 goto out; 2758 goto out;
2705 2759
2760 mutex_lock(&adm_ctx.resource->adm_mutex);
2706 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { 2761 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2707 s = adm_ctx.device->state; 2762 s = adm_ctx.device->state;
2708 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { 2763 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
@@ -2712,7 +2767,7 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2712 retcode = ERR_PAUSE_IS_CLEAR; 2767 retcode = ERR_PAUSE_IS_CLEAR;
2713 } 2768 }
2714 } 2769 }
2715 2770 mutex_unlock(&adm_ctx.resource->adm_mutex);
2716out: 2771out:
2717 drbd_adm_finish(&adm_ctx, info, retcode); 2772 drbd_adm_finish(&adm_ctx, info, retcode);
2718 return 0; 2773 return 0;
@@ -2735,6 +2790,7 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2735 if (retcode != NO_ERROR) 2790 if (retcode != NO_ERROR)
2736 goto out; 2791 goto out;
2737 2792
2793 mutex_lock(&adm_ctx.resource->adm_mutex);
2738 device = adm_ctx.device; 2794 device = adm_ctx.device;
2739 if (test_bit(NEW_CUR_UUID, &device->flags)) { 2795 if (test_bit(NEW_CUR_UUID, &device->flags)) {
2740 drbd_uuid_new_current(device); 2796 drbd_uuid_new_current(device);
@@ -2749,7 +2805,7 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2749 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); 2805 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
2750 } 2806 }
2751 drbd_resume_io(device); 2807 drbd_resume_io(device);
2752 2808 mutex_unlock(&adm_ctx.resource->adm_mutex);
2753out: 2809out:
2754 drbd_adm_finish(&adm_ctx, info, retcode); 2810 drbd_adm_finish(&adm_ctx, info, retcode);
2755 return 0; 2811 return 0;
@@ -3182,6 +3238,8 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3182 goto out; 3238 goto out;
3183 } 3239 }
3184 } 3240 }
3241 mutex_lock(&adm_ctx.resource->adm_mutex);
3242
3185 /* w_make_ov_request expects position to be aligned */ 3243 /* w_make_ov_request expects position to be aligned */
3186 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); 3244 device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
3187 device->ov_stop_sector = parms.ov_stop_sector; 3245 device->ov_stop_sector = parms.ov_stop_sector;
@@ -3192,6 +3250,8 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
3192 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); 3250 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3193 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S)); 3251 retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
3194 drbd_resume_io(device); 3252 drbd_resume_io(device);
3253
3254 mutex_unlock(&adm_ctx.resource->adm_mutex);
3195out: 3255out:
3196 drbd_adm_finish(&adm_ctx, info, retcode); 3256 drbd_adm_finish(&adm_ctx, info, retcode);
3197 return 0; 3257 return 0;
@@ -3224,6 +3284,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3224 } 3284 }
3225 } 3285 }
3226 3286
3287 mutex_lock(&adm_ctx.resource->adm_mutex);
3227 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */ 3288 mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
3228 3289
3229 if (!get_ldev(device)) { 3290 if (!get_ldev(device)) {
@@ -3268,6 +3329,7 @@ out_dec:
3268 put_ldev(device); 3329 put_ldev(device);
3269out: 3330out:
3270 mutex_unlock(device->state_mutex); 3331 mutex_unlock(device->state_mutex);
3332 mutex_unlock(&adm_ctx.resource->adm_mutex);
3271out_nolock: 3333out_nolock:
3272 drbd_adm_finish(&adm_ctx, info, retcode); 3334 drbd_adm_finish(&adm_ctx, info, retcode);
3273 return 0; 3335 return 0;
@@ -3324,6 +3386,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3324 goto out; 3386 goto out;
3325 } 3387 }
3326 3388
3389 /* not yet safe for genl_family.parallel_ops */
3327 if (!conn_create(adm_ctx.resource_name, &res_opts)) 3390 if (!conn_create(adm_ctx.resource_name, &res_opts))
3328 retcode = ERR_NOMEM; 3391 retcode = ERR_NOMEM;
3329out: 3392out:
@@ -3363,7 +3426,9 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
3363 goto out; 3426 goto out;
3364 } 3427 }
3365 3428
3429 mutex_lock(&adm_ctx.resource->adm_mutex);
3366 retcode = drbd_create_device(&adm_ctx, dh->minor); 3430 retcode = drbd_create_device(&adm_ctx, dh->minor);
3431 mutex_unlock(&adm_ctx.resource->adm_mutex);
3367out: 3432out:
3368 drbd_adm_finish(&adm_ctx, info, retcode); 3433 drbd_adm_finish(&adm_ctx, info, retcode);
3369 return 0; 3434 return 0;
@@ -3395,7 +3460,9 @@ int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
3395 if (retcode != NO_ERROR) 3460 if (retcode != NO_ERROR)
3396 goto out; 3461 goto out;
3397 3462
3463 mutex_lock(&adm_ctx.resource->adm_mutex);
3398 retcode = adm_del_minor(adm_ctx.device); 3464 retcode = adm_del_minor(adm_ctx.device);
3465 mutex_unlock(&adm_ctx.resource->adm_mutex);
3399out: 3466out:
3400 drbd_adm_finish(&adm_ctx, info, retcode); 3467 drbd_adm_finish(&adm_ctx, info, retcode);
3401 return 0; 3468 return 0;
@@ -3414,9 +3481,10 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3414 if (!adm_ctx.reply_skb) 3481 if (!adm_ctx.reply_skb)
3415 return retcode; 3482 return retcode;
3416 if (retcode != NO_ERROR) 3483 if (retcode != NO_ERROR)
3417 goto out; 3484 goto finish;
3418 3485
3419 resource = adm_ctx.resource; 3486 resource = adm_ctx.resource;
3487 mutex_lock(&resource->adm_mutex);
3420 /* demote */ 3488 /* demote */
3421 for_each_connection(connection, resource) { 3489 for_each_connection(connection, resource) {
3422 struct drbd_peer_device *peer_device; 3490 struct drbd_peer_device *peer_device;
@@ -3467,8 +3535,9 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3467 synchronize_rcu(); 3535 synchronize_rcu();
3468 drbd_free_resource(resource); 3536 drbd_free_resource(resource);
3469 retcode = NO_ERROR; 3537 retcode = NO_ERROR;
3470
3471out: 3538out:
3539 mutex_unlock(&resource->adm_mutex);
3540finish:
3472 drbd_adm_finish(&adm_ctx, info, retcode); 3541 drbd_adm_finish(&adm_ctx, info, retcode);
3473 return 0; 3542 return 0;
3474} 3543}
@@ -3484,9 +3553,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3484 if (!adm_ctx.reply_skb) 3553 if (!adm_ctx.reply_skb)
3485 return retcode; 3554 return retcode;
3486 if (retcode != NO_ERROR) 3555 if (retcode != NO_ERROR)
3487 goto out; 3556 goto finish;
3488 3557
3489 resource = adm_ctx.resource; 3558 resource = adm_ctx.resource;
3559 mutex_lock(&resource->adm_mutex);
3490 for_each_connection(connection, resource) { 3560 for_each_connection(connection, resource) {
3491 if (connection->cstate > C_STANDALONE) { 3561 if (connection->cstate > C_STANDALONE) {
3492 retcode = ERR_NET_CONFIGURED; 3562 retcode = ERR_NET_CONFIGURED;
@@ -3505,6 +3575,8 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3505 drbd_free_resource(resource); 3575 drbd_free_resource(resource);
3506 retcode = NO_ERROR; 3576 retcode = NO_ERROR;
3507out: 3577out:
3578 mutex_unlock(&resource->adm_mutex);
3579finish:
3508 drbd_adm_finish(&adm_ctx, info, retcode); 3580 drbd_adm_finish(&adm_ctx, info, retcode);
3509 return 0; 3581 return 0;
3510} 3582}