diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-13 17:22:26 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-13 17:22:26 -0500 |
commit | 9ea18f8cab5f1c36cdd0f09717e35ceb48c36a87 (patch) | |
tree | 0c8da7ac47cb59fe39f177ab0407f554aff77194 /drivers/block/drbd | |
parent | caf292ae5bb9d57198ce001d8b762f7abae3a94d (diff) | |
parent | 849c6e7746e4f6317ace6aa7d2fcdcd844e99ddb (diff) |
Merge branch 'for-3.19/drivers' of git://git.kernel.dk/linux-block
Pull block layer driver updates from Jens Axboe:
- NVMe updates:
- The blk-mq conversion from Matias (and others)
- A stack of NVMe bug fixes from the nvme tree, mostly from Keith.
- Various bug fixes from me, addressing issues in the blk-mq
conversion as well as generic bugs.
- Abort and CPU online fix from Sam.
- Hot add/remove fix from Indraneel.
- A couple of drbd fixes from the drbd team (Andreas, Lars, Philipp)
- With the generic IO stat accounting from 3.19/core, convert md,
bcache, and rsxx to use those accounting functions. From Gu Zheng.
- Boundary check for queue/irq mode for null_blk from Matias. Fixes
cases where invalid values could be given, causing the device to hang.
- The xen blkfront pull request, with two bug fixes from Vitaly.
* 'for-3.19/drivers' of git://git.kernel.dk/linux-block: (56 commits)
NVMe: fix race condition in nvme_submit_sync_cmd()
NVMe: fix retry/error logic in nvme_queue_rq()
NVMe: Fix FS mount issue (hot-remove followed by hot-add)
NVMe: fix error return checking from blk_mq_alloc_request()
NVMe: fix freeing of wrong request in abort path
xen/blkfront: remove redundant flush_op
xen/blkfront: improve protection against issuing unsupported REQ_FUA
NVMe: Fix command setup on IO retry
null_blk: boundary check queue_mode and irqmode
block/rsxx: use generic io stats accounting functions to simplify io stat accounting
md: use generic io stats accounting functions to simplify io stat accounting
drbd: use generic io stats accounting functions to simplify io stat accounting
md/bcache: use generic io stats accounting functions to simplify io stat accounting
NVMe: Update module version major number
NVMe: fail pci initialization if the device doesn't have any BARs
NVMe: add ->exit_hctx() hook
NVMe: make setup work for devices that don't do INTx
NVMe: enable IO stats by default
NVMe: nvme_submit_async_admin_req() must use atomic rq allocation
NVMe: replace blk_put_request() with blk_mq_free_request()
...
Diffstat (limited to 'drivers/block/drbd')
-rw-r--r-- | drivers/block/drbd/drbd_actlog.c | 3 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_int.h | 39 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_main.c | 23 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_nl.c | 64 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_receiver.c | 2 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_req.c | 25 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_state.c | 42 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_state.h | 5 | ||||
-rw-r--r-- | drivers/block/drbd/drbd_worker.c | 5 |
9 files changed, 93 insertions, 115 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index a2dfa169237d..1318e3217cb0 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c | |||
@@ -827,8 +827,7 @@ static int update_sync_bits(struct drbd_device *device, | |||
827 | * | 827 | * |
828 | */ | 828 | */ |
829 | int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, | 829 | int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, |
830 | enum update_sync_bits_mode mode, | 830 | enum update_sync_bits_mode mode) |
831 | const char *file, const unsigned int line) | ||
832 | { | 831 | { |
833 | /* Is called from worker and receiver context _only_ */ | 832 | /* Is called from worker and receiver context _only_ */ |
834 | unsigned long sbnr, ebnr, lbnr; | 833 | unsigned long sbnr, ebnr, lbnr; |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 9b22f8f01b57..b905e9888b88 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -1454,7 +1454,6 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t); | |||
1454 | 1454 | ||
1455 | 1455 | ||
1456 | /* drbd_nl.c */ | 1456 | /* drbd_nl.c */ |
1457 | extern int drbd_msg_put_info(struct sk_buff *skb, const char *info); | ||
1458 | extern void drbd_suspend_io(struct drbd_device *device); | 1457 | extern void drbd_suspend_io(struct drbd_device *device); |
1459 | extern void drbd_resume_io(struct drbd_device *device); | 1458 | extern void drbd_resume_io(struct drbd_device *device); |
1460 | extern char *ppsize(char *buf, unsigned long long size); | 1459 | extern char *ppsize(char *buf, unsigned long long size); |
@@ -1558,52 +1557,31 @@ extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled); | |||
1558 | extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); | 1557 | extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed); |
1559 | extern int drbd_connected(struct drbd_peer_device *); | 1558 | extern int drbd_connected(struct drbd_peer_device *); |
1560 | 1559 | ||
1561 | /* Yes, there is kernel_setsockopt, but only since 2.6.18. | ||
1562 | * So we have our own copy of it here. */ | ||
1563 | static inline int drbd_setsockopt(struct socket *sock, int level, int optname, | ||
1564 | char *optval, int optlen) | ||
1565 | { | ||
1566 | mm_segment_t oldfs = get_fs(); | ||
1567 | char __user *uoptval; | ||
1568 | int err; | ||
1569 | |||
1570 | uoptval = (char __user __force *)optval; | ||
1571 | |||
1572 | set_fs(KERNEL_DS); | ||
1573 | if (level == SOL_SOCKET) | ||
1574 | err = sock_setsockopt(sock, level, optname, uoptval, optlen); | ||
1575 | else | ||
1576 | err = sock->ops->setsockopt(sock, level, optname, uoptval, | ||
1577 | optlen); | ||
1578 | set_fs(oldfs); | ||
1579 | return err; | ||
1580 | } | ||
1581 | |||
1582 | static inline void drbd_tcp_cork(struct socket *sock) | 1560 | static inline void drbd_tcp_cork(struct socket *sock) |
1583 | { | 1561 | { |
1584 | int val = 1; | 1562 | int val = 1; |
1585 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, | 1563 | (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK, |
1586 | (char*)&val, sizeof(val)); | 1564 | (char*)&val, sizeof(val)); |
1587 | } | 1565 | } |
1588 | 1566 | ||
1589 | static inline void drbd_tcp_uncork(struct socket *sock) | 1567 | static inline void drbd_tcp_uncork(struct socket *sock) |
1590 | { | 1568 | { |
1591 | int val = 0; | 1569 | int val = 0; |
1592 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK, | 1570 | (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK, |
1593 | (char*)&val, sizeof(val)); | 1571 | (char*)&val, sizeof(val)); |
1594 | } | 1572 | } |
1595 | 1573 | ||
1596 | static inline void drbd_tcp_nodelay(struct socket *sock) | 1574 | static inline void drbd_tcp_nodelay(struct socket *sock) |
1597 | { | 1575 | { |
1598 | int val = 1; | 1576 | int val = 1; |
1599 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY, | 1577 | (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, |
1600 | (char*)&val, sizeof(val)); | 1578 | (char*)&val, sizeof(val)); |
1601 | } | 1579 | } |
1602 | 1580 | ||
1603 | static inline void drbd_tcp_quickack(struct socket *sock) | 1581 | static inline void drbd_tcp_quickack(struct socket *sock) |
1604 | { | 1582 | { |
1605 | int val = 2; | 1583 | int val = 2; |
1606 | (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK, | 1584 | (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, |
1607 | (char*)&val, sizeof(val)); | 1585 | (char*)&val, sizeof(val)); |
1608 | } | 1586 | } |
1609 | 1587 | ||
@@ -1662,14 +1640,13 @@ extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long stil | |||
1662 | 1640 | ||
1663 | enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC }; | 1641 | enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC }; |
1664 | extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, | 1642 | extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size, |
1665 | enum update_sync_bits_mode mode, | 1643 | enum update_sync_bits_mode mode); |
1666 | const char *file, const unsigned int line); | ||
1667 | #define drbd_set_in_sync(device, sector, size) \ | 1644 | #define drbd_set_in_sync(device, sector, size) \ |
1668 | __drbd_change_sync(device, sector, size, SET_IN_SYNC, __FILE__, __LINE__) | 1645 | __drbd_change_sync(device, sector, size, SET_IN_SYNC) |
1669 | #define drbd_set_out_of_sync(device, sector, size) \ | 1646 | #define drbd_set_out_of_sync(device, sector, size) \ |
1670 | __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC, __FILE__, __LINE__) | 1647 | __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC) |
1671 | #define drbd_rs_failed_io(device, sector, size) \ | 1648 | #define drbd_rs_failed_io(device, sector, size) \ |
1672 | __drbd_change_sync(device, sector, size, RECORD_RS_FAILED, __FILE__, __LINE__) | 1649 | __drbd_change_sync(device, sector, size, RECORD_RS_FAILED) |
1673 | extern void drbd_al_shrink(struct drbd_device *device); | 1650 | extern void drbd_al_shrink(struct drbd_device *device); |
1674 | extern int drbd_initialize_al(struct drbd_device *, void *); | 1651 | extern int drbd_initialize_al(struct drbd_device *, void *); |
1675 | 1652 | ||
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 973c185c9cfe..1fc83427199c 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c | |||
@@ -2532,10 +2532,6 @@ int set_resource_options(struct drbd_resource *resource, struct res_opts *res_op | |||
2532 | 2532 | ||
2533 | if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) | 2533 | if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) |
2534 | return -ENOMEM; | 2534 | return -ENOMEM; |
2535 | /* | ||
2536 | retcode = ERR_NOMEM; | ||
2537 | drbd_msg_put_info("unable to allocate cpumask"); | ||
2538 | */ | ||
2539 | 2535 | ||
2540 | /* silently ignore cpu mask on UP kernel */ | 2536 | /* silently ignore cpu mask on UP kernel */ |
2541 | if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { | 2537 | if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) { |
@@ -2731,7 +2727,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig | |||
2731 | 2727 | ||
2732 | device = minor_to_device(minor); | 2728 | device = minor_to_device(minor); |
2733 | if (device) | 2729 | if (device) |
2734 | return ERR_MINOR_EXISTS; | 2730 | return ERR_MINOR_OR_VOLUME_EXISTS; |
2735 | 2731 | ||
2736 | /* GFP_KERNEL, we are outside of all write-out paths */ | 2732 | /* GFP_KERNEL, we are outside of all write-out paths */ |
2737 | device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL); | 2733 | device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL); |
@@ -2793,20 +2789,16 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig | |||
2793 | 2789 | ||
2794 | id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL); | 2790 | id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL); |
2795 | if (id < 0) { | 2791 | if (id < 0) { |
2796 | if (id == -ENOSPC) { | 2792 | if (id == -ENOSPC) |
2797 | err = ERR_MINOR_EXISTS; | 2793 | err = ERR_MINOR_OR_VOLUME_EXISTS; |
2798 | drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); | ||
2799 | } | ||
2800 | goto out_no_minor_idr; | 2794 | goto out_no_minor_idr; |
2801 | } | 2795 | } |
2802 | kref_get(&device->kref); | 2796 | kref_get(&device->kref); |
2803 | 2797 | ||
2804 | id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL); | 2798 | id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL); |
2805 | if (id < 0) { | 2799 | if (id < 0) { |
2806 | if (id == -ENOSPC) { | 2800 | if (id == -ENOSPC) |
2807 | err = ERR_MINOR_EXISTS; | 2801 | err = ERR_MINOR_OR_VOLUME_EXISTS; |
2808 | drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); | ||
2809 | } | ||
2810 | goto out_idr_remove_minor; | 2802 | goto out_idr_remove_minor; |
2811 | } | 2803 | } |
2812 | kref_get(&device->kref); | 2804 | kref_get(&device->kref); |
@@ -2825,10 +2817,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig | |||
2825 | 2817 | ||
2826 | id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL); | 2818 | id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL); |
2827 | if (id < 0) { | 2819 | if (id < 0) { |
2828 | if (id == -ENOSPC) { | 2820 | if (id == -ENOSPC) |
2829 | err = ERR_INVALID_REQUEST; | 2821 | err = ERR_INVALID_REQUEST; |
2830 | drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already"); | ||
2831 | } | ||
2832 | goto out_idr_remove_from_resource; | 2822 | goto out_idr_remove_from_resource; |
2833 | } | 2823 | } |
2834 | kref_get(&connection->kref); | 2824 | kref_get(&connection->kref); |
@@ -2836,7 +2826,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig | |||
2836 | 2826 | ||
2837 | if (init_submitter(device)) { | 2827 | if (init_submitter(device)) { |
2838 | err = ERR_NOMEM; | 2828 | err = ERR_NOMEM; |
2839 | drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue"); | ||
2840 | goto out_idr_remove_vol; | 2829 | goto out_idr_remove_vol; |
2841 | } | 2830 | } |
2842 | 2831 | ||
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1cd47df44bda..74df8cfad414 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
@@ -92,7 +92,7 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) | |||
92 | 92 | ||
93 | /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only | 93 | /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only |
94 | * reason it could fail was no space in skb, and there are 4k available. */ | 94 | * reason it could fail was no space in skb, and there are 4k available. */ |
95 | int drbd_msg_put_info(struct sk_buff *skb, const char *info) | 95 | static int drbd_msg_put_info(struct sk_buff *skb, const char *info) |
96 | { | 96 | { |
97 | struct nlattr *nla; | 97 | struct nlattr *nla; |
98 | int err = -EMSGSIZE; | 98 | int err = -EMSGSIZE; |
@@ -588,7 +588,7 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for | |||
588 | val.i = 0; val.role = new_role; | 588 | val.i = 0; val.role = new_role; |
589 | 589 | ||
590 | while (try++ < max_tries) { | 590 | while (try++ < max_tries) { |
591 | rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE); | 591 | rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE); |
592 | 592 | ||
593 | /* in case we first succeeded to outdate, | 593 | /* in case we first succeeded to outdate, |
594 | * but now suddenly could establish a connection */ | 594 | * but now suddenly could establish a connection */ |
@@ -2052,7 +2052,7 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c | |||
2052 | rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf); | 2052 | rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf); |
2053 | rcu_read_unlock(); | 2053 | rcu_read_unlock(); |
2054 | 2054 | ||
2055 | /* connection->volumes protected by genl_lock() here */ | 2055 | /* connection->peer_devices protected by genl_lock() here */ |
2056 | idr_for_each_entry(&connection->peer_devices, peer_device, i) { | 2056 | idr_for_each_entry(&connection->peer_devices, peer_device, i) { |
2057 | struct drbd_device *device = peer_device->device; | 2057 | struct drbd_device *device = peer_device->device; |
2058 | if (!device->bitmap) { | 2058 | if (!device->bitmap) { |
@@ -3483,7 +3483,7 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) | |||
3483 | * that first_peer_device(device)->connection and device->vnr match the request. */ | 3483 | * that first_peer_device(device)->connection and device->vnr match the request. */ |
3484 | if (adm_ctx.device) { | 3484 | if (adm_ctx.device) { |
3485 | if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) | 3485 | if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) |
3486 | retcode = ERR_MINOR_EXISTS; | 3486 | retcode = ERR_MINOR_OR_VOLUME_EXISTS; |
3487 | /* else: still NO_ERROR */ | 3487 | /* else: still NO_ERROR */ |
3488 | goto out; | 3488 | goto out; |
3489 | } | 3489 | } |
@@ -3530,6 +3530,27 @@ out: | |||
3530 | return 0; | 3530 | return 0; |
3531 | } | 3531 | } |
3532 | 3532 | ||
3533 | static int adm_del_resource(struct drbd_resource *resource) | ||
3534 | { | ||
3535 | struct drbd_connection *connection; | ||
3536 | |||
3537 | for_each_connection(connection, resource) { | ||
3538 | if (connection->cstate > C_STANDALONE) | ||
3539 | return ERR_NET_CONFIGURED; | ||
3540 | } | ||
3541 | if (!idr_is_empty(&resource->devices)) | ||
3542 | return ERR_RES_IN_USE; | ||
3543 | |||
3544 | list_del_rcu(&resource->resources); | ||
3545 | /* Make sure all threads have actually stopped: state handling only | ||
3546 | * does drbd_thread_stop_nowait(). */ | ||
3547 | list_for_each_entry(connection, &resource->connections, connections) | ||
3548 | drbd_thread_stop(&connection->worker); | ||
3549 | synchronize_rcu(); | ||
3550 | drbd_free_resource(resource); | ||
3551 | return NO_ERROR; | ||
3552 | } | ||
3553 | |||
3533 | int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) | 3554 | int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) |
3534 | { | 3555 | { |
3535 | struct drbd_config_context adm_ctx; | 3556 | struct drbd_config_context adm_ctx; |
@@ -3575,14 +3596,6 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) | |||
3575 | } | 3596 | } |
3576 | } | 3597 | } |
3577 | 3598 | ||
3578 | /* If we reach this, all volumes (of this connection) are Secondary, | ||
3579 | * Disconnected, Diskless, aka Unconfigured. Make sure all threads have | ||
3580 | * actually stopped, state handling only does drbd_thread_stop_nowait(). */ | ||
3581 | for_each_connection(connection, resource) | ||
3582 | drbd_thread_stop(&connection->worker); | ||
3583 | |||
3584 | /* Now, nothing can fail anymore */ | ||
3585 | |||
3586 | /* delete volumes */ | 3599 | /* delete volumes */ |
3587 | idr_for_each_entry(&resource->devices, device, i) { | 3600 | idr_for_each_entry(&resource->devices, device, i) { |
3588 | retcode = adm_del_minor(device); | 3601 | retcode = adm_del_minor(device); |
@@ -3593,10 +3606,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) | |||
3593 | } | 3606 | } |
3594 | } | 3607 | } |
3595 | 3608 | ||
3596 | list_del_rcu(&resource->resources); | 3609 | retcode = adm_del_resource(resource); |
3597 | synchronize_rcu(); | ||
3598 | drbd_free_resource(resource); | ||
3599 | retcode = NO_ERROR; | ||
3600 | out: | 3610 | out: |
3601 | mutex_unlock(&resource->adm_mutex); | 3611 | mutex_unlock(&resource->adm_mutex); |
3602 | finish: | 3612 | finish: |
@@ -3608,7 +3618,6 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) | |||
3608 | { | 3618 | { |
3609 | struct drbd_config_context adm_ctx; | 3619 | struct drbd_config_context adm_ctx; |
3610 | struct drbd_resource *resource; | 3620 | struct drbd_resource *resource; |
3611 | struct drbd_connection *connection; | ||
3612 | enum drbd_ret_code retcode; | 3621 | enum drbd_ret_code retcode; |
3613 | 3622 | ||
3614 | retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); | 3623 | retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); |
@@ -3616,27 +3625,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) | |||
3616 | return retcode; | 3625 | return retcode; |
3617 | if (retcode != NO_ERROR) | 3626 | if (retcode != NO_ERROR) |
3618 | goto finish; | 3627 | goto finish; |
3619 | |||
3620 | resource = adm_ctx.resource; | 3628 | resource = adm_ctx.resource; |
3621 | mutex_lock(&resource->adm_mutex); | ||
3622 | for_each_connection(connection, resource) { | ||
3623 | if (connection->cstate > C_STANDALONE) { | ||
3624 | retcode = ERR_NET_CONFIGURED; | ||
3625 | goto out; | ||
3626 | } | ||
3627 | } | ||
3628 | if (!idr_is_empty(&resource->devices)) { | ||
3629 | retcode = ERR_RES_IN_USE; | ||
3630 | goto out; | ||
3631 | } | ||
3632 | 3629 | ||
3633 | list_del_rcu(&resource->resources); | 3630 | mutex_lock(&resource->adm_mutex); |
3634 | for_each_connection(connection, resource) | 3631 | retcode = adm_del_resource(resource); |
3635 | drbd_thread_stop(&connection->worker); | ||
3636 | synchronize_rcu(); | ||
3637 | drbd_free_resource(resource); | ||
3638 | retcode = NO_ERROR; | ||
3639 | out: | ||
3640 | mutex_unlock(&resource->adm_mutex); | 3632 | mutex_unlock(&resource->adm_mutex); |
3641 | finish: | 3633 | finish: |
3642 | drbd_adm_finish(&adm_ctx, info, retcode); | 3634 | drbd_adm_finish(&adm_ctx, info, retcode); |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 6960fb064731..d169b4a79267 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -2482,7 +2482,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) | |||
2482 | atomic_read(&device->rs_sect_ev); | 2482 | atomic_read(&device->rs_sect_ev); |
2483 | 2483 | ||
2484 | if (atomic_read(&device->ap_actlog_cnt) | 2484 | if (atomic_read(&device->ap_actlog_cnt) |
2485 | || !device->rs_last_events || curr_events - device->rs_last_events > 64) { | 2485 | || curr_events - device->rs_last_events > 64) { |
2486 | unsigned long rs_left; | 2486 | unsigned long rs_left; |
2487 | int i; | 2487 | int i; |
2488 | 2488 | ||
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5a01c53dddeb..34f2f0ba409b 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -36,29 +36,15 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, | |||
36 | /* Update disk stats at start of I/O request */ | 36 | /* Update disk stats at start of I/O request */ |
37 | static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) | 37 | static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) |
38 | { | 38 | { |
39 | const int rw = bio_data_dir(req->master_bio); | 39 | generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, |
40 | int cpu; | 40 | &device->vdisk->part0); |
41 | cpu = part_stat_lock(); | ||
42 | part_round_stats(cpu, &device->vdisk->part0); | ||
43 | part_stat_inc(cpu, &device->vdisk->part0, ios[rw]); | ||
44 | part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9); | ||
45 | (void) cpu; /* The macro invocations above want the cpu argument, I do not like | ||
46 | the compiler warning about cpu only assigned but never used... */ | ||
47 | part_inc_in_flight(&device->vdisk->part0, rw); | ||
48 | part_stat_unlock(); | ||
49 | } | 41 | } |
50 | 42 | ||
51 | /* Update disk stats when completing request upwards */ | 43 | /* Update disk stats when completing request upwards */ |
52 | static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) | 44 | static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) |
53 | { | 45 | { |
54 | int rw = bio_data_dir(req->master_bio); | 46 | generic_end_io_acct(bio_data_dir(req->master_bio), |
55 | unsigned long duration = jiffies - req->start_jif; | 47 | &device->vdisk->part0, req->start_jif); |
56 | int cpu; | ||
57 | cpu = part_stat_lock(); | ||
58 | part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration); | ||
59 | part_round_stats(cpu, &device->vdisk->part0); | ||
60 | part_dec_in_flight(&device->vdisk->part0, rw); | ||
61 | part_stat_unlock(); | ||
62 | } | 48 | } |
63 | 49 | ||
64 | static struct drbd_request *drbd_req_new(struct drbd_device *device, | 50 | static struct drbd_request *drbd_req_new(struct drbd_device *device, |
@@ -1545,6 +1531,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct | |||
1545 | struct request_queue * const b = | 1531 | struct request_queue * const b = |
1546 | device->ldev->backing_bdev->bd_disk->queue; | 1532 | device->ldev->backing_bdev->bd_disk->queue; |
1547 | if (b->merge_bvec_fn) { | 1533 | if (b->merge_bvec_fn) { |
1534 | bvm->bi_bdev = device->ldev->backing_bdev; | ||
1548 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); | 1535 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
1549 | limit = min(limit, backing_limit); | 1536 | limit = min(limit, backing_limit); |
1550 | } | 1537 | } |
@@ -1628,7 +1615,7 @@ void request_timer_fn(unsigned long data) | |||
1628 | time_after(now, req_peer->pre_send_jif + ent) && | 1615 | time_after(now, req_peer->pre_send_jif + ent) && |
1629 | !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { | 1616 | !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { |
1630 | drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); | 1617 | drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); |
1631 | _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); | 1618 | _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD); |
1632 | } | 1619 | } |
1633 | if (dt && oldest_submit_jif != now && | 1620 | if (dt && oldest_submit_jif != now && |
1634 | time_after(now, oldest_submit_jif + dt) && | 1621 | time_after(now, oldest_submit_jif + dt) && |
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 84b11f887d73..2d7dd269b6a8 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c | |||
@@ -215,6 +215,18 @@ static bool no_peer_wf_report_params(struct drbd_connection *connection) | |||
215 | return rv; | 215 | return rv; |
216 | } | 216 | } |
217 | 217 | ||
218 | static void wake_up_all_devices(struct drbd_connection *connection) | ||
219 | { | ||
220 | struct drbd_peer_device *peer_device; | ||
221 | int vnr; | ||
222 | |||
223 | rcu_read_lock(); | ||
224 | idr_for_each_entry(&connection->peer_devices, peer_device, vnr) | ||
225 | wake_up(&peer_device->device->state_wait); | ||
226 | rcu_read_unlock(); | ||
227 | |||
228 | } | ||
229 | |||
218 | 230 | ||
219 | /** | 231 | /** |
220 | * cl_wide_st_chg() - true if the state change is a cluster wide one | 232 | * cl_wide_st_chg() - true if the state change is a cluster wide one |
@@ -410,6 +422,22 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask, | |||
410 | return rv; | 422 | return rv; |
411 | } | 423 | } |
412 | 424 | ||
425 | enum drbd_state_rv | ||
426 | _drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask, | ||
427 | union drbd_state val, enum chg_state_flags f) | ||
428 | { | ||
429 | enum drbd_state_rv rv; | ||
430 | |||
431 | BUG_ON(f & CS_SERIALIZE); | ||
432 | |||
433 | wait_event_cmd(device->state_wait, | ||
434 | (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE, | ||
435 | mutex_unlock(device->state_mutex), | ||
436 | mutex_lock(device->state_mutex)); | ||
437 | |||
438 | return rv; | ||
439 | } | ||
440 | |||
413 | static void print_st(struct drbd_device *device, const char *name, union drbd_state ns) | 441 | static void print_st(struct drbd_device *device, const char *name, union drbd_state ns) |
414 | { | 442 | { |
415 | drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", | 443 | drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", |
@@ -629,14 +657,11 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c | |||
629 | if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED) | 657 | if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED) |
630 | rv = SS_IN_TRANSIENT_STATE; | 658 | rv = SS_IN_TRANSIENT_STATE; |
631 | 659 | ||
632 | /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS) | ||
633 | rv = SS_IN_TRANSIENT_STATE; */ | ||
634 | |||
635 | /* While establishing a connection only allow cstate to change. | 660 | /* While establishing a connection only allow cstate to change. |
636 | Delay/refuse role changes, detach attach etc... */ | 661 | Delay/refuse role changes, detach attach etc... (they do not touch cstate) */ |
637 | if (test_bit(STATE_SENT, &connection->flags) && | 662 | if (test_bit(STATE_SENT, &connection->flags) && |
638 | !(os.conn == C_WF_REPORT_PARAMS || | 663 | !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) || |
639 | (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION))) | 664 | (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS))) |
640 | rv = SS_IN_TRANSIENT_STATE; | 665 | rv = SS_IN_TRANSIENT_STATE; |
641 | 666 | ||
642 | if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED) | 667 | if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED) |
@@ -1032,8 +1057,10 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, | |||
1032 | 1057 | ||
1033 | /* Wake up role changes, that were delayed because of connection establishing */ | 1058 | /* Wake up role changes, that were delayed because of connection establishing */ |
1034 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && | 1059 | if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && |
1035 | no_peer_wf_report_params(connection)) | 1060 | no_peer_wf_report_params(connection)) { |
1036 | clear_bit(STATE_SENT, &connection->flags); | 1061 | clear_bit(STATE_SENT, &connection->flags); |
1062 | wake_up_all_devices(connection); | ||
1063 | } | ||
1037 | 1064 | ||
1038 | wake_up(&device->misc_wait); | 1065 | wake_up(&device->misc_wait); |
1039 | wake_up(&device->state_wait); | 1066 | wake_up(&device->state_wait); |
@@ -1072,7 +1099,6 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, | |||
1072 | 1099 | ||
1073 | set_ov_position(device, ns.conn); | 1100 | set_ov_position(device, ns.conn); |
1074 | device->rs_start = now; | 1101 | device->rs_start = now; |
1075 | device->rs_last_events = 0; | ||
1076 | device->rs_last_sect_ev = 0; | 1102 | device->rs_last_sect_ev = 0; |
1077 | device->ov_last_oos_size = 0; | 1103 | device->ov_last_oos_size = 0; |
1078 | device->ov_last_oos_start = 0; | 1104 | device->ov_last_oos_start = 0; |
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h index cc41605ba21c..7f53c40823cd 100644 --- a/drivers/block/drbd/drbd_state.h +++ b/drivers/block/drbd/drbd_state.h | |||
@@ -117,6 +117,11 @@ extern enum drbd_state_rv _drbd_request_state(struct drbd_device *, | |||
117 | union drbd_state, | 117 | union drbd_state, |
118 | union drbd_state, | 118 | union drbd_state, |
119 | enum chg_state_flags); | 119 | enum chg_state_flags); |
120 | |||
121 | extern enum drbd_state_rv | ||
122 | _drbd_request_state_holding_state_mutex(struct drbd_device *, union drbd_state, | ||
123 | union drbd_state, enum chg_state_flags); | ||
124 | |||
120 | extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state, | 125 | extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state, |
121 | enum chg_state_flags, | 126 | enum chg_state_flags, |
122 | struct completion *done); | 127 | struct completion *done); |
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index d2d1f97511bd..d0fae55d871d 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c | |||
@@ -1592,11 +1592,15 @@ void drbd_resync_after_changed(struct drbd_device *device) | |||
1592 | 1592 | ||
1593 | void drbd_rs_controller_reset(struct drbd_device *device) | 1593 | void drbd_rs_controller_reset(struct drbd_device *device) |
1594 | { | 1594 | { |
1595 | struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; | ||
1595 | struct fifo_buffer *plan; | 1596 | struct fifo_buffer *plan; |
1596 | 1597 | ||
1597 | atomic_set(&device->rs_sect_in, 0); | 1598 | atomic_set(&device->rs_sect_in, 0); |
1598 | atomic_set(&device->rs_sect_ev, 0); | 1599 | atomic_set(&device->rs_sect_ev, 0); |
1599 | device->rs_in_flight = 0; | 1600 | device->rs_in_flight = 0; |
1601 | device->rs_last_events = | ||
1602 | (int)part_stat_read(&disk->part0, sectors[0]) + | ||
1603 | (int)part_stat_read(&disk->part0, sectors[1]); | ||
1600 | 1604 | ||
1601 | /* Updating the RCU protected object in place is necessary since | 1605 | /* Updating the RCU protected object in place is necessary since |
1602 | this function gets called from atomic context. | 1606 | this function gets called from atomic context. |
@@ -1743,7 +1747,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) | |||
1743 | device->rs_failed = 0; | 1747 | device->rs_failed = 0; |
1744 | device->rs_paused = 0; | 1748 | device->rs_paused = 0; |
1745 | device->rs_same_csum = 0; | 1749 | device->rs_same_csum = 0; |
1746 | device->rs_last_events = 0; | ||
1747 | device->rs_last_sect_ev = 0; | 1750 | device->rs_last_sect_ev = 0; |
1748 | device->rs_total = tw; | 1751 | device->rs_total = tw; |
1749 | device->rs_start = now; | 1752 | device->rs_start = now; |