author		Andreas Gruenbacher <agruen@linbit.com>	2010-12-09 09:03:57 -0500
committer	Philipp Reisner <philipp.reisner@linbit.com>	2011-03-10 05:36:24 -0500
commit		81e84650c200de0695372461964dd960365696db (patch)
tree		c57e51e8c1f540321fd6e8d43c304a95f7fa5ebe /drivers/block/drbd/drbd_receiver.c
parent		6184ea2145609b4ad63b141bf1f8124135ff4949 (diff)
drbd: Use the standard bool, true, and false keywords
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_receiver.c')
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	128
1 file changed, 64 insertions, 64 deletions
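The change is mechanical: every use of the driver-local TRUE and FALSE macros is replaced with the standard true and false keywords (pulled in through the kernel's common headers), while the affected functions keep their int return types. A minimal standalone sketch of the pattern, using hypothetical socket_okay_old()/socket_okay_new() helpers rather than code from this patch:

#include <stdbool.h>	/* user-space stand-in; kernel code gets bool/true/false via <linux/types.h> */
#include <stdio.h>

/* Driver-local macros of the kind this patch makes unnecessary. */
#define TRUE	1
#define FALSE	0

/* Before: int return value expressed with the local macros. */
static int socket_okay_old(const void *sock)
{
	if (!sock)
		return FALSE;
	return TRUE;
}

/* After: same logic and same int return type, but the standard keywords.
 * true/false evaluate to 1/0, so callers are unaffected; the return type
 * can be tightened to bool in a later cleanup. */
static int socket_okay_new(const void *sock)
{
	if (!sock)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n", socket_okay_old(NULL), socket_okay_new("x"));
	return 0;
}

Because the substitution does not change any value, the patch is behaviour-neutral; it only removes the dependency on the private macro definitions.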
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 00dcb1172ca8..732aacb46a32 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -725,16 +725,16 @@ static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
 	char tb[4];
 
 	if (!*sock)
-		return FALSE;
+		return false;
 
 	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
 
 	if (rr > 0 || rr == -EAGAIN) {
-		return TRUE;
+		return true;
 	} else {
 		sock_release(*sock);
 		*sock = NULL;
-		return FALSE;
+		return false;
 	}
 }
 
@@ -933,7 +933,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 	r = drbd_recv(mdev, h, sizeof(*h));
 	if (unlikely(r != sizeof(*h))) {
 		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
-		return FALSE;
+		return false;
 	}
 
 	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
@@ -947,11 +947,11 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 		    be32_to_cpu(h->h80.magic),
 		    be16_to_cpu(h->h80.command),
 		    be16_to_cpu(h->h80.length));
-		return FALSE;
+		return false;
 	}
 	mdev->last_received = jiffies;
 
-	return TRUE;
+	return true;
 }
 
 static void drbd_flush(struct drbd_conf *mdev)
@@ -1160,7 +1160,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	switch (mdev->write_ordering) {
 	case WO_none:
 		if (rv == FE_RECYCLED)
-			return TRUE;
+			return true;
 
 		/* receiver context, in the writeout path of the other node.
 		 * avoid potential distributed deadlock */
@@ -1188,10 +1188,10 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 		D_ASSERT(atomic_read(&epoch->active) == 0);
 		D_ASSERT(epoch->flags == 0);
 
-		return TRUE;
+		return true;
 	default:
 		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
-		return FALSE;
+		return false;
 	}
 
 	epoch->flags = 0;
@@ -1209,7 +1209,7 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	}
 	spin_unlock(&mdev->epoch_lock);
 
-	return TRUE;
+	return true;
 }
 
 /* used from receive_RSDataReply (recv_resync_read)
@@ -1303,7 +1303,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 	void *data;
 
 	if (!data_size)
-		return TRUE;
+		return true;
 
 	page = drbd_pp_alloc(mdev, 1, 1);
 
@@ -1426,7 +1426,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
 	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
-		return TRUE;
+		return true;
 
 	/* drbd_submit_ee currently fails for one reason only:
 	 * not being able to allocate enough bios.
@@ -1438,7 +1438,7 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 	drbd_free_ee(mdev, e);
 fail:
 	put_ldev(mdev);
-	return FALSE;
+	return false;
 }
 
 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -1455,7 +1455,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	spin_unlock_irq(&mdev->req_lock);
 	if (unlikely(!req)) {
 		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
-		return FALSE;
+		return false;
 	}
 
 	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
@@ -1655,7 +1655,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	e = read_in_block(mdev, p->block_id, sector, data_size);
 	if (!e) {
 		put_ldev(mdev);
-		return FALSE;
+		return false;
 	}
 
 	e->w.cb = e_end_block;
@@ -1774,7 +1774,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				put_ldev(mdev);
 				wake_asender(mdev);
 				finish_wait(&mdev->misc_wait, &wait);
-				return TRUE;
+				return true;
 			}
 
 			if (signal_pending(current)) {
@@ -1830,7 +1830,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	}
 
 	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
-		return TRUE;
+		return true;
 
 	/* drbd_submit_ee currently fails for one reason only:
 	 * not being able to allocate enough bios.
@@ -1848,7 +1848,7 @@ out_interrupted:
 	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
 	put_ldev(mdev);
 	drbd_free_ee(mdev, e);
-	return FALSE;
+	return false;
 }
 
 /* We may throttle resync, if the lower device seems to be busy,
@@ -1934,12 +1934,12 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
 				(unsigned long long)sector, size);
-		return FALSE;
+		return false;
 	}
 	if (sector + (size>>9) > capacity) {
 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
 				(unsigned long long)sector, size);
-		return FALSE;
+		return false;
 	}
 
 	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
@@ -1976,7 +1976,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
 	if (!e) {
 		put_ldev(mdev);
-		return FALSE;
+		return false;
 	}
 
 	switch (cmd) {
@@ -2089,7 +2089,7 @@ submit:
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
-		return TRUE;
+		return true;
 
 	/* drbd_submit_ee currently fails for one reason only:
 	 * not being able to allocate enough bios.
@@ -2102,7 +2102,7 @@ submit:
 out_free_e:
 	put_ldev(mdev);
 	drbd_free_ee(mdev, e);
-	return FALSE;
+	return false;
 }
 
 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
@@ -2690,7 +2690,7 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 		unsigned char *my_alg = mdev->net_conf->integrity_alg;
 
 		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
-			return FALSE;
+			return false;
 
 		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
 		if (strcmp(p_integrity_alg, my_alg)) {
@@ -2701,11 +2701,11 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
 	}
 
-	return TRUE;
+	return true;
 
 disconnect:
 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-	return FALSE;
+	return false;
 }
 
 /* helper function
@@ -2737,7 +2737,7 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
 
 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
 {
-	int ok = TRUE;
+	int ok = true;
 	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
 	unsigned int header_size, data_size, exp_max_sz;
 	struct crypto_hash *verify_tfm = NULL;
@@ -2755,7 +2755,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	if (packet_size > exp_max_sz) {
 		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
 		    packet_size, exp_max_sz);
-		return FALSE;
+		return false;
 	}
 
 	if (apv <= 88) {
@@ -2775,7 +2775,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
 
 	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
-		return FALSE;
+		return false;
 
 	mdev->sync_conf.rate = be32_to_cpu(p->rate);
 
@@ -2785,11 +2785,11 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 			dev_err(DEV, "verify-alg too long, "
 			    "peer wants %u, accepting only %u byte\n",
 			    data_size, SHARED_SECRET_MAX);
-			return FALSE;
+			return false;
 		}
 
 		if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
-			return FALSE;
+			return false;
 
 		/* we expect NUL terminated string */
 		/* but just in case someone tries to be evil */
@@ -2883,7 +2883,7 @@ disconnect:
 	/* but free the verify_tfm again, if csums_tfm did not work out */
 	crypto_free_hash(verify_tfm);
 	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-	return FALSE;
+	return false;
 }
 
 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
@@ -2920,7 +2920,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
 		dev_err(DEV, "some backing storage is needed\n");
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-		return FALSE;
+		return false;
 	}
 
 	/* just store the peer's disk size for now.
@@ -2957,7 +2957,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 			mdev->ldev->dc.disk_size = my_usize;
 			put_ldev(mdev);
-			return FALSE;
+			return false;
 		}
 		put_ldev(mdev);
 	}
@@ -2967,7 +2967,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		dd = drbd_determin_dev_size(mdev, ddsf);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
-			return FALSE;
+			return false;
 		drbd_md_sync(mdev);
 	} else {
 		/* I am diskless, need to accept the peer's size. */
@@ -3014,7 +3014,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		}
 	}
 
-	return TRUE;
+	return true;
 }
 
 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3038,7 +3038,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
 		    (unsigned long long)mdev->ed_uuid);
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-		return FALSE;
+		return false;
 	}
 
 	if (get_ldev(mdev)) {
@@ -3073,7 +3073,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
 		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
 
-	return TRUE;
+	return true;
 }
 
 /**
@@ -3118,7 +3118,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
 	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
-		return TRUE;
+		return true;
 	}
 
 	mask = convert_state(mask);
@@ -3129,7 +3129,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	drbd_send_sr_reply(mdev, rv);
 	drbd_md_sync(mdev);
 
-	return TRUE;
+	return true;
 }
 
 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3174,7 +3174,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		    peer_state.conn == C_CONNECTED) {
 			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
 				drbd_resync_finished(mdev);
-			return TRUE;
+			return true;
 		}
 	}
 
@@ -3227,10 +3227,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				real_peer_disk = D_DISKLESS;
 			} else {
 				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
-					return FALSE;
+					return false;
 				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-				return FALSE;
+				return false;
 			}
 		}
 	}
@@ -3255,7 +3255,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
 		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
-		return FALSE;
+		return false;
 	}
 	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
 	ns = mdev->state;
@@ -3263,7 +3263,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	if (rv < SS_SUCCESS) {
 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
-		return FALSE;
+		return false;
 	}
 
 	if (os.conn > C_WF_REPORT_PARAMS) {
@@ -3281,7 +3281,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
 
-	return TRUE;
+	return true;
 }
 
 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3308,7 +3308,7 @@ static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	} else
 		dev_err(DEV, "Ignoring SyncUUID packet!\n");
 
-	return TRUE;
+	return true;
 }
 
 enum receive_bitmap_ret { OK, DONE, FAILED };
@@ -3462,7 +3462,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
 	struct bm_xfer_ctx c;
 	void *buffer;
 	enum receive_bitmap_ret ret;
-	int ok = FALSE;
+	int ok = false;
 	struct p_header80 *h = &mdev->data.rbuf.header.h80;
 
 	/* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
@@ -3535,7 +3535,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
 		    drbd_conn_str(mdev->state.conn));
 	}
 
-	ok = TRUE;
+	ok = true;
  out:
 	/* drbd_bm_unlock(mdev); by intention no lock */
 	if (ok && mdev->state.conn == C_WF_BITMAP_S)
@@ -3569,7 +3569,7 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u
 	 * with the data requests being unplugged */
 	drbd_tcp_quickack(mdev->data.socket);
 
-	return TRUE;
+	return true;
 }
 
 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
@@ -3578,7 +3578,7 @@ static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, un
 
 	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
 
-	return TRUE;
+	return true;
 }
 
 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
@@ -4147,7 +4147,7 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
 	}
 	wake_up(&mdev->state_wait);
 
-	return TRUE;
+	return true;
 }
 
 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4163,7 +4163,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
 	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
 		wake_up(&mdev->misc_wait);
 
-	return TRUE;
+	return true;
 }
 
 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4186,7 +4186,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
 	dec_rs_pending(mdev);
 	atomic_add(blksize >> 9, &mdev->rs_sect_in);
 
-	return TRUE;
+	return true;
 }
 
 /* when we receive the ACK for a write request,
@@ -4230,14 +4230,14 @@ static int validate_req_change_req_state(struct drbd_conf *mdev,
 	if (unlikely(!req)) {
 		spin_unlock_irq(&mdev->req_lock);
 		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
-		return FALSE;
+		return false;
 	}
 	__req_mod(req, what, &m);
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
-	return TRUE;
+	return true;
 }
 
 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4252,7 +4252,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
 	if (is_syncer_block_id(p->block_id)) {
 		drbd_set_in_sync(mdev, sector, blksize);
 		dec_rs_pending(mdev);
-		return TRUE;
+		return true;
 	}
 	switch (be16_to_cpu(h->command)) {
 	case P_RS_WRITE_ACK:
@@ -4273,7 +4273,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
 		break;
 	default:
 		D_ASSERT(0);
-		return FALSE;
+		return false;
 	}
 
 	return validate_req_change_req_state(mdev, p->block_id, sector,
@@ -4294,7 +4294,7 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
 		int size = be32_to_cpu(p->blksize);
 		dec_rs_pending(mdev);
 		drbd_rs_failed_io(mdev, sector, size);
-		return TRUE;
+		return true;
 	}
 	return validate_req_change_req_state(mdev, p->block_id, sector,
 		_ack_id_to_req, __func__ , neg_acked);
@@ -4332,7 +4332,7 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
 		put_ldev(mdev);
 	}
 
-	return TRUE;
+	return true;
 }
 
 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4349,7 +4349,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
 		drbd_queue_work_front(&mdev->data.work, w);
 	}
 
-	return TRUE;
+	return true;
 }
 
 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4370,7 +4370,7 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
 		ov_oos_print(mdev);
 
 	if (!get_ldev(mdev))
-		return TRUE;
+		return true;
 
 	drbd_rs_complete_io(mdev, sector);
 	dec_rs_pending(mdev);
@@ -4393,12 +4393,12 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
 		}
 	}
 	put_ldev(mdev);
-	return TRUE;
+	return true;
 }
 
 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
 {
-	return TRUE;
+	return true;
 }
 
 struct asender_cmd {