author	Lars Ellenberg <lars.ellenberg@linbit.com>	2012-09-22 14:27:19 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-10-30 03:39:18 -0400
commit	06f10adbdb027b225fd51584a218fa8344169514 (patch)
tree	4425b0f49e1518637ee6b66b68345c31972a6218
parent	44edfb0d785ea06712b5a717fa2c1ae34e300845 (diff)
drbd: prepare for more than 32 bit flags
- struct drbd_conf { ... unsigned long flags; ... }
+ struct drbd_conf { ... unsigned long drbd_flags[N]; ... }

And introduce wrapper functions for test/set/clear bit operations
on this member.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
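The conversion at each call site is mechanical: test_bit(FOO, &mdev->flags) becomes drbd_test_flag(mdev, FOO), and likewise for set/clear and the test-and-* variants. Below is a minimal userspace sketch of the same construction, illustrative only: the demo_* names are invented, and the __atomic_* GCC/Clang builtins merely stand in for the kernel's atomic set_bit()/test_bit(), which do the word indexing themselves.

#include <limits.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Hypothetical flag list standing in for enum drbd_flag; only the
 * trailing counter entry mirrors the patch. */
enum demo_flag {
	DEMO_CREATE_BARRIER,
	DEMO_SIGNAL_ASENDER,
	/* ... imagine the list growing past 32 entries ... */
	DEMO_N_FLAGS,	/* keep last */
};

struct demo_conf {
	/* one bit per flag, rounded up to whole longs */
	unsigned long drbd_flags[(DEMO_N_FLAGS + DEMO_BITS_PER_LONG - 1)
				 / DEMO_BITS_PER_LONG];
};

/* Userspace stand-ins for the kernel's atomic bitops. */
static void demo_set_flag(struct demo_conf *mdev, enum demo_flag f)
{
	__atomic_fetch_or(&mdev->drbd_flags[f / DEMO_BITS_PER_LONG],
			  1UL << (f % DEMO_BITS_PER_LONG), __ATOMIC_RELAXED);
}

static int demo_test_flag(struct demo_conf *mdev, enum demo_flag f)
{
	unsigned long w = __atomic_load_n(&mdev->drbd_flags[f / DEMO_BITS_PER_LONG],
					  __ATOMIC_RELAXED);
	return !!(w & (1UL << (f % DEMO_BITS_PER_LONG)));
}

int main(void)
{
	struct demo_conf mdev = { { 0 } };

	demo_set_flag(&mdev, DEMO_SIGNAL_ASENDER);
	printf("asender=%d barrier=%d\n",
	       demo_test_flag(&mdev, DEMO_SIGNAL_ASENDER),
	       demo_test_flag(&mdev, DEMO_CREATE_BARRIER));
	return 0;
}

Sizing the array as (N + BITS_PER_LONG - 1) / BITS_PER_LONG rounds up to whole words, so adding a 33rd or 65th flag only grows the array; no call site needs to change again.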
-rw-r--r--	drivers/block/drbd/drbd_actlog.c	4
-rw-r--r--	drivers/block/drbd/drbd_int.h	60
-rw-r--r--	drivers/block/drbd/drbd_main.c	82
-rw-r--r--	drivers/block/drbd/drbd_nl.c	56
-rw-r--r--	drivers/block/drbd/drbd_proc.c	2
-rw-r--r--	drivers/block/drbd/drbd_receiver.c	76
-rw-r--r--	drivers/block/drbd/drbd_req.c	20
-rw-r--r--	drivers/block/drbd/drbd_worker.c	14
8 files changed, 171 insertions, 143 deletions
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ec9b10cd65dd..d4dd563d0d54 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -90,7 +90,7 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
 		dt = MAX_SCHEDULE_TIMEOUT;
 
 	dt = wait_event_timeout(mdev->misc_wait,
-			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
+			*done || drbd_test_flag(mdev, FORCE_DETACH), dt);
 	if (dt == 0) {
 		dev_err(DEV, "meta-data IO operation timed out\n");
 		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
@@ -108,7 +108,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	mdev->md_io.done = 0;
 	mdev->md_io.error = -ENODEV;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+	if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
 		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 619a4944feea..125fe1481ca2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -808,7 +808,7 @@ enum {
 #define EE_HAS_DIGEST	(1<<__EE_HAS_DIGEST)
 
 /* global flag bits */
-enum {
+enum drbd_flag {
 	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
 	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
 	SEND_PING,		/* whether asender should send a ping asap */
@@ -858,6 +858,9 @@ enum {
 	 * and potentially deadlock on, this drbd worker.
 	 */
 	DISCONNECT_SENT, /* Currently the last bit in this 32bit word */
+
+	/* keep last */
+	DRBD_N_FLAGS,
 };
 
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -970,8 +973,7 @@ struct fifo_buffer {
 };
 
 struct drbd_conf {
-	/* things that are stored as / read from meta data on disk */
-	unsigned long flags;
+	unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG -1)/BITS_PER_LONG];
 
 	/* configured by drbdsetup */
 	struct net_conf *net_conf;	/* protected by get_net_conf() and put_net_conf() */
@@ -1143,6 +1145,31 @@ struct drbd_conf {
 	unsigned int local_max_bio_size;
 };
 
+static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	clear_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_and_set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+	return test_and_clear_bit(f, &mdev->drbd_flags[0]);
+}
+
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
 {
 	struct drbd_conf *mdev;
@@ -1812,12 +1839,12 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
 static inline void drbd_state_lock(struct drbd_conf *mdev)
 {
 	wait_event(mdev->misc_wait,
-		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+		   !drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
 }
 
 static inline void drbd_state_unlock(struct drbd_conf *mdev)
 {
-	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+	drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
 	wake_up(&mdev->misc_wait);
 }
 
@@ -1874,9 +1901,9 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
-		set_bit(WAS_IO_ERROR, &mdev->flags);
+		drbd_set_flag(mdev, WAS_IO_ERROR);
 		if (forcedetach == DRBD_FORCE_DETACH)
-			set_bit(FORCE_DETACH, &mdev->flags);
+			drbd_set_flag(mdev, FORCE_DETACH);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
 			dev_err(DEV,
@@ -2037,13 +2064,13 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 
 static inline void wake_asender(struct drbd_conf *mdev)
 {
-	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+	if (drbd_test_flag(mdev, SIGNAL_ASENDER))
 		force_sig(DRBD_SIG, mdev->asender.task);
 }
 
 static inline void request_ping(struct drbd_conf *mdev)
 {
-	set_bit(SEND_PING, &mdev->flags);
+	drbd_set_flag(mdev, SEND_PING);
 	wake_asender(mdev);
 }
 
@@ -2374,7 +2401,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 
 	if (is_susp(mdev->state))
 		return false;
-	if (test_bit(SUSPEND_IO, &mdev->flags))
+	if (drbd_test_flag(mdev, SUSPEND_IO))
 		return false;
 
 	/* to avoid potential deadlock or bitmap corruption,
@@ -2389,7 +2416,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 	 * and we are within the spinlock anyways, we have this workaround. */
 	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
 		return false;
-	if (test_bit(BITMAP_IO, &mdev->flags))
+	if (drbd_test_flag(mdev, BITMAP_IO))
 		return false;
 	return true;
 }
@@ -2427,8 +2454,8 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
 
 	D_ASSERT(ap_bio >= 0);
 
-	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+	if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
+		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
 			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
 	}
 
@@ -2477,7 +2504,7 @@ static inline void drbd_update_congested(struct drbd_conf *mdev)
 {
 	struct sock *sk = mdev->data.socket->sk;
 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
-		set_bit(NET_CONGESTED, &mdev->flags);
+		drbd_set_flag(mdev, NET_CONGESTED);
 }
 
 static inline int drbd_queue_order_type(struct drbd_conf *mdev)
@@ -2494,14 +2521,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
 
-	if (test_bit(MD_NO_FUA, &mdev->flags))
+	if (drbd_test_flag(mdev, MD_NO_FUA))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
 	if (r) {
-		set_bit(MD_NO_FUA, &mdev->flags);
+		drbd_set_flag(mdev, MD_NO_FUA);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
 
+
 #endif
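Two details in the drbd_int.h hunks above do the heavy lifting: DRBD_N_FLAGS is a trailing sentinel that always equals the number of real enum entries, and the array length is the ceiling division of that count by BITS_PER_LONG (65 flags need 2 longs on a 64-bit machine, 3 on a 32-bit one). A compile-time sanity check of that arithmetic, as a hedged sketch with made-up DEMO_* names:

#include <limits.h>	/* CHAR_BIT */

/* Hypothetical reduced flag set; the real one is enum drbd_flag above. */
enum demo_flag {
	DEMO_FLAG_A,
	DEMO_FLAG_B,
	DEMO_FLAG_C,
	/* keep last */
	DEMO_N_FLAGS,		/* == 3, the count of entries above */
};

#define DEMO_BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* (n + d - 1) / d is ceiling division: enough longs for DEMO_N_FLAGS bits. */
#define DEMO_FLAG_WORDS \
	((DEMO_N_FLAGS + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

_Static_assert(DEMO_FLAG_WORDS * DEMO_BITS_PER_LONG >= DEMO_N_FLAGS,
	       "flag array must cover every enum entry");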
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7b48653d1c8f..d8ba5c42670f 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,7 +322,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 		list_splice_init(&b->requests, &mdev->barrier_acked_requests);
 
 		nob = b->next;
-		if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+		if (drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
 			_tl_add_barrier(mdev, b);
 			if (nob)
 				mdev->oldest_tle = nob;
@@ -381,7 +381,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 			if (b->w.cb == NULL) {
 				b->w.cb = w_send_barrier;
 				inc_ap_pending(mdev);
-				set_bit(CREATE_BARRIER, &mdev->flags);
+				drbd_set_flag(mdev, CREATE_BARRIER);
 			}
 
 			drbd_queue_work(&mdev->data.work, &b->w);
@@ -464,7 +464,7 @@ static void _tl_clear(struct drbd_conf *mdev)
 	}
 
 	/* ensure bit indicating barrier is required is clear */
-	clear_bit(CREATE_BARRIER, &mdev->flags);
+	drbd_clear_flag(mdev, CREATE_BARRIER);
 
 	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
 
@@ -582,10 +582,10 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
 	unsigned long flags;
 	enum drbd_state_rv rv;
 
-	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_SUCCESS))
 		return SS_CW_SUCCESS;
 
-	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_FAIL))
 		return SS_CW_FAILED_BY_PEER;
 
 	rv = 0;
@@ -660,7 +660,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 	}
 
 	if (mask.conn == C_MASK && val.conn == C_DISCONNECTING)
-		set_bit(DISCONNECT_SENT, &mdev->flags);
+		drbd_set_flag(mdev, DISCONNECT_SENT);
 
 	wait_event(mdev->state_wait,
 		   (rv = _req_st_cond(mdev, mask, val)));
@@ -850,7 +850,7 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
 
 	/* While establishing a connection only allow cstate to change.
 	   Delay/refuse role changes, detach attach etc... */
-	if (test_bit(STATE_SENT, &mdev->flags) &&
+	if (drbd_test_flag(mdev, STATE_SENT) &&
 	    !(os.conn == C_WF_REPORT_PARAMS ||
 	      (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
 		rv = SS_IN_TRANSIENT_STATE;
@@ -1109,7 +1109,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
 
 static void drbd_resume_al(struct drbd_conf *mdev)
 {
-	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+	if (drbd_test_and_clear_flag(mdev, AL_SUSPENDED))
 		dev_info(DEV, "Resumed AL updates\n");
 }
 
@@ -1215,8 +1215,8 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 	if (ns.disk == D_DISKLESS &&
 	    ns.conn == C_STANDALONE &&
 	    ns.role == R_SECONDARY &&
-	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
-		set_bit(DEVICE_DYING, &mdev->flags);
+	    !drbd_test_and_set_flag(mdev, CONFIG_PENDING))
+		drbd_set_flag(mdev, DEVICE_DYING);
 
 	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
 	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -1291,7 +1291,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 			   MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
 			   MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
 
-		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+		if (drbd_test_flag(mdev, CRASHED_PRIMARY))
 			mdf |= MDF_CRASHED_PRIMARY;
 		if (mdev->state.role == R_PRIMARY ||
 		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
@@ -1316,7 +1316,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
 	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
 	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
 	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
-		set_bit(CONSIDER_RESYNC, &mdev->flags);
+		drbd_set_flag(mdev, CONSIDER_RESYNC);
 
 	/* Receiver should clean up itself */
 	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
@@ -1400,7 +1400,7 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
 	D_ASSERT(current == mdev->worker.task);
 
 	/* open coded non-blocking drbd_suspend_io(mdev); */
-	set_bit(SUSPEND_IO, &mdev->flags);
+	drbd_set_flag(mdev, SUSPEND_IO);
 
 	drbd_bm_lock(mdev, why, flags);
 	rv = io_fn(mdev);
@@ -1426,7 +1426,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	union drbd_state nsm = (union drbd_state){ .i = -1 };
 
 	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
-		clear_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_clear_flag(mdev, CRASHED_PRIMARY);
 		if (mdev->p_uuid)
 			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
 	}
@@ -1466,9 +1466,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	if (ns.susp_fen) {
 		/* case1: The outdate peer handler is successful: */
 		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
-			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+			if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
 				drbd_uuid_new_current(mdev);
-				clear_bit(NEW_CUR_UUID, &mdev->flags);
+				drbd_clear_flag(mdev, NEW_CUR_UUID);
 			}
 			spin_lock_irq(&mdev->req_lock);
 			_tl_clear(mdev);
@@ -1477,7 +1477,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		}
 		/* case2: The connection was established again: */
 		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
-			clear_bit(NEW_CUR_UUID, &mdev->flags);
+			drbd_clear_flag(mdev, NEW_CUR_UUID);
 			what = resend;
 			nsm.susp_fen = 0;
 		}
@@ -1534,7 +1534,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
 	    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
 		if (is_susp(mdev->state)) {
-			set_bit(NEW_CUR_UUID, &mdev->flags);
+			drbd_set_flag(mdev, NEW_CUR_UUID);
 		} else {
 			drbd_uuid_new_current(mdev);
 			drbd_send_uuids(mdev);
@@ -1625,7 +1625,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		 * we might come from an failed Attach before ldev was set. */
 		if (mdev->ldev) {
 			eh = mdev->ldev->dc.on_io_error;
-			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+			was_io_error = drbd_test_and_clear_flag(mdev, WAS_IO_ERROR);
 
 			if (was_io_error && eh == EP_CALL_HELPER)
 				drbd_khelper(mdev, "local-io-error");
@@ -1643,7 +1643,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			 * So aborting local requests may cause crashes,
 			 * or even worse, silent data corruption.
 			 */
-			if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+			if (drbd_test_and_clear_flag(mdev, FORCE_DETACH))
 				tl_abort_disk_io(mdev);
 
 			/* current state still has to be D_FAILED,
@@ -1692,7 +1692,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
 	/* Disks got bigger while they were detached */
 	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
-	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+	    drbd_test_and_clear_flag(mdev, RESYNC_AFTER_NEG)) {
 		if (ns.conn == C_CONNECTED)
 			resync_after_online_grow(mdev);
 	}
@@ -1717,7 +1717,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
 	/* Wake up role changes, that were delayed because of connection establishing */
 	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
-		clear_bit(STATE_SENT, &mdev->flags);
+		drbd_clear_flag(mdev, STATE_SENT);
 		wake_up(&mdev->state_wait);
 	}
 
@@ -1750,7 +1750,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		if (os.aftr_isp != ns.aftr_isp)
 			resume_next_sg(mdev);
 		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
-		if (test_bit(DEVICE_DYING, &mdev->flags))
+		if (drbd_test_flag(mdev, DEVICE_DYING))
 			drbd_thread_stop_nowait(&mdev->worker);
 	}
 
@@ -2145,7 +2145,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
 	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
 	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
 	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
-	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
+	uuid_flags |= drbd_test_flag(mdev, CRASHED_PRIMARY) ? 2 : 0;
 	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
 	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
 
@@ -2775,7 +2775,7 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 		offset += sent;
 	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
 	set_fs(oldfs);
-	clear_bit(NET_CONGESTED, &mdev->flags);
+	drbd_clear_flag(mdev, NET_CONGESTED);
 
 	ok = (len == 0);
 	if (likely(ok))
@@ -2877,7 +2877,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 		dp_flags |= DP_MAY_SET_IN_SYNC;
 
 	p.dp_flags = cpu_to_be32(dp_flags);
-	set_bit(UNPLUG_REMOTE, &mdev->flags);
+	drbd_set_flag(mdev, UNPLUG_REMOTE);
 	ok = (sizeof(p) ==
 		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
 	if (ok && dgs) {
@@ -3056,7 +3056,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
 	} while (sent < size);
 
 	if (sock == mdev->data.socket)
-		clear_bit(NET_CONGESTED, &mdev->flags);
+		drbd_clear_flag(mdev, NET_CONGESTED);
 
 	if (rv <= 0) {
 		if (rv != -EAGAIN) {
@@ -3263,7 +3263,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	}
 
 	drbd_free_resources(mdev);
-	clear_bit(AL_SUSPENDED, &mdev->flags);
+	drbd_clear_flag(mdev, AL_SUSPENDED);
 
 	/*
 	 * currently we drbd_init_ee only on module load, so
@@ -3556,7 +3556,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		goto out;
 	}
 
-	if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+	if (drbd_test_flag(mdev, CALLBACK_PENDING)) {
 		r |= (1 << BDI_async_congested);
 		/* Without good local data, we would need to read from remote,
 		 * and that would need the worker thread as well, which is
@@ -3580,7 +3580,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		reason = 'b';
 	}
 
-	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+	if (bdi_bits & (1 << BDI_async_congested) && drbd_test_flag(mdev, NET_CONGESTED)) {
 		r |= (1 << BDI_async_congested);
 		reason = reason == 'b' ? 'a' : 'n';
 	}
@@ -3867,7 +3867,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
 
 	del_timer(&mdev->md_sync_timer);
 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
-	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+	if (!drbd_test_and_clear_flag(mdev, MD_DIRTY))
 		return;
 
 	/* We use here D_FAILED and not D_ATTACHING because we try to write
@@ -4011,7 +4011,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 #ifdef DEBUG
 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
 {
-	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
+	if (!drbd_test_and_set_flag(mdev, MD_DIRTY)) {
 		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
 		mdev->last_md_mark_dirty.line = line;
 		mdev->last_md_mark_dirty.func = func;
@@ -4020,7 +4020,7 @@ void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *
 #else
 void drbd_md_mark_dirty(struct drbd_conf *mdev)
 {
-	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
+	if (!drbd_test_and_set_flag(mdev, MD_DIRTY))
 		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
 }
 #endif
@@ -4182,14 +4182,14 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		put_ldev(mdev);
 	}
 
-	clear_bit(BITMAP_IO, &mdev->flags);
+	drbd_clear_flag(mdev, BITMAP_IO);
 	smp_mb__after_clear_bit();
 	wake_up(&mdev->misc_wait);
 
 	if (work->done)
 		work->done(mdev, rv);
 
-	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+	drbd_clear_flag(mdev, BITMAP_IO_QUEUED);
 	work->why = NULL;
 	work->flags = 0;
 
@@ -4210,7 +4210,7 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
 		__free_page(mdev->md_io_tmpp);
 		mdev->md_io_tmpp = NULL;
 	}
-	clear_bit(GO_DISKLESS, &mdev->flags);
+	drbd_clear_flag(mdev, GO_DISKLESS);
 }
 
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
@@ -4227,7 +4227,7 @@ static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused
 void drbd_go_diskless(struct drbd_conf *mdev)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
-	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+	if (!drbd_test_and_set_flag(mdev, GO_DISKLESS))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
 }
 
@@ -4250,8 +4250,8 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 {
 	D_ASSERT(current == mdev->worker.task);
 
-	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
-	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
+	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO_QUEUED));
+	D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO));
 	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
 	if (mdev->bm_io_work.why)
 		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
@@ -4263,9 +4263,9 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 	mdev->bm_io_work.flags = flags;
 
 	spin_lock_irq(&mdev->req_lock);
-	set_bit(BITMAP_IO, &mdev->flags);
+	drbd_set_flag(mdev, BITMAP_IO);
 	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
-		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+		if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
 			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
 	}
 	spin_unlock_irq(&mdev->req_lock);
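A pattern worth noting in drbd_queue_bitmap_io() and dec_ap_bio() above: BITMAP_IO records that bitmap IO is wanted, while BITMAP_IO_QUEUED is claimed with an atomic test-and-set so that exactly one path queues the work item; w_bitmap_io() clears both bits again once the work has run. A minimal userspace sketch of that gate, using C11 atomics as a stand-in for test_and_set_bit() (the function and variable names here are invented for illustration):

#include <stdatomic.h>
#include <stdio.h>

/* One bit guards "work already queued": whoever flips it 0->1 queues
 * the work; every other caller sees it set and skips. */
static atomic_flag bitmap_io_queued = ATOMIC_FLAG_INIT;

static void queue_bitmap_io(const char *who)
{
	if (!atomic_flag_test_and_set(&bitmap_io_queued))
		printf("%s: queued bitmap IO work\n", who);
	else
		printf("%s: already queued, nothing to do\n", who);
}

int main(void)
{
	queue_bitmap_io("drbd_queue_bitmap_io");	/* first caller wins */
	queue_bitmap_io("dec_ap_bio");			/* second sees it set */
	return 0;
}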
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e2d368f1747e..42d172877aea 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -148,7 +148,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 	int ret;
 
 	if (current == mdev->worker.task)
-		set_bit(CALLBACK_PENDING, &mdev->flags);
+		drbd_set_flag(mdev, CALLBACK_PENDING);
 
 	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 
@@ -193,7 +193,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 				(ret >> 8) & 0xff, ret);
 
 	if (current == mdev->worker.task)
-		clear_bit(CALLBACK_PENDING, &mdev->flags);
+		drbd_clear_flag(mdev, CALLBACK_PENDING);
 
 	if (ret < 0) /* Ignore any ERRNOs we got. */
 		ret = 0;
@@ -295,7 +295,7 @@ static int _try_outdate_peer_async(void *data)
 	 */
 	spin_lock_irq(&mdev->req_lock);
 	ns = mdev->state;
-	if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
+	if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
 		ns.pdsk = nps;
 		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
 	}
@@ -583,7 +583,7 @@ char *ppsize(char *buf, unsigned long long size)
  */
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
-	set_bit(SUSPEND_IO, &mdev->flags);
+	drbd_set_flag(mdev, SUSPEND_IO);
 	if (is_susp(mdev->state))
 		return;
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
@@ -591,7 +591,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)
 
 void drbd_resume_io(struct drbd_conf *mdev)
 {
-	clear_bit(SUSPEND_IO, &mdev->flags);
+	drbd_clear_flag(mdev, SUSPEND_IO);
 	wake_up(&mdev->misc_wait);
 }
 
@@ -881,8 +881,8 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
  */
 static void drbd_reconfig_start(struct drbd_conf *mdev)
 {
-	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
-	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
+	wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
+	wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
 	drbd_thread_start(&mdev->worker);
 	drbd_flush_workqueue(mdev);
 }
@@ -896,10 +896,10 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
 	if (mdev->state.disk == D_DISKLESS &&
 	    mdev->state.conn == C_STANDALONE &&
 	    mdev->state.role == R_SECONDARY) {
-		set_bit(DEVICE_DYING, &mdev->flags);
+		drbd_set_flag(mdev, DEVICE_DYING);
 		drbd_thread_stop_nowait(&mdev->worker);
 	} else
-		clear_bit(CONFIG_PENDING, &mdev->flags);
+		drbd_clear_flag(mdev, CONFIG_PENDING);
 	spin_unlock_irq(&mdev->req_lock);
 	wake_up(&mdev->state_wait);
 }
@@ -919,7 +919,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.conn < C_CONNECTED)
-		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
+		s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
 
 	spin_unlock_irq(&mdev->req_lock);
 
@@ -958,7 +958,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* make sure there is no leftover from previous force-detach attempts */
-	clear_bit(FORCE_DETACH, &mdev->flags);
+	drbd_clear_flag(mdev, FORCE_DETACH);
 
 	/* and no leftover from previously aborted resync or verify, either */
 	mdev->rs_total = 0;
@@ -1168,9 +1168,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_FUA, &mdev->flags);
+		drbd_set_flag(mdev, MD_NO_FUA);
 	else
-		clear_bit(MD_NO_FUA, &mdev->flags);
+		drbd_clear_flag(mdev, MD_NO_FUA);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1186,13 +1186,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
-		set_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_set_flag(mdev, CRASHED_PRIMARY);
 	else
-		clear_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_clear_flag(mdev, CRASHED_PRIMARY);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
-		set_bit(CRASHED_PRIMARY, &mdev->flags);
+		drbd_set_flag(mdev, CRASHED_PRIMARY);
 		cp_discovered = 1;
 	}
 
@@ -1217,18 +1217,18 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	 * so we can automatically recover from a crash of a
 	 * degraded but active "cluster" after a certain timeout.
 	 */
-	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
 	if (mdev->state.role != R_PRIMARY &&
 	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
 	     !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
-		set_bit(USE_DEGR_WFC_T, &mdev->flags);
+		drbd_set_flag(mdev, USE_DEGR_WFC_T);
 
 	dd = drbd_determine_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
 	} else if (dd == grew)
-		set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+		drbd_set_flag(mdev, RESYNC_AFTER_NEG);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
 		dev_info(DEV, "Assuming that all blocks are out of sync "
@@ -1362,7 +1362,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	if (dt.detach_force) {
-		set_bit(FORCE_DETACH, &mdev->flags);
+		drbd_set_flag(mdev, FORCE_DETACH);
 		drbd_force_state(mdev, NS(disk, D_FAILED));
 		reply->ret_code = SS_SUCCESS;
 		goto out;
@@ -1707,7 +1707,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
 	if (mdev->state.role != mdev->state.peer)
 		iass = (mdev->state.role == R_PRIMARY);
 	else
-		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+		iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 
 	if (iass)
 		drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1765,7 +1765,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 
 	if (mdev->state.conn == C_CONNECTED) {
 		if (dd == grew)
-			set_bit(RESIZE_PENDING, &mdev->flags);
+			drbd_set_flag(mdev, RESIZE_PENDING);
 
 		drbd_send_uuids(mdev);
 		drbd_send_sizes(mdev, 1, ddsf);
@@ -1983,7 +1983,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 	drbd_flush_workqueue(mdev);
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2026,7 +2026,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 	 * resync just being finished, wait for it before requesting a new resync.
 	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 	drbd_flush_workqueue(mdev);
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -2094,9 +2094,9 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			     struct drbd_nl_cfg_reply *reply)
 {
-	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+	if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
 		drbd_uuid_new_current(mdev);
-		clear_bit(NEW_CUR_UUID, &mdev->flags);
+		drbd_clear_flag(mdev, NEW_CUR_UUID);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -2199,7 +2199,7 @@ static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
 	tl = reply->tag_list;
 
 	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
-		test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+		drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;
 
 	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
 	put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2224,7 +2224,7 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	/* If there is still bitmap IO pending, e.g. previous resync or verify
 	 * just being finished, wait for it before requesting a new resync. */
 	drbd_suspend_io(mdev);
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 
 	/* w_make_ov_request expects start position to be aligned */
 	mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index a5a453b4355f..662bc8ef830a 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -270,7 +270,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		   mdev->state.peer_isp ? 'p' : '-',
 		   mdev->state.user_isp ? 'u' : '-',
 		   mdev->congestion_reason ?: '-',
-		   test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
+		   drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
 		   mdev->send_cnt/2,
 		   mdev->recv_cnt/2,
 		   mdev->writ_cnt/2,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c44eaa0ee0a9..eb0cafea1423 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -525,7 +525,7 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
 		else if (rv != -ERESTARTSYS)
 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
 	} else if (rv == 0) {
-		if (test_bit(DISCONNECT_SENT, &mdev->flags)) {
+		if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
 			long t; /* time_left */
 			t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
 					       mdev->net_conf->ping_timeo * HZ/10);
@@ -749,7 +749,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 
 	D_ASSERT(!mdev->data.socket);
 
-	clear_bit(DISCONNECT_SENT, &mdev->flags);
+	drbd_clear_flag(mdev, DISCONNECT_SENT);
 	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
 		return -2;
 
@@ -772,7 +772,7 @@ static int drbd_connect(struct drbd_conf *mdev)
 			sock = s;
 			s = NULL;
 		} else if (!msock) {
-			clear_bit(DISCARD_CONCURRENT, &mdev->flags);
+			drbd_clear_flag(mdev, DISCARD_CONCURRENT);
 			drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
 			msock = s;
 			s = NULL;
@@ -810,7 +810,7 @@ retry:
 				sock_release(msock);
 			}
 			msock = s;
-			set_bit(DISCARD_CONCURRENT, &mdev->flags);
+			drbd_set_flag(mdev, DISCARD_CONCURRENT);
 			break;
 		default:
 			dev_warn(DEV, "Error receiving initial packet\n");
@@ -892,18 +892,18 @@ retry:
 
 	if (drbd_send_protocol(mdev) == -1)
 		return -1;
-	set_bit(STATE_SENT, &mdev->flags);
+	drbd_set_flag(mdev, STATE_SENT);
 	drbd_send_sync_param(mdev, &mdev->sync_conf);
 	drbd_send_sizes(mdev, 0, 0);
 	drbd_send_uuids(mdev);
 	drbd_send_current_state(mdev);
-	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
-	clear_bit(RESIZE_PENDING, &mdev->flags);
+	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
+	drbd_clear_flag(mdev, RESIZE_PENDING);
 
 	spin_lock_irq(&mdev->req_lock);
 	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
 	if (mdev->state.conn != C_WF_REPORT_PARAMS)
-		clear_bit(STATE_SENT, &mdev->flags);
+		drbd_clear_flag(mdev, STATE_SENT);
 	spin_unlock_irq(&mdev->req_lock);
 
 	if (rv < SS_SUCCESS)
@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		/* don't get the req_lock yet,
 		 * we may sleep in drbd_wait_peer_seq */
 		const int size = e->size;
-		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+		const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 		DEFINE_WAIT(wait);
 		struct drbd_request *i;
 		struct hlist_node *n;
@@ -2200,7 +2200,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 		     "Using discard-least-changes instead\n");
 	case ASB_DISCARD_ZERO_CHG:
 		if (ch_peer == 0 && ch_self == 0) {
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
 				? -1 : 1;
 			break;
 		} else {
@@ -2216,7 +2216,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
 			rv =  1;
 		else /* ( ch_self == ch_peer ) */
 			/* Well, then use something else. */
-			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
 				? -1 : 1;
 		break;
 	case ASB_DISCARD_LOCAL:
@@ -2420,7 +2420,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 	}
 
 	/* Common power [off|failure] */
-	rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
+	rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
 		(mdev->p_uuid[UI_FLAGS] & 2);
 	/* lowest bit is set when we were primary,
 	 * next bit (weight 2) is set when peer was primary */
@@ -2431,7 +2431,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 	case 1: /*  self_pri && !peer_pri */ return 1;
 	case 2: /* !self_pri &&  peer_pri */ return -1;
 	case 3: /*  self_pri &&  peer_pri */
-		dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+		dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
 		return dc ? -1 : 1;
 	}
 	}
@@ -2648,7 +2648,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 		}
 	}
 
-	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+	if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
 		if (hg == 0)
 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
 		else
@@ -2716,10 +2716,10 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
 	cf		= be32_to_cpu(p->conn_flags);
 	p_want_lose = cf & CF_WANT_LOSE;
 
-	clear_bit(CONN_DRY_RUN, &mdev->flags);
+	drbd_clear_flag(mdev, CONN_DRY_RUN);
 
 	if (cf & CF_DRY_RUN)
-		set_bit(CONN_DRY_RUN, &mdev->flags);
+		drbd_set_flag(mdev, CONN_DRY_RUN);
 
 	if (p_proto != mdev->net_conf->wire_protocol) {
 		dev_err(DEV, "incompatible communication protocols\n");
@@ -3051,7 +3051,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			 * needs to know my new size... */
 			drbd_send_sizes(mdev, 0, ddsf);
 		}
-		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
+		if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
 		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
 			if (mdev->state.pdsk >= D_INCONSISTENT &&
 			    mdev->state.disk >= D_INCONSISTENT) {
@@ -3060,7 +3060,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				else
 					resync_after_online_grow(mdev);
 			} else
-				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+				drbd_set_flag(mdev, RESYNC_AFTER_NEG);
 		}
 	}
 
@@ -3121,7 +3121,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	   ongoing cluster wide state change is finished. That is important if
 	   we are primary and are detaching from our disk. We need to see the
 	   new disk state... */
-	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
 		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
 
@@ -3170,8 +3170,8 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 	mask.i = be32_to_cpu(p->mask);
 	val.i = be32_to_cpu(p->val);
 
-	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
-	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+	if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
+	    drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
 		return true;
 	}
@@ -3280,7 +3280,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 			    os.disk == D_NEGOTIATING));
 		/* if we have both been inconsistent, and the peer has been
 		 * forced to be UpToDate with --overwrite-data */
-		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+		cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
 		/* if we had been plain connected, and the admin requested to
 		 * start a sync by "invalidate" or "invalidate-remote" */
 		cr |= (os.conn == C_CONNECTED &&
@@ -3300,7 +3300,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 				peer_state.disk = D_DISKLESS;
 				real_peer_disk = D_DISKLESS;
 			} else {
-				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+				if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
 					return false;
 				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
 				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3312,7 +3312,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.i != os.i)
 		goto retry;
-	clear_bit(CONSIDER_RESYNC, &mdev->flags);
+	drbd_clear_flag(mdev, CONSIDER_RESYNC);
 	ns.peer = peer_state.role;
 	ns.pdsk = real_peer_disk;
 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
@@ -3320,14 +3320,14 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		ns.disk = mdev->new_state_tmp.disk;
 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
 	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
-	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
+	    drbd_test_flag(mdev, NEW_CUR_UUID)) {
 		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
 		   for temporal network outages! */
 		spin_unlock_irq(&mdev->req_lock);
 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
 		tl_clear(mdev);
 		drbd_uuid_new_current(mdev);
-		clear_bit(NEW_CUR_UUID, &mdev->flags);
+		drbd_clear_flag(mdev, NEW_CUR_UUID);
 		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
 		return false;
 	}
@@ -3931,7 +3931,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
 
 	/* serialize with bitmap writeout triggered by the state change,
 	 * if any. */
-	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
 
 	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
@@ -4267,9 +4267,9 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
 	int retcode = be32_to_cpu(p->retcode);
 
 	if (retcode >= SS_SUCCESS) {
-		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+		drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
 	} else {
-		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
+		drbd_set_flag(mdev, CL_ST_CHG_FAIL);
 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
 			drbd_set_st_err_str(retcode), retcode);
4275 } 4275 }
@@ -4288,7 +4288,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4288{ 4288{
4289 /* restore idle timeout */ 4289 /* restore idle timeout */
4290 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; 4290 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4291 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) 4291 if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
4292 wake_up(&mdev->misc_wait); 4292 wake_up(&mdev->misc_wait);
4293 4293
4294 return true; 4294 return true;
@@ -4504,7 +4504,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4504 4504
4505 if (mdev->state.conn == C_AHEAD && 4505 if (mdev->state.conn == C_AHEAD &&
4506 atomic_read(&mdev->ap_in_flight) == 0 && 4506 atomic_read(&mdev->ap_in_flight) == 0 &&
4507 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) { 4507 !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
4508 mdev->start_resync_timer.expires = jiffies + HZ; 4508 mdev->start_resync_timer.expires = jiffies + HZ;
4509 add_timer(&mdev->start_resync_timer); 4509 add_timer(&mdev->start_resync_timer);
4510 } 4510 }
@@ -4614,7 +4614,7 @@ int drbd_asender(struct drbd_thread *thi)
4614 4614
4615 while (get_t_state(thi) == Running) { 4615 while (get_t_state(thi) == Running) {
4616 drbd_thread_current_set_cpu(mdev); 4616 drbd_thread_current_set_cpu(mdev);
4617 if (test_and_clear_bit(SEND_PING, &mdev->flags)) { 4617 if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
4618 ERR_IF(!drbd_send_ping(mdev)) goto reconnect; 4618 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4619 mdev->meta.socket->sk->sk_rcvtimeo = 4619 mdev->meta.socket->sk->sk_rcvtimeo =
4620 mdev->net_conf->ping_timeo*HZ/10; 4620 mdev->net_conf->ping_timeo*HZ/10;
@@ -4627,12 +4627,12 @@ int drbd_asender(struct drbd_thread *thi)
4627 3 < atomic_read(&mdev->unacked_cnt)) 4627 3 < atomic_read(&mdev->unacked_cnt))
4628 drbd_tcp_cork(mdev->meta.socket); 4628 drbd_tcp_cork(mdev->meta.socket);
4629 while (1) { 4629 while (1) {
4630 clear_bit(SIGNAL_ASENDER, &mdev->flags); 4630 drbd_clear_flag(mdev, SIGNAL_ASENDER);
4631 flush_signals(current); 4631 flush_signals(current);
4632 if (!drbd_process_done_ee(mdev)) 4632 if (!drbd_process_done_ee(mdev))
4633 goto reconnect; 4633 goto reconnect;
4634 /* to avoid race with newly queued ACKs */ 4634 /* to avoid race with newly queued ACKs */
4635 set_bit(SIGNAL_ASENDER, &mdev->flags); 4635 drbd_set_flag(mdev, SIGNAL_ASENDER);
4636 spin_lock_irq(&mdev->req_lock); 4636 spin_lock_irq(&mdev->req_lock);
4637 empty = list_empty(&mdev->done_ee); 4637 empty = list_empty(&mdev->done_ee);
4638 spin_unlock_irq(&mdev->req_lock); 4638 spin_unlock_irq(&mdev->req_lock);
@@ -4652,7 +4652,7 @@ int drbd_asender(struct drbd_thread *thi)
4652 4652
4653 rv = drbd_recv_short(mdev, mdev->meta.socket, 4653 rv = drbd_recv_short(mdev, mdev->meta.socket,
4654 buf, expect-received, 0); 4654 buf, expect-received, 0);
4655 clear_bit(SIGNAL_ASENDER, &mdev->flags); 4655 drbd_clear_flag(mdev, SIGNAL_ASENDER);
4656 4656
4657 flush_signals(current); 4657 flush_signals(current);
4658 4658
@@ -4670,7 +4670,7 @@ int drbd_asender(struct drbd_thread *thi)
4670 received += rv; 4670 received += rv;
4671 buf += rv; 4671 buf += rv;
4672 } else if (rv == 0) { 4672 } else if (rv == 0) {
4673 if (test_bit(DISCONNECT_SENT, &mdev->flags)) { 4673 if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
4674 long t; /* time_left */ 4674 long t; /* time_left */
4675 t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED, 4675 t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
4676 mdev->net_conf->ping_timeo * HZ/10); 4676 mdev->net_conf->ping_timeo * HZ/10);
@@ -4689,7 +4689,7 @@ int drbd_asender(struct drbd_thread *thi)
4689 dev_err(DEV, "PingAck did not arrive in time.\n"); 4689 dev_err(DEV, "PingAck did not arrive in time.\n");
4690 goto reconnect; 4690 goto reconnect;
4691 } 4691 }
4692 set_bit(SEND_PING, &mdev->flags); 4692 drbd_set_flag(mdev, SEND_PING);
4693 continue; 4693 continue;
4694 } else if (rv == -EINTR) { 4694 } else if (rv == -EINTR) {
4695 continue; 4695 continue;
@@ -4747,7 +4747,7 @@ disconnect:
4747 drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4747 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4748 drbd_md_sync(mdev); 4748 drbd_md_sync(mdev);
4749 } 4749 }
4750 clear_bit(SIGNAL_ASENDER, &mdev->flags); 4750 drbd_clear_flag(mdev, SIGNAL_ASENDER);
4751 4751
4752 D_ASSERT(mdev->state.conn < C_CONNECTED); 4752 D_ASSERT(mdev->state.conn < C_CONNECTED);
4753 dev_info(DEV, "asender terminated\n"); 4753 dev_info(DEV, "asender terminated\n");
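
Every conversion in the receiver above is mechanical: a raw test_bit()/set_bit()/clear_bit() (and the test_and_* variants) on the old single-word flags member becomes the matching drbd_*_flag() wrapper taking the mdev and an enum drbd_flag value. The wrapper definitions live in drbd_int.h hunks outside this excerpt; as a minimal sketch of what they plausibly reduce to (assuming they simply forward to the generic Linux bitops, which already accept a bit number into an arbitrarily long array of unsigned long), they would look like:

/* sketch only; relies on struct drbd_conf and <linux/bitops.h> via drbd_int.h */
static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	/* the generic bitops index past the first word automatically */
	set_bit(f, mdev->drbd_flags);
}

static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	clear_bit(f, mdev->drbd_flags);
}

static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_bit(f, mdev->drbd_flags);
}

static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_and_set_bit(f, mdev->drbd_flags);
}

static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
{
	return test_and_clear_bit(f, mdev->drbd_flags);
}

Since the atomicity guarantees of the underlying bitops are unchanged, callers need no new locking; only the addressing of the flag word moves behind the wrapper.
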
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 01b2ac641c7b..9220d9f9d6cd 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -118,7 +118,7 @@ static void queue_barrier(struct drbd_conf *mdev)
118 * barrier/epoch object is added. This is the only place this bit is 118 * barrier/epoch object is added. This is the only place this bit is
119 * set. It indicates that the barrier for this epoch is already queued, 119 * set. It indicates that the barrier for this epoch is already queued,
120 * and no new epoch has been created yet. */ 120 * and no new epoch has been created yet. */
121 if (test_bit(CREATE_BARRIER, &mdev->flags)) 121 if (drbd_test_flag(mdev, CREATE_BARRIER))
122 return; 122 return;
123 123
124 b = mdev->newest_tle; 124 b = mdev->newest_tle;
@@ -129,7 +129,7 @@ static void queue_barrier(struct drbd_conf *mdev)
129 * or (on connection loss) in tl_clear. */ 129 * or (on connection loss) in tl_clear. */
130 inc_ap_pending(mdev); 130 inc_ap_pending(mdev);
131 drbd_queue_work(&mdev->data.work, &b->w); 131 drbd_queue_work(&mdev->data.work, &b->w);
132 set_bit(CREATE_BARRIER, &mdev->flags); 132 drbd_set_flag(mdev, CREATE_BARRIER);
133} 133}
134 134
135static void _about_to_complete_local_write(struct drbd_conf *mdev, 135static void _about_to_complete_local_write(struct drbd_conf *mdev,
@@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
507 * corresponding hlist_del is in _req_may_be_done() */ 507 * corresponding hlist_del is in _req_may_be_done() */
508 hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector)); 508 hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
509 509
510 set_bit(UNPLUG_REMOTE, &mdev->flags); 510 drbd_set_flag(mdev, UNPLUG_REMOTE);
511 511
512 D_ASSERT(req->rq_state & RQ_NET_PENDING); 512 D_ASSERT(req->rq_state & RQ_NET_PENDING);
513 req->rq_state |= RQ_NET_QUEUED; 513 req->rq_state |= RQ_NET_QUEUED;
@@ -541,11 +541,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
541 /* otherwise we may lose an unplug, which may cause some remote 541 /* otherwise we may lose an unplug, which may cause some remote
542 * io-scheduler timeout to expire, increasing maximum latency, 542 * io-scheduler timeout to expire, increasing maximum latency,
543 * hurting performance. */ 543 * hurting performance. */
544 set_bit(UNPLUG_REMOTE, &mdev->flags); 544 drbd_set_flag(mdev, UNPLUG_REMOTE);
545 545
546 /* see drbd_make_request_common, 546 /* see drbd_make_request_common,
547 * just after it grabs the req_lock */ 547 * just after it grabs the req_lock */
548 D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0); 548 D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0);
549 549
550 req->epoch = mdev->newest_tle->br_number; 550 req->epoch = mdev->newest_tle->br_number;
551 551
@@ -888,7 +888,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
888 * Empty flushes don't need to go into the activity log, they can only 888 * Empty flushes don't need to go into the activity log, they can only
889 * flush data for pending writes which are already in there. */ 889 * flush data for pending writes which are already in there. */
890 if (rw == WRITE && local && size 890 if (rw == WRITE && local && size
891 && !test_bit(AL_SUSPENDED, &mdev->flags)) { 891 && !drbd_test_flag(mdev, AL_SUSPENDED)) {
892 req->rq_state |= RQ_IN_ACT_LOG; 892 req->rq_state |= RQ_IN_ACT_LOG;
893 drbd_al_begin_io(mdev, sector); 893 drbd_al_begin_io(mdev, sector);
894 } 894 }
@@ -912,7 +912,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
912 * if we lost that race, we retry. */ 912 * if we lost that race, we retry. */
913 if (rw == WRITE && (remote || send_oos) && 913 if (rw == WRITE && (remote || send_oos) &&
914 mdev->unused_spare_tle == NULL && 914 mdev->unused_spare_tle == NULL &&
915 test_bit(CREATE_BARRIER, &mdev->flags)) { 915 drbd_test_flag(mdev, CREATE_BARRIER)) {
916allocate_barrier: 916allocate_barrier:
917 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO); 917 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
918 if (!b) { 918 if (!b) {
@@ -955,7 +955,7 @@ allocate_barrier:
955 } 955 }
956 if (rw == WRITE && (remote || send_oos) && 956 if (rw == WRITE && (remote || send_oos) &&
957 mdev->unused_spare_tle == NULL && 957 mdev->unused_spare_tle == NULL &&
958 test_bit(CREATE_BARRIER, &mdev->flags)) { 958 drbd_test_flag(mdev, CREATE_BARRIER)) {
959 /* someone closed the current epoch 959 /* someone closed the current epoch
960 * while we were grabbing the spinlock */ 960 * while we were grabbing the spinlock */
961 spin_unlock_irq(&mdev->req_lock); 961 spin_unlock_irq(&mdev->req_lock);
@@ -977,12 +977,12 @@ allocate_barrier:
977 * make sure that, if this is a write request and it triggered a 977 * make sure that, if this is a write request and it triggered a
978 * barrier packet, this request is queued within the same spinlock. */ 978 * barrier packet, this request is queued within the same spinlock. */
979 if ((remote || send_oos) && mdev->unused_spare_tle && 979 if ((remote || send_oos) && mdev->unused_spare_tle &&
980 test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) { 980 drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
981 _tl_add_barrier(mdev, mdev->unused_spare_tle); 981 _tl_add_barrier(mdev, mdev->unused_spare_tle);
982 mdev->unused_spare_tle = NULL; 982 mdev->unused_spare_tle = NULL;
983 } else { 983 } else {
984 D_ASSERT(!(remote && rw == WRITE && 984 D_ASSERT(!(remote && rw == WRITE &&
985 test_bit(CREATE_BARRIER, &mdev->flags))); 985 drbd_test_flag(mdev, CREATE_BARRIER)));
986 } 986 }
987 987
988 /* NOTE 988 /* NOTE
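
The drbd_req.c hunks are where the test-and-clear form carries real weight: once queue_barrier() has set CREATE_BARRIER, exactly one submitter may consume the pre-allocated epoch object and open the new epoch. A condensed sketch of that handoff, taken from the hunk above with the surrounding control flow and error paths omitted:

	/* sketch of the epoch handoff in drbd_make_request_common() */
	spin_lock_irq(&mdev->req_lock);
	if ((remote || send_oos) && mdev->unused_spare_tle &&
	    drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
		/* this request wins: splice in the spare epoch object */
		_tl_add_barrier(mdev, mdev->unused_spare_tle);
		mdev->unused_spare_tle = NULL;
	}
	spin_unlock_irq(&mdev->req_lock);

Whether the wrapper is backed by one word or several, the flag must still be observed and cleared in a single step, which is why the conversion target here is drbd_test_and_clear_flag() rather than a drbd_test_flag()/drbd_clear_flag() pair.
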
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 66dcb2d7eada..acb614ac9fe1 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -793,7 +793,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
793 } 793 }
794 794
795 drbd_start_resync(mdev, C_SYNC_SOURCE); 795 drbd_start_resync(mdev, C_SYNC_SOURCE);
796 clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags); 796 drbd_clear_flag(mdev, AHEAD_TO_SYNC_SOURCE);
797 return 1; 797 return 1;
798} 798}
799 799
@@ -817,10 +817,10 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca
817 817
818static void ping_peer(struct drbd_conf *mdev) 818static void ping_peer(struct drbd_conf *mdev)
819{ 819{
820 clear_bit(GOT_PING_ACK, &mdev->flags); 820 drbd_clear_flag(mdev, GOT_PING_ACK);
821 request_ping(mdev); 821 request_ping(mdev);
822 wait_event(mdev->misc_wait, 822 wait_event(mdev->misc_wait,
823 test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED); 823 drbd_test_flag(mdev, GOT_PING_ACK) || mdev->state.conn < C_CONNECTED);
824} 824}
825 825
826int drbd_resync_finished(struct drbd_conf *mdev) 826int drbd_resync_finished(struct drbd_conf *mdev)
@@ -1749,8 +1749,8 @@ int drbd_worker(struct drbd_thread *thi)
1749 NS(conn, C_NETWORK_FAILURE)); 1749 NS(conn, C_NETWORK_FAILURE));
1750 } 1750 }
1751 } 1751 }
1752 D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags)); 1752 D_ASSERT(drbd_test_flag(mdev, DEVICE_DYING));
1753 D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags)); 1753 D_ASSERT(drbd_test_flag(mdev, CONFIG_PENDING));
1754 1754
1755 spin_lock_irq(&mdev->data.work.q_lock); 1755 spin_lock_irq(&mdev->data.work.q_lock);
1756 i = 0; 1756 i = 0;
@@ -1783,8 +1783,8 @@ int drbd_worker(struct drbd_thread *thi)
1783 1783
1784 dev_info(DEV, "worker terminated\n"); 1784 dev_info(DEV, "worker terminated\n");
1785 1785
1786 clear_bit(DEVICE_DYING, &mdev->flags); 1786 drbd_clear_flag(mdev, DEVICE_DYING);
1787 clear_bit(CONFIG_PENDING, &mdev->flags); 1787 drbd_clear_flag(mdev, CONFIG_PENDING);
1788 wake_up(&mdev->state_wait); 1788 wake_up(&mdev->state_wait);
1789 1789
1790 return 0; 1790 return 0;
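
A closing observation on the waiter/waker pairs touched throughout this patch (ping_peer() above, the UUID and bitmap waits in the receiver): because the wrappers keep the bitops' atomic semantics, the established idiom survives the flag-word change untouched. In outline (a sketch of the existing pattern, not a new hunk):

	/* waiter: sleep until the flag shows up or the connection drops */
	wait_event(mdev->misc_wait,
		   drbd_test_flag(mdev, GOT_PING_ACK) ||
		   mdev->state.conn < C_CONNECTED);

	/* waker: set and test in one step, wake only on the 0 -> 1 transition */
	if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
		wake_up(&mdev->misc_wait);

The test-and-set on the waker side suppresses redundant wake-ups, and in got_BarrierAck() the same guard keeps a racing second BarrierAck from calling add_timer() on an already pending start_resync_timer.
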