about summary refs log tree commit diff stats
path: root/drivers/block/drbd/drbd_state.c
diff options
context:
space:
mode:
authorAndreas Gruenbacher <agruen@linbit.com>2011-07-07 08:19:42 -0400
committerPhilipp Reisner <philipp.reisner@linbit.com>2014-02-17 10:46:46 -0500
commit0500813fe0c9a617ace86d91344e36839050dad6 (patch)
treee866ddce790b671cea8dd2034a3de6f08d50f1ff /drivers/block/drbd/drbd_state.c
parent3ab706fe52a5cc12b021d7861943581db766a171 (diff)
drbd: Move conf_mutex from connection to resource
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Diffstat (limited to 'drivers/block/drbd/drbd_state.c')
-rw-r--r--drivers/block/drbd/drbd_state.c48
1 files changed, 24 insertions, 24 deletions
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 79d0ea26f373..10c89314ff2b 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -250,10 +250,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
250 union drbd_state ns; 250 union drbd_state ns;
251 enum drbd_state_rv rv; 251 enum drbd_state_rv rv;
252 252
253 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 253 spin_lock_irqsave(&device->resource->req_lock, flags);
254 ns = apply_mask_val(drbd_read_state(device), mask, val); 254 ns = apply_mask_val(drbd_read_state(device), mask, val);
255 rv = _drbd_set_state(device, ns, f, NULL); 255 rv = _drbd_set_state(device, ns, f, NULL);
256 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 256 spin_unlock_irqrestore(&device->resource->req_lock, flags);
257 257
258 return rv; 258 return rv;
259} 259}
@@ -284,7 +284,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
284 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) 284 if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
285 return SS_CW_FAILED_BY_PEER; 285 return SS_CW_FAILED_BY_PEER;
286 286
287 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 287 spin_lock_irqsave(&device->resource->req_lock, flags);
288 os = drbd_read_state(device); 288 os = drbd_read_state(device);
289 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 289 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
290 rv = is_valid_transition(os, ns); 290 rv = is_valid_transition(os, ns);
@@ -301,7 +301,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
301 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ 301 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
302 } 302 }
303 } 303 }
304 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 304 spin_unlock_irqrestore(&device->resource->req_lock, flags);
305 305
306 return rv; 306 return rv;
307} 307}
@@ -330,12 +330,12 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
330 if (f & CS_SERIALIZE) 330 if (f & CS_SERIALIZE)
331 mutex_lock(device->state_mutex); 331 mutex_lock(device->state_mutex);
332 332
333 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 333 spin_lock_irqsave(&device->resource->req_lock, flags);
334 os = drbd_read_state(device); 334 os = drbd_read_state(device);
335 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); 335 ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
336 rv = is_valid_transition(os, ns); 336 rv = is_valid_transition(os, ns);
337 if (rv < SS_SUCCESS) { 337 if (rv < SS_SUCCESS) {
338 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 338 spin_unlock_irqrestore(&device->resource->req_lock, flags);
339 goto abort; 339 goto abort;
340 } 340 }
341 341
@@ -343,7 +343,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
343 rv = is_valid_state(device, ns); 343 rv = is_valid_state(device, ns);
344 if (rv == SS_SUCCESS) 344 if (rv == SS_SUCCESS)
345 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection); 345 rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
346 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 346 spin_unlock_irqrestore(&device->resource->req_lock, flags);
347 347
348 if (rv < SS_SUCCESS) { 348 if (rv < SS_SUCCESS) {
349 if (f & CS_VERBOSE) 349 if (f & CS_VERBOSE)
@@ -366,14 +366,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
366 print_st_err(device, os, ns, rv); 366 print_st_err(device, os, ns, rv);
367 goto abort; 367 goto abort;
368 } 368 }
369 spin_lock_irqsave(&first_peer_device(device)->connection->req_lock, flags); 369 spin_lock_irqsave(&device->resource->req_lock, flags);
370 ns = apply_mask_val(drbd_read_state(device), mask, val); 370 ns = apply_mask_val(drbd_read_state(device), mask, val);
371 rv = _drbd_set_state(device, ns, f, &done); 371 rv = _drbd_set_state(device, ns, f, &done);
372 } else { 372 } else {
373 rv = _drbd_set_state(device, ns, f, &done); 373 rv = _drbd_set_state(device, ns, f, &done);
374 } 374 }
375 375
376 spin_unlock_irqrestore(&first_peer_device(device)->connection->req_lock, flags); 376 spin_unlock_irqrestore(&device->resource->req_lock, flags);
377 377
378 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { 378 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
379 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); 379 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
@@ -1245,7 +1245,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1245 struct drbd_connection *connection = first_peer_device(device)->connection; 1245 struct drbd_connection *connection = first_peer_device(device)->connection;
1246 enum drbd_req_event what = NOTHING; 1246 enum drbd_req_event what = NOTHING;
1247 1247
1248 spin_lock_irq(&connection->req_lock); 1248 spin_lock_irq(&device->resource->req_lock);
1249 if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED) 1249 if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
1250 what = RESEND; 1250 what = RESEND;
1251 1251
@@ -1260,13 +1260,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1260 (union drbd_state) { { .susp_nod = 0 } }, 1260 (union drbd_state) { { .susp_nod = 0 } },
1261 CS_VERBOSE); 1261 CS_VERBOSE);
1262 } 1262 }
1263 spin_unlock_irq(&connection->req_lock); 1263 spin_unlock_irq(&device->resource->req_lock);
1264 } 1264 }
1265 1265
1266 if (ns.susp_fen) { 1266 if (ns.susp_fen) {
1267 struct drbd_connection *connection = first_peer_device(device)->connection; 1267 struct drbd_connection *connection = first_peer_device(device)->connection;
1268 1268
1269 spin_lock_irq(&connection->req_lock); 1269 spin_lock_irq(&device->resource->req_lock);
1270 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) { 1270 if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
1271 /* case2: The connection was established again: */ 1271 /* case2: The connection was established again: */
1272 struct drbd_peer_device *peer_device; 1272 struct drbd_peer_device *peer_device;
@@ -1282,7 +1282,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
1282 (union drbd_state) { { .susp_fen = 0 } }, 1282 (union drbd_state) { { .susp_fen = 0 } },
1283 CS_VERBOSE); 1283 CS_VERBOSE);
1284 } 1284 }
1285 spin_unlock_irq(&connection->req_lock); 1285 spin_unlock_irq(&device->resource->req_lock);
1286 } 1286 }
1287 1287
1288 /* Became sync source. With protocol >= 96, we still need to send out 1288 /* Became sync source. With protocol >= 96, we still need to send out
@@ -1555,13 +1555,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1555 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) { 1555 if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
1556 struct net_conf *old_conf; 1556 struct net_conf *old_conf;
1557 1557
1558 mutex_lock(&connection->conf_update); 1558 mutex_lock(&connection->resource->conf_update);
1559 old_conf = connection->net_conf; 1559 old_conf = connection->net_conf;
1560 connection->my_addr_len = 0; 1560 connection->my_addr_len = 0;
1561 connection->peer_addr_len = 0; 1561 connection->peer_addr_len = 0;
1562 rcu_assign_pointer(connection->net_conf, NULL); 1562 rcu_assign_pointer(connection->net_conf, NULL);
1563 conn_free_crypto(connection); 1563 conn_free_crypto(connection);
1564 mutex_unlock(&connection->conf_update); 1564 mutex_unlock(&connection->resource->conf_update);
1565 1565
1566 synchronize_rcu(); 1566 synchronize_rcu();
1567 kfree(old_conf); 1567 kfree(old_conf);
@@ -1579,13 +1579,13 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1579 } 1579 }
1580 } 1580 }
1581 rcu_read_unlock(); 1581 rcu_read_unlock();
1582 spin_lock_irq(&connection->req_lock); 1582 spin_lock_irq(&connection->resource->req_lock);
1583 _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING); 1583 _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
1584 _conn_request_state(connection, 1584 _conn_request_state(connection,
1585 (union drbd_state) { { .susp_fen = 1 } }, 1585 (union drbd_state) { { .susp_fen = 1 } },
1586 (union drbd_state) { { .susp_fen = 0 } }, 1586 (union drbd_state) { { .susp_fen = 0 } },
1587 CS_VERBOSE); 1587 CS_VERBOSE);
1588 spin_unlock_irq(&connection->req_lock); 1588 spin_unlock_irq(&connection->resource->req_lock);
1589 } 1589 }
1590 } 1590 }
1591 kref_put(&connection->kref, drbd_destroy_connection); 1591 kref_put(&connection->kref, drbd_destroy_connection);
@@ -1802,7 +1802,7 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1802 /* This will be a cluster-wide state change. 1802 /* This will be a cluster-wide state change.
1803 * Need to give up the spinlock, grab the mutex, 1803 * Need to give up the spinlock, grab the mutex,
1804 * then send the state change request, ... */ 1804 * then send the state change request, ... */
1805 spin_unlock_irq(&connection->req_lock); 1805 spin_unlock_irq(&connection->resource->req_lock);
1806 mutex_lock(&connection->cstate_mutex); 1806 mutex_lock(&connection->cstate_mutex);
1807 have_mutex = true; 1807 have_mutex = true;
1808 1808
@@ -1821,10 +1821,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1821 /* ... and re-aquire the spinlock. 1821 /* ... and re-aquire the spinlock.
1822 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call 1822 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
1823 * conn_set_state() within the same spinlock. */ 1823 * conn_set_state() within the same spinlock. */
1824 spin_lock_irq(&connection->req_lock); 1824 spin_lock_irq(&connection->resource->req_lock);
1825 wait_event_lock_irq(connection->ping_wait, 1825 wait_event_lock_irq(connection->ping_wait,
1826 (rv = _conn_rq_cond(connection, mask, val)), 1826 (rv = _conn_rq_cond(connection, mask, val)),
1827 connection->req_lock); 1827 connection->resource->req_lock);
1828 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags); 1828 clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
1829 if (rv < SS_SUCCESS) 1829 if (rv < SS_SUCCESS)
1830 goto abort; 1830 goto abort;
@@ -1853,10 +1853,10 @@ _conn_request_state(struct drbd_connection *connection, union drbd_state mask, u
1853 if (have_mutex) { 1853 if (have_mutex) {
1854 /* mutex_unlock() "... must not be used in interrupt context.", 1854 /* mutex_unlock() "... must not be used in interrupt context.",
1855 * so give up the spinlock, then re-aquire it */ 1855 * so give up the spinlock, then re-aquire it */
1856 spin_unlock_irq(&connection->req_lock); 1856 spin_unlock_irq(&connection->resource->req_lock);
1857 abort_unlocked: 1857 abort_unlocked:
1858 mutex_unlock(&connection->cstate_mutex); 1858 mutex_unlock(&connection->cstate_mutex);
1859 spin_lock_irq(&connection->req_lock); 1859 spin_lock_irq(&connection->resource->req_lock);
1860 } 1860 }
1861 if (rv < SS_SUCCESS && flags & CS_VERBOSE) { 1861 if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
1862 drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv)); 1862 drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
@@ -1872,9 +1872,9 @@ conn_request_state(struct drbd_connection *connection, union drbd_state mask, un
1872{ 1872{
1873 enum drbd_state_rv rv; 1873 enum drbd_state_rv rv;
1874 1874
1875 spin_lock_irq(&connection->req_lock); 1875 spin_lock_irq(&connection->resource->req_lock);
1876 rv = _conn_request_state(connection, mask, val, flags); 1876 rv = _conn_request_state(connection, mask, val, flags);
1877 spin_unlock_irq(&connection->req_lock); 1877 spin_unlock_irq(&connection->resource->req_lock);
1878 1878
1879 return rv; 1879 return rv;
1880} 1880}