author | Philipp Reisner <philipp.reisner@linbit.com> | 2011-02-21 08:29:27 -0500
committer | Philipp Reisner <philipp.reisner@linbit.com> | 2011-10-14 10:47:58 -0400
commit | 2f5cdd0b2cf7a4099faa7e53ba0a29ddf0ddf950 (patch)
tree | 0a90c3b06c455a51536743467e3146bad3507af3 /drivers/block/drbd/drbd_main.c
parent | 49559d87fdfe3ab33c684506c394681da6a746c9 (diff)
drbd: Converted the transfer log from mdev to tconn
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
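
In short: the transfer log (oldest_tle/newest_tle and the out-of-sequence request list) moves from the per-device struct drbd_conf to the per-connection struct drbd_tconn, so tl_init()/tl_cleanup() and friends now take a tconn, and tl_init() runs once per connection in drbd_new_tconn() instead of once per minor in drbd_new_device(). The user-space sketch below models that ownership change; the simplified structs and main() are stand-ins, not the real DRBD definitions:

    /* Toy sketch, not DRBD code: models the ownership change in this patch.
     * The transfer log head and tail now hang off the per-connection object,
     * so tl_init()/tl_cleanup() take a connection rather than a device. */
    #include <stdio.h>
    #include <stdlib.h>

    struct drbd_tl_epoch {
            struct drbd_tl_epoch *next;
            unsigned int br_number;
            int n_writes;
    };

    struct drbd_tconn {                     /* per-connection state */
            struct drbd_tl_epoch *oldest_tle;
            struct drbd_tl_epoch *newest_tle;
    };

    int tl_init(struct drbd_tconn *tconn)
    {
            struct drbd_tl_epoch *b = calloc(1, sizeof(*b));

            if (!b)
                    return 0;
            tconn->oldest_tle = b;
            tconn->newest_tle = b;
            return 1;
    }

    void tl_cleanup(struct drbd_tconn *tconn)
    {
            if (tconn->oldest_tle != tconn->newest_tle)
                    fprintf(stderr, "ASSERT FAILED: oldest_tle == newest_tle\n");
            free(tconn->oldest_tle);
            tconn->oldest_tle = NULL;
            tconn->newest_tle = NULL;
    }

    int main(void)
    {
            struct drbd_tconn tconn = { 0 };

            if (!tl_init(&tconn))   /* mirrors the new call in drbd_new_tconn() */
                    return 1;
            tl_cleanup(&tconn);     /* mirrors connection teardown / fail path */
            return 0;
    }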
Diffstat (limited to 'drivers/block/drbd/drbd_main.c')
-rw-r--r-- | drivers/block/drbd/drbd_main.c | 125
1 file changed, 66 insertions(+), 59 deletions(-)
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f43752fb5b52..cbec5ff2cc74 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -180,7 +180,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
  * Each &struct drbd_tl_epoch has a circular double linked list of requests
  * attached.
  */
-static int tl_init(struct drbd_conf *mdev)
+static int tl_init(struct drbd_tconn *tconn)
 {
         struct drbd_tl_epoch *b;
 
@@ -195,21 +195,23 @@ static int tl_init(struct drbd_conf *mdev)
         b->n_writes = 0;
         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
 
-        mdev->tconn->oldest_tle = b;
-        mdev->tconn->newest_tle = b;
-        INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);
+        tconn->oldest_tle = b;
+        tconn->newest_tle = b;
+        INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
 
         return 1;
 }
 
-static void tl_cleanup(struct drbd_conf *mdev)
+static void tl_cleanup(struct drbd_tconn *tconn)
 {
-        D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
-        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
-        kfree(mdev->tconn->oldest_tle);
-        mdev->tconn->oldest_tle = NULL;
-        kfree(mdev->tconn->unused_spare_tle);
-        mdev->tconn->unused_spare_tle = NULL;
+        if (tconn->oldest_tle != tconn->newest_tle)
+                conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
+        if (!list_empty(&tconn->out_of_sequence_requests))
+                conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
+        kfree(tconn->oldest_tle);
+        tconn->oldest_tle = NULL;
+        kfree(tconn->unused_spare_tle);
+        tconn->unused_spare_tle = NULL;
 }
 
 /**
@@ -219,7 +221,7 @@ static void tl_cleanup(struct drbd_conf *mdev)
  *
  * The caller must hold the req_lock.
  */
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
+void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
 {
         struct drbd_tl_epoch *newest_before;
 
@@ -229,13 +231,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
         new->next = NULL;
         new->n_writes = 0;
 
-        newest_before = mdev->tconn->newest_tle;
+        newest_before = tconn->newest_tle;
         /* never send a barrier number == 0, because that is special-cased
          * when using TCQ for our write ordering code */
         new->br_number = (newest_before->br_number+1) ?: 1;
-        if (mdev->tconn->newest_tle != new) {
-                mdev->tconn->newest_tle->next = new;
-                mdev->tconn->newest_tle = new;
+        if (tconn->newest_tle != new) {
+                tconn->newest_tle->next = new;
+                tconn->newest_tle = new;
         }
 }
 
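One detail worth noting in the hunk above: the untouched context line `new->br_number = (newest_before->br_number+1) ?: 1;` relies on GCC's conditional-with-omitted-middle-operand extension, `a ?: b`, which yields `a` when `a` is nonzero and `b` otherwise; the barrier number therefore skips 0 on wrap-around (0 is special-cased by the TCQ write-ordering code, per the comment). A plain-C equivalent, for illustration only:

    /* Illustration only: the ?: one-liner above, spelled out without the
     * GNU extension. */
    unsigned int next_barrier_number(unsigned int cur)
    {
            unsigned int n = cur + 1;

            return n ? n : 1;       /* never hand out barrier number 0 */
    }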
@@ -249,31 +251,32 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
  * &struct drbd_tl_epoch objects this function will cause a termination
  * of the connection.
  */
-void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
                 unsigned int set_size)
 {
+        struct drbd_conf *mdev;
         struct drbd_tl_epoch *b, *nob; /* next old barrier */
         struct list_head *le, *tle;
         struct drbd_request *r;
 
-        spin_lock_irq(&mdev->tconn->req_lock);
+        spin_lock_irq(&tconn->req_lock);
 
-        b = mdev->tconn->oldest_tle;
+        b = tconn->oldest_tle;
 
         /* first some paranoia code */
         if (b == NULL) {
-                dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+                conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                         barrier_nr);
                 goto bail;
         }
         if (b->br_number != barrier_nr) {
-                dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
+                conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
                         barrier_nr, b->br_number);
                 goto bail;
         }
         if (b->n_writes != set_size) {
-                dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+                conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
                         barrier_nr, set_size, b->n_writes);
                 goto bail;
         }
 
@@ -296,28 +299,29 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
            _req_mod(, BARRIER_ACKED) above.
            */
         list_del_init(&b->requests);
+        mdev = b->w.mdev;
 
         nob = b->next;
         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-                _tl_add_barrier(mdev, b);
+                _tl_add_barrier(tconn, b);
                 if (nob)
-                        mdev->tconn->oldest_tle = nob;
+                        tconn->oldest_tle = nob;
                 /* if nob == NULL b was the only barrier, and becomes the new
-                   barrier. Therefore mdev->tconn->oldest_tle points already to b */
+                   barrier. Therefore tconn->oldest_tle points already to b */
         } else {
                 D_ASSERT(nob != NULL);
-                mdev->tconn->oldest_tle = nob;
+                tconn->oldest_tle = nob;
                 kfree(b);
         }
 
-        spin_unlock_irq(&mdev->tconn->req_lock);
+        spin_unlock_irq(&tconn->req_lock);
         dec_ap_pending(mdev);
 
         return;
 
 bail:
-        spin_unlock_irq(&mdev->tconn->req_lock);
-        drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+        spin_unlock_irq(&tconn->req_lock);
+        conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
 
 
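In the tl_release() hunk above, the function now receives the connection, but dec_ap_pending() still accounts against a device, so the device is recovered from the epoch's embedded work item (`mdev = b->w.mdev;`). A minimal sketch of that back-reference pattern, with simplified stand-in types rather than the real DRBD definitions:

    /* Sketch (stand-in types): an embedded work item carries a device
     * back-pointer, so connection-level code can reach per-device state. */
    struct drbd_conf;                       /* per-device object, opaque here */

    struct drbd_work {
            int (*cb)(struct drbd_work *, int);     /* worker callback */
            struct drbd_conf *mdev;                 /* back-reference */
    };

    struct drbd_tl_epoch {
            struct drbd_work w;             /* embedded in each epoch */
            unsigned int br_number;
    };

    struct drbd_conf *epoch_device(struct drbd_tl_epoch *b)
    {
            return b->w.mdev;               /* what tl_release() now does */
    }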
@@ -329,15 +333,15 @@ bail:
  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
  * RESTART_FROZEN_DISK_IO.
  */
-void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
+void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 {
         struct drbd_tl_epoch *b, *tmp, **pn;
         struct list_head *le, *tle, carry_reads;
         struct drbd_request *req;
         int rv, n_writes, n_reads;
 
-        b = mdev->tconn->oldest_tle;
-        pn = &mdev->tconn->oldest_tle;
+        b = tconn->oldest_tle;
+        pn = &tconn->oldest_tle;
         while (b) {
                 n_writes = 0;
                 n_reads = 0;
@@ -356,11 +360,11 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                                 b->n_writes = n_writes;
                                 if (b->w.cb == NULL) {
                                         b->w.cb = w_send_barrier;
-                                        inc_ap_pending(mdev);
-                                        set_bit(CREATE_BARRIER, &mdev->flags);
+                                        inc_ap_pending(b->w.mdev);
+                                        set_bit(CREATE_BARRIER, &b->w.mdev->flags);
                                 }
 
-                                drbd_queue_work(&mdev->tconn->data.work, &b->w);
+                                drbd_queue_work(&tconn->data.work, &b->w);
                         }
                         pn = &b->next;
                 } else {
@@ -374,11 +378,12 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                          * the newest barrier may not have been queued yet,
                          * in which case w.cb is still NULL. */
                         if (b->w.cb != NULL)
-                                dec_ap_pending(mdev);
+                                dec_ap_pending(b->w.mdev);
 
-                        if (b == mdev->tconn->newest_tle) {
+                        if (b == tconn->newest_tle) {
                                 /* recycle, but reinit! */
-                                D_ASSERT(tmp == NULL);
+                                if (tmp != NULL)
+                                        conn_err(tconn, "ASSERT FAILED tmp == NULL");
                                 INIT_LIST_HEAD(&b->requests);
                                 list_splice(&carry_reads, &b->requests);
                                 INIT_LIST_HEAD(&b->w.list);
@@ -406,20 +411,23 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
  * by the requests on the transfer gets marked as our of sync. Called from the
  * receiver thread and the worker thread.
  */
-void tl_clear(struct drbd_conf *mdev)
+void tl_clear(struct drbd_tconn *tconn)
 {
+        struct drbd_conf *mdev;
         struct list_head *le, *tle;
         struct drbd_request *r;
+        int minor;
 
-        spin_lock_irq(&mdev->tconn->req_lock);
+        spin_lock_irq(&tconn->req_lock);
 
-        _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
+        _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
 
         /* we expect this list to be empty. */
-        D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
+        if (!list_empty(&tconn->out_of_sequence_requests))
+                conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");
 
         /* but just in case, clean it up anyways! */
-        list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
+        list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
                 r = list_entry(le, struct drbd_request, tl_requests);
                 /* It would be nice to complete outside of spinlock.
                  * But this is easier for now. */
@@ -427,16 +435,17 @@ void tl_clear(struct drbd_conf *mdev)
         }
 
         /* ensure bit indicating barrier is required is clear */
-        clear_bit(CREATE_BARRIER, &mdev->flags);
+        idr_for_each_entry(&tconn->volumes, mdev, minor)
+                clear_bit(CREATE_BARRIER, &mdev->flags);
 
-        spin_unlock_irq(&mdev->tconn->req_lock);
+        spin_unlock_irq(&tconn->req_lock);
 }
 
-void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
+void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 {
-        spin_lock_irq(&mdev->tconn->req_lock);
-        _tl_restart(mdev, what);
-        spin_unlock_irq(&mdev->tconn->req_lock);
+        spin_lock_irq(&tconn->req_lock);
+        _tl_restart(tconn, what);
+        spin_unlock_irq(&tconn->req_lock);
 }
 
 static int drbd_thread_setup(void *arg)
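CREATE_BARRIER is a per-device flag, so once the transfer log belongs to the connection, tl_clear() has to clear it on every volume; that is what the new idr_for_each_entry() walk over tconn->volumes does in the hunk above. A user-space toy of the same loop, with a plain array standing in for the kernel idr and invented toy_* names:

    #include <stddef.h>

    #define CREATE_BARRIER  0x01UL          /* toy flag bit */

    struct toy_mdev {
            unsigned long flags;            /* per-device flags */
    };

    struct toy_tconn {
            struct toy_mdev *volumes[4];    /* stand-in for the idr of volumes */
    };

    /* Models: idr_for_each_entry(&tconn->volumes, mdev, minor)
     *                 clear_bit(CREATE_BARRIER, &mdev->flags); */
    void toy_clear_create_barrier(struct toy_tconn *tconn)
    {
            size_t minor;

            for (minor = 0; minor < 4; minor++) {
                    struct toy_mdev *mdev = tconn->volumes[minor];

                    if (mdev)
                            mdev->flags &= ~CREATE_BARRIER;
            }
    }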
@@ -2199,6 +2208,9 @@ struct drbd_tconn *drbd_new_tconn(char *name)
         if (!tconn->name)
                 goto fail;
 
+        if (!tl_init(tconn))
+                goto fail;
+
         tconn->cstate = C_STANDALONE;
         mutex_init(&tconn->cstate_mutex);
         spin_lock_init(&tconn->req_lock);
@@ -2224,6 +2236,7 @@ struct drbd_tconn *drbd_new_tconn(char *name)
         return tconn;
 
 fail:
+        tl_cleanup(tconn);
         kfree(tconn->name);
         kfree(tconn);
 
@@ -2316,9 +2329,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 
         if (drbd_bm_init(mdev))
                 goto out_no_bitmap;
-        /* no need to lock access, we are still initializing this minor device. */
-        if (!tl_init(mdev))
-                goto out_no_tl;
         mdev->read_requests = RB_ROOT;
         mdev->write_requests = RB_ROOT;
 
@@ -2334,8 +2344,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 /* out_whatever_else:
         kfree(mdev->current_epoch); */
 out_no_epoch:
-        tl_cleanup(mdev);
-out_no_tl:
         drbd_bm_cleanup(mdev);
 out_no_bitmap:
         __free_page(mdev->md_io_page);
@@ -2357,7 +2365,6 @@ out_no_tconn:
 void drbd_free_mdev(struct drbd_conf *mdev)
 {
         kfree(mdev->current_epoch);
-        tl_cleanup(mdev);
         if (mdev->bitmap) /* should no longer be there. */
                 drbd_bm_cleanup(mdev);
         __free_page(mdev->md_io_page);