Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 1643
1 file changed, 665 insertions(+), 978 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d636b3e80f5d..13391e546616 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -45,21 +45,19 @@ struct gfs2_gl_hash_bucket {
 	struct hlist_head hb_list;
 };
 
-struct glock_iter {
+struct gfs2_glock_iter {
 	int hash;			/* hash bucket index */
 	struct gfs2_sbd *sdp;		/* incore superblock */
 	struct gfs2_glock *gl;		/* current glock struct */
-	struct seq_file *seq;		/* sequence file for debugfs */
 	char string[512];		/* scratch space */
 };
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
-static void gfs2_glock_drop_th(struct gfs2_glock *gl);
-static void run_queue(struct gfs2_glock *gl);
+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
@@ -123,33 +121,6 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
 #endif
 
 /**
- * relaxed_state_ok - is a requested lock compatible with the current lock mode?
- * @actual: the current state of the lock
- * @requested: the lock state that was requested by the caller
- * @flags: the modifier flags passed in by the caller
- *
- * Returns: 1 if the locks are compatible, 0 otherwise
- */
-
-static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
-				   int flags)
-{
-	if (actual == requested)
-		return 1;
-
-	if (flags & GL_EXACT)
-		return 0;
-
-	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
-		return 1;
-
-	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
-		return 1;
-
-	return 0;
-}
-
-/**
  * gl_hash() - Turn glock number into hash bucket number
  * @lock: The glock number
  *
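The grant rules that relaxed_state_ok() encoded are not lost; they reappear, extended to cover the deferred state, in the new may_grant() added further down in this patch. A minimal userspace model of the removed helper, using stand-in values for the LM_ST_* states and flag bits (not the kernel's real definitions):

#include <assert.h>

enum { LM_ST_UNLOCKED, LM_ST_EXCLUSIVE, LM_ST_DEFERRED, LM_ST_SHARED };
#define GL_EXACT	0x1
#define LM_FLAG_ANY	0x2

/* A held state satisfies a request when it is equal, strictly stronger
 * (EX covers SH), or simply not unlocked if LM_FLAG_ANY was passed -
 * unless GL_EXACT forbids any relaxation. */
static int relaxed_state_ok(unsigned actual, unsigned requested, int flags)
{
	if (actual == requested)
		return 1;
	if (flags & GL_EXACT)
		return 0;
	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;
	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

int main(void)
{
	assert(relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0));
	assert(!relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT));
	assert(relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY));
	assert(!relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY));
	return 0;
}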
@@ -182,7 +153,7 @@ static void glock_free(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct inode *aspace = gl->gl_aspace;
 
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
 		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
 
 	if (aspace)
@@ -211,17 +182,14 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
 	int rv = 0;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
 	if (atomic_dec_and_test(&gl->gl_ref)) {
 		hlist_del(&gl->gl_list);
 		write_unlock(gl_lock_addr(gl->gl_hash));
-		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
-		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
-		gfs2_assert(sdp, list_empty(&gl->gl_holders));
-		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
-		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
+		GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
+		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
+		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
 		goto out;
@@ -281,22 +249,401 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 	return gl;
 }
 
+/**
+ * may_grant - check if it's ok to grant a new lock
+ * @gl: The glock
+ * @gh: The lock request which we wish to grant
+ *
+ * Returns: true if it's ok to grant the lock
+ */
+
+static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
+{
+	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
+	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
+		return 0;
+	if (gl->gl_state == gh->gh_state)
+		return 1;
+	if (gh->gh_flags & GL_EXACT)
+		return 0;
+	if (gl->gl_state == LM_ST_EXCLUSIVE) {
+		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
+			return 1;
+		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
+			return 1;
+	}
+	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
+		return 1;
+	return 0;
+}
+
+static void gfs2_holder_wake(struct gfs2_holder *gh)
+{
+	clear_bit(HIF_WAIT, &gh->gh_iflags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+}
+
+/**
+ * do_promote - promote as many requests as possible on the current queue
+ * @gl: The glock
+ *
+ * Returns: true if there is a blocked holder at the head of the list
+ */
+
+static int do_promote(struct gfs2_glock *gl)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_holder *gh, *tmp;
+	int ret;
+
+restart:
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (may_grant(gl, gh)) {
+			if (gh->gh_list.prev == &gl->gl_holders &&
+			    glops->go_lock) {
+				spin_unlock(&gl->gl_spin);
+				/* FIXME: eliminate this eventually */
+				ret = glops->go_lock(gh);
+				spin_lock(&gl->gl_spin);
+				if (ret) {
+					gh->gh_error = ret;
+					list_del_init(&gh->gh_list);
+					gfs2_holder_wake(gh);
+					goto restart;
+				}
+				set_bit(HIF_HOLDER, &gh->gh_iflags);
+				gfs2_holder_wake(gh);
+				goto restart;
+			}
+			set_bit(HIF_HOLDER, &gh->gh_iflags);
+			gfs2_holder_wake(gh);
+			continue;
+		}
+		if (gh->gh_list.prev == &gl->gl_holders)
+			return 1;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+	struct gfs2_holder *gh, *tmp;
+
+	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			continue;
+		if (ret & LM_OUT_ERROR)
+			gh->gh_error = -EIO;
+		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+			gh->gh_error = GLR_TRYFAILED;
+		else
+			continue;
+		list_del_init(&gh->gh_list);
+		gfs2_holder_wake(gh);
+	}
+}
+
+/**
+ * find_first_waiter - find the first gh that's waiting for the glock
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
+/**
+ * state_change - record that the glock is now in a different state
+ * @gl: the glock
+ * @new_state: the new state
+ *
+ */
+
+static void state_change(struct gfs2_glock *gl, unsigned int new_state)
+{
+	int held1, held2;
+
+	held1 = (gl->gl_state != LM_ST_UNLOCKED);
+	held2 = (new_state != LM_ST_UNLOCKED);
+
+	if (held1 != held2) {
+		if (held2)
+			gfs2_glock_hold(gl);
+		else
+			gfs2_glock_put(gl);
+	}
+
+	gl->gl_state = new_state;
+	gl->gl_tchange = jiffies;
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+	gl->gl_demote_state = LM_ST_EXCLUSIVE;
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+/**
+ * finish_xmote - The DLM has replied to one of our lock requests
+ * @gl: The glock
+ * @ret: The status from the DLM
+ *
+ */
+
+static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_holder *gh;
+	unsigned state = ret & LM_OUT_ST_MASK;
+
+	spin_lock(&gl->gl_spin);
+	state_change(gl, state);
+	gh = find_first_waiter(gl);
+
+	/* Demote to UN request arrived during demote to SH or DF */
+	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
+		gl->gl_target = LM_ST_UNLOCKED;
+
+	/* Check for state != intended state */
+	if (unlikely(state != gl->gl_target)) {
+		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+			/* move to back of queue and try next entry */
+			if (ret & LM_OUT_CANCELED) {
+				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
+					list_move_tail(&gh->gh_list, &gl->gl_holders);
+				gh = find_first_waiter(gl);
+				gl->gl_target = gh->gh_state;
+				goto retry;
+			}
+			/* Some error or failed "try lock" - report it */
+			if ((ret & LM_OUT_ERROR) ||
+			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
+				gl->gl_target = gl->gl_state;
+				do_error(gl, ret);
+				goto out;
+			}
+		}
+		switch(state) {
+		/* Unlocked due to conversion deadlock, try again */
+		case LM_ST_UNLOCKED:
+retry:
+			do_xmote(gl, gh, gl->gl_target);
+			break;
+		/* Conversion fails, unlock and try again */
+		case LM_ST_SHARED:
+		case LM_ST_DEFERRED:
+			do_xmote(gl, gh, LM_ST_UNLOCKED);
+			break;
+		default: /* Everything else */
+			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
+			GLOCK_BUG_ON(gl, 1);
+		}
+		spin_unlock(&gl->gl_spin);
+		gfs2_glock_put(gl);
+		return;
+	}
+
+	/* Fast path - we got what we asked for */
+	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+		gfs2_demote_wake(gl);
+	if (state != LM_ST_UNLOCKED) {
+		if (glops->go_xmote_bh) {
+			int rv;
+			spin_unlock(&gl->gl_spin);
+			rv = glops->go_xmote_bh(gl, gh);
+			if (rv == -EAGAIN)
+				return;
+			spin_lock(&gl->gl_spin);
+			if (rv) {
+				do_error(gl, rv);
+				goto out;
+			}
+		}
+		do_promote(gl);
+	}
+out:
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	spin_unlock(&gl->gl_spin);
+	gfs2_glock_put(gl);
+}
+
+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
+				 unsigned int cur_state, unsigned int req_state,
+				 unsigned int flags)
+{
+	int ret = LM_OUT_ERROR;
+
+	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
+		return req_state == LM_ST_UNLOCKED ? 0 : req_state;
+
+	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+							 req_state, flags);
+	return ret;
+}
+
+/**
+ * do_xmote - Calls the DLM to change the state of a lock
+ * @gl: The lock state
+ * @gh: The holder (only for promotes)
+ * @target: The target lock state
+ *
+ */
+
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
+{
+	const struct gfs2_glock_operations *glops = gl->gl_ops;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	unsigned int lck_flags = gh ? gh->gh_flags : 0;
+	int ret;
+
+	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
+		      LM_FLAG_PRIORITY);
+	BUG_ON(gl->gl_state == target);
+	BUG_ON(gl->gl_state == gl->gl_target);
+	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
+	    glops->go_inval) {
+		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+		do_error(gl, 0); /* Fail queued try locks */
+	}
+	spin_unlock(&gl->gl_spin);
+	if (glops->go_xmote_th)
+		glops->go_xmote_th(gl);
+	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
+	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+
+	gfs2_glock_hold(gl);
+	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
+	    gl->gl_state == LM_ST_DEFERRED) &&
+	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+		lck_flags |= LM_FLAG_TRY_1CB;
+	ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
+
+	if (!(ret & LM_OUT_ASYNC)) {
+		finish_xmote(gl, ret);
+		gfs2_glock_hold(gl);
+		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+			gfs2_glock_put(gl);
+	} else {
+		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
+	}
+	spin_lock(&gl->gl_spin);
+}
+
+/**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
+/**
+ * run_queue - do all outstanding tasks related to a glock
+ * @gl: The glock in question
+ * @nonblock: True if we must not block in run_queue
+ *
+ */
+
+static void run_queue(struct gfs2_glock *gl, const int nonblock)
+{
+	struct gfs2_holder *gh = NULL;
+
+	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+		return;
+
+	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
+
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+	    gl->gl_demote_state != gl->gl_state) {
+		if (find_first_holder(gl))
+			goto out;
+		if (nonblock)
+			goto out_sched;
+		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
+		gl->gl_target = gl->gl_demote_state;
+	} else {
+		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+			gfs2_demote_wake(gl);
+		if (do_promote(gl) == 0)
+			goto out;
+		gh = find_first_waiter(gl);
+		gl->gl_target = gh->gh_state;
+		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+			do_error(gl, 0); /* Fail queued try locks */
+	}
+	do_xmote(gl, gh, gl->gl_target);
+	return;
+
+out_sched:
+	gfs2_glock_hold(gl);
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+		gfs2_glock_put(gl);
+out:
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
+	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+		finish_xmote(gl, gl->gl_reply);
 	spin_lock(&gl->gl_spin);
-	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
-		set_bit(GLF_DEMOTE, &gl->gl_flags);
-	run_queue(gl);
+	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+	    gl->gl_state != LM_ST_UNLOCKED &&
+	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
+		unsigned long holdtime, now = jiffies;
+		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+		if (time_before(now, holdtime))
+			delay = holdtime - now;
+		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
+	}
+	run_queue(gl, 0);
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
+	if (!delay ||
+	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+		gfs2_glock_put(gl);
 }
 
 static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
 			    void **lockp)
 {
 	int error = -EIO;
+	if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
+		return 0;
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
 				sdp->sd_lockstruct.ls_lockspace, name, lockp);
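The reworked glock_work_func() above defers a pending demote until the glock has been held for go_min_hold_time jiffies. The arithmetic is easy to model in userspace; plain integers stand in for jiffies here, whereas the real code uses time_before() to stay correct across jiffies wraparound:

#include <assert.h>

/* delay = remaining minimum hold time, or 0 once it has elapsed */
static unsigned long demote_delay(unsigned long now, unsigned long tchange,
				  unsigned long min_hold_time)
{
	unsigned long holdtime = tchange + min_hold_time;
	return (now < holdtime) ? holdtime - now : 0;
}

int main(void)
{
	assert(demote_delay(150, 100, 100) == 50);	/* held 50 of 100: requeue for 50 */
	assert(demote_delay(250, 100, 100) == 0);	/* hold time over: demote now */
	return 0;
}

When delay is non-zero the work item is requeued with that delay and GLF_PENDING_DEMOTE is left set rather than GLF_DEMOTE, which is why the final gfs2_glock_put() is skipped whenever the requeue succeeds.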
@@ -342,12 +689,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_name = name;
 	atomic_set(&gl->gl_ref, 1);
 	gl->gl_state = LM_ST_UNLOCKED;
+	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
 	gl->gl_hash = hash;
-	gl->gl_owner_pid = NULL;
-	gl->gl_ip = 0;
 	gl->gl_ops = glops;
-	gl->gl_req_gh = NULL;
 	gl->gl_stamp = jiffies;
 	gl->gl_tchange = jiffies;
 	gl->gl_object = NULL;
@@ -447,13 +792,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 	gh->gh_ip = 0;
 }
 
-static void gfs2_holder_wake(struct gfs2_holder *gh)
-{
-	clear_bit(HIF_WAIT, &gh->gh_iflags);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-}
-
 static int just_schedule(void *word)
 {
 	schedule();
@@ -466,14 +804,6 @@ static void wait_on_holder(struct gfs2_holder *gh)
 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
 }
 
-static void gfs2_demote_wake(struct gfs2_glock *gl)
-{
-	gl->gl_demote_state = LM_ST_EXCLUSIVE;
-	clear_bit(GLF_DEMOTE, &gl->gl_flags);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
-}
-
 static void wait_on_demote(struct gfs2_glock *gl)
 {
 	might_sleep();
@@ -481,217 +811,6 @@ static void wait_on_demote(struct gfs2_glock *gl)
 }
 
 /**
- * rq_mutex - process a mutex request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_mutex(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-
-	list_del_init(&gh->gh_list);
-	/* gh->gh_error never examined. */
-	set_bit(GLF_LOCK, &gl->gl_flags);
-	clear_bit(HIF_WAIT, &gh->gh_iflags);
-	smp_mb();
-	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-
-	return 1;
-}
-
-/**
- * rq_promote - process a promote request in the queue
- * @gh: the glock holder
- *
- * Acquire a new inter-node lock, or change a lock state to more restrictive.
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_promote(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-
-	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-		if (list_empty(&gl->gl_holders)) {
-			gl->gl_req_gh = gh;
-			set_bit(GLF_LOCK, &gl->gl_flags);
-			spin_unlock(&gl->gl_spin);
-			gfs2_glock_xmote_th(gh->gh_gl, gh);
-			spin_lock(&gl->gl_spin);
-		}
-		return 1;
-	}
-
-	if (list_empty(&gl->gl_holders)) {
-		set_bit(HIF_FIRST, &gh->gh_iflags);
-		set_bit(GLF_LOCK, &gl->gl_flags);
-	} else {
-		struct gfs2_holder *next_gh;
-		if (gh->gh_state == LM_ST_EXCLUSIVE)
-			return 1;
-		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
-				     gh_list);
-		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
-			return 1;
-	}
-
-	list_move_tail(&gh->gh_list, &gl->gl_holders);
-	gh->gh_error = 0;
-	set_bit(HIF_HOLDER, &gh->gh_iflags);
-
-	gfs2_holder_wake(gh);
-
-	return 0;
-}
-
-/**
- * rq_demote - process a demote request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_demote(struct gfs2_glock *gl)
-{
-	if (!list_empty(&gl->gl_holders))
-		return 1;
-
-	if (gl->gl_state == gl->gl_demote_state ||
-	    gl->gl_state == LM_ST_UNLOCKED) {
-		gfs2_demote_wake(gl);
-		return 0;
-	}
-
-	set_bit(GLF_LOCK, &gl->gl_flags);
-	set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
-	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
-	    gl->gl_state != LM_ST_EXCLUSIVE) {
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_drop_th(gl);
-	} else {
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_xmote_th(gl, NULL);
-	}
-
-	spin_lock(&gl->gl_spin);
-	clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
-	return 0;
-}
-
-/**
- * run_queue - process holder structures on a glock
- * @gl: the glock
- *
- */
-static void run_queue(struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh;
-	int blocked = 1;
-
-	for (;;) {
-		if (test_bit(GLF_LOCK, &gl->gl_flags))
-			break;
-
-		if (!list_empty(&gl->gl_waiters1)) {
-			gh = list_entry(gl->gl_waiters1.next,
-					struct gfs2_holder, gh_list);
-			blocked = rq_mutex(gh);
-		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
-			blocked = rq_demote(gl);
-			if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
-				     !blocked) {
-				set_bit(GLF_DEMOTE, &gl->gl_flags);
-				gl->gl_demote_state = LM_ST_UNLOCKED;
-			}
-			clear_bit(GLF_WAITERS2, &gl->gl_flags);
-		} else if (!list_empty(&gl->gl_waiters3)) {
-			gh = list_entry(gl->gl_waiters3.next,
-					struct gfs2_holder, gh_list);
-			blocked = rq_promote(gh);
-		} else
-			break;
-
-		if (blocked)
-			break;
-	}
-}
-
-/**
- * gfs2_glmutex_lock - acquire a local lock on a glock
- * @gl: the glock
- *
- * Gives caller exclusive access to manipulate a glock structure.
- */
-
-static void gfs2_glmutex_lock(struct gfs2_glock *gl)
-{
-	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-		struct gfs2_holder gh;
-
-		gfs2_holder_init(gl, 0, 0, &gh);
-		set_bit(HIF_WAIT, &gh.gh_iflags);
-		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
-		spin_unlock(&gl->gl_spin);
-		wait_on_holder(&gh);
-		gfs2_holder_uninit(&gh);
-	} else {
-		gl->gl_owner_pid = get_pid(task_pid(current));
-		gl->gl_ip = (unsigned long)__builtin_return_address(0);
-		spin_unlock(&gl->gl_spin);
-	}
-}
-
-/**
- * gfs2_glmutex_trylock - try to acquire a local lock on a glock
- * @gl: the glock
- *
- * Returns: 1 if the glock is acquired
- */
-
-static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
-{
-	int acquired = 1;
-
-	spin_lock(&gl->gl_spin);
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-		acquired = 0;
-	} else {
-		gl->gl_owner_pid = get_pid(task_pid(current));
-		gl->gl_ip = (unsigned long)__builtin_return_address(0);
-	}
-	spin_unlock(&gl->gl_spin);
-
-	return acquired;
-}
-
-/**
- * gfs2_glmutex_unlock - release a local lock on a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
-{
-	struct pid *pid;
-
-	spin_lock(&gl->gl_spin);
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	pid = gl->gl_owner_pid;
-	gl->gl_owner_pid = NULL;
-	gl->gl_ip = 0;
-	run_queue(gl);
-	spin_unlock(&gl->gl_spin);
-
-	put_pid(pid);
-}
-
-/**
  * handle_callback - process a demote request
  * @gl: the glock
  * @state: the state the caller wants us to change to
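With gfs2_glmutex_lock() and friends gone, the GLF_LOCK bit itself becomes the "mutex" over the state machine: run_queue() claims it with test_and_set_bit() and simply backs off if another context already owns it, rather than queueing on gl_waiters1. A rough userspace analogue of that claim/back-off protocol, using a C11 atomic_flag in place of the kernel's bitops (an illustrative stand-in only):

#include <assert.h>
#include <stdatomic.h>

static atomic_flag glf_lock = ATOMIC_FLAG_INIT;

/* returns 1 if the caller now owns the state machine, 0 if it should
 * back off and let the current owner finish */
static int run_queue_tryclaim(void)
{
	return !atomic_flag_test_and_set(&glf_lock);
}

int main(void)
{
	assert(run_queue_tryclaim());		/* first caller wins */
	assert(!run_queue_tryclaim());		/* concurrent caller backs off */
	atomic_flag_clear(&glf_lock);		/* clear_bit(GLF_LOCK, ...) analogue */
	return 0;
}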
@@ -705,398 +824,45 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 {
 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 
-	spin_lock(&gl->gl_spin);
 	set_bit(bit, &gl->gl_flags);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
 		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
-		    gl->gl_object) {
+		    gl->gl_object)
 			gfs2_glock_schedule_for_reclaim(gl);
-			spin_unlock(&gl->gl_spin);
-			return;
-		}
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 		   gl->gl_demote_state != state) {
-		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
-			set_bit(GLF_WAITERS2, &gl->gl_flags);
-		else
-			gl->gl_demote_state = LM_ST_UNLOCKED;
-	}
-	spin_unlock(&gl->gl_spin);
-}
-
-/**
- * state_change - record that the glock is now in a different state
- * @gl: the glock
- * @new_state the new state
- *
- */
-
-static void state_change(struct gfs2_glock *gl, unsigned int new_state)
-{
-	int held1, held2;
-
-	held1 = (gl->gl_state != LM_ST_UNLOCKED);
-	held2 = (new_state != LM_ST_UNLOCKED);
-
-	if (held1 != held2) {
-		if (held2)
-			gfs2_glock_hold(gl);
-		else
-			gfs2_glock_put(gl);
+		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
-
-	gl->gl_state = new_state;
-	gl->gl_tchange = jiffies;
 }
 
 /**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !ret);
-
-	state_change(gl, LM_ST_UNLOCKED);
-
-	if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
-		spin_lock(&gl->gl_spin);
-		gh->gh_error = 0;
-		spin_unlock(&gl->gl_spin);
-		gfs2_glock_xmote_th(gl, gl->gl_req_gh);
-		gfs2_glock_put(gl);
-		return;
-	}
-
-	spin_lock(&gl->gl_spin);
-	gfs2_demote_wake(gl);
-	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
-}
-
-/**
- * xmote_bh - Called after the lock module is done acquiring a lock
- * @gl: The glock in question
- * @ret: the int returned from the lock module
- *
- */
-
-static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh = gl->gl_req_gh;
-	int op_done = 1;
-
-	if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
-		drop_bh(gl, ret);
-		return;
-	}
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
-
-	state_change(gl, ret & LM_OUT_ST_MASK);
-
-	/* Deal with each possible exit condition */
-
-	if (!gh) {
-		gl->gl_stamp = jiffies;
-		if (ret & LM_OUT_CANCELED) {
-			op_done = 0;
-		} else {
-			spin_lock(&gl->gl_spin);
-			if (gl->gl_state != gl->gl_demote_state) {
-				spin_unlock(&gl->gl_spin);
-				gfs2_glock_drop_th(gl);
-				gfs2_glock_put(gl);
-				return;
-			}
-			gfs2_demote_wake(gl);
-			spin_unlock(&gl->gl_spin);
-		}
-	} else {
-		spin_lock(&gl->gl_spin);
-		if (ret & LM_OUT_CONV_DEADLK) {
-			gh->gh_error = 0;
-			set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
-			spin_unlock(&gl->gl_spin);
-			gfs2_glock_drop_th(gl);
-			gfs2_glock_put(gl);
-			return;
-		}
-		list_del_init(&gh->gh_list);
-		gh->gh_error = -EIO;
-		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-			goto out;
-		gh->gh_error = GLR_CANCELED;
-		if (ret & LM_OUT_CANCELED)
-			goto out;
-		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
-			list_add_tail(&gh->gh_list, &gl->gl_holders);
-			gh->gh_error = 0;
-			set_bit(HIF_HOLDER, &gh->gh_iflags);
-			set_bit(HIF_FIRST, &gh->gh_iflags);
-			op_done = 0;
-			goto out;
-		}
-		gh->gh_error = GLR_TRYFAILED;
-		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
-			goto out;
-		gh->gh_error = -EINVAL;
-		if (gfs2_assert_withdraw(sdp, 0) == -1)
-			fs_err(sdp, "ret = 0x%.8X\n", ret);
-out:
-		spin_unlock(&gl->gl_spin);
-	}
-
-	if (glops->go_xmote_bh)
-		glops->go_xmote_bh(gl);
-
-	if (op_done) {
-		spin_lock(&gl->gl_spin);
-		gl->gl_req_gh = NULL;
-		clear_bit(GLF_LOCK, &gl->gl_flags);
-		spin_unlock(&gl->gl_spin);
-	}
-
-	gfs2_glock_put(gl);
-
-	if (gh)
-		gfs2_holder_wake(gh);
-}
-
-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-				 unsigned int cur_state, unsigned int req_state,
-				 unsigned int flags)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
-							 req_state, flags);
-	return ret;
-}
-
-/**
- * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
- * @gl: The glock in question
- * @state: the requested state
- * @flags: modifier flags to the lock call
- *
- */
-
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int flags = gh ? gh->gh_flags : 0;
-	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
-				 LM_FLAG_NOEXP | LM_FLAG_ANY |
-				 LM_FLAG_PRIORITY);
-	unsigned int lck_ret;
-
-	if (glops->go_xmote_th)
-		glops->go_xmote_th(gl);
-	if (state == LM_ST_DEFERRED && glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
-	gfs2_assert_warn(sdp, state != gl->gl_state);
-
-	gfs2_glock_hold(gl);
-
-	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
-
-	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
-		return;
-
-	if (lck_ret & LM_OUT_ASYNC)
-		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
-	else
-		xmote_bh(gl, lck_ret);
-}
-
-static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
-				   unsigned int cur_state)
-{
-	int ret = 0;
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
-	return ret;
-}
-
-/**
- * gfs2_glock_drop_th - call into the lock module to unlock a lock
- * @gl: the glock
- *
- */
-
-static void gfs2_glock_drop_th(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	unsigned int ret;
-
-	if (glops->go_xmote_th)
-		glops->go_xmote_th(gl);
-	if (glops->go_inval)
-		glops->go_inval(gl, DIO_METADATA);
-
-	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
-	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
-
-	gfs2_glock_hold(gl);
-
-	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
-
-	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
-		return;
-
-	if (!ret)
-		drop_bh(gl, ret);
-	else
-		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
-}
-
-/**
- * do_cancels - cancel requests for locks stuck waiting on an expire flag
- * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
- *
- * Don't cancel GL_NOCANCEL requests.
- */
-
-static void do_cancels(struct gfs2_holder *gh)
-{
-	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-
-	spin_lock(&gl->gl_spin);
-
-	while (gl->gl_req_gh != gh &&
-	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
-	       !list_empty(&gh->gh_list)) {
-		if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
-			spin_unlock(&gl->gl_spin);
-			if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-				sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
-			msleep(100);
-			spin_lock(&gl->gl_spin);
-		} else {
-			spin_unlock(&gl->gl_spin);
-			msleep(100);
-			spin_lock(&gl->gl_spin);
-		}
-	}
-
-	spin_unlock(&gl->gl_spin);
-}
-
-/**
- * glock_wait_internal - wait on a glock acquisition
+ * gfs2_glock_wait - wait on a glock acquisition
  * @gh: the glock holder
  *
  * Returns: 0 on success
  */
 
-static int glock_wait_internal(struct gfs2_holder *gh)
+int gfs2_glock_wait(struct gfs2_holder *gh)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
-		return -EIO;
-
-	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		spin_lock(&gl->gl_spin);
-		if (gl->gl_req_gh != gh &&
-		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
-		    !list_empty(&gh->gh_list)) {
-			list_del_init(&gh->gh_list);
-			gh->gh_error = GLR_TRYFAILED;
-			run_queue(gl);
-			spin_unlock(&gl->gl_spin);
-			return gh->gh_error;
-		}
-		spin_unlock(&gl->gl_spin);
-	}
-
-	if (gh->gh_flags & LM_FLAG_PRIORITY)
-		do_cancels(gh);
-
 	wait_on_holder(gh);
-	if (gh->gh_error)
-		return gh->gh_error;
-
-	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
-	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
-						   gh->gh_flags));
-
-	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
-		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-
-		if (glops->go_lock) {
-			gh->gh_error = glops->go_lock(gh);
-			if (gh->gh_error) {
-				spin_lock(&gl->gl_spin);
-				list_del_init(&gh->gh_list);
-				spin_unlock(&gl->gl_spin);
-			}
-		}
-
-		spin_lock(&gl->gl_spin);
-		gl->gl_req_gh = NULL;
-		clear_bit(GLF_LOCK, &gl->gl_flags);
-		run_queue(gl);
-		spin_unlock(&gl->gl_spin);
-	}
-
 	return gh->gh_error;
 }
 
-static inline struct gfs2_holder *
-find_holder_by_owner(struct list_head *head, struct pid *pid)
-{
-	struct gfs2_holder *gh;
-
-	list_for_each_entry(gh, head, gh_list) {
-		if (gh->gh_owner_pid == pid)
-			return gh;
-	}
-
-	return NULL;
-}
-
-static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 {
 	va_list args;
 
 	va_start(args, fmt);
-	if (gi) {
+	if (seq) {
+		struct gfs2_glock_iter *gi = seq->private;
 		vsprintf(gi->string, fmt, args);
-		seq_printf(gi->seq, gi->string);
-	}
-	else
+		seq_printf(seq, gi->string);
+	} else {
+		printk(KERN_ERR " ");
 		vprintk(fmt, args);
+	}
 	va_end(args);
 }
 
@@ -1104,50 +870,76 @@ static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
  * add_to_queue - Add a holder to the wait queue (but look for recursion)
  * @gh: the holder structure to add
  *
+ * Eventually we should move the recursive locking trap to a
+ * debugging option or something like that. This is the fast
+ * path and needs to have the minimum number of distractions.
+ *
  */
 
-static void add_to_queue(struct gfs2_holder *gh)
+static inline void add_to_queue(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_holder *existing;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct list_head *insert_pt = NULL;
+	struct gfs2_holder *gh2;
+	int try_lock = 0;
 
 	BUG_ON(gh->gh_owner_pid == NULL);
 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
 		BUG();
 
-	if (!(gh->gh_flags & GL_FLOCK)) {
-		existing = find_holder_by_owner(&gl->gl_holders,
-						gh->gh_owner_pid);
-		if (existing) {
-			print_symbol(KERN_WARNING "original: %s\n",
-				     existing->gh_ip);
-			printk(KERN_INFO "pid : %d\n",
-			       pid_nr(existing->gh_owner_pid));
-			printk(KERN_INFO "lock type : %d lock state : %d\n",
-			       existing->gh_gl->gl_name.ln_type,
-			       existing->gh_gl->gl_state);
-			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
-			printk(KERN_INFO "pid : %d\n",
-			       pid_nr(gh->gh_owner_pid));
-			printk(KERN_INFO "lock type : %d lock state : %d\n",
-			       gl->gl_name.ln_type, gl->gl_state);
-			BUG();
-		}
-
-		existing = find_holder_by_owner(&gl->gl_waiters3,
-						gh->gh_owner_pid);
-		if (existing) {
-			print_symbol(KERN_WARNING "original: %s\n",
-				     existing->gh_ip);
-			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
-			BUG();
-		}
-	}
-
-	if (gh->gh_flags & LM_FLAG_PRIORITY)
-		list_add(&gh->gh_list, &gl->gl_waiters3);
-	else
-		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
+	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
+		if (test_bit(GLF_LOCK, &gl->gl_flags))
+			try_lock = 1;
+		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+			goto fail;
+	}
+
+	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
+		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
+			goto trap_recursive;
+		if (try_lock &&
+		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+		    !may_grant(gl, gh)) {
+fail:
+			gh->gh_error = GLR_TRYFAILED;
+			gfs2_holder_wake(gh);
+			return;
+		}
+		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+			continue;
+		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
+			insert_pt = &gh2->gh_list;
+	}
+	if (likely(insert_pt == NULL)) {
+		list_add_tail(&gh->gh_list, &gl->gl_holders);
+		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
+			goto do_cancel;
+		return;
+	}
+	list_add_tail(&gh->gh_list, insert_pt);
+do_cancel:
+	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
+		spin_unlock(&gl->gl_spin);
+		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+			sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
+		spin_lock(&gl->gl_spin);
 	}
+	return;
+
+trap_recursive:
+	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
+	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+	printk(KERN_ERR "lock type: %d req lock state : %d\n",
+	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
+	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
+	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
+	printk(KERN_ERR "lock type: %d req lock state : %d\n",
+	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
+	__dump_glock(NULL, gl);
+	BUG();
 }
 
 /**
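The single-queue add_to_queue() above replaces the old gl_waiters3 list: granted holders sit at the front, waiters behind them, and an LM_FLAG_PRIORITY request is inserted ahead of the first waiter rather than appended. A small model of that insertion-point scan, with arrays standing in for the kernel's linked list and made-up flag bits:

#include <assert.h>
#include <stddef.h>

#define HIF_HOLDER		0x1
#define LM_FLAG_PRIORITY	0x2

struct req { int flags; };

static size_t insert_index(const struct req *q, size_t n, const struct req *gh)
{
	size_t i;
	if (gh->flags & LM_FLAG_PRIORITY) {
		for (i = 0; i < n; i++)
			if (!(q[i].flags & HIF_HOLDER))
				return i;	/* in front of the first waiter */
	}
	return n;				/* everyone else: append */
}

int main(void)
{
	struct req q[3] = { { HIF_HOLDER }, { HIF_HOLDER }, { 0 } };
	struct req prio = { LM_FLAG_PRIORITY }, norm = { 0 };
	assert(insert_index(q, 3, &prio) == 2);	/* jumps the queued waiter */
	assert(insert_index(q, 3, &norm) == 3);	/* queues behind it */
	return 0;
}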
@@ -1165,24 +957,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	int error = 0;
 
-restart:
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
-		set_bit(HIF_ABORTED, &gh->gh_iflags);
+	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		return -EIO;
-	}
 
 	spin_lock(&gl->gl_spin);
 	add_to_queue(gh);
-	run_queue(gl);
+	run_queue(gl, 1);
 	spin_unlock(&gl->gl_spin);
 
-	if (!(gh->gh_flags & GL_ASYNC)) {
-		error = glock_wait_internal(gh);
-		if (error == GLR_CANCELED) {
-			msleep(100);
-			goto restart;
-		}
-	}
+	if (!(gh->gh_flags & GL_ASYNC))
+		error = gfs2_glock_wait(gh);
 
 	return error;
 }
@@ -1196,48 +980,7 @@ restart:
 
 int gfs2_glock_poll(struct gfs2_holder *gh)
 {
-	struct gfs2_glock *gl = gh->gh_gl;
-	int ready = 0;
-
-	spin_lock(&gl->gl_spin);
-
-	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-		ready = 1;
-	else if (list_empty(&gh->gh_list)) {
-		if (gh->gh_error == GLR_CANCELED) {
-			spin_unlock(&gl->gl_spin);
-			msleep(100);
-			if (gfs2_glock_nq(gh))
-				return 1;
-			return 0;
-		} else
-			ready = 1;
-	}
-
-	spin_unlock(&gl->gl_spin);
-
-	return ready;
-}
-
-/**
- * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
- * @gh: the holder structure
- *
- * Returns: 0, GLR_TRYFAILED, or errno on failure
- */
-
-int gfs2_glock_wait(struct gfs2_holder *gh)
-{
-	int error;
-
-	error = glock_wait_internal(gh);
-	if (error == GLR_CANCELED) {
-		msleep(100);
-		gh->gh_flags &= ~GL_ASYNC;
-		error = gfs2_glock_nq(gh);
-	}
-
-	return error;
+	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 }
 
 /**
@@ -1251,26 +994,30 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 	struct gfs2_glock *gl = gh->gh_gl;
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	unsigned delay = 0;
+	int fast_path = 0;
 
+	spin_lock(&gl->gl_spin);
 	if (gh->gh_flags & GL_NOCACHE)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
 
-	gfs2_glmutex_lock(gl);
-
-	spin_lock(&gl->gl_spin);
 	list_del_init(&gh->gh_list);
-
-	if (list_empty(&gl->gl_holders)) {
+	if (find_first_holder(gl) == NULL) {
 		if (glops->go_unlock) {
+			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
 			spin_unlock(&gl->gl_spin);
 			glops->go_unlock(gh);
 			spin_lock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
 		}
 		gl->gl_stamp = jiffies;
+		if (list_empty(&gl->gl_holders) &&
+		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
+			fast_path = 1;
 	}
-
-	clear_bit(GLF_LOCK, &gl->gl_flags);
 	spin_unlock(&gl->gl_spin);
+	if (likely(fast_path))
+		return;
 
 	gfs2_glock_hold(gl);
 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
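The fast_path flag added to gfs2_glock_dq() is worth spelling out: when the last holder drops and no demote is flagged, the function now returns without taking an extra reference or kicking the workqueue at all. The decision reduces to a pure predicate, modelled here with stand-in flag bits:

#include <assert.h>

#define GLF_PENDING_DEMOTE	0x1
#define GLF_DEMOTE		0x2

static int fast_path(int holders_left, unsigned long flags)
{
	return !holders_left &&
	       !(flags & GLF_PENDING_DEMOTE) &&
	       !(flags & GLF_DEMOTE);
}

int main(void)
{
	assert(fast_path(0, 0));			/* idle glock: return early */
	assert(!fast_path(0, GLF_DEMOTE));		/* demote queued: run the machine */
	assert(!fast_path(3, 0));			/* other holders remain */
	return 0;
}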
@@ -1454,6 +1201,8 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
 static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
 {
 	int error = -EIO;
+	if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
+		return 0;
 	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
 	return error;
@@ -1469,20 +1218,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
 {
 	int error;
 
-	gfs2_glmutex_lock(gl);
-
 	if (!atomic_read(&gl->gl_lvb_count)) {
 		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
-		if (error) {
-			gfs2_glmutex_unlock(gl);
+		if (error)
 			return error;
-		}
 		gfs2_glock_hold(gl);
 	}
 	atomic_inc(&gl->gl_lvb_count);
 
-	gfs2_glmutex_unlock(gl);
-
 	return 0;
 }
 
| @@ -1497,17 +1240,13 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl) | |||
| 1497 | struct gfs2_sbd *sdp = gl->gl_sbd; | 1240 | struct gfs2_sbd *sdp = gl->gl_sbd; |
| 1498 | 1241 | ||
| 1499 | gfs2_glock_hold(gl); | 1242 | gfs2_glock_hold(gl); |
| 1500 | gfs2_glmutex_lock(gl); | ||
| 1501 | |||
| 1502 | gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); | 1243 | gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); |
| 1503 | if (atomic_dec_and_test(&gl->gl_lvb_count)) { | 1244 | if (atomic_dec_and_test(&gl->gl_lvb_count)) { |
| 1504 | if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) | 1245 | if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb) |
| 1505 | sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb); | 1246 | sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb); |
| 1506 | gl->gl_lvb = NULL; | 1247 | gl->gl_lvb = NULL; |
| 1507 | gfs2_glock_put(gl); | 1248 | gfs2_glock_put(gl); |
| 1508 | } | 1249 | } |
| 1509 | |||
| 1510 | gfs2_glmutex_unlock(gl); | ||
| 1511 | gfs2_glock_put(gl); | 1250 | gfs2_glock_put(gl); |
| 1512 | } | 1251 | } |
| 1513 | 1252 | ||
| @@ -1527,7 +1266,9 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, | |||
| 1527 | if (time_before(now, holdtime)) | 1266 | if (time_before(now, holdtime)) |
| 1528 | delay = holdtime - now; | 1267 | delay = holdtime - now; |
| 1529 | 1268 | ||
| 1269 | spin_lock(&gl->gl_spin); | ||
| 1530 | handle_callback(gl, state, 1, delay); | 1270 | handle_callback(gl, state, 1, delay); |
| 1271 | spin_unlock(&gl->gl_spin); | ||
| 1531 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) | 1272 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) |
| 1532 | gfs2_glock_put(gl); | 1273 | gfs2_glock_put(gl); |
| 1533 | } | 1274 | } |
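Note the locking convention this hunk establishes: handle_callback() must now be called with gl_spin held, and every call site touched by this patch (blocking_cb() here, gfs2_glock_dq() above, gfs2_reclaim_glock() and clear_glock() below) follows the same shape. As those callers suggest, handle_callback() only records the requested demote on the glock; the state change itself is performed later by the glock workqueue. The shared pattern, sketched generically:

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, 1, delay);	/* record the demote request */
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);	/* work already queued: drop the extra ref */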
| @@ -1568,7 +1309,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | |||
| 1568 | gl = gfs2_glock_find(sdp, &async->lc_name); | 1309 | gl = gfs2_glock_find(sdp, &async->lc_name); |
| 1569 | if (gfs2_assert_warn(sdp, gl)) | 1310 | if (gfs2_assert_warn(sdp, gl)) |
| 1570 | return; | 1311 | return; |
| 1571 | xmote_bh(gl, async->lc_ret); | 1312 | gl->gl_reply = async->lc_ret; |
| 1313 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | ||
| 1572 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | 1314 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
| 1573 | gfs2_glock_put(gl); | 1315 | gfs2_glock_put(gl); |
| 1574 | up_read(&gfs2_umount_flush_sem); | 1316 | up_read(&gfs2_umount_flush_sem); |
| @@ -1581,11 +1323,6 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) | |||
| 1581 | wake_up_process(sdp->sd_recoverd_process); | 1323 | wake_up_process(sdp->sd_recoverd_process); |
| 1582 | return; | 1324 | return; |
| 1583 | 1325 | ||
| 1584 | case LM_CB_DROPLOCKS: | ||
| 1585 | gfs2_gl_hash_clear(sdp, NO_WAIT); | ||
| 1586 | gfs2_quota_scan(sdp); | ||
| 1587 | return; | ||
| 1588 | |||
| 1589 | default: | 1326 | default: |
| 1590 | gfs2_assert_warn(sdp, 0); | 1327 | gfs2_assert_warn(sdp, 0); |
| 1591 | return; | 1328 | return; |
| @@ -1646,6 +1383,7 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | |||
| 1646 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | 1383 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp) |
| 1647 | { | 1384 | { |
| 1648 | struct gfs2_glock *gl; | 1385 | struct gfs2_glock *gl; |
| 1386 | int done_callback = 0; | ||
| 1649 | 1387 | ||
| 1650 | spin_lock(&sdp->sd_reclaim_lock); | 1388 | spin_lock(&sdp->sd_reclaim_lock); |
| 1651 | if (list_empty(&sdp->sd_reclaim_list)) { | 1389 | if (list_empty(&sdp->sd_reclaim_list)) { |
| @@ -1660,14 +1398,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | |||
| 1660 | atomic_dec(&sdp->sd_reclaim_count); | 1398 | atomic_dec(&sdp->sd_reclaim_count); |
| 1661 | atomic_inc(&sdp->sd_reclaimed); | 1399 | atomic_inc(&sdp->sd_reclaimed); |
| 1662 | 1400 | ||
| 1663 | if (gfs2_glmutex_trylock(gl)) { | 1401 | spin_lock(&gl->gl_spin); |
| 1664 | if (list_empty(&gl->gl_holders) && | 1402 | if (find_first_holder(gl) == NULL && |
| 1665 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) | 1403 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { |
| 1666 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); | 1404 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); |
| 1667 | gfs2_glmutex_unlock(gl); | 1405 | done_callback = 1; |
| 1668 | } | 1406 | } |
| 1669 | 1407 | spin_unlock(&gl->gl_spin); | |
| 1670 | gfs2_glock_put(gl); | 1408 | if (!done_callback || |
| 1409 | queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) | ||
| 1410 | gfs2_glock_put(gl); | ||
| 1671 | } | 1411 | } |
| 1672 | 1412 | ||
| 1673 | /** | 1413 | /** |
| @@ -1724,18 +1464,14 @@ static void scan_glock(struct gfs2_glock *gl) | |||
| 1724 | { | 1464 | { |
| 1725 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) | 1465 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) |
| 1726 | return; | 1466 | return; |
| 1467 | if (test_bit(GLF_LOCK, &gl->gl_flags)) | ||
| 1468 | return; | ||
| 1727 | 1469 | ||
| 1728 | if (gfs2_glmutex_trylock(gl)) { | 1470 | spin_lock(&gl->gl_spin); |
| 1729 | if (list_empty(&gl->gl_holders) && | 1471 | if (find_first_holder(gl) == NULL && |
| 1730 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) | 1472 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) |
| 1731 | goto out_schedule; | 1473 | gfs2_glock_schedule_for_reclaim(gl); |
| 1732 | gfs2_glmutex_unlock(gl); | 1474 | spin_unlock(&gl->gl_spin); |
| 1733 | } | ||
| 1734 | return; | ||
| 1735 | |||
| 1736 | out_schedule: | ||
| 1737 | gfs2_glmutex_unlock(gl); | ||
| 1738 | gfs2_glock_schedule_for_reclaim(gl); | ||
| 1739 | } | 1475 | } |
| 1740 | 1476 | ||
| 1741 | /** | 1477 | /** |
| @@ -1760,12 +1496,13 @@ static void clear_glock(struct gfs2_glock *gl) | |||
| 1760 | spin_unlock(&sdp->sd_reclaim_lock); | 1496 | spin_unlock(&sdp->sd_reclaim_lock); |
| 1761 | } | 1497 | } |
| 1762 | 1498 | ||
| 1763 | if (gfs2_glmutex_trylock(gl)) { | 1499 | spin_lock(&gl->gl_spin); |
| 1764 | if (list_empty(&gl->gl_holders) && | 1500 | if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) |
| 1765 | gl->gl_state != LM_ST_UNLOCKED) | 1501 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); |
| 1766 | handle_callback(gl, LM_ST_UNLOCKED, 0, 0); | 1502 | spin_unlock(&gl->gl_spin); |
| 1767 | gfs2_glmutex_unlock(gl); | 1503 | gfs2_glock_hold(gl); |
| 1768 | } | 1504 | if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) |
| 1505 | gfs2_glock_put(gl); | ||
| 1769 | } | 1506 | } |
| 1770 | 1507 | ||
| 1771 | /** | 1508 | /** |
| @@ -1773,11 +1510,10 @@ static void clear_glock(struct gfs2_glock *gl) | |||
| 1773 | * @sdp: the filesystem | 1510 | * @sdp: the filesystem |
| 1774 | * @wait: wait until it's all gone | 1511 | * @wait: wait until it's all gone |
| 1775 | * | 1512 | * |
| 1776 | * Called when unmounting the filesystem, or when inter-node lock manager | 1513 | * Called when unmounting the filesystem. |
| 1777 | * requests DROPLOCKS because it is running out of capacity. | ||
| 1778 | */ | 1514 | */ |
| 1779 | 1515 | ||
| 1780 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | 1516 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) |
| 1781 | { | 1517 | { |
| 1782 | unsigned long t; | 1518 | unsigned long t; |
| 1783 | unsigned int x; | 1519 | unsigned int x; |
| @@ -1792,7 +1528,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |||
| 1792 | cont = 1; | 1528 | cont = 1; |
| 1793 | } | 1529 | } |
| 1794 | 1530 | ||
| 1795 | if (!wait || !cont) | 1531 | if (!cont) |
| 1796 | break; | 1532 | break; |
| 1797 | 1533 | ||
| 1798 | if (time_after_eq(jiffies, | 1534 | if (time_after_eq(jiffies, |
| @@ -1810,180 +1546,164 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |||
| 1810 | } | 1546 | } |
| 1811 | } | 1547 | } |
| 1812 | 1548 | ||
| 1813 | /* | 1549 | static const char *state2str(unsigned state) |
| 1814 | * Diagnostic routines to help debug distributed deadlock | ||
| 1815 | */ | ||
| 1816 | |||
| 1817 | static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, | ||
| 1818 | unsigned long address) | ||
| 1819 | { | 1550 | { |
| 1820 | char buffer[KSYM_SYMBOL_LEN]; | 1551 | switch(state) { |
| 1821 | 1552 | case LM_ST_UNLOCKED: | |
| 1822 | sprint_symbol(buffer, address); | 1553 | return "UN"; |
| 1823 | print_dbg(gi, fmt, buffer); | 1554 | case LM_ST_SHARED: |
| 1555 | return "SH"; | ||
| 1556 | case LM_ST_DEFERRED: | ||
| 1557 | return "DF"; | ||
| 1558 | case LM_ST_EXCLUSIVE: | ||
| 1559 | return "EX"; | ||
| 1560 | } | ||
| 1561 | return "??"; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags) | ||
| 1565 | { | ||
| 1566 | char *p = buf; | ||
| 1567 | if (flags & LM_FLAG_TRY) | ||
| 1568 | *p++ = 't'; | ||
| 1569 | if (flags & LM_FLAG_TRY_1CB) | ||
| 1570 | *p++ = 'T'; | ||
| 1571 | if (flags & LM_FLAG_NOEXP) | ||
| 1572 | *p++ = 'e'; | ||
| 1573 | if (flags & LM_FLAG_ANY) | ||
| 1574 | *p++ = 'a'; | ||
| 1575 | if (flags & LM_FLAG_PRIORITY) | ||
| 1576 | *p++ = 'p'; | ||
| 1577 | if (flags & GL_ASYNC) | ||
| 1578 | *p++ = 'a'; | ||
| 1579 | if (flags & GL_EXACT) | ||
| 1580 | *p++ = 'E'; | ||
| 1581 | if (flags & GL_ATIME) | ||
| 1582 | *p++ = 'a'; | ||
| 1583 | if (flags & GL_NOCACHE) | ||
| 1584 | *p++ = 'c'; | ||
| 1585 | if (test_bit(HIF_HOLDER, &iflags)) | ||
| 1586 | *p++ = 'H'; | ||
| 1587 | if (test_bit(HIF_WAIT, &iflags)) | ||
| 1588 | *p++ = 'W'; | ||
| 1589 | if (test_bit(HIF_FIRST, &iflags)) | ||
| 1590 | *p++ = 'F'; | ||
| 1591 | *p = 0; | ||
| 1592 | return buf; | ||
| 1824 | } | 1593 | } |
| 1825 | 1594 | ||
| 1826 | /** | 1595 | /** |
| 1827 | * dump_holder - print information about a glock holder | 1596 | * dump_holder - print information about a glock holder |
| 1828 | * @str: a string naming the type of holder | 1597 | * @seq: the seq_file struct |
| 1829 | * @gh: the glock holder | 1598 | * @gh: the glock holder |
| 1830 | * | 1599 | * |
| 1831 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1600 | * Returns: 0 on success, -ENOBUFS when we run out of space |
| 1832 | */ | 1601 | */ |
| 1833 | 1602 | ||
| 1834 | static int dump_holder(struct glock_iter *gi, char *str, | 1603 | static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) |
| 1835 | struct gfs2_holder *gh) | ||
| 1836 | { | 1604 | { |
| 1837 | unsigned int x; | 1605 | struct task_struct *gh_owner = NULL; |
| 1838 | struct task_struct *gh_owner; | 1606 | char buffer[KSYM_SYMBOL_LEN]; |
| 1607 | char flags_buf[32]; | ||
| 1839 | 1608 | ||
| 1840 | print_dbg(gi, " %s\n", str); | 1609 | sprint_symbol(buffer, gh->gh_ip); |
| 1841 | if (gh->gh_owner_pid) { | 1610 | if (gh->gh_owner_pid) |
| 1842 | print_dbg(gi, " owner = %ld ", | ||
| 1843 | (long)pid_nr(gh->gh_owner_pid)); | ||
| 1844 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); | 1611 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); |
| 1845 | if (gh_owner) | 1612 | gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n", |
| 1846 | print_dbg(gi, "(%s)\n", gh_owner->comm); | 1613 | state2str(gh->gh_state), |
| 1847 | else | 1614 | hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), |
| 1848 | print_dbg(gi, "(ended)\n"); | 1615 | gh->gh_error, |
| 1849 | } else | 1616 | gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, |
| 1850 | print_dbg(gi, " owner = -1\n"); | 1617 | gh_owner ? gh_owner->comm : "(ended)", buffer); |
| 1851 | print_dbg(gi, " gh_state = %u\n", gh->gh_state); | ||
| 1852 | print_dbg(gi, " gh_flags ="); | ||
| 1853 | for (x = 0; x < 32; x++) | ||
| 1854 | if (gh->gh_flags & (1 << x)) | ||
| 1855 | print_dbg(gi, " %u", x); | ||
| 1856 | print_dbg(gi, " \n"); | ||
| 1857 | print_dbg(gi, " error = %d\n", gh->gh_error); | ||
| 1858 | print_dbg(gi, " gh_iflags ="); | ||
| 1859 | for (x = 0; x < 32; x++) | ||
| 1860 | if (test_bit(x, &gh->gh_iflags)) | ||
| 1861 | print_dbg(gi, " %u", x); | ||
| 1862 | print_dbg(gi, " \n"); | ||
| 1863 | gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip); | ||
| 1864 | |||
| 1865 | return 0; | 1618 | return 0; |
| 1866 | } | 1619 | } |
| 1867 | 1620 | ||
| 1868 | /** | 1621 | static const char *gflags2str(char *buf, const unsigned long *gflags) |
| 1869 | * dump_inode - print information about an inode | 1622 | { |
| 1870 | * @ip: the inode | 1623 | char *p = buf; |
| 1871 | * | 1624 | if (test_bit(GLF_LOCK, gflags)) |
| 1872 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1625 | *p++ = 'l'; |
| 1873 | */ | 1626 | if (test_bit(GLF_STICKY, gflags)) |
| 1874 | 1627 | *p++ = 's'; | |
| 1875 | static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) | 1628 | if (test_bit(GLF_DEMOTE, gflags)) |
| 1876 | { | 1629 | *p++ = 'D'; |
| 1877 | unsigned int x; | 1630 | if (test_bit(GLF_PENDING_DEMOTE, gflags)) |
| 1878 | 1631 | *p++ = 'd'; | |
| 1879 | print_dbg(gi, " Inode:\n"); | 1632 | if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) |
| 1880 | print_dbg(gi, " num = %llu/%llu\n", | 1633 | *p++ = 'p'; |
| 1881 | (unsigned long long)ip->i_no_formal_ino, | 1634 | if (test_bit(GLF_DIRTY, gflags)) |
| 1882 | (unsigned long long)ip->i_no_addr); | 1635 | *p++ = 'y'; |
| 1883 | print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); | 1636 | if (test_bit(GLF_LFLUSH, gflags)) |
| 1884 | print_dbg(gi, " i_flags ="); | 1637 | *p++ = 'f'; |
| 1885 | for (x = 0; x < 32; x++) | 1638 | if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) |
| 1886 | if (test_bit(x, &ip->i_flags)) | 1639 | *p++ = 'i'; |
| 1887 | print_dbg(gi, " %u", x); | 1640 | if (test_bit(GLF_REPLY_PENDING, gflags)) |
| 1888 | print_dbg(gi, " \n"); | 1641 | *p++ = 'r'; |
| 1889 | return 0; | 1642 | *p = 0; |
| 1643 | return buf; | ||
| 1890 | } | 1644 | } |
| 1891 | 1645 | ||
| 1892 | /** | 1646 | /** |
| 1893 | * dump_glock - print information about a glock | 1647 | * __dump_glock - print information about a glock |
| 1648 | * @seq: The seq_file struct | ||
| 1894 | * @gl: the glock | 1649 | * @gl: the glock |
| 1895 | * @count: where we are in the buffer | 1650 | * |
| 1651 | * The file format is as follows: | ||
| 1652 | * One line per object, capital letters are used to indicate objects | ||
| 1653 | * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, | ||
| 1654 | * other objects are indented by a single space and follow the glock to | ||
| 1655 | * which they are related. Fields are indicated by lower case letters | ||
| 1656 | * followed by a colon and the field value, except for strings which are in | ||
| 1657 | * [] so that it's possible to see if they are composed of spaces for | ||
| 1658 | * example. The fields are n = number (id of the object), f = flags, | ||
| 1659 | * t = type, s = state, r = refcount, e = error, p = pid. | ||
| 1896 | * | 1660 | * |
| 1897 | * Returns: 0 on success, -ENOBUFS when we run out of space | 1661 | * Returns: 0 on success, -ENOBUFS when we run out of space |
| 1898 | */ | 1662 | */ |
| 1899 | 1663 | ||
| 1900 | static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) | 1664 | static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) |
| 1901 | { | 1665 | { |
| 1902 | struct gfs2_holder *gh; | 1666 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
| 1903 | unsigned int x; | 1667 | unsigned long long dtime; |
| 1904 | int error = -ENOBUFS; | 1668 | const struct gfs2_holder *gh; |
| 1905 | struct task_struct *gl_owner; | 1669 | char gflags_buf[32]; |
| 1670 | int error = 0; | ||
| 1906 | 1671 | ||
| 1907 | spin_lock(&gl->gl_spin); | 1672 | dtime = jiffies - gl->gl_demote_time; |
| 1673 | dtime *= 1000000/HZ; /* demote time in uSec */ | ||
| 1674 | if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) | ||
| 1675 | dtime = 0; | ||
| 1676 | gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n", | ||
| 1677 | state2str(gl->gl_state), | ||
| 1678 | gl->gl_name.ln_type, | ||
| 1679 | (unsigned long long)gl->gl_name.ln_number, | ||
| 1680 | gflags2str(gflags_buf, &gl->gl_flags), | ||
| 1681 | state2str(gl->gl_target), | ||
| 1682 | state2str(gl->gl_demote_state), dtime, | ||
| 1683 | atomic_read(&gl->gl_lvb_count), | ||
| 1684 | atomic_read(&gl->gl_ail_count), | ||
| 1685 | atomic_read(&gl->gl_ref)); | ||
| 1908 | 1686 | ||
| 1909 | print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type, | ||
| 1910 | (unsigned long long)gl->gl_name.ln_number); | ||
| 1911 | print_dbg(gi, " gl_flags ="); | ||
| 1912 | for (x = 0; x < 32; x++) { | ||
| 1913 | if (test_bit(x, &gl->gl_flags)) | ||
| 1914 | print_dbg(gi, " %u", x); | ||
| 1915 | } | ||
| 1916 | if (!test_bit(GLF_LOCK, &gl->gl_flags)) | ||
| 1917 | print_dbg(gi, " (unlocked)"); | ||
| 1918 | print_dbg(gi, " \n"); | ||
| 1919 | print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); | ||
| 1920 | print_dbg(gi, " gl_state = %u\n", gl->gl_state); | ||
| 1921 | if (gl->gl_owner_pid) { | ||
| 1922 | gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID); | ||
| 1923 | if (gl_owner) | ||
| 1924 | print_dbg(gi, " gl_owner = pid %d (%s)\n", | ||
| 1925 | pid_nr(gl->gl_owner_pid), gl_owner->comm); | ||
| 1926 | else | ||
| 1927 | print_dbg(gi, " gl_owner = %d (ended)\n", | ||
| 1928 | pid_nr(gl->gl_owner_pid)); | ||
| 1929 | } else | ||
| 1930 | print_dbg(gi, " gl_owner = -1\n"); | ||
| 1931 | print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); | ||
| 1932 | print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); | ||
| 1933 | print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); | ||
| 1934 | print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); | ||
| 1935 | print_dbg(gi, " reclaim = %s\n", | ||
| 1936 | (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); | ||
| 1937 | if (gl->gl_aspace) | ||
| 1938 | print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, | ||
| 1939 | gl->gl_aspace->i_mapping->nrpages); | ||
| 1940 | else | ||
| 1941 | print_dbg(gi, " aspace = no\n"); | ||
| 1942 | print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); | ||
| 1943 | if (gl->gl_req_gh) { | ||
| 1944 | error = dump_holder(gi, "Request", gl->gl_req_gh); | ||
| 1945 | if (error) | ||
| 1946 | goto out; | ||
| 1947 | } | ||
| 1948 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | 1687 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { |
| 1949 | error = dump_holder(gi, "Holder", gh); | 1688 | error = dump_holder(seq, gh); |
| 1950 | if (error) | 1689 | if (error) |
| 1951 | goto out; | 1690 | goto out; |
| 1952 | } | 1691 | } |
| 1953 | list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { | 1692 | if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) |
| 1954 | error = dump_holder(gi, "Waiter1", gh); | 1693 | error = glops->go_dump(seq, gl); |
| 1955 | if (error) | ||
| 1956 | goto out; | ||
| 1957 | } | ||
| 1958 | list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { | ||
| 1959 | error = dump_holder(gi, "Waiter3", gh); | ||
| 1960 | if (error) | ||
| 1961 | goto out; | ||
| 1962 | } | ||
| 1963 | if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { | ||
| 1964 | print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n", | ||
| 1965 | gl->gl_demote_state, (unsigned long long) | ||
| 1966 | (jiffies - gl->gl_demote_time)*(1000000/HZ)); | ||
| 1967 | } | ||
| 1968 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { | ||
| 1969 | if (!test_bit(GLF_LOCK, &gl->gl_flags) && | ||
| 1970 | list_empty(&gl->gl_holders)) { | ||
| 1971 | error = dump_inode(gi, gl->gl_object); | ||
| 1972 | if (error) | ||
| 1973 | goto out; | ||
| 1974 | } else { | ||
| 1975 | error = -ENOBUFS; | ||
| 1976 | print_dbg(gi, " Inode: busy\n"); | ||
| 1977 | } | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | error = 0; | ||
| 1981 | |||
| 1982 | out: | 1694 | out: |
| 1983 | spin_unlock(&gl->gl_spin); | ||
| 1984 | return error; | 1695 | return error; |
| 1985 | } | 1696 | } |
| 1986 | 1697 | ||
| 1698 | static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl) | ||
| 1699 | { | ||
| 1700 | int ret; | ||
| 1701 | spin_lock(&gl->gl_spin); | ||
| 1702 | ret = __dump_glock(seq, gl); | ||
| 1703 | spin_unlock(&gl->gl_spin); | ||
| 1704 | return ret; | ||
| 1705 | } | ||
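For illustration, the "G:" and " H:" format strings in __dump_glock() and dump_holder() above produce output along these lines for a shared-mode inode glock with a single holder (all values invented for the example):

	G: s:SH n:2/8837 f:y t:SH d:UN/0 l:0 a:1 r:4
	 H: s:SH f:H e:0 p:3256 [df] gfs2_glock_nq+0x2f4/0x3b0 [gfs2]

Here s:SH is the state from state2str(), n:2/8837 is the lock type and number, f:y is GLF_DIRTY as rendered by gflags2str(), f:H on the holder line is HIF_HOLDER from hflags2str(), and the trailing symbol is sprint_symbol()'s rendering of gh_ip.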
| 1706 | |||
| 1987 | /** | 1707 | /** |
| 1988 | * gfs2_dump_lockstate - print out the current lockstate | 1708 | * gfs2_dump_lockstate - print out the current lockstate |
| 1989 | * @sdp: the filesystem | 1709 | * @sdp: the filesystem |
| @@ -2086,7 +1806,7 @@ void gfs2_glock_exit(void) | |||
| 2086 | module_param(scand_secs, uint, S_IRUGO|S_IWUSR); | 1806 | module_param(scand_secs, uint, S_IRUGO|S_IWUSR); |
| 2087 | MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); | 1807 | MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); |
| 2088 | 1808 | ||
| 2089 | static int gfs2_glock_iter_next(struct glock_iter *gi) | 1809 | static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) |
| 2090 | { | 1810 | { |
| 2091 | struct gfs2_glock *gl; | 1811 | struct gfs2_glock *gl; |
| 2092 | 1812 | ||
| @@ -2104,7 +1824,7 @@ restart: | |||
| 2104 | gfs2_glock_put(gl); | 1824 | gfs2_glock_put(gl); |
| 2105 | if (gl && gi->gl == NULL) | 1825 | if (gl && gi->gl == NULL) |
| 2106 | gi->hash++; | 1826 | gi->hash++; |
| 2107 | while(gi->gl == NULL) { | 1827 | while (gi->gl == NULL) { |
| 2108 | if (gi->hash >= GFS2_GL_HASH_SIZE) | 1828 | if (gi->hash >= GFS2_GL_HASH_SIZE) |
| 2109 | return 1; | 1829 | return 1; |
| 2110 | read_lock(gl_lock_addr(gi->hash)); | 1830 | read_lock(gl_lock_addr(gi->hash)); |
| @@ -2122,58 +1842,34 @@ restart: | |||
| 2122 | return 0; | 1842 | return 0; |
| 2123 | } | 1843 | } |
| 2124 | 1844 | ||
| 2125 | static void gfs2_glock_iter_free(struct glock_iter *gi) | 1845 | static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi) |
| 2126 | { | 1846 | { |
| 2127 | if (gi->gl) | 1847 | if (gi->gl) |
| 2128 | gfs2_glock_put(gi->gl); | 1848 | gfs2_glock_put(gi->gl); |
| 2129 | kfree(gi); | ||
| 2130 | } | ||
| 2131 | |||
| 2132 | static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) | ||
| 2133 | { | ||
| 2134 | struct glock_iter *gi; | ||
| 2135 | |||
| 2136 | gi = kmalloc(sizeof (*gi), GFP_KERNEL); | ||
| 2137 | if (!gi) | ||
| 2138 | return NULL; | ||
| 2139 | |||
| 2140 | gi->sdp = sdp; | ||
| 2141 | gi->hash = 0; | ||
| 2142 | gi->seq = NULL; | ||
| 2143 | gi->gl = NULL; | 1849 | gi->gl = NULL; |
| 2144 | memset(gi->string, 0, sizeof(gi->string)); | ||
| 2145 | |||
| 2146 | if (gfs2_glock_iter_next(gi)) { | ||
| 2147 | gfs2_glock_iter_free(gi); | ||
| 2148 | return NULL; | ||
| 2149 | } | ||
| 2150 | |||
| 2151 | return gi; | ||
| 2152 | } | 1850 | } |
| 2153 | 1851 | ||
| 2154 | static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) | 1852 | static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) |
| 2155 | { | 1853 | { |
| 2156 | struct glock_iter *gi; | 1854 | struct gfs2_glock_iter *gi = seq->private; |
| 2157 | loff_t n = *pos; | 1855 | loff_t n = *pos; |
| 2158 | 1856 | ||
| 2159 | gi = gfs2_glock_iter_init(file->private); | 1857 | gi->hash = 0; |
| 2160 | if (!gi) | ||
| 2161 | return NULL; | ||
| 2162 | 1858 | ||
| 2163 | while(n--) { | 1859 | do { |
| 2164 | if (gfs2_glock_iter_next(gi)) { | 1860 | if (gfs2_glock_iter_next(gi)) { |
| 2165 | gfs2_glock_iter_free(gi); | 1861 | gfs2_glock_iter_free(gi); |
| 2166 | return NULL; | 1862 | return NULL; |
| 2167 | } | 1863 | } |
| 2168 | } | 1864 | } while (n--); |
| 2169 | 1865 | ||
| 2170 | return gi; | 1866 | return gi->gl; |
| 2171 | } | 1867 | } |
| 2172 | 1868 | ||
| 2173 | static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, | 1869 | static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, |
| 2174 | loff_t *pos) | 1870 | loff_t *pos) |
| 2175 | { | 1871 | { |
| 2176 | struct glock_iter *gi = iter_ptr; | 1872 | struct gfs2_glock_iter *gi = seq->private; |
| 2177 | 1873 | ||
| 2178 | (*pos)++; | 1874 | (*pos)++; |
| 2179 | 1875 | ||
| @@ -2182,24 +1878,18 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, | |||
| 2182 | return NULL; | 1878 | return NULL; |
| 2183 | } | 1879 | } |
| 2184 | 1880 | ||
| 2185 | return gi; | 1881 | return gi->gl; |
| 2186 | } | 1882 | } |
| 2187 | 1883 | ||
| 2188 | static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr) | 1884 | static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) |
| 2189 | { | 1885 | { |
| 2190 | struct glock_iter *gi = iter_ptr; | 1886 | struct gfs2_glock_iter *gi = seq->private; |
| 2191 | if (gi) | 1887 | gfs2_glock_iter_free(gi); |
| 2192 | gfs2_glock_iter_free(gi); | ||
| 2193 | } | 1888 | } |
| 2194 | 1889 | ||
| 2195 | static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) | 1890 | static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) |
| 2196 | { | 1891 | { |
| 2197 | struct glock_iter *gi = iter_ptr; | 1892 | return dump_glock(seq, iter_ptr); |
| 2198 | |||
| 2199 | gi->seq = file; | ||
| 2200 | dump_glock(gi, gi->gl); | ||
| 2201 | |||
| 2202 | return 0; | ||
| 2203 | } | 1893 | } |
| 2204 | 1894 | ||
| 2205 | static const struct seq_operations gfs2_glock_seq_ops = { | 1895 | static const struct seq_operations gfs2_glock_seq_ops = { |
| @@ -2211,17 +1901,14 @@ static const struct seq_operations gfs2_glock_seq_ops = { | |||
| 2211 | 1901 | ||
| 2212 | static int gfs2_debugfs_open(struct inode *inode, struct file *file) | 1902 | static int gfs2_debugfs_open(struct inode *inode, struct file *file) |
| 2213 | { | 1903 | { |
| 2214 | struct seq_file *seq; | 1904 | int ret = seq_open_private(file, &gfs2_glock_seq_ops, |
| 2215 | int ret; | 1905 | sizeof(struct gfs2_glock_iter)); |
| 2216 | 1906 | if (ret == 0) { | |
| 2217 | ret = seq_open(file, &gfs2_glock_seq_ops); | 1907 | struct seq_file *seq = file->private_data; |
| 2218 | if (ret) | 1908 | struct gfs2_glock_iter *gi = seq->private; |
| 2219 | return ret; | 1909 | gi->sdp = inode->i_private; |
| 2220 | 1910 | } | |
| 2221 | seq = file->private_data; | 1911 | return ret; |
| 2222 | seq->private = inode->i_private; | ||
| 2223 | |||
| 2224 | return 0; | ||
| 2225 | } | 1912 | } |
| 2226 | 1913 | ||
| 2227 | static const struct file_operations gfs2_debug_fops = { | 1914 | static const struct file_operations gfs2_debug_fops = { |
| @@ -2229,7 +1916,7 @@ static const struct file_operations gfs2_debug_fops = { | |||
| 2229 | .open = gfs2_debugfs_open, | 1916 | .open = gfs2_debugfs_open, |
| 2230 | .read = seq_read, | 1917 | .read = seq_read, |
| 2231 | .llseek = seq_lseek, | 1918 | .llseek = seq_lseek, |
| 2232 | .release = seq_release | 1919 | .release = seq_release_private, |
| 2233 | }; | 1920 | }; |
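The switch from seq_open() to seq_open_private(), paired with seq_release_private in the file_operations above, is what allowed the hand-rolled gfs2_glock_iter_init()/kfree() pair to be dropped: seq_open_private() allocates a zero-filled private buffer of the requested size and attaches it as seq->private, and seq_release_private() frees it on close. The idiom in generic form (the foo_* names are placeholders, not part of this patch):

	static int foo_open(struct inode *inode, struct file *file)
	{
		int ret = seq_open_private(file, &foo_seq_ops,
					   sizeof(struct foo_iter));
		if (ret == 0) {
			struct seq_file *seq = file->private_data;
			struct foo_iter *it = seq->private;	/* zeroed for us */
			it->ctx = inode->i_private;	/* per-open context */
		}
		return ret;
	}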
| 2234 | 1921 | ||
| 2235 | int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) | 1922 | int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) |
