author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-03-28 17:02:03 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-03-28 17:02:03 -0400
commit     e5c465f5d957ab581bc79d3ce981281fe73021a3 (patch)
tree       d37f808b94d0cb46e88108d422da2c22deb721b7
parent     d0a9af809124c432297a7c4a3bcf98cbfdb4036a (diff)
parent     2f5bf1f2d061dea5146aa283685ce2b00cea2f3d (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2:
  ocfs2_dlm: Check for migrateable lockres in dlm_empty_lockres()
  ocfs2_dlm: Fix lockres ref counting bug
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c |  8
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 99
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 10
3 files changed, 73 insertions(+), 44 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6087c4749fee..c558442a0b44 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -138,8 +138,10 @@ static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 
 void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
 {
-        hlist_del_init(&lockres->hash_node);
-        dlm_lockres_put(lockres);
+        if (!hlist_unhashed(&lockres->hash_node)) {
+                hlist_del_init(&lockres->hash_node);
+                dlm_lockres_put(lockres);
+        }
 }
 
 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
@@ -655,6 +657,8 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
         dlm_kick_thread(dlm, NULL);
 
         while (dlm_migrate_all_locks(dlm)) {
+                /* Give dlm_thread time to purge the lockres' */
+                msleep(500);
                 mlog(0, "%s: more migration to do\n", dlm->name);
         }
         dlm_mark_domain_leaving(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9229e04362f6..6edffca99d98 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2424,6 +2424,57 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
         dlm_lockres_put(res);
 }
 
+/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
+ * if not. If 0, numlocks is set to the number of locks in the lockres.
+ */
+static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
+                                      struct dlm_lock_resource *res,
+                                      int *numlocks)
+{
+        int ret;
+        int i;
+        int count = 0;
+        struct list_head *queue, *iter;
+        struct dlm_lock *lock;
+
+        assert_spin_locked(&res->spinlock);
+
+        ret = -EINVAL;
+        if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+                mlog(0, "cannot migrate lockres with unknown owner!\n");
+                goto leave;
+        }
+
+        if (res->owner != dlm->node_num) {
+                mlog(0, "cannot migrate lockres this node doesn't own!\n");
+                goto leave;
+        }
+
+        ret = 0;
+        queue = &res->granted;
+        for (i = 0; i < 3; i++) {
+                list_for_each(iter, queue) {
+                        lock = list_entry(iter, struct dlm_lock, list);
+                        ++count;
+                        if (lock->ml.node == dlm->node_num) {
+                                mlog(0, "found a lock owned by this node still "
+                                     "on the %s queue! will not migrate this "
+                                     "lockres\n", (i == 0 ? "granted" :
+                                                   (i == 1 ? "converting" :
+                                                    "blocked")));
+                                ret = -ENOTEMPTY;
+                                goto leave;
+                        }
+                }
+                queue++;
+        }
+
+        *numlocks = count;
+        mlog(0, "migrateable lockres having %d locks\n", *numlocks);
+
+leave:
+        return ret;
+}
 
 /*
  * DLM_MIGRATE_LOCKRES
@@ -2437,14 +2488,12 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
         struct dlm_master_list_entry *mle = NULL;
         struct dlm_master_list_entry *oldmle = NULL;
         struct dlm_migratable_lockres *mres = NULL;
-        int ret = -EINVAL;
+        int ret = 0;
         const char *name;
         unsigned int namelen;
         int mle_added = 0;
-        struct list_head *queue, *iter;
-        int i;
-        struct dlm_lock *lock;
-        int empty = 1, wake = 0;
+        int numlocks;
+        int wake = 0;
 
         if (!dlm_grab(dlm))
                 return -EINVAL;
@@ -2458,42 +2507,16 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
          * ensure this lockres is a proper candidate for migration
          */
         spin_lock(&res->spinlock);
-        if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
-                mlog(0, "cannot migrate lockres with unknown owner!\n");
-                spin_unlock(&res->spinlock);
-                goto leave;
-        }
-        if (res->owner != dlm->node_num) {
-                mlog(0, "cannot migrate lockres this node doesn't own!\n");
+        ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+        if (ret < 0) {
                 spin_unlock(&res->spinlock);
                 goto leave;
         }
-        mlog(0, "checking queues...\n");
-        queue = &res->granted;
-        for (i=0; i<3; i++) {
-                list_for_each(iter, queue) {
-                        lock = list_entry (iter, struct dlm_lock, list);
-                        empty = 0;
-                        if (lock->ml.node == dlm->node_num) {
-                                mlog(0, "found a lock owned by this node "
-                                     "still on the %s queue! will not "
-                                     "migrate this lockres\n",
-                                     i==0 ? "granted" :
-                                     (i==1 ? "converting" : "blocked"));
-                                spin_unlock(&res->spinlock);
-                                ret = -ENOTEMPTY;
-                                goto leave;
-                        }
-                }
-                queue++;
-        }
-        mlog(0, "all locks on this lockres are nonlocal. continuing\n");
         spin_unlock(&res->spinlock);
 
         /* no work to do */
-        if (empty) {
+        if (numlocks == 0) {
                 mlog(0, "no locks were found on this lockres! done!\n");
-                ret = 0;
                 goto leave;
         }
 
@@ -2729,6 +2752,7 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
         int ret;
         int lock_dropped = 0;
+        int numlocks;
 
         spin_lock(&res->spinlock);
         if (res->owner != dlm->node_num) {
@@ -2740,6 +2764,13 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
                 spin_unlock(&res->spinlock);
                 goto leave;
         }
+
+        /* No need to migrate a lockres having no locks */
+        ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
+        if (ret >= 0 && numlocks == 0) {
+                spin_unlock(&res->spinlock);
+                goto leave;
+        }
         spin_unlock(&res->spinlock);
 
         /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 6421a8fae1de..2b264c6ba039 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -256,20 +256,14 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                         break;
                 }
 
-                mlog(0, "removing lockres %.*s:%p from purgelist\n",
-                     lockres->lockname.len, lockres->lockname.name, lockres);
-                list_del_init(&lockres->purge);
-                dlm_lockres_put(lockres);
-                dlm->purge_count--;
+                dlm_lockres_get(lockres);
 
                 /* This may drop and reacquire the dlm spinlock if it
                  * has to do migration. */
-                mlog(0, "calling dlm_purge_lockres!\n");
-                dlm_lockres_get(lockres);
                 if (dlm_purge_lockres(dlm, lockres))
                         BUG();
+
                 dlm_lockres_put(lockres);
-                mlog(0, "DONE calling dlm_purge_lockres!\n");
 
                 /* Avoid adding any scheduling latencies */
                 cond_resched_lock(&dlm->spinlock);