author     Sunil Mushran <sunil.mushran@oracle.com>   2008-03-01 17:04:21 -0500
committer  Mark Fasheh <mark.fasheh@oracle.com>       2008-03-10 18:14:11 -0400
commit     52987e2ab456c1a828046494aac53819b1454341 (patch)
tree       c6ce00323a57c277fefd271f17d0542f35fc3055 /fs/ocfs2/dlm/dlmrecovery.c
parent     2c5c54aca9d0263f81bd4886232835ba31f7635a (diff)
ocfs2/dlm: Add missing dlm_lockres_put()s in migration path
During migration, the recovery master node may be asked to master a lockres
it may not know about. In that case, it not only has to create the lockres and
add it to the hash, but must also remember to do the _put_ corresponding to the
kref_init in dlm_init_lockres() as soon as the migration is completed; we do
not wait for dlm_purge_lockres() to do that matching put. Note that the
reference taken for being in the hash still protects the lockres from being
freed prematurely.
This patch adds that missing put, as described above, to plug a memory leak.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
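The reference accounting described above can be seen in miniature in the sketch
below. This is a user-space illustration only, not kernel code and not the ocfs2
dlm API: a C11 atomic counter stands in for the kernel's kref, and every name in
it (fake_lockres, lockres_new, lockres_get, lockres_put) is invented for the
example. It models just the three references the description talks about: the
creation reference from initialization, the reference held by the hash, and a
temporary pin taken while work on the lockres is still outstanding.

/*
 * Illustrative sketch only -- a plain user-space refcount, not the kernel's
 * kref; all identifiers here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_lockres {
        atomic_int refs;                /* 1 after creation, like kref_init() */
};

static struct fake_lockres *lockres_new(void)
{
        struct fake_lockres *res = malloc(sizeof(*res));
        if (!res)
                abort();
        atomic_init(&res->refs, 1);     /* ref A: creation */
        return res;
}

static void lockres_get(struct fake_lockres *res)
{
        atomic_fetch_add(&res->refs, 1);
}

static void lockres_put(struct fake_lockres *res)
{
        /* fetch_sub returns the old value; 1 means this was the last ref */
        if (atomic_fetch_sub(&res->refs, 1) == 1) {
                printf("last reference dropped, freeing lockres\n");
                free(res);
        }
}

int main(void)
{
        struct fake_lockres *res = lockres_new();       /* ref A: creation */

        lockres_get(res);       /* ref B: the hash now points at it */
        lockres_get(res);       /* ref C: pin it while work is outstanding */

        /* once the deferred work has been handed off, drop the pin */
        lockres_put(res);       /* drops ref C */

        /* once the migrated locks are attached, drop the creation reference
         * right away instead of leaving it for a later purge */
        lockres_put(res);       /* drops ref A */

        /* ref B (the hash) is still held, so the object stays alive */
        printf("refs remaining: %d\n", atomic_load(&res->refs));

        lockres_put(res);       /* removing it from the hash frees it */
        return 0;
}

The leak fixed by this patch roughly corresponds to ref A in the sketch never
being dropped in the newly-created-lockres path; the handler/worker changes
below arrange for that put with the extra_refs/extra_ref bookkeeping.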
Diffstat (limited to 'fs/ocfs2/dlm/dlmrecovery.c')
-rw-r--r--   fs/ocfs2/dlm/dlmrecovery.c   40
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index db17727594ab..f94683552420 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1327,6 +1327,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                 (struct dlm_migratable_lockres *)msg->buf;
         int ret = 0;
         u8 real_master;
+        u8 extra_refs = 0;
         char *buf = NULL;
         struct dlm_work_item *item = NULL;
         struct dlm_lock_resource *res = NULL;
@@ -1404,16 +1405,28 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                 __dlm_insert_lockres(dlm, res);
                 spin_unlock(&dlm->spinlock);
 
+                /* Add an extra ref for this lock-less lockres lest the
+                 * dlm_thread purges it before we get the chance to add
+                 * locks to it */
+                dlm_lockres_get(res);
+
+                /* There are three refs that need to be put.
+                 * 1. Taken above.
+                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
+                 * 3. dlm_lookup_lockres()
+                 * The first one is handled at the end of this function. The
+                 * other two are handled in the worker thread after locks have
+                 * been attached. Yes, we don't wait for purge time to match
+                 * kref_init. The lockres will still have atleast one ref
+                 * added because it is in the hash __dlm_insert_lockres() */
+                extra_refs++;
+
                 /* now that the new lockres is inserted,
                  * make it usable by other processes */
                 spin_lock(&res->spinlock);
                 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                 spin_unlock(&res->spinlock);
                 wake_up(&res->wq);
-
-                /* add an extra ref for just-allocated lockres
-                 * otherwise the lockres will be purged immediately */
-                dlm_lockres_get(res);
         }
 
         /* at this point we have allocated everything we need,
@@ -1443,12 +1456,17 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
         dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
         item->u.ml.lockres = res; /* already have a ref */
         item->u.ml.real_master = real_master;
+        item->u.ml.extra_ref = extra_refs;
         spin_lock(&dlm->work_lock);
         list_add_tail(&item->list, &dlm->work_list);
         spin_unlock(&dlm->work_lock);
         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
 
 leave:
+        /* One extra ref taken needs to be put here */
+        if (extra_refs)
+                dlm_lockres_put(res);
+
         dlm_put(dlm);
         if (ret < 0) {
                 if (buf)
@@ -1464,17 +1482,19 @@ leave:
 
 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
 {
-        struct dlm_ctxt *dlm = data;
+        struct dlm_ctxt *dlm;
         struct dlm_migratable_lockres *mres;
         int ret = 0;
         struct dlm_lock_resource *res;
         u8 real_master;
+        u8 extra_ref;
 
         dlm = item->dlm;
         mres = (struct dlm_migratable_lockres *)data;
 
         res = item->u.ml.lockres;
         real_master = item->u.ml.real_master;
+        extra_ref = item->u.ml.extra_ref;
 
         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                 /* this case is super-rare. only occurs if
@@ -1517,6 +1537,12 @@ again:
         }
 
 leave:
+        /* See comment in dlm_mig_lockres_handler() */
+        if (res) {
+                if (extra_ref)
+                        dlm_lockres_put(res);
+                dlm_lockres_put(res);
+        }
         kfree(data);
         mlog_exit(ret);
 }
@@ -1644,7 +1670,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                                 /* retry!? */
                                 BUG();
                         }
-                }
+                } else /* put.. incase we are not the master */
+                        dlm_lockres_put(res);
                 spin_unlock(&res->spinlock);
         }
         spin_unlock(&dlm->spinlock);
@@ -1921,6 +1948,7 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
                      "Recovering res %s:%.*s, is already on recovery list!\n",
                      dlm->name, res->lockname.len, res->lockname.name);
                 list_del_init(&res->recovering);
+                dlm_lockres_put(res);
         }
         /* We need to hold a reference while on the recovery list */
         dlm_lockres_get(res);