author    | David Woodhouse <dwmw2@infradead.org> | 2007-07-11 09:55:48 -0400
committer | David Woodhouse <dwmw2@infradead.org> | 2007-07-11 09:55:48 -0400
commit    | db1b39d8b860e3716620c225bc86e0ec41764e34 (patch)
tree      | 8739074db733ef767400ea92cfbfed9352ddb92d /fs/gfs2/glock.c
parent    | a6bc432e296dfa1f05d4b586ca5ca3085a2d42d7 (diff)
parent    | 4eb6bf6bfb580afaf1e1a1d30cba17a078530cf4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r-- | fs/gfs2/glock.c | 123
1 file changed, 58 insertions, 65 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1815429a2978..3f0974e1afef 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -422,11 +422,11 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 static void gfs2_holder_wake(struct gfs2_holder *gh)
 {
         clear_bit(HIF_WAIT, &gh->gh_iflags);
-        smp_mb();
+        smp_mb__after_clear_bit();
         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 }
 
-static int holder_wait(void *word)
+static int just_schedule(void *word)
 {
         schedule();
         return 0;
@@ -435,7 +435,20 @@ static int holder_wait(void *word)
 static void wait_on_holder(struct gfs2_holder *gh)
 {
         might_sleep();
-        wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
+        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+        clear_bit(GLF_DEMOTE, &gl->gl_flags);
+        smp_mb__after_clear_bit();
+        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+static void wait_on_demote(struct gfs2_glock *gl)
+{
+        might_sleep();
+        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
 }
 
 /**
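The helpers in this hunk (gfs2_holder_wake() above, plus the new gfs2_demote_wake()/wait_on_demote() pair) use the standard bit-waitqueue handshake: the waker clears the flag bit, issues a barrier so the cleared bit is visible before the wakeup, then calls wake_up_bit(); the sleeper parks in wait_on_bit() until the bit is observed clear. A minimal sketch of the same handshake, written against the wait_on_bit()/wake_up_bit() API as it exists in this era of the tree; the struct, flag bit and function names here are illustrative, not GFS2 code:

```c
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define EXAMPLE_BUSY 0                  /* illustrative flag bit */

struct example_obj {
        unsigned long flags;
};

/* action callback for wait_on_bit(): just sleep until woken */
static int example_schedule(void *word)
{
        schedule();
        return 0;
}

/* waker side: clear the bit, order the store, then wake any sleepers */
static void example_done(struct example_obj *obj)
{
        clear_bit(EXAMPLE_BUSY, &obj->flags);
        smp_mb__after_clear_bit();      /* make the clear visible before the wakeup */
        wake_up_bit(&obj->flags, EXAMPLE_BUSY);
}

/* sleeper side: block until EXAMPLE_BUSY is clear */
static void example_wait(struct example_obj *obj)
{
        might_sleep();
        wait_on_bit(&obj->flags, EXAMPLE_BUSY, example_schedule,
                    TASK_UNINTERRUPTIBLE);
}
```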
@@ -528,7 +541,7 @@ static int rq_demote(struct gfs2_glock *gl)
 
         if (gl->gl_state == gl->gl_demote_state ||
             gl->gl_state == LM_ST_UNLOCKED) {
-                clear_bit(GLF_DEMOTE, &gl->gl_flags);
+                gfs2_demote_wake(gl);
                 return 0;
         }
         set_bit(GLF_LOCK, &gl->gl_flags);
@@ -666,12 +679,22 @@ static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
  * practise: LM_ST_SHARED and LM_ST_UNLOCKED
  */
 
-static void handle_callback(struct gfs2_glock *gl, unsigned int state)
+static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
 {
         spin_lock(&gl->gl_spin);
         if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
                 gl->gl_demote_state = state;
                 gl->gl_demote_time = jiffies;
+                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
+                    gl->gl_object) {
+                        struct inode *inode = igrab(gl->gl_object);
+                        spin_unlock(&gl->gl_spin);
+                        if (inode) {
+                                d_prune_aliases(inode);
+                                iput(inode);
+                        }
+                        return;
+                }
         } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
                 gl->gl_demote_state = state;
         }
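The new branch in handle_callback() only runs for remote demote requests against iopen glocks: it pins the inode with igrab(), drops gl_spin (iput() can sleep), prunes unused dentry aliases so the inode can actually be evicted and the glock released, then drops the temporary reference. A condensed sketch of that igrab()/d_prune_aliases()/iput() sequence; the helper name is hypothetical and not part of GFS2:

```c
#include <linux/fs.h>
#include <linux/dcache.h>

/* Illustrative helper: encourage eviction of an inode that a remote node
 * has asked us to stop caching. */
static void example_drop_cached_inode(struct inode *candidate)
{
        struct inode *inode = igrab(candidate); /* NULL if the inode is already going away */

        if (inode) {
                d_prune_aliases(inode); /* drop unused dentries that pin the inode */
                iput(inode);            /* release the temporary reference */
        }
}
```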
@@ -740,7 +763,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
                 if (ret & LM_OUT_CANCELED)
                         op_done = 0;
                 else
-                        clear_bit(GLF_DEMOTE, &gl->gl_flags);
+                        gfs2_demote_wake(gl);
         } else {
                 spin_lock(&gl->gl_spin);
                 list_del_init(&gh->gh_list);
@@ -848,7 +871,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
         gfs2_assert_warn(sdp, !ret);
 
         state_change(gl, LM_ST_UNLOCKED);
-        clear_bit(GLF_DEMOTE, &gl->gl_flags);
+        gfs2_demote_wake(gl);
 
         if (glops->go_inval)
                 glops->go_inval(gl, DIO_METADATA);
@@ -1174,7 +1197,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         const struct gfs2_glock_operations *glops = gl->gl_ops;
 
         if (gh->gh_flags & GL_NOCACHE)
-                handle_callback(gl, LM_ST_UNLOCKED);
+                handle_callback(gl, LM_ST_UNLOCKED, 0);
 
         gfs2_glmutex_lock(gl);
 
@@ -1196,6 +1219,13 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         spin_unlock(&gl->gl_spin);
 }
 
+void gfs2_glock_dq_wait(struct gfs2_holder *gh)
+{
+        struct gfs2_glock *gl = gh->gh_gl;
+        gfs2_glock_dq(gh);
+        wait_on_demote(gl);
+}
+
 /**
  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
  * @gh: the holder structure
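gfs2_glock_dq_wait() combines the existing dequeue with the new wait_on_demote(), so a caller can block until the GLF_DEMOTE bit has been cleared by gfs2_demote_wake(). A hypothetical caller sketch, assuming the fs/gfs2 internal headers and the usual holder API (gfs2_holder_init()/gfs2_glock_nq()/gfs2_holder_uninit()); the function name and the trimmed error handling are illustrative only:

```c
#include "incore.h"     /* fs/gfs2 internal headers (assumed build context) */
#include "glock.h"

/* Hypothetical caller: take the glock uncached, then make sure the demote
 * queued at dequeue time has completed before returning. */
static int example_use_and_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &gh);
        error = gfs2_glock_nq(&gh);
        if (error)
                goto out;

        /* ... work done while holding the glock ... */

        gfs2_glock_dq_wait(&gh);        /* gfs2_glock_dq() + wait_on_demote() */
out:
        gfs2_holder_uninit(&gh);
        return error;
}
```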
@@ -1297,10 +1327,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
  * @num_gh: the number of structures
  * @ghs: an array of struct gfs2_holder structures
  *
- * Figure out how big an impact this function has. Either:
- * 1) Replace this code with code that calls gfs2_glock_prefetch()
- * 2) Forget async stuff and just call nq_m_sync()
- * 3) Leave it like it is
  *
  * Returns: 0 on success (all glocks acquired),
  *          errno on failure (no glocks acquired)
@@ -1308,62 +1334,28 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
 
 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 {
-        int *e;
-        unsigned int x;
-        int borked = 0, serious = 0;
+        struct gfs2_holder *tmp[4];
+        struct gfs2_holder **pph = tmp;
         int error = 0;
 
-        if (!num_gh)
+        switch(num_gh) {
+        case 0:
                 return 0;
-
-        if (num_gh == 1) {
+        case 1:
                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                 return gfs2_glock_nq(ghs);
-        }
-
-        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
-        if (!e)
-                return -ENOMEM;
-
-        for (x = 0; x < num_gh; x++) {
-                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
-                error = gfs2_glock_nq(&ghs[x]);
-                if (error) {
-                        borked = 1;
-                        serious = error;
-                        num_gh = x;
+        default:
+                if (num_gh <= 4)
                         break;
-                }
-        }
-
-        for (x = 0; x < num_gh; x++) {
-                error = e[x] = glock_wait_internal(&ghs[x]);
-                if (error) {
-                        borked = 1;
-                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
-                                serious = error;
-                }
+                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
+                if (!pph)
+                        return -ENOMEM;
         }
 
-        if (!borked) {
-                kfree(e);
-                return 0;
-        }
-
-        for (x = 0; x < num_gh; x++)
-                if (!e[x])
-                        gfs2_glock_dq(&ghs[x]);
-
-        if (serious)
-                error = serious;
-        else {
-                for (x = 0; x < num_gh; x++)
-                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
-                                           &ghs[x]);
-                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
-        }
+        error = nq_m_sync(num_gh, ghs, pph);
 
-        kfree(e);
+        if (pph != tmp)
+                kfree(pph);
 
         return error;
 }
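The rewritten gfs2_glock_nq_m() follows a common kernel idiom: keep a small fixed-size pointer array on the stack for the typical case, fall back to a heap allocation only when the caller passes more entries than the array holds, and free the buffer only if it was actually allocated; GFP_NOFS keeps the allocation from recursing back into the filesystem. A generic sketch of that idiom, with illustrative names and a threshold of 4 matching the hunk above (example_process() stands in for nq_m_sync()):

```c
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative worker: in gfs2_glock_nq_m() this role is played by
 * nq_m_sync(), which uses the pointer array as scratch space. */
static int example_process(unsigned int n, void *items, void **scratch)
{
        return 0;
}

static int example_run(unsigned int n, void *items)
{
        void *tmp[4];           /* covers the common small case with no allocation */
        void **p = tmp;
        int error;

        if (n > ARRAY_SIZE(tmp)) {
                p = kmalloc(n * sizeof(*p), GFP_NOFS); /* NOFS: may run with fs locks held */
                if (!p)
                        return -ENOMEM;
        }

        error = example_process(n, items, p);

        if (p != tmp)           /* free only what was actually allocated */
                kfree(p);
        return error;
}
```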
@@ -1456,7 +1448,7 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
         if (!gl)
                 return;
 
-        handle_callback(gl, state);
+        handle_callback(gl, state, 1);
 
         spin_lock(&gl->gl_spin);
         run_queue(gl);
@@ -1596,7 +1588,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
         if (gfs2_glmutex_trylock(gl)) {
                 if (list_empty(&gl->gl_holders) &&
                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
-                        handle_callback(gl, LM_ST_UNLOCKED);
+                        handle_callback(gl, LM_ST_UNLOCKED, 0);
                 gfs2_glmutex_unlock(gl);
         }
 
@@ -1709,7 +1701,7 @@ static void clear_glock(struct gfs2_glock *gl)
         if (gfs2_glmutex_trylock(gl)) {
                 if (list_empty(&gl->gl_holders) &&
                     gl->gl_state != LM_ST_UNLOCKED)
-                        handle_callback(gl, LM_ST_UNLOCKED);
+                        handle_callback(gl, LM_ST_UNLOCKED, 0);
                 gfs2_glmutex_unlock(gl);
         }
 }
@@ -1823,7 +1815,8 @@ static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
 
         print_dbg(gi, "  Inode:\n");
         print_dbg(gi, "    num = %llu/%llu\n",
-                  ip->i_num.no_formal_ino, ip->i_num.no_addr);
+                  (unsigned long long)ip->i_no_formal_ino,
+                  (unsigned long long)ip->i_no_addr);
         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
         print_dbg(gi, "    i_flags =");
         for (x = 0; x < 32; x++)
@@ -1909,8 +1902,8 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
         }
         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
-                          gl->gl_demote_state,
-                          (u64)(jiffies - gl->gl_demote_time)*(1000000/HZ));
+                          gl->gl_demote_state, (unsigned long long)
+                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
         }
         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&