Diffstat (limited to 'fs/gfs2/glock.c')
 -rw-r--r--  fs/gfs2/glock.c | 303
 1 file changed, 156 insertions(+), 147 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c962283d4e7f..6b983aef785d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -40,6 +40,7 @@
 #include "quota.h"
 #include "super.h"
 #include "util.h"
+#include "bmap.h"
 
 struct gfs2_gl_hash_bucket {
 	struct hlist_head hb_list;
@@ -61,9 +62,10 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
-static struct task_struct *scand_process;
-static unsigned int scand_secs = 5;
 static struct workqueue_struct *glock_workqueue;
+static LIST_HEAD(lru_list);
+static atomic_t lru_count = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(lru_lock);
 
 #define GFS2_GL_HASH_SHIFT      15
 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
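
These three file-scope objects replace the per-superblock scand/reclaim machinery: a single LRU list shared by all mounts, an atomic counter that can be read cheaply without the lock (the shrinker's return value below does exactly that), and a spinlock that guards both the list and each glock's membership test. A minimal userspace analogue of the invariant — names illustrative, a pthread mutex standing in for the spinlock; this is a sketch of the pattern, not GFS2 code:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct node { struct node *prev, *next; };   /* intrusive, like list_head */

	static struct node lru_list = { &lru_list, &lru_list };
	static atomic_int lru_count;
	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

	/* list_empty(): a detached node points at itself */
	static bool detached(const struct node *n) { return n->next == n; }

	static void lru_add_tail(struct node *n)
	{
		pthread_mutex_lock(&lru_lock);
		if (detached(n)) {              /* membership test under the same lock */
			n->prev = lru_list.prev;
			n->next = &lru_list;
			lru_list.prev->next = n;
			lru_list.prev = n;
			atomic_fetch_add(&lru_count, 1);  /* counter mirrors list length */
		}
		pthread_mutex_unlock(&lru_lock);
	}

	int main(void)
	{
		struct node a = { &a, &a };
		lru_add_tail(&a);
		lru_add_tail(&a);               /* second add is a no-op, count stays 1 */
		printf("lru_count = %d\n", atomic_load(&lru_count));
		return 0;
	}
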
@@ -174,6 +176,22 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 }
 
 /**
+ * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
+ * @gl: the glock
+ *
+ */
+
+static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
+{
+	spin_lock(&lru_lock);
+	if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
+		list_add_tail(&gl->gl_lru, &lru_list);
+		atomic_inc(&lru_count);
+	}
+	spin_unlock(&lru_lock);
+}
+
+/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
 *
@@ -187,14 +205,23 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	if (atomic_dec_and_test(&gl->gl_ref)) {
 		hlist_del(&gl->gl_list);
 		write_unlock(gl_lock_addr(gl->gl_hash));
+		spin_lock(&lru_lock);
+		if (!list_empty(&gl->gl_lru)) {
+			list_del_init(&gl->gl_lru);
+			atomic_dec(&lru_count);
+		}
+		spin_unlock(&lru_lock);
 		GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
-		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
+		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
 		goto out;
 	}
 	write_unlock(gl_lock_addr(gl->gl_hash));
+	/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
+	if (atomic_read(&gl->gl_ref) == 2)
+		gfs2_glock_schedule_for_reclaim(gl);
 out:
 	return rv;
 }
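
The new check after the unlock is a cheap idleness heuristic: per the patch's own comment, a cached glock holds one reference for being hashed and one for being in a state other than LM_ST_UNLOCKED, so a count of exactly 2 means nothing else has it pinned and it is worth queueing for reclaim. The read is deliberately racy; a stale answer only puts a glock on the LRU a little early, and gfs2_shrink_glock_memory() below revalidates with demote_ok() under gl_spin before demoting anything. A hedged userspace sketch of the same idea (names illustrative):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define BASE_REFS 2   /* 1 for being hashed, 1 for state != UNLOCKED */

	struct obj { atomic_int ref; };

	static bool looks_idle(struct obj *o)
	{
		/* Racy on purpose: a false positive is harmless because the
		 * reclaimer re-checks under the object's own lock. */
		return atomic_load(&o->ref) == BASE_REFS;
	}

	int main(void)
	{
		struct obj o = { 3 };                   /* base refs + one holder */
		printf("idle: %d\n", looks_idle(&o));   /* 0 */
		atomic_fetch_sub(&o.ref, 1);            /* holder drops its ref */
		printf("idle: %d\n", looks_idle(&o));   /* 1 -> queue for reclaim */
		return 0;
	}
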
@@ -289,10 +316,13 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
- * Returns: true if there is a blocked holder at the head of the list
+ * Returns: 1 if there is a blocked holder at the head of the list, or 2
+ *          if a type specific operation is underway.
 */
 
 static int do_promote(struct gfs2_glock *gl)
+__releases(&gl->gl_spin)
+__acquires(&gl->gl_spin)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh, *tmp;
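
The __releases/__acquires pair added here (and on do_xmote(), run_queue() and add_to_queue() below) is for the sparse static checker: it documents that the function is entered with gl_spin held, may drop and retake it internally, and returns with it held again. Under a normal compile the annotations expand to nothing; the definitions below mirror the kernel's <linux/compiler.h> ones:

	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x, 0, 1)))
	# define __releases(x)	__attribute__((context(x, 1, 0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif
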
@@ -310,6 +340,8 @@ restart:
 				ret = glops->go_lock(gh);
 				spin_lock(&gl->gl_spin);
 				if (ret) {
+					if (ret == 1)
+						return 2;
 					gh->gh_error = ret;
 					list_del_init(&gh->gh_list);
 					gfs2_holder_wake(gh);
@@ -414,6 +446,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_holder *gh;
 	unsigned state = ret & LM_OUT_ST_MASK;
+	int rv;
 
 	spin_lock(&gl->gl_spin);
 	state_change(gl, state);
@@ -468,7 +501,6 @@ retry:
 	gfs2_demote_wake(gl);
 	if (state != LM_ST_UNLOCKED) {
 		if (glops->go_xmote_bh) {
-			int rv;
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
 			if (rv == -EAGAIN)
@@ -479,10 +511,13 @@ retry:
 				goto out;
 			}
 		}
-		do_promote(gl);
+		rv = do_promote(gl);
+		if (rv == 2)
+			goto out_locked;
 	}
 out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
+out_locked:
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_put(gl);
 }
@@ -511,6 +546,8 @@ static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
 */
 
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
+__releases(&gl->gl_spin)
+__acquires(&gl->gl_spin)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -576,8 +613,11 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
 */
 
 static void run_queue(struct gfs2_glock *gl, const int nonblock)
+__releases(&gl->gl_spin)
+__acquires(&gl->gl_spin)
 {
 	struct gfs2_holder *gh = NULL;
+	int ret;
 
 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
 		return;
@@ -596,8 +636,11 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock)
 	} else {
 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			gfs2_demote_wake(gl);
-		if (do_promote(gl) == 0)
+		ret = do_promote(gl);
+		if (ret == 0)
 			goto out;
+		if (ret == 2)
+			return;
 		gh = find_first_waiter(gl);
 		gl->gl_target = gh->gh_state;
 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
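
With this change do_promote() has three outcomes instead of two, and run_queue() treats each differently: 0 means every waiter was granted and run_queue() is finished; 1 means a blocked holder heads the list, so the glock must be demoted/promoted via do_xmote(); 2 means a glops go_lock callback has started a type-specific operation, so run_queue() returns with GLF_LOCK still set and the completion path (gfs2_glock_finish_truncate(), added later in this patch) clears the bit and re-runs the queue. A sketch of the contract with illustrative names — the kernel code uses bare integers:

	enum promote_result {
		PROMOTE_DONE    = 0,  /* nothing blocked; caller may proceed     */
		PROMOTE_BLOCKED = 1,  /* blocked holder at head; demote first    */
		PROMOTE_ASYNC   = 2,  /* type-specific op underway; return with  */
		                      /* GLF_LOCK held, completion re-runs queue */
	};
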
@@ -820,7 +863,7 @@ static void wait_on_demote(struct gfs2_glock *gl)
 */
 
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
-			    int remote, unsigned long delay)
+			    unsigned long delay)
 {
 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 
@@ -828,9 +871,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
-		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
-		    gl->gl_object)
-			gfs2_glock_schedule_for_reclaim(gl);
 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
 			gl->gl_demote_state != state) {
 		gl->gl_demote_state = LM_ST_UNLOCKED;
@@ -877,6 +917,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 */
 
 static inline void add_to_queue(struct gfs2_holder *gh)
+__releases(&gl->gl_spin)
+__acquires(&gl->gl_spin)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -998,7 +1040,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 
 	spin_lock(&gl->gl_spin);
 	if (gh->gh_flags & GL_NOCACHE)
-		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0);
 
 	list_del_init(&gh->gh_list);
 	if (find_first_holder(gl) == NULL) {
@@ -1269,12 +1311,26 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
 		delay = gl->gl_ops->go_min_hold_time;
 
 	spin_lock(&gl->gl_spin);
-	handle_callback(gl, state, 1, delay);
+	handle_callback(gl, state, delay);
 	spin_unlock(&gl->gl_spin);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
 }
 
+static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
+{
+	struct gfs2_jdesc *jd;
+
+	spin_lock(&sdp->sd_jindex_spin);
+	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
+		if (jd->jd_jid != jid)
+			continue;
+		jd->jd_dirty = 1;
+		break;
+	}
+	spin_unlock(&sdp->sd_jindex_spin);
+}
+
 /**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
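
gfs2_jdesc_make_dirty() is a find-and-flag walk over the spinlock-protected journal index; presumably it is called from the recovery case of gfs2_glock_cb(), whose body is not among the hunks shown, to mark a journal so a daemon can recover it later. The shape of the pattern in a self-contained userspace sketch (hypothetical names, an array standing in for the list):

	#include <pthread.h>
	#include <stdio.h>

	struct jdesc { unsigned int jid; int dirty; };  /* cut-down stand-in */

	static struct jdesc jindex[] = { { 0, 0 }, { 1, 0 }, { 2, 0 } };
	static pthread_mutex_t jindex_lock = PTHREAD_MUTEX_INITIALIZER;

	static void make_dirty(unsigned int jid)
	{
		pthread_mutex_lock(&jindex_lock);
		for (size_t i = 0; i < sizeof(jindex) / sizeof(jindex[0]); i++) {
			if (jindex[i].jid != jid)
				continue;
			jindex[i].dirty = 1;    /* flag it; a consumer acts later */
			break;
		}
		pthread_mutex_unlock(&jindex_lock);
	}

	int main(void)
	{
		make_dirty(1);
		printf("journal 1 dirty: %d\n", jindex[1].dirty);  /* 1 */
		return 0;
	}
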
@@ -1338,80 +1394,83 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
 * Returns: 1 if it's ok
 */
 
-static int demote_ok(struct gfs2_glock *gl)
+static int demote_ok(const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	int demote = 1;
-
-	if (test_bit(GLF_STICKY, &gl->gl_flags))
-		demote = 0;
-	else if (glops->go_demote_ok)
-		demote = glops->go_demote_ok(gl);
-
-	return demote;
-}
-
-/**
- * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
- * @gl: the glock
- *
- */
-
-void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
-	spin_lock(&sdp->sd_reclaim_lock);
-	if (list_empty(&gl->gl_reclaim)) {
-		gfs2_glock_hold(gl);
-		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
-		atomic_inc(&sdp->sd_reclaim_count);
-		spin_unlock(&sdp->sd_reclaim_lock);
-		wake_up(&sdp->sd_reclaim_wq);
-	} else
-		spin_unlock(&sdp->sd_reclaim_lock);
+	if (gl->gl_state == LM_ST_UNLOCKED)
+		return 0;
+	if (!list_empty(&gl->gl_holders))
+		return 0;
+	if (glops->go_demote_ok)
+		return glops->go_demote_ok(gl);
+	return 1;
 }
 
-/**
- * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
- * @sdp: the filesystem
- *
- * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
- * different glock and we notice that there are a lot of glocks in the
- * reclaim list.
- *
- */
 
-void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
+static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 {
 	struct gfs2_glock *gl;
-	int done_callback = 0;
+	int may_demote;
+	int nr_skipped = 0;
+	int got_ref = 0;
+	LIST_HEAD(skipped);
 
-	spin_lock(&sdp->sd_reclaim_lock);
-	if (list_empty(&sdp->sd_reclaim_list)) {
-		spin_unlock(&sdp->sd_reclaim_lock);
-		return;
-	}
-	gl = list_entry(sdp->sd_reclaim_list.next,
-			struct gfs2_glock, gl_reclaim);
-	list_del_init(&gl->gl_reclaim);
-	spin_unlock(&sdp->sd_reclaim_lock);
+	if (nr == 0)
+		goto out;
 
-	atomic_dec(&sdp->sd_reclaim_count);
-	atomic_inc(&sdp->sd_reclaimed);
+	if (!(gfp_mask & __GFP_FS))
+		return -1;
 
-	spin_lock(&gl->gl_spin);
-	if (find_first_holder(gl) == NULL &&
-	    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
-		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
-		done_callback = 1;
+	spin_lock(&lru_lock);
+	while(nr && !list_empty(&lru_list)) {
+		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
+
+		/* Test for being demotable */
+		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+			gfs2_glock_hold(gl);
+			got_ref = 1;
+			spin_unlock(&lru_lock);
+			spin_lock(&gl->gl_spin);
+			may_demote = demote_ok(gl);
+			spin_unlock(&gl->gl_spin);
+			clear_bit(GLF_LOCK, &gl->gl_flags);
+			if (may_demote) {
+				handle_callback(gl, LM_ST_UNLOCKED, 0);
+				nr--;
+				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+					gfs2_glock_put(gl);
+			}
+			spin_lock(&lru_lock);
+			if (may_demote)
+				continue;
+		}
+		if (list_empty(&gl->gl_lru) &&
+		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
+			nr_skipped++;
+			list_add(&gl->gl_lru, &skipped);
+		}
+		if (got_ref) {
+			spin_unlock(&lru_lock);
+			gfs2_glock_put(gl);
+			spin_lock(&lru_lock);
+			got_ref = 0;
+		}
 	}
-	spin_unlock(&gl->gl_spin);
-	if (!done_callback ||
-	    queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-		gfs2_glock_put(gl);
+	list_splice(&skipped, &lru_list);
+	atomic_add(nr_skipped, &lru_count);
+	spin_unlock(&lru_lock);
+out:
+	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
 }
 
+static struct shrinker glock_shrinker = {
+	.shrink = gfs2_shrink_glock_memory,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /**
 * examine_bucket - Call a function for glock in a hash bucket
 * @examiner: the function
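
The shrinker follows the old-style shrink_slab contract of this era: nr == 0 is a pure query, a GFP mask without __GFP_FS returns -1 ("cannot make progress here") to avoid filesystem recursion during reclaim, and the return value advertises the reclaimable pool as lru_count scaled by sysctl_vfs_cache_pressure (default 100). Because the division runs first, pools under 100 glocks advertise as zero. Checking the arithmetic with a small standalone program:

	#include <stdio.h>

	/* Mirrors: (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure */
	static long advertised(long lru_count, long vfs_cache_pressure)
	{
		return (lru_count / 100) * vfs_cache_pressure;
	}

	int main(void)
	{
		printf("%ld\n", advertised(5000, 100)); /* 5000: default pressure reports the count */
		printf("%ld\n", advertised(5000, 200)); /* 10000: higher pressure, reclaim harder  */
		printf("%ld\n", advertised(99, 100));   /* 0: integer division hides small pools  */
		return 0;
	}
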
@@ -1457,26 +1516,6 @@ out:
 }
 
 /**
- * scan_glock - look at a glock and see if we can reclaim it
- * @gl: the glock to look at
- *
- */
-
-static void scan_glock(struct gfs2_glock *gl)
-{
-	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
-		return;
-	if (test_bit(GLF_LOCK, &gl->gl_flags))
-		return;
-
-	spin_lock(&gl->gl_spin);
-	if (find_first_holder(gl) == NULL &&
-	    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
-		gfs2_glock_schedule_for_reclaim(gl);
-	spin_unlock(&gl->gl_spin);
-}
-
-/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
@@ -1484,23 +1523,16 @@ static void scan_glock(struct gfs2_glock *gl)
 
 static void clear_glock(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int released;
-
-	spin_lock(&sdp->sd_reclaim_lock);
-	if (!list_empty(&gl->gl_reclaim)) {
-		list_del_init(&gl->gl_reclaim);
-		atomic_dec(&sdp->sd_reclaim_count);
-		spin_unlock(&sdp->sd_reclaim_lock);
-		released = gfs2_glock_put(gl);
-		gfs2_assert(sdp, !released);
-	} else {
-		spin_unlock(&sdp->sd_reclaim_lock);
+	spin_lock(&lru_lock);
+	if (!list_empty(&gl->gl_lru)) {
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
 	}
+	spin_unlock(&lru_lock);
 
 	spin_lock(&gl->gl_spin);
 	if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
-		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0);
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -1548,6 +1580,20 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	}
 }
 
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
+{
+	struct gfs2_glock *gl = ip->i_gl;
+	int ret;
+
+	ret = gfs2_truncatei_resume(ip);
+	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
+
+	spin_lock(&gl->gl_spin);
+	clear_bit(GLF_LOCK, &gl->gl_flags);
+	run_queue(gl, 1);
+	spin_unlock(&gl->gl_spin);
+}
+
 static const char *state2str(unsigned state)
 {
 	switch(state) {
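
gfs2_glock_finish_truncate() is the completion half of the do_promote() == 2 protocol: it resumes the interrupted truncate via gfs2_truncatei_resume() (hence the new #include "bmap.h" at the top of this file), then clears the GLF_LOCK bit that run_queue() deliberately left set and retries the waiters. The producer half lives in the glops, outside this diff; a hedged sketch of what such a go_lock might look like, where both helpers are hypothetical and only the return-value convention is taken from this patch:

	static int example_go_lock(struct gfs2_holder *gh)
	{
		struct gfs2_inode *ip = gh->gh_gl->gl_object;

		if (truncate_in_progress(ip)) {   /* hypothetical check            */
			wake_truncate_daemon(ip); /* hypothetical hand-off; it     */
			                          /* eventually ends by calling    */
			                          /* gfs2_glock_finish_truncate()  */
			return 1;                 /* do_promote() maps this to 2   */
		}
		return 0;
	}
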
| 1551 | static const char *state2str(unsigned state) | 1597 | static const char *state2str(unsigned state) |
| 1552 | { | 1598 | { |
| 1553 | switch(state) { | 1599 | switch(state) { |
| @@ -1623,8 +1669,6 @@ static const char *gflags2str(char *buf, const unsigned long *gflags) | |||
| 1623 | char *p = buf; | 1669 | char *p = buf; |
| 1624 | if (test_bit(GLF_LOCK, gflags)) | 1670 | if (test_bit(GLF_LOCK, gflags)) |
| 1625 | *p++ = 'l'; | 1671 | *p++ = 'l'; |
| 1626 | if (test_bit(GLF_STICKY, gflags)) | ||
| 1627 | *p++ = 's'; | ||
| 1628 | if (test_bit(GLF_DEMOTE, gflags)) | 1672 | if (test_bit(GLF_DEMOTE, gflags)) |
| 1629 | *p++ = 'D'; | 1673 | *p++ = 'D'; |
| 1630 | if (test_bit(GLF_PENDING_DEMOTE, gflags)) | 1674 | if (test_bit(GLF_PENDING_DEMOTE, gflags)) |
@@ -1743,34 +1787,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 	return error;
 }
 
-/**
- * gfs2_scand - Look for cached glocks and inodes to toss from memory
- * @sdp: Pointer to GFS2 superblock
- *
- * One of these daemons runs, finding candidates to add to sd_reclaim_list.
- * See gfs2_glockd()
- */
-
-static int gfs2_scand(void *data)
-{
-	unsigned x;
-	unsigned delay;
-
-	while (!kthread_should_stop()) {
-		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-			examine_bucket(scan_glock, NULL, x);
-		if (freezing(current))
-			refrigerator();
-		delay = scand_secs;
-		if (delay < 1)
-			delay = 1;
-		schedule_timeout_interruptible(delay * HZ);
-	}
-
-	return 0;
-}
-
-
 
 int __init gfs2_glock_init(void)
 {
@@ -1784,28 +1800,21 @@ int __init gfs2_glock_init(void)
 	}
 #endif
 
-	scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
-	if (IS_ERR(scand_process))
-		return PTR_ERR(scand_process);
-
 	glock_workqueue = create_workqueue("glock_workqueue");
-	if (IS_ERR(glock_workqueue)) {
-		kthread_stop(scand_process);
+	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
-	}
+
+	register_shrinker(&glock_shrinker);
 
 	return 0;
 }
 
 void gfs2_glock_exit(void)
 {
+	unregister_shrinker(&glock_shrinker);
 	destroy_workqueue(glock_workqueue);
-	kthread_stop(scand_process);
 }
 
-module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
-
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
 {
 	struct gfs2_glock *gl;