author     Ingo Molnar <mingo@elte.hu>   2009-08-31 11:54:18 -0400
committer  Ingo Molnar <mingo@elte.hu>   2009-08-31 12:05:25 -0400
commit     bbe69aa57a7374b51242b95a54eefcf0d0393b7e (patch)
tree       c45e48d11cc9cb81a57c8c27f7243863b117cec8 /fs/gfs2/glock.c
parent     a417887637e862b434b293404f2a31ad1f282a58 (diff)
parent     326ba5010a5429a5a528b268b36a5900d4ab0eba (diff)
Merge commit 'v2.6.31-rc8' into core/locking
Merge reason: we were on -rc4, move to -rc8 before applying
a new batch of locking infrastructure changes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--   fs/gfs2/glock.c   138
1 file changed, 91 insertions(+), 47 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 297421c0427a..8b674b1f3a55 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -63,6 +63,7 @@ static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int
 static DECLARE_RWSEM(gfs2_umount_flush_sem);
 static struct dentry *gfs2_root;
 static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(lru_lock);
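The new workqueue pointer is deliberately non-static so that other GFS2 files can queue delete work on it. A matching extern declaration would be expected in fs/gfs2/glock.h; this is an assumption, since that header is outside the diffstat shown here:

        /* Assumed companion declaration in fs/gfs2/glock.h (not part of
         * this diff, which is limited to fs/gfs2/glock.c): */
        extern struct workqueue_struct *gfs2_delete_workqueue;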
@@ -167,13 +168,33 @@ static void glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
         GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
         atomic_inc(&gl->gl_ref);
 }
 
 /**
+ * demote_ok - Check to see if it's ok to unlock a glock
+ * @gl: the glock
+ *
+ * Returns: 1 if it's ok
+ */
+
+static int demote_ok(const struct gfs2_glock *gl)
+{
+        const struct gfs2_glock_operations *glops = gl->gl_ops;
+
+        if (gl->gl_state == LM_ST_UNLOCKED)
+                return 0;
+        if (!list_empty(&gl->gl_holders))
+                return 0;
+        if (glops->go_demote_ok)
+                return glops->go_demote_ok(gl);
+        return 1;
+}
+
+/**
  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
  *
@@ -181,8 +202,13 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
 
 static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
+        int may_reclaim;
+        may_reclaim = (demote_ok(gl) &&
+                       (atomic_read(&gl->gl_ref) == 1 ||
+                        (gl->gl_name.ln_type == LM_TYPE_INODE &&
+                         atomic_read(&gl->gl_ref) <= 2)));
         spin_lock(&lru_lock);
-        if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
+        if (list_empty(&gl->gl_lru) && may_reclaim) {
                 list_add_tail(&gl->gl_lru, &lru_list);
                 atomic_inc(&lru_count);
         }
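Note that demote_ok() is hoisted above this function (its identical definition is deleted further down in this diff) so the reclaim test can run at put time. The refcount arithmetic in may_reclaim can be restated as a standalone predicate; the helper name below is hypothetical, but the logic mirrors the hunk above:

        /* Hypothetical restatement of the may_reclaim test, for readability.
         * gl_ref == 1 roughly means only the hash-table reference is left;
         * an inode glock is additionally pinned by its gl_object back-pointer,
         * so it is still a reclaim candidate at gl_ref <= 2. */
        static int glock_may_reclaim(const struct gfs2_glock *gl)
        {
                if (!demote_ok(gl))
                        return 0;
                if (atomic_read(&gl->gl_ref) == 1)
                        return 1;
                return gl->gl_name.ln_type == LM_TYPE_INODE &&
                       atomic_read(&gl->gl_ref) <= 2;
        }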
@@ -190,6 +216,21 @@ static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 }
 
 /**
+ * gfs2_glock_put_nolock() - Decrement reference count on glock
+ * @gl: The glock to put
+ *
+ * This function should only be used if the caller has its own reference
+ * to the glock, in addition to the one it is dropping.
+ */
+
+void gfs2_glock_put_nolock(struct gfs2_glock *gl)
+{
+        if (atomic_dec_and_test(&gl->gl_ref))
+                GLOCK_BUG_ON(gl, 1);
+        gfs2_glock_schedule_for_reclaim(gl);
+}
+
+/**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
  *
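gfs2_glock_put_nolock() drops a reference without taking the hash-bucket lock, so it must never drop the last one; the GLOCK_BUG_ON trips if the count reaches zero. A minimal sketch of the intended caller pattern, matching the state_change() and out_sched: conversions later in this diff (the sequence is hypothetical):

        /* Sketch: the caller still owns another reference, so gl_ref cannot
         * reach zero inside the nolock put. */
        gfs2_glock_hold(gl);            /* reference A, e.g. for queued work */
        /* ... caller already holds reference B ... */
        gfs2_glock_put_nolock(gl);      /* drops A; B keeps gl alive */
        gfs2_glock_put(gl);             /* drops B; may free the glock */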
@@ -214,9 +255,9 @@ int gfs2_glock_put(struct gfs2_glock *gl)
                 rv = 1;
                 goto out;
         }
-        /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
-        if (atomic_read(&gl->gl_ref) == 2)
-                gfs2_glock_schedule_for_reclaim(gl);
+        spin_lock(&gl->gl_spin);
+        gfs2_glock_schedule_for_reclaim(gl);
+        spin_unlock(&gl->gl_spin);
         write_unlock(gl_lock_addr(gl->gl_hash));
 out:
         return rv;
@@ -398,7 +439,7 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
                 if (held2)
                         gfs2_glock_hold(gl);
                 else
-                        gfs2_glock_put(gl);
+                        gfs2_glock_put_nolock(gl);
         }
 
         gl->gl_state = new_state;
@@ -633,12 +674,35 @@ out:
 out_sched:
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                gfs2_glock_put(gl);
+                gfs2_glock_put_nolock(gl);
 out_unlock:
         clear_bit(GLF_LOCK, &gl->gl_flags);
         goto out;
 }
 
+static void delete_work_func(struct work_struct *work)
+{
+        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+        struct gfs2_sbd *sdp = gl->gl_sbd;
+        struct gfs2_inode *ip = NULL;
+        struct inode *inode;
+        u64 no_addr = 0;
+
+        spin_lock(&gl->gl_spin);
+        ip = (struct gfs2_inode *)gl->gl_object;
+        if (ip)
+                no_addr = ip->i_no_addr;
+        spin_unlock(&gl->gl_spin);
+        if (ip) {
+                inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
+                if (inode) {
+                        d_prune_aliases(inode);
+                        iput(inode);
+                }
+        }
+        gfs2_glock_put(gl);
+}
+
 static void glock_work_func(struct work_struct *work)
 {
         unsigned long delay = 0;
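delete_work_func() snapshots the inode number under gl_spin, performs the dcache pruning and iput() outside the spinlock, and finally drops the reference that was taken when the work was queued. The queueing side would presumably follow the usual hold-then-queue pattern; the fragment below is a sketch only, since the actual caller would live outside this diffstat (e.g. in fs/gfs2/glops.c), and gfs2_glock_put_nolock() is assumed because the queueing site would already hold gl->gl_spin and a further reference:

        /* Assumed queueing pattern for gl_delete (sketch, not in this diff):
         * take a reference for the work item, and give it back immediately
         * if the item was already pending. */
        gfs2_glock_hold(gl);
        if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                gfs2_glock_put_nolock(gl);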
@@ -717,6 +781,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         gl->gl_sbd = sdp;
         gl->gl_aspace = NULL;
         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+        INIT_WORK(&gl->gl_delete, delete_work_func);
 
         /* If this glock protects actual on-disk data or metadata blocks,
            create a VFS inode to manage the pages/buffers holding them. */
@@ -858,6 +923,8 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                    gl->gl_demote_state != state) {
                 gl->gl_demote_state = LM_ST_UNLOCKED;
         }
+        if (gl->gl_ops->go_callback)
+                gl->gl_ops->go_callback(gl);
         trace_gfs2_demote_rq(gl);
 }
 
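handle_callback() now gives the per-type glock operations a chance to react to a remote demote request via the new go_callback hook. The wiring below is an assumption about how the hook is declared and consumed; the member would presumably be added in fs/gfs2/incore.h and the handler in fs/gfs2/glops.c, both outside this diffstat:

        /* Assumed new member in struct gfs2_glock_operations (sketch): */
        void (*go_callback)(struct gfs2_glock *gl);

        /* and a per-type glops table opting in (hypothetical handler name): */
        const struct gfs2_glock_operations gfs2_iopen_glops = {
                /* ... existing operations ... */
                .go_callback = iopen_go_callback,
        };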
@@ -1274,33 +1341,12 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
         gfs2_glock_put(gl);
 }
 
-/**
- * demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int demote_ok(const struct gfs2_glock *gl)
-{
-        const struct gfs2_glock_operations *glops = gl->gl_ops;
-
-        if (gl->gl_state == LM_ST_UNLOCKED)
-                return 0;
-        if (!list_empty(&gl->gl_holders))
-                return 0;
-        if (glops->go_demote_ok)
-                return glops->go_demote_ok(gl);
-        return 1;
-}
-
 
 static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 {
         struct gfs2_glock *gl;
         int may_demote;
         int nr_skipped = 0;
-        int got_ref = 0;
         LIST_HEAD(skipped);
 
         if (nr == 0)
@@ -1315,37 +1361,29 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
                 list_del_init(&gl->gl_lru);
                 atomic_dec(&lru_count);
 
+                /* Check if glock is about to be freed */
+                if (atomic_read(&gl->gl_ref) == 0)
+                        continue;
+
                 /* Test for being demotable */
                 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                         gfs2_glock_hold(gl);
-                        got_ref = 1;
                         spin_unlock(&lru_lock);
                         spin_lock(&gl->gl_spin);
                         may_demote = demote_ok(gl);
-                        spin_unlock(&gl->gl_spin);
-                        clear_bit(GLF_LOCK, &gl->gl_flags);
                         if (may_demote) {
                                 handle_callback(gl, LM_ST_UNLOCKED, 0);
                                 nr--;
-                                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                                        gfs2_glock_put(gl);
-                                got_ref = 0;
                         }
+                        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+                                gfs2_glock_put_nolock(gl);
+                        spin_unlock(&gl->gl_spin);
+                        clear_bit(GLF_LOCK, &gl->gl_flags);
                         spin_lock(&lru_lock);
-                        if (may_demote)
-                                continue;
-                }
-                if (list_empty(&gl->gl_lru) &&
-                    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
-                        nr_skipped++;
-                        list_add(&gl->gl_lru, &skipped);
-                }
-                if (got_ref) {
-                        spin_unlock(&lru_lock);
-                        gfs2_glock_put(gl);
-                        spin_lock(&lru_lock);
-                        got_ref = 0;
+                        continue;
                 }
+                nr_skipped++;
+                list_add(&gl->gl_lru, &skipped);
         }
         list_splice(&skipped, &lru_list);
         atomic_add(nr_skipped, &lru_count);
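The reworked shrinker loop removes the got_ref bookkeeping entirely: the reference taken with gfs2_glock_hold() is now always handed to the queued work item, or returned via gfs2_glock_put_nolock() while gl_spin is still held if queueing fails. The hand-off reduces to this fragment, restated from the hunk above and assuming glock_work_func() ends with a matching gfs2_glock_put():

        /* Reference hand-off in the new loop (restatement):
         * hold -> queue work (the work's final gfs2_glock_put() balances
         * the hold), or put_nolock here if the work was already queued. */
        gfs2_glock_hold(gl);
        /* ... demote decision runs under gl->gl_spin ... */
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put_nolock(gl);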
@@ -1727,6 +1765,11 @@ int __init gfs2_glock_init(void)
         glock_workqueue = create_workqueue("glock_workqueue");
         if (IS_ERR(glock_workqueue))
                 return PTR_ERR(glock_workqueue);
+        gfs2_delete_workqueue = create_workqueue("delete_workqueue");
+        if (IS_ERR(gfs2_delete_workqueue)) {
+                destroy_workqueue(glock_workqueue);
+                return PTR_ERR(gfs2_delete_workqueue);
+        }
 
         register_shrinker(&glock_shrinker);
 
@@ -1737,6 +1780,7 @@ void gfs2_glock_exit(void)
 {
         unregister_shrinker(&glock_shrinker);
         destroy_workqueue(glock_workqueue);
+        destroy_workqueue(gfs2_delete_workqueue);
 }
 
 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)