author	Steven Whitehouse <swhiteho@redhat.com>	2013-02-01 15:36:03 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2013-02-01 15:36:03 -0500
commit	4506a519f2a76775e6c236f44d50e9f79a7d3434 (patch)
tree	20b1e8e26fcb2bbe09b9589ed38183f6b0f6a4d4 /fs/gfs2
parent	4513899092b3254b3539f92a65d2839afa1d50f6 (diff)
GFS2: Split glock lru processing into two parts
The intent here is to split the processing of the glock LRU list into two parts, so that the selection of glocks and their disposal are separate functions. Further updates can then be made to each function independently: to improve how glocks are selected, and to make their disposal more efficient.

The new feature this patch brings is sorting the glocks to be disposed of into glock number (and thus also disk block number) order. Not all glocks will need I/O in order to be disposed of, but some will, and at least we'll now issue that I/O in (mostly) disk block order.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
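As a side note, the ordering rule used for the sort is the same one a userspace qsort comparator would express. Below is a minimal, self-contained userspace sketch of the pattern the patch introduces (select victims into a private batch, then sort by block number before disposal). The struct and function names here are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a glock; 'block' plays the role of gl_name.ln_number. */
struct victim {
	unsigned long long block;
};

/* Same ordering rule as the patch's glock_cmp(). */
static int victim_cmp(const void *a, const void *b)
{
	const struct victim *va = a, *vb = b;

	if (va->block > vb->block)
		return 1;
	if (va->block < vb->block)
		return -1;
	return 0;
}

int main(void)
{
	/* Phase 1 (selection) would gather these under the LRU lock. */
	struct victim v[] = { { 72 }, { 5 }, { 40 }, { 11 } };
	size_t i, n = sizeof(v) / sizeof(v[0]);

	/* Phase 2 (disposal): sort first, so any resulting I/O is
	 * issued in (mostly) ascending disk-block order. */
	qsort(v, n, sizeof(v[0]), victim_cmp);
	for (i = 0; i < n; i++)
		printf("dispose block %llu\n", v[i].block);
	return 0;
}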
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c	90
1 file changed, 67 insertions(+), 23 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3ad8fd36f8da..cf3515546739 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -30,6 +30,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
+#include <linux/list_sort.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -1376,50 +1377,93 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 	gfs2_glock_put(gl);
 }
 
+static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct gfs2_glock *gla, *glb;
+
+	gla = list_entry(a, struct gfs2_glock, gl_lru);
+	glb = list_entry(b, struct gfs2_glock, gl_lru);
+
+	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
+		return 1;
+	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
+		return -1;
+
+	return 0;
+}
+
+/**
+ * gfs2_dispose_glock_lru - Demote a list of glocks
+ * @list: The list to dispose of
+ *
+ * Disposing of glocks may involve disk accesses, so that here we sort
+ * the glocks by number (i.e. disk location of the inodes) so that if
+ * there are any such accesses, they'll be sent in order (mostly).
+ *
+ * Must be called under the lru_lock, but may drop and retake this
+ * lock. While the lru_lock is dropped, entries may vanish from the
+ * list, but no new entries will appear on the list (since it is
+ * private)
+ */
+
+static void gfs2_dispose_glock_lru(struct list_head *list)
+__releases(&lru_lock)
+__acquires(&lru_lock)
+{
+	struct gfs2_glock *gl;
+
+	list_sort(NULL, list, glock_cmp);
+
+	while(!list_empty(list)) {
+		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
+		list_del_init(&gl->gl_lru);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+		gfs2_glock_hold(gl);
+		spin_unlock(&lru_lock);
+		spin_lock(&gl->gl_spin);
+		if (demote_ok(gl))
+			handle_callback(gl, LM_ST_UNLOCKED, 0);
+		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
+		smp_mb__after_clear_bit();
+		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+			gfs2_glock_put_nolock(gl);
+		spin_unlock(&gl->gl_spin);
+		spin_lock(&lru_lock);
+	}
+}
+
 /**
  * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
  * @nr: The number of entries to scan
  *
+ * This function selects the entries on the LRU which are able to
+ * be demoted, and then kicks off the process by calling
+ * gfs2_dispose_glock_lru() above.
  */
 
 static void gfs2_scan_glock_lru(int nr)
 {
 	struct gfs2_glock *gl;
-	int may_demote;
-	int nr_skipped = 0;
 	LIST_HEAD(skipped);
+	LIST_HEAD(dispose);
 
 	spin_lock(&lru_lock);
 	while(nr && !list_empty(&lru_list)) {
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
-		list_del_init(&gl->gl_lru);
-		clear_bit(GLF_LRU, &gl->gl_flags);
-		atomic_dec(&lru_count);
 
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-			gfs2_glock_hold(gl);
-			spin_unlock(&lru_lock);
-			spin_lock(&gl->gl_spin);
-			may_demote = demote_ok(gl);
-			if (may_demote) {
-				handle_callback(gl, LM_ST_UNLOCKED, 0);
-				nr--;
-			}
-			clear_bit(GLF_LOCK, &gl->gl_flags);
-			smp_mb__after_clear_bit();
-			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-				gfs2_glock_put_nolock(gl);
-			spin_unlock(&gl->gl_spin);
-			spin_lock(&lru_lock);
+			list_move(&gl->gl_lru, &dispose);
+			atomic_dec(&lru_count);
+			nr--;
 			continue;
 		}
-		nr_skipped++;
-		list_add(&gl->gl_lru, &skipped);
-		set_bit(GLF_LRU, &gl->gl_flags);
+
+		list_move(&gl->gl_lru, &skipped);
 	}
 	list_splice(&skipped, &lru_list);
-	atomic_add(nr_skipped, &lru_count);
+	if (!list_empty(&dispose))
+		gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
 }
 
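One detail worth noting in gfs2_dispose_glock_lru() above: gfs2_scan_glock_lru() claims each glock with test_and_set_bit(GLF_LOCK) before moving it to the dispose list, and the disposer later releases that claim with the WARN_ON(!test_and_clear_bit(GLF_LOCK, ...)), so ownership is handed between the two phases. The disposer is also entered with lru_lock held, but drops it around each glock's work (which runs under gl_spin and queues glock_workqueue); that is only safe because the dispose list is private, so nothing can append to it while the lock is dropped. A hedged userspace approximation of that lock-juggling pattern, with pthread mutexes standing in for kernel spinlocks and all names purely illustrative:

#include <pthread.h>
#include <stddef.h>

struct entry {
	struct entry *next;
	pthread_mutex_t lock;		/* stands in for gl_spin */
	void (*dispose)(struct entry *);
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* "lru_lock" */

/* Called with list_lock held; drops and retakes it for each entry.
 * Safe only because *list is private: nothing else appends to it
 * while the lock is dropped, so the head pointer stays coherent. */
static void dispose_list(struct entry **list)
{
	struct entry *e;

	while ((e = *list) != NULL) {
		*list = e->next;		/* unlink under list_lock */
		pthread_mutex_unlock(&list_lock);

		pthread_mutex_lock(&e->lock);	/* per-entry work with the
						 * list lock dropped */
		e->dispose(e);
		pthread_mutex_unlock(&e->lock);

		pthread_mutex_lock(&list_lock);	/* retake before looping */
	}
}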