path: root/fs/gfs2/glock.c
author	Steven Whitehouse <swhiteho@redhat.com>	2006-09-11 21:40:30 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-09-11 21:40:30 -0400
commit	24264434603cc102d71fb2a1b3b7e282a781f449 (patch)
tree	18af5274fa222f0420df30651f57bab1eaf75eab	/fs/gfs2/glock.c
parent	94610610f10749c0e17b4d2840ff8a7cb636c413 (diff)
[GFS2] Rewrite of examine_bucket()
The existing implementation of this function in glock.c was not very efficient, as it relied upon keeping a cursor element on the hash chain in question and moving it along. This new version improves upon this by using the current element as a cursor. This is possible since we only look at the "next" element in the list after we've taken the read_lock() subsequent to calling the examiner function. Obviously we have to eventually drop the ref count that we are then left with, and we cannot do that while holding the read_lock, so we do it the next time we drop the lock: that means either just before we examine another glock, or when the loop has terminated.

The new implementation has several advantages: it uses only a read_lock() rather than a write_lock(), so it can run simultaneously with other code; and it doesn't need a "plug" element, which removes a test not only from this list iterator, but from all the other glock list iterators too. So it makes things faster and smaller.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
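To make the iteration strategy easier to follow, here is a minimal user-space sketch of the same pattern: take a reference on the element being examined, drop the read lock around the examiner call, release the previous element's reference the next time the lock is dropped, and only read the "next" pointer once the lock has been retaken. Everything below is illustrative scaffolding rather than GFS2 code: node_hold()/node_put(), the pthread rwlock and the plain singly-linked list are stand-ins for gfs2_glock_hold()/gfs2_glock_put(), gl_lock_addr(hash) and the glock hash chains.

/*
 * Minimal sketch (not GFS2 code) of the "current element as cursor" walk.
 * The reference count here is a plain int, which is fine for this
 * single-threaded demo; the kernel uses a kref.  As in the kernel code,
 * a node is assumed to be unlinked from the list only when its final
 * reference is dropped, so holding a reference keeps its ->next pointer
 * safe to follow after the lock is retaken.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int refcount;
	int id;
};

static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *bucket_head;

static void node_hold(struct node *n) { n->refcount++; }

static void node_put(struct node *n)
{
	if (--n->refcount == 0)
		free(n);
}

/* Called without the bucket lock held, like examiner(gl). */
static void examiner(struct node *n)
{
	printf("examining node %d\n", n->id);
}

static int examine_bucket_sketch(void)
{
	struct node *n, *prev = NULL;
	int has_entries = 0;

	pthread_rwlock_rdlock(&bucket_lock);
	for (n = bucket_head; n != NULL; n = n->next) {
		has_entries = 1;
		/* The real code also filters on gl->gl_sbd == sdp first. */
		node_hold(n);                   /* pin the cursor element */
		pthread_rwlock_unlock(&bucket_lock);
		if (prev)
			node_put(prev);         /* drop previous ref; lock not held */
		prev = n;
		examiner(n);                    /* may block; lock is dropped */
		pthread_rwlock_rdlock(&bucket_lock);
		/* ->next is only read here, after the lock has been retaken */
	}
	pthread_rwlock_unlock(&bucket_lock);
	if (prev)
		node_put(prev);                 /* reference left over from the loop */
	return has_entries;
}

int main(void)
{
	/* Build a three-node bucket; each node starts with one reference. */
	for (int i = 3; i >= 1; i--) {
		struct node *n = calloc(1, sizeof(*n));
		n->id = i;
		n->refcount = 1;
		n->next = bucket_head;
		bucket_head = n;
	}
	printf("bucket had entries: %d\n", examine_bucket_sketch());
	return 0;
}

The key invariant is the same as in the patch: because an element can only leave the list when its last reference is dropped, the held reference keeps the cursor element, and therefore its next pointer, valid across the unlocked examiner call.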
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	95
1 file changed, 31 insertions, 64 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b348053c4363..b5effb9e4a38 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/kallsyms.h>
 #include <linux/gfs2_ondisk.h>
+#include <linux/list.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -33,12 +34,6 @@
33#include "super.h" 34#include "super.h"
34#include "util.h" 35#include "util.h"
35 36
36/* Must be kept in sync with the beginning of struct gfs2_glock */
37struct glock_plug {
38 struct list_head gl_list;
39 unsigned long gl_flags;
40};
41
42struct greedy { 37struct greedy {
43 struct gfs2_holder gr_gh; 38 struct gfs2_holder gr_gh;
44 struct work_struct gr_work; 39 struct work_struct gr_work;
@@ -52,6 +47,7 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
+static int dump_inode(struct gfs2_inode *ip);
 
 #define GFS2_GL_HASH_SHIFT	13
 #define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
@@ -214,7 +210,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 
 	write_lock(gl_lock_addr(gl->gl_hash));
 	if (kref_put(&gl->gl_ref, kill_glock)) {
-		list_del_init(&gl_hash_table[gl->gl_hash].hb_list);
+		list_del_init(&gl->gl_list);
 		write_unlock(gl_lock_addr(gl->gl_hash));
 		BUG_ON(spin_is_locked(&gl->gl_spin));
 		glock_free(gl);
@@ -265,8 +261,6 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
 	struct gfs2_glock *gl;
 
 	list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
-		if (test_bit(GLF_PLUG, &gl->gl_flags))
-			continue;
 		if (!lm_name_equal(&gl->gl_name, name))
 			continue;
 		if (gl->gl_sbd != sdp)
@@ -1899,51 +1893,33 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 			  unsigned int hash)
 {
-	struct glock_plug plug;
-	struct list_head *tmp;
-	struct gfs2_glock *gl;
-	int entries;
-
-	/* Add "plug" to end of bucket list, work back up list from there */
-	memset(&plug.gl_flags, 0, sizeof(unsigned long));
-	set_bit(GLF_PLUG, &plug.gl_flags);
-
-	write_lock(gl_lock_addr(hash));
-	list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
-	write_unlock(gl_lock_addr(hash));
-
-	for (;;) {
-		write_lock(gl_lock_addr(hash));
-
-		for (;;) {
-			tmp = plug.gl_list.next;
+	struct gfs2_glock *gl, *prev = NULL;
+	int has_entries = 0;
+	struct list_head *head = &gl_hash_table[hash].hb_list;
 
-			if (tmp == &gl_hash_table[hash].hb_list) {
-				list_del(&plug.gl_list);
-				entries = !list_empty(&gl_hash_table[hash].hb_list);
-				write_unlock(gl_lock_addr(hash));
-				return entries;
-			}
-			gl = list_entry(tmp, struct gfs2_glock, gl_list);
-
-			/* Move plug up list */
-			list_move(&plug.gl_list, &gl->gl_list);
-
-			if (test_bit(GLF_PLUG, &gl->gl_flags))
-				continue;
-			if (gl->gl_sbd != sdp)
-				continue;
-
-			/* examiner() must glock_put() */
+	read_lock(gl_lock_addr(hash));
+	/* Can't use list_for_each_entry - don't want prefetch here */
+	if (list_empty(head))
+		goto out;
+	has_entries = 1;
+	gl = list_entry(head->next, struct gfs2_glock, gl_list);
+	while(&gl->gl_list != head) {
+		if (gl->gl_sbd == sdp) {
 			gfs2_glock_hold(gl);
-
-			break;
+			read_unlock(gl_lock_addr(hash));
+			if (prev)
+				gfs2_glock_put(prev);
+			prev = gl;
+			examiner(gl);
+			read_lock(gl_lock_addr(hash));
 		}
-
-		write_unlock(gl_lock_addr(hash));
-
-		examiner(gl);
+		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
 	}
+out:
+	read_unlock(gl_lock_addr(hash));
+	if (prev)
+		gfs2_glock_put(prev);
+	return has_entries;
 }
 
 /**
@@ -1955,23 +1931,19 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 static void scan_glock(struct gfs2_glock *gl)
 {
 	if (gl->gl_ops == &gfs2_inode_glops)
-		goto out;
+		return;
 
 	if (gfs2_glmutex_trylock(gl)) {
 		if (queue_empty(gl, &gl->gl_holders) &&
-		    gl->gl_state != LM_ST_UNLOCKED &&
-		    demote_ok(gl))
+		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			goto out_schedule;
 		gfs2_glmutex_unlock(gl);
 	}
-out:
-	gfs2_glock_put(gl);
 	return;
 
 out_schedule:
 	gfs2_glmutex_unlock(gl);
 	gfs2_glock_schedule_for_reclaim(gl);
-	gfs2_glock_put(gl);
 }
 
 /**
@@ -2014,11 +1986,8 @@ static void clear_glock(struct gfs2_glock *gl)
 		if (queue_empty(gl, &gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED);
-
 		gfs2_glmutex_unlock(gl);
 	}
-
-	gfs2_glock_put(gl);
 }
 
 /**
@@ -2040,10 +2009,10 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 
 	for (;;) {
 		cont = 0;
-
-		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-			if (examine_bucket(clear_glock, sdp, x))
+		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
+			if (examine_bucket(clear_glock, sdp, x))
 				cont = 1;
+		}
 
 		if (!wait || !cont)
 			break;
@@ -2234,8 +2203,6 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 		read_lock(gl_lock_addr(x));
 
 		list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
-			if (test_bit(GLF_PLUG, &gl->gl_flags))
-				continue;
 			if (gl->gl_sbd != sdp)
 				continue;
 