author    Steven Whitehouse <swhiteho@redhat.com>  2006-09-09 16:59:11 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-09-09 16:59:11 -0400
commit    087efdd391f47305dc251a7b00dcc5d69e8c636a (patch)
tree      e6d74f4107b5046fb8280a33f76ad7cab1d2b992 /fs/gfs2/glock.c
parent    ff6af411ae65da95a1801668b9580c5c33f0f7d1 (diff)
[GFS2] Make glock hash locks proportional to NR_CPUS
Make the number of locks used for hash chains in glock.c
proportional to NR_CPUS. Also move constants for the number
of hash chains into glock.c from incore.h since they are
not used outside of glock.c.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
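The core idea is lock striping: keep the full set of hash chains, but guard them with a much smaller, power-of-two array of rwlocks indexed by the same hash value under a smaller mask. A minimal userspace sketch of the pattern, shown below for orientation (NCHAINS, NLOCKS, and lock_for are hypothetical names, not part of the patch):

#include <pthread.h>

#define NCHAINS 8192		/* hash chains, as with GFS2_GL_HASH_SIZE */
#define NLOCKS  256		/* far fewer locks; must be a power of two */

static pthread_rwlock_t stripe[NLOCKS];

/* One hash value indexes both arrays; only the mask differs. */
static pthread_rwlock_t *lock_for(unsigned int hash)
{
	return &stripe[hash & (NLOCKS - 1)];
}

static void stripe_init(void)
{
	unsigned int i;
	for (i = 0; i < NLOCKS; i++)
		pthread_rwlock_init(&stripe[i], NULL);
}

Distinct buckets can contend on the same stripe, but with a good hash such collisions are rare, and the lock array stays small enough to be cache friendly.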
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 93
1 file changed, 74 insertions(+), 19 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 49512696160e..8abfefe4efd4 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -53,8 +53,59 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 
+#define GFS2_GL_HASH_SHIFT      13
+#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
+#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
+
 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
-static rwlock_t gl_hash_locks[GFS2_GL_HASH_SIZE];
+
+/*
+ * Despite what you might think, the numbers below are not arbitrary :-)
+ * They are taken from the ipv4 routing hash code, which is well tested
+ * and thus should be nearly optimal. Later on we might tweak the numbers
+ * but for now this should be fine.
+ *
+ * The reason for putting the locks in a separate array from the list heads
+ * is that we can have fewer locks than list heads and save memory. We use
+ * the same hash function for both, but with a different hash mask.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+defined(CONFIG_PROVE_LOCKING)
+
+#ifdef CONFIG_LOCKDEP
+# define GL_HASH_LOCK_SZ        256
+#else
+# if NR_CPUS >= 32
+#  define GL_HASH_LOCK_SZ       4096
+# elif NR_CPUS >= 16
+#  define GL_HASH_LOCK_SZ       2048
+# elif NR_CPUS >= 8
+#  define GL_HASH_LOCK_SZ       1024
+# elif NR_CPUS >= 4
+#  define GL_HASH_LOCK_SZ       512
+# else
+#  define GL_HASH_LOCK_SZ       256
+# endif
+#endif
+
+/* We never want more locks than chains */
+#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
+# undef GL_HASH_LOCK_SZ
+# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
+#endif
+
+static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
+
+static inline rwlock_t *gl_lock_addr(unsigned int x)
+{
+	return &gl_hash_locks[(x) & (GL_HASH_LOCK_SZ-1)];
+}
+#else /* not SMP, so no spinlocks required */
+static inline rwlock_t *gl_lock_addr(unsigned int x)
+{
+	return NULL;
+}
+#endif
 
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
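To make the masking concrete: with NR_CPUS >= 32 and lockdep disabled, GL_HASH_LOCK_SZ is 4096, so gl_lock_addr() keeps the low 12 bits of the 13-bit bucket hash, and two buckets that differ only in the top bit share one rwlock. A hypothetical self-check, not part of the patch:

/* Buckets 0x0abc and 0x1abc both mask down to lock slot 0xabc,
 * assuming GL_HASH_LOCK_SZ == 4096 in this configuration. */
BUG_ON(gl_lock_addr(0x0abc) != gl_lock_addr(0x1abc));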
@@ -161,16 +212,16 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 {
 	int rv = 0;
 
-	write_lock(&gl_hash_locks[gl->gl_hash]);
+	write_lock(gl_lock_addr(gl->gl_hash));
 	if (kref_put(&gl->gl_ref, kill_glock)) {
 		list_del_init(&gl_hash_table[gl->gl_hash].hb_list);
-		write_unlock(&gl_hash_locks[gl->gl_hash]);
+		write_unlock(gl_lock_addr(gl->gl_hash));
 		BUG_ON(spin_is_locked(&gl->gl_spin));
 		glock_free(gl);
 		rv = 1;
 		goto out;
 	}
-	write_unlock(&gl_hash_locks[gl->gl_hash]);
+	write_unlock(gl_lock_addr(gl->gl_hash));
 out:
 	return rv;
 }
@@ -243,9 +294,9 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
 	unsigned int hash = gl_hash(sdp, name);
 	struct gfs2_glock *gl;
 
-	read_lock(&gl_hash_locks[hash]);
+	read_lock(gl_lock_addr(hash));
 	gl = search_bucket(hash, sdp, name);
-	read_unlock(&gl_hash_locks[hash]);
+	read_unlock(gl_lock_addr(hash));
 
 	return gl;
 }
@@ -272,9 +323,9 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	unsigned int hash = gl_hash(sdp, &name);
 	int error;
 
-	read_lock(&gl_hash_locks[hash]);
+	read_lock(gl_lock_addr(hash));
 	gl = search_bucket(hash, sdp, &name);
-	read_unlock(&gl_hash_locks[hash]);
+	read_unlock(gl_lock_addr(hash));
 
 	if (gl || !create) {
 		*glp = gl;
@@ -316,15 +367,15 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (error)
 		goto fail_aspace;
 
-	write_lock(&gl_hash_locks[hash]);
+	write_lock(gl_lock_addr(hash));
 	tmp = search_bucket(hash, sdp, &name);
 	if (tmp) {
-		write_unlock(&gl_hash_locks[hash]);
+		write_unlock(gl_lock_addr(hash));
 		glock_free(gl);
 		gl = tmp;
 	} else {
 		list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
-		write_unlock(&gl_hash_locks[hash]);
+		write_unlock(gl_lock_addr(hash));
 	}
 
 	*glp = gl;
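The hunk above is the classic optimistic find-or-create dance: look up under the read lock, allocate with no lock held (the allocation may sleep), then re-check under the write lock because another thread may have inserted the same key in the window. A self-contained userspace sketch of the same shape (struct obj, get_obj(), and the single chain are hypothetical, for illustration only):

#include <pthread.h>
#include <stdlib.h>

struct obj { unsigned long key; struct obj *next; };
static struct obj *chain;			/* one chain, for brevity */
static pthread_rwlock_t chain_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct obj *lookup(unsigned long key)
{
	struct obj *o;
	for (o = chain; o; o = o->next)
		if (o->key == key)
			return o;
	return NULL;
}

struct obj *get_obj(unsigned long key)
{
	struct obj *o, *tmp;

	pthread_rwlock_rdlock(&chain_lock);	/* optimistic lookup */
	o = lookup(key);
	pthread_rwlock_unlock(&chain_lock);
	if (o)
		return o;

	o = calloc(1, sizeof(*o));		/* allocate with no lock held */
	if (!o)
		return NULL;
	o->key = key;

	pthread_rwlock_wrlock(&chain_lock);
	tmp = lookup(key);			/* re-check: we may have raced */
	if (tmp) {
		pthread_rwlock_unlock(&chain_lock);
		free(o);			/* lose the race gracefully */
		return tmp;
	}
	o->next = chain;
	chain = o;
	pthread_rwlock_unlock(&chain_lock);
	return o;
}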
@@ -1879,12 +1930,12 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 	memset(&plug.gl_flags, 0, sizeof(unsigned long));
 	set_bit(GLF_PLUG, &plug.gl_flags);
 
-	write_lock(&gl_hash_locks[hash]);
+	write_lock(gl_lock_addr(hash));
 	list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
-	write_unlock(&gl_hash_locks[hash]);
+	write_unlock(gl_lock_addr(hash));
 
 	for (;;) {
-		write_lock(&gl_hash_locks[hash]);
+		write_lock(gl_lock_addr(hash));
 
 		for (;;) {
 			tmp = plug.gl_list.next;
@@ -1892,7 +1943,7 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 			if (tmp == &gl_hash_table[hash].hb_list) {
 				list_del(&plug.gl_list);
 				entries = !list_empty(&gl_hash_table[hash].hb_list);
-				write_unlock(&gl_hash_locks[hash]);
+				write_unlock(gl_lock_addr(hash));
 				return entries;
 			}
 			gl = list_entry(tmp, struct gfs2_glock, gl_list);
@@ -1911,7 +1962,7 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 			break;
 		}
 
-		write_unlock(&gl_hash_locks[hash]);
+		write_unlock(gl_lock_addr(hash));
 
 		examiner(gl);
 	}
@@ -2204,7 +2255,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 
 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
 
-		read_lock(&gl_hash_locks[x]);
+		read_lock(gl_lock_addr(x));
 
 		list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
 			if (test_bit(GLF_PLUG, &gl->gl_flags))
@@ -2217,7 +2268,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 			break;
 		}
 
-		read_unlock(&gl_hash_locks[x]);
+		read_unlock(gl_lock_addr(x));
 
 		if (error)
 			break;
@@ -2231,9 +2282,13 @@ int __init gfs2_glock_init(void)
 {
 	unsigned i;
 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-		rwlock_init(&gl_hash_locks[i]);
 		INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
 	}
+#ifdef GL_HASH_LOCK_SZ
+	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
+		rwlock_init(&gl_hash_locks[i]);
+	}
+#endif
 	return 0;
 }
 
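One subtlety in this last hunk: the rwlock_init() loop sits under #ifdef GL_HASH_LOCK_SZ because on uniprocessor builds without spinlock debugging neither the macro nor the gl_hash_locks array exists; gl_lock_addr() then returns NULL, which is harmless since the UP rwlock operations compile away without touching their argument. The same guard pattern in miniature (stripe_lock_init() is a hypothetical name):

static inline void stripe_lock_init(void)
{
#ifdef GL_HASH_LOCK_SZ
	unsigned int i;
	/* The init loop exists only when the lock array does. */
	for (i = 0; i < GL_HASH_LOCK_SZ; i++)
		rwlock_init(&gl_hash_locks[i]);
#endif
}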