author		Steven Whitehouse <swhiteho@redhat.com>		2006-09-08 13:35:56 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>		2006-09-08 13:35:56 -0400
commit		37b2fa6a24f996d751dc80fbc8a77602cead269b
tree		2b96dc483c92593fac467076e76433f6fd6297be	/fs/gfs2
parent		9b47c11d1cbedcba685c9bd90c73fd41acdfab0e
[GFS2] Move rwlocks in glock.c into their own array
This splits the rwlocks guarding the hash chains of the glock hash
table into their own array. This will reduce memory usage in some
cases due to better alignment, although the real reason for doing it
is to allow the two tables to be different sizes in the future (i.e.
the locks will be sized in proportion to the maximum number of CPUs
and the hash chains in proportion to the size of physical memory).

In order to allow this, the gl_bucket member of struct gfs2_glock has
now become gl_hash, so we record the hash rather than a pointer to the
bucket itself.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
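
Note: the memory argument is easiest to see from the struct layouts. The following is a minimal user-space sketch, not kernel code: pthread_rwlock_t stands in for the kernel's rwlock_t, list_head is reduced to bare pointers, and HASH_SIZE is an illustrative stand-in for GFS2_GL_HASH_SIZE. It contrasts the old combined bucket (lock plus list head) with the split arrays this patch introduces.

    /* Sketch only: pthread_rwlock_t stands in for rwlock_t, and
     * HASH_SIZE is illustrative, not the real GFS2_GL_HASH_SIZE. */
    #include <pthread.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Before this patch: every hash bucket carried its own rwlock. */
    struct bucket_old {
            pthread_rwlock_t hb_lock;
            struct list_head hb_list;
    };

    /* After this patch: a bucket keeps only the list head ... */
    struct bucket_new {
            struct list_head hb_list;
    };

    #define HASH_SIZE 8192  /* illustrative */

    /* ... and the locks live in their own, separately declared array. */
    static struct bucket_new table[HASH_SIZE];
    static pthread_rwlock_t locks[HASH_SIZE];

    int main(void)
    {
            printf("combined bucket: %zu bytes\n", sizeof(struct bucket_old));
            printf("split bucket:    %zu bytes (lock of %zu bytes stored elsewhere)\n",
                   sizeof(struct bucket_new), sizeof(pthread_rwlock_t));
            printf("split arrays:    %zu + %zu bytes total\n",
                   sizeof(table), sizeof(locks));
            return 0;
    }

With the lock pulled out, each bucket shrinks to a single list head, and nothing ties the number of locks to the number of hash chains any more, which is what allows the two arrays to be sized independently later on.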
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/glock.c		87
-rw-r--r--	fs/gfs2/incore.h	 7
2 files changed, 43 insertions, 51 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 92aa0e8c9099..49512696160e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -44,12 +44,17 @@ struct greedy {
         struct work_struct gr_work;
 };
 
+struct gfs2_gl_hash_bucket {
+        struct list_head hb_list;
+};
+
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
 static int dump_glock(struct gfs2_glock *gl);
 
 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
+static rwlock_t gl_hash_locks[GFS2_GL_HASH_SIZE];
 
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
@@ -154,19 +159,18 @@ static void kill_glock(struct kref *kref)
 
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
-        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
         int rv = 0;
 
-        write_lock(&bucket->hb_lock);
+        write_lock(&gl_hash_locks[gl->gl_hash]);
         if (kref_put(&gl->gl_ref, kill_glock)) {
-                list_del_init(&gl->gl_list);
-                write_unlock(&bucket->hb_lock);
+                list_del_init(&gl_hash_table[gl->gl_hash].hb_list);
+                write_unlock(&gl_hash_locks[gl->gl_hash]);
                 BUG_ON(spin_is_locked(&gl->gl_spin));
                 glock_free(gl);
                 rv = 1;
                 goto out;
         }
-        write_unlock(&bucket->hb_lock);
+        write_unlock(&gl_hash_locks[gl->gl_hash]);
 out:
         return rv;
 }
@@ -203,13 +207,13 @@ static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
  * Returns: NULL, or the struct gfs2_glock with the requested number
  */
 
-static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
+static struct gfs2_glock *search_bucket(unsigned int hash,
                                         const struct gfs2_sbd *sdp,
                                         const struct lm_lockname *name)
 {
         struct gfs2_glock *gl;
 
-        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
+        list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
                 if (test_bit(GLF_PLUG, &gl->gl_flags))
                         continue;
                 if (!lm_name_equal(&gl->gl_name, name))
@@ -236,12 +240,12 @@ static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                           const struct lm_lockname *name)
 {
-        struct gfs2_gl_hash_bucket *bucket = &gl_hash_table[gl_hash(sdp, name)];
+        unsigned int hash = gl_hash(sdp, name);
         struct gfs2_glock *gl;
 
-        read_lock(&bucket->hb_lock);
-        gl = search_bucket(bucket, sdp, name);
-        read_unlock(&bucket->hb_lock);
+        read_lock(&gl_hash_locks[hash]);
+        gl = search_bucket(hash, sdp, name);
+        read_unlock(&gl_hash_locks[hash]);
 
         return gl;
 }
@@ -263,18 +267,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                    const struct gfs2_glock_operations *glops, int create,
                    struct gfs2_glock **glp)
 {
-        struct lm_lockname name;
+        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
         struct gfs2_glock *gl, *tmp;
-        struct gfs2_gl_hash_bucket *bucket;
+        unsigned int hash = gl_hash(sdp, &name);
         int error;
 
-        name.ln_number = number;
-        name.ln_type = glops->go_type;
-        bucket = &gl_hash_table[gl_hash(sdp, &name)];
-
-        read_lock(&bucket->hb_lock);
-        gl = search_bucket(bucket, sdp, &name);
-        read_unlock(&bucket->hb_lock);
+        read_lock(&gl_hash_locks[hash]);
+        gl = search_bucket(hash, sdp, &name);
+        read_unlock(&gl_hash_locks[hash]);
 
         if (gl || !create) {
                 *glp = gl;
@@ -289,6 +289,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         gl->gl_name = name;
         kref_init(&gl->gl_ref);
         gl->gl_state = LM_ST_UNLOCKED;
+        gl->gl_hash = hash;
         gl->gl_owner = NULL;
         gl->gl_ip = 0;
         gl->gl_ops = glops;
@@ -297,7 +298,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         gl->gl_vn = 0;
         gl->gl_stamp = jiffies;
         gl->gl_object = NULL;
-        gl->gl_bucket = bucket;
         gl->gl_sbd = sdp;
         gl->gl_aspace = NULL;
         lops_init_le(&gl->gl_le, &gfs2_glock_lops);
@@ -316,15 +316,15 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
         if (error)
                 goto fail_aspace;
 
-        write_lock(&bucket->hb_lock);
-        tmp = search_bucket(bucket, sdp, &name);
+        write_lock(&gl_hash_locks[hash]);
+        tmp = search_bucket(hash, sdp, &name);
         if (tmp) {
-                write_unlock(&bucket->hb_lock);
+                write_unlock(&gl_hash_locks[hash]);
                 glock_free(gl);
                 gl = tmp;
         } else {
-                list_add_tail(&gl->gl_list, &bucket->hb_list);
-                write_unlock(&bucket->hb_lock);
+                list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
+                write_unlock(&gl_hash_locks[hash]);
         }
 
         *glp = gl;
@@ -1868,7 +1868,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
  */
 
 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
-                          struct gfs2_gl_hash_bucket *bucket)
+                          unsigned int hash)
 {
         struct glock_plug plug;
         struct list_head *tmp;
@@ -1879,20 +1879,20 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
         memset(&plug.gl_flags, 0, sizeof(unsigned long));
         set_bit(GLF_PLUG, &plug.gl_flags);
 
-        write_lock(&bucket->hb_lock);
-        list_add(&plug.gl_list, &bucket->hb_list);
-        write_unlock(&bucket->hb_lock);
+        write_lock(&gl_hash_locks[hash]);
+        list_add(&plug.gl_list, &gl_hash_table[hash].hb_list);
+        write_unlock(&gl_hash_locks[hash]);
 
         for (;;) {
-                write_lock(&bucket->hb_lock);
+                write_lock(&gl_hash_locks[hash]);
 
                 for (;;) {
                         tmp = plug.gl_list.next;
 
-                        if (tmp == &bucket->hb_list) {
+                        if (tmp == &gl_hash_table[hash].hb_list) {
                                 list_del(&plug.gl_list);
-                                entries = !list_empty(&bucket->hb_list);
-                                write_unlock(&bucket->hb_lock);
+                                entries = !list_empty(&gl_hash_table[hash].hb_list);
+                                write_unlock(&gl_hash_locks[hash]);
                                 return entries;
                         }
                         gl = list_entry(tmp, struct gfs2_glock, gl_list);
@@ -1911,7 +1911,7 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                         break;
                 }
 
-                write_unlock(&bucket->hb_lock);
+                write_unlock(&gl_hash_locks[hash]);
 
                 examiner(gl);
         }
@@ -1956,7 +1956,7 @@ void gfs2_scand_internal(struct gfs2_sbd *sdp)
         unsigned int x;
 
         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                examine_bucket(scan_glock, sdp, &gl_hash_table[x]);
+                examine_bucket(scan_glock, sdp, x);
                 cond_resched();
         }
 }
@@ -2015,7 +2015,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
                 cont = 0;
 
                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
-                        if (examine_bucket(clear_glock, sdp, &gl_hash_table[x]))
+                        if (examine_bucket(clear_glock, sdp, x))
                                 cont = 1;
 
                 if (!wait || !cont)
@@ -2198,17 +2198,15 @@ out:
 
 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
 {
-        struct gfs2_gl_hash_bucket *bucket;
         struct gfs2_glock *gl;
         unsigned int x;
         int error = 0;
 
         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
-                bucket = &gl_hash_table[x];
 
-                read_lock(&bucket->hb_lock);
+                read_lock(&gl_hash_locks[x]);
 
-                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
+                list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) {
                         if (test_bit(GLF_PLUG, &gl->gl_flags))
                                 continue;
                         if (gl->gl_sbd != sdp)
@@ -2219,7 +2217,7 @@ static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
                                 break;
                 }
 
-                read_unlock(&bucket->hb_lock);
+                read_unlock(&gl_hash_locks[x]);
 
                 if (error)
                         break;
@@ -2233,9 +2231,8 @@ int __init gfs2_glock_init(void)
 {
         unsigned i;
         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
-                struct gfs2_gl_hash_bucket *hb = &gl_hash_table[i];
-                rwlock_init(&hb->hb_lock);
-                INIT_LIST_HEAD(&hb->hb_list);
+                rwlock_init(&gl_hash_locks[i]);
+                INIT_LIST_HEAD(&gl_hash_table[i].hb_list);
         }
         return 0;
 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 9f5d98ff823a..f50ea6282e77 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -106,11 +106,6 @@ struct gfs2_bufdata {
         struct list_head bd_ail_gl_list;
 };
 
-struct gfs2_gl_hash_bucket {
-        rwlock_t hb_lock;
-        struct list_head hb_list;
-};
-
 struct gfs2_glock_operations {
         void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
                              int flags);
@@ -175,6 +170,7 @@ struct gfs2_glock {
         spinlock_t gl_spin;
 
         unsigned int gl_state;
+        unsigned int gl_hash;
         struct task_struct *gl_owner;
         unsigned long gl_ip;
         struct list_head gl_holders;
@@ -195,7 +191,6 @@ struct gfs2_glock {
         unsigned long gl_stamp;
         void *gl_object;
 
-        struct gfs2_gl_hash_bucket *gl_bucket;
         struct list_head gl_reclaim;
 
         struct gfs2_sbd *gl_sbd;
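
Note: for reference, the access pattern glock.c ends up with after this change can be mimicked in plain user space roughly as below. This is a sketch under stated assumptions, not GFS2 code: pthread rwlocks replace rwlock_t, the chains are open-coded singly linked lists instead of list_head, and glock, hash_table, hash_locks, init_tables, find_glock, unlink_glock and HASH_SIZE are all illustrative stand-ins. The point it shows is the one the patch relies on: the object remembers its own hash (gl_hash), and both readers and writers use that hash to index the separate lock array.

    #include <pthread.h>
    #include <stddef.h>

    #define HASH_SIZE 8192                  /* stand-in for GFS2_GL_HASH_SIZE */

    struct glock {                          /* stand-in for struct gfs2_glock */
            unsigned int gl_hash;           /* remembers its chain, like the new gl_hash */
            unsigned long gl_number;
            struct glock *gl_next;
    };

    static struct glock *hash_table[HASH_SIZE];     /* the chains */
    static pthread_rwlock_t hash_locks[HASH_SIZE];  /* the separate lock array */

    /* Mirrors gfs2_glock_init(): the locks are set up alongside the chains. */
    static void init_tables(void)
    {
            for (size_t i = 0; i < HASH_SIZE; i++)
                    pthread_rwlock_init(&hash_locks[i], NULL);
    }

    /* Readers take only the lock guarding the one chain they walk, as
     * gfs2_glock_find()/search_bucket() now do (the real code also takes
     * a reference on the glock before dropping the lock). */
    static struct glock *find_glock(unsigned int hash, unsigned long number)
    {
            struct glock *gl;

            pthread_rwlock_rdlock(&hash_locks[hash]);
            for (gl = hash_table[hash]; gl; gl = gl->gl_next)
                    if (gl->gl_number == number)
                            break;
            pthread_rwlock_unlock(&hash_locks[hash]);
            return gl;
    }

    /* Writers reach the same lock through the hash stored in the object,
     * as gfs2_glock_put() now does via gl->gl_hash. */
    static void unlink_glock(struct glock *victim)
    {
            struct glock **pp;

            pthread_rwlock_wrlock(&hash_locks[victim->gl_hash]);
            for (pp = &hash_table[victim->gl_hash]; *pp; pp = &(*pp)->gl_next) {
                    if (*pp == victim) {
                            *pp = victim->gl_next;
                            break;
                    }
            }
            pthread_rwlock_unlock(&hash_locks[victim->gl_hash]);
    }

    int main(void)
    {
            static struct glock g = { .gl_hash = 0, .gl_number = 42 };

            init_tables();
            g.gl_next = hash_table[0];
            hash_table[0] = &g;             /* insert before any contention, for the demo */
            unlink_glock(&g);
            return find_glock(0, 42) == NULL ? 0 : 1;   /* expect NULL after unlink */
    }

Compile with -pthread; the program exits 0 when the unlink and the subsequent lookup behave as expected.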