author     Abhijith Das <adas@redhat.com>                2009-01-07 17:03:37 -0500
committer  Steven Whitehouse <steve@dolmen.chygwyn.com>  2009-03-24 07:21:12 -0400
commit     0a7ab79c5b5a16035e09b466c9013c8afc3b4bff (patch)
tree       bb82ea7f936b334b4de184288bc951f1642e9541
parent     2db2aac255c38e75ad17c0b24feb589ccfccc0ae (diff)
GFS2: change gfs2_quota_scan into a shrinker
Deallocation of gfs2_quota_data objects now happens on demand through a
shrinker, instead of being done periodically by the quotad daemon.
Signed-off-by: Abhijith Das <adas@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
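
The shrinker interface this patch adopts (as it existed in kernels of this
vintage) works as follows: the VM calls the registered callback under memory
pressure, passing a count of objects to scan and the allocation context. The
callback must decline with -1 when __GFP_FS is not set, since freeing entries
may require filesystem locks, and it returns the remaining object count scaled
by sysctl_vfs_cache_pressure so the VM can gauge the cache's size. A minimal
sketch of that contract, using hypothetical names (example_cache_count,
example_shrink) rather than anything from this patch:

	#include <linux/mm.h>		/* struct shrinker, register_shrinker() */
	#include <linux/dcache.h>	/* sysctl_vfs_cache_pressure */

	/* Hypothetical cache counter, standing in for qd_lru_list/qd_lru_count. */
	static atomic_t example_cache_count = ATOMIC_INIT(0);

	static int example_shrink(int nr, gfp_t gfp_mask)
	{
		/* nr == 0 is a query: report the cache size, reclaim nothing. */
		if (nr == 0)
			goto out;

		/* Reclaim here may need fs locks; refuse non-__GFP_FS contexts. */
		if (!(gfp_mask & __GFP_FS))
			return -1;

		/* Reclaim up to nr entries from the LRU end of the cache. */
		while (nr-- && atomic_read(&example_cache_count))
			atomic_dec(&example_cache_count);	/* free one entry */

	out:
		/* Remaining objects, weighted by the VM's cache-pressure knob. */
		return (atomic_read(&example_cache_count) *
			sysctl_vfs_cache_pressure) / 100;
	}

	static struct shrinker example_shrinker = {
		.shrink	= example_shrink,
		.seeks	= DEFAULT_SEEKS,	/* cost of recreating an object */
	};

	/* Paired at module init/exit:
	 *	register_shrinker(&example_shrinker);
	 *	unregister_shrinker(&example_shrinker);
	 */

gfs2_shrink_qd_memory() in the patch below follows exactly this shape, with
the extra bookkeeping needed to detach each gfs2_quota_data from its
superblock's list before freeing it.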
-rw-r--r--  fs/gfs2/incore.h      |   6
-rw-r--r--  fs/gfs2/main.c        |  10
-rw-r--r--  fs/gfs2/ops_address.c |   1
-rw-r--r--  fs/gfs2/ops_fstype.c  |   1
-rw-r--r--  fs/gfs2/quota.c       | 165
-rw-r--r--  fs/gfs2/quota.h       |   2
-rw-r--r--  fs/gfs2/sys.c         |   2
7 files changed, 114 insertions, 73 deletions
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 608849d00021..592aa5040d29 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -283,7 +283,9 @@ enum {
 
 struct gfs2_quota_data {
 	struct list_head qd_list;
-	unsigned int qd_count;
+	struct list_head qd_reclaim;
+
+	atomic_t qd_count;
 
 	u32 qd_id;
 	unsigned long qd_flags;		/* QDF_... */
@@ -303,7 +305,6 @@ struct gfs2_quota_data {
 
 	u64 qd_sync_gen;
 	unsigned long qd_last_warn;
-	unsigned long qd_last_touched;
 };
 
 struct gfs2_trans {
@@ -406,7 +407,6 @@ struct gfs2_tune {
 	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
 	unsigned int gt_quota_scale_num; /* Numerator */
 	unsigned int gt_quota_scale_den; /* Denominator */
-	unsigned int gt_quota_cache_secs;
 	unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
 	unsigned int gt_new_files_jdata;
 	unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 7cacfde32194..86fe06798711 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -23,6 +23,12 @@
 #include "sys.h"
 #include "util.h"
 #include "glock.h"
+#include "quota.h"
+
+static struct shrinker qd_shrinker = {
+	.shrink = gfs2_shrink_qd_memory,
+	.seeks = DEFAULT_SEEKS,
+};
 
 static void gfs2_init_inode_once(void *foo)
 {
@@ -100,6 +106,8 @@ static int __init init_gfs2_fs(void)
 	if (!gfs2_quotad_cachep)
 		goto fail;
 
+	register_shrinker(&qd_shrinker);
+
 	error = register_filesystem(&gfs2_fs_type);
 	if (error)
 		goto fail;
@@ -117,6 +125,7 @@ static int __init init_gfs2_fs(void)
 fail_unregister:
 	unregister_filesystem(&gfs2_fs_type);
 fail:
+	unregister_shrinker(&qd_shrinker);
 	gfs2_glock_exit();
 
 	if (gfs2_quotad_cachep)
@@ -145,6 +154,7 @@ fail:
 
 static void __exit exit_gfs2_fs(void)
 {
+	unregister_shrinker(&qd_shrinker);
 	gfs2_glock_exit();
 	gfs2_unregister_debugfs();
 	unregister_filesystem(&gfs2_fs_type);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 4ddab67867eb..dde4ead2c3be 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -442,6 +442,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	 */
 	if (unlikely(page->index)) {
 		zero_user(page, 0, PAGE_CACHE_SIZE);
+		SetPageUptodate(page);
 		return 0;
 	}
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3eb49edae542..530d3f6f6ea8 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -63,7 +63,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
 	gt->gt_quota_warn_period = 10;
 	gt->gt_quota_scale_num = 1;
 	gt->gt_quota_scale_den = 1;
-	gt->gt_quota_cache_secs = 300;
 	gt->gt_quota_quantum = 60;
 	gt->gt_new_files_jdata = 0;
 	gt->gt_max_readahead = 1 << 18;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b08d09696b3e..2ada6e10d07b 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -80,6 +80,53 @@ struct gfs2_quota_change_host {
 	u32 qc_id;
 };
 
+static LIST_HEAD(qd_lru_list);
+static atomic_t qd_lru_count = ATOMIC_INIT(0);
+static spinlock_t qd_lru_lock = SPIN_LOCK_UNLOCKED;
+
+int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+{
+	struct gfs2_quota_data *qd;
+	struct gfs2_sbd *sdp;
+
+	if (nr == 0)
+		goto out;
+
+	if (!(gfp_mask & __GFP_FS))
+		return -1;
+
+	spin_lock(&qd_lru_lock);
+	while (nr && !list_empty(&qd_lru_list)) {
+		qd = list_entry(qd_lru_list.next,
+				struct gfs2_quota_data, qd_reclaim);
+		sdp = qd->qd_gl->gl_sbd;
+
+		/* Free from the filesystem-specific list */
+		list_del(&qd->qd_list);
+
+		spin_lock(&sdp->sd_quota_spin);
+		gfs2_assert_warn(sdp, !qd->qd_change);
+		gfs2_assert_warn(sdp, !qd->qd_slot_count);
+		gfs2_assert_warn(sdp, !qd->qd_bh_count);
+
+		gfs2_lvb_unhold(qd->qd_gl);
+		spin_unlock(&sdp->sd_quota_spin);
+		atomic_dec(&sdp->sd_quota_count);
+
+		/* Delete it from the common reclaim list */
+		list_del_init(&qd->qd_reclaim);
+		atomic_dec(&qd_lru_count);
+		spin_unlock(&qd_lru_lock);
+		kmem_cache_free(gfs2_quotad_cachep, qd);
+		spin_lock(&qd_lru_lock);
+		nr--;
+	}
+	spin_unlock(&qd_lru_lock);
+
+out:
+	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
+}
+
 static u64 qd2offset(struct gfs2_quota_data *qd)
 {
 	u64 offset;
@@ -100,11 +147,12 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
 	if (!qd)
 		return -ENOMEM;
 
-	qd->qd_count = 1;
+	atomic_set(&qd->qd_count, 1);
 	qd->qd_id = id;
 	if (user)
 		set_bit(QDF_USER, &qd->qd_flags);
 	qd->qd_slot = -1;
+	INIT_LIST_HEAD(&qd->qd_reclaim);
 
 	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
 			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -135,11 +183,17 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 
 	for (;;) {
 		found = 0;
-		spin_lock(&sdp->sd_quota_spin);
+		spin_lock(&qd_lru_lock);
 		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 			if (qd->qd_id == id &&
 			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
-				qd->qd_count++;
+				if (!atomic_read(&qd->qd_count) &&
+				    !list_empty(&qd->qd_reclaim)) {
+					/* Remove it from reclaim list */
+					list_del_init(&qd->qd_reclaim);
+					atomic_dec(&qd_lru_count);
+				}
+				atomic_inc(&qd->qd_count);
 				found = 1;
 				break;
 			}
@@ -155,7 +209,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 			new_qd = NULL;
 		}
 
-		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 
 		if (qd || !create) {
 			if (new_qd) {
@@ -175,21 +229,18 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 static void qd_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-	spin_lock(&sdp->sd_quota_spin);
-	gfs2_assert(sdp, qd->qd_count);
-	qd->qd_count++;
-	spin_unlock(&sdp->sd_quota_spin);
+	gfs2_assert(sdp, atomic_read(&qd->qd_count));
+	atomic_inc(&qd->qd_count);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-	spin_lock(&sdp->sd_quota_spin);
-	gfs2_assert(sdp, qd->qd_count);
-	if (!--qd->qd_count)
-		qd->qd_last_touched = jiffies;
-	spin_unlock(&sdp->sd_quota_spin);
+	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
+		/* Add to the reclaim list */
+		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
+		atomic_inc(&qd_lru_count);
+		spin_unlock(&qd_lru_lock);
+	}
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
@@ -330,6 +381,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 		return 0;
 
+	spin_lock(&qd_lru_lock);
 	spin_lock(&sdp->sd_quota_spin);
 
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
@@ -341,8 +393,8 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
 		set_bit(QDF_LOCKED, &qd->qd_flags);
-		gfs2_assert_warn(sdp, qd->qd_count);
-		qd->qd_count++;
+		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+		atomic_inc(&qd->qd_count);
 		qd->qd_change_sync = qd->qd_change;
 		gfs2_assert_warn(sdp, qd->qd_slot_count);
 		qd->qd_slot_count++;
@@ -355,6 +407,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 		qd = NULL;
 
 	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	if (qd) {
 		gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -379,24 +432,27 @@ static int qd_trylock(struct gfs2_quota_data *qd)
 	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 		return 0;
 
+	spin_lock(&qd_lru_lock);
 	spin_lock(&sdp->sd_quota_spin);
 
 	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 		return 0;
 	}
 
 	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
 	set_bit(QDF_LOCKED, &qd->qd_flags);
-	gfs2_assert_warn(sdp, qd->qd_count);
-	qd->qd_count++;
+	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+	atomic_inc(&qd->qd_count);
 	qd->qd_change_sync = qd->qd_change;
 	gfs2_assert_warn(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
 
 	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	gfs2_assert_warn(sdp, qd->qd_change_sync);
 	if (bh_get(qd)) {
@@ -802,8 +858,8 @@ restart:
 			loff_t pos;
 			gfs2_glock_dq_uninit(q_gh);
 			error = gfs2_glock_nq_init(qd->qd_gl,
-						  LM_ST_EXCLUSIVE, GL_NOCACHE,
-						  q_gh);
+						   LM_ST_EXCLUSIVE, GL_NOCACHE,
+						   q_gh);
 			if (error)
 				return error;
 
@@ -820,7 +876,6 @@ restart:
 
 	gfs2_glock_dq_uninit(&i_gh);
 
-
 	gfs2_quota_in(&q, buf);
 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
@@ -1171,13 +1226,14 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 		qd->qd_change = qc.qc_change;
 		qd->qd_slot = slot;
 		qd->qd_slot_count = 1;
-		qd->qd_last_touched = jiffies;
 
+		spin_lock(&qd_lru_lock);
 		spin_lock(&sdp->sd_quota_spin);
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
+		spin_unlock(&sdp->sd_quota_spin);
 		list_add(&qd->qd_list, &sdp->sd_quota_list);
 		atomic_inc(&sdp->sd_quota_count);
-		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 
 		found++;
 	}
@@ -1197,61 +1253,39 @@ fail:
 	return error;
 }
 
-static void gfs2_quota_scan(struct gfs2_sbd *sdp)
-{
-	struct gfs2_quota_data *qd, *safe;
-	LIST_HEAD(dead);
-
-	spin_lock(&sdp->sd_quota_spin);
-	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
-		if (!qd->qd_count &&
-		    time_after_eq(jiffies, qd->qd_last_touched +
-				gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
-			list_move(&qd->qd_list, &dead);
-			gfs2_assert_warn(sdp,
-					 atomic_read(&sdp->sd_quota_count) > 0);
-			atomic_dec(&sdp->sd_quota_count);
-		}
-	}
-	spin_unlock(&sdp->sd_quota_spin);
-
-	while (!list_empty(&dead)) {
-		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
-		list_del(&qd->qd_list);
-
-		gfs2_assert_warn(sdp, !qd->qd_change);
-		gfs2_assert_warn(sdp, !qd->qd_slot_count);
-		gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
-		gfs2_lvb_unhold(qd->qd_gl);
-		kmem_cache_free(gfs2_quotad_cachep, qd);
-	}
-}
-
 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 {
 	struct list_head *head = &sdp->sd_quota_list;
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-		if (qd->qd_count > 1 ||
-		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
-			list_move(&qd->qd_list, head);
+		spin_lock(&sdp->sd_quota_spin);
+		if (atomic_read(&qd->qd_count) > 1 ||
+		    (atomic_read(&qd->qd_count) &&
+		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
 			spin_unlock(&sdp->sd_quota_spin);
+			list_move(&qd->qd_list, head);
+			spin_unlock(&qd_lru_lock);
 			schedule();
-			spin_lock(&sdp->sd_quota_spin);
+			spin_lock(&qd_lru_lock);
 			continue;
 		}
+		spin_unlock(&sdp->sd_quota_spin);
 
 		list_del(&qd->qd_list);
+		/* Also remove if this qd exists in the reclaim list */
+		if (!list_empty(&qd->qd_reclaim)) {
+			list_del_init(&qd->qd_reclaim);
+			atomic_dec(&qd_lru_count);
+		}
 		atomic_dec(&sdp->sd_quota_count);
-		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 
-		if (!qd->qd_count) {
+		if (!atomic_read(&qd->qd_count)) {
 			gfs2_assert_warn(sdp, !qd->qd_change);
 			gfs2_assert_warn(sdp, !qd->qd_slot_count);
 		} else
@@ -1261,9 +1295,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		gfs2_lvb_unhold(qd->qd_gl);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 
-		spin_lock(&sdp->sd_quota_spin);
+		spin_lock(&qd_lru_lock);
 	}
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
@@ -1341,9 +1375,6 @@ int gfs2_quotad(void *data)
 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
 				   &quotad_timeo, &tune->gt_quota_quantum);
 
-		/* FIXME: This should be turned into a shrinker */
-		gfs2_quota_scan(sdp);
-
 		/* Check for & recover partially truncated inodes */
 		quotad_check_trunc_list(sdp);
 
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index cec9032be97d..0fa5fa63d0e8 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -49,4 +49,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
+extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+
 #endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 26c1fa777a95..a58a120dac92 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -373,7 +373,6 @@ TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
 TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(quota_cache_secs, 1);
 TUNE_ATTR(stall_secs, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
@@ -389,7 +388,6 @@ static struct attribute *tune_attrs[] = {
 	&tune_attr_complain_secs.attr,
 	&tune_attr_statfs_slow.attr,
 	&tune_attr_quota_simul_sync.attr,
-	&tune_attr_quota_cache_secs.attr,
 	&tune_attr_stall_secs.attr,
 	&tune_attr_statfs_quantum.attr,
 	&tune_attr_recoverd_secs.attr,