diff options
Diffstat (limited to 'fs/gfs2/quota.c')
-rw-r--r-- | fs/gfs2/quota.c | 203 |
1 files changed, 108 insertions, 95 deletions
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index b08d09696b3e..8d53f66b5bcc 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/fs.h> | 45 | #include <linux/fs.h> |
46 | #include <linux/bio.h> | 46 | #include <linux/bio.h> |
47 | #include <linux/gfs2_ondisk.h> | 47 | #include <linux/gfs2_ondisk.h> |
48 | #include <linux/lm_interface.h> | ||
49 | #include <linux/kthread.h> | 48 | #include <linux/kthread.h> |
50 | #include <linux/freezer.h> | 49 | #include <linux/freezer.h> |
51 | 50 | ||
@@ -80,6 +79,51 @@ struct gfs2_quota_change_host { | |||
80 | u32 qc_id; | 79 | u32 qc_id; |
81 | }; | 80 | }; |
82 | 81 | ||
82 | static LIST_HEAD(qd_lru_list); | ||
83 | static atomic_t qd_lru_count = ATOMIC_INIT(0); | ||
84 | static spinlock_t qd_lru_lock = SPIN_LOCK_UNLOCKED; | ||
85 | |||
86 | int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) | ||
87 | { | ||
88 | struct gfs2_quota_data *qd; | ||
89 | struct gfs2_sbd *sdp; | ||
90 | |||
91 | if (nr == 0) | ||
92 | goto out; | ||
93 | |||
94 | if (!(gfp_mask & __GFP_FS)) | ||
95 | return -1; | ||
96 | |||
97 | spin_lock(&qd_lru_lock); | ||
98 | while (nr && !list_empty(&qd_lru_list)) { | ||
99 | qd = list_entry(qd_lru_list.next, | ||
100 | struct gfs2_quota_data, qd_reclaim); | ||
101 | sdp = qd->qd_gl->gl_sbd; | ||
102 | |||
103 | /* Free from the filesystem-specific list */ | ||
104 | list_del(&qd->qd_list); | ||
105 | |||
106 | gfs2_assert_warn(sdp, !qd->qd_change); | ||
107 | gfs2_assert_warn(sdp, !qd->qd_slot_count); | ||
108 | gfs2_assert_warn(sdp, !qd->qd_bh_count); | ||
109 | |||
110 | gfs2_glock_put(qd->qd_gl); | ||
111 | atomic_dec(&sdp->sd_quota_count); | ||
112 | |||
113 | /* Delete it from the common reclaim list */ | ||
114 | list_del_init(&qd->qd_reclaim); | ||
115 | atomic_dec(&qd_lru_count); | ||
116 | spin_unlock(&qd_lru_lock); | ||
117 | kmem_cache_free(gfs2_quotad_cachep, qd); | ||
118 | spin_lock(&qd_lru_lock); | ||
119 | nr--; | ||
120 | } | ||
121 | spin_unlock(&qd_lru_lock); | ||
122 | |||
123 | out: | ||
124 | return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100; | ||
125 | } | ||
126 | |||
83 | static u64 qd2offset(struct gfs2_quota_data *qd) | 127 | static u64 qd2offset(struct gfs2_quota_data *qd) |
84 | { | 128 | { |
85 | u64 offset; | 129 | u64 offset; |
@@ -100,22 +144,18 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id, | |||
100 | if (!qd) | 144 | if (!qd) |
101 | return -ENOMEM; | 145 | return -ENOMEM; |
102 | 146 | ||
103 | qd->qd_count = 1; | 147 | atomic_set(&qd->qd_count, 1); |
104 | qd->qd_id = id; | 148 | qd->qd_id = id; |
105 | if (user) | 149 | if (user) |
106 | set_bit(QDF_USER, &qd->qd_flags); | 150 | set_bit(QDF_USER, &qd->qd_flags); |
107 | qd->qd_slot = -1; | 151 | qd->qd_slot = -1; |
152 | INIT_LIST_HEAD(&qd->qd_reclaim); | ||
108 | 153 | ||
109 | error = gfs2_glock_get(sdp, 2 * (u64)id + !user, | 154 | error = gfs2_glock_get(sdp, 2 * (u64)id + !user, |
110 | &gfs2_quota_glops, CREATE, &qd->qd_gl); | 155 | &gfs2_quota_glops, CREATE, &qd->qd_gl); |
111 | if (error) | 156 | if (error) |
112 | goto fail; | 157 | goto fail; |
113 | 158 | ||
114 | error = gfs2_lvb_hold(qd->qd_gl); | ||
115 | gfs2_glock_put(qd->qd_gl); | ||
116 | if (error) | ||
117 | goto fail; | ||
118 | |||
119 | *qdp = qd; | 159 | *qdp = qd; |
120 | 160 | ||
121 | return 0; | 161 | return 0; |
@@ -135,11 +175,17 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | |||
135 | 175 | ||
136 | for (;;) { | 176 | for (;;) { |
137 | found = 0; | 177 | found = 0; |
138 | spin_lock(&sdp->sd_quota_spin); | 178 | spin_lock(&qd_lru_lock); |
139 | list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { | 179 | list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { |
140 | if (qd->qd_id == id && | 180 | if (qd->qd_id == id && |
141 | !test_bit(QDF_USER, &qd->qd_flags) == !user) { | 181 | !test_bit(QDF_USER, &qd->qd_flags) == !user) { |
142 | qd->qd_count++; | 182 | if (!atomic_read(&qd->qd_count) && |
183 | !list_empty(&qd->qd_reclaim)) { | ||
184 | /* Remove it from reclaim list */ | ||
185 | list_del_init(&qd->qd_reclaim); | ||
186 | atomic_dec(&qd_lru_count); | ||
187 | } | ||
188 | atomic_inc(&qd->qd_count); | ||
143 | found = 1; | 189 | found = 1; |
144 | break; | 190 | break; |
145 | } | 191 | } |
@@ -155,11 +201,11 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | |||
155 | new_qd = NULL; | 201 | new_qd = NULL; |
156 | } | 202 | } |
157 | 203 | ||
158 | spin_unlock(&sdp->sd_quota_spin); | 204 | spin_unlock(&qd_lru_lock); |
159 | 205 | ||
160 | if (qd || !create) { | 206 | if (qd || !create) { |
161 | if (new_qd) { | 207 | if (new_qd) { |
162 | gfs2_lvb_unhold(new_qd->qd_gl); | 208 | gfs2_glock_put(new_qd->qd_gl); |
163 | kmem_cache_free(gfs2_quotad_cachep, new_qd); | 209 | kmem_cache_free(gfs2_quotad_cachep, new_qd); |
164 | } | 210 | } |
165 | *qdp = qd; | 211 | *qdp = qd; |
@@ -175,21 +221,18 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, | |||
175 | static void qd_hold(struct gfs2_quota_data *qd) | 221 | static void qd_hold(struct gfs2_quota_data *qd) |
176 | { | 222 | { |
177 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 223 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
178 | 224 | gfs2_assert(sdp, atomic_read(&qd->qd_count)); | |
179 | spin_lock(&sdp->sd_quota_spin); | 225 | atomic_inc(&qd->qd_count); |
180 | gfs2_assert(sdp, qd->qd_count); | ||
181 | qd->qd_count++; | ||
182 | spin_unlock(&sdp->sd_quota_spin); | ||
183 | } | 226 | } |
184 | 227 | ||
185 | static void qd_put(struct gfs2_quota_data *qd) | 228 | static void qd_put(struct gfs2_quota_data *qd) |
186 | { | 229 | { |
187 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 230 | if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) { |
188 | spin_lock(&sdp->sd_quota_spin); | 231 | /* Add to the reclaim list */ |
189 | gfs2_assert(sdp, qd->qd_count); | 232 | list_add_tail(&qd->qd_reclaim, &qd_lru_list); |
190 | if (!--qd->qd_count) | 233 | atomic_inc(&qd_lru_count); |
191 | qd->qd_last_touched = jiffies; | 234 | spin_unlock(&qd_lru_lock); |
192 | spin_unlock(&sdp->sd_quota_spin); | 235 | } |
193 | } | 236 | } |
194 | 237 | ||
195 | static int slot_get(struct gfs2_quota_data *qd) | 238 | static int slot_get(struct gfs2_quota_data *qd) |
@@ -198,10 +241,10 @@ static int slot_get(struct gfs2_quota_data *qd) | |||
198 | unsigned int c, o = 0, b; | 241 | unsigned int c, o = 0, b; |
199 | unsigned char byte = 0; | 242 | unsigned char byte = 0; |
200 | 243 | ||
201 | spin_lock(&sdp->sd_quota_spin); | 244 | spin_lock(&qd_lru_lock); |
202 | 245 | ||
203 | if (qd->qd_slot_count++) { | 246 | if (qd->qd_slot_count++) { |
204 | spin_unlock(&sdp->sd_quota_spin); | 247 | spin_unlock(&qd_lru_lock); |
205 | return 0; | 248 | return 0; |
206 | } | 249 | } |
207 | 250 | ||
@@ -225,13 +268,13 @@ found: | |||
225 | 268 | ||
226 | sdp->sd_quota_bitmap[c][o] |= 1 << b; | 269 | sdp->sd_quota_bitmap[c][o] |= 1 << b; |
227 | 270 | ||
228 | spin_unlock(&sdp->sd_quota_spin); | 271 | spin_unlock(&qd_lru_lock); |
229 | 272 | ||
230 | return 0; | 273 | return 0; |
231 | 274 | ||
232 | fail: | 275 | fail: |
233 | qd->qd_slot_count--; | 276 | qd->qd_slot_count--; |
234 | spin_unlock(&sdp->sd_quota_spin); | 277 | spin_unlock(&qd_lru_lock); |
235 | return -ENOSPC; | 278 | return -ENOSPC; |
236 | } | 279 | } |
237 | 280 | ||
@@ -239,23 +282,23 @@ static void slot_hold(struct gfs2_quota_data *qd) | |||
239 | { | 282 | { |
240 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 283 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
241 | 284 | ||
242 | spin_lock(&sdp->sd_quota_spin); | 285 | spin_lock(&qd_lru_lock); |
243 | gfs2_assert(sdp, qd->qd_slot_count); | 286 | gfs2_assert(sdp, qd->qd_slot_count); |
244 | qd->qd_slot_count++; | 287 | qd->qd_slot_count++; |
245 | spin_unlock(&sdp->sd_quota_spin); | 288 | spin_unlock(&qd_lru_lock); |
246 | } | 289 | } |
247 | 290 | ||
248 | static void slot_put(struct gfs2_quota_data *qd) | 291 | static void slot_put(struct gfs2_quota_data *qd) |
249 | { | 292 | { |
250 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; | 293 | struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; |
251 | 294 | ||
252 | spin_lock(&sdp->sd_quota_spin); | 295 | spin_lock(&qd_lru_lock); |
253 | gfs2_assert(sdp, qd->qd_slot_count); | 296 | gfs2_assert(sdp, qd->qd_slot_count); |
254 | if (!--qd->qd_slot_count) { | 297 | if (!--qd->qd_slot_count) { |
255 | gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0); | 298 | gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0); |
256 | qd->qd_slot = -1; | 299 | qd->qd_slot = -1; |
257 | } | 300 | } |
258 | spin_unlock(&sdp->sd_quota_spin); | 301 | spin_unlock(&qd_lru_lock); |
259 | } | 302 | } |
260 | 303 | ||
261 | static int bh_get(struct gfs2_quota_data *qd) | 304 | static int bh_get(struct gfs2_quota_data *qd) |
@@ -330,7 +373,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) | |||
330 | if (sdp->sd_vfs->s_flags & MS_RDONLY) | 373 | if (sdp->sd_vfs->s_flags & MS_RDONLY) |
331 | return 0; | 374 | return 0; |
332 | 375 | ||
333 | spin_lock(&sdp->sd_quota_spin); | 376 | spin_lock(&qd_lru_lock); |
334 | 377 | ||
335 | list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { | 378 | list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { |
336 | if (test_bit(QDF_LOCKED, &qd->qd_flags) || | 379 | if (test_bit(QDF_LOCKED, &qd->qd_flags) || |
@@ -341,8 +384,8 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) | |||
341 | list_move_tail(&qd->qd_list, &sdp->sd_quota_list); | 384 | list_move_tail(&qd->qd_list, &sdp->sd_quota_list); |
342 | 385 | ||
343 | set_bit(QDF_LOCKED, &qd->qd_flags); | 386 | set_bit(QDF_LOCKED, &qd->qd_flags); |
344 | gfs2_assert_warn(sdp, qd->qd_count); | 387 | gfs2_assert_warn(sdp, atomic_read(&qd->qd_count)); |
345 | qd->qd_count++; | 388 | atomic_inc(&qd->qd_count); |
346 | qd->qd_change_sync = qd->qd_change; | 389 | qd->qd_change_sync = qd->qd_change; |
347 | gfs2_assert_warn(sdp, qd->qd_slot_count); | 390 | gfs2_assert_warn(sdp, qd->qd_slot_count); |
348 | qd->qd_slot_count++; | 391 | qd->qd_slot_count++; |
@@ -354,7 +397,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) | |||
354 | if (!found) | 397 | if (!found) |
355 | qd = NULL; | 398 | qd = NULL; |
356 | 399 | ||
357 | spin_unlock(&sdp->sd_quota_spin); | 400 | spin_unlock(&qd_lru_lock); |
358 | 401 | ||
359 | if (qd) { | 402 | if (qd) { |
360 | gfs2_assert_warn(sdp, qd->qd_change_sync); | 403 | gfs2_assert_warn(sdp, qd->qd_change_sync); |
@@ -379,24 +422,24 @@ static int qd_trylock(struct gfs2_quota_data *qd) | |||
379 | if (sdp->sd_vfs->s_flags & MS_RDONLY) | 422 | if (sdp->sd_vfs->s_flags & MS_RDONLY) |
380 | return 0; | 423 | return 0; |
381 | 424 | ||
382 | spin_lock(&sdp->sd_quota_spin); | 425 | spin_lock(&qd_lru_lock); |
383 | 426 | ||
384 | if (test_bit(QDF_LOCKED, &qd->qd_flags) || | 427 | if (test_bit(QDF_LOCKED, &qd->qd_flags) || |
385 | !test_bit(QDF_CHANGE, &qd->qd_flags)) { | 428 | !test_bit(QDF_CHANGE, &qd->qd_flags)) { |
386 | spin_unlock(&sdp->sd_quota_spin); | 429 | spin_unlock(&qd_lru_lock); |
387 | return 0; | 430 | return 0; |
388 | } | 431 | } |
389 | 432 | ||
390 | list_move_tail(&qd->qd_list, &sdp->sd_quota_list); | 433 | list_move_tail(&qd->qd_list, &sdp->sd_quota_list); |
391 | 434 | ||
392 | set_bit(QDF_LOCKED, &qd->qd_flags); | 435 | set_bit(QDF_LOCKED, &qd->qd_flags); |
393 | gfs2_assert_warn(sdp, qd->qd_count); | 436 | gfs2_assert_warn(sdp, atomic_read(&qd->qd_count)); |
394 | qd->qd_count++; | 437 | atomic_inc(&qd->qd_count); |
395 | qd->qd_change_sync = qd->qd_change; | 438 | qd->qd_change_sync = qd->qd_change; |
396 | gfs2_assert_warn(sdp, qd->qd_slot_count); | 439 | gfs2_assert_warn(sdp, qd->qd_slot_count); |
397 | qd->qd_slot_count++; | 440 | qd->qd_slot_count++; |
398 | 441 | ||
399 | spin_unlock(&sdp->sd_quota_spin); | 442 | spin_unlock(&qd_lru_lock); |
400 | 443 | ||
401 | gfs2_assert_warn(sdp, qd->qd_change_sync); | 444 | gfs2_assert_warn(sdp, qd->qd_change_sync); |
402 | if (bh_get(qd)) { | 445 | if (bh_get(qd)) { |
@@ -556,9 +599,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change) | |||
556 | x = be64_to_cpu(qc->qc_change) + change; | 599 | x = be64_to_cpu(qc->qc_change) + change; |
557 | qc->qc_change = cpu_to_be64(x); | 600 | qc->qc_change = cpu_to_be64(x); |
558 | 601 | ||
559 | spin_lock(&sdp->sd_quota_spin); | 602 | spin_lock(&qd_lru_lock); |
560 | qd->qd_change = x; | 603 | qd->qd_change = x; |
561 | spin_unlock(&sdp->sd_quota_spin); | 604 | spin_unlock(&qd_lru_lock); |
562 | 605 | ||
563 | if (!x) { | 606 | if (!x) { |
564 | gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); | 607 | gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); |
@@ -802,8 +845,8 @@ restart: | |||
802 | loff_t pos; | 845 | loff_t pos; |
803 | gfs2_glock_dq_uninit(q_gh); | 846 | gfs2_glock_dq_uninit(q_gh); |
804 | error = gfs2_glock_nq_init(qd->qd_gl, | 847 | error = gfs2_glock_nq_init(qd->qd_gl, |
805 | LM_ST_EXCLUSIVE, GL_NOCACHE, | 848 | LM_ST_EXCLUSIVE, GL_NOCACHE, |
806 | q_gh); | 849 | q_gh); |
807 | if (error) | 850 | if (error) |
808 | return error; | 851 | return error; |
809 | 852 | ||
@@ -820,7 +863,6 @@ restart: | |||
820 | 863 | ||
821 | gfs2_glock_dq_uninit(&i_gh); | 864 | gfs2_glock_dq_uninit(&i_gh); |
822 | 865 | ||
823 | |||
824 | gfs2_quota_in(&q, buf); | 866 | gfs2_quota_in(&q, buf); |
825 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; | 867 | qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; |
826 | qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); | 868 | qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); |
@@ -890,9 +932,9 @@ static int need_sync(struct gfs2_quota_data *qd) | |||
890 | if (!qd->qd_qb.qb_limit) | 932 | if (!qd->qd_qb.qb_limit) |
891 | return 0; | 933 | return 0; |
892 | 934 | ||
893 | spin_lock(&sdp->sd_quota_spin); | 935 | spin_lock(&qd_lru_lock); |
894 | value = qd->qd_change; | 936 | value = qd->qd_change; |
895 | spin_unlock(&sdp->sd_quota_spin); | 937 | spin_unlock(&qd_lru_lock); |
896 | 938 | ||
897 | spin_lock(>->gt_spin); | 939 | spin_lock(>->gt_spin); |
898 | num = gt->gt_quota_scale_num; | 940 | num = gt->gt_quota_scale_num; |
@@ -985,9 +1027,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
985 | continue; | 1027 | continue; |
986 | 1028 | ||
987 | value = (s64)be64_to_cpu(qd->qd_qb.qb_value); | 1029 | value = (s64)be64_to_cpu(qd->qd_qb.qb_value); |
988 | spin_lock(&sdp->sd_quota_spin); | 1030 | spin_lock(&qd_lru_lock); |
989 | value += qd->qd_change; | 1031 | value += qd->qd_change; |
990 | spin_unlock(&sdp->sd_quota_spin); | 1032 | spin_unlock(&qd_lru_lock); |
991 | 1033 | ||
992 | if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { | 1034 | if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { |
993 | print_message(qd, "exceeded"); | 1035 | print_message(qd, "exceeded"); |
@@ -1171,13 +1213,12 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) | |||
1171 | qd->qd_change = qc.qc_change; | 1213 | qd->qd_change = qc.qc_change; |
1172 | qd->qd_slot = slot; | 1214 | qd->qd_slot = slot; |
1173 | qd->qd_slot_count = 1; | 1215 | qd->qd_slot_count = 1; |
1174 | qd->qd_last_touched = jiffies; | ||
1175 | 1216 | ||
1176 | spin_lock(&sdp->sd_quota_spin); | 1217 | spin_lock(&qd_lru_lock); |
1177 | gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1); | 1218 | gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1); |
1178 | list_add(&qd->qd_list, &sdp->sd_quota_list); | 1219 | list_add(&qd->qd_list, &sdp->sd_quota_list); |
1179 | atomic_inc(&sdp->sd_quota_count); | 1220 | atomic_inc(&sdp->sd_quota_count); |
1180 | spin_unlock(&sdp->sd_quota_spin); | 1221 | spin_unlock(&qd_lru_lock); |
1181 | 1222 | ||
1182 | found++; | 1223 | found++; |
1183 | } | 1224 | } |
@@ -1197,73 +1238,48 @@ fail: | |||
1197 | return error; | 1238 | return error; |
1198 | } | 1239 | } |
1199 | 1240 | ||
1200 | static void gfs2_quota_scan(struct gfs2_sbd *sdp) | ||
1201 | { | ||
1202 | struct gfs2_quota_data *qd, *safe; | ||
1203 | LIST_HEAD(dead); | ||
1204 | |||
1205 | spin_lock(&sdp->sd_quota_spin); | ||
1206 | list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) { | ||
1207 | if (!qd->qd_count && | ||
1208 | time_after_eq(jiffies, qd->qd_last_touched + | ||
1209 | gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) { | ||
1210 | list_move(&qd->qd_list, &dead); | ||
1211 | gfs2_assert_warn(sdp, | ||
1212 | atomic_read(&sdp->sd_quota_count) > 0); | ||
1213 | atomic_dec(&sdp->sd_quota_count); | ||
1214 | } | ||
1215 | } | ||
1216 | spin_unlock(&sdp->sd_quota_spin); | ||
1217 | |||
1218 | while (!list_empty(&dead)) { | ||
1219 | qd = list_entry(dead.next, struct gfs2_quota_data, qd_list); | ||
1220 | list_del(&qd->qd_list); | ||
1221 | |||
1222 | gfs2_assert_warn(sdp, !qd->qd_change); | ||
1223 | gfs2_assert_warn(sdp, !qd->qd_slot_count); | ||
1224 | gfs2_assert_warn(sdp, !qd->qd_bh_count); | ||
1225 | |||
1226 | gfs2_lvb_unhold(qd->qd_gl); | ||
1227 | kmem_cache_free(gfs2_quotad_cachep, qd); | ||
1228 | } | ||
1229 | } | ||
1230 | |||
1231 | void gfs2_quota_cleanup(struct gfs2_sbd *sdp) | 1241 | void gfs2_quota_cleanup(struct gfs2_sbd *sdp) |
1232 | { | 1242 | { |
1233 | struct list_head *head = &sdp->sd_quota_list; | 1243 | struct list_head *head = &sdp->sd_quota_list; |
1234 | struct gfs2_quota_data *qd; | 1244 | struct gfs2_quota_data *qd; |
1235 | unsigned int x; | 1245 | unsigned int x; |
1236 | 1246 | ||
1237 | spin_lock(&sdp->sd_quota_spin); | 1247 | spin_lock(&qd_lru_lock); |
1238 | while (!list_empty(head)) { | 1248 | while (!list_empty(head)) { |
1239 | qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); | 1249 | qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); |
1240 | 1250 | ||
1241 | if (qd->qd_count > 1 || | 1251 | if (atomic_read(&qd->qd_count) > 1 || |
1242 | (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) { | 1252 | (atomic_read(&qd->qd_count) && |
1253 | !test_bit(QDF_CHANGE, &qd->qd_flags))) { | ||
1243 | list_move(&qd->qd_list, head); | 1254 | list_move(&qd->qd_list, head); |
1244 | spin_unlock(&sdp->sd_quota_spin); | 1255 | spin_unlock(&qd_lru_lock); |
1245 | schedule(); | 1256 | schedule(); |
1246 | spin_lock(&sdp->sd_quota_spin); | 1257 | spin_lock(&qd_lru_lock); |
1247 | continue; | 1258 | continue; |
1248 | } | 1259 | } |
1249 | 1260 | ||
1250 | list_del(&qd->qd_list); | 1261 | list_del(&qd->qd_list); |
1262 | /* Also remove if this qd exists in the reclaim list */ | ||
1263 | if (!list_empty(&qd->qd_reclaim)) { | ||
1264 | list_del_init(&qd->qd_reclaim); | ||
1265 | atomic_dec(&qd_lru_count); | ||
1266 | } | ||
1251 | atomic_dec(&sdp->sd_quota_count); | 1267 | atomic_dec(&sdp->sd_quota_count); |
1252 | spin_unlock(&sdp->sd_quota_spin); | 1268 | spin_unlock(&qd_lru_lock); |
1253 | 1269 | ||
1254 | if (!qd->qd_count) { | 1270 | if (!atomic_read(&qd->qd_count)) { |
1255 | gfs2_assert_warn(sdp, !qd->qd_change); | 1271 | gfs2_assert_warn(sdp, !qd->qd_change); |
1256 | gfs2_assert_warn(sdp, !qd->qd_slot_count); | 1272 | gfs2_assert_warn(sdp, !qd->qd_slot_count); |
1257 | } else | 1273 | } else |
1258 | gfs2_assert_warn(sdp, qd->qd_slot_count == 1); | 1274 | gfs2_assert_warn(sdp, qd->qd_slot_count == 1); |
1259 | gfs2_assert_warn(sdp, !qd->qd_bh_count); | 1275 | gfs2_assert_warn(sdp, !qd->qd_bh_count); |
1260 | 1276 | ||
1261 | gfs2_lvb_unhold(qd->qd_gl); | 1277 | gfs2_glock_put(qd->qd_gl); |
1262 | kmem_cache_free(gfs2_quotad_cachep, qd); | 1278 | kmem_cache_free(gfs2_quotad_cachep, qd); |
1263 | 1279 | ||
1264 | spin_lock(&sdp->sd_quota_spin); | 1280 | spin_lock(&qd_lru_lock); |
1265 | } | 1281 | } |
1266 | spin_unlock(&sdp->sd_quota_spin); | 1282 | spin_unlock(&qd_lru_lock); |
1267 | 1283 | ||
1268 | gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); | 1284 | gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); |
1269 | 1285 | ||
@@ -1341,9 +1357,6 @@ int gfs2_quotad(void *data) | |||
1341 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, | 1357 | quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, |
1342 | &quotad_timeo, &tune->gt_quota_quantum); | 1358 | &quotad_timeo, &tune->gt_quota_quantum); |
1343 | 1359 | ||
1344 | /* FIXME: This should be turned into a shrinker */ | ||
1345 | gfs2_quota_scan(sdp); | ||
1346 | |||
1347 | /* Check for & recover partially truncated inodes */ | 1360 | /* Check for & recover partially truncated inodes */ |
1348 | quotad_check_trunc_list(sdp); | 1361 | quotad_check_trunc_list(sdp); |
1349 | 1362 | ||