author      Abhijith Das <adas@redhat.com>                  2009-01-07 17:03:37 -0500
committer   Steven Whitehouse <steve@dolmen.chygwyn.com>    2009-03-24 07:21:12 -0400
commit      0a7ab79c5b5a16035e09b466c9013c8afc3b4bff
tree        bb82ea7f936b334b4de184288bc951f1642e9541 /fs/gfs2/quota.c
parent      2db2aac255c38e75ad17c0b24feb589ccfccc0ae
GFS2: change gfs2_quota_scan into a shrinker
Deallocation of gfs2_quota_data objects now happens on demand through a
shrinker, instead of periodically through the quotad daemon.
Signed-off-by: Abhijith Das <adas@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
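
Note: gfs2_shrink_qd_memory() (added below) only takes effect once it is
registered with the VM, and that registration lives outside this file, so it
is not visible in the quota.c-limited view that follows. Against the
2.6.29-era shrinker API, a minimal sketch of the registration might look
like this (the qd_shrinker name here is illustrative):

        static struct shrinker qd_shrinker = {
                .shrink = gfs2_shrink_qd_memory, /* int (*)(int nr, gfp_t gfp_mask) */
                .seeks = DEFAULT_SEEKS,
        };

        /* during init ... */
        register_shrinker(&qd_shrinker);
        /* ... and on teardown */
        unregister_shrinker(&qd_shrinker);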
Diffstat (limited to 'fs/gfs2/quota.c')

-rw-r--r--   fs/gfs2/quota.c   165
1 file changed, 98 insertions(+), 67 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b08d09696b3..2ada6e10d07 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -80,6 +80,53 @@ struct gfs2_quota_change_host {
         u32 qc_id;
 };
 
+static LIST_HEAD(qd_lru_list);
+static atomic_t qd_lru_count = ATOMIC_INIT(0);
+static spinlock_t qd_lru_lock = SPIN_LOCK_UNLOCKED;
+
+int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+{
+        struct gfs2_quota_data *qd;
+        struct gfs2_sbd *sdp;
+
+        if (nr == 0)
+                goto out;
+
+        if (!(gfp_mask & __GFP_FS))
+                return -1;
+
+        spin_lock(&qd_lru_lock);
+        while (nr && !list_empty(&qd_lru_list)) {
+                qd = list_entry(qd_lru_list.next,
+                                struct gfs2_quota_data, qd_reclaim);
+                sdp = qd->qd_gl->gl_sbd;
+
+                /* Free from the filesystem-specific list */
+                list_del(&qd->qd_list);
+
+                spin_lock(&sdp->sd_quota_spin);
+                gfs2_assert_warn(sdp, !qd->qd_change);
+                gfs2_assert_warn(sdp, !qd->qd_slot_count);
+                gfs2_assert_warn(sdp, !qd->qd_bh_count);
+
+                gfs2_lvb_unhold(qd->qd_gl);
+                spin_unlock(&sdp->sd_quota_spin);
+                atomic_dec(&sdp->sd_quota_count);
+
+                /* Delete it from the common reclaim list */
+                list_del_init(&qd->qd_reclaim);
+                atomic_dec(&qd_lru_count);
+                spin_unlock(&qd_lru_lock);
+                kmem_cache_free(gfs2_quotad_cachep, qd);
+                spin_lock(&qd_lru_lock);
+                nr--;
+        }
+        spin_unlock(&qd_lru_lock);
+
+out:
+        return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
+}
+
 static u64 qd2offset(struct gfs2_quota_data *qd)
 {
         u64 offset;
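
The callback above follows the old two-phase shrinker contract, which is
worth spelling out because the return value does double duty. As a schematic
restatement of the function (not new code from the patch):

        /*
         * nr == 0:   query pass -- report how many objects could be freed
         * nr > 0:    free up to nr objects, then report what remains
         * return -1: no progress possible under this gfp_mask; here it
         *            prevents recursing into filesystem code when the
         *            allocation that triggered reclaim lacks __GFP_FS
         *
         * The reported count is scaled by /proc/sys/vm/vfs_cache_pressure:
         * at the default of 100 every LRU entry is reported; at 50 only
         * half are, so the VM trims this cache less aggressively than
         * the page cache.
         */

Note also that the loop drops qd_lru_lock around kmem_cache_free() and
re-takes it before the next iteration, keeping lock hold times bounded while
the list walk stays consistent.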
@@ -100,11 +147,12 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
         if (!qd)
                 return -ENOMEM;
 
-        qd->qd_count = 1;
+        atomic_set(&qd->qd_count, 1);
         qd->qd_id = id;
         if (user)
                 set_bit(QDF_USER, &qd->qd_flags);
         qd->qd_slot = -1;
+        INIT_LIST_HEAD(&qd->qd_reclaim);
 
         error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                                &gfs2_quota_glops, CREATE, &qd->qd_gl);
@@ -135,11 +183,17 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 
         for (;;) {
                 found = 0;
-                spin_lock(&sdp->sd_quota_spin);
+                spin_lock(&qd_lru_lock);
                 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                         if (qd->qd_id == id &&
                             !test_bit(QDF_USER, &qd->qd_flags) == !user) {
-                                qd->qd_count++;
+                                if (!atomic_read(&qd->qd_count) &&
+                                    !list_empty(&qd->qd_reclaim)) {
+                                        /* Remove it from reclaim list */
+                                        list_del_init(&qd->qd_reclaim);
+                                        atomic_dec(&qd_lru_count);
+                                }
+                                atomic_inc(&qd->qd_count);
                                 found = 1;
                                 break;
                         }
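
This hunk is half of the new object lifecycle: a gfs2_quota_data whose
reference count has dropped to zero is not freed immediately but parks on
the global LRU, and a later lookup for the same id rescues it instead of
allocating a fresh one. In schematic form (an illustrative restatement, with
obj standing in for qd):

        if (!atomic_read(&obj->count) && !list_empty(&obj->lru)) {
                list_del_init(&obj->lru);       /* back off the reclaim LRU */
                atomic_dec(&lru_count);
        }
        atomic_inc(&obj->count);                /* revived: a live reference again */

Using list_del_init() rather than plain list_del() matters here: it leaves
the node pointing at itself, so later list_empty() tests -- such as the one
in gfs2_quota_cleanup() below -- remain meaningful.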
@@ -155,7 +209,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
                 new_qd = NULL;
         }
 
-        spin_unlock(&sdp->sd_quota_spin);
+        spin_unlock(&qd_lru_lock);
 
         if (qd || !create) {
                 if (new_qd) {
@@ -175,21 +229,18 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 static void qd_hold(struct gfs2_quota_data *qd)
 {
         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-        spin_lock(&sdp->sd_quota_spin);
-        gfs2_assert(sdp, qd->qd_count);
-        qd->qd_count++;
-        spin_unlock(&sdp->sd_quota_spin);
+        gfs2_assert(sdp, atomic_read(&qd->qd_count));
+        atomic_inc(&qd->qd_count);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-        spin_lock(&sdp->sd_quota_spin);
-        gfs2_assert(sdp, qd->qd_count);
-        if (!--qd->qd_count)
-                qd->qd_last_touched = jiffies;
-        spin_unlock(&sdp->sd_quota_spin);
+        if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
+                /* Add to the reclaim list */
+                list_add_tail(&qd->qd_reclaim, &qd_lru_list);
+                atomic_inc(&qd_lru_count);
+                spin_unlock(&qd_lru_lock);
+        }
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
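
The new qd_put() hinges on atomic_dec_and_lock(), which decrements the
counter and returns true only when it reaches zero -- and in that case
returns with the spinlock already held. The zero transition and the LRU
insertion are therefore atomic with respect to qd_get(), which inspects the
count under the same qd_lru_lock. A rough open-coding of the helper's
semantics (a sketch modeled on the generic kernel implementation):

        int atomic_dec_and_lock_sketch(atomic_t *cnt, spinlock_t *lock)
        {
                if (atomic_add_unless(cnt, -1, 1))
                        return 0;       /* fast path: didn't reach zero, lock untouched */
                spin_lock(lock);
                if (atomic_dec_and_test(cnt))
                        return 1;       /* reached zero: caller now owns the lock */
                spin_unlock(lock);      /* lost a race with qd_hold(); nothing to do */
                return 0;
        }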
@@ -330,6 +381,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
         if (sdp->sd_vfs->s_flags & MS_RDONLY)
                 return 0;
 
+        spin_lock(&qd_lru_lock);
         spin_lock(&sdp->sd_quota_spin);
 
         list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
@@ -341,8 +393,8 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
                 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
                 set_bit(QDF_LOCKED, &qd->qd_flags);
-                gfs2_assert_warn(sdp, qd->qd_count);
-                qd->qd_count++;
+                gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+                atomic_inc(&qd->qd_count);
                 qd->qd_change_sync = qd->qd_change;
                 gfs2_assert_warn(sdp, qd->qd_slot_count);
                 qd->qd_slot_count++;
@@ -355,6 +407,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
                 qd = NULL;
 
         spin_unlock(&sdp->sd_quota_spin);
+        spin_unlock(&qd_lru_lock);
 
         if (qd) {
                 gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -379,24 +432,27 @@ static int qd_trylock(struct gfs2_quota_data *qd)
         if (sdp->sd_vfs->s_flags & MS_RDONLY)
                 return 0;
 
+        spin_lock(&qd_lru_lock);
         spin_lock(&sdp->sd_quota_spin);
 
         if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
             !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                 spin_unlock(&sdp->sd_quota_spin);
+                spin_unlock(&qd_lru_lock);
                 return 0;
         }
 
         list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 
         set_bit(QDF_LOCKED, &qd->qd_flags);
-        gfs2_assert_warn(sdp, qd->qd_count);
-        qd->qd_count++;
+        gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+        atomic_inc(&qd->qd_count);
         qd->qd_change_sync = qd->qd_change;
         gfs2_assert_warn(sdp, qd->qd_slot_count);
         qd->qd_slot_count++;
 
         spin_unlock(&sdp->sd_quota_spin);
+        spin_unlock(&qd_lru_lock);
 
         gfs2_assert_warn(sdp, qd->qd_change_sync);
         if (bh_get(qd)) {
@@ -802,8 +858,8 @@ restart:
                         loff_t pos;
                         gfs2_glock_dq_uninit(q_gh);
                         error = gfs2_glock_nq_init(qd->qd_gl,
-                                        LM_ST_EXCLUSIVE, GL_NOCACHE,
-                                        q_gh);
+                                                   LM_ST_EXCLUSIVE, GL_NOCACHE,
+                                                   q_gh);
                         if (error)
                                 return error;
 
@@ -820,7 +876,6 @@ restart:
 
                 gfs2_glock_dq_uninit(&i_gh);
 
-
                 gfs2_quota_in(&q, buf);
                 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
                 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
@@ -1171,13 +1226,14 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
                 qd->qd_change = qc.qc_change;
                 qd->qd_slot = slot;
                 qd->qd_slot_count = 1;
-                qd->qd_last_touched = jiffies;
 
+                spin_lock(&qd_lru_lock);
                 spin_lock(&sdp->sd_quota_spin);
                 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
+                spin_unlock(&sdp->sd_quota_spin);
                 list_add(&qd->qd_list, &sdp->sd_quota_list);
                 atomic_inc(&sdp->sd_quota_count);
-                spin_unlock(&sdp->sd_quota_spin);
+                spin_unlock(&qd_lru_lock);
 
                 found++;
         }
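
Along with the qd_fish() and qd_trylock() hunks above, this establishes a
consistent nesting order for the two locks: the global qd_lru_lock is always
taken first, with the per-superblock sd_quota_spin nested inside it.
Schematically (derived from the hunks, not code from the patch):

        spin_lock(&qd_lru_lock);        /* global: LRU and sd_quota_list */
        spin_lock(&sdp->sd_quota_spin); /* per-sb: slot bitmap and counts */
        /* ... */
        spin_unlock(&sdp->sd_quota_spin);
        spin_unlock(&qd_lru_lock);

That consistency is what lets gfs2_shrink_qd_memory() start from the global
lock and then safely nest the sd_quota_spin of whichever filesystem the
victim belongs to.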
@@ -1197,61 +1253,39 @@ fail:
         return error;
 }
 
-static void gfs2_quota_scan(struct gfs2_sbd *sdp)
-{
-        struct gfs2_quota_data *qd, *safe;
-        LIST_HEAD(dead);
-
-        spin_lock(&sdp->sd_quota_spin);
-        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
-                if (!qd->qd_count &&
-                    time_after_eq(jiffies, qd->qd_last_touched +
-                                gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
-                        list_move(&qd->qd_list, &dead);
-                        gfs2_assert_warn(sdp,
-                                atomic_read(&sdp->sd_quota_count) > 0);
-                        atomic_dec(&sdp->sd_quota_count);
-                }
-        }
-        spin_unlock(&sdp->sd_quota_spin);
-
-        while (!list_empty(&dead)) {
-                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
-                list_del(&qd->qd_list);
-
-                gfs2_assert_warn(sdp, !qd->qd_change);
-                gfs2_assert_warn(sdp, !qd->qd_slot_count);
-                gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
-                gfs2_lvb_unhold(qd->qd_gl);
-                kmem_cache_free(gfs2_quotad_cachep, qd);
-        }
-}
-
 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 {
         struct list_head *head = &sdp->sd_quota_list;
         struct gfs2_quota_data *qd;
         unsigned int x;
 
-        spin_lock(&sdp->sd_quota_spin);
+        spin_lock(&qd_lru_lock);
         while (!list_empty(head)) {
                 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-                if (qd->qd_count > 1 ||
-                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
-                        list_move(&qd->qd_list, head);
+                spin_lock(&sdp->sd_quota_spin);
+                if (atomic_read(&qd->qd_count) > 1 ||
+                    (atomic_read(&qd->qd_count) &&
+                     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                         spin_unlock(&sdp->sd_quota_spin);
+                        list_move(&qd->qd_list, head);
+                        spin_unlock(&qd_lru_lock);
                         schedule();
-                        spin_lock(&sdp->sd_quota_spin);
+                        spin_lock(&qd_lru_lock);
                         continue;
                 }
+                spin_unlock(&sdp->sd_quota_spin);
 
                 list_del(&qd->qd_list);
+                /* Also remove if this qd exists in the reclaim list */
+                if (!list_empty(&qd->qd_reclaim)) {
+                        list_del_init(&qd->qd_reclaim);
+                        atomic_dec(&qd_lru_count);
+                }
                 atomic_dec(&sdp->sd_quota_count);
-                spin_unlock(&sdp->sd_quota_spin);
+                spin_unlock(&qd_lru_lock);
 
-                if (!qd->qd_count) {
+                if (!atomic_read(&qd->qd_count)) {
                         gfs2_assert_warn(sdp, !qd->qd_change);
                         gfs2_assert_warn(sdp, !qd->qd_slot_count);
                 } else
@@ -1261,9 +1295,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
                 gfs2_lvb_unhold(qd->qd_gl);
                 kmem_cache_free(gfs2_quotad_cachep, qd);
 
-                spin_lock(&sdp->sd_quota_spin);
+                spin_lock(&qd_lru_lock);
         }
-        spin_unlock(&sdp->sd_quota_spin);
+        spin_unlock(&qd_lru_lock);
 
         gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
@@ -1341,9 +1375,6 @@ int gfs2_quotad(void *data)
                 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                    &quotad_timeo, &tune->gt_quota_quantum);
 
-                /* FIXME: This should be turned into a shrinker */
-                gfs2_quota_scan(sdp);
-
                 /* Check for & recover partially truncated inodes */
                 quotad_check_trunc_list(sdp);
 
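
The net effect on quotad: with gfs2_quota_scan() gone from this loop
(together with the FIXME it answers), quotad no longer frees entries that
have sat unused for gt_quota_cache_secs, and the qd_last_touched timestamp
disappears with it (see the qd_alloc and gfs2_quota_init hunks above).
Reclaim is now purely pressure-driven; roughly:

        /*
         * Before: time-driven, whether or not memory was tight:
         *         quotad -> gfs2_quota_scan() on every wakeup
         *
         * After:  demand-driven, via the registered shrinker:
         *         shrink_slab() -> gfs2_shrink_qd_memory(nr, gfp_mask)
         */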