author	Steven Whitehouse <swhiteho@redhat.com>	2013-11-01 14:52:08 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2013-11-04 06:17:36 -0500
commit	7d80823e1d83e35977d77ae201bf63af3317ad0a (patch)
tree	b1bf994196dfefc5382e9756562421902ebaf337 /fs/gfs2
parent	9b9f039d570bddc1653cda2e38f4331dcacfcde5 (diff)
GFS2: Rename quota qd_lru_lock qd_lock
This is a straightforward rename in preparation for introducing the
generic list_lru infrastructure in the following patch.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Abhijith Das <adas@redhat.com>
Tested-by: Abhijith Das <adas@redhat.com>
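
For reference, a minimal sketch of the generic list_lru interface that the
following patch moves this code onto (3.12-era signatures; the gfs2_qd_lru
name and the helper functions below are illustrative, not part of this
patch — struct gfs2_quota_data and its qd_reclaim list_head are the real
fields from fs/gfs2/incore.h):

#include <linux/list_lru.h>

/* A single list_lru would replace the open-coded qd_lru_list/qd_lru_count
 * pair; list_lru does its own internal locking, which is why the global
 * lock renamed here can shrink to covering only the remaining shared
 * quota state. */
static struct list_lru gfs2_qd_lru;		/* illustrative name */

static int __init gfs2_qd_lru_setup(void)
{
	/* Allocates the per-node LRU lists */
	return list_lru_init(&gfs2_qd_lru);
}

/* Parking an unused gfs2_quota_data would replace the open-coded
 * list_add_tail(&qd->qd_reclaim, &qd_lru_list) + atomic_inc(&qd_lru_count): */
static void gfs2_qd_park(struct gfs2_quota_data *qd)
{
	list_lru_add(&gfs2_qd_lru, &qd->qd_reclaim);
}

/* ...and taking it back off the LRU would replace the matching
 * list_del_init + atomic_dec: */
static void gfs2_qd_unpark(struct gfs2_quota_data *qd)
{
	list_lru_del(&gfs2_qd_lru, &qd->qd_reclaim);
}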
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/quota.c	70
1 file changed, 35 insertions(+), 35 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index ed089118c171..466516ac5e57 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -74,7 +74,7 @@ struct gfs2_quota_change_host {
 
 static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(qd_lru_lock);
+static DEFINE_SPINLOCK(qd_lock);
 
 unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 				  struct shrink_control *sc)
@@ -87,7 +87,7 @@ unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 	if (!(sc->gfp_mask & __GFP_FS))
 		return SHRINK_STOP;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	while (nr_to_scan && !list_empty(&qd_lru_list)) {
 		qd = list_entry(qd_lru_list.next,
 				struct gfs2_quota_data, qd_reclaim);
@@ -106,13 +106,13 @@ unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 		/* Delete it from the common reclaim list */
 		list_del_init(&qd->qd_reclaim);
 		atomic_dec(&qd_lru_count);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		nr_to_scan--;
 		freed++;
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 	return freed;
 }
 
@@ -179,7 +179,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 
 	for (;;) {
 		found = 0;
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 			if (qid_eq(qd->qd_id, qid)) {
 				lockref_get(&qd->qd_lockref);
@@ -203,7 +203,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 			new_qd = NULL;
 		}
 
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (qd) {
 			if (new_qd) {
@@ -229,7 +229,7 @@ static void qd_hold(struct gfs2_quota_data *qd)
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	if (!lockref_put_or_lock(&qd->qd_lockref)) {
 
@@ -240,7 +240,7 @@ static void qd_put(struct gfs2_quota_data *qd)
 		spin_unlock(&qd->qd_lockref.lock);
 	}
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
@@ -249,10 +249,10 @@ static int slot_get(struct gfs2_quota_data *qd)
 	unsigned int c, o = 0, b;
 	unsigned char byte = 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	if (qd->qd_slot_count++) {
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 		return 0;
 	}
 
@@ -276,13 +276,13 @@ found:
 
 	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	return 0;
 
 fail:
 	qd->qd_slot_count--;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 	return -ENOSPC;
 }
 
@@ -290,10 +290,10 @@ static void slot_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 }
 
 static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
@@ -320,13 +320,13 @@ static void slot_put(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	if (!--qd->qd_slot_count) {
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 		qd->qd_slot = -1;
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 }
 
 static int bh_get(struct gfs2_quota_data *qd)
@@ -420,7 +420,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (sdp->sd_vfs->s_flags & MS_RDONLY)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
@@ -431,7 +431,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (!found)
 		qd = NULL;
 
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (qd) {
 		gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -598,9 +598,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	x = be64_to_cpu(qc->qc_change) + change;
 	qc->qc_change = cpu_to_be64(x);
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	qd->qd_change = x;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	if (!x) {
 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -972,9 +972,9 @@ static int need_sync(struct gfs2_quota_data *qd)
 	if (!qd->qd_qb.qb_limit)
 		return 0;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	value = qd->qd_change;
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	spin_lock(&gt->gt_spin);
 	num = gt->gt_quota_scale_num;
@@ -1019,9 +1019,9 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
 		if (!sync)
 			continue;
 
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		found = qd_check_sync(sdp, qd, NULL);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (!found)
 			continue;
@@ -1083,9 +1083,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 			continue;
 
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 		value += qd->qd_change;
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
 			print_message(qd, "exceeded");
@@ -1276,11 +1276,11 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 			qd->qd_slot = slot;
 			qd->qd_slot_count = 1;
 
-			spin_lock(&qd_lru_lock);
+			spin_lock(&qd_lock);
 			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
 			list_add(&qd->qd_list, &sdp->sd_quota_list);
 			atomic_inc(&sdp->sd_quota_count);
-			spin_unlock(&qd_lru_lock);
+			spin_unlock(&qd_lock);
 
 			found++;
 		}
@@ -1306,7 +1306,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 	struct gfs2_quota_data *qd;
 	unsigned int x;
 
-	spin_lock(&qd_lru_lock);
+	spin_lock(&qd_lock);
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
@@ -1320,9 +1320,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
 			spin_unlock(&qd->qd_lockref.lock);
 			list_move(&qd->qd_list, head);
-			spin_unlock(&qd_lru_lock);
+			spin_unlock(&qd_lock);
 			schedule();
-			spin_lock(&qd_lru_lock);
+			spin_lock(&qd_lock);
 			continue;
 		}
 		spin_unlock(&qd->qd_lockref.lock);
@@ -1334,7 +1334,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 			atomic_dec(&qd_lru_count);
 		}
 		atomic_dec(&sdp->sd_quota_count);
-		spin_unlock(&qd_lru_lock);
+		spin_unlock(&qd_lock);
 
 		if (!qd->qd_lockref.count) {
 			gfs2_assert_warn(sdp, !qd->qd_change);
@@ -1346,9 +1346,9 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		gfs2_glock_put(qd->qd_gl);
 		kmem_cache_free(gfs2_quotad_cachep, qd);
 
-		spin_lock(&qd_lru_lock);
+		spin_lock(&qd_lock);
 	}
-	spin_unlock(&qd_lru_lock);
+	spin_unlock(&qd_lock);
 
 	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
 
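
With the rename in place, the open-coded scan in gfs2_qd_shrink_scan()
(first two hunks above) can then be recast as an isolate callback walked
by the generic LRU itself; a sketch under the same assumptions (3.12-era
list_lru API, illustrative callback name, and with the qd_list removal,
asserts, and glock put from the loop above elided for brevity):

static enum lru_status gfs2_qd_isolate(struct list_head *item,
				       spinlock_t *lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd =
		list_entry(item, struct gfs2_quota_data, qd_reclaim);

	/* Move the entry to a private dispose list; the actual freeing
	 * happens outside the LRU lock, mirroring the unlock/free/relock
	 * dance in the loop above. */
	list_move(&qd->qd_reclaim, dispose);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	struct gfs2_quota_data *qd, *next;
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* The LRU's internal per-node lock replaces qd_lock for scanning */
	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	list_for_each_entry_safe(qd, next, &dispose, qd_reclaim)
		kmem_cache_free(gfs2_quotad_cachep, qd);

	return freed;
}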