author		Steven Whitehouse <swhiteho@redhat.com>	2009-01-08 09:28:42 -0500
committer	Steven Whitehouse <steve@dolmen.chygwyn.com>	2009-03-24 07:21:13 -0400
commit		22077f57dec8fcbeb1112b35313961c0902ff038 (patch)
tree		d3d871fe31b8ac922a09e28202c348d186d2dca3 /fs
parent		0a7ab79c5b5a16035e09b466c9013c8afc3b4bff (diff)
GFS2: Remove "double" locking in quota
We only really need a single spin lock for the quota data, so
let's just use the lru lock for now.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Abhijith Das <adas@redhat.com>
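For context, the hunks below show the pattern being removed: several paths took the per-filesystem sd_quota_spin while already holding the global qd_lru_lock, even though both guard the same quota fields. A minimal sketch of the before/after locking shape, assuming a trimmed-down stand-in struct rather than the real gfs2_quota_data (illustrative only, not the GFS2 code itself):

/* Illustrative sketch only; the real definitions live in fs/gfs2/incore.h
 * and fs/gfs2/quota.c. The struct below is a stand-in, not gfs2_quota_data. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct qd_sketch {
	s64 qd_change;				/* field both locks used to guard */
};

static DEFINE_SPINLOCK(qd_lru_lock);		/* global quota/LRU lock (kept) */
static DEFINE_SPINLOCK(sd_quota_spin);		/* per-sb lock (removed by this patch) */

/* Before: the per-sb lock was nested inside the global one in several paths */
static void set_change_before(struct qd_sketch *qd, s64 x)
{
	spin_lock(&qd_lru_lock);
	spin_lock(&sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sd_quota_spin);
	spin_unlock(&qd_lru_lock);
}

/* After: qd_lru_lock alone serialises access to the quota data */
static void set_change_after(struct qd_sketch *qd, s64 x)
{
	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);
}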
Diffstat (limited to 'fs')
-rw-r--r--	fs/gfs2/incore.h	1
-rw-r--r--	fs/gfs2/ops_fstype.c	1
-rw-r--r--	fs/gfs2/quota.c	40
3 files changed, 14 insertions, 28 deletions
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 592aa5040d2..a0117d6eb14 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -540,7 +540,6 @@ struct gfs2_sbd {
 
 	struct list_head sd_quota_list;
 	atomic_t sd_quota_count;
-	spinlock_t sd_quota_spin;
 	struct mutex sd_quota_mutex;
 	wait_queue_head_t sd_quota_wait;
 	struct list_head sd_trunc_list;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 530d3f6f6ea..402b6a2cd2c 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -98,7 +98,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	mutex_init(&sdp->sd_jindex_mutex);
 
 	INIT_LIST_HEAD(&sdp->sd_quota_list);
-	spin_lock_init(&sdp->sd_quota_spin);
 	mutex_init(&sdp->sd_quota_mutex);
 	init_waitqueue_head(&sdp->sd_quota_wait);
 	INIT_LIST_HEAD(&sdp->sd_trunc_list);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2ada6e10d07..e8ef0f80fb1 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -104,13 +104,11 @@ int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
 		/* Free from the filesystem-specific list */
 		list_del(&qd->qd_list);
 
-		spin_lock(&sdp->sd_quota_spin);
 		gfs2_assert_warn(sdp, !qd->qd_change);
 		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 
 		gfs2_lvb_unhold(qd->qd_gl);
-		spin_unlock(&sdp->sd_quota_spin);
 		atomic_dec(&sdp->sd_quota_count);
 
 		/* Delete it from the common reclaim list */
@@ -249,10 +247,10 @@ static int slot_get(struct gfs2_quota_data *qd)
 	unsigned int c, o = 0, b;
 	unsigned char byte = 0;
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 
 	if (qd->qd_slot_count++) {
-		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 		return 0;
 	}
 
@@ -276,13 +274,13 @@ found:
 
 	sdp->sd_quota_bitmap[c][o] |= 1 << b;
 
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	return 0;
 
 fail:
 	qd->qd_slot_count--;
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 	return -ENOSPC;
 }
 
@@ -290,23 +288,23 @@ static void slot_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 }
 
 static void slot_put(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 	gfs2_assert(sdp, qd->qd_slot_count);
 	if (!--qd->qd_slot_count) {
 		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
 		qd->qd_slot = -1;
 	}
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 }
 
 static int bh_get(struct gfs2_quota_data *qd)
@@ -382,7 +380,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 		return 0;
 
 	spin_lock(&qd_lru_lock);
-	spin_lock(&sdp->sd_quota_spin);
 
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
 		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
@@ -406,7 +403,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	if (!found)
 		qd = NULL;
 
-	spin_unlock(&sdp->sd_quota_spin);
 	spin_unlock(&qd_lru_lock);
 
 	if (qd) {
@@ -433,11 +429,9 @@ static int qd_trylock(struct gfs2_quota_data *qd)
 		return 0;
 
 	spin_lock(&qd_lru_lock);
-	spin_lock(&sdp->sd_quota_spin);
 
 	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
-		spin_unlock(&sdp->sd_quota_spin);
 		spin_unlock(&qd_lru_lock);
 		return 0;
 	}
@@ -451,7 +445,6 @@ static int qd_trylock(struct gfs2_quota_data *qd)
 	gfs2_assert_warn(sdp, qd->qd_slot_count);
 	qd->qd_slot_count++;
 
-	spin_unlock(&sdp->sd_quota_spin);
 	spin_unlock(&qd_lru_lock);
 
 	gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -612,9 +605,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	x = be64_to_cpu(qc->qc_change) + change;
 	qc->qc_change = cpu_to_be64(x);
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 	qd->qd_change = x;
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	if (!x) {
 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -945,9 +938,9 @@ static int need_sync(struct gfs2_quota_data *qd)
 	if (!qd->qd_qb.qb_limit)
 		return 0;
 
-	spin_lock(&sdp->sd_quota_spin);
+	spin_lock(&qd_lru_lock);
 	value = qd->qd_change;
-	spin_unlock(&sdp->sd_quota_spin);
+	spin_unlock(&qd_lru_lock);
 
 	spin_lock(&gt->gt_spin);
 	num = gt->gt_quota_scale_num;
@@ -1040,9 +1033,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
 			continue;
 
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-		spin_lock(&sdp->sd_quota_spin);
+		spin_lock(&qd_lru_lock);
 		value += qd->qd_change;
-		spin_unlock(&sdp->sd_quota_spin);
+		spin_unlock(&qd_lru_lock);
 
 		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
 			print_message(qd, "exceeded");
@@ -1228,9 +1221,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 			qd->qd_slot_count = 1;
 
 			spin_lock(&qd_lru_lock);
-			spin_lock(&sdp->sd_quota_spin);
 			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
-			spin_unlock(&sdp->sd_quota_spin);
 			list_add(&qd->qd_list, &sdp->sd_quota_list);
 			atomic_inc(&sdp->sd_quota_count);
 			spin_unlock(&qd_lru_lock);
@@ -1263,18 +1254,15 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-		spin_lock(&sdp->sd_quota_spin);
 		if (atomic_read(&qd->qd_count) > 1 ||
 		    (atomic_read(&qd->qd_count) &&
 		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
-			spin_unlock(&sdp->sd_quota_spin);
 			list_move(&qd->qd_list, head);
 			spin_unlock(&qd_lru_lock);
 			schedule();
 			spin_lock(&qd_lru_lock);
 			continue;
 		}
-		spin_unlock(&sdp->sd_quota_spin);
 
 		list_del(&qd->qd_list);
 		/* Also remove if this qd exists in the reclaim list */