aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2013-12-12 12:29:32 -0500
committerSteven Whitehouse <swhiteho@redhat.com>2014-01-14 14:28:49 -0500
commitee2411a8db49a21bc55dc124e1b434ba194c8903 (patch)
tree041c3d21230183753963de71930d39b6d0919328 /fs/gfs2
parent8ad151c2ac9aa106cb903cfd838b31561dbd7bcc (diff)
GFS2: Clean up quota slot allocation
Quota slot allocation has historically used a vector of pages and a set of homegrown find/test/set/clear bit functions. Since the size of the bitmap is likely to be based on the default qc file size, that's a couple of pages at most. So we ought to be able to allocate that as a single chunk, with a vmalloc fallback, just in case of memory fragmentation. We are then able to use the kernel's own find/test/set/clear bit functions, rather than rolling our own. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com> Cc: Abhijith Das <adas@redhat.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--fs/gfs2/incore.h3
-rw-r--r--fs/gfs2/quota.c100
2 files changed, 30 insertions, 73 deletions
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 59d99ec9d875..4b9aa5b6908c 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -733,8 +733,7 @@ struct gfs2_sbd {
733 spinlock_t sd_trunc_lock; 733 spinlock_t sd_trunc_lock;
734 734
735 unsigned int sd_quota_slots; 735 unsigned int sd_quota_slots;
736 unsigned int sd_quota_chunks; 736 unsigned long *sd_quota_bitmap;
737 unsigned char **sd_quota_bitmap;
738 737
739 u64 sd_quota_sync_gen; 738 u64 sd_quota_sync_gen;
740 739
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 3287d9871508..79be67ab8603 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -315,50 +315,30 @@ static void qd_put(struct gfs2_quota_data *qd)
315 315
316static int slot_get(struct gfs2_quota_data *qd) 316static int slot_get(struct gfs2_quota_data *qd)
317{ 317{
318 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 318 struct gfs2_sbd *sdp = qd->qd_sbd;
319 unsigned int c, o = 0, b; 319 unsigned int bit;
320 unsigned char byte = 0; 320 int error = 0;
321 321
322 spin_lock(&qd_lock); 322 spin_lock(&qd_lock);
323 if (qd->qd_slot_count != 0)
324 goto out;
323 325
324 if (qd->qd_slot_count++) { 326 error = -ENOSPC;
325 spin_unlock(&qd_lock); 327 bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
326 return 0; 328 if (bit < sdp->sd_quota_slots) {
329 set_bit(bit, sdp->sd_quota_bitmap);
330 qd->qd_slot = bit;
331out:
332 qd->qd_slot_count++;
327 } 333 }
328
329 for (c = 0; c < sdp->sd_quota_chunks; c++)
330 for (o = 0; o < PAGE_SIZE; o++) {
331 byte = sdp->sd_quota_bitmap[c][o];
332 if (byte != 0xFF)
333 goto found;
334 }
335
336 goto fail;
337
338found:
339 for (b = 0; b < 8; b++)
340 if (!(byte & (1 << b)))
341 break;
342 qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
343
344 if (qd->qd_slot >= sdp->sd_quota_slots)
345 goto fail;
346
347 sdp->sd_quota_bitmap[c][o] |= 1 << b;
348
349 spin_unlock(&qd_lock); 334 spin_unlock(&qd_lock);
350 335
351 return 0; 336 return error;
352
353fail:
354 qd->qd_slot_count--;
355 spin_unlock(&qd_lock);
356 return -ENOSPC;
357} 337}
358 338
359static void slot_hold(struct gfs2_quota_data *qd) 339static void slot_hold(struct gfs2_quota_data *qd)
360{ 340{
361 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 341 struct gfs2_sbd *sdp = qd->qd_sbd;
362 342
363 spin_lock(&qd_lock); 343 spin_lock(&qd_lock);
364 gfs2_assert(sdp, qd->qd_slot_count); 344 gfs2_assert(sdp, qd->qd_slot_count);
@@ -366,34 +346,14 @@ static void slot_hold(struct gfs2_quota_data *qd)
366 spin_unlock(&qd_lock); 346 spin_unlock(&qd_lock);
367} 347}
368 348
369static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
370 unsigned int bit, int new_value)
371{
372 unsigned int c, o, b = bit;
373 int old_value;
374
375 c = b / (8 * PAGE_SIZE);
376 b %= 8 * PAGE_SIZE;
377 o = b / 8;
378 b %= 8;
379
380 old_value = (bitmap[c][o] & (1 << b));
381 gfs2_assert_withdraw(sdp, !old_value != !new_value);
382
383 if (new_value)
384 bitmap[c][o] |= 1 << b;
385 else
386 bitmap[c][o] &= ~(1 << b);
387}
388
389static void slot_put(struct gfs2_quota_data *qd) 349static void slot_put(struct gfs2_quota_data *qd)
390{ 350{
391 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 351 struct gfs2_sbd *sdp = qd->qd_sbd;
392 352
393 spin_lock(&qd_lock); 353 spin_lock(&qd_lock);
394 gfs2_assert(sdp, qd->qd_slot_count); 354 gfs2_assert(sdp, qd->qd_slot_count);
395 if (!--qd->qd_slot_count) { 355 if (!--qd->qd_slot_count) {
396 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0); 356 BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
397 qd->qd_slot = -1; 357 qd->qd_slot = -1;
398 } 358 }
399 spin_unlock(&qd_lock); 359 spin_unlock(&qd_lock);
@@ -1269,6 +1229,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1269 unsigned int x, slot = 0; 1229 unsigned int x, slot = 0;
1270 unsigned int found = 0; 1230 unsigned int found = 0;
1271 unsigned int hash; 1231 unsigned int hash;
1232 unsigned int bm_size;
1272 u64 dblock; 1233 u64 dblock;
1273 u32 extlen = 0; 1234 u32 extlen = 0;
1274 int error; 1235 int error;
@@ -1277,20 +1238,16 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1277 return -EIO; 1238 return -EIO;
1278 1239
1279 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; 1240 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1280 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); 1241 bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
1281 1242 bm_size *= sizeof(unsigned long);
1282 error = -ENOMEM; 1243 error = -ENOMEM;
1283 1244 sdp->sd_quota_bitmap = kmalloc(bm_size, GFP_NOFS|__GFP_NOWARN);
1284 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks, 1245 if (sdp->sd_quota_bitmap == NULL)
1285 sizeof(unsigned char *), GFP_NOFS); 1246 sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS, PAGE_KERNEL);
1286 if (!sdp->sd_quota_bitmap) 1247 if (!sdp->sd_quota_bitmap)
1287 return error; 1248 return error;
1288 1249
1289 for (x = 0; x < sdp->sd_quota_chunks; x++) { 1250 memset(sdp->sd_quota_bitmap, 0, bm_size);
1290 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1291 if (!sdp->sd_quota_bitmap[x])
1292 goto fail;
1293 }
1294 1251
1295 for (x = 0; x < blocks; x++) { 1252 for (x = 0; x < blocks; x++) {
1296 struct buffer_head *bh; 1253 struct buffer_head *bh;
@@ -1339,7 +1296,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1339 qd->qd_slot_count = 1; 1296 qd->qd_slot_count = 1;
1340 1297
1341 spin_lock(&qd_lock); 1298 spin_lock(&qd_lock);
1342 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1); 1299 BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
1343 list_add(&qd->qd_list, &sdp->sd_quota_list); 1300 list_add(&qd->qd_list, &sdp->sd_quota_list);
1344 atomic_inc(&sdp->sd_quota_count); 1301 atomic_inc(&sdp->sd_quota_count);
1345 spin_unlock(&qd_lock); 1302 spin_unlock(&qd_lock);
@@ -1370,7 +1327,6 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1370{ 1327{
1371 struct list_head *head = &sdp->sd_quota_list; 1328 struct list_head *head = &sdp->sd_quota_list;
1372 struct gfs2_quota_data *qd; 1329 struct gfs2_quota_data *qd;
1373 unsigned int x;
1374 1330
1375 spin_lock(&qd_lock); 1331 spin_lock(&qd_lock);
1376 while (!list_empty(head)) { 1332 while (!list_empty(head)) {
@@ -1401,9 +1357,11 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1401 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); 1357 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1402 1358
1403 if (sdp->sd_quota_bitmap) { 1359 if (sdp->sd_quota_bitmap) {
1404 for (x = 0; x < sdp->sd_quota_chunks; x++) 1360 if (is_vmalloc_addr(sdp->sd_quota_bitmap))
1405 kfree(sdp->sd_quota_bitmap[x]); 1361 vfree(sdp->sd_quota_bitmap);
1406 kfree(sdp->sd_quota_bitmap); 1362 else
1363 kfree(sdp->sd_quota_bitmap);
1364 sdp->sd_quota_bitmap = NULL;
1407 } 1365 }
1408} 1366}
1409 1367