author    David Teigland <teigland@redhat.com>    2006-01-16 11:50:04 -0500
committer Steven Whitehouse <swhiteho@redhat.com>    2006-01-16 11:50:04 -0500
commit    b3b94faa5fe5968827ba0640ee9fba4b3e7f736e (patch)
tree      70bd6068b050d2c46e338484f8b03fae4365c6c3 /fs/gfs2/quota.c
parent    f7825dcf8c7301cfd3724eb40c5b443cc85ab7b8 (diff)

[GFS2] The core of GFS2

This patch contains all the core files for GFS2.

Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/quota.c')
 -rw-r--r--   fs/gfs2/quota.c   1238
 1 file changed, 1238 insertions, 0 deletions
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
new file mode 100644
index 000000000000..a0320f22b57b
--- /dev/null
+++ b/fs/gfs2/quota.c
@@ -0,0 +1,1238 @@
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can sit on
 * one node before being synced to the quota file.  (The default is 60
 * seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/tty.h>
#include <linux/sort.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "jdata.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

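/*
 * Each ID owns a pair of adjacent entries in the quota file: the user
 * quota at index 2 * id and the group quota at 2 * id + 1.  qd2offset()
 * converts a quota_data into the matching byte offset.
 */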
static uint64_t qd2offset(struct gfs2_quota_data *qd)
{
        uint64_t offset;

        offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
        if (!qd)
                return -ENOMEM;

        qd->qd_count = 1;
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;

        error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

 fail:
        kfree(qd);
        return error;
}

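/*
 * Find the quota_data for (user, id) on sd_quota_list and take a
 * reference on it.  If it isn't there and @create is set, allocate a
 * new one outside the spinlock (the allocation may sleep) and retry.
 */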
static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&sdp->sd_quota_spin);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
                                qd->qd_count++;
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&sdp->sd_quota_spin);

                if (qd || !create) {
                        if (new_qd) {
                                gfs2_lvb_unhold(new_qd->qd_gl);
                                kfree(new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, user, id, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        qd->qd_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        if (!--qd->qd_count)
                qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
}

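/*
 * Claim a slot in the per-node quota-change file for this quota_data.
 * Free slots are tracked in sd_quota_bitmap, one bit per slot, in
 * PAGE_SIZE chunks.
 */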
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

 found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        return 0;

 fail:
        qd->qd_slot_count--;
        spin_unlock(&sdp->sd_quota_spin);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&sdp->sd_quota_spin);
}

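/*
 * Read in the block of the per-node quota-change file that holds this
 * quota_data's entry and point qd_bh_qc at the entry itself.  The
 * buffer reference is counted so it stays pinned while changes are
 * outstanding.
 */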
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_qc_inode;
        unsigned int block, offset;
        uint64_t dblock;
        int new = 0;
        struct buffer_head *bh;
        int error;

        down(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                up(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        error = gfs2_block_map(ip, block, &new, &dblock, NULL);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                       (bh->b_data + sizeof(struct gfs2_meta_header) +
                        offset * sizeof(struct gfs2_quota_change));

        up(&sdp->sd_quota_mutex);

        return 0;

 fail_brelse:
        brelse(bh);

 fail:
        qd->qd_bh_count--;
        up(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        down(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        up(&sdp->sd_quota_mutex);
}

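/*
 * Fish the next dirty quota_data off sd_quota_list: one that is not
 * already being synced, has an outstanding change, and has not yet been
 * synced in this generation.  The entry is marked QDF_LOCKED and extra
 * references are taken so it can be synced outside the spinlock.
 */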
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);
                qd->qd_count++;
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                found = 1;

                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&sdp->sd_quota_spin);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);
        qd->qd_count++;
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, user, id, create, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

 fail_slot:
        slot_put(*qdp);

 fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

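/*
 * Acquire quota_data structs for all the IDs a transaction may touch:
 * the inode's owner and group, plus the new owner and/or group when the
 * inode is being chowned.  That makes at most four entries in al_qd.
 */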
int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;
        int error;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

 out:
        if (error)
                gfs2_quota_unhold(ip);

        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        unsigned int x;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
                al->al_qd[x] = NULL;
        }
        al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
        struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;
        int ret = 0;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))
                        ret = -1;
                else
                        ret = 1;
        } else {
                if (qd_a->qd_id < qd_b->qd_id)
                        ret = -1;
                else if (qd_a->qd_id > qd_b->qd_id)
                        ret = 1;
        }

        return ret;
}

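/*
 * Add @change to this ID's tag in the per-node quota-change file, inside
 * the current transaction.  The first nonzero change takes extra qd and
 * slot references (QDF_CHANGE); when the tag returns to zero they are
 * dropped again.
 */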
static void do_qc(struct gfs2_quota_data *qd, int64_t change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_qc_inode;
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        int64_t x;

        down(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }

        x = qc->qc_change;
        x = be64_to_cpu(x) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);
        qd->qd_change = x;
        spin_unlock(&sdp->sd_quota_spin);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        up(&sdp->sd_quota_mutex);
}

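/*
 * Fold the locally accumulated changes for @num_qd IDs into the global
 * quota file.  Each ID's glock is held exclusively while its entry is
 * read, adjusted by qd_change_sync, and written back; the cached LVB
 * copy is refreshed at the same time.
 */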
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = sdp->sd_quota_inode;
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        uint64_t offset;
        unsigned int nalloc = 0;
        struct gfs2_alloc *al = NULL;
        int error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl,
                                           LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                int alloc_required;

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),
                                                  &alloc_required);
                if (error)
                        goto out_gunlock;
                if (alloc_required)
                        nalloc++;
        }

        if (nalloc) {
                al = gfs2_alloc_get(ip);

                al->al_requested = nalloc * (data_blocks + ind_blocks);

                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_alloc;

                error = gfs2_trans_begin(sdp,
                                         al->al_rgd->rd_ri.ri_length +
                                         num_qd * data_blocks +
                                         nalloc * ind_blocks +
                                         RES_DINODE + num_qd +
                                         RES_STATFS, 0);
                if (error)
                        goto out_ipres;
        } else {
                error = gfs2_trans_begin(sdp,
                                         num_qd * data_blocks +
                                         RES_DINODE + num_qd, 0);
                if (error)
                        goto out_gunlock;
        }

        for (x = 0; x < num_qd; x++) {
                char buf[sizeof(struct gfs2_quota)];
                struct gfs2_quota q;

                qd = qda[x];
                offset = qd2offset(qd);

                /* The quota file may not be a multiple of
                   sizeof(struct gfs2_quota) bytes. */
                memset(buf, 0, sizeof(struct gfs2_quota));

                error = gfs2_jdata_read_mem(ip, buf, offset,
                                            sizeof(struct gfs2_quota));
                if (error < 0)
                        goto out_end_trans;

                gfs2_quota_in(&q, buf);
                q.qu_value += qda[x]->qd_change_sync;
                gfs2_quota_out(&q, buf);

                error = gfs2_jdata_write_mem(ip, buf, offset,
                                             sizeof(struct gfs2_quota));
                if (error < 0)
                        goto out_end_trans;
                else if (error != sizeof(struct gfs2_quota)) {
                        error = -EIO;
                        goto out_end_trans;
                }

                do_qc(qd, -qd->qd_change_sync);

                memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
                qd->qd_qb.qb_magic = GFS2_MAGIC;
                qd->qd_qb.qb_limit = q.qu_limit;
                qd->qd_qb.qb_warn = q.qu_warn;
                qd->qd_qb.qb_value = q.qu_value;

                gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);
        }

        error = 0;

 out_end_trans:
        gfs2_trans_end(sdp);

 out_ipres:
        if (nalloc)
                gfs2_inplace_release(ip);

 out_alloc:
        if (nalloc)
                gfs2_alloc_put(ip);

 out_gunlock:
        gfs2_glock_dq_uninit(&i_gh);

 out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        kfree(ghs);
        gfs2_log_flush_glock(ip->i_gl);

        return error;
}

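/*
 * Acquire the glock for a quota_data and make sure qd_qb holds current
 * data.  Normally the cached LVB copy suffices; if it is stale (bad
 * magic) or @force_refresh is set, the glock is re-acquired exclusively
 * and the entry is re-read from the quota file.
 */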
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_holder i_gh;
        struct gfs2_quota q;
        char buf[sizeof(struct gfs2_quota)];
        int error;

 restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

        if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,
                                           q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(sdp->sd_quota_inode->i_gl,
                                           LM_ST_SHARED, 0,
                                           &i_gh);
                if (error)
                        goto fail;

                memset(buf, 0, sizeof(struct gfs2_quota));

                error = gfs2_jdata_read_mem(sdp->sd_quota_inode, buf,
                                            qd2offset(qd),
                                            sizeof(struct gfs2_quota));
                if (error < 0)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);

                memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
                qd->qd_qb.qb_magic = GFS2_MAGIC;
                qd->qd_qb.qb_limit = q.qu_limit;
                qd->qd_qb.qb_warn = q.qu_warn;
                qd->qd_qb.qb_value = q.qu_value;

                gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);
                        force_refresh = 0;
                        goto restart;
                }
        }

        return 0;

 fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);

 fail:
        gfs2_glock_dq_uninit(q_gh);

        return error;
}

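/*
 * Hold and lock all the quota_data structs the caller's next
 * transaction may change.  The glocks are always taken in sort_qd()
 * order, so two nodes locking the same set of IDs cannot deadlock.
 */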
int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        unsigned int x;
        int error = 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
             sort_qd, NULL);

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

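/*
 * Decide whether this ID's local change is worth syncing now.  The
 * local change is scaled by the number of journals (i.e. nodes) and by
 * quota_scale_num/quota_scale_den; if adding that to the globally known
 * value would cross the limit, a sync is due.
 */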
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        int64_t value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                do_div(value, den);
                value += qd->qd_qb.qb_value;
                if (value < (int64_t)qd->qd_qb.qb_limit)
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = al->al_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

 out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        char *line;
        int len;

        line = kmalloc(MAX_LINE, GFP_KERNEL);
        if (!line)
                return -ENOMEM;

        len = snprintf(line, MAX_LINE-1,
                       "GFS2: fsid=%s: quota %s for %s %u\r\n",
                       sdp->sd_fsname, type,
                       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
                       qd->qd_id);
        line[MAX_LINE-1] = 0;

        if (current->signal) { /* Is this test still required? */
                tty_write_message(current->signal->tty, line);
        }

        kfree(line);

        return 0;
}

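/*
 * Check the locked quota_data structs matching @uid/@gid against their
 * limits, counting both the globally synced value and this node's
 * pending change.  Returns -EDQUOT past the hard limit; past the warn
 * value a rate-limited message is printed instead.
 */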
int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
        struct gfs2_sbd *sdp = ip->i_sbd;
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;
        int64_t value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
                        continue;

                value = qd->qd_qb.qb_value;
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
                        print_message(qd, "exceeded");
                        error = -EDQUOT;
                        break;
                } else if (qd->qd_qb.qb_warn &&
                           (int64_t)qd->qd_qb.qb_warn < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

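/*
 * Record a block allocation (positive @change) or deallocation
 * (negative) against whichever held quota_data structs match @uid and
 * @gid.
 */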
void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
                       uint32_t uid, uint32_t gid)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int x;
        unsigned int found = 0;

        if (gfs2_assert_warn(ip->i_sbd, change))
                return;
        if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
                        do_qc(qd, change);
                        found++;
                }
        }
}

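/*
 * Sync dirty quota entries to the quota file in batches of up to
 * quota_simul_sync IDs, repeating until a scan of the list comes up
 * short.
 */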
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, user, id, CREATE, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);

        return error;
}

int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
                    struct gfs2_quota *q)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
            !capable(CAP_SYS_ADMIN))
                return -EACCES;

        error = qd_get(sdp, user, id, CREATE, &qd);
        if (error)
                return error;

        error = do_glock(qd, NO_FORCE, &q_gh);
        if (error)
                goto out;

        memset(q, 0, sizeof(struct gfs2_quota));
        q->qu_limit = qd->qd_qb.qb_limit;
        q->qu_warn = qd->qd_qb.qb_warn;
        q->qu_value = qd->qd_qb.qb_value;

        spin_lock(&sdp->sd_quota_spin);
        q->qu_value += qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_glock_dq_uninit(&q_gh);

 out:
        qd_put(qd);

        return error;
}

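/*
 * Called at mount time: walk every block of the per-node quota-change
 * file, rebuild the slot bitmap, and recreate an in-core quota_data
 * (with QDF_CHANGE set) for each entry with a nonzero change left over
 * from before this node last went down.
 */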
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = sdp->sd_qc_inode;
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        uint64_t dblock;
        uint32_t extlen = 0;
        int error;

        if (!ip->i_di.di_size ||
            ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_RU(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_KERNEL);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                gfs2_meta_ra(ip->i_gl, dblock, extlen);
                error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
                                       &bh);
                if (error)
                        goto fail;
                error = -EIO;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0;
                     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
                                         qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

 fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

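/*
 * Periodically reclaim quota_data structs that have been unused for
 * longer than quota_cache_secs.  Candidates are moved to a private list
 * under the spinlock and freed outside it.
 */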
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data *qd, *safe;
        LIST_HEAD(dead);

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);
                }
        }
        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kfree(qd);
        }
}

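/*
 * Tear down all remaining quota_data structs at unmount, waiting for
 * transient references to drain, then free the slot bitmap.
 */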
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);
                        schedule();
                        spin_lock(&sdp->sd_quota_spin);
                        continue;
                }

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kfree(qd);

                spin_lock(&sdp->sd_quota_spin);
        }
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}