aboutsummaryrefslogtreecommitdiffstats
path: root/fs/xfs/quota
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@infradead.org>2011-08-12 17:21:35 -0400
committerAlex Elder <aelder@sgi.com>2011-08-12 17:21:35 -0400
commitc59d87c460767bc35dafd490139d3cfe78fb8da4 (patch)
tree2aad8261f86488e501d9645bd35d1398906da46d /fs/xfs/quota
parent06f8e2d6754dc631732415b741b5aa58a0f7133f (diff)
xfs: remove subdirectories
Use the move from Linux 2.6 to Linux 3.x as an excuse to kill the annoying subdirectories in the XFS source code. Besides the large amount of file rename the only changes are to the Makefile, a few files including headers with the subdirectory prefix, and the binary sysctl compat code that includes a header under fs/xfs/ from kernel/. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/quota')
-rw-r--r--fs/xfs/quota/xfs_dquot.c1454
-rw-r--r--fs/xfs/quota/xfs_dquot.h137
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c529
-rw-r--r--fs/xfs/quota/xfs_dquot_item.h48
-rw-r--r--fs/xfs/quota/xfs_qm.c2416
-rw-r--r--fs/xfs/quota/xfs_qm.h166
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c176
-rw-r--r--fs/xfs/quota/xfs_qm_stats.c105
-rw-r--r--fs/xfs/quota/xfs_qm_stats.h53
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c906
-rw-r--r--fs/xfs/quota/xfs_quota_priv.h53
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c890
12 files changed, 0 insertions, 6933 deletions
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
deleted file mode 100644
index db62959bed13..000000000000
--- a/fs/xfs/quota/xfs_dquot.c
+++ /dev/null
@@ -1,1454 +0,0 @@
1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_bmap.h"
32#include "xfs_rtalloc.h"
33#include "xfs_error.h"
34#include "xfs_itable.h"
35#include "xfs_attr.h"
36#include "xfs_buf_item.h"
37#include "xfs_trans_space.h"
38#include "xfs_trans_priv.h"
39#include "xfs_qm.h"
40#include "xfs_trace.h"
41
42
/*
 * LOCK ORDER
 *
 * inode lock               (ilock)
 * dquot hash-chain lock    (hashlock)
 * xqm dquot freelist lock  (freelistlock)
 * mount's dquot list lock  (mplistlock)
 * user dquot lock - lock ordering among dquots is based on the uid or gid
 * group dquot lock - similar to udquots. Between the two dquots, the udquot
 *                    has to be locked first.
 * pin lock - the dquot lock must be held to take this lock.
 * flush lock - ditto.
 */

#ifdef DEBUG
/*
 * Error-injection knobs used by xfs_qm_dqget(): when xfs_do_dqerror is
 * set and the request is against xfs_dqerror_target, every
 * xfs_dqerror_mod-th dqget call (counted in xfs_dqreq_num) fails with EIO.
 */
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif

/*
 * Separate lockdep class for non-user (group/project) dquot locks, so
 * lockdep accepts holding one user and one group dquot lock at once
 * (assigned in xfs_qm_dqinit()).
 */
static struct lock_class_key xfs_dquot_other_class;
/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots are above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on disk dquot.
 */
STATIC xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_dquot_t	*dqp;
	boolean_t	brandnewdquot;

	/*
	 * brandnewdquot tells us whether we got freshly-allocated memory
	 * or a reclaimed dquot (presumably from xfs_qm_dqalloc_incore();
	 * its exact contract is defined elsewhere).
	 */
	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		INIT_LIST_HEAD(&dqp->q_freelist);
		mutex_init(&dqp->q_qlock);
		init_waitqueue_head(&dqp->q_pinwait);

		/*
		 * Because we want to use a counting completion, complete
		 * the flush completion once to allow a single access to
		 * the flush completion without blocking.
		 */
		init_completion(&dqp->q_flush);
		complete(&dqp->q_flush);

		trace_xfs_dqinit(dqp);
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one().
		 * So, we need to reset others.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		INIT_LIST_HEAD(&dqp->q_mplist);
		INIT_LIST_HEAD(&dqp->q_hashlist);
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		atomic_set(&dqp->q_pincount, 0);
		dqp->q_hash = NULL;
		ASSERT(list_empty(&dqp->q_freelist));

		trace_xfs_dqreuse(dqp);
	}

	/*
	 * In either case we need to make sure group quotas have a different
	 * lock class than user quotas, to make sure lockdep knows we can
	 * lock one of each at the same time.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}
141
142/*
143 * This is called to free all the memory associated with a dquot
144 */
145void
146xfs_qm_dqdestroy(
147 xfs_dquot_t *dqp)
148{
149 ASSERT(list_empty(&dqp->q_freelist));
150
151 mutex_destroy(&dqp->q_qlock);
152 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
153
154 atomic_dec(&xfs_Gqm->qm_totaldquots);
155}
156
157/*
158 * This is what a 'fresh' dquot inside a dquot chunk looks like on disk.
159 */
160STATIC void
161xfs_qm_dqinit_core(
162 xfs_dqid_t id,
163 uint type,
164 xfs_dqblk_t *d)
165{
166 /*
167 * Caller has zero'd the entire dquot 'chunk' already.
168 */
169 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
170 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
171 d->dd_diskdq.d_id = cpu_to_be32(id);
172 d->dd_diskdq.d_flags = type;
173}
174
175/*
176 * If default limits are in force, push them into the dquot now.
177 * We overwrite the dquot limits only if they are zero and this
178 * is not the root dquot.
179 */
180void
181xfs_qm_adjust_dqlimits(
182 xfs_mount_t *mp,
183 xfs_disk_dquot_t *d)
184{
185 xfs_quotainfo_t *q = mp->m_quotainfo;
186
187 ASSERT(d->d_id);
188
189 if (q->qi_bsoftlimit && !d->d_blk_softlimit)
190 d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
191 if (q->qi_bhardlimit && !d->d_blk_hardlimit)
192 d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
193 if (q->qi_isoftlimit && !d->d_ino_softlimit)
194 d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
195 if (q->qi_ihardlimit && !d->d_ino_hardlimit)
196 d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
197 if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
198 d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
199 if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
200 d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
201}
202
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off).
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded.  They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t	*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	/* Sanity: a soft limit must never exceed its hard limit. */
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	/*
	 * Block count: if the timer isn't running, start it when any
	 * (non-zero) limit is reached and otherwise clear the warning
	 * count; if it is running, stop it once back under all limits.
	 */
	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >=
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	/* Inode count: same state machine as the block count above. */
	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >=
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	/* Realtime block count: same state machine again. */
	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >=
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
304
305/*
306 * initialize a buffer full of dquots and log the whole thing
307 */
308STATIC void
309xfs_qm_init_dquot_blk(
310 xfs_trans_t *tp,
311 xfs_mount_t *mp,
312 xfs_dqid_t id,
313 uint type,
314 xfs_buf_t *bp)
315{
316 struct xfs_quotainfo *q = mp->m_quotainfo;
317 xfs_dqblk_t *d;
318 int curid, i;
319
320 ASSERT(tp);
321 ASSERT(xfs_buf_islocked(bp));
322
323 d = bp->b_addr;
324
325 /*
326 * ID of the first dquot in the block - id's are zero based.
327 */
328 curid = id - (id % q->qi_dqperchunk);
329 ASSERT(curid >= 0);
330 memset(d, 0, BBTOB(q->qi_dqchunklen));
331 for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++)
332 xfs_qm_dqinit_core(curid, type, d);
333 xfs_trans_dquot_buf(tp, bp,
334 (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
335 ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
336 XFS_BLF_GDQUOT_BUF)));
337 xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
338}
339
340
341
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 *
 * On success, *O_bpp is the locked buffer containing the new chunk,
 * joined to the (possibly replaced) transaction in *tpp.  Returns
 * ESRCH if this quota type was switched off while the inode lock was
 * dropped, otherwise the error from the allocation/buffer calls.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t flist;
	xfs_bmbt_irec_t map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock
	 */
	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	/* Allocate one dquot cluster worth of blocks in the quota file. */
	if ((error = xfs_bmapi(tp, quotip,
			      offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
			      XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
			      &firstblock,
			      XFS_QM_DQALLOC_SPACE_RES(mp),
			      &map, &nmaps, &flist))) {
		goto error0;
	}
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp || (error = xfs_buf_geterror(bp)))
		goto error1;
	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */

	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	/* committed != 0 means *tpp now points at a new transaction. */
	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

      error1:
	xfs_bmap_cancel(&flist);
      error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}
450
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 *
 * If the dquot's block is a hole in the quota file, allocation is done
 * (via xfs_qm_dqalloc) only when XFS_QMOPT_DQALLOC is set; otherwise
 * ENOENT is returned.  ESRCH is returned if this quota type was turned
 * off before we got the quota inode lock.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t	**tpp,
	xfs_dquot_t	*dqp,
	xfs_disk_dquot_t **O_ddpp,
	xfs_buf_t	**O_bpp,
	uint		flags)
{
	xfs_bmbt_irec_t map;
	int		nmaps = 1, error;
	xfs_buf_t	*bp;
	xfs_inode_t	*quotip = XFS_DQ_TO_QIP(dqp);
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_disk_dquot_t *ddq;
	xfs_dqid_t	id = be32_to_cpu(dqp->q_core.d_id);
	xfs_trans_t	*tp = (tpp ? *tpp : NULL);

	/* File offset (in chunks) of the chunk holding this id. */
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
			  XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
			  NULL, 0, &map, &nmaps, NULL);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		/* Allocation needs a transaction from the caller. */
		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
					dqp->q_fileoffset, &bp);
		if (error)
			return error;
		/* xfs_qm_dqalloc() may have replaced the transaction. */
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp);
		if (error || !bp)
			return XFS_ERROR(error);
	}

	ASSERT(xfs_buf_islocked(bp));

	/*
	 * calculate the location of the dquot inside the buffer.
	 */
	ddq = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot...
	 * Corruption is fatal (EIO) unless the caller asked for repair.
	 */
	error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES,
			   flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN),
			   "dqtobp");
	if (error) {
		if (!(flags & XFS_QMOPT_DQREPAIR)) {
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EIO);
		}
	}

	*O_bpp = bp;
	*O_ddpp = ddq;

	return (0);
}
561
562
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * On success the incore dquot's q_core and reservation counters are
 * filled in from disk and its log item is initialized.  Errors from
 * xfs_qm_dqtobp() (e.g. ESRCH, ENOENT) are passed through.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqread(
	xfs_trans_t	**tpp,
	xfs_dqid_t	id,
	xfs_dquot_t	*dqp,	/* dquot to get filled in */
	uint		flags)
{
	xfs_disk_dquot_t *ddqp;
	xfs_buf_t	*bp;
	int		error;
	xfs_trans_t	*tp;

	ASSERT(tpp);

	trace_xfs_dqread(dqp);

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
		return (error);
	}
	/* dqtobp() may have swapped transactions; pick up the current one. */
	tp = *tpp;

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* Mark the buf so that this will stay incore a little longer */
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	return (error);
}
627
628
/*
 * allocate an incore dquot from the kernel heap,
 * and fill its core with quota information kept on disk.
 * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk
 * if it wasn't already allocated.
 *
 * When allocating, a permanent transaction is reserved here and
 * committed after the read; on failure it is cancelled with the
 * appropriate flags (cancelflags tracks how far setup progressed).
 */
STATIC int
xfs_qm_idtodq(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,	 /* gid or uid, depending on type */
	uint		type,	 /* UDQUOT or GDQUOT */
	uint		flags,	 /* DQALLOC, DQREPAIR */
	xfs_dquot_t	**O_dqpp)/* OUT : incore dquot, not locked */
{
	xfs_dquot_t	*dqp;
	int		error;
	xfs_trans_t	*tp;
	int		cancelflags=0;

	dqp = xfs_qm_dqinit(mp, id, type);
	tp = NULL;
	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
				XFS_WRITE_LOG_RES(mp) +
				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 +
				128,
				0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			/* Reservation failed: cancel without releasing. */
			cancelflags = 0;
			goto error0;
		}
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * Read it from disk; xfs_dqread() takes care of
	 * all the necessary initialization of dquot's fields (locks, etc)
	 */
	if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error0;
	}
	if (tp) {
		if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES)))
			goto error1;
	}

	*O_dqpp = dqp;
	return (0);

 error0:
	ASSERT(error);
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
 error1:
	/* Commit (or cancel) failed: destroy the half-built incore dquot. */
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return (error);
}
697
/*
 * Lookup a dquot in the incore dquot hashtable. We keep two separate
 * hashtables for user and group dquots; and, these are global tables
 * inside the XQM, not per-filesystem tables.
 * The hash chain must be locked by caller, and it is left locked
 * on return. Returning dquot is locked.
 *
 * Returns 0 with *O_dqpp set (locked, referenced, moved to the front
 * of the chain, removed from the freelist if it was there) on a hit,
 * or 1 with *O_dqpp = NULL on a miss.
 */
STATIC int
xfs_qm_dqlookup(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	xfs_dqhash_t	*qh,
	xfs_dquot_t	**O_dqpp)
{
	xfs_dquot_t	*dqp;
	uint		flist_locked;

	ASSERT(mutex_is_locked(&qh->qh_lock));

	flist_locked = B_FALSE;

	/*
	 * Traverse the hashchain looking for a match
	 */
	list_for_each_entry(dqp, &qh->qh_list, q_hashlist) {
		/*
		 * We already have the hashlock. We don't need the
		 * dqlock to look at the id field of the dquot, since the
		 * id can't be modified without the hashlock anyway.
		 */
		if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
			trace_xfs_dqlookup_found(dqp);

			/*
			 * All in core dquots must be on the dqlist of mp
			 */
			ASSERT(!list_empty(&dqp->q_mplist));

			xfs_dqlock(dqp);
			if (dqp->q_nrefs == 0) {
				/*
				 * Unreferenced dquots live on the freelist;
				 * we need the freelist lock to take it off.
				 * Try out-of-order first to avoid dropping
				 * the dqlock.
				 */
				ASSERT(!list_empty(&dqp->q_freelist));
				if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
					trace_xfs_dqlookup_want(dqp);

					/*
					 * We may have raced with dqreclaim_one()
					 * (and lost). So, flag that we don't
					 * want the dquot to be reclaimed.
					 */
					dqp->dq_flags |= XFS_DQ_WANT;
					xfs_dqunlock(dqp);
					mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
					xfs_dqlock(dqp);
					dqp->dq_flags &= ~(XFS_DQ_WANT);
				}
				flist_locked = B_TRUE;
			}

			/*
			 * id couldn't have changed; we had the hashlock all
			 * along
			 */
			ASSERT(be32_to_cpu(dqp->q_core.d_id) == id);

			if (flist_locked) {
				if (dqp->q_nrefs != 0) {
					/*
					 * Someone referenced it while we were
					 * waiting for the freelist lock; it is
					 * no longer on the freelist.
					 */
					mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
					flist_locked = B_FALSE;
				} else {
					/* take it off the freelist */
					trace_xfs_dqlookup_freelist(dqp);
					list_del_init(&dqp->q_freelist);
					xfs_Gqm->qm_dqfrlist_cnt--;
				}
			}

			XFS_DQHOLD(dqp);

			if (flist_locked)
				mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
			/*
			 * move the dquot to the front of the hashchain
			 */
			ASSERT(mutex_is_locked(&qh->qh_lock));
			list_move(&dqp->q_hashlist, &qh->qh_list);
			trace_xfs_dqlookup_done(dqp);
			*O_dqpp = dqp;
			return 0;
		}
	}

	*O_dqpp = NULL;
	ASSERT(mutex_is_locked(&qh->qh_lock));
	return (1);
}
793
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * a locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 *
 * Returns 0 with *O_dqpp locked and referenced, ESRCH if quotas are
 * (or become) off for this type, ENOENT if the dquot doesn't exist on
 * disk and DQALLOC wasn't requested, or EIO under error injection.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	xfs_dquot_t	*dqp;
	xfs_dqhash_t	*h;
	uint		version;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return (ESRCH);
	}
	h = XFS_DQ_HASH(mp, id, type);

#ifdef DEBUG
	/* Error injection: see xfs_do_dqerror and friends above. */
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return (EIO);
		}
	}
#endif

 again:

#ifdef DEBUG
	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		if (type == XFS_DQ_USER)
			ASSERT(ip->i_udquot == NULL);
		else
			ASSERT(ip->i_gdquot == NULL);
	}
#endif
	mutex_lock(&h->qh_lock);

	/*
	 * Look in the cache (hashtable).
	 * The chain is kept locked during lookup.
	 */
	if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
		XQM_STATS_INC(xqmstats.xs_qm_dqcachehits);
		/*
		 * The dquot was found, moved to the front of the chain,
		 * taken off the freelist if it was on it, and locked
		 * at this point. Just unlock the hashchain and return.
		 */
		ASSERT(*O_dqpp);
		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
		mutex_unlock(&h->qh_lock);
		trace_xfs_dqget_hit(*O_dqpp);
		return (0);	/* success */
	}
	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * Save the hashchain version stamp, and unlock the chain, so that
	 * we don't keep the lock across a disk read
	 */
	version = h->qh_version;
	mutex_unlock(&h->qh_lock);

	/*
	 * Allocate the dquot on the kernel heap, and read the ondisk
	 * portion off the disk. Also, do all the necessary initialization
	 * This can return ENOENT if dquot didn't exist on disk and we didn't
	 * ask it to allocate; ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_idtodq(mp, id, type,
				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
					   XFS_QMOPT_DOWARN),
				  &dqp))) {
		if (ip)
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		return (error);
	}

	/*
	 * See if this is mount code calling to look at the overall quota limits
	 * which are stored in the id == 0 user or group's dquot.
	 * Since we may not have done a quotacheck by this point, just return
	 * the dquot without attaching it to any hashtables, lists, etc, or even
	 * taking a reference.
	 * The caller must dqdestroy this once done.
	 */
	if (flags & XFS_QMOPT_DQSUSER) {
		ASSERT(id == 0);
		ASSERT(! ip);
		goto dqret;
	}

	/*
	 * Dquot lock comes after hashlock in the lock ordering
	 */
	if (ip) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (type == XFS_DQ_USER) {
			if (!XFS_IS_UQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_udquot) {
				/* Use the already-attached dquot instead. */
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_udquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			if (!XFS_IS_OQUOTA_ON(mp)) {
				/* inode stays locked on return */
				xfs_qm_dqdestroy(dqp);
				return XFS_ERROR(ESRCH);
			}
			if (ip->i_gdquot) {
				/* Use the already-attached dquot instead. */
				xfs_qm_dqdestroy(dqp);
				dqp = ip->i_gdquot;
				xfs_dqlock(dqp);
				goto dqret;
			}
		}
	}

	/*
	 * Hashlock comes after ilock in lock order
	 */
	mutex_lock(&h->qh_lock);
	if (version != h->qh_version) {
		xfs_dquot_t *tmpdqp;
		/*
		 * Now, see if somebody else put the dquot in the
		 * hashtable before us. This can happen because we didn't
		 * keep the hashchain lock. We don't have to worry about
		 * lock order between the two dquots here since dqp isn't
		 * on any findable lists yet.
		 */
		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
			/*
			 * Duplicate found. Just throw away the new dquot
			 * and start over.
			 */
			xfs_qm_dqput(tmpdqp);
			mutex_unlock(&h->qh_lock);
			xfs_qm_dqdestroy(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
			goto again;
		}
	}

	/*
	 * Put the dquot at the beginning of the hash-chain and mp's list
	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
	 */
	ASSERT(mutex_is_locked(&h->qh_lock));
	dqp->q_hash = h;
	list_add(&dqp->q_hashlist, &h->qh_list);
	h->qh_version++;

	/*
	 * Attach this dquot to this filesystem's list of all dquots,
	 * kept inside the mount structure in m_quotainfo field
	 */
	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist);
	mp->m_quotainfo->qi_dquots++;
	mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
	mutex_unlock(&h->qh_lock);
 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return (0);
}
1008
1009
/*
 * Release a reference to the dquot (decrement ref-count)
 * and unlock it. If there is a group quota attached to this
 * dquot, carefully release that too without tripping over
 * deadlocks'n'stuff.
 *
 * Dropping the last reference puts the dquot on the freelist; a user
 * dquot then also drops the reference it holds on its hinted group
 * dquot (the loop below walks that chain without recursing).
 */
void
xfs_qm_dqput(
	xfs_dquot_t	*dqp)
{
	xfs_dquot_t	*gdqp;

	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	/* Fast path: not the last reference, no freelist work needed. */
	if (dqp->q_nrefs != 1) {
		dqp->q_nrefs--;
		xfs_dqunlock(dqp);
		return;
	}

	/*
	 * drop the dqlock and acquire the freelist and dqlock
	 * in the right order; but try to get it out-of-order first
	 */
	if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) {
		trace_xfs_dqput_wait(dqp);
		xfs_dqunlock(dqp);
		mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
		xfs_dqlock(dqp);
	}

	while (1) {
		gdqp = NULL;

		/* We can't depend on nrefs being == 1 here */
		if (--dqp->q_nrefs == 0) {
			trace_xfs_dqput_free(dqp);

			list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist);
			xfs_Gqm->qm_dqfrlist_cnt++;

			/*
			 * If we just added a udquot to the freelist, then
			 * we want to release the gdquot reference that
			 * it (probably) has. Otherwise it'll keep the
			 * gdquot from getting reclaimed.
			 */
			if ((gdqp = dqp->q_gdquot)) {
				/*
				 * Avoid a recursive dqput call
				 */
				xfs_dqlock(gdqp);
				dqp->q_gdquot = NULL;
			}
		}
		xfs_dqunlock(dqp);

		/*
		 * If we had a group quota inside the user quota as a hint,
		 * release it now.
		 */
		if (! gdqp)
			break;
		dqp = gdqp;
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
}
1080
1081/*
1082 * Release a dquot. Flush it if dirty, then dqput() it.
1083 * dquot must not be locked.
1084 */
1085void
1086xfs_qm_dqrele(
1087 xfs_dquot_t *dqp)
1088{
1089 if (!dqp)
1090 return;
1091
1092 trace_xfs_dqrele(dqp);
1093
1094 xfs_dqlock(dqp);
1095 /*
1096 * We don't care to flush it if the dquot is dirty here.
1097 * That will create stutters that we want to avoid.
1098 * Instead we do a delayed write when we try to reclaim
1099 * a dirty dquot. Also xfs_sync will take part of the burden...
1100 */
1101 xfs_qm_dqput(dqp);
1102}
1103
/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes..
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip);
		else
			/* Re-logged while the flush was in progress. */
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
1144
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 *
 * Returns 0 on success (or when there was nothing to flush), an error
 * code on read/verify failure, or EIO on forced shutdown.  Note that in
 * the async (!SYNC_WAIT) path the buffer is merely marked for delayed
 * write, so a 0 return does not mean the dquot is on disk yet; the
 * attached xfs_qm_dqflush_done() completion releases the flush lock
 * when the I/O actually finishes.
 */
int
xfs_qm_dqflush(
	xfs_dquot_t		*dqp,
	uint			flags)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	/*
	 * If not dirty, or it's pinned and we are not supposed to block, nada.
	 * Either way we must drop the flush lock we were handed.
	 */
	if (!XFS_DQ_IS_DIRTY(dqp) ||
	    (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
		xfs_dqfunlock(dqp);
		return 0;
	}
	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk!
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		dqp->dq_flags &= ~XFS_DQ_DIRTY;
		xfs_dqfunlock(dqp);
		return XFS_ERROR(EIO);
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp);
	if (error) {
		ASSERT(error != ENOENT);
		xfs_dqfunlock(dqp);
		return error;
	}

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot..
	 * A mismatch here means in-core state is corrupt, so shut down.
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	if (flags & SYNC_WAIT)
		error = xfs_bwrite(mp, bp);
	else
		xfs_bdwrite(mp, bp);

	trace_xfs_dqflush_done(dqp);

	/*
	 * dqp is still locked, but caller is free to unlock it now.
	 */
	return error;

}
1257
1258int
1259xfs_qm_dqlock_nowait(
1260 xfs_dquot_t *dqp)
1261{
1262 return mutex_trylock(&dqp->q_qlock);
1263}
1264
1265void
1266xfs_dqlock(
1267 xfs_dquot_t *dqp)
1268{
1269 mutex_lock(&dqp->q_qlock);
1270}
1271
1272void
1273xfs_dqunlock(
1274 xfs_dquot_t *dqp)
1275{
1276 mutex_unlock(&(dqp->q_qlock));
1277 if (dqp->q_logitem.qli_dquot == dqp) {
1278 /* Once was dqp->q_mount, but might just have been cleared */
1279 xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
1280 (xfs_log_item_t*)&(dqp->q_logitem));
1281 }
1282}
1283
1284
1285void
1286xfs_dqunlock_nonotify(
1287 xfs_dquot_t *dqp)
1288{
1289 mutex_unlock(&(dqp->q_qlock));
1290}
1291
1292/*
1293 * Lock two xfs_dquot structures.
1294 *
1295 * To avoid deadlocks we always lock the quota structure with
1296 * the lowerd id first.
1297 */
1298void
1299xfs_dqlock2(
1300 xfs_dquot_t *d1,
1301 xfs_dquot_t *d2)
1302{
1303 if (d1 && d2) {
1304 ASSERT(d1 != d2);
1305 if (be32_to_cpu(d1->q_core.d_id) >
1306 be32_to_cpu(d2->q_core.d_id)) {
1307 mutex_lock(&d2->q_qlock);
1308 mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1309 } else {
1310 mutex_lock(&d1->q_qlock);
1311 mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1312 }
1313 } else if (d1) {
1314 mutex_lock(&d1->q_qlock);
1315 } else if (d2) {
1316 mutex_lock(&d2->q_qlock);
1317 }
1318}
1319
1320
/*
 * Take a dquot out of the mount's dqlist as well as the hashlist.
 * This is called via unmount as well as quotaoff, and the purge
 * will always succeed unless there are soft (temp) references
 * outstanding.
 *
 * Caller must hold the quotainfo dqlist lock and the hash chain lock;
 * on the success path the hash lock is dropped here.
 *
 * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
 * that we're returning! XXXsup - not cool.
 */
/* ARGSUSED */
int
xfs_qm_dqpurge(
	xfs_dquot_t	*dqp)
{
	xfs_dqhash_t	*qh = dqp->q_hash;
	xfs_mount_t	*mp = dqp->q_mount;

	ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
	ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));

	xfs_dqlock(dqp);
	/*
	 * We really can't afford to purge a dquot that is
	 * referenced, because these are hard refs.
	 * It shouldn't happen in general because we went thru _all_ inodes in
	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
	 * However it is possible that we have dquots with temporary
	 * references that are not attached to an inode. e.g. see xfs_setattr().
	 */
	if (dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		mutex_unlock(&dqp->q_hash->qh_lock);
		return (1);
	}

	/* An unreferenced dquot must already be sitting on the freelist. */
	ASSERT(!list_empty(&dqp->q_freelist));

	/*
	 * If we're turning off quotas, we have to make sure that, for
	 * example, we don't delete quota disk blocks while dquots are
	 * in the process of getting written to those disk blocks.
	 * This dquot might well be on AIL, and we can't leave it there
	 * if we're turning off quotas. Basically, we need this flush
	 * lock, and are willing to block on it.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		/*
		 * Block on the flush lock after nudging dquot buffer,
		 * if it is incore.
		 */
		xfs_qm_dqflock_pushbuf_wait(dqp);
	}

	/*
	 * XXXIf we're turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		int	error;

		/* dqflush unlocks dqflock */
		/*
		 * Given that dqpurge is a very rare occurrence, it is OK
		 * that we're holding the hashlist and mplist locks
		 * across the disk write. But, ... XXXsup
		 *
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, SYNC_WAIT);
		if (error)
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		/* re-take the flush lock dropped by the flush completion */
		xfs_dqflock(dqp);
	}
	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	/* Unhook from the hash chain and the per-mount list. */
	list_del_init(&dqp->q_hashlist);
	qh->qh_version++;
	list_del_init(&dqp->q_mplist);
	mp->m_quotainfo->qi_dqreclaims++;
	mp->m_quotainfo->qi_dquots--;
	/*
	 * XXX Move this to the front of the freelist, if we can get the
	 * freelist lock.
	 */
	ASSERT(!list_empty(&dqp->q_freelist));

	/* Scrub the dquot; it stays on the freelist for reuse. */
	dqp->q_mount = NULL;
	dqp->q_hash = NULL;
	dqp->dq_flags = XFS_DQ_INACTIVE;
	memset(&dqp->q_core, 0, sizeof(dqp->q_core));
	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);
	mutex_unlock(&qh->qh_lock);
	return (0);
}
1421
1422
/*
 * Give the buffer a little push if it is incore and
 * wait on the flush lock.
 *
 * Called with the dquot lock held but not the flush lock; the caller
 * wants the flush lock and is willing to sleep for it.  Promoting the
 * delwri buffer makes the in-flight flush complete sooner, which in
 * turn releases the flush lock we block on at out_lock.
 */
void
xfs_qm_dqflock_pushbuf_wait(
	xfs_dquot_t	*dqp)
{
	xfs_mount_t	*mp = dqp->q_mount;
	xfs_buf_t	*bp;

	/*
	 * Check to see if the dquot has been flushed delayed
	 * write. If so, grab its buffer and send it
	 * out immediately. We'll be able to acquire
	 * the flush lock when the I/O completes.
	 */
	bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno,
			mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	if (!bp)
		goto out_lock;

	if (XFS_BUF_ISDELAYWRITE(bp)) {
		/* a pinned buffer can't be written until the log is pushed */
		if (xfs_buf_ispinned(bp))
			xfs_log_force(mp, 0);
		xfs_buf_delwri_promote(bp);
		wake_up_process(bp->b_target->bt_task);
	}
	xfs_buf_relse(bp);
out_lock:
	xfs_dqflock(dqp);
}
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
deleted file mode 100644
index 34b7e945dbfa..000000000000
--- a/fs/xfs/quota/xfs_dquot.h
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
#ifndef __XFS_DQUOT_H__
#define __XFS_DQUOT_H__

/*
 * Dquots are structures that hold quota information about a user or a group,
 * much like inodes are for files. In fact, dquots share many characteristics
 * with inodes. However, dquots can also be a centralized resource, relative
 * to a collection of inodes. In this respect, dquots share some characteristics
 * of the superblock.
 * XFS dquots exploit both those in its algorithms. They make every attempt
 * to not be a bottleneck when quotas are on and have minimal impact, if any,
 * when quotas are off.
 */

/*
 * The hash chain headers (hash buckets)
 */
typedef struct xfs_dqhash {
	struct list_head  qh_list;
	struct mutex	  qh_lock;
	uint		  qh_version;	/* ever increasing version */
	uint		  qh_nelems;	/* number of dquots on the list */
} xfs_dqhash_t;

struct xfs_mount;
struct xfs_trans;

/*
 * The incore dquot structure
 */
typedef struct xfs_dquot {
	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
	struct list_head q_freelist;	/* global free list of dquots */
	struct list_head q_mplist;	/* mount's list of dquots */
	struct list_head q_hashlist;	/* global hash list of dquots */
	xfs_dqhash_t	*q_hash;	/* the hashchain header */
	struct xfs_mount*q_mount;	/* filesystem this relates to */
	struct xfs_trans*q_transp;	/* trans this belongs to currently */
	uint		 q_nrefs;	/* # active refs from inodes */
	xfs_daddr_t	 q_blkno;	/* blkno of dquot buffer */
	int		 q_bufoffset;	/* off of dq in buffer (# dquots) */
	xfs_fileoff_t	 q_fileoffset;	/* offset in quotas file */

	struct xfs_dquot*q_gdquot;	/* group dquot, hint only */
	xfs_disk_dquot_t q_core;	/* actual usage & quotas */
	xfs_dq_logitem_t q_logitem;	/* dquot log item */
	xfs_qcnt_t	 q_res_bcount;	/* total regular nblks used+reserved */
	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
	xfs_qcnt_t	 q_res_rtbcount;/* total realtime blks used+reserved */
	struct mutex	 q_qlock;	/* quota lock */
	struct completion q_flush;	/* flush completion queue */
	atomic_t	 q_pincount;	/* dquot pin count */
	wait_queue_head_t q_pinwait;	/* dquot pinning wait queue */
} xfs_dquot_t;

/*
 * Lock hierarchy for q_qlock:
 * XFS_QLOCK_NORMAL is the implicit default,
 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
 */
enum {
	XFS_QLOCK_NORMAL = 0,
	XFS_QLOCK_NESTED,
};

/* Take a hard reference; caller must hold the dquot lock. */
#define XFS_DQHOLD(dqp)		((dqp)->q_nrefs++)

/*
 * Manage the q_flush completion queue embedded in the dquot. This completion
 * queue synchronizes processes attempting to flush the in-core dquot back to
 * disk.  Holding the "flush lock" means the completion is pending; it is
 * released by complete() when the flush I/O is done.
 */
static inline void xfs_dqflock(xfs_dquot_t *dqp)
{
	wait_for_completion(&dqp->q_flush);
}

static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
{
	return try_wait_for_completion(&dqp->q_flush);
}

static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
{
	complete(&dqp->q_flush);
}

#define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
#define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
#define XFS_QM_ISPDQ(dqp)	((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp)	((dqp)->dq_flags & XFS_DQ_GROUP)
#define XFS_DQ_TO_QINF(dqp)	((dqp)->q_mount->m_quotainfo)
#define XFS_DQ_TO_QIP(dqp)	(XFS_QM_ISUDQ(dqp) ? \
				 XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
				 XFS_DQ_TO_QINF(dqp)->qi_gquotaip)

#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \
				     (XFS_IS_UQUOTA_ON((d)->q_mount)) : \
				     (XFS_IS_OQUOTA_ON((d)->q_mount))))

extern void		xfs_qm_dqdestroy(xfs_dquot_t *);
extern int		xfs_qm_dqflush(xfs_dquot_t *, uint);
extern int		xfs_qm_dqpurge(xfs_dquot_t *);
extern void		xfs_qm_dqunpin_wait(xfs_dquot_t *);
extern int		xfs_qm_dqlock_nowait(xfs_dquot_t *);
extern void		xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
extern void		xfs_qm_adjust_dqtimers(xfs_mount_t *,
					xfs_disk_dquot_t *);
extern void		xfs_qm_adjust_dqlimits(xfs_mount_t *,
					xfs_disk_dquot_t *);
extern int		xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
					xfs_dqid_t, uint, uint, xfs_dquot_t **);
extern void		xfs_qm_dqput(xfs_dquot_t *);
extern void		xfs_dqlock(xfs_dquot_t *);
extern void		xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
extern void		xfs_dqunlock(xfs_dquot_t *);
extern void		xfs_dqunlock_nonotify(xfs_dquot_t *);

#endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
deleted file mode 100644
index 9e0e2fa3f2c8..000000000000
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ /dev/null
@@ -1,529 +0,0 @@
1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_bmap.h"
32#include "xfs_rtalloc.h"
33#include "xfs_error.h"
34#include "xfs_itable.h"
35#include "xfs_attr.h"
36#include "xfs_buf_item.h"
37#include "xfs_trans_priv.h"
38#include "xfs_qm.h"
39
/* Convert a generic log item pointer back to its containing dquot log item. */
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}

/*
 * returns the number of iovecs needed to log the given dquot item.
 */
STATIC uint
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip)
{
	/*
	 * we need only two iovecs, one for the format, one for the real thing
	 */
	return 2;
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 * The first iovec carries the log format header, the second the on-disk
 * dquot image itself.
 */
STATIC void
xfs_qm_dquot_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*logvec)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);

	logvec->i_addr = &qlip->qli_format;
	logvec->i_len = sizeof(xfs_dq_logformat_t);
	logvec->i_type = XLOG_REG_TYPE_QFORMAT;
	logvec++;
	logvec->i_addr = &qlip->qli_dquot->q_core;
	logvec->i_len = sizeof(xfs_disk_dquot_t);
	logvec->i_type = XLOG_REG_TYPE_DQUOT;

	ASSERT(2 == lip->li_desc->lid_size);
	qlip->qli_format.qlf_size = 2;

}

/*
 * Increment the pin count of the given dquot.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_dqwait_unpin() if the count goes to 0. The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}
111
/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty. This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done(). Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, 0);
	if (error)
		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
			__func__, error, dqp);
	xfs_dqunlock(dqp);
}

STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return lsn;
}

/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 *
 * Pushes the log first so the transaction holding the pin commits
 * promptly rather than waiting for a periodic log flush.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
174
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return;
	}

	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	/* drop the dquot lock before touching the buffer */
	xfs_dqunlock(dqp);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	xfs_buf_relse(bp);
}

/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item. Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping. If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 *
 * Returns XFS_ITEM_PINNED / XFS_ITEM_LOCKED / XFS_ITEM_PUSHBUF /
 * XFS_ITEM_SUCCESS; on SUCCESS and PUSHBUF the dquot lock is held by us.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_qm_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	if (!xfs_dqflock_nowait(dqp)) {
		/*
		 * dquot has already been flushed to the backing buffer,
		 * leave it locked, pushbuf routine will unlock it.
		 */
		return XFS_ITEM_PUSHBUF;
	}

	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
	return XFS_ITEM_SUCCESS;
}
250
/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction. If the
 * hold flags is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction. Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}

/*
 * this needs to stamp an lsn into the dquot, I think.
 * rpc's that look at user dquot's would then have to
 * push on the dependency recorded in the dquot
 *
 * (Currently a deliberate no-op.)
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
}

/*
 * This is the ops vector for dquots
 */
static struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_trylock	= xfs_qm_dquot_logitem_trylock,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_pushbuf	= xfs_qm_dquot_logitem_pushbuf,
	.iop_committing = xfs_qm_dquot_logitem_committing
};
306
/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
	struct xfs_dquot	*dqp)
{
	struct xfs_dq_logitem	*lp = &dqp->q_logitem;

	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
					&xfs_dquot_item_ops);
	lp->qli_dquot = dqp;
	lp->qli_format.qlf_type = XFS_LI_DQUOT;
	lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
	lp->qli_format.qlf_blkno = dqp->q_blkno;
	lp->qli_format.qlf_len = 1;
	/*
	 * This is just the offset of this dquot within its buffer
	 * (which is currently 1 FSB and probably won't change).
	 * Hence 32 bits for this offset should be just fine.
	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
	 * here, and recompute it at recovery time.
	 */
	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
}
334
/*------------------ QUOTAOFF LOG ITEMS -------------------*/

/* Convert a generic log item pointer back to its containing quotaoff item. */
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}


/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for an quotaoff item. It just logs the
 * quotaoff_log_format structure.
 */
STATIC uint
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip)
{
	return 1;
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given quotaoff log item. We use only 1 iovec, and we point that
 * at the quotaoff_log_format structure embedded in the quotaoff item.
 * It is at this point that we assert that all of the extent
 * slots in the quotaoff item have been filled.
 */
STATIC void
xfs_qm_qoff_logitem_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*log_vector)
{
	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);

	ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);

	log_vector->i_addr = &qflip->qql_format;
	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
	log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
	qflip->qql_format.qf_size = 1;
}

/*
 * Pinning has no meaning for an quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}

/*
 * Since pinning has no meaning for an quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}

/*
 * Quotaoff items have no locking; always report the item as locked so
 * the AIL never tries to push it.  (Note: despite the historical comment
 * claiming "success", XFS_ITEM_LOCKED is deliberate here.)
 */
STATIC uint
xfs_qm_qoff_logitem_trylock(
	struct xfs_log_item	*lip)
{
	return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so unlocking is a no-op.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}

/*
 * There isn't much you can do to push on an quotaoff item. It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC void
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip)
{
}
438
439
/*
 * Committed callback for the quotaoff *end* item: once the end record is
 * on disk the quotaoff is irrevocable, so pull the matching start item
 * off the AIL and free both items.  Returning (xfs_lsn_t)-1 tells the
 * log code the end item itself never enters the AIL.
 */
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);

	kmem_free(qfs);
	kmem_free(qfe);
	return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this. I think we can
 * just ignore it. The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off in which
 * we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen? Also, do we need different routines for
 * quotaoff start and quotaoff end? I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable). If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/* Ops for quotaoff-end items: only .iop_committed differs from the start ops. */
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};
508
/*
 * Allocate and initialize an quotaoff item of the correct quota type(s).
 *
 * @start: the quotaoff-start item when creating the matching end item,
 *         or NULL when creating the start item itself; this selects
 *         which ops vector the new item gets.
 * Memory is freed in xfs_qm_qoffend_logitem_committed().
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	qf->qql_item.li_mountp = mp;
	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
	qf->qql_format.qf_flags = flags;
	qf->qql_start_lip = start;
	return qf;
}
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h
deleted file mode 100644
index 5acae2ada70b..000000000000
--- a/fs/xfs/quota/xfs_dquot_item.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
#ifndef __XFS_DQUOT_ITEM_H__
#define __XFS_DQUOT_ITEM_H__

struct xfs_dquot;
struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;

/*
 * Log item tracking a single dirty dquot through the transaction and
 * AIL machinery.
 */
typedef struct xfs_dq_logitem {
	xfs_log_item_t		 qli_item;	   /* common portion */
	struct xfs_dquot	*qli_dquot;	   /* dquot ptr */
	xfs_lsn_t		 qli_flush_lsn;	   /* lsn at last flush */
	xfs_dq_logformat_t	 qli_format;	   /* logged structure */
} xfs_dq_logitem_t;

/*
 * Log item recording a quotaoff operation.  A quotaoff-end item is
 * linked back to its quotaoff-start item through qql_start_lip; for a
 * start item that field is NULL.
 */
typedef struct xfs_qoff_logitem {
	xfs_log_item_t		 qql_item;	/* common portion */
	struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
	xfs_qoff_logformat_t	 qql_format;	/* logged structure */
} xfs_qoff_logitem_t;


/* Set up the dquot log item embedded in a dquot. */
extern void		   xfs_qm_dquot_logitem_init(struct xfs_dquot *);
/* Allocate a quotaoff item; non-NULL 'start' makes it an end item. */
extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
					struct xfs_qoff_logitem *, uint);
/* Allocate a quotaoff item and join it to the transaction. */
extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
					struct xfs_qoff_logitem *, uint);
/* Mark a quotaoff item dirty in the given transaction. */
extern void		   xfs_trans_log_quotaoff_item(struct xfs_trans *,
					struct xfs_qoff_logitem *);

#endif	/* __XFS_DQUOT_ITEM_H__ */
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
deleted file mode 100644
index 9a0aa76facdf..000000000000
--- a/fs/xfs/quota/xfs_qm.c
+++ /dev/null
@@ -1,2416 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_ialloc_btree.h"
31#include "xfs_dinode.h"
32#include "xfs_inode.h"
33#include "xfs_ialloc.h"
34#include "xfs_itable.h"
35#include "xfs_rtalloc.h"
36#include "xfs_error.h"
37#include "xfs_bmap.h"
38#include "xfs_attr.h"
39#include "xfs_buf_item.h"
40#include "xfs_trans_space.h"
41#include "xfs_utils.h"
42#include "xfs_qm.h"
43#include "xfs_trace.h"
44
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
struct mutex	xfs_Gqm_lock;		/* serializes creation/teardown of xfs_Gqm */
struct xfs_qm	*xfs_Gqm;		/* the single global quota manager */
uint		ndquot;			/* system-wide dquot target, derived from
					 * hash size in xfs_Gqm_init() */

kmem_zone_t	*qm_dqzone;		/* allocation zone for xfs_dquot_t */
kmem_zone_t	*qm_dqtrxzone;		/* zone for per-transaction dquot accounting
					 * (xfs_dquot_acct_t, trans t_dqinfo) */

STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);

STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);

/* Memory-pressure callback so the VM can reclaim cached dquots. */
static struct shrinker xfs_qm_shaker = {
	.shrink = xfs_qm_shake,
	.seeks = DEFAULT_SEEKS,
};
69
/*
 * Initialize the XQM structure.
 * Note that there is not one quota manager per file system.
 *
 * Returns the new manager, or NULL if the hash table allocations fail.
 * The dquot/dqtrx zones are created on first use and shared across
 * repeated init/destroy cycles (see qm_dqzone/qm_dqtrxzone).
 */
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
	xfs_dqhash_t	*udqhash, *gdqhash;
	xfs_qm_t	*xqm;
	size_t		hsize;
	uint		i;

	/*
	 * Initialize the dquot hash tables.
	 * kmem_zalloc_greedy() allocates between the LOW and HIGH byte
	 * bounds depending on memory pressure and reports the size it
	 * actually obtained back through hsize.
	 */
	udqhash = kmem_zalloc_greedy(&hsize,
				     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
				     XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
	if (!udqhash)
		goto out;

	/* Size the group hash table to match whatever the user one got. */
	gdqhash = kmem_zalloc_large(hsize);
	if (!gdqhash)
		goto out_free_udqhash;

	/* Convert hsize from bytes to number of hash buckets. */
	hsize /= sizeof(xfs_dqhash_t);
	ndquot = hsize << 8;

	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
	/*
	 * hsize is used as a power-of-two mask below -- assumes
	 * kmem_zalloc_greedy returns power-of-two sizes; TODO confirm.
	 */
	xqm->qm_dqhashmask = hsize - 1;
	xqm->qm_usr_dqhtable = udqhash;
	xqm->qm_grp_dqhtable = gdqhash;
	ASSERT(xqm->qm_usr_dqhtable != NULL);
	ASSERT(xqm->qm_grp_dqhtable != NULL);

	for (i = 0; i < hsize; i++) {
		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
	}

	/*
	 * Freelist of all dquots of all file systems
	 */
	INIT_LIST_HEAD(&xqm->qm_dqfrlist);
	xqm->qm_dqfrlist_cnt = 0;
	mutex_init(&xqm->qm_dqfrlist_lock);

	/*
	 * dquot zone. we register our own low-memory callback.
	 * The zone survives a destroy/init cycle, hence the NULL check.
	 */
	if (!qm_dqzone) {
		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
						"xfs_dquots");
		qm_dqzone = xqm->qm_dqzone;
	} else
		xqm->qm_dqzone = qm_dqzone;

	register_shrinker(&xfs_qm_shaker);

	/*
	 * The t_dqinfo portion of transactions.
	 */
	if (!qm_dqtrxzone) {
		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
						   "xfs_dqtrx");
		qm_dqtrxzone = xqm->qm_dqtrxzone;
	} else
		xqm->qm_dqtrxzone = qm_dqtrxzone;

	atomic_set(&xqm->qm_totaldquots, 0);
	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
	xqm->qm_nrefs = 0;
	return xqm;

 out_free_udqhash:
	kmem_free_large(udqhash);
 out:
	return NULL;
}
149
150/*
151 * Destroy the global quota manager when its reference count goes to zero.
152 */
153STATIC void
154xfs_qm_destroy(
155 struct xfs_qm *xqm)
156{
157 struct xfs_dquot *dqp, *n;
158 int hsize, i;
159
160 ASSERT(xqm != NULL);
161 ASSERT(xqm->qm_nrefs == 0);
162 unregister_shrinker(&xfs_qm_shaker);
163 hsize = xqm->qm_dqhashmask + 1;
164 for (i = 0; i < hsize; i++) {
165 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
166 xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
167 }
168 kmem_free_large(xqm->qm_usr_dqhtable);
169 kmem_free_large(xqm->qm_grp_dqhtable);
170 xqm->qm_usr_dqhtable = NULL;
171 xqm->qm_grp_dqhtable = NULL;
172 xqm->qm_dqhashmask = 0;
173
174 /* frlist cleanup */
175 mutex_lock(&xqm->qm_dqfrlist_lock);
176 list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
177 xfs_dqlock(dqp);
178 list_del_init(&dqp->q_freelist);
179 xfs_Gqm->qm_dqfrlist_cnt--;
180 xfs_dqunlock(dqp);
181 xfs_qm_dqdestroy(dqp);
182 }
183 mutex_unlock(&xqm->qm_dqfrlist_lock);
184 mutex_destroy(&xqm->qm_dqfrlist_lock);
185 kmem_free(xqm);
186}
187
188/*
189 * Called at mount time to let XQM know that another file system is
190 * starting quotas. This isn't crucial information as the individual mount
191 * structures are pretty independent, but it helps the XQM keep a
192 * global view of what's going on.
193 */
194/* ARGSUSED */
195STATIC int
196xfs_qm_hold_quotafs_ref(
197 struct xfs_mount *mp)
198{
199 /*
200 * Need to lock the xfs_Gqm structure for things like this. For example,
201 * the structure could disappear between the entry to this routine and
202 * a HOLD operation if not locked.
203 */
204 mutex_lock(&xfs_Gqm_lock);
205
206 if (!xfs_Gqm) {
207 xfs_Gqm = xfs_Gqm_init();
208 if (!xfs_Gqm) {
209 mutex_unlock(&xfs_Gqm_lock);
210 return ENOMEM;
211 }
212 }
213
214 /*
215 * We can keep a list of all filesystems with quotas mounted for
216 * debugging and statistical purposes, but ...
217 * Just take a reference and get out.
218 */
219 xfs_Gqm->qm_nrefs++;
220 mutex_unlock(&xfs_Gqm_lock);
221
222 return 0;
223}
224
225
/*
 * Release the reference that a filesystem took at mount time,
 * so that we know when we need to destroy the entire quota manager.
 *
 * Also sweeps the global freelist and destroys any dquot that has
 * already gone INACTIVE (detached from its mount and hash).  When the
 * last reference drops, the whole XQM structure is torn down.
 */
/* ARGSUSED */
STATIC void
xfs_qm_rele_quotafs_ref(
	struct xfs_mount	*mp)
{
	xfs_dquot_t	*dqp, *n;

	ASSERT(xfs_Gqm);
	ASSERT(xfs_Gqm->qm_nrefs > 0);

	/*
	 * Go thru the freelist and destroy all inactive dquots.
	 * Lock order here: qm_dqfrlist_lock outside, per-dquot dqlock
	 * inside.
	 */
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);

	list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			/* INACTIVE implies fully detached and clean. */
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(list_empty(&dqp->q_hashlist));
			ASSERT(list_empty(&dqp->q_mplist));
			list_del_init(&dqp->q_freelist);
			xfs_Gqm->qm_dqfrlist_cnt--;
			xfs_dqunlock(dqp);
			xfs_qm_dqdestroy(dqp);
		} else {
			xfs_dqunlock(dqp);
		}
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);

	/*
	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
	 * be restarted.
	 */
	mutex_lock(&xfs_Gqm_lock);
	if (--xfs_Gqm->qm_nrefs == 0) {
		xfs_qm_destroy(xfs_Gqm);
		xfs_Gqm = NULL;
	}
	mutex_unlock(&xfs_Gqm_lock);
}
273
274/*
275 * Just destroy the quotainfo structure.
276 */
277void
278xfs_qm_unmount(
279 struct xfs_mount *mp)
280{
281 if (mp->m_quotainfo) {
282 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
283 xfs_qm_destroy_quotainfo(mp);
284 }
285}
286
287
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	/* Only write the superblock if the quota flags actually changed. */
	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
378
379/*
380 * Called from the vfsops layer.
381 */
382void
383xfs_qm_unmount_quotas(
384 xfs_mount_t *mp)
385{
386 /*
387 * Release the dquots that root inode, et al might be holding,
388 * before we flush quotas and blow away the quotainfo structure.
389 */
390 ASSERT(mp->m_rootip);
391 xfs_qm_dqdetach(mp->m_rootip);
392 if (mp->m_rbmip)
393 xfs_qm_dqdetach(mp->m_rbmip);
394 if (mp->m_rsumip)
395 xfs_qm_dqdetach(mp->m_rsumip);
396
397 /*
398 * Release the quota inodes.
399 */
400 if (mp->m_quotainfo) {
401 if (mp->m_quotainfo->qi_uquotaip) {
402 IRELE(mp->m_quotainfo->qi_uquotaip);
403 mp->m_quotainfo->qi_uquotaip = NULL;
404 }
405 if (mp->m_quotainfo->qi_gquotaip) {
406 IRELE(mp->m_quotainfo->qi_gquotaip);
407 mp->m_quotainfo->qi_gquotaip = NULL;
408 }
409 }
410}
411
/*
 * Flush all dquots of the given file system to disk. The dquots are
 * _not_ purged from memory here, just their data written to disk.
 *
 * Returns 0 on success or the first xfs_qm_dqflush() error.  The scan
 * restarts from the top whenever the mplist changes underneath us
 * (detected via the qi_dqreclaims counter) while the list lock was
 * dropped for I/O.
 */
STATIC int
xfs_qm_dqflush_all(
	struct xfs_mount	*mp,
	int			sync_mode)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	int			recl;
	struct xfs_dquot	*dqp;
	int			error;

	if (!q)
		return 0;
again:
	mutex_lock(&q->qi_dqlist_lock);
	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
		xfs_dqlock(dqp);
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		/* snapshot reclaim count to detect list changes below */
		recl = q->qi_dqreclaims;
		if (!xfs_dqflock_nowait(dqp)) {
			/*
			 * If we can't grab the flush lock then check
			 * to see if the dquot has been flushed delayed
			 * write.  If so, grab its buffer and send it
			 * out immediately.  We'll be able to acquire
			 * the flush lock when the I/O completes.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		mutex_unlock(&q->qi_dqlist_lock);
		error = xfs_qm_dqflush(dqp, sync_mode);
		xfs_dqunlock(dqp);
		if (error)
			return error;

		mutex_lock(&q->qi_dqlist_lock);
		if (recl != q->qi_dqreclaims) {
			/* list changed while unlocked; iteration is stale */
			mutex_unlock(&q->qi_dqlist_lock);
			/* XXX restart limit */
			goto again;
		}
	}

	mutex_unlock(&q->qi_dqlist_lock);
	/* return ! busy */
	return 0;
}
/*
 * Release the group dquot pointers the user dquots may be
 * carrying around as a hint. mplist is locked on entry and exit.
 *
 * The mplist lock must be dropped around xfs_qm_dqput(); if anything
 * was reclaimed in the meantime (qi_dqreclaims changed) the whole scan
 * restarts.
 */
STATIC void
xfs_qm_detach_gdquots(
	struct xfs_mount	*mp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dquot	*dqp, *gdqp;
	int			nrecl;

 again:
	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
		xfs_dqlock(dqp);
		/* Detach the group hint (if any) while both dquots are locked. */
		if ((gdqp = dqp->q_gdquot)) {
			xfs_dqlock(gdqp);
			dqp->q_gdquot = NULL;
		}
		xfs_dqunlock(dqp);

		if (gdqp) {
			/*
			 * Can't hold the mplist lock across a dqput.
			 * XXXmust convert to marker based iterations here.
			 */
			nrecl = q->qi_dqreclaims;
			mutex_unlock(&q->qi_dqlist_lock);
			/* gdqp is still locked; dqput drops the lock */
			xfs_qm_dqput(gdqp);

			mutex_lock(&q->qi_dqlist_lock);
			if (nrecl != q->qi_dqreclaims)
				goto again;
		}
	}
}
508
/*
 * Go through all the incore dquots of this file system and take them
 * off the mplist and hashlist, if the dquot type matches the dqtype
 * parameter. This is used when turning off quota accounting for
 * users and/or groups, as well as when the filesystem is unmounting.
 *
 * Returns the number of dquots that could not be purged this pass
 * (the caller retries until this reaches zero).
 */
STATIC int
xfs_qm_dqpurge_int(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dquot	*dqp, *n;
	uint			dqtype;
	int			nrecl;
	int			nmisses;

	if (!q)
		return 0;

	/* Translate XFS_QMOPT_* selection flags into XFS_DQ_* type bits. */
	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
	dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;

	mutex_lock(&q->qi_dqlist_lock);

	/*
	 * In the first pass through all incore dquots of this filesystem,
	 * we release the group dquot pointers the user dquots may be
	 * carrying around as a hint. We need to do this irrespective of
	 * what's being turned off.
	 */
	xfs_qm_detach_gdquots(mp);

      again:
	nmisses = 0;
	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
	/*
	 * Try to get rid of all of the unwanted dquots. The idea is to
	 * get them off mplist and hashlist, but leave them on freelist.
	 */
	list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
		/*
		 * It's OK to look at the type without taking dqlock here.
		 * We're holding the mplist lock here, and that's needed for
		 * a dqreclaim.
		 */
		if ((dqp->dq_flags & dqtype) == 0)
			continue;

		/* Hash lock ranks above the mplist lock, hence the trylock
		 * dance: on contention, drop mplist, take hash, retake
		 * mplist, and restart if the list changed meanwhile. */
		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
			nrecl = q->qi_dqreclaims;
			mutex_unlock(&q->qi_dqlist_lock);
			mutex_lock(&dqp->q_hash->qh_lock);
			mutex_lock(&q->qi_dqlist_lock);

			/*
			 * XXXTheoretically, we can get into a very long
			 * ping pong game here.
			 * No one can be adding dquots to the mplist at
			 * this point, but somebody might be taking things off.
			 */
			if (nrecl != q->qi_dqreclaims) {
				mutex_unlock(&dqp->q_hash->qh_lock);
				goto again;
			}
		}

		/*
		 * Take the dquot off the mplist and hashlist. It may remain on
		 * freelist in INACTIVE state.
		 */
		nmisses += xfs_qm_dqpurge(dqp);
	}
	mutex_unlock(&q->qi_dqlist_lock);
	return nmisses;
}
586
587int
588xfs_qm_dqpurge_all(
589 xfs_mount_t *mp,
590 uint flags)
591{
592 int ndquots;
593
594 /*
595 * Purge the dquot cache.
596 * None of the dquots should really be busy at this point.
597 */
598 if (mp->m_quotainfo) {
599 while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
600 delay(ndquots * 10);
601 }
602 }
603 return 0;
604}
605
/*
 * Attach one dquot of the given id/type to the inode slot *IO_idqpp
 * (&i_udquot or &i_gdquot).
 *
 * @ip:       locked inode (ILOCK_EXCL)
 * @id:       dquot id to look up
 * @type:     XFS_DQ_USER/XFS_DQ_GROUP/XFS_DQ_PROJ
 * @doalloc:  allocate the dquot on disk if it doesn't exist
 * @udqhint:  the inode's user dquot, used as a lookup shortcut when
 *            attaching a group/project dquot (may be NULL)
 * @IO_idqpp: in/out pointer to the inode's dquot slot
 *
 * Returns 0 on success, or the error from xfs_qm_dqget() (e.g. ENOENT
 * if the dquot isn't on disk and allocation wasn't requested).
 */
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			/* Hint hit: take a reference and attach. */
			xfs_dqlock(dqp);
			XFS_DQHOLD(dqp);
			ASSERT(*IO_idqpp == NULL);
			*IO_idqpp = dqp;

			xfs_dqunlock(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
691
692
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups. The idea sounds simple, but the
 * execution isn't, because the udquot might have a group dquot attached
 * already and getting rid of that gets us into lock ordering constraints.
 * The process is complicated more by the fact that the dquots may or may not
 * be locked on entry.
 *
 * Both dquots are unlocked on return.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	if ((tmp = udq->q_gdquot)) {
		/* Already hinting at this gdquot: nothing to do. */
		if (tmp == gdq) {
			xfs_dqunlock(udq);
			return;
		}

		udq->q_gdquot = NULL;
		/*
		 * We can't keep any dqlocks when calling dqrele,
		 * because the freelist lock comes before dqlocks.
		 */
		xfs_dqunlock(udq);
		/*
		 * we took a hard reference once upon a time in dqget,
		 * so give it back when the udquot no longer points at it
		 * dqput() does the unlocking of the dquot.
		 */
		xfs_qm_dqrele(tmp);

		xfs_dqlock(udq);
		xfs_dqlock(gdq);

	} else {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		xfs_dqlock(gdq);
	}

	ASSERT(XFS_DQ_IS_LOCKED(udq));
	ASSERT(XFS_DQ_IS_LOCKED(gdq));
	/*
	 * Somebody could have attached a gdquot here,
	 * when we dropped the uqlock. If so, just do nothing.
	 */
	if (udq->q_gdquot == NULL) {
		/* Take a reference for the hint pointer. */
		XFS_DQHOLD(gdq);
		udq->q_gdquot = gdq;
	}

	xfs_dqunlock(gdq);
	xfs_dqunlock(udq);
}
751
752
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 *
 * Returns 0 on success (including the no-op cases: quota off, dquots
 * already attached, or the inode is itself a quota inode), otherwise
 * an error from xfs_qm_dqattach_one().
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    !XFS_NOT_DQATTACHED(mp, ip) ||
	    ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	/* i_gdquot holds either the group or the project dquot. */
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We may or may not have the i_udquot locked at this point,
		 * but this check is OK since we don't depend on the i_gdquot to
		 * be accurate 100% all the time. It is just a hint, and this
		 * will succeed in general.
		 */
		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
			goto done;
		/*
		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
		 */
		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}
840
841int
842xfs_qm_dqattach(
843 struct xfs_inode *ip,
844 uint flags)
845{
846 int error;
847
848 xfs_ilock(ip, XFS_ILOCK_EXCL);
849 error = xfs_qm_dqattach_locked(ip, flags);
850 xfs_iunlock(ip, XFS_ILOCK_EXCL);
851
852 return error;
853}
854
855/*
856 * Release dquots (and their references) if any.
857 * The inode should be locked EXCL except when this's called by
858 * xfs_ireclaim.
859 */
860void
861xfs_qm_dqdetach(
862 xfs_inode_t *ip)
863{
864 if (!(ip->i_udquot || ip->i_gdquot))
865 return;
866
867 trace_xfs_dquot_dqdetach(ip);
868
869 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
870 ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
871 if (ip->i_udquot) {
872 xfs_qm_dqrele(ip->i_udquot);
873 ip->i_udquot = NULL;
874 }
875 if (ip->i_gdquot) {
876 xfs_qm_dqrele(ip->i_gdquot);
877 ip->i_gdquot = NULL;
878 }
879}
880
/*
 * Write back all dirty dquots of this mount, similar to what xfs_sync
 * does with inodes.  With SYNC_TRYLOCK, skip anything that can't be
 * locked immediately; otherwise wait.  The scan restarts when the
 * mplist changes underneath us (qi_dqreclaims), bounded by
 * XFS_QM_SYNC_MAX_RESTARTS.
 *
 * Returns 0, or the first flush error (suppressed on forced shutdown
 * to avoid failing the umount path).
 */
int
xfs_qm_sync(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	int			recl, restarts;
	struct xfs_dquot	*dqp;
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	restarts = 0;

  again:
	mutex_lock(&q->qi_dqlist_lock);
	/*
	 * dqpurge_all() also takes the mplist lock and iterate thru all dquots
	 * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
	 * when we have the mplist lock, we know that dquots will be consistent
	 * as long as we have it locked.
	 */
	if (!XFS_IS_QUOTA_ON(mp)) {
		mutex_unlock(&q->qi_dqlist_lock);
		return 0;
	}
	ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
	list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
		/*
		 * If this is vfs_sync calling, then skip the dquots that
		 * don't 'seem' to be dirty. ie. don't acquire dqlock.
		 * This is very similar to what xfs_sync does with inodes.
		 */
		if (flags & SYNC_TRYLOCK) {
			if (!XFS_DQ_IS_DIRTY(dqp))
				continue;
			if (!xfs_qm_dqlock_nowait(dqp))
				continue;
		} else {
			xfs_dqlock(dqp);
		}

		/*
		 * Now, find out for sure if this dquot is dirty or not.
		 */
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		/* snapshot reclaim count to detect list changes below */
		recl = q->qi_dqreclaims;
		if (!xfs_dqflock_nowait(dqp)) {
			if (flags & SYNC_TRYLOCK) {
				xfs_dqunlock(dqp);
				continue;
			}
			/*
			 * If we can't grab the flush lock then if the caller
			 * really wanted us to give this our best shot, so
			 * see if we can give a push to the buffer before we wait
			 * on the flush lock. At this point, we know that
			 * even though the dquot is being flushed,
			 * it has (new) dirty data.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write
		 */
		mutex_unlock(&q->qi_dqlist_lock);
		error = xfs_qm_dqflush(dqp, flags);
		xfs_dqunlock(dqp);
		if (error && XFS_FORCED_SHUTDOWN(mp))
			return 0;	/* Need to prevent umount failure */
		else if (error)
			return error;

		mutex_lock(&q->qi_dqlist_lock);
		if (recl != q->qi_dqreclaims) {
			/* list changed while unlocked: bounded restart */
			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
				break;

			mutex_unlock(&q->qi_dqlist_lock);
			goto again;
		}
	}

	mutex_unlock(&q->qi_dqlist_lock);
	return 0;
}
974
/*
 * The hash chains and the mplist use the same xfs_dqhash structure as
 * their list head, but we can take the mplist qh_lock and one of the
 * hash qh_locks at the same time without any problem as they aren't
 * related.
 */
static struct lock_class_key xfs_quota_mplist_class; /* lockdep class for qi_dqlist_lock */
982
/*
 * This initializes all the quota information that's kept in the
 * mount structure: the quotainfo itself, the quota inodes, the
 * per-mount dquot list/locks, and the default limits/timers read
 * from the id-0 "superuser" dquot.
 *
 * Returns 0 on success or an error, in which case mp->m_quotainfo is
 * left NULL.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return error;
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_LIST_HEAD(&qinf->qi_dqlist);
	mutex_init(&qinf->qi_dqlist_lock);
	/* see xfs_quota_mplist_class comment: keeps lockdep happy when
	 * taking a hash qh_lock while holding this lock */
	lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);

	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 */
	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
			     XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			     (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
				XFS_DQ_PROJ),
			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
			     &dqp);
	if (! error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		/*
		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
		 * we don't want this dquot cached. We haven't done a
		 * quotacheck yet, and quotacheck doesn't like incore dquots.
		 */
		xfs_qm_dqdestroy(dqp);
	} else {
		/* No id-0 dquot on disk: fall back to compiled-in defaults. */
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	return 0;
}
1091
1092
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)	/* mount whose quotainfo is being torn down */
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	/* All dquots of this mount must already be purged at this point. */
	ASSERT(list_empty(&qi->qi_dqlist));
	mutex_destroy(&qi->qi_dqlist_lock);

	/* Drop our inode references on the quota inodes, if attached. */
	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;	/* leave no dangling pointer on the mount */
}
1130
1131
1132
1133/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
1134
1135/* ARGSUSED */
1136STATIC void
1137xfs_qm_list_init(
1138 xfs_dqlist_t *list,
1139 char *str,
1140 int n)
1141{
1142 mutex_init(&list->qh_lock);
1143 INIT_LIST_HEAD(&list->qh_list);
1144 list->qh_version = 0;
1145 list->qh_nelems = 0;
1146}
1147
1148STATIC void
1149xfs_qm_list_destroy(
1150 xfs_dqlist_t *list)
1151{
1152 mutex_destroy(&(list->qh_lock));
1153}
1154
/*
 * Create an inode and return with a reference already taken, but unlocked
 * This is how we create quota inodes
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,		/* OUT: the newly allocated quota inode */
	__int64_t	sbfields,	/* superblock fields to log */
	uint		flags)		/* XFS_QMOPT_{U,G}QUOTA | SBVERSION */
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	/*
	 * Start a permanent transaction with enough reservation for an
	 * inode create; if the reservation fails, cancel cleanly (the
	 * transaction is still clean, so no abort is needed here).
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/*
	 * Quota inodes are plain regular files with no parent directory
	 * (NULL dp).  On failure the transaction may already be dirty,
	 * so it must be aborted as well as cancelled.
	 */
	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		/* First quota inode ever on this fs: turn the feature bit on. */
		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
1220
1221
/*
 * Zero the resource counters, timers and warning counts of every dquot in
 * buffer @bp, repairing corrupt dqblks as we go.  @id is the id of the
 * first dquot in the buffer, @type its USR/GRP/PRJ flavor.  Used by
 * quotacheck to get a clean slate before re-accounting.
 */
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	/* Cross-check the cached dquots-per-chunk against the geometry. */
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	ddq = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		/* step over the whole on-disk dqblk to the next dquot core */
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}
1264
/*
 * Read each dquot-cluster buffer in [@bno, @bno + @blkcnt), reset the
 * dquots it contains via xfs_qm_reset_dqcounts(), and write it back
 * delayed-write.  @firstid is the dquot id corresponding to @bno; it is
 * advanced by qi_dqperchunk per buffer.  Returns the first read error, 0
 * on success.
 */
STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		type;

	ASSERT(blkcnt > 0);
	/* flags selects which quota flavor these buffers belong to */
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp);
		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_bdwrite(mp, bp);	/* delayed write; also releases bp */
		/*
		 * goto the next block.
		 */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}
	return error;
}
1308
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 *
 * @qip is the quota inode to walk; @flags selects the quota flavor and is
 * passed through to xfs_qm_dqiter_bufs().  Extents are mapped with
 * xfs_bmapi() in batches of XFS_DQITER_MAP_SIZE, and the next extent is
 * read ahead while the current one is processed.
 */
STATIC int
xfs_qm_dqiterate(
	xfs_mount_t	*mp,
	xfs_inode_t	*qip,
	uint		flags)
{
	xfs_bmbt_irec_t		*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi(NULL, qip, lblkno,
				  maxlblkcnt - lblkno,
				  XFS_BMAPI_METADATA,
				  NULL,
				  0, map, &nmaps, NULL);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			/* holes hold no dquots; skip them */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			/* dquot id of the first dquot in this extent */
			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt =  map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
						       firstid,
						       map[i].br_startblock,
						       map[i].br_blockcount,
						       flags))) {
				break;
			}
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map);

	return error;
}
1406
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dqout as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 *
 * @nblks / @rtblks are the regular and realtime block counts to charge to
 * the dquot identified by (@id, @type); one inode is always charged.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	/* Get (allocating on disk if needed) and lock the dquot. */
	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	/* Mark dirty so dqflush_all will push the changes to the buffer. */
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
1469
1470STATIC int
1471xfs_qm_get_rtblks(
1472 xfs_inode_t *ip,
1473 xfs_qcnt_t *O_rtblks)
1474{
1475 xfs_filblks_t rtblks; /* total rt blks */
1476 xfs_extnum_t idx; /* extent record index */
1477 xfs_ifork_t *ifp; /* inode fork pointer */
1478 xfs_extnum_t nextents; /* number of extent entries */
1479 int error;
1480
1481 ASSERT(XFS_IS_REALTIME_INODE(ip));
1482 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1483 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1484 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1485 return error;
1486 }
1487 rtblks = 0;
1488 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1489 for (idx = 0; idx < nextents; idx++)
1490 rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1491 *O_rtblks = (xfs_qcnt_t)rtblks;
1492 return 0;
1493}
1494
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 *
 * Returns 0 with *res = BULKSTAT_RV_DIDONE on success; on failure *res is
 * BULKSTAT_RV_NOTHING (inode skipped) or BULKSTAT_RV_GIVEUP (abort scan).
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	/* regular blocks = total blocks minus the realtime portion */
	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	/* common unwind: unlock and release the inode, tell bulkstat to stop */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
1593
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 *
 * Phases: (1) zero all on-disk dquot counters, (2) bulkstat every inode
 * and re-accumulate usage into incore dquots, (3) flush dirty dquots to
 * their buffers and push them to disk, then stamp the CHKD flags.
 * Runs single-threaded at mount time.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;

	count = INT_MAX;	/* no per-call inode limit for bulkstat */
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.
	 * Flush them down to disk buffers if everything was updated
	 * successfully.
	 */
	if (!error)
		error = xfs_qm_dqflush_all(mp, 0);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	XFS_bflush(mp->m_ddev_targp);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

 error_return:
	if (error) {
		xfs_warn(mp,
	"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}
1715
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 *
 * Existing quota inodes are igot from the superblock pointers; missing
 * ones are created via xfs_qm_qino_alloc(), which also logs any needed
 * superblock/version changes.  On success the inodes are stashed in
 * mp->m_quotainfo with references held.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				/* don't leak the uquota inode reference */
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		/* No quota feature bit yet: first qino_alloc will add it. */
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		/* version bump (if any) was logged above; don't redo it */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}
1793
1794
1795
/*
 * Just pop the least recently used dquot off the freelist and
 * recycle it. The returned dquot is locked.
 *
 * Returns NULL if nothing reclaimable was found within
 * XFS_QM_RECLAIM_MAX_RESTARTS attempts.  All inner locks are taken with
 * trylock and the scan restarts on contention, to respect the documented
 * lock order below.
 */
STATIC xfs_dquot_t *
xfs_qm_dqreclaim_one(void)
{
	xfs_dquot_t	*dqpout;
	xfs_dquot_t	*dqp;
	int		restarts;
	int		startagain;

	restarts = 0;
	dqpout = NULL;

	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
again:
	startagain = 0;
	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);

	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
		struct xfs_mount *mp = dqp->q_mount;
		xfs_dqlock(dqp);

		/*
		 * We are racing with dqlookup here. Naturally we don't
		 * want to reclaim a dquot that lookup wants. We release the
		 * freelist lock and start over, so that lookup will grab
		 * both the dquot and the freelistlock.
		 */
		if (dqp->dq_flags & XFS_DQ_WANT) {
			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));

			trace_xfs_dqreclaim_want(dqp);
			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
			restarts++;
			startagain = 1;
			goto dqunlock;
		}

		/*
		 * If the dquot is inactive, we are assured that it is
		 * not on the mplist or the hashlist, and that makes our
		 * life easier.
		 */
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(mp == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(list_empty(&dqp->q_hashlist));
			ASSERT(list_empty(&dqp->q_mplist));
			list_del_init(&dqp->q_freelist);
			xfs_Gqm->qm_dqfrlist_cnt--;
			dqpout = dqp;
			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
			goto dqunlock;
		}

		ASSERT(dqp->q_hash);
		ASSERT(!list_empty(&dqp->q_mplist));

		/*
		 * Try to grab the flush lock. If this dquot is in the process
		 * of getting flushed to disk, we don't want to reclaim it.
		 */
		if (!xfs_dqflock_nowait(dqp))
			goto dqunlock;

		/*
		 * We have the flush lock so we know that this is not in the
		 * process of being flushed. So, if this is dirty, flush it
		 * DELWRI so that we don't get a freelist infested with
		 * dirty dquots.
		 */
		if (XFS_DQ_IS_DIRTY(dqp)) {
			int	error;

			trace_xfs_dqreclaim_dirty(dqp);

			/*
			 * We flush it delayed write, so don't bother
			 * releasing the freelist lock.
			 */
			error = xfs_qm_dqflush(dqp, 0);
			if (error) {
				xfs_warn(mp, "%s: dquot %p flush failed",
					__func__, dqp);
			}
			goto dqunlock;
		}

		/*
		 * We're trying to get the hashlock out of order. This races
		 * with dqlookup; so, we giveup and goto the next dquot if
		 * we couldn't get the hashlock. This way, we won't starve
		 * a dqlookup process that holds the hashlock that is
		 * waiting for the freelist lock.
		 */
		if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
			restarts++;
			goto dqfunlock;
		}

		/*
		 * This races with dquot allocation code as well as dqflush_all
		 * and reclaim code. So, if we failed to grab the mplist lock,
		 * giveup everything and start over.
		 */
		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
			restarts++;
			startagain = 1;
			goto qhunlock;
		}

		/* All locks held: detach the dquot from every list. */
		ASSERT(dqp->q_nrefs == 0);
		list_del_init(&dqp->q_mplist);
		mp->m_quotainfo->qi_dquots--;
		mp->m_quotainfo->qi_dqreclaims++;
		list_del_init(&dqp->q_hashlist);
		dqp->q_hash->qh_version++;	/* invalidate cached lookups */
		list_del_init(&dqp->q_freelist);
		xfs_Gqm->qm_dqfrlist_cnt--;
		dqpout = dqp;
		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
qhunlock:
		mutex_unlock(&dqp->q_hash->qh_lock);
dqfunlock:
		xfs_dqfunlock(dqp);
dqunlock:
		xfs_dqunlock(dqp);
		if (dqpout)
			break;
		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
			break;
		if (startagain) {
			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
			goto again;
		}
	}
	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
	return dqpout;
}
1937
1938/*
1939 * Traverse the freelist of dquots and attempt to reclaim a maximum of
1940 * 'howmany' dquots. This operation races with dqlookup(), and attempts to
1941 * favor the lookup function ...
1942 */
1943STATIC int
1944xfs_qm_shake_freelist(
1945 int howmany)
1946{
1947 int nreclaimed = 0;
1948 xfs_dquot_t *dqp;
1949
1950 if (howmany <= 0)
1951 return 0;
1952
1953 while (nreclaimed < howmany) {
1954 dqp = xfs_qm_dqreclaim_one();
1955 if (!dqp)
1956 return nreclaimed;
1957 xfs_qm_dqdestroy(dqp);
1958 nreclaimed++;
1959 }
1960 return nreclaimed;
1961}
1962
/*
 * The kmem_shake interface is invoked when memory is running low.
 *
 * Returns the number of dquots reclaimed, or 0 when shaking is not
 * allowed/needed for this allocation context.
 */
/* ARGSUSED */
STATIC int
xfs_qm_shake(
	struct shrinker	*shrink,
	struct shrink_control *sc)
{
	int	ndqused, nfree, n;
	gfp_t gfp_mask = sc->gfp_mask;

	if (!kmem_shake_allow(gfp_mask))
		return 0;
	if (!xfs_Gqm)
		return 0;

	nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
	/* incore dquots in all f/s's */
	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;

	ASSERT(ndqused >= 0);

	/* below the high-water mark and within ratio: nothing to do */
	if (nfree <= ndqused && nfree < ndquot)
		return 0;

	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
	n = nfree - ndqused - ndquot;		/* # over target */

	/*
	 * NOTE(review): MAX(nfree, n) asks to shake at least every free
	 * dquot, not just the overage n — looks like MIN() may have been
	 * intended; confirm before relying on this sizing.
	 */
	return xfs_qm_shake_freelist(MAX(nfree, n));
}
1994
1995
1996/*------------------------------------------------------------------*/
1997
/*
 * Return a new incore dquot. Depending on the number of
 * dquots in the system, we either allocate a new one on the kernel heap,
 * or reclaim a free one.
 * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
 * to reclaim an existing one from the freelist.
 */
boolean_t
xfs_qm_dqalloc_incore(
	xfs_dquot_t **O_dqpp)	/* OUT: zero-cored dquot for caller to init */
{
	xfs_dquot_t	*dqp;

	/*
	 * Check against high water mark to see if we want to pop
	 * a nincompoop dquot off the freelist.
	 */
	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
		/*
		 * Try to recycle a dquot from the freelist.
		 */
		if ((dqp = xfs_qm_dqreclaim_one())) {
			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
			/*
			 * Just zero the core here. The rest will get
			 * reinitialized by caller. XXX we shouldn't even
			 * do this zero ...
			 */
			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
			*O_dqpp = dqp;
			return B_FALSE;
		}
		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
	}

	/*
	 * Allocate a brand new dquot on the kernel heap and return it
	 * to the caller to initialize.
	 */
	ASSERT(xfs_Gqm->qm_dqzone != NULL);
	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
	atomic_inc(&xfs_Gqm->qm_totaldquots);

	return B_TRUE;
}
2043
2044
2045/*
2046 * Start a transaction and write the incore superblock changes to
2047 * disk. flags parameter indicates which fields have changed.
2048 */
2049int
2050xfs_qm_write_sb_changes(
2051 xfs_mount_t *mp,
2052 __int64_t flags)
2053{
2054 xfs_trans_t *tp;
2055 int error;
2056
2057 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
2058 if ((error = xfs_trans_reserve(tp, 0,
2059 mp->m_sb.sb_sectsize + 128, 0,
2060 0,
2061 XFS_DEFAULT_LOG_COUNT))) {
2062 xfs_trans_cancel(tp, 0);
2063 return error;
2064 }
2065
2066 xfs_mod_sb(tp, flags);
2067 error = xfs_trans_commit(tp, 0);
2068
2069 return error;
2070}
2071
2072
2073/* --------------- utility functions for vnodeops ---------------- */
2074
2075
/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t		uid,
	gid_t		gid,
	prid_t		prid,
	uint		flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	/* Nothing to do if quota is not enabled on this mount. */
	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	/* A setgid parent directory overrides the gid argument. */
	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = ip->i_udquot;
			xfs_dqlock(uq);
			XFS_DQHOLD(uq);
			xfs_dqunlock(uq);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		/* Same dance as above, for the group dquot. */
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		/* Project quota shares the "group" dquot slot (gq). */
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	/* Hand the references to the caller, or drop them if not wanted. */
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}
2222
2223/*
2224 * Actually transfer ownership, and do dquot modifications.
2225 * These were already reserved.
2226 */
2227xfs_dquot_t *
2228xfs_qm_vop_chown(
2229 xfs_trans_t *tp,
2230 xfs_inode_t *ip,
2231 xfs_dquot_t **IO_olddq,
2232 xfs_dquot_t *newdq)
2233{
2234 xfs_dquot_t *prevdq;
2235 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
2236 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
2237
2238
2239 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2240 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
2241
2242 /* old dquot */
2243 prevdq = *IO_olddq;
2244 ASSERT(prevdq);
2245 ASSERT(prevdq != newdq);
2246
2247 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
2248 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2249
2250 /* the sparkling new dquot */
2251 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
2252 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2253
2254 /*
2255 * Take an extra reference, because the inode
2256 * is going to keep this dquot pointer even
2257 * after the trans_commit.
2258 */
2259 xfs_dqlock(newdq);
2260 XFS_DQHOLD(newdq);
2261 xfs_dqunlock(newdq);
2262 *IO_olddq = newdq;
2263
2264 return prevdq;
2265}
2266
2267/*
2268 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
2269 */
2270int
2271xfs_qm_vop_chown_reserve(
2272 xfs_trans_t *tp,
2273 xfs_inode_t *ip,
2274 xfs_dquot_t *udqp,
2275 xfs_dquot_t *gdqp,
2276 uint flags)
2277{
2278 xfs_mount_t *mp = ip->i_mount;
2279 uint delblks, blkflags, prjflags = 0;
2280 xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq;
2281 int error;
2282
2283
2284 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2285 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2286
2287 delblks = ip->i_delayed_blks;
2288 delblksudq = delblksgdq = unresudq = unresgdq = NULL;
2289 blkflags = XFS_IS_REALTIME_INODE(ip) ?
2290 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
2291
2292 if (XFS_IS_UQUOTA_ON(mp) && udqp &&
2293 ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
2294 delblksudq = udqp;
2295 /*
2296 * If there are delayed allocation blocks, then we have to
2297 * unreserve those from the old dquot, and add them to the
2298 * new dquot.
2299 */
2300 if (delblks) {
2301 ASSERT(ip->i_udquot);
2302 unresudq = ip->i_udquot;
2303 }
2304 }
2305 if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
2306 if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
2307 xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
2308 prjflags = XFS_QMOPT_ENOSPC;
2309
2310 if (prjflags ||
2311 (XFS_IS_GQUOTA_ON(ip->i_mount) &&
2312 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
2313 delblksgdq = gdqp;
2314 if (delblks) {
2315 ASSERT(ip->i_gdquot);
2316 unresgdq = ip->i_gdquot;
2317 }
2318 }
2319 }
2320
2321 if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2322 delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
2323 flags | blkflags | prjflags)))
2324 return (error);
2325
2326 /*
2327 * Do the delayed blks reservations/unreservations now. Since, these
2328 * are done without the help of a transaction, if a reservation fails
2329 * its previous reservations won't be automatically undone by trans
2330 * code. So, we have to do it manually here.
2331 */
2332 if (delblks) {
2333 /*
2334 * Do the reservations first. Unreservation can't fail.
2335 */
2336 ASSERT(delblksudq || delblksgdq);
2337 ASSERT(unresudq || unresgdq);
2338 if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2339 delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
2340 flags | blkflags | prjflags)))
2341 return (error);
2342 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2343 unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
2344 blkflags);
2345 }
2346
2347 return (0);
2348}
2349
2350int
2351xfs_qm_vop_rename_dqattach(
2352 struct xfs_inode **i_tab)
2353{
2354 struct xfs_mount *mp = i_tab[0]->i_mount;
2355 int i;
2356
2357 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2358 return 0;
2359
2360 for (i = 0; (i < 4 && i_tab[i]); i++) {
2361 struct xfs_inode *ip = i_tab[i];
2362 int error;
2363
2364 /*
2365 * Watch out for duplicate entries in the table.
2366 */
2367 if (i == 0 || ip != i_tab[i-1]) {
2368 if (XFS_NOT_DQATTACHED(mp, ip)) {
2369 error = xfs_qm_dqattach(ip, 0);
2370 if (error)
2371 return error;
2372 }
2373 }
2374 }
2375 return 0;
2376}
2377
2378void
2379xfs_qm_vop_create_dqattach(
2380 struct xfs_trans *tp,
2381 struct xfs_inode *ip,
2382 struct xfs_dquot *udqp,
2383 struct xfs_dquot *gdqp)
2384{
2385 struct xfs_mount *mp = tp->t_mountp;
2386
2387 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2388 return;
2389
2390 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2391 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2392
2393 if (udqp) {
2394 xfs_dqlock(udqp);
2395 XFS_DQHOLD(udqp);
2396 xfs_dqunlock(udqp);
2397 ASSERT(ip->i_udquot == NULL);
2398 ip->i_udquot = udqp;
2399 ASSERT(XFS_IS_UQUOTA_ON(mp));
2400 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2401 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2402 }
2403 if (gdqp) {
2404 xfs_dqlock(gdqp);
2405 XFS_DQHOLD(gdqp);
2406 xfs_dqunlock(gdqp);
2407 ASSERT(ip->i_gdquot == NULL);
2408 ip->i_gdquot = gdqp;
2409 ASSERT(XFS_IS_OQUOTA_ON(mp));
2410 ASSERT((XFS_IS_GQUOTA_ON(mp) ?
2411 ip->i_d.di_gid : xfs_get_projid(ip)) ==
2412 be32_to_cpu(gdqp->q_core.d_id));
2413 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2414 }
2415}
2416
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
deleted file mode 100644
index 43b9abe1052c..000000000000
--- a/fs/xfs/quota/xfs_qm.h
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_QM_H__
19#define __XFS_QM_H__
20
21#include "xfs_dquot_item.h"
22#include "xfs_dquot.h"
23#include "xfs_quota_priv.h"
24#include "xfs_qm_stats.h"
25
26struct xfs_qm;
27struct xfs_inode;
28
29extern uint ndquot;
30extern struct mutex xfs_Gqm_lock;
31extern struct xfs_qm *xfs_Gqm;
32extern kmem_zone_t *qm_dqzone;
33extern kmem_zone_t *qm_dqtrxzone;
34
35/*
36 * Used in xfs_qm_sync called by xfs_sync to count the max times that it can
37 * iterate over the mountpt's dquot list in one call.
38 */
39#define XFS_QM_SYNC_MAX_RESTARTS 7
40
41/*
42 * Ditto, for xfs_qm_dqreclaim_one.
43 */
44#define XFS_QM_RECLAIM_MAX_RESTARTS 4
45
46/*
47 * Ideal ratio of free to in use dquots. Quota manager makes an attempt
48 * to keep this balance.
49 */
50#define XFS_QM_DQFREE_RATIO 2
51
52/*
53 * Dquot hashtable constants/threshold values.
54 */
55#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t))
56#define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t))
57
58/*
59 * This defines the unit of allocation of dquots.
60 * Currently, it is just one file system block, and a 4K blk contains 30
61 * (136 * 30 = 4080) dquots. It's probably not worth trying to make
62 * this more dynamic.
63 * XXXsup However, if this number is changed, we have to make sure that we don't
64 * implicitly assume that we do allocations in chunks of a single filesystem
65 * block in the dquot/xqm code.
66 */
67#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
68
69typedef xfs_dqhash_t xfs_dqlist_t;
70
71/*
72 * Quota Manager (global) structure. Lives only in core.
73 */
74typedef struct xfs_qm {
75 xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */
76 xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */
77 uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */
78 struct list_head qm_dqfrlist; /* freelist of dquots */
79 struct mutex qm_dqfrlist_lock;
80 int qm_dqfrlist_cnt;
81 atomic_t qm_totaldquots; /* total incore dquots */
82 uint qm_nrefs; /* file systems with quota on */
83 int qm_dqfree_ratio;/* ratio of free to inuse dquots */
84 kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */
85 kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */
86} xfs_qm_t;
87
88/*
89 * Various quota information for individual filesystems.
90 * The mount structure keeps a pointer to this.
91 */
92typedef struct xfs_quotainfo {
93 xfs_inode_t *qi_uquotaip; /* user quota inode */
94 xfs_inode_t *qi_gquotaip; /* group quota inode */
95 struct list_head qi_dqlist; /* all dquots in filesys */
96 struct mutex qi_dqlist_lock;
97 int qi_dquots;
98 int qi_dqreclaims; /* a change here indicates
99 a removal in the dqlist */
100 time_t qi_btimelimit; /* limit for blks timer */
101 time_t qi_itimelimit; /* limit for inodes timer */
102 time_t qi_rtbtimelimit;/* limit for rt blks timer */
103 xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
104 xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
105 xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
106 struct mutex qi_quotaofflock;/* to serialize quotaoff */
107 xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
108 uint qi_dqperchunk; /* # ondisk dqs in above chunk */
109 xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */
110 xfs_qcnt_t qi_bsoftlimit; /* default data blk soft limit */
111 xfs_qcnt_t qi_ihardlimit; /* default inode count hard limit */
112 xfs_qcnt_t qi_isoftlimit; /* default inode count soft limit */
113 xfs_qcnt_t qi_rtbhardlimit;/* default realtime blk hard limit */
114 xfs_qcnt_t qi_rtbsoftlimit;/* default realtime blk soft limit */
115} xfs_quotainfo_t;
116
117
118extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
119extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
120 xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
121extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *);
122extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *);
123
124/*
125 * We keep the usr and grp dquots separately so that locking will be easier
126 * to do at commit time. All transactions that we know of at this point
127 * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value.
128 */
129#define XFS_QM_TRANS_MAXDQS 2
130typedef struct xfs_dquot_acct {
131 xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS];
132 xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS];
133} xfs_dquot_acct_t;
134
135/*
136 * Users are allowed to have a usage exceeding their softlimit for
137 * a period this long.
138 */
139#define XFS_QM_BTIMELIMIT (7 * 24*60*60) /* 1 week */
140#define XFS_QM_RTBTIMELIMIT (7 * 24*60*60) /* 1 week */
141#define XFS_QM_ITIMELIMIT (7 * 24*60*60) /* 1 week */
142
143#define XFS_QM_BWARNLIMIT 5
144#define XFS_QM_IWARNLIMIT 5
145#define XFS_QM_RTBWARNLIMIT 5
146
147extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
148extern int xfs_qm_quotacheck(xfs_mount_t *);
149extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
150
151/* dquot stuff */
152extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
153extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
154extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
155
156/* quota ops */
157extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint);
158extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint,
159 fs_disk_quota_t *);
160extern int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint,
161 fs_disk_quota_t *);
162extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
163extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
164extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
165
166#endif /* __XFS_QM_H__ */
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
deleted file mode 100644
index a0a829addca9..000000000000
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_itable.h"
32#include "xfs_bmap.h"
33#include "xfs_rtalloc.h"
34#include "xfs_error.h"
35#include "xfs_attr.h"
36#include "xfs_buf_item.h"
37#include "xfs_qm.h"
38
39
40STATIC void
41xfs_fill_statvfs_from_dquot(
42 struct kstatfs *statp,
43 xfs_disk_dquot_t *dp)
44{
45 __uint64_t limit;
46
47 limit = dp->d_blk_softlimit ?
48 be64_to_cpu(dp->d_blk_softlimit) :
49 be64_to_cpu(dp->d_blk_hardlimit);
50 if (limit && statp->f_blocks > limit) {
51 statp->f_blocks = limit;
52 statp->f_bfree = statp->f_bavail =
53 (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
54 (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
55 }
56
57 limit = dp->d_ino_softlimit ?
58 be64_to_cpu(dp->d_ino_softlimit) :
59 be64_to_cpu(dp->d_ino_hardlimit);
60 if (limit && statp->f_files > limit) {
61 statp->f_files = limit;
62 statp->f_ffree =
63 (statp->f_files > be64_to_cpu(dp->d_icount)) ?
64 (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0;
65 }
66}
67
68
69/*
70 * Directory tree accounting is implemented using project quotas, where
71 * the project identifier is inherited from parent directories.
72 * A statvfs (df, etc.) of a directory that is using project quota should
73 * return a statvfs of the project, not the entire filesystem.
74 * This makes such trees appear as if they are filesystems in themselves.
75 */
76void
77xfs_qm_statvfs(
78 xfs_inode_t *ip,
79 struct kstatfs *statp)
80{
81 xfs_mount_t *mp = ip->i_mount;
82 xfs_dquot_t *dqp;
83
84 if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
85 xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
86 xfs_qm_dqput(dqp);
87 }
88}
89
90int
91xfs_qm_newmount(
92 xfs_mount_t *mp,
93 uint *needquotamount,
94 uint *quotaflags)
95{
96 uint quotaondisk;
97 uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;
98
99 quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) &&
100 (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);
101
102 if (quotaondisk) {
103 uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
104 pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT;
105 gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
106 }
107
108 /*
109 * If the device itself is read-only, we can't allow
110 * the user to change the state of quota on the mount -
111 * this would generate a transaction on the ro device,
112 * which would lead to an I/O error and shutdown
113 */
114
115 if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
116 (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
117 (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
118 (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
119 (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
120 (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
121 xfs_dev_is_read_only(mp, "changing quota state")) {
122 xfs_warn(mp, "please mount with%s%s%s%s.",
123 (!quotaondisk ? "out quota" : ""),
124 (uquotaondisk ? " usrquota" : ""),
125 (pquotaondisk ? " prjquota" : ""),
126 (gquotaondisk ? " grpquota" : ""));
127 return XFS_ERROR(EPERM);
128 }
129
130 if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
131 /*
132 * Call mount_quotas at this point only if we won't have to do
133 * a quotacheck.
134 */
135 if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
136 /*
137 * If an error occurred, qm_mount_quotas code
138 * has already disabled quotas. So, just finish
139 * mounting, and get on with the boring life
140 * without disk quotas.
141 */
142 xfs_qm_mount_quotas(mp);
143 } else {
144 /*
145 * Clear the quota flags, but remember them. This
146 * is so that the quota code doesn't get invoked
147 * before we're ready. This can happen when an
148 * inode goes inactive and wants to free blocks,
149 * or via xfs_log_mount_finish.
150 */
151 *needquotamount = B_TRUE;
152 *quotaflags = mp->m_qflags;
153 mp->m_qflags = 0;
154 }
155 }
156
157 return 0;
158}
159
160void __init
161xfs_qm_init(void)
162{
163 printk(KERN_INFO "SGI XFS Quota Management subsystem\n");
164 mutex_init(&xfs_Gqm_lock);
165 xfs_qm_init_procfs();
166}
167
168void __exit
169xfs_qm_exit(void)
170{
171 xfs_qm_cleanup_procfs();
172 if (qm_dqzone)
173 kmem_zone_destroy(qm_dqzone);
174 if (qm_dqtrxzone)
175 kmem_zone_destroy(qm_dqtrxzone);
176}
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
deleted file mode 100644
index 8671a0b32644..000000000000
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ /dev/null
@@ -1,105 +0,0 @@
1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_itable.h"
32#include "xfs_bmap.h"
33#include "xfs_rtalloc.h"
34#include "xfs_error.h"
35#include "xfs_attr.h"
36#include "xfs_buf_item.h"
37#include "xfs_qm.h"
38
39struct xqmstats xqmstats;
40
41static int xqm_proc_show(struct seq_file *m, void *v)
42{
43 /* maximum; incore; ratio free to inuse; freelist */
44 seq_printf(m, "%d\t%d\t%d\t%u\n",
45 ndquot,
46 xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
47 xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
48 xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0);
49 return 0;
50}
51
52static int xqm_proc_open(struct inode *inode, struct file *file)
53{
54 return single_open(file, xqm_proc_show, NULL);
55}
56
57static const struct file_operations xqm_proc_fops = {
58 .owner = THIS_MODULE,
59 .open = xqm_proc_open,
60 .read = seq_read,
61 .llseek = seq_lseek,
62 .release = single_release,
63};
64
65static int xqmstat_proc_show(struct seq_file *m, void *v)
66{
67 /* quota performance statistics */
68 seq_printf(m, "qm %u %u %u %u %u %u %u %u\n",
69 xqmstats.xs_qm_dqreclaims,
70 xqmstats.xs_qm_dqreclaim_misses,
71 xqmstats.xs_qm_dquot_dups,
72 xqmstats.xs_qm_dqcachemisses,
73 xqmstats.xs_qm_dqcachehits,
74 xqmstats.xs_qm_dqwants,
75 xqmstats.xs_qm_dqshake_reclaims,
76 xqmstats.xs_qm_dqinact_reclaims);
77 return 0;
78}
79
80static int xqmstat_proc_open(struct inode *inode, struct file *file)
81{
82 return single_open(file, xqmstat_proc_show, NULL);
83}
84
85static const struct file_operations xqmstat_proc_fops = {
86 .owner = THIS_MODULE,
87 .open = xqmstat_proc_open,
88 .read = seq_read,
89 .llseek = seq_lseek,
90 .release = single_release,
91};
92
93void
94xfs_qm_init_procfs(void)
95{
96 proc_create("fs/xfs/xqmstat", 0, NULL, &xqmstat_proc_fops);
97 proc_create("fs/xfs/xqm", 0, NULL, &xqm_proc_fops);
98}
99
100void
101xfs_qm_cleanup_procfs(void)
102{
103 remove_proc_entry("fs/xfs/xqm", NULL);
104 remove_proc_entry("fs/xfs/xqmstat", NULL);
105}
diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h
deleted file mode 100644
index 5b964fc0dc09..000000000000
--- a/fs/xfs/quota/xfs_qm_stats.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright (c) 2002 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_QM_STATS_H__
19#define __XFS_QM_STATS_H__
20
21#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
22
23/*
24 * XQM global statistics
25 */
26struct xqmstats {
27 __uint32_t xs_qm_dqreclaims;
28 __uint32_t xs_qm_dqreclaim_misses;
29 __uint32_t xs_qm_dquot_dups;
30 __uint32_t xs_qm_dqcachemisses;
31 __uint32_t xs_qm_dqcachehits;
32 __uint32_t xs_qm_dqwants;
33 __uint32_t xs_qm_dqshake_reclaims;
34 __uint32_t xs_qm_dqinact_reclaims;
35};
36
37extern struct xqmstats xqmstats;
38
39# define XQM_STATS_INC(count) ( (count)++ )
40
41extern void xfs_qm_init_procfs(void);
42extern void xfs_qm_cleanup_procfs(void);
43
44#else
45
46# define XQM_STATS_INC(count) do { } while (0)
47
48static inline void xfs_qm_init_procfs(void) { };
49static inline void xfs_qm_cleanup_procfs(void) { };
50
51#endif
52
53#endif /* __XFS_QM_STATS_H__ */
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
deleted file mode 100644
index 609246f42e6c..000000000000
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ /dev/null
@@ -1,906 +0,0 @@
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#include <linux/capability.h>
20
21#include "xfs.h"
22#include "xfs_fs.h"
23#include "xfs_bit.h"
24#include "xfs_log.h"
25#include "xfs_inum.h"
26#include "xfs_trans.h"
27#include "xfs_sb.h"
28#include "xfs_ag.h"
29#include "xfs_alloc.h"
30#include "xfs_quota.h"
31#include "xfs_mount.h"
32#include "xfs_bmap_btree.h"
33#include "xfs_inode.h"
34#include "xfs_itable.h"
35#include "xfs_bmap.h"
36#include "xfs_rtalloc.h"
37#include "xfs_error.h"
38#include "xfs_attr.h"
39#include "xfs_buf_item.h"
40#include "xfs_utils.h"
41#include "xfs_qm.h"
42#include "xfs_trace.h"
43
44STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
45STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
46 uint);
47STATIC uint xfs_qm_export_flags(uint);
48STATIC uint xfs_qm_export_qtype_flags(uint);
49STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
50 fs_disk_quota_t *);
51
52
53/*
54 * Turn off quota accounting and/or enforcement for all udquots and/or
55 * gdquots. Called only at unmount time.
56 *
57 * This assumes that there are no dquots of this file system cached
58 * incore, and modifies the ondisk dquot directly. Therefore, for example,
59 * it is an error to call this twice, without purging the cache.
60 */
61int
62xfs_qm_scall_quotaoff(
63 xfs_mount_t *mp,
64 uint flags)
65{
66 struct xfs_quotainfo *q = mp->m_quotainfo;
67 uint dqtype;
68 int error;
69 uint inactivate_flags;
70 xfs_qoff_logitem_t *qoffstart;
71 int nculprits;
72
73 /*
74 * No file system can have quotas enabled on disk but not in core.
75 * Note that quota utilities (like quotaoff) _expect_
76 * errno == EEXIST here.
77 */
78 if ((mp->m_qflags & flags) == 0)
79 return XFS_ERROR(EEXIST);
80 error = 0;
81
82 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
83
84 /*
85 * We don't want to deal with two quotaoffs messing up each other,
86 * so we're going to serialize it. quotaoff isn't exactly a performance
87 * critical thing.
88 * If quotaoff, then we must be dealing with the root filesystem.
89 */
90 ASSERT(q);
91 mutex_lock(&q->qi_quotaofflock);
92
93 /*
94 * If we're just turning off quota enforcement, change mp and go.
95 */
96 if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
97 mp->m_qflags &= ~(flags);
98
99 spin_lock(&mp->m_sb_lock);
100 mp->m_sb.sb_qflags = mp->m_qflags;
101 spin_unlock(&mp->m_sb_lock);
102 mutex_unlock(&q->qi_quotaofflock);
103
104 /* XXX what to do if error ? Revert back to old vals incore ? */
105 error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
106 return (error);
107 }
108
109 dqtype = 0;
110 inactivate_flags = 0;
111 /*
112 * If accounting is off, we must turn enforcement off, clear the
113 * quota 'CHKD' certificate to make it known that we have to
114 * do a quotacheck the next time this quota is turned on.
115 */
116 if (flags & XFS_UQUOTA_ACCT) {
117 dqtype |= XFS_QMOPT_UQUOTA;
118 flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
119 inactivate_flags |= XFS_UQUOTA_ACTIVE;
120 }
121 if (flags & XFS_GQUOTA_ACCT) {
122 dqtype |= XFS_QMOPT_GQUOTA;
123 flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
124 inactivate_flags |= XFS_GQUOTA_ACTIVE;
125 } else if (flags & XFS_PQUOTA_ACCT) {
126 dqtype |= XFS_QMOPT_PQUOTA;
127 flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
128 inactivate_flags |= XFS_PQUOTA_ACTIVE;
129 }
130
131 /*
132 * Nothing to do? Don't complain. This happens when we're just
133 * turning off quota enforcement.
134 */
135 if ((mp->m_qflags & flags) == 0)
136 goto out_unlock;
137
138 /*
139 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
140 * and synchronously. If we fail to write, we should abort the
141 * operation as it cannot be recovered safely if we crash.
142 */
143 error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
144 if (error)
145 goto out_unlock;
146
147 /*
148 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
149 * to take care of the race between dqget and quotaoff. We don't take
150 * any special locks to reset these bits. All processes need to check
151 * these bits *after* taking inode lock(s) to see if the particular
152 * quota type is in the process of being turned off. If *ACTIVE, it is
153 * guaranteed that all dquot structures and all quotainode ptrs will all
154 * stay valid as long as that inode is kept locked.
155 *
156 * There is no turning back after this.
157 */
158 mp->m_qflags &= ~inactivate_flags;
159
160 /*
161 * Give back all the dquot reference(s) held by inodes.
162 * Here we go thru every single incore inode in this file system, and
163 * do a dqrele on the i_udquot/i_gdquot that it may have.
164 * Essentially, as long as somebody has an inode locked, this guarantees
165 * that quotas will not be turned off. This is handy because in a
166 * transaction once we lock the inode(s) and check for quotaon, we can
167 * depend on the quota inodes (and other things) being valid as long as
168 * we keep the lock(s).
169 */
170 xfs_qm_dqrele_all_inodes(mp, flags);
171
172 /*
173 * Next we make the changes in the quota flag in the mount struct.
174 * This isn't protected by a particular lock directly, because we
175 * don't want to take a mrlock every time we depend on quotas being on.
176 */
177 mp->m_qflags &= ~(flags);
178
179 /*
180 * Go through all the dquots of this file system and purge them,
181 * according to what was turned off. We may not be able to get rid
182 * of all dquots, because dquots can have temporary references that
183 * are not attached to inodes. eg. xfs_setattr, xfs_create.
184 * So, if we couldn't purge all the dquots from the filesystem,
185 * we can't get rid of the incore data structures.
186 */
187 while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
188 delay(10 * nculprits);
189
190 /*
191 * Transactions that had started before ACTIVE state bit was cleared
192 * could have logged many dquots, so they'd have higher LSNs than
193 * the first QUOTAOFF log record does. If we happen to crash when
194 * the tail of the log has gone past the QUOTAOFF record, but
195 * before the last dquot modification, those dquots __will__
196 * recover, and that's not good.
197 *
198 * So, we have QUOTAOFF start and end logitems; the start
199 * logitem won't get overwritten until the end logitem appears...
200 */
201 error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
202 if (error) {
203 /* We're screwed now. Shutdown is the only option. */
204 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
205 goto out_unlock;
206 }
207
208 /*
209 * If quotas is completely disabled, close shop.
210 */
211 if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
212 ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
213 mutex_unlock(&q->qi_quotaofflock);
214 xfs_qm_destroy_quotainfo(mp);
215 return (0);
216 }
217
218 /*
219 * Release our quotainode references if we don't need them anymore.
220 */
221 if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
222 IRELE(q->qi_uquotaip);
223 q->qi_uquotaip = NULL;
224 }
225 if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
226 IRELE(q->qi_gquotaip);
227 q->qi_gquotaip = NULL;
228 }
229
230out_unlock:
231 mutex_unlock(&q->qi_quotaofflock);
232 return error;
233}
234
235STATIC int
236xfs_qm_scall_trunc_qfile(
237 struct xfs_mount *mp,
238 xfs_ino_t ino)
239{
240 struct xfs_inode *ip;
241 struct xfs_trans *tp;
242 int error;
243
244 if (ino == NULLFSINO)
245 return 0;
246
247 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
248 if (error)
249 return error;
250
251 xfs_ilock(ip, XFS_IOLOCK_EXCL);
252
253 tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
254 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
255 XFS_TRANS_PERM_LOG_RES,
256 XFS_ITRUNCATE_LOG_COUNT);
257 if (error) {
258 xfs_trans_cancel(tp, 0);
259 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
260 goto out_put;
261 }
262
263 xfs_ilock(ip, XFS_ILOCK_EXCL);
264 xfs_trans_ijoin(tp, ip);
265
266 error = xfs_itruncate_data(&tp, ip, 0);
267 if (error) {
268 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
269 XFS_TRANS_ABORT);
270 goto out_unlock;
271 }
272
273 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
274 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
275
276out_unlock:
277 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
278out_put:
279 IRELE(ip);
280 return error;
281}
282
283int
284xfs_qm_scall_trunc_qfiles(
285 xfs_mount_t *mp,
286 uint flags)
287{
288 int error = 0, error2 = 0;
289
290 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
291 xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
292 __func__, flags, mp->m_qflags);
293 return XFS_ERROR(EINVAL);
294 }
295
296 if (flags & XFS_DQ_USER)
297 error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
298 if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
299 error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
300
301 return error ? error : error2;
302}
303
304/*
305 * Switch on (a given) quota enforcement for a filesystem. This takes
306 * effect immediately.
307 * (Switching on quota accounting must be done at mount time.)
308 */
309int
310xfs_qm_scall_quotaon(
311 xfs_mount_t *mp,
312 uint flags)
313{
314 int error;
315 uint qf;
316 __int64_t sbflags;
317
318 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
319 /*
320 * Switching on quota accounting must be done at mount time.
321 */
322 flags &= ~(XFS_ALL_QUOTA_ACCT);
323
324 sbflags = 0;
325
326 if (flags == 0) {
327 xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
328 __func__, mp->m_qflags);
329 return XFS_ERROR(EINVAL);
330 }
331
332 /* No fs can turn on quotas with a delayed effect */
333 ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
334
335 /*
336 * Can't enforce without accounting. We check the superblock
337 * qflags here instead of m_qflags because rootfs can have
338 * quota acct on ondisk without m_qflags' knowing.
339 */
340 if (((flags & XFS_UQUOTA_ACCT) == 0 &&
341 (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
342 (flags & XFS_UQUOTA_ENFD))
343 ||
344 ((flags & XFS_PQUOTA_ACCT) == 0 &&
345 (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
346 (flags & XFS_GQUOTA_ACCT) == 0 &&
347 (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
348 (flags & XFS_OQUOTA_ENFD))) {
349 xfs_debug(mp,
350 "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
351 __func__, flags, mp->m_sb.sb_qflags);
352 return XFS_ERROR(EINVAL);
353 }
354 /*
355 * If everything's up to-date incore, then don't waste time.
356 */
357 if ((mp->m_qflags & flags) == flags)
358 return XFS_ERROR(EEXIST);
359
360 /*
361 * Change sb_qflags on disk but not incore mp->qflags
362 * if this is the root filesystem.
363 */
364 spin_lock(&mp->m_sb_lock);
365 qf = mp->m_sb.sb_qflags;
366 mp->m_sb.sb_qflags = qf | flags;
367 spin_unlock(&mp->m_sb_lock);
368
369 /*
370 * There's nothing to change if it's the same.
371 */
372 if ((qf & flags) == flags && sbflags == 0)
373 return XFS_ERROR(EEXIST);
374 sbflags |= XFS_SB_QFLAGS;
375
376 if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
377 return (error);
378 /*
379 * If we aren't trying to switch on quota enforcement, we are done.
380 */
381 if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
382 (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
383 ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
384 (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
385 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
386 (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
387 (flags & XFS_ALL_QUOTA_ENFD) == 0)
388 return (0);
389
390 if (! XFS_IS_QUOTA_RUNNING(mp))
391 return XFS_ERROR(ESRCH);
392
393 /*
394 * Switch on quota enforcement in core.
395 */
396 mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
397 mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
398 mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
399
400 return (0);
401}
402
403
/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 * Fills in *out for the Q_XGETQSTAT quotactl.  Never fails: if quota is
 * not on disk the quota inode numbers are reported as NULLFSINO and the
 * rest of *out stays zeroed.
 */
int
xfs_qm_scall_getqstat(
	struct xfs_mount	*mp,
	struct fs_quota_stat	*out)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_inode	*uip, *gip;
	boolean_t		tempuqip, tempgqip;

	uip = gip = NULL;
	tempuqip = tempgqip = B_FALSE;
	memset(out, 0, sizeof(fs_quota_stat_t));

	out->qs_version = FS_QSTAT_VERSION;
	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
		out->qs_uquota.qfs_ino = NULLFSINO;
		out->qs_gquota.qfs_ino = NULLFSINO;
		return (0);
	}
	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
							(XFS_ALL_QUOTA_ACCT|
							 XFS_ALL_QUOTA_ENFD));
	out->qs_pad = 0;
	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;

	/*
	 * q may be NULL when quota is on disk but not active on this
	 * mount, so the cached quota inode pointers may be unavailable.
	 */
	if (q) {
		uip = q->qi_uquotaip;
		gip = q->qi_gquotaip;
	}
	/*
	 * If the quota inodes aren't cached, grab temporary references
	 * just to read their block/extent counts; released below.
	 */
	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					0, 0, &uip) == 0)
			tempuqip = B_TRUE;
	}
	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					0, 0, &gip) == 0)
			tempgqip = B_TRUE;
	}
	if (uip) {
		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
		if (tempuqip)
			IRELE(uip);
	}
	if (gip) {
		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
		if (tempgqip)
			IRELE(gip);
	}
	/* Limits/timers only exist when quota is actually running. */
	if (q) {
		out->qs_incoredqs = q->qi_dquots;
		out->qs_btimelimit = q->qi_btimelimit;
		out->qs_itimelimit = q->qi_itimelimit;
		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
		out->qs_bwarnlimit = q->qi_bwarnlimit;
		out->qs_iwarnlimit = q->qi_iwarnlimit;
	}
	return 0;
}
469
/* The set of fs_disk_quota fieldmask bits xfs_qm_scall_setqlim() honours. */
#define XFS_DQ_MASK \
	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
472
/*
 * Adjust quota limits, and start/stop timers accordingly.
 *
 * Applies the fields selected by newlim->d_fieldmask to the dquot for
 * (id, type), allocating the dquot on disk if needed.  id == 0 is the
 * "default" dquot: its limits/timers/warn limits are mirrored into the
 * in-core xfs_quotainfo defaults.  Runs under qi_quotaofflock so a
 * concurrent quotaoff cannot race with the update.
 */
int
xfs_qm_scall_setqlim(
	xfs_mount_t		*mp,
	xfs_dqid_t		id,
	uint			type,
	fs_disk_quota_t		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_disk_dquot_t	*ddq;
	xfs_dquot_t		*dqp;
	xfs_trans_t		*tp;
	int			error;
	xfs_qcnt_t		hard, soft;

	/* Reject unknown mask bits; a zero mask is a successful no-op. */
	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
		return EINVAL;
	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * (We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening). (XXXThis doesn't currently happen
	 * because we take the vfslock before calling xfs_qm_sysent).
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked), and join it to the transaction.
	 * Allocate the dquot if this doesn't exist.
	 */
	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
		ASSERT(error != ENOENT);
		goto out_unlock;
	}
	xfs_trans_dqjoin(tp, dqp);
	ddq = &dqp->q_core;

	/*
	 * Make sure that hardlimits are >= soft limits before changing.
	 * For each limit pair: take the new value if its mask bit is set,
	 * otherwise keep the current on-disk value; apply only when
	 * hard == 0 (no limit) or hard >= soft, else leave both untouched.
	 */
	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
			be64_to_cpu(ddq->d_blk_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
			be64_to_cpu(ddq->d_blk_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_blk_hardlimit = cpu_to_be64(hard);
		ddq->d_blk_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_bhardlimit = hard;
			q->qi_bsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
	}
	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
			be64_to_cpu(ddq->d_rtb_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
			be64_to_cpu(ddq->d_rtb_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
		ddq->d_rtb_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_rtbhardlimit = hard;
			q->qi_rtbsoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
	}

	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			be64_to_cpu(ddq->d_ino_hardlimit);
	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			be64_to_cpu(ddq->d_ino_softlimit);
	if (hard == 0 || hard >= soft) {
		ddq->d_ino_hardlimit = cpu_to_be64(hard);
		ddq->d_ino_softlimit = cpu_to_be64(soft);
		if (id == 0) {
			q->qi_ihardlimit = hard;
			q->qi_isoftlimit = soft;
		}
	} else {
		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
	}

	/*
	 * Update warnings counter(s) if requested
	 */
	if (newlim->d_fieldmask & FS_DQ_BWARNS)
		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
	if (newlim->d_fieldmask & FS_DQ_IWARNS)
		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

	if (id == 0) {
		/*
		 * Timelimits for the super user set the relative time
		 * the other users can be over quota for this file system.
		 * If it is zero a default is used.  Ditto for the default
		 * soft and hard limit values (already done, above), and
		 * for warnings.
		 */
		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
			q->qi_btimelimit = newlim->d_btimer;
			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
		}
		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
			q->qi_itimelimit = newlim->d_itimer;
			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
		}
		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
			q->qi_rtbtimelimit = newlim->d_rtbtimer;
			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
		}
		if (newlim->d_fieldmask & FS_DQ_BWARNS)
			q->qi_bwarnlimit = newlim->d_bwarns;
		if (newlim->d_fieldmask & FS_DQ_IWARNS)
			q->qi_iwarnlimit = newlim->d_iwarns;
		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
	} else {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(mp, ddq);
	}
	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp, 0);
	xfs_qm_dqrele(dqp);

 out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}
631
632int
633xfs_qm_scall_getquota(
634 xfs_mount_t *mp,
635 xfs_dqid_t id,
636 uint type,
637 fs_disk_quota_t *out)
638{
639 xfs_dquot_t *dqp;
640 int error;
641
642 /*
643 * Try to get the dquot. We don't want it allocated on disk, so
644 * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
645 * exist, we'll get ENOENT back.
646 */
647 if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
648 return (error);
649 }
650
651 /*
652 * If everything's NULL, this dquot doesn't quite exist as far as
653 * our utility programs are concerned.
654 */
655 if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
656 xfs_qm_dqput(dqp);
657 return XFS_ERROR(ENOENT);
658 }
659 /*
660 * Convert the disk dquot to the exportable format
661 */
662 xfs_qm_export_dquot(mp, &dqp->q_core, out);
663 xfs_qm_dqput(dqp);
664 return (error ? XFS_ERROR(EFAULT) : 0);
665}
666
667
/*
 * Log the end-of-quotaoff record, linked to the quotaoff start item
 * @startqoff, and force it to disk synchronously.  Once this record is
 * stable the quotaoff is complete in the log.
 */
STATIC int
xfs_qm_log_quotaoff_end(
	xfs_mount_t		*mp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_trans_t		*tp;
	int			error;
	xfs_qoff_logitem_t	*qoffi;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}

	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	/*
	 * We have to make sure that the transaction is secure on disk before we
	 * return and actually stop quota accounting. So, make it synchronous.
	 * We don't care about quotoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	return (error);
}
699
700
701STATIC int
702xfs_qm_log_quotaoff(
703 xfs_mount_t *mp,
704 xfs_qoff_logitem_t **qoffstartp,
705 uint flags)
706{
707 xfs_trans_t *tp;
708 int error;
709 xfs_qoff_logitem_t *qoffi=NULL;
710 uint oldsbqflag=0;
711
712 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
713 if ((error = xfs_trans_reserve(tp, 0,
714 sizeof(xfs_qoff_logitem_t) * 2 +
715 mp->m_sb.sb_sectsize + 128,
716 0,
717 0,
718 XFS_DEFAULT_LOG_COUNT))) {
719 goto error0;
720 }
721
722 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
723 xfs_trans_log_quotaoff_item(tp, qoffi);
724
725 spin_lock(&mp->m_sb_lock);
726 oldsbqflag = mp->m_sb.sb_qflags;
727 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
728 spin_unlock(&mp->m_sb_lock);
729
730 xfs_mod_sb(tp, XFS_SB_QFLAGS);
731
732 /*
733 * We have to make sure that the transaction is secure on disk before we
734 * return and actually stop quota accounting. So, make it synchronous.
735 * We don't care about quotoff's performance.
736 */
737 xfs_trans_set_sync(tp);
738 error = xfs_trans_commit(tp, 0);
739
740error0:
741 if (error) {
742 xfs_trans_cancel(tp, 0);
743 /*
744 * No one else is modifying sb_qflags, so this is OK.
745 * We still hold the quotaofflock.
746 */
747 spin_lock(&mp->m_sb_lock);
748 mp->m_sb.sb_qflags = oldsbqflag;
749 spin_unlock(&mp->m_sb_lock);
750 }
751 *qoffstartp = qoffi;
752 return (error);
753}
754
755
/*
 * Translate an internal style on-disk-dquot to the exportable format.
 * The main differences are that the counters/limits are all in Basic
 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
 * to be converted to the native endianness.
 */
STATIC void
xfs_qm_export_dquot(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*src,
	struct fs_disk_quota	*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
	dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
	dst->d_id = be32_to_cpu(src->d_id);
	/* Block counts/limits: FSB on disk -> BB for userspace. */
	dst->d_blk_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
	dst->d_blk_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
	dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
	dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
	dst->d_icount = be64_to_cpu(src->d_icount);
	dst->d_btimer = be32_to_cpu(src->d_btimer);
	dst->d_itimer = be32_to_cpu(src->d_itimer);
	dst->d_iwarns = be16_to_cpu(src->d_iwarns);
	dst->d_bwarns = be16_to_cpu(src->d_bwarns);
	dst->d_rtb_hardlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
	dst->d_rtb_softlimit =
		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
	dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
	dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
			(src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
		dst->d_btimer = 0;
		dst->d_itimer = 0;
		dst->d_rtbtimer = 0;
	}

#ifdef DEBUG
	/*
	 * Sanity check: a non-default dquot over its soft limit must have
	 * a running timer when enforcement is on for its quota type.
	 */
	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
	    dst->d_id != 0) {
		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
		    (dst->d_blk_softlimit > 0)) {
			ASSERT(dst->d_btimer != 0);
		}
		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
		    (dst->d_ino_softlimit > 0)) {
			ASSERT(dst->d_itimer != 0);
		}
	}
#endif
}
821
822STATIC uint
823xfs_qm_export_qtype_flags(
824 uint flags)
825{
826 /*
827 * Can't be more than one, or none.
828 */
829 ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
830 (FS_PROJ_QUOTA | FS_USER_QUOTA));
831 ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
832 (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
833 ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
834 (FS_USER_QUOTA | FS_GROUP_QUOTA));
835 ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
836
837 return (flags & XFS_DQ_USER) ?
838 FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
839 FS_PROJ_QUOTA : FS_GROUP_QUOTA;
840}
841
842STATIC uint
843xfs_qm_export_flags(
844 uint flags)
845{
846 uint uflags;
847
848 uflags = 0;
849 if (flags & XFS_UQUOTA_ACCT)
850 uflags |= FS_QUOTA_UDQ_ACCT;
851 if (flags & XFS_PQUOTA_ACCT)
852 uflags |= FS_QUOTA_PDQ_ACCT;
853 if (flags & XFS_GQUOTA_ACCT)
854 uflags |= FS_QUOTA_GDQ_ACCT;
855 if (flags & XFS_UQUOTA_ENFD)
856 uflags |= FS_QUOTA_UDQ_ENFD;
857 if (flags & (XFS_OQUOTA_ENFD)) {
858 uflags |= (flags & XFS_GQUOTA_ACCT) ?
859 FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
860 }
861 return (uflags);
862}
863
864
/*
 * Per-inode callback for xfs_qm_dqrele_all_inodes(): drop this inode's
 * references on the dquot types selected by @flags.  Always returns 0
 * so the AG iterator keeps going.
 */
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		return 0;
	}

	/* ILOCK_EXCL keeps the dquot attach/detach atomic per inode. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	/* i_gdquot holds either the group or the project dquot. */
	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
891
892
/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount	*mp,
	uint			flags)
{
	/* Caller must still have quota state set up; see note above. */
	ASSERT(mp->m_quotainfo);
	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
}
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
deleted file mode 100644
index 94a3d927d716..000000000000
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
#ifndef __XFS_QUOTA_PRIV_H__
#define __XFS_QUOTA_PRIV_H__

/*
 * Number of bmaps that we ask from bmapi when doing a quotacheck.
 * We make this restriction to keep the memory usage to a minimum.
 */
#define XFS_DQITER_MAP_SIZE	10

/*
 * Hash into a bucket in the dquot hash table, based on <mp, id>.
 *
 * Macro hygiene fix: the "type" and "dqp" parameters are now
 * parenthesized at every expansion site so that these macros stay
 * correct when passed compound expressions (CERT PRE01-C).
 */
#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
				 (__psunsigned_t)(id)) & \
				(xfs_Gqm->qm_dqhashmask - 1))
#define XFS_DQ_HASH(mp, id, type)   ((type) == XFS_DQ_USER ? \
				     (xfs_Gqm->qm_usr_dqhtable + \
				      XFS_DQ_HASHVAL(mp, id)) : \
				     (xfs_Gqm->qm_grp_dqhtable + \
				      XFS_DQ_HASHVAL(mp, id)))
/* True when a dquot carries no limits and no usage at all. */
#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
	!(dqp)->q_core.d_blk_hardlimit && \
	!(dqp)->q_core.d_blk_softlimit && \
	!(dqp)->q_core.d_rtb_hardlimit && \
	!(dqp)->q_core.d_rtb_softlimit && \
	!(dqp)->q_core.d_ino_hardlimit && \
	!(dqp)->q_core.d_ino_softlimit && \
	!(dqp)->q_core.d_bcount && \
	!(dqp)->q_core.d_rtbcount && \
	!(dqp)->q_core.d_icount)

/* Short quota-type tag for trace/debug output. */
#define DQFLAGTO_TYPESTR(d)	(((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
				 (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
				 (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))

#endif	/* __XFS_QUOTA_PRIV_H__ */
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
deleted file mode 100644
index 4d00ee67792d..000000000000
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ /dev/null
@@ -1,890 +0,0 @@
1/*
2 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_alloc.h"
27#include "xfs_quota.h"
28#include "xfs_mount.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_inode.h"
31#include "xfs_itable.h"
32#include "xfs_bmap.h"
33#include "xfs_rtalloc.h"
34#include "xfs_error.h"
35#include "xfs_attr.h"
36#include "xfs_buf_item.h"
37#include "xfs_trans_priv.h"
38#include "xfs_qm.h"
39
40STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
41
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp->q_transp != tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

	/*
	 * Initialize d_transp so we can later determine if this dquot is
	 * associated with this transaction.
	 */
	dqp->q_transp = tp;
}
67
68
/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	/* Must have been joined via xfs_trans_dqjoin() and be locked. */
	ASSERT(dqp->q_transp == tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	tp->t_flags |= XFS_TRANS_DIRTY;
	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
90
/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	xfs_trans_t	*otp,
	xfs_trans_t	*ntp)
{
	xfs_dqtrx_t	*oq, *nq;
	int		i,j;
	xfs_dqtrx_t	*oqa, *nqa;

	/* Nothing to carry over if the old trans touched no dquots. */
	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);
	oqa = otp->t_dqinfo->dqa_usrdquots;
	nqa = ntp->t_dqinfo->dqa_usrdquots;

	/*
	 * Because the quota blk reservation is carried forward,
	 * it is also necessary to carry forward the DQ_DIRTY flag.
	 */
	if(otp->t_flags & XFS_TRANS_DQ_DIRTY)
		ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

	/* Pass 1: user dquot slots; pass 2: group/project slots. */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			/* Slots are filled sequentially; first NULL ends it. */
			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 * The old trans keeps only what it already used,
			 * the new trans inherits the unused remainder.
			 */
			nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
			oq->qt_blk_res = oq->qt_blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;

		}
		oqa = otp->t_dqinfo->dqa_grpdquots;
		nqa = ntp->t_dqinfo->dqa_grpdquots;
	}
}
147
/*
 * Wrap around mod_dquot to account for both user and group quotas.
 * Applies @delta to @field for each dquot attached to @ip; quota
 * inodes themselves are exempt from accounting.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	long		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	/* Bail when quota is off or @ip is one of the quota inodes. */
	if (!XFS_IS_QUOTA_RUNNING(mp) ||
	    !XFS_IS_QUOTA_ON(mp) ||
	    ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
	/* i_gdquot covers group OR project quota (OQUOTA). */
	if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
		(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}
174
175STATIC xfs_dqtrx_t *
176xfs_trans_get_dqtrx(
177 xfs_trans_t *tp,
178 xfs_dquot_t *dqp)
179{
180 int i;
181 xfs_dqtrx_t *qa;
182
183 qa = XFS_QM_ISUDQ(dqp) ?
184 tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots;
185
186 for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
187 if (qa[i].qt_dquot == NULL ||
188 qa[i].qt_dquot == dqp)
189 return &qa[i];
190 }
191
192 return NULL;
193}
194
/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 *
 * Accumulates @delta into the per-transaction dqtrx slot for @dqp;
 * nothing hits the dquot itself until
 * xfs_trans_apply_dquot_deltas() runs at commit.
 */
void
xfs_trans_mod_dquot(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp,
	uint		field,
	long		delta)
{
	xfs_dqtrx_t	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
	qtrx = NULL;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	switch (field) {

		/*
		 * regular disk blk reservation
		 */
	      case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += (ulong)delta;
		break;

		/*
		 * inode reservation
		 */
	      case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += (ulong)delta;
		break;

		/*
		 * disk blocks used.
		 */
	      case XFS_TRANS_DQ_BCOUNT:
		/* Consume from the reservation when one was made. */
		if (qtrx->qt_blk_res && delta > 0) {
			qtrx->qt_blk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
		}
		qtrx->qt_bcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

		/*
		 * Inode Count
		 */
	      case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += (ulong)delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

		/*
		 * rtblk reservation
		 */
	      case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += (ulong)delta;
		break;

		/*
		 * rtblk count
		 */
	      case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += (ulong)delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	      case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	      default:
		ASSERT(0);
	}
	/* Remember there are dquot deltas to apply at commit time. */
	tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
294
295
/*
 * Given an array of dqtrx structures, lock all the dquots associated
 * and join them to the transaction, provided they have been modified.
 * We know that the highest number of dquots (of one type - usr OR grp),
 * involved in a transaction is 2 and that both usr and grp combined - 3.
 * So, we don't attempt to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
	xfs_trans_t	*tp,
	xfs_dqtrx_t	*q)
{
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else {
		ASSERT(XFS_QM_TRANS_MAXDQS == 2);
		/* xfs_dqlock2 takes both locks in a deadlock-safe order. */
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	}
}
319
320
/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	xfs_disk_dquot_t	*d;
	long			totalbdelta;
	long			totalrtbdelta;

	if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	ASSERT(tp->t_dqinfo);
	qa = tp->t_dqinfo->dqa_usrdquots;
	/* Pass 1: user dquot slots; pass 2: group/project slots. */
	for (j = 0; j < 2; j++) {
		if (qa[0].qt_dquot == NULL) {
			qa = tp->t_dqinfo->dqa_grpdquots;
			continue;
		}

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));
			ASSERT(dqp->q_transp == tp);

			/*
			 * adjust the actual number of blocks used
			 */
			d = &dqp->q_core;

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;
#ifdef DEBUG
			/* Negative deltas must never drive a count below 0. */
			if (totalbdelta < 0)
				ASSERT(be64_to_cpu(d->d_bcount) >=
				       -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(be64_to_cpu(d->d_rtbcount) >=
				       -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(be64_to_cpu(d->d_icount) >=
				       -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

			if (qtrx->qt_icount_delta)
				be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

			if (totalrtbdelta)
				be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 * d_id == 0 is the defaults dquot; its limits are
			 * never timed.
			 */
			if (d->d_id) {
				xfs_qm_adjust_dqlimits(tp->t_mountp, d);
				xfs_qm_adjust_dqtimers(tp->t_mountp, d);
			}

			dqp->dq_flags |= XFS_DQ_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			if (qtrx->qt_blk_res != 0) {
				if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
					if (qtrx->qt_blk_res >
					    qtrx->qt_blk_res_used)
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res -
							 qtrx->qt_blk_res_used);
					else
						dqp->q_res_bcount -= (xfs_qcnt_t)
							(qtrx->qt_blk_res_used -
							 qtrx->qt_blk_res);
				}
			} else {
				/*
				 * These blks were never reserved, either inside
				 * a transaction or outside one (in a delayed
				 * allocation). Also, this isn't always a
				 * negative number since we sometimes
				 * deliberately skip quota reservations.
				 */
				if (qtrx->qt_bcount_delta) {
					dqp->q_res_bcount +=
					      (xfs_qcnt_t)qtrx->qt_bcount_delta;
				}
			}
			/*
			 * Adjust the RT reservation.
			 */
			if (qtrx->qt_rtblk_res != 0) {
				if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
					if (qtrx->qt_rtblk_res >
					    qtrx->qt_rtblk_res_used)
					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
						       (qtrx->qt_rtblk_res -
							qtrx->qt_rtblk_res_used);
					else
					       dqp->q_res_rtbcount -= (xfs_qcnt_t)
						       (qtrx->qt_rtblk_res_used -
							qtrx->qt_rtblk_res);
				}
			} else {
				if (qtrx->qt_rtbcount_delta)
					dqp->q_res_rtbcount +=
					    (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
			}

			/*
			 * Adjust the inode reservation.
			 */
			if (qtrx->qt_ino_res != 0) {
				ASSERT(qtrx->qt_ino_res >=
				       qtrx->qt_ino_res_used);
				if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
					dqp->q_res_icount -= (xfs_qcnt_t)
						(qtrx->qt_ino_res -
						 qtrx->qt_ino_res_used);
			} else {
				if (qtrx->qt_icount_delta)
					dqp->q_res_icount +=
					    (xfs_qcnt_t)qtrx->qt_icount_delta;
			}

			/* Reserved counts can never fall below actual usage. */
			ASSERT(dqp->q_res_bcount >=
				be64_to_cpu(dqp->q_core.d_bcount));
			ASSERT(dqp->q_res_icount >=
				be64_to_cpu(dqp->q_core.d_icount));
			ASSERT(dqp->q_res_rtbcount >=
				be64_to_cpu(dqp->q_core.d_rtbcount));
		}
		/*
		 * Do the group quotas next
		 */
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
506
/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	xfs_trans_t		*tp)
{
	int			i, j;
	xfs_dquot_t		*dqp;
	xfs_dqtrx_t		*qtrx, *qa;
	boolean_t		locked;

	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
		return;

	qa = tp->t_dqinfo->dqa_usrdquots;

	/* Pass 1: user dquot slots; pass 2: group/project slots. */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;
			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 * Lock the dquot lazily: only if some reservation
			 * actually needs to be given back.
			 */
			locked = B_FALSE;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = B_TRUE;
				dqp->q_res_bcount -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_icount -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = B_TRUE;
				}
				dqp->q_res_rtbcount -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);

		}
		qa = tp->t_dqinfo->dqa_grpdquots;
	}
}
573
574STATIC void
575xfs_quota_warn(
576 struct xfs_mount *mp,
577 struct xfs_dquot *dqp,
578 int type)
579{
580 /* no warnings for project quotas - we just return ENOSPC later */
581 if (dqp->dq_flags & XFS_DQ_PROJ)
582 return;
583 quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
584 be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
585 type);
586}
587
/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 *
 * The dquot is locked on entry here and unlocked before return.
 * Returns 0 on success; on failure returns a positive EDQUOT, or
 * ENOSPC when XFS_QMOPT_ENOSPC was passed (XFS's historical positive
 * errno convention).  Negative nblks/ninos un-reserve (used by the
 * rollback path in xfs_trans_reserve_quota_bydquots).
 */
STATIC int
xfs_trans_dqresv(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	long		nblks,
	long		ninos,
	uint		flags)
{
	xfs_qcnt_t	hardlimit;
	xfs_qcnt_t	softlimit;
	time_t		timer;
	xfs_qwarncnt_t	warns;
	xfs_qwarncnt_t	warnlimit;
	xfs_qcnt_t	count;
	xfs_qcnt_t	*resbcountp;
	xfs_quotainfo_t	*q = mp->m_quotainfo;


	xfs_dqlock(dqp);

	/*
	 * Pick the block-count limits, timer, warning counts and the
	 * reservation counter for either regular or realtime blocks.
	 * A zero per-dquot limit means "use the filesystem default"
	 * from the quotainfo.
	 */
	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_bhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
		if (!softlimit)
			softlimit = q->qi_bsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_btimer);
		warns = be16_to_cpu(dqp->q_core.d_bwarns);
		/*
		 * NOTE(review): reads m_quotainfo through dqp->q_mount here
		 * while the rest of the function uses q = mp->m_quotainfo;
		 * presumably the same structure — confirm mp == dqp->q_mount
		 * at all call sites.
		 */
		warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
		resbcountp = &dqp->q_res_bcount;
	} else {
		ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
		hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
		if (!hardlimit)
			hardlimit = q->qi_rtbhardlimit;
		softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
		if (!softlimit)
			softlimit = q->qi_rtbsoftlimit;
		timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
		warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
		warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
		resbcountp = &dqp->q_res_rtbcount;
	}

	/*
	 * Enforcement applies only when not forced, the dquot is not the
	 * id-0 (default-limits) dquot, and enforcement is switched on for
	 * this quota type (OQUOTA covers both group and project).
	 */
	if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
	    dqp->q_core.d_id &&
	    ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
	     (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
	      (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
		if (nblks > 0) {
			/*
			 * dquot is locked already. See if we'd go over the
			 * hardlimit or exceed the timelimit if we allocate
			 * nblks.
			 */
			if (hardlimit > 0ULL &&
			    hardlimit <= nblks + *resbcountp) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
				goto error_return;
			}
			if (softlimit > 0ULL &&
			    softlimit <= nblks + *resbcountp) {
				/*
				 * Over soft limit: fail only when the grace
				 * timer has expired or the warning count is
				 * exhausted; otherwise just warn.
				 */
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_BSOFTLONGWARN);
					goto error_return;
				}

				xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
			}
		}
		if (ninos > 0) {
			/*
			 * Re-purpose timer/warns/limits for the inode checks;
			 * inodes compare against the current count rather
			 * than a separate reservation counter.
			 */
			count = be64_to_cpu(dqp->q_core.d_icount);
			timer = be32_to_cpu(dqp->q_core.d_itimer);
			warns = be16_to_cpu(dqp->q_core.d_iwarns);
			warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
			hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
			if (!hardlimit)
				hardlimit = q->qi_ihardlimit;
			softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
			if (!softlimit)
				softlimit = q->qi_isoftlimit;

			if (hardlimit > 0ULL && count >= hardlimit) {
				xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
				goto error_return;
			}
			if (softlimit > 0ULL && count >= softlimit) {
				if ((timer != 0 && get_seconds() > timer) ||
				    (warns != 0 && warns >= warnlimit)) {
					xfs_quota_warn(mp, dqp,
						       QUOTA_NL_ISOFTLONGWARN);
					goto error_return;
				}
				xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
			}
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_res_bcount = q_core.d_bcount + resv
	 */
	(*resbcountp) += (xfs_qcnt_t)nblks;
	if (ninos != 0)
		dqp->q_res_icount += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(tp->t_dqinfo);
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		if (nblks != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    flags & XFS_QMOPT_RESBLK_MASK,
					    nblks);
		if (ninos != 0)
			xfs_trans_mod_dquot(tp, dqp,
					    XFS_TRANS_DQ_RES_INOS,
					    ninos);
	}
	/* Reserved counts must never fall below the on-disk usage. */
	ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
	ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
	ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (flags & XFS_QMOPT_ENOSPC)
		return ENOSPC;
	return EDQUOT;
}
736
737
738/*
739 * Given dquot(s), make disk block and/or inode reservations against them.
740 * The fact that this does the reservation against both the usr and
741 * grp/prj quotas is important, because this follows a both-or-nothing
742 * approach.
743 *
744 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
745 * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
746 * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
747 * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
748 * dquots are unlocked on return, if they were not locked by caller.
749 */
750int
751xfs_trans_reserve_quota_bydquots(
752 xfs_trans_t *tp,
753 xfs_mount_t *mp,
754 xfs_dquot_t *udqp,
755 xfs_dquot_t *gdqp,
756 long nblks,
757 long ninos,
758 uint flags)
759{
760 int resvd = 0, error;
761
762 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
763 return 0;
764
765 if (tp && tp->t_dqinfo == NULL)
766 xfs_trans_alloc_dqinfo(tp);
767
768 ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
769
770 if (udqp) {
771 error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
772 (flags & ~XFS_QMOPT_ENOSPC));
773 if (error)
774 return error;
775 resvd = 1;
776 }
777
778 if (gdqp) {
779 error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
780 if (error) {
781 /*
782 * can't do it, so backout previous reservation
783 */
784 if (resvd) {
785 flags |= XFS_QMOPT_FORCE_RES;
786 xfs_trans_dqresv(tp, mp, udqp,
787 -nblks, -ninos, flags);
788 }
789 return error;
790 }
791 }
792
793 /*
794 * Didn't change anything critical, so, no need to log
795 */
796 return 0;
797}
798
799
800/*
801 * Lock the dquot and change the reservation if we can.
802 * This doesn't change the actual usage, just the reservation.
803 * The inode sent in is locked.
804 */
805int
806xfs_trans_reserve_quota_nblks(
807 struct xfs_trans *tp,
808 struct xfs_inode *ip,
809 long nblks,
810 long ninos,
811 uint flags)
812{
813 struct xfs_mount *mp = ip->i_mount;
814
815 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
816 return 0;
817 if (XFS_IS_PQUOTA_ON(mp))
818 flags |= XFS_QMOPT_ENOSPC;
819
820 ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
821 ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
822
823 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
824 ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
825 XFS_TRANS_DQ_RES_RTBLKS ||
826 (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
827 XFS_TRANS_DQ_RES_BLKS);
828
829 /*
830 * Reserve nblks against these dquots, with trans as the mediator.
831 */
832 return xfs_trans_reserve_quota_bydquots(tp, mp,
833 ip->i_udquot, ip->i_gdquot,
834 nblks, ninos, flags);
835}
836
837/*
838 * This routine is called to allocate a quotaoff log item.
839 */
840xfs_qoff_logitem_t *
841xfs_trans_get_qoff_item(
842 xfs_trans_t *tp,
843 xfs_qoff_logitem_t *startqoff,
844 uint flags)
845{
846 xfs_qoff_logitem_t *q;
847
848 ASSERT(tp != NULL);
849
850 q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
851 ASSERT(q != NULL);
852
853 /*
854 * Get a log_item_desc to point at the new item.
855 */
856 xfs_trans_add_item(tp, &q->qql_item);
857 return q;
858}
859
860
/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed. The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*qlp)
{
	/* Dirty both the transaction and the item's log item descriptor. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
874
/*
 * Allocate the transaction's dquot accounting structure from the
 * global quota manager's dqtrx zone.  Zero-filled, so all per-dquot
 * reservation/delta counters start at 0; KM_SLEEP means the
 * allocation may block.
 */
STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}
881
882void
883xfs_trans_free_dqinfo(
884 xfs_trans_t *tp)
885{
886 if (!tp->t_dqinfo)
887 return;
888 kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
889 tp->t_dqinfo = NULL;
890}