aboutsummaryrefslogtreecommitdiffstats
path: root/fs/quota
diff options
context:
space:
mode:
Diffstat (limited to 'fs/quota')
-rw-r--r--fs/quota/Kconfig59
-rw-r--r--fs/quota/Makefile14
-rw-r--r--fs/quota/dquot.c2611
-rw-r--r--fs/quota/quota.c524
-rw-r--r--fs/quota/quota_tree.c651
-rw-r--r--fs/quota/quota_tree.h25
-rw-r--r--fs/quota/quota_v1.c234
-rw-r--r--fs/quota/quota_v2.c237
-rw-r--r--fs/quota/quotaio_v1.h33
-rw-r--r--fs/quota/quotaio_v2.h60
10 files changed, 4448 insertions, 0 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..8047e01ef46b
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
1#
2# Quota configuration
3#
4
5config QUOTA
6 bool "Quota support"
7 help
8 If you say Y here, you will be able to set per user limits for disk
9 usage (also called disk quotas). Currently, it works for the
	  ext2, ext3, and reiserfs file systems. ext3 also supports journalled
11 quotas for which you don't need to run quotacheck(8) after an unclean
12 shutdown.
13 For further details, read the Quota mini-HOWTO, available from
14 <http://www.tldp.org/docs.html#howto>, or the documentation provided
15 with the quota tools. Probably the quota support is only useful for
16 multi user systems. If unsure, say N.
17
18config QUOTA_NETLINK_INTERFACE
19 bool "Report quota messages through netlink interface"
20 depends on QUOTA && NET
21 help
22 If you say Y here, quota warnings (about exceeding softlimit, reaching
23 hardlimit, etc.) will be reported through netlink interface. If unsure,
24 say Y.
25
26config PRINT_QUOTA_WARNING
27 bool "Print quota warnings to console (OBSOLETE)"
28 depends on QUOTA
29 default y
30 help
31 If you say Y here, quota warnings (about exceeding softlimit, reaching
32 hardlimit, etc.) will be printed to the process' controlling terminal.
	  Note that this behavior is currently deprecated and may go away in
	  the future. Please use notification via netlink socket instead.
35
36# Generic support for tree structured quota files. Selected when needed.
37config QUOTA_TREE
38 tristate
39
40config QFMT_V1
41 tristate "Old quota format support"
42 depends on QUOTA
43 help
44 This quota format was (is) used by kernels earlier than 2.4.22. If
45 you have quota working and you don't want to convert to new quota
46 format say Y here.
47
48config QFMT_V2
49 tristate "Quota format v2 support"
50 depends on QUOTA
51 select QUOTA_TREE
52 help
53 This quota format allows using quotas with 32-bit UIDs/GIDs. If you
54 need this functionality say Y here.
55
56config QUOTACTL
57 bool
58 depends on XFS_QUOTA || QUOTA
59 default y
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for the Linux filesystems.
3#
4# 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
5# Rewritten to use lists instead of if-statements.
6#
7
8obj-y :=
9
10obj-$(CONFIG_QUOTA) += dquot.o
11obj-$(CONFIG_QFMT_V1) += quota_v1.o
12obj-$(CONFIG_QFMT_V2) += quota_v2.o
13obj-$(CONFIG_QUOTA_TREE) += quota_tree.o
14obj-$(CONFIG_QUOTACTL) += quota.o
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644
index 000000000000..eb938015bd91
--- /dev/null
+++ b/fs/quota/dquot.c
@@ -0,0 +1,2611 @@
1/*
2 * Implementation of the diskquota system for the LINUX operating system. QUOTA
3 * is implemented using the BSD system call interface as the means of
4 * communication with the user level. This file contains the generic routines
5 * called by the different filesystems on allocation of an inode or block.
6 * These routines take care of the administration needed to have a consistent
7 * diskquota tracking system. The ideas of both user and group quotas are based
8 * on the Melbourne quota system as used on BSD derived systems. The internal
9 * implementation is based on one of the several variants of the LINUX
10 * inode-subsystem with added complexity of the diskquota system.
11 *
12 * Author: Marco van Wieringen <mvw@planets.elm.net>
13 *
14 * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
15 *
16 * Revised list management to avoid races
17 * -- Bill Hawes, <whawes@star.net>, 9/98
18 *
19 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20 * As the consequence the locking was moved from dquot_decr_...(),
21 * dquot_incr_...() to calling functions.
22 * invalidate_dquots() now writes modified dquots.
23 * Serialized quota_off() and quota_on() for mount point.
24 * Fixed a few bugs in grow_dquots().
25 * Fixed deadlock in write_dquot() - we no longer account quotas on
26 * quota files
27 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
28 * add_dquot_ref() restarts after blocking
29 * Added check for bogus uid and fixed check for group in quotactl.
30 * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
31 *
32 * Used struct list_head instead of own list struct
33 * Invalidation of referenced dquots is no longer possible
34 * Improved free_dquots list management
35 * Quota and i_blocks are now updated in one place to avoid races
36 * Warnings are now delayed so we won't block in critical section
37 * Write updated not to require dquot lock
38 * Jan Kara, <jack@suse.cz>, 9/2000
39 *
40 * Added dynamic quota structure allocation
41 * Jan Kara <jack@suse.cz> 12/2000
42 *
43 * Rewritten quota interface. Implemented new quota format and
44 * formats registering.
45 * Jan Kara, <jack@suse.cz>, 2001,2002
46 *
47 * New SMP locking.
48 * Jan Kara, <jack@suse.cz>, 10/2002
49 *
50 * Added journalled quota support, fix lock inversion problems
51 * Jan Kara, <jack@suse.cz>, 2003,2004
52 *
53 * (C) Copyright 1994 - 1997 Marco van Wieringen
54 */
55
56#include <linux/errno.h>
57#include <linux/kernel.h>
58#include <linux/fs.h>
59#include <linux/mount.h>
60#include <linux/mm.h>
61#include <linux/time.h>
62#include <linux/types.h>
63#include <linux/string.h>
64#include <linux/fcntl.h>
65#include <linux/stat.h>
66#include <linux/tty.h>
67#include <linux/file.h>
68#include <linux/slab.h>
69#include <linux/sysctl.h>
70#include <linux/init.h>
71#include <linux/module.h>
72#include <linux/proc_fs.h>
73#include <linux/security.h>
74#include <linux/kmod.h>
75#include <linux/namei.h>
76#include <linux/buffer_head.h>
77#include <linux/capability.h>
78#include <linux/quotaops.h>
79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
81#include <net/netlink.h>
82#include <net/genetlink.h>
83#endif
84
85#include <asm/uaccess.h>
86
87#define __DQUOT_PARANOIA
88
89/*
90 * There are three quota SMP locks. dq_list_lock protects all lists with quotas
91 * and quota formats, dqstats structure containing statistics about the lists
92 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
93 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
94 * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
95 * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
96 * modifications of quota state (on quotaon and quotaoff) and readers who care
97 * about latest values take it as well.
98 *
99 * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
100 * dq_list_lock > dq_state_lock
101 *
 * Note that some things (eg. sb pointer, type, id) don't change during
 * the life of the dquot structure and so need not be protected by a lock
104 *
105 * Any operation working on dquots via inode pointers must hold dqptr_sem. If
106 * operation is just reading pointers from inode (or not using them at all) the
107 * read lock is enough. If pointers are altered function must hold write lock
108 * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
109 * for altering the flag i_mutex is also needed).
110 *
111 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
112 * from inodes (dquot_alloc_space() and such don't check the dq_lock).
113 * Currently dquot is locked only when it is being read to memory (or space for
114 * it is being allocated) on the first dqget() and when it is being released on
 * the last dqput(). The allocation and release operations are serialized by
116 * the dq_lock and by checking the use count in dquot_release(). Write
117 * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
118 * spinlock to internal buffers before writing.
119 *
120 * Lock ordering (including related VFS locks) is the following:
121 * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
122 * dqio_mutex
123 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
124 * dqptr_sem. But filesystem has to count with the fact that functions such as
125 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
126 * from inside a transaction to keep filesystem consistency after a crash. Also
127 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
128 * called with dqptr_sem held.
129 * i_mutex on quota files is special (it's below dqio_mutex)
130 */
131
132static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
133static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
134__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
135EXPORT_SYMBOL(dq_data_lock);
136
137static char *quotatypes[] = INITQFNAMES;
138static struct quota_format_type *quota_formats; /* List of registered formats */
139static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
140
141/* SLAB cache for dquot structures */
142static struct kmem_cache *dquot_cachep;
143
/*
 * Register a quota format so it can be found at quotaon time.
 * The format is pushed on the head of the global quota_formats list
 * under dq_list_lock.  Always returns 0.
 */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
153
/*
 * Remove a previously registered quota format from the global list.
 * Walks the singly-linked quota_formats list looking for the link that
 * points at @fmt and unlinks it; silently does nothing if @fmt is not
 * on the list.
 */
void unregister_quota_format(struct quota_format_type *fmt)
{
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(unregister_quota_format);
167
/*
 * Look up a registered quota format by its format id, pinning the
 * owning module with try_module_get().
 *
 * If the format is not registered (or its module is going away), the
 * lock is dropped, the id is mapped to a module name via module_names[]
 * and request_module() is tried, then the list is searched again.
 * Returns the format with its module reference held, or NULL.
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);

		/* Translate format id to a module name for modprobe */
		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Module load may have registered the format — retry */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
198
/* Drop the module reference taken by find_quota_format() */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
203
204/*
205 * Dquot List Management:
206 * The quota code uses three lists for dquot management: the inuse_list,
207 * free_dquots, and dquot_hash[] array. A single dquot structure may be
208 * on all three lists, depending on its current state.
209 *
210 * All dquots are placed to the end of inuse_list when first created, and this
211 * list is used for invalidate operation, which must look at every dquot.
212 *
213 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
214 * and this list is searched whenever we need an available dquot. Dquots are
215 * removed from the list as soon as they are used again, and
216 * dqstats.free_dquots gives the number of dquots on the list. When
217 * dquot is invalidated it's completely released from memory.
218 *
219 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
221 * mechanism to locate a specific dquot.
222 */
223
224static LIST_HEAD(inuse_list);
225static LIST_HEAD(free_dquots);
226static unsigned int dq_hash_bits, dq_hash_mask;
227static struct hlist_head *dquot_hash;
228
229struct dqstats dqstats;
230EXPORT_SYMBOL(dqstats);
231
232static inline unsigned int
233hashfn(const struct super_block *sb, unsigned int id, int type)
234{
235 unsigned long tmp;
236
237 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
238 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
239}
240
241/*
242 * Following list functions expect dq_list_lock to be held
243 */
/*
 * Hook a dquot onto its identity hash chain.
 * Caller must hold dq_list_lock (see comment above).
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}
250
/* Unhook a dquot from its hash chain; caller holds dq_list_lock */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
255
256static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
257 unsigned int id, int type)
258{
259 struct hlist_node *node;
260 struct dquot *dquot;
261
262 hlist_for_each (node, dquot_hash+hashent) {
263 dquot = hlist_entry(node, struct dquot, dq_hash);
264 if (dquot->dq_sb == sb && dquot->dq_id == id &&
265 dquot->dq_type == type)
266 return dquot;
267 }
268 return NULL;
269}
270
/* Add a dquot to the tail of the free list; caller holds dq_list_lock */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats.free_dquots++;
}
277
/*
 * Take a dquot off the free list (no-op if it is not on it).
 * Caller holds dq_list_lock.
 */
static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats.free_dquots--;
}
285
/* Add a dquot to the global in-use list; caller holds dq_list_lock */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats.allocated_dquots++;
}
293
/* Remove a dquot from the in-use list; caller holds dq_list_lock */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats.allocated_dquots--;
	list_del(&dquot->dq_inuse);
}
299/*
300 * End of list functions needing dq_list_lock
301 */
302
/*
 * Wait until any in-flight dquot_acquire()/dquot_release() on @dquot
 * finishes.  Taking and immediately dropping dq_lock is sufficient
 * because those operations hold dq_lock for their entire duration.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
308
/* Nonzero if the dquot has in-memory modifications not yet written out */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
313
/* Dispatch dirtying to the filesystem's ->mark_dirty operation */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
318
/*
 * Generic ->mark_dirty implementation: set DQ_MOD_B and, on the clean
 * to dirty transition, hook the dquot onto the per-type dirty list so
 * vfs_quota_sync() can find it later.  Always returns 0.
 */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_type].dqi_dirty_list);
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
329
/* This function needs dq_list_lock */
/*
 * Clear the dirty bit and unhook the dquot from the dirty list.
 * Returns 1 if the dquot was dirty, 0 if there was nothing to do.
 */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
		return 0;
	list_del_init(&dquot->dq_dirty);
	return 1;
}
338
/* Mark the per-type quota file info as needing to be written out */
void mark_info_dirty(struct super_block *sb, int type)
{
	set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
}
EXPORT_SYMBOL(mark_info_dirty);
344
/*
 * Read dquot from disk and alloc space for it
 */

/*
 * Bring a dquot's on-disk data into memory and, if it has no on-disk
 * presence yet (dq_off == 0), allocate space for it in the quota file.
 *
 * Serialized against dquot_release() via dq_lock; file I/O happens
 * under dqio_mutex.  On success DQ_ACTIVE_B is set.  Returns 0 or a
 * negative error from the quota format callbacks.
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	mutex_lock(&dqopt->dqio_mutex);
	/* Read usage/limits from disk unless already cached */
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
383
/*
 * Write dquot to disk
 */

/*
 * Write a dirty dquot back to the quota file.  If the dquot is not
 * dirty (another writer beat us to it) this is a no-op.  The dirty
 * flag is cleared before writing, under dq_list_lock.
 * Returns 0 or a negative error from the format callbacks.
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dqopt->dqio_mutex);
	spin_lock(&dq_list_lock);
	if (!clear_dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		goto out_sem;
	}
	spin_unlock(&dq_list_lock);
	/* A dquot can be inactive only if there was an error during
	 * read/init => we had better not write it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
out_sem:
	mutex_unlock(&dqopt->dqio_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
415
/*
 * Release dquot
 */

/*
 * Release a dquot's claim on the quota file when the last reference is
 * being dropped.  Bails out if another dqget() raced us (dq_count > 1).
 * Clears DQ_ACTIVE_B on the way out.  Returns 0 or a negative error.
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	mutex_lock(&dqopt->dqio_mutex);
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
	mutex_unlock(&dqopt->dqio_mutex);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
446
/* Default ->destroy_dquot: return the dquot to the slab cache */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
452
/* Destroy a dquot via the filesystem's ->destroy_dquot operation */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
457
/* Invalidate all dquots on the list. Note that this function is called after
 * quota is disabled and pointers from inodes removed so there cannot be new
 * quota users. There can still be some users of quotas due to inodes being
 * just deleted or pruned by prune_icache() (those are not attached to any
 * list) or parallel quotactl call. We have to wait for such users.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		/* Only dquots of the given filesystem and type qualify */
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			DEFINE_WAIT(wait);

			atomic_inc(&dquot->dq_count);
			prepare_to_wait(&dquot->dq_wait_unused, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&dq_list_lock);
			/* Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			if (atomic_read(&dquot->dq_count) > 1)
				schedule();
			finish_wait(&dquot->dq_wait_unused, &wait);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
510
/* Call callback for every active dquot on given filesystem */
/*
 * Iterate inuse_list and invoke @fn for each DQ_ACTIVE_B dquot of @sb.
 * A reference is taken before dropping dq_list_lock so the dquot
 * cannot leave the list while @fn runs; the previous dquot's reference
 * is dropped only after re-acquiring the next one (old_dquot dance).
 * Stops and returns the first negative value @fn returns, else 0.
 */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
		dqput(old_dquot);
		old_dquot = dquot;
		ret = fn(dquot, priv);
		if (ret < 0)
			goto out;
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
546
/*
 * Write all dirty dquots (and dirty quota file info) of @sb to disk.
 * @type selects a single quota type, or -1 for all types.
 *
 * Dirty dquots are popped off the per-type dirty list under
 * dq_list_lock; a reference is taken before the lock is dropped for
 * the actual ->write_dquot() I/O.  Always returns 0.
 */
int vfs_quota_sync(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;

	mutex_lock(&dqopt->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
				continue;
			}
			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			atomic_inc(&dquot->dq_count);
			dqstats.lookups++;
			spin_unlock(&dq_list_lock);
			sb->dq_op->write_dquot(dquot);
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	/* Flush dirty quota file information as well */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	spin_lock(&dq_list_lock);
	dqstats.syncs++;
	spin_unlock(&dq_list_lock);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;
}
EXPORT_SYMBOL(vfs_quota_sync);
595
596/* Free unused dquots from cache */
597static void prune_dqcache(int count)
598{
599 struct list_head *head;
600 struct dquot *dquot;
601
602 head = free_dquots.prev;
603 while (head != &free_dquots && count) {
604 dquot = list_entry(head, struct dquot, dq_free);
605 remove_dquot_hash(dquot);
606 remove_free_dquot(dquot);
607 remove_inuse(dquot);
608 do_destroy_dquot(dquot);
609 count--;
610 head = free_dquots.prev;
611 }
612}
613
/*
 * This is called from kswapd when we think we need some
 * more memory
 */

/*
 * Shrinker callback: free up to @nr unused dquots and report the
 * remaining free-list size scaled by sysctl_vfs_cache_pressure.
 */
static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		spin_lock(&dq_list_lock);
		prune_dqcache(nr);
		spin_unlock(&dq_list_lock);
	}
	return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
628
/* Registered with the VM so memory reclaim can trim the dquot cache */
static struct shrinker dqcache_shrinker = {
	.shrink = shrink_dqcache_memory,
	.seeks = DEFAULT_SEEKS,
};
633
634/*
635 * Put reference to dquot
636 * NOTE: If you change this function please check whether dqput_blocks() works right...
637 */
638void dqput(struct dquot *dquot)
639{
640 int ret;
641
642 if (!dquot)
643 return;
644#ifdef __DQUOT_PARANOIA
645 if (!atomic_read(&dquot->dq_count)) {
646 printk("VFS: dqput: trying to free free dquot\n");
647 printk("VFS: device %s, dquot of %s %d\n",
648 dquot->dq_sb->s_id,
649 quotatypes[dquot->dq_type],
650 dquot->dq_id);
651 BUG();
652 }
653#endif
654
655 spin_lock(&dq_list_lock);
656 dqstats.drops++;
657 spin_unlock(&dq_list_lock);
658we_slept:
659 spin_lock(&dq_list_lock);
660 if (atomic_read(&dquot->dq_count) > 1) {
661 /* We have more than one user... nothing to do */
662 atomic_dec(&dquot->dq_count);
663 /* Releasing dquot during quotaoff phase? */
664 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) &&
665 atomic_read(&dquot->dq_count) == 1)
666 wake_up(&dquot->dq_wait_unused);
667 spin_unlock(&dq_list_lock);
668 return;
669 }
670 /* Need to release dquot? */
671 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
672 spin_unlock(&dq_list_lock);
673 /* Commit dquot before releasing */
674 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
675 if (ret < 0) {
676 printk(KERN_ERR "VFS: cannot write quota structure on "
677 "device %s (error %d). Quota may get out of "
678 "sync!\n", dquot->dq_sb->s_id, ret);
679 /*
680 * We clear dirty bit anyway, so that we avoid
681 * infinite loop here
682 */
683 spin_lock(&dq_list_lock);
684 clear_dquot_dirty(dquot);
685 spin_unlock(&dq_list_lock);
686 }
687 goto we_slept;
688 }
689 /* Clear flag in case dquot was inactive (something bad happened) */
690 clear_dquot_dirty(dquot);
691 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
692 spin_unlock(&dq_list_lock);
693 dquot->dq_sb->dq_op->release_dquot(dquot);
694 goto we_slept;
695 }
696 atomic_dec(&dquot->dq_count);
697#ifdef __DQUOT_PARANOIA
698 /* sanity check */
699 BUG_ON(!list_empty(&dquot->dq_free));
700#endif
701 put_dquot_last(dquot);
702 spin_unlock(&dq_list_lock);
703}
704EXPORT_SYMBOL(dqput);
705
/* Default ->alloc_dquot: zeroed allocation from the slab cache */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
711
712static struct dquot *get_empty_dquot(struct super_block *sb, int type)
713{
714 struct dquot *dquot;
715
716 dquot = sb->dq_op->alloc_dquot(sb, type);
717 if(!dquot)
718 return NULL;
719
720 mutex_init(&dquot->dq_lock);
721 INIT_LIST_HEAD(&dquot->dq_free);
722 INIT_LIST_HEAD(&dquot->dq_inuse);
723 INIT_HLIST_NODE(&dquot->dq_hash);
724 INIT_LIST_HEAD(&dquot->dq_dirty);
725 init_waitqueue_head(&dquot->dq_wait_unused);
726 dquot->dq_sb = sb;
727 dquot->dq_type = type;
728 atomic_set(&dquot->dq_count, 1);
729
730 return dquot;
731}
732
/*
 * Get reference to dquot
 *
 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
 * destroying our dquot by:
 *   a) checking for quota flags under dq_list_lock and
 *   b) getting a reference to dquot before we release dq_list_lock
 */
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
	unsigned int hashent = hashfn(sb, id, type);
	struct dquot *dquot = NULL, *empty = NULL;

	if (!sb_has_quota_active(sb, type))
		return NULL;
we_slept:
	spin_lock(&dq_list_lock);
	spin_lock(&dq_state_lock);
	/* Re-check under the locks: quotaoff may have run meanwhile */
	if (!sb_has_quota_active(sb, type)) {
		spin_unlock(&dq_state_lock);
		spin_unlock(&dq_list_lock);
		goto out;
	}
	spin_unlock(&dq_state_lock);

	dquot = find_dquot(hashent, sb, id, type);
	if (!dquot) {
		if (!empty) {
			/* Allocate outside the lock, then retry the lookup */
			spin_unlock(&dq_list_lock);
			empty = get_empty_dquot(sb, type);
			if (!empty)
				schedule();	/* Try to wait for a moment... */
			goto we_slept;
		}
		dquot = empty;
		empty = NULL;
		dquot->dq_id = id;
		/* all dquots go on the inuse_list */
		put_inuse(dquot);
		/* hash it first so it can be found */
		insert_dquot_hash(dquot);
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	} else {
		if (!atomic_read(&dquot->dq_count))
			remove_free_dquot(dquot);
		atomic_inc(&dquot->dq_count);
		dqstats.cache_hits++;
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
	}
#ifdef __DQUOT_PARANOIA
	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
#endif
out:
	/* Drop the preallocated dquot if the lookup won the race */
	if (empty)
		do_destroy_dquot(empty);

	return dquot;
}
EXPORT_SYMBOL(dqget);
804
805static int dqinit_needed(struct inode *inode, int type)
806{
807 int cnt;
808
809 if (IS_NOQUOTA(inode))
810 return 0;
811 if (type != -1)
812 return !inode->i_dquot[type];
813 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
814 if (!inode->i_dquot[cnt])
815 return 1;
816 return 0;
817}
818
/* This routine is guarded by dqonoff_mutex mutex */
/*
 * Walk all inodes of @sb and attach dquot references (via
 * ->initialize) to writable inodes that are missing them.  Only inodes
 * with a nonzero i_writecount that are not being freed are considered.
 */
static void add_dquot_ref(struct super_block *sb, int type)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!atomic_read(&inode->i_writecount))
			continue;
		if (!dqinit_needed(inode, type))
			continue;
		if (inode->i_state & (I_FREEING|I_WILL_FREE))
			continue;

		__iget(inode);
		spin_unlock(&inode_lock);

		iput(old_inode);
		sb->dq_op->initialize(inode, type);
		/* We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the inode_lock.
		 * We cannot iput the inode now as we can be holding the last
		 * reference and we cannot iput it under inode_lock. So we
		 * keep the reference and iput it later. */
		old_inode = inode;
		spin_lock(&inode_lock);
	}
	spin_unlock(&inode_lock);
	iput(old_inode);
}
849
850/*
851 * Return 0 if dqput() won't block.
852 * (note that 1 doesn't necessarily mean blocking)
853 */
854static inline int dqput_blocks(struct dquot *dquot)
855{
856 if (atomic_read(&dquot->dq_count) <= 1)
857 return 1;
858 return 0;
859}
860
861/*
862 * Remove references to dquots from inode and add dquot to list for freeing
863 * if we have the last referece to dquot
864 * We can't race with anybody because we hold dqptr_sem for writing...
865 */
866static int remove_inode_dquot_ref(struct inode *inode, int type,
867 struct list_head *tofree_head)
868{
869 struct dquot *dquot = inode->i_dquot[type];
870
871 inode->i_dquot[type] = NULL;
872 if (dquot) {
873 if (dqput_blocks(dquot)) {
874#ifdef __DQUOT_PARANOIA
875 if (atomic_read(&dquot->dq_count) != 1)
876 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
877#endif
878 spin_lock(&dq_list_lock);
879 /* As dquot must have currently users it can't be on
880 * the free list... */
881 list_add(&dquot->dq_free, tofree_head);
882 spin_unlock(&dq_list_lock);
883 return 1;
884 }
885 else
886 dqput(dquot); /* We have guaranteed we won't block */
887 }
888 return 0;
889}
890
891/*
892 * Free list of dquots
893 * Dquots are removed from inodes and no new references can be got so we are
894 * the only ones holding reference
895 */
896static void put_dquot_list(struct list_head *tofree_head)
897{
898 struct list_head *act_head;
899 struct dquot *dquot;
900
901 act_head = tofree_head->next;
902 while (act_head != tofree_head) {
903 dquot = list_entry(act_head, struct dquot, dq_free);
904 act_head = act_head->next;
905 /* Remove dquot from the list so we won't have problems... */
906 list_del_init(&dquot->dq_free);
907 dqput(dquot);
908 }
909}
910
/*
 * Strip dquot pointers of quota @type from every inode of @sb.
 * Dquots whose final dqput() could block are parked on @tofree_head
 * (by remove_inode_dquot_ref) to be dropped later outside inode_lock.
 */
static void remove_dquot_ref(struct super_block *sb, int type,
			     struct list_head *tofree_head)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		if (!IS_NOQUOTA(inode))
			remove_inode_dquot_ref(inode, type, tofree_head);
	}
	spin_unlock(&inode_lock);
}
923
/* Gather all references from inodes and drop them */
static void drop_dquot_ref(struct super_block *sb, int type)
{
	LIST_HEAD(tofree_head);

	if (sb->dq_op) {
		/* Write-lock dqptr_sem: we alter inode->i_dquot pointers */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		remove_dquot_ref(sb, type, &tofree_head);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		put_dquot_list(&tofree_head);
	}
}
936
/* Account @number newly allocated inodes to @dquot (dq_data_lock held) */
static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}
941
/* Account @number bytes of used space to @dquot (dq_data_lock held) */
static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}
946
/* Reserve @number bytes in @dquot without consuming usage yet
 * (dq_data_lock held) */
static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}
951
952/*
953 * Claim reserved quota space
954 */
955static void dquot_claim_reserved_space(struct dquot *dquot,
956 qsize_t number)
957{
958 WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
959 dquot->dq_dqb.dqb_curspace += number;
960 dquot->dq_dqb.dqb_rsvspace -= number;
961}
962
963static inline
964void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
965{
966 dquot->dq_dqb.dqb_rsvspace -= number;
967}
968
/*
 * Subtract @number inodes from @dquot's usage (dq_data_lock held).
 * Usage is clamped at zero unless the fs allows negative usage.
 */
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curinodes >= number)
		dquot->dq_dqb.dqb_curinodes -= number;
	else
		dquot->dq_dqb.dqb_curinodes = 0;
	/* Back at or below the softlimit => end any running grace period */
	if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
		dquot->dq_dqb.dqb_itime = (time_t) 0;
	/* Allow a fresh warning next time the inode limit is hit */
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
980
/*
 * Subtract @number bytes from @dquot's space usage (dq_data_lock held).
 * Usage is clamped at zero unless the fs allows negative usage.
 */
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dquot->dq_dqb.dqb_curspace >= number)
		dquot->dq_dqb.dqb_curspace -= number;
	else
		dquot->dq_dqb.dqb_curspace = 0;
	/* Back at or below the softlimit => end any running grace period */
	if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
		dquot->dq_dqb.dqb_btime = (time_t) 0;
	/* Allow a fresh warning next time the block limit is hit */
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
992
993static int warning_issued(struct dquot *dquot, const int warntype)
994{
995 int flag = (warntype == QUOTA_NL_BHARDWARN ||
996 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
997 ((warntype == QUOTA_NL_IHARDWARN ||
998 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
999
1000 if (!flag)
1001 return 0;
1002 return test_and_set_bit(flag, &dquot->dq_flags);
1003}
1004
1005#ifdef CONFIG_PRINT_QUOTA_WARNING
/* Non-zero => print quota warnings to the offender's tty (default on) */
static int flag_print_warnings = 1;

/*
 * Should the current process get a console warning for @dquot?
 * Warn only the owner: matching fsuid for user quota, group membership
 * for group quota.
 */
static int need_print_warning(struct dquot *dquot)
{
	if (!flag_print_warnings)
		return 0;

	switch (dquot->dq_type) {
		case USRQUOTA:
			return current_fsuid() == dquot->dq_id;
		case GRPQUOTA:
			return in_group_p(dquot->dq_id);
	}
	return 0;
}
1021
/* Print warning to user which exceeded quota */
static void print_warning(struct dquot *dquot, const int warntype)
{
	char *msg = NULL;
	struct tty_struct *tty;

	/* "dropped back below limit" events are netlink-only; also skip
	 * processes that should not be warned (see need_print_warning()) */
	if (warntype == QUOTA_NL_IHARDBELOW ||
	    warntype == QUOTA_NL_ISOFTBELOW ||
	    warntype == QUOTA_NL_BHARDBELOW ||
	    warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
		return;

	/* get_current_tty() takes a tty reference; released below */
	tty = get_current_tty();
	if (!tty)
		return;
	tty_write_message(tty, dquot->dq_sb->s_id);
	if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
		tty_write_message(tty, ": warning, ");
	else
		tty_write_message(tty, ": write failed, ");
	tty_write_message(tty, quotatypes[dquot->dq_type]);
	/* All warntypes that reach this point are covered below, so msg
	 * is always set before use */
	switch (warntype) {
		case QUOTA_NL_IHARDWARN:
			msg = " file limit reached.\r\n";
			break;
		case QUOTA_NL_ISOFTLONGWARN:
			msg = " file quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_ISOFTWARN:
			msg = " file quota exceeded.\r\n";
			break;
		case QUOTA_NL_BHARDWARN:
			msg = " block limit reached.\r\n";
			break;
		case QUOTA_NL_BSOFTLONGWARN:
			msg = " block quota exceeded too long.\r\n";
			break;
		case QUOTA_NL_BSOFTWARN:
			msg = " block quota exceeded.\r\n";
			break;
	}
	tty_write_message(tty, msg);
	tty_kref_put(tty);
}
1066#endif
1067
1068#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1069
/* Netlink family structure for quota */
static struct genl_family quota_genl_family = {
	/* Family id is assigned dynamically at register time */
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "VFS_DQUOT",
	.version = 1,
	.maxattr = QUOTA_NL_A_MAX,
};
1078
/*
 * Send warning to userspace about user which exceeded quota.
 * Builds a QUOTA_NL_C_WARNING generic-netlink message carrying quota
 * type, exceeding id, warning type, device numbers and the causing uid,
 * and multicasts it. Failures are logged and otherwise ignored.
 */
static void send_warning(const struct dquot *dquot, const char warntype)
{
	static atomic_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	/* Room for the 4 u32 and 2 u64 attributes put below */
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		  "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
			&quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		  "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
			MAJOR(dquot->dq_sb->s_dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
			MINOR(dquot->dq_sb->s_dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	/* skb ownership passes to the netlink layer here - no kfree_skb */
	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
1134#endif
1135/*
1136 * Write warnings to the console and send warning messages over netlink.
1137 *
1138 * Note that this function can sleep.
1139 */
1140static void flush_warnings(struct dquot *const *dquots, char *warntype)
1141{
1142 int i;
1143
1144 for (i = 0; i < MAXQUOTAS; i++)
1145 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN &&
1146 !warning_issued(dquots[i], warntype[i])) {
1147#ifdef CONFIG_PRINT_QUOTA_WARNING
1148 print_warning(dquots[i], warntype[i]);
1149#endif
1150#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1151 send_warning(dquots[i], warntype[i]);
1152#endif
1153 }
1154}
1155
1156static int ignore_hardlimit(struct dquot *dquot)
1157{
1158 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1159
1160 return capable(CAP_SYS_RESOURCE) &&
1161 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1162 !(info->dqi_flags & V1_DQF_RSQUASH));
1163}
1164
/*
 * Check whether @inodes more inodes may be charged to @dquot; sets
 * *warntype for flush_warnings() and may start a grace period.
 * Returns QUOTA_OK or NO_QUOTA. needs dq_data_lock
 */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	*warntype = QUOTA_NL_NOWARN;
	/* Fake entries and disabled limits never deny an allocation */
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Hard limit may only be exceeded by privileged processes, see
	 * ignore_hardlimit() */
	if (dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return NO_QUOTA;
	}

	/* Over the soft limit with the grace period expired => deny */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit - warn and start the grace
	 * period */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return QUOTA_OK;
}
1201
/*
 * Check whether @space more bytes (including outstanding reservations)
 * may be charged to @dquot; sets *warntype and may start a grace period.
 * With @prealloc set, no warnings are generated and exceeding the soft
 * limit is refused outright. Returns QUOTA_OK or NO_QUOTA.
 * needs dq_data_lock
 */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	*warntype = QUOTA_NL_NOWARN;
	/* Fake entries and disabled limits never deny an allocation */
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	/* Reserved space counts against the limits as well */
	tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
		+ space;

	/* Hard limit may only be exceeded by privileged processes, see
	 * ignore_hardlimit() */
	if (dquot->dq_dqb.dqb_bhardlimit &&
	    tspace > dquot->dq_dqb.dqb_bhardlimit &&
            !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BHARDWARN;
		return NO_QUOTA;
	}

	/* Over the soft limit with the grace period expired => deny */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
		return NO_QUOTA;
	}

	/* First crossing of the soft limit */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
			 * We don't allow preallocation to exceed the soft
			 * limit, so refuse it here
			 */
			return NO_QUOTA;
	}

	return QUOTA_OK;
}
1252
1253static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1254{
1255 qsize_t newinodes;
1256
1257 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1258 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1259 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
1260 return QUOTA_NL_NOWARN;
1261
1262 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1263 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1264 return QUOTA_NL_ISOFTBELOW;
1265 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1266 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1267 return QUOTA_NL_IHARDBELOW;
1268 return QUOTA_NL_NOWARN;
1269}
1270
1271static int info_bdq_free(struct dquot *dquot, qsize_t space)
1272{
1273 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1274 dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
1275 return QUOTA_NL_NOWARN;
1276
1277 if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1278 return QUOTA_NL_BSOFTBELOW;
1279 if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit &&
1280 dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit)
1281 return QUOTA_NL_BHARDBELOW;
1282 return QUOTA_NL_NOWARN;
1283}
1284/*
1285 * Initialize quota pointers in inode
1286 * We do things in a bit complicated way but by that we avoid calling
1287 * dqget() and thus filesystem callbacks under dqptr_sem.
1288 */
1289int dquot_initialize(struct inode *inode, int type)
1290{
1291 unsigned int id = 0;
1292 int cnt, ret = 0;
1293 struct dquot *got[MAXQUOTAS] = { NULL, NULL };
1294 struct super_block *sb = inode->i_sb;
1295
1296 /* First test before acquiring mutex - solves deadlocks when we
1297 * re-enter the quota code and are already holding the mutex */
1298 if (IS_NOQUOTA(inode))
1299 return 0;
1300
1301 /* First get references to structures we might need. */
1302 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1303 if (type != -1 && cnt != type)
1304 continue;
1305 switch (cnt) {
1306 case USRQUOTA:
1307 id = inode->i_uid;
1308 break;
1309 case GRPQUOTA:
1310 id = inode->i_gid;
1311 break;
1312 }
1313 got[cnt] = dqget(sb, id, cnt);
1314 }
1315
1316 down_write(&sb_dqopt(sb)->dqptr_sem);
1317 /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
1318 if (IS_NOQUOTA(inode))
1319 goto out_err;
1320 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1321 if (type != -1 && cnt != type)
1322 continue;
1323 /* Avoid races with quotaoff() */
1324 if (!sb_has_quota_active(sb, cnt))
1325 continue;
1326 if (!inode->i_dquot[cnt]) {
1327 inode->i_dquot[cnt] = got[cnt];
1328 got[cnt] = NULL;
1329 }
1330 }
1331out_err:
1332 up_write(&sb_dqopt(sb)->dqptr_sem);
1333 /* Drop unused references */
1334 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1335 dqput(got[cnt]);
1336 return ret;
1337}
1338EXPORT_SYMBOL(dquot_initialize);
1339
1340/*
1341 * Release all quotas referenced by inode
1342 */
1343int dquot_drop(struct inode *inode)
1344{
1345 int cnt;
1346 struct dquot *put[MAXQUOTAS];
1347
1348 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1349 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1350 put[cnt] = inode->i_dquot[cnt];
1351 inode->i_dquot[cnt] = NULL;
1352 }
1353 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1354
1355 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1356 dqput(put[cnt]);
1357 return 0;
1358}
1359EXPORT_SYMBOL(dquot_drop);
1360
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
{
	/* Here we can get arbitrary inode from clear_inode() so we have
	 * to be careful. OTOH we don't need locking as quota operations
	 * are allowed to change only at mount time */
	if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
	    && inode->i_sb->dq_op->drop) {
		int cnt;
		/* Test before calling to rule out calls from proc and such
		 * where we are not allowed to block. Note that this is
		 * actually reliable test even without the lock - the caller
		 * must assure that nobody can come after the DQUOT_DROP and
		 * add quota pointers back anyway */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++)
			if (inode->i_dquot[cnt])
				break;
		/* Only call the (possibly blocking) drop op when at least
		 * one dquot pointer is actually set */
		if (cnt < MAXQUOTAS)
			inode->i_sb->dq_op->drop(inode);
	}
}
EXPORT_SYMBOL(vfs_dq_drop);
1383
1384/*
1385 * Following four functions update i_blocks+i_bytes fields and
1386 * quota information (together with appropriate checks)
1387 * NOTE: We absolutely rely on the fact that caller dirties
1388 * the inode (usually macros in quotaops.h care about this) and
1389 * holds a handle for the current transaction so that dquot write and
1390 * inode write go into the same transaction.
1391 */
1392
1393/*
1394 * This operation can block, but only after everything is updated
1395 */
1396int __dquot_alloc_space(struct inode *inode, qsize_t number,
1397 int warn, int reserve)
1398{
1399 int cnt, ret = QUOTA_OK;
1400 char warntype[MAXQUOTAS];
1401
1402 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1403 warntype[cnt] = QUOTA_NL_NOWARN;
1404
1405 spin_lock(&dq_data_lock);
1406 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1407 if (!inode->i_dquot[cnt])
1408 continue;
1409 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
1410 == NO_QUOTA) {
1411 ret = NO_QUOTA;
1412 goto out_unlock;
1413 }
1414 }
1415 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1416 if (!inode->i_dquot[cnt])
1417 continue;
1418 if (reserve)
1419 dquot_resv_space(inode->i_dquot[cnt], number);
1420 else
1421 dquot_incr_space(inode->i_dquot[cnt], number);
1422 }
1423 if (!reserve)
1424 inode_add_bytes(inode, number);
1425out_unlock:
1426 spin_unlock(&dq_data_lock);
1427 flush_warnings(inode->i_dquot, warntype);
1428 return ret;
1429}
1430
/*
 * Allocate @number bytes of quota-accounted space for @inode and update
 * i_bytes. Returns QUOTA_OK or NO_QUOTA.
 */
int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
{
	int cnt, ret = QUOTA_OK;

	/*
	 * First test before acquiring mutex - solves deadlocks when we
	 * re-enter the quota code and are already holding the mutex
	 */
	if (IS_NOQUOTA(inode)) {
		inode_add_bytes(inode, number);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Recheck under dqptr_sem - quota may have been turned off */
	if (IS_NOQUOTA(inode)) {
		inode_add_bytes(inode, number);
		goto out_unlock;
	}

	ret = __dquot_alloc_space(inode, number, warn, 0);
	if (ret == NO_QUOTA)
		goto out_unlock;

	/* Dirtify all the dquots - this can block when journalling */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (inode->i_dquot[cnt])
			mark_dquot_dirty(inode->i_dquot[cnt]);
out_unlock:
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_space);
1464
/*
 * Reserve @number bytes of quota space for @inode without updating
 * i_bytes; the reservation is later claimed or released.
 * Returns QUOTA_OK or NO_QUOTA.
 */
int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
{
	int ret = QUOTA_OK;

	/* Cheap unlocked test first, see dquot_alloc_space() */
	if (IS_NOQUOTA(inode))
		goto out;

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ret = __dquot_alloc_space(inode, number, warn, 1);
out_unlock:
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(dquot_reserve_space);
1483
1484/*
1485 * This operation can block, but only after everything is updated
1486 */
1487int dquot_alloc_inode(const struct inode *inode, qsize_t number)
1488{
1489 int cnt, ret = NO_QUOTA;
1490 char warntype[MAXQUOTAS];
1491
1492 /* First test before acquiring mutex - solves deadlocks when we
1493 * re-enter the quota code and are already holding the mutex */
1494 if (IS_NOQUOTA(inode))
1495 return QUOTA_OK;
1496 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1497 warntype[cnt] = QUOTA_NL_NOWARN;
1498 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1499 if (IS_NOQUOTA(inode)) {
1500 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1501 return QUOTA_OK;
1502 }
1503 spin_lock(&dq_data_lock);
1504 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1505 if (!inode->i_dquot[cnt])
1506 continue;
1507 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
1508 == NO_QUOTA)
1509 goto warn_put_all;
1510 }
1511
1512 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1513 if (!inode->i_dquot[cnt])
1514 continue;
1515 dquot_incr_inodes(inode->i_dquot[cnt], number);
1516 }
1517 ret = QUOTA_OK;
1518warn_put_all:
1519 spin_unlock(&dq_data_lock);
1520 if (ret == QUOTA_OK)
1521 /* Dirtify all the dquots - this can block when journalling */
1522 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1523 if (inode->i_dquot[cnt])
1524 mark_dquot_dirty(inode->i_dquot[cnt]);
1525 flush_warnings(inode->i_dquot, warntype);
1526 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1527 return ret;
1528}
1529EXPORT_SYMBOL(dquot_alloc_inode);
1530
/*
 * Convert @number previously reserved bytes of @inode into real usage
 * and update i_bytes. The limits were already checked at reservation
 * time, so this cannot fail with NO_QUOTA.
 */
int dquot_claim_space(struct inode *inode, qsize_t number)
{
	int cnt;
	int ret = QUOTA_OK;

	/* Cheap unlocked test first, see dquot_alloc_space() */
	if (IS_NOQUOTA(inode)) {
		inode_add_bytes(inode, number);
		goto out;
	}

	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
	if (IS_NOQUOTA(inode))	{
		up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
		inode_add_bytes(inode, number);
		goto out;
	}

	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt])
			dquot_claim_reserved_space(inode->i_dquot[cnt],
							number);
	}
	/* Update inode bytes */
	inode_add_bytes(inode, number);
	spin_unlock(&dq_data_lock);
	/* Dirtify all the dquots - this can block when journalling */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (inode->i_dquot[cnt])
			mark_dquot_dirty(inode->i_dquot[cnt]);
	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
	return ret;
}
EXPORT_SYMBOL(dquot_claim_space);
1567
1568/*
1569 * Release reserved quota space
1570 */
1571void dquot_release_reserved_space(struct inode *inode, qsize_t number)
1572{
1573 int cnt;
1574
1575 if (IS_NOQUOTA(inode))
1576 goto out;
1577
1578 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1579 if (IS_NOQUOTA(inode))
1580 goto out_unlock;
1581
1582 spin_lock(&dq_data_lock);
1583 /* Release reserved dquots */
1584 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1585 if (inode->i_dquot[cnt])
1586 dquot_free_reserved_space(inode->i_dquot[cnt], number);
1587 }
1588 spin_unlock(&dq_data_lock);
1589
1590out_unlock:
1591 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1592out:
1593 return;
1594}
1595EXPORT_SYMBOL(dquot_release_reserved_space);
1596
1597/*
1598 * This operation can block, but only after everything is updated
1599 */
1600int dquot_free_space(struct inode *inode, qsize_t number)
1601{
1602 unsigned int cnt;
1603 char warntype[MAXQUOTAS];
1604
1605 /* First test before acquiring mutex - solves deadlocks when we
1606 * re-enter the quota code and are already holding the mutex */
1607 if (IS_NOQUOTA(inode)) {
1608out_sub:
1609 inode_sub_bytes(inode, number);
1610 return QUOTA_OK;
1611 }
1612
1613 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1614 /* Now recheck reliably when holding dqptr_sem */
1615 if (IS_NOQUOTA(inode)) {
1616 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1617 goto out_sub;
1618 }
1619 spin_lock(&dq_data_lock);
1620 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1621 if (!inode->i_dquot[cnt])
1622 continue;
1623 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1624 dquot_decr_space(inode->i_dquot[cnt], number);
1625 }
1626 inode_sub_bytes(inode, number);
1627 spin_unlock(&dq_data_lock);
1628 /* Dirtify all the dquots - this can block when journalling */
1629 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1630 if (inode->i_dquot[cnt])
1631 mark_dquot_dirty(inode->i_dquot[cnt]);
1632 flush_warnings(inode->i_dquot, warntype);
1633 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1634 return QUOTA_OK;
1635}
1636EXPORT_SYMBOL(dquot_free_space);
1637
1638/*
1639 * This operation can block, but only after everything is updated
1640 */
1641int dquot_free_inode(const struct inode *inode, qsize_t number)
1642{
1643 unsigned int cnt;
1644 char warntype[MAXQUOTAS];
1645
1646 /* First test before acquiring mutex - solves deadlocks when we
1647 * re-enter the quota code and are already holding the mutex */
1648 if (IS_NOQUOTA(inode))
1649 return QUOTA_OK;
1650
1651 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1652 /* Now recheck reliably when holding dqptr_sem */
1653 if (IS_NOQUOTA(inode)) {
1654 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1655 return QUOTA_OK;
1656 }
1657 spin_lock(&dq_data_lock);
1658 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1659 if (!inode->i_dquot[cnt])
1660 continue;
1661 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
1662 dquot_decr_inodes(inode->i_dquot[cnt], number);
1663 }
1664 spin_unlock(&dq_data_lock);
1665 /* Dirtify all the dquots - this can block when journalling */
1666 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1667 if (inode->i_dquot[cnt])
1668 mark_dquot_dirty(inode->i_dquot[cnt]);
1669 flush_warnings(inode->i_dquot, warntype);
1670 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1671 return QUOTA_OK;
1672}
1673EXPORT_SYMBOL(dquot_free_inode);
1674
1675/*
1676 * call back function, get reserved quota space from underlying fs
1677 */
1678qsize_t dquot_get_reserved_space(struct inode *inode)
1679{
1680 qsize_t reserved_space = 0;
1681
1682 if (sb_any_quota_active(inode->i_sb) &&
1683 inode->i_sb->dq_op->get_reserved_space)
1684 reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
1685 return reserved_space;
1686}
1687
1688/*
1689 * Transfer the number of inode and blocks from one diskquota to an other.
1690 *
1691 * This operation can block, but only after everything is updated
1692 * A transaction must be started when entering this function.
1693 */
1694int dquot_transfer(struct inode *inode, struct iattr *iattr)
1695{
1696 qsize_t space, cur_space;
1697 qsize_t rsv_space = 0;
1698 struct dquot *transfer_from[MAXQUOTAS];
1699 struct dquot *transfer_to[MAXQUOTAS];
1700 int cnt, ret = QUOTA_OK;
1701 int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
1702 chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
1703 char warntype_to[MAXQUOTAS];
1704 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1705
1706 /* First test before acquiring mutex - solves deadlocks when we
1707 * re-enter the quota code and are already holding the mutex */
1708 if (IS_NOQUOTA(inode))
1709 return QUOTA_OK;
1710 /* Initialize the arrays */
1711 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1712 transfer_from[cnt] = NULL;
1713 transfer_to[cnt] = NULL;
1714 warntype_to[cnt] = QUOTA_NL_NOWARN;
1715 }
1716 if (chuid)
1717 transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
1718 USRQUOTA);
1719 if (chgid)
1720 transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
1721 GRPQUOTA);
1722
1723 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1724 /* Now recheck reliably when holding dqptr_sem */
1725 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1726 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1727 goto put_all;
1728 }
1729 spin_lock(&dq_data_lock);
1730 cur_space = inode_get_bytes(inode);
1731 rsv_space = dquot_get_reserved_space(inode);
1732 space = cur_space + rsv_space;
1733 /* Build the transfer_from list and check the limits */
1734 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1735 if (!transfer_to[cnt])
1736 continue;
1737 transfer_from[cnt] = inode->i_dquot[cnt];
1738 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
1739 NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
1740 warntype_to + cnt) == NO_QUOTA)
1741 goto over_quota;
1742 }
1743
1744 /*
1745 * Finally perform the needed transfer from transfer_from to transfer_to
1746 */
1747 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1748 /*
1749 * Skip changes for same uid or gid or for turned off quota-type.
1750 */
1751 if (!transfer_to[cnt])
1752 continue;
1753
1754 /* Due to IO error we might not have transfer_from[] structure */
1755 if (transfer_from[cnt]) {
1756 warntype_from_inodes[cnt] =
1757 info_idq_free(transfer_from[cnt], 1);
1758 warntype_from_space[cnt] =
1759 info_bdq_free(transfer_from[cnt], space);
1760 dquot_decr_inodes(transfer_from[cnt], 1);
1761 dquot_decr_space(transfer_from[cnt], cur_space);
1762 dquot_free_reserved_space(transfer_from[cnt],
1763 rsv_space);
1764 }
1765
1766 dquot_incr_inodes(transfer_to[cnt], 1);
1767 dquot_incr_space(transfer_to[cnt], cur_space);
1768 dquot_resv_space(transfer_to[cnt], rsv_space);
1769
1770 inode->i_dquot[cnt] = transfer_to[cnt];
1771 }
1772 spin_unlock(&dq_data_lock);
1773 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1774
1775 /* Dirtify all the dquots - this can block when journalling */
1776 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1777 if (transfer_from[cnt])
1778 mark_dquot_dirty(transfer_from[cnt]);
1779 if (transfer_to[cnt]) {
1780 mark_dquot_dirty(transfer_to[cnt]);
1781 /* The reference we got is transferred to the inode */
1782 transfer_to[cnt] = NULL;
1783 }
1784 }
1785warn_put_all:
1786 flush_warnings(transfer_to, warntype_to);
1787 flush_warnings(transfer_from, warntype_from_inodes);
1788 flush_warnings(transfer_from, warntype_from_space);
1789put_all:
1790 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1791 dqput(transfer_from[cnt]);
1792 dqput(transfer_to[cnt]);
1793 }
1794 return ret;
1795over_quota:
1796 spin_unlock(&dq_data_lock);
1797 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1798 /* Clear dquot pointers we don't want to dqput() */
1799 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1800 transfer_from[cnt] = NULL;
1801 ret = NO_QUOTA;
1802 goto warn_put_all;
1803}
1804EXPORT_SYMBOL(dquot_transfer);
1805
1806/* Wrapper for transferring ownership of an inode */
1807int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1808{
1809 if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) {
1810 vfs_dq_init(inode);
1811 if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
1812 return 1;
1813 }
1814 return 0;
1815}
1816EXPORT_SYMBOL(vfs_dq_transfer);
1817
1818/*
1819 * Write info of quota file to disk
1820 */
1821int dquot_commit_info(struct super_block *sb, int type)
1822{
1823 int ret;
1824 struct quota_info *dqopt = sb_dqopt(sb);
1825
1826 mutex_lock(&dqopt->dqio_mutex);
1827 ret = dqopt->ops[type]->write_file_info(sb, type);
1828 mutex_unlock(&dqopt->dqio_mutex);
1829 return ret;
1830}
1831EXPORT_SYMBOL(dquot_commit_info);
1832
1833/*
1834 * Definitions of diskquota operations.
1835 */
1836struct dquot_operations dquot_operations = {
1837 .initialize = dquot_initialize,
1838 .drop = dquot_drop,
1839 .alloc_space = dquot_alloc_space,
1840 .alloc_inode = dquot_alloc_inode,
1841 .free_space = dquot_free_space,
1842 .free_inode = dquot_free_inode,
1843 .transfer = dquot_transfer,
1844 .write_dquot = dquot_commit,
1845 .acquire_dquot = dquot_acquire,
1846 .release_dquot = dquot_release,
1847 .mark_dirty = dquot_mark_dquot_dirty,
1848 .write_info = dquot_commit_info,
1849 .alloc_dquot = dquot_alloc,
1850 .destroy_dquot = dquot_destroy,
1851};
1852
1853/*
1854 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1855 */
1856int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
1857{
1858 int cnt, ret = 0;
1859 struct quota_info *dqopt = sb_dqopt(sb);
1860 struct inode *toputinode[MAXQUOTAS];
1861
1862 /* Cannot turn off usage accounting without turning off limits, or
1863 * suspend quotas and simultaneously turn quotas off. */
1864 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
1865 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
1866 DQUOT_USAGE_ENABLED)))
1867 return -EINVAL;
1868
1869 /* We need to serialize quota_off() for device */
1870 mutex_lock(&dqopt->dqonoff_mutex);
1871
1872 /*
1873 * Skip everything if there's nothing to do. We have to do this because
1874 * sometimes we are called when fill_super() failed and calling
1875 * sync_fs() in such cases does no good.
1876 */
1877 if (!sb_any_quota_loaded(sb)) {
1878 mutex_unlock(&dqopt->dqonoff_mutex);
1879 return 0;
1880 }
1881 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1882 toputinode[cnt] = NULL;
1883 if (type != -1 && cnt != type)
1884 continue;
1885 if (!sb_has_quota_loaded(sb, cnt))
1886 continue;
1887
1888 if (flags & DQUOT_SUSPENDED) {
1889 spin_lock(&dq_state_lock);
1890 dqopt->flags |=
1891 dquot_state_flag(DQUOT_SUSPENDED, cnt);
1892 spin_unlock(&dq_state_lock);
1893 } else {
1894 spin_lock(&dq_state_lock);
1895 dqopt->flags &= ~dquot_state_flag(flags, cnt);
1896 /* Turning off suspended quotas? */
1897 if (!sb_has_quota_loaded(sb, cnt) &&
1898 sb_has_quota_suspended(sb, cnt)) {
1899 dqopt->flags &= ~dquot_state_flag(
1900 DQUOT_SUSPENDED, cnt);
1901 spin_unlock(&dq_state_lock);
1902 iput(dqopt->files[cnt]);
1903 dqopt->files[cnt] = NULL;
1904 continue;
1905 }
1906 spin_unlock(&dq_state_lock);
1907 }
1908
1909 /* We still have to keep quota loaded? */
1910 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
1911 continue;
1912
1913 /* Note: these are blocking operations */
1914 drop_dquot_ref(sb, cnt);
1915 invalidate_dquots(sb, cnt);
1916 /*
1917 * Now all dquots should be invalidated, all writes done so we
1918 * should be only users of the info. No locks needed.
1919 */
1920 if (info_dirty(&dqopt->info[cnt]))
1921 sb->dq_op->write_info(sb, cnt);
1922 if (dqopt->ops[cnt]->free_file_info)
1923 dqopt->ops[cnt]->free_file_info(sb, cnt);
1924 put_quota_format(dqopt->info[cnt].dqi_format);
1925
1926 toputinode[cnt] = dqopt->files[cnt];
1927 if (!sb_has_quota_loaded(sb, cnt))
1928 dqopt->files[cnt] = NULL;
1929 dqopt->info[cnt].dqi_flags = 0;
1930 dqopt->info[cnt].dqi_igrace = 0;
1931 dqopt->info[cnt].dqi_bgrace = 0;
1932 dqopt->ops[cnt] = NULL;
1933 }
1934 mutex_unlock(&dqopt->dqonoff_mutex);
1935
1936 /* Skip syncing and setting flags if quota files are hidden */
1937 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
1938 goto put_inodes;
1939
1940 /* Sync the superblock so that buffers with quota data are written to
1941 * disk (and so userspace sees correct data afterwards). */
1942 if (sb->s_op->sync_fs)
1943 sb->s_op->sync_fs(sb, 1);
1944 sync_blockdev(sb->s_bdev);
1945 /* Now the quota files are just ordinary files and we can set the
1946 * inode flags back. Moreover we discard the pagecache so that
1947 * userspace sees the writes we did bypassing the pagecache. We
1948 * must also discard the blockdev buffers so that we see the
1949 * changes done by userspace on the next quotaon() */
1950 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1951 if (toputinode[cnt]) {
1952 mutex_lock(&dqopt->dqonoff_mutex);
1953 /* If quota was reenabled in the meantime, we have
1954 * nothing to do */
1955 if (!sb_has_quota_loaded(sb, cnt)) {
1956 mutex_lock_nested(&toputinode[cnt]->i_mutex,
1957 I_MUTEX_QUOTA);
1958 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
1959 S_NOATIME | S_NOQUOTA);
1960 truncate_inode_pages(&toputinode[cnt]->i_data,
1961 0);
1962 mutex_unlock(&toputinode[cnt]->i_mutex);
1963 mark_inode_dirty(toputinode[cnt]);
1964 }
1965 mutex_unlock(&dqopt->dqonoff_mutex);
1966 }
1967 if (sb->s_bdev)
1968 invalidate_bdev(sb->s_bdev);
1969put_inodes:
1970 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1971 if (toputinode[cnt]) {
1972 /* On remount RO, we keep the inode pointer so that we
1973 * can reenable quota on the subsequent remount RW. We
1974 * have to check 'flags' variable and not use sb_has_
1975 * function because another quotaon / quotaoff could
1976 * change global state before we got here. We refuse
1977 * to suspend quotas when there is pending delete on
1978 * the quota file... */
1979 if (!(flags & DQUOT_SUSPENDED))
1980 iput(toputinode[cnt]);
1981 else if (!toputinode[cnt]->i_nlink)
1982 ret = -EBUSY;
1983 }
1984 return ret;
1985}
1986EXPORT_SYMBOL(vfs_quota_disable);
1987
1988int vfs_quota_off(struct super_block *sb, int type, int remount)
1989{
1990 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
1991 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
1992}
1993EXPORT_SYMBOL(vfs_quota_off);
/*
 * Turn quotas on for a device
 */
1997
/*
 * Helper function to turn quotas on when we already have the inode of
 * quota file and no quota information is loaded.
 *
 * @inode:	regular file holding the quota data for @type
 * @type:	quota type (USRQUOTA or GRPQUOTA)
 * @format_id:	quota format id, resolved via find_quota_format()
 * @flags:	state flags; DQUOT_USAGE_ENABLED must be set,
 *		DQUOT_LIMITS_ENABLED is optional
 *
 * Returns 0 on success and a negative errno on failure.  On success a
 * reference to @inode (taken with igrab()) is kept in dqopt->files[type].
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;
	int oldflags = -1;	/* -1 == inode flags were not modified */

	if (!fmt)
		return -ESRCH;
	/* Quota file must be a regular, writable file */
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	/* Filesystem must provide the low-level quota IO methods */
	if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush the inode so
		 * that we see all the changes from userspace... */
		write_inode_now(inode, 1);
		/* And now flush the block cache so that kernel sees the
		 * changes */
		invalidate_bdev(sb->s_bdev);
	}
	mutex_lock(&inode->i_mutex);
	mutex_lock(&dqopt->dqonoff_mutex);
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_lock;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		down_write(&dqopt->dqptr_sem);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
					     S_NOQUOTA);
		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
		up_write(&dqopt->dqptr_sem);
		/* Release quota structures the quota file inode itself may
		 * reference - it is not accounted from now on */
		sb->dq_op->drop(inode);
	}

	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_lock;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	/* dqio_mutex serializes quota file IO */
	mutex_lock(&dqopt->dqio_mutex);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0) {
		mutex_unlock(&dqopt->dqio_mutex);
		goto out_file_init;
	}
	mutex_unlock(&dqopt->dqio_mutex);
	mutex_unlock(&inode->i_mutex);
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	add_dquot_ref(sb, type);
	mutex_unlock(&dqopt->dqonoff_mutex);

	return 0;

out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_lock:
	mutex_unlock(&dqopt->dqonoff_mutex);
	if (oldflags != -1) {
		down_write(&dqopt->dqptr_sem);
		/* Set the flags back (in the case of accidental quotaon()
		 * on a wrong file we don't want to mess up the flags) */
		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
		inode->i_flags |= oldflags;
		up_write(&dqopt->dqptr_sem);
	}
	mutex_unlock(&inode->i_mutex);
out_fmt:
	put_quota_format(fmt);

	return error;
}
2106
/*
 * Reenable quotas on remount RW.
 *
 * If quotas for @type were suspended, take over the stashed quota file
 * inode, clear the suspended state bits and load quotas from the file
 * again.  Returns 0 if nothing was suspended or on success, negative
 * errno otherwise.
 */
static int vfs_quota_on_remount(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret;
	unsigned int flags;

	mutex_lock(&dqopt->dqonoff_mutex);
	if (!sb_has_quota_suspended(sb, type)) {
		mutex_unlock(&dqopt->dqonoff_mutex);
		return 0;
	}
	/* Take over the inode reference kept while quotas were suspended */
	inode = dqopt->files[type];
	dqopt->files[type] = NULL;
	spin_lock(&dq_state_lock);
	/* Remember which states were enabled before suspension... */
	flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
						DQUOT_LIMITS_ENABLED, type);
	/* ...and clear all per-type state bits */
	dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
	spin_unlock(&dq_state_lock);
	mutex_unlock(&dqopt->dqonoff_mutex);

	/* Convert per-type state bits back to generic DQUOT_*_ENABLED flags */
	flags = dquot_generic_flag(flags, type);
	ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id,
				   flags);
	/* vfs_load_quota_inode() grabbed its own reference on success */
	iput(inode);

	return ret;
}
2136
2137int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
2138 struct path *path)
2139{
2140 int error = security_quota_on(path->dentry);
2141 if (error)
2142 return error;
2143 /* Quota file not on the same filesystem? */
2144 if (path->mnt->mnt_sb != sb)
2145 error = -EXDEV;
2146 else
2147 error = vfs_load_quota_inode(path->dentry->d_inode, type,
2148 format_id, DQUOT_USAGE_ENABLED |
2149 DQUOT_LIMITS_ENABLED);
2150 return error;
2151}
2152EXPORT_SYMBOL(vfs_quota_on_path);
2153
2154int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
2155 int remount)
2156{
2157 struct path path;
2158 int error;
2159
2160 if (remount)
2161 return vfs_quota_on_remount(sb, type);
2162
2163 error = kern_path(name, LOOKUP_FOLLOW, &path);
2164 if (!error) {
2165 error = vfs_quota_on_path(sb, type, format_id, &path);
2166 path_put(&path);
2167 }
2168 return error;
2169}
2170EXPORT_SYMBOL(vfs_quota_on);
2171
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 *
 * @flags may contain DQUOT_SUSPENDED (then this just resumes suspended
 * quotas), or a combination of DQUOT_USAGE_ENABLED / DQUOT_LIMITS_ENABLED.
 * When quota is already loaded only the missing state bits are added;
 * asking for an already enabled state returns -EBUSY.
 */
int vfs_quota_enable(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	int ret = 0;
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Just unsuspend quotas? */
	if (flags & DQUOT_SUSPENDED)
		return vfs_quota_on_remount(sb, type);
	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		mutex_lock(&dqopt->dqonoff_mutex);
		/* Now do a reliable test... (the first test above was an
		 * unlocked fast path) */
		if (!sb_has_quota_loaded(sb, type)) {
			mutex_unlock(&dqopt->dqonoff_mutex);
			goto load_quota;
		}
		/* Refuse to enable a state that is already enabled */
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type)) {
			ret = -EBUSY;
			goto out_lock;
		}
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
out_lock:
		mutex_unlock(&dqopt->dqonoff_mutex);
		return ret;
	}

load_quota:
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(vfs_quota_enable);
2218
2219/*
2220 * This function is used when filesystem needs to initialize quotas
2221 * during mount time.
2222 */
2223int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
2224 int format_id, int type)
2225{
2226 struct dentry *dentry;
2227 int error;
2228
2229 dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
2230 if (IS_ERR(dentry))
2231 return PTR_ERR(dentry);
2232
2233 if (!dentry->d_inode) {
2234 error = -ENOENT;
2235 goto out;
2236 }
2237
2238 error = security_quota_on(dentry);
2239 if (!error)
2240 error = vfs_load_quota_inode(dentry->d_inode, type, format_id,
2241 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2242
2243out:
2244 dput(dentry);
2245 return error;
2246}
2247EXPORT_SYMBOL(vfs_quota_on_mount);
2248
2249/* Wrapper to turn on quotas when remounting rw */
2250int vfs_dq_quota_on_remount(struct super_block *sb)
2251{
2252 int cnt;
2253 int ret = 0, err;
2254
2255 if (!sb->s_qcop || !sb->s_qcop->quota_on)
2256 return -ENOSYS;
2257 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2258 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
2259 if (err < 0 && !ret)
2260 ret = err;
2261 }
2262 return ret;
2263}
2264EXPORT_SYMBOL(vfs_dq_quota_on_remount);
2265
2266static inline qsize_t qbtos(qsize_t blocks)
2267{
2268 return blocks << QIF_DQBLKSIZE_BITS;
2269}
2270
2271static inline qsize_t stoqb(qsize_t space)
2272{
2273 return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
2274}
2275
/* Generic routine for getting common part of quota structure.
 * Copies the in-memory dquot usage/limit data into the user-visible
 * struct if_dqblk under dq_data_lock. */
static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	spin_lock(&dq_data_lock);
	/* Space limits are kept in bytes internally, reported in blocks */
	di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
	di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
	/* Include reserved space so userspace sees all claimed space */
	di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
	di->dqb_ihardlimit = dm->dqb_ihardlimit;
	di->dqb_isoftlimit = dm->dqb_isoftlimit;
	di->dqb_curinodes = dm->dqb_curinodes;
	di->dqb_btime = dm->dqb_btime;
	di->dqb_itime = dm->dqb_itime;
	di->dqb_valid = QIF_ALL;
	spin_unlock(&dq_data_lock);
}
2293
2294int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2295 struct if_dqblk *di)
2296{
2297 struct dquot *dquot;
2298
2299 dquot = dqget(sb, id, type);
2300 if (!dquot)
2301 return -ESRCH;
2302 do_get_dqblk(dquot, di);
2303 dqput(dquot);
2304
2305 return 0;
2306}
2307EXPORT_SYMBOL(vfs_get_dqblk);
2308
/* Generic routine for setting common part of quota structure.
 *
 * Updates the fields of @dquot selected by di->dqb_valid, recomputes the
 * grace times and the DQ_FAKE_B flag, and marks the dquot dirty.  Returns
 * -ERANGE when a requested limit exceeds what the quota format can store.
 */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	/* Reject limits the on-disk format cannot represent */
	if ((di->dqb_valid & QIF_BLIMITS &&
	     (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
	      di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
	    (di->dqb_valid & QIF_ILIMITS &&
	     (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
	      di->dqb_isoftlimit > dqi->dqi_maxilimit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->dqb_valid & QIF_SPACE) {
		/* Userspace reported curspace includes reservation - see
		 * do_get_dqblk() - so subtract it back out here */
		dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BLIMITS) {
		/* Block limits arrive in blocks, are stored in bytes */
		dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit);
		dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit);
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_INODES) {
		dm->dqb_curinodes = di->dqb_curinodes;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ILIMITS) {
		dm->dqb_isoftlimit = di->dqb_isoftlimit;
		dm->dqb_ihardlimit = di->dqb_ihardlimit;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_BTIME) {
		dm->dqb_btime = di->dqb_btime;
		check_blim = 1;
		__set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}
	if (di->dqb_valid & QIF_ITIME) {
		dm->dqb_itime = di->dqb_itime;
		check_ilim = 1;
		__set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Recompute block grace time when space or block limits changed */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->dqb_valid & QIF_BTIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	/* Same for the inode grace time */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->dqb_valid & QIF_ITIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is a "fake" entry */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
2386
2387int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2388 struct if_dqblk *di)
2389{
2390 struct dquot *dquot;
2391 int rc;
2392
2393 dquot = dqget(sb, id, type);
2394 if (!dquot) {
2395 rc = -ESRCH;
2396 goto out;
2397 }
2398 rc = do_set_dqblk(dquot, di);
2399 dqput(dquot);
2400out:
2401 return rc;
2402}
2403EXPORT_SYMBOL(vfs_set_dqblk);
2404
2405/* Generic routine for getting common part of quota file information */
2406int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2407{
2408 struct mem_dqinfo *mi;
2409
2410 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2411 if (!sb_has_quota_active(sb, type)) {
2412 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2413 return -ESRCH;
2414 }
2415 mi = sb_dqopt(sb)->info + type;
2416 spin_lock(&dq_data_lock);
2417 ii->dqi_bgrace = mi->dqi_bgrace;
2418 ii->dqi_igrace = mi->dqi_igrace;
2419 ii->dqi_flags = mi->dqi_flags & DQF_MASK;
2420 ii->dqi_valid = IIF_ALL;
2421 spin_unlock(&dq_data_lock);
2422 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2423 return 0;
2424}
2425EXPORT_SYMBOL(vfs_get_dqinfo);
2426
/* Generic routine for setting common part of quota file information.
 * Updates grace times and user-settable flags selected by ii->dqi_valid
 * and forces the new information to disk. */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	if (!sb_has_quota_active(sb, type)) {
		err = -ESRCH;
		goto out;
	}
	mi = sb_dqopt(sb)->info + type;
	spin_lock(&dq_data_lock);
	if (ii->dqi_valid & IIF_BGRACE)
		mi->dqi_bgrace = ii->dqi_bgrace;
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		/* Only DQF_MASK bits may be changed from userspace; the
		 * remaining flag bits are kernel-internal */
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
				(ii->dqi_flags & DQF_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
out:
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	return err;
}
EXPORT_SYMBOL(vfs_set_dqinfo);
2456
/* Default quotactl operations for filesystems that use the generic VFS
 * quota implementation in this file. */
struct quotactl_ops vfs_quotactl_ops = {
	.quota_on = vfs_quota_on,
	.quota_off = vfs_quota_off,
	.quota_sync = vfs_quota_sync,
	.get_info = vfs_get_dqinfo,
	.set_info = vfs_set_dqinfo,
	.get_dqblk = vfs_get_dqblk,
	.set_dqblk = vfs_set_dqblk
};
2466
/* Read-only dquot statistics exported under /proc/sys/fs/quota/ (plus the
 * writable "warnings" toggle when console warnings are compiled in). */
static ctl_table fs_dqstats_table[] = {
	{
		.ctl_name = FS_DQ_LOOKUPS,
		.procname = "lookups",
		.data = &dqstats.lookups,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_DROPS,
		.procname = "drops",
		.data = &dqstats.drops,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_READS,
		.procname = "reads",
		.data = &dqstats.reads,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_WRITES,
		.procname = "writes",
		.data = &dqstats.writes,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_CACHE_HITS,
		.procname = "cache_hits",
		.data = &dqstats.cache_hits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_ALLOCATED,
		.procname = "allocated_dquots",
		.data = &dqstats.allocated_dquots,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_FREE,
		.procname = "free_dquots",
		.data = &dqstats.free_dquots,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
	{
		.ctl_name = FS_DQ_SYNCS,
		.procname = "syncs",
		.data = &dqstats.syncs,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = &proc_dointvec,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	/* Writable: enables/disables quota warnings on the console */
	{
		.ctl_name = FS_DQ_WARNINGS,
		.procname = "warnings",
		.data = &flag_print_warnings,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = &proc_dointvec,
	},
#endif
	{ .ctl_name = 0 },
};
2544
/* /proc/sys/fs/quota directory */
static ctl_table fs_table[] = {
	{
		.ctl_name = FS_DQSTATS,
		.procname = "quota",
		.mode = 0555,
		.child = fs_dqstats_table,
	},
	{ .ctl_name = 0 },
};
2554
/* Root of the quota sysctl hierarchy: /proc/sys/fs */
static ctl_table sys_table[] = {
	{
		.ctl_name = CTL_FS,
		.procname = "fs",
		.mode = 0555,
		.child = fs_table,
	},
	{ .ctl_name = 0 },
};
2564
2565static int __init dquot_init(void)
2566{
2567 int i;
2568 unsigned long nr_hash, order;
2569
2570 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2571
2572 register_sysctl_table(sys_table);
2573
2574 dquot_cachep = kmem_cache_create("dquot",
2575 sizeof(struct dquot), sizeof(unsigned long) * 4,
2576 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2577 SLAB_MEM_SPREAD|SLAB_PANIC),
2578 NULL);
2579
2580 order = 0;
2581 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2582 if (!dquot_hash)
2583 panic("Cannot create dquot hash table");
2584
2585 /* Find power-of-two hlist_heads which can fit into allocation */
2586 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2587 dq_hash_bits = 0;
2588 do {
2589 dq_hash_bits++;
2590 } while (nr_hash >> dq_hash_bits);
2591 dq_hash_bits--;
2592
2593 nr_hash = 1UL << dq_hash_bits;
2594 dq_hash_mask = nr_hash - 1;
2595 for (i = 0; i < nr_hash; i++)
2596 INIT_HLIST_HEAD(dquot_hash + i);
2597
2598 printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2599 nr_hash, order, (PAGE_SIZE << order));
2600
2601 register_shrinker(&dqcache_shrinker);
2602
2603#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2604 if (genl_register_family(&quota_genl_family) != 0)
2605 printk(KERN_ERR
2606 "VFS: Failed to create quota netlink interface.\n");
2607#endif
2608
2609 return 0;
2610}
2611module_init(dquot_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
new file mode 100644
index 000000000000..b7f5a468f076
--- /dev/null
+++ b/fs/quota/quota.c
@@ -0,0 +1,524 @@
1/*
2 * Quota code necessary even when VFS quota support is not compiled
3 * into the kernel. The interesting stuff is over in dquot.c, here
4 * we have symbols for initial quotactl(2) handling, the sysctl(2)
5 * variables, etc - things needed even when quota support disabled.
6 */
7
8#include <linux/fs.h>
9#include <linux/namei.h>
10#include <linux/slab.h>
11#include <asm/current.h>
12#include <asm/uaccess.h>
13#include <linux/compat.h>
14#include <linux/kernel.h>
15#include <linux/security.h>
16#include <linux/syscalls.h>
17#include <linux/buffer_head.h>
18#include <linux/capability.h>
19#include <linux/quotaops.h>
20#include <linux/types.h>
21
/* Check validity of generic quotactl commands: argument sanity, presence
 * of the required quotactl operation, whether quota is turned on for
 * commands that need it, and caller privileges. */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
				  qid_t id)
{
	if (type >= MAXQUOTAS)
		return -EINVAL;
	/* Only Q_SYNC may be issued without a superblock */
	if (!sb && cmd != Q_SYNC)
		return -ENODEV;
	/* Is operation supported? */
	if (sb && !sb->s_qcop)
		return -ENOSYS;

	switch (cmd) {
		case Q_GETFMT:
			break;
		case Q_QUOTAON:
			if (!sb->s_qcop->quota_on)
				return -ENOSYS;
			break;
		case Q_QUOTAOFF:
			if (!sb->s_qcop->quota_off)
				return -ENOSYS;
			break;
		case Q_SETINFO:
			if (!sb->s_qcop->set_info)
				return -ENOSYS;
			break;
		case Q_GETINFO:
			if (!sb->s_qcop->get_info)
				return -ENOSYS;
			break;
		case Q_SETQUOTA:
			if (!sb->s_qcop->set_dqblk)
				return -ENOSYS;
			break;
		case Q_GETQUOTA:
			if (!sb->s_qcop->get_dqblk)
				return -ENOSYS;
			break;
		case Q_SYNC:
			if (sb && !sb->s_qcop->quota_sync)
				return -ENOSYS;
			break;
		default:
			return -EINVAL;
	}

	/* Is quota turned on for commands which need it? */
	switch (cmd) {
		case Q_GETFMT:
		case Q_GETINFO:
		case Q_SETINFO:
		case Q_SETQUOTA:
		case Q_GETQUOTA:
			/* This is just an informative test so we are satisfied
			 * without the lock */
			if (!sb_has_quota_active(sb, type))
				return -ESRCH;
	}

	/* Check privileges: a user may query his own quota (and a group
	 * member his group's), everything else needs CAP_SYS_ADMIN except
	 * the read-only informational commands */
	if (cmd == Q_GETQUOTA) {
		if (((type == USRQUOTA && current_euid() != id) ||
		     (type == GRPQUOTA && !in_egroup_p(id))) &&
		    !capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

	return 0;
}
95
/* Check validity of XFS Quota Manager commands: argument sanity, presence
 * of the required quotactl operation and caller privileges. */
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
			      qid_t id)
{
	if (type >= XQM_MAXQUOTAS)
		return -EINVAL;
	if (!sb)
		return -ENODEV;
	if (!sb->s_qcop)
		return -ENOSYS;

	switch (cmd) {
		case Q_XQUOTAON:
		case Q_XQUOTAOFF:
		case Q_XQUOTARM:
			if (!sb->s_qcop->set_xstate)
				return -ENOSYS;
			break;
		case Q_XGETQSTAT:
			if (!sb->s_qcop->get_xstate)
				return -ENOSYS;
			break;
		case Q_XSETQLIM:
			if (!sb->s_qcop->set_xquota)
				return -ENOSYS;
			break;
		case Q_XGETQUOTA:
			if (!sb->s_qcop->get_xquota)
				return -ENOSYS;
			break;
		case Q_XQUOTASYNC:
			if (!sb->s_qcop->quota_sync)
				return -ENOSYS;
			break;
		default:
			return -EINVAL;
	}

	/* Check privileges: a user may query his own quota (and a group
	 * member his group's); all state-changing commands need
	 * CAP_SYS_ADMIN */
	if (cmd == Q_XGETQUOTA) {
		if (((type == XQM_USRQUOTA && current_euid() != id) ||
		     (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
		     !capable(CAP_SYS_ADMIN))
			return -EPERM;
	} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
147
148static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
149 qid_t id)
150{
151 int error;
152
153 if (XQM_COMMAND(cmd))
154 error = xqm_quotactl_valid(sb, type, cmd, id);
155 else
156 error = generic_quotactl_valid(sb, type, cmd, id);
157 if (!error)
158 error = security_quotactl(cmd, type, id, sb);
159 return error;
160}
161
/* Sync quota data of one superblock to disk and make it visible to
 * userspace reads of the quota files. */
static void quota_sync_sb(struct super_block *sb, int type)
{
	int cnt;

	sb->s_qcop->quota_sync(sb, type);

	/* Hidden system quota files need no pagecache games below */
	if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)
		return;
	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/* type == -1 means "all quota types" */
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
				  I_MUTEX_QUOTA);
		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
	}
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
}
194
/* Sync quota data of @sb, or of all superblocks when @sb is NULL.
 * type == -1 means all quota types. */
void sync_dquots(struct super_block *sb, int type)
{
	int cnt;

	if (sb) {
		if (sb->s_qcop->quota_sync)
			quota_sync_sb(sb, type);
		return;
	}

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* This test just improves performance so it needn't be
		 * reliable... */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
			if (type != -1 && type != cnt)
				continue;
			if (!sb_has_quota_active(sb, cnt))
				continue;
			if (!info_dirty(&sb_dqopt(sb)->info[cnt]) &&
			   list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list))
				continue;
			break;
		}
		/* Nothing dirty for this sb - skip it */
		if (cnt == MAXQUOTAS)
			continue;
		/* Pin the superblock so we can drop sb_lock while syncing */
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		/* s_root check: skip superblocks that went away meanwhile */
		if (sb->s_root && sb->s_qcop->quota_sync)
			quota_sync_sb(sb, type);
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		/* List may have changed while sb_lock was dropped */
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}
234
235/* Copy parameters and call proper function */
236static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
237 void __user *addr)
238{
239 int ret;
240
241 switch (cmd) {
242 case Q_QUOTAON: {
243 char *pathname;
244
245 pathname = getname(addr);
246 if (IS_ERR(pathname))
247 return PTR_ERR(pathname);
248 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
249 putname(pathname);
250 return ret;
251 }
252 case Q_QUOTAOFF:
253 return sb->s_qcop->quota_off(sb, type, 0);
254
255 case Q_GETFMT: {
256 __u32 fmt;
257
258 down_read(&sb_dqopt(sb)->dqptr_sem);
259 if (!sb_has_quota_active(sb, type)) {
260 up_read(&sb_dqopt(sb)->dqptr_sem);
261 return -ESRCH;
262 }
263 fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
264 up_read(&sb_dqopt(sb)->dqptr_sem);
265 if (copy_to_user(addr, &fmt, sizeof(fmt)))
266 return -EFAULT;
267 return 0;
268 }
269 case Q_GETINFO: {
270 struct if_dqinfo info;
271
272 ret = sb->s_qcop->get_info(sb, type, &info);
273 if (ret)
274 return ret;
275 if (copy_to_user(addr, &info, sizeof(info)))
276 return -EFAULT;
277 return 0;
278 }
279 case Q_SETINFO: {
280 struct if_dqinfo info;
281
282 if (copy_from_user(&info, addr, sizeof(info)))
283 return -EFAULT;
284 return sb->s_qcop->set_info(sb, type, &info);
285 }
286 case Q_GETQUOTA: {
287 struct if_dqblk idq;
288
289 ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
290 if (ret)
291 return ret;
292 if (copy_to_user(addr, &idq, sizeof(idq)))
293 return -EFAULT;
294 return 0;
295 }
296 case Q_SETQUOTA: {
297 struct if_dqblk idq;
298
299 if (copy_from_user(&idq, addr, sizeof(idq)))
300 return -EFAULT;
301 return sb->s_qcop->set_dqblk(sb, type, id, &idq);
302 }
303 case Q_SYNC:
304 sync_dquots(sb, type);
305 return 0;
306
307 case Q_XQUOTAON:
308 case Q_XQUOTAOFF:
309 case Q_XQUOTARM: {
310 __u32 flags;
311
312 if (copy_from_user(&flags, addr, sizeof(flags)))
313 return -EFAULT;
314 return sb->s_qcop->set_xstate(sb, flags, cmd);
315 }
316 case Q_XGETQSTAT: {
317 struct fs_quota_stat fqs;
318
319 if ((ret = sb->s_qcop->get_xstate(sb, &fqs)))
320 return ret;
321 if (copy_to_user(addr, &fqs, sizeof(fqs)))
322 return -EFAULT;
323 return 0;
324 }
325 case Q_XSETQLIM: {
326 struct fs_disk_quota fdq;
327
328 if (copy_from_user(&fdq, addr, sizeof(fdq)))
329 return -EFAULT;
330 return sb->s_qcop->set_xquota(sb, type, id, &fdq);
331 }
332 case Q_XGETQUOTA: {
333 struct fs_disk_quota fdq;
334
335 ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
336 if (ret)
337 return ret;
338 if (copy_to_user(addr, &fdq, sizeof(fdq)))
339 return -EFAULT;
340 return 0;
341 }
342 case Q_XQUOTASYNC:
343 return sb->s_qcop->quota_sync(sb, type);
344 /* We never reach here unless validity check is broken */
345 default:
346 BUG();
347 }
348 return 0;
349}
350
/*
 * look up a superblock on which quota ops will be performed
 * - use the name of a block device to find the superblock thereon
 *
 * Returns a referenced superblock (drop with drop_super()) or an
 * ERR_PTR().  Without CONFIG_BLOCK there are no block devices, so the
 * lookup always fails with -ENODEV.
 */
static struct super_block *quotactl_block(const char __user *special)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	char *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp);
	putname(tmp);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	sb = get_super(bdev);
	/* get_super() holds its own reference, the bdev one can go */
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	return ERR_PTR(-ENODEV);
#endif
}
378
/*
 * This is the system call interface. This communicates with
 * the user-level programs. Currently this only supports diskquota
 * calls. Maybe we need to add the process quotas etc. in the future,
 * but we probably should use rlimits for that.
 *
 * @cmd encodes the command in the upper bits and the quota type in the
 * lower SUBCMDMASK bits; @special names the block device, @id is the
 * user/group id and @addr points at the command-specific argument.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/* Q_SYNC with a NULL device syncs all superblocks and needs no sb */
	if (cmds != Q_SYNC || special) {
		sb = quotactl_block(special);
		if (IS_ERR(sb))
			return PTR_ERR(sb);
	}

	ret = check_quotactl_valid(sb, type, cmds, id);
	if (ret >= 0)
		ret = do_quotactl(sb, type, cmds, id, addr);
	if (sb)
		drop_super(sb);

	return ret;
}
409
410#if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT)
411/*
412 * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64)
413 * and is necessary due to alignment problems.
414 */
/* 32-bit layout of struct if_dqblk: compat_u64 members avoid the 64-bit
 * alignment padding the native struct gets on x86_64/ia64. */
struct compat_if_dqblk {
	compat_u64 dqb_bhardlimit;
	compat_u64 dqb_bsoftlimit;
	compat_u64 dqb_curspace;
	compat_u64 dqb_ihardlimit;
	compat_u64 dqb_isoftlimit;
	compat_u64 dqb_curinodes;
	compat_u64 dqb_btime;
	compat_u64 dqb_itime;
	compat_uint_t dqb_valid;
};
426
/* XFS structures */
/* 32-bit layout of struct fs_qfilestat. */
struct compat_fs_qfilestat {
	/* NOTE(review): mirrors fs_qfilestat.qfs_ino; the name
	 * "dqb_bhardlimit" looks like a copy-paste leftover.  Only the
	 * size/offset matters - this field is never accessed by name in
	 * the code below. */
	compat_u64 dqb_bhardlimit;
	compat_u64 qfs_nblks;
	compat_uint_t qfs_nextents;
};
433
/* 32-bit layout of struct fs_quota_stat (Q_XGETQSTAT result). */
struct compat_fs_quota_stat {
	__s8		qs_version;
	__u16		qs_flags;
	__s8		qs_pad;
	struct compat_fs_qfilestat	qs_uquota;
	struct compat_fs_qfilestat	qs_gquota;
	compat_uint_t	qs_incoredqs;
	compat_int_t	qs_btimelimit;
	compat_int_t	qs_itimelimit;
	compat_int_t	qs_rtbtimelimit;
	__u16		qs_bwarnlimit;
	__u16		qs_iwarnlimit;
};
447
/*
 * 32-bit quotactl entry point.  Translates between the compat (packed
 * 32-bit) structure layouts and the native ones via a bounce buffer in
 * the compat user stack area, then calls sys_quotactl().  Commands whose
 * argument layout is identical in both ABIs fall through untouched.
 */
asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
						qid_t id, void __user *addr)
{
	unsigned int cmds;
	struct if_dqblk __user *dqblk;
	struct compat_if_dqblk __user *compat_dqblk;
	struct fs_quota_stat __user *fsqstat;
	struct compat_fs_quota_stat __user *compat_fsqstat;
	compat_uint_t data;
	u16 xdata;
	long ret;

	cmds = cmd >> SUBCMDSHIFT;

	switch (cmds) {
	case Q_GETQUOTA:
		dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
		compat_dqblk = addr;
		ret = sys_quotactl(cmd, special, id, dqblk);
		if (ret)
			break;
		/* The u64 members have the same offsets in both layouts,
		 * only the trailing dqb_valid moves */
		if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) ||
			get_user(data, &dqblk->dqb_valid) ||
			put_user(data, &compat_dqblk->dqb_valid))
			ret = -EFAULT;
		break;
	case Q_SETQUOTA:
		dqblk = compat_alloc_user_space(sizeof(struct if_dqblk));
		compat_dqblk = addr;
		ret = -EFAULT;
		if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) ||
			get_user(data, &compat_dqblk->dqb_valid) ||
			put_user(data, &dqblk->dqb_valid))
			break;
		ret = sys_quotactl(cmd, special, id, dqblk);
		break;
	case Q_XGETQSTAT:
		fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat));
		compat_fsqstat = addr;
		ret = sys_quotactl(cmd, special, id, fsqstat);
		if (ret)
			break;
		ret = -EFAULT;
		/* Copying qs_version, qs_flags, qs_pad */
		if (copy_in_user(compat_fsqstat, fsqstat,
			offsetof(struct compat_fs_quota_stat, qs_uquota)))
			break;
		/* Copying qs_uquota */
		if (copy_in_user(&compat_fsqstat->qs_uquota,
			&fsqstat->qs_uquota,
			sizeof(compat_fsqstat->qs_uquota)) ||
			get_user(data, &fsqstat->qs_uquota.qfs_nextents) ||
			put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents))
			break;
		/* Copying qs_gquota */
		if (copy_in_user(&compat_fsqstat->qs_gquota,
			&fsqstat->qs_gquota,
			sizeof(compat_fsqstat->qs_gquota)) ||
			get_user(data, &fsqstat->qs_gquota.qfs_nextents) ||
			put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents))
			break;
		/* Copying the rest */
		if (copy_in_user(&compat_fsqstat->qs_incoredqs,
			&fsqstat->qs_incoredqs,
			sizeof(struct compat_fs_quota_stat) -
			offsetof(struct compat_fs_quota_stat, qs_incoredqs)) ||
			get_user(xdata, &fsqstat->qs_iwarnlimit) ||
			put_user(xdata, &compat_fsqstat->qs_iwarnlimit))
			break;
		ret = 0;
		break;
	default:
		/* Layout-compatible commands pass the buffer through as-is */
		ret = sys_quotactl(cmd, special, id, addr);
	}
	return ret;
}
524#endif
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
new file mode 100644
index 000000000000..f81f4bcfb178
--- /dev/null
+++ b/fs/quota/quota_tree.c
@@ -0,0 +1,651 @@
1/*
2 * vfsv0 quota IO operations on file
3 */
4
5#include <linux/errno.h>
6#include <linux/fs.h>
7#include <linux/mount.h>
8#include <linux/dqblk_v2.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/quotaops.h>
14
15#include <asm/byteorder.h>
16
17#include "quota_tree.h"
18
19MODULE_AUTHOR("Jan Kara");
20MODULE_DESCRIPTION("Quota trie support");
21MODULE_LICENSE("GPL");
22
23#define __QUOTA_QT_PARANOIA
24
25static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
26{
27 unsigned int epb = info->dqi_usable_bs >> 2;
28
29 depth = info->dqi_qtree_depth - depth - 1;
30 while (depth--)
31 id /= epb;
32 return id % epb;
33}
34
35/* Number of entries in one blocks */
36static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
37{
38 return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
39 / info->dqi_entry_size;
40}
41
42static char *getdqbuf(size_t size)
43{
44 char *buf = kmalloc(size, GFP_NOFS);
45 if (!buf)
46 printk(KERN_WARNING
47 "VFS: Not enough memory for quota buffers.\n");
48 return buf;
49}
50
51static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
52{
53 struct super_block *sb = info->dqi_sb;
54
55 memset(buf, 0, info->dqi_usable_bs);
56 return sb->s_op->quota_read(sb, info->dqi_type, buf,
57 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
58}
59
60static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
61{
62 struct super_block *sb = info->dqi_sb;
63
64 return sb->s_op->quota_write(sb, info->dqi_type, buf,
65 info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
66}
67
/*
 * Remove empty block from list and return it.
 * Pops the head of the free-block list if one exists, otherwise grows the
 * quota file by one block.  Returns the block number (>= 0) or -errno.
 */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		/* Reuse the first block on the free list; its header links
		 * to the next free block. */
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	}
	else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	/* Either the free-list head or dqi_blocks changed */
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}
98
/*
 * Insert empty block to the list.
 * @buf holds the block's contents; its header is rewritten to link the block
 * in as the new head of the free-block list.  Returns 0 or -errno.
 */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	/* Only update the in-memory list head after the block hit the disk */
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}
115
/*
 * Remove given block from the list of blocks with free entries.
 * The list is doubly linked through the on-disk block headers; the in-memory
 * head is info->dqi_free_entry.  @buf holds @blk's current contents and is
 * rewritten with cleared links.  Returns 0 or -errno.
 */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		/* Point successor's prev link past the removed block */
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		/* Point predecessor's next link past the removed block */
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		/* Removed block was the list head */
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		printk(KERN_ERR
		       "VFS: Can't write block (%u) with free entries.\n",
		       blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
163
/*
 * Insert given block to the beginning of list with free entries.
 * @buf holds @blk's contents; the block is written first so the list is
 * never observed pointing at a block with stale links.  Returns 0 or -errno.
 */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		/* Back-link the old head to the newly inserted block */
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
197
198/* Is the entry in the block free? */
199int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
200{
201 int i;
202
203 for (i = 0; i < info->dqi_entry_size; i++)
204 if (disk[i])
205 return 0;
206 return 1;
207}
208EXPORT_SYMBOL(qtree_entry_unused);
209
/*
 * Find space for dquot.
 * Picks a data block with a free entry (or allocates a fresh one), claims
 * one entry slot in it and stores the resulting file offset in
 * dquot->dq_off.  Returns the data block number on success, 0 on failure
 * with the error code in *err.
 */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		/* Some data block still has a free slot */
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			printk(KERN_ERR "VFS: find_free_dqentry(): Can't "
			       "remove block (%u) from entry free list.\n",
			       blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	/* Header said there is room, so an all-zero slot must exist */
	if (i == qtree_dqstr_in_blk(info)) {
		printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
		       "but it shouldn't.\n");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
		       "data block %u.\n", blk);
		goto out_buf;
	}
	/* Remember where the structure will live in the quota file */
	dquot->dq_off = (blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
284
/*
 * Insert reference to structure into the trie.
 * Recursively walks/creates tree nodes for dquot->dq_id.  @treeblk is the
 * current node's block (0 = allocate a new one) and is updated in place.
 * On failure a node that was freshly allocated here is returned to the
 * free list.  Returns >= 0 on success, -errno on failure.
 */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		/* Need a new tree node on this level */
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			printk(KERN_ERR "VFS: Can't read tree quota block "
			       "%u.\n", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;	/* This node gains a new child reference */
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			printk(KERN_ERR "VFS: Inserting already present quota "
			       "entry (block %u).\n",
			       le32_to_cpu(ref[get_index(info,
					dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		/* Leaf level: place the dquot in a data block */
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
		/* Record the new child and write this node back */
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		/* Undo the node allocation made above */
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}
341
342/* Wrapper for inserting quota structure into tree */
343static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
344 struct dquot *dquot)
345{
346 int tmp = QT_TREEOFF;
347 return do_insert_tree(info, dquot, &tmp, 0);
348}
349
/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 *
 * Write (and if necessary first allocate) the on-disk entry for @dquot.
 * Returns 0 on success, -errno on failure.
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_mutex */
	if (!dquot->dq_off) {
		/* No entry allocated yet - insert it into the tree first */
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			printk(KERN_ERR "VFS: Error %zd occurred while "
			       "creating quota.\n", ret);
			kfree(ddquot);
			return ret;
		}
	}
	/* Snapshot in-core values under dq_data_lock, then write lock-free */
	spin_lock(&dq_data_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dq_data_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		/* Short write counts as failure too */
		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
		       sb->s_id);
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats.writes++;
	kfree(ddquot);

	return ret;
}
392EXPORT_SYMBOL(qtree_write_dquot);
393
394/* Free dquot entry in data block */
395static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
396 uint blk)
397{
398 struct qt_disk_dqdbheader *dh;
399 char *buf = getdqbuf(info->dqi_usable_bs);
400 int ret = 0;
401
402 if (!buf)
403 return -ENOMEM;
404 if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
405 printk(KERN_ERR "VFS: Quota structure has offset to other "
406 "block (%u) than it should (%u).\n", blk,
407 (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
408 goto out_buf;
409 }
410 ret = read_blk(info, blk, buf);
411 if (ret < 0) {
412 printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
413 goto out_buf;
414 }
415 dh = (struct qt_disk_dqdbheader *)buf;
416 le16_add_cpu(&dh->dqdh_entries, -1);
417 if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */
418 ret = remove_free_dqentry(info, buf, blk);
419 if (ret >= 0)
420 ret = put_free_dqblk(info, buf, blk);
421 if (ret < 0) {
422 printk(KERN_ERR "VFS: Can't move quota data block (%u) "
423 "to free list.\n", blk);
424 goto out_buf;
425 }
426 } else {
427 memset(buf +
428 (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
429 0, info->dqi_entry_size);
430 if (le16_to_cpu(dh->dqdh_entries) ==
431 qtree_dqstr_in_blk(info) - 1) {
432 /* Insert will write block itself */
433 ret = insert_free_dqentry(info, buf, blk);
434 if (ret < 0) {
435 printk(KERN_ERR "VFS: Can't insert quota data "
436 "block (%u) to free entry list.\n", blk);
437 goto out_buf;
438 }
439 } else {
440 ret = write_blk(info, blk, buf);
441 if (ret < 0) {
442 printk(KERN_ERR "VFS: Can't write quota data "
443 "block %u\n", blk);
444 goto out_buf;
445 }
446 }
447 }
448 dquot->dq_off = 0; /* Quota is now unattached */
449out_buf:
450 kfree(buf);
451 return ret;
452}
453
/*
 * Remove reference to dquot from tree.
 * Recursively descends to the leaf, frees the data entry, then prunes tree
 * nodes that become empty on the way back up.  *blk is set to 0 when the
 * node it referred to was freed (the root at QT_TREEOFF is never freed).
 * Returns >= 0 on success, -errno on failure.
 */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;
		/* Child is gone - drop our reference to it */
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				printk(KERN_ERR "VFS: Can't write quota tree "
				  "block %u.\n", *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}
499
500/* Delete dquot from tree */
501int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
502{
503 uint tmp = QT_TREEOFF;
504
505 if (!dquot->dq_off) /* Even not allocated? */
506 return 0;
507 return remove_tree(info, dquot, &tmp, 0);
508}
509EXPORT_SYMBOL(qtree_delete_dquot);
510
/*
 * Find entry in block.
 * Scans data block @blk for the entry whose id matches @dquot (using the
 * format-specific is_id callback).  Returns the entry's file offset, or
 * -errno (-EIO when the referenced entry is missing).
 */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		/* Tree pointed here but the id is absent - corruption */
		printk(KERN_ERR "VFS: Quota for id %u referenced "
		  "but not present.\n", dquot->dq_id);
		ret = -EIO;
		goto out_buf;
	} else {
		ret = (blk << info->dqi_blocksize_bits) + sizeof(struct
		  qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}
546
/*
 * Find entry for given id in the tree.
 * Recursively descends from node @blk at level @depth.  Returns the entry's
 * file offset, 0 when the id has no entry, or -errno on failure.
 */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}
574
575/* Find entry for given id in the tree - wrapper function */
576static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
577 struct dquot *dquot)
578{
579 return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
580}
581
/*
 * Read the on-disk entry for @dquot into its in-core dq_dqb.
 * A missing entry is not an error: the dquot is marked DQ_FAKE_B and
 * zeroed.  Returns 0 on success (or "not present"), -errno on failure.
 */
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				printk(KERN_ERR "VFS: Can't read quota "
					"structure for id %u.\n", dquot->dq_id);
			dquot->dq_off = 0;
			/* No limits stored on disk for this id */
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		printk(KERN_ERR "VFS: Error while reading quota "
			"structure for id %u.\n", dquot->dq_id);
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dq_data_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	/* Entry with no limits set at all behaves as a fake (tracking) one */
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	kfree(ddquot);
out:
	dqstats.reads++;
	return ret;
}
640EXPORT_SYMBOL(qtree_read_dquot);
641
642/* Check whether dquot should not be deleted. We know we are
643 * the only one operating on dquot (thanks to dq_lock) */
644int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
645{
646 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
647 !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
648 return qtree_delete_dquot(info, dquot);
649 return 0;
650}
651EXPORT_SYMBOL(qtree_release_dquot);
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
new file mode 100644
index 000000000000..a1ab8db81a51
--- /dev/null
+++ b/fs/quota/quota_tree.h
@@ -0,0 +1,25 @@
1/*
2 * Definitions of structures for vfsv0 quota format
3 */
4
5#ifndef _LINUX_QUOTA_TREE_H
6#define _LINUX_QUOTA_TREE_H
7
8#include <linux/types.h>
9#include <linux/quota.h>
10
/*
 * Structure of header of block with quota structures. It is padded to 16 bytes so
 * there will be space for exactly 21 quota-entries in a block
 */
struct qt_disk_dqdbheader {
	__le32 dqdh_next_free;	/* Number of next block with free entry */
	__le32 dqdh_prev_free;	/* Number of previous block with free entry */
	__le16 dqdh_entries;	/* Number of valid entries in block */
	__le16 dqdh_pad1;	/* Padding to 16 bytes */
	__le32 dqdh_pad2;
};
22
23#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
24
25#endif /* _LINUX_QUOTAIO_TREE_H */
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
new file mode 100644
index 000000000000..0edcf42b1778
--- /dev/null
+++ b/fs/quota/quota_v1.c
@@ -0,0 +1,234 @@
1#include <linux/errno.h>
2#include <linux/fs.h>
3#include <linux/quota.h>
4#include <linux/quotaops.h>
5#include <linux/dqblk_v1.h>
6#include <linux/kernel.h>
7#include <linux/init.h>
8#include <linux/module.h>
9
10#include <asm/byteorder.h>
11
12#include "quotaio_v1.h"
13
14MODULE_AUTHOR("Jan Kara");
15MODULE_DESCRIPTION("Old quota format support");
16MODULE_LICENSE("GPL");
17
18#define QUOTABLOCK_BITS 10
19#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
20
21static inline qsize_t v1_stoqb(qsize_t space)
22{
23 return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
24}
25
26static inline qsize_t v1_qbtos(qsize_t blocks)
27{
28 return blocks << QUOTABLOCK_BITS;
29}
30
31static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d)
32{
33 m->dqb_ihardlimit = d->dqb_ihardlimit;
34 m->dqb_isoftlimit = d->dqb_isoftlimit;
35 m->dqb_curinodes = d->dqb_curinodes;
36 m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit);
37 m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit);
38 m->dqb_curspace = v1_qbtos(d->dqb_curblocks);
39 m->dqb_itime = d->dqb_itime;
40 m->dqb_btime = d->dqb_btime;
41}
42
43static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m)
44{
45 d->dqb_ihardlimit = m->dqb_ihardlimit;
46 d->dqb_isoftlimit = m->dqb_isoftlimit;
47 d->dqb_curinodes = m->dqb_curinodes;
48 d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit);
49 d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit);
50 d->dqb_curblocks = v1_stoqb(m->dqb_curspace);
51 d->dqb_itime = m->dqb_itime;
52 d->dqb_btime = m->dqb_btime;
53}
54
/*
 * Read the v1 on-disk dquot for dquot->dq_id.  A failed/short read leaves
 * the pre-zeroed structure in place, which then reads as "no limits" and
 * marks the dquot fake.  Returns 0, or -EINVAL when quota is not enabled.
 */
static int v1_read_dqblk(struct dquot *dquot)
{
	int type = dquot->dq_type;
	struct v1_disk_dqblk dqblk;

	if (!sb_dqopt(dquot->dq_sb)->files[type])
		return -EINVAL;

	/* Set structure to 0s in case read fails/is after end of file */
	memset(&dqblk, 0, sizeof(struct v1_disk_dqblk));
	dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk,
			sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id));

	v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk);
	/* All limits zero -> dquot only tracks usage, enforce nothing */
	if (dquot->dq_dqb.dqb_bhardlimit == 0 &&
	    dquot->dq_dqb.dqb_bsoftlimit == 0 &&
	    dquot->dq_dqb.dqb_ihardlimit == 0 &&
	    dquot->dq_dqb.dqb_isoftlimit == 0)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	dqstats.reads++;

	return 0;
}
78
/*
 * Write the in-core dquot back to the v1 quota file.
 * The entry for id 0 doubles as the file's info record: its time fields
 * carry the per-type grace periods rather than per-user timers.
 * Returns 0 on success, -errno on failure.
 */
static int v1_commit_dqblk(struct dquot *dquot)
{
	short type = dquot->dq_type;
	ssize_t ret;
	struct v1_disk_dqblk dqblk;

	v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb);
	if (dquot->dq_id == 0) {
		/* Root entry stores the grace times, not its own timers */
		dqblk.dqb_btime =
			sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace;
		dqblk.dqb_itime =
			sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace;
	}
	ret = 0;
	if (sb_dqopt(dquot->dq_sb)->files[type])
		ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type,
			(char *)&dqblk, sizeof(struct v1_disk_dqblk),
			v1_dqoff(dquot->dq_id));
	if (ret != sizeof(struct v1_disk_dqblk)) {
		printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
			dquot->dq_sb->s_id);
		if (ret >= 0)
			ret = -EIO;
		goto out;
	}
	ret = 0;

out:
	dqstats.writes++;

	return ret;
}
111
/* Magics of new quota format.
 * Duplicated here (from the v2 format headers) only so the v1 code can
 * recognize - and refuse - a file that is actually in the new format. */
#define V2_INITQMAGICS {\
	0xd9c01f11,	/* USRQUOTA */\
	0xd9c01927	/* GRPQUOTA */\
}

/* Header of new quota format */
struct v2_disk_dqheader {
	__le32 dqh_magic;	/* Magic number identifying file */
	__le32 dqh_version;	/* File version */
};
123
/*
 * Heuristically decide whether the quota file is in the old (v1) format.
 * Returns 1 when the file looks like v1, 0 otherwise.
 */
static int v1_check_quota_file(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	ulong blocks;
	size_t off;
	struct v2_disk_dqheader dqhead;
	ssize_t size;
	loff_t isize;
	static const uint quota_magics[] = V2_INITQMAGICS;

	isize = i_size_read(inode);
	if (!isize)
		return 0;
	blocks = isize >> BLOCK_SIZE_BITS;
	off = isize & (BLOCK_SIZE - 1);
	/* File size must be a multiple of the v1 entry size.  The check is
	 * computed as (blocks mod S) * BLOCK_SIZE + off, which is congruent
	 * to the full file size modulo S without overflowing ulong. */
	if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) %
	    sizeof(struct v1_disk_dqblk))
		return 0;
	/* Doublecheck whether we didn't get file with new format - with old
	 * quotactl() this could happen */
	size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
				    sizeof(struct v2_disk_dqheader), 0);
	if (size != sizeof(struct v2_disk_dqheader))
		return 1;	/* Probably not new format */
	if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type])
		return 1;	/* Definitely not new format */
	printk(KERN_INFO
	       "VFS: %s: Refusing to turn on old quota format on given file."
	       " It probably contains newer quota format.\n", sb->s_id);
	return 0;		/* Seems like a new format file -> refuse it */
}
155
/*
 * Load per-type quota info from the v1 file.  The grace periods are read
 * from the time fields of the id-0 entry (see v1_commit_dqblk); zero
 * values fall back to the compile-time defaults.  Returns 0 or -errno.
 */
static int v1_read_file_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct v1_disk_dqblk dqblk;
	int ret;

	ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				sizeof(struct v1_disk_dqblk), v1_dqoff(0));
	if (ret != sizeof(struct v1_disk_dqblk)) {
		if (ret >= 0)
			ret = -EIO;
		goto out;
	}
	ret = 0;
	/* limits are stored as unsigned 32-bit data */
	dqopt->info[type].dqi_maxblimit = 0xffffffff;
	dqopt->info[type].dqi_maxilimit = 0xffffffff;
	dqopt->info[type].dqi_igrace =
			dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
	dqopt->info[type].dqi_bgrace =
			dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
out:
	return ret;
}
180
/*
 * Store per-type quota info into the v1 file: read-modify-write the id-0
 * entry so only its time fields (the grace periods) change.
 * Returns 0 or -errno.
 */
static int v1_write_file_info(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct v1_disk_dqblk dqblk;
	int ret;

	dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY;
	ret = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				sizeof(struct v1_disk_dqblk), v1_dqoff(0));
	if (ret != sizeof(struct v1_disk_dqblk)) {
		if (ret >= 0)
			ret = -EIO;
		goto out;
	}
	dqblk.dqb_itime = dqopt->info[type].dqi_igrace;
	dqblk.dqb_btime = dqopt->info[type].dqi_bgrace;
	ret = sb->s_op->quota_write(sb, type, (char *)&dqblk,
	      sizeof(struct v1_disk_dqblk), v1_dqoff(0));
	if (ret == sizeof(struct v1_disk_dqblk))
		ret = 0;
	else if (ret > 0)
		ret = -EIO;
	/* NOTE(review): a quota_write return of 0 falls through as success
	 * here ("> 0", not ">= 0") - confirm that is intended. */
out:
	return ret;
}
206
/* Callbacks implementing the old (v1) quota format */
static struct quota_format_ops v1_format_ops = {
	.check_quota_file	= v1_check_quota_file,
	.read_file_info		= v1_read_file_info,
	.write_file_info	= v1_write_file_info,
	.free_file_info		= NULL,		/* v1 keeps no private info */
	.read_dqblk		= v1_read_dqblk,
	.commit_dqblk		= v1_commit_dqblk,
};
215
/* Registration record tying format id QFMT_VFS_OLD to the v1 callbacks */
static struct quota_format_type v1_quota_format = {
	.qf_fmt_id	= QFMT_VFS_OLD,
	.qf_ops		= &v1_format_ops,
	.qf_owner	= THIS_MODULE
};
221
222static int __init init_v1_quota_format(void)
223{
224 return register_quota_format(&v1_quota_format);
225}
226
227static void __exit exit_v1_quota_format(void)
228{
229 unregister_quota_format(&v1_quota_format);
230}
231
232module_init(init_v1_quota_format);
233module_exit(exit_v1_quota_format);
234
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
new file mode 100644
index 000000000000..a5475fb1ae44
--- /dev/null
+++ b/fs/quota/quota_v2.c
@@ -0,0 +1,237 @@
1/*
2 * vfsv0 quota IO operations on file
3 */
4
5#include <linux/errno.h>
6#include <linux/fs.h>
7#include <linux/mount.h>
8#include <linux/dqblk_v2.h>
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/quotaops.h>
14
15#include <asm/byteorder.h>
16
17#include "quota_tree.h"
18#include "quotaio_v2.h"
19
20MODULE_AUTHOR("Jan Kara");
21MODULE_DESCRIPTION("Quota format v2 support");
22MODULE_LICENSE("GPL");
23
24#define __QUOTA_V2_PARANOIA
25
static void v2_mem2diskdqb(void *dp, struct dquot *dquot);
static void v2_disk2memdqb(struct dquot *dquot, void *dp);
static int v2_is_id(void *dp, struct dquot *dquot);

/* Format-specific callbacks plugged into the generic quota-tree code */
static struct qtree_fmt_operations v2_qtree_ops = {
	.mem2disk_dqblk = v2_mem2diskdqb,
	.disk2mem_dqblk = v2_disk2memdqb,
	.is_id = v2_is_id,
};
35
36#define QUOTABLOCK_BITS 10
37#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
38
39static inline qsize_t v2_stoqb(qsize_t space)
40{
41 return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS;
42}
43
44static inline qsize_t v2_qbtos(qsize_t blocks)
45{
46 return blocks << QUOTABLOCK_BITS;
47}
48
49/* Check whether given file is really vfsv0 quotafile */
50static int v2_check_quota_file(struct super_block *sb, int type)
51{
52 struct v2_disk_dqheader dqhead;
53 ssize_t size;
54 static const uint quota_magics[] = V2_INITQMAGICS;
55 static const uint quota_versions[] = V2_INITQVERSIONS;
56
57 size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
58 sizeof(struct v2_disk_dqheader), 0);
59 if (size != sizeof(struct v2_disk_dqheader)) {
60 printk("quota_v2: failed read expected=%zd got=%zd\n",
61 sizeof(struct v2_disk_dqheader), size);
62 return 0;
63 }
64 if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
65 le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
66 return 0;
67 return 1;
68}
69
70/* Read information header from quota file */
71static int v2_read_file_info(struct super_block *sb, int type)
72{
73 struct v2_disk_dqinfo dinfo;
74 struct mem_dqinfo *info = sb_dqinfo(sb, type);
75 struct qtree_mem_dqinfo *qinfo;
76 ssize_t size;
77
78 size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
79 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
80 if (size != sizeof(struct v2_disk_dqinfo)) {
81 printk(KERN_WARNING "Can't read info structure on device %s.\n",
82 sb->s_id);
83 return -1;
84 }
85 info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
86 if (!info->dqi_priv) {
87 printk(KERN_WARNING
88 "Not enough memory for quota information structure.\n");
89 return -1;
90 }
91 qinfo = info->dqi_priv;
92 /* limits are stored as unsigned 32-bit data */
93 info->dqi_maxblimit = 0xffffffff;
94 info->dqi_maxilimit = 0xffffffff;
95 info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
96 info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
97 info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
98 qinfo->dqi_sb = sb;
99 qinfo->dqi_type = type;
100 qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
101 qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
102 qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
103 qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
104 qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
105 qinfo->dqi_qtree_depth = qtree_depth(qinfo);
106 qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk);
107 qinfo->dqi_ops = &v2_qtree_ops;
108 return 0;
109}
110
111/* Write information header to quota file */
112static int v2_write_file_info(struct super_block *sb, int type)
113{
114 struct v2_disk_dqinfo dinfo;
115 struct mem_dqinfo *info = sb_dqinfo(sb, type);
116 struct qtree_mem_dqinfo *qinfo = info->dqi_priv;
117 ssize_t size;
118
119 spin_lock(&dq_data_lock);
120 info->dqi_flags &= ~DQF_INFO_DIRTY;
121 dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
122 dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
123 dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
124 spin_unlock(&dq_data_lock);
125 dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
126 dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
127 dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry);
128 size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
129 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
130 if (size != sizeof(struct v2_disk_dqinfo)) {
131 printk(KERN_WARNING "Can't write info structure on device %s.\n",
132 sb->s_id);
133 return -1;
134 }
135 return 0;
136}
137
/*
 * Convert an on-disk v2 dquot into the in-core representation.
 * Block limits are stored in quota blocks on disk but in bytes in core.
 */
static void v2_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct v2_disk_dqblk *d = dp, empty;
	struct mem_dqblk *m = &dquot->dq_dqb;

	m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
	m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit);
	m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes);
	m->dqb_itime = le64_to_cpu(d->dqb_itime);
	m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit));
	m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit));
	m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	m->dqb_btime = le64_to_cpu(d->dqb_btime);
	/* We need to escape back all-zero structure */
	/* (an all-zero entry means "unused slot", so a real entry that is
	 * otherwise zero is stored with dqb_itime == 1 - undo that here) */
	memset(&empty, 0, sizeof(struct v2_disk_dqblk));
	empty.dqb_itime = cpu_to_le64(1);
	if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk)))
		m->dqb_itime = 0;
}
157
/*
 * Convert the in-core dquot into the on-disk v2 representation.
 * Since an all-zero entry marks a free slot, an entry that would be
 * entirely zero gets dqb_itime forced to 1 (undone by v2_disk2memdqb).
 */
static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct v2_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;
	struct qtree_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit));
	d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit));
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_id = cpu_to_le32(dquot->dq_id);
	if (qtree_entry_unused(info, dp))
		d->dqb_itime = cpu_to_le64(1);
}
177
178static int v2_is_id(void *dp, struct dquot *dquot)
179{
180 struct v2_disk_dqblk *d = dp;
181 struct qtree_mem_dqinfo *info =
182 sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
183
184 if (qtree_entry_unused(info, dp))
185 return 0;
186 return le32_to_cpu(d->dqb_id) == dquot->dq_id;
187}
188
189static int v2_read_dquot(struct dquot *dquot)
190{
191 return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
192}
193
194static int v2_write_dquot(struct dquot *dquot)
195{
196 return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
197}
198
199static int v2_release_dquot(struct dquot *dquot)
200{
201 return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot);
202}
203
204static int v2_free_file_info(struct super_block *sb, int type)
205{
206 kfree(sb_dqinfo(sb, type)->dqi_priv);
207 return 0;
208}
209
/* Callbacks implementing the vfsv0 (v2) quota format */
static struct quota_format_ops v2_format_ops = {
	.check_quota_file	= v2_check_quota_file,
	.read_file_info		= v2_read_file_info,
	.write_file_info	= v2_write_file_info,
	.free_file_info		= v2_free_file_info,
	.read_dqblk		= v2_read_dquot,
	.commit_dqblk		= v2_write_dquot,
	.release_dqblk		= v2_release_dquot,
};
219
/* Registration record tying format id QFMT_VFS_V0 to the v2 callbacks */
static struct quota_format_type v2_quota_format = {
	.qf_fmt_id	= QFMT_VFS_V0,
	.qf_ops		= &v2_format_ops,
	.qf_owner	= THIS_MODULE
};
225
226static int __init init_v2_quota_format(void)
227{
228 return register_quota_format(&v2_quota_format);
229}
230
231static void __exit exit_v2_quota_format(void)
232{
233 unregister_quota_format(&v2_quota_format);
234}
235
236module_init(init_v2_quota_format);
237module_exit(exit_v2_quota_format);
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h
new file mode 100644
index 000000000000..746654b5de70
--- /dev/null
+++ b/fs/quota/quotaio_v1.h
@@ -0,0 +1,33 @@
#ifndef _LINUX_QUOTAIO_V1_H
#define _LINUX_QUOTAIO_V1_H

#include <linux/types.h>

/*
 * The following constants define the amount of time given a user
 * before the soft limits are treated as hard limits (usually resulting
 * in an allocation failure). The timer is started when the user crosses
 * their soft limit, it is reset when they go below their soft limit.
 */
#define MAX_IQ_TIME 604800	/* (7*24*60*60) 1 week */
#define MAX_DQ_TIME 604800	/* (7*24*60*60) 1 week */

/*
 * The following structure defines the format of the disk quota file
 * (as it appears on disk) - the file is an array of these structures
 * indexed by user or group number.
 *
 * NOTE(review): dqb_btime/dqb_itime use time_t, so the on-disk layout
 * varies with the architecture's time_t size — presumably intentional
 * to stay compatible with the historical v1 quota file format; confirm.
 */
struct v1_disk_dqblk {
	__u32 dqb_bhardlimit;	/* absolute limit on disk blks alloc */
	__u32 dqb_bsoftlimit;	/* preferred limit on disk blks */
	__u32 dqb_curblocks;	/* current block count */
	__u32 dqb_ihardlimit;	/* absolute limit on allocated inodes */
	__u32 dqb_isoftlimit;	/* preferred inode limit */
	__u32 dqb_curinodes;	/* current # allocated inodes */
	time_t dqb_btime;	/* time limit for excessive disk use */
	time_t dqb_itime;	/* time limit for excessive inode use */
};

/* Byte offset of the quota record for id UID in the v1 quota file. */
#define v1_dqoff(UID)      ((loff_t)((UID) * sizeof (struct v1_disk_dqblk)))

#endif	/* _LINUX_QUOTAIO_V1_H */
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
new file mode 100644
index 000000000000..530fe580685c
--- /dev/null
+++ b/fs/quota/quotaio_v2.h
@@ -0,0 +1,60 @@
/*
 * Definitions of structures for vfsv0 quota format
 */

#ifndef _LINUX_QUOTAIO_V2_H
#define _LINUX_QUOTAIO_V2_H

#include <linux/types.h>
#include <linux/quota.h>

/*
 * Definitions of magics and versions of current quota files,
 * one entry per quota type (indexed by USRQUOTA/GRPQUOTA).
 */
#define V2_INITQMAGICS {\
	0xd9c01f11,	/* USRQUOTA */\
	0xd9c01927	/* GRPQUOTA */\
}

#define V2_INITQVERSIONS {\
	0,		/* USRQUOTA */\
	0		/* GRPQUOTA */\
}

/* First generic header — identifies the file as a v2 quota file. */
struct v2_disk_dqheader {
	__le32 dqh_magic;	/* Magic number identifying file */
	__le32 dqh_version;	/* File version */
};

/*
 * The following structure defines the format of the disk quota file
 * (as it appears on disk) - the file is a radix tree whose leaves point
 * to blocks of these structures. All fields are stored little-endian.
 */
struct v2_disk_dqblk {
	__le32 dqb_id;		/* id this quota applies to */
	__le32 dqb_ihardlimit;	/* absolute limit on allocated inodes */
	__le32 dqb_isoftlimit;	/* preferred inode limit */
	__le32 dqb_curinodes;	/* current # allocated inodes */
	__le32 dqb_bhardlimit;	/* absolute limit on disk space (in QUOTABLOCK_SIZE) */
	__le32 dqb_bsoftlimit;	/* preferred limit on disk space (in QUOTABLOCK_SIZE) */
	__le64 dqb_curspace;	/* current space occupied (in bytes) */
	__le64 dqb_btime;	/* time limit for excessive disk use */
	__le64 dqb_itime;	/* time limit for excessive inode use */
};

/* Header with type and version specific information */
struct v2_disk_dqinfo {
	__le32 dqi_bgrace;	/* Time before block soft limit becomes hard limit */
	__le32 dqi_igrace;	/* Time before inode soft limit becomes hard limit */
	__le32 dqi_flags;	/* Flags for quotafile (DQF_*) */
	__le32 dqi_blocks;	/* Number of blocks in file */
	__le32 dqi_free_blk;	/* Number of first free block in the list */
	__le32 dqi_free_entry;	/* Number of block with at least one free entry */
};

#define V2_DQINFOOFF	sizeof(struct v2_disk_dqheader)	/* Offset of info header in file */
#define V2_DQBLKSIZE_BITS	10			/* Size of leaf block in tree */

#endif /* _LINUX_QUOTAIO_V2_H */