Diffstat (limited to 'fs/quota')
-rw-r--r-- | fs/quota/Kconfig      |   59
-rw-r--r-- | fs/quota/Makefile     |   14
-rw-r--r-- | fs/quota/dquot.c      | 2564
-rw-r--r-- | fs/quota/quota.c      |  513
-rw-r--r-- | fs/quota/quota_tree.c |  645
-rw-r--r-- | fs/quota/quota_tree.h |   25
-rw-r--r-- | fs/quota/quota_v1.c   |  218
-rw-r--r-- | fs/quota/quota_v2.c   |  236
-rw-r--r-- | fs/quota/quotaio_v1.h |   33
-rw-r--r-- | fs/quota/quotaio_v2.h |   60
10 files changed, 4367 insertions, 0 deletions
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
new file mode 100644
index 000000000000..d9750d86fde2
--- /dev/null
+++ b/fs/quota/Kconfig
@@ -0,0 +1,59 @@
1 | # | ||
2 | # Quota configuration | ||
3 | # | ||
4 | |||
5 | config QUOTA | ||
6 | bool "Quota support" | ||
7 | help | ||
8 | If you say Y here, you will be able to set per user limits for disk | ||
9 | usage (also called disk quotas). Currently, it works for the | ||
10 | ext2, ext3, and reiserfs file systems. ext3 also supports journalled | ||
11 | quotas for which you don't need to run quotacheck(8) after an unclean | ||
12 | shutdown. | ||
13 | For further details, read the Quota mini-HOWTO, available from | ||
14 | <http://www.tldp.org/docs.html#howto>, or the documentation provided | ||
15 | with the quota tools. Quota support is probably only useful for | ||
16 | multi-user systems. If unsure, say N. | ||
17 | |||
18 | config QUOTA_NETLINK_INTERFACE | ||
19 | bool "Report quota messages through netlink interface" | ||
20 | depends on QUOTA && NET | ||
21 | help | ||
22 | If you say Y here, quota warnings (about exceeding softlimit, reaching | ||
23 | hardlimit, etc.) will be reported through netlink interface. If unsure, | ||
24 | say Y. | ||
25 | |||
26 | config PRINT_QUOTA_WARNING | ||
27 | bool "Print quota warnings to console (OBSOLETE)" | ||
28 | depends on QUOTA | ||
29 | default y | ||
30 | help | ||
31 | If you say Y here, quota warnings (about exceeding softlimit, reaching | ||
32 | hardlimit, etc.) will be printed to the process' controlling terminal. | ||
33 | Note that this behavior is currently deprecated and may go away in the | ||
34 | future. Please use notification via the netlink socket instead. | ||
35 | |||
36 | # Generic support for tree structured quota files. Selected when needed. | ||
37 | config QUOTA_TREE | ||
38 | tristate | ||
39 | |||
40 | config QFMT_V1 | ||
41 | tristate "Old quota format support" | ||
42 | depends on QUOTA | ||
43 | help | ||
44 | This quota format was (is) used by kernels earlier than 2.4.22. If | ||
45 | you have quota working and you don't want to convert to the new quota | ||
46 | format, say Y here. | ||
47 | |||
48 | config QFMT_V2 | ||
49 | tristate "Quota format v2 support" | ||
50 | depends on QUOTA | ||
51 | select QUOTA_TREE | ||
52 | help | ||
53 | This quota format allows using quotas with 32-bit UIDs/GIDs. If you | ||
54 | need this functionality say Y here. | ||
55 | |||
56 | config QUOTACTL | ||
57 | bool | ||
58 | depends on XFS_QUOTA || QUOTA | ||
59 | default y | ||
diff --git a/fs/quota/Makefile b/fs/quota/Makefile
new file mode 100644
index 000000000000..385a0831cc99
--- /dev/null
+++ b/fs/quota/Makefile
@@ -0,0 +1,14 @@
1 | # | ||
2 | # Makefile for the Linux filesystems. | ||
3 | # | ||
4 | # 14 Sep 2000, Christoph Hellwig <hch@infradead.org> | ||
5 | # Rewritten to use lists instead of if-statements. | ||
6 | # | ||
7 | |||
8 | obj-y := | ||
9 | |||
10 | obj-$(CONFIG_QUOTA) += dquot.o | ||
11 | obj-$(CONFIG_QFMT_V1) += quota_v1.o | ||
12 | obj-$(CONFIG_QFMT_V2) += quota_v2.o | ||
13 | obj-$(CONFIG_QUOTA_TREE) += quota_tree.o | ||
14 | obj-$(CONFIG_QUOTACTL) += quota.o | ||
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644
index 000000000000..28aa14667602
--- /dev/null
+++ b/fs/quota/dquot.c
@@ -0,0 +1,2564 @@
1 | /* | ||
2 | * Implementation of the diskquota system for the LINUX operating system. QUOTA | ||
3 | * is implemented using the BSD system call interface as the means of | ||
4 | * communication with the user level. This file contains the generic routines | ||
5 | * called by the different filesystems on allocation of an inode or block. | ||
6 | * These routines take care of the administration needed to have a consistent | ||
7 | * diskquota tracking system. The ideas of both user and group quotas are based | ||
8 | * on the Melbourne quota system as used on BSD derived systems. The internal | ||
9 | * implementation is based on one of the several variants of the LINUX | ||
10 | * inode-subsystem with added complexity of the diskquota system. | ||
11 | * | ||
12 | * Author: Marco van Wieringen <mvw@planets.elm.net> | ||
13 | * | ||
14 | * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96 | ||
15 | * | ||
16 | * Revised list management to avoid races | ||
17 | * -- Bill Hawes, <whawes@star.net>, 9/98 | ||
18 | * | ||
19 | * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). | ||
20 | * As the consequence the locking was moved from dquot_decr_...(), | ||
21 | * dquot_incr_...() to calling functions. | ||
22 | * invalidate_dquots() now writes modified dquots. | ||
23 | * Serialized quota_off() and quota_on() for mount point. | ||
24 | * Fixed a few bugs in grow_dquots(). | ||
25 | * Fixed deadlock in write_dquot() - we no longer account quotas on | ||
26 | * quota files | ||
27 | * remove_dquot_ref() moved to inode.c - it now traverses through inodes | ||
28 | * add_dquot_ref() restarts after blocking | ||
29 | * Added check for bogus uid and fixed check for group in quotactl. | ||
30 | * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99 | ||
31 | * | ||
32 | * Used struct list_head instead of own list struct | ||
33 | * Invalidation of referenced dquots is no longer possible | ||
34 | * Improved free_dquots list management | ||
35 | * Quota and i_blocks are now updated in one place to avoid races | ||
36 | * Warnings are now delayed so we won't block in critical section | ||
37 | * Write updated not to require dquot lock | ||
38 | * Jan Kara, <jack@suse.cz>, 9/2000 | ||
39 | * | ||
40 | * Added dynamic quota structure allocation | ||
41 | * Jan Kara <jack@suse.cz> 12/2000 | ||
42 | * | ||
43 | * Rewritten quota interface. Implemented new quota format and | ||
44 | * formats registering. | ||
45 | * Jan Kara, <jack@suse.cz>, 2001,2002 | ||
46 | * | ||
47 | * New SMP locking. | ||
48 | * Jan Kara, <jack@suse.cz>, 10/2002 | ||
49 | * | ||
50 | * Added journalled quota support, fix lock inversion problems | ||
51 | * Jan Kara, <jack@suse.cz>, 2003,2004 | ||
52 | * | ||
53 | * (C) Copyright 1994 - 1997 Marco van Wieringen | ||
54 | */ | ||
55 | |||
56 | #include <linux/errno.h> | ||
57 | #include <linux/kernel.h> | ||
58 | #include <linux/fs.h> | ||
59 | #include <linux/mount.h> | ||
60 | #include <linux/mm.h> | ||
61 | #include <linux/time.h> | ||
62 | #include <linux/types.h> | ||
63 | #include <linux/string.h> | ||
64 | #include <linux/fcntl.h> | ||
65 | #include <linux/stat.h> | ||
66 | #include <linux/tty.h> | ||
67 | #include <linux/file.h> | ||
68 | #include <linux/slab.h> | ||
69 | #include <linux/sysctl.h> | ||
70 | #include <linux/init.h> | ||
71 | #include <linux/module.h> | ||
72 | #include <linux/proc_fs.h> | ||
73 | #include <linux/security.h> | ||
74 | #include <linux/kmod.h> | ||
75 | #include <linux/namei.h> | ||
76 | #include <linux/buffer_head.h> | ||
77 | #include <linux/capability.h> | ||
78 | #include <linux/quotaops.h> | ||
79 | #include <linux/writeback.h> /* for inode_lock, oddly enough.. */ | ||
80 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
81 | #include <net/netlink.h> | ||
82 | #include <net/genetlink.h> | ||
83 | #endif | ||
84 | |||
85 | #include <asm/uaccess.h> | ||
86 | |||
87 | #define __DQUOT_PARANOIA | ||
88 | |||
89 | /* | ||
90 | * There are three quota SMP locks. dq_list_lock protects all lists with quotas | ||
91 | * and quota formats, and the dqstats structure containing statistics about the | ||
92 | * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures, | ||
93 | * and also guards consistency of dquot->dq_dqb with inode->i_blocks and i_bytes. | ||
94 | * The i_blocks and i_bytes updates themselves are guarded by i_lock, acquired directly | ||
95 | * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects | ||
96 | * modifications of quota state (on quotaon and quotaoff) and readers who care | ||
97 | * about latest values take it as well. | ||
98 | * | ||
99 | * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, | ||
100 | * dq_list_lock > dq_state_lock | ||
101 | * | ||
102 | * Note that some things (e.g. the sb pointer, type, id) don't change during | ||
103 | * the life of the dquot structure and so needn't be protected by a lock. | ||
104 | * | ||
105 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If | ||
106 | * operation is just reading pointers from inode (or not using them at all) the | ||
107 | * read lock is enough. If pointers are altered function must hold write lock | ||
108 | * (these locking rules also apply for S_NOQUOTA flag in the inode - note that | ||
109 | * for altering the flag i_mutex is also needed). | ||
110 | * | ||
111 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced | ||
112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). | ||
113 | * Currently dquot is locked only when it is being read to memory (or space for | ||
114 | * it is being allocated) on the first dqget() and when it is being released on | ||
115 | * the last dqput(). The allocation and release operations are serialized by | ||
116 | * the dq_lock and by checking the use count in dquot_release(). Write | ||
117 | * operations on dquots don't hold dq_lock as they copy data under dq_data_lock | ||
118 | * spinlock to internal buffers before writing. | ||
119 | * | ||
120 | * Lock ordering (including related VFS locks) is the following: | ||
121 | * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > | ||
122 | * dqio_mutex | ||
123 | * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > | ||
124 | * dqptr_sem. But a filesystem has to reckon with the fact that functions such as | ||
125 | * dquot_alloc_space() acquire dqptr_sem and they usually have to be called | ||
126 | * from inside a transaction to keep filesystem consistency after a crash. Also | ||
127 | * filesystems usually want to do some IO on dquot from ->mark_dirty which is | ||
128 | * called with dqptr_sem held. | ||
129 | * i_mutex on quota files is special (it's below dqio_mutex) | ||
130 | */ | ||
131 | |||
132 | static DEFINE_SPINLOCK(dq_list_lock); | ||
133 | static DEFINE_SPINLOCK(dq_state_lock); | ||
134 | DEFINE_SPINLOCK(dq_data_lock); | ||
135 | EXPORT_SYMBOL(dq_data_lock); | ||
136 | |||
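To make the lock ordering documented above concrete, here is a minimal illustrative sketch (an editor's example, not part of this patch): a hypothetical helper that nests dq_list_lock outside dq_state_lock, the same pattern dqget() uses further down in this file; everything apart from the two locks and sb_has_quota_active() is invented for the illustration.

/*
 * Illustrative only: dq_list_lock ranks above dq_state_lock, so it is
 * taken first and released last, exactly as dqget() does below.
 */
static int example_quota_active_locked(struct super_block *sb, int type)
{
	int active;

	spin_lock(&dq_list_lock);	/* outer: protects lists and dqstats */
	spin_lock(&dq_state_lock);	/* inner: protects quota state changes */
	active = sb_has_quota_active(sb, type);
	spin_unlock(&dq_state_lock);
	spin_unlock(&dq_list_lock);
	return active;
}
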
137 | static char *quotatypes[] = INITQFNAMES; | ||
138 | static struct quota_format_type *quota_formats; /* List of registered formats */ | ||
139 | static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; | ||
140 | |||
141 | /* SLAB cache for dquot structures */ | ||
142 | static struct kmem_cache *dquot_cachep; | ||
143 | |||
144 | int register_quota_format(struct quota_format_type *fmt) | ||
145 | { | ||
146 | spin_lock(&dq_list_lock); | ||
147 | fmt->qf_next = quota_formats; | ||
148 | quota_formats = fmt; | ||
149 | spin_unlock(&dq_list_lock); | ||
150 | return 0; | ||
151 | } | ||
152 | EXPORT_SYMBOL(register_quota_format); | ||
153 | |||
154 | void unregister_quota_format(struct quota_format_type *fmt) | ||
155 | { | ||
156 | struct quota_format_type **actqf; | ||
157 | |||
158 | spin_lock(&dq_list_lock); | ||
159 | for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); | ||
160 | if (*actqf) | ||
161 | *actqf = (*actqf)->qf_next; | ||
162 | spin_unlock(&dq_list_lock); | ||
163 | } | ||
164 | EXPORT_SYMBOL(unregister_quota_format); | ||
165 | |||
166 | static struct quota_format_type *find_quota_format(int id) | ||
167 | { | ||
168 | struct quota_format_type *actqf; | ||
169 | |||
170 | spin_lock(&dq_list_lock); | ||
171 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); | ||
172 | if (!actqf || !try_module_get(actqf->qf_owner)) { | ||
173 | int qm; | ||
174 | |||
175 | spin_unlock(&dq_list_lock); | ||
176 | |||
177 | for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); | ||
178 | if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) | ||
179 | return NULL; | ||
180 | |||
181 | spin_lock(&dq_list_lock); | ||
182 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); | ||
183 | if (actqf && !try_module_get(actqf->qf_owner)) | ||
184 | actqf = NULL; | ||
185 | } | ||
186 | spin_unlock(&dq_list_lock); | ||
187 | return actqf; | ||
188 | } | ||
189 | |||
190 | static void put_quota_format(struct quota_format_type *fmt) | ||
191 | { | ||
192 | module_put(fmt->qf_owner); | ||
193 | } | ||
194 | |||
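As a reading aid, the sketch below (an editor's example, not part of this patch) shows how a format backend such as quota_v1.c or quota_v2.c would typically hook into register_quota_format()/unregister_quota_format() above; only qf_fmt_id and qf_owner are referenced in this hunk, so the remaining wiring is assumed to be standard module boilerplate.

/*
 * Illustrative only: schematic registration of a quota format module.
 * QFMT_VFS_OLD is used as the example id; find_quota_format() matches on
 * qf_fmt_id and pins the module through qf_owner via try_module_get().
 */
static struct quota_format_type example_format = {
	.qf_fmt_id = QFMT_VFS_OLD,
	.qf_owner  = THIS_MODULE,
};

static int __init example_format_init(void)
{
	return register_quota_format(&example_format);
}

static void __exit example_format_exit(void)
{
	unregister_quota_format(&example_format);
}

module_init(example_format_init);
module_exit(example_format_exit);
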
195 | /* | ||
196 | * Dquot List Management: | ||
197 | * The quota code uses three lists for dquot management: the inuse_list, | ||
198 | * free_dquots, and dquot_hash[] array. A single dquot structure may be | ||
199 | * on all three lists, depending on its current state. | ||
200 | * | ||
201 | * All dquots are placed to the end of inuse_list when first created, and this | ||
202 | * list is used for invalidate operation, which must look at every dquot. | ||
203 | * | ||
204 | * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, | ||
205 | * and this list is searched whenever we need an available dquot. Dquots are | ||
206 | * removed from the list as soon as they are used again, and | ||
207 | * dqstats.free_dquots gives the number of dquots on the list. When | ||
208 | * dquot is invalidated it's completely released from memory. | ||
209 | * | ||
210 | * Dquots with a specific identity (device, type and id) are placed on | ||
211 | * one of the dquot_hash[] hash chains. This provides an efficient search | ||
212 | * mechanism to locate a specific dquot. | ||
213 | */ | ||
214 | |||
215 | static LIST_HEAD(inuse_list); | ||
216 | static LIST_HEAD(free_dquots); | ||
217 | static unsigned int dq_hash_bits, dq_hash_mask; | ||
218 | static struct hlist_head *dquot_hash; | ||
219 | |||
220 | struct dqstats dqstats; | ||
221 | EXPORT_SYMBOL(dqstats); | ||
222 | |||
223 | static inline unsigned int | ||
224 | hashfn(const struct super_block *sb, unsigned int id, int type) | ||
225 | { | ||
226 | unsigned long tmp; | ||
227 | |||
228 | tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); | ||
229 | return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Following list functions expect dq_list_lock to be held | ||
234 | */ | ||
235 | static inline void insert_dquot_hash(struct dquot *dquot) | ||
236 | { | ||
237 | struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); | ||
238 | hlist_add_head(&dquot->dq_hash, head); | ||
239 | } | ||
240 | |||
241 | static inline void remove_dquot_hash(struct dquot *dquot) | ||
242 | { | ||
243 | hlist_del_init(&dquot->dq_hash); | ||
244 | } | ||
245 | |||
246 | static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) | ||
247 | { | ||
248 | struct hlist_node *node; | ||
249 | struct dquot *dquot; | ||
250 | |||
251 | hlist_for_each (node, dquot_hash+hashent) { | ||
252 | dquot = hlist_entry(node, struct dquot, dq_hash); | ||
253 | if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) | ||
254 | return dquot; | ||
255 | } | ||
256 | return NODQUOT; | ||
257 | } | ||
258 | |||
259 | /* Add a dquot to the tail of the free list */ | ||
260 | static inline void put_dquot_last(struct dquot *dquot) | ||
261 | { | ||
262 | list_add_tail(&dquot->dq_free, &free_dquots); | ||
263 | dqstats.free_dquots++; | ||
264 | } | ||
265 | |||
266 | static inline void remove_free_dquot(struct dquot *dquot) | ||
267 | { | ||
268 | if (list_empty(&dquot->dq_free)) | ||
269 | return; | ||
270 | list_del_init(&dquot->dq_free); | ||
271 | dqstats.free_dquots--; | ||
272 | } | ||
273 | |||
274 | static inline void put_inuse(struct dquot *dquot) | ||
275 | { | ||
276 | /* We add to the back of inuse list so we don't have to restart | ||
277 | * when traversing this list and we block */ | ||
278 | list_add_tail(&dquot->dq_inuse, &inuse_list); | ||
279 | dqstats.allocated_dquots++; | ||
280 | } | ||
281 | |||
282 | static inline void remove_inuse(struct dquot *dquot) | ||
283 | { | ||
284 | dqstats.allocated_dquots--; | ||
285 | list_del(&dquot->dq_inuse); | ||
286 | } | ||
287 | /* | ||
288 | * End of list functions needing dq_list_lock | ||
289 | */ | ||
290 | |||
291 | static void wait_on_dquot(struct dquot *dquot) | ||
292 | { | ||
293 | mutex_lock(&dquot->dq_lock); | ||
294 | mutex_unlock(&dquot->dq_lock); | ||
295 | } | ||
296 | |||
297 | static inline int dquot_dirty(struct dquot *dquot) | ||
298 | { | ||
299 | return test_bit(DQ_MOD_B, &dquot->dq_flags); | ||
300 | } | ||
301 | |||
302 | static inline int mark_dquot_dirty(struct dquot *dquot) | ||
303 | { | ||
304 | return dquot->dq_sb->dq_op->mark_dirty(dquot); | ||
305 | } | ||
306 | |||
307 | int dquot_mark_dquot_dirty(struct dquot *dquot) | ||
308 | { | ||
309 | spin_lock(&dq_list_lock); | ||
310 | if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
311 | list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> | ||
312 | info[dquot->dq_type].dqi_dirty_list); | ||
313 | spin_unlock(&dq_list_lock); | ||
314 | return 0; | ||
315 | } | ||
316 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | ||
317 | |||
318 | /* This function needs dq_list_lock */ | ||
319 | static inline int clear_dquot_dirty(struct dquot *dquot) | ||
320 | { | ||
321 | if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
322 | return 0; | ||
323 | list_del_init(&dquot->dq_dirty); | ||
324 | return 1; | ||
325 | } | ||
326 | |||
327 | void mark_info_dirty(struct super_block *sb, int type) | ||
328 | { | ||
329 | set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); | ||
330 | } | ||
331 | EXPORT_SYMBOL(mark_info_dirty); | ||
332 | |||
333 | /* | ||
334 | * Read dquot from disk and alloc space for it | ||
335 | */ | ||
336 | |||
337 | int dquot_acquire(struct dquot *dquot) | ||
338 | { | ||
339 | int ret = 0, ret2 = 0; | ||
340 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
341 | |||
342 | mutex_lock(&dquot->dq_lock); | ||
343 | mutex_lock(&dqopt->dqio_mutex); | ||
344 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) | ||
345 | ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); | ||
346 | if (ret < 0) | ||
347 | goto out_iolock; | ||
348 | set_bit(DQ_READ_B, &dquot->dq_flags); | ||
349 | /* Instantiate dquot if needed */ | ||
350 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { | ||
351 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
352 | /* Write the info if needed */ | ||
353 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
354 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
355 | if (ret < 0) | ||
356 | goto out_iolock; | ||
357 | if (ret2 < 0) { | ||
358 | ret = ret2; | ||
359 | goto out_iolock; | ||
360 | } | ||
361 | } | ||
362 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
363 | out_iolock: | ||
364 | mutex_unlock(&dqopt->dqio_mutex); | ||
365 | mutex_unlock(&dquot->dq_lock); | ||
366 | return ret; | ||
367 | } | ||
368 | EXPORT_SYMBOL(dquot_acquire); | ||
369 | |||
370 | /* | ||
371 | * Write dquot to disk | ||
372 | */ | ||
373 | int dquot_commit(struct dquot *dquot) | ||
374 | { | ||
375 | int ret = 0, ret2 = 0; | ||
376 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
377 | |||
378 | mutex_lock(&dqopt->dqio_mutex); | ||
379 | spin_lock(&dq_list_lock); | ||
380 | if (!clear_dquot_dirty(dquot)) { | ||
381 | spin_unlock(&dq_list_lock); | ||
382 | goto out_sem; | ||
383 | } | ||
384 | spin_unlock(&dq_list_lock); | ||
385 | /* A dquot can be inactive only if there was an error during read/init, | ||
386 | * so we had better not write it */ | ||
387 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
388 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
389 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
390 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
391 | if (ret >= 0) | ||
392 | ret = ret2; | ||
393 | } | ||
394 | out_sem: | ||
395 | mutex_unlock(&dqopt->dqio_mutex); | ||
396 | return ret; | ||
397 | } | ||
398 | EXPORT_SYMBOL(dquot_commit); | ||
399 | |||
400 | /* | ||
401 | * Release dquot | ||
402 | */ | ||
403 | int dquot_release(struct dquot *dquot) | ||
404 | { | ||
405 | int ret = 0, ret2 = 0; | ||
406 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
407 | |||
408 | mutex_lock(&dquot->dq_lock); | ||
409 | /* Check whether we are not racing with some other dqget() */ | ||
410 | if (atomic_read(&dquot->dq_count) > 1) | ||
411 | goto out_dqlock; | ||
412 | mutex_lock(&dqopt->dqio_mutex); | ||
413 | if (dqopt->ops[dquot->dq_type]->release_dqblk) { | ||
414 | ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); | ||
415 | /* Write the info */ | ||
416 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
417 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
418 | if (ret >= 0) | ||
419 | ret = ret2; | ||
420 | } | ||
421 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
422 | mutex_unlock(&dqopt->dqio_mutex); | ||
423 | out_dqlock: | ||
424 | mutex_unlock(&dquot->dq_lock); | ||
425 | return ret; | ||
426 | } | ||
427 | EXPORT_SYMBOL(dquot_release); | ||
428 | |||
429 | void dquot_destroy(struct dquot *dquot) | ||
430 | { | ||
431 | kmem_cache_free(dquot_cachep, dquot); | ||
432 | } | ||
433 | EXPORT_SYMBOL(dquot_destroy); | ||
434 | |||
435 | static inline void do_destroy_dquot(struct dquot *dquot) | ||
436 | { | ||
437 | dquot->dq_sb->dq_op->destroy_dquot(dquot); | ||
438 | } | ||
439 | |||
440 | /* Invalidate all dquots on the list. Note that this function is called after | ||
441 | * quota is disabled and pointers from inodes removed so there cannot be new | ||
442 | * quota users. There can still be some users of quotas due to inodes being | ||
443 | * just deleted or pruned by prune_icache() (those are not attached to any | ||
444 | * list) or parallel quotactl call. We have to wait for such users. | ||
445 | */ | ||
446 | static void invalidate_dquots(struct super_block *sb, int type) | ||
447 | { | ||
448 | struct dquot *dquot, *tmp; | ||
449 | |||
450 | restart: | ||
451 | spin_lock(&dq_list_lock); | ||
452 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { | ||
453 | if (dquot->dq_sb != sb) | ||
454 | continue; | ||
455 | if (dquot->dq_type != type) | ||
456 | continue; | ||
457 | /* Wait for dquot users */ | ||
458 | if (atomic_read(&dquot->dq_count)) { | ||
459 | DEFINE_WAIT(wait); | ||
460 | |||
461 | atomic_inc(&dquot->dq_count); | ||
462 | prepare_to_wait(&dquot->dq_wait_unused, &wait, | ||
463 | TASK_UNINTERRUPTIBLE); | ||
464 | spin_unlock(&dq_list_lock); | ||
465 | /* Once dqput() wakes us up, we know it's time to free | ||
466 | * the dquot. | ||
467 | * IMPORTANT: we rely on the fact that there is always | ||
468 | * at most one process waiting for dquot to free. | ||
469 | * Otherwise dq_count would be > 1 and we would never | ||
470 | * wake up. | ||
471 | */ | ||
472 | if (atomic_read(&dquot->dq_count) > 1) | ||
473 | schedule(); | ||
474 | finish_wait(&dquot->dq_wait_unused, &wait); | ||
475 | dqput(dquot); | ||
476 | /* At this moment the dquot need not exist (it could have been | ||
477 | * reclaimed by prune_dqcache()). Hence we must | ||
478 | * restart. */ | ||
479 | goto restart; | ||
480 | } | ||
481 | /* | ||
482 | * Quota now has no users and it has been written on last | ||
483 | * dqput() | ||
484 | */ | ||
485 | remove_dquot_hash(dquot); | ||
486 | remove_free_dquot(dquot); | ||
487 | remove_inuse(dquot); | ||
488 | do_destroy_dquot(dquot); | ||
489 | } | ||
490 | spin_unlock(&dq_list_lock); | ||
491 | } | ||
492 | |||
493 | /* Call callback for every active dquot on given filesystem */ | ||
494 | int dquot_scan_active(struct super_block *sb, | ||
495 | int (*fn)(struct dquot *dquot, unsigned long priv), | ||
496 | unsigned long priv) | ||
497 | { | ||
498 | struct dquot *dquot, *old_dquot = NULL; | ||
499 | int ret = 0; | ||
500 | |||
501 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
502 | spin_lock(&dq_list_lock); | ||
503 | list_for_each_entry(dquot, &inuse_list, dq_inuse) { | ||
504 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) | ||
505 | continue; | ||
506 | if (dquot->dq_sb != sb) | ||
507 | continue; | ||
508 | /* Now we have active dquot so we can just increase use count */ | ||
509 | atomic_inc(&dquot->dq_count); | ||
510 | dqstats.lookups++; | ||
511 | spin_unlock(&dq_list_lock); | ||
512 | dqput(old_dquot); | ||
513 | old_dquot = dquot; | ||
514 | ret = fn(dquot, priv); | ||
515 | if (ret < 0) | ||
516 | goto out; | ||
517 | spin_lock(&dq_list_lock); | ||
518 | /* We are safe to continue now because our dquot could not | ||
519 | * be moved out of the inuse list while we hold the reference */ | ||
520 | } | ||
521 | spin_unlock(&dq_list_lock); | ||
522 | out: | ||
523 | dqput(old_dquot); | ||
524 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
525 | return ret; | ||
526 | } | ||
527 | EXPORT_SYMBOL(dquot_scan_active); | ||
528 | |||
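For illustration, a caller of dquot_scan_active() might look like the sketch below (an editor's example, not part of this patch); the callback contract comes from the signature above, while the counting logic and names are invented.

/*
 * Illustrative only: count the active dquots of a filesystem through the
 * dquot_scan_active() callback interface defined above.
 */
static int example_count_one(struct dquot *dquot, unsigned long priv)
{
	(*(unsigned long *)priv)++;	/* priv carries a pointer to the counter */
	return 0;			/* a non-negative return keeps scanning */
}

static unsigned long example_count_active(struct super_block *sb)
{
	unsigned long count = 0;

	dquot_scan_active(sb, example_count_one, (unsigned long)&count);
	return count;
}
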
529 | int vfs_quota_sync(struct super_block *sb, int type) | ||
530 | { | ||
531 | struct list_head *dirty; | ||
532 | struct dquot *dquot; | ||
533 | struct quota_info *dqopt = sb_dqopt(sb); | ||
534 | int cnt; | ||
535 | |||
536 | mutex_lock(&dqopt->dqonoff_mutex); | ||
537 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
538 | if (type != -1 && cnt != type) | ||
539 | continue; | ||
540 | if (!sb_has_quota_active(sb, cnt)) | ||
541 | continue; | ||
542 | spin_lock(&dq_list_lock); | ||
543 | dirty = &dqopt->info[cnt].dqi_dirty_list; | ||
544 | while (!list_empty(dirty)) { | ||
545 | dquot = list_first_entry(dirty, struct dquot, dq_dirty); | ||
546 | /* Only a bad dquot can be both dirty and inactive... */ | ||
547 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
548 | clear_dquot_dirty(dquot); | ||
549 | continue; | ||
550 | } | ||
551 | /* Now we have active dquot from which someone is | ||
552 | * holding reference so we can safely just increase | ||
553 | * use count */ | ||
554 | atomic_inc(&dquot->dq_count); | ||
555 | dqstats.lookups++; | ||
556 | spin_unlock(&dq_list_lock); | ||
557 | sb->dq_op->write_dquot(dquot); | ||
558 | dqput(dquot); | ||
559 | spin_lock(&dq_list_lock); | ||
560 | } | ||
561 | spin_unlock(&dq_list_lock); | ||
562 | } | ||
563 | |||
564 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
565 | if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) | ||
566 | && info_dirty(&dqopt->info[cnt])) | ||
567 | sb->dq_op->write_info(sb, cnt); | ||
568 | spin_lock(&dq_list_lock); | ||
569 | dqstats.syncs++; | ||
570 | spin_unlock(&dq_list_lock); | ||
571 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | EXPORT_SYMBOL(vfs_quota_sync); | ||
576 | |||
577 | /* Free unused dquots from cache */ | ||
578 | static void prune_dqcache(int count) | ||
579 | { | ||
580 | struct list_head *head; | ||
581 | struct dquot *dquot; | ||
582 | |||
583 | head = free_dquots.prev; | ||
584 | while (head != &free_dquots && count) { | ||
585 | dquot = list_entry(head, struct dquot, dq_free); | ||
586 | remove_dquot_hash(dquot); | ||
587 | remove_free_dquot(dquot); | ||
588 | remove_inuse(dquot); | ||
589 | do_destroy_dquot(dquot); | ||
590 | count--; | ||
591 | head = free_dquots.prev; | ||
592 | } | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * This is called from kswapd when we think we need some | ||
597 | * more memory | ||
598 | */ | ||
599 | |||
600 | static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | ||
601 | { | ||
602 | if (nr) { | ||
603 | spin_lock(&dq_list_lock); | ||
604 | prune_dqcache(nr); | ||
605 | spin_unlock(&dq_list_lock); | ||
606 | } | ||
607 | return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; | ||
608 | } | ||
609 | |||
610 | static struct shrinker dqcache_shrinker = { | ||
611 | .shrink = shrink_dqcache_memory, | ||
612 | .seeks = DEFAULT_SEEKS, | ||
613 | }; | ||
614 | |||
615 | /* | ||
616 | * Put reference to dquot | ||
617 | * NOTE: If you change this function please check whether dqput_blocks() works right... | ||
618 | */ | ||
619 | void dqput(struct dquot *dquot) | ||
620 | { | ||
621 | int ret; | ||
622 | |||
623 | if (!dquot) | ||
624 | return; | ||
625 | #ifdef __DQUOT_PARANOIA | ||
626 | if (!atomic_read(&dquot->dq_count)) { | ||
627 | printk("VFS: dqput: trying to free free dquot\n"); | ||
628 | printk("VFS: device %s, dquot of %s %d\n", | ||
629 | dquot->dq_sb->s_id, | ||
630 | quotatypes[dquot->dq_type], | ||
631 | dquot->dq_id); | ||
632 | BUG(); | ||
633 | } | ||
634 | #endif | ||
635 | |||
636 | spin_lock(&dq_list_lock); | ||
637 | dqstats.drops++; | ||
638 | spin_unlock(&dq_list_lock); | ||
639 | we_slept: | ||
640 | spin_lock(&dq_list_lock); | ||
641 | if (atomic_read(&dquot->dq_count) > 1) { | ||
642 | /* We have more than one user... nothing to do */ | ||
643 | atomic_dec(&dquot->dq_count); | ||
644 | /* Releasing dquot during quotaoff phase? */ | ||
645 | if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && | ||
646 | atomic_read(&dquot->dq_count) == 1) | ||
647 | wake_up(&dquot->dq_wait_unused); | ||
648 | spin_unlock(&dq_list_lock); | ||
649 | return; | ||
650 | } | ||
651 | /* Need to release dquot? */ | ||
652 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { | ||
653 | spin_unlock(&dq_list_lock); | ||
654 | /* Commit dquot before releasing */ | ||
655 | ret = dquot->dq_sb->dq_op->write_dquot(dquot); | ||
656 | if (ret < 0) { | ||
657 | printk(KERN_ERR "VFS: cannot write quota structure on " | ||
658 | "device %s (error %d). Quota may get out of " | ||
659 | "sync!\n", dquot->dq_sb->s_id, ret); | ||
660 | /* | ||
661 | * We clear dirty bit anyway, so that we avoid | ||
662 | * infinite loop here | ||
663 | */ | ||
664 | spin_lock(&dq_list_lock); | ||
665 | clear_dquot_dirty(dquot); | ||
666 | spin_unlock(&dq_list_lock); | ||
667 | } | ||
668 | goto we_slept; | ||
669 | } | ||
670 | /* Clear flag in case dquot was inactive (something bad happened) */ | ||
671 | clear_dquot_dirty(dquot); | ||
672 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
673 | spin_unlock(&dq_list_lock); | ||
674 | dquot->dq_sb->dq_op->release_dquot(dquot); | ||
675 | goto we_slept; | ||
676 | } | ||
677 | atomic_dec(&dquot->dq_count); | ||
678 | #ifdef __DQUOT_PARANOIA | ||
679 | /* sanity check */ | ||
680 | BUG_ON(!list_empty(&dquot->dq_free)); | ||
681 | #endif | ||
682 | put_dquot_last(dquot); | ||
683 | spin_unlock(&dq_list_lock); | ||
684 | } | ||
685 | EXPORT_SYMBOL(dqput); | ||
686 | |||
687 | struct dquot *dquot_alloc(struct super_block *sb, int type) | ||
688 | { | ||
689 | return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); | ||
690 | } | ||
691 | EXPORT_SYMBOL(dquot_alloc); | ||
692 | |||
693 | static struct dquot *get_empty_dquot(struct super_block *sb, int type) | ||
694 | { | ||
695 | struct dquot *dquot; | ||
696 | |||
697 | dquot = sb->dq_op->alloc_dquot(sb, type); | ||
698 | if(!dquot) | ||
699 | return NODQUOT; | ||
700 | |||
701 | mutex_init(&dquot->dq_lock); | ||
702 | INIT_LIST_HEAD(&dquot->dq_free); | ||
703 | INIT_LIST_HEAD(&dquot->dq_inuse); | ||
704 | INIT_HLIST_NODE(&dquot->dq_hash); | ||
705 | INIT_LIST_HEAD(&dquot->dq_dirty); | ||
706 | init_waitqueue_head(&dquot->dq_wait_unused); | ||
707 | dquot->dq_sb = sb; | ||
708 | dquot->dq_type = type; | ||
709 | atomic_set(&dquot->dq_count, 1); | ||
710 | |||
711 | return dquot; | ||
712 | } | ||
713 | |||
714 | /* | ||
715 | * Get reference to dquot | ||
716 | * | ||
717 | * Locking is slightly tricky here. We are guarded from parallel quotaoff() | ||
718 | * destroying our dquot by: | ||
719 | * a) checking for quota flags under dq_list_lock and | ||
720 | * b) getting a reference to dquot before we release dq_list_lock | ||
721 | */ | ||
722 | struct dquot *dqget(struct super_block *sb, unsigned int id, int type) | ||
723 | { | ||
724 | unsigned int hashent = hashfn(sb, id, type); | ||
725 | struct dquot *dquot = NODQUOT, *empty = NODQUOT; | ||
726 | |||
727 | if (!sb_has_quota_active(sb, type)) | ||
728 | return NODQUOT; | ||
729 | we_slept: | ||
730 | spin_lock(&dq_list_lock); | ||
731 | spin_lock(&dq_state_lock); | ||
732 | if (!sb_has_quota_active(sb, type)) { | ||
733 | spin_unlock(&dq_state_lock); | ||
734 | spin_unlock(&dq_list_lock); | ||
735 | goto out; | ||
736 | } | ||
737 | spin_unlock(&dq_state_lock); | ||
738 | |||
739 | if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { | ||
740 | if (empty == NODQUOT) { | ||
741 | spin_unlock(&dq_list_lock); | ||
742 | if ((empty = get_empty_dquot(sb, type)) == NODQUOT) | ||
743 | schedule(); /* Try to wait for a moment... */ | ||
744 | goto we_slept; | ||
745 | } | ||
746 | dquot = empty; | ||
747 | empty = NODQUOT; | ||
748 | dquot->dq_id = id; | ||
749 | /* all dquots go on the inuse_list */ | ||
750 | put_inuse(dquot); | ||
751 | /* hash it first so it can be found */ | ||
752 | insert_dquot_hash(dquot); | ||
753 | dqstats.lookups++; | ||
754 | spin_unlock(&dq_list_lock); | ||
755 | } else { | ||
756 | if (!atomic_read(&dquot->dq_count)) | ||
757 | remove_free_dquot(dquot); | ||
758 | atomic_inc(&dquot->dq_count); | ||
759 | dqstats.cache_hits++; | ||
760 | dqstats.lookups++; | ||
761 | spin_unlock(&dq_list_lock); | ||
762 | } | ||
763 | /* Wait for dq_lock - after this we know that either dquot_release() is already | ||
764 | * finished or it will be canceled due to dq_count > 1 test */ | ||
765 | wait_on_dquot(dquot); | ||
766 | /* Read the dquot and instantiate it (everything done only if needed) */ | ||
767 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { | ||
768 | dqput(dquot); | ||
769 | dquot = NODQUOT; | ||
770 | goto out; | ||
771 | } | ||
772 | #ifdef __DQUOT_PARANOIA | ||
773 | BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ | ||
774 | #endif | ||
775 | out: | ||
776 | if (empty) | ||
777 | do_destroy_dquot(empty); | ||
778 | |||
779 | return dquot; | ||
780 | } | ||
781 | EXPORT_SYMBOL(dqget); | ||
782 | |||
783 | static int dqinit_needed(struct inode *inode, int type) | ||
784 | { | ||
785 | int cnt; | ||
786 | |||
787 | if (IS_NOQUOTA(inode)) | ||
788 | return 0; | ||
789 | if (type != -1) | ||
790 | return inode->i_dquot[type] == NODQUOT; | ||
791 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
792 | if (inode->i_dquot[cnt] == NODQUOT) | ||
793 | return 1; | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | /* This routine is guarded by dqonoff_mutex mutex */ | ||
798 | static void add_dquot_ref(struct super_block *sb, int type) | ||
799 | { | ||
800 | struct inode *inode, *old_inode = NULL; | ||
801 | |||
802 | spin_lock(&inode_lock); | ||
803 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
804 | if (!atomic_read(&inode->i_writecount)) | ||
805 | continue; | ||
806 | if (!dqinit_needed(inode, type)) | ||
807 | continue; | ||
808 | if (inode->i_state & (I_FREEING|I_WILL_FREE)) | ||
809 | continue; | ||
810 | |||
811 | __iget(inode); | ||
812 | spin_unlock(&inode_lock); | ||
813 | |||
814 | iput(old_inode); | ||
815 | sb->dq_op->initialize(inode, type); | ||
816 | /* We hold a reference to 'inode' so it couldn't have been | ||
817 | * removed from s_inodes list while we dropped the inode_lock. | ||
818 | * We cannot iput the inode now as we can be holding the last | ||
819 | * reference and we cannot iput it under inode_lock. So we | ||
820 | * keep the reference and iput it later. */ | ||
821 | old_inode = inode; | ||
822 | spin_lock(&inode_lock); | ||
823 | } | ||
824 | spin_unlock(&inode_lock); | ||
825 | iput(old_inode); | ||
826 | } | ||
827 | |||
828 | /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ | ||
829 | static inline int dqput_blocks(struct dquot *dquot) | ||
830 | { | ||
831 | if (atomic_read(&dquot->dq_count) <= 1) | ||
832 | return 1; | ||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | /* Remove references to dquots from inode - add dquot to list for freeing if needed */ | ||
837 | /* We can't race with anybody because we hold dqptr_sem for writing... */ | ||
838 | static int remove_inode_dquot_ref(struct inode *inode, int type, | ||
839 | struct list_head *tofree_head) | ||
840 | { | ||
841 | struct dquot *dquot = inode->i_dquot[type]; | ||
842 | |||
843 | inode->i_dquot[type] = NODQUOT; | ||
844 | if (dquot != NODQUOT) { | ||
845 | if (dqput_blocks(dquot)) { | ||
846 | #ifdef __DQUOT_PARANOIA | ||
847 | if (atomic_read(&dquot->dq_count) != 1) | ||
848 | printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); | ||
849 | #endif | ||
850 | spin_lock(&dq_list_lock); | ||
851 | list_add(&dquot->dq_free, tofree_head); /* As dquot must have currently users it can't be on the free list... */ | ||
852 | spin_unlock(&dq_list_lock); | ||
853 | return 1; | ||
854 | } | ||
855 | else | ||
856 | dqput(dquot); /* We have guaranteed we won't block */ | ||
857 | } | ||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | /* Free list of dquots - called from inode.c */ | ||
862 | /* dquots are removed from inodes and no new references can be obtained, so we are the only ones holding a reference */ | ||
863 | static void put_dquot_list(struct list_head *tofree_head) | ||
864 | { | ||
865 | struct list_head *act_head; | ||
866 | struct dquot *dquot; | ||
867 | |||
868 | act_head = tofree_head->next; | ||
869 | /* So now we have dquots on the list... Just free them */ | ||
870 | while (act_head != tofree_head) { | ||
871 | dquot = list_entry(act_head, struct dquot, dq_free); | ||
872 | act_head = act_head->next; | ||
873 | list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ | ||
874 | dqput(dquot); | ||
875 | } | ||
876 | } | ||
877 | |||
878 | static void remove_dquot_ref(struct super_block *sb, int type, | ||
879 | struct list_head *tofree_head) | ||
880 | { | ||
881 | struct inode *inode; | ||
882 | |||
883 | spin_lock(&inode_lock); | ||
884 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
885 | if (!IS_NOQUOTA(inode)) | ||
886 | remove_inode_dquot_ref(inode, type, tofree_head); | ||
887 | } | ||
888 | spin_unlock(&inode_lock); | ||
889 | } | ||
890 | |||
891 | /* Gather all references from inodes and drop them */ | ||
892 | static void drop_dquot_ref(struct super_block *sb, int type) | ||
893 | { | ||
894 | LIST_HEAD(tofree_head); | ||
895 | |||
896 | if (sb->dq_op) { | ||
897 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
898 | remove_dquot_ref(sb, type, &tofree_head); | ||
899 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
900 | put_dquot_list(&tofree_head); | ||
901 | } | ||
902 | } | ||
903 | |||
904 | static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) | ||
905 | { | ||
906 | dquot->dq_dqb.dqb_curinodes += number; | ||
907 | } | ||
908 | |||
909 | static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) | ||
910 | { | ||
911 | dquot->dq_dqb.dqb_curspace += number; | ||
912 | } | ||
913 | |||
914 | static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) | ||
915 | { | ||
916 | dquot->dq_dqb.dqb_rsvspace += number; | ||
917 | } | ||
918 | |||
919 | /* | ||
920 | * Claim reserved quota space | ||
921 | */ | ||
922 | static void dquot_claim_reserved_space(struct dquot *dquot, | ||
923 | qsize_t number) | ||
924 | { | ||
925 | WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); | ||
926 | dquot->dq_dqb.dqb_curspace += number; | ||
927 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
928 | } | ||
929 | |||
930 | static inline | ||
931 | void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) | ||
932 | { | ||
933 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
934 | } | ||
935 | |||
936 | static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) | ||
937 | { | ||
938 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
939 | dquot->dq_dqb.dqb_curinodes >= number) | ||
940 | dquot->dq_dqb.dqb_curinodes -= number; | ||
941 | else | ||
942 | dquot->dq_dqb.dqb_curinodes = 0; | ||
943 | if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
944 | dquot->dq_dqb.dqb_itime = (time_t) 0; | ||
945 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
946 | } | ||
947 | |||
948 | static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) | ||
949 | { | ||
950 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
951 | dquot->dq_dqb.dqb_curspace >= number) | ||
952 | dquot->dq_dqb.dqb_curspace -= number; | ||
953 | else | ||
954 | dquot->dq_dqb.dqb_curspace = 0; | ||
955 | if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
956 | dquot->dq_dqb.dqb_btime = (time_t) 0; | ||
957 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
958 | } | ||
959 | |||
960 | static int warning_issued(struct dquot *dquot, const int warntype) | ||
961 | { | ||
962 | int flag = (warntype == QUOTA_NL_BHARDWARN || | ||
963 | warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : | ||
964 | ((warntype == QUOTA_NL_IHARDWARN || | ||
965 | warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0); | ||
966 | |||
967 | if (!flag) | ||
968 | return 0; | ||
969 | return test_and_set_bit(flag, &dquot->dq_flags); | ||
970 | } | ||
971 | |||
972 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
973 | static int flag_print_warnings = 1; | ||
974 | |||
975 | static inline int need_print_warning(struct dquot *dquot) | ||
976 | { | ||
977 | if (!flag_print_warnings) | ||
978 | return 0; | ||
979 | |||
980 | switch (dquot->dq_type) { | ||
981 | case USRQUOTA: | ||
982 | return current_fsuid() == dquot->dq_id; | ||
983 | case GRPQUOTA: | ||
984 | return in_group_p(dquot->dq_id); | ||
985 | } | ||
986 | return 0; | ||
987 | } | ||
988 | |||
989 | /* Print warning to user which exceeded quota */ | ||
990 | static void print_warning(struct dquot *dquot, const int warntype) | ||
991 | { | ||
992 | char *msg = NULL; | ||
993 | struct tty_struct *tty; | ||
994 | |||
995 | if (warntype == QUOTA_NL_IHARDBELOW || | ||
996 | warntype == QUOTA_NL_ISOFTBELOW || | ||
997 | warntype == QUOTA_NL_BHARDBELOW || | ||
998 | warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) | ||
999 | return; | ||
1000 | |||
1001 | tty = get_current_tty(); | ||
1002 | if (!tty) | ||
1003 | return; | ||
1004 | tty_write_message(tty, dquot->dq_sb->s_id); | ||
1005 | if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) | ||
1006 | tty_write_message(tty, ": warning, "); | ||
1007 | else | ||
1008 | tty_write_message(tty, ": write failed, "); | ||
1009 | tty_write_message(tty, quotatypes[dquot->dq_type]); | ||
1010 | switch (warntype) { | ||
1011 | case QUOTA_NL_IHARDWARN: | ||
1012 | msg = " file limit reached.\r\n"; | ||
1013 | break; | ||
1014 | case QUOTA_NL_ISOFTLONGWARN: | ||
1015 | msg = " file quota exceeded too long.\r\n"; | ||
1016 | break; | ||
1017 | case QUOTA_NL_ISOFTWARN: | ||
1018 | msg = " file quota exceeded.\r\n"; | ||
1019 | break; | ||
1020 | case QUOTA_NL_BHARDWARN: | ||
1021 | msg = " block limit reached.\r\n"; | ||
1022 | break; | ||
1023 | case QUOTA_NL_BSOFTLONGWARN: | ||
1024 | msg = " block quota exceeded too long.\r\n"; | ||
1025 | break; | ||
1026 | case QUOTA_NL_BSOFTWARN: | ||
1027 | msg = " block quota exceeded.\r\n"; | ||
1028 | break; | ||
1029 | } | ||
1030 | tty_write_message(tty, msg); | ||
1031 | tty_kref_put(tty); | ||
1032 | } | ||
1033 | #endif | ||
1034 | |||
1035 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1036 | |||
1037 | /* Netlink family structure for quota */ | ||
1038 | static struct genl_family quota_genl_family = { | ||
1039 | .id = GENL_ID_GENERATE, | ||
1040 | .hdrsize = 0, | ||
1041 | .name = "VFS_DQUOT", | ||
1042 | .version = 1, | ||
1043 | .maxattr = QUOTA_NL_A_MAX, | ||
1044 | }; | ||
1045 | |||
1046 | /* Send warning to userspace about user which exceeded quota */ | ||
1047 | static void send_warning(const struct dquot *dquot, const char warntype) | ||
1048 | { | ||
1049 | static atomic_t seq; | ||
1050 | struct sk_buff *skb; | ||
1051 | void *msg_head; | ||
1052 | int ret; | ||
1053 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
1054 | 2 * nla_total_size(sizeof(u64)); | ||
1055 | |||
1056 | /* We have to allocate using GFP_NOFS as we are called from a | ||
1057 | * filesystem performing write and thus further recursion into | ||
1058 | * the fs to free some data could cause deadlocks. */ | ||
1059 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
1060 | if (!skb) { | ||
1061 | printk(KERN_ERR | ||
1062 | "VFS: Not enough memory to send quota warning.\n"); | ||
1063 | return; | ||
1064 | } | ||
1065 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
1066 | &quota_genl_family, 0, QUOTA_NL_C_WARNING); | ||
1067 | if (!msg_head) { | ||
1068 | printk(KERN_ERR | ||
1069 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
1070 | goto err_out; | ||
1071 | } | ||
1072 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); | ||
1073 | if (ret) | ||
1074 | goto attr_err_out; | ||
1075 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); | ||
1076 | if (ret) | ||
1077 | goto attr_err_out; | ||
1078 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
1079 | if (ret) | ||
1080 | goto attr_err_out; | ||
1081 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, | ||
1082 | MAJOR(dquot->dq_sb->s_dev)); | ||
1083 | if (ret) | ||
1084 | goto attr_err_out; | ||
1085 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, | ||
1086 | MINOR(dquot->dq_sb->s_dev)); | ||
1087 | if (ret) | ||
1088 | goto attr_err_out; | ||
1089 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
1090 | if (ret) | ||
1091 | goto attr_err_out; | ||
1092 | genlmsg_end(skb, msg_head); | ||
1093 | |||
1094 | ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
1095 | if (ret < 0 && ret != -ESRCH) | ||
1096 | printk(KERN_ERR | ||
1097 | "VFS: Failed to send notification message: %d\n", ret); | ||
1098 | return; | ||
1099 | attr_err_out: | ||
1100 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
1101 | err_out: | ||
1102 | kfree_skb(skb); | ||
1103 | } | ||
1104 | #endif | ||
1105 | /* | ||
1106 | * Write warnings to the console and send warning messages over netlink. | ||
1107 | * | ||
1108 | * Note that this function can sleep. | ||
1109 | */ | ||
1110 | static inline void flush_warnings(struct dquot * const *dquots, char *warntype) | ||
1111 | { | ||
1112 | int i; | ||
1113 | |||
1114 | for (i = 0; i < MAXQUOTAS; i++) | ||
1115 | if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && | ||
1116 | !warning_issued(dquots[i], warntype[i])) { | ||
1117 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
1118 | print_warning(dquots[i], warntype[i]); | ||
1119 | #endif | ||
1120 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1121 | send_warning(dquots[i], warntype[i]); | ||
1122 | #endif | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | static inline char ignore_hardlimit(struct dquot *dquot) | ||
1127 | { | ||
1128 | struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
1129 | |||
1130 | return capable(CAP_SYS_RESOURCE) && | ||
1131 | (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); | ||
1132 | } | ||
1133 | |||
1134 | /* needs dq_data_lock */ | ||
1135 | static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | ||
1136 | { | ||
1137 | *warntype = QUOTA_NL_NOWARN; | ||
1138 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | ||
1139 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1140 | return QUOTA_OK; | ||
1141 | |||
1142 | if (dquot->dq_dqb.dqb_ihardlimit && | ||
1143 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && | ||
1144 | !ignore_hardlimit(dquot)) { | ||
1145 | *warntype = QUOTA_NL_IHARDWARN; | ||
1146 | return NO_QUOTA; | ||
1147 | } | ||
1148 | |||
1149 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1150 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && | ||
1151 | dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && | ||
1152 | !ignore_hardlimit(dquot)) { | ||
1153 | *warntype = QUOTA_NL_ISOFTLONGWARN; | ||
1154 | return NO_QUOTA; | ||
1155 | } | ||
1156 | |||
1157 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1158 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && | ||
1159 | dquot->dq_dqb.dqb_itime == 0) { | ||
1160 | *warntype = QUOTA_NL_ISOFTWARN; | ||
1161 | dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; | ||
1162 | } | ||
1163 | |||
1164 | return QUOTA_OK; | ||
1165 | } | ||
1166 | |||
1167 | /* needs dq_data_lock */ | ||
1168 | static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) | ||
1169 | { | ||
1170 | qsize_t tspace; | ||
1171 | |||
1172 | *warntype = QUOTA_NL_NOWARN; | ||
1173 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | ||
1174 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1175 | return QUOTA_OK; | ||
1176 | |||
1177 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | ||
1178 | + space; | ||
1179 | |||
1180 | if (dquot->dq_dqb.dqb_bhardlimit && | ||
1181 | tspace > dquot->dq_dqb.dqb_bhardlimit && | ||
1182 | !ignore_hardlimit(dquot)) { | ||
1183 | if (!prealloc) | ||
1184 | *warntype = QUOTA_NL_BHARDWARN; | ||
1185 | return NO_QUOTA; | ||
1186 | } | ||
1187 | |||
1188 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1189 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1190 | dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && | ||
1191 | !ignore_hardlimit(dquot)) { | ||
1192 | if (!prealloc) | ||
1193 | *warntype = QUOTA_NL_BSOFTLONGWARN; | ||
1194 | return NO_QUOTA; | ||
1195 | } | ||
1196 | |||
1197 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1198 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1199 | dquot->dq_dqb.dqb_btime == 0) { | ||
1200 | if (!prealloc) { | ||
1201 | *warntype = QUOTA_NL_BSOFTWARN; | ||
1202 | dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; | ||
1203 | } | ||
1204 | else | ||
1205 | /* | ||
1206 | * We don't allow preallocation to exceed the softlimit, so exceeding it | ||
1207 | * will always be reported | ||
1208 | */ | ||
1209 | return NO_QUOTA; | ||
1210 | } | ||
1211 | |||
1212 | return QUOTA_OK; | ||
1213 | } | ||
1214 | |||
1215 | static int info_idq_free(struct dquot *dquot, qsize_t inodes) | ||
1216 | { | ||
1217 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1218 | dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || | ||
1219 | !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) | ||
1220 | return QUOTA_NL_NOWARN; | ||
1221 | |||
1222 | if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
1223 | return QUOTA_NL_ISOFTBELOW; | ||
1224 | if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && | ||
1225 | dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) | ||
1226 | return QUOTA_NL_IHARDBELOW; | ||
1227 | return QUOTA_NL_NOWARN; | ||
1228 | } | ||
1229 | |||
1230 | static int info_bdq_free(struct dquot *dquot, qsize_t space) | ||
1231 | { | ||
1232 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1233 | dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1234 | return QUOTA_NL_NOWARN; | ||
1235 | |||
1236 | if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1237 | return QUOTA_NL_BSOFTBELOW; | ||
1238 | if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && | ||
1239 | dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) | ||
1240 | return QUOTA_NL_BHARDBELOW; | ||
1241 | return QUOTA_NL_NOWARN; | ||
1242 | } | ||
1243 | /* | ||
1244 | * Initialize quota pointers in inode | ||
1245 | * We do things in a slightly complicated way, but that way we avoid calling | ||
1246 | * dqget() and thus filesystem callbacks under dqptr_sem. | ||
1247 | */ | ||
1248 | int dquot_initialize(struct inode *inode, int type) | ||
1249 | { | ||
1250 | unsigned int id = 0; | ||
1251 | int cnt, ret = 0; | ||
1252 | struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; | ||
1253 | struct super_block *sb = inode->i_sb; | ||
1254 | |||
1255 | /* First test before acquiring mutex - solves deadlocks when we | ||
1256 | * re-enter the quota code and are already holding the mutex */ | ||
1257 | if (IS_NOQUOTA(inode)) | ||
1258 | return 0; | ||
1259 | |||
1260 | /* First get references to structures we might need. */ | ||
1261 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1262 | if (type != -1 && cnt != type) | ||
1263 | continue; | ||
1264 | switch (cnt) { | ||
1265 | case USRQUOTA: | ||
1266 | id = inode->i_uid; | ||
1267 | break; | ||
1268 | case GRPQUOTA: | ||
1269 | id = inode->i_gid; | ||
1270 | break; | ||
1271 | } | ||
1272 | got[cnt] = dqget(sb, id, cnt); | ||
1273 | } | ||
1274 | |||
1275 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
1276 | /* Having dqptr_sem we know NOQUOTA flags can't be altered... */ | ||
1277 | if (IS_NOQUOTA(inode)) | ||
1278 | goto out_err; | ||
1279 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1280 | if (type != -1 && cnt != type) | ||
1281 | continue; | ||
1282 | /* Avoid races with quotaoff() */ | ||
1283 | if (!sb_has_quota_active(sb, cnt)) | ||
1284 | continue; | ||
1285 | if (inode->i_dquot[cnt] == NODQUOT) { | ||
1286 | inode->i_dquot[cnt] = got[cnt]; | ||
1287 | got[cnt] = NODQUOT; | ||
1288 | } | ||
1289 | } | ||
1290 | out_err: | ||
1291 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
1292 | /* Drop unused references */ | ||
1293 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1294 | dqput(got[cnt]); | ||
1295 | return ret; | ||
1296 | } | ||
1297 | EXPORT_SYMBOL(dquot_initialize); | ||
1298 | |||
1299 | /* | ||
1300 | * Release all quotas referenced by inode | ||
1301 | */ | ||
1302 | int dquot_drop(struct inode *inode) | ||
1303 | { | ||
1304 | int cnt; | ||
1305 | struct dquot *put[MAXQUOTAS]; | ||
1306 | |||
1307 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1308 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1309 | put[cnt] = inode->i_dquot[cnt]; | ||
1310 | inode->i_dquot[cnt] = NODQUOT; | ||
1311 | } | ||
1312 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1313 | |||
1314 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1315 | dqput(put[cnt]); | ||
1316 | return 0; | ||
1317 | } | ||
1318 | EXPORT_SYMBOL(dquot_drop); | ||
1319 | |||
1320 | /* Wrapper to remove references to quota structures from inode */ | ||
1321 | void vfs_dq_drop(struct inode *inode) | ||
1322 | { | ||
1323 | /* Here we can get arbitrary inode from clear_inode() so we have | ||
1324 | * to be careful. OTOH we don't need locking as quota operations | ||
1325 | * are allowed to change only at mount time */ | ||
1326 | if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op | ||
1327 | && inode->i_sb->dq_op->drop) { | ||
1328 | int cnt; | ||
1329 | /* Test before calling to rule out calls from proc and such | ||
1330 | * where we are not allowed to block. Note that this is | ||
1331 | * actually reliable test even without the lock - the caller | ||
1332 | * must assure that nobody can come after the DQUOT_DROP and | ||
1333 | * add quota pointers back anyway */ | ||
1334 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1335 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1336 | break; | ||
1337 | if (cnt < MAXQUOTAS) | ||
1338 | inode->i_sb->dq_op->drop(inode); | ||
1339 | } | ||
1340 | } | ||
1341 | EXPORT_SYMBOL(vfs_dq_drop); | ||
1342 | |||
1343 | /* | ||
1344 | * Following four functions update i_blocks+i_bytes fields and | ||
1345 | * quota information (together with appropriate checks) | ||
1346 | * NOTE: We absolutely rely on the fact that caller dirties | ||
1347 | * the inode (usually macros in quotaops.h care about this) and | ||
1348 | * holds a handle for the current transaction so that dquot write and | ||
1349 | * inode write go into the same transaction. | ||
1350 | */ | ||
1351 | |||
1352 | /* | ||
1353 | * This operation can block, but only after everything is updated | ||
1354 | */ | ||
1355 | int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
1356 | int warn, int reserve) | ||
1357 | { | ||
1358 | int cnt, ret = QUOTA_OK; | ||
1359 | char warntype[MAXQUOTAS]; | ||
1360 | |||
1361 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1362 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1363 | |||
1364 | spin_lock(&dq_data_lock); | ||
1365 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1366 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1367 | continue; | ||
1368 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | ||
1369 | == NO_QUOTA) { | ||
1370 | ret = NO_QUOTA; | ||
1371 | goto out_unlock; | ||
1372 | } | ||
1373 | } | ||
1374 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1375 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1376 | continue; | ||
1377 | if (reserve) | ||
1378 | dquot_resv_space(inode->i_dquot[cnt], number); | ||
1379 | else | ||
1380 | dquot_incr_space(inode->i_dquot[cnt], number); | ||
1381 | } | ||
1382 | if (!reserve) | ||
1383 | inode_add_bytes(inode, number); | ||
1384 | out_unlock: | ||
1385 | spin_unlock(&dq_data_lock); | ||
1386 | flush_warnings(inode->i_dquot, warntype); | ||
1387 | return ret; | ||
1388 | } | ||
1389 | |||
1390 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
1391 | { | ||
1392 | int cnt, ret = QUOTA_OK; | ||
1393 | |||
1394 | /* | ||
1395 | * First test before acquiring mutex - solves deadlocks when we | ||
1396 | * re-enter the quota code and are already holding the mutex | ||
1397 | */ | ||
1398 | if (IS_NOQUOTA(inode)) { | ||
1399 | inode_add_bytes(inode, number); | ||
1400 | goto out; | ||
1401 | } | ||
1402 | |||
1403 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1404 | if (IS_NOQUOTA(inode)) { | ||
1405 | inode_add_bytes(inode, number); | ||
1406 | goto out_unlock; | ||
1407 | } | ||
1408 | |||
1409 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
1410 | if (ret == NO_QUOTA) | ||
1411 | goto out_unlock; | ||
1412 | |||
1413 | /* Dirtify all the dquots - this can block when journalling */ | ||
1414 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1415 | if (inode->i_dquot[cnt]) | ||
1416 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1417 | out_unlock: | ||
1418 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1419 | out: | ||
1420 | return ret; | ||
1421 | } | ||
1422 | EXPORT_SYMBOL(dquot_alloc_space); | ||
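The helpers above implement the charge side of block accounting: the limit check happens under dq_data_lock and the dquots are dirtied afterwards. A minimal sketch of the usual charge-then-roll-back pattern a filesystem block allocator follows, assuming the vfs_dq_alloc_block()/vfs_dq_free_block() wrappers from quotaops.h of this era (the example_* names are hypothetical):

/* Sketch only: charge quota before committing a block allocation and undo
 * the charge if the allocation itself fails.  example_do_alloc() stands in
 * for the filesystem's real on-disk allocator. */
static int example_alloc_blocks(struct inode *inode, unsigned long nr)
{
	int err;

	if (vfs_dq_alloc_block(inode, nr))	/* checks limits, updates usage */
		return -EDQUOT;

	err = example_do_alloc(inode, nr);
	if (err)
		vfs_dq_free_block(inode, nr);	/* roll the quota charge back */
	return err;
}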
1423 | |||
1424 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | ||
1425 | { | ||
1426 | int ret = QUOTA_OK; | ||
1427 | |||
1428 | if (IS_NOQUOTA(inode)) | ||
1429 | goto out; | ||
1430 | |||
1431 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1432 | if (IS_NOQUOTA(inode)) | ||
1433 | goto out_unlock; | ||
1434 | |||
1435 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
1436 | out_unlock: | ||
1437 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1438 | out: | ||
1439 | return ret; | ||
1440 | } | ||
1441 | EXPORT_SYMBOL(dquot_reserve_space); | ||
1442 | |||
1443 | /* | ||
1444 | * This operation can block, but only after everything is updated | ||
1445 | */ | ||
1446 | int dquot_alloc_inode(const struct inode *inode, qsize_t number) | ||
1447 | { | ||
1448 | int cnt, ret = NO_QUOTA; | ||
1449 | char warntype[MAXQUOTAS]; | ||
1450 | |||
1451 | /* First test before acquiring mutex - solves deadlocks when we | ||
1452 | * re-enter the quota code and are already holding the mutex */ | ||
1453 | if (IS_NOQUOTA(inode)) | ||
1454 | return QUOTA_OK; | ||
1455 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1456 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1457 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1458 | if (IS_NOQUOTA(inode)) { | ||
1459 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1460 | return QUOTA_OK; | ||
1461 | } | ||
1462 | spin_lock(&dq_data_lock); | ||
1463 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1464 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1465 | continue; | ||
1466 | if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) | ||
1467 | goto warn_put_all; | ||
1468 | } | ||
1469 | |||
1470 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1471 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1472 | continue; | ||
1473 | dquot_incr_inodes(inode->i_dquot[cnt], number); | ||
1474 | } | ||
1475 | ret = QUOTA_OK; | ||
1476 | warn_put_all: | ||
1477 | spin_unlock(&dq_data_lock); | ||
1478 | if (ret == QUOTA_OK) | ||
1479 | /* Dirtify all the dquots - this can block when journalling */ | ||
1480 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1481 | if (inode->i_dquot[cnt]) | ||
1482 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1483 | flush_warnings(inode->i_dquot, warntype); | ||
1484 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1485 | return ret; | ||
1486 | } | ||
1487 | EXPORT_SYMBOL(dquot_alloc_inode); | ||
1488 | |||
1489 | int dquot_claim_space(struct inode *inode, qsize_t number) | ||
1490 | { | ||
1491 | int cnt; | ||
1492 | int ret = QUOTA_OK; | ||
1493 | |||
1494 | if (IS_NOQUOTA(inode)) { | ||
1495 | inode_add_bytes(inode, number); | ||
1496 | goto out; | ||
1497 | } | ||
1498 | |||
1499 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1500 | if (IS_NOQUOTA(inode)) { | ||
1501 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1502 | inode_add_bytes(inode, number); | ||
1503 | goto out; | ||
1504 | } | ||
1505 | |||
1506 | spin_lock(&dq_data_lock); | ||
1507 | /* Claim the reserved quota space as allocated space */ | ||
1508 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1509 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1510 | dquot_claim_reserved_space(inode->i_dquot[cnt], | ||
1511 | number); | ||
1512 | } | ||
1513 | /* Update inode bytes */ | ||
1514 | inode_add_bytes(inode, number); | ||
1515 | spin_unlock(&dq_data_lock); | ||
1516 | /* Dirtify all the dquots - this can block when journalling */ | ||
1517 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1518 | if (inode->i_dquot[cnt]) | ||
1519 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1520 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1521 | out: | ||
1522 | return ret; | ||
1523 | } | ||
1524 | EXPORT_SYMBOL(dquot_claim_space); | ||
1525 | |||
1526 | /* | ||
1527 | * Release reserved quota space | ||
1528 | */ | ||
1529 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
1530 | { | ||
1531 | int cnt; | ||
1532 | |||
1533 | if (IS_NOQUOTA(inode)) | ||
1534 | goto out; | ||
1535 | |||
1536 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1537 | if (IS_NOQUOTA(inode)) | ||
1538 | goto out_unlock; | ||
1539 | |||
1540 | spin_lock(&dq_data_lock); | ||
1541 | /* Release reserved dquots */ | ||
1542 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1543 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1544 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
1545 | } | ||
1546 | spin_unlock(&dq_data_lock); | ||
1547 | |||
1548 | out_unlock: | ||
1549 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1550 | out: | ||
1551 | return; | ||
1552 | } | ||
1553 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
1554 | |||
1555 | /* | ||
1556 | * This operation can block, but only after everything is updated | ||
1557 | */ | ||
1558 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
1559 | { | ||
1560 | unsigned int cnt; | ||
1561 | char warntype[MAXQUOTAS]; | ||
1562 | |||
1563 | /* First test before acquiring mutex - solves deadlocks when we | ||
1564 | * re-enter the quota code and are already holding the mutex */ | ||
1565 | if (IS_NOQUOTA(inode)) { | ||
1566 | out_sub: | ||
1567 | inode_sub_bytes(inode, number); | ||
1568 | return QUOTA_OK; | ||
1569 | } | ||
1570 | |||
1571 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1572 | /* Now recheck reliably when holding dqptr_sem */ | ||
1573 | if (IS_NOQUOTA(inode)) { | ||
1574 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1575 | goto out_sub; | ||
1576 | } | ||
1577 | spin_lock(&dq_data_lock); | ||
1578 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1579 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1580 | continue; | ||
1581 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | ||
1582 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
1583 | } | ||
1584 | inode_sub_bytes(inode, number); | ||
1585 | spin_unlock(&dq_data_lock); | ||
1586 | /* Dirtify all the dquots - this can block when journalling */ | ||
1587 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1588 | if (inode->i_dquot[cnt]) | ||
1589 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1590 | flush_warnings(inode->i_dquot, warntype); | ||
1591 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1592 | return QUOTA_OK; | ||
1593 | } | ||
1594 | EXPORT_SYMBOL(dquot_free_space); | ||
1595 | |||
1596 | /* | ||
1597 | * This operation can block, but only after everything is updated | ||
1598 | */ | ||
1599 | int dquot_free_inode(const struct inode *inode, qsize_t number) | ||
1600 | { | ||
1601 | unsigned int cnt; | ||
1602 | char warntype[MAXQUOTAS]; | ||
1603 | |||
1604 | /* First test before acquiring mutex - solves deadlocks when we | ||
1605 | * re-enter the quota code and are already holding the mutex */ | ||
1606 | if (IS_NOQUOTA(inode)) | ||
1607 | return QUOTA_OK; | ||
1608 | |||
1609 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1610 | /* Now recheck reliably when holding dqptr_sem */ | ||
1611 | if (IS_NOQUOTA(inode)) { | ||
1612 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1613 | return QUOTA_OK; | ||
1614 | } | ||
1615 | spin_lock(&dq_data_lock); | ||
1616 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1617 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1618 | continue; | ||
1619 | warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); | ||
1620 | dquot_decr_inodes(inode->i_dquot[cnt], number); | ||
1621 | } | ||
1622 | spin_unlock(&dq_data_lock); | ||
1623 | /* Dirtify all the dquots - this can block when journalling */ | ||
1624 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1625 | if (inode->i_dquot[cnt]) | ||
1626 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1627 | flush_warnings(inode->i_dquot, warntype); | ||
1628 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1629 | return QUOTA_OK; | ||
1630 | } | ||
1631 | EXPORT_SYMBOL(dquot_free_inode); | ||
1632 | |||
1633 | /* | ||
1634 | * Callback function to get the reserved quota space from the underlying fs | ||
1635 | */ | ||
1636 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
1637 | { | ||
1638 | qsize_t reserved_space = 0; | ||
1639 | |||
1640 | if (sb_any_quota_active(inode->i_sb) && | ||
1641 | inode->i_sb->dq_op->get_reserved_space) | ||
1642 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
1643 | return reserved_space; | ||
1644 | } | ||
1645 | |||
1646 | /* | ||
1647 | * Transfer the number of inodes and blocks from one diskquota to another. | ||
1648 | * | ||
1649 | * This operation can block, but only after everything is updated | ||
1650 | * A transaction must be started when entering this function. | ||
1651 | */ | ||
1652 | int dquot_transfer(struct inode *inode, struct iattr *iattr) | ||
1653 | { | ||
1654 | qsize_t space, cur_space; | ||
1655 | qsize_t rsv_space = 0; | ||
1656 | struct dquot *transfer_from[MAXQUOTAS]; | ||
1657 | struct dquot *transfer_to[MAXQUOTAS]; | ||
1658 | int cnt, ret = QUOTA_OK; | ||
1659 | int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, | ||
1660 | chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; | ||
1661 | char warntype_to[MAXQUOTAS]; | ||
1662 | char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; | ||
1663 | |||
1664 | /* First test before acquiring mutex - solves deadlocks when we | ||
1665 | * re-enter the quota code and are already holding the mutex */ | ||
1666 | if (IS_NOQUOTA(inode)) | ||
1667 | return QUOTA_OK; | ||
1668 | /* Initialize the arrays */ | ||
1669 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1670 | transfer_from[cnt] = NODQUOT; | ||
1671 | transfer_to[cnt] = NODQUOT; | ||
1672 | warntype_to[cnt] = QUOTA_NL_NOWARN; | ||
1673 | switch (cnt) { | ||
1674 | case USRQUOTA: | ||
1675 | if (!chuid) | ||
1676 | continue; | ||
1677 | transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); | ||
1678 | break; | ||
1679 | case GRPQUOTA: | ||
1680 | if (!chgid) | ||
1681 | continue; | ||
1682 | transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); | ||
1683 | break; | ||
1684 | } | ||
1685 | } | ||
1686 | |||
1687 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1688 | /* Now recheck reliably when holding dqptr_sem */ | ||
1689 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ | ||
1690 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1691 | goto put_all; | ||
1692 | } | ||
1693 | spin_lock(&dq_data_lock); | ||
1694 | cur_space = inode_get_bytes(inode); | ||
1695 | rsv_space = dquot_get_reserved_space(inode); | ||
1696 | space = cur_space + rsv_space; | ||
1697 | /* Build the transfer_from list and check the limits */ | ||
1698 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1699 | if (transfer_to[cnt] == NODQUOT) | ||
1700 | continue; | ||
1701 | transfer_from[cnt] = inode->i_dquot[cnt]; | ||
1702 | if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == | ||
1703 | NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, | ||
1704 | warntype_to + cnt) == NO_QUOTA) | ||
1705 | goto over_quota; | ||
1706 | } | ||
1707 | |||
1708 | /* | ||
1709 | * Finally perform the needed transfer from transfer_from to transfer_to | ||
1710 | */ | ||
1711 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1712 | /* | ||
1713 | * Skip changes for the same uid or gid, or for a turned-off quota type. | ||
1714 | */ | ||
1715 | if (transfer_to[cnt] == NODQUOT) | ||
1716 | continue; | ||
1717 | |||
1718 | /* Due to an IO error we might not have a transfer_from[] structure */ | ||
1719 | if (transfer_from[cnt]) { | ||
1720 | warntype_from_inodes[cnt] = | ||
1721 | info_idq_free(transfer_from[cnt], 1); | ||
1722 | warntype_from_space[cnt] = | ||
1723 | info_bdq_free(transfer_from[cnt], space); | ||
1724 | dquot_decr_inodes(transfer_from[cnt], 1); | ||
1725 | dquot_decr_space(transfer_from[cnt], cur_space); | ||
1726 | dquot_free_reserved_space(transfer_from[cnt], | ||
1727 | rsv_space); | ||
1728 | } | ||
1729 | |||
1730 | dquot_incr_inodes(transfer_to[cnt], 1); | ||
1731 | dquot_incr_space(transfer_to[cnt], cur_space); | ||
1732 | dquot_resv_space(transfer_to[cnt], rsv_space); | ||
1733 | |||
1734 | inode->i_dquot[cnt] = transfer_to[cnt]; | ||
1735 | } | ||
1736 | spin_unlock(&dq_data_lock); | ||
1737 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1738 | |||
1739 | /* Dirtify all the dquots - this can block when journalling */ | ||
1740 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1741 | if (transfer_from[cnt]) | ||
1742 | mark_dquot_dirty(transfer_from[cnt]); | ||
1743 | if (transfer_to[cnt]) { | ||
1744 | mark_dquot_dirty(transfer_to[cnt]); | ||
1745 | /* The reference we got is transferred to the inode */ | ||
1746 | transfer_to[cnt] = NODQUOT; | ||
1747 | } | ||
1748 | } | ||
1749 | warn_put_all: | ||
1750 | flush_warnings(transfer_to, warntype_to); | ||
1751 | flush_warnings(transfer_from, warntype_from_inodes); | ||
1752 | flush_warnings(transfer_from, warntype_from_space); | ||
1753 | put_all: | ||
1754 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1755 | dqput(transfer_from[cnt]); | ||
1756 | dqput(transfer_to[cnt]); | ||
1757 | } | ||
1758 | return ret; | ||
1759 | over_quota: | ||
1760 | spin_unlock(&dq_data_lock); | ||
1761 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1762 | /* Clear dquot pointers we don't want to dqput() */ | ||
1763 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1764 | transfer_from[cnt] = NODQUOT; | ||
1765 | ret = NO_QUOTA; | ||
1766 | goto warn_put_all; | ||
1767 | } | ||
1768 | EXPORT_SYMBOL(dquot_transfer); | ||
1769 | |||
1770 | /* Wrapper for transferring ownership of an inode */ | ||
1771 | int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) | ||
1772 | { | ||
1773 | if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { | ||
1774 | vfs_dq_init(inode); | ||
1775 | if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) | ||
1776 | return 1; | ||
1777 | } | ||
1778 | return 0; | ||
1779 | } | ||
1780 | EXPORT_SYMBOL(vfs_dq_transfer); | ||
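For context, chown()/chgrp() reaches vfs_dq_transfer() through a filesystem's ->setattr method; a hedged sketch of such a caller, mirroring the pattern disk filesystems of this era use (example_setattr() is hypothetical, the helpers are the generic ones from fs/attr.c):

/* Sketch only: move the quota charges to the new owner before the inode's
 * uid/gid actually change.  vfs_dq_transfer() returns non-zero when the new
 * owner would exceed their limits, which is mapped to -EDQUOT. */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error = inode_change_ok(inode, attr);

	if (error)
		return error;
	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
		if (error)
			return error;
	}
	return inode_setattr(inode, attr);
}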
1781 | |||
1782 | /* | ||
1783 | * Write info of quota file to disk | ||
1784 | */ | ||
1785 | int dquot_commit_info(struct super_block *sb, int type) | ||
1786 | { | ||
1787 | int ret; | ||
1788 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1789 | |||
1790 | mutex_lock(&dqopt->dqio_mutex); | ||
1791 | ret = dqopt->ops[type]->write_file_info(sb, type); | ||
1792 | mutex_unlock(&dqopt->dqio_mutex); | ||
1793 | return ret; | ||
1794 | } | ||
1795 | EXPORT_SYMBOL(dquot_commit_info); | ||
1796 | |||
1797 | /* | ||
1798 | * Definitions of diskquota operations. | ||
1799 | */ | ||
1800 | struct dquot_operations dquot_operations = { | ||
1801 | .initialize = dquot_initialize, | ||
1802 | .drop = dquot_drop, | ||
1803 | .alloc_space = dquot_alloc_space, | ||
1804 | .alloc_inode = dquot_alloc_inode, | ||
1805 | .free_space = dquot_free_space, | ||
1806 | .free_inode = dquot_free_inode, | ||
1807 | .transfer = dquot_transfer, | ||
1808 | .write_dquot = dquot_commit, | ||
1809 | .acquire_dquot = dquot_acquire, | ||
1810 | .release_dquot = dquot_release, | ||
1811 | .mark_dirty = dquot_mark_dquot_dirty, | ||
1812 | .write_info = dquot_commit_info, | ||
1813 | .alloc_dquot = dquot_alloc, | ||
1814 | .destroy_dquot = dquot_destroy, | ||
1815 | }; | ||
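A filesystem that relies on this generic implementation points its super_block at these tables during mount; a minimal sketch of the wiring (example_fill_super() is hypothetical, only the two assignments matter here):

/* Sketch only: let the VFS call back into the generic dquot code for
 * per-inode accounting (dq_op) and for quotactl(2) requests (s_qcop). */
static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &vfs_quotactl_ops;
	/* ... the rest of the usual fill_super() work ... */
	return 0;
}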
1816 | |||
1817 | /* | ||
1818 | * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) | ||
1819 | */ | ||
1820 | int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) | ||
1821 | { | ||
1822 | int cnt, ret = 0; | ||
1823 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1824 | struct inode *toputinode[MAXQUOTAS]; | ||
1825 | |||
1826 | /* Cannot turn off usage accounting without turning off limits, or | ||
1827 | * suspend quotas and simultaneously turn quotas off. */ | ||
1828 | if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) | ||
1829 | || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | | ||
1830 | DQUOT_USAGE_ENABLED))) | ||
1831 | return -EINVAL; | ||
1832 | |||
1833 | /* We need to serialize quota_off() for device */ | ||
1834 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1835 | |||
1836 | /* | ||
1837 | * Skip everything if there's nothing to do. We have to do this because | ||
1838 | * sometimes we are called when fill_super() failed and calling | ||
1839 | * sync_fs() in such cases does no good. | ||
1840 | */ | ||
1841 | if (!sb_any_quota_loaded(sb)) { | ||
1842 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1843 | return 0; | ||
1844 | } | ||
1845 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1846 | toputinode[cnt] = NULL; | ||
1847 | if (type != -1 && cnt != type) | ||
1848 | continue; | ||
1849 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1850 | continue; | ||
1851 | |||
1852 | if (flags & DQUOT_SUSPENDED) { | ||
1853 | spin_lock(&dq_state_lock); | ||
1854 | dqopt->flags |= | ||
1855 | dquot_state_flag(DQUOT_SUSPENDED, cnt); | ||
1856 | spin_unlock(&dq_state_lock); | ||
1857 | } else { | ||
1858 | spin_lock(&dq_state_lock); | ||
1859 | dqopt->flags &= ~dquot_state_flag(flags, cnt); | ||
1860 | /* Turning off suspended quotas? */ | ||
1861 | if (!sb_has_quota_loaded(sb, cnt) && | ||
1862 | sb_has_quota_suspended(sb, cnt)) { | ||
1863 | dqopt->flags &= ~dquot_state_flag( | ||
1864 | DQUOT_SUSPENDED, cnt); | ||
1865 | spin_unlock(&dq_state_lock); | ||
1866 | iput(dqopt->files[cnt]); | ||
1867 | dqopt->files[cnt] = NULL; | ||
1868 | continue; | ||
1869 | } | ||
1870 | spin_unlock(&dq_state_lock); | ||
1871 | } | ||
1872 | |||
1873 | /* Do we still have to keep quota loaded? */ | ||
1874 | if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) | ||
1875 | continue; | ||
1876 | |||
1877 | /* Note: these are blocking operations */ | ||
1878 | drop_dquot_ref(sb, cnt); | ||
1879 | invalidate_dquots(sb, cnt); | ||
1880 | /* | ||
1881 | * Now all dquots should be invalidated and all writes done, so we should | ||
1882 | * be the only users of the info. No locks needed. | ||
1883 | */ | ||
1884 | if (info_dirty(&dqopt->info[cnt])) | ||
1885 | sb->dq_op->write_info(sb, cnt); | ||
1886 | if (dqopt->ops[cnt]->free_file_info) | ||
1887 | dqopt->ops[cnt]->free_file_info(sb, cnt); | ||
1888 | put_quota_format(dqopt->info[cnt].dqi_format); | ||
1889 | |||
1890 | toputinode[cnt] = dqopt->files[cnt]; | ||
1891 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1892 | dqopt->files[cnt] = NULL; | ||
1893 | dqopt->info[cnt].dqi_flags = 0; | ||
1894 | dqopt->info[cnt].dqi_igrace = 0; | ||
1895 | dqopt->info[cnt].dqi_bgrace = 0; | ||
1896 | dqopt->ops[cnt] = NULL; | ||
1897 | } | ||
1898 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1899 | |||
1900 | /* Skip syncing and setting flags if quota files are hidden */ | ||
1901 | if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) | ||
1902 | goto put_inodes; | ||
1903 | |||
1904 | /* Sync the superblock so that buffers with quota data are written to | ||
1905 | * disk (and so userspace sees correct data afterwards). */ | ||
1906 | if (sb->s_op->sync_fs) | ||
1907 | sb->s_op->sync_fs(sb, 1); | ||
1908 | sync_blockdev(sb->s_bdev); | ||
1909 | /* Now the quota files are just ordinary files and we can set the | ||
1910 | * inode flags back. Moreover we discard the pagecache so that | ||
1911 | * userspace sees the writes we did bypassing the pagecache. We | ||
1912 | * must also discard the blockdev buffers so that we see the | ||
1913 | * changes done by userspace on the next quotaon() */ | ||
1914 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1915 | if (toputinode[cnt]) { | ||
1916 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1917 | /* If quota was reenabled in the meantime, we have | ||
1918 | * nothing to do */ | ||
1919 | if (!sb_has_quota_loaded(sb, cnt)) { | ||
1920 | mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); | ||
1921 | toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | | ||
1922 | S_NOATIME | S_NOQUOTA); | ||
1923 | truncate_inode_pages(&toputinode[cnt]->i_data, 0); | ||
1924 | mutex_unlock(&toputinode[cnt]->i_mutex); | ||
1925 | mark_inode_dirty(toputinode[cnt]); | ||
1926 | } | ||
1927 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1928 | } | ||
1929 | if (sb->s_bdev) | ||
1930 | invalidate_bdev(sb->s_bdev); | ||
1931 | put_inodes: | ||
1932 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1933 | if (toputinode[cnt]) { | ||
1934 | /* On remount RO, we keep the inode pointer so that we | ||
1935 | * can reenable quota on the subsequent remount RW. We | ||
1936 | * have to check the 'flags' variable and not use the sb_has_* | ||
1937 | * functions because another quotaon / quotaoff could | ||
1938 | * change the global state before we got here. We refuse | ||
1939 | * to suspend quotas when there is a pending delete on | ||
1940 | * the quota file... */ | ||
1941 | if (!(flags & DQUOT_SUSPENDED)) | ||
1942 | iput(toputinode[cnt]); | ||
1943 | else if (!toputinode[cnt]->i_nlink) | ||
1944 | ret = -EBUSY; | ||
1945 | } | ||
1946 | return ret; | ||
1947 | } | ||
1948 | EXPORT_SYMBOL(vfs_quota_disable); | ||
1949 | |||
1950 | int vfs_quota_off(struct super_block *sb, int type, int remount) | ||
1951 | { | ||
1952 | return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : | ||
1953 | (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); | ||
1954 | } | ||
1955 | EXPORT_SYMBOL(vfs_quota_off); | ||
1956 | /* | ||
1957 | * Turn quotas on for a device | ||
1958 | */ | ||
1959 | |||
1960 | /* | ||
1961 | * Helper function to turn quotas on when we already have the inode of | ||
1962 | * the quota file and no quota information is loaded. | ||
1963 | */ | ||
1964 | static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | ||
1965 | unsigned int flags) | ||
1966 | { | ||
1967 | struct quota_format_type *fmt = find_quota_format(format_id); | ||
1968 | struct super_block *sb = inode->i_sb; | ||
1969 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1970 | int error; | ||
1971 | int oldflags = -1; | ||
1972 | |||
1973 | if (!fmt) | ||
1974 | return -ESRCH; | ||
1975 | if (!S_ISREG(inode->i_mode)) { | ||
1976 | error = -EACCES; | ||
1977 | goto out_fmt; | ||
1978 | } | ||
1979 | if (IS_RDONLY(inode)) { | ||
1980 | error = -EROFS; | ||
1981 | goto out_fmt; | ||
1982 | } | ||
1983 | if (!sb->s_op->quota_write || !sb->s_op->quota_read) { | ||
1984 | error = -EINVAL; | ||
1985 | goto out_fmt; | ||
1986 | } | ||
1987 | /* Usage always has to be set... */ | ||
1988 | if (!(flags & DQUOT_USAGE_ENABLED)) { | ||
1989 | error = -EINVAL; | ||
1990 | goto out_fmt; | ||
1991 | } | ||
1992 | |||
1993 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
1994 | /* As we bypass the pagecache we must now flush the inode so | ||
1995 | * that we see all the changes from userspace... */ | ||
1996 | write_inode_now(inode, 1); | ||
1997 | /* And now flush the block cache so that kernel sees the | ||
1998 | * changes */ | ||
1999 | invalidate_bdev(sb->s_bdev); | ||
2000 | } | ||
2001 | mutex_lock(&inode->i_mutex); | ||
2002 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2003 | if (sb_has_quota_loaded(sb, type)) { | ||
2004 | error = -EBUSY; | ||
2005 | goto out_lock; | ||
2006 | } | ||
2007 | |||
2008 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
2009 | /* We don't want quota and atime on quota files (deadlocks | ||
2010 | * possible). Also nobody should write to the file - we use | ||
2011 | * special IO operations which ignore the immutable bit. */ | ||
2012 | down_write(&dqopt->dqptr_sem); | ||
2013 | oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); | ||
2014 | inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; | ||
2015 | up_write(&dqopt->dqptr_sem); | ||
2016 | sb->dq_op->drop(inode); | ||
2017 | } | ||
2018 | |||
2019 | error = -EIO; | ||
2020 | dqopt->files[type] = igrab(inode); | ||
2021 | if (!dqopt->files[type]) | ||
2022 | goto out_lock; | ||
2023 | error = -EINVAL; | ||
2024 | if (!fmt->qf_ops->check_quota_file(sb, type)) | ||
2025 | goto out_file_init; | ||
2026 | |||
2027 | dqopt->ops[type] = fmt->qf_ops; | ||
2028 | dqopt->info[type].dqi_format = fmt; | ||
2029 | dqopt->info[type].dqi_fmt_id = format_id; | ||
2030 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); | ||
2031 | mutex_lock(&dqopt->dqio_mutex); | ||
2032 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { | ||
2033 | mutex_unlock(&dqopt->dqio_mutex); | ||
2034 | goto out_file_init; | ||
2035 | } | ||
2036 | mutex_unlock(&dqopt->dqio_mutex); | ||
2037 | mutex_unlock(&inode->i_mutex); | ||
2038 | spin_lock(&dq_state_lock); | ||
2039 | dqopt->flags |= dquot_state_flag(flags, type); | ||
2040 | spin_unlock(&dq_state_lock); | ||
2041 | |||
2042 | add_dquot_ref(sb, type); | ||
2043 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2044 | |||
2045 | return 0; | ||
2046 | |||
2047 | out_file_init: | ||
2048 | dqopt->files[type] = NULL; | ||
2049 | iput(inode); | ||
2050 | out_lock: | ||
2051 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2052 | if (oldflags != -1) { | ||
2053 | down_write(&dqopt->dqptr_sem); | ||
2054 | /* Set the flags back (in case of an accidental quotaon() | ||
2055 | * on a wrong file we don't want to mess up the flags) */ | ||
2056 | inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); | ||
2057 | inode->i_flags |= oldflags; | ||
2058 | up_write(&dqopt->dqptr_sem); | ||
2059 | } | ||
2060 | mutex_unlock(&inode->i_mutex); | ||
2061 | out_fmt: | ||
2062 | put_quota_format(fmt); | ||
2063 | |||
2064 | return error; | ||
2065 | } | ||
2066 | |||
2067 | /* Reenable quotas on remount RW */ | ||
2068 | static int vfs_quota_on_remount(struct super_block *sb, int type) | ||
2069 | { | ||
2070 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2071 | struct inode *inode; | ||
2072 | int ret; | ||
2073 | unsigned int flags; | ||
2074 | |||
2075 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2076 | if (!sb_has_quota_suspended(sb, type)) { | ||
2077 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2078 | return 0; | ||
2079 | } | ||
2080 | inode = dqopt->files[type]; | ||
2081 | dqopt->files[type] = NULL; | ||
2082 | spin_lock(&dq_state_lock); | ||
2083 | flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | | ||
2084 | DQUOT_LIMITS_ENABLED, type); | ||
2085 | dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); | ||
2086 | spin_unlock(&dq_state_lock); | ||
2087 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2088 | |||
2089 | flags = dquot_generic_flag(flags, type); | ||
2090 | ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, | ||
2091 | flags); | ||
2092 | iput(inode); | ||
2093 | |||
2094 | return ret; | ||
2095 | } | ||
2096 | |||
2097 | int vfs_quota_on_path(struct super_block *sb, int type, int format_id, | ||
2098 | struct path *path) | ||
2099 | { | ||
2100 | int error = security_quota_on(path->dentry); | ||
2101 | if (error) | ||
2102 | return error; | ||
2103 | /* Quota file not on the same filesystem? */ | ||
2104 | if (path->mnt->mnt_sb != sb) | ||
2105 | error = -EXDEV; | ||
2106 | else | ||
2107 | error = vfs_load_quota_inode(path->dentry->d_inode, type, | ||
2108 | format_id, DQUOT_USAGE_ENABLED | | ||
2109 | DQUOT_LIMITS_ENABLED); | ||
2110 | return error; | ||
2111 | } | ||
2112 | EXPORT_SYMBOL(vfs_quota_on_path); | ||
2113 | |||
2114 | int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, | ||
2115 | int remount) | ||
2116 | { | ||
2117 | struct path path; | ||
2118 | int error; | ||
2119 | |||
2120 | if (remount) | ||
2121 | return vfs_quota_on_remount(sb, type); | ||
2122 | |||
2123 | error = kern_path(name, LOOKUP_FOLLOW, &path); | ||
2124 | if (!error) { | ||
2125 | error = vfs_quota_on_path(sb, type, format_id, &path); | ||
2126 | path_put(&path); | ||
2127 | } | ||
2128 | return error; | ||
2129 | } | ||
2130 | EXPORT_SYMBOL(vfs_quota_on); | ||
2131 | |||
2132 | /* | ||
2133 | * More powerful function for turning on quotas allowing setting | ||
2134 | * of individual quota flags | ||
2135 | */ | ||
2136 | int vfs_quota_enable(struct inode *inode, int type, int format_id, | ||
2137 | unsigned int flags) | ||
2138 | { | ||
2139 | int ret = 0; | ||
2140 | struct super_block *sb = inode->i_sb; | ||
2141 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2142 | |||
2143 | /* Just unsuspend quotas? */ | ||
2144 | if (flags & DQUOT_SUSPENDED) | ||
2145 | return vfs_quota_on_remount(sb, type); | ||
2146 | if (!flags) | ||
2147 | return 0; | ||
2148 | /* Just updating flags needed? */ | ||
2149 | if (sb_has_quota_loaded(sb, type)) { | ||
2150 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2151 | /* Now do a reliable test... */ | ||
2152 | if (!sb_has_quota_loaded(sb, type)) { | ||
2153 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2154 | goto load_quota; | ||
2155 | } | ||
2156 | if (flags & DQUOT_USAGE_ENABLED && | ||
2157 | sb_has_quota_usage_enabled(sb, type)) { | ||
2158 | ret = -EBUSY; | ||
2159 | goto out_lock; | ||
2160 | } | ||
2161 | if (flags & DQUOT_LIMITS_ENABLED && | ||
2162 | sb_has_quota_limits_enabled(sb, type)) { | ||
2163 | ret = -EBUSY; | ||
2164 | goto out_lock; | ||
2165 | } | ||
2166 | spin_lock(&dq_state_lock); | ||
2167 | sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); | ||
2168 | spin_unlock(&dq_state_lock); | ||
2169 | out_lock: | ||
2170 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2171 | return ret; | ||
2172 | } | ||
2173 | |||
2174 | load_quota: | ||
2175 | return vfs_load_quota_inode(inode, type, format_id, flags); | ||
2176 | } | ||
2177 | EXPORT_SYMBOL(vfs_quota_enable); | ||
2178 | |||
2179 | /* | ||
2180 | * This function is used when a filesystem needs to initialize quotas | ||
2181 | * at mount time. | ||
2182 | */ | ||
2183 | int vfs_quota_on_mount(struct super_block *sb, char *qf_name, | ||
2184 | int format_id, int type) | ||
2185 | { | ||
2186 | struct dentry *dentry; | ||
2187 | int error; | ||
2188 | |||
2189 | dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); | ||
2190 | if (IS_ERR(dentry)) | ||
2191 | return PTR_ERR(dentry); | ||
2192 | |||
2193 | if (!dentry->d_inode) { | ||
2194 | error = -ENOENT; | ||
2195 | goto out; | ||
2196 | } | ||
2197 | |||
2198 | error = security_quota_on(dentry); | ||
2199 | if (!error) | ||
2200 | error = vfs_load_quota_inode(dentry->d_inode, type, format_id, | ||
2201 | DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); | ||
2202 | |||
2203 | out: | ||
2204 | dput(dentry); | ||
2205 | return error; | ||
2206 | } | ||
2207 | EXPORT_SYMBOL(vfs_quota_on_mount); | ||
2208 | |||
2209 | /* Wrapper to turn on quotas when remounting rw */ | ||
2210 | int vfs_dq_quota_on_remount(struct super_block *sb) | ||
2211 | { | ||
2212 | int cnt; | ||
2213 | int ret = 0, err; | ||
2214 | |||
2215 | if (!sb->s_qcop || !sb->s_qcop->quota_on) | ||
2216 | return -ENOSYS; | ||
2217 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
2218 | err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); | ||
2219 | if (err < 0 && !ret) | ||
2220 | ret = err; | ||
2221 | } | ||
2222 | return ret; | ||
2223 | } | ||
2224 | EXPORT_SYMBOL(vfs_dq_quota_on_remount); | ||
2225 | |||
2226 | static inline qsize_t qbtos(qsize_t blocks) | ||
2227 | { | ||
2228 | return blocks << QIF_DQBLKSIZE_BITS; | ||
2229 | } | ||
2230 | |||
2231 | static inline qsize_t stoqb(qsize_t space) | ||
2232 | { | ||
2233 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; | ||
2234 | } | ||
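qbtos() and stoqb() convert between quota blocks and bytes, with the byte-to-block direction rounding up; a worked example, assuming QIF_DQBLKSIZE_BITS == 10 (1 KiB quota blocks):

/* Assuming QIF_DQBLKSIZE_BITS == 10, i.e. 1024-byte quota blocks:
 *   qbtos(3)    == 3 << 10             == 3072 bytes
 *   stoqb(3072) == (3072 + 1023) >> 10 == 3    blocks
 *   stoqb(3073) == (3073 + 1023) >> 10 == 4    blocks (a partial block rounds up)
 */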
2235 | |||
2236 | /* Generic routine for getting common part of quota structure */ | ||
2237 | static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2238 | { | ||
2239 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2240 | |||
2241 | spin_lock(&dq_data_lock); | ||
2242 | di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); | ||
2243 | di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); | ||
2244 | di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; | ||
2245 | di->dqb_ihardlimit = dm->dqb_ihardlimit; | ||
2246 | di->dqb_isoftlimit = dm->dqb_isoftlimit; | ||
2247 | di->dqb_curinodes = dm->dqb_curinodes; | ||
2248 | di->dqb_btime = dm->dqb_btime; | ||
2249 | di->dqb_itime = dm->dqb_itime; | ||
2250 | di->dqb_valid = QIF_ALL; | ||
2251 | spin_unlock(&dq_data_lock); | ||
2252 | } | ||
2253 | |||
2254 | int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) | ||
2255 | { | ||
2256 | struct dquot *dquot; | ||
2257 | |||
2258 | dquot = dqget(sb, id, type); | ||
2259 | if (dquot == NODQUOT) | ||
2260 | return -ESRCH; | ||
2261 | do_get_dqblk(dquot, di); | ||
2262 | dqput(dquot); | ||
2263 | |||
2264 | return 0; | ||
2265 | } | ||
2266 | EXPORT_SYMBOL(vfs_get_dqblk); | ||
2267 | |||
2268 | /* Generic routine for setting common part of quota structure */ | ||
2269 | static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2270 | { | ||
2271 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2272 | int check_blim = 0, check_ilim = 0; | ||
2273 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
2274 | |||
2275 | if ((di->dqb_valid & QIF_BLIMITS && | ||
2276 | (di->dqb_bhardlimit > dqi->dqi_maxblimit || | ||
2277 | di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || | ||
2278 | (di->dqb_valid & QIF_ILIMITS && | ||
2279 | (di->dqb_ihardlimit > dqi->dqi_maxilimit || | ||
2280 | di->dqb_isoftlimit > dqi->dqi_maxilimit))) | ||
2281 | return -ERANGE; | ||
2282 | |||
2283 | spin_lock(&dq_data_lock); | ||
2284 | if (di->dqb_valid & QIF_SPACE) { | ||
2285 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; | ||
2286 | check_blim = 1; | ||
2287 | __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | ||
2288 | } | ||
2289 | if (di->dqb_valid & QIF_BLIMITS) { | ||
2290 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); | ||
2291 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); | ||
2292 | check_blim = 1; | ||
2293 | __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | ||
2294 | } | ||
2295 | if (di->dqb_valid & QIF_INODES) { | ||
2296 | dm->dqb_curinodes = di->dqb_curinodes; | ||
2297 | check_ilim = 1; | ||
2298 | __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | ||
2299 | } | ||
2300 | if (di->dqb_valid & QIF_ILIMITS) { | ||
2301 | dm->dqb_isoftlimit = di->dqb_isoftlimit; | ||
2302 | dm->dqb_ihardlimit = di->dqb_ihardlimit; | ||
2303 | check_ilim = 1; | ||
2304 | __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | ||
2305 | } | ||
2306 | if (di->dqb_valid & QIF_BTIME) { | ||
2307 | dm->dqb_btime = di->dqb_btime; | ||
2308 | check_blim = 1; | ||
2309 | __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | ||
2310 | } | ||
2311 | if (di->dqb_valid & QIF_ITIME) { | ||
2312 | dm->dqb_itime = di->dqb_itime; | ||
2313 | check_ilim = 1; | ||
2314 | __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | ||
2315 | } | ||
2316 | |||
2317 | if (check_blim) { | ||
2318 | if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { | ||
2319 | dm->dqb_btime = 0; | ||
2320 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
2321 | } | ||
2322 | else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ | ||
2323 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; | ||
2324 | } | ||
2325 | if (check_ilim) { | ||
2326 | if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { | ||
2327 | dm->dqb_itime = 0; | ||
2328 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
2329 | } | ||
2330 | else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ | ||
2331 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; | ||
2332 | } | ||
2333 | if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) | ||
2334 | clear_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2335 | else | ||
2336 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2337 | spin_unlock(&dq_data_lock); | ||
2338 | mark_dquot_dirty(dquot); | ||
2339 | |||
2340 | return 0; | ||
2341 | } | ||
2342 | |||
2343 | int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) | ||
2344 | { | ||
2345 | struct dquot *dquot; | ||
2346 | int rc; | ||
2347 | |||
2348 | dquot = dqget(sb, id, type); | ||
2349 | if (!dquot) { | ||
2350 | rc = -ESRCH; | ||
2351 | goto out; | ||
2352 | } | ||
2353 | rc = do_set_dqblk(dquot, di); | ||
2354 | dqput(dquot); | ||
2355 | out: | ||
2356 | return rc; | ||
2357 | } | ||
2358 | EXPORT_SYMBOL(vfs_set_dqblk); | ||
2359 | |||
2360 | /* Generic routine for getting common part of quota file information */ | ||
2361 | int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2362 | { | ||
2363 | struct mem_dqinfo *mi; | ||
2364 | |||
2365 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2366 | if (!sb_has_quota_active(sb, type)) { | ||
2367 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2368 | return -ESRCH; | ||
2369 | } | ||
2370 | mi = sb_dqopt(sb)->info + type; | ||
2371 | spin_lock(&dq_data_lock); | ||
2372 | ii->dqi_bgrace = mi->dqi_bgrace; | ||
2373 | ii->dqi_igrace = mi->dqi_igrace; | ||
2374 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | ||
2375 | ii->dqi_valid = IIF_ALL; | ||
2376 | spin_unlock(&dq_data_lock); | ||
2377 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2378 | return 0; | ||
2379 | } | ||
2380 | EXPORT_SYMBOL(vfs_get_dqinfo); | ||
2381 | |||
2382 | /* Generic routine for setting common part of quota file information */ | ||
2383 | int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2384 | { | ||
2385 | struct mem_dqinfo *mi; | ||
2386 | int err = 0; | ||
2387 | |||
2388 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2389 | if (!sb_has_quota_active(sb, type)) { | ||
2390 | err = -ESRCH; | ||
2391 | goto out; | ||
2392 | } | ||
2393 | mi = sb_dqopt(sb)->info + type; | ||
2394 | spin_lock(&dq_data_lock); | ||
2395 | if (ii->dqi_valid & IIF_BGRACE) | ||
2396 | mi->dqi_bgrace = ii->dqi_bgrace; | ||
2397 | if (ii->dqi_valid & IIF_IGRACE) | ||
2398 | mi->dqi_igrace = ii->dqi_igrace; | ||
2399 | if (ii->dqi_valid & IIF_FLAGS) | ||
2400 | mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); | ||
2401 | spin_unlock(&dq_data_lock); | ||
2402 | mark_info_dirty(sb, type); | ||
2403 | /* Force write to disk */ | ||
2404 | sb->dq_op->write_info(sb, type); | ||
2405 | out: | ||
2406 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2407 | return err; | ||
2408 | } | ||
2409 | EXPORT_SYMBOL(vfs_set_dqinfo); | ||
2410 | |||
2411 | struct quotactl_ops vfs_quotactl_ops = { | ||
2412 | .quota_on = vfs_quota_on, | ||
2413 | .quota_off = vfs_quota_off, | ||
2414 | .quota_sync = vfs_quota_sync, | ||
2415 | .get_info = vfs_get_dqinfo, | ||
2416 | .set_info = vfs_set_dqinfo, | ||
2417 | .get_dqblk = vfs_get_dqblk, | ||
2418 | .set_dqblk = vfs_set_dqblk | ||
2419 | }; | ||
2420 | |||
2421 | static ctl_table fs_dqstats_table[] = { | ||
2422 | { | ||
2423 | .ctl_name = FS_DQ_LOOKUPS, | ||
2424 | .procname = "lookups", | ||
2425 | .data = &dqstats.lookups, | ||
2426 | .maxlen = sizeof(int), | ||
2427 | .mode = 0444, | ||
2428 | .proc_handler = &proc_dointvec, | ||
2429 | }, | ||
2430 | { | ||
2431 | .ctl_name = FS_DQ_DROPS, | ||
2432 | .procname = "drops", | ||
2433 | .data = &dqstats.drops, | ||
2434 | .maxlen = sizeof(int), | ||
2435 | .mode = 0444, | ||
2436 | .proc_handler = &proc_dointvec, | ||
2437 | }, | ||
2438 | { | ||
2439 | .ctl_name = FS_DQ_READS, | ||
2440 | .procname = "reads", | ||
2441 | .data = &dqstats.reads, | ||
2442 | .maxlen = sizeof(int), | ||
2443 | .mode = 0444, | ||
2444 | .proc_handler = &proc_dointvec, | ||
2445 | }, | ||
2446 | { | ||
2447 | .ctl_name = FS_DQ_WRITES, | ||
2448 | .procname = "writes", | ||
2449 | .data = &dqstats.writes, | ||
2450 | .maxlen = sizeof(int), | ||
2451 | .mode = 0444, | ||
2452 | .proc_handler = &proc_dointvec, | ||
2453 | }, | ||
2454 | { | ||
2455 | .ctl_name = FS_DQ_CACHE_HITS, | ||
2456 | .procname = "cache_hits", | ||
2457 | .data = &dqstats.cache_hits, | ||
2458 | .maxlen = sizeof(int), | ||
2459 | .mode = 0444, | ||
2460 | .proc_handler = &proc_dointvec, | ||
2461 | }, | ||
2462 | { | ||
2463 | .ctl_name = FS_DQ_ALLOCATED, | ||
2464 | .procname = "allocated_dquots", | ||
2465 | .data = &dqstats.allocated_dquots, | ||
2466 | .maxlen = sizeof(int), | ||
2467 | .mode = 0444, | ||
2468 | .proc_handler = &proc_dointvec, | ||
2469 | }, | ||
2470 | { | ||
2471 | .ctl_name = FS_DQ_FREE, | ||
2472 | .procname = "free_dquots", | ||
2473 | .data = &dqstats.free_dquots, | ||
2474 | .maxlen = sizeof(int), | ||
2475 | .mode = 0444, | ||
2476 | .proc_handler = &proc_dointvec, | ||
2477 | }, | ||
2478 | { | ||
2479 | .ctl_name = FS_DQ_SYNCS, | ||
2480 | .procname = "syncs", | ||
2481 | .data = &dqstats.syncs, | ||
2482 | .maxlen = sizeof(int), | ||
2483 | .mode = 0444, | ||
2484 | .proc_handler = &proc_dointvec, | ||
2485 | }, | ||
2486 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
2487 | { | ||
2488 | .ctl_name = FS_DQ_WARNINGS, | ||
2489 | .procname = "warnings", | ||
2490 | .data = &flag_print_warnings, | ||
2491 | .maxlen = sizeof(int), | ||
2492 | .mode = 0644, | ||
2493 | .proc_handler = &proc_dointvec, | ||
2494 | }, | ||
2495 | #endif | ||
2496 | { .ctl_name = 0 }, | ||
2497 | }; | ||
2498 | |||
2499 | static ctl_table fs_table[] = { | ||
2500 | { | ||
2501 | .ctl_name = FS_DQSTATS, | ||
2502 | .procname = "quota", | ||
2503 | .mode = 0555, | ||
2504 | .child = fs_dqstats_table, | ||
2505 | }, | ||
2506 | { .ctl_name = 0 }, | ||
2507 | }; | ||
2508 | |||
2509 | static ctl_table sys_table[] = { | ||
2510 | { | ||
2511 | .ctl_name = CTL_FS, | ||
2512 | .procname = "fs", | ||
2513 | .mode = 0555, | ||
2514 | .child = fs_table, | ||
2515 | }, | ||
2516 | { .ctl_name = 0 }, | ||
2517 | }; | ||
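The three tables chain together as fs -> quota -> counters, so registration exposes the statistics as read-only files under /proc/sys/fs/quota/; a summary derived from the procname and mode fields above:

/* Resulting sysctl files (all mode 0444 except "warnings", which is 0644 and
 * only present when CONFIG_PRINT_QUOTA_WARNING is set):
 *   /proc/sys/fs/quota/lookups        /proc/sys/fs/quota/cache_hits
 *   /proc/sys/fs/quota/drops          /proc/sys/fs/quota/allocated_dquots
 *   /proc/sys/fs/quota/reads          /proc/sys/fs/quota/free_dquots
 *   /proc/sys/fs/quota/writes         /proc/sys/fs/quota/syncs
 *   /proc/sys/fs/quota/warnings
 */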
2518 | |||
2519 | static int __init dquot_init(void) | ||
2520 | { | ||
2521 | int i; | ||
2522 | unsigned long nr_hash, order; | ||
2523 | |||
2524 | printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); | ||
2525 | |||
2526 | register_sysctl_table(sys_table); | ||
2527 | |||
2528 | dquot_cachep = kmem_cache_create("dquot", | ||
2529 | sizeof(struct dquot), sizeof(unsigned long) * 4, | ||
2530 | (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| | ||
2531 | SLAB_MEM_SPREAD|SLAB_PANIC), | ||
2532 | NULL); | ||
2533 | |||
2534 | order = 0; | ||
2535 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); | ||
2536 | if (!dquot_hash) | ||
2537 | panic("Cannot create dquot hash table"); | ||
2538 | |||
2539 | /* Find power-of-two hlist_heads which can fit into allocation */ | ||
2540 | nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); | ||
2541 | dq_hash_bits = 0; | ||
2542 | do { | ||
2543 | dq_hash_bits++; | ||
2544 | } while (nr_hash >> dq_hash_bits); | ||
2545 | dq_hash_bits--; | ||
2546 | |||
2547 | nr_hash = 1UL << dq_hash_bits; | ||
2548 | dq_hash_mask = nr_hash - 1; | ||
2549 | for (i = 0; i < nr_hash; i++) | ||
2550 | INIT_HLIST_HEAD(dquot_hash + i); | ||
2551 | |||
2552 | printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", | ||
2553 | nr_hash, order, (PAGE_SIZE << order)); | ||
2554 | |||
2555 | register_shrinker(&dqcache_shrinker); | ||
2556 | |||
2557 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
2558 | if (genl_register_family(&quota_genl_family) != 0) | ||
2559 | printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); | ||
2560 | #endif | ||
2561 | |||
2562 | return 0; | ||
2563 | } | ||
2564 | module_init(dquot_init); | ||
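dquot_init() sizes the dquot hash by finding the largest power-of-two number of hlist_heads that fits into the page allocation; a worked example under common assumptions (order 0, 4 KiB page, 8-byte struct hlist_head):

/* With order == 0, PAGE_SIZE == 4096 and sizeof(struct hlist_head) == 8:
 *   nr_hash = 4096 / 8 = 512 candidate heads;
 *   the do/while stops once 512 >> dq_hash_bits == 0 (at dq_hash_bits == 10),
 *   the decrement leaves dq_hash_bits == 9,
 *   so nr_hash = 1 << 9 = 512 heads and dq_hash_mask = 511.
 */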
diff --git a/fs/quota/quota.c b/fs/quota/quota.c new file mode 100644 index 000000000000..d76ada914f98 --- /dev/null +++ b/fs/quota/quota.c | |||
@@ -0,0 +1,513 @@ | |||
1 | /* | ||
2 | * Quota code necessary even when VFS quota support is not compiled | ||
3 | * into the kernel. The interesting stuff is over in dquot.c; here | ||
4 | * we have symbols for initial quotactl(2) handling, the sysctl(2) | ||
5 | * variables, etc. - things needed even when quota support is disabled. | ||
6 | */ | ||
7 | |||
8 | #include <linux/fs.h> | ||
9 | #include <linux/namei.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <asm/current.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | #include <linux/compat.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/security.h> | ||
16 | #include <linux/syscalls.h> | ||
17 | #include <linux/buffer_head.h> | ||
18 | #include <linux/capability.h> | ||
19 | #include <linux/quotaops.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | /* Check validity of generic quotactl commands */ | ||
23 | static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) | ||
24 | { | ||
25 | if (type >= MAXQUOTAS) | ||
26 | return -EINVAL; | ||
27 | if (!sb && cmd != Q_SYNC) | ||
28 | return -ENODEV; | ||
29 | /* Is operation supported? */ | ||
30 | if (sb && !sb->s_qcop) | ||
31 | return -ENOSYS; | ||
32 | |||
33 | switch (cmd) { | ||
34 | case Q_GETFMT: | ||
35 | break; | ||
36 | case Q_QUOTAON: | ||
37 | if (!sb->s_qcop->quota_on) | ||
38 | return -ENOSYS; | ||
39 | break; | ||
40 | case Q_QUOTAOFF: | ||
41 | if (!sb->s_qcop->quota_off) | ||
42 | return -ENOSYS; | ||
43 | break; | ||
44 | case Q_SETINFO: | ||
45 | if (!sb->s_qcop->set_info) | ||
46 | return -ENOSYS; | ||
47 | break; | ||
48 | case Q_GETINFO: | ||
49 | if (!sb->s_qcop->get_info) | ||
50 | return -ENOSYS; | ||
51 | break; | ||
52 | case Q_SETQUOTA: | ||
53 | if (!sb->s_qcop->set_dqblk) | ||
54 | return -ENOSYS; | ||
55 | break; | ||
56 | case Q_GETQUOTA: | ||
57 | if (!sb->s_qcop->get_dqblk) | ||
58 | return -ENOSYS; | ||
59 | break; | ||
60 | case Q_SYNC: | ||
61 | if (sb && !sb->s_qcop->quota_sync) | ||
62 | return -ENOSYS; | ||
63 | break; | ||
64 | default: | ||
65 | return -EINVAL; | ||
66 | } | ||
67 | |||
68 | /* Is quota turned on for commands which need it? */ | ||
69 | switch (cmd) { | ||
70 | case Q_GETFMT: | ||
71 | case Q_GETINFO: | ||
72 | case Q_SETINFO: | ||
73 | case Q_SETQUOTA: | ||
74 | case Q_GETQUOTA: | ||
75 | /* This is just an informative test so we are satisfied without a lock */ | ||
76 | if (!sb_has_quota_active(sb, type)) | ||
77 | return -ESRCH; | ||
78 | } | ||
79 | |||
80 | /* Check privileges */ | ||
81 | if (cmd == Q_GETQUOTA) { | ||
82 | if (((type == USRQUOTA && current_euid() != id) || | ||
83 | (type == GRPQUOTA && !in_egroup_p(id))) && | ||
84 | !capable(CAP_SYS_ADMIN)) | ||
85 | return -EPERM; | ||
86 | } | ||
87 | else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) | ||
88 | if (!capable(CAP_SYS_ADMIN)) | ||
89 | return -EPERM; | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | /* Check validity of XFS Quota Manager commands */ | ||
95 | static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) | ||
96 | { | ||
97 | if (type >= XQM_MAXQUOTAS) | ||
98 | return -EINVAL; | ||
99 | if (!sb) | ||
100 | return -ENODEV; | ||
101 | if (!sb->s_qcop) | ||
102 | return -ENOSYS; | ||
103 | |||
104 | switch (cmd) { | ||
105 | case Q_XQUOTAON: | ||
106 | case Q_XQUOTAOFF: | ||
107 | case Q_XQUOTARM: | ||
108 | if (!sb->s_qcop->set_xstate) | ||
109 | return -ENOSYS; | ||
110 | break; | ||
111 | case Q_XGETQSTAT: | ||
112 | if (!sb->s_qcop->get_xstate) | ||
113 | return -ENOSYS; | ||
114 | break; | ||
115 | case Q_XSETQLIM: | ||
116 | if (!sb->s_qcop->set_xquota) | ||
117 | return -ENOSYS; | ||
118 | break; | ||
119 | case Q_XGETQUOTA: | ||
120 | if (!sb->s_qcop->get_xquota) | ||
121 | return -ENOSYS; | ||
122 | break; | ||
123 | case Q_XQUOTASYNC: | ||
124 | if (!sb->s_qcop->quota_sync) | ||
125 | return -ENOSYS; | ||
126 | break; | ||
127 | default: | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | |||
131 | /* Check privileges */ | ||
132 | if (cmd == Q_XGETQUOTA) { | ||
133 | if (((type == XQM_USRQUOTA && current_euid() != id) || | ||
134 | (type == XQM_GRPQUOTA && !in_egroup_p(id))) && | ||
135 | !capable(CAP_SYS_ADMIN)) | ||
136 | return -EPERM; | ||
137 | } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { | ||
138 | if (!capable(CAP_SYS_ADMIN)) | ||
139 | return -EPERM; | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id) | ||
146 | { | ||
147 | int error; | ||
148 | |||
149 | if (XQM_COMMAND(cmd)) | ||
150 | error = xqm_quotactl_valid(sb, type, cmd, id); | ||
151 | else | ||
152 | error = generic_quotactl_valid(sb, type, cmd, id); | ||
153 | if (!error) | ||
154 | error = security_quotactl(cmd, type, id, sb); | ||
155 | return error; | ||
156 | } | ||
157 | |||
158 | static void quota_sync_sb(struct super_block *sb, int type) | ||
159 | { | ||
160 | int cnt; | ||
161 | |||
162 | sb->s_qcop->quota_sync(sb, type); | ||
163 | |||
164 | if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) | ||
165 | return; | ||
166 | /* This is neither very clever nor fast, but currently I don't know of | ||
167 | * any other simple way of getting quota data to disk, and we must get | ||
168 | * it there for it to be visible to userspace... */ | ||
169 | if (sb->s_op->sync_fs) | ||
170 | sb->s_op->sync_fs(sb, 1); | ||
171 | sync_blockdev(sb->s_bdev); | ||
172 | |||
173 | /* | ||
174 | * Now that everything is written we can discard the pagecache so | ||
175 | * that userspace sees the changes. | ||
176 | */ | ||
177 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
178 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
179 | if (type != -1 && cnt != type) | ||
180 | continue; | ||
181 | if (!sb_has_quota_active(sb, cnt)) | ||
182 | continue; | ||
183 | mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA); | ||
184 | truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); | ||
185 | mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); | ||
186 | } | ||
187 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
188 | } | ||
189 | |||
190 | void sync_dquots(struct super_block *sb, int type) | ||
191 | { | ||
192 | int cnt; | ||
193 | |||
194 | if (sb) { | ||
195 | if (sb->s_qcop->quota_sync) | ||
196 | quota_sync_sb(sb, type); | ||
197 | return; | ||
198 | } | ||
199 | |||
200 | spin_lock(&sb_lock); | ||
201 | restart: | ||
202 | list_for_each_entry(sb, &super_blocks, s_list) { | ||
203 | /* This test just improves performance so it needn't be reliable... */ | ||
204 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
205 | if (type != -1 && type != cnt) | ||
206 | continue; | ||
207 | if (!sb_has_quota_active(sb, cnt)) | ||
208 | continue; | ||
209 | if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && | ||
210 | list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) | ||
211 | continue; | ||
212 | break; | ||
213 | } | ||
214 | if (cnt == MAXQUOTAS) | ||
215 | continue; | ||
216 | sb->s_count++; | ||
217 | spin_unlock(&sb_lock); | ||
218 | down_read(&sb->s_umount); | ||
219 | if (sb->s_root && sb->s_qcop->quota_sync) | ||
220 | quota_sync_sb(sb, type); | ||
221 | up_read(&sb->s_umount); | ||
222 | spin_lock(&sb_lock); | ||
223 | if (__put_super_and_need_restart(sb)) | ||
224 | goto restart; | ||
225 | } | ||
226 | spin_unlock(&sb_lock); | ||
227 | } | ||
228 | |||
229 | /* Copy parameters and call proper function */ | ||
230 | static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr) | ||
231 | { | ||
232 | int ret; | ||
233 | |||
234 | switch (cmd) { | ||
235 | case Q_QUOTAON: { | ||
236 | char *pathname; | ||
237 | |||
238 | if (IS_ERR(pathname = getname(addr))) | ||
239 | return PTR_ERR(pathname); | ||
240 | ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); | ||
241 | putname(pathname); | ||
242 | return ret; | ||
243 | } | ||
244 | case Q_QUOTAOFF: | ||
245 | return sb->s_qcop->quota_off(sb, type, 0); | ||
246 | |||
247 | case Q_GETFMT: { | ||
248 | __u32 fmt; | ||
249 | |||
250 | down_read(&sb_dqopt(sb)->dqptr_sem); | ||
251 | if (!sb_has_quota_active(sb, type)) { | ||
252 | up_read(&sb_dqopt(sb)->dqptr_sem); | ||
253 | return -ESRCH; | ||
254 | } | ||
255 | fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; | ||
256 | up_read(&sb_dqopt(sb)->dqptr_sem); | ||
257 | if (copy_to_user(addr, &fmt, sizeof(fmt))) | ||
258 | return -EFAULT; | ||
259 | return 0; | ||
260 | } | ||
261 | case Q_GETINFO: { | ||
262 | struct if_dqinfo info; | ||
263 | |||
264 | if ((ret = sb->s_qcop->get_info(sb, type, &info))) | ||
265 | return ret; | ||
266 | if (copy_to_user(addr, &info, sizeof(info))) | ||
267 | return -EFAULT; | ||
268 | return 0; | ||
269 | } | ||
270 | case Q_SETINFO: { | ||
271 | struct if_dqinfo info; | ||
272 | |||
273 | if (copy_from_user(&info, addr, sizeof(info))) | ||
274 | return -EFAULT; | ||
275 | return sb->s_qcop->set_info(sb, type, &info); | ||
276 | } | ||
277 | case Q_GETQUOTA: { | ||
278 | struct if_dqblk idq; | ||
279 | |||
280 | if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq))) | ||
281 | return ret; | ||
282 | if (copy_to_user(addr, &idq, sizeof(idq))) | ||
283 | return -EFAULT; | ||
284 | return 0; | ||
285 | } | ||
286 | case Q_SETQUOTA: { | ||
287 | struct if_dqblk idq; | ||
288 | |||
289 | if (copy_from_user(&idq, addr, sizeof(idq))) | ||
290 | return -EFAULT; | ||
291 | return sb->s_qcop->set_dqblk(sb, type, id, &idq); | ||
292 | } | ||
293 | case Q_SYNC: | ||
294 | sync_dquots(sb, type); | ||
295 | return 0; | ||
296 | |||
297 | case Q_XQUOTAON: | ||
298 | case Q_XQUOTAOFF: | ||
299 | case Q_XQUOTARM: { | ||
300 | __u32 flags; | ||
301 | |||
302 | if (copy_from_user(&flags, addr, sizeof(flags))) | ||
303 | return -EFAULT; | ||
304 | return sb->s_qcop->set_xstate(sb, flags, cmd); | ||
305 | } | ||
306 | case Q_XGETQSTAT: { | ||
307 | struct fs_quota_stat fqs; | ||
308 | |||
309 | if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) | ||
310 | return ret; | ||
311 | if (copy_to_user(addr, &fqs, sizeof(fqs))) | ||
312 | return -EFAULT; | ||
313 | return 0; | ||
314 | } | ||
315 | case Q_XSETQLIM: { | ||
316 | struct fs_disk_quota fdq; | ||
317 | |||
318 | if (copy_from_user(&fdq, addr, sizeof(fdq))) | ||
319 | return -EFAULT; | ||
320 | return sb->s_qcop->set_xquota(sb, type, id, &fdq); | ||
321 | } | ||
322 | case Q_XGETQUOTA: { | ||
323 | struct fs_disk_quota fdq; | ||
324 | |||
325 | if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq))) | ||
326 | return ret; | ||
327 | if (copy_to_user(addr, &fdq, sizeof(fdq))) | ||
328 | return -EFAULT; | ||
329 | return 0; | ||
330 | } | ||
331 | case Q_XQUOTASYNC: | ||
332 | return sb->s_qcop->quota_sync(sb, type); | ||
333 | /* We never reach here unless the validity check is broken */ | ||
334 | default: | ||
335 | BUG(); | ||
336 | } | ||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * look up a superblock on which quota ops will be performed | ||
342 | * - use the name of a block device to find the superblock thereon | ||
343 | */ | ||
344 | static inline struct super_block *quotactl_block(const char __user *special) | ||
345 | { | ||
346 | #ifdef CONFIG_BLOCK | ||
347 | struct block_device *bdev; | ||
348 | struct super_block *sb; | ||
349 | char *tmp = getname(special); | ||
350 | |||
351 | if (IS_ERR(tmp)) | ||
352 | return ERR_CAST(tmp); | ||
353 | bdev = lookup_bdev(tmp); | ||
354 | putname(tmp); | ||
355 | if (IS_ERR(bdev)) | ||
356 | return ERR_CAST(bdev); | ||
357 | sb = get_super(bdev); | ||
358 | bdput(bdev); | ||
359 | if (!sb) | ||
360 | return ERR_PTR(-ENODEV); | ||
361 | |||
362 | return sb; | ||
363 | #else | ||
364 | return ERR_PTR(-ENODEV); | ||
365 | #endif | ||
366 | } | ||
367 | |||
368 | /* | ||
369 | * This is the system call interface. This communicates with | ||
370 | * the user-level programs. Currently this only supports diskquota | ||
371 | * calls. Maybe we need to add the process quotas etc. in the future, | ||
372 | * but we probably should use rlimits for that. | ||
373 | */ | ||
374 | SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, | ||
375 | qid_t, id, void __user *, addr) | ||
376 | { | ||
377 | uint cmds, type; | ||
378 | struct super_block *sb = NULL; | ||
379 | int ret; | ||
380 | |||
381 | cmds = cmd >> SUBCMDSHIFT; | ||
382 | type = cmd & SUBCMDMASK; | ||
383 | |||
384 | if (cmds != Q_SYNC || special) { | ||
385 | sb = quotactl_block(special); | ||
386 | if (IS_ERR(sb)) | ||
387 | return PTR_ERR(sb); | ||
388 | } | ||
389 | |||
390 | ret = check_quotactl_valid(sb, type, cmds, id); | ||
391 | if (ret >= 0) | ||
392 | ret = do_quotactl(sb, type, cmds, id, addr); | ||
393 | if (sb) | ||
394 | drop_super(sb); | ||
395 | |||
396 | return ret; | ||
397 | } | ||
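From user space this entry point is reached via quotactl(2), with the command and quota type packed into one word by QCMD() (the SUBCMDSHIFT/SUBCMDMASK split undone above). A minimal, hedged usage sketch; the device path is only an illustration and the struct dqblk used here is the one glibc exports, not kernel code:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
        struct dqblk dq;

        /* Query the calling user's usage and limits; "/dev/sda1" stands in
         * for whatever block device carries the quota-enabled filesystem. */
        if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1",
                     getuid(), (caddr_t)&dq) != 0) {
                perror("quotactl");
                return 1;
        }
        printf("used %llu bytes, %llu inodes\n",
               (unsigned long long)dq.dqb_curspace,
               (unsigned long long)dq.dqb_curinodes);
        return 0;
}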
398 | |||
399 | #if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) | ||
400 | /* | ||
401 | * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) | ||
402 | * and is necessary due to alignment problems. | ||
403 | */ | ||
404 | struct compat_if_dqblk { | ||
405 | compat_u64 dqb_bhardlimit; | ||
406 | compat_u64 dqb_bsoftlimit; | ||
407 | compat_u64 dqb_curspace; | ||
408 | compat_u64 dqb_ihardlimit; | ||
409 | compat_u64 dqb_isoftlimit; | ||
410 | compat_u64 dqb_curinodes; | ||
411 | compat_u64 dqb_btime; | ||
412 | compat_u64 dqb_itime; | ||
413 | compat_uint_t dqb_valid; | ||
414 | }; | ||
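The only reason this wrapper exists is structure layout: on i386 a 64-bit field needs just 4-byte alignment, so the 32-bit layout of struct if_dqblk has no tail padding after dqb_valid, while the native 64-bit layout is padded to a multiple of 8. A stand-alone sketch of that difference; the type and struct names are made up for illustration, and the 72-versus-68-byte figures are what GCC produces for x86-64:

#include <stdint.h>
#include <stdio.h>

/* compat_u64-style type: 64 bits wide but only 4-byte aligned, as on i386. */
typedef uint64_t __attribute__((aligned(4))) u64_a4;

struct native_layout {          /* what a 64-bit kernel sees */
        uint64_t limits[8];
        uint32_t valid;         /* followed by 4 bytes of tail padding */
};

struct i386_layout {            /* what a 32-bit quota tool passes in */
        u64_a4 limits[8];
        uint32_t valid;         /* no tail padding: struct alignment is 4 */
};

int main(void)
{
        /* Typically prints "native 72, compat 68" when built for x86-64,
         * which is why the two sizes cannot be copied interchangeably. */
        printf("native %zu, compat %zu\n",
               sizeof(struct native_layout), sizeof(struct i386_layout));
        return 0;
}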
415 | |||
416 | /* XFS structures */ | ||
417 | struct compat_fs_qfilestat { | ||
418 | compat_u64 dqb_bhardlimit; | ||
419 | compat_u64 qfs_nblks; | ||
420 | compat_uint_t qfs_nextents; | ||
421 | }; | ||
422 | |||
423 | struct compat_fs_quota_stat { | ||
424 | __s8 qs_version; | ||
425 | __u16 qs_flags; | ||
426 | __s8 qs_pad; | ||
427 | struct compat_fs_qfilestat qs_uquota; | ||
428 | struct compat_fs_qfilestat qs_gquota; | ||
429 | compat_uint_t qs_incoredqs; | ||
430 | compat_int_t qs_btimelimit; | ||
431 | compat_int_t qs_itimelimit; | ||
432 | compat_int_t qs_rtbtimelimit; | ||
433 | __u16 qs_bwarnlimit; | ||
434 | __u16 qs_iwarnlimit; | ||
435 | }; | ||
436 | |||
437 | asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, | ||
438 | qid_t id, void __user *addr) | ||
439 | { | ||
440 | unsigned int cmds; | ||
441 | struct if_dqblk __user *dqblk; | ||
442 | struct compat_if_dqblk __user *compat_dqblk; | ||
443 | struct fs_quota_stat __user *fsqstat; | ||
444 | struct compat_fs_quota_stat __user *compat_fsqstat; | ||
445 | compat_uint_t data; | ||
446 | u16 xdata; | ||
447 | long ret; | ||
448 | |||
449 | cmds = cmd >> SUBCMDSHIFT; | ||
450 | |||
451 | switch (cmds) { | ||
452 | case Q_GETQUOTA: | ||
453 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
454 | compat_dqblk = addr; | ||
455 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
456 | if (ret) | ||
457 | break; | ||
458 | if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || | ||
459 | get_user(data, &dqblk->dqb_valid) || | ||
460 | put_user(data, &compat_dqblk->dqb_valid)) | ||
461 | ret = -EFAULT; | ||
462 | break; | ||
463 | case Q_SETQUOTA: | ||
464 | dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); | ||
465 | compat_dqblk = addr; | ||
466 | ret = -EFAULT; | ||
467 | if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || | ||
468 | get_user(data, &compat_dqblk->dqb_valid) || | ||
469 | put_user(data, &dqblk->dqb_valid)) | ||
470 | break; | ||
471 | ret = sys_quotactl(cmd, special, id, dqblk); | ||
472 | break; | ||
473 | case Q_XGETQSTAT: | ||
474 | fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); | ||
475 | compat_fsqstat = addr; | ||
476 | ret = sys_quotactl(cmd, special, id, fsqstat); | ||
477 | if (ret) | ||
478 | break; | ||
479 | ret = -EFAULT; | ||
480 | /* Copying qs_version, qs_flags, qs_pad */ | ||
481 | if (copy_in_user(compat_fsqstat, fsqstat, | ||
482 | offsetof(struct compat_fs_quota_stat, qs_uquota))) | ||
483 | break; | ||
484 | /* Copying qs_uquota */ | ||
485 | if (copy_in_user(&compat_fsqstat->qs_uquota, | ||
486 | &fsqstat->qs_uquota, | ||
487 | sizeof(compat_fsqstat->qs_uquota)) || | ||
488 | get_user(data, &fsqstat->qs_uquota.qfs_nextents) || | ||
489 | put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) | ||
490 | break; | ||
491 | /* Copying qs_gquota */ | ||
492 | if (copy_in_user(&compat_fsqstat->qs_gquota, | ||
493 | &fsqstat->qs_gquota, | ||
494 | sizeof(compat_fsqstat->qs_gquota)) || | ||
495 | get_user(data, &fsqstat->qs_gquota.qfs_nextents) || | ||
496 | put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) | ||
497 | break; | ||
498 | /* Copying the rest */ | ||
499 | if (copy_in_user(&compat_fsqstat->qs_incoredqs, | ||
500 | &fsqstat->qs_incoredqs, | ||
501 | sizeof(struct compat_fs_quota_stat) - | ||
502 | offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || | ||
503 | get_user(xdata, &fsqstat->qs_iwarnlimit) || | ||
504 | put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) | ||
505 | break; | ||
506 | ret = 0; | ||
507 | break; | ||
508 | default: | ||
509 | ret = sys_quotactl(cmd, special, id, addr); | ||
510 | } | ||
511 | return ret; | ||
512 | } | ||
513 | #endif | ||
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c new file mode 100644 index 000000000000..953404c95b17 --- /dev/null +++ b/fs/quota/quota_tree.c | |||
@@ -0,0 +1,645 @@ | |||
1 | /* | ||
2 | * vfsv0 quota IO operations on file | ||
3 | */ | ||
4 | |||
5 | #include <linux/errno.h> | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/mount.h> | ||
8 | #include <linux/dqblk_v2.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/quotaops.h> | ||
14 | |||
15 | #include <asm/byteorder.h> | ||
16 | |||
17 | #include "quota_tree.h" | ||
18 | |||
19 | MODULE_AUTHOR("Jan Kara"); | ||
20 | MODULE_DESCRIPTION("Quota trie support"); | ||
21 | MODULE_LICENSE("GPL"); | ||
22 | |||
23 | #define __QUOTA_QT_PARANOIA | ||
24 | |||
25 | typedef char *dqbuf_t; | ||
26 | |||
27 | static int get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) | ||
28 | { | ||
29 | unsigned int epb = info->dqi_usable_bs >> 2; | ||
30 | |||
31 | depth = info->dqi_qtree_depth - depth - 1; | ||
32 | while (depth--) | ||
33 | id /= epb; | ||
34 | return id % epb; | ||
35 | } | ||
36 | |||
37 | /* Number of entries in one block */ | ||
38 | static inline int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info) | ||
39 | { | ||
40 | return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader)) | ||
41 | / info->dqi_entry_size; | ||
42 | } | ||
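With the v2 parameters defined later in this diff (1 KiB blocks, the 16-byte qt_disk_dqdbheader and 48-byte v2_disk_dqblk entries), qtree_dqstr_in_blk() yields the "exactly 21 quota-entries in a block" noted in quota_tree.h, and get_index() peels off one base-256 digit of the id per tree level. A small user-space sketch of the same arithmetic; the constants are copied from the headers and the helper is illustrative only:

#include <stdio.h>

#define BLK_SIZE   1024u        /* dqi_usable_bs for the v2 format */
#define HDR_SIZE     16u        /* sizeof(struct qt_disk_dqdbheader) */
#define ENTRY_SIZE   48u        /* sizeof(struct v2_disk_dqblk) */
#define EPB        (BLK_SIZE >> 2)  /* 256 block references per tree block */
#define TREE_DEPTH 4            /* 256^4 covers the 32-bit id space */

/* Same computation as get_index(): which slot of a tree block at the
 * given depth is followed for this id. */
static unsigned int tree_index(unsigned int id, int depth)
{
        int d = TREE_DEPTH - depth - 1;

        while (d--)
                id /= EPB;
        return id % EPB;
}

int main(void)
{
        unsigned int id = 4321;
        int depth;

        printf("entries per data block: %u\n",
               (BLK_SIZE - HDR_SIZE) / ENTRY_SIZE);     /* prints 21 */
        for (depth = 0; depth < TREE_DEPTH; depth++)
                printf("depth %d -> slot %u\n", depth, tree_index(id, depth));
        return 0;
}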
43 | |||
44 | static dqbuf_t getdqbuf(size_t size) | ||
45 | { | ||
46 | dqbuf_t buf = kmalloc(size, GFP_NOFS); | ||
47 | if (!buf) | ||
48 | printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n"); | ||
49 | return buf; | ||
50 | } | ||
51 | |||
52 | static inline void freedqbuf(dqbuf_t buf) | ||
53 | { | ||
54 | kfree(buf); | ||
55 | } | ||
56 | |||
57 | static inline ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) | ||
58 | { | ||
59 | struct super_block *sb = info->dqi_sb; | ||
60 | |||
61 | memset(buf, 0, info->dqi_usable_bs); | ||
62 | return sb->s_op->quota_read(sb, info->dqi_type, (char *)buf, | ||
63 | info->dqi_usable_bs, blk << info->dqi_blocksize_bits); | ||
64 | } | ||
65 | |||
66 | static inline ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, dqbuf_t buf) | ||
67 | { | ||
68 | struct super_block *sb = info->dqi_sb; | ||
69 | |||
70 | return sb->s_op->quota_write(sb, info->dqi_type, (char *)buf, | ||
71 | info->dqi_usable_bs, blk << info->dqi_blocksize_bits); | ||
72 | } | ||
73 | |||
74 | /* Remove empty block from list and return it */ | ||
75 | static int get_free_dqblk(struct qtree_mem_dqinfo *info) | ||
76 | { | ||
77 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
78 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; | ||
79 | int ret, blk; | ||
80 | |||
81 | if (!buf) | ||
82 | return -ENOMEM; | ||
83 | if (info->dqi_free_blk) { | ||
84 | blk = info->dqi_free_blk; | ||
85 | ret = read_blk(info, blk, buf); | ||
86 | if (ret < 0) | ||
87 | goto out_buf; | ||
88 | info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); | ||
89 | } | ||
90 | else { | ||
91 | memset(buf, 0, info->dqi_usable_bs); | ||
92 | /* Assure block allocation... */ | ||
93 | ret = write_blk(info, info->dqi_blocks, buf); | ||
94 | if (ret < 0) | ||
95 | goto out_buf; | ||
96 | blk = info->dqi_blocks++; | ||
97 | } | ||
98 | mark_info_dirty(info->dqi_sb, info->dqi_type); | ||
99 | ret = blk; | ||
100 | out_buf: | ||
101 | freedqbuf(buf); | ||
102 | return ret; | ||
103 | } | ||
104 | |||
105 | /* Insert empty block to the list */ | ||
106 | static int put_free_dqblk(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) | ||
107 | { | ||
108 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; | ||
109 | int err; | ||
110 | |||
111 | dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk); | ||
112 | dh->dqdh_prev_free = cpu_to_le32(0); | ||
113 | dh->dqdh_entries = cpu_to_le16(0); | ||
114 | err = write_blk(info, blk, buf); | ||
115 | if (err < 0) | ||
116 | return err; | ||
117 | info->dqi_free_blk = blk; | ||
118 | mark_info_dirty(info->dqi_sb, info->dqi_type); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | /* Remove given block from the list of blocks with free entries */ | ||
123 | static int remove_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) | ||
124 | { | ||
125 | dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); | ||
126 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; | ||
127 | uint nextblk = le32_to_cpu(dh->dqdh_next_free); | ||
128 | uint prevblk = le32_to_cpu(dh->dqdh_prev_free); | ||
129 | int err; | ||
130 | |||
131 | if (!tmpbuf) | ||
132 | return -ENOMEM; | ||
133 | if (nextblk) { | ||
134 | err = read_blk(info, nextblk, tmpbuf); | ||
135 | if (err < 0) | ||
136 | goto out_buf; | ||
137 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = | ||
138 | dh->dqdh_prev_free; | ||
139 | err = write_blk(info, nextblk, tmpbuf); | ||
140 | if (err < 0) | ||
141 | goto out_buf; | ||
142 | } | ||
143 | if (prevblk) { | ||
144 | err = read_blk(info, prevblk, tmpbuf); | ||
145 | if (err < 0) | ||
146 | goto out_buf; | ||
147 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free = | ||
148 | dh->dqdh_next_free; | ||
149 | err = write_blk(info, prevblk, tmpbuf); | ||
150 | if (err < 0) | ||
151 | goto out_buf; | ||
152 | } else { | ||
153 | info->dqi_free_entry = nextblk; | ||
154 | mark_info_dirty(info->dqi_sb, info->dqi_type); | ||
155 | } | ||
156 | freedqbuf(tmpbuf); | ||
157 | dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0); | ||
158 | /* No matter whether the write succeeds, the block is off the free list */ | ||
159 | if (write_blk(info, blk, buf) < 0) | ||
160 | printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk); | ||
161 | return 0; | ||
162 | out_buf: | ||
163 | freedqbuf(tmpbuf); | ||
164 | return err; | ||
165 | } | ||
166 | |||
167 | /* Insert given block to the beginning of list with free entries */ | ||
168 | static int insert_free_dqentry(struct qtree_mem_dqinfo *info, dqbuf_t buf, uint blk) | ||
169 | { | ||
170 | dqbuf_t tmpbuf = getdqbuf(info->dqi_usable_bs); | ||
171 | struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; | ||
172 | int err; | ||
173 | |||
174 | if (!tmpbuf) | ||
175 | return -ENOMEM; | ||
176 | dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry); | ||
177 | dh->dqdh_prev_free = cpu_to_le32(0); | ||
178 | err = write_blk(info, blk, buf); | ||
179 | if (err < 0) | ||
180 | goto out_buf; | ||
181 | if (info->dqi_free_entry) { | ||
182 | err = read_blk(info, info->dqi_free_entry, tmpbuf); | ||
183 | if (err < 0) | ||
184 | goto out_buf; | ||
185 | ((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free = | ||
186 | cpu_to_le32(blk); | ||
187 | err = write_blk(info, info->dqi_free_entry, tmpbuf); | ||
188 | if (err < 0) | ||
189 | goto out_buf; | ||
190 | } | ||
191 | freedqbuf(tmpbuf); | ||
192 | info->dqi_free_entry = blk; | ||
193 | mark_info_dirty(info->dqi_sb, info->dqi_type); | ||
194 | return 0; | ||
195 | out_buf: | ||
196 | freedqbuf(tmpbuf); | ||
197 | return err; | ||
198 | } | ||
199 | |||
200 | /* Is the entry in the block free? */ | ||
201 | int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk) | ||
202 | { | ||
203 | int i; | ||
204 | |||
205 | for (i = 0; i < info->dqi_entry_size; i++) | ||
206 | if (disk[i]) | ||
207 | return 0; | ||
208 | return 1; | ||
209 | } | ||
210 | EXPORT_SYMBOL(qtree_entry_unused); | ||
211 | |||
212 | /* Find space for dquot */ | ||
213 | static uint find_free_dqentry(struct qtree_mem_dqinfo *info, | ||
214 | struct dquot *dquot, int *err) | ||
215 | { | ||
216 | uint blk, i; | ||
217 | struct qt_disk_dqdbheader *dh; | ||
218 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
219 | char *ddquot; | ||
220 | |||
221 | *err = 0; | ||
222 | if (!buf) { | ||
223 | *err = -ENOMEM; | ||
224 | return 0; | ||
225 | } | ||
226 | dh = (struct qt_disk_dqdbheader *)buf; | ||
227 | if (info->dqi_free_entry) { | ||
228 | blk = info->dqi_free_entry; | ||
229 | *err = read_blk(info, blk, buf); | ||
230 | if (*err < 0) | ||
231 | goto out_buf; | ||
232 | } else { | ||
233 | blk = get_free_dqblk(info); | ||
234 | if ((int)blk < 0) { | ||
235 | *err = blk; | ||
236 | freedqbuf(buf); | ||
237 | return 0; | ||
238 | } | ||
239 | memset(buf, 0, info->dqi_usable_bs); | ||
240 | /* This is enough as block is already zeroed and entry list is empty... */ | ||
241 | info->dqi_free_entry = blk; | ||
242 | mark_info_dirty(dquot->dq_sb, dquot->dq_type); | ||
243 | } | ||
244 | /* Block will be full? */ | ||
245 | if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) { | ||
246 | *err = remove_free_dqentry(info, buf, blk); | ||
247 | if (*err < 0) { | ||
248 | printk(KERN_ERR "VFS: find_free_dqentry(): Can't " | ||
249 | "remove block (%u) from entry free list.\n", | ||
250 | blk); | ||
251 | goto out_buf; | ||
252 | } | ||
253 | } | ||
254 | le16_add_cpu(&dh->dqdh_entries, 1); | ||
255 | /* Find free structure in block */ | ||
256 | for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); | ||
257 | i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot); | ||
258 | i++, ddquot += info->dqi_entry_size); | ||
259 | #ifdef __QUOTA_QT_PARANOIA | ||
260 | if (i == qtree_dqstr_in_blk(info)) { | ||
261 | printk(KERN_ERR "VFS: find_free_dqentry(): Data block full " | ||
262 | "but it shouldn't.\n"); | ||
263 | *err = -EIO; | ||
264 | goto out_buf; | ||
265 | } | ||
266 | #endif | ||
267 | *err = write_blk(info, blk, buf); | ||
268 | if (*err < 0) { | ||
269 | printk(KERN_ERR "VFS: find_free_dqentry(): Can't write quota " | ||
270 | "data block %u.\n", blk); | ||
271 | goto out_buf; | ||
272 | } | ||
273 | dquot->dq_off = (blk << info->dqi_blocksize_bits) + | ||
274 | sizeof(struct qt_disk_dqdbheader) + | ||
275 | i * info->dqi_entry_size; | ||
276 | freedqbuf(buf); | ||
277 | return blk; | ||
278 | out_buf: | ||
279 | freedqbuf(buf); | ||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | /* Insert reference to structure into the trie */ | ||
284 | static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, | ||
285 | uint *treeblk, int depth) | ||
286 | { | ||
287 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
288 | int ret = 0, newson = 0, newact = 0; | ||
289 | __le32 *ref; | ||
290 | uint newblk; | ||
291 | |||
292 | if (!buf) | ||
293 | return -ENOMEM; | ||
294 | if (!*treeblk) { | ||
295 | ret = get_free_dqblk(info); | ||
296 | if (ret < 0) | ||
297 | goto out_buf; | ||
298 | *treeblk = ret; | ||
299 | memset(buf, 0, info->dqi_usable_bs); | ||
300 | newact = 1; | ||
301 | } else { | ||
302 | ret = read_blk(info, *treeblk, buf); | ||
303 | if (ret < 0) { | ||
304 | printk(KERN_ERR "VFS: Can't read tree quota block " | ||
305 | "%u.\n", *treeblk); | ||
306 | goto out_buf; | ||
307 | } | ||
308 | } | ||
309 | ref = (__le32 *)buf; | ||
310 | newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); | ||
311 | if (!newblk) | ||
312 | newson = 1; | ||
313 | if (depth == info->dqi_qtree_depth - 1) { | ||
314 | #ifdef __QUOTA_QT_PARANOIA | ||
315 | if (newblk) { | ||
316 | printk(KERN_ERR "VFS: Inserting already present quota " | ||
317 | "entry (block %u).\n", | ||
318 | le32_to_cpu(ref[get_index(info, | ||
319 | dquot->dq_id, depth)])); | ||
320 | ret = -EIO; | ||
321 | goto out_buf; | ||
322 | } | ||
323 | #endif | ||
324 | newblk = find_free_dqentry(info, dquot, &ret); | ||
325 | } else { | ||
326 | ret = do_insert_tree(info, dquot, &newblk, depth+1); | ||
327 | } | ||
328 | if (newson && ret >= 0) { | ||
329 | ref[get_index(info, dquot->dq_id, depth)] = | ||
330 | cpu_to_le32(newblk); | ||
331 | ret = write_blk(info, *treeblk, buf); | ||
332 | } else if (newact && ret < 0) { | ||
333 | put_free_dqblk(info, buf, *treeblk); | ||
334 | } | ||
335 | out_buf: | ||
336 | freedqbuf(buf); | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | /* Wrapper for inserting quota structure into tree */ | ||
341 | static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, | ||
342 | struct dquot *dquot) | ||
343 | { | ||
344 | int tmp = QT_TREEOFF; | ||
345 | return do_insert_tree(info, dquot, &tmp, 0); | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * We don't have to be afraid of deadlocks as we never have quotas on quota files... | ||
350 | */ | ||
351 | int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) | ||
352 | { | ||
353 | int type = dquot->dq_type; | ||
354 | struct super_block *sb = dquot->dq_sb; | ||
355 | ssize_t ret; | ||
356 | dqbuf_t ddquot = getdqbuf(info->dqi_entry_size); | ||
357 | |||
358 | if (!ddquot) | ||
359 | return -ENOMEM; | ||
360 | |||
361 | /* dq_off is guarded by dqio_mutex */ | ||
362 | if (!dquot->dq_off) { | ||
363 | ret = dq_insert_tree(info, dquot); | ||
364 | if (ret < 0) { | ||
365 | printk(KERN_ERR "VFS: Error %zd occurred while " | ||
366 | "creating quota.\n", ret); | ||
367 | freedqbuf(ddquot); | ||
368 | return ret; | ||
369 | } | ||
370 | } | ||
371 | spin_lock(&dq_data_lock); | ||
372 | info->dqi_ops->mem2disk_dqblk(ddquot, dquot); | ||
373 | spin_unlock(&dq_data_lock); | ||
374 | ret = sb->s_op->quota_write(sb, type, (char *)ddquot, | ||
375 | info->dqi_entry_size, dquot->dq_off); | ||
376 | if (ret != info->dqi_entry_size) { | ||
377 | printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", | ||
378 | sb->s_id); | ||
379 | if (ret >= 0) | ||
380 | ret = -ENOSPC; | ||
381 | } else { | ||
382 | ret = 0; | ||
383 | } | ||
384 | dqstats.writes++; | ||
385 | freedqbuf(ddquot); | ||
386 | |||
387 | return ret; | ||
388 | } | ||
389 | EXPORT_SYMBOL(qtree_write_dquot); | ||
390 | |||
391 | /* Free dquot entry in data block */ | ||
392 | static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot, | ||
393 | uint blk) | ||
394 | { | ||
395 | struct qt_disk_dqdbheader *dh; | ||
396 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
397 | int ret = 0; | ||
398 | |||
399 | if (!buf) | ||
400 | return -ENOMEM; | ||
401 | if (dquot->dq_off >> info->dqi_blocksize_bits != blk) { | ||
402 | printk(KERN_ERR "VFS: Quota structure has offset to other " | ||
403 | "block (%u) than it should (%u).\n", blk, | ||
404 | (uint)(dquot->dq_off >> info->dqi_blocksize_bits)); | ||
405 | goto out_buf; | ||
406 | } | ||
407 | ret = read_blk(info, blk, buf); | ||
408 | if (ret < 0) { | ||
409 | printk(KERN_ERR "VFS: Can't read quota data block %u\n", blk); | ||
410 | goto out_buf; | ||
411 | } | ||
412 | dh = (struct qt_disk_dqdbheader *)buf; | ||
413 | le16_add_cpu(&dh->dqdh_entries, -1); | ||
414 | if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ | ||
415 | ret = remove_free_dqentry(info, buf, blk); | ||
416 | if (ret >= 0) | ||
417 | ret = put_free_dqblk(info, buf, blk); | ||
418 | if (ret < 0) { | ||
419 | printk(KERN_ERR "VFS: Can't move quota data block (%u) " | ||
420 | "to free list.\n", blk); | ||
421 | goto out_buf; | ||
422 | } | ||
423 | } else { | ||
424 | memset(buf + | ||
425 | (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)), | ||
426 | 0, info->dqi_entry_size); | ||
427 | if (le16_to_cpu(dh->dqdh_entries) == | ||
428 | qtree_dqstr_in_blk(info) - 1) { | ||
429 | /* Insert will write block itself */ | ||
430 | ret = insert_free_dqentry(info, buf, blk); | ||
431 | if (ret < 0) { | ||
432 | printk(KERN_ERR "VFS: Can't insert quota data " | ||
433 | "block (%u) to free entry list.\n", blk); | ||
434 | goto out_buf; | ||
435 | } | ||
436 | } else { | ||
437 | ret = write_blk(info, blk, buf); | ||
438 | if (ret < 0) { | ||
439 | printk(KERN_ERR "VFS: Can't write quota data " | ||
440 | "block %u\n", blk); | ||
441 | goto out_buf; | ||
442 | } | ||
443 | } | ||
444 | } | ||
445 | dquot->dq_off = 0; /* Quota is now unattached */ | ||
446 | out_buf: | ||
447 | freedqbuf(buf); | ||
448 | return ret; | ||
449 | } | ||
450 | |||
451 | /* Remove reference to dquot from tree */ | ||
452 | static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, | ||
453 | uint *blk, int depth) | ||
454 | { | ||
455 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
456 | int ret = 0; | ||
457 | uint newblk; | ||
458 | __le32 *ref = (__le32 *)buf; | ||
459 | |||
460 | if (!buf) | ||
461 | return -ENOMEM; | ||
462 | ret = read_blk(info, *blk, buf); | ||
463 | if (ret < 0) { | ||
464 | printk(KERN_ERR "VFS: Can't read quota data block %u\n", *blk); | ||
465 | goto out_buf; | ||
466 | } | ||
467 | newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); | ||
468 | if (depth == info->dqi_qtree_depth - 1) { | ||
469 | ret = free_dqentry(info, dquot, newblk); | ||
470 | newblk = 0; | ||
471 | } else { | ||
472 | ret = remove_tree(info, dquot, &newblk, depth+1); | ||
473 | } | ||
474 | if (ret >= 0 && !newblk) { | ||
475 | int i; | ||
476 | ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); | ||
477 | /* Block got empty? */ | ||
478 | for (i = 0; | ||
479 | i < (info->dqi_usable_bs >> 2) && !ref[i]; | ||
480 | i++); | ||
481 | /* Don't put the root block into the free block list */ | ||
482 | if (i == (info->dqi_usable_bs >> 2) | ||
483 | && *blk != QT_TREEOFF) { | ||
484 | put_free_dqblk(info, buf, *blk); | ||
485 | *blk = 0; | ||
486 | } else { | ||
487 | ret = write_blk(info, *blk, buf); | ||
488 | if (ret < 0) | ||
489 | printk(KERN_ERR "VFS: Can't write quota tree " | ||
490 | "block %u.\n", *blk); | ||
491 | } | ||
492 | } | ||
493 | out_buf: | ||
494 | freedqbuf(buf); | ||
495 | return ret; | ||
496 | } | ||
497 | |||
498 | /* Delete dquot from tree */ | ||
499 | int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) | ||
500 | { | ||
501 | uint tmp = QT_TREEOFF; | ||
502 | |||
503 | if (!dquot->dq_off) /* Not even allocated? */ | ||
504 | return 0; | ||
505 | return remove_tree(info, dquot, &tmp, 0); | ||
506 | } | ||
507 | EXPORT_SYMBOL(qtree_delete_dquot); | ||
508 | |||
509 | /* Find entry in block */ | ||
510 | static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, | ||
511 | struct dquot *dquot, uint blk) | ||
512 | { | ||
513 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
514 | loff_t ret = 0; | ||
515 | int i; | ||
516 | char *ddquot; | ||
517 | |||
518 | if (!buf) | ||
519 | return -ENOMEM; | ||
520 | ret = read_blk(info, blk, buf); | ||
521 | if (ret < 0) { | ||
522 | printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); | ||
523 | goto out_buf; | ||
524 | } | ||
525 | for (i = 0, ddquot = ((char *)buf) + sizeof(struct qt_disk_dqdbheader); | ||
526 | i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot); | ||
527 | i++, ddquot += info->dqi_entry_size); | ||
528 | if (i == qtree_dqstr_in_blk(info)) { | ||
529 | printk(KERN_ERR "VFS: Quota for id %u referenced " | ||
530 | "but not present.\n", dquot->dq_id); | ||
531 | ret = -EIO; | ||
532 | goto out_buf; | ||
533 | } else { | ||
534 | ret = (blk << info->dqi_blocksize_bits) + sizeof(struct | ||
535 | qt_disk_dqdbheader) + i * info->dqi_entry_size; | ||
536 | } | ||
537 | out_buf: | ||
538 | freedqbuf(buf); | ||
539 | return ret; | ||
540 | } | ||
541 | |||
542 | /* Find entry for given id in the tree */ | ||
543 | static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, | ||
544 | struct dquot *dquot, uint blk, int depth) | ||
545 | { | ||
546 | dqbuf_t buf = getdqbuf(info->dqi_usable_bs); | ||
547 | loff_t ret = 0; | ||
548 | __le32 *ref = (__le32 *)buf; | ||
549 | |||
550 | if (!buf) | ||
551 | return -ENOMEM; | ||
552 | ret = read_blk(info, blk, buf); | ||
553 | if (ret < 0) { | ||
554 | printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk); | ||
555 | goto out_buf; | ||
556 | } | ||
557 | ret = 0; | ||
558 | blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); | ||
559 | if (!blk) /* No reference? */ | ||
560 | goto out_buf; | ||
561 | if (depth < info->dqi_qtree_depth - 1) | ||
562 | ret = find_tree_dqentry(info, dquot, blk, depth+1); | ||
563 | else | ||
564 | ret = find_block_dqentry(info, dquot, blk); | ||
565 | out_buf: | ||
566 | freedqbuf(buf); | ||
567 | return ret; | ||
568 | } | ||
569 | |||
570 | /* Find entry for given id in the tree - wrapper function */ | ||
571 | static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, | ||
572 | struct dquot *dquot) | ||
573 | { | ||
574 | return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); | ||
575 | } | ||
576 | |||
577 | int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) | ||
578 | { | ||
579 | int type = dquot->dq_type; | ||
580 | struct super_block *sb = dquot->dq_sb; | ||
581 | loff_t offset; | ||
582 | dqbuf_t ddquot; | ||
583 | int ret = 0; | ||
584 | |||
585 | #ifdef __QUOTA_QT_PARANOIA | ||
586 | /* Invalidated quota? */ | ||
587 | if (!sb_dqopt(dquot->dq_sb)->files[type]) { | ||
588 | printk(KERN_ERR "VFS: Quota invalidated while reading!\n"); | ||
589 | return -EIO; | ||
590 | } | ||
591 | #endif | ||
592 | /* Do we know offset of the dquot entry in the quota file? */ | ||
593 | if (!dquot->dq_off) { | ||
594 | offset = find_dqentry(info, dquot); | ||
595 | if (offset <= 0) { /* Entry not present? */ | ||
596 | if (offset < 0) | ||
597 | printk(KERN_ERR "VFS: Can't read quota " | ||
598 | "structure for id %u.\n", dquot->dq_id); | ||
599 | dquot->dq_off = 0; | ||
600 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
601 | memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); | ||
602 | ret = offset; | ||
603 | goto out; | ||
604 | } | ||
605 | dquot->dq_off = offset; | ||
606 | } | ||
607 | ddquot = getdqbuf(info->dqi_entry_size); | ||
608 | if (!ddquot) | ||
609 | return -ENOMEM; | ||
610 | ret = sb->s_op->quota_read(sb, type, (char *)ddquot, | ||
611 | info->dqi_entry_size, dquot->dq_off); | ||
612 | if (ret != info->dqi_entry_size) { | ||
613 | if (ret >= 0) | ||
614 | ret = -EIO; | ||
615 | printk(KERN_ERR "VFS: Error while reading quota " | ||
616 | "structure for id %u.\n", dquot->dq_id); | ||
617 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
618 | memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk)); | ||
619 | freedqbuf(ddquot); | ||
620 | goto out; | ||
621 | } | ||
622 | spin_lock(&dq_data_lock); | ||
623 | info->dqi_ops->disk2mem_dqblk(dquot, ddquot); | ||
624 | if (!dquot->dq_dqb.dqb_bhardlimit && | ||
625 | !dquot->dq_dqb.dqb_bsoftlimit && | ||
626 | !dquot->dq_dqb.dqb_ihardlimit && | ||
627 | !dquot->dq_dqb.dqb_isoftlimit) | ||
628 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
629 | spin_unlock(&dq_data_lock); | ||
630 | freedqbuf(ddquot); | ||
631 | out: | ||
632 | dqstats.reads++; | ||
633 | return ret; | ||
634 | } | ||
635 | EXPORT_SYMBOL(qtree_read_dquot); | ||
636 | |||
637 | /* Delete the on-disk dquot entry if it is no longer needed. We know | ||
638 | * we are the only one operating on the dquot (thanks to dq_lock). */ | ||
639 | int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) | ||
640 | { | ||
641 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace)) | ||
642 | return qtree_delete_dquot(info, dquot); | ||
643 | return 0; | ||
644 | } | ||
645 | EXPORT_SYMBOL(qtree_release_dquot); | ||
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h new file mode 100644 index 000000000000..a1ab8db81a51 --- /dev/null +++ b/fs/quota/quota_tree.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Definitions of structures for vfsv0 quota format | ||
3 | */ | ||
4 | |||
5 | #ifndef _LINUX_QUOTA_TREE_H | ||
6 | #define _LINUX_QUOTA_TREE_H | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/quota.h> | ||
10 | |||
11 | /* | ||
12 | * Structure of header of block with quota structures. It is padded to 16 bytes so | ||
13 | * there will be space for exactly 21 quota-entries in a block | ||
14 | */ | ||
15 | struct qt_disk_dqdbheader { | ||
16 | __le32 dqdh_next_free; /* Number of next block with free entry */ | ||
17 | __le32 dqdh_prev_free; /* Number of previous block with free entry */ | ||
18 | __le16 dqdh_entries; /* Number of valid entries in block */ | ||
19 | __le16 dqdh_pad1; | ||
20 | __le32 dqdh_pad2; | ||
21 | }; | ||
22 | |||
23 | #define QT_TREEOFF 1 /* Offset of tree in file in blocks */ | ||
24 | |||
25 | #endif /* _LINUX_QUOTA_TREE_H */ | ||
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c new file mode 100644 index 000000000000..b4af1c69ad16 --- /dev/null +++ b/fs/quota/quota_v1.c | |||
@@ -0,0 +1,218 @@ | |||
1 | #include <linux/errno.h> | ||
2 | #include <linux/fs.h> | ||
3 | #include <linux/quota.h> | ||
4 | #include <linux/quotaops.h> | ||
5 | #include <linux/dqblk_v1.h> | ||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/module.h> | ||
9 | |||
10 | #include <asm/byteorder.h> | ||
11 | |||
12 | #include "quotaio_v1.h" | ||
13 | |||
14 | MODULE_AUTHOR("Jan Kara"); | ||
15 | MODULE_DESCRIPTION("Old quota format support"); | ||
16 | MODULE_LICENSE("GPL"); | ||
17 | |||
18 | #define QUOTABLOCK_BITS 10 | ||
19 | #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) | ||
20 | |||
21 | static inline qsize_t v1_stoqb(qsize_t space) | ||
22 | { | ||
23 | return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; | ||
24 | } | ||
25 | |||
26 | static inline qsize_t v1_qbtos(qsize_t blocks) | ||
27 | { | ||
28 | return blocks << QUOTABLOCK_BITS; | ||
29 | } | ||
30 | |||
31 | static void v1_disk2mem_dqblk(struct mem_dqblk *m, struct v1_disk_dqblk *d) | ||
32 | { | ||
33 | m->dqb_ihardlimit = d->dqb_ihardlimit; | ||
34 | m->dqb_isoftlimit = d->dqb_isoftlimit; | ||
35 | m->dqb_curinodes = d->dqb_curinodes; | ||
36 | m->dqb_bhardlimit = v1_qbtos(d->dqb_bhardlimit); | ||
37 | m->dqb_bsoftlimit = v1_qbtos(d->dqb_bsoftlimit); | ||
38 | m->dqb_curspace = v1_qbtos(d->dqb_curblocks); | ||
39 | m->dqb_itime = d->dqb_itime; | ||
40 | m->dqb_btime = d->dqb_btime; | ||
41 | } | ||
42 | |||
43 | static void v1_mem2disk_dqblk(struct v1_disk_dqblk *d, struct mem_dqblk *m) | ||
44 | { | ||
45 | d->dqb_ihardlimit = m->dqb_ihardlimit; | ||
46 | d->dqb_isoftlimit = m->dqb_isoftlimit; | ||
47 | d->dqb_curinodes = m->dqb_curinodes; | ||
48 | d->dqb_bhardlimit = v1_stoqb(m->dqb_bhardlimit); | ||
49 | d->dqb_bsoftlimit = v1_stoqb(m->dqb_bsoftlimit); | ||
50 | d->dqb_curblocks = v1_stoqb(m->dqb_curspace); | ||
51 | d->dqb_itime = m->dqb_itime; | ||
52 | d->dqb_btime = m->dqb_btime; | ||
53 | } | ||
54 | |||
55 | static int v1_read_dqblk(struct dquot *dquot) | ||
56 | { | ||
57 | int type = dquot->dq_type; | ||
58 | struct v1_disk_dqblk dqblk; | ||
59 | |||
60 | if (!sb_dqopt(dquot->dq_sb)->files[type]) | ||
61 | return -EINVAL; | ||
62 | |||
63 | /* Set structure to 0s in case read fails/is after end of file */ | ||
64 | memset(&dqblk, 0, sizeof(struct v1_disk_dqblk)); | ||
65 | dquot->dq_sb->s_op->quota_read(dquot->dq_sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); | ||
66 | |||
67 | v1_disk2mem_dqblk(&dquot->dq_dqb, &dqblk); | ||
68 | if (dquot->dq_dqb.dqb_bhardlimit == 0 && dquot->dq_dqb.dqb_bsoftlimit == 0 && | ||
69 | dquot->dq_dqb.dqb_ihardlimit == 0 && dquot->dq_dqb.dqb_isoftlimit == 0) | ||
70 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
71 | dqstats.reads++; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int v1_commit_dqblk(struct dquot *dquot) | ||
77 | { | ||
78 | short type = dquot->dq_type; | ||
79 | ssize_t ret; | ||
80 | struct v1_disk_dqblk dqblk; | ||
81 | |||
82 | v1_mem2disk_dqblk(&dqblk, &dquot->dq_dqb); | ||
83 | if (dquot->dq_id == 0) { | ||
84 | dqblk.dqb_btime = sb_dqopt(dquot->dq_sb)->info[type].dqi_bgrace; | ||
85 | dqblk.dqb_itime = sb_dqopt(dquot->dq_sb)->info[type].dqi_igrace; | ||
86 | } | ||
87 | ret = 0; | ||
88 | if (sb_dqopt(dquot->dq_sb)->files[type]) | ||
89 | ret = dquot->dq_sb->s_op->quota_write(dquot->dq_sb, type, (char *)&dqblk, | ||
90 | sizeof(struct v1_disk_dqblk), v1_dqoff(dquot->dq_id)); | ||
91 | if (ret != sizeof(struct v1_disk_dqblk)) { | ||
92 | printk(KERN_WARNING "VFS: dquota write failed on dev %s\n", | ||
93 | dquot->dq_sb->s_id); | ||
94 | if (ret >= 0) | ||
95 | ret = -EIO; | ||
96 | goto out; | ||
97 | } | ||
98 | ret = 0; | ||
99 | |||
100 | out: | ||
101 | dqstats.writes++; | ||
102 | |||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | /* Magics of new quota format */ | ||
107 | #define V2_INITQMAGICS {\ | ||
108 | 0xd9c01f11, /* USRQUOTA */\ | ||
109 | 0xd9c01927 /* GRPQUOTA */\ | ||
110 | } | ||
111 | |||
112 | /* Header of new quota format */ | ||
113 | struct v2_disk_dqheader { | ||
114 | __le32 dqh_magic; /* Magic number identifying file */ | ||
115 | __le32 dqh_version; /* File version */ | ||
116 | }; | ||
117 | |||
118 | static int v1_check_quota_file(struct super_block *sb, int type) | ||
119 | { | ||
120 | struct inode *inode = sb_dqopt(sb)->files[type]; | ||
121 | ulong blocks; | ||
122 | size_t off; | ||
123 | struct v2_disk_dqheader dqhead; | ||
124 | ssize_t size; | ||
125 | loff_t isize; | ||
126 | static const uint quota_magics[] = V2_INITQMAGICS; | ||
127 | |||
128 | isize = i_size_read(inode); | ||
129 | if (!isize) | ||
130 | return 0; | ||
131 | blocks = isize >> BLOCK_SIZE_BITS; | ||
132 | off = isize & (BLOCK_SIZE - 1); | ||
133 | if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk)) | ||
134 | return 0; | ||
135 | /* Double-check that we didn't get a file in the new format - with the old quotactl() this could happen */ | ||
136 | size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); | ||
137 | if (size != sizeof(struct v2_disk_dqheader)) | ||
138 | return 1; /* Probably not new format */ | ||
139 | if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type]) | ||
140 | return 1; /* Definitely not new format */ | ||
141 | printk(KERN_INFO "VFS: %s: Refusing to turn on old quota format on given file. It probably contains newer quota format.\n", sb->s_id); | ||
142 | return 0; /* Seems like a new format file -> refuse it */ | ||
143 | } | ||
144 | |||
145 | static int v1_read_file_info(struct super_block *sb, int type) | ||
146 | { | ||
147 | struct quota_info *dqopt = sb_dqopt(sb); | ||
148 | struct v1_disk_dqblk dqblk; | ||
149 | int ret; | ||
150 | |||
151 | if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { | ||
152 | if (ret >= 0) | ||
153 | ret = -EIO; | ||
154 | goto out; | ||
155 | } | ||
156 | ret = 0; | ||
157 | /* limits are stored as unsigned 32-bit data */ | ||
158 | dqopt->info[type].dqi_maxblimit = 0xffffffff; | ||
159 | dqopt->info[type].dqi_maxilimit = 0xffffffff; | ||
160 | dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME; | ||
161 | dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; | ||
162 | out: | ||
163 | return ret; | ||
164 | } | ||
165 | |||
166 | static int v1_write_file_info(struct super_block *sb, int type) | ||
167 | { | ||
168 | struct quota_info *dqopt = sb_dqopt(sb); | ||
169 | struct v1_disk_dqblk dqblk; | ||
170 | int ret; | ||
171 | |||
172 | dqopt->info[type].dqi_flags &= ~DQF_INFO_DIRTY; | ||
173 | if ((ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, | ||
174 | sizeof(struct v1_disk_dqblk), v1_dqoff(0))) != sizeof(struct v1_disk_dqblk)) { | ||
175 | if (ret >= 0) | ||
176 | ret = -EIO; | ||
177 | goto out; | ||
178 | } | ||
179 | dqblk.dqb_itime = dqopt->info[type].dqi_igrace; | ||
180 | dqblk.dqb_btime = dqopt->info[type].dqi_bgrace; | ||
181 | ret = sb->s_op->quota_write(sb, type, (char *)&dqblk, | ||
182 | sizeof(struct v1_disk_dqblk), v1_dqoff(0)); | ||
183 | if (ret == sizeof(struct v1_disk_dqblk)) | ||
184 | ret = 0; | ||
185 | else if (ret > 0) | ||
186 | ret = -EIO; | ||
187 | out: | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static struct quota_format_ops v1_format_ops = { | ||
192 | .check_quota_file = v1_check_quota_file, | ||
193 | .read_file_info = v1_read_file_info, | ||
194 | .write_file_info = v1_write_file_info, | ||
195 | .free_file_info = NULL, | ||
196 | .read_dqblk = v1_read_dqblk, | ||
197 | .commit_dqblk = v1_commit_dqblk, | ||
198 | }; | ||
199 | |||
200 | static struct quota_format_type v1_quota_format = { | ||
201 | .qf_fmt_id = QFMT_VFS_OLD, | ||
202 | .qf_ops = &v1_format_ops, | ||
203 | .qf_owner = THIS_MODULE | ||
204 | }; | ||
205 | |||
206 | static int __init init_v1_quota_format(void) | ||
207 | { | ||
208 | return register_quota_format(&v1_quota_format); | ||
209 | } | ||
210 | |||
211 | static void __exit exit_v1_quota_format(void) | ||
212 | { | ||
213 | unregister_quota_format(&v1_quota_format); | ||
214 | } | ||
215 | |||
216 | module_init(init_v1_quota_format); | ||
217 | module_exit(exit_v1_quota_format); | ||
218 | |||
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c new file mode 100644 index 000000000000..b618b563635c --- /dev/null +++ b/fs/quota/quota_v2.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * vfsv0 quota IO operations on file | ||
3 | */ | ||
4 | |||
5 | #include <linux/errno.h> | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/mount.h> | ||
8 | #include <linux/dqblk_v2.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/quotaops.h> | ||
14 | |||
15 | #include <asm/byteorder.h> | ||
16 | |||
17 | #include "quota_tree.h" | ||
18 | #include "quotaio_v2.h" | ||
19 | |||
20 | MODULE_AUTHOR("Jan Kara"); | ||
21 | MODULE_DESCRIPTION("Quota format v2 support"); | ||
22 | MODULE_LICENSE("GPL"); | ||
23 | |||
24 | #define __QUOTA_V2_PARANOIA | ||
25 | |||
26 | static void v2_mem2diskdqb(void *dp, struct dquot *dquot); | ||
27 | static void v2_disk2memdqb(struct dquot *dquot, void *dp); | ||
28 | static int v2_is_id(void *dp, struct dquot *dquot); | ||
29 | |||
30 | static struct qtree_fmt_operations v2_qtree_ops = { | ||
31 | .mem2disk_dqblk = v2_mem2diskdqb, | ||
32 | .disk2mem_dqblk = v2_disk2memdqb, | ||
33 | .is_id = v2_is_id, | ||
34 | }; | ||
35 | |||
36 | #define QUOTABLOCK_BITS 10 | ||
37 | #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) | ||
38 | |||
39 | static inline qsize_t v2_stoqb(qsize_t space) | ||
40 | { | ||
41 | return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; | ||
42 | } | ||
43 | |||
44 | static inline qsize_t v2_qbtos(qsize_t blocks) | ||
45 | { | ||
46 | return blocks << QUOTABLOCK_BITS; | ||
47 | } | ||
48 | |||
49 | /* Check whether given file is really vfsv0 quotafile */ | ||
50 | static int v2_check_quota_file(struct super_block *sb, int type) | ||
51 | { | ||
52 | struct v2_disk_dqheader dqhead; | ||
53 | ssize_t size; | ||
54 | static const uint quota_magics[] = V2_INITQMAGICS; | ||
55 | static const uint quota_versions[] = V2_INITQVERSIONS; | ||
56 | |||
57 | size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0); | ||
58 | if (size != sizeof(struct v2_disk_dqheader)) { | ||
59 | printk("quota_v2: failed read expected=%zd got=%zd\n", | ||
60 | sizeof(struct v2_disk_dqheader), size); | ||
61 | return 0; | ||
62 | } | ||
63 | if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || | ||
64 | le32_to_cpu(dqhead.dqh_version) != quota_versions[type]) | ||
65 | return 0; | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | /* Read information header from quota file */ | ||
70 | static int v2_read_file_info(struct super_block *sb, int type) | ||
71 | { | ||
72 | struct v2_disk_dqinfo dinfo; | ||
73 | struct mem_dqinfo *info = sb_dqinfo(sb, type); | ||
74 | struct qtree_mem_dqinfo *qinfo; | ||
75 | ssize_t size; | ||
76 | |||
77 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, | ||
78 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); | ||
79 | if (size != sizeof(struct v2_disk_dqinfo)) { | ||
80 | printk(KERN_WARNING "Can't read info structure on device %s.\n", | ||
81 | sb->s_id); | ||
82 | return -1; | ||
83 | } | ||
84 | info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); | ||
85 | if (!info->dqi_priv) { | ||
86 | printk(KERN_WARNING | ||
87 | "Not enough memory for quota information structure.\n"); | ||
88 | return -1; | ||
89 | } | ||
90 | qinfo = info->dqi_priv; | ||
91 | /* limits are stored as unsigned 32-bit data */ | ||
92 | info->dqi_maxblimit = 0xffffffff; | ||
93 | info->dqi_maxilimit = 0xffffffff; | ||
94 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); | ||
95 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); | ||
96 | info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); | ||
97 | qinfo->dqi_sb = sb; | ||
98 | qinfo->dqi_type = type; | ||
99 | qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); | ||
100 | qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); | ||
101 | qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); | ||
102 | qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; | ||
103 | qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; | ||
104 | qinfo->dqi_qtree_depth = qtree_depth(qinfo); | ||
105 | qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk); | ||
106 | qinfo->dqi_ops = &v2_qtree_ops; | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | /* Write information header to quota file */ | ||
111 | static int v2_write_file_info(struct super_block *sb, int type) | ||
112 | { | ||
113 | struct v2_disk_dqinfo dinfo; | ||
114 | struct mem_dqinfo *info = sb_dqinfo(sb, type); | ||
115 | struct qtree_mem_dqinfo *qinfo = info->dqi_priv; | ||
116 | ssize_t size; | ||
117 | |||
118 | spin_lock(&dq_data_lock); | ||
119 | info->dqi_flags &= ~DQF_INFO_DIRTY; | ||
120 | dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); | ||
121 | dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); | ||
122 | dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); | ||
123 | spin_unlock(&dq_data_lock); | ||
124 | dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); | ||
125 | dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); | ||
126 | dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); | ||
127 | size = sb->s_op->quota_write(sb, type, (char *)&dinfo, | ||
128 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); | ||
129 | if (size != sizeof(struct v2_disk_dqinfo)) { | ||
130 | printk(KERN_WARNING "Can't write info structure on device %s.\n", | ||
131 | sb->s_id); | ||
132 | return -1; | ||
133 | } | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | static void v2_disk2memdqb(struct dquot *dquot, void *dp) | ||
138 | { | ||
139 | struct v2_disk_dqblk *d = dp, empty; | ||
140 | struct mem_dqblk *m = &dquot->dq_dqb; | ||
141 | |||
142 | m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); | ||
143 | m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); | ||
144 | m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); | ||
145 | m->dqb_itime = le64_to_cpu(d->dqb_itime); | ||
146 | m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); | ||
147 | m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); | ||
148 | m->dqb_curspace = le64_to_cpu(d->dqb_curspace); | ||
149 | m->dqb_btime = le64_to_cpu(d->dqb_btime); | ||
150 | /* Undo the all-zero escape: used entries that would serialize to all zeros are written with dqb_itime == 1 (see v2_mem2diskdqb()) */ | ||
151 | memset(&empty, 0, sizeof(struct v2_disk_dqblk)); | ||
152 | empty.dqb_itime = cpu_to_le64(1); | ||
153 | if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk))) | ||
154 | m->dqb_itime = 0; | ||
155 | } | ||
156 | |||
157 | static void v2_mem2diskdqb(void *dp, struct dquot *dquot) | ||
158 | { | ||
159 | struct v2_disk_dqblk *d = dp; | ||
160 | struct mem_dqblk *m = &dquot->dq_dqb; | ||
161 | struct qtree_mem_dqinfo *info = | ||
162 | sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; | ||
163 | |||
164 | d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); | ||
165 | d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); | ||
166 | d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); | ||
167 | d->dqb_itime = cpu_to_le64(m->dqb_itime); | ||
168 | d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); | ||
169 | d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); | ||
170 | d->dqb_curspace = cpu_to_le64(m->dqb_curspace); | ||
171 | d->dqb_btime = cpu_to_le64(m->dqb_btime); | ||
172 | d->dqb_id = cpu_to_le32(dquot->dq_id); | ||
173 | if (qtree_entry_unused(info, dp)) | ||
174 | d->dqb_itime = cpu_to_le64(1); | ||
175 | } | ||
176 | |||
177 | static int v2_is_id(void *dp, struct dquot *dquot) | ||
178 | { | ||
179 | struct v2_disk_dqblk *d = dp; | ||
180 | struct qtree_mem_dqinfo *info = | ||
181 | sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; | ||
182 | |||
183 | if (qtree_entry_unused(info, dp)) | ||
184 | return 0; | ||
185 | return le32_to_cpu(d->dqb_id) == dquot->dq_id; | ||
186 | } | ||
187 | |||
188 | static int v2_read_dquot(struct dquot *dquot) | ||
189 | { | ||
190 | return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); | ||
191 | } | ||
192 | |||
193 | static int v2_write_dquot(struct dquot *dquot) | ||
194 | { | ||
195 | return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); | ||
196 | } | ||
197 | |||
198 | static int v2_release_dquot(struct dquot *dquot) | ||
199 | { | ||
200 | return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); | ||
201 | } | ||
202 | |||
203 | static int v2_free_file_info(struct super_block *sb, int type) | ||
204 | { | ||
205 | kfree(sb_dqinfo(sb, type)->dqi_priv); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static struct quota_format_ops v2_format_ops = { | ||
210 | .check_quota_file = v2_check_quota_file, | ||
211 | .read_file_info = v2_read_file_info, | ||
212 | .write_file_info = v2_write_file_info, | ||
213 | .free_file_info = v2_free_file_info, | ||
214 | .read_dqblk = v2_read_dquot, | ||
215 | .commit_dqblk = v2_write_dquot, | ||
216 | .release_dqblk = v2_release_dquot, | ||
217 | }; | ||
218 | |||
219 | static struct quota_format_type v2_quota_format = { | ||
220 | .qf_fmt_id = QFMT_VFS_V0, | ||
221 | .qf_ops = &v2_format_ops, | ||
222 | .qf_owner = THIS_MODULE | ||
223 | }; | ||
224 | |||
225 | static int __init init_v2_quota_format(void) | ||
226 | { | ||
227 | return register_quota_format(&v2_quota_format); | ||
228 | } | ||
229 | |||
230 | static void __exit exit_v2_quota_format(void) | ||
231 | { | ||
232 | unregister_quota_format(&v2_quota_format); | ||
233 | } | ||
234 | |||
235 | module_init(init_v2_quota_format); | ||
236 | module_exit(exit_v2_quota_format); | ||
diff --git a/fs/quota/quotaio_v1.h b/fs/quota/quotaio_v1.h new file mode 100644 index 000000000000..746654b5de70 --- /dev/null +++ b/fs/quota/quotaio_v1.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef _LINUX_QUOTAIO_V1_H | ||
2 | #define _LINUX_QUOTAIO_V1_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * The following constants define the amount of time given a user | ||
8 | * before the soft limits are treated as hard limits (usually resulting | ||
9 | * in an allocation failure). The timer is started when the user crosses | ||
10 | * their soft limit; it is reset when they go back below it. | ||
11 | */ | ||
12 | #define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ | ||
13 | #define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ | ||
14 | |||
15 | /* | ||
16 | * The following structure defines the format of the disk quota file | ||
17 | * (as it appears on disk) - the file is an array of these structures | ||
18 | * indexed by user or group number. | ||
19 | */ | ||
20 | struct v1_disk_dqblk { | ||
21 | __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ | ||
22 | __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ | ||
23 | __u32 dqb_curblocks; /* current block count */ | ||
24 | __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ | ||
25 | __u32 dqb_isoftlimit; /* preferred inode limit */ | ||
26 | __u32 dqb_curinodes; /* current # allocated inodes */ | ||
27 | time_t dqb_btime; /* time limit for excessive disk use */ | ||
28 | time_t dqb_itime; /* time limit for excessive inode use */ | ||
29 | }; | ||
30 | |||
31 | #define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) | ||
32 | |||
33 | #endif /* _LINUX_QUOTAIO_V1_H */ | ||
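There is no header or tree in the old format: the quota file is a flat array of these records and v1_dqoff() maps an id straight to a byte offset. Because the record embeds time_t, the record size, and therefore the file layout, depends on the architecture. A hedged user-space sketch of the offset computation; the struct is re-declared with plain C types and nothing below is kernel code:

#include <stdio.h>
#include <time.h>

struct v1_disk_dqblk {
        unsigned int dqb_bhardlimit, dqb_bsoftlimit, dqb_curblocks;
        unsigned int dqb_ihardlimit, dqb_isoftlimit, dqb_curinodes;
        time_t dqb_btime, dqb_itime;
};

#define v1_dqoff(id) ((long long)((id) * sizeof(struct v1_disk_dqblk)))

int main(void)
{
        /* 32 bytes per record with a 4-byte time_t, 40 with an 8-byte one,
         * so the same quota file is not portable between the two. */
        printf("record is %zu bytes; id 1000 starts at offset %lld\n",
               sizeof(struct v1_disk_dqblk), v1_dqoff(1000));
        return 0;
}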
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h new file mode 100644 index 000000000000..530fe580685c --- /dev/null +++ b/fs/quota/quotaio_v2.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Definitions of structures for vfsv0 quota format | ||
3 | */ | ||
4 | |||
5 | #ifndef _LINUX_QUOTAIO_V2_H | ||
6 | #define _LINUX_QUOTAIO_V2_H | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/quota.h> | ||
10 | |||
11 | /* | ||
12 | * Definitions of magics and versions of current quota files | ||
13 | */ | ||
14 | #define V2_INITQMAGICS {\ | ||
15 | 0xd9c01f11, /* USRQUOTA */\ | ||
16 | 0xd9c01927 /* GRPQUOTA */\ | ||
17 | } | ||
18 | |||
19 | #define V2_INITQVERSIONS {\ | ||
20 | 0, /* USRQUOTA */\ | ||
21 | 0 /* GRPQUOTA */\ | ||
22 | } | ||
23 | |||
24 | /* First generic header */ | ||
25 | struct v2_disk_dqheader { | ||
26 | __le32 dqh_magic; /* Magic number identifying file */ | ||
27 | __le32 dqh_version; /* File version */ | ||
28 | }; | ||
29 | |||
30 | /* | ||
31 | * The following structure defines the format of the disk quota file | ||
32 | * (as it appears on disk) - the file is a radix tree whose leaves point | ||
33 | * to blocks of these structures. | ||
34 | */ | ||
35 | struct v2_disk_dqblk { | ||
36 | __le32 dqb_id; /* id this quota applies to */ | ||
37 | __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ | ||
38 | __le32 dqb_isoftlimit; /* preferred inode limit */ | ||
39 | __le32 dqb_curinodes; /* current # allocated inodes */ | ||
40 | __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ | ||
41 | __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ | ||
42 | __le64 dqb_curspace; /* current space occupied (in bytes) */ | ||
43 | __le64 dqb_btime; /* time limit for excessive disk use */ | ||
44 | __le64 dqb_itime; /* time limit for excessive inode use */ | ||
45 | }; | ||
46 | |||
47 | /* Header with type and version specific information */ | ||
48 | struct v2_disk_dqinfo { | ||
49 | __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ | ||
50 | __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ | ||
51 | __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ | ||
52 | __le32 dqi_blocks; /* Number of blocks in file */ | ||
53 | __le32 dqi_free_blk; /* Number of first free block in the list */ | ||
54 | __le32 dqi_free_entry; /* Number of block with at least one free entry */ | ||
55 | }; | ||
56 | |||
57 | #define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ | ||
58 | #define V2_DQBLKSIZE_BITS 10 /* Size of leaf block in tree */ | ||
59 | |||
60 | #endif /* _LINUX_QUOTAIO_V2_H */ | ||
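All on-disk fields above are little-endian, and a vfsv0 file starts with v2_disk_dqheader, whose magic and version must match V2_INITQMAGICS/V2_INITQVERSIONS for the quota type; that is exactly what v2_check_quota_file() verifies. A minimal user-space check of the user-quota header; the file name and the use of glibc's le32toh() are assumptions:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of struct v2_disk_dqheader from quotaio_v2.h. */
struct v2_disk_dqheader {
        uint32_t dqh_magic;
        uint32_t dqh_version;
};

int main(void)
{
        /* "aquota.user" is the conventional name of the vfsv0 user quota
         * file, usually kept in the root of the quota-enabled filesystem. */
        FILE *f = fopen("aquota.user", "rb");
        struct v2_disk_dqheader h;

        if (!f || fread(&h, sizeof(h), 1, f) != 1) {
                perror("aquota.user");
                return 1;
        }
        if (le32toh(h.dqh_magic) == 0xd9c01f11 && le32toh(h.dqh_version) == 0)
                printf("looks like a vfsv0 user quota file\n");
        else
                printf("not a vfsv0 user quota file\n");
        fclose(f);
        return 0;
}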