author     Jan Kara <jack@suse.cz>    2009-01-26 09:28:09 -0500
committer  Jan Kara <jack@suse.cz>    2009-03-25 21:18:35 -0400
commit     884d179dff3aa98a73c3ba9dee05fd6050d664f0 (patch)
tree       68c18e3ff8decd2af861e807bed96cca2dea30d4 /fs/dquot.c
parent     60e58e0f30e723464c2a7d34b71b8675566c572d (diff)
quota: Move quota files into separate directory
Quota subsystem has more and more files. It's time to create a dir for it.
Signed-off-by: Jan Kara <jack@suse.cz>
Diffstat (limited to 'fs/dquot.c')
-rw-r--r--  fs/dquot.c  2564
1 file changed, 0 insertions, 2564 deletions
diff --git a/fs/dquot.c b/fs/dquot.c
deleted file mode 100644
index 28aa14667602..000000000000
--- a/fs/dquot.c
+++ /dev/null
@@ -1,2564 +0,0 @@
1 | /* | ||
2 | * Implementation of the diskquota system for the LINUX operating system. QUOTA | ||
3 | * is implemented using the BSD system call interface as the means of | ||
4 | * communication with the user level. This file contains the generic routines | ||
5 | * called by the different filesystems on allocation of an inode or block. | ||
6 | * These routines take care of the administration needed to have a consistent | ||
7 | * diskquota tracking system. The ideas of both user and group quotas are based | ||
8 | * on the Melbourne quota system as used on BSD derived systems. The internal | ||
9 | * implementation is based on one of the several variants of the LINUX | ||
10 | * inode-subsystem with added complexity of the diskquota system. | ||
11 | * | ||
12 | * Author: Marco van Wieringen <mvw@planets.elm.net> | ||
13 | * | ||
14 | * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96 | ||
15 | * | ||
16 | * Revised list management to avoid races | ||
17 | * -- Bill Hawes, <whawes@star.net>, 9/98 | ||
18 | * | ||
19 | * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). | ||
20 | * As the consequence the locking was moved from dquot_decr_...(), | ||
21 | * dquot_incr_...() to calling functions. | ||
22 | * invalidate_dquots() now writes modified dquots. | ||
23 | * Serialized quota_off() and quota_on() for mount point. | ||
24 | * Fixed a few bugs in grow_dquots(). | ||
25 | * Fixed deadlock in write_dquot() - we no longer account quotas on | ||
26 | * quota files | ||
27 | * remove_dquot_ref() moved to inode.c - it now traverses through inodes | ||
28 | * add_dquot_ref() restarts after blocking | ||
29 | * Added check for bogus uid and fixed check for group in quotactl. | ||
30 | * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99 | ||
31 | * | ||
32 | * Used struct list_head instead of own list struct | ||
33 | * Invalidation of referenced dquots is no longer possible | ||
34 | * Improved free_dquots list management | ||
35 | * Quota and i_blocks are now updated in one place to avoid races | ||
36 | * Warnings are now delayed so we won't block in critical section | ||
37 | * Write updated not to require dquot lock | ||
38 | * Jan Kara, <jack@suse.cz>, 9/2000 | ||
39 | * | ||
40 | * Added dynamic quota structure allocation | ||
41 | * Jan Kara <jack@suse.cz> 12/2000 | ||
42 | * | ||
43 | * Rewritten quota interface. Implemented new quota format and | ||
44 | * formats registering. | ||
45 | * Jan Kara, <jack@suse.cz>, 2001,2002 | ||
46 | * | ||
47 | * New SMP locking. | ||
48 | * Jan Kara, <jack@suse.cz>, 10/2002 | ||
49 | * | ||
50 | * Added journalled quota support, fix lock inversion problems | ||
51 | * Jan Kara, <jack@suse.cz>, 2003,2004 | ||
52 | * | ||
53 | * (C) Copyright 1994 - 1997 Marco van Wieringen | ||
54 | */ | ||
55 | |||
56 | #include <linux/errno.h> | ||
57 | #include <linux/kernel.h> | ||
58 | #include <linux/fs.h> | ||
59 | #include <linux/mount.h> | ||
60 | #include <linux/mm.h> | ||
61 | #include <linux/time.h> | ||
62 | #include <linux/types.h> | ||
63 | #include <linux/string.h> | ||
64 | #include <linux/fcntl.h> | ||
65 | #include <linux/stat.h> | ||
66 | #include <linux/tty.h> | ||
67 | #include <linux/file.h> | ||
68 | #include <linux/slab.h> | ||
69 | #include <linux/sysctl.h> | ||
70 | #include <linux/init.h> | ||
71 | #include <linux/module.h> | ||
72 | #include <linux/proc_fs.h> | ||
73 | #include <linux/security.h> | ||
74 | #include <linux/kmod.h> | ||
75 | #include <linux/namei.h> | ||
76 | #include <linux/buffer_head.h> | ||
77 | #include <linux/capability.h> | ||
78 | #include <linux/quotaops.h> | ||
79 | #include <linux/writeback.h> /* for inode_lock, oddly enough.. */ | ||
80 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
81 | #include <net/netlink.h> | ||
82 | #include <net/genetlink.h> | ||
83 | #endif | ||
84 | |||
85 | #include <asm/uaccess.h> | ||
86 | |||
87 | #define __DQUOT_PARANOIA | ||
88 | |||
89 | /* | ||
90 | * There are three quota SMP locks. dq_list_lock protects all lists with quotas | ||
91 | * and quota formats, and the dqstats structure containing statistics about the | ||
92 | * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures, | ||
93 | * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. | ||
94 | * i_blocks and i_bytes updates themselves are guarded by i_lock acquired directly | ||
95 | * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects | ||
96 | * modifications of quota state (on quotaon and quotaoff) and readers who care | ||
97 | * about latest values take it as well. | ||
98 | * | ||
99 | * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, | ||
100 | * dq_list_lock > dq_state_lock | ||
101 | * | ||
102 | * Note that some things (e.g. sb pointer, type, id) don't change during | ||
103 | * the life of the dquot structure and so need not be protected by a lock | ||
104 | * | ||
105 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If | ||
106 | * the operation is just reading pointers from the inode (or not using them at | ||
107 | * all), the read lock is enough. If pointers are altered, the function must | ||
108 | * hold the write lock (these locking rules also apply for the S_NOQUOTA flag | ||
109 | * in the inode - note that for altering the flag i_mutex is also needed). | ||
110 | * | ||
111 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced | ||
112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). | ||
113 | * Currently dquot is locked only when it is being read to memory (or space for | ||
114 | * it is being allocated) on the first dqget() and when it is being released on | ||
115 | * the last dqput(). The allocation and release operations are serialized by | ||
116 | * the dq_lock and by checking the use count in dquot_release(). Write | ||
117 | * operations on dquots don't hold dq_lock as they copy data under dq_data_lock | ||
118 | * spinlock to internal buffers before writing. | ||
119 | * | ||
120 | * Lock ordering (including related VFS locks) is the following: | ||
121 | * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > | ||
122 | * dqio_mutex | ||
123 | * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > | ||
124 | * dqptr_sem. But a filesystem has to reckon with the fact that functions such as | ||
125 | * dquot_alloc_space() acquire dqptr_sem and they usually have to be called | ||
126 | * from inside a transaction to keep filesystem consistency after a crash. Also | ||
127 | * filesystems usually want to do some IO on dquot from ->mark_dirty which is | ||
128 | * called with dqptr_sem held. | ||
129 | * i_mutex on quota files is special (it's below dqio_mutex) | ||
130 | */ | ||
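As a minimal stand-alone sketch of the lock hierarchy spelled out above (user-space C, not kernel code; only the lock names come from the comment, while the rank table and the checking helper are the editor's assumptions):

/*
 * Editor's illustration: encode the documented ordering ("A > B" means A is
 * taken before B) and verify that a sample acquisition sequence respects it.
 */
#include <stdio.h>
#include <string.h>

static const char *lock_order[] = {
	"i_mutex",		/* outermost */
	"dqonoff_mutex",
	"journal_lock",
	"dqptr_sem",
	"dquot->dq_lock",
	"dqio_mutex",		/* innermost; i_mutex on quota files sits below even this */
};
#define NLOCKS (sizeof(lock_order) / sizeof(lock_order[0]))

static int lock_rank(const char *name)
{
	size_t i;

	for (i = 0; i < NLOCKS; i++)
		if (!strcmp(lock_order[i], name))
			return (int)i;
	return -1;	/* unknown lock */
}

/* Return 1 if every lock in seq[] is acquired strictly inside the previous one */
static int sequence_ok(const char *seq[], int n)
{
	int i, prev = -1;

	for (i = 0; i < n; i++) {
		int r = lock_rank(seq[i]);

		if (r <= prev)
			return 0;
		prev = r;
	}
	return 1;
}

int main(void)
{
	const char *good[] = { "i_mutex", "dqptr_sem", "dquot->dq_lock", "dqio_mutex" };
	const char *bad[]  = { "dqio_mutex", "dqptr_sem" };

	printf("good sequence ok? %d\n", sequence_ok(good, 4));	/* prints 1 */
	printf("bad sequence ok?  %d\n", sequence_ok(bad, 2));	/* prints 0 */
	return 0;
}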
131 | |||
132 | static DEFINE_SPINLOCK(dq_list_lock); | ||
133 | static DEFINE_SPINLOCK(dq_state_lock); | ||
134 | DEFINE_SPINLOCK(dq_data_lock); | ||
135 | EXPORT_SYMBOL(dq_data_lock); | ||
136 | |||
137 | static char *quotatypes[] = INITQFNAMES; | ||
138 | static struct quota_format_type *quota_formats; /* List of registered formats */ | ||
139 | static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; | ||
140 | |||
141 | /* SLAB cache for dquot structures */ | ||
142 | static struct kmem_cache *dquot_cachep; | ||
143 | |||
144 | int register_quota_format(struct quota_format_type *fmt) | ||
145 | { | ||
146 | spin_lock(&dq_list_lock); | ||
147 | fmt->qf_next = quota_formats; | ||
148 | quota_formats = fmt; | ||
149 | spin_unlock(&dq_list_lock); | ||
150 | return 0; | ||
151 | } | ||
152 | EXPORT_SYMBOL(register_quota_format); | ||
153 | |||
154 | void unregister_quota_format(struct quota_format_type *fmt) | ||
155 | { | ||
156 | struct quota_format_type **actqf; | ||
157 | |||
158 | spin_lock(&dq_list_lock); | ||
159 | for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next); | ||
160 | if (*actqf) | ||
161 | *actqf = (*actqf)->qf_next; | ||
162 | spin_unlock(&dq_list_lock); | ||
163 | } | ||
164 | EXPORT_SYMBOL(unregister_quota_format); | ||
165 | |||
166 | static struct quota_format_type *find_quota_format(int id) | ||
167 | { | ||
168 | struct quota_format_type *actqf; | ||
169 | |||
170 | spin_lock(&dq_list_lock); | ||
171 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); | ||
172 | if (!actqf || !try_module_get(actqf->qf_owner)) { | ||
173 | int qm; | ||
174 | |||
175 | spin_unlock(&dq_list_lock); | ||
176 | |||
177 | for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++); | ||
178 | if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) | ||
179 | return NULL; | ||
180 | |||
181 | spin_lock(&dq_list_lock); | ||
182 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next); | ||
183 | if (actqf && !try_module_get(actqf->qf_owner)) | ||
184 | actqf = NULL; | ||
185 | } | ||
186 | spin_unlock(&dq_list_lock); | ||
187 | return actqf; | ||
188 | } | ||
189 | |||
190 | static void put_quota_format(struct quota_format_type *fmt) | ||
191 | { | ||
192 | module_put(fmt->qf_owner); | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Dquot List Management: | ||
197 | * The quota code uses three lists for dquot management: the inuse_list, | ||
198 | * free_dquots, and dquot_hash[] array. A single dquot structure may be | ||
199 | * on all three lists, depending on its current state. | ||
200 | * | ||
201 | * All dquots are placed at the end of inuse_list when first created, and this | ||
202 | * list is used for invalidate operation, which must look at every dquot. | ||
203 | * | ||
204 | * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, | ||
205 | * and this list is searched whenever we need an available dquot. Dquots are | ||
206 | * removed from the list as soon as they are used again, and | ||
207 | * dqstats.free_dquots gives the number of dquots on the list. When | ||
208 | * dquot is invalidated it's completely released from memory. | ||
209 | * | ||
210 | * Dquots with a specific identity (device, type and id) are placed on | ||
211 | * one of the dquot_hash[] hash chains. This provides an efficient search | ||
212 | * mechanism to locate a specific dquot. | ||
213 | */ | ||
214 | |||
215 | static LIST_HEAD(inuse_list); | ||
216 | static LIST_HEAD(free_dquots); | ||
217 | static unsigned int dq_hash_bits, dq_hash_mask; | ||
218 | static struct hlist_head *dquot_hash; | ||
219 | |||
220 | struct dqstats dqstats; | ||
221 | EXPORT_SYMBOL(dqstats); | ||
222 | |||
223 | static inline unsigned int | ||
224 | hashfn(const struct super_block *sb, unsigned int id, int type) | ||
225 | { | ||
226 | unsigned long tmp; | ||
227 | |||
228 | tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); | ||
229 | return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; | ||
230 | } | ||
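For illustration, here is a stand-alone user-space program that mirrors the arithmetic of hashfn() above; the values of L1_CACHE_SHIFT, MAXQUOTAS, the table size and the sample superblock address are assumptions chosen only to show how (sb, id, type) triples spread across dquot_hash buckets:

/* Editor's sketch of the hash arithmetic, not kernel code. */
#include <stdio.h>

#define L1_CACHE_SHIFT	6	/* assumed cache-line shift */
#define MAXQUOTAS	2	/* USRQUOTA and GRPQUOTA */

static unsigned int dq_hash_bits = 10;			/* assumed 1024-bucket table */
static unsigned int dq_hash_mask = (1U << 10) - 1;

static unsigned int hashfn(unsigned long sb, unsigned int id, int type)
{
	unsigned long tmp;

	tmp = ((sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}

int main(void)
{
	unsigned long sb = 0x12345000UL;	/* stand-in for a superblock address */
	unsigned int id;

	for (id = 1000; id < 1005; id++)
		printf("uid %u -> bucket %u\n", id, hashfn(sb, id, 0 /* USRQUOTA */));
	return 0;
}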
231 | |||
232 | /* | ||
233 | * Following list functions expect dq_list_lock to be held | ||
234 | */ | ||
235 | static inline void insert_dquot_hash(struct dquot *dquot) | ||
236 | { | ||
237 | struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); | ||
238 | hlist_add_head(&dquot->dq_hash, head); | ||
239 | } | ||
240 | |||
241 | static inline void remove_dquot_hash(struct dquot *dquot) | ||
242 | { | ||
243 | hlist_del_init(&dquot->dq_hash); | ||
244 | } | ||
245 | |||
246 | static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type) | ||
247 | { | ||
248 | struct hlist_node *node; | ||
249 | struct dquot *dquot; | ||
250 | |||
251 | hlist_for_each (node, dquot_hash+hashent) { | ||
252 | dquot = hlist_entry(node, struct dquot, dq_hash); | ||
253 | if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type) | ||
254 | return dquot; | ||
255 | } | ||
256 | return NODQUOT; | ||
257 | } | ||
258 | |||
259 | /* Add a dquot to the tail of the free list */ | ||
260 | static inline void put_dquot_last(struct dquot *dquot) | ||
261 | { | ||
262 | list_add_tail(&dquot->dq_free, &free_dquots); | ||
263 | dqstats.free_dquots++; | ||
264 | } | ||
265 | |||
266 | static inline void remove_free_dquot(struct dquot *dquot) | ||
267 | { | ||
268 | if (list_empty(&dquot->dq_free)) | ||
269 | return; | ||
270 | list_del_init(&dquot->dq_free); | ||
271 | dqstats.free_dquots--; | ||
272 | } | ||
273 | |||
274 | static inline void put_inuse(struct dquot *dquot) | ||
275 | { | ||
276 | /* We add to the back of inuse list so we don't have to restart | ||
277 | * when we block while traversing this list */ | ||
278 | list_add_tail(&dquot->dq_inuse, &inuse_list); | ||
279 | dqstats.allocated_dquots++; | ||
280 | } | ||
281 | |||
282 | static inline void remove_inuse(struct dquot *dquot) | ||
283 | { | ||
284 | dqstats.allocated_dquots--; | ||
285 | list_del(&dquot->dq_inuse); | ||
286 | } | ||
287 | /* | ||
288 | * End of list functions needing dq_list_lock | ||
289 | */ | ||
290 | |||
291 | static void wait_on_dquot(struct dquot *dquot) | ||
292 | { | ||
293 | mutex_lock(&dquot->dq_lock); | ||
294 | mutex_unlock(&dquot->dq_lock); | ||
295 | } | ||
296 | |||
297 | static inline int dquot_dirty(struct dquot *dquot) | ||
298 | { | ||
299 | return test_bit(DQ_MOD_B, &dquot->dq_flags); | ||
300 | } | ||
301 | |||
302 | static inline int mark_dquot_dirty(struct dquot *dquot) | ||
303 | { | ||
304 | return dquot->dq_sb->dq_op->mark_dirty(dquot); | ||
305 | } | ||
306 | |||
307 | int dquot_mark_dquot_dirty(struct dquot *dquot) | ||
308 | { | ||
309 | spin_lock(&dq_list_lock); | ||
310 | if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
311 | list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> | ||
312 | info[dquot->dq_type].dqi_dirty_list); | ||
313 | spin_unlock(&dq_list_lock); | ||
314 | return 0; | ||
315 | } | ||
316 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | ||
317 | |||
318 | /* This function needs dq_list_lock */ | ||
319 | static inline int clear_dquot_dirty(struct dquot *dquot) | ||
320 | { | ||
321 | if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
322 | return 0; | ||
323 | list_del_init(&dquot->dq_dirty); | ||
324 | return 1; | ||
325 | } | ||
326 | |||
327 | void mark_info_dirty(struct super_block *sb, int type) | ||
328 | { | ||
329 | set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); | ||
330 | } | ||
331 | EXPORT_SYMBOL(mark_info_dirty); | ||
332 | |||
333 | /* | ||
334 | * Read dquot from disk and alloc space for it | ||
335 | */ | ||
336 | |||
337 | int dquot_acquire(struct dquot *dquot) | ||
338 | { | ||
339 | int ret = 0, ret2 = 0; | ||
340 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
341 | |||
342 | mutex_lock(&dquot->dq_lock); | ||
343 | mutex_lock(&dqopt->dqio_mutex); | ||
344 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) | ||
345 | ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); | ||
346 | if (ret < 0) | ||
347 | goto out_iolock; | ||
348 | set_bit(DQ_READ_B, &dquot->dq_flags); | ||
349 | /* Instantiate dquot if needed */ | ||
350 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { | ||
351 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
352 | /* Write the info if needed */ | ||
353 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
354 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
355 | if (ret < 0) | ||
356 | goto out_iolock; | ||
357 | if (ret2 < 0) { | ||
358 | ret = ret2; | ||
359 | goto out_iolock; | ||
360 | } | ||
361 | } | ||
362 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
363 | out_iolock: | ||
364 | mutex_unlock(&dqopt->dqio_mutex); | ||
365 | mutex_unlock(&dquot->dq_lock); | ||
366 | return ret; | ||
367 | } | ||
368 | EXPORT_SYMBOL(dquot_acquire); | ||
369 | |||
370 | /* | ||
371 | * Write dquot to disk | ||
372 | */ | ||
373 | int dquot_commit(struct dquot *dquot) | ||
374 | { | ||
375 | int ret = 0, ret2 = 0; | ||
376 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
377 | |||
378 | mutex_lock(&dqopt->dqio_mutex); | ||
379 | spin_lock(&dq_list_lock); | ||
380 | if (!clear_dquot_dirty(dquot)) { | ||
381 | spin_unlock(&dq_list_lock); | ||
382 | goto out_sem; | ||
383 | } | ||
384 | spin_unlock(&dq_list_lock); | ||
385 | /* A dquot can be inactive only if there was an error during read/init | ||
386 | * => we'd better not write it */ | ||
387 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
388 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
389 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
390 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
391 | if (ret >= 0) | ||
392 | ret = ret2; | ||
393 | } | ||
394 | out_sem: | ||
395 | mutex_unlock(&dqopt->dqio_mutex); | ||
396 | return ret; | ||
397 | } | ||
398 | EXPORT_SYMBOL(dquot_commit); | ||
399 | |||
400 | /* | ||
401 | * Release dquot | ||
402 | */ | ||
403 | int dquot_release(struct dquot *dquot) | ||
404 | { | ||
405 | int ret = 0, ret2 = 0; | ||
406 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
407 | |||
408 | mutex_lock(&dquot->dq_lock); | ||
409 | /* Check whether we are not racing with some other dqget() */ | ||
410 | if (atomic_read(&dquot->dq_count) > 1) | ||
411 | goto out_dqlock; | ||
412 | mutex_lock(&dqopt->dqio_mutex); | ||
413 | if (dqopt->ops[dquot->dq_type]->release_dqblk) { | ||
414 | ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); | ||
415 | /* Write the info */ | ||
416 | if (info_dirty(&dqopt->info[dquot->dq_type])) | ||
417 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type); | ||
418 | if (ret >= 0) | ||
419 | ret = ret2; | ||
420 | } | ||
421 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
422 | mutex_unlock(&dqopt->dqio_mutex); | ||
423 | out_dqlock: | ||
424 | mutex_unlock(&dquot->dq_lock); | ||
425 | return ret; | ||
426 | } | ||
427 | EXPORT_SYMBOL(dquot_release); | ||
428 | |||
429 | void dquot_destroy(struct dquot *dquot) | ||
430 | { | ||
431 | kmem_cache_free(dquot_cachep, dquot); | ||
432 | } | ||
433 | EXPORT_SYMBOL(dquot_destroy); | ||
434 | |||
435 | static inline void do_destroy_dquot(struct dquot *dquot) | ||
436 | { | ||
437 | dquot->dq_sb->dq_op->destroy_dquot(dquot); | ||
438 | } | ||
439 | |||
440 | /* Invalidate all dquots on the list. Note that this function is called after | ||
441 | * quota is disabled and pointers from inodes removed so there cannot be new | ||
442 | * quota users. There can still be some users of quotas due to inodes being | ||
443 | * just deleted or pruned by prune_icache() (those are not attached to any | ||
444 | * list) or parallel quotactl call. We have to wait for such users. | ||
445 | */ | ||
446 | static void invalidate_dquots(struct super_block *sb, int type) | ||
447 | { | ||
448 | struct dquot *dquot, *tmp; | ||
449 | |||
450 | restart: | ||
451 | spin_lock(&dq_list_lock); | ||
452 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { | ||
453 | if (dquot->dq_sb != sb) | ||
454 | continue; | ||
455 | if (dquot->dq_type != type) | ||
456 | continue; | ||
457 | /* Wait for dquot users */ | ||
458 | if (atomic_read(&dquot->dq_count)) { | ||
459 | DEFINE_WAIT(wait); | ||
460 | |||
461 | atomic_inc(&dquot->dq_count); | ||
462 | prepare_to_wait(&dquot->dq_wait_unused, &wait, | ||
463 | TASK_UNINTERRUPTIBLE); | ||
464 | spin_unlock(&dq_list_lock); | ||
465 | /* Once dqput() wakes us up, we know it's time to free | ||
466 | * the dquot. | ||
467 | * IMPORTANT: we rely on the fact that there is always | ||
468 | * at most one process waiting for dquot to free. | ||
469 | * Otherwise dq_count would be > 1 and we would never | ||
470 | * wake up. | ||
471 | */ | ||
472 | if (atomic_read(&dquot->dq_count) > 1) | ||
473 | schedule(); | ||
474 | finish_wait(&dquot->dq_wait_unused, &wait); | ||
475 | dqput(dquot); | ||
476 | /* At this moment the dquot need not exist (it could have | ||
477 | * been reclaimed by prune_dqcache()). Hence we must | ||
478 | * restart. */ | ||
479 | goto restart; | ||
480 | } | ||
481 | /* | ||
482 | * Quota now has no users and it has been written on last | ||
483 | * dqput() | ||
484 | */ | ||
485 | remove_dquot_hash(dquot); | ||
486 | remove_free_dquot(dquot); | ||
487 | remove_inuse(dquot); | ||
488 | do_destroy_dquot(dquot); | ||
489 | } | ||
490 | spin_unlock(&dq_list_lock); | ||
491 | } | ||
492 | |||
493 | /* Call callback for every active dquot on given filesystem */ | ||
494 | int dquot_scan_active(struct super_block *sb, | ||
495 | int (*fn)(struct dquot *dquot, unsigned long priv), | ||
496 | unsigned long priv) | ||
497 | { | ||
498 | struct dquot *dquot, *old_dquot = NULL; | ||
499 | int ret = 0; | ||
500 | |||
501 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
502 | spin_lock(&dq_list_lock); | ||
503 | list_for_each_entry(dquot, &inuse_list, dq_inuse) { | ||
504 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) | ||
505 | continue; | ||
506 | if (dquot->dq_sb != sb) | ||
507 | continue; | ||
508 | /* Now we have an active dquot so we can just increase the use count */ | ||
509 | atomic_inc(&dquot->dq_count); | ||
510 | dqstats.lookups++; | ||
511 | spin_unlock(&dq_list_lock); | ||
512 | dqput(old_dquot); | ||
513 | old_dquot = dquot; | ||
514 | ret = fn(dquot, priv); | ||
515 | if (ret < 0) | ||
516 | goto out; | ||
517 | spin_lock(&dq_list_lock); | ||
518 | /* We are safe to continue now because our dquot could not | ||
519 | * be moved out of the inuse list while we hold the reference */ | ||
520 | } | ||
521 | spin_unlock(&dq_list_lock); | ||
522 | out: | ||
523 | dqput(old_dquot); | ||
524 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
525 | return ret; | ||
526 | } | ||
527 | EXPORT_SYMBOL(dquot_scan_active); | ||
528 | |||
529 | int vfs_quota_sync(struct super_block *sb, int type) | ||
530 | { | ||
531 | struct list_head *dirty; | ||
532 | struct dquot *dquot; | ||
533 | struct quota_info *dqopt = sb_dqopt(sb); | ||
534 | int cnt; | ||
535 | |||
536 | mutex_lock(&dqopt->dqonoff_mutex); | ||
537 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
538 | if (type != -1 && cnt != type) | ||
539 | continue; | ||
540 | if (!sb_has_quota_active(sb, cnt)) | ||
541 | continue; | ||
542 | spin_lock(&dq_list_lock); | ||
543 | dirty = &dqopt->info[cnt].dqi_dirty_list; | ||
544 | while (!list_empty(dirty)) { | ||
545 | dquot = list_first_entry(dirty, struct dquot, dq_dirty); | ||
546 | /* Only a bad dquot can be both dirty and inactive... */ | ||
547 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
548 | clear_dquot_dirty(dquot); | ||
549 | continue; | ||
550 | } | ||
551 | /* Now we have an active dquot to which someone is | ||
552 | * holding a reference so we can safely just increase | ||
553 | * the use count */ | ||
554 | atomic_inc(&dquot->dq_count); | ||
555 | dqstats.lookups++; | ||
556 | spin_unlock(&dq_list_lock); | ||
557 | sb->dq_op->write_dquot(dquot); | ||
558 | dqput(dquot); | ||
559 | spin_lock(&dq_list_lock); | ||
560 | } | ||
561 | spin_unlock(&dq_list_lock); | ||
562 | } | ||
563 | |||
564 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
565 | if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) | ||
566 | && info_dirty(&dqopt->info[cnt])) | ||
567 | sb->dq_op->write_info(sb, cnt); | ||
568 | spin_lock(&dq_list_lock); | ||
569 | dqstats.syncs++; | ||
570 | spin_unlock(&dq_list_lock); | ||
571 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | EXPORT_SYMBOL(vfs_quota_sync); | ||
576 | |||
577 | /* Free unused dquots from cache */ | ||
578 | static void prune_dqcache(int count) | ||
579 | { | ||
580 | struct list_head *head; | ||
581 | struct dquot *dquot; | ||
582 | |||
583 | head = free_dquots.prev; | ||
584 | while (head != &free_dquots && count) { | ||
585 | dquot = list_entry(head, struct dquot, dq_free); | ||
586 | remove_dquot_hash(dquot); | ||
587 | remove_free_dquot(dquot); | ||
588 | remove_inuse(dquot); | ||
589 | do_destroy_dquot(dquot); | ||
590 | count--; | ||
591 | head = free_dquots.prev; | ||
592 | } | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * This is called from kswapd when we think we need some | ||
597 | * more memory | ||
598 | */ | ||
599 | |||
600 | static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | ||
601 | { | ||
602 | if (nr) { | ||
603 | spin_lock(&dq_list_lock); | ||
604 | prune_dqcache(nr); | ||
605 | spin_unlock(&dq_list_lock); | ||
606 | } | ||
607 | return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; | ||
608 | } | ||
609 | |||
610 | static struct shrinker dqcache_shrinker = { | ||
611 | .shrink = shrink_dqcache_memory, | ||
612 | .seeks = DEFAULT_SEEKS, | ||
613 | }; | ||
614 | |||
615 | /* | ||
616 | * Put reference to dquot | ||
617 | * NOTE: If you change this function please check whether dqput_blocks() works right... | ||
618 | */ | ||
619 | void dqput(struct dquot *dquot) | ||
620 | { | ||
621 | int ret; | ||
622 | |||
623 | if (!dquot) | ||
624 | return; | ||
625 | #ifdef __DQUOT_PARANOIA | ||
626 | if (!atomic_read(&dquot->dq_count)) { | ||
627 | printk("VFS: dqput: trying to free free dquot\n"); | ||
628 | printk("VFS: device %s, dquot of %s %d\n", | ||
629 | dquot->dq_sb->s_id, | ||
630 | quotatypes[dquot->dq_type], | ||
631 | dquot->dq_id); | ||
632 | BUG(); | ||
633 | } | ||
634 | #endif | ||
635 | |||
636 | spin_lock(&dq_list_lock); | ||
637 | dqstats.drops++; | ||
638 | spin_unlock(&dq_list_lock); | ||
639 | we_slept: | ||
640 | spin_lock(&dq_list_lock); | ||
641 | if (atomic_read(&dquot->dq_count) > 1) { | ||
642 | /* We have more than one user... nothing to do */ | ||
643 | atomic_dec(&dquot->dq_count); | ||
644 | /* Releasing dquot during quotaoff phase? */ | ||
645 | if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && | ||
646 | atomic_read(&dquot->dq_count) == 1) | ||
647 | wake_up(&dquot->dq_wait_unused); | ||
648 | spin_unlock(&dq_list_lock); | ||
649 | return; | ||
650 | } | ||
651 | /* Need to release dquot? */ | ||
652 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { | ||
653 | spin_unlock(&dq_list_lock); | ||
654 | /* Commit dquot before releasing */ | ||
655 | ret = dquot->dq_sb->dq_op->write_dquot(dquot); | ||
656 | if (ret < 0) { | ||
657 | printk(KERN_ERR "VFS: cannot write quota structure on " | ||
658 | "device %s (error %d). Quota may get out of " | ||
659 | "sync!\n", dquot->dq_sb->s_id, ret); | ||
660 | /* | ||
661 | * We clear dirty bit anyway, so that we avoid | ||
662 | * infinite loop here | ||
663 | */ | ||
664 | spin_lock(&dq_list_lock); | ||
665 | clear_dquot_dirty(dquot); | ||
666 | spin_unlock(&dq_list_lock); | ||
667 | } | ||
668 | goto we_slept; | ||
669 | } | ||
670 | /* Clear flag in case dquot was inactive (something bad happened) */ | ||
671 | clear_dquot_dirty(dquot); | ||
672 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
673 | spin_unlock(&dq_list_lock); | ||
674 | dquot->dq_sb->dq_op->release_dquot(dquot); | ||
675 | goto we_slept; | ||
676 | } | ||
677 | atomic_dec(&dquot->dq_count); | ||
678 | #ifdef __DQUOT_PARANOIA | ||
679 | /* sanity check */ | ||
680 | BUG_ON(!list_empty(&dquot->dq_free)); | ||
681 | #endif | ||
682 | put_dquot_last(dquot); | ||
683 | spin_unlock(&dq_list_lock); | ||
684 | } | ||
685 | EXPORT_SYMBOL(dqput); | ||
686 | |||
687 | struct dquot *dquot_alloc(struct super_block *sb, int type) | ||
688 | { | ||
689 | return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); | ||
690 | } | ||
691 | EXPORT_SYMBOL(dquot_alloc); | ||
692 | |||
693 | static struct dquot *get_empty_dquot(struct super_block *sb, int type) | ||
694 | { | ||
695 | struct dquot *dquot; | ||
696 | |||
697 | dquot = sb->dq_op->alloc_dquot(sb, type); | ||
698 | if(!dquot) | ||
699 | return NODQUOT; | ||
700 | |||
701 | mutex_init(&dquot->dq_lock); | ||
702 | INIT_LIST_HEAD(&dquot->dq_free); | ||
703 | INIT_LIST_HEAD(&dquot->dq_inuse); | ||
704 | INIT_HLIST_NODE(&dquot->dq_hash); | ||
705 | INIT_LIST_HEAD(&dquot->dq_dirty); | ||
706 | init_waitqueue_head(&dquot->dq_wait_unused); | ||
707 | dquot->dq_sb = sb; | ||
708 | dquot->dq_type = type; | ||
709 | atomic_set(&dquot->dq_count, 1); | ||
710 | |||
711 | return dquot; | ||
712 | } | ||
713 | |||
714 | /* | ||
715 | * Get reference to dquot | ||
716 | * | ||
717 | * Locking is slightly tricky here. We are guarded from parallel quotaoff() | ||
718 | * destroying our dquot by: | ||
719 | * a) checking for quota flags under dq_list_lock and | ||
720 | * b) getting a reference to dquot before we release dq_list_lock | ||
721 | */ | ||
722 | struct dquot *dqget(struct super_block *sb, unsigned int id, int type) | ||
723 | { | ||
724 | unsigned int hashent = hashfn(sb, id, type); | ||
725 | struct dquot *dquot = NODQUOT, *empty = NODQUOT; | ||
726 | |||
727 | if (!sb_has_quota_active(sb, type)) | ||
728 | return NODQUOT; | ||
729 | we_slept: | ||
730 | spin_lock(&dq_list_lock); | ||
731 | spin_lock(&dq_state_lock); | ||
732 | if (!sb_has_quota_active(sb, type)) { | ||
733 | spin_unlock(&dq_state_lock); | ||
734 | spin_unlock(&dq_list_lock); | ||
735 | goto out; | ||
736 | } | ||
737 | spin_unlock(&dq_state_lock); | ||
738 | |||
739 | if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) { | ||
740 | if (empty == NODQUOT) { | ||
741 | spin_unlock(&dq_list_lock); | ||
742 | if ((empty = get_empty_dquot(sb, type)) == NODQUOT) | ||
743 | schedule(); /* Try to wait for a moment... */ | ||
744 | goto we_slept; | ||
745 | } | ||
746 | dquot = empty; | ||
747 | empty = NODQUOT; | ||
748 | dquot->dq_id = id; | ||
749 | /* all dquots go on the inuse_list */ | ||
750 | put_inuse(dquot); | ||
751 | /* hash it first so it can be found */ | ||
752 | insert_dquot_hash(dquot); | ||
753 | dqstats.lookups++; | ||
754 | spin_unlock(&dq_list_lock); | ||
755 | } else { | ||
756 | if (!atomic_read(&dquot->dq_count)) | ||
757 | remove_free_dquot(dquot); | ||
758 | atomic_inc(&dquot->dq_count); | ||
759 | dqstats.cache_hits++; | ||
760 | dqstats.lookups++; | ||
761 | spin_unlock(&dq_list_lock); | ||
762 | } | ||
763 | /* Wait for dq_lock - after this we know that either dquot_release() is already | ||
764 | * finished or it will be canceled due to dq_count > 1 test */ | ||
765 | wait_on_dquot(dquot); | ||
766 | /* Read the dquot and instantiate it (everything done only if needed) */ | ||
767 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) { | ||
768 | dqput(dquot); | ||
769 | dquot = NODQUOT; | ||
770 | goto out; | ||
771 | } | ||
772 | #ifdef __DQUOT_PARANOIA | ||
773 | BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ | ||
774 | #endif | ||
775 | out: | ||
776 | if (empty) | ||
777 | do_destroy_dquot(empty); | ||
778 | |||
779 | return dquot; | ||
780 | } | ||
781 | EXPORT_SYMBOL(dqget); | ||
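The lookup path of dqget() above follows a classic pattern: search the hash under dq_list_lock; if nothing is found, drop the lock to allocate, then retake the lock and search again, since another task may have inserted the same dquot in the meantime. Below is a stand-alone sketch of that pattern using pthreads and invented names (an editor's illustration, not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned int id;
	int refcount;
	struct entry *next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *cache;

static struct entry *find_locked(unsigned int id)
{
	struct entry *e;

	for (e = cache; e; e = e->next)
		if (e->id == id)
			return e;
	return NULL;
}

static struct entry *get_entry(unsigned int id)
{
	struct entry *e, *new = NULL;

	for (;;) {
		pthread_mutex_lock(&cache_lock);
		e = find_locked(id);
		if (e) {
			e->refcount++;		/* like atomic_inc(&dquot->dq_count) */
			pthread_mutex_unlock(&cache_lock);
			free(new);		/* lost the race; discard our copy */
			return e;
		}
		if (new) {			/* nobody raced us: insert our entry */
			new->next = cache;
			cache = new;
			pthread_mutex_unlock(&cache_lock);
			return new;
		}
		pthread_mutex_unlock(&cache_lock);
		new = calloc(1, sizeof(*new));	/* allocate without holding the lock */
		if (!new)
			return NULL;
		new->id = id;
		new->refcount = 1;
	}
}

int main(void)
{
	struct entry *a = get_entry(1000);
	struct entry *b = get_entry(1000);

	printf("same entry: %d, refcount: %d\n", a && a == b, b ? b->refcount : 0);
	return 0;
}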
782 | |||
783 | static int dqinit_needed(struct inode *inode, int type) | ||
784 | { | ||
785 | int cnt; | ||
786 | |||
787 | if (IS_NOQUOTA(inode)) | ||
788 | return 0; | ||
789 | if (type != -1) | ||
790 | return inode->i_dquot[type] == NODQUOT; | ||
791 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
792 | if (inode->i_dquot[cnt] == NODQUOT) | ||
793 | return 1; | ||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | /* This routine is guarded by dqonoff_mutex mutex */ | ||
798 | static void add_dquot_ref(struct super_block *sb, int type) | ||
799 | { | ||
800 | struct inode *inode, *old_inode = NULL; | ||
801 | |||
802 | spin_lock(&inode_lock); | ||
803 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
804 | if (!atomic_read(&inode->i_writecount)) | ||
805 | continue; | ||
806 | if (!dqinit_needed(inode, type)) | ||
807 | continue; | ||
808 | if (inode->i_state & (I_FREEING|I_WILL_FREE)) | ||
809 | continue; | ||
810 | |||
811 | __iget(inode); | ||
812 | spin_unlock(&inode_lock); | ||
813 | |||
814 | iput(old_inode); | ||
815 | sb->dq_op->initialize(inode, type); | ||
816 | /* We hold a reference to 'inode' so it couldn't have been | ||
817 | * removed from s_inodes list while we dropped the inode_lock. | ||
818 | * We cannot iput the inode now as we can be holding the last | ||
819 | * reference and we cannot iput it under inode_lock. So we | ||
820 | * keep the reference and iput it later. */ | ||
821 | old_inode = inode; | ||
822 | spin_lock(&inode_lock); | ||
823 | } | ||
824 | spin_unlock(&inode_lock); | ||
825 | iput(old_inode); | ||
826 | } | ||
827 | |||
828 | /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ | ||
829 | static inline int dqput_blocks(struct dquot *dquot) | ||
830 | { | ||
831 | if (atomic_read(&dquot->dq_count) <= 1) | ||
832 | return 1; | ||
833 | return 0; | ||
834 | } | ||
835 | |||
836 | /* Remove references to dquots from inode - add dquot to list for freeing if needed */ | ||
837 | /* We can't race with anybody because we hold dqptr_sem for writing... */ | ||
838 | static int remove_inode_dquot_ref(struct inode *inode, int type, | ||
839 | struct list_head *tofree_head) | ||
840 | { | ||
841 | struct dquot *dquot = inode->i_dquot[type]; | ||
842 | |||
843 | inode->i_dquot[type] = NODQUOT; | ||
844 | if (dquot != NODQUOT) { | ||
845 | if (dqput_blocks(dquot)) { | ||
846 | #ifdef __DQUOT_PARANOIA | ||
847 | if (atomic_read(&dquot->dq_count) != 1) | ||
848 | printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); | ||
849 | #endif | ||
850 | spin_lock(&dq_list_lock); | ||
851 | list_add(&dquot->dq_free, tofree_head); /* As the dquot must currently have users it can't be on the free list... */ | ||
852 | spin_unlock(&dq_list_lock); | ||
853 | return 1; | ||
854 | } | ||
855 | else | ||
856 | dqput(dquot); /* We have guaranteed we won't block */ | ||
857 | } | ||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | /* Free list of dquots - called from inode.c */ | ||
862 | /* dquots are removed from inodes, no new references can be taken so we are the only ones holding a reference */ | ||
863 | static void put_dquot_list(struct list_head *tofree_head) | ||
864 | { | ||
865 | struct list_head *act_head; | ||
866 | struct dquot *dquot; | ||
867 | |||
868 | act_head = tofree_head->next; | ||
869 | /* So now we have dquots on the list... Just free them */ | ||
870 | while (act_head != tofree_head) { | ||
871 | dquot = list_entry(act_head, struct dquot, dq_free); | ||
872 | act_head = act_head->next; | ||
873 | list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */ | ||
874 | dqput(dquot); | ||
875 | } | ||
876 | } | ||
877 | |||
878 | static void remove_dquot_ref(struct super_block *sb, int type, | ||
879 | struct list_head *tofree_head) | ||
880 | { | ||
881 | struct inode *inode; | ||
882 | |||
883 | spin_lock(&inode_lock); | ||
884 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
885 | if (!IS_NOQUOTA(inode)) | ||
886 | remove_inode_dquot_ref(inode, type, tofree_head); | ||
887 | } | ||
888 | spin_unlock(&inode_lock); | ||
889 | } | ||
890 | |||
891 | /* Gather all references from inodes and drop them */ | ||
892 | static void drop_dquot_ref(struct super_block *sb, int type) | ||
893 | { | ||
894 | LIST_HEAD(tofree_head); | ||
895 | |||
896 | if (sb->dq_op) { | ||
897 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
898 | remove_dquot_ref(sb, type, &tofree_head); | ||
899 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
900 | put_dquot_list(&tofree_head); | ||
901 | } | ||
902 | } | ||
903 | |||
904 | static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) | ||
905 | { | ||
906 | dquot->dq_dqb.dqb_curinodes += number; | ||
907 | } | ||
908 | |||
909 | static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) | ||
910 | { | ||
911 | dquot->dq_dqb.dqb_curspace += number; | ||
912 | } | ||
913 | |||
914 | static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) | ||
915 | { | ||
916 | dquot->dq_dqb.dqb_rsvspace += number; | ||
917 | } | ||
918 | |||
919 | /* | ||
920 | * Claim reserved quota space | ||
921 | */ | ||
922 | static void dquot_claim_reserved_space(struct dquot *dquot, | ||
923 | qsize_t number) | ||
924 | { | ||
925 | WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); | ||
926 | dquot->dq_dqb.dqb_curspace += number; | ||
927 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
928 | } | ||
929 | |||
930 | static inline | ||
931 | void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) | ||
932 | { | ||
933 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
934 | } | ||
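dquot_resv_space(), dquot_claim_reserved_space() and dquot_free_reserved_space() above form a two-phase accounting scheme for delayed allocation: space is reserved when data is buffered and later either claimed (when blocks are actually allocated) or released (when the data never reaches the disk). A stand-alone sketch of that flow, with invented struct and function names (not kernel code):

#include <assert.h>
#include <stdio.h>

struct dq_counters {
	unsigned long long curspace;	/* space actually allocated */
	unsigned long long rsvspace;	/* space only reserved so far */
};

static void resv_space(struct dq_counters *c, unsigned long long n)
{
	c->rsvspace += n;
}

static void claim_reserved_space(struct dq_counters *c, unsigned long long n)
{
	assert(c->rsvspace >= n);	/* mirrors the WARN_ON in the kernel helper */
	c->curspace += n;
	c->rsvspace -= n;
}

static void free_reserved_space(struct dq_counters *c, unsigned long long n)
{
	c->rsvspace -= n;
}

int main(void)
{
	struct dq_counters c = { 0, 0 };

	resv_space(&c, 4096);		/* buffered write reserves one block */
	claim_reserved_space(&c, 4096);	/* writeback really allocates the block */
	resv_space(&c, 8192);		/* another write reserves two blocks */
	free_reserved_space(&c, 8192);	/* but the data is dropped before writeback */

	printf("curspace=%llu rsvspace=%llu\n", c.curspace, c.rsvspace);
	return 0;
}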
935 | |||
936 | static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number) | ||
937 | { | ||
938 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
939 | dquot->dq_dqb.dqb_curinodes >= number) | ||
940 | dquot->dq_dqb.dqb_curinodes -= number; | ||
941 | else | ||
942 | dquot->dq_dqb.dqb_curinodes = 0; | ||
943 | if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
944 | dquot->dq_dqb.dqb_itime = (time_t) 0; | ||
945 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
946 | } | ||
947 | |||
948 | static inline void dquot_decr_space(struct dquot *dquot, qsize_t number) | ||
949 | { | ||
950 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
951 | dquot->dq_dqb.dqb_curspace >= number) | ||
952 | dquot->dq_dqb.dqb_curspace -= number; | ||
953 | else | ||
954 | dquot->dq_dqb.dqb_curspace = 0; | ||
955 | if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
956 | dquot->dq_dqb.dqb_btime = (time_t) 0; | ||
957 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
958 | } | ||
959 | |||
960 | static int warning_issued(struct dquot *dquot, const int warntype) | ||
961 | { | ||
962 | int flag = (warntype == QUOTA_NL_BHARDWARN || | ||
963 | warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : | ||
964 | ((warntype == QUOTA_NL_IHARDWARN || | ||
965 | warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0); | ||
966 | |||
967 | if (!flag) | ||
968 | return 0; | ||
969 | return test_and_set_bit(flag, &dquot->dq_flags); | ||
970 | } | ||
971 | |||
972 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
973 | static int flag_print_warnings = 1; | ||
974 | |||
975 | static inline int need_print_warning(struct dquot *dquot) | ||
976 | { | ||
977 | if (!flag_print_warnings) | ||
978 | return 0; | ||
979 | |||
980 | switch (dquot->dq_type) { | ||
981 | case USRQUOTA: | ||
982 | return current_fsuid() == dquot->dq_id; | ||
983 | case GRPQUOTA: | ||
984 | return in_group_p(dquot->dq_id); | ||
985 | } | ||
986 | return 0; | ||
987 | } | ||
988 | |||
989 | /* Print warning to user which exceeded quota */ | ||
990 | static void print_warning(struct dquot *dquot, const int warntype) | ||
991 | { | ||
992 | char *msg = NULL; | ||
993 | struct tty_struct *tty; | ||
994 | |||
995 | if (warntype == QUOTA_NL_IHARDBELOW || | ||
996 | warntype == QUOTA_NL_ISOFTBELOW || | ||
997 | warntype == QUOTA_NL_BHARDBELOW || | ||
998 | warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) | ||
999 | return; | ||
1000 | |||
1001 | tty = get_current_tty(); | ||
1002 | if (!tty) | ||
1003 | return; | ||
1004 | tty_write_message(tty, dquot->dq_sb->s_id); | ||
1005 | if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) | ||
1006 | tty_write_message(tty, ": warning, "); | ||
1007 | else | ||
1008 | tty_write_message(tty, ": write failed, "); | ||
1009 | tty_write_message(tty, quotatypes[dquot->dq_type]); | ||
1010 | switch (warntype) { | ||
1011 | case QUOTA_NL_IHARDWARN: | ||
1012 | msg = " file limit reached.\r\n"; | ||
1013 | break; | ||
1014 | case QUOTA_NL_ISOFTLONGWARN: | ||
1015 | msg = " file quota exceeded too long.\r\n"; | ||
1016 | break; | ||
1017 | case QUOTA_NL_ISOFTWARN: | ||
1018 | msg = " file quota exceeded.\r\n"; | ||
1019 | break; | ||
1020 | case QUOTA_NL_BHARDWARN: | ||
1021 | msg = " block limit reached.\r\n"; | ||
1022 | break; | ||
1023 | case QUOTA_NL_BSOFTLONGWARN: | ||
1024 | msg = " block quota exceeded too long.\r\n"; | ||
1025 | break; | ||
1026 | case QUOTA_NL_BSOFTWARN: | ||
1027 | msg = " block quota exceeded.\r\n"; | ||
1028 | break; | ||
1029 | } | ||
1030 | tty_write_message(tty, msg); | ||
1031 | tty_kref_put(tty); | ||
1032 | } | ||
1033 | #endif | ||
1034 | |||
1035 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1036 | |||
1037 | /* Netlink family structure for quota */ | ||
1038 | static struct genl_family quota_genl_family = { | ||
1039 | .id = GENL_ID_GENERATE, | ||
1040 | .hdrsize = 0, | ||
1041 | .name = "VFS_DQUOT", | ||
1042 | .version = 1, | ||
1043 | .maxattr = QUOTA_NL_A_MAX, | ||
1044 | }; | ||
1045 | |||
1046 | /* Send warning to userspace about user which exceeded quota */ | ||
1047 | static void send_warning(const struct dquot *dquot, const char warntype) | ||
1048 | { | ||
1049 | static atomic_t seq; | ||
1050 | struct sk_buff *skb; | ||
1051 | void *msg_head; | ||
1052 | int ret; | ||
1053 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
1054 | 2 * nla_total_size(sizeof(u64)); | ||
1055 | |||
1056 | /* We have to allocate using GFP_NOFS as we are called from a | ||
1057 | * filesystem performing write and thus further recursion into | ||
1058 | * the fs to free some data could cause deadlocks. */ | ||
1059 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
1060 | if (!skb) { | ||
1061 | printk(KERN_ERR | ||
1062 | "VFS: Not enough memory to send quota warning.\n"); | ||
1063 | return; | ||
1064 | } | ||
1065 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
1066 | "a_genl_family, 0, QUOTA_NL_C_WARNING); | ||
1067 | if (!msg_head) { | ||
1068 | printk(KERN_ERR | ||
1069 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
1070 | goto err_out; | ||
1071 | } | ||
1072 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); | ||
1073 | if (ret) | ||
1074 | goto attr_err_out; | ||
1075 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); | ||
1076 | if (ret) | ||
1077 | goto attr_err_out; | ||
1078 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
1079 | if (ret) | ||
1080 | goto attr_err_out; | ||
1081 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, | ||
1082 | MAJOR(dquot->dq_sb->s_dev)); | ||
1083 | if (ret) | ||
1084 | goto attr_err_out; | ||
1085 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, | ||
1086 | MINOR(dquot->dq_sb->s_dev)); | ||
1087 | if (ret) | ||
1088 | goto attr_err_out; | ||
1089 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
1090 | if (ret) | ||
1091 | goto attr_err_out; | ||
1092 | genlmsg_end(skb, msg_head); | ||
1093 | |||
1094 | ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
1095 | if (ret < 0 && ret != -ESRCH) | ||
1096 | printk(KERN_ERR | ||
1097 | "VFS: Failed to send notification message: %d\n", ret); | ||
1098 | return; | ||
1099 | attr_err_out: | ||
1100 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
1101 | err_out: | ||
1102 | kfree_skb(skb); | ||
1103 | } | ||
1104 | #endif | ||
1105 | /* | ||
1106 | * Write warnings to the console and send warning messages over netlink. | ||
1107 | * | ||
1108 | * Note that this function can sleep. | ||
1109 | */ | ||
1110 | static inline void flush_warnings(struct dquot * const *dquots, char *warntype) | ||
1111 | { | ||
1112 | int i; | ||
1113 | |||
1114 | for (i = 0; i < MAXQUOTAS; i++) | ||
1115 | if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN && | ||
1116 | !warning_issued(dquots[i], warntype[i])) { | ||
1117 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
1118 | print_warning(dquots[i], warntype[i]); | ||
1119 | #endif | ||
1120 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1121 | send_warning(dquots[i], warntype[i]); | ||
1122 | #endif | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | static inline char ignore_hardlimit(struct dquot *dquot) | ||
1127 | { | ||
1128 | struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
1129 | |||
1130 | return capable(CAP_SYS_RESOURCE) && | ||
1131 | (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH)); | ||
1132 | } | ||
1133 | |||
1134 | /* needs dq_data_lock */ | ||
1135 | static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | ||
1136 | { | ||
1137 | *warntype = QUOTA_NL_NOWARN; | ||
1138 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | ||
1139 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1140 | return QUOTA_OK; | ||
1141 | |||
1142 | if (dquot->dq_dqb.dqb_ihardlimit && | ||
1143 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit && | ||
1144 | !ignore_hardlimit(dquot)) { | ||
1145 | *warntype = QUOTA_NL_IHARDWARN; | ||
1146 | return NO_QUOTA; | ||
1147 | } | ||
1148 | |||
1149 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1150 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && | ||
1151 | dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime && | ||
1152 | !ignore_hardlimit(dquot)) { | ||
1153 | *warntype = QUOTA_NL_ISOFTLONGWARN; | ||
1154 | return NO_QUOTA; | ||
1155 | } | ||
1156 | |||
1157 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1158 | (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit && | ||
1159 | dquot->dq_dqb.dqb_itime == 0) { | ||
1160 | *warntype = QUOTA_NL_ISOFTWARN; | ||
1161 | dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; | ||
1162 | } | ||
1163 | |||
1164 | return QUOTA_OK; | ||
1165 | } | ||
1166 | |||
1167 | /* needs dq_data_lock */ | ||
1168 | static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) | ||
1169 | { | ||
1170 | qsize_t tspace; | ||
1171 | |||
1172 | *warntype = QUOTA_NL_NOWARN; | ||
1173 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | ||
1174 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1175 | return QUOTA_OK; | ||
1176 | |||
1177 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | ||
1178 | + space; | ||
1179 | |||
1180 | if (dquot->dq_dqb.dqb_bhardlimit && | ||
1181 | tspace > dquot->dq_dqb.dqb_bhardlimit && | ||
1182 | !ignore_hardlimit(dquot)) { | ||
1183 | if (!prealloc) | ||
1184 | *warntype = QUOTA_NL_BHARDWARN; | ||
1185 | return NO_QUOTA; | ||
1186 | } | ||
1187 | |||
1188 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1189 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1190 | dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime && | ||
1191 | !ignore_hardlimit(dquot)) { | ||
1192 | if (!prealloc) | ||
1193 | *warntype = QUOTA_NL_BSOFTLONGWARN; | ||
1194 | return NO_QUOTA; | ||
1195 | } | ||
1196 | |||
1197 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1198 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1199 | dquot->dq_dqb.dqb_btime == 0) { | ||
1200 | if (!prealloc) { | ||
1201 | *warntype = QUOTA_NL_BSOFTWARN; | ||
1202 | dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace; | ||
1203 | } | ||
1204 | else | ||
1205 | /* | ||
1206 | * We don't allow preallocation to exceed the softlimit so exceeding will | ||
1207 | * always be printed | ||
1208 | */ | ||
1209 | return NO_QUOTA; | ||
1210 | } | ||
1211 | |||
1212 | return QUOTA_OK; | ||
1213 | } | ||
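To make the decision logic of check_bdq() above concrete, here is a stand-alone model of the hard-limit / soft-limit / grace-time rules for a single quota entry. It deliberately omits ignore_hardlimit(), reserved space and the prealloc special case; the field names merely echo struct mem_dqblk, and the QUOTA_OK/NO_QUOTA values are chosen for this sketch (an editor's illustration, not kernel code):

#include <stdio.h>
#include <time.h>

struct dqblk {
	unsigned long long bhardlimit;	/* absolute limit on used space */
	unsigned long long bsoftlimit;	/* preferred limit on used space */
	unsigned long long curspace;	/* bytes currently used */
	time_t btime;			/* end of the grace period, 0 = not running */
};

#define QUOTA_OK 0
#define NO_QUOTA 1

static int check_bdq(struct dqblk *q, unsigned long long space,
		     time_t now, time_t bgrace)
{
	unsigned long long tspace = q->curspace + space;

	if (q->bhardlimit && tspace > q->bhardlimit)
		return NO_QUOTA;		/* hard limit would be exceeded */
	if (q->bsoftlimit && tspace > q->bsoftlimit) {
		if (q->btime && now >= q->btime)
			return NO_QUOTA;	/* soft limit exceeded for too long */
		if (!q->btime)
			q->btime = now + bgrace; /* first time over: start grace period */
	}
	return QUOTA_OK;
}

int main(void)
{
	struct dqblk q = { .bhardlimit = 1000, .bsoftlimit = 800, .curspace = 750 };
	time_t now = time(NULL);
	time_t week = 7 * 24 * 3600;

	printf("%d\n", check_bdq(&q, 100, now, week));		/* 0: over soft limit, grace starts */
	printf("%d\n", check_bdq(&q, 300, now, week));		/* 1: would exceed hard limit */
	printf("%d\n", check_bdq(&q, 100, now + week + 1, week)); /* 1: grace period expired */
	return 0;
}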
1214 | |||
1215 | static int info_idq_free(struct dquot *dquot, qsize_t inodes) | ||
1216 | { | ||
1217 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1218 | dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || | ||
1219 | !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) | ||
1220 | return QUOTA_NL_NOWARN; | ||
1221 | |||
1222 | if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
1223 | return QUOTA_NL_ISOFTBELOW; | ||
1224 | if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && | ||
1225 | dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit) | ||
1226 | return QUOTA_NL_IHARDBELOW; | ||
1227 | return QUOTA_NL_NOWARN; | ||
1228 | } | ||
1229 | |||
1230 | static int info_bdq_free(struct dquot *dquot, qsize_t space) | ||
1231 | { | ||
1232 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1233 | dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1234 | return QUOTA_NL_NOWARN; | ||
1235 | |||
1236 | if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1237 | return QUOTA_NL_BSOFTBELOW; | ||
1238 | if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && | ||
1239 | dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) | ||
1240 | return QUOTA_NL_BHARDBELOW; | ||
1241 | return QUOTA_NL_NOWARN; | ||
1242 | } | ||
1243 | /* | ||
1244 | * Initialize quota pointers in inode | ||
1245 | * We do things in a somewhat complicated way, but by that we avoid calling | ||
1246 | * dqget() and thus filesystem callbacks under dqptr_sem. | ||
1247 | */ | ||
1248 | int dquot_initialize(struct inode *inode, int type) | ||
1249 | { | ||
1250 | unsigned int id = 0; | ||
1251 | int cnt, ret = 0; | ||
1252 | struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT }; | ||
1253 | struct super_block *sb = inode->i_sb; | ||
1254 | |||
1255 | /* First test before acquiring mutex - solves deadlocks when we | ||
1256 | * re-enter the quota code and are already holding the mutex */ | ||
1257 | if (IS_NOQUOTA(inode)) | ||
1258 | return 0; | ||
1259 | |||
1260 | /* First get references to structures we might need. */ | ||
1261 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1262 | if (type != -1 && cnt != type) | ||
1263 | continue; | ||
1264 | switch (cnt) { | ||
1265 | case USRQUOTA: | ||
1266 | id = inode->i_uid; | ||
1267 | break; | ||
1268 | case GRPQUOTA: | ||
1269 | id = inode->i_gid; | ||
1270 | break; | ||
1271 | } | ||
1272 | got[cnt] = dqget(sb, id, cnt); | ||
1273 | } | ||
1274 | |||
1275 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
1276 | /* Having dqptr_sem we know NOQUOTA flags can't be altered... */ | ||
1277 | if (IS_NOQUOTA(inode)) | ||
1278 | goto out_err; | ||
1279 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1280 | if (type != -1 && cnt != type) | ||
1281 | continue; | ||
1282 | /* Avoid races with quotaoff() */ | ||
1283 | if (!sb_has_quota_active(sb, cnt)) | ||
1284 | continue; | ||
1285 | if (inode->i_dquot[cnt] == NODQUOT) { | ||
1286 | inode->i_dquot[cnt] = got[cnt]; | ||
1287 | got[cnt] = NODQUOT; | ||
1288 | } | ||
1289 | } | ||
1290 | out_err: | ||
1291 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
1292 | /* Drop unused references */ | ||
1293 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1294 | dqput(got[cnt]); | ||
1295 | return ret; | ||
1296 | } | ||
1297 | EXPORT_SYMBOL(dquot_initialize); | ||
1298 | |||
1299 | /* | ||
1300 | * Release all quotas referenced by inode | ||
1301 | */ | ||
1302 | int dquot_drop(struct inode *inode) | ||
1303 | { | ||
1304 | int cnt; | ||
1305 | struct dquot *put[MAXQUOTAS]; | ||
1306 | |||
1307 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1308 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1309 | put[cnt] = inode->i_dquot[cnt]; | ||
1310 | inode->i_dquot[cnt] = NODQUOT; | ||
1311 | } | ||
1312 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1313 | |||
1314 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1315 | dqput(put[cnt]); | ||
1316 | return 0; | ||
1317 | } | ||
1318 | EXPORT_SYMBOL(dquot_drop); | ||
1319 | |||
1320 | /* Wrapper to remove references to quota structures from inode */ | ||
1321 | void vfs_dq_drop(struct inode *inode) | ||
1322 | { | ||
1323 | /* Here we can get an arbitrary inode from clear_inode() so we have | ||
1324 | * to be careful. OTOH we don't need locking as quota operations | ||
1325 | * are allowed to change only at mount time */ | ||
1326 | if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op | ||
1327 | && inode->i_sb->dq_op->drop) { | ||
1328 | int cnt; | ||
1329 | /* Test before calling to rule out calls from proc and such | ||
1330 | * where we are not allowed to block. Note that this is | ||
1331 | * actually a reliable test even without the lock - the caller | ||
1332 | * must ensure that nobody can come after the DQUOT_DROP and | ||
1333 | * add quota pointers back anyway */ | ||
1334 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1335 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1336 | break; | ||
1337 | if (cnt < MAXQUOTAS) | ||
1338 | inode->i_sb->dq_op->drop(inode); | ||
1339 | } | ||
1340 | } | ||
1341 | EXPORT_SYMBOL(vfs_dq_drop); | ||
1342 | |||
1343 | /* | ||
1344 | * Following four functions update i_blocks+i_bytes fields and | ||
1345 | * quota information (together with appropriate checks) | ||
1346 | * NOTE: We absolutely rely on the fact that caller dirties | ||
1347 | * the inode (usually macros in quotaops.h care about this) and | ||
1348 | * holds a handle for the current transaction so that dquot write and | ||
1349 | * inode write go into the same transaction. | ||
1350 | */ | ||
1351 | |||
1352 | /* | ||
1353 | * This operation can block, but only after everything is updated | ||
1354 | */ | ||
1355 | int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
1356 | int warn, int reserve) | ||
1357 | { | ||
1358 | int cnt, ret = QUOTA_OK; | ||
1359 | char warntype[MAXQUOTAS]; | ||
1360 | |||
1361 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1362 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1363 | |||
1364 | spin_lock(&dq_data_lock); | ||
1365 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1366 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1367 | continue; | ||
1368 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | ||
1369 | == NO_QUOTA) { | ||
1370 | ret = NO_QUOTA; | ||
1371 | goto out_unlock; | ||
1372 | } | ||
1373 | } | ||
1374 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1375 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1376 | continue; | ||
1377 | if (reserve) | ||
1378 | dquot_resv_space(inode->i_dquot[cnt], number); | ||
1379 | else | ||
1380 | dquot_incr_space(inode->i_dquot[cnt], number); | ||
1381 | } | ||
1382 | if (!reserve) | ||
1383 | inode_add_bytes(inode, number); | ||
1384 | out_unlock: | ||
1385 | spin_unlock(&dq_data_lock); | ||
1386 | flush_warnings(inode->i_dquot, warntype); | ||
1387 | return ret; | ||
1388 | } | ||
1389 | |||
1390 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
1391 | { | ||
1392 | int cnt, ret = QUOTA_OK; | ||
1393 | |||
1394 | /* | ||
1395 | * First test before acquiring mutex - solves deadlocks when we | ||
1396 | * re-enter the quota code and are already holding the mutex | ||
1397 | */ | ||
1398 | if (IS_NOQUOTA(inode)) { | ||
1399 | inode_add_bytes(inode, number); | ||
1400 | goto out; | ||
1401 | } | ||
1402 | |||
1403 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1404 | if (IS_NOQUOTA(inode)) { | ||
1405 | inode_add_bytes(inode, number); | ||
1406 | goto out_unlock; | ||
1407 | } | ||
1408 | |||
1409 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
1410 | if (ret == NO_QUOTA) | ||
1411 | goto out_unlock; | ||
1412 | |||
1413 | /* Dirtify all the dquots - this can block when journalling */ | ||
1414 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1415 | if (inode->i_dquot[cnt]) | ||
1416 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1417 | out_unlock: | ||
1418 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1419 | out: | ||
1420 | return ret; | ||
1421 | } | ||
1422 | EXPORT_SYMBOL(dquot_alloc_space); | ||
1423 | |||
1424 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | ||
1425 | { | ||
1426 | int ret = QUOTA_OK; | ||
1427 | |||
1428 | if (IS_NOQUOTA(inode)) | ||
1429 | goto out; | ||
1430 | |||
1431 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1432 | if (IS_NOQUOTA(inode)) | ||
1433 | goto out_unlock; | ||
1434 | |||
1435 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
1436 | out_unlock: | ||
1437 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1438 | out: | ||
1439 | return ret; | ||
1440 | } | ||
1441 | EXPORT_SYMBOL(dquot_reserve_space); | ||
1442 | |||
1443 | /* | ||
1444 | * This operation can block, but only after everything is updated | ||
1445 | */ | ||
1446 | int dquot_alloc_inode(const struct inode *inode, qsize_t number) | ||
1447 | { | ||
1448 | int cnt, ret = NO_QUOTA; | ||
1449 | char warntype[MAXQUOTAS]; | ||
1450 | |||
1451 | /* First test before acquiring mutex - solves deadlocks when we | ||
1452 | * re-enter the quota code and are already holding the mutex */ | ||
1453 | if (IS_NOQUOTA(inode)) | ||
1454 | return QUOTA_OK; | ||
1455 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1456 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1457 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1458 | if (IS_NOQUOTA(inode)) { | ||
1459 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1460 | return QUOTA_OK; | ||
1461 | } | ||
1462 | spin_lock(&dq_data_lock); | ||
1463 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1464 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1465 | continue; | ||
1466 | if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA) | ||
1467 | goto warn_put_all; | ||
1468 | } | ||
1469 | |||
1470 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1471 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1472 | continue; | ||
1473 | dquot_incr_inodes(inode->i_dquot[cnt], number); | ||
1474 | } | ||
1475 | ret = QUOTA_OK; | ||
1476 | warn_put_all: | ||
1477 | spin_unlock(&dq_data_lock); | ||
1478 | if (ret == QUOTA_OK) | ||
1479 | /* Dirtify all the dquots - this can block when journalling */ | ||
1480 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1481 | if (inode->i_dquot[cnt]) | ||
1482 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1483 | flush_warnings(inode->i_dquot, warntype); | ||
1484 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1485 | return ret; | ||
1486 | } | ||
1487 | EXPORT_SYMBOL(dquot_alloc_inode); | ||
1488 | |||
1489 | int dquot_claim_space(struct inode *inode, qsize_t number) | ||
1490 | { | ||
1491 | int cnt; | ||
1492 | int ret = QUOTA_OK; | ||
1493 | |||
1494 | if (IS_NOQUOTA(inode)) { | ||
1495 | inode_add_bytes(inode, number); | ||
1496 | goto out; | ||
1497 | } | ||
1498 | |||
1499 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1500 | if (IS_NOQUOTA(inode)) { | ||
1501 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1502 | inode_add_bytes(inode, number); | ||
1503 | goto out; | ||
1504 | } | ||
1505 | |||
1506 | spin_lock(&dq_data_lock); | ||
1507 | /* Convert reserved quota space into allocated quota space */ | ||
1508 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1509 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1510 | dquot_claim_reserved_space(inode->i_dquot[cnt], | ||
1511 | number); | ||
1512 | } | ||
1513 | /* Update inode bytes */ | ||
1514 | inode_add_bytes(inode, number); | ||
1515 | spin_unlock(&dq_data_lock); | ||
1516 | /* Dirtify all the dquots - this can block when journalling */ | ||
1517 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1518 | if (inode->i_dquot[cnt]) | ||
1519 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1520 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1521 | out: | ||
1522 | return ret; | ||
1523 | } | ||
1524 | EXPORT_SYMBOL(dquot_claim_space); | ||
1525 | |||
1526 | /* | ||
1527 | * Release reserved quota space | ||
1528 | */ | ||
1529 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
1530 | { | ||
1531 | int cnt; | ||
1532 | |||
1533 | if (IS_NOQUOTA(inode)) | ||
1534 | goto out; | ||
1535 | |||
1536 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1537 | if (IS_NOQUOTA(inode)) | ||
1538 | goto out_unlock; | ||
1539 | |||
1540 | spin_lock(&dq_data_lock); | ||
1541 | /* Release reserved dquots */ | ||
1542 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1543 | if (inode->i_dquot[cnt] != NODQUOT) | ||
1544 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
1545 | } | ||
1546 | spin_unlock(&dq_data_lock); | ||
1547 | |||
1548 | out_unlock: | ||
1549 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1550 | out: | ||
1551 | return; | ||
1552 | } | ||
1553 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
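| /* | ||
| * In short, the three reservation helpers above support delayed allocation: | ||
| * a filesystem first reserves space with dquot_reserve_space(), then either | ||
| * converts the reservation with dquot_claim_space() (which moves it from | ||
| * dqb_rsvspace to dqb_curspace and adds the bytes to the inode without | ||
| * re-checking limits) or returns it with dquot_release_reserved_space(). | ||
| */ | ||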
1554 | |||
1555 | /* | ||
1556 | * This operation can block, but only after everything is updated | ||
1557 | */ | ||
1558 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
1559 | { | ||
1560 | unsigned int cnt; | ||
1561 | char warntype[MAXQUOTAS]; | ||
1562 | |||
1563 | /* First test before acquiring mutex - solves deadlocks when we | ||
1564 | * re-enter the quota code and are already holding the mutex */ | ||
1565 | if (IS_NOQUOTA(inode)) { | ||
1566 | out_sub: | ||
1567 | inode_sub_bytes(inode, number); | ||
1568 | return QUOTA_OK; | ||
1569 | } | ||
1570 | |||
1571 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1572 | /* Now recheck reliably when holding dqptr_sem */ | ||
1573 | if (IS_NOQUOTA(inode)) { | ||
1574 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1575 | goto out_sub; | ||
1576 | } | ||
1577 | spin_lock(&dq_data_lock); | ||
1578 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1579 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1580 | continue; | ||
1581 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | ||
1582 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
1583 | } | ||
1584 | inode_sub_bytes(inode, number); | ||
1585 | spin_unlock(&dq_data_lock); | ||
1586 | /* Dirtify all the dquots - this can block when journalling */ | ||
1587 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1588 | if (inode->i_dquot[cnt]) | ||
1589 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1590 | flush_warnings(inode->i_dquot, warntype); | ||
1591 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1592 | return QUOTA_OK; | ||
1593 | } | ||
1594 | EXPORT_SYMBOL(dquot_free_space); | ||
1595 | |||
1596 | /* | ||
1597 | * This operation can block, but only after everything is updated | ||
1598 | */ | ||
1599 | int dquot_free_inode(const struct inode *inode, qsize_t number) | ||
1600 | { | ||
1601 | unsigned int cnt; | ||
1602 | char warntype[MAXQUOTAS]; | ||
1603 | |||
1604 | /* First test before acquiring mutex - solves deadlocks when we | ||
1605 | * re-enter the quota code and are already holding the mutex */ | ||
1606 | if (IS_NOQUOTA(inode)) | ||
1607 | return QUOTA_OK; | ||
1608 | |||
1609 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1610 | /* Now recheck reliably when holding dqptr_sem */ | ||
1611 | if (IS_NOQUOTA(inode)) { | ||
1612 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1613 | return QUOTA_OK; | ||
1614 | } | ||
1615 | spin_lock(&dq_data_lock); | ||
1616 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1617 | if (inode->i_dquot[cnt] == NODQUOT) | ||
1618 | continue; | ||
1619 | warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); | ||
1620 | dquot_decr_inodes(inode->i_dquot[cnt], number); | ||
1621 | } | ||
1622 | spin_unlock(&dq_data_lock); | ||
1623 | /* Dirtify all the dquots - this can block when journalling */ | ||
1624 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1625 | if (inode->i_dquot[cnt]) | ||
1626 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1627 | flush_warnings(inode->i_dquot, warntype); | ||
1628 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1629 | return QUOTA_OK; | ||
1630 | } | ||
1631 | EXPORT_SYMBOL(dquot_free_inode); | ||
1632 | |||
1633 | /* | ||
1634 | * Callback function to get reserved quota space from the underlying filesystem | ||
1635 | */ | ||
1636 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
1637 | { | ||
1638 | qsize_t reserved_space = 0; | ||
1639 | |||
1640 | if (sb_any_quota_active(inode->i_sb) && | ||
1641 | inode->i_sb->dq_op->get_reserved_space) | ||
1642 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
1643 | return reserved_space; | ||
1644 | } | ||
1645 | |||
1646 | /* | ||
1647 | * Transfer the number of inodes and blocks from one diskquota to another. | ||
1648 | * | ||
1649 | * This operation can block, but only after everything is updated | ||
1650 | * A transaction must be started when entering this function. | ||
1651 | */ | ||
1652 | int dquot_transfer(struct inode *inode, struct iattr *iattr) | ||
1653 | { | ||
1654 | qsize_t space, cur_space; | ||
1655 | qsize_t rsv_space = 0; | ||
1656 | struct dquot *transfer_from[MAXQUOTAS]; | ||
1657 | struct dquot *transfer_to[MAXQUOTAS]; | ||
1658 | int cnt, ret = QUOTA_OK; | ||
1659 | int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, | ||
1660 | chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; | ||
1661 | char warntype_to[MAXQUOTAS]; | ||
1662 | char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; | ||
1663 | |||
1664 | /* First test before acquiring mutex - solves deadlocks when we | ||
1665 | * re-enter the quota code and are already holding the mutex */ | ||
1666 | if (IS_NOQUOTA(inode)) | ||
1667 | return QUOTA_OK; | ||
1668 | /* Initialize the arrays */ | ||
1669 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1670 | transfer_from[cnt] = NODQUOT; | ||
1671 | transfer_to[cnt] = NODQUOT; | ||
1672 | warntype_to[cnt] = QUOTA_NL_NOWARN; | ||
1673 | switch (cnt) { | ||
1674 | case USRQUOTA: | ||
1675 | if (!chuid) | ||
1676 | continue; | ||
1677 | transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt); | ||
1678 | break; | ||
1679 | case GRPQUOTA: | ||
1680 | if (!chgid) | ||
1681 | continue; | ||
1682 | transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt); | ||
1683 | break; | ||
1684 | } | ||
1685 | } | ||
1686 | |||
1687 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1688 | /* Now recheck reliably when holding dqptr_sem */ | ||
1689 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ | ||
1690 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1691 | goto put_all; | ||
1692 | } | ||
1693 | spin_lock(&dq_data_lock); | ||
1694 | cur_space = inode_get_bytes(inode); | ||
1695 | rsv_space = dquot_get_reserved_space(inode); | ||
1696 | space = cur_space + rsv_space; | ||
1697 | /* Build the transfer_from list and check the limits */ | ||
1698 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1699 | if (transfer_to[cnt] == NODQUOT) | ||
1700 | continue; | ||
1701 | transfer_from[cnt] = inode->i_dquot[cnt]; | ||
1702 | if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == | ||
1703 | NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, | ||
1704 | warntype_to + cnt) == NO_QUOTA) | ||
1705 | goto over_quota; | ||
1706 | } | ||
1707 | |||
1708 | /* | ||
1709 | * Finally perform the needed transfer from transfer_from to transfer_to | ||
1710 | */ | ||
1711 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1712 | /* | ||
1713 | * Skip changes for the same uid or gid, or for a turned-off quota type. | ||
1714 | */ | ||
1715 | if (transfer_to[cnt] == NODQUOT) | ||
1716 | continue; | ||
1717 | |||
1718 | /* Due to an IO error we might not have a transfer_from[] structure */ | ||
1719 | if (transfer_from[cnt]) { | ||
1720 | warntype_from_inodes[cnt] = | ||
1721 | info_idq_free(transfer_from[cnt], 1); | ||
1722 | warntype_from_space[cnt] = | ||
1723 | info_bdq_free(transfer_from[cnt], space); | ||
1724 | dquot_decr_inodes(transfer_from[cnt], 1); | ||
1725 | dquot_decr_space(transfer_from[cnt], cur_space); | ||
1726 | dquot_free_reserved_space(transfer_from[cnt], | ||
1727 | rsv_space); | ||
1728 | } | ||
1729 | |||
1730 | dquot_incr_inodes(transfer_to[cnt], 1); | ||
1731 | dquot_incr_space(transfer_to[cnt], cur_space); | ||
1732 | dquot_resv_space(transfer_to[cnt], rsv_space); | ||
1733 | |||
1734 | inode->i_dquot[cnt] = transfer_to[cnt]; | ||
1735 | } | ||
1736 | spin_unlock(&dq_data_lock); | ||
1737 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1738 | |||
1739 | /* Dirtify all the dquots - this can block when journalling */ | ||
1740 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1741 | if (transfer_from[cnt]) | ||
1742 | mark_dquot_dirty(transfer_from[cnt]); | ||
1743 | if (transfer_to[cnt]) { | ||
1744 | mark_dquot_dirty(transfer_to[cnt]); | ||
1745 | /* The reference we got is transferred to the inode */ | ||
1746 | transfer_to[cnt] = NODQUOT; | ||
1747 | } | ||
1748 | } | ||
1749 | warn_put_all: | ||
1750 | flush_warnings(transfer_to, warntype_to); | ||
1751 | flush_warnings(transfer_from, warntype_from_inodes); | ||
1752 | flush_warnings(transfer_from, warntype_from_space); | ||
1753 | put_all: | ||
1754 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1755 | dqput(transfer_from[cnt]); | ||
1756 | dqput(transfer_to[cnt]); | ||
1757 | } | ||
1758 | return ret; | ||
1759 | over_quota: | ||
1760 | spin_unlock(&dq_data_lock); | ||
1761 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1762 | /* Clear dquot pointers we don't want to dqput() */ | ||
1763 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1764 | transfer_from[cnt] = NODQUOT; | ||
1765 | ret = NO_QUOTA; | ||
1766 | goto warn_put_all; | ||
1767 | } | ||
1768 | EXPORT_SYMBOL(dquot_transfer); | ||
1769 | |||
1770 | /* Wrapper for transferring ownership of an inode */ | ||
1771 | int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) | ||
1772 | { | ||
1773 | if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { | ||
1774 | vfs_dq_init(inode); | ||
1775 | if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) | ||
1776 | return 1; | ||
1777 | } | ||
1778 | return 0; | ||
1779 | } | ||
1780 | EXPORT_SYMBOL(vfs_dq_transfer); | ||
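| /* | ||
| * Illustrative sketch of a typical caller: a filesystem's setattr path would | ||
| * usually call this before changing i_uid/i_gid, along the lines of | ||
| * | ||
| *	if (attr->ia_valid & (ATTR_UID | ATTR_GID)) | ||
| *		if (vfs_dq_transfer(inode, attr)) | ||
| *			return -EDQUOT; | ||
| */ | ||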
1781 | |||
1782 | /* | ||
1783 | * Write info of quota file to disk | ||
1784 | */ | ||
1785 | int dquot_commit_info(struct super_block *sb, int type) | ||
1786 | { | ||
1787 | int ret; | ||
1788 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1789 | |||
1790 | mutex_lock(&dqopt->dqio_mutex); | ||
1791 | ret = dqopt->ops[type]->write_file_info(sb, type); | ||
1792 | mutex_unlock(&dqopt->dqio_mutex); | ||
1793 | return ret; | ||
1794 | } | ||
1795 | EXPORT_SYMBOL(dquot_commit_info); | ||
1796 | |||
1797 | /* | ||
1798 | * Definitions of diskquota operations. | ||
1799 | */ | ||
1800 | struct dquot_operations dquot_operations = { | ||
1801 | .initialize = dquot_initialize, | ||
1802 | .drop = dquot_drop, | ||
1803 | .alloc_space = dquot_alloc_space, | ||
1804 | .alloc_inode = dquot_alloc_inode, | ||
1805 | .free_space = dquot_free_space, | ||
1806 | .free_inode = dquot_free_inode, | ||
1807 | .transfer = dquot_transfer, | ||
1808 | .write_dquot = dquot_commit, | ||
1809 | .acquire_dquot = dquot_acquire, | ||
1810 | .release_dquot = dquot_release, | ||
1811 | .mark_dirty = dquot_mark_dquot_dirty, | ||
1812 | .write_info = dquot_commit_info, | ||
1813 | .alloc_dquot = dquot_alloc, | ||
1814 | .destroy_dquot = dquot_destroy, | ||
1815 | }; | ||
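| /* | ||
| * A filesystem relying on this generic implementation typically wires the | ||
| * operations up at mount time, e.g. | ||
| * | ||
| *	sb->dq_op = &dquot_operations; | ||
| *	sb->s_qcop = &vfs_quotactl_ops; | ||
| */ | ||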
1816 | |||
1817 | /* | ||
1818 | * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) | ||
1819 | */ | ||
1820 | int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) | ||
1821 | { | ||
1822 | int cnt, ret = 0; | ||
1823 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1824 | struct inode *toputinode[MAXQUOTAS]; | ||
1825 | |||
1826 | /* We cannot turn off usage accounting without also turning off limits, | ||
1827 | * nor can we suspend quotas and turn them off at the same time. */ | ||
1828 | if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) | ||
1829 | || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | | ||
1830 | DQUOT_USAGE_ENABLED))) | ||
1831 | return -EINVAL; | ||
1832 | |||
1833 | /* We need to serialize quota_off() for device */ | ||
1834 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1835 | |||
1836 | /* | ||
1837 | * Skip everything if there's nothing to do. We have to do this because | ||
1838 | * sometimes we are called when fill_super() failed and calling | ||
1839 | * sync_fs() in such cases does no good. | ||
1840 | */ | ||
1841 | if (!sb_any_quota_loaded(sb)) { | ||
1842 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1843 | return 0; | ||
1844 | } | ||
1845 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1846 | toputinode[cnt] = NULL; | ||
1847 | if (type != -1 && cnt != type) | ||
1848 | continue; | ||
1849 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1850 | continue; | ||
1851 | |||
1852 | if (flags & DQUOT_SUSPENDED) { | ||
1853 | spin_lock(&dq_state_lock); | ||
1854 | dqopt->flags |= | ||
1855 | dquot_state_flag(DQUOT_SUSPENDED, cnt); | ||
1856 | spin_unlock(&dq_state_lock); | ||
1857 | } else { | ||
1858 | spin_lock(&dq_state_lock); | ||
1859 | dqopt->flags &= ~dquot_state_flag(flags, cnt); | ||
1860 | /* Turning off suspended quotas? */ | ||
1861 | if (!sb_has_quota_loaded(sb, cnt) && | ||
1862 | sb_has_quota_suspended(sb, cnt)) { | ||
1863 | dqopt->flags &= ~dquot_state_flag( | ||
1864 | DQUOT_SUSPENDED, cnt); | ||
1865 | spin_unlock(&dq_state_lock); | ||
1866 | iput(dqopt->files[cnt]); | ||
1867 | dqopt->files[cnt] = NULL; | ||
1868 | continue; | ||
1869 | } | ||
1870 | spin_unlock(&dq_state_lock); | ||
1871 | } | ||
1872 | |||
1873 | /* Do we still have to keep quota loaded? */ | ||
1874 | if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) | ||
1875 | continue; | ||
1876 | |||
1877 | /* Note: these are blocking operations */ | ||
1878 | drop_dquot_ref(sb, cnt); | ||
1879 | invalidate_dquots(sb, cnt); | ||
1880 | /* | ||
1881 | * Now all dquots should be invalidated and all writes done, so we | ||
1882 | * should be the only users of the info. No locks needed. | ||
1883 | */ | ||
1884 | if (info_dirty(&dqopt->info[cnt])) | ||
1885 | sb->dq_op->write_info(sb, cnt); | ||
1886 | if (dqopt->ops[cnt]->free_file_info) | ||
1887 | dqopt->ops[cnt]->free_file_info(sb, cnt); | ||
1888 | put_quota_format(dqopt->info[cnt].dqi_format); | ||
1889 | |||
1890 | toputinode[cnt] = dqopt->files[cnt]; | ||
1891 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1892 | dqopt->files[cnt] = NULL; | ||
1893 | dqopt->info[cnt].dqi_flags = 0; | ||
1894 | dqopt->info[cnt].dqi_igrace = 0; | ||
1895 | dqopt->info[cnt].dqi_bgrace = 0; | ||
1896 | dqopt->ops[cnt] = NULL; | ||
1897 | } | ||
1898 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1899 | |||
1900 | /* Skip syncing and setting flags if quota files are hidden */ | ||
1901 | if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) | ||
1902 | goto put_inodes; | ||
1903 | |||
1904 | /* Sync the superblock so that buffers with quota data are written to | ||
1905 | * disk (and so userspace sees correct data afterwards). */ | ||
1906 | if (sb->s_op->sync_fs) | ||
1907 | sb->s_op->sync_fs(sb, 1); | ||
1908 | sync_blockdev(sb->s_bdev); | ||
1909 | /* Now the quota files are just ordinary files and we can set the | ||
1910 | * inode flags back. Moreover we discard the pagecache so that | ||
1911 | * userspace sees the writes we did bypassing the pagecache. We | ||
1912 | * must also discard the blockdev buffers so that we see the | ||
1913 | * changes done by userspace on the next quotaon() */ | ||
1914 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1915 | if (toputinode[cnt]) { | ||
1916 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1917 | /* If quota was reenabled in the meantime, we have | ||
1918 | * nothing to do */ | ||
1919 | if (!sb_has_quota_loaded(sb, cnt)) { | ||
1920 | mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA); | ||
1921 | toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | | ||
1922 | S_NOATIME | S_NOQUOTA); | ||
1923 | truncate_inode_pages(&toputinode[cnt]->i_data, 0); | ||
1924 | mutex_unlock(&toputinode[cnt]->i_mutex); | ||
1925 | mark_inode_dirty(toputinode[cnt]); | ||
1926 | } | ||
1927 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1928 | } | ||
1929 | if (sb->s_bdev) | ||
1930 | invalidate_bdev(sb->s_bdev); | ||
1931 | put_inodes: | ||
1932 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1933 | if (toputinode[cnt]) { | ||
1934 | /* On remount RO, we keep the inode pointer so that we | ||
1935 | * can reenable quota on the subsequent remount RW. We | ||
1936 | * have to check the 'flags' variable and not use the | ||
1937 | * sb_has_* functions because another quotaon / quotaoff | ||
1938 | * could change the global state before we got here. We | ||
1939 | * refuse to suspend quotas when there is a pending delete | ||
1940 | * on the quota file... */ | ||
1941 | if (!(flags & DQUOT_SUSPENDED)) | ||
1942 | iput(toputinode[cnt]); | ||
1943 | else if (!toputinode[cnt]->i_nlink) | ||
1944 | ret = -EBUSY; | ||
1945 | } | ||
1946 | return ret; | ||
1947 | } | ||
1948 | EXPORT_SYMBOL(vfs_quota_disable); | ||
1949 | |||
1950 | int vfs_quota_off(struct super_block *sb, int type, int remount) | ||
1951 | { | ||
1952 | return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : | ||
1953 | (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); | ||
1954 | } | ||
1955 | EXPORT_SYMBOL(vfs_quota_off); | ||
1956 | /* | ||
1957 | * Turn quotas on for a device | ||
1958 | */ | ||
1959 | |||
1960 | /* | ||
1961 | * Helper function to turn quotas on when we already have the inode of | ||
1962 | * quota file and no quota information is loaded. | ||
1963 | */ | ||
1964 | static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | ||
1965 | unsigned int flags) | ||
1966 | { | ||
1967 | struct quota_format_type *fmt = find_quota_format(format_id); | ||
1968 | struct super_block *sb = inode->i_sb; | ||
1969 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1970 | int error; | ||
1971 | int oldflags = -1; | ||
1972 | |||
1973 | if (!fmt) | ||
1974 | return -ESRCH; | ||
1975 | if (!S_ISREG(inode->i_mode)) { | ||
1976 | error = -EACCES; | ||
1977 | goto out_fmt; | ||
1978 | } | ||
1979 | if (IS_RDONLY(inode)) { | ||
1980 | error = -EROFS; | ||
1981 | goto out_fmt; | ||
1982 | } | ||
1983 | if (!sb->s_op->quota_write || !sb->s_op->quota_read) { | ||
1984 | error = -EINVAL; | ||
1985 | goto out_fmt; | ||
1986 | } | ||
1987 | /* Usage always has to be set... */ | ||
1988 | if (!(flags & DQUOT_USAGE_ENABLED)) { | ||
1989 | error = -EINVAL; | ||
1990 | goto out_fmt; | ||
1991 | } | ||
1992 | |||
1993 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
1994 | /* As we bypass the pagecache we must now flush the inode so | ||
1995 | * that we see all the changes from userspace... */ | ||
1996 | write_inode_now(inode, 1); | ||
1997 | /* And now flush the block cache so that the kernel sees the | ||
1998 | * changes */ | ||
1999 | invalidate_bdev(sb->s_bdev); | ||
2000 | } | ||
2001 | mutex_lock(&inode->i_mutex); | ||
2002 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2003 | if (sb_has_quota_loaded(sb, type)) { | ||
2004 | error = -EBUSY; | ||
2005 | goto out_lock; | ||
2006 | } | ||
2007 | |||
2008 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
2009 | /* We don't want quota and atime on quota files (deadlocks | ||
2010 | * possible). Also nobody should write to the file - we use | ||
2011 | * special IO operations which ignore the immutable bit. */ | ||
2012 | down_write(&dqopt->dqptr_sem); | ||
2013 | oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA); | ||
2014 | inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; | ||
2015 | up_write(&dqopt->dqptr_sem); | ||
2016 | sb->dq_op->drop(inode); | ||
2017 | } | ||
2018 | |||
2019 | error = -EIO; | ||
2020 | dqopt->files[type] = igrab(inode); | ||
2021 | if (!dqopt->files[type]) | ||
2022 | goto out_lock; | ||
2023 | error = -EINVAL; | ||
2024 | if (!fmt->qf_ops->check_quota_file(sb, type)) | ||
2025 | goto out_file_init; | ||
2026 | |||
2027 | dqopt->ops[type] = fmt->qf_ops; | ||
2028 | dqopt->info[type].dqi_format = fmt; | ||
2029 | dqopt->info[type].dqi_fmt_id = format_id; | ||
2030 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); | ||
2031 | mutex_lock(&dqopt->dqio_mutex); | ||
2032 | if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { | ||
2033 | mutex_unlock(&dqopt->dqio_mutex); | ||
2034 | goto out_file_init; | ||
2035 | } | ||
2036 | mutex_unlock(&dqopt->dqio_mutex); | ||
2037 | mutex_unlock(&inode->i_mutex); | ||
2038 | spin_lock(&dq_state_lock); | ||
2039 | dqopt->flags |= dquot_state_flag(flags, type); | ||
2040 | spin_unlock(&dq_state_lock); | ||
2041 | |||
2042 | add_dquot_ref(sb, type); | ||
2043 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2044 | |||
2045 | return 0; | ||
2046 | |||
2047 | out_file_init: | ||
2048 | dqopt->files[type] = NULL; | ||
2049 | iput(inode); | ||
2050 | out_lock: | ||
2051 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2052 | if (oldflags != -1) { | ||
2053 | down_write(&dqopt->dqptr_sem); | ||
2054 | /* Set the flags back (in the case of accidental quotaon() | ||
2055 | * on a wrong file we don't want to mess up the flags) */ | ||
2056 | inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); | ||
2057 | inode->i_flags |= oldflags; | ||
2058 | up_write(&dqopt->dqptr_sem); | ||
2059 | } | ||
2060 | mutex_unlock(&inode->i_mutex); | ||
2061 | out_fmt: | ||
2062 | put_quota_format(fmt); | ||
2063 | |||
2064 | return error; | ||
2065 | } | ||
2066 | |||
2067 | /* Reenable quotas on remount RW */ | ||
2068 | static int vfs_quota_on_remount(struct super_block *sb, int type) | ||
2069 | { | ||
2070 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2071 | struct inode *inode; | ||
2072 | int ret; | ||
2073 | unsigned int flags; | ||
2074 | |||
2075 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2076 | if (!sb_has_quota_suspended(sb, type)) { | ||
2077 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2078 | return 0; | ||
2079 | } | ||
2080 | inode = dqopt->files[type]; | ||
2081 | dqopt->files[type] = NULL; | ||
2082 | spin_lock(&dq_state_lock); | ||
2083 | flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | | ||
2084 | DQUOT_LIMITS_ENABLED, type); | ||
2085 | dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); | ||
2086 | spin_unlock(&dq_state_lock); | ||
2087 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2088 | |||
2089 | flags = dquot_generic_flag(flags, type); | ||
2090 | ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, | ||
2091 | flags); | ||
2092 | iput(inode); | ||
2093 | |||
2094 | return ret; | ||
2095 | } | ||
2096 | |||
2097 | int vfs_quota_on_path(struct super_block *sb, int type, int format_id, | ||
2098 | struct path *path) | ||
2099 | { | ||
2100 | int error = security_quota_on(path->dentry); | ||
2101 | if (error) | ||
2102 | return error; | ||
2103 | /* Quota file not on the same filesystem? */ | ||
2104 | if (path->mnt->mnt_sb != sb) | ||
2105 | error = -EXDEV; | ||
2106 | else | ||
2107 | error = vfs_load_quota_inode(path->dentry->d_inode, type, | ||
2108 | format_id, DQUOT_USAGE_ENABLED | | ||
2109 | DQUOT_LIMITS_ENABLED); | ||
2110 | return error; | ||
2111 | } | ||
2112 | EXPORT_SYMBOL(vfs_quota_on_path); | ||
2113 | |||
2114 | int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, | ||
2115 | int remount) | ||
2116 | { | ||
2117 | struct path path; | ||
2118 | int error; | ||
2119 | |||
2120 | if (remount) | ||
2121 | return vfs_quota_on_remount(sb, type); | ||
2122 | |||
2123 | error = kern_path(name, LOOKUP_FOLLOW, &path); | ||
2124 | if (!error) { | ||
2125 | error = vfs_quota_on_path(sb, type, format_id, &path); | ||
2126 | path_put(&path); | ||
2127 | } | ||
2128 | return error; | ||
2129 | } | ||
2130 | EXPORT_SYMBOL(vfs_quota_on); | ||
2131 | |||
2132 | /* | ||
2133 | * More powerful function for turning on quotas allowing setting | ||
2134 | * of individual quota flags | ||
2135 | */ | ||
2136 | int vfs_quota_enable(struct inode *inode, int type, int format_id, | ||
2137 | unsigned int flags) | ||
2138 | { | ||
2139 | int ret = 0; | ||
2140 | struct super_block *sb = inode->i_sb; | ||
2141 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2142 | |||
2143 | /* Just unsuspend quotas? */ | ||
2144 | if (flags & DQUOT_SUSPENDED) | ||
2145 | return vfs_quota_on_remount(sb, type); | ||
2146 | if (!flags) | ||
2147 | return 0; | ||
2148 | /* Just updating flags needed? */ | ||
2149 | if (sb_has_quota_loaded(sb, type)) { | ||
2150 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2151 | /* Now do a reliable test... */ | ||
2152 | if (!sb_has_quota_loaded(sb, type)) { | ||
2153 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2154 | goto load_quota; | ||
2155 | } | ||
2156 | if (flags & DQUOT_USAGE_ENABLED && | ||
2157 | sb_has_quota_usage_enabled(sb, type)) { | ||
2158 | ret = -EBUSY; | ||
2159 | goto out_lock; | ||
2160 | } | ||
2161 | if (flags & DQUOT_LIMITS_ENABLED && | ||
2162 | sb_has_quota_limits_enabled(sb, type)) { | ||
2163 | ret = -EBUSY; | ||
2164 | goto out_lock; | ||
2165 | } | ||
2166 | spin_lock(&dq_state_lock); | ||
2167 | sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); | ||
2168 | spin_unlock(&dq_state_lock); | ||
2169 | out_lock: | ||
2170 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2171 | return ret; | ||
2172 | } | ||
2173 | |||
2174 | load_quota: | ||
2175 | return vfs_load_quota_inode(inode, type, format_id, flags); | ||
2176 | } | ||
2177 | EXPORT_SYMBOL(vfs_quota_enable); | ||
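| /* | ||
| * For example, enabling accounting without limit enforcement could look like | ||
| * (qf_inode standing for the quota file's inode): | ||
| * | ||
| *	vfs_quota_enable(qf_inode, USRQUOTA, QFMT_VFS_V0, | ||
| *			 DQUOT_USAGE_ENABLED); | ||
| */ | ||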
2178 | |||
2179 | /* | ||
2180 | * This function is used when filesystem needs to initialize quotas | ||
2181 | * during mount time. | ||
2182 | */ | ||
2183 | int vfs_quota_on_mount(struct super_block *sb, char *qf_name, | ||
2184 | int format_id, int type) | ||
2185 | { | ||
2186 | struct dentry *dentry; | ||
2187 | int error; | ||
2188 | |||
2189 | dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); | ||
2190 | if (IS_ERR(dentry)) | ||
2191 | return PTR_ERR(dentry); | ||
2192 | |||
2193 | if (!dentry->d_inode) { | ||
2194 | error = -ENOENT; | ||
2195 | goto out; | ||
2196 | } | ||
2197 | |||
2198 | error = security_quota_on(dentry); | ||
2199 | if (!error) | ||
2200 | error = vfs_load_quota_inode(dentry->d_inode, type, format_id, | ||
2201 | DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); | ||
2202 | |||
2203 | out: | ||
2204 | dput(dentry); | ||
2205 | return error; | ||
2206 | } | ||
2207 | EXPORT_SYMBOL(vfs_quota_on_mount); | ||
2208 | |||
2209 | /* Wrapper to turn on quotas when remounting rw */ | ||
2210 | int vfs_dq_quota_on_remount(struct super_block *sb) | ||
2211 | { | ||
2212 | int cnt; | ||
2213 | int ret = 0, err; | ||
2214 | |||
2215 | if (!sb->s_qcop || !sb->s_qcop->quota_on) | ||
2216 | return -ENOSYS; | ||
2217 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
2218 | err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); | ||
2219 | if (err < 0 && !ret) | ||
2220 | ret = err; | ||
2221 | } | ||
2222 | return ret; | ||
2223 | } | ||
2224 | EXPORT_SYMBOL(vfs_dq_quota_on_remount); | ||
2225 | |||
2226 | static inline qsize_t qbtos(qsize_t blocks) | ||
2227 | { | ||
2228 | return blocks << QIF_DQBLKSIZE_BITS; | ||
2229 | } | ||
2230 | |||
2231 | static inline qsize_t stoqb(qsize_t space) | ||
2232 | { | ||
2233 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; | ||
2234 | } | ||
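| /* | ||
| * QIF_DQBLKSIZE_BITS is 10, so a quota block is 1024 bytes: for example | ||
| * qbtos(2) == 2048 and stoqb(1536) rounds up to 2. Block limits are kept | ||
| * in bytes internally and exposed to userspace in these 1K units. | ||
| */ | ||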
2235 | |||
2236 | /* Generic routine for getting common part of quota structure */ | ||
2237 | static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2238 | { | ||
2239 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2240 | |||
2241 | spin_lock(&dq_data_lock); | ||
2242 | di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); | ||
2243 | di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); | ||
2244 | di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; | ||
2245 | di->dqb_ihardlimit = dm->dqb_ihardlimit; | ||
2246 | di->dqb_isoftlimit = dm->dqb_isoftlimit; | ||
2247 | di->dqb_curinodes = dm->dqb_curinodes; | ||
2248 | di->dqb_btime = dm->dqb_btime; | ||
2249 | di->dqb_itime = dm->dqb_itime; | ||
2250 | di->dqb_valid = QIF_ALL; | ||
2251 | spin_unlock(&dq_data_lock); | ||
2252 | } | ||
2253 | |||
2254 | int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) | ||
2255 | { | ||
2256 | struct dquot *dquot; | ||
2257 | |||
2258 | dquot = dqget(sb, id, type); | ||
2259 | if (dquot == NODQUOT) | ||
2260 | return -ESRCH; | ||
2261 | do_get_dqblk(dquot, di); | ||
2262 | dqput(dquot); | ||
2263 | |||
2264 | return 0; | ||
2265 | } | ||
2266 | EXPORT_SYMBOL(vfs_get_dqblk); | ||
2267 | |||
2268 | /* Generic routine for setting common part of quota structure */ | ||
2269 | static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2270 | { | ||
2271 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2272 | int check_blim = 0, check_ilim = 0; | ||
2273 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
2274 | |||
2275 | if ((di->dqb_valid & QIF_BLIMITS && | ||
2276 | (di->dqb_bhardlimit > dqi->dqi_maxblimit || | ||
2277 | di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || | ||
2278 | (di->dqb_valid & QIF_ILIMITS && | ||
2279 | (di->dqb_ihardlimit > dqi->dqi_maxilimit || | ||
2280 | di->dqb_isoftlimit > dqi->dqi_maxilimit))) | ||
2281 | return -ERANGE; | ||
2282 | |||
2283 | spin_lock(&dq_data_lock); | ||
2284 | if (di->dqb_valid & QIF_SPACE) { | ||
2285 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; | ||
2286 | check_blim = 1; | ||
2287 | __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | ||
2288 | } | ||
2289 | if (di->dqb_valid & QIF_BLIMITS) { | ||
2290 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); | ||
2291 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); | ||
2292 | check_blim = 1; | ||
2293 | __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | ||
2294 | } | ||
2295 | if (di->dqb_valid & QIF_INODES) { | ||
2296 | dm->dqb_curinodes = di->dqb_curinodes; | ||
2297 | check_ilim = 1; | ||
2298 | __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | ||
2299 | } | ||
2300 | if (di->dqb_valid & QIF_ILIMITS) { | ||
2301 | dm->dqb_isoftlimit = di->dqb_isoftlimit; | ||
2302 | dm->dqb_ihardlimit = di->dqb_ihardlimit; | ||
2303 | check_ilim = 1; | ||
2304 | __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | ||
2305 | } | ||
2306 | if (di->dqb_valid & QIF_BTIME) { | ||
2307 | dm->dqb_btime = di->dqb_btime; | ||
2308 | check_blim = 1; | ||
2309 | __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | ||
2310 | } | ||
2311 | if (di->dqb_valid & QIF_ITIME) { | ||
2312 | dm->dqb_itime = di->dqb_itime; | ||
2313 | check_ilim = 1; | ||
2314 | __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | ||
2315 | } | ||
2316 | |||
2317 | if (check_blim) { | ||
2318 | if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) { | ||
2319 | dm->dqb_btime = 0; | ||
2320 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
2321 | } | ||
2322 | else if (!(di->dqb_valid & QIF_BTIME)) /* Set grace only if user hasn't provided his own... */ | ||
2323 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; | ||
2324 | } | ||
2325 | if (check_ilim) { | ||
2326 | if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) { | ||
2327 | dm->dqb_itime = 0; | ||
2328 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
2329 | } | ||
2330 | else if (!(di->dqb_valid & QIF_ITIME)) /* Set grace only if user hasn't provided his own... */ | ||
2331 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; | ||
2332 | } | ||
2333 | if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) | ||
2334 | clear_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2335 | else | ||
2336 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2337 | spin_unlock(&dq_data_lock); | ||
2338 | mark_dquot_dirty(dquot); | ||
2339 | |||
2340 | return 0; | ||
2341 | } | ||
2342 | |||
2343 | int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di) | ||
2344 | { | ||
2345 | struct dquot *dquot; | ||
2346 | int rc; | ||
2347 | |||
2348 | dquot = dqget(sb, id, type); | ||
2349 | if (!dquot) { | ||
2350 | rc = -ESRCH; | ||
2351 | goto out; | ||
2352 | } | ||
2353 | rc = do_set_dqblk(dquot, di); | ||
2354 | dqput(dquot); | ||
2355 | out: | ||
2356 | return rc; | ||
2357 | } | ||
2358 | EXPORT_SYMBOL(vfs_set_dqblk); | ||
2359 | |||
2360 | /* Generic routine for getting common part of quota file information */ | ||
2361 | int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2362 | { | ||
2363 | struct mem_dqinfo *mi; | ||
2364 | |||
2365 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2366 | if (!sb_has_quota_active(sb, type)) { | ||
2367 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2368 | return -ESRCH; | ||
2369 | } | ||
2370 | mi = sb_dqopt(sb)->info + type; | ||
2371 | spin_lock(&dq_data_lock); | ||
2372 | ii->dqi_bgrace = mi->dqi_bgrace; | ||
2373 | ii->dqi_igrace = mi->dqi_igrace; | ||
2374 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | ||
2375 | ii->dqi_valid = IIF_ALL; | ||
2376 | spin_unlock(&dq_data_lock); | ||
2377 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2378 | return 0; | ||
2379 | } | ||
2380 | EXPORT_SYMBOL(vfs_get_dqinfo); | ||
2381 | |||
2382 | /* Generic routine for setting common part of quota file information */ | ||
2383 | int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2384 | { | ||
2385 | struct mem_dqinfo *mi; | ||
2386 | int err = 0; | ||
2387 | |||
2388 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2389 | if (!sb_has_quota_active(sb, type)) { | ||
2390 | err = -ESRCH; | ||
2391 | goto out; | ||
2392 | } | ||
2393 | mi = sb_dqopt(sb)->info + type; | ||
2394 | spin_lock(&dq_data_lock); | ||
2395 | if (ii->dqi_valid & IIF_BGRACE) | ||
2396 | mi->dqi_bgrace = ii->dqi_bgrace; | ||
2397 | if (ii->dqi_valid & IIF_IGRACE) | ||
2398 | mi->dqi_igrace = ii->dqi_igrace; | ||
2399 | if (ii->dqi_valid & IIF_FLAGS) | ||
2400 | mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK); | ||
2401 | spin_unlock(&dq_data_lock); | ||
2402 | mark_info_dirty(sb, type); | ||
2403 | /* Force write to disk */ | ||
2404 | sb->dq_op->write_info(sb, type); | ||
2405 | out: | ||
2406 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2407 | return err; | ||
2408 | } | ||
2409 | EXPORT_SYMBOL(vfs_set_dqinfo); | ||
2410 | |||
2411 | struct quotactl_ops vfs_quotactl_ops = { | ||
2412 | .quota_on = vfs_quota_on, | ||
2413 | .quota_off = vfs_quota_off, | ||
2414 | .quota_sync = vfs_quota_sync, | ||
2415 | .get_info = vfs_get_dqinfo, | ||
2416 | .set_info = vfs_set_dqinfo, | ||
2417 | .get_dqblk = vfs_get_dqblk, | ||
2418 | .set_dqblk = vfs_set_dqblk | ||
2419 | }; | ||
2420 | |||
2421 | static ctl_table fs_dqstats_table[] = { | ||
2422 | { | ||
2423 | .ctl_name = FS_DQ_LOOKUPS, | ||
2424 | .procname = "lookups", | ||
2425 | .data = &dqstats.lookups, | ||
2426 | .maxlen = sizeof(int), | ||
2427 | .mode = 0444, | ||
2428 | .proc_handler = &proc_dointvec, | ||
2429 | }, | ||
2430 | { | ||
2431 | .ctl_name = FS_DQ_DROPS, | ||
2432 | .procname = "drops", | ||
2433 | .data = &dqstats.drops, | ||
2434 | .maxlen = sizeof(int), | ||
2435 | .mode = 0444, | ||
2436 | .proc_handler = &proc_dointvec, | ||
2437 | }, | ||
2438 | { | ||
2439 | .ctl_name = FS_DQ_READS, | ||
2440 | .procname = "reads", | ||
2441 | .data = &dqstats.reads, | ||
2442 | .maxlen = sizeof(int), | ||
2443 | .mode = 0444, | ||
2444 | .proc_handler = &proc_dointvec, | ||
2445 | }, | ||
2446 | { | ||
2447 | .ctl_name = FS_DQ_WRITES, | ||
2448 | .procname = "writes", | ||
2449 | .data = &dqstats.writes, | ||
2450 | .maxlen = sizeof(int), | ||
2451 | .mode = 0444, | ||
2452 | .proc_handler = &proc_dointvec, | ||
2453 | }, | ||
2454 | { | ||
2455 | .ctl_name = FS_DQ_CACHE_HITS, | ||
2456 | .procname = "cache_hits", | ||
2457 | .data = &dqstats.cache_hits, | ||
2458 | .maxlen = sizeof(int), | ||
2459 | .mode = 0444, | ||
2460 | .proc_handler = &proc_dointvec, | ||
2461 | }, | ||
2462 | { | ||
2463 | .ctl_name = FS_DQ_ALLOCATED, | ||
2464 | .procname = "allocated_dquots", | ||
2465 | .data = &dqstats.allocated_dquots, | ||
2466 | .maxlen = sizeof(int), | ||
2467 | .mode = 0444, | ||
2468 | .proc_handler = &proc_dointvec, | ||
2469 | }, | ||
2470 | { | ||
2471 | .ctl_name = FS_DQ_FREE, | ||
2472 | .procname = "free_dquots", | ||
2473 | .data = &dqstats.free_dquots, | ||
2474 | .maxlen = sizeof(int), | ||
2475 | .mode = 0444, | ||
2476 | .proc_handler = &proc_dointvec, | ||
2477 | }, | ||
2478 | { | ||
2479 | .ctl_name = FS_DQ_SYNCS, | ||
2480 | .procname = "syncs", | ||
2481 | .data = &dqstats.syncs, | ||
2482 | .maxlen = sizeof(int), | ||
2483 | .mode = 0444, | ||
2484 | .proc_handler = &proc_dointvec, | ||
2485 | }, | ||
2486 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
2487 | { | ||
2488 | .ctl_name = FS_DQ_WARNINGS, | ||
2489 | .procname = "warnings", | ||
2490 | .data = &flag_print_warnings, | ||
2491 | .maxlen = sizeof(int), | ||
2492 | .mode = 0644, | ||
2493 | .proc_handler = &proc_dointvec, | ||
2494 | }, | ||
2495 | #endif | ||
2496 | { .ctl_name = 0 }, | ||
2497 | }; | ||
2498 | |||
2499 | static ctl_table fs_table[] = { | ||
2500 | { | ||
2501 | .ctl_name = FS_DQSTATS, | ||
2502 | .procname = "quota", | ||
2503 | .mode = 0555, | ||
2504 | .child = fs_dqstats_table, | ||
2505 | }, | ||
2506 | { .ctl_name = 0 }, | ||
2507 | }; | ||
2508 | |||
2509 | static ctl_table sys_table[] = { | ||
2510 | { | ||
2511 | .ctl_name = CTL_FS, | ||
2512 | .procname = "fs", | ||
2513 | .mode = 0555, | ||
2514 | .child = fs_table, | ||
2515 | }, | ||
2516 | { .ctl_name = 0 }, | ||
2517 | }; | ||
2518 | |||
2519 | static int __init dquot_init(void) | ||
2520 | { | ||
2521 | int i; | ||
2522 | unsigned long nr_hash, order; | ||
2523 | |||
2524 | printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); | ||
2525 | |||
2526 | register_sysctl_table(sys_table); | ||
2527 | |||
2528 | dquot_cachep = kmem_cache_create("dquot", | ||
2529 | sizeof(struct dquot), sizeof(unsigned long) * 4, | ||
2530 | (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| | ||
2531 | SLAB_MEM_SPREAD|SLAB_PANIC), | ||
2532 | NULL); | ||
2533 | |||
2534 | order = 0; | ||
2535 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); | ||
2536 | if (!dquot_hash) | ||
2537 | panic("Cannot create dquot hash table"); | ||
2538 | |||
2539 | /* Find the power-of-two number of hlist_heads which fits into the allocation */ | ||
2540 | nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); | ||
2541 | dq_hash_bits = 0; | ||
2542 | do { | ||
2543 | dq_hash_bits++; | ||
2544 | } while (nr_hash >> dq_hash_bits); | ||
2545 | dq_hash_bits--; | ||
2546 | |||
2547 | nr_hash = 1UL << dq_hash_bits; | ||
2548 | dq_hash_mask = nr_hash - 1; | ||
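| /* | ||
| * For example, with a 4096-byte page and 8-byte hlist_head, nr_hash starts | ||
| * at 512, the loop above leaves dq_hash_bits == 9, and dq_hash_mask == 511. | ||
| */ | ||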
2549 | for (i = 0; i < nr_hash; i++) | ||
2550 | INIT_HLIST_HEAD(dquot_hash + i); | ||
2551 | |||
2552 | printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", | ||
2553 | nr_hash, order, (PAGE_SIZE << order)); | ||
2554 | |||
2555 | register_shrinker(&dqcache_shrinker); | ||
2556 | |||
2557 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
2558 | if (genl_register_family(&quota_genl_family) != 0) | ||
2559 | printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n"); | ||
2560 | #endif | ||
2561 | |||
2562 | return 0; | ||
2563 | } | ||
2564 | module_init(dquot_init); | ||