Diffstat (limited to 'fs/quota/dquot.c')
-rw-r--r-- | fs/quota/dquot.c | 2617
1 file changed, 2617 insertions, 0 deletions
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
new file mode 100644
index 000000000000..2ca967a5ef77
--- /dev/null
+++ b/fs/quota/dquot.c
@@ -0,0 +1,2617 @@
1 | /* | ||
2 | * Implementation of the diskquota system for the LINUX operating system. QUOTA | ||
3 | * is implemented using the BSD system call interface as the means of | ||
4 | * communication with the user level. This file contains the generic routines | ||
5 | * called by the different filesystems on allocation of an inode or block. | ||
6 | * These routines take care of the administration needed to have a consistent | ||
7 | * diskquota tracking system. The ideas of both user and group quotas are based | ||
8 | * on the Melbourne quota system as used on BSD derived systems. The internal | ||
9 | * implementation is based on one of the several variants of the LINUX | ||
10 | * inode-subsystem with added complexity of the diskquota system. | ||
11 | * | ||
12 | * Author: Marco van Wieringen <mvw@planets.elm.net> | ||
13 | * | ||
14 | * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96 | ||
15 | * | ||
16 | * Revised list management to avoid races | ||
17 | * -- Bill Hawes, <whawes@star.net>, 9/98 | ||
18 | * | ||
19 | * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). | ||
20 | * As a consequence, the locking was moved from dquot_decr_...(), | ||
21 | * dquot_incr_...() to calling functions. | ||
22 | * invalidate_dquots() now writes modified dquots. | ||
23 | * Serialized quota_off() and quota_on() for mount point. | ||
24 | * Fixed a few bugs in grow_dquots(). | ||
25 | * Fixed deadlock in write_dquot() - we no longer account quotas on | ||
26 | * quota files | ||
27 | * remove_dquot_ref() moved to inode.c - it now traverses through inodes | ||
28 | * add_dquot_ref() restarts after blocking | ||
29 | * Added check for bogus uid and fixed check for group in quotactl. | ||
30 | * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99 | ||
31 | * | ||
32 | * Used struct list_head instead of own list struct | ||
33 | * Invalidation of referenced dquots is no longer possible | ||
34 | * Improved free_dquots list management | ||
35 | * Quota and i_blocks are now updated in one place to avoid races | ||
36 | * Warnings are now delayed so we won't block in critical section | ||
37 | * Write updated not to require dquot lock | ||
38 | * Jan Kara, <jack@suse.cz>, 9/2000 | ||
39 | * | ||
40 | * Added dynamic quota structure allocation | ||
41 | * Jan Kara <jack@suse.cz> 12/2000 | ||
42 | * | ||
43 | * Rewritten quota interface. Implemented new quota format and | ||
44 | * formats registering. | ||
45 | * Jan Kara, <jack@suse.cz>, 2001,2002 | ||
46 | * | ||
47 | * New SMP locking. | ||
48 | * Jan Kara, <jack@suse.cz>, 10/2002 | ||
49 | * | ||
50 | * Added journalled quota support, fixed lock inversion problems | ||
51 | * Jan Kara, <jack@suse.cz>, 2003,2004 | ||
52 | * | ||
53 | * (C) Copyright 1994 - 1997 Marco van Wieringen | ||
54 | */ | ||
55 | |||
56 | #include <linux/errno.h> | ||
57 | #include <linux/kernel.h> | ||
58 | #include <linux/fs.h> | ||
59 | #include <linux/mount.h> | ||
60 | #include <linux/mm.h> | ||
61 | #include <linux/time.h> | ||
62 | #include <linux/types.h> | ||
63 | #include <linux/string.h> | ||
64 | #include <linux/fcntl.h> | ||
65 | #include <linux/stat.h> | ||
66 | #include <linux/tty.h> | ||
67 | #include <linux/file.h> | ||
68 | #include <linux/slab.h> | ||
69 | #include <linux/sysctl.h> | ||
70 | #include <linux/init.h> | ||
71 | #include <linux/module.h> | ||
72 | #include <linux/proc_fs.h> | ||
73 | #include <linux/security.h> | ||
74 | #include <linux/kmod.h> | ||
75 | #include <linux/namei.h> | ||
76 | #include <linux/buffer_head.h> | ||
77 | #include <linux/capability.h> | ||
78 | #include <linux/quotaops.h> | ||
79 | #include <linux/writeback.h> /* for inode_lock, oddly enough.. */ | ||
80 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
81 | #include <net/netlink.h> | ||
82 | #include <net/genetlink.h> | ||
83 | #endif | ||
84 | |||
85 | #include <asm/uaccess.h> | ||
86 | |||
87 | #define __DQUOT_PARANOIA | ||
88 | |||
89 | /* | ||
90 | * There are three quota SMP locks. dq_list_lock protects all lists with quotas | ||
91 | * and quota formats, dqstats structure containing statistics about the lists | ||
92 | * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and | ||
93 | * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes. | ||
94 | * i_blocks and i_bytes updates itself are guarded by i_lock acquired directly | ||
95 | * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects | ||
96 | * modifications of quota state (on quotaon and quotaoff) and readers who care | ||
97 | * about latest values take it as well. | ||
98 | * | ||
99 | * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, | ||
100 | * dq_list_lock > dq_state_lock | ||
101 | * | ||
102 | * Note that some things (e.g. sb pointer, type, id) don't change during | ||
103 | * the life of the dquot structure and so needn't be protected by a lock | ||
104 | * | ||
105 | * Any operation working on dquots via inode pointers must hold dqptr_sem. If | ||
106 | * the operation is just reading pointers from the inode (or not using them at all), the | ||
107 | * read lock is enough. If pointers are altered, the function must hold the write lock | ||
108 | * (these locking rules also apply to the S_NOQUOTA flag in the inode - note that | ||
109 | * for altering the flag i_mutex is also needed). | ||
110 | * | ||
111 | * Each dquot has its dq_lock mutex. Locked dquots might not be referenced | ||
112 | * from inodes (dquot_alloc_space() and such don't check the dq_lock). | ||
113 | * Currently dquot is locked only when it is being read to memory (or space for | ||
114 | * it is being allocated) on the first dqget() and when it is being released on | ||
115 | * the last dqput(). The allocation and release operations are serialized by | ||
116 | * the dq_lock and by checking the use count in dquot_release(). Write | ||
117 | * operations on dquots don't hold dq_lock as they copy data under dq_data_lock | ||
118 | * spinlock to internal buffers before writing. | ||
119 | * | ||
120 | * Lock ordering (including related VFS locks) is the following: | ||
121 | * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > | ||
122 | * dqio_mutex | ||
123 | * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > | ||
124 | * dqptr_sem. But the filesystem has to take into account that functions such as | ||
125 | * dquot_alloc_space() acquire dqptr_sem and they usually have to be called | ||
126 | * from inside a transaction to keep filesystem consistency after a crash. Also | ||
127 | * filesystems usually want to do some IO on dquot from ->mark_dirty which is | ||
128 | * called with dqptr_sem held. | ||
129 | * i_mutex on quota files is special (it's below dqio_mutex) | ||
130 | */ | ||
131 | |||
132 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); | ||
133 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); | ||
134 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); | ||
135 | EXPORT_SYMBOL(dq_data_lock); | ||
136 | |||
137 | static char *quotatypes[] = INITQFNAMES; | ||
138 | static struct quota_format_type *quota_formats; /* List of registered formats */ | ||
139 | static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; | ||
140 | |||
141 | /* SLAB cache for dquot structures */ | ||
142 | static struct kmem_cache *dquot_cachep; | ||
143 | |||
144 | int register_quota_format(struct quota_format_type *fmt) | ||
145 | { | ||
146 | spin_lock(&dq_list_lock); | ||
147 | fmt->qf_next = quota_formats; | ||
148 | quota_formats = fmt; | ||
149 | spin_unlock(&dq_list_lock); | ||
150 | return 0; | ||
151 | } | ||
152 | EXPORT_SYMBOL(register_quota_format); | ||
153 | |||
154 | void unregister_quota_format(struct quota_format_type *fmt) | ||
155 | { | ||
156 | struct quota_format_type **actqf; | ||
157 | |||
158 | spin_lock(&dq_list_lock); | ||
159 | for (actqf = &quota_formats; *actqf && *actqf != fmt; | ||
160 | actqf = &(*actqf)->qf_next) | ||
161 | ; | ||
162 | if (*actqf) | ||
163 | *actqf = (*actqf)->qf_next; | ||
164 | spin_unlock(&dq_list_lock); | ||
165 | } | ||
166 | EXPORT_SYMBOL(unregister_quota_format); | ||
167 | |||
168 | static struct quota_format_type *find_quota_format(int id) | ||
169 | { | ||
170 | struct quota_format_type *actqf; | ||
171 | |||
172 | spin_lock(&dq_list_lock); | ||
173 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; | ||
174 | actqf = actqf->qf_next) | ||
175 | ; | ||
176 | if (!actqf || !try_module_get(actqf->qf_owner)) { | ||
177 | int qm; | ||
178 | |||
179 | spin_unlock(&dq_list_lock); | ||
180 | |||
181 | for (qm = 0; module_names[qm].qm_fmt_id && | ||
182 | module_names[qm].qm_fmt_id != id; qm++) | ||
183 | ; | ||
184 | if (!module_names[qm].qm_fmt_id || | ||
185 | request_module(module_names[qm].qm_mod_name)) | ||
186 | return NULL; | ||
187 | |||
188 | spin_lock(&dq_list_lock); | ||
189 | for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; | ||
190 | actqf = actqf->qf_next) | ||
191 | ; | ||
192 | if (actqf && !try_module_get(actqf->qf_owner)) | ||
193 | actqf = NULL; | ||
194 | } | ||
195 | spin_unlock(&dq_list_lock); | ||
196 | return actqf; | ||
197 | } | ||
198 | |||
199 | static void put_quota_format(struct quota_format_type *fmt) | ||
200 | { | ||
201 | module_put(fmt->qf_owner); | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * Dquot List Management: | ||
206 | * The quota code uses three lists for dquot management: the inuse_list, | ||
207 | * free_dquots, and dquot_hash[] array. A single dquot structure may be | ||
208 | * on all three lists, depending on its current state. | ||
209 | * | ||
210 | * All dquots are placed at the end of inuse_list when first created, and this | ||
211 | * list is used for the invalidate operation, which must look at every dquot. | ||
212 | * | ||
213 | * Unused dquots (dq_count == 0) are added to the free_dquots list when freed, | ||
214 | * and this list is searched whenever we need an available dquot. Dquots are | ||
215 | * removed from the list as soon as they are used again, and | ||
216 | * dqstats.free_dquots gives the number of dquots on the list. When | ||
217 | * a dquot is invalidated, it is completely released from memory. | ||
218 | * | ||
219 | * Dquots with a specific identity (device, type and id) are placed on | ||
220 | * one of the dquot_hash[] hash chains. This provides an efficient search | ||
221 | * mechanism to locate a specific dquot. | ||
222 | */ | ||
223 | |||
224 | static LIST_HEAD(inuse_list); | ||
225 | static LIST_HEAD(free_dquots); | ||
226 | static unsigned int dq_hash_bits, dq_hash_mask; | ||
227 | static struct hlist_head *dquot_hash; | ||
228 | |||
229 | struct dqstats dqstats; | ||
230 | EXPORT_SYMBOL(dqstats); | ||
231 | |||
232 | static inline unsigned int | ||
233 | hashfn(const struct super_block *sb, unsigned int id, int type) | ||
234 | { | ||
235 | unsigned long tmp; | ||
236 | |||
237 | tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); | ||
238 | return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * Following list functions expect dq_list_lock to be held | ||
243 | */ | ||
244 | static inline void insert_dquot_hash(struct dquot *dquot) | ||
245 | { | ||
246 | struct hlist_head *head; | ||
247 | head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type); | ||
248 | hlist_add_head(&dquot->dq_hash, head); | ||
249 | } | ||
250 | |||
251 | static inline void remove_dquot_hash(struct dquot *dquot) | ||
252 | { | ||
253 | hlist_del_init(&dquot->dq_hash); | ||
254 | } | ||
255 | |||
256 | static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, | ||
257 | unsigned int id, int type) | ||
258 | { | ||
259 | struct hlist_node *node; | ||
260 | struct dquot *dquot; | ||
261 | |||
262 | hlist_for_each (node, dquot_hash+hashent) { | ||
263 | dquot = hlist_entry(node, struct dquot, dq_hash); | ||
264 | if (dquot->dq_sb == sb && dquot->dq_id == id && | ||
265 | dquot->dq_type == type) | ||
266 | return dquot; | ||
267 | } | ||
268 | return NULL; | ||
269 | } | ||
270 | |||
271 | /* Add a dquot to the tail of the free list */ | ||
272 | static inline void put_dquot_last(struct dquot *dquot) | ||
273 | { | ||
274 | list_add_tail(&dquot->dq_free, &free_dquots); | ||
275 | dqstats.free_dquots++; | ||
276 | } | ||
277 | |||
278 | static inline void remove_free_dquot(struct dquot *dquot) | ||
279 | { | ||
280 | if (list_empty(&dquot->dq_free)) | ||
281 | return; | ||
282 | list_del_init(&dquot->dq_free); | ||
283 | dqstats.free_dquots--; | ||
284 | } | ||
285 | |||
286 | static inline void put_inuse(struct dquot *dquot) | ||
287 | { | ||
288 | /* We add to the back of the inuse list so we don't have to restart | ||
289 | * the traversal if we block while walking this list */ | ||
290 | list_add_tail(&dquot->dq_inuse, &inuse_list); | ||
291 | dqstats.allocated_dquots++; | ||
292 | } | ||
293 | |||
294 | static inline void remove_inuse(struct dquot *dquot) | ||
295 | { | ||
296 | dqstats.allocated_dquots--; | ||
297 | list_del(&dquot->dq_inuse); | ||
298 | } | ||
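/*
 * Illustrative sketch (editorial addition, never compiled): how the helpers
 * above combine under dq_list_lock when a freshly allocated dquot is
 * inserted into the caches; dqget() further down does essentially this.
 */
#if 0
static void insert_new_dquot_sketch(struct dquot *dquot)
{
	spin_lock(&dq_list_lock);
	put_inuse(dquot);		/* visible to invalidate_dquots() */
	insert_dquot_hash(dquot);	/* findable by (sb, id, type) */
	spin_unlock(&dq_list_lock);
}
#endif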
299 | /* | ||
300 | * End of list functions needing dq_list_lock | ||
301 | */ | ||
302 | |||
303 | static void wait_on_dquot(struct dquot *dquot) | ||
304 | { | ||
305 | mutex_lock(&dquot->dq_lock); | ||
306 | mutex_unlock(&dquot->dq_lock); | ||
307 | } | ||
308 | |||
309 | static inline int dquot_dirty(struct dquot *dquot) | ||
310 | { | ||
311 | return test_bit(DQ_MOD_B, &dquot->dq_flags); | ||
312 | } | ||
313 | |||
314 | static inline int mark_dquot_dirty(struct dquot *dquot) | ||
315 | { | ||
316 | return dquot->dq_sb->dq_op->mark_dirty(dquot); | ||
317 | } | ||
318 | |||
319 | int dquot_mark_dquot_dirty(struct dquot *dquot) | ||
320 | { | ||
321 | spin_lock(&dq_list_lock); | ||
322 | if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
323 | list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> | ||
324 | info[dquot->dq_type].dqi_dirty_list); | ||
325 | spin_unlock(&dq_list_lock); | ||
326 | return 0; | ||
327 | } | ||
328 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | ||
329 | |||
330 | /* This function needs dq_list_lock */ | ||
331 | static inline int clear_dquot_dirty(struct dquot *dquot) | ||
332 | { | ||
333 | if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) | ||
334 | return 0; | ||
335 | list_del_init(&dquot->dq_dirty); | ||
336 | return 1; | ||
337 | } | ||
338 | |||
339 | void mark_info_dirty(struct super_block *sb, int type) | ||
340 | { | ||
341 | set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags); | ||
342 | } | ||
343 | EXPORT_SYMBOL(mark_info_dirty); | ||
344 | |||
345 | /* | ||
346 | * Read dquot from disk and alloc space for it | ||
347 | */ | ||
348 | |||
349 | int dquot_acquire(struct dquot *dquot) | ||
350 | { | ||
351 | int ret = 0, ret2 = 0; | ||
352 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
353 | |||
354 | mutex_lock(&dquot->dq_lock); | ||
355 | mutex_lock(&dqopt->dqio_mutex); | ||
356 | if (!test_bit(DQ_READ_B, &dquot->dq_flags)) | ||
357 | ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); | ||
358 | if (ret < 0) | ||
359 | goto out_iolock; | ||
360 | set_bit(DQ_READ_B, &dquot->dq_flags); | ||
361 | /* Instantiate dquot if needed */ | ||
362 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) { | ||
363 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
364 | /* Write the info if needed */ | ||
365 | if (info_dirty(&dqopt->info[dquot->dq_type])) { | ||
366 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info( | ||
367 | dquot->dq_sb, dquot->dq_type); | ||
368 | } | ||
369 | if (ret < 0) | ||
370 | goto out_iolock; | ||
371 | if (ret2 < 0) { | ||
372 | ret = ret2; | ||
373 | goto out_iolock; | ||
374 | } | ||
375 | } | ||
376 | set_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
377 | out_iolock: | ||
378 | mutex_unlock(&dqopt->dqio_mutex); | ||
379 | mutex_unlock(&dquot->dq_lock); | ||
380 | return ret; | ||
381 | } | ||
382 | EXPORT_SYMBOL(dquot_acquire); | ||
383 | |||
384 | /* | ||
385 | * Write dquot to disk | ||
386 | */ | ||
387 | int dquot_commit(struct dquot *dquot) | ||
388 | { | ||
389 | int ret = 0, ret2 = 0; | ||
390 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
391 | |||
392 | mutex_lock(&dqopt->dqio_mutex); | ||
393 | spin_lock(&dq_list_lock); | ||
394 | if (!clear_dquot_dirty(dquot)) { | ||
395 | spin_unlock(&dq_list_lock); | ||
396 | goto out_sem; | ||
397 | } | ||
398 | spin_unlock(&dq_list_lock); | ||
399 | /* An inactive dquot can exist only if there was an error during read/init | ||
400 | * => we'd better not write it */ | ||
401 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
402 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | ||
403 | if (info_dirty(&dqopt->info[dquot->dq_type])) { | ||
404 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info( | ||
405 | dquot->dq_sb, dquot->dq_type); | ||
406 | } | ||
407 | if (ret >= 0) | ||
408 | ret = ret2; | ||
409 | } | ||
410 | out_sem: | ||
411 | mutex_unlock(&dqopt->dqio_mutex); | ||
412 | return ret; | ||
413 | } | ||
414 | EXPORT_SYMBOL(dquot_commit); | ||
415 | |||
416 | /* | ||
417 | * Release dquot | ||
418 | */ | ||
419 | int dquot_release(struct dquot *dquot) | ||
420 | { | ||
421 | int ret = 0, ret2 = 0; | ||
422 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | ||
423 | |||
424 | mutex_lock(&dquot->dq_lock); | ||
425 | /* Check whether we are not racing with some other dqget() */ | ||
426 | if (atomic_read(&dquot->dq_count) > 1) | ||
427 | goto out_dqlock; | ||
428 | mutex_lock(&dqopt->dqio_mutex); | ||
429 | if (dqopt->ops[dquot->dq_type]->release_dqblk) { | ||
430 | ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); | ||
431 | /* Write the info */ | ||
432 | if (info_dirty(&dqopt->info[dquot->dq_type])) { | ||
433 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info( | ||
434 | dquot->dq_sb, dquot->dq_type); | ||
435 | } | ||
436 | if (ret >= 0) | ||
437 | ret = ret2; | ||
438 | } | ||
439 | clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); | ||
440 | mutex_unlock(&dqopt->dqio_mutex); | ||
441 | out_dqlock: | ||
442 | mutex_unlock(&dquot->dq_lock); | ||
443 | return ret; | ||
444 | } | ||
445 | EXPORT_SYMBOL(dquot_release); | ||
446 | |||
447 | void dquot_destroy(struct dquot *dquot) | ||
448 | { | ||
449 | kmem_cache_free(dquot_cachep, dquot); | ||
450 | } | ||
451 | EXPORT_SYMBOL(dquot_destroy); | ||
452 | |||
453 | static inline void do_destroy_dquot(struct dquot *dquot) | ||
454 | { | ||
455 | dquot->dq_sb->dq_op->destroy_dquot(dquot); | ||
456 | } | ||
457 | |||
458 | /* Invalidate all dquots on the list. Note that this function is called after | ||
459 | * quota is disabled and pointers from inodes removed so there cannot be new | ||
460 | * quota users. There can still be some users of quotas due to inodes being | ||
461 | * just deleted or pruned by prune_icache() (those are not attached to any | ||
462 | * list) or parallel quotactl call. We have to wait for such users. | ||
463 | */ | ||
464 | static void invalidate_dquots(struct super_block *sb, int type) | ||
465 | { | ||
466 | struct dquot *dquot, *tmp; | ||
467 | |||
468 | restart: | ||
469 | spin_lock(&dq_list_lock); | ||
470 | list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { | ||
471 | if (dquot->dq_sb != sb) | ||
472 | continue; | ||
473 | if (dquot->dq_type != type) | ||
474 | continue; | ||
475 | /* Wait for dquot users */ | ||
476 | if (atomic_read(&dquot->dq_count)) { | ||
477 | DEFINE_WAIT(wait); | ||
478 | |||
479 | atomic_inc(&dquot->dq_count); | ||
480 | prepare_to_wait(&dquot->dq_wait_unused, &wait, | ||
481 | TASK_UNINTERRUPTIBLE); | ||
482 | spin_unlock(&dq_list_lock); | ||
483 | /* Once dqput() wakes us up, we know it's time to free | ||
484 | * the dquot. | ||
485 | * IMPORTANT: we rely on the fact that there is always | ||
486 | * at most one process waiting for the dquot to be freed. | ||
487 | * Otherwise dq_count would be > 1 and we would never | ||
488 | * wake up. | ||
489 | */ | ||
490 | if (atomic_read(&dquot->dq_count) > 1) | ||
491 | schedule(); | ||
492 | finish_wait(&dquot->dq_wait_unused, &wait); | ||
493 | dqput(dquot); | ||
494 | /* At this moment the dquot need not exist (it could have | ||
495 | * been reclaimed by prune_dqcache()). Hence we must | ||
496 | * restart. */ | ||
497 | goto restart; | ||
498 | } | ||
499 | /* | ||
500 | * Quota now has no users and it has been written on last | ||
501 | * dqput() | ||
502 | */ | ||
503 | remove_dquot_hash(dquot); | ||
504 | remove_free_dquot(dquot); | ||
505 | remove_inuse(dquot); | ||
506 | do_destroy_dquot(dquot); | ||
507 | } | ||
508 | spin_unlock(&dq_list_lock); | ||
509 | } | ||
510 | |||
511 | /* Call callback for every active dquot on given filesystem */ | ||
512 | int dquot_scan_active(struct super_block *sb, | ||
513 | int (*fn)(struct dquot *dquot, unsigned long priv), | ||
514 | unsigned long priv) | ||
515 | { | ||
516 | struct dquot *dquot, *old_dquot = NULL; | ||
517 | int ret = 0; | ||
518 | |||
519 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
520 | spin_lock(&dq_list_lock); | ||
521 | list_for_each_entry(dquot, &inuse_list, dq_inuse) { | ||
522 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) | ||
523 | continue; | ||
524 | if (dquot->dq_sb != sb) | ||
525 | continue; | ||
526 | /* Now we have active dquot so we can just increase use count */ | ||
527 | atomic_inc(&dquot->dq_count); | ||
528 | dqstats.lookups++; | ||
529 | spin_unlock(&dq_list_lock); | ||
530 | dqput(old_dquot); | ||
531 | old_dquot = dquot; | ||
532 | ret = fn(dquot, priv); | ||
533 | if (ret < 0) | ||
534 | goto out; | ||
535 | spin_lock(&dq_list_lock); | ||
536 | /* We are safe to continue now because our dquot could not | ||
537 | * be moved out of the inuse list while we hold the reference */ | ||
538 | } | ||
539 | spin_unlock(&dq_list_lock); | ||
540 | out: | ||
541 | dqput(old_dquot); | ||
542 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
543 | return ret; | ||
544 | } | ||
545 | EXPORT_SYMBOL(dquot_scan_active); | ||
546 | |||
547 | int vfs_quota_sync(struct super_block *sb, int type) | ||
548 | { | ||
549 | struct list_head *dirty; | ||
550 | struct dquot *dquot; | ||
551 | struct quota_info *dqopt = sb_dqopt(sb); | ||
552 | int cnt; | ||
553 | |||
554 | mutex_lock(&dqopt->dqonoff_mutex); | ||
555 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
556 | if (type != -1 && cnt != type) | ||
557 | continue; | ||
558 | if (!sb_has_quota_active(sb, cnt)) | ||
559 | continue; | ||
560 | spin_lock(&dq_list_lock); | ||
561 | dirty = &dqopt->info[cnt].dqi_dirty_list; | ||
562 | while (!list_empty(dirty)) { | ||
563 | dquot = list_first_entry(dirty, struct dquot, | ||
564 | dq_dirty); | ||
565 | /* Only a bad dquot can be both dirty and inactive... */ | ||
566 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
567 | clear_dquot_dirty(dquot); | ||
568 | continue; | ||
569 | } | ||
570 | /* Now we have active dquot from which someone is | ||
571 | * holding reference so we can safely just increase | ||
572 | * use count */ | ||
573 | atomic_inc(&dquot->dq_count); | ||
574 | dqstats.lookups++; | ||
575 | spin_unlock(&dq_list_lock); | ||
576 | sb->dq_op->write_dquot(dquot); | ||
577 | dqput(dquot); | ||
578 | spin_lock(&dq_list_lock); | ||
579 | } | ||
580 | spin_unlock(&dq_list_lock); | ||
581 | } | ||
582 | |||
583 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
584 | if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) | ||
585 | && info_dirty(&dqopt->info[cnt])) | ||
586 | sb->dq_op->write_info(sb, cnt); | ||
587 | spin_lock(&dq_list_lock); | ||
588 | dqstats.syncs++; | ||
589 | spin_unlock(&dq_list_lock); | ||
590 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
591 | |||
592 | return 0; | ||
593 | } | ||
594 | EXPORT_SYMBOL(vfs_quota_sync); | ||
595 | |||
596 | /* Free unused dquots from cache */ | ||
597 | static void prune_dqcache(int count) | ||
598 | { | ||
599 | struct list_head *head; | ||
600 | struct dquot *dquot; | ||
601 | |||
602 | head = free_dquots.prev; | ||
603 | while (head != &free_dquots && count) { | ||
604 | dquot = list_entry(head, struct dquot, dq_free); | ||
605 | remove_dquot_hash(dquot); | ||
606 | remove_free_dquot(dquot); | ||
607 | remove_inuse(dquot); | ||
608 | do_destroy_dquot(dquot); | ||
609 | count--; | ||
610 | head = free_dquots.prev; | ||
611 | } | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * This is called from kswapd when we think we need some | ||
616 | * more memory | ||
617 | */ | ||
618 | |||
619 | static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | ||
620 | { | ||
621 | if (nr) { | ||
622 | spin_lock(&dq_list_lock); | ||
623 | prune_dqcache(nr); | ||
624 | spin_unlock(&dq_list_lock); | ||
625 | } | ||
626 | return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; | ||
627 | } | ||
628 | |||
629 | static struct shrinker dqcache_shrinker = { | ||
630 | .shrink = shrink_dqcache_memory, | ||
631 | .seeks = DEFAULT_SEEKS, | ||
632 | }; | ||
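/*
 * Editorial note on the shrinker above: a zero 'nr' only reports how many
 * dquots could be freed (scaled by sysctl_vfs_cache_pressure); a non-zero
 * 'nr' frees up to that many unused dquots.  Registration is presumably
 * done with register_shrinker(&dqcache_shrinker) in the init code further
 * down in this file, outside the part quoted here.
 */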
633 | |||
634 | /* | ||
635 | * Put reference to dquot | ||
636 | * NOTE: If you change this function please check whether dqput_blocks() works right... | ||
637 | */ | ||
638 | void dqput(struct dquot *dquot) | ||
639 | { | ||
640 | int ret; | ||
641 | |||
642 | if (!dquot) | ||
643 | return; | ||
644 | #ifdef __DQUOT_PARANOIA | ||
645 | if (!atomic_read(&dquot->dq_count)) { | ||
646 | printk("VFS: dqput: trying to free free dquot\n"); | ||
647 | printk("VFS: device %s, dquot of %s %d\n", | ||
648 | dquot->dq_sb->s_id, | ||
649 | quotatypes[dquot->dq_type], | ||
650 | dquot->dq_id); | ||
651 | BUG(); | ||
652 | } | ||
653 | #endif | ||
654 | |||
655 | spin_lock(&dq_list_lock); | ||
656 | dqstats.drops++; | ||
657 | spin_unlock(&dq_list_lock); | ||
658 | we_slept: | ||
659 | spin_lock(&dq_list_lock); | ||
660 | if (atomic_read(&dquot->dq_count) > 1) { | ||
661 | /* We have more than one user... nothing to do */ | ||
662 | atomic_dec(&dquot->dq_count); | ||
663 | /* Releasing dquot during quotaoff phase? */ | ||
664 | if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_type) && | ||
665 | atomic_read(&dquot->dq_count) == 1) | ||
666 | wake_up(&dquot->dq_wait_unused); | ||
667 | spin_unlock(&dq_list_lock); | ||
668 | return; | ||
669 | } | ||
670 | /* Need to release dquot? */ | ||
671 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { | ||
672 | spin_unlock(&dq_list_lock); | ||
673 | /* Commit dquot before releasing */ | ||
674 | ret = dquot->dq_sb->dq_op->write_dquot(dquot); | ||
675 | if (ret < 0) { | ||
676 | printk(KERN_ERR "VFS: cannot write quota structure on " | ||
677 | "device %s (error %d). Quota may get out of " | ||
678 | "sync!\n", dquot->dq_sb->s_id, ret); | ||
679 | /* | ||
680 | * We clear dirty bit anyway, so that we avoid | ||
681 | * infinite loop here | ||
682 | */ | ||
683 | spin_lock(&dq_list_lock); | ||
684 | clear_dquot_dirty(dquot); | ||
685 | spin_unlock(&dq_list_lock); | ||
686 | } | ||
687 | goto we_slept; | ||
688 | } | ||
689 | /* Clear flag in case dquot was inactive (something bad happened) */ | ||
690 | clear_dquot_dirty(dquot); | ||
691 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | ||
692 | spin_unlock(&dq_list_lock); | ||
693 | dquot->dq_sb->dq_op->release_dquot(dquot); | ||
694 | goto we_slept; | ||
695 | } | ||
696 | atomic_dec(&dquot->dq_count); | ||
697 | #ifdef __DQUOT_PARANOIA | ||
698 | /* sanity check */ | ||
699 | BUG_ON(!list_empty(&dquot->dq_free)); | ||
700 | #endif | ||
701 | put_dquot_last(dquot); | ||
702 | spin_unlock(&dq_list_lock); | ||
703 | } | ||
704 | EXPORT_SYMBOL(dqput); | ||
705 | |||
706 | struct dquot *dquot_alloc(struct super_block *sb, int type) | ||
707 | { | ||
708 | return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); | ||
709 | } | ||
710 | EXPORT_SYMBOL(dquot_alloc); | ||
711 | |||
712 | static struct dquot *get_empty_dquot(struct super_block *sb, int type) | ||
713 | { | ||
714 | struct dquot *dquot; | ||
715 | |||
716 | dquot = sb->dq_op->alloc_dquot(sb, type); | ||
717 | if (!dquot) | ||
718 | return NULL; | ||
719 | |||
720 | mutex_init(&dquot->dq_lock); | ||
721 | INIT_LIST_HEAD(&dquot->dq_free); | ||
722 | INIT_LIST_HEAD(&dquot->dq_inuse); | ||
723 | INIT_HLIST_NODE(&dquot->dq_hash); | ||
724 | INIT_LIST_HEAD(&dquot->dq_dirty); | ||
725 | init_waitqueue_head(&dquot->dq_wait_unused); | ||
726 | dquot->dq_sb = sb; | ||
727 | dquot->dq_type = type; | ||
728 | atomic_set(&dquot->dq_count, 1); | ||
729 | |||
730 | return dquot; | ||
731 | } | ||
732 | |||
733 | /* | ||
734 | * Get reference to dquot | ||
735 | * | ||
736 | * Locking is slightly tricky here. We are guarded from parallel quotaoff() | ||
737 | * destroying our dquot by: | ||
738 | * a) checking for quota flags under dq_list_lock and | ||
739 | * b) getting a reference to dquot before we release dq_list_lock | ||
740 | */ | ||
741 | struct dquot *dqget(struct super_block *sb, unsigned int id, int type) | ||
742 | { | ||
743 | unsigned int hashent = hashfn(sb, id, type); | ||
744 | struct dquot *dquot = NULL, *empty = NULL; | ||
745 | |||
746 | if (!sb_has_quota_active(sb, type)) | ||
747 | return NULL; | ||
748 | we_slept: | ||
749 | spin_lock(&dq_list_lock); | ||
750 | spin_lock(&dq_state_lock); | ||
751 | if (!sb_has_quota_active(sb, type)) { | ||
752 | spin_unlock(&dq_state_lock); | ||
753 | spin_unlock(&dq_list_lock); | ||
754 | goto out; | ||
755 | } | ||
756 | spin_unlock(&dq_state_lock); | ||
757 | |||
758 | dquot = find_dquot(hashent, sb, id, type); | ||
759 | if (!dquot) { | ||
760 | if (!empty) { | ||
761 | spin_unlock(&dq_list_lock); | ||
762 | empty = get_empty_dquot(sb, type); | ||
763 | if (!empty) | ||
764 | schedule(); /* Try to wait for a moment... */ | ||
765 | goto we_slept; | ||
766 | } | ||
767 | dquot = empty; | ||
768 | empty = NULL; | ||
769 | dquot->dq_id = id; | ||
770 | /* all dquots go on the inuse_list */ | ||
771 | put_inuse(dquot); | ||
772 | /* hash it first so it can be found */ | ||
773 | insert_dquot_hash(dquot); | ||
774 | dqstats.lookups++; | ||
775 | spin_unlock(&dq_list_lock); | ||
776 | } else { | ||
777 | if (!atomic_read(&dquot->dq_count)) | ||
778 | remove_free_dquot(dquot); | ||
779 | atomic_inc(&dquot->dq_count); | ||
780 | dqstats.cache_hits++; | ||
781 | dqstats.lookups++; | ||
782 | spin_unlock(&dq_list_lock); | ||
783 | } | ||
784 | /* Wait for dq_lock - after this we know that either dquot_release() is | ||
785 | * already finished or it will be canceled due to dq_count > 1 test */ | ||
786 | wait_on_dquot(dquot); | ||
787 | /* Read the dquot / allocate space in quota file */ | ||
788 | if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && | ||
789 | sb->dq_op->acquire_dquot(dquot) < 0) { | ||
790 | dqput(dquot); | ||
791 | dquot = NULL; | ||
792 | goto out; | ||
793 | } | ||
794 | #ifdef __DQUOT_PARANOIA | ||
795 | BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ | ||
796 | #endif | ||
797 | out: | ||
798 | if (empty) | ||
799 | do_destroy_dquot(empty); | ||
800 | |||
801 | return dquot; | ||
802 | } | ||
803 | EXPORT_SYMBOL(dqget); | ||
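/*
 * Illustrative sketch (editorial addition, never compiled): the reference
 * discipline for the API above - every successful dqget() is paired with a
 * dqput().  The id 0 and the type USRQUOTA are hypothetical example values.
 */
#if 0
static void dqget_usage_sketch(struct super_block *sb)
{
	struct dquot *dquot = dqget(sb, 0, USRQUOTA);

	if (!dquot)
		return;	/* quota not active or dquot could not be read */
	/* ... inspect or update dquot->dq_dqb under dq_data_lock ... */
	dqput(dquot);
}
#endif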
804 | |||
805 | static int dqinit_needed(struct inode *inode, int type) | ||
806 | { | ||
807 | int cnt; | ||
808 | |||
809 | if (IS_NOQUOTA(inode)) | ||
810 | return 0; | ||
811 | if (type != -1) | ||
812 | return !inode->i_dquot[type]; | ||
813 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
814 | if (!inode->i_dquot[cnt]) | ||
815 | return 1; | ||
816 | return 0; | ||
817 | } | ||
818 | |||
819 | /* This routine is guarded by the dqonoff_mutex */ | ||
820 | static void add_dquot_ref(struct super_block *sb, int type) | ||
821 | { | ||
822 | struct inode *inode, *old_inode = NULL; | ||
823 | |||
824 | spin_lock(&inode_lock); | ||
825 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
826 | if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) | ||
827 | continue; | ||
828 | if (!atomic_read(&inode->i_writecount)) | ||
829 | continue; | ||
830 | if (!dqinit_needed(inode, type)) | ||
831 | continue; | ||
832 | |||
833 | __iget(inode); | ||
834 | spin_unlock(&inode_lock); | ||
835 | |||
836 | iput(old_inode); | ||
837 | sb->dq_op->initialize(inode, type); | ||
838 | /* We hold a reference to 'inode' so it couldn't have been | ||
839 | * removed from s_inodes list while we dropped the inode_lock. | ||
840 | * We cannot iput the inode now as we can be holding the last | ||
841 | * reference and we cannot iput it under inode_lock. So we | ||
842 | * keep the reference and iput it later. */ | ||
843 | old_inode = inode; | ||
844 | spin_lock(&inode_lock); | ||
845 | } | ||
846 | spin_unlock(&inode_lock); | ||
847 | iput(old_inode); | ||
848 | } | ||
849 | |||
850 | /* | ||
851 | * Return 0 if dqput() won't block. | ||
852 | * (note that 1 doesn't necessarily mean blocking) | ||
853 | */ | ||
854 | static inline int dqput_blocks(struct dquot *dquot) | ||
855 | { | ||
856 | if (atomic_read(&dquot->dq_count) <= 1) | ||
857 | return 1; | ||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | /* | ||
862 | * Remove references to dquots from inode and add dquot to list for freeing | ||
863 | * if we have the last reference to the dquot | ||
864 | * We can't race with anybody because we hold dqptr_sem for writing... | ||
865 | */ | ||
866 | static int remove_inode_dquot_ref(struct inode *inode, int type, | ||
867 | struct list_head *tofree_head) | ||
868 | { | ||
869 | struct dquot *dquot = inode->i_dquot[type]; | ||
870 | |||
871 | inode->i_dquot[type] = NULL; | ||
872 | if (dquot) { | ||
873 | if (dqput_blocks(dquot)) { | ||
874 | #ifdef __DQUOT_PARANOIA | ||
875 | if (atomic_read(&dquot->dq_count) != 1) | ||
876 | printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count)); | ||
877 | #endif | ||
878 | spin_lock(&dq_list_lock); | ||
879 | /* As the dquot must currently have users, it can't be on | ||
880 | * the free list... */ | ||
881 | list_add(&dquot->dq_free, tofree_head); | ||
882 | spin_unlock(&dq_list_lock); | ||
883 | return 1; | ||
884 | } | ||
885 | else | ||
886 | dqput(dquot); /* We have guaranteed we won't block */ | ||
887 | } | ||
888 | return 0; | ||
889 | } | ||
890 | |||
891 | /* | ||
892 | * Free list of dquots | ||
893 | * Dquots are removed from inodes and no new references can be taken, so we are | ||
894 | * the only ones holding a reference | ||
895 | */ | ||
896 | static void put_dquot_list(struct list_head *tofree_head) | ||
897 | { | ||
898 | struct list_head *act_head; | ||
899 | struct dquot *dquot; | ||
900 | |||
901 | act_head = tofree_head->next; | ||
902 | while (act_head != tofree_head) { | ||
903 | dquot = list_entry(act_head, struct dquot, dq_free); | ||
904 | act_head = act_head->next; | ||
905 | /* Remove dquot from the list so we won't have problems... */ | ||
906 | list_del_init(&dquot->dq_free); | ||
907 | dqput(dquot); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | static void remove_dquot_ref(struct super_block *sb, int type, | ||
912 | struct list_head *tofree_head) | ||
913 | { | ||
914 | struct inode *inode; | ||
915 | |||
916 | spin_lock(&inode_lock); | ||
917 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | ||
918 | /* | ||
919 | * We have to scan also I_NEW inodes because they can already | ||
920 | * have quota pointer initialized. Luckily, we need to touch | ||
921 | * only quota pointers and these have separate locking | ||
922 | * (dqptr_sem). | ||
923 | */ | ||
924 | if (!IS_NOQUOTA(inode)) | ||
925 | remove_inode_dquot_ref(inode, type, tofree_head); | ||
926 | } | ||
927 | spin_unlock(&inode_lock); | ||
928 | } | ||
929 | |||
930 | /* Gather all references from inodes and drop them */ | ||
931 | static void drop_dquot_ref(struct super_block *sb, int type) | ||
932 | { | ||
933 | LIST_HEAD(tofree_head); | ||
934 | |||
935 | if (sb->dq_op) { | ||
936 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
937 | remove_dquot_ref(sb, type, &tofree_head); | ||
938 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
939 | put_dquot_list(&tofree_head); | ||
940 | } | ||
941 | } | ||
942 | |||
943 | static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number) | ||
944 | { | ||
945 | dquot->dq_dqb.dqb_curinodes += number; | ||
946 | } | ||
947 | |||
948 | static inline void dquot_incr_space(struct dquot *dquot, qsize_t number) | ||
949 | { | ||
950 | dquot->dq_dqb.dqb_curspace += number; | ||
951 | } | ||
952 | |||
953 | static inline void dquot_resv_space(struct dquot *dquot, qsize_t number) | ||
954 | { | ||
955 | dquot->dq_dqb.dqb_rsvspace += number; | ||
956 | } | ||
957 | |||
958 | /* | ||
959 | * Claim reserved quota space | ||
960 | */ | ||
961 | static void dquot_claim_reserved_space(struct dquot *dquot, | ||
962 | qsize_t number) | ||
963 | { | ||
964 | WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); | ||
965 | dquot->dq_dqb.dqb_curspace += number; | ||
966 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
967 | } | ||
968 | |||
969 | static inline | ||
970 | void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) | ||
971 | { | ||
972 | dquot->dq_dqb.dqb_rsvspace -= number; | ||
973 | } | ||
974 | |||
975 | static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) | ||
976 | { | ||
977 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
978 | dquot->dq_dqb.dqb_curinodes >= number) | ||
979 | dquot->dq_dqb.dqb_curinodes -= number; | ||
980 | else | ||
981 | dquot->dq_dqb.dqb_curinodes = 0; | ||
982 | if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
983 | dquot->dq_dqb.dqb_itime = (time_t) 0; | ||
984 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
985 | } | ||
986 | |||
987 | static void dquot_decr_space(struct dquot *dquot, qsize_t number) | ||
988 | { | ||
989 | if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || | ||
990 | dquot->dq_dqb.dqb_curspace >= number) | ||
991 | dquot->dq_dqb.dqb_curspace -= number; | ||
992 | else | ||
993 | dquot->dq_dqb.dqb_curspace = 0; | ||
994 | if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
995 | dquot->dq_dqb.dqb_btime = (time_t) 0; | ||
996 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
997 | } | ||
998 | |||
999 | static int warning_issued(struct dquot *dquot, const int warntype) | ||
1000 | { | ||
1001 | int flag = (warntype == QUOTA_NL_BHARDWARN || | ||
1002 | warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : | ||
1003 | ((warntype == QUOTA_NL_IHARDWARN || | ||
1004 | warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0); | ||
1005 | |||
1006 | if (!flag) | ||
1007 | return 0; | ||
1008 | return test_and_set_bit(flag, &dquot->dq_flags); | ||
1009 | } | ||
1010 | |||
1011 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
1012 | static int flag_print_warnings = 1; | ||
1013 | |||
1014 | static int need_print_warning(struct dquot *dquot) | ||
1015 | { | ||
1016 | if (!flag_print_warnings) | ||
1017 | return 0; | ||
1018 | |||
1019 | switch (dquot->dq_type) { | ||
1020 | case USRQUOTA: | ||
1021 | return current_fsuid() == dquot->dq_id; | ||
1022 | case GRPQUOTA: | ||
1023 | return in_group_p(dquot->dq_id); | ||
1024 | } | ||
1025 | return 0; | ||
1026 | } | ||
1027 | |||
1028 | /* Print warning to user which exceeded quota */ | ||
1029 | static void print_warning(struct dquot *dquot, const int warntype) | ||
1030 | { | ||
1031 | char *msg = NULL; | ||
1032 | struct tty_struct *tty; | ||
1033 | |||
1034 | if (warntype == QUOTA_NL_IHARDBELOW || | ||
1035 | warntype == QUOTA_NL_ISOFTBELOW || | ||
1036 | warntype == QUOTA_NL_BHARDBELOW || | ||
1037 | warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot)) | ||
1038 | return; | ||
1039 | |||
1040 | tty = get_current_tty(); | ||
1041 | if (!tty) | ||
1042 | return; | ||
1043 | tty_write_message(tty, dquot->dq_sb->s_id); | ||
1044 | if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) | ||
1045 | tty_write_message(tty, ": warning, "); | ||
1046 | else | ||
1047 | tty_write_message(tty, ": write failed, "); | ||
1048 | tty_write_message(tty, quotatypes[dquot->dq_type]); | ||
1049 | switch (warntype) { | ||
1050 | case QUOTA_NL_IHARDWARN: | ||
1051 | msg = " file limit reached.\r\n"; | ||
1052 | break; | ||
1053 | case QUOTA_NL_ISOFTLONGWARN: | ||
1054 | msg = " file quota exceeded too long.\r\n"; | ||
1055 | break; | ||
1056 | case QUOTA_NL_ISOFTWARN: | ||
1057 | msg = " file quota exceeded.\r\n"; | ||
1058 | break; | ||
1059 | case QUOTA_NL_BHARDWARN: | ||
1060 | msg = " block limit reached.\r\n"; | ||
1061 | break; | ||
1062 | case QUOTA_NL_BSOFTLONGWARN: | ||
1063 | msg = " block quota exceeded too long.\r\n"; | ||
1064 | break; | ||
1065 | case QUOTA_NL_BSOFTWARN: | ||
1066 | msg = " block quota exceeded.\r\n"; | ||
1067 | break; | ||
1068 | } | ||
1069 | tty_write_message(tty, msg); | ||
1070 | tty_kref_put(tty); | ||
1071 | } | ||
1072 | #endif | ||
1073 | |||
1074 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1075 | |||
1076 | /* Netlink family structure for quota */ | ||
1077 | static struct genl_family quota_genl_family = { | ||
1078 | .id = GENL_ID_GENERATE, | ||
1079 | .hdrsize = 0, | ||
1080 | .name = "VFS_DQUOT", | ||
1081 | .version = 1, | ||
1082 | .maxattr = QUOTA_NL_A_MAX, | ||
1083 | }; | ||
1084 | |||
1085 | /* Send warning to userspace about user which exceeded quota */ | ||
1086 | static void send_warning(const struct dquot *dquot, const char warntype) | ||
1087 | { | ||
1088 | static atomic_t seq; | ||
1089 | struct sk_buff *skb; | ||
1090 | void *msg_head; | ||
1091 | int ret; | ||
1092 | int msg_size = 4 * nla_total_size(sizeof(u32)) + | ||
1093 | 2 * nla_total_size(sizeof(u64)); | ||
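	/* Sized for the four u32 attributes (qtype, warning, device major and
	 * minor) and the two u64 attributes (excess id, caused id) added below. */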
1094 | |||
1095 | /* We have to allocate using GFP_NOFS as we are called from a | ||
1096 | * filesystem performing write and thus further recursion into | ||
1097 | * the fs to free some data could cause deadlocks. */ | ||
1098 | skb = genlmsg_new(msg_size, GFP_NOFS); | ||
1099 | if (!skb) { | ||
1100 | printk(KERN_ERR | ||
1101 | "VFS: Not enough memory to send quota warning.\n"); | ||
1102 | return; | ||
1103 | } | ||
1104 | msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), | ||
1105 | "a_genl_family, 0, QUOTA_NL_C_WARNING); | ||
1106 | if (!msg_head) { | ||
1107 | printk(KERN_ERR | ||
1108 | "VFS: Cannot store netlink header in quota warning.\n"); | ||
1109 | goto err_out; | ||
1110 | } | ||
1111 | ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type); | ||
1112 | if (ret) | ||
1113 | goto attr_err_out; | ||
1114 | ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id); | ||
1115 | if (ret) | ||
1116 | goto attr_err_out; | ||
1117 | ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); | ||
1118 | if (ret) | ||
1119 | goto attr_err_out; | ||
1120 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, | ||
1121 | MAJOR(dquot->dq_sb->s_dev)); | ||
1122 | if (ret) | ||
1123 | goto attr_err_out; | ||
1124 | ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, | ||
1125 | MINOR(dquot->dq_sb->s_dev)); | ||
1126 | if (ret) | ||
1127 | goto attr_err_out; | ||
1128 | ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); | ||
1129 | if (ret) | ||
1130 | goto attr_err_out; | ||
1131 | genlmsg_end(skb, msg_head); | ||
1132 | |||
1133 | genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); | ||
1134 | return; | ||
1135 | attr_err_out: | ||
1136 | printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); | ||
1137 | err_out: | ||
1138 | kfree_skb(skb); | ||
1139 | } | ||
1140 | #endif | ||
1141 | /* | ||
1142 | * Write warnings to the console and send warning messages over netlink. | ||
1143 | * | ||
1144 | * Note that this function can sleep. | ||
1145 | */ | ||
1146 | static void flush_warnings(struct dquot *const *dquots, char *warntype) | ||
1147 | { | ||
1148 | int i; | ||
1149 | |||
1150 | for (i = 0; i < MAXQUOTAS; i++) | ||
1151 | if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && | ||
1152 | !warning_issued(dquots[i], warntype[i])) { | ||
1153 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
1154 | print_warning(dquots[i], warntype[i]); | ||
1155 | #endif | ||
1156 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
1157 | send_warning(dquots[i], warntype[i]); | ||
1158 | #endif | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | static int ignore_hardlimit(struct dquot *dquot) | ||
1163 | { | ||
1164 | struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
1165 | |||
1166 | return capable(CAP_SYS_RESOURCE) && | ||
1167 | (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || | ||
1168 | !(info->dqi_flags & V1_DQF_RSQUASH)); | ||
1169 | } | ||
1170 | |||
1171 | /* needs dq_data_lock */ | ||
1172 | static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype) | ||
1173 | { | ||
1174 | qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes; | ||
1175 | |||
1176 | *warntype = QUOTA_NL_NOWARN; | ||
1177 | if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || | ||
1178 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1179 | return QUOTA_OK; | ||
1180 | |||
1181 | if (dquot->dq_dqb.dqb_ihardlimit && | ||
1182 | newinodes > dquot->dq_dqb.dqb_ihardlimit && | ||
1183 | !ignore_hardlimit(dquot)) { | ||
1184 | *warntype = QUOTA_NL_IHARDWARN; | ||
1185 | return NO_QUOTA; | ||
1186 | } | ||
1187 | |||
1188 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1189 | newinodes > dquot->dq_dqb.dqb_isoftlimit && | ||
1190 | dquot->dq_dqb.dqb_itime && | ||
1191 | get_seconds() >= dquot->dq_dqb.dqb_itime && | ||
1192 | !ignore_hardlimit(dquot)) { | ||
1193 | *warntype = QUOTA_NL_ISOFTLONGWARN; | ||
1194 | return NO_QUOTA; | ||
1195 | } | ||
1196 | |||
1197 | if (dquot->dq_dqb.dqb_isoftlimit && | ||
1198 | newinodes > dquot->dq_dqb.dqb_isoftlimit && | ||
1199 | dquot->dq_dqb.dqb_itime == 0) { | ||
1200 | *warntype = QUOTA_NL_ISOFTWARN; | ||
1201 | dquot->dq_dqb.dqb_itime = get_seconds() + | ||
1202 | sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; | ||
1203 | } | ||
1204 | |||
1205 | return QUOTA_OK; | ||
1206 | } | ||
1207 | |||
1208 | /* needs dq_data_lock */ | ||
1209 | static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype) | ||
1210 | { | ||
1211 | qsize_t tspace; | ||
1212 | struct super_block *sb = dquot->dq_sb; | ||
1213 | |||
1214 | *warntype = QUOTA_NL_NOWARN; | ||
1215 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || | ||
1216 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | ||
1217 | return QUOTA_OK; | ||
1218 | |||
1219 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | ||
1220 | + space; | ||
1221 | |||
1222 | if (dquot->dq_dqb.dqb_bhardlimit && | ||
1223 | tspace > dquot->dq_dqb.dqb_bhardlimit && | ||
1224 | !ignore_hardlimit(dquot)) { | ||
1225 | if (!prealloc) | ||
1226 | *warntype = QUOTA_NL_BHARDWARN; | ||
1227 | return NO_QUOTA; | ||
1228 | } | ||
1229 | |||
1230 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1231 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1232 | dquot->dq_dqb.dqb_btime && | ||
1233 | get_seconds() >= dquot->dq_dqb.dqb_btime && | ||
1234 | !ignore_hardlimit(dquot)) { | ||
1235 | if (!prealloc) | ||
1236 | *warntype = QUOTA_NL_BSOFTLONGWARN; | ||
1237 | return NO_QUOTA; | ||
1238 | } | ||
1239 | |||
1240 | if (dquot->dq_dqb.dqb_bsoftlimit && | ||
1241 | tspace > dquot->dq_dqb.dqb_bsoftlimit && | ||
1242 | dquot->dq_dqb.dqb_btime == 0) { | ||
1243 | if (!prealloc) { | ||
1244 | *warntype = QUOTA_NL_BSOFTWARN; | ||
1245 | dquot->dq_dqb.dqb_btime = get_seconds() + | ||
1246 | sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace; | ||
1247 | } | ||
1248 | else | ||
1249 | /* | ||
1250 | * We don't allow preallocation to exceed the softlimit, so exceeding it will | ||
1251 | * always be printed | ||
1252 | */ | ||
1253 | return NO_QUOTA; | ||
1254 | } | ||
1255 | |||
1256 | return QUOTA_OK; | ||
1257 | } | ||
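/*
 * Editorial example of the grace-time logic above: with a 1 GiB soft limit,
 * a 2 GiB hard limit and a 7 day block grace period, the first allocation
 * that pushes usage over 1 GiB sets dqb_btime to "now + 7 days" and emits
 * QUOTA_NL_BSOFTWARN; once that deadline passes, allocations over the soft
 * limit fail with QUOTA_NL_BSOFTLONGWARN, and anything exceeding 2 GiB
 * fails with QUOTA_NL_BHARDWARN (unless ignore_hardlimit() applies).
 */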
1258 | |||
1259 | static int info_idq_free(struct dquot *dquot, qsize_t inodes) | ||
1260 | { | ||
1261 | qsize_t newinodes; | ||
1262 | |||
1263 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1264 | dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || | ||
1265 | !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type)) | ||
1266 | return QUOTA_NL_NOWARN; | ||
1267 | |||
1268 | newinodes = dquot->dq_dqb.dqb_curinodes - inodes; | ||
1269 | if (newinodes <= dquot->dq_dqb.dqb_isoftlimit) | ||
1270 | return QUOTA_NL_ISOFTBELOW; | ||
1271 | if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && | ||
1272 | newinodes < dquot->dq_dqb.dqb_ihardlimit) | ||
1273 | return QUOTA_NL_IHARDBELOW; | ||
1274 | return QUOTA_NL_NOWARN; | ||
1275 | } | ||
1276 | |||
1277 | static int info_bdq_free(struct dquot *dquot, qsize_t space) | ||
1278 | { | ||
1279 | if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || | ||
1280 | dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1281 | return QUOTA_NL_NOWARN; | ||
1282 | |||
1283 | if (dquot->dq_dqb.dqb_curspace - space <= dquot->dq_dqb.dqb_bsoftlimit) | ||
1284 | return QUOTA_NL_BSOFTBELOW; | ||
1285 | if (dquot->dq_dqb.dqb_curspace >= dquot->dq_dqb.dqb_bhardlimit && | ||
1286 | dquot->dq_dqb.dqb_curspace - space < dquot->dq_dqb.dqb_bhardlimit) | ||
1287 | return QUOTA_NL_BHARDBELOW; | ||
1288 | return QUOTA_NL_NOWARN; | ||
1289 | } | ||
1290 | /* | ||
1291 | * Initialize quota pointers in inode | ||
1292 | * We do things in a somewhat complicated way, but that way we avoid calling | ||
1293 | * dqget() and thus filesystem callbacks under dqptr_sem. | ||
1294 | */ | ||
1295 | int dquot_initialize(struct inode *inode, int type) | ||
1296 | { | ||
1297 | unsigned int id = 0; | ||
1298 | int cnt, ret = 0; | ||
1299 | struct dquot *got[MAXQUOTAS] = { NULL, NULL }; | ||
1300 | struct super_block *sb = inode->i_sb; | ||
1301 | |||
1302 | /* First test before acquiring mutex - solves deadlocks when we | ||
1303 | * re-enter the quota code and are already holding the mutex */ | ||
1304 | if (IS_NOQUOTA(inode)) | ||
1305 | return 0; | ||
1306 | |||
1307 | /* First get references to structures we might need. */ | ||
1308 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1309 | if (type != -1 && cnt != type) | ||
1310 | continue; | ||
1311 | switch (cnt) { | ||
1312 | case USRQUOTA: | ||
1313 | id = inode->i_uid; | ||
1314 | break; | ||
1315 | case GRPQUOTA: | ||
1316 | id = inode->i_gid; | ||
1317 | break; | ||
1318 | } | ||
1319 | got[cnt] = dqget(sb, id, cnt); | ||
1320 | } | ||
1321 | |||
1322 | down_write(&sb_dqopt(sb)->dqptr_sem); | ||
1323 | /* Having dqptr_sem we know NOQUOTA flags can't be altered... */ | ||
1324 | if (IS_NOQUOTA(inode)) | ||
1325 | goto out_err; | ||
1326 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1327 | if (type != -1 && cnt != type) | ||
1328 | continue; | ||
1329 | /* Avoid races with quotaoff() */ | ||
1330 | if (!sb_has_quota_active(sb, cnt)) | ||
1331 | continue; | ||
1332 | if (!inode->i_dquot[cnt]) { | ||
1333 | inode->i_dquot[cnt] = got[cnt]; | ||
1334 | got[cnt] = NULL; | ||
1335 | } | ||
1336 | } | ||
1337 | out_err: | ||
1338 | up_write(&sb_dqopt(sb)->dqptr_sem); | ||
1339 | /* Drop unused references */ | ||
1340 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1341 | dqput(got[cnt]); | ||
1342 | return ret; | ||
1343 | } | ||
1344 | EXPORT_SYMBOL(dquot_initialize); | ||
1345 | |||
1346 | /* | ||
1347 | * Release all quotas referenced by inode | ||
1348 | */ | ||
1349 | int dquot_drop(struct inode *inode) | ||
1350 | { | ||
1351 | int cnt; | ||
1352 | struct dquot *put[MAXQUOTAS]; | ||
1353 | |||
1354 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1355 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1356 | put[cnt] = inode->i_dquot[cnt]; | ||
1357 | inode->i_dquot[cnt] = NULL; | ||
1358 | } | ||
1359 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1360 | |||
1361 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1362 | dqput(put[cnt]); | ||
1363 | return 0; | ||
1364 | } | ||
1365 | EXPORT_SYMBOL(dquot_drop); | ||
1366 | |||
1367 | /* Wrapper to remove references to quota structures from inode */ | ||
1368 | void vfs_dq_drop(struct inode *inode) | ||
1369 | { | ||
1370 | /* Here we can get an arbitrary inode from clear_inode() so we have | ||
1371 | * to be careful. OTOH we don't need locking as quota operations | ||
1372 | * are allowed to change only at mount time */ | ||
1373 | if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op | ||
1374 | && inode->i_sb->dq_op->drop) { | ||
1375 | int cnt; | ||
1376 | /* Test before calling to rule out calls from proc and such | ||
1377 | * where we are not allowed to block. Note that this is | ||
1378 | * actually a reliable test even without the lock - the caller | ||
1379 | * must assure that nobody can come after the DQUOT_DROP and | ||
1380 | * add quota pointers back anyway */ | ||
1381 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1382 | if (inode->i_dquot[cnt]) | ||
1383 | break; | ||
1384 | if (cnt < MAXQUOTAS) | ||
1385 | inode->i_sb->dq_op->drop(inode); | ||
1386 | } | ||
1387 | } | ||
1388 | EXPORT_SYMBOL(vfs_dq_drop); | ||
1389 | |||
1390 | /* | ||
1391 | * Following four functions update i_blocks+i_bytes fields and | ||
1392 | * quota information (together with appropriate checks) | ||
1393 | * NOTE: We absolutely rely on the fact that caller dirties | ||
1394 | * the inode (usually macros in quotaops.h care about this) and | ||
1395 | * holds a handle for the current transaction so that dquot write and | ||
1396 | * inode write go into the same transaction. | ||
1397 | */ | ||
1398 | |||
1399 | /* | ||
1400 | * This operation can block, but only after everything is updated | ||
1401 | */ | ||
1402 | int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
1403 | int warn, int reserve) | ||
1404 | { | ||
1405 | int cnt, ret = QUOTA_OK; | ||
1406 | char warntype[MAXQUOTAS]; | ||
1407 | |||
1408 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1409 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1410 | |||
1411 | spin_lock(&dq_data_lock); | ||
1412 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1413 | if (!inode->i_dquot[cnt]) | ||
1414 | continue; | ||
1415 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | ||
1416 | == NO_QUOTA) { | ||
1417 | ret = NO_QUOTA; | ||
1418 | goto out_unlock; | ||
1419 | } | ||
1420 | } | ||
1421 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1422 | if (!inode->i_dquot[cnt]) | ||
1423 | continue; | ||
1424 | if (reserve) | ||
1425 | dquot_resv_space(inode->i_dquot[cnt], number); | ||
1426 | else | ||
1427 | dquot_incr_space(inode->i_dquot[cnt], number); | ||
1428 | } | ||
1429 | if (!reserve) | ||
1430 | inode_add_bytes(inode, number); | ||
1431 | out_unlock: | ||
1432 | spin_unlock(&dq_data_lock); | ||
1433 | flush_warnings(inode->i_dquot, warntype); | ||
1434 | return ret; | ||
1435 | } | ||
1436 | |||
1437 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
1438 | { | ||
1439 | int cnt, ret = QUOTA_OK; | ||
1440 | |||
1441 | /* | ||
1442 | * First test before acquiring mutex - solves deadlocks when we | ||
1443 | * re-enter the quota code and are already holding the mutex | ||
1444 | */ | ||
1445 | if (IS_NOQUOTA(inode)) { | ||
1446 | inode_add_bytes(inode, number); | ||
1447 | goto out; | ||
1448 | } | ||
1449 | |||
1450 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1451 | if (IS_NOQUOTA(inode)) { | ||
1452 | inode_add_bytes(inode, number); | ||
1453 | goto out_unlock; | ||
1454 | } | ||
1455 | |||
1456 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
1457 | if (ret == NO_QUOTA) | ||
1458 | goto out_unlock; | ||
1459 | |||
1460 | /* Dirtify all the dquots - this can block when journalling */ | ||
1461 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1462 | if (inode->i_dquot[cnt]) | ||
1463 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1464 | out_unlock: | ||
1465 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1466 | out: | ||
1467 | return ret; | ||
1468 | } | ||
1469 | EXPORT_SYMBOL(dquot_alloc_space); | ||
1470 | |||
1471 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | ||
1472 | { | ||
1473 | int ret = QUOTA_OK; | ||
1474 | |||
1475 | if (IS_NOQUOTA(inode)) | ||
1476 | goto out; | ||
1477 | |||
1478 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1479 | if (IS_NOQUOTA(inode)) | ||
1480 | goto out_unlock; | ||
1481 | |||
1482 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
1483 | out_unlock: | ||
1484 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1485 | out: | ||
1486 | return ret; | ||
1487 | } | ||
1488 | EXPORT_SYMBOL(dquot_reserve_space); | ||
1489 | |||
1490 | /* | ||
1491 | * This operation can block, but only after everything is updated | ||
1492 | */ | ||
1493 | int dquot_alloc_inode(const struct inode *inode, qsize_t number) | ||
1494 | { | ||
1495 | int cnt, ret = NO_QUOTA; | ||
1496 | char warntype[MAXQUOTAS]; | ||
1497 | |||
1498 | /* First test before acquiring mutex - solves deadlocks when we | ||
1499 | * re-enter the quota code and are already holding the mutex */ | ||
1500 | if (IS_NOQUOTA(inode)) | ||
1501 | return QUOTA_OK; | ||
1502 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1503 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
1504 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1505 | if (IS_NOQUOTA(inode)) { | ||
1506 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1507 | return QUOTA_OK; | ||
1508 | } | ||
1509 | spin_lock(&dq_data_lock); | ||
1510 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1511 | if (!inode->i_dquot[cnt]) | ||
1512 | continue; | ||
1513 | if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) | ||
1514 | == NO_QUOTA) | ||
1515 | goto warn_put_all; | ||
1516 | } | ||
1517 | |||
1518 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1519 | if (!inode->i_dquot[cnt]) | ||
1520 | continue; | ||
1521 | dquot_incr_inodes(inode->i_dquot[cnt], number); | ||
1522 | } | ||
1523 | ret = QUOTA_OK; | ||
1524 | warn_put_all: | ||
1525 | spin_unlock(&dq_data_lock); | ||
1526 | if (ret == QUOTA_OK) | ||
1527 | /* Dirtify all the dquots - this can block when journalling */ | ||
1528 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1529 | if (inode->i_dquot[cnt]) | ||
1530 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1531 | flush_warnings(inode->i_dquot, warntype); | ||
1532 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1533 | return ret; | ||
1534 | } | ||
1535 | EXPORT_SYMBOL(dquot_alloc_inode); | ||
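/*
 * Illustrative sketch, not part of the original file: charging inode quota
 * from a hypothetical create path.  It assumes i_uid/i_gid are already set
 * on the new inode; vfs_dq_init() (the quotaops.h helper also used by
 * vfs_dq_transfer() below) attaches the dquots that dquot_alloc_inode()
 * then charges.
 */
static int example_charge_new_inode(struct inode *inode)
{
	vfs_dq_init(inode);
	/* Account one inode to the owning user and group. */
	if (dquot_alloc_inode(inode, 1) == NO_QUOTA)
		return -EDQUOT;
	return 0;
}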
1536 | |||
1537 | int dquot_claim_space(struct inode *inode, qsize_t number) | ||
1538 | { | ||
1539 | int cnt; | ||
1540 | int ret = QUOTA_OK; | ||
1541 | |||
1542 | if (IS_NOQUOTA(inode)) { | ||
1543 | inode_add_bytes(inode, number); | ||
1544 | goto out; | ||
1545 | } | ||
1546 | |||
1547 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1548 | if (IS_NOQUOTA(inode)) { | ||
1549 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1550 | inode_add_bytes(inode, number); | ||
1551 | goto out; | ||
1552 | } | ||
1553 | |||
1554 | spin_lock(&dq_data_lock); | ||
1555 | /* Claim reserved quotas to allocated quotas */ | ||
1556 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1557 | if (inode->i_dquot[cnt]) | ||
1558 | dquot_claim_reserved_space(inode->i_dquot[cnt], | ||
1559 | number); | ||
1560 | } | ||
1561 | /* Update inode bytes */ | ||
1562 | inode_add_bytes(inode, number); | ||
1563 | spin_unlock(&dq_data_lock); | ||
1564 | /* Dirtify all the dquots - this can block when journalling */ | ||
1565 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1566 | if (inode->i_dquot[cnt]) | ||
1567 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1568 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1569 | out: | ||
1570 | return ret; | ||
1571 | } | ||
1572 | EXPORT_SYMBOL(dquot_claim_space); | ||
1573 | |||
1574 | /* | ||
1575 | * Release reserved quota space | ||
1576 | */ | ||
1577 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
1578 | { | ||
1579 | int cnt; | ||
1580 | |||
1581 | if (IS_NOQUOTA(inode)) | ||
1582 | goto out; | ||
1583 | |||
1584 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1585 | if (IS_NOQUOTA(inode)) | ||
1586 | goto out_unlock; | ||
1587 | |||
1588 | spin_lock(&dq_data_lock); | ||
1589 | /* Release reserved dquots */ | ||
1590 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1591 | if (inode->i_dquot[cnt]) | ||
1592 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
1593 | } | ||
1594 | spin_unlock(&dq_data_lock); | ||
1595 | |||
1596 | out_unlock: | ||
1597 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1598 | out: | ||
1599 | return; | ||
1600 | } | ||
1601 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
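/*
 * Illustrative sketch, not part of the original file: the reserve/claim/
 * release life cycle used for delayed allocation.  dquot_reserve_space()
 * charges the reservation at write time, dquot_claim_space() converts it
 * into real usage once blocks are mapped, and dquot_release_reserved_space()
 * gives it back if nothing was written.  example_map_delalloc() is a
 * hypothetical stand-in for the filesystem's own extent mapping.
 */
static int example_map_delalloc(struct inode *inode, qsize_t bytes)
{
	/* Stand-in for mapping the delayed-allocation extent on disk. */
	return 0;
}

static int example_delalloc_write(struct inode *inode, qsize_t bytes)
{
	int err;

	if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
		return -EDQUOT;

	err = example_map_delalloc(inode, bytes);
	if (err) {
		/* Nothing was allocated: drop the reservation again. */
		dquot_release_reserved_space(inode, bytes);
		return err;
	}
	/* Blocks exist on disk now: turn the reservation into real usage. */
	dquot_claim_space(inode, bytes);
	return 0;
}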
1602 | |||
1603 | /* | ||
1604 | * This operation can block, but only after everything is updated | ||
1605 | */ | ||
1606 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
1607 | { | ||
1608 | unsigned int cnt; | ||
1609 | char warntype[MAXQUOTAS]; | ||
1610 | |||
1611 | /* First test before acquiring mutex - solves deadlocks when we | ||
1612 | * re-enter the quota code and are already holding the mutex */ | ||
1613 | if (IS_NOQUOTA(inode)) { | ||
1614 | out_sub: | ||
1615 | inode_sub_bytes(inode, number); | ||
1616 | return QUOTA_OK; | ||
1617 | } | ||
1618 | |||
1619 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1620 | /* Now recheck reliably when holding dqptr_sem */ | ||
1621 | if (IS_NOQUOTA(inode)) { | ||
1622 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1623 | goto out_sub; | ||
1624 | } | ||
1625 | spin_lock(&dq_data_lock); | ||
1626 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1627 | if (!inode->i_dquot[cnt]) | ||
1628 | continue; | ||
1629 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | ||
1630 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
1631 | } | ||
1632 | inode_sub_bytes(inode, number); | ||
1633 | spin_unlock(&dq_data_lock); | ||
1634 | /* Dirtify all the dquots - this can block when journalling */ | ||
1635 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1636 | if (inode->i_dquot[cnt]) | ||
1637 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1638 | flush_warnings(inode->i_dquot, warntype); | ||
1639 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1640 | return QUOTA_OK; | ||
1641 | } | ||
1642 | EXPORT_SYMBOL(dquot_free_space); | ||
1643 | |||
1644 | /* | ||
1645 | * This operation can block, but only after everything is updated | ||
1646 | */ | ||
1647 | int dquot_free_inode(const struct inode *inode, qsize_t number) | ||
1648 | { | ||
1649 | unsigned int cnt; | ||
1650 | char warntype[MAXQUOTAS]; | ||
1651 | |||
1652 | /* First test before acquiring mutex - solves deadlocks when we | ||
1653 | * re-enter the quota code and are already holding the mutex */ | ||
1654 | if (IS_NOQUOTA(inode)) | ||
1655 | return QUOTA_OK; | ||
1656 | |||
1657 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1658 | /* Now recheck reliably when holding dqptr_sem */ | ||
1659 | if (IS_NOQUOTA(inode)) { | ||
1660 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1661 | return QUOTA_OK; | ||
1662 | } | ||
1663 | spin_lock(&dq_data_lock); | ||
1664 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1665 | if (!inode->i_dquot[cnt]) | ||
1666 | continue; | ||
1667 | warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); | ||
1668 | dquot_decr_inodes(inode->i_dquot[cnt], number); | ||
1669 | } | ||
1670 | spin_unlock(&dq_data_lock); | ||
1671 | /* Dirtify all the dquots - this can block when journalling */ | ||
1672 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1673 | if (inode->i_dquot[cnt]) | ||
1674 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
1675 | flush_warnings(inode->i_dquot, warntype); | ||
1676 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1677 | return QUOTA_OK; | ||
1678 | } | ||
1679 | EXPORT_SYMBOL(dquot_free_inode); | ||
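/*
 * Illustrative sketch, not part of the original file: the release side of
 * the accounting, e.g. from truncate or final unlink.  Only
 * dquot_free_space()/dquot_free_inode() come from this file; the trigger
 * points are hypothetical.
 */
static void example_truncate_blocks(struct inode *inode, qsize_t freed_bytes)
{
	/* Mirrors the allocation side: quota and i_blocks drop together. */
	dquot_free_space(inode, freed_bytes);
}

static void example_delete_inode(struct inode *inode)
{
	/* The inode itself no longer counts against its owner. */
	dquot_free_inode(inode, 1);
}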
1680 | |||
1681 | /* | ||
1682 | * call back function, get reserved quota space from underlying fs | ||
1683 | */ | ||
1684 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
1685 | { | ||
1686 | qsize_t reserved_space = 0; | ||
1687 | |||
1688 | if (sb_any_quota_active(inode->i_sb) && | ||
1689 | inode->i_sb->dq_op->get_reserved_space) | ||
1690 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
1691 | return reserved_space; | ||
1692 | } | ||
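/*
 * Illustrative sketch, not part of the original file: a filesystem doing
 * delayed allocation reports its per-inode reservation through the
 * ->get_reserved_space callback consulted above (the generic
 * dquot_operations below leaves it unset, so plain filesystems report 0).
 * The example_inode_info container and its i_reserved_bytes field are
 * hypothetical.
 */
struct example_inode_info {
	qsize_t		i_reserved_bytes;	/* delalloc bytes not yet mapped */
	struct inode	vfs_inode;
};

static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
{
	return container_of(inode, struct example_inode_info, vfs_inode);
}

static qsize_t example_get_reserved_space(struct inode *inode)
{
	/* Plugged into the filesystem's dquot_operations as .get_reserved_space */
	return EXAMPLE_I(inode)->i_reserved_bytes;
}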
1693 | |||
1694 | /* | ||
1695 | * Transfer the number of inodes and blocks from one diskquota to another. | ||
1696 | * | ||
1697 | * This operation can block, but only after everything is updated | ||
1698 | * A transaction must be started when entering this function. | ||
1699 | */ | ||
1700 | int dquot_transfer(struct inode *inode, struct iattr *iattr) | ||
1701 | { | ||
1702 | qsize_t space, cur_space; | ||
1703 | qsize_t rsv_space = 0; | ||
1704 | struct dquot *transfer_from[MAXQUOTAS]; | ||
1705 | struct dquot *transfer_to[MAXQUOTAS]; | ||
1706 | int cnt, ret = QUOTA_OK; | ||
1707 | int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, | ||
1708 | chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; | ||
1709 | char warntype_to[MAXQUOTAS]; | ||
1710 | char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; | ||
1711 | |||
1712 | /* First test before acquiring mutex - solves deadlocks when we | ||
1713 | * re-enter the quota code and are already holding the mutex */ | ||
1714 | if (IS_NOQUOTA(inode)) | ||
1715 | return QUOTA_OK; | ||
1716 | /* Initialize the arrays */ | ||
1717 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1718 | transfer_from[cnt] = NULL; | ||
1719 | transfer_to[cnt] = NULL; | ||
1720 | warntype_to[cnt] = QUOTA_NL_NOWARN; | ||
1721 | } | ||
1722 | if (chuid) | ||
1723 | transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, | ||
1724 | USRQUOTA); | ||
1725 | if (chgid) | ||
1726 | transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid, | ||
1727 | GRPQUOTA); | ||
1728 | |||
1729 | down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1730 | /* Now recheck reliably when holding dqptr_sem */ | ||
1731 | if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ | ||
1732 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1733 | goto put_all; | ||
1734 | } | ||
1735 | spin_lock(&dq_data_lock); | ||
1736 | cur_space = inode_get_bytes(inode); | ||
1737 | rsv_space = dquot_get_reserved_space(inode); | ||
1738 | space = cur_space + rsv_space; | ||
1739 | /* Build the transfer_from list and check the limits */ | ||
1740 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1741 | if (!transfer_to[cnt]) | ||
1742 | continue; | ||
1743 | transfer_from[cnt] = inode->i_dquot[cnt]; | ||
1744 | if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == | ||
1745 | NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, | ||
1746 | warntype_to + cnt) == NO_QUOTA) | ||
1747 | goto over_quota; | ||
1748 | } | ||
1749 | |||
1750 | /* | ||
1751 | * Finally perform the needed transfer from transfer_from to transfer_to | ||
1752 | */ | ||
1753 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1754 | /* | ||
1755 | * Skip changes for same uid or gid or for turned off quota-type. | ||
1756 | */ | ||
1757 | if (!transfer_to[cnt]) | ||
1758 | continue; | ||
1759 | |||
1760 | /* Due to an IO error we might not have a transfer_from[] structure */ | ||
1761 | if (transfer_from[cnt]) { | ||
1762 | warntype_from_inodes[cnt] = | ||
1763 | info_idq_free(transfer_from[cnt], 1); | ||
1764 | warntype_from_space[cnt] = | ||
1765 | info_bdq_free(transfer_from[cnt], space); | ||
1766 | dquot_decr_inodes(transfer_from[cnt], 1); | ||
1767 | dquot_decr_space(transfer_from[cnt], cur_space); | ||
1768 | dquot_free_reserved_space(transfer_from[cnt], | ||
1769 | rsv_space); | ||
1770 | } | ||
1771 | |||
1772 | dquot_incr_inodes(transfer_to[cnt], 1); | ||
1773 | dquot_incr_space(transfer_to[cnt], cur_space); | ||
1774 | dquot_resv_space(transfer_to[cnt], rsv_space); | ||
1775 | |||
1776 | inode->i_dquot[cnt] = transfer_to[cnt]; | ||
1777 | } | ||
1778 | spin_unlock(&dq_data_lock); | ||
1779 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1780 | |||
1781 | /* Dirtify all the dquots - this can block when journalling */ | ||
1782 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1783 | if (transfer_from[cnt]) | ||
1784 | mark_dquot_dirty(transfer_from[cnt]); | ||
1785 | if (transfer_to[cnt]) { | ||
1786 | mark_dquot_dirty(transfer_to[cnt]); | ||
1787 | /* The reference we got is transferred to the inode */ | ||
1788 | transfer_to[cnt] = NULL; | ||
1789 | } | ||
1790 | } | ||
1791 | warn_put_all: | ||
1792 | flush_warnings(transfer_to, warntype_to); | ||
1793 | flush_warnings(transfer_from, warntype_from_inodes); | ||
1794 | flush_warnings(transfer_from, warntype_from_space); | ||
1795 | put_all: | ||
1796 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1797 | dqput(transfer_from[cnt]); | ||
1798 | dqput(transfer_to[cnt]); | ||
1799 | } | ||
1800 | return ret; | ||
1801 | over_quota: | ||
1802 | spin_unlock(&dq_data_lock); | ||
1803 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
1804 | /* Clear dquot pointers we don't want to dqput() */ | ||
1805 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1806 | transfer_from[cnt] = NULL; | ||
1807 | ret = NO_QUOTA; | ||
1808 | goto warn_put_all; | ||
1809 | } | ||
1810 | EXPORT_SYMBOL(dquot_transfer); | ||
1811 | |||
1812 | /* Wrapper for transferring ownership of an inode */ | ||
1813 | int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) | ||
1814 | { | ||
1815 | if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { | ||
1816 | vfs_dq_init(inode); | ||
1817 | if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) | ||
1818 | return 1; | ||
1819 | } | ||
1820 | return 0; | ||
1821 | } | ||
1822 | EXPORT_SYMBOL(vfs_dq_transfer); | ||
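/*
 * Illustrative sketch, not part of the original file: a setattr path that
 * lets vfs_dq_transfer() move usage to the new owner before the uid/gid
 * change is committed.  inode_change_ok()/inode_setattr() are the usual VFS
 * helpers of this era; treat the surrounding structure as an assumption,
 * not this file's API.
 */
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		/* vfs_dq_transfer() returns 1 when the new owner is over quota */
		if (vfs_dq_transfer(inode, attr))
			return -EDQUOT;
	}
	return inode_setattr(inode, attr);
}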
1823 | |||
1824 | /* | ||
1825 | * Write info of quota file to disk | ||
1826 | */ | ||
1827 | int dquot_commit_info(struct super_block *sb, int type) | ||
1828 | { | ||
1829 | int ret; | ||
1830 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1831 | |||
1832 | mutex_lock(&dqopt->dqio_mutex); | ||
1833 | ret = dqopt->ops[type]->write_file_info(sb, type); | ||
1834 | mutex_unlock(&dqopt->dqio_mutex); | ||
1835 | return ret; | ||
1836 | } | ||
1837 | EXPORT_SYMBOL(dquot_commit_info); | ||
1838 | |||
1839 | /* | ||
1840 | * Definitions of diskquota operations. | ||
1841 | */ | ||
1842 | struct dquot_operations dquot_operations = { | ||
1843 | .initialize = dquot_initialize, | ||
1844 | .drop = dquot_drop, | ||
1845 | .alloc_space = dquot_alloc_space, | ||
1846 | .alloc_inode = dquot_alloc_inode, | ||
1847 | .free_space = dquot_free_space, | ||
1848 | .free_inode = dquot_free_inode, | ||
1849 | .transfer = dquot_transfer, | ||
1850 | .write_dquot = dquot_commit, | ||
1851 | .acquire_dquot = dquot_acquire, | ||
1852 | .release_dquot = dquot_release, | ||
1853 | .mark_dirty = dquot_mark_dquot_dirty, | ||
1854 | .write_info = dquot_commit_info, | ||
1855 | .alloc_dquot = dquot_alloc, | ||
1856 | .destroy_dquot = dquot_destroy, | ||
1857 | }; | ||
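/*
 * Illustrative sketch, not part of the original file: a journalling
 * filesystem typically reuses most of the generic operations above but
 * wraps the dquot writes in its own transactions.  example_write_dquot()
 * only marks where a real implementation would start and stop a
 * transaction around dquot_commit().
 */
static int example_write_dquot(struct dquot *dquot)
{
	int ret;

	/* A real filesystem would start a journal transaction here ... */
	ret = dquot_commit(dquot);
	/* ... and stop it here, propagating the first error seen. */
	return ret;
}

static struct dquot_operations example_quota_operations = {
	.initialize	= dquot_initialize,
	.drop		= dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= example_write_dquot,	/* journalled instead of generic */
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};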
1858 | |||
1859 | /* | ||
1860 | * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) | ||
1861 | */ | ||
1862 | int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) | ||
1863 | { | ||
1864 | int cnt, ret = 0; | ||
1865 | struct quota_info *dqopt = sb_dqopt(sb); | ||
1866 | struct inode *toputinode[MAXQUOTAS]; | ||
1867 | |||
1868 | /* We cannot turn off usage accounting while leaving limits enabled, nor | ||
1869 | * can we suspend quotas and turn them off at the same time. */ | ||
1870 | if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) | ||
1871 | || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | | ||
1872 | DQUOT_USAGE_ENABLED))) | ||
1873 | return -EINVAL; | ||
1874 | |||
1875 | /* We need to serialize quota_off() for device */ | ||
1876 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1877 | |||
1878 | /* | ||
1879 | * Skip everything if there's nothing to do. We have to do this because | ||
1880 | * sometimes we are called when fill_super() failed and calling | ||
1881 | * sync_fs() in such cases does no good. | ||
1882 | */ | ||
1883 | if (!sb_any_quota_loaded(sb)) { | ||
1884 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1885 | return 0; | ||
1886 | } | ||
1887 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
1888 | toputinode[cnt] = NULL; | ||
1889 | if (type != -1 && cnt != type) | ||
1890 | continue; | ||
1891 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1892 | continue; | ||
1893 | |||
1894 | if (flags & DQUOT_SUSPENDED) { | ||
1895 | spin_lock(&dq_state_lock); | ||
1896 | dqopt->flags |= | ||
1897 | dquot_state_flag(DQUOT_SUSPENDED, cnt); | ||
1898 | spin_unlock(&dq_state_lock); | ||
1899 | } else { | ||
1900 | spin_lock(&dq_state_lock); | ||
1901 | dqopt->flags &= ~dquot_state_flag(flags, cnt); | ||
1902 | /* Turning off suspended quotas? */ | ||
1903 | if (!sb_has_quota_loaded(sb, cnt) && | ||
1904 | sb_has_quota_suspended(sb, cnt)) { | ||
1905 | dqopt->flags &= ~dquot_state_flag( | ||
1906 | DQUOT_SUSPENDED, cnt); | ||
1907 | spin_unlock(&dq_state_lock); | ||
1908 | iput(dqopt->files[cnt]); | ||
1909 | dqopt->files[cnt] = NULL; | ||
1910 | continue; | ||
1911 | } | ||
1912 | spin_unlock(&dq_state_lock); | ||
1913 | } | ||
1914 | |||
1915 | /* We still have to keep quota loaded? */ | ||
1916 | if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) | ||
1917 | continue; | ||
1918 | |||
1919 | /* Note: these are blocking operations */ | ||
1920 | drop_dquot_ref(sb, cnt); | ||
1921 | invalidate_dquots(sb, cnt); | ||
1922 | /* | ||
1923 | * Now all dquots should be invalidated, all writes done so we | ||
1924 | * should be only users of the info. No locks needed. | ||
1925 | */ | ||
1926 | if (info_dirty(&dqopt->info[cnt])) | ||
1927 | sb->dq_op->write_info(sb, cnt); | ||
1928 | if (dqopt->ops[cnt]->free_file_info) | ||
1929 | dqopt->ops[cnt]->free_file_info(sb, cnt); | ||
1930 | put_quota_format(dqopt->info[cnt].dqi_format); | ||
1931 | |||
1932 | toputinode[cnt] = dqopt->files[cnt]; | ||
1933 | if (!sb_has_quota_loaded(sb, cnt)) | ||
1934 | dqopt->files[cnt] = NULL; | ||
1935 | dqopt->info[cnt].dqi_flags = 0; | ||
1936 | dqopt->info[cnt].dqi_igrace = 0; | ||
1937 | dqopt->info[cnt].dqi_bgrace = 0; | ||
1938 | dqopt->ops[cnt] = NULL; | ||
1939 | } | ||
1940 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1941 | |||
1942 | /* Skip syncing and setting flags if quota files are hidden */ | ||
1943 | if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) | ||
1944 | goto put_inodes; | ||
1945 | |||
1946 | /* Sync the superblock so that buffers with quota data are written to | ||
1947 | * disk (and so userspace sees correct data afterwards). */ | ||
1948 | if (sb->s_op->sync_fs) | ||
1949 | sb->s_op->sync_fs(sb, 1); | ||
1950 | sync_blockdev(sb->s_bdev); | ||
1951 | /* Now the quota files are just ordinary files and we can set the | ||
1952 | * inode flags back. Moreover we discard the pagecache so that | ||
1953 | * userspace sees the writes we did bypassing the pagecache. We | ||
1954 | * must also discard the blockdev buffers so that we see the | ||
1955 | * changes done by userspace on the next quotaon() */ | ||
1956 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1957 | if (toputinode[cnt]) { | ||
1958 | mutex_lock(&dqopt->dqonoff_mutex); | ||
1959 | /* If quota was reenabled in the meantime, we have | ||
1960 | * nothing to do */ | ||
1961 | if (!sb_has_quota_loaded(sb, cnt)) { | ||
1962 | mutex_lock_nested(&toputinode[cnt]->i_mutex, | ||
1963 | I_MUTEX_QUOTA); | ||
1964 | toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | | ||
1965 | S_NOATIME | S_NOQUOTA); | ||
1966 | truncate_inode_pages(&toputinode[cnt]->i_data, | ||
1967 | 0); | ||
1968 | mutex_unlock(&toputinode[cnt]->i_mutex); | ||
1969 | mark_inode_dirty(toputinode[cnt]); | ||
1970 | } | ||
1971 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
1972 | } | ||
1973 | if (sb->s_bdev) | ||
1974 | invalidate_bdev(sb->s_bdev); | ||
1975 | put_inodes: | ||
1976 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
1977 | if (toputinode[cnt]) { | ||
1978 | /* On remount RO, we keep the inode pointer so that we | ||
1979 | * can reenable quota on the subsequent remount RW. We | ||
1980 | * have to check 'flags' variable and not use sb_has_ | ||
1981 | * function because another quotaon / quotaoff could | ||
1982 | * change global state before we got here. We refuse | ||
1983 | * to suspend quotas when there is pending delete on | ||
1984 | * the quota file... */ | ||
1985 | if (!(flags & DQUOT_SUSPENDED)) | ||
1986 | iput(toputinode[cnt]); | ||
1987 | else if (!toputinode[cnt]->i_nlink) | ||
1988 | ret = -EBUSY; | ||
1989 | } | ||
1990 | return ret; | ||
1991 | } | ||
1992 | EXPORT_SYMBOL(vfs_quota_disable); | ||
1993 | |||
1994 | int vfs_quota_off(struct super_block *sb, int type, int remount) | ||
1995 | { | ||
1996 | return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : | ||
1997 | (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); | ||
1998 | } | ||
1999 | EXPORT_SYMBOL(vfs_quota_off); | ||
2000 | /* | ||
2001 | * Turn quotas on for a device | ||
2002 | */ | ||
2003 | |||
2004 | /* | ||
2005 | * Helper function to turn quotas on when we already have the inode of | ||
2006 | * quota file and no quota information is loaded. | ||
2007 | */ | ||
2008 | static int vfs_load_quota_inode(struct inode *inode, int type, int format_id, | ||
2009 | unsigned int flags) | ||
2010 | { | ||
2011 | struct quota_format_type *fmt = find_quota_format(format_id); | ||
2012 | struct super_block *sb = inode->i_sb; | ||
2013 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2014 | int error; | ||
2015 | int oldflags = -1; | ||
2016 | |||
2017 | if (!fmt) | ||
2018 | return -ESRCH; | ||
2019 | if (!S_ISREG(inode->i_mode)) { | ||
2020 | error = -EACCES; | ||
2021 | goto out_fmt; | ||
2022 | } | ||
2023 | if (IS_RDONLY(inode)) { | ||
2024 | error = -EROFS; | ||
2025 | goto out_fmt; | ||
2026 | } | ||
2027 | if (!sb->s_op->quota_write || !sb->s_op->quota_read) { | ||
2028 | error = -EINVAL; | ||
2029 | goto out_fmt; | ||
2030 | } | ||
2031 | /* Usage always has to be set... */ | ||
2032 | if (!(flags & DQUOT_USAGE_ENABLED)) { | ||
2033 | error = -EINVAL; | ||
2034 | goto out_fmt; | ||
2035 | } | ||
2036 | |||
2037 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
2038 | /* As we bypass the pagecache we must now flush the inode so | ||
2039 | * that we see all the changes from userspace... */ | ||
2040 | write_inode_now(inode, 1); | ||
2041 | /* And now flush the block cache so that kernel sees the | ||
2042 | * changes */ | ||
2043 | invalidate_bdev(sb->s_bdev); | ||
2044 | } | ||
2045 | mutex_lock(&inode->i_mutex); | ||
2046 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2047 | if (sb_has_quota_loaded(sb, type)) { | ||
2048 | error = -EBUSY; | ||
2049 | goto out_lock; | ||
2050 | } | ||
2051 | |||
2052 | if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { | ||
2053 | /* We don't want quota and atime on quota files (deadlocks | ||
2054 | * possible). Also nobody should write to the file - we use | ||
2055 | * special IO operations which ignore the immutable bit. */ | ||
2056 | down_write(&dqopt->dqptr_sem); | ||
2057 | oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | | ||
2058 | S_NOQUOTA); | ||
2059 | inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; | ||
2060 | up_write(&dqopt->dqptr_sem); | ||
2061 | sb->dq_op->drop(inode); | ||
2062 | } | ||
2063 | |||
2064 | error = -EIO; | ||
2065 | dqopt->files[type] = igrab(inode); | ||
2066 | if (!dqopt->files[type]) | ||
2067 | goto out_lock; | ||
2068 | error = -EINVAL; | ||
2069 | if (!fmt->qf_ops->check_quota_file(sb, type)) | ||
2070 | goto out_file_init; | ||
2071 | |||
2072 | dqopt->ops[type] = fmt->qf_ops; | ||
2073 | dqopt->info[type].dqi_format = fmt; | ||
2074 | dqopt->info[type].dqi_fmt_id = format_id; | ||
2075 | INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); | ||
2076 | mutex_lock(&dqopt->dqio_mutex); | ||
2077 | error = dqopt->ops[type]->read_file_info(sb, type); | ||
2078 | if (error < 0) { | ||
2079 | mutex_unlock(&dqopt->dqio_mutex); | ||
2080 | goto out_file_init; | ||
2081 | } | ||
2082 | mutex_unlock(&dqopt->dqio_mutex); | ||
2083 | mutex_unlock(&inode->i_mutex); | ||
2084 | spin_lock(&dq_state_lock); | ||
2085 | dqopt->flags |= dquot_state_flag(flags, type); | ||
2086 | spin_unlock(&dq_state_lock); | ||
2087 | |||
2088 | add_dquot_ref(sb, type); | ||
2089 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2090 | |||
2091 | return 0; | ||
2092 | |||
2093 | out_file_init: | ||
2094 | dqopt->files[type] = NULL; | ||
2095 | iput(inode); | ||
2096 | out_lock: | ||
2097 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2098 | if (oldflags != -1) { | ||
2099 | down_write(&dqopt->dqptr_sem); | ||
2100 | /* Set the flags back (in the case of accidental quotaon() | ||
2101 | * on a wrong file we don't want to mess up the flags) */ | ||
2102 | inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); | ||
2103 | inode->i_flags |= oldflags; | ||
2104 | up_write(&dqopt->dqptr_sem); | ||
2105 | } | ||
2106 | mutex_unlock(&inode->i_mutex); | ||
2107 | out_fmt: | ||
2108 | put_quota_format(fmt); | ||
2109 | |||
2110 | return error; | ||
2111 | } | ||
2112 | |||
2113 | /* Reenable quotas on remount RW */ | ||
2114 | static int vfs_quota_on_remount(struct super_block *sb, int type) | ||
2115 | { | ||
2116 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2117 | struct inode *inode; | ||
2118 | int ret; | ||
2119 | unsigned int flags; | ||
2120 | |||
2121 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2122 | if (!sb_has_quota_suspended(sb, type)) { | ||
2123 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2124 | return 0; | ||
2125 | } | ||
2126 | inode = dqopt->files[type]; | ||
2127 | dqopt->files[type] = NULL; | ||
2128 | spin_lock(&dq_state_lock); | ||
2129 | flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | | ||
2130 | DQUOT_LIMITS_ENABLED, type); | ||
2131 | dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); | ||
2132 | spin_unlock(&dq_state_lock); | ||
2133 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2134 | |||
2135 | flags = dquot_generic_flag(flags, type); | ||
2136 | ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, | ||
2137 | flags); | ||
2138 | iput(inode); | ||
2139 | |||
2140 | return ret; | ||
2141 | } | ||
2142 | |||
2143 | int vfs_quota_on_path(struct super_block *sb, int type, int format_id, | ||
2144 | struct path *path) | ||
2145 | { | ||
2146 | int error = security_quota_on(path->dentry); | ||
2147 | if (error) | ||
2148 | return error; | ||
2149 | /* Quota file not on the same filesystem? */ | ||
2150 | if (path->mnt->mnt_sb != sb) | ||
2151 | error = -EXDEV; | ||
2152 | else | ||
2153 | error = vfs_load_quota_inode(path->dentry->d_inode, type, | ||
2154 | format_id, DQUOT_USAGE_ENABLED | | ||
2155 | DQUOT_LIMITS_ENABLED); | ||
2156 | return error; | ||
2157 | } | ||
2158 | EXPORT_SYMBOL(vfs_quota_on_path); | ||
2159 | |||
2160 | int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, | ||
2161 | int remount) | ||
2162 | { | ||
2163 | struct path path; | ||
2164 | int error; | ||
2165 | |||
2166 | if (remount) | ||
2167 | return vfs_quota_on_remount(sb, type); | ||
2168 | |||
2169 | error = kern_path(name, LOOKUP_FOLLOW, &path); | ||
2170 | if (!error) { | ||
2171 | error = vfs_quota_on_path(sb, type, format_id, &path); | ||
2172 | path_put(&path); | ||
2173 | } | ||
2174 | return error; | ||
2175 | } | ||
2176 | EXPORT_SYMBOL(vfs_quota_on); | ||
2177 | |||
2178 | /* | ||
2179 | * More powerful function for turning on quotas allowing setting | ||
2180 | * of individual quota flags | ||
2181 | */ | ||
2182 | int vfs_quota_enable(struct inode *inode, int type, int format_id, | ||
2183 | unsigned int flags) | ||
2184 | { | ||
2185 | int ret = 0; | ||
2186 | struct super_block *sb = inode->i_sb; | ||
2187 | struct quota_info *dqopt = sb_dqopt(sb); | ||
2188 | |||
2189 | /* Just unsuspend quotas? */ | ||
2190 | if (flags & DQUOT_SUSPENDED) | ||
2191 | return vfs_quota_on_remount(sb, type); | ||
2192 | if (!flags) | ||
2193 | return 0; | ||
2194 | /* Just updating flags needed? */ | ||
2195 | if (sb_has_quota_loaded(sb, type)) { | ||
2196 | mutex_lock(&dqopt->dqonoff_mutex); | ||
2197 | /* Now do a reliable test... */ | ||
2198 | if (!sb_has_quota_loaded(sb, type)) { | ||
2199 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2200 | goto load_quota; | ||
2201 | } | ||
2202 | if (flags & DQUOT_USAGE_ENABLED && | ||
2203 | sb_has_quota_usage_enabled(sb, type)) { | ||
2204 | ret = -EBUSY; | ||
2205 | goto out_lock; | ||
2206 | } | ||
2207 | if (flags & DQUOT_LIMITS_ENABLED && | ||
2208 | sb_has_quota_limits_enabled(sb, type)) { | ||
2209 | ret = -EBUSY; | ||
2210 | goto out_lock; | ||
2211 | } | ||
2212 | spin_lock(&dq_state_lock); | ||
2213 | sb_dqopt(sb)->flags |= dquot_state_flag(flags, type); | ||
2214 | spin_unlock(&dq_state_lock); | ||
2215 | out_lock: | ||
2216 | mutex_unlock(&dqopt->dqonoff_mutex); | ||
2217 | return ret; | ||
2218 | } | ||
2219 | |||
2220 | load_quota: | ||
2221 | return vfs_load_quota_inode(inode, type, format_id, flags); | ||
2222 | } | ||
2223 | EXPORT_SYMBOL(vfs_quota_enable); | ||
2224 | |||
2225 | /* | ||
2226 | * This function is used when filesystem needs to initialize quotas | ||
2227 | * during mount time. | ||
2228 | */ | ||
2229 | int vfs_quota_on_mount(struct super_block *sb, char *qf_name, | ||
2230 | int format_id, int type) | ||
2231 | { | ||
2232 | struct dentry *dentry; | ||
2233 | int error; | ||
2234 | |||
2235 | dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name)); | ||
2236 | if (IS_ERR(dentry)) | ||
2237 | return PTR_ERR(dentry); | ||
2238 | |||
2239 | if (!dentry->d_inode) { | ||
2240 | error = -ENOENT; | ||
2241 | goto out; | ||
2242 | } | ||
2243 | |||
2244 | error = security_quota_on(dentry); | ||
2245 | if (!error) | ||
2246 | error = vfs_load_quota_inode(dentry->d_inode, type, format_id, | ||
2247 | DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); | ||
2248 | |||
2249 | out: | ||
2250 | dput(dentry); | ||
2251 | return error; | ||
2252 | } | ||
2253 | EXPORT_SYMBOL(vfs_quota_on_mount); | ||
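/*
 * Illustrative sketch, not part of the original file: enabling quota from a
 * filesystem's own mount path via vfs_quota_on_mount() above.  The quota
 * file name "aquota.user" and the QFMT_VFS_V0 format id are just example
 * choices.
 */
static int example_enable_usrquota_on_mount(struct super_block *sb)
{
	return vfs_quota_on_mount(sb, "aquota.user", QFMT_VFS_V0, USRQUOTA);
}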
2254 | |||
2255 | /* Wrapper to turn on quotas when remounting rw */ | ||
2256 | int vfs_dq_quota_on_remount(struct super_block *sb) | ||
2257 | { | ||
2258 | int cnt; | ||
2259 | int ret = 0, err; | ||
2260 | |||
2261 | if (!sb->s_qcop || !sb->s_qcop->quota_on) | ||
2262 | return -ENOSYS; | ||
2263 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
2264 | err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); | ||
2265 | if (err < 0 && !ret) | ||
2266 | ret = err; | ||
2267 | } | ||
2268 | return ret; | ||
2269 | } | ||
2270 | EXPORT_SYMBOL(vfs_dq_quota_on_remount); | ||
2271 | |||
2272 | static inline qsize_t qbtos(qsize_t blocks) | ||
2273 | { | ||
2274 | return blocks << QIF_DQBLKSIZE_BITS; | ||
2275 | } | ||
2276 | |||
2277 | static inline qsize_t stoqb(qsize_t space) | ||
2278 | { | ||
2279 | return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS; | ||
2280 | } | ||
2281 | |||
2282 | /* Generic routine for getting common part of quota structure */ | ||
2283 | static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2284 | { | ||
2285 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2286 | |||
2287 | spin_lock(&dq_data_lock); | ||
2288 | di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit); | ||
2289 | di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit); | ||
2290 | di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace; | ||
2291 | di->dqb_ihardlimit = dm->dqb_ihardlimit; | ||
2292 | di->dqb_isoftlimit = dm->dqb_isoftlimit; | ||
2293 | di->dqb_curinodes = dm->dqb_curinodes; | ||
2294 | di->dqb_btime = dm->dqb_btime; | ||
2295 | di->dqb_itime = dm->dqb_itime; | ||
2296 | di->dqb_valid = QIF_ALL; | ||
2297 | spin_unlock(&dq_data_lock); | ||
2298 | } | ||
2299 | |||
2300 | int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, | ||
2301 | struct if_dqblk *di) | ||
2302 | { | ||
2303 | struct dquot *dquot; | ||
2304 | |||
2305 | dquot = dqget(sb, id, type); | ||
2306 | if (!dquot) | ||
2307 | return -ESRCH; | ||
2308 | do_get_dqblk(dquot, di); | ||
2309 | dqput(dquot); | ||
2310 | |||
2311 | return 0; | ||
2312 | } | ||
2313 | EXPORT_SYMBOL(vfs_get_dqblk); | ||
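/*
 * Illustrative sketch, not part of the original file: reading one user's
 * usage through vfs_get_dqblk() above.  Note the asymmetry visible in
 * do_get_dqblk(): dqb_curspace is reported in bytes while the block limits
 * are in quota blocks.  The printk() reporting is arbitrary.
 */
static void example_print_usage(struct super_block *sb, qid_t uid)
{
	struct if_dqblk di;

	if (vfs_get_dqblk(sb, USRQUOTA, uid, &di))
		return;		/* -ESRCH: no quota for this id */

	printk(KERN_INFO "uid %u: %llu bytes used (block hard limit %llu), "
	       "%llu inodes used (inode hard limit %llu)\n", uid,
	       (unsigned long long)di.dqb_curspace,
	       (unsigned long long)di.dqb_bhardlimit,
	       (unsigned long long)di.dqb_curinodes,
	       (unsigned long long)di.dqb_ihardlimit);
}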
2314 | |||
2315 | /* Generic routine for setting common part of quota structure */ | ||
2316 | static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di) | ||
2317 | { | ||
2318 | struct mem_dqblk *dm = &dquot->dq_dqb; | ||
2319 | int check_blim = 0, check_ilim = 0; | ||
2320 | struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type]; | ||
2321 | |||
2322 | if ((di->dqb_valid & QIF_BLIMITS && | ||
2323 | (di->dqb_bhardlimit > dqi->dqi_maxblimit || | ||
2324 | di->dqb_bsoftlimit > dqi->dqi_maxblimit)) || | ||
2325 | (di->dqb_valid & QIF_ILIMITS && | ||
2326 | (di->dqb_ihardlimit > dqi->dqi_maxilimit || | ||
2327 | di->dqb_isoftlimit > dqi->dqi_maxilimit))) | ||
2328 | return -ERANGE; | ||
2329 | |||
2330 | spin_lock(&dq_data_lock); | ||
2331 | if (di->dqb_valid & QIF_SPACE) { | ||
2332 | dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace; | ||
2333 | check_blim = 1; | ||
2334 | __set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); | ||
2335 | } | ||
2336 | if (di->dqb_valid & QIF_BLIMITS) { | ||
2337 | dm->dqb_bsoftlimit = qbtos(di->dqb_bsoftlimit); | ||
2338 | dm->dqb_bhardlimit = qbtos(di->dqb_bhardlimit); | ||
2339 | check_blim = 1; | ||
2340 | __set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); | ||
2341 | } | ||
2342 | if (di->dqb_valid & QIF_INODES) { | ||
2343 | dm->dqb_curinodes = di->dqb_curinodes; | ||
2344 | check_ilim = 1; | ||
2345 | __set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); | ||
2346 | } | ||
2347 | if (di->dqb_valid & QIF_ILIMITS) { | ||
2348 | dm->dqb_isoftlimit = di->dqb_isoftlimit; | ||
2349 | dm->dqb_ihardlimit = di->dqb_ihardlimit; | ||
2350 | check_ilim = 1; | ||
2351 | __set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); | ||
2352 | } | ||
2353 | if (di->dqb_valid & QIF_BTIME) { | ||
2354 | dm->dqb_btime = di->dqb_btime; | ||
2355 | check_blim = 1; | ||
2356 | __set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); | ||
2357 | } | ||
2358 | if (di->dqb_valid & QIF_ITIME) { | ||
2359 | dm->dqb_itime = di->dqb_itime; | ||
2360 | check_ilim = 1; | ||
2361 | __set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); | ||
2362 | } | ||
2363 | |||
2364 | if (check_blim) { | ||
2365 | if (!dm->dqb_bsoftlimit || | ||
2366 | dm->dqb_curspace < dm->dqb_bsoftlimit) { | ||
2367 | dm->dqb_btime = 0; | ||
2368 | clear_bit(DQ_BLKS_B, &dquot->dq_flags); | ||
2369 | } else if (!(di->dqb_valid & QIF_BTIME)) | ||
2370 | /* Set grace only if user hasn't provided his own... */ | ||
2371 | dm->dqb_btime = get_seconds() + dqi->dqi_bgrace; | ||
2372 | } | ||
2373 | if (check_ilim) { | ||
2374 | if (!dm->dqb_isoftlimit || | ||
2375 | dm->dqb_curinodes < dm->dqb_isoftlimit) { | ||
2376 | dm->dqb_itime = 0; | ||
2377 | clear_bit(DQ_INODES_B, &dquot->dq_flags); | ||
2378 | } else if (!(di->dqb_valid & QIF_ITIME)) | ||
2379 | /* Set grace only if user hasn't provided his own... */ | ||
2380 | dm->dqb_itime = get_seconds() + dqi->dqi_igrace; | ||
2381 | } | ||
2382 | if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || | ||
2383 | dm->dqb_isoftlimit) | ||
2384 | clear_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2385 | else | ||
2386 | set_bit(DQ_FAKE_B, &dquot->dq_flags); | ||
2387 | spin_unlock(&dq_data_lock); | ||
2388 | mark_dquot_dirty(dquot); | ||
2389 | |||
2390 | return 0; | ||
2391 | } | ||
2392 | |||
2393 | int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, | ||
2394 | struct if_dqblk *di) | ||
2395 | { | ||
2396 | struct dquot *dquot; | ||
2397 | int rc; | ||
2398 | |||
2399 | dquot = dqget(sb, id, type); | ||
2400 | if (!dquot) { | ||
2401 | rc = -ESRCH; | ||
2402 | goto out; | ||
2403 | } | ||
2404 | rc = do_set_dqblk(dquot, di); | ||
2405 | dqput(dquot); | ||
2406 | out: | ||
2407 | return rc; | ||
2408 | } | ||
2409 | EXPORT_SYMBOL(vfs_set_dqblk); | ||
2410 | |||
2411 | /* Generic routine for getting common part of quota file information */ | ||
2412 | int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2413 | { | ||
2414 | struct mem_dqinfo *mi; | ||
2415 | |||
2416 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2417 | if (!sb_has_quota_active(sb, type)) { | ||
2418 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2419 | return -ESRCH; | ||
2420 | } | ||
2421 | mi = sb_dqopt(sb)->info + type; | ||
2422 | spin_lock(&dq_data_lock); | ||
2423 | ii->dqi_bgrace = mi->dqi_bgrace; | ||
2424 | ii->dqi_igrace = mi->dqi_igrace; | ||
2425 | ii->dqi_flags = mi->dqi_flags & DQF_MASK; | ||
2426 | ii->dqi_valid = IIF_ALL; | ||
2427 | spin_unlock(&dq_data_lock); | ||
2428 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2429 | return 0; | ||
2430 | } | ||
2431 | EXPORT_SYMBOL(vfs_get_dqinfo); | ||
2432 | |||
2433 | /* Generic routine for setting common part of quota file information */ | ||
2434 | int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) | ||
2435 | { | ||
2436 | struct mem_dqinfo *mi; | ||
2437 | int err = 0; | ||
2438 | |||
2439 | mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2440 | if (!sb_has_quota_active(sb, type)) { | ||
2441 | err = -ESRCH; | ||
2442 | goto out; | ||
2443 | } | ||
2444 | mi = sb_dqopt(sb)->info + type; | ||
2445 | spin_lock(&dq_data_lock); | ||
2446 | if (ii->dqi_valid & IIF_BGRACE) | ||
2447 | mi->dqi_bgrace = ii->dqi_bgrace; | ||
2448 | if (ii->dqi_valid & IIF_IGRACE) | ||
2449 | mi->dqi_igrace = ii->dqi_igrace; | ||
2450 | if (ii->dqi_valid & IIF_FLAGS) | ||
2451 | mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | | ||
2452 | (ii->dqi_flags & DQF_MASK); | ||
2453 | spin_unlock(&dq_data_lock); | ||
2454 | mark_info_dirty(sb, type); | ||
2455 | /* Force write to disk */ | ||
2456 | sb->dq_op->write_info(sb, type); | ||
2457 | out: | ||
2458 | mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); | ||
2459 | return err; | ||
2460 | } | ||
2461 | EXPORT_SYMBOL(vfs_set_dqinfo); | ||
2462 | |||
2463 | struct quotactl_ops vfs_quotactl_ops = { | ||
2464 | .quota_on = vfs_quota_on, | ||
2465 | .quota_off = vfs_quota_off, | ||
2466 | .quota_sync = vfs_quota_sync, | ||
2467 | .get_info = vfs_get_dqinfo, | ||
2468 | .set_info = vfs_set_dqinfo, | ||
2469 | .get_dqblk = vfs_get_dqblk, | ||
2470 | .set_dqblk = vfs_set_dqblk | ||
2471 | }; | ||
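/*
 * Illustrative sketch, not part of the original file: the two super_block
 * hooks a filesystem sets at mount time to use the generic code in this
 * file.  example_wire_up_quota() would typically be called from the
 * filesystem's fill_super(), which is an assumption of the example.
 */
static void example_wire_up_quota(struct super_block *sb)
{
	sb->dq_op = &dquot_operations;		/* per-inode accounting hooks */
	sb->s_qcop = &vfs_quotactl_ops;		/* quotactl(2) entry points */
}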
2472 | |||
2473 | static ctl_table fs_dqstats_table[] = { | ||
2474 | { | ||
2475 | .ctl_name = FS_DQ_LOOKUPS, | ||
2476 | .procname = "lookups", | ||
2477 | .data = &dqstats.lookups, | ||
2478 | .maxlen = sizeof(int), | ||
2479 | .mode = 0444, | ||
2480 | .proc_handler = &proc_dointvec, | ||
2481 | }, | ||
2482 | { | ||
2483 | .ctl_name = FS_DQ_DROPS, | ||
2484 | .procname = "drops", | ||
2485 | .data = &dqstats.drops, | ||
2486 | .maxlen = sizeof(int), | ||
2487 | .mode = 0444, | ||
2488 | .proc_handler = &proc_dointvec, | ||
2489 | }, | ||
2490 | { | ||
2491 | .ctl_name = FS_DQ_READS, | ||
2492 | .procname = "reads", | ||
2493 | .data = &dqstats.reads, | ||
2494 | .maxlen = sizeof(int), | ||
2495 | .mode = 0444, | ||
2496 | .proc_handler = &proc_dointvec, | ||
2497 | }, | ||
2498 | { | ||
2499 | .ctl_name = FS_DQ_WRITES, | ||
2500 | .procname = "writes", | ||
2501 | .data = &dqstats.writes, | ||
2502 | .maxlen = sizeof(int), | ||
2503 | .mode = 0444, | ||
2504 | .proc_handler = &proc_dointvec, | ||
2505 | }, | ||
2506 | { | ||
2507 | .ctl_name = FS_DQ_CACHE_HITS, | ||
2508 | .procname = "cache_hits", | ||
2509 | .data = &dqstats.cache_hits, | ||
2510 | .maxlen = sizeof(int), | ||
2511 | .mode = 0444, | ||
2512 | .proc_handler = &proc_dointvec, | ||
2513 | }, | ||
2514 | { | ||
2515 | .ctl_name = FS_DQ_ALLOCATED, | ||
2516 | .procname = "allocated_dquots", | ||
2517 | .data = &dqstats.allocated_dquots, | ||
2518 | .maxlen = sizeof(int), | ||
2519 | .mode = 0444, | ||
2520 | .proc_handler = &proc_dointvec, | ||
2521 | }, | ||
2522 | { | ||
2523 | .ctl_name = FS_DQ_FREE, | ||
2524 | .procname = "free_dquots", | ||
2525 | .data = &dqstats.free_dquots, | ||
2526 | .maxlen = sizeof(int), | ||
2527 | .mode = 0444, | ||
2528 | .proc_handler = &proc_dointvec, | ||
2529 | }, | ||
2530 | { | ||
2531 | .ctl_name = FS_DQ_SYNCS, | ||
2532 | .procname = "syncs", | ||
2533 | .data = &dqstats.syncs, | ||
2534 | .maxlen = sizeof(int), | ||
2535 | .mode = 0444, | ||
2536 | .proc_handler = &proc_dointvec, | ||
2537 | }, | ||
2538 | #ifdef CONFIG_PRINT_QUOTA_WARNING | ||
2539 | { | ||
2540 | .ctl_name = FS_DQ_WARNINGS, | ||
2541 | .procname = "warnings", | ||
2542 | .data = &flag_print_warnings, | ||
2543 | .maxlen = sizeof(int), | ||
2544 | .mode = 0644, | ||
2545 | .proc_handler = &proc_dointvec, | ||
2546 | }, | ||
2547 | #endif | ||
2548 | { .ctl_name = 0 }, | ||
2549 | }; | ||
2550 | |||
2551 | static ctl_table fs_table[] = { | ||
2552 | { | ||
2553 | .ctl_name = FS_DQSTATS, | ||
2554 | .procname = "quota", | ||
2555 | .mode = 0555, | ||
2556 | .child = fs_dqstats_table, | ||
2557 | }, | ||
2558 | { .ctl_name = 0 }, | ||
2559 | }; | ||
2560 | |||
2561 | static ctl_table sys_table[] = { | ||
2562 | { | ||
2563 | .ctl_name = CTL_FS, | ||
2564 | .procname = "fs", | ||
2565 | .mode = 0555, | ||
2566 | .child = fs_table, | ||
2567 | }, | ||
2568 | { .ctl_name = 0 }, | ||
2569 | }; | ||
2570 | |||
2571 | static int __init dquot_init(void) | ||
2572 | { | ||
2573 | int i; | ||
2574 | unsigned long nr_hash, order; | ||
2575 | |||
2576 | printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); | ||
2577 | |||
2578 | register_sysctl_table(sys_table); | ||
2579 | |||
2580 | dquot_cachep = kmem_cache_create("dquot", | ||
2581 | sizeof(struct dquot), sizeof(unsigned long) * 4, | ||
2582 | (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| | ||
2583 | SLAB_MEM_SPREAD|SLAB_PANIC), | ||
2584 | NULL); | ||
2585 | |||
2586 | order = 0; | ||
2587 | dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order); | ||
2588 | if (!dquot_hash) | ||
2589 | panic("Cannot create dquot hash table"); | ||
2590 | |||
2591 | /* Find power-of-two hlist_heads which can fit into allocation */ | ||
2592 | nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); | ||
2593 | dq_hash_bits = 0; | ||
2594 | do { | ||
2595 | dq_hash_bits++; | ||
2596 | } while (nr_hash >> dq_hash_bits); | ||
2597 | dq_hash_bits--; | ||
2598 | |||
2599 | nr_hash = 1UL << dq_hash_bits; | ||
2600 | dq_hash_mask = nr_hash - 1; | ||
2601 | for (i = 0; i < nr_hash; i++) | ||
2602 | INIT_HLIST_HEAD(dquot_hash + i); | ||
2603 | |||
2604 | printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", | ||
2605 | nr_hash, order, (PAGE_SIZE << order)); | ||
2606 | |||
2607 | register_shrinker(&dqcache_shrinker); | ||
2608 | |||
2609 | #ifdef CONFIG_QUOTA_NETLINK_INTERFACE | ||
2610 | 	if (genl_register_family(&quota_genl_family) != 0) | ||
2611 | printk(KERN_ERR | ||
2612 | "VFS: Failed to create quota netlink interface.\n"); | ||
2613 | #endif | ||
2614 | |||
2615 | return 0; | ||
2616 | } | ||
2617 | module_init(dquot_init); | ||