author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /fs/inode.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c  1377
1 file changed, 1377 insertions, 0 deletions
diff --git a/fs/inode.c b/fs/inode.c
new file mode 100644
index 000000000000..af8fd78d2099
--- /dev/null
+++ b/fs/inode.c
@@ -0,0 +1,1377 @@
/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>

/*
 * This is needed for the following functions:
 *  - inode_has_buffers
 *  - invalidate_inode_buffers
 *  - fsync_bdev
 *  - invalidate_bdev
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS      i_hash_shift
#define I_HASHMASK      i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable;

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);

/*
 * iprune_sem provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
DECLARE_MUTEX(iprune_sem);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static kmem_cache_t *inode_cachep;

static struct inode *alloc_inode(struct super_block *sb)
{
        static struct address_space_operations empty_aops;
        static struct inode_operations empty_iops;
        static struct file_operations empty_fops;
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);

        if (inode) {
                struct address_space * const mapping = &inode->i_data;

                inode->i_sb = sb;
                inode->i_blkbits = sb->s_blocksize_bits;
                inode->i_flags = 0;
                atomic_set(&inode->i_count, 1);
                inode->i_op = &empty_iops;
                inode->i_fop = &empty_fops;
                inode->i_nlink = 1;
                atomic_set(&inode->i_writecount, 0);
                inode->i_size = 0;
                inode->i_blocks = 0;
                inode->i_bytes = 0;
                inode->i_generation = 0;
#ifdef CONFIG_QUOTA
                memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
                inode->i_pipe = NULL;
                inode->i_bdev = NULL;
                inode->i_cdev = NULL;
                inode->i_rdev = 0;
                inode->i_security = NULL;
                inode->dirtied_when = 0;
                if (security_inode_alloc(inode)) {
                        if (inode->i_sb->s_op->destroy_inode)
                                inode->i_sb->s_op->destroy_inode(inode);
                        else
                                kmem_cache_free(inode_cachep, (inode));
                        return NULL;
                }

                mapping->a_ops = &empty_aops;
                mapping->host = inode;
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
                mapping->assoc_mapping = NULL;
                mapping->backing_dev_info = &default_backing_dev_info;

                /*
                 * If the block_device provides a backing_dev_info for client
                 * inodes then use that.  Otherwise the inode shares the
                 * bdev's backing_dev_info.
                 */
                if (sb->s_bdev) {
                        struct backing_dev_info *bdi;

                        bdi = sb->s_bdev->bd_inode_backing_dev_info;
                        if (!bdi)
                                bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
                        mapping->backing_dev_info = bdi;
                }
                memset(&inode->u, 0, sizeof(inode->u));
                inode->i_mapping = mapping;
        }
        return inode;
}

void destroy_inode(struct inode *inode)
{
        if (inode_has_buffers(inode))
                BUG();
        security_inode_free(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                kmem_cache_free(inode_cachep, (inode));
}

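/*
 * Illustrative sketch (not part of this commit): the usage pattern the
 * s_op->alloc_inode/destroy_inode hooks anticipate.  A filesystem embeds
 * the VFS inode inside its own per-inode structure and hands the VFS the
 * embedded part.  struct myfs_inode_info and myfs_inode_cachep are
 * hypothetical names.
 */
struct myfs_inode_info {
        unsigned long   flags;          /* fs-private state */
        struct inode    vfs_inode;      /* embedded VFS inode */
};

static kmem_cache_t *myfs_inode_cachep;

static struct inode *myfs_alloc_inode(struct super_block *sb)
{
        struct myfs_inode_info *mi;

        mi = kmem_cache_alloc(myfs_inode_cachep, SLAB_KERNEL);
        if (!mi)
                return NULL;
        return &mi->vfs_inode;
}

static void myfs_destroy_inode(struct inode *inode)
{
        kmem_cache_free(myfs_inode_cachep,
                        container_of(inode, struct myfs_inode_info, vfs_inode));
}
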

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache take advantage of that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_devices);
        sema_init(&inode->i_sem, 1);
        init_rwsem(&inode->i_alloc_sem);
        INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
        rwlock_init(&inode->i_data.tree_lock);
        spin_lock_init(&inode->i_data.i_mmap_lock);
        INIT_LIST_HEAD(&inode->i_data.private_list);
        spin_lock_init(&inode->i_data.private_lock);
        INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
        INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
        spin_lock_init(&inode->i_lock);
        i_size_ordered_init(inode);
}

EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct inode *inode = (struct inode *) foo;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(inode);
}

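/*
 * Illustrative sketch (not part of this commit): a filesystem using the
 * embedding pattern runs the same constructor trick over its own slab,
 * calling inode_init_once() on the embedded VFS inode.  Names reuse the
 * hypothetical myfs_inode_info above.
 */
static void myfs_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct myfs_inode_info *mi = foo;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(&mi->vfs_inode);
}

static int __init myfs_init_inodecache(void)
{
        myfs_inode_cachep = kmem_cache_create("myfs_inode_cache",
                                              sizeof(struct myfs_inode_info),
                                              0, SLAB_RECLAIM_ACCOUNT,
                                              myfs_init_once, NULL);
        return myfs_inode_cachep ? 0 : -ENOMEM;
}
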
/*
 * inode_lock must be held
 */
void __iget(struct inode *inode)
{
        if (atomic_read(&inode->i_count)) {
                atomic_inc(&inode->i_count);
                return;
        }
        atomic_inc(&inode->i_count);
        if (!(inode->i_state & (I_DIRTY|I_LOCK)))
                list_move(&inode->i_list, &inode_in_use);
        inodes_stat.nr_unused--;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
        might_sleep();
        invalidate_inode_buffers(inode);

        if (inode->i_data.nrpages)
                BUG();
        if (!(inode->i_state & I_FREEING))
                BUG();
        if (inode->i_state & I_CLEAR)
                BUG();
        wait_on_inode(inode);
        DQUOT_DROP(inode);
        if (inode->i_sb && inode->i_sb->s_op->clear_inode)
                inode->i_sb->s_op->clear_inode(inode);
        if (inode->i_bdev)
                bd_forget(inode);
        if (inode->i_cdev)
                cd_forget(inode);
        inode->i_state = I_CLEAR;
}

EXPORT_SYMBOL(clear_inode);

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        int nr_disposed = 0;

        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_entry(head->next, struct inode, i_list);
                list_del(&inode->i_list);

                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                clear_inode(inode);
                destroy_inode(inode);
                nr_disposed++;
        }
        spin_lock(&inode_lock);
        inodes_stat.nr_inodes -= nr_disposed;
        spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
        struct list_head *next;
        int busy = 0, count = 0;

        next = head->next;
        for (;;) {
                struct list_head *tmp = next;
                struct inode *inode;

                /*
                 * We can reschedule here without worrying about the list's
                 * consistency because the per-sb list of inodes must not
                 * change during umount anymore, and because iprune_sem keeps
                 * shrink_icache_memory() away.
                 */
                cond_resched_lock(&inode_lock);

                next = next->next;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_sb_list);
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
                        hlist_del_init(&inode->i_hash);
                        list_del(&inode->i_sb_list);
                        list_move(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        count++;
                        continue;
                }
                busy = 1;
        }
        /* only unused inodes may be cached with i_count zero */
        inodes_stat.nr_unused -= count;
        return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non-zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block *sb)
{
        int busy;
        LIST_HEAD(throw_away);

        down(&iprune_sem);
        spin_lock(&inode_lock);
        busy = invalidate_list(&sb->s_inodes, &throw_away);
        spin_unlock(&inode_lock);

        dispose_list(&throw_away);
        up(&iprune_sem);

        return busy;
}

EXPORT_SYMBOL(invalidate_inodes);

int __invalidate_device(struct block_device *bdev, int do_sync)
{
        struct super_block *sb;
        int res;

        if (do_sync)
                fsync_bdev(bdev);

        res = 0;
        sb = get_super(bdev);
        if (sb) {
                /*
                 * no need to lock the super, get_super holds the
                 * read semaphore so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * held).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb);
                drop_super(sb);
        }
        invalidate_bdev(bdev, 0);
        return res;
}

EXPORT_SYMBOL(__invalidate_device);

static int can_unuse(struct inode *inode)
{
        if (inode->i_state)
                return 0;
        if (inode_has_buffers(inode))
                return 0;
        if (atomic_read(&inode->i_count))
                return 0;
        if (inode->i_data.nrpages)
                return 0;
        return 1;
}

/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 * a temporary list and then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 * time in testing on a 4-way.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
        LIST_HEAD(freeable);
        int nr_pruned = 0;
        int nr_scanned;
        unsigned long reap = 0;

        down(&iprune_sem);
        spin_lock(&inode_lock);
        for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
                struct inode *inode;

                if (list_empty(&inode_unused))
                        break;

                inode = list_entry(inode_unused.prev, struct inode, i_list);

                if (inode->i_state || atomic_read(&inode->i_count)) {
                        list_move(&inode->i_list, &inode_unused);
                        continue;
                }
                if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                        __iget(inode);
                        spin_unlock(&inode_lock);
                        if (remove_inode_buffers(inode))
                                reap += invalidate_inode_pages(&inode->i_data);
                        iput(inode);
                        spin_lock(&inode_lock);

                        if (inode != list_entry(inode_unused.next,
                                                struct inode, i_list))
                                continue;       /* wrong inode or list_empty */
                        if (!can_unuse(inode))
                                continue;
                }
                hlist_del_init(&inode->i_hash);
                list_del_init(&inode->i_sb_list);
                list_move(&inode->i_list, &freeable);
                inode->i_state |= I_FREEING;
                nr_pruned++;
        }
        inodes_stat.nr_unused -= nr_pruned;
        spin_unlock(&inode_lock);

        dispose_list(&freeable);
        up(&iprune_sem);

        if (current_is_kswapd())
                mod_page_state(kswapd_inodesteal, reap);
        else
                mod_page_state(pginodesteal, reap);
}

/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, unsigned int gfp_mask)
{
        if (nr) {
                /*
                 * Nasty deadlock avoidance.  We may hold various FS locks,
                 * and we don't want to recurse into the FS that called us
                 * in clear_inode() and friends..
                 */
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                prune_icache(nr);
        }
        return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

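/*
 * Worked example (editorial note, not from this commit): with the default
 * vfs_cache_pressure of 100 the expression above reduces to nr_unused, so
 * 10000 unused inodes are reported as 10000 reclaimable objects; raising
 * the sysctl to 200 would report 20000 and make the VM shrink the icache
 * twice as aggressively.
 */
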
static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode *find_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), void *data)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each (node, head) {
                inode = hlist_entry(node, struct inode, i_hash);
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                if (inode->i_state & (I_FREEING|I_CLEAR)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                break;
        }
        return node ? inode : NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
        struct hlist_node *node;
        struct inode *inode = NULL;

repeat:
        hlist_for_each (node, head) {
                inode = hlist_entry(node, struct inode, i_hash);
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                if (inode->i_state & (I_FREEING|I_CLEAR)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                break;
        }
        return node ? inode : NULL;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock.
 */
struct inode *new_inode(struct super_block *sb)
{
        static unsigned long last_ino;
        struct inode *inode;

        spin_lock_prefetch(&inode_lock);

        inode = alloc_inode(sb);
        if (inode) {
                spin_lock(&inode_lock);
                inodes_stat.nr_inodes++;
                list_add(&inode->i_list, &inode_in_use);
                list_add(&inode->i_sb_list, &sb->s_inodes);
                inode->i_ino = ++last_ino;
                inode->i_state = 0;
                spin_unlock(&inode_lock);
        }
        return inode;
}

EXPORT_SYMBOL(new_inode);

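/*
 * Illustrative sketch (not part of this commit): how an in-memory
 * filesystem might mint a fresh inode with new_inode().  myfs_get_inode
 * is a hypothetical helper.
 */
static struct inode *myfs_get_inode(struct super_block *sb, int mode)
{
        struct inode *inode = new_inode(sb);

        if (inode) {
                inode->i_mode = mode;
                inode->i_uid = current->fsuid;
                inode->i_gid = current->fsgid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        }
        return inode;
}
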
void unlock_new_inode(struct inode *inode)
{
        /*
         * This is special!  We do not need the spinlock
         * when clearing I_LOCK, because we're guaranteed
         * that nobody else tries to do anything about the
         * state of the inode when it is locked, as we
         * just created it (so there can be no old holders
         * that haven't tested I_LOCK).
         */
        inode->i_state &= ~(I_LOCK|I_NEW);
        wake_up_inode(inode);
}

EXPORT_SYMBOL(unlock_new_inode);

/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *      -- rmk@arm.uk.linux.org
 */
static struct inode *get_new_inode(struct super_block *sb, struct hlist_head *head, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *data)
{
        struct inode *inode;

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        inodes_stat.nr_inodes++;
                        list_add(&inode->i_list, &inode_in_use);
                        list_add(&inode->i_sb_list, &sb->s_inodes);
                        hlist_add_head(&inode->i_hash, head);
                        inode->i_state = I_LOCK|I_NEW;
                        spin_unlock(&inode_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us.  Use the old inode instead of the one we just
                 * allocated.
                 */
                __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;

set_failed:
        spin_unlock(&inode_lock);
        destroy_inode(inode);
        return NULL;
}

/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode *get_new_inode_fast(struct super_block *sb, struct hlist_head *head, unsigned long ino)
{
        struct inode *inode;

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        inodes_stat.nr_inodes++;
                        list_add(&inode->i_list, &inode_in_use);
                        list_add(&inode->i_sb_list, &sb->s_inodes);
                        hlist_add_head(&inode->i_hash, head);
                        inode->i_state = I_LOCK|I_NEW;
                        spin_unlock(&inode_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us.  Use the old inode instead of the one we just
                 * allocated.
                 */
                __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}

static inline unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
        return tmp & I_HASHMASK;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        static ino_t counter;
        struct inode *inode;
        struct hlist_head *head;
        ino_t res;

        spin_lock(&inode_lock);
retry:
        if (counter > max_reserved) {
                head = inode_hashtable + hash(sb, counter);
                res = counter++;
                inode = find_inode_fast(sb, head, res);
                if (!inode) {
                        spin_unlock(&inode_lock);
                        return res;
                }
        } else {
                counter = max_reserved + 1;
        }
        goto retry;
}

EXPORT_SYMBOL(iunique);

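/*
 * Illustrative sketch (not part of this commit): a pseudo-filesystem with
 * no stable on-disk inode numbers can combine new_inode() and iunique();
 * the reserved ceiling of 2 below is an arbitrary example value.
 */
static struct inode *myfs_new_anon_inode(struct super_block *sb)
{
        struct inode *inode = new_inode(sb);

        if (inode)
                inode->i_ino = iunique(sb, 2);
        return inode;
}
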
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode_lock);
        if (!(inode->i_state & I_FREEING))
                __iget(inode);
        else
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        spin_unlock(&inode_lock);
        return inode;
}

EXPORT_SYMBOL(igrab);

/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb: super block of file system to search
 * @head: the head of the list to search
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
static inline struct inode *ifind(struct super_block *sb,
                struct hlist_head *head, int (*test)(struct inode *, void *),
                void *data)
{
        struct inode *inode;

        spin_lock(&inode_lock);
        inode = find_inode(sb, head, test, data);
        if (inode) {
                __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        spin_unlock(&inode_lock);
        return NULL;
}

/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb: super block of file system to search
 * @head: head of the list to search
 * @ino: inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
static inline struct inode *ifind_fast(struct super_block *sb,
                struct hlist_head *head, unsigned long ino)
{
        struct inode *inode;

        spin_lock(&inode_lock);
        inode = find_inode_fast(sb, head, ino);
        if (inode) {
                __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        spin_unlock(&inode_lock);
        return NULL;
}

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        return ifind(sb, head, test, data);
}

EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 * This is for file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        return ifind_fast(sb, head, ino);
}

EXPORT_SYMBOL(ilookup);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * This is iget() without the read_inode() portion of get_new_inode().
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                int (*test)(struct inode *, void *),
                int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        inode = ifind(sb, head, test, data);
        if (inode)
                return inode;
        /*
         * get_new_inode() will do the right thing, re-trying the search
         * in case it had to block at any point.
         */
        return get_new_inode(sb, head, test, set, data);
}

EXPORT_SYMBOL(iget5_locked);

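/*
 * Illustrative sketch (not part of this commit): the caller-side pattern
 * iget5_locked() expects.  struct myfs_key, myfs_test(), myfs_set() and
 * myfs_iget5() are hypothetical; a real filesystem keys on whatever
 * uniquely identifies its inodes beyond the inode number.
 */
struct myfs_key {
        unsigned long   objid;
        __u32           generation;
};

static int myfs_test(struct inode *inode, void *data)
{
        struct myfs_key *key = data;

        /* runs under inode_lock: compare only, never sleep */
        return inode->i_ino == key->objid &&
                inode->i_generation == key->generation;
}

static int myfs_set(struct inode *inode, void *data)
{
        struct myfs_key *key = data;

        inode->i_ino = key->objid;
        inode->i_generation = key->generation;
        return 0;
}

static struct inode *myfs_iget5(struct super_block *sb, struct myfs_key *key)
{
        struct inode *inode;

        inode = iget5_locked(sb, key->objid, myfs_test, myfs_set, key);
        if (inode && (inode->i_state & I_NEW)) {
                /* brand new inode: fill it in, then publish it */
                inode->i_mode = S_IFREG | 0644;
                unlock_new_inode(inode);
        }
        return inode;
}
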
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * This is iget() without the read_inode() portion of get_new_inode_fast().
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        inode = ifind_fast(sb, head, ino);
        if (inode)
                return inode;
        /*
         * get_new_inode_fast() will do the right thing, re-trying the search
         * in case it had to block at any point.
         */
        return get_new_inode_fast(sb, head, ino);
}

EXPORT_SYMBOL(iget_locked);

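/*
 * Illustrative sketch (not part of this commit): the fast-path caller
 * pattern for inode-number-keyed filesystems.  myfs_read_inode() is a
 * hypothetical helper that fills the inode from disk.
 */
static void myfs_read_inode(struct inode *inode);       /* hypothetical */

static struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                myfs_read_inode(inode);         /* fill from disk */
                unlock_new_inode(inode);
        }
        return inode;
}
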
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *              inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_lock);
        hlist_add_head(&inode->i_hash, head);
        spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(__insert_inode_hash);

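/*
 * Editorial note: most callers reach this through the insert_inode_hash()
 * wrapper in <linux/fs.h>, which simply hashes by inode number:
 *
 *      static inline void insert_inode_hash(struct inode *inode)
 *      {
 *              __insert_inode_hash(inode, inode->i_ino);
 *      }
 */
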
/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode_lock);
}

EXPORT_SYMBOL(remove_inode_hash);

/*
 * Tell the filesystem that this inode is no longer of any interest and should
 * be completely destroyed.
 *
 * We leave the inode in the inode hash table until *after* the filesystem's
 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
 * instigate) will always find up-to-date information either in the hash or on
 * disk.
 *
 * I_FREEING is set so that no-one will take a new reference to the inode while
 * it is being deleted.
 */
void generic_delete_inode(struct inode *inode)
{
        struct super_operations *op = inode->i_sb->s_op;

        list_del_init(&inode->i_list);
        list_del_init(&inode->i_sb_list);
        inode->i_state |= I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);

        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);

        security_inode_delete(inode);

        if (op->delete_inode) {
                void (*delete)(struct inode *) = op->delete_inode;
                if (!is_bad_inode(inode))
                        DQUOT_INIT(inode);
                /* s_op->delete_inode internally recalls clear_inode() */
                delete(inode);
        } else
                clear_inode(inode);
        spin_lock(&inode_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode_lock);
        wake_up_inode(inode);
        if (inode->i_state != I_CLEAR)
                BUG();
        destroy_inode(inode);
}

EXPORT_SYMBOL(generic_delete_inode);

static void generic_forget_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (!hlist_unhashed(&inode->i_hash)) {
                if (!(inode->i_state & (I_DIRTY|I_LOCK)))
                        list_move(&inode->i_list, &inode_unused);
                inodes_stat.nr_unused++;
                spin_unlock(&inode_lock);
                if (!sb || (sb->s_flags & MS_ACTIVE))
                        return;
                write_inode_now(inode, 1);
                spin_lock(&inode_lock);
                inodes_stat.nr_unused--;
                hlist_del_init(&inode->i_hash);
        }
        list_del_init(&inode->i_list);
        list_del_init(&inode->i_sb_list);
        inode->i_state |= I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);
        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);
        destroy_inode(inode);
}

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
static void generic_drop_inode(struct inode *inode)
{
        if (!inode->i_nlink)
                generic_delete_inode(inode);
        else
                generic_forget_inode(inode);
}

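/*
 * Illustrative sketch (not part of this commit): a filesystem can steer
 * this decision itself via s_op->drop_inode.  For example, a purely
 * in-memory filesystem that never wants unused inodes cached could wire
 * up generic_delete_inode directly (myfs_sops and the myfs_* hooks are
 * the hypothetical names from the sketches above):
 */
static struct super_operations myfs_sops = {
        .alloc_inode    = myfs_alloc_inode,
        .destroy_inode  = myfs_destroy_inode,
        .drop_inode     = generic_delete_inode, /* never cache unused inodes */
};
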
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
        struct super_operations *op = inode->i_sb->s_op;
        void (*drop)(struct inode *) = generic_drop_inode;

        if (op && op->drop_inode)
                drop = op->drop_inode;
        drop(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (inode) {
                struct super_operations *op = inode->i_sb->s_op;

                BUG_ON(inode->i_state == I_CLEAR);

                if (op && op->put_inode)
                        op->put_inode(inode);

                if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
                        iput_final(inode);
        }
}

EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the requested block of the file.
 * That is, asked for block 4 of inode 1, the function will return
 * the disk block (relative to the start of the disk) that holds that
 * block of the file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;

        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}

EXPORT_SYMBOL(bmap);

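/*
 * Editorial note: this is the primitive behind the FIBMAP ioctl.  A
 * worked example: if a file's fourth block lives in disk block 1234,
 * bmap(inode, 4) returns 1234; a return of 0 means "no mapping" (a
 * hole, or a filesystem without a bmap method).
 */
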
/**
 * update_atime - update the access time
 * @inode: inode accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void update_atime(struct inode *inode)
{
        struct timespec now;

        if (IS_NOATIME(inode))
                return;
        if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
                return;
        if (IS_RDONLY(inode))
                return;

        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_atime, &now)) {
                inode->i_atime = now;
                mark_inode_dirty_sync(inode);
        }
}

EXPORT_SYMBOL(update_atime);

/**
 * inode_update_time - update mtime and ctime
 * @inode: inode accessed
 * @ctime_too: update ctime too
 *
 * Update the mtime on an inode and mark it for writeback.
 * When ctime_too is specified, update the ctime too.
 */

void inode_update_time(struct inode *inode, int ctime_too)
{
        struct timespec now;
        int sync_it = 0;

        if (IS_NOCMTIME(inode))
                return;
        if (IS_RDONLY(inode))
                return;

        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = 1;
        inode->i_mtime = now;

        if (ctime_too) {
                if (!timespec_equal(&inode->i_ctime, &now))
                        sync_it = 1;
                inode->i_ctime = now;
        }
        if (sync_it)
                mark_inode_dirty_sync(inode);
}

EXPORT_SYMBOL(inode_update_time);

int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}

EXPORT_SYMBOL(inode_needs_sync);

/*
 * Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

/* Function back in dquot.c */
int remove_inode_dquot_ref(struct inode *, int, struct list_head *);

void remove_dquot_ref(struct super_block *sb, int type,
                        struct list_head *tofree_head)
{
        struct inode *inode;

        if (!sb->dq_op)
                return; /* nothing to do */
        spin_lock(&inode_lock); /* This lock is for inodes code */

        /*
         * We don't have to lock against quota code - the IS_NOQUOTA test
         * is just for speedup...
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list)
                if (!IS_NOQUOTA(inode))
                        remove_inode_dquot_ref(inode, type, tofree_head);

        spin_unlock(&inode_lock);
}

#endif

int inode_wait(void *word)
{
        schedule();
        return 0;
}

/*
 * If we try to find an inode in the inode hash while it is being deleted, we
 * have to wait until the filesystem completes its deletion before reporting
 * that it isn't found.  This is because iget will immediately call
 * ->read_inode, and we want to be sure that evidence of the deletion is found
 * by ->read_inode.
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);

        /*
         * I_FREEING and I_CLEAR are cleared in process context under
         * inode_lock, so we have to give the tasks who would clear them
         * a chance to run and acquire inode_lock.
         */
        if (!(inode->i_state & I_LOCK)) {
                spin_unlock(&inode_lock);
                yield();
                spin_lock(&inode_lock);
                return;
        }
        wq = bit_waitqueue(&inode->i_state, __I_LOCK);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode_lock);
        schedule();
        finish_wait(wq, &wait.wait);
        spin_lock(&inode_lock);
}

void wake_up_inode(struct inode *inode)
{
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_LOCK);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
        int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(unsigned long mempages)
{
        int loop;

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
                                         0, SLAB_PANIC, init_once, NULL);
        set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        0,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0);

        for (loop = 0; loop < (1 << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &def_fifo_fops;
        else if (S_ISSOCK(mode))
                inode->i_fop = &bad_sock_fops;
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
                       mode);
}
EXPORT_SYMBOL(init_special_inode);
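
/*
 * Illustrative sketch (not part of this commit): a filesystem's mknod()
 * method is the typical caller of init_special_inode().  myfs_mknod and
 * myfs_get_inode are the hypothetical helpers from the sketches above.
 */
static int myfs_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t dev)
{
        struct inode *inode = myfs_get_inode(dir->i_sb, mode);

        if (!inode)
                return -ENOMEM;
        if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
                init_special_inode(inode, mode, dev);
        d_instantiate(dentry, inode);   /* attach inode to the new dentry */
        return 0;
}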