Diffstat (limited to 'fs/xfs/linux-2.6/xfs_super.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 912
1 file changed, 912 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
new file mode 100644
index 000000000000..53dc658cafa6
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -0,0 +1,912 @@
/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"

#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"
#include "xfs_ioctl32.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/writeback.h>

STATIC struct quotactl_ops linvfs_qops;
STATIC struct super_operations linvfs_sops;
STATIC kmem_zone_t *linvfs_inode_zone;

STATIC struct xfs_mount_args *
xfs_args_allocate(
	struct super_block	*sb)
{
	struct xfs_mount_args	*args;

	args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
	args->logbufs = args->logbufsize = -1;
	strncpy(args->fsname, sb->s_id, MAXNAMELEN);

	/* Copy the already-parsed mount(2) flags we're interested in */
	if (sb->s_flags & MS_NOATIME)
		args->flags |= XFSMNT_NOATIME;
	if (sb->s_flags & MS_DIRSYNC)
		args->flags |= XFSMNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		args->flags |= XFSMNT_WSYNC;

	/* Default to 32 bit inodes on Linux all the time */
	args->flags |= XFSMNT_32BITINODES;

	return args;
}

__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_prepare_write does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
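/*
 * Worked example of the arithmetic above: with 4K pages on a 32-bit
 * kernel without CONFIG_LBD, a 1K-block filesystem gives
 * pagefactor = 4096 >> (12 - 10) = 1024 and bitshift = 31, so the limit
 * is (1024 << 31) - 1 = 2^41 - 1, just under 2TiB.  With page-sized 4K
 * blocks, pagefactor = 4096 and the limit is 2^43 - 1 (the ~8Tb wrap
 * noted above).  On 64-bit kernels the result is simply 2^63 - 1.
 */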
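/*
 * Pick the Linux inode, file and address-space operations for an inode
 * based on its file type.  A vnode still of type VNON has no usable
 * type, so it is marked bad and subsequent lookups fail cleanly.
 */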
STATIC __inline__ void
xfs_set_inodeops(
	struct inode		*inode)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);

	if (vp->v_type == VNON) {
		vn_mark_bad(vp);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &linvfs_file_inode_operations;
		inode->i_fop = &linvfs_file_operations;
		inode->i_mapping->a_ops = &linvfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &linvfs_dir_inode_operations;
		inode->i_fop = &linvfs_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &linvfs_symlink_inode_operations;
		if (inode->i_blocks)
			inode->i_mapping->a_ops = &linvfs_aops;
	} else {
		inode->i_op = &linvfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
}

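/*
 * Copy the current state of the XFS on-disk inode (the di_* fields)
 * into the generic Linux inode: mode, link count, ownership, device
 * numbers, size, block counts, timestamps, and the immutable/append/
 * sync/noatime flag bits.  Clears VMODIFIED once the two are in sync.
 */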
STATIC __inline__ void
xfs_revalidate_inode(
	xfs_mount_t		*mp,
	vnode_t			*vp,
	xfs_inode_t		*ip)
{
	struct inode		*inode = LINVFS_GET_IP(vp);

	inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
	inode->i_nlink = ip->i_d.di_nlink;
	inode->i_uid = ip->i_d.di_uid;
	inode->i_gid = ip->i_d.di_gid;
	if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
		inode->i_rdev = 0;
	} else {
		xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
		inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
	}
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_blocks =
		XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
	inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	vp->v_flag &= ~VMODIFIED;
}

void
xfs_initialize_vnode(
	bhv_desc_t		*bdp,
	vnode_t			*vp,
	bhv_desc_t		*inode_bhv,
	int			unlock)
{
	xfs_inode_t		*ip = XFS_BHVTOI(inode_bhv);
	struct inode		*inode = LINVFS_GET_IP(vp);

	if (!inode_bhv->bd_vobj) {
		vp->v_vfsp = bhvtovfs(bdp);
		bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
	}

	/*
	 * We need to set the ops vectors, and unlock the inode, but if
	 * we have been called during the new inode create process, it is
	 * too early to fill in the Linux inode.  We will get called a
	 * second time once the inode is properly set up, and then we can
	 * finish our work.
	 */
	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
		vp->v_type = IFTOVT(ip->i_d.di_mode);
		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
		xfs_set_inodeops(inode);

		ip->i_flags &= ~XFS_INEW;
		barrier();

		unlock_new_inode(inode);
	}
}

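/*
 * Note on error conventions: core XFS code returns positive errno
 * values; routines at the boundary to generic Linux code (such as
 * xfs_blkdev_get below, and the super_operations handlers further
 * down) negate the value on return, since Linux expects negative
 * errnos.
 */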
int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = open_bdev_excl(name, 0, mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_excl(bdev);
}


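/*
 * The Linux inode is embedded in the XFS vnode, so allocating an inode
 * really means carving a whole vnode out of the linvfs_icache slab and
 * handing the embedded inode back to the VFS; LINVFS_GET_IP() and
 * LINVFS_GET_VP() convert between the two views of the same object.
 */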
STATIC struct inode *
linvfs_alloc_inode(
	struct super_block	*sb)
{
	vnode_t			*vp;

	vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone,
				kmem_flags_convert(KM_SLEEP));
	if (!vp)
		return NULL;
	return LINVFS_GET_IP(vp);
}

STATIC void
linvfs_destroy_inode(
	struct inode		*inode)
{
	kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode));
}

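/*
 * Slab constructor: runs when a new vnode object is added to the
 * cache, initialising the embedded Linux inode once per slab object so
 * repeated alloc/free cycles of the same object can skip that work.
 */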
STATIC void
init_once(
	void			*data,
	kmem_cache_t		*cachep,
	unsigned long		flags)
{
	vnode_t			*vp = (vnode_t *)data;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
		      SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(LINVFS_GET_IP(vp));
}

STATIC int
init_inodecache( void )
{
	linvfs_inode_zone = kmem_cache_create("linvfs_icache",
				sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
				init_once, NULL);
	if (linvfs_inode_zone == NULL)
		return -ENOMEM;
	return 0;
}

STATIC void
destroy_inodecache( void )
{
	if (kmem_cache_destroy(linvfs_inode_zone))
		printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__);
}

/*
 * Attempt to flush the inode, this will actually fail
 * if the inode is pinned, but we dirty the inode again
 * at the point when it is unpinned after a log write,
 * since this is when the inode itself becomes flushable.
 */
STATIC int
linvfs_write_inode(
	struct inode		*inode,
	int			sync)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error = 0, flags = FLUSH_INODE;

	if (vp) {
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		if (sync)
			flags |= FLUSH_SYNC;
		VOP_IFLUSH(vp, flags, error);
		if (error == EAGAIN) {
			if (sync)
				VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
			else
				error = 0;
		}
	}

	return -error;
}

STATIC void
linvfs_clear_inode(
	struct inode		*inode)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);

	if (vp) {
		vn_rele(vp);
		vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
		/*
		 * Do all our cleanup, and remove this vnode.
		 */
		vn_remove(vp);
	}
}


/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct vfs	*vfs,
	void		*data,
	void		(*syncer)(vfs_t *, void *))
{
	vfs_sync_work_t	*work;

	work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_vfs = vfs;
	spin_lock(&vfs->vfs_sync_lock);
	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
	spin_unlock(&vfs->vfs_sync_lock);
	wake_up_process(vfs->vfs_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	vfs_t		*vfs,
	void		*inode)
{
	filemap_flush(((struct inode *)inode)->i_mapping);
	iput((struct inode *)inode);
}

void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
	delay(HZ/2);
}

/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	vfs_t		*vfs,
	void		*inode)
{
	sync_blockdev(vfs->vfs_super->s_bdev);
	iput((struct inode *)inode);
}

void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);

	igrab(inode);
	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
	delay(HZ/2);
	/*
	 * A synchronous log force pushes committed transactions to disk,
	 * unpinning inodes so their dirty data can actually be written
	 * back and the reserved space reclaimed.
	 */
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
STATIC void
vfs_sync_worker(
	vfs_t		*vfsp,
	void		*unused)
{
	int		error;

	if (!(vfsp->vfs_flag & VFS_RDONLY))
		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
	vfsp->vfs_sync_seq++;
	wmb();
	wake_up(&vfsp->vfs_wait_single_sync_task);
}

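/*
 * Main loop of the xfssyncd kernel thread: sleep for the configured
 * interval (or until explicitly woken), requeue the periodic sync work
 * item when the timeout expires, then drain the per-vfs work list.
 * The thread announces itself via vfs_sync_task and exits once
 * linvfs_stop_syncd() sets VFS_UMOUNT.
 */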
STATIC int
xfssyncd(
	void			*arg)
{
	long			timeleft;
	vfs_t			*vfsp = (vfs_t *) arg;
	struct list_head	tmp;
	struct vfs_sync_work	*work, *n;

	daemonize("xfssyncd");

	vfsp->vfs_sync_work.w_vfs = vfsp;
	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
	vfsp->vfs_sync_task = current;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	INIT_LIST_HEAD(&tmp);
	timeleft = (xfs_syncd_centisecs * HZ) / 100;
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		timeleft = schedule_timeout(timeleft);
		/* swsusp */
		try_to_freeze(PF_FREEZE);
		if (vfsp->vfs_flag & VFS_UMOUNT)
			break;

		spin_lock(&vfsp->vfs_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
			if (!timeleft)
				timeleft = (xfs_syncd_centisecs * HZ) / 100;
			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
			list_add_tail(&vfsp->vfs_sync_work.w_list,
					&vfsp->vfs_sync_list);
		}
		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&vfsp->vfs_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(vfsp, work->w_data);
			list_del(&work->w_list);
			if (work == &vfsp->vfs_sync_work)
				continue;
			kmem_free(work, sizeof(struct vfs_sync_work));
		}
	}

	vfsp->vfs_sync_task = NULL;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	return 0;
}

STATIC int
linvfs_start_syncd(
	vfs_t			*vfsp)
{
	int			pid;

	pid = kernel_thread(xfssyncd, (void *) vfsp,
			CLONE_VM | CLONE_FS | CLONE_FILES);
	if (pid < 0)
		return -pid;
	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
	return 0;
}

STATIC void
linvfs_stop_syncd(
	vfs_t			*vfsp)
{
	vfsp->vfs_flag |= VFS_UMOUNT;
	wmb();

	wake_up_process(vfsp->vfs_sync_task);
	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
}

STATIC void
linvfs_put_super(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	linvfs_stop_syncd(vfsp);
	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
	if (!error)
		VFS_UNMOUNT(vfsp, 0, NULL, error);
	if (error) {
		printk("XFS unmount got error %d\n", error);
		printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
		return;
	}

	vfs_deallocate(vfsp);
}

STATIC void
linvfs_write_super(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	if (sb->s_flags & MS_RDONLY) {
		sb->s_dirt = 0; /* paranoia */
		return;
	}
	/* Push the log and superblock a little */
	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
	sb->s_dirt = 0;
}

STATIC int
linvfs_sync_super(
	struct super_block	*sb,
	int			wait)
{
	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
	int		error;
	int		flags = SYNC_FSDATA;

	if (wait)
		flags |= SYNC_WAIT;

	VFS_SYNC(vfsp, flags, NULL, error);
	sb->s_dirt = 0;

	if (unlikely(laptop_mode)) {
		int	prev_sync_seq = vfsp->vfs_sync_seq;

		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		wake_up_process(vfsp->vfs_sync_task);
		/*
		 * We have to wait for the sync iteration to complete.
		 * If we don't, the disk activity caused by the sync
		 * will come after the sync is completed, and that
		 * triggers another sync from laptop mode.
		 */
		wait_event(vfsp->vfs_wait_single_sync_task,
				vfsp->vfs_sync_seq != prev_sync_seq);
	}

	return -error;
}

STATIC int
linvfs_statfs(
	struct super_block	*sb,
	struct kstatfs		*statp)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_STATVFS(vfsp, statp, NULL, error);
	return -error;
}

STATIC int
linvfs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	struct xfs_mount_args	*args = xfs_args_allocate(sb);
	int			error;

	VFS_PARSEARGS(vfsp, options, args, 1, error);
	if (!error)
		VFS_MNTUPDATE(vfsp, flags, args, error);
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC void
linvfs_freeze_fs(
	struct super_block	*sb)
{
	VFS_FREEZE(LINVFS_GET_VFS(sb));
}

STATIC int
linvfs_show_options(
	struct seq_file		*m,
	struct vfsmount		*mnt)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
	int			error;

	VFS_SHOWARGS(vfsp, m, error);
	return error;
}

STATIC int
linvfs_getxstate(
	struct super_block	*sb,
	struct fs_quota_stat	*fqs)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
	return -error;
}

STATIC int
linvfs_setxstate(
	struct super_block	*sb,
	unsigned int		flags,
	int			op)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error;

	VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
	return -error;
}

STATIC int
linvfs_getxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error, getmode;

	getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA;
	VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
	return -error;
}

STATIC int
linvfs_setxquota(
	struct super_block	*sb,
	int			type,
	qid_t			id,
	struct fs_disk_quota	*fdq)
{
	struct vfs		*vfsp = LINVFS_GET_VFS(sb);
	int			error, setmode;

	setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM;
	VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
	return -error;
}

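/*
 * Set up a new mount: allocate the vfs and mount args, parse the mount
 * options, perform the actual VFS_MOUNT, derive the Linux superblock
 * fields (block size, maximum file offset, magic) from statvfs data,
 * instantiate the root dentry, and start the xfssyncd thread.  The
 * fail_* labels unwind these steps in reverse order.
 */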
STATIC int
linvfs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	vnode_t			*rootvp;
	struct vfs		*vfsp = vfs_allocate();
	struct xfs_mount_args	*args = xfs_args_allocate(sb);
	struct kstatfs		statvfs;
	int			error, error2;

	vfsp->vfs_super = sb;
	LINVFS_SET_VFS(sb, vfsp);
	if (sb->s_flags & MS_RDONLY)
		vfsp->vfs_flag |= VFS_RDONLY;
	bhv_insert_all_vfsops(vfsp);

	VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	sb_min_blocksize(sb, BBSIZE);
#ifdef CONFIG_XFS_EXPORT
	sb->s_export_op = &linvfs_export_ops;
#endif
	sb->s_qcop = &linvfs_qops;
	sb->s_op = &linvfs_sops;

	VFS_MOUNT(vfsp, args, NULL, error);
	if (error) {
		bhv_remove_all_vfsops(vfsp, 1);
		goto fail_vfsop;
	}

	VFS_STATVFS(vfsp, &statvfs, NULL, error);
	if (error)
		goto fail_unmount;

	sb->s_dirt = 1;
	sb->s_magic = statvfs.f_type;
	sb->s_blocksize = statvfs.f_bsize;
	sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	VFS_ROOT(vfsp, &rootvp, error);
	if (error)
		goto fail_unmount;

	sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
	if (!sb->s_root) {
		error = ENOMEM;
		goto fail_vnrele;
	}
	if (is_bad_inode(sb->s_root->d_inode)) {
		error = EINVAL;
		goto fail_vnrele;
	}
	if ((error = linvfs_start_syncd(vfsp)))
		goto fail_vnrele;
	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

	kmem_free(args, sizeof(*args));
	return 0;

fail_vnrele:
	if (sb->s_root) {
		dput(sb->s_root);
		sb->s_root = NULL;
	} else {
		VN_RELE(rootvp);
	}

fail_unmount:
	VFS_UNMOUNT(vfsp, 0, NULL, error2);

fail_vfsop:
	vfs_deallocate(vfsp);
	kmem_free(args, sizeof(*args));
	return -error;
}

STATIC struct super_block *
linvfs_get_sb(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super);
}

STATIC struct super_operations linvfs_sops = {
	.alloc_inode		= linvfs_alloc_inode,
	.destroy_inode		= linvfs_destroy_inode,
	.write_inode		= linvfs_write_inode,
	.clear_inode		= linvfs_clear_inode,
	.put_super		= linvfs_put_super,
	.write_super		= linvfs_write_super,
	.sync_fs		= linvfs_sync_super,
	.write_super_lockfs	= linvfs_freeze_fs,
	.statfs			= linvfs_statfs,
	.remount_fs		= linvfs_remount,
	.show_options		= linvfs_show_options,
};

STATIC struct quotactl_ops linvfs_qops = {
	.get_xstate		= linvfs_getxstate,
	.set_xstate		= linvfs_setxstate,
	.get_xquota		= linvfs_getxquota,
	.set_xquota		= linvfs_setxquota,
};

STATIC struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.get_sb			= linvfs_get_sb,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};


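/*
 * Module initialisation: bring the subsystems up in dependency order
 * (inode cache, pagebuf, core XFS, quota) and register the filesystem
 * last; the undo_* labels tear things down in reverse on failure.
 */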
STATIC int __init
init_xfs_fs( void )
{
	int			error;
	struct sysinfo		si;
	static char		message[] __initdata = KERN_INFO \
		XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";

	printk(message);

	si_meminfo(&si);
	xfs_physmem = si.totalram;

	ktrace_init(64);

	error = init_inodecache();
	if (error < 0)
		goto undo_inodecache;

	error = pagebuf_init();
	if (error < 0)
		goto undo_pagebuf;

	vn_init();
	xfs_init();
	uuid_init();
	vfs_initquota();

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto undo_register;
	XFS_DM_INIT(&xfs_fs_type);
	return 0;

undo_register:
	pagebuf_terminate();

undo_pagebuf:
	destroy_inodecache();

undo_inodecache:
	return error;
}

STATIC void __exit
exit_xfs_fs( void )
{
	vfs_exitquota();
	XFS_DM_EXIT(&xfs_fs_type);
	unregister_filesystem(&xfs_fs_type);
	xfs_cleanup();
	pagebuf_terminate();
	destroy_inodecache();
	ktrace_uninit();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");