author     Steve French <sfrench@us.ibm.com>	2008-02-15 16:06:08 -0500
committer  Steve French <sfrench@us.ibm.com>	2008-02-15 16:06:08 -0500
commit     0a3abcf75bf391fec4e32356ab5ddb8f5d2e6b41 (patch)
tree       b80b1d344ec24cad28b057ef803cebac9434be01 /fs
parent     70eff55d2d979cca700aa6906494f0c474f3f7ff (diff)
parent     101142c37be8e5af9b847860219217e6b958c739 (diff)

Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6

Diffstat (limited to 'fs')
 389 files changed, 10005 insertions, 8244 deletions
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index b364da70ff28..dfebdbe7440e 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -175,7 +175,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	if (!wnames)
 		return ERR_PTR(-ENOMEM);
 
-	for (d = dentry, i = n; i >= 0; i--, d = d->d_parent)
+	for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent)
 		wnames[i] = (char *) d->d_name.name;
 
 	clone = 1;
@@ -183,7 +183,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	while (i < n) {
 		l = min(n - i, P9_MAXWELEM);
 		fid = p9_client_walk(fid, l, &wnames[i], clone);
-		if (!fid) {
+		if (IS_ERR(fid)) {
 			kfree(wnames);
 			return fid;
 		}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index fbb12dadba83..9b0f0222e8bb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -3,7 +3,7 @@
  *
  * This file contains functions assisting in mapping VFS to 9P2000
  *
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
  * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -31,7 +31,6 @@
 #include <linux/idr.h>
 #include <net/9p/9p.h>
 #include <net/9p/transport.h>
-#include <net/9p/conn.h>
 #include <net/9p/client.h>
 #include "v9fs.h"
 #include "v9fs_vfs.h"
@@ -43,11 +42,11 @@
 
 enum {
 	/* Options that take integer arguments */
-	Opt_debug, Opt_msize, Opt_dfltuid, Opt_dfltgid, Opt_afid,
+	Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
 	/* String options */
 	Opt_uname, Opt_remotename, Opt_trans,
 	/* Options that take no arguments */
-	Opt_legacy, Opt_nodevmap,
+	Opt_nodevmap,
 	/* Cache options */
 	Opt_cache_loose,
 	/* Access options */
@@ -58,14 +57,11 @@ enum {
 
 static match_table_t tokens = {
 	{Opt_debug, "debug=%x"},
-	{Opt_msize, "msize=%u"},
 	{Opt_dfltuid, "dfltuid=%u"},
 	{Opt_dfltgid, "dfltgid=%u"},
 	{Opt_afid, "afid=%u"},
 	{Opt_uname, "uname=%s"},
 	{Opt_remotename, "aname=%s"},
-	{Opt_trans, "trans=%s"},
-	{Opt_legacy, "noextend"},
 	{Opt_nodevmap, "nodevmap"},
 	{Opt_cache_loose, "cache=loose"},
 	{Opt_cache_loose, "loose"},
@@ -85,16 +81,14 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 	char *options;
 	substring_t args[MAX_OPT_ARGS];
 	char *p;
-	int option;
-	int ret;
+	int option = 0;
 	char *s, *e;
+	int ret;
 
 	/* setup defaults */
-	v9ses->maxdata = 8192;
 	v9ses->afid = ~0;
 	v9ses->debug = 0;
 	v9ses->cache = 0;
-	v9ses->trans = v9fs_default_trans();
 
 	if (!v9ses->options)
 		return;
@@ -106,7 +100,8 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 			continue;
 		token = match_token(p, tokens, args);
 		if (token < Opt_uname) {
-			if ((ret = match_int(&args[0], &option)) < 0) {
+			ret = match_int(&args[0], &option);
+			if (ret < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					"integer field, but no integer?\n");
 				continue;
@@ -119,9 +114,7 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 			p9_debug_level = option;
 #endif
 			break;
-		case Opt_msize:
-			v9ses->maxdata = option;
-			break;
+
 		case Opt_dfltuid:
 			v9ses->dfltuid = option;
 			break;
@@ -131,18 +124,12 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
 		case Opt_afid:
 			v9ses->afid = option;
 			break;
-		case Opt_trans:
-			v9ses->trans = v9fs_match_trans(&args[0]);
-			break;
 		case Opt_uname:
 			match_strcpy(v9ses->uname, &args[0]);
 			break;
 		case Opt_remotename:
 			match_strcpy(v9ses->aname, &args[0]);
 			break;
-		case Opt_legacy:
-			v9ses->flags &= ~V9FS_EXTENDED;
-			break;
 		case Opt_nodevmap:
 			v9ses->nodev = 1;
 			break;
@@ -185,7 +172,6 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 		  const char *dev_name, char *data)
 {
 	int retval = -EINVAL;
-	struct p9_trans *trans = NULL;
 	struct p9_fid *fid;
 
 	v9ses->uname = __getname();
@@ -207,24 +193,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 	v9ses->options = kstrdup(data, GFP_KERNEL);
 	v9fs_parse_options(v9ses);
 
-	if (v9ses->trans == NULL) {
-		retval = -EPROTONOSUPPORT;
-		P9_DPRINTK(P9_DEBUG_ERROR,
-				"No transport defined or default transport\n");
-		goto error;
-	}
-
-	trans = v9ses->trans->create(dev_name, v9ses->options);
-	if (IS_ERR(trans)) {
-		retval = PTR_ERR(trans);
-		trans = NULL;
-		goto error;
-	}
-	if ((v9ses->maxdata+P9_IOHDRSZ) > v9ses->trans->maxsize)
-		v9ses->maxdata = v9ses->trans->maxsize-P9_IOHDRSZ;
-
-	v9ses->clnt = p9_client_create(trans, v9ses->maxdata+P9_IOHDRSZ,
-					v9fs_extended(v9ses));
+	v9ses->clnt = p9_client_create(dev_name, v9ses->options);
 
 	if (IS_ERR(v9ses->clnt)) {
 		retval = PTR_ERR(v9ses->clnt);
@@ -236,6 +205,8 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
 	if (!v9ses->clnt->dotu)
 		v9ses->flags &= ~V9FS_EXTENDED;
 
+	v9ses->maxdata = v9ses->clnt->msize;
+
 	/* for legacy mode, fall back to V9FS_ACCESS_ANY */
 	if (!v9fs_extended(v9ses) &&
 		((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index db4b4193f2e2..7d3a1018db52 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -1,7 +1,7 @@
 /*
  * V9FS definitions.
  *
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
  * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,6 @@
 
 struct v9fs_session_info {
 	/* options */
-	unsigned int maxdata;
 	unsigned char flags;	/* session flags */
 	unsigned char nodev;	/* set to 1 if no disable device mapping */
 	unsigned short debug;	/* debug level */
@@ -38,10 +37,10 @@ struct v9fs_session_info {
 	char *options;	/* copy of mount options */
 	char *uname;	/* user name to mount as */
 	char *aname;	/* name of remote hierarchy being mounted */
+	unsigned int maxdata;	/* max data for client interface */
 	unsigned int dfltuid;	/* default uid/muid for legacy support */
 	unsigned int dfltgid;	/* default gid for legacy support */
 	u32 uid;	/* if ACCESS_SINGLE, the uid that has access */
-	struct p9_trans_module *trans;	/* 9p transport */
 	struct p9_client *clnt;	/* 9p client */
 	struct dentry *debugfs_dir;
 };
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index ba4b1caa9c43..a616fff8906d 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -184,7 +184,7 @@ static const struct file_operations v9fs_cached_file_operations = {
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
-	.mmap = generic_file_mmap,
+	.mmap = generic_file_readonly_mmap,
 };
 
 const struct file_operations v9fs_file_operations = {
@@ -194,5 +194,5 @@ const struct file_operations v9fs_file_operations = {
 	.open = v9fs_file_open,
 	.release = v9fs_dir_release,
 	.lock = v9fs_file_lock,
-	.mmap = generic_file_mmap,
+	.mmap = generic_file_readonly_mmap,
 };
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 23581bcb599b..6a28842052ea 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -77,6 +77,8 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 			res |= P9_DMSETUID;
 		if ((mode & S_ISGID) == S_ISGID)
 			res |= P9_DMSETGID;
+		if ((mode & S_ISVTX) == S_ISVTX)
+			res |= P9_DMSETVTX;
 		if ((mode & P9_DMLINK))
 			res |= P9_DMLINK;
 	}
@@ -119,6 +121,9 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
 
 		if ((mode & P9_DMSETGID) == P9_DMSETGID)
 			res |= S_ISGID;
+
+		if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
+			res |= S_ISVTX;
 	}
 
 	return res;
@@ -568,7 +573,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 	v9ses = v9fs_inode2v9ses(dir);
 	dfid = v9fs_fid_lookup(dentry->d_parent);
 	if (IS_ERR(dfid))
-		return ERR_PTR(PTR_ERR(dfid));
+		return ERR_CAST(dfid);
 
 	name = (char *) dentry->d_name.name;
 	fid = p9_client_walk(dfid, 1, &name, 1);
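The v9fs_vfs_lookup() hunk above, and several later hunks in afs and affs, replace the return ERR_PTR(PTR_ERR(p)) idiom with ERR_CAST(p). Both forward an errno encoded in a pointer through a return value of a different pointer type; ERR_CAST() just does it without unpacking and repacking the value. Below is a simplified, self-contained userspace model of the error-pointer helpers this relies on (the real definitions live in <linux/err.h>; the sample_fid/sample_dentry types and lookup functions are stand-ins, not kernel symbols):

/* Simplified model of the kernel's error-pointer helpers; illustrative only. */
#include <stdio.h>
#include <errno.h>
#include <string.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST(): pass an error pointer through as a differently typed
 * error pointer, replacing the ERR_PTR(PTR_ERR(p)) idiom. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct sample_fid    { int id; };
struct sample_dentry { struct sample_fid *fid; };

static struct sample_fid *sample_fid_lookup(int want_error)
{
	static struct sample_fid f = { .id = 42 };

	if (want_error)
		return ERR_PTR(-ENOENT);	/* errno encoded in the pointer */
	return &f;
}

/* Mirrors the shape of the lookup change: propagate the fid error as a
 * dentry error without round-tripping through a long. */
static struct sample_dentry *sample_lookup(int want_error)
{
	static struct sample_dentry d;
	struct sample_fid *fid = sample_fid_lookup(want_error);

	if (IS_ERR(fid))
		return ERR_CAST(fid);
	d.fid = fid;
	return &d;
}

int main(void)
{
	struct sample_dentry *d = sample_lookup(1);

	if (IS_ERR(d))
		printf("lookup failed: %s\n", strerror(-PTR_ERR(d)));
	return 0;
}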
diff --git a/fs/Kconfig b/fs/Kconfig
index ea5b35947623..d7312825592b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -463,40 +463,18 @@ config OCFS2_DEBUG_FS
 	  this option for debugging only as it is likely to decrease
 	  performance of the filesystem.
 
-config MINIX_FS
-	tristate "Minix fs support"
-	help
-	  Minix is a simple operating system used in many classes about OS's.
-	  The minix file system (method to organize files on a hard disk
-	  partition or a floppy disk) was the original file system for Linux,
-	  but has been superseded by the second extended file system ext2fs.
-	  You don't want to use the minix file system on your hard disk
-	  because of certain built-in restrictions, but it is sometimes found
-	  on older Linux floppy disks. This option will enlarge your kernel
-	  by about 28 KB. If unsure, say N.
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called minix. Note that the file system of your root
-	  partition (the one containing the directory /) cannot be compiled as
-	  a module.
-
-config ROMFS_FS
-	tristate "ROM file system support"
-	---help---
-	  This is a very small read-only file system mainly intended for
-	  initial ram disks of installation disks, but it could be used for
-	  other read-only media as well. Read
-	  <file:Documentation/filesystems/romfs.txt> for details.
+endif # BLOCK
 
-	  To compile this file system support as a module, choose M here: the
-	  module will be called romfs. Note that the file system of your
-	  root partition (the one containing the directory /) cannot be a
-	  module.
-
-	  If you don't know whether you need it, then you don't need it:
-	  answer N.
+config DNOTIFY
+	bool "Dnotify support"
+	default y
+	help
+	  Dnotify is a directory-based per-fd file change notification system
+	  that uses signals to communicate events to user-space. There exist
+	  superior alternatives, but some applications may still rely on
+	  dnotify.
 
-endif
+	  If unsure, say Y.
 
 config INOTIFY
 	bool "Inotify file change notification support"
@@ -577,17 +555,6 @@ config QUOTACTL
 	depends on XFS_QUOTA || QUOTA
 	default y
 
-config DNOTIFY
-	bool "Dnotify support"
-	default y
-	help
-	  Dnotify is a directory-based per-fd file change notification system
-	  that uses signals to communicate events to user-space. There exist
-	  superior alternatives, but some applications may still rely on
-	  dnotify.
-
-	  If unsure, say Y.
-
 config AUTOFS_FS
 	tristate "Kernel automounter support"
 	help
@@ -713,7 +680,7 @@ config UDF_NLS
 	depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
 
 endmenu
-endif
+endif # BLOCK
 
 if BLOCK
 menu "DOS/FAT/NT Filesystems"
@@ -896,7 +863,7 @@ config NTFS_RW
 	  It is perfectly safe to say N here.
 
 endmenu
-endif
+endif # BLOCK
 
 menu "Pseudo filesystems"
 
@@ -1417,6 +1384,24 @@ config VXFS_FS
 	  To compile this as a module, choose M here: the module will be
 	  called freevxfs. If unsure, say N.
 
+config MINIX_FS
+	tristate "Minix file system support"
+	depends on BLOCK
+	help
+	  Minix is a simple operating system used in many classes about OS's.
+	  The minix file system (method to organize files on a hard disk
+	  partition or a floppy disk) was the original file system for Linux,
+	  but has been superseded by the second extended file system ext2fs.
+	  You don't want to use the minix file system on your hard disk
+	  because of certain built-in restrictions, but it is sometimes found
+	  on older Linux floppy disks. This option will enlarge your kernel
+	  by about 28 KB. If unsure, say N.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called minix. Note that the file system of your root
+	  partition (the one containing the directory /) cannot be compiled as
+	  a module.
+
 
 config HPFS_FS
 	tristate "OS/2 HPFS file system support"
@@ -1434,7 +1419,6 @@ config HPFS_FS
 	  module will be called hpfs. If unsure, say N.
 
 
-
 config QNX4FS_FS
 	tristate "QNX4 file system support (read only)"
 	depends on BLOCK
@@ -1461,6 +1445,22 @@ config QNX4FS_RW
 	  It's currently broken, so for now:
 	  answer N.
 
+config ROMFS_FS
+	tristate "ROM file system support"
+	depends on BLOCK
+	---help---
+	  This is a very small read-only file system mainly intended for
+	  initial ram disks of installation disks, but it could be used for
+	  other read-only media as well. Read
+	  <file:Documentation/filesystems/romfs.txt> for details.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called romfs. Note that the file system of your
+	  root partition (the one containing the directory /) cannot be a
+	  module.
+
+	  If you don't know whether you need it, then you don't need it:
+	  answer N.
 
 
 config SYSV_FS
@@ -1501,7 +1501,6 @@ config SYSV_FS
 	  If you haven't heard about all of this before, it's safe to say N.
 
 
-
 config UFS_FS
 	tristate "UFS file system support (read only)"
 	depends on BLOCK
@@ -1779,12 +1778,9 @@ config SUNRPC_GSS
 	tristate
 
 config SUNRPC_XPRT_RDMA
-	tristate "RDMA transport for sunrpc (EXPERIMENTAL)"
+	tristate
 	depends on SUNRPC && INFINIBAND && EXPERIMENTAL
-	default m
-	help
-	  Adds a client RPC transport for supporting kernel NFS over RDMA
-	  mounts, including Infiniband and iWARP. Experimental.
+	default SUNRPC && INFINIBAND
 
 config SUNRPC_BIND34
 	bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 7c3d5f923da1..b5c3b6114add 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -61,7 +61,8 @@ config BINFMT_SHARED_FLAT
 
 config BINFMT_AOUT
 	tristate "Kernel support for a.out and ECOFF binaries"
-	depends on X86_32 || ALPHA || ARM || M68K || SPARC32
+	depends on ARCH_SUPPORTS_AOUT && \
+		(X86_32 || ALPHA || ARM || M68K || SPARC32)
 	---help---
 	  A.out (Assembler.OUTput) is a set of formats for libraries and
 	  executables used in the earliest versions of UNIX. Linux used
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index b36695ae5c2e..9e421eeb672b 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -20,6 +20,8 @@
 #include <linux/vfs.h>
 #include <linux/parser.h>
 #include <linux/bitops.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -30,6 +32,9 @@
 #include "dir_f.h"
 #include "dir_fplus.h"
 
+#define ADFS_DEFAULT_OWNER_MASK S_IRWXU
+#define ADFS_DEFAULT_OTHER_MASK (S_IRWXG | S_IRWXO)
+
 void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
 {
 	char error_buf[128];
@@ -134,6 +139,22 @@ static void adfs_put_super(struct super_block *sb)
 	sb->s_fs_info = NULL;
 }
 
+static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+{
+	struct adfs_sb_info *asb = ADFS_SB(mnt->mnt_sb);
+
+	if (asb->s_uid != 0)
+		seq_printf(seq, ",uid=%u", asb->s_uid);
+	if (asb->s_gid != 0)
+		seq_printf(seq, ",gid=%u", asb->s_gid);
+	if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK)
+		seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
+	if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
+		seq_printf(seq, ",othmask=%o", asb->s_other_mask);
+
+	return 0;
+}
+
 enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err};
 
 static match_table_t tokens = {
@@ -259,6 +280,7 @@ static const struct super_operations adfs_sops = {
 	.put_super = adfs_put_super,
 	.statfs = adfs_statfs,
 	.remount_fs = adfs_remount,
+	.show_options = adfs_show_options,
 };
 
 static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_discrecord *dr)
@@ -344,8 +366,8 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* set default options */
 	asb->s_uid = 0;
 	asb->s_gid = 0;
-	asb->s_owner_mask = S_IRWXU;
-	asb->s_other_mask = S_IRWXG | S_IRWXO;
+	asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
+	asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
 
 	if (parse_options(sb, data))
 		goto error;
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 232c69493683..d5bd497ab9cb 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -174,7 +174,8 @@ extern void affs_put_inode(struct inode *inode);
 extern void affs_drop_inode(struct inode *inode);
 extern void affs_delete_inode(struct inode *inode);
 extern void affs_clear_inode(struct inode *inode);
-extern void affs_read_inode(struct inode *inode);
+extern struct inode *affs_iget(struct super_block *sb,
+			unsigned long ino);
 extern int affs_write_inode(struct inode *inode, int);
 extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
 
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index f4de4b98004f..805573005de6 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -170,9 +170,11 @@ affs_remove_link(struct dentry *dentry)
 	if (!link_bh)
 		goto done;
 
-	dir = iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
-	if (!dir)
+	dir = affs_iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
+	if (IS_ERR(dir)) {
+		retval = PTR_ERR(dir);
 		goto done;
+	}
 
 	affs_lock_dir(dir);
 	affs_fix_dcache(dentry, link_ino);
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 4609a6c13fe9..27fe6cbe43ae 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -15,20 +15,25 @@
 extern const struct inode_operations affs_symlink_inode_operations;
 extern struct timezone sys_tz;
 
-void
-affs_read_inode(struct inode *inode)
+struct inode *affs_iget(struct super_block *sb, unsigned long ino)
 {
-	struct super_block *sb = inode->i_sb;
 	struct affs_sb_info *sbi = AFFS_SB(sb);
 	struct buffer_head *bh;
 	struct affs_head *head;
 	struct affs_tail *tail;
+	struct inode *inode;
 	u32 block;
 	u32 size;
 	u32 prot;
 	u16 id;
 
-	pr_debug("AFFS: read_inode(%lu)\n",inode->i_ino);
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	pr_debug("AFFS: affs_iget(%lu)\n", inode->i_ino);
 
 	block = inode->i_ino;
 	bh = affs_bread(sb, block);
@@ -154,12 +159,13 @@ affs_read_inode(struct inode *inode)
 			sys_tz.tz_minuteswest * 60;
 	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
 	affs_brelse(bh);
-	return;
+	unlock_new_inode(inode);
+	return inode;
 
 bad_inode:
-	make_bad_inode(inode);
 	affs_brelse(bh);
-	return;
+	iget_failed(inode);
+	return ERR_PTR(-EIO);
 }
 
 int
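The affs_iget() conversion above, and the autofs_iget() one further down, follow the same recipe used across this merge to retire the superblock ->read_inode() hook: look the inode up with iget_locked(), fill it in only when I_NEW is set, publish it with unlock_new_inode(), and report failures with iget_failed() plus an ERR_PTR() return. A rough sketch of that shape, assuming a made-up filesystem; examplefs_iget() and examplefs_read_raw_inode() are placeholders, not symbols from this commit:

#include <linux/fs.h>
#include <linux/err.h>

/* Placeholder for the per-filesystem "read the on-disk inode" step;
 * affs_iget() does this with affs_bread() and affs_tail parsing. */
static int examplefs_read_raw_inode(struct inode *inode)
{
	inode->i_mode = S_IFDIR | 0755;	/* made-up contents */
	inode->i_nlink = 2;
	return 0;
}

struct inode *examplefs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);	/* existing inode, or a new locked one with I_NEW set */
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already initialised, just reuse it */

	err = examplefs_read_raw_inode(inode);
	if (err) {
		iget_failed(inode);	/* marks the inode bad and drops it */
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);	/* clears I_NEW and wakes any waiters */
	return inode;
}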
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index a42143ca0169..2218f1ee71ce 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -208,9 +208,8 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 	affs_lock_dir(dir);
 	bh = affs_find_entry(dir, dentry);
 	affs_unlock_dir(dir);
-	if (IS_ERR(bh)) {
-		return ERR_PTR(PTR_ERR(bh));
-	}
+	if (IS_ERR(bh))
+		return ERR_CAST(bh);
 	if (bh) {
 		u32 ino = bh->b_blocknr;
 
@@ -223,10 +222,9 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
 			ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
 		}
 		affs_brelse(bh);
-		inode = iget(sb, ino);
-		if (!inode) {
-			return ERR_PTR(-EACCES);
-		}
+		inode = affs_iget(sb, ino);
+		if (IS_ERR(inode))
+			return ERR_PTR(PTR_ERR(inode));
 	}
 	dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
 	d_add(dentry, inode);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index b53e5d0ec65c..d2dc047cb479 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -113,7 +113,6 @@ static void destroy_inodecache(void)
 static const struct super_operations affs_sops = {
 	.alloc_inode = affs_alloc_inode,
 	.destroy_inode = affs_destroy_inode,
-	.read_inode = affs_read_inode,
 	.write_inode = affs_write_inode,
 	.put_inode = affs_put_inode,
 	.drop_inode = affs_drop_inode,
@@ -123,6 +122,7 @@ static const struct super_operations affs_sops = {
 	.write_super = affs_write_super,
 	.statfs = affs_statfs,
 	.remount_fs = affs_remount,
+	.show_options = generic_show_options,
 };
 
 enum {
@@ -271,6 +271,9 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 	unsigned long mount_flags;
 	int tmp_flags;	/* fix remount prototype... */
 	u8 sig[4];
+	int ret = -EINVAL;
+
+	save_mount_options(sb, data);
 
 	pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
 
@@ -444,7 +447,12 @@ got_root:
 
 	/* set up enough so that it can read an inode */
 
-	root_inode = iget(sb, root_block);
+	root_inode = affs_iget(sb, root_block);
+	if (IS_ERR(root_inode)) {
+		ret = PTR_ERR(root_inode);
+		goto out_error_noinode;
+	}
+
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root) {
 		printk(KERN_ERR "AFFS: Get root inode failed\n");
@@ -461,12 +469,13 @@ got_root:
 out_error:
 	if (root_inode)
 		iput(root_inode);
+out_error_noinode:
 	kfree(sbi->s_bitmap);
 	affs_brelse(root_bh);
 	kfree(sbi->s_prefix);
 	kfree(sbi);
 	sb->s_fs_info = NULL;
-	return -EINVAL;
+	return ret;
 }
 
 static int
@@ -481,14 +490,21 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 	int root_block;
 	unsigned long mount_flags;
 	int res = 0;
+	char *new_opts = kstrdup(data, GFP_KERNEL);
 
 	pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
 
 	*flags |= MS_NODIRATIME;
 
-	if (!parse_options(data,&uid,&gid,&mode,&reserved,&root_block,
-	    &blocksize,&sbi->s_prefix,sbi->s_volume,&mount_flags))
+	if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
+			   &blocksize, &sbi->s_prefix, sbi->s_volume,
+			   &mount_flags)) {
+		kfree(new_opts);
 		return -EINVAL;
+	}
+	kfree(sb->s_options);
+	sb->s_options = new_opts;
+
 	sbi->s_flags = mount_flags;
 	sbi->s_mode = mode;
 	sbi->s_uid = uid;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0cc3597c1197..b58af8f18bc4 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -512,7 +512,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 	key = afs_request_key(vnode->volume->cell);
 	if (IS_ERR(key)) {
 		_leave(" = %ld [key]", PTR_ERR(key));
-		return ERR_PTR(PTR_ERR(key));
+		return ERR_CAST(key);
 	}
 
 	ret = afs_validate(vnode, key);
@@ -540,7 +540,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 	key_put(key);
 	if (IS_ERR(inode)) {
 		_leave(" = %ld", PTR_ERR(inode));
-		return ERR_PTR(PTR_ERR(inode));
+		return ERR_CAST(inode);
 	}
 
 	dentry->d_op = &afs_fs_dentry_operations;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 84750c8e9f95..08db82e1343a 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -196,10 +196,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
 
 	/* failure */
 bad_inode:
-	make_bad_inode(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-
+	iget_failed(inode);
 	_leave(" = %d [bad]", ret);
 	return ERR_PTR(ret);
 }
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 5ce43b63c60e..a3510b8ba3e7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -218,16 +218,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 	_enter("%p{%s},{%s:%p{%s},}",
 	       dentry,
 	       dentry->d_name.name,
-	       nd->mnt->mnt_devname,
+	       nd->path.mnt->mnt_devname,
 	       dentry,
-	       nd->dentry->d_name.name);
+	       nd->path.dentry->d_name.name);
 
-	dput(nd->dentry);
-	nd->dentry = dget(dentry);
+	dput(nd->path.dentry);
+	nd->path.dentry = dget(dentry);
 
-	newmnt = afs_mntpt_do_automount(nd->dentry);
+	newmnt = afs_mntpt_do_automount(nd->path.dentry);
 	if (IS_ERR(newmnt)) {
-		path_release(nd);
+		path_put(&nd->path);
 		return (void *)newmnt;
 	}
 
@@ -235,17 +235,16 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
 	switch (err) {
 	case 0:
-		dput(nd->dentry);
-		mntput(nd->mnt);
-		nd->mnt = newmnt;
-		nd->dentry = dget(newmnt->mnt_root);
+		path_put(&nd->path);
+		nd->path.mnt = newmnt;
+		nd->path.dentry = dget(newmnt->mnt_root);
 		schedule_delayed_work(&afs_mntpt_expiry_timer,
 				      afs_mntpt_expiry_timeout * HZ);
 		break;
 	case -EBUSY:
 		/* someone else made a mount here whilst we were busy */
-		while (d_mountpoint(nd->dentry) &&
-		       follow_down(&nd->mnt, &nd->dentry))
+		while (d_mountpoint(nd->path.dentry) &&
+		       follow_down(&nd->path.mnt, &nd->path.dentry))
 			;
 		err = 0;
 	default:
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 566fe712c682..3bcbeceba1bb 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -95,7 +95,7 @@ static struct afs_vnode *afs_get_auth_inode(struct afs_vnode *vnode,
 		auth_inode = afs_iget(vnode->vfs_inode.i_sb, key,
 				      &vnode->status.parent, NULL, NULL);
 		if (IS_ERR(auth_inode))
-			return ERR_PTR(PTR_ERR(auth_inode));
+			return ERR_CAST(auth_inode);
 	}
 
 	auth_vnode = AFS_FS_I(auth_inode);
@@ -287,7 +287,7 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
 int afs_permission(struct inode *inode, int mask, struct nameidata *nd)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
-	afs_access_t access;
+	afs_access_t uninitialized_var(access);
 	struct key *key;
 	int ret;
 
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 4b2558c42213..36bbce45f44b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -52,6 +52,7 @@ static const struct super_operations afs_super_ops = {
 	.clear_inode = afs_clear_inode,
 	.umount_begin = afs_umount_begin,
 	.put_super = afs_put_super,
+	.show_options = generic_show_options,
 };
 
 static struct kmem_cache *afs_inode_cachep;
@@ -357,6 +358,7 @@ static int afs_get_sb(struct file_system_type *fs_type,
 	struct super_block *sb;
 	struct afs_volume *vol;
 	struct key *key;
+	char *new_opts = kstrdup(options, GFP_KERNEL);
 	int ret;
 
 	_enter(",,%s,%p", dev_name, options);
@@ -408,9 +410,11 @@ static int afs_get_sb(struct file_system_type *fs_type,
 			deactivate_super(sb);
 			goto error;
 		}
+		sb->s_options = new_opts;
 		sb->s_flags |= MS_ACTIVE;
 	} else {
 		_debug("reuse");
+		kfree(new_opts);
 		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
 	}
 
@@ -424,6 +428,7 @@ error:
 	afs_put_volume(params.volume);
 	afs_put_cell(params.cell);
 	key_put(params.key);
+	kfree(new_opts);
 	_leave(" = %d", ret);
 	return ret;
 }
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -317,7 +317,7 @@ out:
 /* wait_on_sync_kiocb:
  *	Waits on the given sync kiocb to complete.
  */
-ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 {
 	while (iocb->ki_users) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -336,7 +336,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
  * go away, they will call put_ioctx and release any pinned memory
  * associated with the request (held via struct page * references).
  */
-void fastcall exit_aio(struct mm_struct *mm)
+void exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx = mm->ioctx_list;
 	mm->ioctx_list = NULL;
@@ -365,7 +365,7 @@ void fastcall exit_aio(struct mm_struct *mm)
  * Called when the last user of an aio context has gone away,
  * and the struct needs to be freed.
  */
-void fastcall __put_ioctx(struct kioctx *ctx)
+void __put_ioctx(struct kioctx *ctx)
 {
 	unsigned nr_events = ctx->max_reqs;
 
@@ -397,8 +397,7 @@ void fastcall __put_ioctx(struct kioctx *ctx)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx);
-static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
+static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -533,7 +532,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
  *	Returns true if this put was the last user of the kiocb,
  *	false if the request is still in use.
  */
-int fastcall aio_put_req(struct kiocb *req)
+int aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
 	int ret;
@@ -893,7 +892,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
  *      The retry is usually executed by aio workqueue
  *      threads (See aio_kick_handler).
  */
-void fastcall kick_iocb(struct kiocb *iocb)
+void kick_iocb(struct kiocb *iocb)
 {
 	/* sync iocbs are easy: they can only ever be executing from a
 	 * single context. */
@@ -912,7 +911,7 @@ EXPORT_SYMBOL(kick_iocb);
  *	Returns true if this is the last user of the request.  The
  *	only other user of the request can be the cancellation code.
  */
-int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
+int aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring_info	*info;
@@ -1330,6 +1329,10 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 		opcode = IOCB_CMD_PWRITEV;
 	}
 
+	/* This matches the pread()/pwrite() logic */
+	if (iocb->ki_pos < 0)
+		return -EINVAL;
+
 	do {
 		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
 			    iocb->ki_nr_segs - iocb->ki_cur_seg,
@@ -1348,6 +1351,13 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
 	if ((ret == 0) || (iocb->ki_left == 0))
 		ret = iocb->ki_nbytes - iocb->ki_left;
 
+	/* If we managed to write some out we return that, rather than
+	 * the eventual error. */
+	if (opcode == IOCB_CMD_PWRITEV
+	    && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
+	    && iocb->ki_nbytes - iocb->ki_left)
+		ret = iocb->ki_nbytes - iocb->ki_left;
+
 	return ret;
 }
 
@@ -1523,7 +1533,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
-int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 struct iocb *iocb)
 {
 	struct kiocb *req;
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 8b4cca3c4705..901a3e67ec45 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -150,6 +150,7 @@ extern const struct file_operations autofs_root_operations;
 
 int autofs_fill_super(struct super_block *, void *, int);
 void autofs_kill_sb(struct super_block *sb);
+struct inode *autofs_iget(struct super_block *, unsigned long);
 
 /* Queue management functions */
 
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 45f5992a0957..dda510d31f84 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -52,11 +52,9 @@ out_kill_sb:
 	kill_anon_super(sb);
 }
 
-static void autofs_read_inode(struct inode *inode);
-
 static const struct super_operations autofs_sops = {
-	.read_inode = autofs_read_inode,
 	.statfs = simple_statfs,
+	.show_options = generic_show_options,
 };
 
 enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto};
@@ -143,6 +141,8 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
 	int minproto, maxproto;
 	pid_t pgid;
 
+	save_mount_options(s, data);
+
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
 		goto fail_unlock;
@@ -164,7 +164,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
 	s->s_time_gran = 1;
 	sbi->sb = s;
 
-	root_inode = iget(s, AUTOFS_ROOT_INO);
+	root_inode = autofs_iget(s, AUTOFS_ROOT_INO);
+	if (IS_ERR(root_inode))
+		goto fail_free;
 	root = d_alloc_root(root_inode);
 	pipe = NULL;
 
@@ -230,11 +232,17 @@ fail_unlock:
 	return -EINVAL;
 }
 
-static void autofs_read_inode(struct inode *inode)
+struct inode *autofs_iget(struct super_block *sb, unsigned long ino)
 {
-	ino_t ino = inode->i_ino;
 	unsigned int n;
-	struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+	struct autofs_sb_info *sbi = autofs_sbi(sb);
+	struct inode *inode;
+
+	inode = iget_locked(sb, ino);
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
 
 	/* Initialize to the default case (stub directory) */
 
@@ -250,7 +258,7 @@ static void autofs_read_inode(struct inode *inode)
 		inode->i_op = &autofs_root_inode_operations;
 		inode->i_fop = &autofs_root_operations;
 		inode->i_uid = inode->i_gid = 0; /* Changed in read_super */
-		return;
+		goto done;
 	}
 
 	inode->i_uid = inode->i_sb->s_root->d_inode->i_uid;
@@ -263,7 +271,7 @@ static void autofs_read_inode(struct inode *inode)
 		n = ino - AUTOFS_FIRST_SYMLINK;
 		if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) {
 			printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino);
-			return;
+			goto done;
 		}
 
 		inode->i_op = &autofs_symlink_inode_operations;
@@ -275,4 +283,8 @@ static void autofs_read_inode(struct inode *inode)
 		inode->i_size = sl->len;
 		inode->i_nlink = 1;
 	}
+
+done:
+	unlock_new_inode(inode);
+	return inode;
 }
diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 5efff3c0d886..8aacade56956 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c | |||
@@ -114,8 +114,8 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str | |||
114 | dentry->d_time = (unsigned long) ent; | 114 | dentry->d_time = (unsigned long) ent; |
115 | 115 | ||
116 | if (!dentry->d_inode) { | 116 | if (!dentry->d_inode) { |
117 | inode = iget(sb, ent->ino); | 117 | inode = autofs_iget(sb, ent->ino); |
118 | if (!inode) { | 118 | if (IS_ERR(inode)) { |
119 | /* Failed, but leave pending for next time */ | 119 | /* Failed, but leave pending for next time */ |
120 | return 1; | 120 | return 1; |
121 | } | 121 | } |
@@ -274,6 +274,7 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c | |||
274 | unsigned int n; | 274 | unsigned int n; |
275 | int slsize; | 275 | int slsize; |
276 | struct autofs_symlink *sl; | 276 | struct autofs_symlink *sl; |
277 | struct inode *inode; | ||
277 | 278 | ||
278 | DPRINTK(("autofs_root_symlink: %s <- ", symname)); | 279 | DPRINTK(("autofs_root_symlink: %s <- ", symname)); |
279 | autofs_say(dentry->d_name.name,dentry->d_name.len); | 280 | autofs_say(dentry->d_name.name,dentry->d_name.len); |
@@ -331,7 +332,12 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c | |||
331 | ent->dentry = NULL; /* We don't keep the dentry for symlinks */ | 332 | ent->dentry = NULL; /* We don't keep the dentry for symlinks */ |
332 | 333 | ||
333 | autofs_hash_insert(dh,ent); | 334 | autofs_hash_insert(dh,ent); |
334 | d_instantiate(dentry, iget(dir->i_sb,ent->ino)); | 335 | |
336 | inode = autofs_iget(dir->i_sb, ent->ino); | ||
337 | if (IS_ERR(inode)) | ||
338 | return PTR_ERR(inode); | ||
339 | |||
340 | d_instantiate(dentry, inode); | ||
335 | unlock_kernel(); | 341 | unlock_kernel(); |
336 | return 0; | 342 | return 0; |
337 | } | 343 | } |
@@ -428,6 +434,7 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
428 | struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); | 434 | struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb); |
429 | struct autofs_dirhash *dh = &sbi->dirhash; | 435 | struct autofs_dirhash *dh = &sbi->dirhash; |
430 | struct autofs_dir_ent *ent; | 436 | struct autofs_dir_ent *ent; |
437 | struct inode *inode; | ||
431 | ino_t ino; | 438 | ino_t ino; |
432 | 439 | ||
433 | lock_kernel(); | 440 | lock_kernel(); |
@@ -469,7 +476,14 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
469 | autofs_hash_insert(dh,ent); | 476 | autofs_hash_insert(dh,ent); |
470 | 477 | ||
471 | inc_nlink(dir); | 478 | inc_nlink(dir); |
472 | d_instantiate(dentry, iget(dir->i_sb,ino)); | 479 | |
480 | inode = autofs_iget(dir->i_sb, ino); | ||
481 | if (IS_ERR(inode)) { | ||
482 | drop_nlink(dir); | ||
483 | return PTR_ERR(inode); | ||
484 | } | ||
485 | |||
486 | d_instantiate(dentry, inode); | ||
473 | unlock_kernel(); | 487 | unlock_kernel(); |
474 | 488 | ||
475 | return 0; | 489 | return 0; |
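A note on the pattern above: with iget() gone, helpers such as autofs_iget() return ERR_PTR() codes, so callers must check IS_ERR() and unwind any state they have already changed (here the parent's link count) before failing. A minimal caller-side sketch of that shape; myfs_iget() and myfs_allocate_ino() are hypothetical stand-ins, not functions introduced by this merge:

    #include <linux/fs.h>
    #include <linux/err.h>

    /* Hypothetical helpers assumed to exist for this sketch. */
    extern struct inode *myfs_iget(struct super_block *sb, unsigned long ino);
    extern unsigned long myfs_allocate_ino(struct inode *dir);

    static int myfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
    {
            struct inode *inode;

            inc_nlink(dir);                 /* the new directory's ".." link */

            inode = myfs_iget(dir->i_sb, myfs_allocate_ino(dir));
            if (IS_ERR(inode)) {
                    drop_nlink(dir);        /* undo the link count on failure */
                    return PTR_ERR(inode);
            }

            d_instantiate(dentry, inode);   /* only reached with a valid inode */
            return 0;
    }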
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 7f05d6ccdb13..2fdcf5e1d236 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c | |||
@@ -176,11 +176,16 @@ out_kill_sb: | |||
176 | static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) | 176 | static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) |
177 | { | 177 | { |
178 | struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb); | 178 | struct autofs_sb_info *sbi = autofs4_sbi(mnt->mnt_sb); |
179 | struct inode *root_inode = mnt->mnt_sb->s_root->d_inode; | ||
179 | 180 | ||
180 | if (!sbi) | 181 | if (!sbi) |
181 | return 0; | 182 | return 0; |
182 | 183 | ||
183 | seq_printf(m, ",fd=%d", sbi->pipefd); | 184 | seq_printf(m, ",fd=%d", sbi->pipefd); |
185 | if (root_inode->i_uid != 0) | ||
186 | seq_printf(m, ",uid=%u", root_inode->i_uid); | ||
187 | if (root_inode->i_gid != 0) | ||
188 | seq_printf(m, ",gid=%u", root_inode->i_gid); | ||
184 | seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); | 189 | seq_printf(m, ",pgrp=%d", sbi->oz_pgrp); |
185 | seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); | 190 | seq_printf(m, ",timeout=%lu", sbi->exp_timeout/HZ); |
186 | seq_printf(m, ",minproto=%d", sbi->min_proto); | 191 | seq_printf(m, ",minproto=%d", sbi->min_proto); |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 2bbcc8151dc3..a54a946a50ae 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -368,7 +368,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
368 | * so we don't need to follow the mount. | 368 | * so we don't need to follow the mount. |
369 | */ | 369 | */ |
370 | if (d_mountpoint(dentry)) { | 370 | if (d_mountpoint(dentry)) { |
371 | if (!autofs4_follow_mount(&nd->mnt, &nd->dentry)) { | 371 | if (!autofs4_follow_mount(&nd->path.mnt, |
372 | &nd->path.dentry)) { | ||
372 | status = -ENOENT; | 373 | status = -ENOENT; |
373 | goto out_error; | 374 | goto out_error; |
374 | } | 375 | } |
@@ -382,7 +383,7 @@ done: | |||
382 | return NULL; | 383 | return NULL; |
383 | 384 | ||
384 | out_error: | 385 | out_error: |
385 | path_release(nd); | 386 | path_put(&nd->path); |
386 | return ERR_PTR(status); | 387 | return ERR_PTR(status); |
387 | } | 388 | } |
388 | 389 | ||
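These hunks belong to the nameidata conversion: the dentry/vfsmount pair now lives in an embedded struct path, and path_put() replaces path_release(). A minimal sketch of the new idiom from a caller's point of view; myfs_statfs_path() is a made-up example, not code from this merge:

    #include <linux/namei.h>
    #include <linux/statfs.h>
    #include <linux/fs.h>

    static int myfs_statfs_path(const char *name, struct kstatfs *buf)
    {
            struct nameidata nd;
            int err;

            err = path_lookup(name, LOOKUP_FOLLOW, &nd);
            if (err)
                    return err;

            /* the looked-up dentry/vfsmount are reached through nd.path now */
            err = vfs_statfs(nd.path.dentry, buf);

            path_put(&nd.path);             /* replaces path_release(&nd) */
            return err;
    }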
diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 521ff7caadbd..f1c2ea8342f5 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c | |||
@@ -359,3 +359,17 @@ int is_bad_inode(struct inode *inode) | |||
359 | } | 359 | } |
360 | 360 | ||
361 | EXPORT_SYMBOL(is_bad_inode); | 361 | EXPORT_SYMBOL(is_bad_inode); |
362 | |||
363 | /** | ||
364 | * iget_failed - Mark an under-construction inode as dead and release it | ||
365 | * @inode: The inode to discard | ||
366 | * | ||
367 | * Mark an under-construction inode as dead and release it. | ||
368 | */ | ||
369 | void iget_failed(struct inode *inode) | ||
370 | { | ||
371 | make_bad_inode(inode); | ||
372 | unlock_new_inode(inode); | ||
373 | iput(inode); | ||
374 | } | ||
375 | EXPORT_SYMBOL(iget_failed); | ||
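The new iget_failed() helper above bundles the make_bad_inode() + unlock_new_inode() + iput() sequence that every iget_locked()-based lookup needs on its error path. A minimal sketch of the intended call pattern; myfs_iget() and myfs_read_raw_inode() are hypothetical placeholders rather than functions defined in this merge:

    #include <linux/fs.h>
    #include <linux/err.h>

    /* Hypothetical low-level on-disk reader assumed for this sketch. */
    extern int myfs_read_raw_inode(struct inode *inode);

    struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
    {
            struct inode *inode;

            inode = iget_locked(sb, ino);
            if (!inode)
                    return ERR_PTR(-ENOMEM);        /* allocation failed */
            if (!(inode->i_state & I_NEW))
                    return inode;                   /* already initialised */

            if (myfs_read_raw_inode(inode)) {
                    /* marks the inode bad, unlocks it and drops the reference */
                    iget_failed(inode);
                    return ERR_PTR(-EIO);
            }

            unlock_new_inode(inode);                /* lookups can now see it */
            return inode;
    }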
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b28a20e61b80..82123ff3e1dd 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -35,7 +35,7 @@ static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); | |||
35 | static int befs_readpage(struct file *file, struct page *page); | 35 | static int befs_readpage(struct file *file, struct page *page); |
36 | static sector_t befs_bmap(struct address_space *mapping, sector_t block); | 36 | static sector_t befs_bmap(struct address_space *mapping, sector_t block); |
37 | static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *); | 37 | static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *); |
38 | static void befs_read_inode(struct inode *ino); | 38 | static struct inode *befs_iget(struct super_block *, unsigned long); |
39 | static struct inode *befs_alloc_inode(struct super_block *sb); | 39 | static struct inode *befs_alloc_inode(struct super_block *sb); |
40 | static void befs_destroy_inode(struct inode *inode); | 40 | static void befs_destroy_inode(struct inode *inode); |
41 | static int befs_init_inodecache(void); | 41 | static int befs_init_inodecache(void); |
@@ -52,12 +52,12 @@ static int befs_statfs(struct dentry *, struct kstatfs *); | |||
52 | static int parse_options(char *, befs_mount_options *); | 52 | static int parse_options(char *, befs_mount_options *); |
53 | 53 | ||
54 | static const struct super_operations befs_sops = { | 54 | static const struct super_operations befs_sops = { |
55 | .read_inode = befs_read_inode, /* initialize & read inode */ | ||
56 | .alloc_inode = befs_alloc_inode, /* allocate a new inode */ | 55 | .alloc_inode = befs_alloc_inode, /* allocate a new inode */ |
57 | .destroy_inode = befs_destroy_inode, /* deallocate an inode */ | 56 | .destroy_inode = befs_destroy_inode, /* deallocate an inode */ |
58 | .put_super = befs_put_super, /* uninit super */ | 57 | .put_super = befs_put_super, /* uninit super */ |
59 | .statfs = befs_statfs, /* statfs */ | 58 | .statfs = befs_statfs, /* statfs */ |
60 | .remount_fs = befs_remount, | 59 | .remount_fs = befs_remount, |
60 | .show_options = generic_show_options, | ||
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* slab cache for befs_inode_info objects */ | 63 | /* slab cache for befs_inode_info objects */ |
@@ -198,9 +198,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
198 | return ERR_PTR(-ENODATA); | 198 | return ERR_PTR(-ENODATA); |
199 | } | 199 | } |
200 | 200 | ||
201 | inode = iget(dir->i_sb, (ino_t) offset); | 201 | inode = befs_iget(dir->i_sb, (ino_t) offset); |
202 | if (!inode) | 202 | if (IS_ERR(inode)) |
203 | return ERR_PTR(-EACCES); | 203 | return ERR_CAST(inode); |
204 | 204 | ||
205 | d_add(dentry, inode); | 205 | d_add(dentry, inode); |
206 | 206 | ||
@@ -296,17 +296,23 @@ static void init_once(struct kmem_cache *cachep, void *foo) | |||
296 | inode_init_once(&bi->vfs_inode); | 296 | inode_init_once(&bi->vfs_inode); |
297 | } | 297 | } |
298 | 298 | ||
299 | static void | 299 | static struct inode *befs_iget(struct super_block *sb, unsigned long ino) |
300 | befs_read_inode(struct inode *inode) | ||
301 | { | 300 | { |
302 | struct buffer_head *bh = NULL; | 301 | struct buffer_head *bh = NULL; |
303 | befs_inode *raw_inode = NULL; | 302 | befs_inode *raw_inode = NULL; |
304 | 303 | ||
305 | struct super_block *sb = inode->i_sb; | ||
306 | befs_sb_info *befs_sb = BEFS_SB(sb); | 304 | befs_sb_info *befs_sb = BEFS_SB(sb); |
307 | befs_inode_info *befs_ino = NULL; | 305 | befs_inode_info *befs_ino = NULL; |
306 | struct inode *inode; | ||
307 | long ret = -EIO; | ||
308 | |||
309 | befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino); | ||
308 | 310 | ||
309 | befs_debug(sb, "---> befs_read_inode() " "inode = %lu", inode->i_ino); | 311 | inode = iget_locked(sb, ino); |
312 | if (IS_ERR(inode)) | ||
313 | return inode; | ||
314 | if (!(inode->i_state & I_NEW)) | ||
315 | return inode; | ||
310 | 316 | ||
311 | befs_ino = BEFS_I(inode); | 317 | befs_ino = BEFS_I(inode); |
312 | 318 | ||
@@ -402,15 +408,16 @@ befs_read_inode(struct inode *inode) | |||
402 | 408 | ||
403 | brelse(bh); | 409 | brelse(bh); |
404 | befs_debug(sb, "<--- befs_read_inode()"); | 410 | befs_debug(sb, "<--- befs_read_inode()"); |
405 | return; | 411 | unlock_new_inode(inode); |
412 | return inode; | ||
406 | 413 | ||
407 | unacquire_bh: | 414 | unacquire_bh: |
408 | brelse(bh); | 415 | brelse(bh); |
409 | 416 | ||
410 | unacquire_none: | 417 | unacquire_none: |
411 | make_bad_inode(inode); | 418 | iget_failed(inode); |
412 | befs_debug(sb, "<--- befs_read_inode() - Bad inode"); | 419 | befs_debug(sb, "<--- befs_read_inode() - Bad inode"); |
413 | return; | 420 | return ERR_PTR(ret); |
414 | } | 421 | } |
415 | 422 | ||
416 | /* Initialize the inode cache. Called at fs setup. | 423 | /* Initialize the inode cache. Called at fs setup. |
@@ -752,10 +759,12 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | |||
752 | befs_sb_info *befs_sb; | 759 | befs_sb_info *befs_sb; |
753 | befs_super_block *disk_sb; | 760 | befs_super_block *disk_sb; |
754 | struct inode *root; | 761 | struct inode *root; |
755 | 762 | long ret = -EINVAL; | |
756 | const unsigned long sb_block = 0; | 763 | const unsigned long sb_block = 0; |
757 | const off_t x86_sb_off = 512; | 764 | const off_t x86_sb_off = 512; |
758 | 765 | ||
766 | save_mount_options(sb, data); | ||
767 | |||
759 | sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); | 768 | sb->s_fs_info = kmalloc(sizeof (*befs_sb), GFP_KERNEL); |
760 | if (sb->s_fs_info == NULL) { | 769 | if (sb->s_fs_info == NULL) { |
761 | printk(KERN_ERR | 770 | printk(KERN_ERR |
@@ -833,7 +842,11 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | |||
833 | /* Set real blocksize of fs */ | 842 | /* Set real blocksize of fs */ |
834 | sb_set_blocksize(sb, (ulong) befs_sb->block_size); | 843 | sb_set_blocksize(sb, (ulong) befs_sb->block_size); |
835 | sb->s_op = (struct super_operations *) &befs_sops; | 844 | sb->s_op = (struct super_operations *) &befs_sops; |
836 | root = iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); | 845 | root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); |
846 | if (IS_ERR(root)) { | ||
847 | ret = PTR_ERR(root); | ||
848 | goto unacquire_priv_sbp; | ||
849 | } | ||
837 | sb->s_root = d_alloc_root(root); | 850 | sb->s_root = d_alloc_root(root); |
838 | if (!sb->s_root) { | 851 | if (!sb->s_root) { |
839 | iput(root); | 852 | iput(root); |
@@ -868,7 +881,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | |||
868 | 881 | ||
869 | unacquire_none: | 882 | unacquire_none: |
870 | sb->s_fs_info = NULL; | 883 | sb->s_fs_info = NULL; |
871 | return -EINVAL; | 884 | return ret; |
872 | } | 885 | } |
873 | 886 | ||
874 | static int | 887 | static int |
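Besides the befs_iget() conversion, the befs hunks switch option reporting to the generic helpers: save_mount_options() records the mount string at fill_super time and generic_show_options() echoes it back for /proc/mounts. A minimal sketch of that pairing, assuming a hypothetical myfs:

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static const struct super_operations myfs_sops = {
            .show_options   = generic_show_options, /* replays the saved string */
    };

    static int myfs_fill_super(struct super_block *sb, void *data, int silent)
    {
            save_mount_options(sb, data);   /* keep a copy for show_options */

            sb->s_op = &myfs_sops;
            /* ... parse options, read the on-disk superblock, set sb->s_root ... */
            return 0;
    }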
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h index ac7a8b1d6c3a..71faf4d23908 100644 --- a/fs/bfs/bfs.h +++ b/fs/bfs/bfs.h | |||
@@ -44,6 +44,8 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode) | |||
44 | #define printf(format, args...) \ | 44 | #define printf(format, args...) \ |
45 | printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args) | 45 | printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args) |
46 | 46 | ||
47 | /* inode.c */ | ||
48 | extern struct inode *bfs_iget(struct super_block *sb, unsigned long ino); | ||
47 | 49 | ||
48 | /* file.c */ | 50 | /* file.c */ |
49 | extern const struct inode_operations bfs_file_inops; | 51 | extern const struct inode_operations bfs_file_inops; |
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 1fd056d0fc3d..034950cb3cbe 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c | |||
@@ -148,10 +148,10 @@ static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry, | |||
148 | if (bh) { | 148 | if (bh) { |
149 | unsigned long ino = (unsigned long)le16_to_cpu(de->ino); | 149 | unsigned long ino = (unsigned long)le16_to_cpu(de->ino); |
150 | brelse(bh); | 150 | brelse(bh); |
151 | inode = iget(dir->i_sb, ino); | 151 | inode = bfs_iget(dir->i_sb, ino); |
152 | if (!inode) { | 152 | if (IS_ERR(inode)) { |
153 | unlock_kernel(); | 153 | unlock_kernel(); |
154 | return ERR_PTR(-EACCES); | 154 | return ERR_CAST(inode); |
155 | } | 155 | } |
156 | } | 156 | } |
157 | unlock_kernel(); | 157 | unlock_kernel(); |
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index a64a71d444f5..8db623838b50 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c | |||
@@ -32,17 +32,22 @@ MODULE_LICENSE("GPL"); | |||
32 | 32 | ||
33 | void dump_imap(const char *prefix, struct super_block *s); | 33 | void dump_imap(const char *prefix, struct super_block *s); |
34 | 34 | ||
35 | static void bfs_read_inode(struct inode *inode) | 35 | struct inode *bfs_iget(struct super_block *sb, unsigned long ino) |
36 | { | 36 | { |
37 | unsigned long ino = inode->i_ino; | ||
38 | struct bfs_inode *di; | 37 | struct bfs_inode *di; |
38 | struct inode *inode; | ||
39 | struct buffer_head *bh; | 39 | struct buffer_head *bh; |
40 | int block, off; | 40 | int block, off; |
41 | 41 | ||
42 | inode = iget_locked(sb, ino); | ||
43 | if (IS_ERR(inode)) | ||
44 | return ERR_PTR(-ENOMEM); | ||
45 | if (!(inode->i_state & I_NEW)) | ||
46 | return inode; | ||
47 | |||
42 | if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) { | 48 | if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) { |
43 | printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino); | 49 | printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino); |
44 | make_bad_inode(inode); | 50 | goto error; |
45 | return; | ||
46 | } | 51 | } |
47 | 52 | ||
48 | block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1; | 53 | block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1; |
@@ -50,8 +55,7 @@ static void bfs_read_inode(struct inode *inode) | |||
50 | if (!bh) { | 55 | if (!bh) { |
51 | printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, | 56 | printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id, |
52 | ino); | 57 | ino); |
53 | make_bad_inode(inode); | 58 | goto error; |
54 | return; | ||
55 | } | 59 | } |
56 | 60 | ||
57 | off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; | 61 | off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; |
@@ -85,6 +89,12 @@ static void bfs_read_inode(struct inode *inode) | |||
85 | inode->i_ctime.tv_nsec = 0; | 89 | inode->i_ctime.tv_nsec = 0; |
86 | 90 | ||
87 | brelse(bh); | 91 | brelse(bh); |
92 | unlock_new_inode(inode); | ||
93 | return inode; | ||
94 | |||
95 | error: | ||
96 | iget_failed(inode); | ||
97 | return ERR_PTR(-EIO); | ||
88 | } | 98 | } |
89 | 99 | ||
90 | static int bfs_write_inode(struct inode *inode, int unused) | 100 | static int bfs_write_inode(struct inode *inode, int unused) |
@@ -276,7 +286,6 @@ static void destroy_inodecache(void) | |||
276 | static const struct super_operations bfs_sops = { | 286 | static const struct super_operations bfs_sops = { |
277 | .alloc_inode = bfs_alloc_inode, | 287 | .alloc_inode = bfs_alloc_inode, |
278 | .destroy_inode = bfs_destroy_inode, | 288 | .destroy_inode = bfs_destroy_inode, |
279 | .read_inode = bfs_read_inode, | ||
280 | .write_inode = bfs_write_inode, | 289 | .write_inode = bfs_write_inode, |
281 | .delete_inode = bfs_delete_inode, | 290 | .delete_inode = bfs_delete_inode, |
282 | .put_super = bfs_put_super, | 291 | .put_super = bfs_put_super, |
@@ -312,6 +321,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | |||
312 | struct inode *inode; | 321 | struct inode *inode; |
313 | unsigned i, imap_len; | 322 | unsigned i, imap_len; |
314 | struct bfs_sb_info *info; | 323 | struct bfs_sb_info *info; |
324 | long ret = -EINVAL; | ||
315 | 325 | ||
316 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 326 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
317 | if (!info) | 327 | if (!info) |
@@ -346,14 +356,16 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | |||
346 | set_bit(i, info->si_imap); | 356 | set_bit(i, info->si_imap); |
347 | 357 | ||
348 | s->s_op = &bfs_sops; | 358 | s->s_op = &bfs_sops; |
349 | inode = iget(s, BFS_ROOT_INO); | 359 | inode = bfs_iget(s, BFS_ROOT_INO); |
350 | if (!inode) { | 360 | if (IS_ERR(inode)) { |
361 | ret = PTR_ERR(inode); | ||
351 | kfree(info->si_imap); | 362 | kfree(info->si_imap); |
352 | goto out; | 363 | goto out; |
353 | } | 364 | } |
354 | s->s_root = d_alloc_root(inode); | 365 | s->s_root = d_alloc_root(inode); |
355 | if (!s->s_root) { | 366 | if (!s->s_root) { |
356 | iput(inode); | 367 | iput(inode); |
368 | ret = -ENOMEM; | ||
357 | kfree(info->si_imap); | 369 | kfree(info->si_imap); |
358 | goto out; | 370 | goto out; |
359 | } | 371 | } |
@@ -404,7 +416,7 @@ out: | |||
404 | brelse(bh); | 416 | brelse(bh); |
405 | kfree(info); | 417 | kfree(info); |
406 | s->s_fs_info = NULL; | 418 | s->s_fs_info = NULL; |
407 | return -EINVAL; | 419 | return ret; |
408 | } | 420 | } |
409 | 421 | ||
410 | static int bfs_get_sb(struct file_system_type *fs_type, | 422 | static int bfs_get_sb(struct file_system_type *fs_type, |
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 7596e1e94cde..a1bb2244cac7 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/system.h> | 28 | #include <asm/system.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
31 | #include <asm/a.out-core.h> | ||
31 | 32 | ||
32 | static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); | 33 | static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); |
33 | static int load_aout_library(struct file*); | 34 | static int load_aout_library(struct file*); |
@@ -115,10 +116,10 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u | |||
115 | current->flags |= PF_DUMPCORE; | 116 | current->flags |= PF_DUMPCORE; |
116 | strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); | 117 | strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); |
117 | #ifndef __sparc__ | 118 | #ifndef __sparc__ |
118 | dump.u_ar0 = (void *)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump))); | 119 | dump.u_ar0 = offsetof(struct user, regs); |
119 | #endif | 120 | #endif |
120 | dump.signal = signr; | 121 | dump.signal = signr; |
121 | dump_thread(regs, &dump); | 122 | aout_dump_thread(regs, &dump); |
122 | 123 | ||
123 | /* If the size of the dump file exceeds the rlimit, then see what would happen | 124 | /* If the size of the dump file exceeds the rlimit, then see what would happen |
124 | if we wrote the stack, but not the data area. */ | 125 | if we wrote the stack, but not the data area. */ |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 4628c42ca892..41a958a7585e 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -134,8 +134,7 @@ static int padzero(unsigned long elf_bss) | |||
134 | 134 | ||
135 | static int | 135 | static int |
136 | create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, | 136 | create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, |
137 | int interp_aout, unsigned long load_addr, | 137 | unsigned long load_addr, unsigned long interp_load_addr) |
138 | unsigned long interp_load_addr) | ||
139 | { | 138 | { |
140 | unsigned long p = bprm->p; | 139 | unsigned long p = bprm->p; |
141 | int argc = bprm->argc; | 140 | int argc = bprm->argc; |
@@ -223,12 +222,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, | |||
223 | 222 | ||
224 | sp = STACK_ADD(p, ei_index); | 223 | sp = STACK_ADD(p, ei_index); |
225 | 224 | ||
226 | items = (argc + 1) + (envc + 1); | 225 | items = (argc + 1) + (envc + 1) + 1; |
227 | if (interp_aout) { | ||
228 | items += 3; /* a.out interpreters require argv & envp too */ | ||
229 | } else { | ||
230 | items += 1; /* ELF interpreters only put argc on the stack */ | ||
231 | } | ||
232 | bprm->p = STACK_ROUND(sp, items); | 226 | bprm->p = STACK_ROUND(sp, items); |
233 | 227 | ||
234 | /* Point sp at the lowest address on the stack */ | 228 | /* Point sp at the lowest address on the stack */ |
@@ -251,16 +245,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, | |||
251 | /* Now, let's put argc (and argv, envp if appropriate) on the stack */ | 245 | /* Now, let's put argc (and argv, envp if appropriate) on the stack */ |
252 | if (__put_user(argc, sp++)) | 246 | if (__put_user(argc, sp++)) |
253 | return -EFAULT; | 247 | return -EFAULT; |
254 | if (interp_aout) { | 248 | argv = sp; |
255 | argv = sp + 2; | 249 | envp = argv + argc + 1; |
256 | envp = argv + argc + 1; | ||
257 | if (__put_user((elf_addr_t)(unsigned long)argv, sp++) || | ||
258 | __put_user((elf_addr_t)(unsigned long)envp, sp++)) | ||
259 | return -EFAULT; | ||
260 | } else { | ||
261 | argv = sp; | ||
262 | envp = argv + argc + 1; | ||
263 | } | ||
264 | 250 | ||
265 | /* Populate argv and envp */ | 251 | /* Populate argv and envp */ |
266 | p = current->mm->arg_end = current->mm->arg_start; | 252 | p = current->mm->arg_end = current->mm->arg_start; |
@@ -513,59 +499,12 @@ out: | |||
513 | return error; | 499 | return error; |
514 | } | 500 | } |
515 | 501 | ||
516 | static unsigned long load_aout_interp(struct exec *interp_ex, | ||
517 | struct file *interpreter) | ||
518 | { | ||
519 | unsigned long text_data, elf_entry = ~0UL; | ||
520 | char __user * addr; | ||
521 | loff_t offset; | ||
522 | |||
523 | current->mm->end_code = interp_ex->a_text; | ||
524 | text_data = interp_ex->a_text + interp_ex->a_data; | ||
525 | current->mm->end_data = text_data; | ||
526 | current->mm->brk = interp_ex->a_bss + text_data; | ||
527 | |||
528 | switch (N_MAGIC(*interp_ex)) { | ||
529 | case OMAGIC: | ||
530 | offset = 32; | ||
531 | addr = (char __user *)0; | ||
532 | break; | ||
533 | case ZMAGIC: | ||
534 | case QMAGIC: | ||
535 | offset = N_TXTOFF(*interp_ex); | ||
536 | addr = (char __user *)N_TXTADDR(*interp_ex); | ||
537 | break; | ||
538 | default: | ||
539 | goto out; | ||
540 | } | ||
541 | |||
542 | down_write(¤t->mm->mmap_sem); | ||
543 | do_brk(0, text_data); | ||
544 | up_write(¤t->mm->mmap_sem); | ||
545 | if (!interpreter->f_op || !interpreter->f_op->read) | ||
546 | goto out; | ||
547 | if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0) | ||
548 | goto out; | ||
549 | flush_icache_range((unsigned long)addr, | ||
550 | (unsigned long)addr + text_data); | ||
551 | |||
552 | down_write(¤t->mm->mmap_sem); | ||
553 | do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1), | ||
554 | interp_ex->a_bss); | ||
555 | up_write(¤t->mm->mmap_sem); | ||
556 | elf_entry = interp_ex->a_entry; | ||
557 | |||
558 | out: | ||
559 | return elf_entry; | ||
560 | } | ||
561 | |||
562 | /* | 502 | /* |
563 | * These are the functions used to load ELF style executables and shared | 503 | * These are the functions used to load ELF style executables and shared |
564 | * libraries. There is no binary dependent code anywhere else. | 504 | * libraries. There is no binary dependent code anywhere else. |
565 | */ | 505 | */ |
566 | 506 | ||
567 | #define INTERPRETER_NONE 0 | 507 | #define INTERPRETER_NONE 0 |
568 | #define INTERPRETER_AOUT 1 | ||
569 | #define INTERPRETER_ELF 2 | 508 | #define INTERPRETER_ELF 2 |
570 | 509 | ||
571 | #ifndef STACK_RND_MASK | 510 | #ifndef STACK_RND_MASK |
@@ -594,7 +533,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
594 | unsigned long load_addr = 0, load_bias = 0; | 533 | unsigned long load_addr = 0, load_bias = 0; |
595 | int load_addr_set = 0; | 534 | int load_addr_set = 0; |
596 | char * elf_interpreter = NULL; | 535 | char * elf_interpreter = NULL; |
597 | unsigned int interpreter_type = INTERPRETER_NONE; | ||
598 | unsigned long error; | 536 | unsigned long error; |
599 | struct elf_phdr *elf_ppnt, *elf_phdata; | 537 | struct elf_phdr *elf_ppnt, *elf_phdata; |
600 | unsigned long elf_bss, elf_brk; | 538 | unsigned long elf_bss, elf_brk; |
@@ -605,7 +543,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
605 | unsigned long interp_load_addr = 0; | 543 | unsigned long interp_load_addr = 0; |
606 | unsigned long start_code, end_code, start_data, end_data; | 544 | unsigned long start_code, end_code, start_data, end_data; |
607 | unsigned long reloc_func_desc = 0; | 545 | unsigned long reloc_func_desc = 0; |
608 | char passed_fileno[6]; | ||
609 | struct files_struct *files; | 546 | struct files_struct *files; |
610 | int executable_stack = EXSTACK_DEFAULT; | 547 | int executable_stack = EXSTACK_DEFAULT; |
611 | unsigned long def_flags = 0; | 548 | unsigned long def_flags = 0; |
@@ -774,59 +711,18 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
774 | 711 | ||
775 | /* Some simple consistency checks for the interpreter */ | 712 | /* Some simple consistency checks for the interpreter */ |
776 | if (elf_interpreter) { | 713 | if (elf_interpreter) { |
777 | static int warn; | ||
778 | interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; | ||
779 | |||
780 | /* Now figure out which format our binary is */ | ||
781 | if ((N_MAGIC(loc->interp_ex) != OMAGIC) && | ||
782 | (N_MAGIC(loc->interp_ex) != ZMAGIC) && | ||
783 | (N_MAGIC(loc->interp_ex) != QMAGIC)) | ||
784 | interpreter_type = INTERPRETER_ELF; | ||
785 | |||
786 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) | ||
787 | interpreter_type &= ~INTERPRETER_ELF; | ||
788 | |||
789 | if (interpreter_type == INTERPRETER_AOUT && warn < 10) { | ||
790 | printk(KERN_WARNING "a.out ELF interpreter %s is " | ||
791 | "deprecated and will not be supported " | ||
792 | "after Linux 2.6.25\n", elf_interpreter); | ||
793 | warn++; | ||
794 | } | ||
795 | |||
796 | retval = -ELIBBAD; | 714 | retval = -ELIBBAD; |
797 | if (!interpreter_type) | 715 | /* Not an ELF interpreter */ |
716 | if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) | ||
798 | goto out_free_dentry; | 717 | goto out_free_dentry; |
799 | |||
800 | /* Make sure only one type was selected */ | ||
801 | if ((interpreter_type & INTERPRETER_ELF) && | ||
802 | interpreter_type != INTERPRETER_ELF) { | ||
803 | // FIXME - ratelimit this before re-enabling | ||
804 | // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n"); | ||
805 | interpreter_type = INTERPRETER_ELF; | ||
806 | } | ||
807 | /* Verify the interpreter has a valid arch */ | 718 | /* Verify the interpreter has a valid arch */ |
808 | if ((interpreter_type == INTERPRETER_ELF) && | 719 | if (!elf_check_arch(&loc->interp_elf_ex)) |
809 | !elf_check_arch(&loc->interp_elf_ex)) | ||
810 | goto out_free_dentry; | 720 | goto out_free_dentry; |
811 | } else { | 721 | } else { |
812 | /* Executables without an interpreter also need a personality */ | 722 | /* Executables without an interpreter also need a personality */ |
813 | SET_PERSONALITY(loc->elf_ex, 0); | 723 | SET_PERSONALITY(loc->elf_ex, 0); |
814 | } | 724 | } |
815 | 725 | ||
816 | /* OK, we are done with that, now set up the arg stuff, | ||
817 | and then start this sucker up */ | ||
818 | if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) { | ||
819 | char *passed_p = passed_fileno; | ||
820 | sprintf(passed_fileno, "%d", elf_exec_fileno); | ||
821 | |||
822 | if (elf_interpreter) { | ||
823 | retval = copy_strings_kernel(1, &passed_p, bprm); | ||
824 | if (retval) | ||
825 | goto out_free_dentry; | ||
826 | bprm->argc++; | ||
827 | } | ||
828 | } | ||
829 | |||
830 | /* Flush all traces of the currently running executable */ | 726 | /* Flush all traces of the currently running executable */ |
831 | retval = flush_old_exec(bprm); | 727 | retval = flush_old_exec(bprm); |
832 | if (retval) | 728 | if (retval) |
@@ -1004,24 +900,19 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
1004 | } | 900 | } |
1005 | 901 | ||
1006 | if (elf_interpreter) { | 902 | if (elf_interpreter) { |
1007 | if (interpreter_type == INTERPRETER_AOUT) { | 903 | unsigned long uninitialized_var(interp_map_addr); |
1008 | elf_entry = load_aout_interp(&loc->interp_ex, | 904 | |
1009 | interpreter); | 905 | elf_entry = load_elf_interp(&loc->interp_elf_ex, |
1010 | } else { | 906 | interpreter, |
1011 | unsigned long uninitialized_var(interp_map_addr); | 907 | &interp_map_addr, |
1012 | 908 | load_bias); | |
1013 | elf_entry = load_elf_interp(&loc->interp_elf_ex, | 909 | if (!IS_ERR((void *)elf_entry)) { |
1014 | interpreter, | 910 | /* |
1015 | &interp_map_addr, | 911 | * load_elf_interp() returns relocation |
1016 | load_bias); | 912 | * adjustment |
1017 | if (!IS_ERR((void *)elf_entry)) { | 913 | */ |
1018 | /* | 914 | interp_load_addr = elf_entry; |
1019 | * load_elf_interp() returns relocation | 915 | elf_entry += loc->interp_elf_ex.e_entry; |
1020 | * adjustment | ||
1021 | */ | ||
1022 | interp_load_addr = elf_entry; | ||
1023 | elf_entry += loc->interp_elf_ex.e_entry; | ||
1024 | } | ||
1025 | } | 916 | } |
1026 | if (BAD_ADDR(elf_entry)) { | 917 | if (BAD_ADDR(elf_entry)) { |
1027 | force_sig(SIGSEGV, current); | 918 | force_sig(SIGSEGV, current); |
@@ -1045,8 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
1045 | 936 | ||
1046 | kfree(elf_phdata); | 937 | kfree(elf_phdata); |
1047 | 938 | ||
1048 | if (interpreter_type != INTERPRETER_AOUT) | 939 | sys_close(elf_exec_fileno); |
1049 | sys_close(elf_exec_fileno); | ||
1050 | 940 | ||
1051 | set_binfmt(&elf_format); | 941 | set_binfmt(&elf_format); |
1052 | 942 | ||
@@ -1061,15 +951,12 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
1061 | compute_creds(bprm); | 951 | compute_creds(bprm); |
1062 | current->flags &= ~PF_FORKNOEXEC; | 952 | current->flags &= ~PF_FORKNOEXEC; |
1063 | retval = create_elf_tables(bprm, &loc->elf_ex, | 953 | retval = create_elf_tables(bprm, &loc->elf_ex, |
1064 | (interpreter_type == INTERPRETER_AOUT), | ||
1065 | load_addr, interp_load_addr); | 954 | load_addr, interp_load_addr); |
1066 | if (retval < 0) { | 955 | if (retval < 0) { |
1067 | send_sig(SIGKILL, current, 0); | 956 | send_sig(SIGKILL, current, 0); |
1068 | goto out; | 957 | goto out; |
1069 | } | 958 | } |
1070 | /* N.B. passed_fileno might not be initialized? */ | 959 | /* N.B. passed_fileno might not be initialized? */ |
1071 | if (interpreter_type == INTERPRETER_AOUT) | ||
1072 | current->mm->arg_start += strlen(passed_fileno) + 1; | ||
1073 | current->mm->end_code = end_code; | 960 | current->mm->end_code = end_code; |
1074 | current->mm->start_code = start_code; | 961 | current->mm->start_code = start_code; |
1075 | current->mm->start_data = start_data; | 962 | current->mm->start_data = start_data; |
@@ -1077,7 +964,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
1077 | current->mm->start_stack = bprm->p; | 964 | current->mm->start_stack = bprm->p; |
1078 | 965 | ||
1079 | #ifdef arch_randomize_brk | 966 | #ifdef arch_randomize_brk |
1080 | if (current->flags & PF_RANDOMIZE) | 967 | if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) |
1081 | current->mm->brk = current->mm->start_brk = | 968 | current->mm->brk = current->mm->start_brk = |
1082 | arch_randomize_brk(current->mm); | 969 | arch_randomize_brk(current->mm); |
1083 | #endif | 970 | #endif |
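With the a.out interpreter path removed, create_elf_tables() reserves stack slots only for the ELF layout: the argc word, the NULL-terminated argv array and the NULL-terminated envp array, hence items = (argc + 1) + (envc + 1) + 1. A small user-space illustration of that count (plain C plus the POSIX environ symbol; nothing here comes from the patch itself):

    #include <stdio.h>

    extern char **environ;

    int main(int argc, char *argv[])
    {
            int envc = 0;

            while (environ[envc])           /* count envp entries up to the NULL */
                    envc++;

            /* argc word + (argc + 1) argv slots + (envc + 1) envp slots */
            printf("pointer-sized items reserved: %d\n",
                   (argc + 1) + (envc + 1) + 1);
            return 0;
    }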
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 33764fd6db66..0498b181dd52 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/mman.h> | 22 | #include <linux/mman.h> |
23 | #include <linux/a.out.h> | ||
24 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
25 | #include <linux/signal.h> | 24 | #include <linux/signal.h> |
26 | #include <linux/string.h> | 25 | #include <linux/string.h> |
@@ -444,12 +443,12 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
444 | 443 | ||
445 | if (strncmp(hdr->magic, "bFLT", 4)) { | 444 | if (strncmp(hdr->magic, "bFLT", 4)) { |
446 | /* | 445 | /* |
446 | * Previously, here was a printk to tell people | ||
447 | * "BINFMT_FLAT: bad header magic". | ||
448 | * But for the kernel which also use ELF FD-PIC format, this | ||
449 | * error message is confusing. | ||
447 | * because a lot of people do not manage to produce good | 450 | * because a lot of people do not manage to produce good |
448 | * flat binaries, we leave this printk to help them realise | ||
449 | * the problem. We only print the error if its not a script file | ||
450 | */ | 451 | */ |
451 | if (strncmp(hdr->magic, "#!", 2)) | ||
452 | printk("BINFMT_FLAT: bad header magic\n"); | ||
453 | ret = -ENOEXEC; | 452 | ret = -ENOEXEC; |
454 | goto err; | 453 | goto err; |
455 | } | 454 | } |
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index 9208c41209f9..14c63527c762 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | 31 | ||
32 | #include <asm/a.out.h> | ||
33 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
34 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
35 | 34 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index e48a630ae266..67fe72ce6ac7 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -534,7 +534,6 @@ void __init bdev_cache_init(void) | |||
534 | if (err) | 534 | if (err) |
535 | panic("Cannot register bdev pseudo-fs"); | 535 | panic("Cannot register bdev pseudo-fs"); |
536 | bd_mnt = kern_mount(&bd_type); | 536 | bd_mnt = kern_mount(&bd_type); |
537 | err = PTR_ERR(bd_mnt); | ||
538 | if (IS_ERR(bd_mnt)) | 537 | if (IS_ERR(bd_mnt)) |
539 | panic("Cannot create bdev pseudo-fs"); | 538 | panic("Cannot create bdev pseudo-fs"); |
540 | blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ | 539 | blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ |
@@ -1398,19 +1397,19 @@ struct block_device *lookup_bdev(const char *path) | |||
1398 | if (error) | 1397 | if (error) |
1399 | return ERR_PTR(error); | 1398 | return ERR_PTR(error); |
1400 | 1399 | ||
1401 | inode = nd.dentry->d_inode; | 1400 | inode = nd.path.dentry->d_inode; |
1402 | error = -ENOTBLK; | 1401 | error = -ENOTBLK; |
1403 | if (!S_ISBLK(inode->i_mode)) | 1402 | if (!S_ISBLK(inode->i_mode)) |
1404 | goto fail; | 1403 | goto fail; |
1405 | error = -EACCES; | 1404 | error = -EACCES; |
1406 | if (nd.mnt->mnt_flags & MNT_NODEV) | 1405 | if (nd.path.mnt->mnt_flags & MNT_NODEV) |
1407 | goto fail; | 1406 | goto fail; |
1408 | error = -ENOMEM; | 1407 | error = -ENOMEM; |
1409 | bdev = bd_acquire(inode); | 1408 | bdev = bd_acquire(inode); |
1410 | if (!bdev) | 1409 | if (!bdev) |
1411 | goto fail; | 1410 | goto fail; |
1412 | out: | 1411 | out: |
1413 | path_release(&nd); | 1412 | path_put(&nd.path); |
1414 | return bdev; | 1413 | return bdev; |
1415 | fail: | 1414 | fail: |
1416 | bdev = ERR_PTR(error); | 1415 | bdev = ERR_PTR(error); |
diff --git a/fs/buffer.c b/fs/buffer.c index 826baf4f04bc..3ebccf4aa7e3 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -67,14 +67,14 @@ static int sync_buffer(void *word) | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | void fastcall __lock_buffer(struct buffer_head *bh) | 70 | void __lock_buffer(struct buffer_head *bh) |
71 | { | 71 | { |
72 | wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, | 72 | wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer, |
73 | TASK_UNINTERRUPTIBLE); | 73 | TASK_UNINTERRUPTIBLE); |
74 | } | 74 | } |
75 | EXPORT_SYMBOL(__lock_buffer); | 75 | EXPORT_SYMBOL(__lock_buffer); |
76 | 76 | ||
77 | void fastcall unlock_buffer(struct buffer_head *bh) | 77 | void unlock_buffer(struct buffer_head *bh) |
78 | { | 78 | { |
79 | smp_mb__before_clear_bit(); | 79 | smp_mb__before_clear_bit(); |
80 | clear_buffer_locked(bh); | 80 | clear_buffer_locked(bh); |
@@ -678,7 +678,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode) | |||
678 | } else { | 678 | } else { |
679 | BUG_ON(mapping->assoc_mapping != buffer_mapping); | 679 | BUG_ON(mapping->assoc_mapping != buffer_mapping); |
680 | } | 680 | } |
681 | if (list_empty(&bh->b_assoc_buffers)) { | 681 | if (!bh->b_assoc_map) { |
682 | spin_lock(&buffer_mapping->private_lock); | 682 | spin_lock(&buffer_mapping->private_lock); |
683 | list_move_tail(&bh->b_assoc_buffers, | 683 | list_move_tail(&bh->b_assoc_buffers, |
684 | &mapping->private_list); | 684 | &mapping->private_list); |
@@ -794,6 +794,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
794 | { | 794 | { |
795 | struct buffer_head *bh; | 795 | struct buffer_head *bh; |
796 | struct list_head tmp; | 796 | struct list_head tmp; |
797 | struct address_space *mapping; | ||
797 | int err = 0, err2; | 798 | int err = 0, err2; |
798 | 799 | ||
799 | INIT_LIST_HEAD(&tmp); | 800 | INIT_LIST_HEAD(&tmp); |
@@ -801,9 +802,14 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
801 | spin_lock(lock); | 802 | spin_lock(lock); |
802 | while (!list_empty(list)) { | 803 | while (!list_empty(list)) { |
803 | bh = BH_ENTRY(list->next); | 804 | bh = BH_ENTRY(list->next); |
805 | mapping = bh->b_assoc_map; | ||
804 | __remove_assoc_queue(bh); | 806 | __remove_assoc_queue(bh); |
807 | /* Avoid race with mark_buffer_dirty_inode() which does | ||
808 | * a lockless check and we rely on seeing the dirty bit */ | ||
809 | smp_mb(); | ||
805 | if (buffer_dirty(bh) || buffer_locked(bh)) { | 810 | if (buffer_dirty(bh) || buffer_locked(bh)) { |
806 | list_add(&bh->b_assoc_buffers, &tmp); | 811 | list_add(&bh->b_assoc_buffers, &tmp); |
812 | bh->b_assoc_map = mapping; | ||
807 | if (buffer_dirty(bh)) { | 813 | if (buffer_dirty(bh)) { |
808 | get_bh(bh); | 814 | get_bh(bh); |
809 | spin_unlock(lock); | 815 | spin_unlock(lock); |
@@ -822,8 +828,17 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) | |||
822 | 828 | ||
823 | while (!list_empty(&tmp)) { | 829 | while (!list_empty(&tmp)) { |
824 | bh = BH_ENTRY(tmp.prev); | 830 | bh = BH_ENTRY(tmp.prev); |
825 | list_del_init(&bh->b_assoc_buffers); | ||
826 | get_bh(bh); | 831 | get_bh(bh); |
832 | mapping = bh->b_assoc_map; | ||
833 | __remove_assoc_queue(bh); | ||
834 | /* Avoid race with mark_buffer_dirty_inode() which does | ||
835 | * a lockless check and we rely on seeing the dirty bit */ | ||
836 | smp_mb(); | ||
837 | if (buffer_dirty(bh)) { | ||
838 | list_add(&bh->b_assoc_buffers, | ||
839 | &bh->b_assoc_map->private_list); | ||
840 | bh->b_assoc_map = mapping; | ||
841 | } | ||
827 | spin_unlock(lock); | 842 | spin_unlock(lock); |
828 | wait_on_buffer(bh); | 843 | wait_on_buffer(bh); |
829 | if (!buffer_uptodate(bh)) | 844 | if (!buffer_uptodate(bh)) |
@@ -1164,7 +1179,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) | |||
1164 | * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, | 1179 | * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock, |
1165 | * mapping->tree_lock and the global inode_lock. | 1180 | * mapping->tree_lock and the global inode_lock. |
1166 | */ | 1181 | */ |
1167 | void fastcall mark_buffer_dirty(struct buffer_head *bh) | 1182 | void mark_buffer_dirty(struct buffer_head *bh) |
1168 | { | 1183 | { |
1169 | WARN_ON_ONCE(!buffer_uptodate(bh)); | 1184 | WARN_ON_ONCE(!buffer_uptodate(bh)); |
1170 | if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) | 1185 | if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) |
@@ -1195,7 +1210,7 @@ void __brelse(struct buffer_head * buf) | |||
1195 | void __bforget(struct buffer_head *bh) | 1210 | void __bforget(struct buffer_head *bh) |
1196 | { | 1211 | { |
1197 | clear_buffer_dirty(bh); | 1212 | clear_buffer_dirty(bh); |
1198 | if (!list_empty(&bh->b_assoc_buffers)) { | 1213 | if (bh->b_assoc_map) { |
1199 | struct address_space *buffer_mapping = bh->b_page->mapping; | 1214 | struct address_space *buffer_mapping = bh->b_page->mapping; |
1200 | 1215 | ||
1201 | spin_lock(&buffer_mapping->private_lock); | 1216 | spin_lock(&buffer_mapping->private_lock); |
@@ -1436,6 +1451,7 @@ void invalidate_bh_lrus(void) | |||
1436 | { | 1451 | { |
1437 | on_each_cpu(invalidate_bh_lru, NULL, 1, 1); | 1452 | on_each_cpu(invalidate_bh_lru, NULL, 1, 1); |
1438 | } | 1453 | } |
1454 | EXPORT_SYMBOL_GPL(invalidate_bh_lrus); | ||
1439 | 1455 | ||
1440 | void set_bh_page(struct buffer_head *bh, | 1456 | void set_bh_page(struct buffer_head *bh, |
1441 | struct page *page, unsigned long offset) | 1457 | struct page *page, unsigned long offset) |
@@ -3021,7 +3037,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free) | |||
3021 | do { | 3037 | do { |
3022 | struct buffer_head *next = bh->b_this_page; | 3038 | struct buffer_head *next = bh->b_this_page; |
3023 | 3039 | ||
3024 | if (!list_empty(&bh->b_assoc_buffers)) | 3040 | if (bh->b_assoc_map) |
3025 | __remove_assoc_queue(bh); | 3041 | __remove_assoc_queue(bh); |
3026 | bh = next; | 3042 | bh = next; |
3027 | } while (bh != head); | 3043 | } while (bh != head); |
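The buffer.c hunks change the "is this buffer associated with an inode?" test from list_empty(&bh->b_assoc_buffers) to bh->b_assoc_map, because fsync_buffers_list() now parks still-dirty buffers on a temporary list while leaving b_assoc_map set, with smp_mb() guarding the lockless check in mark_buffer_dirty_inode(). The caller-facing API is unchanged; a minimal sketch of it, with the myfs names being hypothetical:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* Dirty a metadata buffer and associate it with the inode's mapping so
     * that a later fsync on the inode will find and write it. */
    static void myfs_dirty_metadata(struct inode *inode, struct buffer_head *bh)
    {
            mark_buffer_dirty_inode(bh, inode);
    }

    /* Write out and wait on every buffer currently associated with the inode. */
    static int myfs_sync_metadata(struct inode *inode)
    {
            return sync_mapping_buffers(inode->i_mapping);
    }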
diff --git a/fs/char_dev.c b/fs/char_dev.c index 2c7a8b5b4598..038674aa88a7 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c | |||
@@ -357,7 +357,7 @@ void cdev_put(struct cdev *p) | |||
357 | /* | 357 | /* |
358 | * Called every time a character special file is opened | 358 | * Called every time a character special file is opened |
359 | */ | 359 | */ |
360 | int chrdev_open(struct inode * inode, struct file * filp) | 360 | static int chrdev_open(struct inode *inode, struct file *filp) |
361 | { | 361 | { |
362 | struct cdev *p; | 362 | struct cdev *p; |
363 | struct cdev *new = NULL; | 363 | struct cdev *new = NULL; |
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index dd3bba4134b5..7f8838253410 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -259,18 +259,18 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd, | |||
259 | int err; | 259 | int err; |
260 | 260 | ||
261 | mntget(newmnt); | 261 | mntget(newmnt); |
262 | err = do_add_mount(newmnt, nd, nd->mnt->mnt_flags, mntlist); | 262 | err = do_add_mount(newmnt, nd, nd->path.mnt->mnt_flags, mntlist); |
263 | switch (err) { | 263 | switch (err) { |
264 | case 0: | 264 | case 0: |
265 | dput(nd->dentry); | 265 | dput(nd->path.dentry); |
266 | mntput(nd->mnt); | 266 | mntput(nd->path.mnt); |
267 | nd->mnt = newmnt; | 267 | nd->path.mnt = newmnt; |
268 | nd->dentry = dget(newmnt->mnt_root); | 268 | nd->path.dentry = dget(newmnt->mnt_root); |
269 | break; | 269 | break; |
270 | case -EBUSY: | 270 | case -EBUSY: |
271 | /* someone else made a mount here whilst we were busy */ | 271 | /* someone else made a mount here whilst we were busy */ |
272 | while (d_mountpoint(nd->dentry) && | 272 | while (d_mountpoint(nd->path.dentry) && |
273 | follow_down(&nd->mnt, &nd->dentry)) | 273 | follow_down(&nd->path.mnt, &nd->path.dentry)) |
274 | ; | 274 | ; |
275 | err = 0; | 275 | err = 0; |
276 | default: | 276 | default: |
@@ -307,8 +307,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
307 | 307 | ||
308 | xid = GetXid(); | 308 | xid = GetXid(); |
309 | 309 | ||
310 | dput(nd->dentry); | 310 | dput(nd->path.dentry); |
311 | nd->dentry = dget(dentry); | 311 | nd->path.dentry = dget(dentry); |
312 | 312 | ||
313 | cifs_sb = CIFS_SB(dentry->d_inode->i_sb); | 313 | cifs_sb = CIFS_SB(dentry->d_inode->i_sb); |
314 | ses = cifs_sb->tcon->ses; | 314 | ses = cifs_sb->tcon->ses; |
@@ -340,7 +340,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
340 | rc = -EINVAL; | 340 | rc = -EINVAL; |
341 | goto out_err; | 341 | goto out_err; |
342 | } | 342 | } |
343 | mnt = cifs_dfs_do_refmount(nd->mnt, nd->dentry, | 343 | mnt = cifs_dfs_do_refmount(nd->path.mnt, |
344 | nd->path.dentry, | ||
344 | referrals[i].node_name); | 345 | referrals[i].node_name); |
345 | cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", | 346 | cFYI(1, ("%s: cifs_dfs_do_refmount:%s , mnt:%p", |
346 | __FUNCTION__, | 347 | __FUNCTION__, |
@@ -357,7 +358,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
357 | if (IS_ERR(mnt)) | 358 | if (IS_ERR(mnt)) |
358 | goto out_err; | 359 | goto out_err; |
359 | 360 | ||
360 | nd->mnt->mnt_flags |= MNT_SHRINKABLE; | 361 | nd->path.mnt->mnt_flags |= MNT_SHRINKABLE; |
361 | rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list); | 362 | rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list); |
362 | 363 | ||
363 | out: | 364 | out: |
@@ -367,7 +368,7 @@ out: | |||
367 | cFYI(1, ("leaving %s" , __FUNCTION__)); | 368 | cFYI(1, ("leaving %s" , __FUNCTION__)); |
368 | return ERR_PTR(rc); | 369 | return ERR_PTR(rc); |
369 | out_err: | 370 | out_err: |
370 | path_release(nd); | 371 | path_put(&nd->path); |
371 | goto out; | 372 | goto out; |
372 | } | 373 | } |
373 | 374 | ||
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 29bbf655b99c..a04b17e5a9d0 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -147,10 +147,11 @@ cifs_read_super(struct super_block *sb, void *data, | |||
147 | #endif | 147 | #endif |
148 | sb->s_blocksize = CIFS_MAX_MSGSIZE; | 148 | sb->s_blocksize = CIFS_MAX_MSGSIZE; |
149 | sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ | 149 | sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ |
150 | inode = iget(sb, ROOT_I); | 150 | inode = cifs_iget(sb, ROOT_I); |
151 | 151 | ||
152 | if (!inode) { | 152 | if (IS_ERR(inode)) { |
153 | rc = -ENOMEM; | 153 | rc = PTR_ERR(inode); |
154 | inode = NULL; | ||
154 | goto out_no_root; | 155 | goto out_no_root; |
155 | } | 156 | } |
156 | 157 | ||
@@ -517,7 +518,6 @@ static int cifs_remount(struct super_block *sb, int *flags, char *data) | |||
517 | } | 518 | } |
518 | 519 | ||
519 | static const struct super_operations cifs_super_ops = { | 520 | static const struct super_operations cifs_super_ops = { |
520 | .read_inode = cifs_read_inode, | ||
521 | .put_super = cifs_put_super, | 521 | .put_super = cifs_put_super, |
522 | .statfs = cifs_statfs, | 522 | .statfs = cifs_statfs, |
523 | .alloc_inode = cifs_alloc_inode, | 523 | .alloc_inode = cifs_alloc_inode, |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 195b14de5567..68978306c3ca 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -44,6 +44,7 @@ extern void cifs_read_inode(struct inode *); | |||
44 | 44 | ||
45 | /* Functions related to inodes */ | 45 | /* Functions related to inodes */ |
46 | extern const struct inode_operations cifs_dir_inode_ops; | 46 | extern const struct inode_operations cifs_dir_inode_ops; |
47 | extern struct inode *cifs_iget(struct super_block *, unsigned long); | ||
47 | extern int cifs_create(struct inode *, struct dentry *, int, | 48 | extern int cifs_create(struct inode *, struct dentry *, int, |
48 | struct nameidata *); | 49 | struct nameidata *); |
49 | extern struct dentry *cifs_lookup(struct inode *, struct dentry *, | 50 | extern struct dentry *cifs_lookup(struct inode *, struct dentry *, |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index e7cd392a796a..1d8aa0385ef7 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -566,10 +566,18 @@ static const struct inode_operations cifs_ipc_inode_ops = { | |||
566 | }; | 566 | }; |
567 | 567 | ||
568 | /* gets root inode */ | 568 | /* gets root inode */ |
569 | void cifs_read_inode(struct inode *inode) | 569 | struct inode *cifs_iget(struct super_block *sb, unsigned long ino) |
570 | { | 570 | { |
571 | int xid, rc; | 571 | int xid; |
572 | struct cifs_sb_info *cifs_sb; | 572 | struct cifs_sb_info *cifs_sb; |
573 | struct inode *inode; | ||
574 | long rc; | ||
575 | |||
576 | inode = iget_locked(sb, ino); | ||
577 | if (!inode) | ||
578 | return ERR_PTR(-ENOMEM); | ||
579 | if (!(inode->i_state & I_NEW)) | ||
580 | return inode; | ||
573 | 581 | ||
574 | cifs_sb = CIFS_SB(inode->i_sb); | 582 | cifs_sb = CIFS_SB(inode->i_sb); |
575 | xid = GetXid(); | 583 | xid = GetXid(); |
@@ -586,10 +594,18 @@ void cifs_read_inode(struct inode *inode) | |||
586 | inode->i_fop = &simple_dir_operations; | 594 | inode->i_fop = &simple_dir_operations; |
587 | inode->i_uid = cifs_sb->mnt_uid; | 595 | inode->i_uid = cifs_sb->mnt_uid; |
588 | inode->i_gid = cifs_sb->mnt_gid; | 596 | inode->i_gid = cifs_sb->mnt_gid; |
597 | _FreeXid(xid); | ||
598 | iget_failed(inode); | ||
599 | return ERR_PTR(rc); | ||
589 | } | 600 | } |
590 | 601 | ||
591 | /* can not call macro FreeXid here since in a void func */ | 602 | unlock_new_inode(inode); |
603 | |||
604 | /* can not call macro FreeXid here since in a void func | ||
605 | * TODO: This is no longer true | ||
606 | */ | ||
592 | _FreeXid(xid); | 607 | _FreeXid(xid); |
608 | return inode; | ||
593 | } | 609 | } |
594 | 610 | ||
595 | int cifs_unlink(struct inode *inode, struct dentry *direntry) | 611 | int cifs_unlink(struct inode *inode, struct dentry *direntry) |
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 2bf3026adc80..c21a1f552a63 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c | |||
@@ -75,12 +75,12 @@ static int coda_pioctl(struct inode * inode, struct file * filp, | |||
75 | if ( error ) { | 75 | if ( error ) { |
76 | return error; | 76 | return error; |
77 | } else { | 77 | } else { |
78 | target_inode = nd.dentry->d_inode; | 78 | target_inode = nd.path.dentry->d_inode; |
79 | } | 79 | } |
80 | 80 | ||
81 | /* return if it is not a Coda inode */ | 81 | /* return if it is not a Coda inode */ |
82 | if ( target_inode->i_sb != inode->i_sb ) { | 82 | if ( target_inode->i_sb != inode->i_sb ) { |
83 | path_release(&nd); | 83 | path_put(&nd.path); |
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | } | 85 | } |
86 | 86 | ||
@@ -89,7 +89,7 @@ static int coda_pioctl(struct inode * inode, struct file * filp, | |||
89 | 89 | ||
90 | error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); | 90 | error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data); |
91 | 91 | ||
92 | path_release(&nd); | 92 | path_put(&nd.path); |
93 | return error; | 93 | return error; |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/fs/compat.c b/fs/compat.c index 69baca5ad608..2ce4456aad30 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -241,10 +241,10 @@ asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs | |||
241 | error = user_path_walk(path, &nd); | 241 | error = user_path_walk(path, &nd); |
242 | if (!error) { | 242 | if (!error) { |
243 | struct kstatfs tmp; | 243 | struct kstatfs tmp; |
244 | error = vfs_statfs(nd.dentry, &tmp); | 244 | error = vfs_statfs(nd.path.dentry, &tmp); |
245 | if (!error) | 245 | if (!error) |
246 | error = put_compat_statfs(buf, &tmp); | 246 | error = put_compat_statfs(buf, &tmp); |
247 | path_release(&nd); | 247 | path_put(&nd.path); |
248 | } | 248 | } |
249 | return error; | 249 | return error; |
250 | } | 250 | } |
@@ -309,10 +309,10 @@ asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, s | |||
309 | error = user_path_walk(path, &nd); | 309 | error = user_path_walk(path, &nd); |
310 | if (!error) { | 310 | if (!error) { |
311 | struct kstatfs tmp; | 311 | struct kstatfs tmp; |
312 | error = vfs_statfs(nd.dentry, &tmp); | 312 | error = vfs_statfs(nd.path.dentry, &tmp); |
313 | if (!error) | 313 | if (!error) |
314 | error = put_compat_statfs64(buf, &tmp); | 314 | error = put_compat_statfs64(buf, &tmp); |
315 | path_release(&nd); | 315 | path_put(&nd.path); |
316 | } | 316 | } |
317 | return error; | 317 | return error; |
318 | } | 318 | } |
@@ -702,9 +702,6 @@ static int do_nfs4_super_data_conv(void *raw_data) | |||
702 | real->flags = raw->flags; | 702 | real->flags = raw->flags; |
703 | real->version = raw->version; | 703 | real->version = raw->version; |
704 | } | 704 | } |
705 | else { | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | 705 | ||
709 | return 0; | 706 | return 0; |
710 | } | 707 | } |
@@ -2083,51 +2080,6 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2) | |||
2083 | 2080 | ||
2084 | #ifdef CONFIG_EPOLL | 2081 | #ifdef CONFIG_EPOLL |
2085 | 2082 | ||
2086 | #ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT | ||
2087 | asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd, | ||
2088 | struct compat_epoll_event __user *event) | ||
2089 | { | ||
2090 | long err = 0; | ||
2091 | struct compat_epoll_event user; | ||
2092 | struct epoll_event __user *kernel = NULL; | ||
2093 | |||
2094 | if (event) { | ||
2095 | if (copy_from_user(&user, event, sizeof(user))) | ||
2096 | return -EFAULT; | ||
2097 | kernel = compat_alloc_user_space(sizeof(struct epoll_event)); | ||
2098 | err |= __put_user(user.events, &kernel->events); | ||
2099 | err |= __put_user(user.data, &kernel->data); | ||
2100 | } | ||
2101 | |||
2102 | return err ? err : sys_epoll_ctl(epfd, op, fd, kernel); | ||
2103 | } | ||
2104 | |||
2105 | |||
2106 | asmlinkage long compat_sys_epoll_wait(int epfd, | ||
2107 | struct compat_epoll_event __user *events, | ||
2108 | int maxevents, int timeout) | ||
2109 | { | ||
2110 | long i, ret, err = 0; | ||
2111 | struct epoll_event __user *kbuf; | ||
2112 | struct epoll_event ev; | ||
2113 | |||
2114 | if ((maxevents <= 0) || | ||
2115 | (maxevents > (INT_MAX / sizeof(struct epoll_event)))) | ||
2116 | return -EINVAL; | ||
2117 | kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents); | ||
2118 | ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout); | ||
2119 | for (i = 0; i < ret; i++) { | ||
2120 | err |= __get_user(ev.events, &kbuf[i].events); | ||
2121 | err |= __get_user(ev.data, &kbuf[i].data); | ||
2122 | err |= __put_user(ev.events, &events->events); | ||
2123 | err |= __put_user_unaligned(ev.data, &events->data); | ||
2124 | events++; | ||
2125 | } | ||
2126 | |||
2127 | return err ? -EFAULT: ret; | ||
2128 | } | ||
2129 | #endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */ | ||
2130 | |||
2131 | #ifdef TIF_RESTORE_SIGMASK | 2083 | #ifdef TIF_RESTORE_SIGMASK |
2132 | asmlinkage long compat_sys_epoll_pwait(int epfd, | 2084 | asmlinkage long compat_sys_epoll_pwait(int epfd, |
2133 | struct compat_epoll_event __user *events, | 2085 | struct compat_epoll_event __user *events, |
@@ -2153,11 +2105,7 @@ asmlinkage long compat_sys_epoll_pwait(int epfd, | |||
2153 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); | 2105 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); |
2154 | } | 2106 | } |
2155 | 2107 | ||
2156 | #ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT | ||
2157 | err = compat_sys_epoll_wait(epfd, events, maxevents, timeout); | ||
2158 | #else | ||
2159 | err = sys_epoll_wait(epfd, events, maxevents, timeout); | 2108 | err = sys_epoll_wait(epfd, events, maxevents, timeout); |
2160 | #endif | ||
2161 | 2109 | ||
2162 | /* | 2110 | /* |
2163 | * If we changed the signal mask, we need to restore the original one. | 2111 | * If we changed the signal mask, we need to restore the original one. |
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index ffdc022cae64..c6e72aebd16b 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
@@ -78,7 +78,6 @@ | |||
78 | #include <linux/mii.h> | 78 | #include <linux/mii.h> |
79 | #include <linux/if_bonding.h> | 79 | #include <linux/if_bonding.h> |
80 | #include <linux/watchdog.h> | 80 | #include <linux/watchdog.h> |
81 | #include <linux/dm-ioctl.h> | ||
82 | 81 | ||
83 | #include <linux/soundcard.h> | 82 | #include <linux/soundcard.h> |
84 | #include <linux/lp.h> | 83 | #include <linux/lp.h> |
@@ -1993,39 +1992,6 @@ COMPATIBLE_IOCTL(STOP_ARRAY_RO) | |||
1993 | COMPATIBLE_IOCTL(RESTART_ARRAY_RW) | 1992 | COMPATIBLE_IOCTL(RESTART_ARRAY_RW) |
1994 | COMPATIBLE_IOCTL(GET_BITMAP_FILE) | 1993 | COMPATIBLE_IOCTL(GET_BITMAP_FILE) |
1995 | ULONG_IOCTL(SET_BITMAP_FILE) | 1994 | ULONG_IOCTL(SET_BITMAP_FILE) |
1996 | /* DM */ | ||
1997 | COMPATIBLE_IOCTL(DM_VERSION_32) | ||
1998 | COMPATIBLE_IOCTL(DM_REMOVE_ALL_32) | ||
1999 | COMPATIBLE_IOCTL(DM_LIST_DEVICES_32) | ||
2000 | COMPATIBLE_IOCTL(DM_DEV_CREATE_32) | ||
2001 | COMPATIBLE_IOCTL(DM_DEV_REMOVE_32) | ||
2002 | COMPATIBLE_IOCTL(DM_DEV_RENAME_32) | ||
2003 | COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32) | ||
2004 | COMPATIBLE_IOCTL(DM_DEV_STATUS_32) | ||
2005 | COMPATIBLE_IOCTL(DM_DEV_WAIT_32) | ||
2006 | COMPATIBLE_IOCTL(DM_TABLE_LOAD_32) | ||
2007 | COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32) | ||
2008 | COMPATIBLE_IOCTL(DM_TABLE_DEPS_32) | ||
2009 | COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) | ||
2010 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) | ||
2011 | COMPATIBLE_IOCTL(DM_TARGET_MSG_32) | ||
2012 | COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32) | ||
2013 | COMPATIBLE_IOCTL(DM_VERSION) | ||
2014 | COMPATIBLE_IOCTL(DM_REMOVE_ALL) | ||
2015 | COMPATIBLE_IOCTL(DM_LIST_DEVICES) | ||
2016 | COMPATIBLE_IOCTL(DM_DEV_CREATE) | ||
2017 | COMPATIBLE_IOCTL(DM_DEV_REMOVE) | ||
2018 | COMPATIBLE_IOCTL(DM_DEV_RENAME) | ||
2019 | COMPATIBLE_IOCTL(DM_DEV_SUSPEND) | ||
2020 | COMPATIBLE_IOCTL(DM_DEV_STATUS) | ||
2021 | COMPATIBLE_IOCTL(DM_DEV_WAIT) | ||
2022 | COMPATIBLE_IOCTL(DM_TABLE_LOAD) | ||
2023 | COMPATIBLE_IOCTL(DM_TABLE_CLEAR) | ||
2024 | COMPATIBLE_IOCTL(DM_TABLE_DEPS) | ||
2025 | COMPATIBLE_IOCTL(DM_TABLE_STATUS) | ||
2026 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS) | ||
2027 | COMPATIBLE_IOCTL(DM_TARGET_MSG) | ||
2028 | COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY) | ||
2029 | /* Big K */ | 1995 | /* Big K */ |
2030 | COMPATIBLE_IOCTL(PIO_FONT) | 1996 | COMPATIBLE_IOCTL(PIO_FONT) |
2031 | COMPATIBLE_IOCTL(GIO_FONT) | 1997 | COMPATIBLE_IOCTL(GIO_FONT) |
@@ -2887,7 +2853,7 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd, | |||
2887 | /* find the name of the device. */ | 2853 | /* find the name of the device. */ |
2888 | path = (char *)__get_free_page(GFP_KERNEL); | 2854 | path = (char *)__get_free_page(GFP_KERNEL); |
2889 | if (path) { | 2855 | if (path) { |
2890 | fn = d_path(filp->f_path.dentry, filp->f_path.mnt, path, PAGE_SIZE); | 2856 | fn = d_path(&filp->f_path, path, PAGE_SIZE); |
2891 | if (IS_ERR(fn)) | 2857 | if (IS_ERR(fn)) |
2892 | fn = "?"; | 2858 | fn = "?"; |
2893 | } | 2859 | } |
@@ -2986,7 +2952,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, | |||
2986 | } | 2952 | } |
2987 | 2953 | ||
2988 | do_ioctl: | 2954 | do_ioctl: |
2989 | error = vfs_ioctl(filp, fd, cmd, arg); | 2955 | error = do_vfs_ioctl(filp, fd, cmd, arg); |
2990 | out_fput: | 2956 | out_fput: |
2991 | fput_light(filp, fput_needed); | 2957 | fput_light(filp, fput_needed); |
2992 | out: | 2958 | out: |
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index 22700d2857da..78929ea84ff2 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c | |||
@@ -99,11 +99,11 @@ static int get_target(const char *symname, struct nameidata *nd, | |||
99 | 99 | ||
100 | ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd); | 100 | ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd); |
101 | if (!ret) { | 101 | if (!ret) { |
102 | if (nd->dentry->d_sb == configfs_sb) { | 102 | if (nd->path.dentry->d_sb == configfs_sb) { |
103 | *target = configfs_get_config_item(nd->dentry); | 103 | *target = configfs_get_config_item(nd->path.dentry); |
104 | if (!*target) { | 104 | if (!*target) { |
105 | ret = -ENOENT; | 105 | ret = -ENOENT; |
106 | path_release(nd); | 106 | path_put(&nd->path); |
107 | } | 107 | } |
108 | } else | 108 | } else |
109 | ret = -EPERM; | 109 | ret = -EPERM; |
@@ -141,7 +141,7 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna | |||
141 | ret = create_link(parent_item, target_item, dentry); | 141 | ret = create_link(parent_item, target_item, dentry); |
142 | 142 | ||
143 | config_item_put(target_item); | 143 | config_item_put(target_item); |
144 | path_release(&nd); | 144 | path_put(&nd.path); |
145 | 145 | ||
146 | out_put: | 146 | out_put: |
147 | config_item_put(parent_item); | 147 | config_item_put(parent_item); |
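The configfs hunks above switch from nd->dentry / path_release(nd) to nd->path.dentry / path_put(&nd->path). A hedged kernel-style sketch of the updated lookup-and-release pattern is below; example_check_target() and example_sb are hypothetical stand-ins, not configfs code.

#include <linux/namei.h>
#include <linux/path.h>
#include <linux/fs.h>

static int example_check_target(const char *symname, struct super_block *example_sb)
{
	struct nameidata nd;
	int ret;

	ret = path_lookup(symname, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
	if (ret)
		return ret;

	if (nd.path.dentry->d_sb != example_sb)
		ret = -EPERM;		/* target lives on some other filesystem */

	path_put(&nd.path);		/* one call drops both dentry and vfsmount refs */
	return ret;
}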
diff --git a/fs/dcache.c b/fs/dcache.c index d9ca1e5ceb92..43455776711e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -89,12 +89,20 @@ static void d_free(struct dentry *dentry) | |||
89 | if (dentry->d_op && dentry->d_op->d_release) | 89 | if (dentry->d_op && dentry->d_op->d_release) |
90 | dentry->d_op->d_release(dentry); | 90 | dentry->d_op->d_release(dentry); |
91 | /* if dentry was never inserted into hash, immediate free is OK */ | 91 | /* if dentry was never inserted into hash, immediate free is OK */ |
92 | if (dentry->d_hash.pprev == NULL) | 92 | if (hlist_unhashed(&dentry->d_hash)) |
93 | __d_free(dentry); | 93 | __d_free(dentry); |
94 | else | 94 | else |
95 | call_rcu(&dentry->d_u.d_rcu, d_callback); | 95 | call_rcu(&dentry->d_u.d_rcu, d_callback); |
96 | } | 96 | } |
97 | 97 | ||
98 | static void dentry_lru_remove(struct dentry *dentry) | ||
99 | { | ||
100 | if (!list_empty(&dentry->d_lru)) { | ||
101 | list_del_init(&dentry->d_lru); | ||
102 | dentry_stat.nr_unused--; | ||
103 | } | ||
104 | } | ||
105 | |||
98 | /* | 106 | /* |
99 | * Release the dentry's inode, using the filesystem | 107 | * Release the dentry's inode, using the filesystem |
100 | * d_iput() operation if defined. | 108 | * d_iput() operation if defined. |
@@ -211,13 +219,7 @@ repeat: | |||
211 | unhash_it: | 219 | unhash_it: |
212 | __d_drop(dentry); | 220 | __d_drop(dentry); |
213 | kill_it: | 221 | kill_it: |
214 | /* If dentry was on d_lru list | 222 | dentry_lru_remove(dentry); |
215 | * delete it from there | ||
216 | */ | ||
217 | if (!list_empty(&dentry->d_lru)) { | ||
218 | list_del(&dentry->d_lru); | ||
219 | dentry_stat.nr_unused--; | ||
220 | } | ||
221 | dentry = d_kill(dentry); | 223 | dentry = d_kill(dentry); |
222 | if (dentry) | 224 | if (dentry) |
223 | goto repeat; | 225 | goto repeat; |
@@ -285,10 +287,7 @@ int d_invalidate(struct dentry * dentry) | |||
285 | static inline struct dentry * __dget_locked(struct dentry *dentry) | 287 | static inline struct dentry * __dget_locked(struct dentry *dentry) |
286 | { | 288 | { |
287 | atomic_inc(&dentry->d_count); | 289 | atomic_inc(&dentry->d_count); |
288 | if (!list_empty(&dentry->d_lru)) { | 290 | dentry_lru_remove(dentry); |
289 | dentry_stat.nr_unused--; | ||
290 | list_del_init(&dentry->d_lru); | ||
291 | } | ||
292 | return dentry; | 291 | return dentry; |
293 | } | 292 | } |
294 | 293 | ||
@@ -404,10 +403,7 @@ static void prune_one_dentry(struct dentry * dentry) | |||
404 | 403 | ||
405 | if (dentry->d_op && dentry->d_op->d_delete) | 404 | if (dentry->d_op && dentry->d_op->d_delete) |
406 | dentry->d_op->d_delete(dentry); | 405 | dentry->d_op->d_delete(dentry); |
407 | if (!list_empty(&dentry->d_lru)) { | 406 | dentry_lru_remove(dentry); |
408 | list_del(&dentry->d_lru); | ||
409 | dentry_stat.nr_unused--; | ||
410 | } | ||
411 | __d_drop(dentry); | 407 | __d_drop(dentry); |
412 | dentry = d_kill(dentry); | 408 | dentry = d_kill(dentry); |
413 | spin_lock(&dcache_lock); | 409 | spin_lock(&dcache_lock); |
@@ -596,10 +592,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
596 | 592 | ||
597 | /* detach this root from the system */ | 593 | /* detach this root from the system */ |
598 | spin_lock(&dcache_lock); | 594 | spin_lock(&dcache_lock); |
599 | if (!list_empty(&dentry->d_lru)) { | 595 | dentry_lru_remove(dentry); |
600 | dentry_stat.nr_unused--; | ||
601 | list_del_init(&dentry->d_lru); | ||
602 | } | ||
603 | __d_drop(dentry); | 596 | __d_drop(dentry); |
604 | spin_unlock(&dcache_lock); | 597 | spin_unlock(&dcache_lock); |
605 | 598 | ||
@@ -613,11 +606,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) | |||
613 | spin_lock(&dcache_lock); | 606 | spin_lock(&dcache_lock); |
614 | list_for_each_entry(loop, &dentry->d_subdirs, | 607 | list_for_each_entry(loop, &dentry->d_subdirs, |
615 | d_u.d_child) { | 608 | d_u.d_child) { |
616 | if (!list_empty(&loop->d_lru)) { | 609 | dentry_lru_remove(loop); |
617 | dentry_stat.nr_unused--; | ||
618 | list_del_init(&loop->d_lru); | ||
619 | } | ||
620 | |||
621 | __d_drop(loop); | 610 | __d_drop(loop); |
622 | cond_resched_lock(&dcache_lock); | 611 | cond_resched_lock(&dcache_lock); |
623 | } | 612 | } |
@@ -799,10 +788,7 @@ resume: | |||
799 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); | 788 | struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); |
800 | next = tmp->next; | 789 | next = tmp->next; |
801 | 790 | ||
802 | if (!list_empty(&dentry->d_lru)) { | 791 | dentry_lru_remove(dentry); |
803 | dentry_stat.nr_unused--; | ||
804 | list_del_init(&dentry->d_lru); | ||
805 | } | ||
806 | /* | 792 | /* |
807 | * move only zero ref count dentries to the end | 793 | * move only zero ref count dentries to the end |
808 | * of the unused list for prune_dcache | 794 | * of the unused list for prune_dcache |
@@ -1408,9 +1394,6 @@ void d_delete(struct dentry * dentry) | |||
1408 | if (atomic_read(&dentry->d_count) == 1) { | 1394 | if (atomic_read(&dentry->d_count) == 1) { |
1409 | dentry_iput(dentry); | 1395 | dentry_iput(dentry); |
1410 | fsnotify_nameremove(dentry, isdir); | 1396 | fsnotify_nameremove(dentry, isdir); |
1411 | |||
1412 | /* remove this and other inotify debug checks after 2.6.18 */ | ||
1413 | dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; | ||
1414 | return; | 1397 | return; |
1415 | } | 1398 | } |
1416 | 1399 | ||
@@ -1779,9 +1762,8 @@ shouldnt_be_hashed: | |||
1779 | * | 1762 | * |
1780 | * "buflen" should be positive. Caller holds the dcache_lock. | 1763 | * "buflen" should be positive. Caller holds the dcache_lock. |
1781 | */ | 1764 | */ |
1782 | static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt, | 1765 | static char *__d_path(struct dentry *dentry, struct vfsmount *vfsmnt, |
1783 | struct dentry *root, struct vfsmount *rootmnt, | 1766 | struct path *root, char *buffer, int buflen) |
1784 | char *buffer, int buflen) | ||
1785 | { | 1767 | { |
1786 | char * end = buffer+buflen; | 1768 | char * end = buffer+buflen; |
1787 | char * retval; | 1769 | char * retval; |
@@ -1806,7 +1788,7 @@ static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt, | |||
1806 | for (;;) { | 1788 | for (;;) { |
1807 | struct dentry * parent; | 1789 | struct dentry * parent; |
1808 | 1790 | ||
1809 | if (dentry == root && vfsmnt == rootmnt) | 1791 | if (dentry == root->dentry && vfsmnt == root->mnt) |
1810 | break; | 1792 | break; |
1811 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { | 1793 | if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { |
1812 | /* Global root? */ | 1794 | /* Global root? */ |
@@ -1847,13 +1829,23 @@ Elong: | |||
1847 | return ERR_PTR(-ENAMETOOLONG); | 1829 | return ERR_PTR(-ENAMETOOLONG); |
1848 | } | 1830 | } |
1849 | 1831 | ||
1850 | /* write full pathname into buffer and return start of pathname */ | 1832 | /** |
1851 | char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, | 1833 | * d_path - return the path of a dentry |
1852 | char *buf, int buflen) | 1834 | * @path: path to report |
1835 | * @buf: buffer to return value in | ||
1836 | * @buflen: buffer length | ||
1837 | * | ||
1838 | * Convert a dentry into an ASCII path name. If the entry has been deleted | ||
1839 | * the string " (deleted)" is appended. Note that this is ambiguous. | ||
1840 | * | ||
1841 | * Returns the buffer or an error code if the path was too long. | ||
1842 | * | ||
1843 | * "buflen" should be positive. Caller holds the dcache_lock. | ||
1844 | */ | ||
1845 | char *d_path(struct path *path, char *buf, int buflen) | ||
1853 | { | 1846 | { |
1854 | char *res; | 1847 | char *res; |
1855 | struct vfsmount *rootmnt; | 1848 | struct path root; |
1856 | struct dentry *root; | ||
1857 | 1849 | ||
1858 | /* | 1850 | /* |
1859 | * We have various synthetic filesystems that never get mounted. On | 1851 | * We have various synthetic filesystems that never get mounted. On |
@@ -1862,18 +1854,17 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt, | |||
1862 | * user wants to identify the object in /proc/pid/fd/. The little hack | 1854 | * user wants to identify the object in /proc/pid/fd/. The little hack |
1863 | * below allows us to generate a name for these objects on demand: | 1855 | * below allows us to generate a name for these objects on demand: |
1864 | */ | 1856 | */ |
1865 | if (dentry->d_op && dentry->d_op->d_dname) | 1857 | if (path->dentry->d_op && path->dentry->d_op->d_dname) |
1866 | return dentry->d_op->d_dname(dentry, buf, buflen); | 1858 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); |
1867 | 1859 | ||
1868 | read_lock(¤t->fs->lock); | 1860 | read_lock(¤t->fs->lock); |
1869 | rootmnt = mntget(current->fs->rootmnt); | 1861 | root = current->fs->root; |
1870 | root = dget(current->fs->root); | 1862 | path_get(¤t->fs->root); |
1871 | read_unlock(¤t->fs->lock); | 1863 | read_unlock(¤t->fs->lock); |
1872 | spin_lock(&dcache_lock); | 1864 | spin_lock(&dcache_lock); |
1873 | res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); | 1865 | res = __d_path(path->dentry, path->mnt, &root, buf, buflen); |
1874 | spin_unlock(&dcache_lock); | 1866 | spin_unlock(&dcache_lock); |
1875 | dput(root); | 1867 | path_put(&root); |
1876 | mntput(rootmnt); | ||
1877 | return res; | 1868 | return res; |
1878 | } | 1869 | } |
1879 | 1870 | ||
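The kernel-doc added above documents the reworked d_path(), which now takes a single struct path instead of a dentry/vfsmount pair. A hedged sketch of a caller is below, mirroring the fs/compat_ioctl.c hunk earlier in this diff; example_report_name() is a hypothetical helper, not kernel code.

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/kernel.h>

static void example_report_name(struct file *filp)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!page)
		return;

	name = d_path(&filp->f_path, page, PAGE_SIZE);	/* single struct path argument */
	if (IS_ERR(name))
		name = "?";				/* path did not fit in the buffer */
	printk(KERN_DEBUG "file backing this fd: %s\n", name);
	free_page((unsigned long)page);
}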
@@ -1919,28 +1910,27 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen, | |||
1919 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size) | 1910 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size) |
1920 | { | 1911 | { |
1921 | int error; | 1912 | int error; |
1922 | struct vfsmount *pwdmnt, *rootmnt; | 1913 | struct path pwd, root; |
1923 | struct dentry *pwd, *root; | ||
1924 | char *page = (char *) __get_free_page(GFP_USER); | 1914 | char *page = (char *) __get_free_page(GFP_USER); |
1925 | 1915 | ||
1926 | if (!page) | 1916 | if (!page) |
1927 | return -ENOMEM; | 1917 | return -ENOMEM; |
1928 | 1918 | ||
1929 | read_lock(¤t->fs->lock); | 1919 | read_lock(¤t->fs->lock); |
1930 | pwdmnt = mntget(current->fs->pwdmnt); | 1920 | pwd = current->fs->pwd; |
1931 | pwd = dget(current->fs->pwd); | 1921 | path_get(¤t->fs->pwd); |
1932 | rootmnt = mntget(current->fs->rootmnt); | 1922 | root = current->fs->root; |
1933 | root = dget(current->fs->root); | 1923 | path_get(¤t->fs->root); |
1934 | read_unlock(¤t->fs->lock); | 1924 | read_unlock(¤t->fs->lock); |
1935 | 1925 | ||
1936 | error = -ENOENT; | 1926 | error = -ENOENT; |
1937 | /* Has the current directory been unlinked? */ | 1927 | /* Has the current directory been unlinked? */ |
1938 | spin_lock(&dcache_lock); | 1928 | spin_lock(&dcache_lock); |
1939 | if (pwd->d_parent == pwd || !d_unhashed(pwd)) { | 1929 | if (pwd.dentry->d_parent == pwd.dentry || !d_unhashed(pwd.dentry)) { |
1940 | unsigned long len; | 1930 | unsigned long len; |
1941 | char * cwd; | 1931 | char * cwd; |
1942 | 1932 | ||
1943 | cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE); | 1933 | cwd = __d_path(pwd.dentry, pwd.mnt, &root, page, PAGE_SIZE); |
1944 | spin_unlock(&dcache_lock); | 1934 | spin_unlock(&dcache_lock); |
1945 | 1935 | ||
1946 | error = PTR_ERR(cwd); | 1936 | error = PTR_ERR(cwd); |
@@ -1958,10 +1948,8 @@ asmlinkage long sys_getcwd(char __user *buf, unsigned long size) | |||
1958 | spin_unlock(&dcache_lock); | 1948 | spin_unlock(&dcache_lock); |
1959 | 1949 | ||
1960 | out: | 1950 | out: |
1961 | dput(pwd); | 1951 | path_put(&pwd); |
1962 | mntput(pwdmnt); | 1952 | path_put(&root); |
1963 | dput(root); | ||
1964 | mntput(rootmnt); | ||
1965 | free_page((unsigned long) page); | 1953 | free_page((unsigned long) page); |
1966 | return error; | 1954 | return error; |
1967 | } | 1955 | } |
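The sys_getcwd() hunk above shows the new idiom of copying current->fs->pwd and ->root by value and pinning them with path_get(), releasing both references later with a single path_put(). A hedged sketch of that snapshot pattern follows; example_snapshot_cwd() is a made-up helper.

#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/path.h>

static void example_snapshot_cwd(struct path *out)
{
	read_lock(&current->fs->lock);
	*out = current->fs->pwd;	/* copies both dentry and vfsmount pointers */
	path_get(out);			/* pin them while the snapshot is in use */
	read_unlock(&current->fs->lock);
}

/* The caller drops both references at once with path_put(out). */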
diff --git a/fs/dcookies.c b/fs/dcookies.c index 792cbf55fa95..855d4b1d619a 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/dcookies.h> | 25 | #include <linux/dcookies.h> |
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/path.h> | ||
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
28 | 29 | ||
29 | /* The dcookies are allocated from a kmem_cache and | 30 | /* The dcookies are allocated from a kmem_cache and |
@@ -31,8 +32,7 @@ | |||
31 | * code here is particularly performance critical | 32 | * code here is particularly performance critical |
32 | */ | 33 | */ |
33 | struct dcookie_struct { | 34 | struct dcookie_struct { |
34 | struct dentry * dentry; | 35 | struct path path; |
35 | struct vfsmount * vfsmnt; | ||
36 | struct list_head hash_list; | 36 | struct list_head hash_list; |
37 | }; | 37 | }; |
38 | 38 | ||
@@ -51,7 +51,7 @@ static inline int is_live(void) | |||
51 | /* The dentry is locked, its address will do for the cookie */ | 51 | /* The dentry is locked, its address will do for the cookie */ |
52 | static inline unsigned long dcookie_value(struct dcookie_struct * dcs) | 52 | static inline unsigned long dcookie_value(struct dcookie_struct * dcs) |
53 | { | 53 | { |
54 | return (unsigned long)dcs->dentry; | 54 | return (unsigned long)dcs->path.dentry; |
55 | } | 55 | } |
56 | 56 | ||
57 | 57 | ||
@@ -89,19 +89,17 @@ static void hash_dcookie(struct dcookie_struct * dcs) | |||
89 | } | 89 | } |
90 | 90 | ||
91 | 91 | ||
92 | static struct dcookie_struct * alloc_dcookie(struct dentry * dentry, | 92 | static struct dcookie_struct *alloc_dcookie(struct path *path) |
93 | struct vfsmount * vfsmnt) | ||
94 | { | 93 | { |
95 | struct dcookie_struct * dcs = kmem_cache_alloc(dcookie_cache, GFP_KERNEL); | 94 | struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache, |
95 | GFP_KERNEL); | ||
96 | if (!dcs) | 96 | if (!dcs) |
97 | return NULL; | 97 | return NULL; |
98 | 98 | ||
99 | dentry->d_cookie = dcs; | 99 | path->dentry->d_cookie = dcs; |
100 | 100 | dcs->path = *path; | |
101 | dcs->dentry = dget(dentry); | 101 | path_get(path); |
102 | dcs->vfsmnt = mntget(vfsmnt); | ||
103 | hash_dcookie(dcs); | 102 | hash_dcookie(dcs); |
104 | |||
105 | return dcs; | 103 | return dcs; |
106 | } | 104 | } |
107 | 105 | ||
@@ -109,8 +107,7 @@ static struct dcookie_struct * alloc_dcookie(struct dentry * dentry, | |||
109 | /* This is the main kernel-side routine that retrieves the cookie | 107 | /* This is the main kernel-side routine that retrieves the cookie |
110 | * value for a dentry/vfsmnt pair. | 108 | * value for a dentry/vfsmnt pair. |
111 | */ | 109 | */ |
112 | int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, | 110 | int get_dcookie(struct path *path, unsigned long *cookie) |
113 | unsigned long * cookie) | ||
114 | { | 111 | { |
115 | int err = 0; | 112 | int err = 0; |
116 | struct dcookie_struct * dcs; | 113 | struct dcookie_struct * dcs; |
@@ -122,10 +119,10 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, | |||
122 | goto out; | 119 | goto out; |
123 | } | 120 | } |
124 | 121 | ||
125 | dcs = dentry->d_cookie; | 122 | dcs = path->dentry->d_cookie; |
126 | 123 | ||
127 | if (!dcs) | 124 | if (!dcs) |
128 | dcs = alloc_dcookie(dentry, vfsmnt); | 125 | dcs = alloc_dcookie(path); |
129 | 126 | ||
130 | if (!dcs) { | 127 | if (!dcs) { |
131 | err = -ENOMEM; | 128 | err = -ENOMEM; |
@@ -174,7 +171,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len) | |||
174 | goto out; | 171 | goto out; |
175 | 172 | ||
176 | /* FIXME: (deleted) ? */ | 173 | /* FIXME: (deleted) ? */ |
177 | path = d_path(dcs->dentry, dcs->vfsmnt, kbuf, PAGE_SIZE); | 174 | path = d_path(&dcs->path, kbuf, PAGE_SIZE); |
178 | 175 | ||
179 | if (IS_ERR(path)) { | 176 | if (IS_ERR(path)) { |
180 | err = PTR_ERR(path); | 177 | err = PTR_ERR(path); |
@@ -254,9 +251,8 @@ out_kmem: | |||
254 | 251 | ||
255 | static void free_dcookie(struct dcookie_struct * dcs) | 252 | static void free_dcookie(struct dcookie_struct * dcs) |
256 | { | 253 | { |
257 | dcs->dentry->d_cookie = NULL; | 254 | dcs->path.dentry->d_cookie = NULL; |
258 | dput(dcs->dentry); | 255 | path_put(&dcs->path); |
259 | mntput(dcs->vfsmnt); | ||
260 | kmem_cache_free(dcookie_cache, dcs); | 256 | kmem_cache_free(dcookie_cache, dcs); |
261 | } | 257 | } |
262 | 258 | ||
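dcookie_struct above now embeds a struct path instead of separate dentry and vfsmount pointers, taking one reference on allocation and dropping it on free. A hedged sketch of the same embedding discipline is below; struct example_tag and its helpers are hypothetical.

#include <linux/path.h>
#include <linux/slab.h>

struct example_tag {
	struct path path;		/* replaces separate dentry + vfsmount fields */
	unsigned long cookie;
};

static struct example_tag *example_tag_alloc(struct path *path)
{
	struct example_tag *tag = kzalloc(sizeof(*tag), GFP_KERNEL);

	if (!tag)
		return NULL;
	tag->path = *path;
	path_get(path);			/* one call instead of dget() + mntget() */
	tag->cookie = (unsigned long)tag->path.dentry;
	return tag;
}

static void example_tag_free(struct example_tag *tag)
{
	path_put(&tag->path);		/* one call instead of dput() + mntput() */
	kfree(tag);
}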
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index fa6b7f7ff914..fddffe4851f5 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
@@ -56,13 +56,15 @@ const struct inode_operations debugfs_link_operations = { | |||
56 | .follow_link = debugfs_follow_link, | 56 | .follow_link = debugfs_follow_link, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static void debugfs_u8_set(void *data, u64 val) | 59 | static int debugfs_u8_set(void *data, u64 val) |
60 | { | 60 | { |
61 | *(u8 *)data = val; | 61 | *(u8 *)data = val; |
62 | return 0; | ||
62 | } | 63 | } |
63 | static u64 debugfs_u8_get(void *data) | 64 | static int debugfs_u8_get(void *data, u64 *val) |
64 | { | 65 | { |
65 | return *(u8 *)data; | 66 | *val = *(u8 *)data; |
67 | return 0; | ||
66 | } | 68 | } |
67 | DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n"); | 69 | DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n"); |
68 | 70 | ||
@@ -97,13 +99,15 @@ struct dentry *debugfs_create_u8(const char *name, mode_t mode, | |||
97 | } | 99 | } |
98 | EXPORT_SYMBOL_GPL(debugfs_create_u8); | 100 | EXPORT_SYMBOL_GPL(debugfs_create_u8); |
99 | 101 | ||
100 | static void debugfs_u16_set(void *data, u64 val) | 102 | static int debugfs_u16_set(void *data, u64 val) |
101 | { | 103 | { |
102 | *(u16 *)data = val; | 104 | *(u16 *)data = val; |
105 | return 0; | ||
103 | } | 106 | } |
104 | static u64 debugfs_u16_get(void *data) | 107 | static int debugfs_u16_get(void *data, u64 *val) |
105 | { | 108 | { |
106 | return *(u16 *)data; | 109 | *val = *(u16 *)data; |
110 | return 0; | ||
107 | } | 111 | } |
108 | DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n"); | 112 | DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n"); |
109 | 113 | ||
@@ -138,13 +142,15 @@ struct dentry *debugfs_create_u16(const char *name, mode_t mode, | |||
138 | } | 142 | } |
139 | EXPORT_SYMBOL_GPL(debugfs_create_u16); | 143 | EXPORT_SYMBOL_GPL(debugfs_create_u16); |
140 | 144 | ||
141 | static void debugfs_u32_set(void *data, u64 val) | 145 | static int debugfs_u32_set(void *data, u64 val) |
142 | { | 146 | { |
143 | *(u32 *)data = val; | 147 | *(u32 *)data = val; |
148 | return 0; | ||
144 | } | 149 | } |
145 | static u64 debugfs_u32_get(void *data) | 150 | static int debugfs_u32_get(void *data, u64 *val) |
146 | { | 151 | { |
147 | return *(u32 *)data; | 152 | *val = *(u32 *)data; |
153 | return 0; | ||
148 | } | 154 | } |
149 | DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n"); | 155 | DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n"); |
150 | 156 | ||
@@ -179,14 +185,16 @@ struct dentry *debugfs_create_u32(const char *name, mode_t mode, | |||
179 | } | 185 | } |
180 | EXPORT_SYMBOL_GPL(debugfs_create_u32); | 186 | EXPORT_SYMBOL_GPL(debugfs_create_u32); |
181 | 187 | ||
182 | static void debugfs_u64_set(void *data, u64 val) | 188 | static int debugfs_u64_set(void *data, u64 val) |
183 | { | 189 | { |
184 | *(u64 *)data = val; | 190 | *(u64 *)data = val; |
191 | return 0; | ||
185 | } | 192 | } |
186 | 193 | ||
187 | static u64 debugfs_u64_get(void *data) | 194 | static int debugfs_u64_get(void *data, u64 *val) |
188 | { | 195 | { |
189 | return *(u64 *)data; | 196 | *val = *(u64 *)data; |
197 | return 0; | ||
190 | } | 198 | } |
191 | DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); | 199 | DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); |
192 | 200 | ||
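The debugfs hunks above change the simple-attribute get/set helpers to return int so that errors can propagate to the caller. A hedged sketch of defining one such attribute with the new signatures is below; example_counter and its fops are hypothetical names, the macro usage follows the pattern shown above.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/types.h>

static u64 example_counter;

static int example_counter_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;			/* a real getter could now return -EBUSY, etc. */
}

static int example_counter_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_counter_fops, example_counter_get,
			example_counter_set, "%llu\n");

/* Registration from a module init path might look like:
 *	debugfs_create_file("example_counter", 0644, NULL,
 *			    &example_counter, &example_counter_fops);
 */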
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 06ef9a255c76..f120e1207874 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c | |||
@@ -20,9 +20,12 @@ | |||
20 | #include <linux/devpts_fs.h> | 20 | #include <linux/devpts_fs.h> |
21 | #include <linux/parser.h> | 21 | #include <linux/parser.h> |
22 | #include <linux/fsnotify.h> | 22 | #include <linux/fsnotify.h> |
23 | #include <linux/seq_file.h> | ||
23 | 24 | ||
24 | #define DEVPTS_SUPER_MAGIC 0x1cd1 | 25 | #define DEVPTS_SUPER_MAGIC 0x1cd1 |
25 | 26 | ||
27 | #define DEVPTS_DEFAULT_MODE 0600 | ||
28 | |||
26 | static struct vfsmount *devpts_mnt; | 29 | static struct vfsmount *devpts_mnt; |
27 | static struct dentry *devpts_root; | 30 | static struct dentry *devpts_root; |
28 | 31 | ||
@@ -32,7 +35,7 @@ static struct { | |||
32 | uid_t uid; | 35 | uid_t uid; |
33 | gid_t gid; | 36 | gid_t gid; |
34 | umode_t mode; | 37 | umode_t mode; |
35 | } config = {.mode = 0600}; | 38 | } config = {.mode = DEVPTS_DEFAULT_MODE}; |
36 | 39 | ||
37 | enum { | 40 | enum { |
38 | Opt_uid, Opt_gid, Opt_mode, | 41 | Opt_uid, Opt_gid, Opt_mode, |
@@ -54,7 +57,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) | |||
54 | config.setgid = 0; | 57 | config.setgid = 0; |
55 | config.uid = 0; | 58 | config.uid = 0; |
56 | config.gid = 0; | 59 | config.gid = 0; |
57 | config.mode = 0600; | 60 | config.mode = DEVPTS_DEFAULT_MODE; |
58 | 61 | ||
59 | while ((p = strsep(&data, ",")) != NULL) { | 62 | while ((p = strsep(&data, ",")) != NULL) { |
60 | substring_t args[MAX_OPT_ARGS]; | 63 | substring_t args[MAX_OPT_ARGS]; |
@@ -81,7 +84,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) | |||
81 | case Opt_mode: | 84 | case Opt_mode: |
82 | if (match_octal(&args[0], &option)) | 85 | if (match_octal(&args[0], &option)) |
83 | return -EINVAL; | 86 | return -EINVAL; |
84 | config.mode = option & ~S_IFMT; | 87 | config.mode = option & S_IALLUGO; |
85 | break; | 88 | break; |
86 | default: | 89 | default: |
87 | printk(KERN_ERR "devpts: called with bogus options\n"); | 90 | printk(KERN_ERR "devpts: called with bogus options\n"); |
@@ -92,9 +95,21 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) | |||
92 | return 0; | 95 | return 0; |
93 | } | 96 | } |
94 | 97 | ||
98 | static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) | ||
99 | { | ||
100 | if (config.setuid) | ||
101 | seq_printf(seq, ",uid=%u", config.uid); | ||
102 | if (config.setgid) | ||
103 | seq_printf(seq, ",gid=%u", config.gid); | ||
104 | seq_printf(seq, ",mode=%03o", config.mode); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
95 | static const struct super_operations devpts_sops = { | 109 | static const struct super_operations devpts_sops = { |
96 | .statfs = simple_statfs, | 110 | .statfs = simple_statfs, |
97 | .remount_fs = devpts_remount, | 111 | .remount_fs = devpts_remount, |
112 | .show_options = devpts_show_options, | ||
98 | }; | 113 | }; |
99 | 114 | ||
100 | static int | 115 | static int |
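devpts gains a ->show_options callback above so its mount options appear in /proc/mounts. A hedged sketch of the same callback shape for a hypothetical filesystem with a single numeric option follows; example_mode and example_sops are made-up names.

#include <linux/fs.h>
#include <linux/seq_file.h>

static unsigned int example_mode = 0600;

static int example_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	/* rendered after the device and generic flags, e.g. ",mode=600" */
	seq_printf(seq, ",mode=%03o", example_mode);
	return 0;
}

static const struct super_operations example_sops = {
	.statfs		= simple_statfs,
	.show_options	= example_show_options,
};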
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 6308122890ca..8bf31e3fbf01 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c | |||
@@ -39,7 +39,6 @@ void dlm_add_ast(struct dlm_lkb *lkb, int type) | |||
39 | dlm_user_add_ast(lkb, type); | 39 | dlm_user_add_ast(lkb, type); |
40 | return; | 40 | return; |
41 | } | 41 | } |
42 | DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb);); | ||
43 | 42 | ||
44 | spin_lock(&ast_queue_lock); | 43 | spin_lock(&ast_queue_lock); |
45 | if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { | 44 | if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) { |
@@ -58,8 +57,8 @@ static void process_asts(void) | |||
58 | struct dlm_ls *ls = NULL; | 57 | struct dlm_ls *ls = NULL; |
59 | struct dlm_rsb *r = NULL; | 58 | struct dlm_rsb *r = NULL; |
60 | struct dlm_lkb *lkb; | 59 | struct dlm_lkb *lkb; |
61 | void (*cast) (long param); | 60 | void (*cast) (void *astparam); |
62 | void (*bast) (long param, int mode); | 61 | void (*bast) (void *astparam, int mode); |
63 | int type = 0, found, bmode; | 62 | int type = 0, found, bmode; |
64 | 63 | ||
65 | for (;;) { | 64 | for (;;) { |
@@ -83,8 +82,8 @@ static void process_asts(void) | |||
83 | if (!found) | 82 | if (!found) |
84 | break; | 83 | break; |
85 | 84 | ||
86 | cast = lkb->lkb_astaddr; | 85 | cast = lkb->lkb_astfn; |
87 | bast = lkb->lkb_bastaddr; | 86 | bast = lkb->lkb_bastfn; |
88 | bmode = lkb->lkb_bastmode; | 87 | bmode = lkb->lkb_bastmode; |
89 | 88 | ||
90 | if ((type & AST_COMP) && cast) | 89 | if ((type & AST_COMP) && cast) |
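The ast.c hunks above replace untyped callback slots with properly typed function pointers for the completion and blocking callbacks. A hedged sketch of storing and dispatching such typed callbacks is below; struct example_lock and the bit values are hypothetical stand-ins for the dlm types.

#include <linux/kernel.h>

struct example_lock {
	void (*astfn)(void *astparam);			/* completion callback */
	void (*bastfn)(void *astparam, int mode);	/* blocking callback */
	void *astparam;
};

static void example_deliver(struct example_lock *lk, int type, int bastmode)
{
	if ((type & 0x1) && lk->astfn)			/* AST_COMP-style bit */
		lk->astfn(lk->astparam);
	if ((type & 0x2) && lk->bastfn)			/* AST_BAST-style bit */
		lk->bastfn(lk->astparam, bastmode);
}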
diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 2f8e3c81bc19..c3ad1dff3b25 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c | |||
@@ -604,7 +604,7 @@ static struct clusters clusters_root = { | |||
604 | }, | 604 | }, |
605 | }; | 605 | }; |
606 | 606 | ||
607 | int dlm_config_init(void) | 607 | int __init dlm_config_init(void) |
608 | { | 608 | { |
609 | config_group_init(&clusters_root.subsys.su_group); | 609 | config_group_init(&clusters_root.subsys.su_group); |
610 | mutex_init(&clusters_root.subsys.su_mutex); | 610 | mutex_init(&clusters_root.subsys.su_mutex); |
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 12c3bfd5e660..8fc24f4507a3 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c | |||
@@ -162,14 +162,12 @@ static int print_resource(struct dlm_rsb *res, struct seq_file *s) | |||
162 | 162 | ||
163 | static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) | 163 | static void print_lock(struct seq_file *s, struct dlm_lkb *lkb, struct dlm_rsb *r) |
164 | { | 164 | { |
165 | struct dlm_user_args *ua; | ||
166 | unsigned int waiting = 0; | 165 | unsigned int waiting = 0; |
167 | uint64_t xid = 0; | 166 | uint64_t xid = 0; |
168 | 167 | ||
169 | if (lkb->lkb_flags & DLM_IFL_USER) { | 168 | if (lkb->lkb_flags & DLM_IFL_USER) { |
170 | ua = (struct dlm_user_args *) lkb->lkb_astparam; | 169 | if (lkb->lkb_ua) |
171 | if (ua) | 170 | xid = lkb->lkb_ua->xid; |
172 | xid = ua->xid; | ||
173 | } | 171 | } |
174 | 172 | ||
175 | if (lkb->lkb_timestamp) | 173 | if (lkb->lkb_timestamp) |
@@ -543,7 +541,7 @@ void dlm_delete_debug_file(struct dlm_ls *ls) | |||
543 | debugfs_remove(ls->ls_debug_locks_dentry); | 541 | debugfs_remove(ls->ls_debug_locks_dentry); |
544 | } | 542 | } |
545 | 543 | ||
546 | int dlm_register_debugfs(void) | 544 | int __init dlm_register_debugfs(void) |
547 | { | 545 | { |
548 | mutex_init(&debug_buf_lock); | 546 | mutex_init(&debug_buf_lock); |
549 | dlm_root = debugfs_create_dir("dlm", NULL); | 547 | dlm_root = debugfs_create_dir("dlm", NULL); |
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index ff97ba924333..85defeb64df4 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c | |||
@@ -220,6 +220,7 @@ int dlm_recover_directory(struct dlm_ls *ls) | |||
220 | last_len = 0; | 220 | last_len = 0; |
221 | 221 | ||
222 | for (;;) { | 222 | for (;;) { |
223 | int left; | ||
223 | error = dlm_recovery_stopped(ls); | 224 | error = dlm_recovery_stopped(ls); |
224 | if (error) | 225 | if (error) |
225 | goto out_free; | 226 | goto out_free; |
@@ -235,12 +236,21 @@ int dlm_recover_directory(struct dlm_ls *ls) | |||
235 | * pick namelen/name pairs out of received buffer | 236 | * pick namelen/name pairs out of received buffer |
236 | */ | 237 | */ |
237 | 238 | ||
238 | b = ls->ls_recover_buf + sizeof(struct dlm_rcom); | 239 | b = ls->ls_recover_buf->rc_buf; |
240 | left = ls->ls_recover_buf->rc_header.h_length; | ||
241 | left -= sizeof(struct dlm_rcom); | ||
239 | 242 | ||
240 | for (;;) { | 243 | for (;;) { |
241 | memcpy(&namelen, b, sizeof(uint16_t)); | 244 | __be16 v; |
242 | namelen = be16_to_cpu(namelen); | 245 | |
243 | b += sizeof(uint16_t); | 246 | error = -EINVAL; |
247 | if (left < sizeof(__be16)) | ||
248 | goto out_free; | ||
249 | |||
250 | memcpy(&v, b, sizeof(__be16)); | ||
251 | namelen = be16_to_cpu(v); | ||
252 | b += sizeof(__be16); | ||
253 | left -= sizeof(__be16); | ||
244 | 254 | ||
245 | /* namelen of 0xFFFFF marks end of names for | 255 | /* namelen of 0xFFFFF marks end of names for |
246 | this node; namelen of 0 marks end of the | 256 | this node; namelen of 0 marks end of the |
@@ -251,6 +261,12 @@ int dlm_recover_directory(struct dlm_ls *ls) | |||
251 | if (!namelen) | 261 | if (!namelen) |
252 | break; | 262 | break; |
253 | 263 | ||
264 | if (namelen > left) | ||
265 | goto out_free; | ||
266 | |||
267 | if (namelen > DLM_RESNAME_MAXLEN) | ||
268 | goto out_free; | ||
269 | |||
254 | error = -ENOMEM; | 270 | error = -ENOMEM; |
255 | de = get_free_de(ls, namelen); | 271 | de = get_free_de(ls, namelen); |
256 | if (!de) | 272 | if (!de) |
@@ -262,6 +278,7 @@ int dlm_recover_directory(struct dlm_ls *ls) | |||
262 | memcpy(de->name, b, namelen); | 278 | memcpy(de->name, b, namelen); |
263 | memcpy(last_name, b, namelen); | 279 | memcpy(last_name, b, namelen); |
264 | b += namelen; | 280 | b += namelen; |
281 | left -= namelen; | ||
265 | 282 | ||
266 | add_entry_to_hash(ls, de); | 283 | add_entry_to_hash(ls, de); |
267 | count++; | 284 | count++; |
@@ -302,6 +319,9 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name, | |||
302 | 319 | ||
303 | write_unlock(&ls->ls_dirtbl[bucket].lock); | 320 | write_unlock(&ls->ls_dirtbl[bucket].lock); |
304 | 321 | ||
322 | if (namelen > DLM_RESNAME_MAXLEN) | ||
323 | return -EINVAL; | ||
324 | |||
305 | de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); | 325 | de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_KERNEL); |
306 | if (!de) | 326 | if (!de) |
307 | return -ENOMEM; | 327 | return -ENOMEM; |
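The dir.c hunks above add a running "left" counter and reject namelen values that would overrun the received recovery buffer or the resource-name limit. A hedged sketch of the same bounded, length-prefixed parse is below; example_parse_names() and EXAMPLE_NAME_MAX are hypothetical.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#define EXAMPLE_NAME_MAX 64

static int example_parse_names(const char *buf, int left)
{
	const char *b = buf;
	u16 namelen;
	__be16 v;

	for (;;) {
		if (left < (int)sizeof(__be16))
			return -EINVAL;		/* not even a length field left */

		memcpy(&v, b, sizeof(__be16));
		namelen = be16_to_cpu(v);
		b += sizeof(__be16);
		left -= sizeof(__be16);

		if (!namelen)			/* 0 marks end of the buffer */
			break;
		if (namelen > left || namelen > EXAMPLE_NAME_MAX)
			return -EINVAL;		/* never trust a length from the wire */

		/* ... consume the name ... */
		b += namelen;
		left -= namelen;
	}
	return 0;
}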
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index ec61bbaf25df..d30ea8b433a2 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h | |||
@@ -92,8 +92,6 @@ do { \ | |||
92 | } \ | 92 | } \ |
93 | } | 93 | } |
94 | 94 | ||
95 | #define DLM_FAKE_USER_AST ERR_PTR(-EINVAL) | ||
96 | |||
97 | 95 | ||
98 | struct dlm_direntry { | 96 | struct dlm_direntry { |
99 | struct list_head list; | 97 | struct list_head list; |
@@ -146,9 +144,9 @@ struct dlm_recover { | |||
146 | 144 | ||
147 | struct dlm_args { | 145 | struct dlm_args { |
148 | uint32_t flags; | 146 | uint32_t flags; |
149 | void *astaddr; | 147 | void (*astfn) (void *astparam); |
150 | long astparam; | 148 | void *astparam; |
151 | void *bastaddr; | 149 | void (*bastfn) (void *astparam, int mode); |
152 | int mode; | 150 | int mode; |
153 | struct dlm_lksb *lksb; | 151 | struct dlm_lksb *lksb; |
154 | unsigned long timeout; | 152 | unsigned long timeout; |
@@ -253,9 +251,12 @@ struct dlm_lkb { | |||
253 | 251 | ||
254 | char *lkb_lvbptr; | 252 | char *lkb_lvbptr; |
255 | struct dlm_lksb *lkb_lksb; /* caller's status block */ | 253 | struct dlm_lksb *lkb_lksb; /* caller's status block */ |
256 | void *lkb_astaddr; /* caller's ast function */ | 254 | void (*lkb_astfn) (void *astparam); |
257 | void *lkb_bastaddr; /* caller's bast function */ | 255 | void (*lkb_bastfn) (void *astparam, int mode); |
258 | long lkb_astparam; /* caller's ast arg */ | 256 | union { |
257 | void *lkb_astparam; /* caller's ast arg */ | ||
258 | struct dlm_user_args *lkb_ua; | ||
259 | }; | ||
259 | }; | 260 | }; |
260 | 261 | ||
261 | 262 | ||
@@ -403,28 +404,34 @@ struct dlm_rcom { | |||
403 | char rc_buf[0]; | 404 | char rc_buf[0]; |
404 | }; | 405 | }; |
405 | 406 | ||
407 | union dlm_packet { | ||
408 | struct dlm_header header; /* common to other two */ | ||
409 | struct dlm_message message; | ||
410 | struct dlm_rcom rcom; | ||
411 | }; | ||
412 | |||
406 | struct rcom_config { | 413 | struct rcom_config { |
407 | uint32_t rf_lvblen; | 414 | __le32 rf_lvblen; |
408 | uint32_t rf_lsflags; | 415 | __le32 rf_lsflags; |
409 | uint64_t rf_unused; | 416 | __le64 rf_unused; |
410 | }; | 417 | }; |
411 | 418 | ||
412 | struct rcom_lock { | 419 | struct rcom_lock { |
413 | uint32_t rl_ownpid; | 420 | __le32 rl_ownpid; |
414 | uint32_t rl_lkid; | 421 | __le32 rl_lkid; |
415 | uint32_t rl_remid; | 422 | __le32 rl_remid; |
416 | uint32_t rl_parent_lkid; | 423 | __le32 rl_parent_lkid; |
417 | uint32_t rl_parent_remid; | 424 | __le32 rl_parent_remid; |
418 | uint32_t rl_exflags; | 425 | __le32 rl_exflags; |
419 | uint32_t rl_flags; | 426 | __le32 rl_flags; |
420 | uint32_t rl_lvbseq; | 427 | __le32 rl_lvbseq; |
421 | int rl_result; | 428 | __le32 rl_result; |
422 | int8_t rl_rqmode; | 429 | int8_t rl_rqmode; |
423 | int8_t rl_grmode; | 430 | int8_t rl_grmode; |
424 | int8_t rl_status; | 431 | int8_t rl_status; |
425 | int8_t rl_asts; | 432 | int8_t rl_asts; |
426 | uint16_t rl_wait_type; | 433 | __le16 rl_wait_type; |
427 | uint16_t rl_namelen; | 434 | __le16 rl_namelen; |
428 | char rl_name[DLM_RESNAME_MAXLEN]; | 435 | char rl_name[DLM_RESNAME_MAXLEN]; |
429 | char rl_lvb[0]; | 436 | char rl_lvb[0]; |
430 | }; | 437 | }; |
@@ -494,7 +501,7 @@ struct dlm_ls { | |||
494 | struct rw_semaphore ls_recv_active; /* block dlm_recv */ | 501 | struct rw_semaphore ls_recv_active; /* block dlm_recv */ |
495 | struct list_head ls_requestqueue;/* queue remote requests */ | 502 | struct list_head ls_requestqueue;/* queue remote requests */ |
496 | struct mutex ls_requestqueue_mutex; | 503 | struct mutex ls_requestqueue_mutex; |
497 | char *ls_recover_buf; | 504 | struct dlm_rcom *ls_recover_buf; |
498 | int ls_recover_nodeid; /* for debugging */ | 505 | int ls_recover_nodeid; /* for debugging */ |
499 | uint64_t ls_rcom_seq; | 506 | uint64_t ls_rcom_seq; |
500 | spinlock_t ls_rcom_spin; | 507 | spinlock_t ls_rcom_spin; |
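The header changes above turn the rcom_config/rcom_lock on-wire fields into explicit __le32/__le16 types, and the lock.c hunks below convert at each access with cpu_to_le*()/le*_to_cpu(). A hedged sketch of that pattern on a hypothetical wire structure follows; struct example_wire and its helpers are made up.

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_wire {
	__le32 id;		/* always little-endian on the wire */
	__le16 namelen;
	__le16 pad;
};

static void example_pack(struct example_wire *w, u32 id, u16 namelen)
{
	w->id = cpu_to_le32(id);		/* sparse flags a missing conversion */
	w->namelen = cpu_to_le16(namelen);
	w->pad = 0;
}

static u32 example_unpack_id(const struct example_wire *w)
{
	return le32_to_cpu(w->id);		/* mirror conversion on receive */
}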
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index ff4a198fa677..8f250ac8b928 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c | |||
@@ -436,11 +436,15 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen, | |||
436 | { | 436 | { |
437 | struct dlm_rsb *r, *tmp; | 437 | struct dlm_rsb *r, *tmp; |
438 | uint32_t hash, bucket; | 438 | uint32_t hash, bucket; |
439 | int error = 0; | 439 | int error = -EINVAL; |
440 | |||
441 | if (namelen > DLM_RESNAME_MAXLEN) | ||
442 | goto out; | ||
440 | 443 | ||
441 | if (dlm_no_directory(ls)) | 444 | if (dlm_no_directory(ls)) |
442 | flags |= R_CREATE; | 445 | flags |= R_CREATE; |
443 | 446 | ||
447 | error = 0; | ||
444 | hash = jhash(name, namelen, 0); | 448 | hash = jhash(name, namelen, 0); |
445 | bucket = hash & (ls->ls_rsbtbl_size - 1); | 449 | bucket = hash & (ls->ls_rsbtbl_size - 1); |
446 | 450 | ||
@@ -1222,6 +1226,8 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
1222 | b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; | 1226 | b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; |
1223 | if (b == 1) { | 1227 | if (b == 1) { |
1224 | int len = receive_extralen(ms); | 1228 | int len = receive_extralen(ms); |
1229 | if (len > DLM_RESNAME_MAXLEN) | ||
1230 | len = DLM_RESNAME_MAXLEN; | ||
1225 | memcpy(lkb->lkb_lvbptr, ms->m_extra, len); | 1231 | memcpy(lkb->lkb_lvbptr, ms->m_extra, len); |
1226 | lkb->lkb_lvbseq = ms->m_lvbseq; | 1232 | lkb->lkb_lvbseq = ms->m_lvbseq; |
1227 | } | 1233 | } |
@@ -1775,7 +1781,7 @@ static void grant_pending_locks(struct dlm_rsb *r) | |||
1775 | */ | 1781 | */ |
1776 | 1782 | ||
1777 | list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { | 1783 | list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { |
1778 | if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) { | 1784 | if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) { |
1779 | if (cw && high == DLM_LOCK_PR) | 1785 | if (cw && high == DLM_LOCK_PR) |
1780 | queue_bast(r, lkb, DLM_LOCK_CW); | 1786 | queue_bast(r, lkb, DLM_LOCK_CW); |
1781 | else | 1787 | else |
@@ -1805,7 +1811,7 @@ static void send_bast_queue(struct dlm_rsb *r, struct list_head *head, | |||
1805 | struct dlm_lkb *gr; | 1811 | struct dlm_lkb *gr; |
1806 | 1812 | ||
1807 | list_for_each_entry(gr, head, lkb_statequeue) { | 1813 | list_for_each_entry(gr, head, lkb_statequeue) { |
1808 | if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) { | 1814 | if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) { |
1809 | queue_bast(r, gr, lkb->lkb_rqmode); | 1815 | queue_bast(r, gr, lkb->lkb_rqmode); |
1810 | gr->lkb_highbast = lkb->lkb_rqmode; | 1816 | gr->lkb_highbast = lkb->lkb_rqmode; |
1811 | } | 1817 | } |
@@ -1960,8 +1966,11 @@ static void confirm_master(struct dlm_rsb *r, int error) | |||
1960 | } | 1966 | } |
1961 | 1967 | ||
1962 | static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, | 1968 | static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, |
1963 | int namelen, unsigned long timeout_cs, void *ast, | 1969 | int namelen, unsigned long timeout_cs, |
1964 | void *astarg, void *bast, struct dlm_args *args) | 1970 | void (*ast) (void *astparam), |
1971 | void *astparam, | ||
1972 | void (*bast) (void *astparam, int mode), | ||
1973 | struct dlm_args *args) | ||
1965 | { | 1974 | { |
1966 | int rv = -EINVAL; | 1975 | int rv = -EINVAL; |
1967 | 1976 | ||
@@ -2011,9 +2020,9 @@ static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, | |||
2011 | an active lkb cannot be modified before locking the rsb */ | 2020 | an active lkb cannot be modified before locking the rsb */ |
2012 | 2021 | ||
2013 | args->flags = flags; | 2022 | args->flags = flags; |
2014 | args->astaddr = ast; | 2023 | args->astfn = ast; |
2015 | args->astparam = (long) astarg; | 2024 | args->astparam = astparam; |
2016 | args->bastaddr = bast; | 2025 | args->bastfn = bast; |
2017 | args->timeout = timeout_cs; | 2026 | args->timeout = timeout_cs; |
2018 | args->mode = mode; | 2027 | args->mode = mode; |
2019 | args->lksb = lksb; | 2028 | args->lksb = lksb; |
@@ -2032,7 +2041,7 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args) | |||
2032 | return -EINVAL; | 2041 | return -EINVAL; |
2033 | 2042 | ||
2034 | args->flags = flags; | 2043 | args->flags = flags; |
2035 | args->astparam = (long) astarg; | 2044 | args->astparam = astarg; |
2036 | return 0; | 2045 | return 0; |
2037 | } | 2046 | } |
2038 | 2047 | ||
@@ -2062,9 +2071,9 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
2062 | 2071 | ||
2063 | lkb->lkb_exflags = args->flags; | 2072 | lkb->lkb_exflags = args->flags; |
2064 | lkb->lkb_sbflags = 0; | 2073 | lkb->lkb_sbflags = 0; |
2065 | lkb->lkb_astaddr = args->astaddr; | 2074 | lkb->lkb_astfn = args->astfn; |
2066 | lkb->lkb_astparam = args->astparam; | 2075 | lkb->lkb_astparam = args->astparam; |
2067 | lkb->lkb_bastaddr = args->bastaddr; | 2076 | lkb->lkb_bastfn = args->bastfn; |
2068 | lkb->lkb_rqmode = args->mode; | 2077 | lkb->lkb_rqmode = args->mode; |
2069 | lkb->lkb_lksb = args->lksb; | 2078 | lkb->lkb_lksb = args->lksb; |
2070 | lkb->lkb_lvbptr = args->lksb->sb_lvbptr; | 2079 | lkb->lkb_lvbptr = args->lksb->sb_lvbptr; |
@@ -2711,9 +2720,9 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
2711 | /* m_result and m_bastmode are set from function args, | 2720 | /* m_result and m_bastmode are set from function args, |
2712 | not from lkb fields */ | 2721 | not from lkb fields */ |
2713 | 2722 | ||
2714 | if (lkb->lkb_bastaddr) | 2723 | if (lkb->lkb_bastfn) |
2715 | ms->m_asts |= AST_BAST; | 2724 | ms->m_asts |= AST_BAST; |
2716 | if (lkb->lkb_astaddr) | 2725 | if (lkb->lkb_astfn) |
2717 | ms->m_asts |= AST_COMP; | 2726 | ms->m_asts |= AST_COMP; |
2718 | 2727 | ||
2719 | /* compare with switch in create_message; send_remove() doesn't | 2728 | /* compare with switch in create_message; send_remove() doesn't |
@@ -2989,11 +2998,23 @@ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
2989 | if (!lkb->lkb_lvbptr) | 2998 | if (!lkb->lkb_lvbptr) |
2990 | return -ENOMEM; | 2999 | return -ENOMEM; |
2991 | len = receive_extralen(ms); | 3000 | len = receive_extralen(ms); |
3001 | if (len > DLM_RESNAME_MAXLEN) | ||
3002 | len = DLM_RESNAME_MAXLEN; | ||
2992 | memcpy(lkb->lkb_lvbptr, ms->m_extra, len); | 3003 | memcpy(lkb->lkb_lvbptr, ms->m_extra, len); |
2993 | } | 3004 | } |
2994 | return 0; | 3005 | return 0; |
2995 | } | 3006 | } |
2996 | 3007 | ||
3008 | static void fake_bastfn(void *astparam, int mode) | ||
3009 | { | ||
3010 | log_print("fake_bastfn should not be called"); | ||
3011 | } | ||
3012 | |||
3013 | static void fake_astfn(void *astparam) | ||
3014 | { | ||
3015 | log_print("fake_astfn should not be called"); | ||
3016 | } | ||
3017 | |||
2997 | static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | 3018 | static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, |
2998 | struct dlm_message *ms) | 3019 | struct dlm_message *ms) |
2999 | { | 3020 | { |
@@ -3002,8 +3023,9 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
3002 | lkb->lkb_remid = ms->m_lkid; | 3023 | lkb->lkb_remid = ms->m_lkid; |
3003 | lkb->lkb_grmode = DLM_LOCK_IV; | 3024 | lkb->lkb_grmode = DLM_LOCK_IV; |
3004 | lkb->lkb_rqmode = ms->m_rqmode; | 3025 | lkb->lkb_rqmode = ms->m_rqmode; |
3005 | lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST); | 3026 | |
3006 | lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP); | 3027 | lkb->lkb_bastfn = (ms->m_asts & AST_BAST) ? &fake_bastfn : NULL; |
3028 | lkb->lkb_astfn = (ms->m_asts & AST_COMP) ? &fake_astfn : NULL; | ||
3007 | 3029 | ||
3008 | if (lkb->lkb_exflags & DLM_LKF_VALBLK) { | 3030 | if (lkb->lkb_exflags & DLM_LKF_VALBLK) { |
3009 | /* lkb was just created so there won't be an lvb yet */ | 3031 | /* lkb was just created so there won't be an lvb yet */ |
@@ -3802,7 +3824,7 @@ static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms, | |||
3802 | int nodeid) | 3824 | int nodeid) |
3803 | { | 3825 | { |
3804 | if (dlm_locking_stopped(ls)) { | 3826 | if (dlm_locking_stopped(ls)) { |
3805 | dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms); | 3827 | dlm_add_requestqueue(ls, nodeid, ms); |
3806 | } else { | 3828 | } else { |
3807 | dlm_wait_requestqueue(ls); | 3829 | dlm_wait_requestqueue(ls); |
3808 | _receive_message(ls, ms); | 3830 | _receive_message(ls, ms); |
@@ -3822,21 +3844,20 @@ void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms) | |||
3822 | standard locking activity) or an RCOM (recovery message sent as part of | 3844 | standard locking activity) or an RCOM (recovery message sent as part of |
3823 | lockspace recovery). */ | 3845 | lockspace recovery). */ |
3824 | 3846 | ||
3825 | void dlm_receive_buffer(struct dlm_header *hd, int nodeid) | 3847 | void dlm_receive_buffer(union dlm_packet *p, int nodeid) |
3826 | { | 3848 | { |
3827 | struct dlm_message *ms = (struct dlm_message *) hd; | 3849 | struct dlm_header *hd = &p->header; |
3828 | struct dlm_rcom *rc = (struct dlm_rcom *) hd; | ||
3829 | struct dlm_ls *ls; | 3850 | struct dlm_ls *ls; |
3830 | int type = 0; | 3851 | int type = 0; |
3831 | 3852 | ||
3832 | switch (hd->h_cmd) { | 3853 | switch (hd->h_cmd) { |
3833 | case DLM_MSG: | 3854 | case DLM_MSG: |
3834 | dlm_message_in(ms); | 3855 | dlm_message_in(&p->message); |
3835 | type = ms->m_type; | 3856 | type = p->message.m_type; |
3836 | break; | 3857 | break; |
3837 | case DLM_RCOM: | 3858 | case DLM_RCOM: |
3838 | dlm_rcom_in(rc); | 3859 | dlm_rcom_in(&p->rcom); |
3839 | type = rc->rc_type; | 3860 | type = p->rcom.rc_type; |
3840 | break; | 3861 | break; |
3841 | default: | 3862 | default: |
3842 | log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); | 3863 | log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); |
@@ -3856,7 +3877,7 @@ void dlm_receive_buffer(struct dlm_header *hd, int nodeid) | |||
3856 | hd->h_lockspace, nodeid, hd->h_cmd, type); | 3877 | hd->h_lockspace, nodeid, hd->h_cmd, type); |
3857 | 3878 | ||
3858 | if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) | 3879 | if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) |
3859 | dlm_send_ls_not_ready(nodeid, rc); | 3880 | dlm_send_ls_not_ready(nodeid, &p->rcom); |
3860 | return; | 3881 | return; |
3861 | } | 3882 | } |
3862 | 3883 | ||
@@ -3865,9 +3886,9 @@ void dlm_receive_buffer(struct dlm_header *hd, int nodeid) | |||
3865 | 3886 | ||
3866 | down_read(&ls->ls_recv_active); | 3887 | down_read(&ls->ls_recv_active); |
3867 | if (hd->h_cmd == DLM_MSG) | 3888 | if (hd->h_cmd == DLM_MSG) |
3868 | dlm_receive_message(ls, ms, nodeid); | 3889 | dlm_receive_message(ls, &p->message, nodeid); |
3869 | else | 3890 | else |
3870 | dlm_receive_rcom(ls, rc, nodeid); | 3891 | dlm_receive_rcom(ls, &p->rcom, nodeid); |
3871 | up_read(&ls->ls_recv_active); | 3892 | up_read(&ls->ls_recv_active); |
3872 | 3893 | ||
3873 | dlm_put_lockspace(ls); | 3894 | dlm_put_lockspace(ls); |
@@ -4267,32 +4288,34 @@ static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid, | |||
4267 | return NULL; | 4288 | return NULL; |
4268 | } | 4289 | } |
4269 | 4290 | ||
4291 | /* needs at least dlm_rcom + rcom_lock */ | ||
4270 | static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | 4292 | static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, |
4271 | struct dlm_rsb *r, struct dlm_rcom *rc) | 4293 | struct dlm_rsb *r, struct dlm_rcom *rc) |
4272 | { | 4294 | { |
4273 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; | 4295 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; |
4274 | int lvblen; | ||
4275 | 4296 | ||
4276 | lkb->lkb_nodeid = rc->rc_header.h_nodeid; | 4297 | lkb->lkb_nodeid = rc->rc_header.h_nodeid; |
4277 | lkb->lkb_ownpid = rl->rl_ownpid; | 4298 | lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); |
4278 | lkb->lkb_remid = rl->rl_lkid; | 4299 | lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); |
4279 | lkb->lkb_exflags = rl->rl_exflags; | 4300 | lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); |
4280 | lkb->lkb_flags = rl->rl_flags & 0x0000FFFF; | 4301 | lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF; |
4281 | lkb->lkb_flags |= DLM_IFL_MSTCPY; | 4302 | lkb->lkb_flags |= DLM_IFL_MSTCPY; |
4282 | lkb->lkb_lvbseq = rl->rl_lvbseq; | 4303 | lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); |
4283 | lkb->lkb_rqmode = rl->rl_rqmode; | 4304 | lkb->lkb_rqmode = rl->rl_rqmode; |
4284 | lkb->lkb_grmode = rl->rl_grmode; | 4305 | lkb->lkb_grmode = rl->rl_grmode; |
4285 | /* don't set lkb_status because add_lkb wants to itself */ | 4306 | /* don't set lkb_status because add_lkb wants to itself */ |
4286 | 4307 | ||
4287 | lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST); | 4308 | lkb->lkb_bastfn = (rl->rl_asts & AST_BAST) ? &fake_bastfn : NULL; |
4288 | lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP); | 4309 | lkb->lkb_astfn = (rl->rl_asts & AST_COMP) ? &fake_astfn : NULL; |
4289 | 4310 | ||
4290 | if (lkb->lkb_exflags & DLM_LKF_VALBLK) { | 4311 | if (lkb->lkb_exflags & DLM_LKF_VALBLK) { |
4312 | int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) - | ||
4313 | sizeof(struct rcom_lock); | ||
4314 | if (lvblen > ls->ls_lvblen) | ||
4315 | return -EINVAL; | ||
4291 | lkb->lkb_lvbptr = dlm_allocate_lvb(ls); | 4316 | lkb->lkb_lvbptr = dlm_allocate_lvb(ls); |
4292 | if (!lkb->lkb_lvbptr) | 4317 | if (!lkb->lkb_lvbptr) |
4293 | return -ENOMEM; | 4318 | return -ENOMEM; |
4294 | lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) - | ||
4295 | sizeof(struct rcom_lock); | ||
4296 | memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); | 4319 | memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen); |
4297 | } | 4320 | } |
4298 | 4321 | ||
@@ -4300,7 +4323,8 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
4300 | The real granted mode of these converting locks cannot be determined | 4323 | The real granted mode of these converting locks cannot be determined |
4301 | until all locks have been rebuilt on the rsb (recover_conversion) */ | 4324 | until all locks have been rebuilt on the rsb (recover_conversion) */ |
4302 | 4325 | ||
4303 | if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) { | 4326 | if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && |
4327 | middle_conversion(lkb)) { | ||
4304 | rl->rl_status = DLM_LKSTS_CONVERT; | 4328 | rl->rl_status = DLM_LKSTS_CONVERT; |
4305 | lkb->lkb_grmode = DLM_LOCK_IV; | 4329 | lkb->lkb_grmode = DLM_LOCK_IV; |
4306 | rsb_set_flag(r, RSB_RECOVER_CONVERT); | 4330 | rsb_set_flag(r, RSB_RECOVER_CONVERT); |
@@ -4315,6 +4339,7 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, | |||
4315 | the given values and send back our lkid. We send back our lkid by sending | 4339 | the given values and send back our lkid. We send back our lkid by sending |
4316 | back the rcom_lock struct we got but with the remid field filled in. */ | 4340 | back the rcom_lock struct we got but with the remid field filled in. */ |
4317 | 4341 | ||
4342 | /* needs at least dlm_rcom + rcom_lock */ | ||
4318 | int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | 4343 | int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) |
4319 | { | 4344 | { |
4320 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; | 4345 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; |
@@ -4327,13 +4352,14 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4327 | goto out; | 4352 | goto out; |
4328 | } | 4353 | } |
4329 | 4354 | ||
4330 | error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r); | 4355 | error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), |
4356 | R_MASTER, &r); | ||
4331 | if (error) | 4357 | if (error) |
4332 | goto out; | 4358 | goto out; |
4333 | 4359 | ||
4334 | lock_rsb(r); | 4360 | lock_rsb(r); |
4335 | 4361 | ||
4336 | lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid); | 4362 | lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid)); |
4337 | if (lkb) { | 4363 | if (lkb) { |
4338 | error = -EEXIST; | 4364 | error = -EEXIST; |
4339 | goto out_remid; | 4365 | goto out_remid; |
@@ -4356,18 +4382,20 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4356 | out_remid: | 4382 | out_remid: |
4357 | /* this is the new value returned to the lock holder for | 4383 | /* this is the new value returned to the lock holder for |
4358 | saving in its process-copy lkb */ | 4384 | saving in its process-copy lkb */ |
4359 | rl->rl_remid = lkb->lkb_id; | 4385 | rl->rl_remid = cpu_to_le32(lkb->lkb_id); |
4360 | 4386 | ||
4361 | out_unlock: | 4387 | out_unlock: |
4362 | unlock_rsb(r); | 4388 | unlock_rsb(r); |
4363 | put_rsb(r); | 4389 | put_rsb(r); |
4364 | out: | 4390 | out: |
4365 | if (error) | 4391 | if (error) |
4366 | log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid); | 4392 | log_debug(ls, "recover_master_copy %d %x", error, |
4367 | rl->rl_result = error; | 4393 | le32_to_cpu(rl->rl_lkid)); |
4394 | rl->rl_result = cpu_to_le32(error); | ||
4368 | return error; | 4395 | return error; |
4369 | } | 4396 | } |
4370 | 4397 | ||
4398 | /* needs at least dlm_rcom + rcom_lock */ | ||
4371 | int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | 4399 | int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) |
4372 | { | 4400 | { |
4373 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; | 4401 | struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; |
@@ -4375,15 +4403,16 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4375 | struct dlm_lkb *lkb; | 4403 | struct dlm_lkb *lkb; |
4376 | int error; | 4404 | int error; |
4377 | 4405 | ||
4378 | error = find_lkb(ls, rl->rl_lkid, &lkb); | 4406 | error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb); |
4379 | if (error) { | 4407 | if (error) { |
4380 | log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid); | 4408 | log_error(ls, "recover_process_copy no lkid %x", |
4409 | le32_to_cpu(rl->rl_lkid)); | ||
4381 | return error; | 4410 | return error; |
4382 | } | 4411 | } |
4383 | 4412 | ||
4384 | DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); | 4413 | DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb);); |
4385 | 4414 | ||
4386 | error = rl->rl_result; | 4415 | error = le32_to_cpu(rl->rl_result); |
4387 | 4416 | ||
4388 | r = lkb->lkb_resource; | 4417 | r = lkb->lkb_resource; |
4389 | hold_rsb(r); | 4418 | hold_rsb(r); |
@@ -4402,7 +4431,7 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
4402 | log_debug(ls, "master copy exists %x", lkb->lkb_id); | 4431 | log_debug(ls, "master copy exists %x", lkb->lkb_id); |
4403 | /* fall through */ | 4432 | /* fall through */ |
4404 | case 0: | 4433 | case 0: |
4405 | lkb->lkb_remid = rl->rl_remid; | 4434 | lkb->lkb_remid = le32_to_cpu(rl->rl_remid); |
4406 | break; | 4435 | break; |
4407 | default: | 4436 | default: |
4408 | log_error(ls, "dlm_recover_process_copy unknown error %d %x", | 4437 | log_error(ls, "dlm_recover_process_copy unknown error %d %x", |
@@ -4451,7 +4480,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, | |||
4451 | lock and that lkb_astparam is the dlm_user_args structure. */ | 4480 | lock and that lkb_astparam is the dlm_user_args structure. */ |
4452 | 4481 | ||
4453 | error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, | 4482 | error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, |
4454 | DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); | 4483 | fake_astfn, ua, fake_bastfn, &args); |
4455 | lkb->lkb_flags |= DLM_IFL_USER; | 4484 | lkb->lkb_flags |= DLM_IFL_USER; |
4456 | ua->old_mode = DLM_LOCK_IV; | 4485 | ua->old_mode = DLM_LOCK_IV; |
4457 | 4486 | ||
@@ -4504,7 +4533,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, | |||
4504 | /* user can change the params on its lock when it converts it, or | 4533 | /* user can change the params on its lock when it converts it, or |
4505 | add an lvb that didn't exist before */ | 4534 | add an lvb that didn't exist before */ |
4506 | 4535 | ||
4507 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 4536 | ua = lkb->lkb_ua; |
4508 | 4537 | ||
4509 | if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { | 4538 | if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) { |
4510 | ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL); | 4539 | ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL); |
@@ -4525,7 +4554,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, | |||
4525 | ua->old_mode = lkb->lkb_grmode; | 4554 | ua->old_mode = lkb->lkb_grmode; |
4526 | 4555 | ||
4527 | error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, | 4556 | error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, |
4528 | DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args); | 4557 | fake_astfn, ua, fake_bastfn, &args); |
4529 | if (error) | 4558 | if (error) |
4530 | goto out_put; | 4559 | goto out_put; |
4531 | 4560 | ||
@@ -4555,7 +4584,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, | |||
4555 | if (error) | 4584 | if (error) |
4556 | goto out; | 4585 | goto out; |
4557 | 4586 | ||
4558 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 4587 | ua = lkb->lkb_ua; |
4559 | 4588 | ||
4560 | if (lvb_in && ua->lksb.sb_lvbptr) | 4589 | if (lvb_in && ua->lksb.sb_lvbptr) |
4561 | memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); | 4590 | memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN); |
@@ -4604,7 +4633,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, | |||
4604 | if (error) | 4633 | if (error) |
4605 | goto out; | 4634 | goto out; |
4606 | 4635 | ||
4607 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 4636 | ua = lkb->lkb_ua; |
4608 | if (ua_tmp->castparam) | 4637 | if (ua_tmp->castparam) |
4609 | ua->castparam = ua_tmp->castparam; | 4638 | ua->castparam = ua_tmp->castparam; |
4610 | ua->user_lksb = ua_tmp->user_lksb; | 4639 | ua->user_lksb = ua_tmp->user_lksb; |
@@ -4642,7 +4671,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) | |||
4642 | if (error) | 4671 | if (error) |
4643 | goto out; | 4672 | goto out; |
4644 | 4673 | ||
4645 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 4674 | ua = lkb->lkb_ua; |
4646 | 4675 | ||
4647 | error = set_unlock_args(flags, ua, &args); | 4676 | error = set_unlock_args(flags, ua, &args); |
4648 | if (error) | 4677 | if (error) |
@@ -4681,7 +4710,6 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) | |||
4681 | 4710 | ||
4682 | static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) | 4711 | static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) |
4683 | { | 4712 | { |
4684 | struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam; | ||
4685 | struct dlm_args args; | 4713 | struct dlm_args args; |
4686 | int error; | 4714 | int error; |
4687 | 4715 | ||
@@ -4690,7 +4718,7 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) | |||
4690 | list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); | 4718 | list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); |
4691 | mutex_unlock(&ls->ls_orphans_mutex); | 4719 | mutex_unlock(&ls->ls_orphans_mutex); |
4692 | 4720 | ||
4693 | set_unlock_args(0, ua, &args); | 4721 | set_unlock_args(0, lkb->lkb_ua, &args); |
4694 | 4722 | ||
4695 | error = cancel_lock(ls, lkb, &args); | 4723 | error = cancel_lock(ls, lkb, &args); |
4696 | if (error == -DLM_ECANCEL) | 4724 | if (error == -DLM_ECANCEL) |
@@ -4703,11 +4731,10 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) | |||
4703 | 4731 | ||
4704 | static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) | 4732 | static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) |
4705 | { | 4733 | { |
4706 | struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam; | ||
4707 | struct dlm_args args; | 4734 | struct dlm_args args; |
4708 | int error; | 4735 | int error; |
4709 | 4736 | ||
4710 | set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args); | 4737 | set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args); |
4711 | 4738 | ||
4712 | error = unlock_lock(ls, lkb, &args); | 4739 | error = unlock_lock(ls, lkb, &args); |
4713 | if (error == -DLM_EUNLOCK) | 4740 | if (error == -DLM_EUNLOCK) |
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 27b6ed302911..05d9c82e646b 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h | |||
@@ -17,7 +17,7 @@ void dlm_print_rsb(struct dlm_rsb *r); | |||
17 | void dlm_dump_rsb(struct dlm_rsb *r); | 17 | void dlm_dump_rsb(struct dlm_rsb *r); |
18 | void dlm_print_lkb(struct dlm_lkb *lkb); | 18 | void dlm_print_lkb(struct dlm_lkb *lkb); |
19 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); | 19 | void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms); |
20 | void dlm_receive_buffer(struct dlm_header *hd, int nodeid); | 20 | void dlm_receive_buffer(union dlm_packet *p, int nodeid); |
21 | int dlm_modes_compat(int mode1, int mode2); | 21 | int dlm_modes_compat(int mode1, int mode2); |
22 | void dlm_put_rsb(struct dlm_rsb *r); | 22 | void dlm_put_rsb(struct dlm_rsb *r); |
23 | void dlm_hold_rsb(struct dlm_rsb *r); | 23 | void dlm_hold_rsb(struct dlm_rsb *r); |
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index b180fdc51085..b64e55e0515d 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c | |||
@@ -191,7 +191,7 @@ static int do_uevent(struct dlm_ls *ls, int in) | |||
191 | } | 191 | } |
192 | 192 | ||
193 | 193 | ||
194 | int dlm_lockspace_init(void) | 194 | int __init dlm_lockspace_init(void) |
195 | { | 195 | { |
196 | ls_count = 0; | 196 | ls_count = 0; |
197 | mutex_init(&ls_lock); | 197 | mutex_init(&ls_lock); |
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index f7783867491a..54c14c6d06cb 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c | |||
@@ -18,7 +18,7 @@ | |||
18 | static struct kmem_cache *lkb_cache; | 18 | static struct kmem_cache *lkb_cache; |
19 | 19 | ||
20 | 20 | ||
21 | int dlm_memory_init(void) | 21 | int __init dlm_memory_init(void) |
22 | { | 22 | { |
23 | int ret = 0; | 23 | int ret = 0; |
24 | 24 | ||
@@ -80,7 +80,7 @@ void dlm_free_lkb(struct dlm_lkb *lkb) | |||
80 | { | 80 | { |
81 | if (lkb->lkb_flags & DLM_IFL_USER) { | 81 | if (lkb->lkb_flags & DLM_IFL_USER) { |
82 | struct dlm_user_args *ua; | 82 | struct dlm_user_args *ua; |
83 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 83 | ua = lkb->lkb_ua; |
84 | if (ua) { | 84 | if (ua) { |
85 | if (ua->lksb.sb_lvbptr) | 85 | if (ua->lksb.sb_lvbptr) |
86 | kfree(ua->lksb.sb_lvbptr); | 86 | kfree(ua->lksb.sb_lvbptr); |
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index e69926e984db..07ac709f3ed7 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c | |||
@@ -61,9 +61,9 @@ int dlm_process_incoming_buffer(int nodeid, const void *base, | |||
61 | union { | 61 | union { |
62 | unsigned char __buf[DLM_INBUF_LEN]; | 62 | unsigned char __buf[DLM_INBUF_LEN]; |
63 | /* this is to force proper alignment on some arches */ | 63 | /* this is to force proper alignment on some arches */ |
64 | struct dlm_header dlm; | 64 | union dlm_packet p; |
65 | } __tmp; | 65 | } __tmp; |
66 | struct dlm_header *msg = &__tmp.dlm; | 66 | union dlm_packet *p = &__tmp.p; |
67 | int ret = 0; | 67 | int ret = 0; |
68 | int err = 0; | 68 | int err = 0; |
69 | uint16_t msglen; | 69 | uint16_t msglen; |
@@ -75,15 +75,22 @@ int dlm_process_incoming_buffer(int nodeid, const void *base, | |||
75 | message may wrap around the end of the buffer back to the | 75 | message may wrap around the end of the buffer back to the |
76 | start, so we need to use a temp buffer and copy_from_cb. */ | 76 | start, so we need to use a temp buffer and copy_from_cb. */ |
77 | 77 | ||
78 | copy_from_cb(msg, base, offset, sizeof(struct dlm_header), | 78 | copy_from_cb(p, base, offset, sizeof(struct dlm_header), |
79 | limit); | 79 | limit); |
80 | 80 | ||
81 | msglen = le16_to_cpu(msg->h_length); | 81 | msglen = le16_to_cpu(p->header.h_length); |
82 | lockspace = msg->h_lockspace; | 82 | lockspace = p->header.h_lockspace; |
83 | 83 | ||
84 | err = -EINVAL; | 84 | err = -EINVAL; |
85 | if (msglen < sizeof(struct dlm_header)) | 85 | if (msglen < sizeof(struct dlm_header)) |
86 | break; | 86 | break; |
87 | if (p->header.h_cmd == DLM_MSG) { | ||
88 | if (msglen < sizeof(struct dlm_message)) | ||
89 | break; | ||
90 | } else { | ||
91 | if (msglen < sizeof(struct dlm_rcom)) | ||
92 | break; | ||
93 | } | ||
87 | err = -E2BIG; | 94 | err = -E2BIG; |
88 | if (msglen > dlm_config.ci_buffer_size) { | 95 | if (msglen > dlm_config.ci_buffer_size) { |
89 | log_print("message size %d from %d too big, buf len %d", | 96 | log_print("message size %d from %d too big, buf len %d", |
@@ -104,26 +111,26 @@ int dlm_process_incoming_buffer(int nodeid, const void *base, | |||
104 | in the buffer on the stack (which should work for most | 111 | in the buffer on the stack (which should work for most |
105 | ordinary messages). */ | 112 | ordinary messages). */ |
106 | 113 | ||
107 | if (msglen > DLM_INBUF_LEN && msg == &__tmp.dlm) { | 114 | if (msglen > sizeof(__tmp) && p == &__tmp.p) { |
108 | msg = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); | 115 | p = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL); |
109 | if (msg == NULL) | 116 | if (p == NULL) |
110 | return ret; | 117 | return ret; |
111 | } | 118 | } |
112 | 119 | ||
113 | copy_from_cb(msg, base, offset, msglen, limit); | 120 | copy_from_cb(p, base, offset, msglen, limit); |
114 | 121 | ||
115 | BUG_ON(lockspace != msg->h_lockspace); | 122 | BUG_ON(lockspace != p->header.h_lockspace); |
116 | 123 | ||
117 | ret += msglen; | 124 | ret += msglen; |
118 | offset += msglen; | 125 | offset += msglen; |
119 | offset &= (limit - 1); | 126 | offset &= (limit - 1); |
120 | len -= msglen; | 127 | len -= msglen; |
121 | 128 | ||
122 | dlm_receive_buffer(msg, nodeid); | 129 | dlm_receive_buffer(p, nodeid); |
123 | } | 130 | } |
124 | 131 | ||
125 | if (msg != &__tmp.dlm) | 132 | if (p != &__tmp.p) |
126 | kfree(msg); | 133 | kfree(p); |
127 | 134 | ||
128 | return err ? err : ret; | 135 | return err ? err : ret; |
129 | } | 136 | } |
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index 863b87d0dc71..714593621f4f 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c | |||
@@ -78,7 +78,7 @@ static struct genl_ops dlm_nl_ops = { | |||
78 | .doit = user_cmd, | 78 | .doit = user_cmd, |
79 | }; | 79 | }; |
80 | 80 | ||
81 | int dlm_netlink_init(void) | 81 | int __init dlm_netlink_init(void) |
82 | { | 82 | { |
83 | int rv; | 83 | int rv; |
84 | 84 | ||
@@ -95,7 +95,7 @@ int dlm_netlink_init(void) | |||
95 | return rv; | 95 | return rv; |
96 | } | 96 | } |
97 | 97 | ||
98 | void dlm_netlink_exit(void) | 98 | void __exit dlm_netlink_exit(void) |
99 | { | 99 | { |
100 | genl_unregister_ops(&family, &dlm_nl_ops); | 100 | genl_unregister_ops(&family, &dlm_nl_ops); |
101 | genl_unregister_family(&family); | 101 | genl_unregister_family(&family); |
@@ -104,7 +104,6 @@ void dlm_netlink_exit(void) | |||
104 | static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) | 104 | static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) |
105 | { | 105 | { |
106 | struct dlm_rsb *r = lkb->lkb_resource; | 106 | struct dlm_rsb *r = lkb->lkb_resource; |
107 | struct dlm_user_args *ua = (struct dlm_user_args *) lkb->lkb_astparam; | ||
108 | 107 | ||
109 | memset(data, 0, sizeof(struct dlm_lock_data)); | 108 | memset(data, 0, sizeof(struct dlm_lock_data)); |
110 | 109 | ||
@@ -117,8 +116,8 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) | |||
117 | data->grmode = lkb->lkb_grmode; | 116 | data->grmode = lkb->lkb_grmode; |
118 | data->rqmode = lkb->lkb_rqmode; | 117 | data->rqmode = lkb->lkb_rqmode; |
119 | data->timestamp = lkb->lkb_timestamp; | 118 | data->timestamp = lkb->lkb_timestamp; |
120 | if (ua) | 119 | if (lkb->lkb_ua) |
121 | data->xid = ua->xid; | 120 | data->xid = lkb->lkb_ua->xid; |
122 | if (r) { | 121 | if (r) { |
123 | data->lockspace_id = r->res_ls->ls_global_id; | 122 | data->lockspace_id = r->res_ls->ls_global_id; |
124 | data->resource_namelen = r->res_length; | 123 | data->resource_namelen = r->res_length; |
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index 026824cd3acb..035e6f9990b0 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c | |||
@@ -78,13 +78,14 @@ static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh, | |||
78 | 78 | ||
79 | static void make_config(struct dlm_ls *ls, struct rcom_config *rf) | 79 | static void make_config(struct dlm_ls *ls, struct rcom_config *rf) |
80 | { | 80 | { |
81 | rf->rf_lvblen = ls->ls_lvblen; | 81 | rf->rf_lvblen = cpu_to_le32(ls->ls_lvblen); |
82 | rf->rf_lsflags = ls->ls_exflags; | 82 | rf->rf_lsflags = cpu_to_le32(ls->ls_exflags); |
83 | } | 83 | } |
84 | 84 | ||
85 | static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | 85 | static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) |
86 | { | 86 | { |
87 | struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; | 87 | struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; |
88 | size_t conf_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); | ||
88 | 89 | ||
89 | if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { | 90 | if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { |
90 | log_error(ls, "version mismatch: %x nodeid %d: %x", | 91 | log_error(ls, "version mismatch: %x nodeid %d: %x", |
@@ -93,11 +94,18 @@ static int check_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | |||
93 | return -EPROTO; | 94 | return -EPROTO; |
94 | } | 95 | } |
95 | 96 | ||
96 | if (rf->rf_lvblen != ls->ls_lvblen || | 97 | if (rc->rc_header.h_length < conf_size) { |
97 | rf->rf_lsflags != ls->ls_exflags) { | 98 | log_error(ls, "config too short: %d nodeid %d", |
99 | rc->rc_header.h_length, nodeid); | ||
100 | return -EPROTO; | ||
101 | } | ||
102 | |||
103 | if (le32_to_cpu(rf->rf_lvblen) != ls->ls_lvblen || | ||
104 | le32_to_cpu(rf->rf_lsflags) != ls->ls_exflags) { | ||
98 | log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", | 105 | log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x", |
99 | ls->ls_lvblen, ls->ls_exflags, | 106 | ls->ls_lvblen, ls->ls_exflags, nodeid, |
100 | nodeid, rf->rf_lvblen, rf->rf_lsflags); | 107 | le32_to_cpu(rf->rf_lvblen), |
108 | le32_to_cpu(rf->rf_lsflags)); | ||
101 | return -EPROTO; | 109 | return -EPROTO; |
102 | } | 110 | } |
103 | return 0; | 111 | return 0; |
@@ -128,7 +136,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid) | |||
128 | ls->ls_recover_nodeid = nodeid; | 136 | ls->ls_recover_nodeid = nodeid; |
129 | 137 | ||
130 | if (nodeid == dlm_our_nodeid()) { | 138 | if (nodeid == dlm_our_nodeid()) { |
131 | rc = (struct dlm_rcom *) ls->ls_recover_buf; | 139 | rc = ls->ls_recover_buf; |
132 | rc->rc_result = dlm_recover_status(ls); | 140 | rc->rc_result = dlm_recover_status(ls); |
133 | goto out; | 141 | goto out; |
134 | } | 142 | } |
@@ -147,7 +155,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid) | |||
147 | if (error) | 155 | if (error) |
148 | goto out; | 156 | goto out; |
149 | 157 | ||
150 | rc = (struct dlm_rcom *) ls->ls_recover_buf; | 158 | rc = ls->ls_recover_buf; |
151 | 159 | ||
152 | if (rc->rc_result == -ESRCH) { | 160 | if (rc->rc_result == -ESRCH) { |
153 | /* we pretend the remote lockspace exists with 0 status */ | 161 | /* we pretend the remote lockspace exists with 0 status */ |
@@ -201,14 +209,15 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len) | |||
201 | { | 209 | { |
202 | struct dlm_rcom *rc; | 210 | struct dlm_rcom *rc; |
203 | struct dlm_mhandle *mh; | 211 | struct dlm_mhandle *mh; |
204 | int error = 0, len = sizeof(struct dlm_rcom); | 212 | int error = 0; |
213 | int max_size = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom); | ||
205 | 214 | ||
206 | ls->ls_recover_nodeid = nodeid; | 215 | ls->ls_recover_nodeid = nodeid; |
207 | 216 | ||
208 | if (nodeid == dlm_our_nodeid()) { | 217 | if (nodeid == dlm_our_nodeid()) { |
209 | dlm_copy_master_names(ls, last_name, last_len, | 218 | dlm_copy_master_names(ls, last_name, last_len, |
210 | ls->ls_recover_buf + len, | 219 | ls->ls_recover_buf->rc_buf, |
211 | dlm_config.ci_buffer_size - len, nodeid); | 220 | max_size, nodeid); |
212 | goto out; | 221 | goto out; |
213 | } | 222 | } |
214 | 223 | ||
@@ -299,22 +308,22 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, | |||
299 | { | 308 | { |
300 | memset(rl, 0, sizeof(*rl)); | 309 | memset(rl, 0, sizeof(*rl)); |
301 | 310 | ||
302 | rl->rl_ownpid = lkb->lkb_ownpid; | 311 | rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); |
303 | rl->rl_lkid = lkb->lkb_id; | 312 | rl->rl_lkid = cpu_to_le32(lkb->lkb_id); |
304 | rl->rl_exflags = lkb->lkb_exflags; | 313 | rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); |
305 | rl->rl_flags = lkb->lkb_flags; | 314 | rl->rl_flags = cpu_to_le32(lkb->lkb_flags); |
306 | rl->rl_lvbseq = lkb->lkb_lvbseq; | 315 | rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); |
307 | rl->rl_rqmode = lkb->lkb_rqmode; | 316 | rl->rl_rqmode = lkb->lkb_rqmode; |
308 | rl->rl_grmode = lkb->lkb_grmode; | 317 | rl->rl_grmode = lkb->lkb_grmode; |
309 | rl->rl_status = lkb->lkb_status; | 318 | rl->rl_status = lkb->lkb_status; |
310 | rl->rl_wait_type = lkb->lkb_wait_type; | 319 | rl->rl_wait_type = cpu_to_le16(lkb->lkb_wait_type); |
311 | 320 | ||
312 | if (lkb->lkb_bastaddr) | 321 | if (lkb->lkb_bastfn) |
313 | rl->rl_asts |= AST_BAST; | 322 | rl->rl_asts |= AST_BAST; |
314 | if (lkb->lkb_astaddr) | 323 | if (lkb->lkb_astfn) |
315 | rl->rl_asts |= AST_COMP; | 324 | rl->rl_asts |= AST_COMP; |
316 | 325 | ||
317 | rl->rl_namelen = r->res_length; | 326 | rl->rl_namelen = cpu_to_le16(r->res_length); |
318 | memcpy(rl->rl_name, r->res_name, r->res_length); | 327 | memcpy(rl->rl_name, r->res_name, r->res_length); |
319 | 328 | ||
320 | /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? | 329 | /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ? |
@@ -348,6 +357,7 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) | |||
348 | return error; | 357 | return error; |
349 | } | 358 | } |
350 | 359 | ||
360 | /* needs at least dlm_rcom + rcom_lock */ | ||
351 | static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) | 361 | static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) |
352 | { | 362 | { |
353 | struct dlm_rcom *rc; | 363 | struct dlm_rcom *rc; |
@@ -401,7 +411,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) | |||
401 | rc->rc_result = -ESRCH; | 411 | rc->rc_result = -ESRCH; |
402 | 412 | ||
403 | rf = (struct rcom_config *) rc->rc_buf; | 413 | rf = (struct rcom_config *) rc->rc_buf; |
404 | rf->rf_lvblen = -1; | 414 | rf->rf_lvblen = cpu_to_le32(~0U); |
405 | 415 | ||
406 | dlm_rcom_out(rc); | 416 | dlm_rcom_out(rc); |
407 | dlm_lowcomms_commit_buffer(mh); | 417 | dlm_lowcomms_commit_buffer(mh); |
@@ -439,6 +449,8 @@ static int is_old_reply(struct dlm_ls *ls, struct dlm_rcom *rc) | |||
439 | 449 | ||
440 | void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | 450 | void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) |
441 | { | 451 | { |
452 | int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); | ||
453 | |||
442 | if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { | 454 | if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) { |
443 | log_debug(ls, "ignoring recovery message %x from %d", | 455 | log_debug(ls, "ignoring recovery message %x from %d", |
444 | rc->rc_type, nodeid); | 456 | rc->rc_type, nodeid); |
@@ -462,6 +474,8 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | |||
462 | break; | 474 | break; |
463 | 475 | ||
464 | case DLM_RCOM_LOCK: | 476 | case DLM_RCOM_LOCK: |
477 | if (rc->rc_header.h_length < lock_size) | ||
478 | goto Eshort; | ||
465 | receive_rcom_lock(ls, rc); | 479 | receive_rcom_lock(ls, rc); |
466 | break; | 480 | break; |
467 | 481 | ||
@@ -478,13 +492,18 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) | |||
478 | break; | 492 | break; |
479 | 493 | ||
480 | case DLM_RCOM_LOCK_REPLY: | 494 | case DLM_RCOM_LOCK_REPLY: |
495 | if (rc->rc_header.h_length < lock_size) | ||
496 | goto Eshort; | ||
481 | dlm_recover_process_copy(ls, rc); | 497 | dlm_recover_process_copy(ls, rc); |
482 | break; | 498 | break; |
483 | 499 | ||
484 | default: | 500 | default: |
485 | log_error(ls, "receive_rcom bad type %d", rc->rc_type); | 501 | log_error(ls, "receive_rcom bad type %d", rc->rc_type); |
486 | } | 502 | } |
487 | out: | 503 | out: |
488 | return; | 504 | return; |
505 | Eshort: | ||
506 | log_error(ls, "recovery message %x from %d is too short", | ||
507 | rc->rc_type, nodeid); | ||
489 | } | 508 | } |
490 | 509 | ||
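The rcom.c changes above, together with the util.c hunk further down that deletes rcom_lock_in/out and rcom_config_in/out, move endianness handling to the point of use: pack_rcom_lock() and make_config() store fields with cpu_to_le32/cpu_to_le16, and each reader applies le32_to_cpu/le16_to_cpu on the field it consumes, so no whole-struct byte-swap pass is needed. A small sketch of that convention, assuming nothing about the real rcom_lock layout (the struct and helper names below are made up), could look like:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct wire_lock {			/* stand-in for struct rcom_lock */
		uint32_t lkid;			/* little-endian on the wire */
		uint32_t remid;			/* little-endian on the wire */
	};

	/* behaves like cpu_to_le32: the returned value's bytes are the LE encoding */
	static uint32_t to_le32(uint32_t v)
	{
		uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
				 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
		uint32_t out;

		memcpy(&out, b, 4);
		return out;
	}

	/* behaves like le32_to_cpu: decode the LE bytes back to a host value */
	static uint32_t from_le32(uint32_t v)
	{
		uint8_t b[4];

		memcpy(b, &v, 4);
		return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
		       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
	}

	int main(void)
	{
		struct wire_lock rl;

		rl.lkid = to_le32(0x1234);	/* pack: convert at the use site */
		rl.remid = to_le32(0xabcd);

		/* unpack: convert each field as it is consumed */
		printf("lkid %x remid %x\n", from_le32(rl.lkid), from_le32(rl.remid));
		return 0;
	}

The benefit mirrored in the patch is that a field is converted exactly where its value is interpreted, so a missed conversion shows up at the use site rather than in a separate translation table that has to be kept in sync with the struct.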
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index df075dc300fa..80aba5bdd4a4 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c | |||
@@ -94,7 +94,7 @@ void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) | |||
94 | 94 | ||
95 | static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) | 95 | static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) |
96 | { | 96 | { |
97 | struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; | 97 | struct dlm_rcom *rc = ls->ls_recover_buf; |
98 | struct dlm_member *memb; | 98 | struct dlm_member *memb; |
99 | int error = 0, delay; | 99 | int error = 0, delay; |
100 | 100 | ||
@@ -123,7 +123,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status) | |||
123 | 123 | ||
124 | static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status) | 124 | static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status) |
125 | { | 125 | { |
126 | struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf; | 126 | struct dlm_rcom *rc = ls->ls_recover_buf; |
127 | int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; | 127 | int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; |
128 | 128 | ||
129 | for (;;) { | 129 | for (;;) { |
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index 0de04f17ccea..daa4183fbb84 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c | |||
@@ -20,7 +20,7 @@ | |||
20 | struct rq_entry { | 20 | struct rq_entry { |
21 | struct list_head list; | 21 | struct list_head list; |
22 | int nodeid; | 22 | int nodeid; |
23 | char request[0]; | 23 | struct dlm_message request; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | /* | 26 | /* |
@@ -30,10 +30,10 @@ struct rq_entry { | |||
30 | * lockspace is enabled on some while still suspended on others. | 30 | * lockspace is enabled on some while still suspended on others. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) | 33 | void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) |
34 | { | 34 | { |
35 | struct rq_entry *e; | 35 | struct rq_entry *e; |
36 | int length = hd->h_length; | 36 | int length = ms->m_header.h_length - sizeof(struct dlm_message); |
37 | 37 | ||
38 | e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); | 38 | e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); |
39 | if (!e) { | 39 | if (!e) { |
@@ -42,7 +42,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | e->nodeid = nodeid; | 44 | e->nodeid = nodeid; |
45 | memcpy(e->request, hd, length); | 45 | memcpy(&e->request, ms, ms->m_header.h_length); |
46 | 46 | ||
47 | mutex_lock(&ls->ls_requestqueue_mutex); | 47 | mutex_lock(&ls->ls_requestqueue_mutex); |
48 | list_add_tail(&e->list, &ls->ls_requestqueue); | 48 | list_add_tail(&e->list, &ls->ls_requestqueue); |
@@ -76,7 +76,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls) | |||
76 | e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); | 76 | e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); |
77 | mutex_unlock(&ls->ls_requestqueue_mutex); | 77 | mutex_unlock(&ls->ls_requestqueue_mutex); |
78 | 78 | ||
79 | dlm_receive_message_saved(ls, (struct dlm_message *)e->request); | 79 | dlm_receive_message_saved(ls, &e->request); |
80 | 80 | ||
81 | mutex_lock(&ls->ls_requestqueue_mutex); | 81 | mutex_lock(&ls->ls_requestqueue_mutex); |
82 | list_del(&e->list); | 82 | list_del(&e->list); |
@@ -176,7 +176,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) | |||
176 | 176 | ||
177 | mutex_lock(&ls->ls_requestqueue_mutex); | 177 | mutex_lock(&ls->ls_requestqueue_mutex); |
178 | list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { | 178 | list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { |
179 | ms = (struct dlm_message *) e->request; | 179 | ms = &e->request; |
180 | 180 | ||
181 | if (purge_request(ls, ms, e->nodeid)) { | 181 | if (purge_request(ls, ms, e->nodeid)) { |
182 | list_del(&e->list); | 182 | list_del(&e->list); |
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h index aba34fc05ee4..10ce449b77da 100644 --- a/fs/dlm/requestqueue.h +++ b/fs/dlm/requestqueue.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #ifndef __REQUESTQUEUE_DOT_H__ | 13 | #ifndef __REQUESTQUEUE_DOT_H__ |
14 | #define __REQUESTQUEUE_DOT_H__ | 14 | #define __REQUESTQUEUE_DOT_H__ |
15 | 15 | ||
16 | void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); | 16 | void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms); |
17 | int dlm_process_requestqueue(struct dlm_ls *ls); | 17 | int dlm_process_requestqueue(struct dlm_ls *ls); |
18 | void dlm_wait_requestqueue(struct dlm_ls *ls); | 18 | void dlm_wait_requestqueue(struct dlm_ls *ls); |
19 | void dlm_purge_requestqueue(struct dlm_ls *ls); | 19 | void dlm_purge_requestqueue(struct dlm_ls *ls); |
diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 7cbc6826239b..ebbcf38fd33b 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c | |||
@@ -82,7 +82,7 @@ struct dlm_lock_result32 { | |||
82 | 82 | ||
83 | static void compat_input(struct dlm_write_request *kb, | 83 | static void compat_input(struct dlm_write_request *kb, |
84 | struct dlm_write_request32 *kb32, | 84 | struct dlm_write_request32 *kb32, |
85 | int max_namelen) | 85 | size_t count) |
86 | { | 86 | { |
87 | kb->version[0] = kb32->version[0]; | 87 | kb->version[0] = kb32->version[0]; |
88 | kb->version[1] = kb32->version[1]; | 88 | kb->version[1] = kb32->version[1]; |
@@ -94,7 +94,8 @@ static void compat_input(struct dlm_write_request *kb, | |||
94 | kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { | 94 | kb->cmd == DLM_USER_REMOVE_LOCKSPACE) { |
95 | kb->i.lspace.flags = kb32->i.lspace.flags; | 95 | kb->i.lspace.flags = kb32->i.lspace.flags; |
96 | kb->i.lspace.minor = kb32->i.lspace.minor; | 96 | kb->i.lspace.minor = kb32->i.lspace.minor; |
97 | strcpy(kb->i.lspace.name, kb32->i.lspace.name); | 97 | memcpy(kb->i.lspace.name, kb32->i.lspace.name, count - |
98 | offsetof(struct dlm_write_request32, i.lspace.name)); | ||
98 | } else if (kb->cmd == DLM_USER_PURGE) { | 99 | } else if (kb->cmd == DLM_USER_PURGE) { |
99 | kb->i.purge.nodeid = kb32->i.purge.nodeid; | 100 | kb->i.purge.nodeid = kb32->i.purge.nodeid; |
100 | kb->i.purge.pid = kb32->i.purge.pid; | 101 | kb->i.purge.pid = kb32->i.purge.pid; |
@@ -112,11 +113,8 @@ static void compat_input(struct dlm_write_request *kb, | |||
112 | kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; | 113 | kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; |
113 | kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; | 114 | kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; |
114 | memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); | 115 | memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); |
115 | if (kb->i.lock.namelen <= max_namelen) | 116 | memcpy(kb->i.lock.name, kb32->i.lock.name, count - |
116 | memcpy(kb->i.lock.name, kb32->i.lock.name, | 117 | offsetof(struct dlm_write_request32, i.lock.name)); |
117 | kb->i.lock.namelen); | ||
118 | else | ||
119 | kb->i.lock.namelen = max_namelen; | ||
120 | } | 118 | } |
121 | } | 119 | } |
122 | 120 | ||
@@ -197,8 +195,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type) | |||
197 | if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) | 195 | if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) |
198 | goto out; | 196 | goto out; |
199 | 197 | ||
200 | DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb);); | 198 | DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb);); |
201 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 199 | ua = lkb->lkb_ua; |
202 | proc = ua->proc; | 200 | proc = ua->proc; |
203 | 201 | ||
204 | if (type == AST_BAST && ua->bastaddr == NULL) | 202 | if (type == AST_BAST && ua->bastaddr == NULL) |
@@ -508,7 +506,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, | |||
508 | #endif | 506 | #endif |
509 | return -EINVAL; | 507 | return -EINVAL; |
510 | 508 | ||
511 | kbuf = kmalloc(count, GFP_KERNEL); | 509 | kbuf = kzalloc(count + 1, GFP_KERNEL); |
512 | if (!kbuf) | 510 | if (!kbuf) |
513 | return -ENOMEM; | 511 | return -ENOMEM; |
514 | 512 | ||
@@ -526,15 +524,14 @@ static ssize_t device_write(struct file *file, const char __user *buf, | |||
526 | if (!kbuf->is64bit) { | 524 | if (!kbuf->is64bit) { |
527 | struct dlm_write_request32 *k32buf; | 525 | struct dlm_write_request32 *k32buf; |
528 | k32buf = (struct dlm_write_request32 *)kbuf; | 526 | k32buf = (struct dlm_write_request32 *)kbuf; |
529 | kbuf = kmalloc(count + (sizeof(struct dlm_write_request) - | 527 | kbuf = kmalloc(count + 1 + (sizeof(struct dlm_write_request) - |
530 | sizeof(struct dlm_write_request32)), GFP_KERNEL); | 528 | sizeof(struct dlm_write_request32)), GFP_KERNEL); |
531 | if (!kbuf) | 529 | if (!kbuf) |
532 | return -ENOMEM; | 530 | return -ENOMEM; |
533 | 531 | ||
534 | if (proc) | 532 | if (proc) |
535 | set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); | 533 | set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags); |
536 | compat_input(kbuf, k32buf, | 534 | compat_input(kbuf, k32buf, count + 1); |
537 | count - sizeof(struct dlm_write_request32)); | ||
538 | kfree(k32buf); | 535 | kfree(k32buf); |
539 | } | 536 | } |
540 | #endif | 537 | #endif |
@@ -774,7 +771,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, | |||
774 | { | 771 | { |
775 | struct dlm_user_proc *proc = file->private_data; | 772 | struct dlm_user_proc *proc = file->private_data; |
776 | struct dlm_lkb *lkb; | 773 | struct dlm_lkb *lkb; |
777 | struct dlm_user_args *ua; | ||
778 | DECLARE_WAITQUEUE(wait, current); | 774 | DECLARE_WAITQUEUE(wait, current); |
779 | int error, type=0, bmode=0, removed = 0; | 775 | int error, type=0, bmode=0, removed = 0; |
780 | 776 | ||
@@ -845,8 +841,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, | |||
845 | } | 841 | } |
846 | spin_unlock(&proc->asts_spin); | 842 | spin_unlock(&proc->asts_spin); |
847 | 843 | ||
848 | ua = (struct dlm_user_args *)lkb->lkb_astparam; | 844 | error = copy_result_to_user(lkb->lkb_ua, |
849 | error = copy_result_to_user(ua, | ||
850 | test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), | 845 | test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), |
851 | type, bmode, buf, count); | 846 | type, bmode, buf, count); |
852 | 847 | ||
@@ -907,7 +902,7 @@ static struct miscdevice ctl_device = { | |||
907 | .minor = MISC_DYNAMIC_MINOR, | 902 | .minor = MISC_DYNAMIC_MINOR, |
908 | }; | 903 | }; |
909 | 904 | ||
910 | int dlm_user_init(void) | 905 | int __init dlm_user_init(void) |
911 | { | 906 | { |
912 | int error; | 907 | int error; |
913 | 908 | ||
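In the device_write()/compat_input() hunks above, the 32-bit compat path stops trusting strcpy and the namelen field inside the request and instead copies exactly count - offsetof(struct dlm_write_request32, i.lock.name) bytes, where count is the size the caller actually wrote (already bounded earlier in device_write). A simplified sketch of that offsetof-based bounded copy, using stand-in structures rather than the real dlm_write_request layouts, is:

	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	struct req32 {				/* stand-in for dlm_write_request32 */
		int  cmd;
		char name[32];
	};

	struct req {				/* stand-in for dlm_write_request */
		long cmd;
		char name[64];
	};

	/* count is the number of bytes the caller wrote, validated by the caller */
	static void convert(struct req *out, const struct req32 *in, size_t count)
	{
		size_t name_len = count - offsetof(struct req32, name);

		out->cmd = in->cmd;
		/* copy exactly the name bytes the caller supplied, no more */
		memcpy(out->name, in->name, name_len);
	}

	int main(void)
	{
		struct req32 in = { .cmd = 1 };
		struct req out = { 0 };
		size_t count = offsetof(struct req32, name) + 5;	/* header + 5 name bytes */

		memcpy(in.name, "hello", 5);
		convert(&out, &in, count);
		printf("%.*s\n", 5, out.name);
		return 0;
	}

Deriving the copy length from the write size rather than from data inside the request removes the dependency on a terminating NUL or a caller-controlled length field.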
diff --git a/fs/dlm/util.c b/fs/dlm/util.c index 4d9c1f4e1bd1..e36520af7cc0 100644 --- a/fs/dlm/util.c +++ b/fs/dlm/util.c | |||
@@ -131,52 +131,8 @@ void dlm_message_in(struct dlm_message *ms) | |||
131 | ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); | 131 | ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); |
132 | } | 132 | } |
133 | 133 | ||
134 | static void rcom_lock_out(struct rcom_lock *rl) | ||
135 | { | ||
136 | rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid); | ||
137 | rl->rl_lkid = cpu_to_le32(rl->rl_lkid); | ||
138 | rl->rl_remid = cpu_to_le32(rl->rl_remid); | ||
139 | rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid); | ||
140 | rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid); | ||
141 | rl->rl_exflags = cpu_to_le32(rl->rl_exflags); | ||
142 | rl->rl_flags = cpu_to_le32(rl->rl_flags); | ||
143 | rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq); | ||
144 | rl->rl_result = cpu_to_le32(rl->rl_result); | ||
145 | rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type); | ||
146 | rl->rl_namelen = cpu_to_le16(rl->rl_namelen); | ||
147 | } | ||
148 | |||
149 | static void rcom_lock_in(struct rcom_lock *rl) | ||
150 | { | ||
151 | rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid); | ||
152 | rl->rl_lkid = le32_to_cpu(rl->rl_lkid); | ||
153 | rl->rl_remid = le32_to_cpu(rl->rl_remid); | ||
154 | rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid); | ||
155 | rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid); | ||
156 | rl->rl_exflags = le32_to_cpu(rl->rl_exflags); | ||
157 | rl->rl_flags = le32_to_cpu(rl->rl_flags); | ||
158 | rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq); | ||
159 | rl->rl_result = le32_to_cpu(rl->rl_result); | ||
160 | rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type); | ||
161 | rl->rl_namelen = le16_to_cpu(rl->rl_namelen); | ||
162 | } | ||
163 | |||
164 | static void rcom_config_out(struct rcom_config *rf) | ||
165 | { | ||
166 | rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen); | ||
167 | rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags); | ||
168 | } | ||
169 | |||
170 | static void rcom_config_in(struct rcom_config *rf) | ||
171 | { | ||
172 | rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen); | ||
173 | rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags); | ||
174 | } | ||
175 | |||
176 | void dlm_rcom_out(struct dlm_rcom *rc) | 134 | void dlm_rcom_out(struct dlm_rcom *rc) |
177 | { | 135 | { |
178 | int type = rc->rc_type; | ||
179 | |||
180 | header_out(&rc->rc_header); | 136 | header_out(&rc->rc_header); |
181 | 137 | ||
182 | rc->rc_type = cpu_to_le32(rc->rc_type); | 138 | rc->rc_type = cpu_to_le32(rc->rc_type); |
@@ -184,18 +140,10 @@ void dlm_rcom_out(struct dlm_rcom *rc) | |||
184 | rc->rc_id = cpu_to_le64(rc->rc_id); | 140 | rc->rc_id = cpu_to_le64(rc->rc_id); |
185 | rc->rc_seq = cpu_to_le64(rc->rc_seq); | 141 | rc->rc_seq = cpu_to_le64(rc->rc_seq); |
186 | rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); | 142 | rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); |
187 | |||
188 | if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY)) | ||
189 | rcom_lock_out((struct rcom_lock *) rc->rc_buf); | ||
190 | |||
191 | else if (type == DLM_RCOM_STATUS_REPLY) | ||
192 | rcom_config_out((struct rcom_config *) rc->rc_buf); | ||
193 | } | 143 | } |
194 | 144 | ||
195 | void dlm_rcom_in(struct dlm_rcom *rc) | 145 | void dlm_rcom_in(struct dlm_rcom *rc) |
196 | { | 146 | { |
197 | int type; | ||
198 | |||
199 | header_in(&rc->rc_header); | 147 | header_in(&rc->rc_header); |
200 | 148 | ||
201 | rc->rc_type = le32_to_cpu(rc->rc_type); | 149 | rc->rc_type = le32_to_cpu(rc->rc_type); |
@@ -203,13 +151,4 @@ void dlm_rcom_in(struct dlm_rcom *rc) | |||
203 | rc->rc_id = le64_to_cpu(rc->rc_id); | 151 | rc->rc_id = le64_to_cpu(rc->rc_id); |
204 | rc->rc_seq = le64_to_cpu(rc->rc_seq); | 152 | rc->rc_seq = le64_to_cpu(rc->rc_seq); |
205 | rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); | 153 | rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); |
206 | |||
207 | type = rc->rc_type; | ||
208 | |||
209 | if ((type == DLM_RCOM_LOCK) || (type == DLM_RCOM_LOCK_REPLY)) | ||
210 | rcom_lock_in((struct rcom_lock *) rc->rc_buf); | ||
211 | |||
212 | else if (type == DLM_RCOM_STATUS_REPLY) | ||
213 | rcom_config_in((struct rcom_config *) rc->rc_buf); | ||
214 | } | 154 | } |
215 | |||
diff --git a/fs/dquot.c b/fs/dquot.c index cee7c6f428f0..9c7feb62eed1 100644 --- a/fs/dquot.c +++ b/fs/dquot.c | |||
@@ -696,9 +696,8 @@ static int dqinit_needed(struct inode *inode, int type) | |||
696 | /* This routine is guarded by dqonoff_mutex mutex */ | 696 | /* This routine is guarded by dqonoff_mutex mutex */ |
697 | static void add_dquot_ref(struct super_block *sb, int type) | 697 | static void add_dquot_ref(struct super_block *sb, int type) |
698 | { | 698 | { |
699 | struct inode *inode; | 699 | struct inode *inode, *old_inode = NULL; |
700 | 700 | ||
701 | restart: | ||
702 | spin_lock(&inode_lock); | 701 | spin_lock(&inode_lock); |
703 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 702 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
704 | if (!atomic_read(&inode->i_writecount)) | 703 | if (!atomic_read(&inode->i_writecount)) |
@@ -711,12 +710,18 @@ restart: | |||
711 | __iget(inode); | 710 | __iget(inode); |
712 | spin_unlock(&inode_lock); | 711 | spin_unlock(&inode_lock); |
713 | 712 | ||
713 | iput(old_inode); | ||
714 | sb->dq_op->initialize(inode, type); | 714 | sb->dq_op->initialize(inode, type); |
715 | iput(inode); | 715 | /* We hold a reference to 'inode' so it couldn't have been |
716 | /* As we may have blocked we had better restart... */ | 716 | * removed from s_inodes list while we dropped the inode_lock. |
717 | goto restart; | 717 | * We cannot iput the inode now as we can be holding the last |
718 | * reference and we cannot iput it under inode_lock. So we | ||
719 | * keep the reference and iput it later. */ | ||
720 | old_inode = inode; | ||
721 | spin_lock(&inode_lock); | ||
718 | } | 722 | } |
719 | spin_unlock(&inode_lock); | 723 | spin_unlock(&inode_lock); |
724 | iput(old_inode); | ||
720 | } | 725 | } |
721 | 726 | ||
722 | /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ | 727 | /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */ |
@@ -1628,16 +1633,17 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path) | |||
1628 | error = path_lookup(path, LOOKUP_FOLLOW, &nd); | 1633 | error = path_lookup(path, LOOKUP_FOLLOW, &nd); |
1629 | if (error < 0) | 1634 | if (error < 0) |
1630 | return error; | 1635 | return error; |
1631 | error = security_quota_on(nd.dentry); | 1636 | error = security_quota_on(nd.path.dentry); |
1632 | if (error) | 1637 | if (error) |
1633 | goto out_path; | 1638 | goto out_path; |
1634 | /* Quota file not on the same filesystem? */ | 1639 | /* Quota file not on the same filesystem? */ |
1635 | if (nd.mnt->mnt_sb != sb) | 1640 | if (nd.path.mnt->mnt_sb != sb) |
1636 | error = -EXDEV; | 1641 | error = -EXDEV; |
1637 | else | 1642 | else |
1638 | error = vfs_quota_on_inode(nd.dentry->d_inode, type, format_id); | 1643 | error = vfs_quota_on_inode(nd.path.dentry->d_inode, type, |
1644 | format_id); | ||
1639 | out_path: | 1645 | out_path: |
1640 | path_release(&nd); | 1646 | path_put(&nd.path); |
1641 | return error; | 1647 | return error; |
1642 | } | 1648 | } |
1643 | 1649 | ||
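The add_dquot_ref() hunk above replaces the restart-after-every-inode loop with a deferred iput: the current inode is pinned with __iget() under inode_lock, the previously pinned inode is released only after the lock has been dropped, and the final reference is dropped once the scan ends. A userspace sketch of that walk-with-deferred-release pattern follows; the item/refcount structures and function names are invented for illustration.

	#include <pthread.h>
	#include <stdio.h>

	struct item {
		int refcount;
		int value;
	};

	static struct item items[3] = { {1, 10}, {1, 20}, {1, 30} };
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void get_item(struct item *it) { it->refcount++; }
	static void put_item(struct item *it) { it->refcount--; }	/* may block in real code */
	static void do_blocking_work(struct item *it) { printf("work on %d\n", it->value); }

	static void walk_all(void)
	{
		struct item *old = NULL;
		int i;

		pthread_mutex_lock(&list_lock);
		for (i = 0; i < 3; i++) {
			struct item *it = &items[i];

			get_item(it);				/* pin it so it stays valid */
			pthread_mutex_unlock(&list_lock);

			if (old)
				put_item(old);			/* release previous item outside the lock */
			do_blocking_work(it);
			old = it;				/* keep our reference until the next pass */

			pthread_mutex_lock(&list_lock);
		}
		pthread_mutex_unlock(&list_lock);
		if (old)
			put_item(old);				/* drop the last reference */
	}

	int main(void)
	{
		walk_all();
		return 0;
	}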
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index f8ef0af919e7..a066e109ad9c 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -355,8 +355,11 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, | |||
355 | } | 355 | } |
356 | /* Consider doing this once, when the file is opened */ | 356 | /* Consider doing this once, when the file is opened */ |
357 | mutex_lock(&crypt_stat->cs_tfm_mutex); | 357 | mutex_lock(&crypt_stat->cs_tfm_mutex); |
358 | rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, | 358 | if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) { |
359 | crypt_stat->key_size); | 359 | rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key, |
360 | crypt_stat->key_size); | ||
361 | crypt_stat->flags |= ECRYPTFS_KEY_SET; | ||
362 | } | ||
360 | if (rc) { | 363 | if (rc) { |
361 | ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", | 364 | ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n", |
362 | rc); | 365 | rc); |
@@ -376,11 +379,10 @@ out: | |||
376 | * | 379 | * |
377 | * Convert an eCryptfs page index into a lower byte offset | 380 | * Convert an eCryptfs page index into a lower byte offset |
378 | */ | 381 | */ |
379 | void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, | 382 | static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, |
380 | struct ecryptfs_crypt_stat *crypt_stat) | 383 | struct ecryptfs_crypt_stat *crypt_stat) |
381 | { | 384 | { |
382 | (*offset) = ((crypt_stat->extent_size | 385 | (*offset) = (crypt_stat->num_header_bytes_at_front |
383 | * crypt_stat->num_header_extents_at_front) | ||
384 | + (crypt_stat->extent_size * extent_num)); | 386 | + (crypt_stat->extent_size * extent_num)); |
385 | } | 387 | } |
386 | 388 | ||
@@ -842,15 +844,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) | |||
842 | set_extent_mask_and_shift(crypt_stat); | 844 | set_extent_mask_and_shift(crypt_stat); |
843 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; | 845 | crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES; |
844 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 846 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
845 | crypt_stat->num_header_extents_at_front = 0; | 847 | crypt_stat->num_header_bytes_at_front = 0; |
846 | else { | 848 | else { |
847 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) | 849 | if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) |
848 | crypt_stat->num_header_extents_at_front = | 850 | crypt_stat->num_header_bytes_at_front = |
849 | (ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE | 851 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; |
850 | / crypt_stat->extent_size); | ||
851 | else | 852 | else |
852 | crypt_stat->num_header_extents_at_front = | 853 | crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE; |
853 | (PAGE_CACHE_SIZE / crypt_stat->extent_size); | ||
854 | } | 854 | } |
855 | } | 855 | } |
856 | 856 | ||
@@ -1128,7 +1128,7 @@ write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat, | |||
1128 | 1128 | ||
1129 | struct ecryptfs_cipher_code_str_map_elem { | 1129 | struct ecryptfs_cipher_code_str_map_elem { |
1130 | char cipher_str[16]; | 1130 | char cipher_str[16]; |
1131 | u16 cipher_code; | 1131 | u8 cipher_code; |
1132 | }; | 1132 | }; |
1133 | 1133 | ||
1134 | /* Add support for additional ciphers by adding elements here. The | 1134 | /* Add support for additional ciphers by adding elements here. The |
@@ -1152,10 +1152,10 @@ ecryptfs_cipher_code_str_map[] = { | |||
1152 | * | 1152 | * |
1153 | * Returns zero on no match, or the cipher code on match | 1153 | * Returns zero on no match, or the cipher code on match |
1154 | */ | 1154 | */ |
1155 | u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat) | 1155 | u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat) |
1156 | { | 1156 | { |
1157 | int i; | 1157 | int i; |
1158 | u16 code = 0; | 1158 | u8 code = 0; |
1159 | struct ecryptfs_cipher_code_str_map_elem *map = | 1159 | struct ecryptfs_cipher_code_str_map_elem *map = |
1160 | ecryptfs_cipher_code_str_map; | 1160 | ecryptfs_cipher_code_str_map; |
1161 | 1161 | ||
@@ -1187,7 +1187,7 @@ u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat) | |||
1187 | * | 1187 | * |
1188 | * Returns zero on success | 1188 | * Returns zero on success |
1189 | */ | 1189 | */ |
1190 | int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code) | 1190 | int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code) |
1191 | { | 1191 | { |
1192 | int rc = 0; | 1192 | int rc = 0; |
1193 | int i; | 1193 | int i; |
@@ -1236,7 +1236,8 @@ ecryptfs_write_header_metadata(char *virt, | |||
1236 | 1236 | ||
1237 | header_extent_size = (u32)crypt_stat->extent_size; | 1237 | header_extent_size = (u32)crypt_stat->extent_size; |
1238 | num_header_extents_at_front = | 1238 | num_header_extents_at_front = |
1239 | (u16)crypt_stat->num_header_extents_at_front; | 1239 | (u16)(crypt_stat->num_header_bytes_at_front |
1240 | / crypt_stat->extent_size); | ||
1240 | header_extent_size = cpu_to_be32(header_extent_size); | 1241 | header_extent_size = cpu_to_be32(header_extent_size); |
1241 | memcpy(virt, &header_extent_size, 4); | 1242 | memcpy(virt, &header_extent_size, 4); |
1242 | virt += 4; | 1243 | virt += 4; |
@@ -1311,40 +1312,16 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t *size, | |||
1311 | static int | 1312 | static int |
1312 | ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, | 1313 | ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, |
1313 | struct dentry *ecryptfs_dentry, | 1314 | struct dentry *ecryptfs_dentry, |
1314 | char *page_virt) | 1315 | char *virt) |
1315 | { | 1316 | { |
1316 | int current_header_page; | ||
1317 | int header_pages; | ||
1318 | int rc; | 1317 | int rc; |
1319 | 1318 | ||
1320 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, page_virt, | 1319 | rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt, |
1321 | 0, PAGE_CACHE_SIZE); | 1320 | 0, crypt_stat->num_header_bytes_at_front); |
1322 | if (rc) { | 1321 | if (rc) |
1323 | printk(KERN_ERR "%s: Error attempting to write header " | 1322 | printk(KERN_ERR "%s: Error attempting to write header " |
1324 | "information to lower file; rc = [%d]\n", __FUNCTION__, | 1323 | "information to lower file; rc = [%d]\n", __FUNCTION__, |
1325 | rc); | 1324 | rc); |
1326 | goto out; | ||
1327 | } | ||
1328 | header_pages = ((crypt_stat->extent_size | ||
1329 | * crypt_stat->num_header_extents_at_front) | ||
1330 | / PAGE_CACHE_SIZE); | ||
1331 | memset(page_virt, 0, PAGE_CACHE_SIZE); | ||
1332 | current_header_page = 1; | ||
1333 | while (current_header_page < header_pages) { | ||
1334 | loff_t offset; | ||
1335 | |||
1336 | offset = (((loff_t)current_header_page) << PAGE_CACHE_SHIFT); | ||
1337 | if ((rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, | ||
1338 | page_virt, offset, | ||
1339 | PAGE_CACHE_SIZE))) { | ||
1340 | printk(KERN_ERR "%s: Error attempting to write header " | ||
1341 | "information to lower file; rc = [%d]\n", | ||
1342 | __FUNCTION__, rc); | ||
1343 | goto out; | ||
1344 | } | ||
1345 | current_header_page++; | ||
1346 | } | ||
1347 | out: | ||
1348 | return rc; | 1325 | return rc; |
1349 | } | 1326 | } |
1350 | 1327 | ||
@@ -1370,15 +1347,13 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry, | |||
1370 | * retrieved via a prompt. Exactly what happens at this point should | 1347 | * retrieved via a prompt. Exactly what happens at this point should |
1371 | * be policy-dependent. | 1348 | * be policy-dependent. |
1372 | * | 1349 | * |
1373 | * TODO: Support header information spanning multiple pages | ||
1374 | * | ||
1375 | * Returns zero on success; non-zero on error | 1350 | * Returns zero on success; non-zero on error |
1376 | */ | 1351 | */ |
1377 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | 1352 | int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) |
1378 | { | 1353 | { |
1379 | struct ecryptfs_crypt_stat *crypt_stat = | 1354 | struct ecryptfs_crypt_stat *crypt_stat = |
1380 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; | 1355 | &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat; |
1381 | char *page_virt; | 1356 | char *virt; |
1382 | size_t size = 0; | 1357 | size_t size = 0; |
1383 | int rc = 0; | 1358 | int rc = 0; |
1384 | 1359 | ||
@@ -1389,40 +1364,39 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1389 | goto out; | 1364 | goto out; |
1390 | } | 1365 | } |
1391 | } else { | 1366 | } else { |
1367 | printk(KERN_WARNING "%s: Encrypted flag not set\n", | ||
1368 | __FUNCTION__); | ||
1392 | rc = -EINVAL; | 1369 | rc = -EINVAL; |
1393 | ecryptfs_printk(KERN_WARNING, | ||
1394 | "Called with crypt_stat->encrypted == 0\n"); | ||
1395 | goto out; | 1370 | goto out; |
1396 | } | 1371 | } |
1397 | /* Released in this function */ | 1372 | /* Released in this function */ |
1398 | page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER); | 1373 | virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL); |
1399 | if (!page_virt) { | 1374 | if (!virt) { |
1400 | ecryptfs_printk(KERN_ERR, "Out of memory\n"); | 1375 | printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__); |
1401 | rc = -ENOMEM; | 1376 | rc = -ENOMEM; |
1402 | goto out; | 1377 | goto out; |
1403 | } | 1378 | } |
1404 | rc = ecryptfs_write_headers_virt(page_virt, &size, crypt_stat, | 1379 | rc = ecryptfs_write_headers_virt(virt, &size, crypt_stat, |
1405 | ecryptfs_dentry); | 1380 | ecryptfs_dentry); |
1406 | if (unlikely(rc)) { | 1381 | if (unlikely(rc)) { |
1407 | ecryptfs_printk(KERN_ERR, "Error whilst writing headers\n"); | 1382 | printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", |
1408 | memset(page_virt, 0, PAGE_CACHE_SIZE); | 1383 | __FUNCTION__, rc); |
1409 | goto out_free; | 1384 | goto out_free; |
1410 | } | 1385 | } |
1411 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 1386 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
1412 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, | 1387 | rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, |
1413 | crypt_stat, page_virt, | 1388 | crypt_stat, virt, size); |
1414 | size); | ||
1415 | else | 1389 | else |
1416 | rc = ecryptfs_write_metadata_to_contents(crypt_stat, | 1390 | rc = ecryptfs_write_metadata_to_contents(crypt_stat, |
1417 | ecryptfs_dentry, | 1391 | ecryptfs_dentry, virt); |
1418 | page_virt); | ||
1419 | if (rc) { | 1392 | if (rc) { |
1420 | printk(KERN_ERR "Error writing metadata out to lower file; " | 1393 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " |
1421 | "rc = [%d]\n", rc); | 1394 | "rc = [%d]\n", __FUNCTION__, rc); |
1422 | goto out_free; | 1395 | goto out_free; |
1423 | } | 1396 | } |
1424 | out_free: | 1397 | out_free: |
1425 | kmem_cache_free(ecryptfs_header_cache_0, page_virt); | 1398 | memset(virt, 0, crypt_stat->num_header_bytes_at_front); |
1399 | kfree(virt); | ||
1426 | out: | 1400 | out: |
1427 | return rc; | 1401 | return rc; |
1428 | } | 1402 | } |
@@ -1442,16 +1416,16 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
1442 | virt += sizeof(u32); | 1416 | virt += sizeof(u32); |
1443 | memcpy(&num_header_extents_at_front, virt, sizeof(u16)); | 1417 | memcpy(&num_header_extents_at_front, virt, sizeof(u16)); |
1444 | num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front); | 1418 | num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front); |
1445 | crypt_stat->num_header_extents_at_front = | 1419 | crypt_stat->num_header_bytes_at_front = |
1446 | (int)num_header_extents_at_front; | 1420 | (((size_t)num_header_extents_at_front |
1421 | * (size_t)header_extent_size)); | ||
1447 | (*bytes_read) = (sizeof(u32) + sizeof(u16)); | 1422 | (*bytes_read) = (sizeof(u32) + sizeof(u16)); |
1448 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) | 1423 | if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE) |
1449 | && ((crypt_stat->extent_size | 1424 | && (crypt_stat->num_header_bytes_at_front |
1450 | * crypt_stat->num_header_extents_at_front) | ||
1451 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { | 1425 | < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) { |
1452 | rc = -EINVAL; | 1426 | rc = -EINVAL; |
1453 | printk(KERN_WARNING "Invalid number of header extents: [%zd]\n", | 1427 | printk(KERN_WARNING "Invalid header size: [%zd]\n", |
1454 | crypt_stat->num_header_extents_at_front); | 1428 | crypt_stat->num_header_bytes_at_front); |
1455 | } | 1429 | } |
1456 | return rc; | 1430 | return rc; |
1457 | } | 1431 | } |
@@ -1466,7 +1440,8 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat, | |||
1466 | */ | 1440 | */ |
1467 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) | 1441 | static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat) |
1468 | { | 1442 | { |
1469 | crypt_stat->num_header_extents_at_front = 2; | 1443 | crypt_stat->num_header_bytes_at_front = |
1444 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
1470 | } | 1445 | } |
1471 | 1446 | ||
1472 | /** | 1447 | /** |
@@ -1552,9 +1527,10 @@ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode) | |||
1552 | size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, | 1527 | size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, |
1553 | page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); | 1528 | page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); |
1554 | if (size < 0) { | 1529 | if (size < 0) { |
1555 | printk(KERN_ERR "Error attempting to read the [%s] " | 1530 | if (unlikely(ecryptfs_verbosity > 0)) |
1556 | "xattr from the lower file; return value = [%zd]\n", | 1531 | printk(KERN_INFO "Error attempting to read the [%s] " |
1557 | ECRYPTFS_XATTR_NAME, size); | 1532 | "xattr from the lower file; return value = " |
1533 | "[%zd]\n", ECRYPTFS_XATTR_NAME, size); | ||
1558 | rc = -EINVAL; | 1534 | rc = -EINVAL; |
1559 | goto out; | 1535 | goto out; |
1560 | } | 1536 | } |
@@ -1802,7 +1778,7 @@ out: | |||
1802 | } | 1778 | } |
1803 | 1779 | ||
1804 | struct kmem_cache *ecryptfs_key_tfm_cache; | 1780 | struct kmem_cache *ecryptfs_key_tfm_cache; |
1805 | struct list_head key_tfm_list; | 1781 | static struct list_head key_tfm_list; |
1806 | struct mutex key_tfm_list_mutex; | 1782 | struct mutex key_tfm_list_mutex; |
1807 | 1783 | ||
1808 | int ecryptfs_init_crypto(void) | 1784 | int ecryptfs_init_crypto(void) |
@@ -1812,6 +1788,11 @@ int ecryptfs_init_crypto(void) | |||
1812 | return 0; | 1788 | return 0; |
1813 | } | 1789 | } |
1814 | 1790 | ||
1791 | /** | ||
1792 | * ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list | ||
1793 | * | ||
1794 | * Called only at module unload time | ||
1795 | */ | ||
1815 | int ecryptfs_destroy_crypto(void) | 1796 | int ecryptfs_destroy_crypto(void) |
1816 | { | 1797 | { |
1817 | struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; | 1798 | struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; |
@@ -1835,6 +1816,8 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name, | |||
1835 | struct ecryptfs_key_tfm *tmp_tfm; | 1816 | struct ecryptfs_key_tfm *tmp_tfm; |
1836 | int rc = 0; | 1817 | int rc = 0; |
1837 | 1818 | ||
1819 | BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); | ||
1820 | |||
1838 | tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL); | 1821 | tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL); |
1839 | if (key_tfm != NULL) | 1822 | if (key_tfm != NULL) |
1840 | (*key_tfm) = tmp_tfm; | 1823 | (*key_tfm) = tmp_tfm; |
@@ -1861,13 +1844,50 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name, | |||
1861 | (*key_tfm) = NULL; | 1844 | (*key_tfm) = NULL; |
1862 | goto out; | 1845 | goto out; |
1863 | } | 1846 | } |
1864 | mutex_lock(&key_tfm_list_mutex); | ||
1865 | list_add(&tmp_tfm->key_tfm_list, &key_tfm_list); | 1847 | list_add(&tmp_tfm->key_tfm_list, &key_tfm_list); |
1866 | mutex_unlock(&key_tfm_list_mutex); | ||
1867 | out: | 1848 | out: |
1868 | return rc; | 1849 | return rc; |
1869 | } | 1850 | } |
1870 | 1851 | ||
1852 | /** | ||
1853 | * ecryptfs_tfm_exists - Search for existing tfm for cipher_name. | ||
1854 | * @cipher_name: the name of the cipher to search for | ||
1855 | * @key_tfm: set to corresponding tfm if found | ||
1856 | * | ||
1857 | * Searches for cached key_tfm matching @cipher_name | ||
1858 | * Must be called with &key_tfm_list_mutex held | ||
1859 | * Returns 1 if found, with @key_tfm set | ||
1860 | * Returns 0 if not found, with @key_tfm set to NULL | ||
1861 | */ | ||
1862 | int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm) | ||
1863 | { | ||
1864 | struct ecryptfs_key_tfm *tmp_key_tfm; | ||
1865 | |||
1866 | BUG_ON(!mutex_is_locked(&key_tfm_list_mutex)); | ||
1867 | |||
1868 | list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) { | ||
1869 | if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) { | ||
1870 | if (key_tfm) | ||
1871 | (*key_tfm) = tmp_key_tfm; | ||
1872 | return 1; | ||
1873 | } | ||
1874 | } | ||
1875 | if (key_tfm) | ||
1876 | (*key_tfm) = NULL; | ||
1877 | return 0; | ||
1878 | } | ||
1879 | |||
1880 | /** | ||
1881 | * ecryptfs_get_tfm_and_mutex_for_cipher_name | ||
1882 | * | ||
1883 | * @tfm: set to cached tfm found, or new tfm created | ||
1884 | * @tfm_mutex: set to mutex for cached tfm found, or new tfm created | ||
1885 | * @cipher_name: the name of the cipher to search for and/or add | ||
1886 | * | ||
1887 | * Sets pointers to @tfm & @tfm_mutex matching @cipher_name. | ||
1888 | * Searches for cached item first, and creates new if not found. | ||
1889 | * Returns 0 on success, non-zero if adding new cipher failed | ||
1890 | */ | ||
1871 | int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, | 1891 | int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, |
1872 | struct mutex **tfm_mutex, | 1892 | struct mutex **tfm_mutex, |
1873 | char *cipher_name) | 1893 | char *cipher_name) |
@@ -1877,22 +1897,17 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, | |||
1877 | 1897 | ||
1878 | (*tfm) = NULL; | 1898 | (*tfm) = NULL; |
1879 | (*tfm_mutex) = NULL; | 1899 | (*tfm_mutex) = NULL; |
1900 | |||
1880 | mutex_lock(&key_tfm_list_mutex); | 1901 | mutex_lock(&key_tfm_list_mutex); |
1881 | list_for_each_entry(key_tfm, &key_tfm_list, key_tfm_list) { | 1902 | if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) { |
1882 | if (strcmp(key_tfm->cipher_name, cipher_name) == 0) { | 1903 | rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0); |
1883 | (*tfm) = key_tfm->key_tfm; | 1904 | if (rc) { |
1884 | (*tfm_mutex) = &key_tfm->key_tfm_mutex; | 1905 | printk(KERN_ERR "Error adding new key_tfm to list; " |
1885 | mutex_unlock(&key_tfm_list_mutex); | 1906 | "rc = [%d]\n", rc); |
1886 | goto out; | 1907 | goto out; |
1887 | } | 1908 | } |
1888 | } | 1909 | } |
1889 | mutex_unlock(&key_tfm_list_mutex); | 1910 | mutex_unlock(&key_tfm_list_mutex); |
1890 | rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0); | ||
1891 | if (rc) { | ||
1892 | printk(KERN_ERR "Error adding new key_tfm to list; rc = [%d]\n", | ||
1893 | rc); | ||
1894 | goto out; | ||
1895 | } | ||
1896 | (*tfm) = key_tfm->key_tfm; | 1911 | (*tfm) = key_tfm->key_tfm; |
1897 | (*tfm_mutex) = &key_tfm->key_tfm_mutex; | 1912 | (*tfm_mutex) = &key_tfm->key_tfm_mutex; |
1898 | out: | 1913 | out: |
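The crypto.c hunks above move the key_tfm_list locking out to the callers: ecryptfs_add_new_key_tfm() and the new ecryptfs_tfm_exists() both assert with BUG_ON() that key_tfm_list_mutex is already held, and ecryptfs_get_tfm_and_mutex_for_cipher_name() now takes the mutex once around the whole lookup-or-create sequence. A condensed sketch of that calling convention, using only the helpers declared in this diff (the wrapper name get_tfm_for_cipher is hypothetical):

static int get_tfm_for_cipher(char *cipher_name,
			      struct ecryptfs_key_tfm **key_tfm)
{
	int rc = 0;

	mutex_lock(&key_tfm_list_mutex);
	/* both helpers below require key_tfm_list_mutex to be held */
	if (!ecryptfs_tfm_exists(cipher_name, key_tfm))
		rc = ecryptfs_add_new_key_tfm(key_tfm, cipher_name, 0);
	mutex_unlock(&key_tfm_list_mutex);
	return rc;	/* 0 on a cache hit or successful add; non-zero otherwise */
}

The mount-path change in main.c further down follows exactly this shape when it initializes the global default cipher.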
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index cb20b964419f..841a032050a7 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c | |||
@@ -51,13 +51,13 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
51 | 51 | ||
52 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) | 52 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) |
53 | goto out; | 53 | goto out; |
54 | dentry_save = nd->dentry; | 54 | dentry_save = nd->path.dentry; |
55 | vfsmount_save = nd->mnt; | 55 | vfsmount_save = nd->path.mnt; |
56 | nd->dentry = lower_dentry; | 56 | nd->path.dentry = lower_dentry; |
57 | nd->mnt = lower_mnt; | 57 | nd->path.mnt = lower_mnt; |
58 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); | 58 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); |
59 | nd->dentry = dentry_save; | 59 | nd->path.dentry = dentry_save; |
60 | nd->mnt = vfsmount_save; | 60 | nd->path.mnt = vfsmount_save; |
61 | if (dentry->d_inode) { | 61 | if (dentry->d_inode) { |
62 | struct inode *lower_inode = | 62 | struct inode *lower_inode = |
63 | ecryptfs_inode_to_lower(dentry->d_inode); | 63 | ecryptfs_inode_to_lower(dentry->d_inode); |
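The same struct nameidata change recurs in inode.c and main.c further down: the dentry/vfsmount pair now lives in nd->path, so the stacking trick of temporarily pointing the nameidata at the lower filesystem becomes a save/swap/restore of nd->path members. A compressed form of the idiom (saving the whole struct path at once is an illustrative variation, equivalent to but not identical with the open-coded version in the hunk):

	struct path saved = nd->path;	/* struct path bundles mnt + dentry */

	nd->path.dentry = lower_dentry;
	nd->path.mnt = lower_mnt;
	rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
	nd->path = saved;		/* restore before returning to the VFS */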
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index ce7a5d4aec36..5007f788da01 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -234,10 +234,11 @@ struct ecryptfs_crypt_stat { | |||
234 | #define ECRYPTFS_KEY_VALID 0x00000080 | 234 | #define ECRYPTFS_KEY_VALID 0x00000080 |
235 | #define ECRYPTFS_METADATA_IN_XATTR 0x00000100 | 235 | #define ECRYPTFS_METADATA_IN_XATTR 0x00000100 |
236 | #define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200 | 236 | #define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200 |
237 | #define ECRYPTFS_KEY_SET 0x00000400 | ||
237 | u32 flags; | 238 | u32 flags; |
238 | unsigned int file_version; | 239 | unsigned int file_version; |
239 | size_t iv_bytes; | 240 | size_t iv_bytes; |
240 | size_t num_header_extents_at_front; | 241 | size_t num_header_bytes_at_front; |
241 | size_t extent_size; /* Data extent size; default is 4096 */ | 242 | size_t extent_size; /* Data extent size; default is 4096 */ |
242 | size_t key_size; | 243 | size_t key_size; |
243 | size_t extent_shift; | 244 | size_t extent_shift; |
@@ -322,7 +323,6 @@ struct ecryptfs_key_tfm { | |||
322 | unsigned char cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; | 323 | unsigned char cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; |
323 | }; | 324 | }; |
324 | 325 | ||
325 | extern struct list_head key_tfm_list; | ||
326 | extern struct mutex key_tfm_list_mutex; | 326 | extern struct mutex key_tfm_list_mutex; |
327 | 327 | ||
328 | /** | 328 | /** |
@@ -521,11 +521,9 @@ extern struct kmem_cache *ecryptfs_file_info_cache; | |||
521 | extern struct kmem_cache *ecryptfs_dentry_info_cache; | 521 | extern struct kmem_cache *ecryptfs_dentry_info_cache; |
522 | extern struct kmem_cache *ecryptfs_inode_info_cache; | 522 | extern struct kmem_cache *ecryptfs_inode_info_cache; |
523 | extern struct kmem_cache *ecryptfs_sb_info_cache; | 523 | extern struct kmem_cache *ecryptfs_sb_info_cache; |
524 | extern struct kmem_cache *ecryptfs_header_cache_0; | ||
525 | extern struct kmem_cache *ecryptfs_header_cache_1; | 524 | extern struct kmem_cache *ecryptfs_header_cache_1; |
526 | extern struct kmem_cache *ecryptfs_header_cache_2; | 525 | extern struct kmem_cache *ecryptfs_header_cache_2; |
527 | extern struct kmem_cache *ecryptfs_xattr_cache; | 526 | extern struct kmem_cache *ecryptfs_xattr_cache; |
528 | extern struct kmem_cache *ecryptfs_lower_page_cache; | ||
529 | extern struct kmem_cache *ecryptfs_key_record_cache; | 527 | extern struct kmem_cache *ecryptfs_key_record_cache; |
530 | extern struct kmem_cache *ecryptfs_key_sig_cache; | 528 | extern struct kmem_cache *ecryptfs_key_sig_cache; |
531 | extern struct kmem_cache *ecryptfs_global_auth_tok_cache; | 529 | extern struct kmem_cache *ecryptfs_global_auth_tok_cache; |
@@ -562,8 +560,8 @@ int ecryptfs_read_and_validate_header_region(char *data, | |||
562 | struct inode *ecryptfs_inode); | 560 | struct inode *ecryptfs_inode); |
563 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, | 561 | int ecryptfs_read_and_validate_xattr_region(char *page_virt, |
564 | struct dentry *ecryptfs_dentry); | 562 | struct dentry *ecryptfs_dentry); |
565 | u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat); | 563 | u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat); |
566 | int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code); | 564 | int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code); |
567 | void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat); | 565 | void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat); |
568 | int ecryptfs_generate_key_packet_set(char *dest_base, | 566 | int ecryptfs_generate_key_packet_set(char *dest_base, |
569 | struct ecryptfs_crypt_stat *crypt_stat, | 567 | struct ecryptfs_crypt_stat *crypt_stat, |
@@ -576,8 +574,6 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length); | |||
576 | int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode); | 574 | int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode); |
577 | int ecryptfs_inode_set(struct inode *inode, void *lower_inode); | 575 | int ecryptfs_inode_set(struct inode *inode, void *lower_inode); |
578 | void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode); | 576 | void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode); |
579 | ssize_t ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, | ||
580 | size_t size); | ||
581 | ssize_t | 577 | ssize_t |
582 | ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, | 578 | ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, |
583 | void *value, size_t size); | 579 | void *value, size_t size); |
@@ -623,6 +619,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name, | |||
623 | size_t key_size); | 619 | size_t key_size); |
624 | int ecryptfs_init_crypto(void); | 620 | int ecryptfs_init_crypto(void); |
625 | int ecryptfs_destroy_crypto(void); | 621 | int ecryptfs_destroy_crypto(void); |
622 | int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm); | ||
626 | int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, | 623 | int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm, |
627 | struct mutex **tfm_mutex, | 624 | struct mutex **tfm_mutex, |
628 | char *cipher_name); | 625 | char *cipher_name); |
@@ -631,8 +628,6 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, | |||
631 | char *sig); | 628 | char *sig); |
632 | int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start, | 629 | int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start, |
633 | int num_zeros); | 630 | int num_zeros); |
634 | void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num, | ||
635 | struct ecryptfs_crypt_stat *crypt_stat); | ||
636 | int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, | 631 | int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, |
637 | loff_t offset, size_t size); | 632 | loff_t offset, size_t size); |
638 | int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, | 633 | int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, |
@@ -646,8 +641,6 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, | |||
646 | pgoff_t page_index, | 641 | pgoff_t page_index, |
647 | size_t offset_in_page, size_t size, | 642 | size_t offset_in_page, size_t size, |
648 | struct inode *ecryptfs_inode); | 643 | struct inode *ecryptfs_inode); |
649 | int ecryptfs_read(char *data, loff_t offset, size_t size, | ||
650 | struct file *ecryptfs_file); | ||
651 | struct page *ecryptfs_get_locked_page(struct file *file, loff_t index); | 644 | struct page *ecryptfs_get_locked_page(struct file *file, loff_t index); |
652 | 645 | ||
653 | #endif /* #ifndef ECRYPTFS_KERNEL_H */ | 646 | #endif /* #ifndef ECRYPTFS_KERNEL_H */ |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index c98c4690a771..2b8f5ed4adea 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -209,9 +209,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | |||
209 | if (!(mount_crypt_stat->flags | 209 | if (!(mount_crypt_stat->flags |
210 | & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { | 210 | & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { |
211 | rc = -EIO; | 211 | rc = -EIO; |
212 | printk(KERN_WARNING "Attempt to read file that " | 212 | printk(KERN_WARNING "Either the lower file " |
213 | "is not in a valid eCryptfs format, " | 213 | "is not in a valid eCryptfs format, " |
214 | "and plaintext passthrough mode is not " | 214 | "or the key could not be retrieved. " |
215 | "Plaintext passthrough mode is not " | ||
215 | "enabled; returning -EIO\n"); | 216 | "enabled; returning -EIO\n"); |
216 | mutex_unlock(&crypt_stat->cs_mutex); | 217 | mutex_unlock(&crypt_stat->cs_mutex); |
217 | goto out_free; | 218 | goto out_free; |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 5a719180983c..e23861152101 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -77,13 +77,13 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, | |||
77 | struct vfsmount *vfsmount_save; | 77 | struct vfsmount *vfsmount_save; |
78 | int rc; | 78 | int rc; |
79 | 79 | ||
80 | dentry_save = nd->dentry; | 80 | dentry_save = nd->path.dentry; |
81 | vfsmount_save = nd->mnt; | 81 | vfsmount_save = nd->path.mnt; |
82 | nd->dentry = lower_dentry; | 82 | nd->path.dentry = lower_dentry; |
83 | nd->mnt = lower_mnt; | 83 | nd->path.mnt = lower_mnt; |
84 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); | 84 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); |
85 | nd->dentry = dentry_save; | 85 | nd->path.dentry = dentry_save; |
86 | nd->mnt = vfsmount_save; | 86 | nd->path.mnt = vfsmount_save; |
87 | return rc; | 87 | return rc; |
88 | } | 88 | } |
89 | 89 | ||
@@ -365,8 +365,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry, | |||
365 | dentry->d_sb)->mount_crypt_stat; | 365 | dentry->d_sb)->mount_crypt_stat; |
366 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { | 366 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) { |
367 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 367 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
368 | file_size = ((crypt_stat->extent_size | 368 | file_size = (crypt_stat->num_header_bytes_at_front |
369 | * crypt_stat->num_header_extents_at_front) | ||
370 | + i_size_read(lower_dentry->d_inode)); | 369 | + i_size_read(lower_dentry->d_inode)); |
371 | else | 370 | else |
372 | file_size = i_size_read(lower_dentry->d_inode); | 371 | file_size = i_size_read(lower_dentry->d_inode); |
@@ -685,7 +684,7 @@ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) | |||
685 | * @crypt_stat: Crypt_stat associated with file | 684 | * @crypt_stat: Crypt_stat associated with file |
686 | * @upper_size: Size of the upper file | 685 | * @upper_size: Size of the upper file |
687 | * | 686 | * |
688 | * Calculate the requried size of the lower file based on the | 687 | * Calculate the required size of the lower file based on the |
689 | * specified size of the upper file. This calculation is based on the | 688 | * specified size of the upper file. This calculation is based on the |
690 | * number of headers in the underlying file and the extent size. | 689 | * number of headers in the underlying file and the extent size. |
691 | * | 690 | * |
@@ -697,8 +696,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat, | |||
697 | { | 696 | { |
698 | loff_t lower_size; | 697 | loff_t lower_size; |
699 | 698 | ||
700 | lower_size = (crypt_stat->extent_size | 699 | lower_size = crypt_stat->num_header_bytes_at_front; |
701 | * crypt_stat->num_header_extents_at_front); | ||
702 | if (upper_size != 0) { | 700 | if (upper_size != 0) { |
703 | loff_t num_extents; | 701 | loff_t num_extents; |
704 | 702 | ||
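Worked example for the simplified upper_size_to_lower_size(): with the default 4096-byte extent and, say, 8192 bytes of header at the front of the lower file, a 5000-byte upper file needs 8192 + 2 * 4096 = 16384 lower bytes, assuming the rest of the function (not shown in this hunk) still rounds the upper size up to whole extents. The only change here is that the header contribution is read directly from num_header_bytes_at_front instead of being recomputed as extent_size * num_header_extents_at_front.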
@@ -821,14 +819,14 @@ ecryptfs_permission(struct inode *inode, int mask, struct nameidata *nd) | |||
821 | int rc; | 819 | int rc; |
822 | 820 | ||
823 | if (nd) { | 821 | if (nd) { |
824 | struct vfsmount *vfsmnt_save = nd->mnt; | 822 | struct vfsmount *vfsmnt_save = nd->path.mnt; |
825 | struct dentry *dentry_save = nd->dentry; | 823 | struct dentry *dentry_save = nd->path.dentry; |
826 | 824 | ||
827 | nd->mnt = ecryptfs_dentry_to_lower_mnt(nd->dentry); | 825 | nd->path.mnt = ecryptfs_dentry_to_lower_mnt(nd->path.dentry); |
828 | nd->dentry = ecryptfs_dentry_to_lower(nd->dentry); | 826 | nd->path.dentry = ecryptfs_dentry_to_lower(nd->path.dentry); |
829 | rc = permission(ecryptfs_inode_to_lower(inode), mask, nd); | 827 | rc = permission(ecryptfs_inode_to_lower(inode), mask, nd); |
830 | nd->mnt = vfsmnt_save; | 828 | nd->path.mnt = vfsmnt_save; |
831 | nd->dentry = dentry_save; | 829 | nd->path.dentry = dentry_save; |
832 | } else | 830 | } else |
833 | rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL); | 831 | rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL); |
834 | return rc; | 832 | return rc; |
@@ -875,11 +873,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) | |||
875 | if (!(mount_crypt_stat->flags | 873 | if (!(mount_crypt_stat->flags |
876 | & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { | 874 | & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { |
877 | rc = -EIO; | 875 | rc = -EIO; |
878 | printk(KERN_WARNING "Attempt to read file that " | 876 | printk(KERN_WARNING "Either the lower file " |
879 | "is not in a valid eCryptfs format, " | 877 | "is not in a valid eCryptfs format, " |
880 | "and plaintext passthrough mode is not " | 878 | "or the key could not be retrieved. " |
879 | "Plaintext passthrough mode is not " | ||
881 | "enabled; returning -EIO\n"); | 880 | "enabled; returning -EIO\n"); |
882 | |||
883 | mutex_unlock(&crypt_stat->cs_mutex); | 881 | mutex_unlock(&crypt_stat->cs_mutex); |
884 | goto out; | 882 | goto out; |
885 | } | 883 | } |
@@ -954,7 +952,7 @@ out: | |||
954 | return rc; | 952 | return rc; |
955 | } | 953 | } |
956 | 954 | ||
957 | ssize_t | 955 | static ssize_t |
958 | ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, | 956 | ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, |
959 | size_t size) | 957 | size_t size) |
960 | { | 958 | { |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index f458c1f35565..682b1b2482c2 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -189,7 +189,7 @@ out: | |||
189 | } | 189 | } |
190 | 190 | ||
191 | static int | 191 | static int |
192 | parse_tag_65_packet(struct ecryptfs_session_key *session_key, u16 *cipher_code, | 192 | parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code, |
193 | struct ecryptfs_message *msg) | 193 | struct ecryptfs_message *msg) |
194 | { | 194 | { |
195 | size_t i = 0; | 195 | size_t i = 0; |
@@ -275,7 +275,7 @@ out: | |||
275 | 275 | ||
276 | 276 | ||
277 | static int | 277 | static int |
278 | write_tag_66_packet(char *signature, size_t cipher_code, | 278 | write_tag_66_packet(char *signature, u8 cipher_code, |
279 | struct ecryptfs_crypt_stat *crypt_stat, char **packet, | 279 | struct ecryptfs_crypt_stat *crypt_stat, char **packet, |
280 | size_t *packet_len) | 280 | size_t *packet_len) |
281 | { | 281 | { |
@@ -428,7 +428,7 @@ static int | |||
428 | decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, | 428 | decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, |
429 | struct ecryptfs_crypt_stat *crypt_stat) | 429 | struct ecryptfs_crypt_stat *crypt_stat) |
430 | { | 430 | { |
431 | u16 cipher_code = 0; | 431 | u8 cipher_code = 0; |
432 | struct ecryptfs_msg_ctx *msg_ctx; | 432 | struct ecryptfs_msg_ctx *msg_ctx; |
433 | struct ecryptfs_message *msg = NULL; | 433 | struct ecryptfs_message *msg = NULL; |
434 | char *auth_tok_sig; | 434 | char *auth_tok_sig; |
@@ -1537,7 +1537,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, | |||
1537 | struct scatterlist dst_sg; | 1537 | struct scatterlist dst_sg; |
1538 | struct scatterlist src_sg; | 1538 | struct scatterlist src_sg; |
1539 | struct mutex *tfm_mutex = NULL; | 1539 | struct mutex *tfm_mutex = NULL; |
1540 | size_t cipher_code; | 1540 | u8 cipher_code; |
1541 | size_t packet_size_length; | 1541 | size_t packet_size_length; |
1542 | size_t max_packet_size; | 1542 | size_t max_packet_size; |
1543 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = | 1543 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = |
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 0249aa4ae181..d25ac9500a92 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -117,7 +117,7 @@ void __ecryptfs_printk(const char *fmt, ...) | |||
117 | * | 117 | * |
118 | * Returns zero on success; non-zero otherwise | 118 | * Returns zero on success; non-zero otherwise |
119 | */ | 119 | */ |
120 | int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) | 120 | static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) |
121 | { | 121 | { |
122 | struct ecryptfs_inode_info *inode_info = | 122 | struct ecryptfs_inode_info *inode_info = |
123 | ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); | 123 | ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); |
@@ -226,17 +226,15 @@ out: | |||
226 | return rc; | 226 | return rc; |
227 | } | 227 | } |
228 | 228 | ||
229 | enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_debug, | 229 | enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, |
230 | ecryptfs_opt_ecryptfs_debug, ecryptfs_opt_cipher, | 230 | ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher, |
231 | ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes, | 231 | ecryptfs_opt_ecryptfs_key_bytes, |
232 | ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, | 232 | ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata, |
233 | ecryptfs_opt_encrypted_view, ecryptfs_opt_err }; | 233 | ecryptfs_opt_encrypted_view, ecryptfs_opt_err }; |
234 | 234 | ||
235 | static match_table_t tokens = { | 235 | static match_table_t tokens = { |
236 | {ecryptfs_opt_sig, "sig=%s"}, | 236 | {ecryptfs_opt_sig, "sig=%s"}, |
237 | {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, | 237 | {ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"}, |
238 | {ecryptfs_opt_debug, "debug=%u"}, | ||
239 | {ecryptfs_opt_ecryptfs_debug, "ecryptfs_debug=%u"}, | ||
240 | {ecryptfs_opt_cipher, "cipher=%s"}, | 238 | {ecryptfs_opt_cipher, "cipher=%s"}, |
241 | {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, | 239 | {ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"}, |
242 | {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, | 240 | {ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"}, |
@@ -313,7 +311,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options) | |||
313 | substring_t args[MAX_OPT_ARGS]; | 311 | substring_t args[MAX_OPT_ARGS]; |
314 | int token; | 312 | int token; |
315 | char *sig_src; | 313 | char *sig_src; |
316 | char *debug_src; | ||
317 | char *cipher_name_dst; | 314 | char *cipher_name_dst; |
318 | char *cipher_name_src; | 315 | char *cipher_name_src; |
319 | char *cipher_key_bytes_src; | 316 | char *cipher_key_bytes_src; |
@@ -341,16 +338,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options) | |||
341 | } | 338 | } |
342 | sig_set = 1; | 339 | sig_set = 1; |
343 | break; | 340 | break; |
344 | case ecryptfs_opt_debug: | ||
345 | case ecryptfs_opt_ecryptfs_debug: | ||
346 | debug_src = args[0].from; | ||
347 | ecryptfs_verbosity = | ||
348 | (int)simple_strtol(debug_src, &debug_src, | ||
349 | 0); | ||
350 | ecryptfs_printk(KERN_DEBUG, | ||
351 | "Verbosity set to [%d]" "\n", | ||
352 | ecryptfs_verbosity); | ||
353 | break; | ||
354 | case ecryptfs_opt_cipher: | 341 | case ecryptfs_opt_cipher: |
355 | case ecryptfs_opt_ecryptfs_cipher: | 342 | case ecryptfs_opt_ecryptfs_cipher: |
356 | cipher_name_src = args[0].from; | 343 | cipher_name_src = args[0].from; |
@@ -423,9 +410,13 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options) | |||
423 | if (!cipher_key_bytes_set) { | 410 | if (!cipher_key_bytes_set) { |
424 | mount_crypt_stat->global_default_cipher_key_size = 0; | 411 | mount_crypt_stat->global_default_cipher_key_size = 0; |
425 | } | 412 | } |
426 | rc = ecryptfs_add_new_key_tfm( | 413 | mutex_lock(&key_tfm_list_mutex); |
427 | NULL, mount_crypt_stat->global_default_cipher_name, | 414 | if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name, |
428 | mount_crypt_stat->global_default_cipher_key_size); | 415 | NULL)) |
416 | rc = ecryptfs_add_new_key_tfm( | ||
417 | NULL, mount_crypt_stat->global_default_cipher_name, | ||
418 | mount_crypt_stat->global_default_cipher_key_size); | ||
419 | mutex_unlock(&key_tfm_list_mutex); | ||
429 | if (rc) { | 420 | if (rc) { |
430 | printk(KERN_ERR "Error attempting to initialize cipher with " | 421 | printk(KERN_ERR "Error attempting to initialize cipher with " |
431 | "name = [%s] and key size = [%td]; rc = [%d]\n", | 422 | "name = [%s] and key size = [%td]; rc = [%d]\n", |
@@ -522,8 +513,8 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) | |||
522 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); | 513 | ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); |
523 | goto out; | 514 | goto out; |
524 | } | 515 | } |
525 | lower_root = nd.dentry; | 516 | lower_root = nd.path.dentry; |
526 | lower_mnt = nd.mnt; | 517 | lower_mnt = nd.path.mnt; |
527 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); | 518 | ecryptfs_set_superblock_lower(sb, lower_root->d_sb); |
528 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; | 519 | sb->s_maxbytes = lower_root->d_sb->s_maxbytes; |
529 | sb->s_blocksize = lower_root->d_sb->s_blocksize; | 520 | sb->s_blocksize = lower_root->d_sb->s_blocksize; |
@@ -535,7 +526,7 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) | |||
535 | rc = 0; | 526 | rc = 0; |
536 | goto out; | 527 | goto out; |
537 | out_free: | 528 | out_free: |
538 | path_release(&nd); | 529 | path_put(&nd.path); |
539 | out: | 530 | out: |
540 | return rc; | 531 | return rc; |
541 | } | 532 | } |
@@ -654,11 +645,6 @@ static struct ecryptfs_cache_info { | |||
654 | .size = sizeof(struct ecryptfs_sb_info), | 645 | .size = sizeof(struct ecryptfs_sb_info), |
655 | }, | 646 | }, |
656 | { | 647 | { |
657 | .cache = &ecryptfs_header_cache_0, | ||
658 | .name = "ecryptfs_headers_0", | ||
659 | .size = PAGE_CACHE_SIZE, | ||
660 | }, | ||
661 | { | ||
662 | .cache = &ecryptfs_header_cache_1, | 648 | .cache = &ecryptfs_header_cache_1, |
663 | .name = "ecryptfs_headers_1", | 649 | .name = "ecryptfs_headers_1", |
664 | .size = PAGE_CACHE_SIZE, | 650 | .size = PAGE_CACHE_SIZE, |
@@ -821,6 +807,10 @@ static int __init ecryptfs_init(void) | |||
821 | "rc = [%d]\n", rc); | 807 | "rc = [%d]\n", rc); |
822 | goto out_release_messaging; | 808 | goto out_release_messaging; |
823 | } | 809 | } |
810 | if (ecryptfs_verbosity > 0) | ||
811 | printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values " | ||
812 | "will be written to the syslog!\n", ecryptfs_verbosity); | ||
813 | |||
824 | goto out; | 814 | goto out; |
825 | out_release_messaging: | 815 | out_release_messaging: |
826 | ecryptfs_release_messaging(ecryptfs_transport); | 816 | ecryptfs_release_messaging(ecryptfs_transport); |
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 0535412d8c64..dc74b186145d 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -34,8 +34,6 @@ | |||
34 | #include <linux/scatterlist.h> | 34 | #include <linux/scatterlist.h> |
35 | #include "ecryptfs_kernel.h" | 35 | #include "ecryptfs_kernel.h" |
36 | 36 | ||
37 | struct kmem_cache *ecryptfs_lower_page_cache; | ||
38 | |||
39 | /** | 37 | /** |
40 | * ecryptfs_get_locked_page | 38 | * ecryptfs_get_locked_page |
41 | * | 39 | * |
@@ -102,13 +100,14 @@ static void set_header_info(char *page_virt, | |||
102 | struct ecryptfs_crypt_stat *crypt_stat) | 100 | struct ecryptfs_crypt_stat *crypt_stat) |
103 | { | 101 | { |
104 | size_t written; | 102 | size_t written; |
105 | int save_num_header_extents_at_front = | 103 | size_t save_num_header_bytes_at_front = |
106 | crypt_stat->num_header_extents_at_front; | 104 | crypt_stat->num_header_bytes_at_front; |
107 | 105 | ||
108 | crypt_stat->num_header_extents_at_front = 1; | 106 | crypt_stat->num_header_bytes_at_front = |
107 | ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; | ||
109 | ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); | 108 | ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written); |
110 | crypt_stat->num_header_extents_at_front = | 109 | crypt_stat->num_header_bytes_at_front = |
111 | save_num_header_extents_at_front; | 110 | save_num_header_bytes_at_front; |
112 | } | 111 | } |
113 | 112 | ||
114 | /** | 113 | /** |
@@ -134,8 +133,11 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
134 | loff_t view_extent_num = ((((loff_t)page->index) | 133 | loff_t view_extent_num = ((((loff_t)page->index) |
135 | * num_extents_per_page) | 134 | * num_extents_per_page) |
136 | + extent_num_in_page); | 135 | + extent_num_in_page); |
136 | size_t num_header_extents_at_front = | ||
137 | (crypt_stat->num_header_bytes_at_front | ||
138 | / crypt_stat->extent_size); | ||
137 | 139 | ||
138 | if (view_extent_num < crypt_stat->num_header_extents_at_front) { | 140 | if (view_extent_num < num_header_extents_at_front) { |
139 | /* This is a header extent */ | 141 | /* This is a header extent */ |
140 | char *page_virt; | 142 | char *page_virt; |
141 | 143 | ||
@@ -157,9 +159,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
157 | } else { | 159 | } else { |
158 | /* This is an encrypted data extent */ | 160 | /* This is an encrypted data extent */ |
159 | loff_t lower_offset = | 161 | loff_t lower_offset = |
160 | ((view_extent_num - | 162 | ((view_extent_num * crypt_stat->extent_size) |
161 | crypt_stat->num_header_extents_at_front) | 163 | - crypt_stat->num_header_bytes_at_front); |
162 | * crypt_stat->extent_size); | ||
163 | 164 | ||
164 | rc = ecryptfs_read_lower_page_segment( | 165 | rc = ecryptfs_read_lower_page_segment( |
165 | page, (lower_offset >> PAGE_CACHE_SHIFT), | 166 | page, (lower_offset >> PAGE_CACHE_SHIFT), |
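The lower-offset arithmetic in ecryptfs_copy_up_encrypted_with_header() is unchanged in effect, only re-expressed in bytes. For example, with extent_size = 4096 and num_header_bytes_at_front = 8192 (two header extents), view extent 3 maps to 3 * 4096 - 8192 = 4096 in the lower file, exactly what the old (3 - 2) * 4096 produced; view extents 0 and 1 fall below num_header_extents_at_front and are synthesized as header extents instead of being read from the lower file.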
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 948f57624c05..0c4928623bbc 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c | |||
@@ -293,6 +293,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, | |||
293 | return rc; | 293 | return rc; |
294 | } | 294 | } |
295 | 295 | ||
296 | #if 0 | ||
296 | /** | 297 | /** |
297 | * ecryptfs_read | 298 | * ecryptfs_read |
298 | * @data: The virtual address into which to write the data read (and | 299 | * @data: The virtual address into which to write the data read (and |
@@ -371,3 +372,4 @@ int ecryptfs_read(char *data, loff_t offset, size_t size, | |||
371 | out: | 372 | out: |
372 | return rc; | 373 | return rc; |
373 | } | 374 | } |
375 | #endif /* 0 */ | ||
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 4859c4eecd65..c27ac2b358a1 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c | |||
@@ -156,32 +156,38 @@ static void ecryptfs_clear_inode(struct inode *inode) | |||
156 | /** | 156 | /** |
157 | * ecryptfs_show_options | 157 | * ecryptfs_show_options |
158 | * | 158 | * |
159 | * Prints the directory we are currently mounted over. | 159 | * Prints the mount options for a given superblock. |
160 | * Returns zero on success; non-zero otherwise | 160 | * Returns zero; does not fail. |
161 | */ | 161 | */ |
162 | static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt) | 162 | static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt) |
163 | { | 163 | { |
164 | struct super_block *sb = mnt->mnt_sb; | 164 | struct super_block *sb = mnt->mnt_sb; |
165 | struct dentry *lower_root_dentry = ecryptfs_dentry_to_lower(sb->s_root); | 165 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = |
166 | struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(sb->s_root); | 166 | &ecryptfs_superblock_to_private(sb)->mount_crypt_stat; |
167 | char *tmp_page; | 167 | struct ecryptfs_global_auth_tok *walker; |
168 | char *path; | 168 | |
169 | int rc = 0; | 169 | mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex); |
170 | 170 | list_for_each_entry(walker, | |
171 | tmp_page = (char *)__get_free_page(GFP_KERNEL); | 171 | &mount_crypt_stat->global_auth_tok_list, |
172 | if (!tmp_page) { | 172 | mount_crypt_stat_list) { |
173 | rc = -ENOMEM; | 173 | seq_printf(m, ",ecryptfs_sig=%s", walker->sig); |
174 | goto out; | ||
175 | } | ||
176 | path = d_path(lower_root_dentry, lower_mnt, tmp_page, PAGE_SIZE); | ||
177 | if (IS_ERR(path)) { | ||
178 | rc = PTR_ERR(path); | ||
179 | goto out; | ||
180 | } | 174 | } |
181 | seq_printf(m, ",dir=%s", path); | 175 | mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex); |
182 | free_page((unsigned long)tmp_page); | 176 | |
183 | out: | 177 | seq_printf(m, ",ecryptfs_cipher=%s", |
184 | return rc; | 178 | mount_crypt_stat->global_default_cipher_name); |
179 | |||
180 | if (mount_crypt_stat->global_default_cipher_key_size) | ||
181 | seq_printf(m, ",ecryptfs_key_bytes=%zd", | ||
182 | mount_crypt_stat->global_default_cipher_key_size); | ||
183 | if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) | ||
184 | seq_printf(m, ",ecryptfs_passthrough"); | ||
185 | if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) | ||
186 | seq_printf(m, ",ecryptfs_xattr_metadata"); | ||
187 | if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) | ||
188 | seq_printf(m, ",ecryptfs_encrypted_view"); | ||
189 | |||
190 | return 0; | ||
185 | } | 191 | } |
186 | 192 | ||
187 | const struct super_operations ecryptfs_sops = { | 193 | const struct super_operations ecryptfs_sops = { |
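With the rewritten ecryptfs_show_options(), the eCryptfs entry in /proc/mounts now carries the mount options themselves rather than a dir= pointer at the lower directory. Purely for illustration (the signature and key size values are made up), the appended options would look like:

	,ecryptfs_sig=0123456789abcdef,ecryptfs_cipher=aes,ecryptfs_key_bytes=16,ecryptfs_xattr_metadata

with one ecryptfs_sig= entry per global auth tok and the passthrough/xattr/encrypted-view flags emitted only when set, matching the seq_printf() calls above.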
diff --git a/fs/efs/inode.c b/fs/efs/inode.c index 174696f9bf14..627c3026946d 100644 --- a/fs/efs/inode.c +++ b/fs/efs/inode.c | |||
@@ -45,17 +45,26 @@ static inline void extent_copy(efs_extent *src, efs_extent *dst) { | |||
45 | return; | 45 | return; |
46 | } | 46 | } |
47 | 47 | ||
48 | void efs_read_inode(struct inode *inode) | 48 | struct inode *efs_iget(struct super_block *super, unsigned long ino) |
49 | { | 49 | { |
50 | int i, inode_index; | 50 | int i, inode_index; |
51 | dev_t device; | 51 | dev_t device; |
52 | u32 rdev; | 52 | u32 rdev; |
53 | struct buffer_head *bh; | 53 | struct buffer_head *bh; |
54 | struct efs_sb_info *sb = SUPER_INFO(inode->i_sb); | 54 | struct efs_sb_info *sb = SUPER_INFO(super); |
55 | struct efs_inode_info *in = INODE_INFO(inode); | 55 | struct efs_inode_info *in; |
56 | efs_block_t block, offset; | 56 | efs_block_t block, offset; |
57 | struct efs_dinode *efs_inode; | 57 | struct efs_dinode *efs_inode; |
58 | 58 | struct inode *inode; | |
59 | |||
60 | inode = iget_locked(super, ino); | ||
61 | if (IS_ERR(inode)) | ||
62 | return ERR_PTR(-ENOMEM); | ||
63 | if (!(inode->i_state & I_NEW)) | ||
64 | return inode; | ||
65 | |||
66 | in = INODE_INFO(inode); | ||
67 | |||
59 | /* | 68 | /* |
60 | ** EFS layout: | 69 | ** EFS layout: |
61 | ** | 70 | ** |
@@ -159,13 +168,13 @@ void efs_read_inode(struct inode *inode) | |||
159 | break; | 168 | break; |
160 | } | 169 | } |
161 | 170 | ||
162 | return; | 171 | unlock_new_inode(inode); |
172 | return inode; | ||
163 | 173 | ||
164 | read_inode_error: | 174 | read_inode_error: |
165 | printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino); | 175 | printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino); |
166 | make_bad_inode(inode); | 176 | iget_failed(inode); |
167 | 177 | return ERR_PTR(-EIO); | |
168 | return; | ||
169 | } | 178 | } |
170 | 179 | ||
171 | static inline efs_block_t | 180 | static inline efs_block_t |
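The efs change here and the ext2 change later in this diff follow the same shape when retiring ->read_inode(): the filesystem grows a foo_iget() that drives iget_locked() itself and reports failures through ERR_PTR() instead of make_bad_inode(). A generic sketch of that shape (fill_from_disk() is a placeholder for the filesystem-specific body):

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	long ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);	/* allocation failed */
	if (!(inode->i_state & I_NEW))
		return inode;			/* already cached and initialised */

	ret = fill_from_disk(inode);		/* hypothetical per-fs helper */
	if (ret) {
		iget_failed(inode);		/* unlocks and drops the new inode */
		return ERR_PTR(ret);
	}
	unlock_new_inode(inode);
	return inode;
}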
diff --git a/fs/efs/namei.c b/fs/efs/namei.c index f7f407075be1..e26704742d41 100644 --- a/fs/efs/namei.c +++ b/fs/efs/namei.c | |||
@@ -66,9 +66,10 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei | |||
66 | lock_kernel(); | 66 | lock_kernel(); |
67 | inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); | 67 | inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); |
68 | if (inodenum) { | 68 | if (inodenum) { |
69 | if (!(inode = iget(dir->i_sb, inodenum))) { | 69 | inode = efs_iget(dir->i_sb, inodenum); |
70 | if (IS_ERR(inode)) { | ||
70 | unlock_kernel(); | 71 | unlock_kernel(); |
71 | return ERR_PTR(-EACCES); | 72 | return ERR_CAST(inode); |
72 | } | 73 | } |
73 | } | 74 | } |
74 | unlock_kernel(); | 75 | unlock_kernel(); |
@@ -84,12 +85,11 @@ static struct inode *efs_nfs_get_inode(struct super_block *sb, u64 ino, | |||
84 | 85 | ||
85 | if (ino == 0) | 86 | if (ino == 0) |
86 | return ERR_PTR(-ESTALE); | 87 | return ERR_PTR(-ESTALE); |
87 | inode = iget(sb, ino); | 88 | inode = efs_iget(sb, ino); |
88 | if (inode == NULL) | 89 | if (IS_ERR(inode)) |
89 | return ERR_PTR(-ENOMEM); | 90 | return ERR_CAST(inode); |
90 | 91 | ||
91 | if (is_bad_inode(inode) || | 92 | if (generation && inode->i_generation != generation) { |
92 | (generation && inode->i_generation != generation)) { | ||
93 | iput(inode); | 93 | iput(inode); |
94 | return ERR_PTR(-ESTALE); | 94 | return ERR_PTR(-ESTALE); |
95 | } | 95 | } |
@@ -116,7 +116,7 @@ struct dentry *efs_get_parent(struct dentry *child) | |||
116 | struct dentry *parent; | 116 | struct dentry *parent; |
117 | struct inode *inode; | 117 | struct inode *inode; |
118 | efs_ino_t ino; | 118 | efs_ino_t ino; |
119 | int error; | 119 | long error; |
120 | 120 | ||
121 | lock_kernel(); | 121 | lock_kernel(); |
122 | 122 | ||
@@ -125,10 +125,11 @@ struct dentry *efs_get_parent(struct dentry *child) | |||
125 | if (!ino) | 125 | if (!ino) |
126 | goto fail; | 126 | goto fail; |
127 | 127 | ||
128 | error = -EACCES; | 128 | inode = efs_iget(child->d_inode->i_sb, ino); |
129 | inode = iget(child->d_inode->i_sb, ino); | 129 | if (IS_ERR(inode)) { |
130 | if (!inode) | 130 | error = PTR_ERR(inode); |
131 | goto fail; | 131 | goto fail; |
132 | } | ||
132 | 133 | ||
133 | error = -ENOMEM; | 134 | error = -ENOMEM; |
134 | parent = d_alloc_anon(inode); | 135 | parent = d_alloc_anon(inode); |
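On the caller side the conversion is mechanical: iget()'s NULL-or-bad-inode checks become IS_ERR() tests on the efs_iget() return value, propagated with ERR_CAST() or PTR_ERR() as in efs_lookup() and efs_get_parent() above. Sketch:

	inode = efs_iget(dir->i_sb, inodenum);
	if (IS_ERR(inode))
		return ERR_CAST(inode);	/* same ERR_PTR, re-typed for a dentry-returning caller */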
diff --git a/fs/efs/super.c b/fs/efs/super.c index c79bc627f107..14082405cdd1 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c | |||
@@ -107,7 +107,6 @@ static int efs_remount(struct super_block *sb, int *flags, char *data) | |||
107 | static const struct super_operations efs_superblock_operations = { | 107 | static const struct super_operations efs_superblock_operations = { |
108 | .alloc_inode = efs_alloc_inode, | 108 | .alloc_inode = efs_alloc_inode, |
109 | .destroy_inode = efs_destroy_inode, | 109 | .destroy_inode = efs_destroy_inode, |
110 | .read_inode = efs_read_inode, | ||
111 | .put_super = efs_put_super, | 110 | .put_super = efs_put_super, |
112 | .statfs = efs_statfs, | 111 | .statfs = efs_statfs, |
113 | .remount_fs = efs_remount, | 112 | .remount_fs = efs_remount, |
@@ -247,6 +246,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent) | |||
247 | struct efs_sb_info *sb; | 246 | struct efs_sb_info *sb; |
248 | struct buffer_head *bh; | 247 | struct buffer_head *bh; |
249 | struct inode *root; | 248 | struct inode *root; |
249 | int ret = -EINVAL; | ||
250 | 250 | ||
251 | sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL); | 251 | sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL); |
252 | if (!sb) | 252 | if (!sb) |
@@ -303,12 +303,18 @@ static int efs_fill_super(struct super_block *s, void *d, int silent) | |||
303 | } | 303 | } |
304 | s->s_op = &efs_superblock_operations; | 304 | s->s_op = &efs_superblock_operations; |
305 | s->s_export_op = &efs_export_ops; | 305 | s->s_export_op = &efs_export_ops; |
306 | root = iget(s, EFS_ROOTINODE); | 306 | root = efs_iget(s, EFS_ROOTINODE); |
307 | if (IS_ERR(root)) { | ||
308 | printk(KERN_ERR "EFS: get root inode failed\n"); | ||
309 | ret = PTR_ERR(root); | ||
310 | goto out_no_fs; | ||
311 | } | ||
312 | |||
307 | s->s_root = d_alloc_root(root); | 313 | s->s_root = d_alloc_root(root); |
308 | |||
309 | if (!(s->s_root)) { | 314 | if (!(s->s_root)) { |
310 | printk(KERN_ERR "EFS: get root inode failed\n"); | 315 | printk(KERN_ERR "EFS: get root dentry failed\n"); |
311 | iput(root); | 316 | iput(root); |
317 | ret = -ENOMEM; | ||
312 | goto out_no_fs; | 318 | goto out_no_fs; |
313 | } | 319 | } |
314 | 320 | ||
@@ -318,7 +324,7 @@ out_no_fs_ul: | |||
318 | out_no_fs: | 324 | out_no_fs: |
319 | s->s_fs_info = NULL; | 325 | s->s_fs_info = NULL; |
320 | kfree(sb); | 326 | kfree(sb); |
321 | return -EINVAL; | 327 | return ret; |
322 | } | 328 | } |
323 | 329 | ||
324 | static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) { | 330 | static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) { |
diff --git a/fs/eventfd.c b/fs/eventfd.c index 2ce19c000d2a..a9f130cd50ac 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/anon_inodes.h> | 16 | #include <linux/anon_inodes.h> |
17 | #include <linux/eventfd.h> | 17 | #include <linux/eventfd.h> |
18 | #include <linux/syscalls.h> | ||
18 | 19 | ||
19 | struct eventfd_ctx { | 20 | struct eventfd_ctx { |
20 | wait_queue_head_t wqh; | 21 | wait_queue_head_t wqh; |
diff --git a/fs/exec.c b/fs/exec.c | |||
@@ -112,14 +112,14 @@ asmlinkage long sys_uselib(const char __user * library) | |||
112 | goto out; | 112 | goto out; |
113 | 113 | ||
114 | error = -EINVAL; | 114 | error = -EINVAL; |
115 | if (!S_ISREG(nd.dentry->d_inode->i_mode)) | 115 | if (!S_ISREG(nd.path.dentry->d_inode->i_mode)) |
116 | goto exit; | 116 | goto exit; |
117 | 117 | ||
118 | error = vfs_permission(&nd, MAY_READ | MAY_EXEC); | 118 | error = vfs_permission(&nd, MAY_READ | MAY_EXEC); |
119 | if (error) | 119 | if (error) |
120 | goto exit; | 120 | goto exit; |
121 | 121 | ||
122 | file = nameidata_to_filp(&nd, O_RDONLY); | 122 | file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE); |
123 | error = PTR_ERR(file); | 123 | error = PTR_ERR(file); |
124 | if (IS_ERR(file)) | 124 | if (IS_ERR(file)) |
125 | goto out; | 125 | goto out; |
@@ -148,7 +148,7 @@ out: | |||
148 | return error; | 148 | return error; |
149 | exit: | 149 | exit: |
150 | release_open_intent(&nd); | 150 | release_open_intent(&nd); |
151 | path_release(&nd); | 151 | path_put(&nd.path); |
152 | goto out; | 152 | goto out; |
153 | } | 153 | } |
154 | 154 | ||
@@ -652,13 +652,14 @@ struct file *open_exec(const char *name) | |||
652 | file = ERR_PTR(err); | 652 | file = ERR_PTR(err); |
653 | 653 | ||
654 | if (!err) { | 654 | if (!err) { |
655 | struct inode *inode = nd.dentry->d_inode; | 655 | struct inode *inode = nd.path.dentry->d_inode; |
656 | file = ERR_PTR(-EACCES); | 656 | file = ERR_PTR(-EACCES); |
657 | if (S_ISREG(inode->i_mode)) { | 657 | if (S_ISREG(inode->i_mode)) { |
658 | int err = vfs_permission(&nd, MAY_EXEC); | 658 | int err = vfs_permission(&nd, MAY_EXEC); |
659 | file = ERR_PTR(err); | 659 | file = ERR_PTR(err); |
660 | if (!err) { | 660 | if (!err) { |
661 | file = nameidata_to_filp(&nd, O_RDONLY); | 661 | file = nameidata_to_filp(&nd, |
662 | O_RDONLY|O_LARGEFILE); | ||
662 | if (!IS_ERR(file)) { | 663 | if (!IS_ERR(file)) { |
663 | err = deny_write_access(file); | 664 | err = deny_write_access(file); |
664 | if (err) { | 665 | if (err) { |
@@ -671,7 +672,7 @@ out: | |||
671 | } | 672 | } |
672 | } | 673 | } |
673 | release_open_intent(&nd); | 674 | release_open_intent(&nd); |
674 | path_release(&nd); | 675 | path_put(&nd.path); |
675 | } | 676 | } |
676 | goto out; | 677 | goto out; |
677 | } | 678 | } |
@@ -782,26 +783,8 @@ static int de_thread(struct task_struct *tsk) | |||
782 | zap_other_threads(tsk); | 783 | zap_other_threads(tsk); |
783 | read_unlock(&tasklist_lock); | 784 | read_unlock(&tasklist_lock); |
784 | 785 | ||
785 | /* | 786 | /* Account for the thread group leader hanging around: */ |
786 | * Account for the thread group leader hanging around: | 787 | count = thread_group_leader(tsk) ? 1 : 2; |
787 | */ | ||
788 | count = 1; | ||
789 | if (!thread_group_leader(tsk)) { | ||
790 | count = 2; | ||
791 | /* | ||
792 | * The SIGALRM timer survives the exec, but needs to point | ||
793 | * at us as the new group leader now. We have a race with | ||
794 | * a timer firing now getting the old leader, so we need to | ||
795 | * synchronize with any firing (by calling del_timer_sync) | ||
796 | * before we can safely let the old group leader die. | ||
797 | */ | ||
798 | sig->tsk = tsk; | ||
799 | spin_unlock_irq(lock); | ||
800 | if (hrtimer_cancel(&sig->real_timer)) | ||
801 | hrtimer_restart(&sig->real_timer); | ||
802 | spin_lock_irq(lock); | ||
803 | } | ||
804 | |||
805 | sig->notify_count = count; | 788 | sig->notify_count = count; |
806 | while (atomic_read(&sig->count) > count) { | 789 | while (atomic_read(&sig->count) > count) { |
807 | __set_current_state(TASK_UNINTERRUPTIBLE); | 790 | __set_current_state(TASK_UNINTERRUPTIBLE); |
@@ -1184,7 +1167,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
1184 | { | 1167 | { |
1185 | int try,retval; | 1168 | int try,retval; |
1186 | struct linux_binfmt *fmt; | 1169 | struct linux_binfmt *fmt; |
1187 | #ifdef __alpha__ | 1170 | #if defined(__alpha__) && defined(CONFIG_ARCH_SUPPORTS_AOUT) |
1188 | /* handle /sbin/loader.. */ | 1171 | /* handle /sbin/loader.. */ |
1189 | { | 1172 | { |
1190 | struct exec * eh = (struct exec *) bprm->buf; | 1173 | struct exec * eh = (struct exec *) bprm->buf; |
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 377ad172d74b..e7b2bafa1dd9 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c | |||
@@ -69,9 +69,53 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, | |||
69 | return desc + offset; | 69 | return desc + offset; |
70 | } | 70 | } |
71 | 71 | ||
72 | static int ext2_valid_block_bitmap(struct super_block *sb, | ||
73 | struct ext2_group_desc *desc, | ||
74 | unsigned int block_group, | ||
75 | struct buffer_head *bh) | ||
76 | { | ||
77 | ext2_grpblk_t offset; | ||
78 | ext2_grpblk_t next_zero_bit; | ||
79 | ext2_fsblk_t bitmap_blk; | ||
80 | ext2_fsblk_t group_first_block; | ||
81 | |||
82 | group_first_block = ext2_group_first_block_no(sb, block_group); | ||
83 | |||
84 | /* check whether block bitmap block number is set */ | ||
85 | bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); | ||
86 | offset = bitmap_blk - group_first_block; | ||
87 | if (!ext2_test_bit(offset, bh->b_data)) | ||
88 | /* bad block bitmap */ | ||
89 | goto err_out; | ||
90 | |||
91 | /* check whether the inode bitmap block number is set */ | ||
92 | bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap); | ||
93 | offset = bitmap_blk - group_first_block; | ||
94 | if (!ext2_test_bit(offset, bh->b_data)) | ||
95 | /* bad block bitmap */ | ||
96 | goto err_out; | ||
97 | |||
98 | /* check whether the inode table block number is set */ | ||
99 | bitmap_blk = le32_to_cpu(desc->bg_inode_table); | ||
100 | offset = bitmap_blk - group_first_block; | ||
101 | next_zero_bit = ext2_find_next_zero_bit(bh->b_data, | ||
102 | offset + EXT2_SB(sb)->s_itb_per_group, | ||
103 | offset); | ||
104 | if (next_zero_bit >= offset + EXT2_SB(sb)->s_itb_per_group) | ||
105 | /* good bitmap for inode tables */ | ||
106 | return 1; | ||
107 | |||
108 | err_out: | ||
109 | ext2_error(sb, __FUNCTION__, | ||
110 | "Invalid block bitmap - " | ||
111 | "block_group = %d, block = %lu", | ||
112 | block_group, bitmap_blk); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
72 | /* | 116 | /* |
73 | * Read the bitmap for a given block_group, reading into the specified | 117 | * Read the bitmap for a given block_group,and validate the |
74 | * slot in the superblock's bitmap cache. | 118 | * bits for block/inode/inode tables are set in the bitmaps |
75 | * | 119 | * |
76 | * Return buffer_head on success or NULL in case of failure. | 120 | * Return buffer_head on success or NULL in case of failure. |
77 | */ | 121 | */ |
@@ -80,17 +124,36 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group) | |||
80 | { | 124 | { |
81 | struct ext2_group_desc * desc; | 125 | struct ext2_group_desc * desc; |
82 | struct buffer_head * bh = NULL; | 126 | struct buffer_head * bh = NULL; |
83 | 127 | ext2_fsblk_t bitmap_blk; | |
84 | desc = ext2_get_group_desc (sb, block_group, NULL); | 128 | |
129 | desc = ext2_get_group_desc(sb, block_group, NULL); | ||
85 | if (!desc) | 130 | if (!desc) |
86 | goto error_out; | 131 | return NULL; |
87 | bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); | 132 | bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); |
88 | if (!bh) | 133 | bh = sb_getblk(sb, bitmap_blk); |
89 | ext2_error (sb, "read_block_bitmap", | 134 | if (unlikely(!bh)) { |
135 | ext2_error(sb, __FUNCTION__, | ||
136 | "Cannot read block bitmap - " | ||
137 | "block_group = %d, block_bitmap = %u", | ||
138 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | ||
139 | return NULL; | ||
140 | } | ||
141 | if (likely(bh_uptodate_or_lock(bh))) | ||
142 | return bh; | ||
143 | |||
144 | if (bh_submit_read(bh) < 0) { | ||
145 | brelse(bh); | ||
146 | ext2_error(sb, __FUNCTION__, | ||
90 | "Cannot read block bitmap - " | 147 | "Cannot read block bitmap - " |
91 | "block_group = %d, block_bitmap = %u", | 148 | "block_group = %d, block_bitmap = %u", |
92 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | 149 | block_group, le32_to_cpu(desc->bg_block_bitmap)); |
93 | error_out: | 150 | return NULL; |
151 | } | ||
152 | if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) { | ||
153 | brelse(bh); | ||
154 | return NULL; | ||
155 | } | ||
156 | |||
94 | return bh; | 157 | return bh; |
95 | } | 158 | } |
96 | 159 | ||
@@ -474,11 +537,13 @@ do_more: | |||
474 | in_range (block, le32_to_cpu(desc->bg_inode_table), | 537 | in_range (block, le32_to_cpu(desc->bg_inode_table), |
475 | sbi->s_itb_per_group) || | 538 | sbi->s_itb_per_group) || |
476 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), | 539 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), |
477 | sbi->s_itb_per_group)) | 540 | sbi->s_itb_per_group)) { |
478 | ext2_error (sb, "ext2_free_blocks", | 541 | ext2_error (sb, "ext2_free_blocks", |
479 | "Freeing blocks in system zones - " | 542 | "Freeing blocks in system zones - " |
480 | "Block = %lu, count = %lu", | 543 | "Block = %lu, count = %lu", |
481 | block, count); | 544 | block, count); |
545 | goto error_return; | ||
546 | } | ||
482 | 547 | ||
483 | for (i = 0, group_freed = 0; i < count; i++) { | 548 | for (i = 0, group_freed = 0; i < count; i++) { |
484 | if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group), | 549 | if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group), |
@@ -1250,8 +1315,8 @@ retry_alloc: | |||
1250 | smp_rmb(); | 1315 | smp_rmb(); |
1251 | 1316 | ||
1252 | /* | 1317 | /* |
1253 | * Now search the rest of the groups. We assume that | 1318 | * Now search the rest of the groups. We assume that |
1254 | * i and gdp correctly point to the last group visited. | 1319 | * group_no and gdp correctly point to the last group visited. |
1255 | */ | 1320 | */ |
1256 | for (bgi = 0; bgi < ngroups; bgi++) { | 1321 | for (bgi = 0; bgi < ngroups; bgi++) { |
1257 | group_no++; | 1322 | group_no++; |
@@ -1311,11 +1376,13 @@ allocated: | |||
1311 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), | 1376 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), |
1312 | EXT2_SB(sb)->s_itb_per_group) || | 1377 | EXT2_SB(sb)->s_itb_per_group) || |
1313 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | 1378 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), |
1314 | EXT2_SB(sb)->s_itb_per_group)) | 1379 | EXT2_SB(sb)->s_itb_per_group)) { |
1315 | ext2_error(sb, "ext2_new_blocks", | 1380 | ext2_error(sb, "ext2_new_blocks", |
1316 | "Allocating block in system zone - " | 1381 | "Allocating block in system zone - " |
1317 | "blocks from "E2FSBLK", length %lu", | 1382 | "blocks from "E2FSBLK", length %lu", |
1318 | ret_block, num); | 1383 | ret_block, num); |
1384 | goto out; | ||
1385 | } | ||
1319 | 1386 | ||
1320 | performed_allocation = 1; | 1387 | performed_allocation = 1; |
1321 | 1388 | ||
@@ -1466,9 +1533,6 @@ int ext2_bg_has_super(struct super_block *sb, int group) | |||
1466 | */ | 1533 | */ |
1467 | unsigned long ext2_bg_num_gdb(struct super_block *sb, int group) | 1534 | unsigned long ext2_bg_num_gdb(struct super_block *sb, int group) |
1468 | { | 1535 | { |
1469 | if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&& | 1536 | return ext2_bg_has_super(sb, group) ? EXT2_SB(sb)->s_gdb_count : 0; |
1470 | !ext2_group_sparse(group)) | ||
1471 | return 0; | ||
1472 | return EXT2_SB(sb)->s_gdb_count; | ||
1473 | } | 1537 | } |
1474 | 1538 | ||
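read_block_bitmap() now goes through sb_getblk() plus bh_uptodate_or_lock()/bh_submit_read() rather than sb_bread(), and validates the bitmap before handing it out. A condensed sketch of that read path (error printks omitted; the helper name read_and_check_bitmap is hypothetical):

static struct buffer_head *read_and_check_bitmap(struct super_block *sb,
						 unsigned int block_group,
						 struct ext2_group_desc *desc)
{
	struct buffer_head *bh = sb_getblk(sb, le32_to_cpu(desc->bg_block_bitmap));

	if (unlikely(!bh))
		return NULL;
	/* bh_uptodate_or_lock() returns 1 if the buffer is already up to
	 * date; otherwise it locks the buffer so bh_submit_read() can
	 * issue the read and wait for completion. */
	if (!bh_uptodate_or_lock(bh) && bh_submit_read(bh) < 0) {
		brelse(bh);		/* I/O error */
		return NULL;
	}
	if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
		brelse(bh);		/* block/inode/itable bits not set */
		return NULL;
	}
	return bh;
}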
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index d868e26c15eb..8dededd80fe2 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c | |||
@@ -703,7 +703,7 @@ const struct file_operations ext2_dir_operations = { | |||
703 | .llseek = generic_file_llseek, | 703 | .llseek = generic_file_llseek, |
704 | .read = generic_read_dir, | 704 | .read = generic_read_dir, |
705 | .readdir = ext2_readdir, | 705 | .readdir = ext2_readdir, |
706 | .ioctl = ext2_ioctl, | 706 | .unlocked_ioctl = ext2_ioctl, |
707 | #ifdef CONFIG_COMPAT | 707 | #ifdef CONFIG_COMPAT |
708 | .compat_ioctl = ext2_compat_ioctl, | 708 | .compat_ioctl = ext2_compat_ioctl, |
709 | #endif | 709 | #endif |
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index c87ae29c19cb..47d88da2d33b 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h | |||
@@ -124,9 +124,8 @@ extern void ext2_check_inodes_bitmap (struct super_block *); | |||
124 | extern unsigned long ext2_count_free (struct buffer_head *, unsigned); | 124 | extern unsigned long ext2_count_free (struct buffer_head *, unsigned); |
125 | 125 | ||
126 | /* inode.c */ | 126 | /* inode.c */ |
127 | extern void ext2_read_inode (struct inode *); | 127 | extern struct inode *ext2_iget (struct super_block *, unsigned long); |
128 | extern int ext2_write_inode (struct inode *, int); | 128 | extern int ext2_write_inode (struct inode *, int); |
129 | extern void ext2_put_inode (struct inode *); | ||
130 | extern void ext2_delete_inode (struct inode *); | 129 | extern void ext2_delete_inode (struct inode *); |
131 | extern int ext2_sync_inode (struct inode *); | 130 | extern int ext2_sync_inode (struct inode *); |
132 | extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); | 131 | extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); |
@@ -139,8 +138,7 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping, | |||
139 | struct page **pagep, void **fsdata); | 138 | struct page **pagep, void **fsdata); |
140 | 139 | ||
141 | /* ioctl.c */ | 140 | /* ioctl.c */ |
142 | extern int ext2_ioctl (struct inode *, struct file *, unsigned int, | 141 | extern long ext2_ioctl(struct file *, unsigned int, unsigned long); |
143 | unsigned long); | ||
144 | extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long); | 142 | extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long); |
145 | 143 | ||
146 | /* namei.c */ | 144 | /* namei.c */ |
diff --git a/fs/ext2/file.c b/fs/ext2/file.c index c051798459a1..5f2fa9c36293 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c | |||
@@ -48,7 +48,7 @@ const struct file_operations ext2_file_operations = { | |||
48 | .write = do_sync_write, | 48 | .write = do_sync_write, |
49 | .aio_read = generic_file_aio_read, | 49 | .aio_read = generic_file_aio_read, |
50 | .aio_write = generic_file_aio_write, | 50 | .aio_write = generic_file_aio_write, |
51 | .ioctl = ext2_ioctl, | 51 | .unlocked_ioctl = ext2_ioctl, |
52 | #ifdef CONFIG_COMPAT | 52 | #ifdef CONFIG_COMPAT |
53 | .compat_ioctl = ext2_compat_ioctl, | 53 | .compat_ioctl = ext2_compat_ioctl, |
54 | #endif | 54 | #endif |
@@ -65,7 +65,7 @@ const struct file_operations ext2_xip_file_operations = { | |||
65 | .llseek = generic_file_llseek, | 65 | .llseek = generic_file_llseek, |
66 | .read = xip_file_read, | 66 | .read = xip_file_read, |
67 | .write = xip_file_write, | 67 | .write = xip_file_write, |
68 | .ioctl = ext2_ioctl, | 68 | .unlocked_ioctl = ext2_ioctl, |
69 | #ifdef CONFIG_COMPAT | 69 | #ifdef CONFIG_COMPAT |
70 | .compat_ioctl = ext2_compat_ioctl, | 70 | .compat_ioctl = ext2_compat_ioctl, |
71 | #endif | 71 | #endif |
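The ext2 ioctl entries switch from .ioctl to .unlocked_ioctl, matching the new prototype exported in ext2.h above: long ext2_ioctl(struct file *, unsigned int, unsigned long). Unlike the old hook it is called without the inode argument and without the BKL, so the implementation in fs/ext2/ioctl.c (not part of these hunks) must do its own locking; a stub of the shape, purely for illustration:

long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	/* real body lives in fs/ext2/ioctl.c; any serialization the VFS
	 * previously provided via the BKL is now this function's job */
	switch (cmd) {
	default:
		return -ENOTTY;
	}
}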
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index b1ab32ab5a77..c62006805427 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
@@ -286,15 +286,12 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind) | |||
286 | * ext2_find_goal - find a prefered place for allocation. | 286 | * ext2_find_goal - find a prefered place for allocation. |
287 | * @inode: owner | 287 | * @inode: owner |
288 | * @block: block we want | 288 | * @block: block we want |
289 | * @chain: chain of indirect blocks | ||
290 | * @partial: pointer to the last triple within a chain | 289 | * @partial: pointer to the last triple within a chain |
291 | * | 290 | * |
292 | * Returns preferred place for a block (the goal). | 291 | * Returns preferred place for a block (the goal). |
293 | */ | 292 | */ |
294 | 293 | ||
295 | static inline int ext2_find_goal(struct inode *inode, | 294 | static inline int ext2_find_goal(struct inode *inode, long block, |
296 | long block, | ||
297 | Indirect chain[4], | ||
298 | Indirect *partial) | 295 | Indirect *partial) |
299 | { | 296 | { |
300 | struct ext2_block_alloc_info *block_i; | 297 | struct ext2_block_alloc_info *block_i; |
@@ -569,7 +566,6 @@ static void ext2_splice_branch(struct inode *inode, | |||
569 | * | 566 | * |
570 | * `handle' can be NULL if create == 0. | 567 | * `handle' can be NULL if create == 0. |
571 | * | 568 | * |
572 | * The BKL may not be held on entry here. Be sure to take it early. | ||
573 | * return > 0, # of blocks mapped or allocated. | 569 | * return > 0, # of blocks mapped or allocated. |
574 | * return = 0, if plain lookup failed. | 570 | * return = 0, if plain lookup failed. |
575 | * return < 0, error case. | 571 | * return < 0, error case. |
@@ -639,7 +635,7 @@ reread: | |||
639 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) | 635 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) |
640 | ext2_init_block_alloc_info(inode); | 636 | ext2_init_block_alloc_info(inode); |
641 | 637 | ||
642 | goal = ext2_find_goal(inode, iblock, chain, partial); | 638 | goal = ext2_find_goal(inode, iblock, partial); |
643 | 639 | ||
644 | /* the number of blocks need to allocate for [d,t]indirect blocks */ | 640 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
645 | indirect_blks = (chain + depth) - partial - 1; | 641 | indirect_blks = (chain + depth) - partial - 1; |
@@ -1185,22 +1181,33 @@ void ext2_get_inode_flags(struct ext2_inode_info *ei) | |||
1185 | ei->i_flags |= EXT2_DIRSYNC_FL; | 1181 | ei->i_flags |= EXT2_DIRSYNC_FL; |
1186 | } | 1182 | } |
1187 | 1183 | ||
1188 | void ext2_read_inode (struct inode * inode) | 1184 | struct inode *ext2_iget (struct super_block *sb, unsigned long ino) |
1189 | { | 1185 | { |
1190 | struct ext2_inode_info *ei = EXT2_I(inode); | 1186 | struct ext2_inode_info *ei; |
1191 | ino_t ino = inode->i_ino; | ||
1192 | struct buffer_head * bh; | 1187 | struct buffer_head * bh; |
1193 | struct ext2_inode * raw_inode = ext2_get_inode(inode->i_sb, ino, &bh); | 1188 | struct ext2_inode *raw_inode; |
1189 | struct inode *inode; | ||
1190 | long ret = -EIO; | ||
1194 | int n; | 1191 | int n; |
1195 | 1192 | ||
1193 | inode = iget_locked(sb, ino); | ||
1194 | if (!inode) | ||
1195 | return ERR_PTR(-ENOMEM); | ||
1196 | if (!(inode->i_state & I_NEW)) | ||
1197 | return inode; | ||
1198 | |||
1199 | ei = EXT2_I(inode); | ||
1196 | #ifdef CONFIG_EXT2_FS_POSIX_ACL | 1200 | #ifdef CONFIG_EXT2_FS_POSIX_ACL |
1197 | ei->i_acl = EXT2_ACL_NOT_CACHED; | 1201 | ei->i_acl = EXT2_ACL_NOT_CACHED; |
1198 | ei->i_default_acl = EXT2_ACL_NOT_CACHED; | 1202 | ei->i_default_acl = EXT2_ACL_NOT_CACHED; |
1199 | #endif | 1203 | #endif |
1200 | ei->i_block_alloc_info = NULL; | 1204 | ei->i_block_alloc_info = NULL; |
1201 | 1205 | ||
1202 | if (IS_ERR(raw_inode)) | 1206 | raw_inode = ext2_get_inode(inode->i_sb, ino, &bh); |
1207 | if (IS_ERR(raw_inode)) { | ||
1208 | ret = PTR_ERR(raw_inode); | ||
1203 | goto bad_inode; | 1209 | goto bad_inode; |
1210 | } | ||
1204 | 1211 | ||
1205 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); | 1212 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); |
1206 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); | 1213 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); |
@@ -1224,6 +1231,7 @@ void ext2_read_inode (struct inode * inode) | |||
1224 | if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { | 1231 | if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { |
1225 | /* this inode is deleted */ | 1232 | /* this inode is deleted */ |
1226 | brelse (bh); | 1233 | brelse (bh); |
1234 | ret = -ESTALE; | ||
1227 | goto bad_inode; | 1235 | goto bad_inode; |
1228 | } | 1236 | } |
1229 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); | 1237 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); |
@@ -1290,11 +1298,12 @@ void ext2_read_inode (struct inode * inode) | |||
1290 | } | 1298 | } |
1291 | brelse (bh); | 1299 | brelse (bh); |
1292 | ext2_set_inode_flags(inode); | 1300 | ext2_set_inode_flags(inode); |
1293 | return; | 1301 | unlock_new_inode(inode); |
1302 | return inode; | ||
1294 | 1303 | ||
1295 | bad_inode: | 1304 | bad_inode: |
1296 | make_bad_inode(inode); | 1305 | iget_failed(inode); |
1297 | return; | 1306 | return ERR_PTR(ret); |
1298 | } | 1307 | } |
1299 | 1308 | ||
1300 | static int ext2_update_inode(struct inode * inode, int do_sync) | 1309 | static int ext2_update_inode(struct inode * inode, int do_sync) |
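The inode.c hunk above retires ext2's `->read_inode()` callback (driven by `iget()`) in favour of an `ext2_iget()` that callers invoke directly, so lookup failures come back as `ERR_PTR()` values instead of bad inodes. The core of the conversion is the `iget_locked()` / `I_NEW` / `unlock_new_inode()` / `iget_failed()` sequence; a filesystem-agnostic sketch of that shape follows, with `myfs_read_ondisk()` standing in for the real on-disk read.

```c
#include <linux/fs.h>
#include <linux/err.h>

static int myfs_read_ondisk(struct inode *inode);	/* hypothetical helper */

struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	long ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* already cached and initialised */

	ret = myfs_read_ondisk(inode);		/* fill the in-core inode from disk */
	if (ret) {
		iget_failed(inode);		/* unlock, mark bad, drop the reference */
		return ERR_PTR(ret);
	}

	unlock_new_inode(inode);
	return inode;
}
```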
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c index 320b2cb3d4d2..b8ea11fee5c6 100644 --- a/fs/ext2/ioctl.c +++ b/fs/ext2/ioctl.c | |||
@@ -17,9 +17,9 @@ | |||
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | 18 | ||
19 | 19 | ||
20 | int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, | 20 | long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
21 | unsigned long arg) | ||
22 | { | 21 | { |
22 | struct inode *inode = filp->f_dentry->d_inode; | ||
23 | struct ext2_inode_info *ei = EXT2_I(inode); | 23 | struct ext2_inode_info *ei = EXT2_I(inode); |
24 | unsigned int flags; | 24 | unsigned int flags; |
25 | unsigned short rsv_window_size; | 25 | unsigned short rsv_window_size; |
@@ -141,9 +141,6 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, | |||
141 | #ifdef CONFIG_COMPAT | 141 | #ifdef CONFIG_COMPAT |
142 | long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 142 | long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
143 | { | 143 | { |
144 | struct inode *inode = file->f_path.dentry->d_inode; | ||
145 | int ret; | ||
146 | |||
147 | /* These are just misnamed, they actually get/put from/to user an int */ | 144 | /* These are just misnamed, they actually get/put from/to user an int */ |
148 | switch (cmd) { | 145 | switch (cmd) { |
149 | case EXT2_IOC32_GETFLAGS: | 146 | case EXT2_IOC32_GETFLAGS: |
@@ -161,9 +158,6 @@ long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
161 | default: | 158 | default: |
162 | return -ENOIOCTLCMD; | 159 | return -ENOIOCTLCMD; |
163 | } | 160 | } |
164 | lock_kernel(); | 161 | return ext2_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); |
165 | ret = ext2_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg)); | ||
166 | unlock_kernel(); | ||
167 | return ret; | ||
168 | } | 162 | } |
169 | #endif | 163 | #endif |
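With the native handler no longer needing an inode argument or the BKL, the compat path in the ioctl.c hunk collapses to command remapping plus a tail call through `compat_ptr()`. A hedged sketch of that shape, reusing the `myfs_ioctl()` handler sketched earlier; the `MYFS_IOC*` command numbers are illustrative:

```c
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

#define MYFS_IOC_GETFLAGS	_IOR('f', 1, long)	/* native command */
#define MYFS_IOC32_GETFLAGS	_IOR('f', 1, int)	/* 32-bit layout of the same */

static long myfs_compat_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	switch (cmd) {
	case MYFS_IOC32_GETFLAGS:
		cmd = MYFS_IOC_GETFLAGS;	/* translate to the native command */
		break;
	default:
		return -ENOIOCTLCMD;
	}
	/* compat_ptr() fixes up the 32-bit user pointer; no lock_kernel() needed */
	return myfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
```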
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index e69beed839ac..80c97fd8c571 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -63,9 +63,9 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str | |||
63 | ino = ext2_inode_by_name(dir, dentry); | 63 | ino = ext2_inode_by_name(dir, dentry); |
64 | inode = NULL; | 64 | inode = NULL; |
65 | if (ino) { | 65 | if (ino) { |
66 | inode = iget(dir->i_sb, ino); | 66 | inode = ext2_iget(dir->i_sb, ino); |
67 | if (!inode) | 67 | if (IS_ERR(inode)) |
68 | return ERR_PTR(-EACCES); | 68 | return ERR_CAST(inode); |
69 | } | 69 | } |
70 | return d_splice_alias(inode, dentry); | 70 | return d_splice_alias(inode, dentry); |
71 | } | 71 | } |
@@ -83,10 +83,10 @@ struct dentry *ext2_get_parent(struct dentry *child) | |||
83 | ino = ext2_inode_by_name(child->d_inode, &dotdot); | 83 | ino = ext2_inode_by_name(child->d_inode, &dotdot); |
84 | if (!ino) | 84 | if (!ino) |
85 | return ERR_PTR(-ENOENT); | 85 | return ERR_PTR(-ENOENT); |
86 | inode = iget(child->d_inode->i_sb, ino); | 86 | inode = ext2_iget(child->d_inode->i_sb, ino); |
87 | 87 | ||
88 | if (!inode) | 88 | if (IS_ERR(inode)) |
89 | return ERR_PTR(-EACCES); | 89 | return ERR_CAST(inode); |
90 | parent = d_alloc_anon(inode); | 90 | parent = d_alloc_anon(inode); |
91 | if (!parent) { | 91 | if (!parent) { |
92 | iput(inode); | 92 | iput(inode); |
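On the caller side, every former `iget()` user in these hunks now checks `IS_ERR()` and forwards the real error, using `ERR_CAST()` where a `struct dentry *` path has to carry an error that arrived as a `struct inode *`. A sketch of the lookup shape, with `myfs_inode_by_name()` as a hypothetical directory search and `myfs_iget()` as sketched earlier:

```c
#include <linux/fs.h>
#include <linux/err.h>

static ino_t myfs_inode_by_name(struct inode *dir, struct dentry *dentry);
struct inode *myfs_iget(struct super_block *sb, unsigned long ino);

static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct inode *inode = NULL;
	ino_t ino;

	ino = myfs_inode_by_name(dir, dentry);	/* hypothetical directory search */
	if (ino) {
		inode = myfs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* propagate, don't guess -EACCES */
	}
	return d_splice_alias(inode, dentry);
}
```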
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 6abaf75163f0..088b011bb97e 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
@@ -234,16 +234,16 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
234 | le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) { | 234 | le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) { |
235 | seq_printf(seq, ",resgid=%u", sbi->s_resgid); | 235 | seq_printf(seq, ",resgid=%u", sbi->s_resgid); |
236 | } | 236 | } |
237 | if (test_opt(sb, ERRORS_CONT)) { | 237 | if (test_opt(sb, ERRORS_RO)) { |
238 | int def_errors = le16_to_cpu(es->s_errors); | 238 | int def_errors = le16_to_cpu(es->s_errors); |
239 | 239 | ||
240 | if (def_errors == EXT2_ERRORS_PANIC || | 240 | if (def_errors == EXT2_ERRORS_PANIC || |
241 | def_errors == EXT2_ERRORS_RO) { | 241 | def_errors == EXT2_ERRORS_CONTINUE) { |
242 | seq_puts(seq, ",errors=continue"); | 242 | seq_puts(seq, ",errors=remount-ro"); |
243 | } | 243 | } |
244 | } | 244 | } |
245 | if (test_opt(sb, ERRORS_RO)) | 245 | if (test_opt(sb, ERRORS_CONT)) |
246 | seq_puts(seq, ",errors=remount-ro"); | 246 | seq_puts(seq, ",errors=continue"); |
247 | if (test_opt(sb, ERRORS_PANIC)) | 247 | if (test_opt(sb, ERRORS_PANIC)) |
248 | seq_puts(seq, ",errors=panic"); | 248 | seq_puts(seq, ",errors=panic"); |
249 | if (test_opt(sb, NO_UID32)) | 249 | if (test_opt(sb, NO_UID32)) |
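The `ext2_show_options()` hunk swaps which `errors=` behaviour is treated as the implicit default when printing mount options: errors=remount-ro is only emitted when the on-disk superblock default would give something else, while errors=continue is now always shown when selected. A rough sketch of that decision, with `MYFS_ERRORS_*` constants standing in for the fs-local defines and the usual `test_opt()` macro assumed to exist:

```c
#include <linux/fs.h>
#include <linux/seq_file.h>

/* Assumed stand-ins for the fs-local definitions. */
#define MYFS_ERRORS_CONTINUE	1
#define MYFS_ERRORS_RO		2
#define MYFS_ERRORS_PANIC	3

static void myfs_show_errors_opt(struct seq_file *seq, struct super_block *sb,
				 int ondisk_default)
{
	if (test_opt(sb, ERRORS_RO)) {
		/* remount-ro is the implicit default: print it only when the
		 * superblock default says continue or panic instead */
		if (ondisk_default == MYFS_ERRORS_PANIC ||
		    ondisk_default == MYFS_ERRORS_CONTINUE)
			seq_puts(seq, ",errors=remount-ro");
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
}
```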
@@ -285,6 +285,9 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
285 | seq_puts(seq, ",xip"); | 285 | seq_puts(seq, ",xip"); |
286 | #endif | 286 | #endif |
287 | 287 | ||
288 | if (!test_opt(sb, RESERVATION)) | ||
289 | seq_puts(seq, ",noreservation"); | ||
290 | |||
288 | return 0; | 291 | return 0; |
289 | } | 292 | } |
290 | 293 | ||
@@ -296,7 +299,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *da | |||
296 | static const struct super_operations ext2_sops = { | 299 | static const struct super_operations ext2_sops = { |
297 | .alloc_inode = ext2_alloc_inode, | 300 | .alloc_inode = ext2_alloc_inode, |
298 | .destroy_inode = ext2_destroy_inode, | 301 | .destroy_inode = ext2_destroy_inode, |
299 | .read_inode = ext2_read_inode, | ||
300 | .write_inode = ext2_write_inode, | 302 | .write_inode = ext2_write_inode, |
301 | .delete_inode = ext2_delete_inode, | 303 | .delete_inode = ext2_delete_inode, |
302 | .put_super = ext2_put_super, | 304 | .put_super = ext2_put_super, |
@@ -326,11 +328,10 @@ static struct inode *ext2_nfs_get_inode(struct super_block *sb, | |||
326 | * it might be "neater" to call ext2_get_inode first and check | 328 | * it might be "neater" to call ext2_get_inode first and check |
327 | * if the inode is valid..... | 329 | * if the inode is valid..... |
328 | */ | 330 | */ |
329 | inode = iget(sb, ino); | 331 | inode = ext2_iget(sb, ino); |
330 | if (inode == NULL) | 332 | if (IS_ERR(inode)) |
331 | return ERR_PTR(-ENOMEM); | 333 | return ERR_CAST(inode); |
332 | if (is_bad_inode(inode) || | 334 | if (generation && inode->i_generation != generation) { |
333 | (generation && inode->i_generation != generation)) { | ||
334 | /* we didn't find the right inode.. */ | 335 | /* we didn't find the right inode.. */ |
335 | iput(inode); | 336 | iput(inode); |
336 | return ERR_PTR(-ESTALE); | 337 | return ERR_PTR(-ESTALE); |
@@ -617,27 +618,24 @@ static int ext2_setup_super (struct super_block * sb, | |||
617 | return res; | 618 | return res; |
618 | } | 619 | } |
619 | 620 | ||
620 | static int ext2_check_descriptors (struct super_block * sb) | 621 | static int ext2_check_descriptors(struct super_block *sb) |
621 | { | 622 | { |
622 | int i; | 623 | int i; |
623 | int desc_block = 0; | ||
624 | struct ext2_sb_info *sbi = EXT2_SB(sb); | 624 | struct ext2_sb_info *sbi = EXT2_SB(sb); |
625 | unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | 625 | unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
626 | unsigned long last_block; | 626 | unsigned long last_block; |
627 | struct ext2_group_desc * gdp = NULL; | ||
628 | 627 | ||
629 | ext2_debug ("Checking group descriptors"); | 628 | ext2_debug ("Checking group descriptors"); |
630 | 629 | ||
631 | for (i = 0; i < sbi->s_groups_count; i++) | 630 | for (i = 0; i < sbi->s_groups_count; i++) { |
632 | { | 631 | struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL); |
632 | |||
633 | if (i == sbi->s_groups_count - 1) | 633 | if (i == sbi->s_groups_count - 1) |
634 | last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; | 634 | last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; |
635 | else | 635 | else |
636 | last_block = first_block + | 636 | last_block = first_block + |
637 | (EXT2_BLOCKS_PER_GROUP(sb) - 1); | 637 | (EXT2_BLOCKS_PER_GROUP(sb) - 1); |
638 | 638 | ||
639 | if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0) | ||
640 | gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data; | ||
641 | if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || | 639 | if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || |
642 | le32_to_cpu(gdp->bg_block_bitmap) > last_block) | 640 | le32_to_cpu(gdp->bg_block_bitmap) > last_block) |
643 | { | 641 | { |
@@ -667,7 +665,6 @@ static int ext2_check_descriptors (struct super_block * sb) | |||
667 | return 0; | 665 | return 0; |
668 | } | 666 | } |
669 | first_block += EXT2_BLOCKS_PER_GROUP(sb); | 667 | first_block += EXT2_BLOCKS_PER_GROUP(sb); |
670 | gdp++; | ||
671 | } | 668 | } |
672 | return 1; | 669 | return 1; |
673 | } | 670 | } |
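Both `check_descriptors` hunks drop the hand-maintained `desc_block` / `gdp++` walk and instead ask `ext2_get_group_desc()` (or `ext3_get_group_desc()`) for each group, since that helper already does the descriptors-per-block arithmetic. A minimal sketch of the resulting loop, with the per-group range checks elided:

```c
#include "ext2.h"	/* ext2_get_group_desc(), EXT2_SB(), struct ext2_group_desc */

static int myfs_walk_group_descs(struct super_block *sb)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	int i;

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);

		if (!gdp)
			return 0;	/* missing or corrupt descriptor block */

		/* ... range-check gdp->bg_block_bitmap, bg_inode_bitmap and
		 *     bg_inode_table against this group's block range ... */
	}
	return 1;
}
```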
@@ -750,6 +747,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
750 | unsigned long logic_sb_block; | 747 | unsigned long logic_sb_block; |
751 | unsigned long offset = 0; | 748 | unsigned long offset = 0; |
752 | unsigned long def_mount_opts; | 749 | unsigned long def_mount_opts; |
750 | long ret = -EINVAL; | ||
753 | int blocksize = BLOCK_SIZE; | 751 | int blocksize = BLOCK_SIZE; |
754 | int db_count; | 752 | int db_count; |
755 | int i, j; | 753 | int i, j; |
@@ -820,10 +818,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
820 | 818 | ||
821 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC) | 819 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC) |
822 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); | 820 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); |
823 | else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO) | 821 | else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE) |
824 | set_opt(sbi->s_mount_opt, ERRORS_RO); | ||
825 | else | ||
826 | set_opt(sbi->s_mount_opt, ERRORS_CONT); | 822 | set_opt(sbi->s_mount_opt, ERRORS_CONT); |
823 | else | ||
824 | set_opt(sbi->s_mount_opt, ERRORS_RO); | ||
827 | 825 | ||
828 | sbi->s_resuid = le16_to_cpu(es->s_def_resuid); | 826 | sbi->s_resuid = le16_to_cpu(es->s_def_resuid); |
829 | sbi->s_resgid = le16_to_cpu(es->s_def_resgid); | 827 | sbi->s_resgid = le16_to_cpu(es->s_def_resgid); |
@@ -868,8 +866,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
868 | 866 | ||
869 | blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); | 867 | blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); |
870 | 868 | ||
871 | if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) || | 869 | if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) { |
872 | (sb->s_blocksize != blocksize))) { | ||
873 | if (!silent) | 870 | if (!silent) |
874 | printk("XIP: Unsupported blocksize\n"); | 871 | printk("XIP: Unsupported blocksize\n"); |
875 | goto failed_mount; | 872 | goto failed_mount; |
@@ -1046,19 +1043,24 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) | |||
1046 | sb->s_op = &ext2_sops; | 1043 | sb->s_op = &ext2_sops; |
1047 | sb->s_export_op = &ext2_export_ops; | 1044 | sb->s_export_op = &ext2_export_ops; |
1048 | sb->s_xattr = ext2_xattr_handlers; | 1045 | sb->s_xattr = ext2_xattr_handlers; |
1049 | root = iget(sb, EXT2_ROOT_INO); | 1046 | root = ext2_iget(sb, EXT2_ROOT_INO); |
1050 | sb->s_root = d_alloc_root(root); | 1047 | if (IS_ERR(root)) { |
1051 | if (!sb->s_root) { | 1048 | ret = PTR_ERR(root); |
1052 | iput(root); | ||
1053 | printk(KERN_ERR "EXT2-fs: get root inode failed\n"); | ||
1054 | goto failed_mount3; | 1049 | goto failed_mount3; |
1055 | } | 1050 | } |
1056 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { | 1051 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { |
1057 | dput(sb->s_root); | 1052 | iput(root); |
1058 | sb->s_root = NULL; | ||
1059 | printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n"); | 1053 | printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n"); |
1060 | goto failed_mount3; | 1054 | goto failed_mount3; |
1061 | } | 1055 | } |
1056 | |||
1057 | sb->s_root = d_alloc_root(root); | ||
1058 | if (!sb->s_root) { | ||
1059 | iput(root); | ||
1060 | printk(KERN_ERR "EXT2-fs: get root inode failed\n"); | ||
1061 | ret = -ENOMEM; | ||
1062 | goto failed_mount3; | ||
1063 | } | ||
1062 | if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) | 1064 | if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) |
1063 | ext2_warning(sb, __FUNCTION__, | 1065 | ext2_warning(sb, __FUNCTION__, |
1064 | "mounting ext3 filesystem as ext2"); | 1066 | "mounting ext3 filesystem as ext2"); |
@@ -1085,7 +1087,7 @@ failed_mount: | |||
1085 | failed_sbi: | 1087 | failed_sbi: |
1086 | sb->s_fs_info = NULL; | 1088 | sb->s_fs_info = NULL; |
1087 | kfree(sbi); | 1089 | kfree(sbi); |
1088 | return -EINVAL; | 1090 | return ret; |
1089 | } | 1091 | } |
1090 | 1092 | ||
1091 | static void ext2_commit_super (struct super_block * sb, | 1093 | static void ext2_commit_super (struct super_block * sb, |
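The `ext2_fill_super()` hunk reorders root setup: validate the root inode before allocating the root dentry, drop it with a plain `iput()` while no dentry exists yet, and propagate the real error code through `ret` instead of returning a blanket `-EINVAL`. Condensed into a hypothetical helper, using the `myfs_iget()` sketched earlier:

```c
#include <linux/fs.h>
#include <linux/err.h>

static long myfs_setup_root(struct super_block *sb, unsigned long root_ino)
{
	struct inode *root;

	root = myfs_iget(sb, root_ino);		/* sketched earlier */
	if (IS_ERR(root))
		return PTR_ERR(root);		/* e.g. -EIO or -ESTALE, not -EINVAL */

	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);			/* no dentry yet, so plain iput */
		return -EINVAL;			/* corrupt root inode */
	}

	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		return -ENOMEM;
	}
	return 0;
}
```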
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index a8ba7e831278..da0cb2c0e437 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -80,13 +80,57 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, | |||
80 | return desc + offset; | 80 | return desc + offset; |
81 | } | 81 | } |
82 | 82 | ||
83 | static int ext3_valid_block_bitmap(struct super_block *sb, | ||
84 | struct ext3_group_desc *desc, | ||
85 | unsigned int block_group, | ||
86 | struct buffer_head *bh) | ||
87 | { | ||
88 | ext3_grpblk_t offset; | ||
89 | ext3_grpblk_t next_zero_bit; | ||
90 | ext3_fsblk_t bitmap_blk; | ||
91 | ext3_fsblk_t group_first_block; | ||
92 | |||
93 | group_first_block = ext3_group_first_block_no(sb, block_group); | ||
94 | |||
95 | /* check whether block bitmap block number is set */ | ||
96 | bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); | ||
97 | offset = bitmap_blk - group_first_block; | ||
98 | if (!ext3_test_bit(offset, bh->b_data)) | ||
99 | /* bad block bitmap */ | ||
100 | goto err_out; | ||
101 | |||
102 | /* check whether the inode bitmap block number is set */ | ||
103 | bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap); | ||
104 | offset = bitmap_blk - group_first_block; | ||
105 | if (!ext3_test_bit(offset, bh->b_data)) | ||
106 | /* bad block bitmap */ | ||
107 | goto err_out; | ||
108 | |||
109 | /* check whether the inode table block number is set */ | ||
110 | bitmap_blk = le32_to_cpu(desc->bg_inode_table); | ||
111 | offset = bitmap_blk - group_first_block; | ||
112 | next_zero_bit = ext3_find_next_zero_bit(bh->b_data, | ||
113 | offset + EXT3_SB(sb)->s_itb_per_group, | ||
114 | offset); | ||
115 | if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group) | ||
116 | /* good bitmap for inode tables */ | ||
117 | return 1; | ||
118 | |||
119 | err_out: | ||
120 | ext3_error(sb, __FUNCTION__, | ||
121 | "Invalid block bitmap - " | ||
122 | "block_group = %d, block = %lu", | ||
123 | block_group, bitmap_blk); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
83 | /** | 127 | /** |
84 | * read_block_bitmap() | 128 | * read_block_bitmap() |
85 | * @sb: super block | 129 | * @sb: super block |
86 | * @block_group: given block group | 130 | * @block_group: given block group |
87 | * | 131 | * |
88 | * Read the bitmap for a given block_group, reading into the specified | 132 | * Read the bitmap for a given block_group,and validate the |
89 | * slot in the superblock's bitmap cache. | 133 | * bits for block/inode/inode tables are set in the bitmaps |
90 | * | 134 | * |
91 | * Return buffer_head on success or NULL in case of failure. | 135 | * Return buffer_head on success or NULL in case of failure. |
92 | */ | 136 | */ |
@@ -95,17 +139,35 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group) | |||
95 | { | 139 | { |
96 | struct ext3_group_desc * desc; | 140 | struct ext3_group_desc * desc; |
97 | struct buffer_head * bh = NULL; | 141 | struct buffer_head * bh = NULL; |
142 | ext3_fsblk_t bitmap_blk; | ||
98 | 143 | ||
99 | desc = ext3_get_group_desc (sb, block_group, NULL); | 144 | desc = ext3_get_group_desc(sb, block_group, NULL); |
100 | if (!desc) | 145 | if (!desc) |
101 | goto error_out; | 146 | return NULL; |
102 | bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap)); | 147 | bitmap_blk = le32_to_cpu(desc->bg_block_bitmap); |
103 | if (!bh) | 148 | bh = sb_getblk(sb, bitmap_blk); |
104 | ext3_error (sb, "read_block_bitmap", | 149 | if (unlikely(!bh)) { |
150 | ext3_error(sb, __FUNCTION__, | ||
105 | "Cannot read block bitmap - " | 151 | "Cannot read block bitmap - " |
106 | "block_group = %d, block_bitmap = %u", | 152 | "block_group = %d, block_bitmap = %u", |
107 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | 153 | block_group, le32_to_cpu(desc->bg_block_bitmap)); |
108 | error_out: | 154 | return NULL; |
155 | } | ||
156 | if (likely(bh_uptodate_or_lock(bh))) | ||
157 | return bh; | ||
158 | |||
159 | if (bh_submit_read(bh) < 0) { | ||
160 | brelse(bh); | ||
161 | ext3_error(sb, __FUNCTION__, | ||
162 | "Cannot read block bitmap - " | ||
163 | "block_group = %d, block_bitmap = %u", | ||
164 | block_group, le32_to_cpu(desc->bg_block_bitmap)); | ||
165 | return NULL; | ||
166 | } | ||
167 | if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) { | ||
168 | brelse(bh); | ||
169 | return NULL; | ||
170 | } | ||
109 | return bh; | 171 | return bh; |
110 | } | 172 | } |
111 | /* | 173 | /* |
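The `read_block_bitmap()` hunk above replaces a blind `sb_bread()` with `sb_getblk()` plus the `bh_uptodate_or_lock()` / `bh_submit_read()` helpers, and then (via `ext3_valid_block_bitmap()`) checks that the group's own metadata blocks are marked allocated in the bitmap it just read. The read half of that pattern, as a generic sketch:

```c
#include <linux/fs.h>
#include <linux/buffer_head.h>

static struct buffer_head *myfs_read_bitmap(struct super_block *sb,
					    sector_t bitmap_blk)
{
	struct buffer_head *bh;

	bh = sb_getblk(sb, bitmap_blk);		/* buffer may not be uptodate yet */
	if (unlikely(!bh))
		return NULL;

	if (likely(bh_uptodate_or_lock(bh)))	/* already uptodate: no I/O needed */
		return bh;

	if (bh_submit_read(bh) < 0) {		/* read the locked buffer and wait */
		brelse(bh);
		return NULL;
	}

	/* caller should still validate the bitmap contents before trusting it */
	return bh;
}
```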
@@ -468,11 +530,13 @@ do_more: | |||
468 | in_range (block, le32_to_cpu(desc->bg_inode_table), | 530 | in_range (block, le32_to_cpu(desc->bg_inode_table), |
469 | sbi->s_itb_per_group) || | 531 | sbi->s_itb_per_group) || |
470 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), | 532 | in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table), |
471 | sbi->s_itb_per_group)) | 533 | sbi->s_itb_per_group)) { |
472 | ext3_error (sb, "ext3_free_blocks", | 534 | ext3_error (sb, "ext3_free_blocks", |
473 | "Freeing blocks in system zones - " | 535 | "Freeing blocks in system zones - " |
474 | "Block = "E3FSBLK", count = %lu", | 536 | "Block = "E3FSBLK", count = %lu", |
475 | block, count); | 537 | block, count); |
538 | goto error_return; | ||
539 | } | ||
476 | 540 | ||
477 | /* | 541 | /* |
478 | * We are about to start releasing blocks in the bitmap, | 542 | * We are about to start releasing blocks in the bitmap, |
@@ -566,9 +630,7 @@ do_more: | |||
566 | jbd_unlock_bh_state(bitmap_bh); | 630 | jbd_unlock_bh_state(bitmap_bh); |
567 | 631 | ||
568 | spin_lock(sb_bgl_lock(sbi, block_group)); | 632 | spin_lock(sb_bgl_lock(sbi, block_group)); |
569 | desc->bg_free_blocks_count = | 633 | le16_add_cpu(&desc->bg_free_blocks_count, group_freed); |
570 | cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + | ||
571 | group_freed); | ||
572 | spin_unlock(sb_bgl_lock(sbi, block_group)); | 634 | spin_unlock(sb_bgl_lock(sbi, block_group)); |
573 | percpu_counter_add(&sbi->s_freeblocks_counter, count); | 635 | percpu_counter_add(&sbi->s_freeblocks_counter, count); |
574 | 636 | ||
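Several hunks in this merge collapse the `cpu_to_le16(le16_to_cpu(x) + n)` read-modify-write idiom into `le16_add_cpu()` (and `le32_add_cpu()` in the resize.c hunk), helpers provided by the byteorder headers. A sketch of what the helper amounts to, shown only for comparison with the open-coded form it replaces:

```c
#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_le16(), le16_to_cpu(), le16_add_cpu() */

/* Open-coded equivalent of le16_add_cpu(), for illustration only. */
static inline void my_le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

/* Usage as in the ext3_free_blocks() hunk:
 *	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
 */
```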
@@ -1508,7 +1570,7 @@ retry_alloc: | |||
1508 | 1570 | ||
1509 | /* | 1571 | /* |
1510 | * Now search the rest of the groups. We assume that | 1572 | * Now search the rest of the groups. We assume that |
1511 | * i and gdp correctly point to the last group visited. | 1573 | * group_no and gdp correctly point to the last group visited. |
1512 | */ | 1574 | */ |
1513 | for (bgi = 0; bgi < ngroups; bgi++) { | 1575 | for (bgi = 0; bgi < ngroups; bgi++) { |
1514 | group_no++; | 1576 | group_no++; |
@@ -1575,11 +1637,13 @@ allocated: | |||
1575 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), | 1637 | in_range(ret_block, le32_to_cpu(gdp->bg_inode_table), |
1576 | EXT3_SB(sb)->s_itb_per_group) || | 1638 | EXT3_SB(sb)->s_itb_per_group) || |
1577 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | 1639 | in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table), |
1578 | EXT3_SB(sb)->s_itb_per_group)) | 1640 | EXT3_SB(sb)->s_itb_per_group)) { |
1579 | ext3_error(sb, "ext3_new_block", | 1641 | ext3_error(sb, "ext3_new_block", |
1580 | "Allocating block in system zone - " | 1642 | "Allocating block in system zone - " |
1581 | "blocks from "E3FSBLK", length %lu", | 1643 | "blocks from "E3FSBLK", length %lu", |
1582 | ret_block, num); | 1644 | ret_block, num); |
1645 | goto out; | ||
1646 | } | ||
1583 | 1647 | ||
1584 | performed_allocation = 1; | 1648 | performed_allocation = 1; |
1585 | 1649 | ||
@@ -1630,8 +1694,7 @@ allocated: | |||
1630 | ret_block, goal_hits, goal_attempts); | 1694 | ret_block, goal_hits, goal_attempts); |
1631 | 1695 | ||
1632 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1696 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1633 | gdp->bg_free_blocks_count = | 1697 | le16_add_cpu(&gdp->bg_free_blocks_count, -num); |
1634 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); | ||
1635 | spin_unlock(sb_bgl_lock(sbi, group_no)); | 1698 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
1636 | percpu_counter_sub(&sbi->s_freeblocks_counter, num); | 1699 | percpu_counter_sub(&sbi->s_freeblocks_counter, num); |
1637 | 1700 | ||
@@ -1782,11 +1845,7 @@ static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group) | |||
1782 | 1845 | ||
1783 | static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) | 1846 | static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) |
1784 | { | 1847 | { |
1785 | if (EXT3_HAS_RO_COMPAT_FEATURE(sb, | 1848 | return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0; |
1786 | EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) && | ||
1787 | !ext3_group_sparse(group)) | ||
1788 | return 0; | ||
1789 | return EXT3_SB(sb)->s_gdb_count; | ||
1790 | } | 1849 | } |
1791 | 1850 | ||
1792 | /** | 1851 | /** |
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 1bc8cd89c51d..4f4020c54683 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c | |||
@@ -164,11 +164,9 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) | |||
164 | 164 | ||
165 | if (gdp) { | 165 | if (gdp) { |
166 | spin_lock(sb_bgl_lock(sbi, block_group)); | 166 | spin_lock(sb_bgl_lock(sbi, block_group)); |
167 | gdp->bg_free_inodes_count = cpu_to_le16( | 167 | le16_add_cpu(&gdp->bg_free_inodes_count, 1); |
168 | le16_to_cpu(gdp->bg_free_inodes_count) + 1); | ||
169 | if (is_directory) | 168 | if (is_directory) |
170 | gdp->bg_used_dirs_count = cpu_to_le16( | 169 | le16_add_cpu(&gdp->bg_used_dirs_count, -1); |
171 | le16_to_cpu(gdp->bg_used_dirs_count) - 1); | ||
172 | spin_unlock(sb_bgl_lock(sbi, block_group)); | 170 | spin_unlock(sb_bgl_lock(sbi, block_group)); |
173 | percpu_counter_inc(&sbi->s_freeinodes_counter); | 171 | percpu_counter_inc(&sbi->s_freeinodes_counter); |
174 | if (is_directory) | 172 | if (is_directory) |
@@ -527,11 +525,9 @@ got: | |||
527 | err = ext3_journal_get_write_access(handle, bh2); | 525 | err = ext3_journal_get_write_access(handle, bh2); |
528 | if (err) goto fail; | 526 | if (err) goto fail; |
529 | spin_lock(sb_bgl_lock(sbi, group)); | 527 | spin_lock(sb_bgl_lock(sbi, group)); |
530 | gdp->bg_free_inodes_count = | 528 | le16_add_cpu(&gdp->bg_free_inodes_count, -1); |
531 | cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1); | ||
532 | if (S_ISDIR(mode)) { | 529 | if (S_ISDIR(mode)) { |
533 | gdp->bg_used_dirs_count = | 530 | le16_add_cpu(&gdp->bg_used_dirs_count, 1); |
534 | cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1); | ||
535 | } | 531 | } |
536 | spin_unlock(sb_bgl_lock(sbi, group)); | 532 | spin_unlock(sb_bgl_lock(sbi, group)); |
537 | BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata"); | 533 | BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata"); |
@@ -642,14 +638,15 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino) | |||
642 | unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count); | 638 | unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count); |
643 | unsigned long block_group; | 639 | unsigned long block_group; |
644 | int bit; | 640 | int bit; |
645 | struct buffer_head *bitmap_bh = NULL; | 641 | struct buffer_head *bitmap_bh; |
646 | struct inode *inode = NULL; | 642 | struct inode *inode = NULL; |
643 | long err = -EIO; | ||
647 | 644 | ||
648 | /* Error cases - e2fsck has already cleaned up for us */ | 645 | /* Error cases - e2fsck has already cleaned up for us */ |
649 | if (ino > max_ino) { | 646 | if (ino > max_ino) { |
650 | ext3_warning(sb, __FUNCTION__, | 647 | ext3_warning(sb, __FUNCTION__, |
651 | "bad orphan ino %lu! e2fsck was run?", ino); | 648 | "bad orphan ino %lu! e2fsck was run?", ino); |
652 | goto out; | 649 | goto error; |
653 | } | 650 | } |
654 | 651 | ||
655 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); | 652 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); |
@@ -658,38 +655,49 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino) | |||
658 | if (!bitmap_bh) { | 655 | if (!bitmap_bh) { |
659 | ext3_warning(sb, __FUNCTION__, | 656 | ext3_warning(sb, __FUNCTION__, |
660 | "inode bitmap error for orphan %lu", ino); | 657 | "inode bitmap error for orphan %lu", ino); |
661 | goto out; | 658 | goto error; |
662 | } | 659 | } |
663 | 660 | ||
664 | /* Having the inode bit set should be a 100% indicator that this | 661 | /* Having the inode bit set should be a 100% indicator that this |
665 | * is a valid orphan (no e2fsck run on fs). Orphans also include | 662 | * is a valid orphan (no e2fsck run on fs). Orphans also include |
666 | * inodes that were being truncated, so we can't check i_nlink==0. | 663 | * inodes that were being truncated, so we can't check i_nlink==0. |
667 | */ | 664 | */ |
668 | if (!ext3_test_bit(bit, bitmap_bh->b_data) || | 665 | if (!ext3_test_bit(bit, bitmap_bh->b_data)) |
669 | !(inode = iget(sb, ino)) || is_bad_inode(inode) || | 666 | goto bad_orphan; |
670 | NEXT_ORPHAN(inode) > max_ino) { | 667 | |
671 | ext3_warning(sb, __FUNCTION__, | 668 | inode = ext3_iget(sb, ino); |
672 | "bad orphan inode %lu! e2fsck was run?", ino); | 669 | if (IS_ERR(inode)) |
673 | printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n", | 670 | goto iget_failed; |
674 | bit, (unsigned long long)bitmap_bh->b_blocknr, | 671 | |
675 | ext3_test_bit(bit, bitmap_bh->b_data)); | 672 | if (NEXT_ORPHAN(inode) > max_ino) |
676 | printk(KERN_NOTICE "inode=%p\n", inode); | 673 | goto bad_orphan; |
677 | if (inode) { | 674 | brelse(bitmap_bh); |
678 | printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", | 675 | return inode; |
679 | is_bad_inode(inode)); | 676 | |
680 | printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", | 677 | iget_failed: |
681 | NEXT_ORPHAN(inode)); | 678 | err = PTR_ERR(inode); |
682 | printk(KERN_NOTICE "max_ino=%lu\n", max_ino); | 679 | inode = NULL; |
683 | } | 680 | bad_orphan: |
681 | ext3_warning(sb, __FUNCTION__, | ||
682 | "bad orphan inode %lu! e2fsck was run?", ino); | ||
683 | printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n", | ||
684 | bit, (unsigned long long)bitmap_bh->b_blocknr, | ||
685 | ext3_test_bit(bit, bitmap_bh->b_data)); | ||
686 | printk(KERN_NOTICE "inode=%p\n", inode); | ||
687 | if (inode) { | ||
688 | printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", | ||
689 | is_bad_inode(inode)); | ||
690 | printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", | ||
691 | NEXT_ORPHAN(inode)); | ||
692 | printk(KERN_NOTICE "max_ino=%lu\n", max_ino); | ||
684 | /* Avoid freeing blocks if we got a bad deleted inode */ | 693 | /* Avoid freeing blocks if we got a bad deleted inode */ |
685 | if (inode && inode->i_nlink == 0) | 694 | if (inode->i_nlink == 0) |
686 | inode->i_blocks = 0; | 695 | inode->i_blocks = 0; |
687 | iput(inode); | 696 | iput(inode); |
688 | inode = NULL; | ||
689 | } | 697 | } |
690 | out: | ||
691 | brelse(bitmap_bh); | 698 | brelse(bitmap_bh); |
692 | return inode; | 699 | error: |
700 | return ERR_PTR(err); | ||
693 | } | 701 | } |
694 | 702 | ||
695 | unsigned long ext3_count_free_inodes (struct super_block * sb) | 703 | unsigned long ext3_count_free_inodes (struct super_block * sb) |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 077535439288..eb95670a27eb 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -439,16 +439,14 @@ static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind) | |||
439 | * ext3_find_goal - find a prefered place for allocation. | 439 | * ext3_find_goal - find a prefered place for allocation. |
440 | * @inode: owner | 440 | * @inode: owner |
441 | * @block: block we want | 441 | * @block: block we want |
442 | * @chain: chain of indirect blocks | ||
443 | * @partial: pointer to the last triple within a chain | 442 | * @partial: pointer to the last triple within a chain |
444 | * @goal: place to store the result. | ||
445 | * | 443 | * |
446 | * Normally this function find the prefered place for block allocation, | 444 | * Normally this function find the prefered place for block allocation, |
447 | * stores it in *@goal and returns zero. | 445 | * returns it. |
448 | */ | 446 | */ |
449 | 447 | ||
450 | static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, | 448 | static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block, |
451 | Indirect chain[4], Indirect *partial) | 449 | Indirect *partial) |
452 | { | 450 | { |
453 | struct ext3_block_alloc_info *block_i; | 451 | struct ext3_block_alloc_info *block_i; |
454 | 452 | ||
@@ -884,7 +882,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
884 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) | 882 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) |
885 | ext3_init_block_alloc_info(inode); | 883 | ext3_init_block_alloc_info(inode); |
886 | 884 | ||
887 | goal = ext3_find_goal(inode, iblock, chain, partial); | 885 | goal = ext3_find_goal(inode, iblock, partial); |
888 | 886 | ||
889 | /* the number of blocks need to allocate for [d,t]indirect blocks */ | 887 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
890 | indirect_blks = (chain + depth) - partial - 1; | 888 | indirect_blks = (chain + depth) - partial - 1; |
@@ -941,55 +939,45 @@ out: | |||
941 | return err; | 939 | return err; |
942 | } | 940 | } |
943 | 941 | ||
944 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) | 942 | /* Maximum number of blocks we map for direct IO at once. */ |
943 | #define DIO_MAX_BLOCKS 4096 | ||
944 | /* | ||
945 | * Number of credits we need for writing DIO_MAX_BLOCKS: | ||
946 | * We need sb + group descriptor + bitmap + inode -> 4 | ||
947 | * For B blocks with A block pointers per block we need: | ||
948 | * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect). | ||
949 | * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25. | ||
950 | */ | ||
951 | #define DIO_CREDITS 25 | ||
945 | 952 | ||
946 | static int ext3_get_block(struct inode *inode, sector_t iblock, | 953 | static int ext3_get_block(struct inode *inode, sector_t iblock, |
947 | struct buffer_head *bh_result, int create) | 954 | struct buffer_head *bh_result, int create) |
948 | { | 955 | { |
949 | handle_t *handle = ext3_journal_current_handle(); | 956 | handle_t *handle = ext3_journal_current_handle(); |
950 | int ret = 0; | 957 | int ret = 0, started = 0; |
951 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 958 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; |
952 | 959 | ||
953 | if (!create) | 960 | if (create && !handle) { /* Direct IO write... */ |
954 | goto get_block; /* A read */ | 961 | if (max_blocks > DIO_MAX_BLOCKS) |
955 | 962 | max_blocks = DIO_MAX_BLOCKS; | |
956 | if (max_blocks == 1) | 963 | handle = ext3_journal_start(inode, DIO_CREDITS + |
957 | goto get_block; /* A single block get */ | 964 | 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb)); |
958 | 965 | if (IS_ERR(handle)) { | |
959 | if (handle->h_transaction->t_state == T_LOCKED) { | ||
960 | /* | ||
961 | * Huge direct-io writes can hold off commits for long | ||
962 | * periods of time. Let this commit run. | ||
963 | */ | ||
964 | ext3_journal_stop(handle); | ||
965 | handle = ext3_journal_start(inode, DIO_CREDITS); | ||
966 | if (IS_ERR(handle)) | ||
967 | ret = PTR_ERR(handle); | 966 | ret = PTR_ERR(handle); |
968 | goto get_block; | 967 | goto out; |
969 | } | ||
970 | |||
971 | if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) { | ||
972 | /* | ||
973 | * Getting low on buffer credits... | ||
974 | */ | ||
975 | ret = ext3_journal_extend(handle, DIO_CREDITS); | ||
976 | if (ret > 0) { | ||
977 | /* | ||
978 | * Couldn't extend the transaction. Start a new one. | ||
979 | */ | ||
980 | ret = ext3_journal_restart(handle, DIO_CREDITS); | ||
981 | } | 968 | } |
969 | started = 1; | ||
982 | } | 970 | } |
983 | 971 | ||
984 | get_block: | 972 | ret = ext3_get_blocks_handle(handle, inode, iblock, |
985 | if (ret == 0) { | ||
986 | ret = ext3_get_blocks_handle(handle, inode, iblock, | ||
987 | max_blocks, bh_result, create, 0); | 973 | max_blocks, bh_result, create, 0); |
988 | if (ret > 0) { | 974 | if (ret > 0) { |
989 | bh_result->b_size = (ret << inode->i_blkbits); | 975 | bh_result->b_size = (ret << inode->i_blkbits); |
990 | ret = 0; | 976 | ret = 0; |
991 | } | ||
992 | } | 977 | } |
978 | if (started) | ||
979 | ext3_journal_stop(handle); | ||
980 | out: | ||
993 | return ret; | 981 | return ret; |
994 | } | 982 | } |
995 | 983 | ||
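The new `DIO_CREDITS` comment in the `ext3_get_block()` hunk is a worked bound on the journal credits needed to map `DIO_MAX_BLOCKS` at once: four fixed buffers (superblock, group descriptor, bitmap, inode) plus the indirect-tree blocks that B data blocks can touch. Spelling the arithmetic out for B = 4096 and A = 256 pointers per 1KB block, as an illustrative check of the 25 figure:

```c
/* Illustrative recomputation of DIO_CREDITS = 25 from the comment above. */
enum {
	B = 4096,			/* DIO_MAX_BLOCKS */
	A = 256,			/* block pointers per 1KB indirect block */

	FIXED      = 4,			/* sb + group descriptor + bitmap + inode */
	INDIRECT   = B / A + 2,		/* 16 + 2 = 18 singly-indirect blocks */
	DINDIRECT  = B / A / A + 2,	/* 0 + 2  =  2 doubly-indirect blocks */
	TINDIRECT  = 1,			/* the one triply-indirect block */

	DIO_CREDITS_CHECK = FIXED + INDIRECT + DINDIRECT + TINDIRECT,	/* = 25 */
};
```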
@@ -1680,7 +1668,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait) | |||
1680 | * if the machine crashes during the write. | 1668 | * if the machine crashes during the write. |
1681 | * | 1669 | * |
1682 | * If the O_DIRECT write is intantiating holes inside i_size and the machine | 1670 | * If the O_DIRECT write is intantiating holes inside i_size and the machine |
1683 | * crashes then stale disk data _may_ be exposed inside the file. | 1671 | * crashes then stale disk data _may_ be exposed inside the file. But current |
1672 | * VFS code falls back into buffered path in that case so we are safe. | ||
1684 | */ | 1673 | */ |
1685 | static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | 1674 | static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, |
1686 | const struct iovec *iov, loff_t offset, | 1675 | const struct iovec *iov, loff_t offset, |
@@ -1689,7 +1678,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1689 | struct file *file = iocb->ki_filp; | 1678 | struct file *file = iocb->ki_filp; |
1690 | struct inode *inode = file->f_mapping->host; | 1679 | struct inode *inode = file->f_mapping->host; |
1691 | struct ext3_inode_info *ei = EXT3_I(inode); | 1680 | struct ext3_inode_info *ei = EXT3_I(inode); |
1692 | handle_t *handle = NULL; | 1681 | handle_t *handle; |
1693 | ssize_t ret; | 1682 | ssize_t ret; |
1694 | int orphan = 0; | 1683 | int orphan = 0; |
1695 | size_t count = iov_length(iov, nr_segs); | 1684 | size_t count = iov_length(iov, nr_segs); |
@@ -1697,17 +1686,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1697 | if (rw == WRITE) { | 1686 | if (rw == WRITE) { |
1698 | loff_t final_size = offset + count; | 1687 | loff_t final_size = offset + count; |
1699 | 1688 | ||
1700 | handle = ext3_journal_start(inode, DIO_CREDITS); | ||
1701 | if (IS_ERR(handle)) { | ||
1702 | ret = PTR_ERR(handle); | ||
1703 | goto out; | ||
1704 | } | ||
1705 | if (final_size > inode->i_size) { | 1689 | if (final_size > inode->i_size) { |
1690 | /* Credits for sb + inode write */ | ||
1691 | handle = ext3_journal_start(inode, 2); | ||
1692 | if (IS_ERR(handle)) { | ||
1693 | ret = PTR_ERR(handle); | ||
1694 | goto out; | ||
1695 | } | ||
1706 | ret = ext3_orphan_add(handle, inode); | 1696 | ret = ext3_orphan_add(handle, inode); |
1707 | if (ret) | 1697 | if (ret) { |
1708 | goto out_stop; | 1698 | ext3_journal_stop(handle); |
1699 | goto out; | ||
1700 | } | ||
1709 | orphan = 1; | 1701 | orphan = 1; |
1710 | ei->i_disksize = inode->i_size; | 1702 | ei->i_disksize = inode->i_size; |
1703 | ext3_journal_stop(handle); | ||
1711 | } | 1704 | } |
1712 | } | 1705 | } |
1713 | 1706 | ||
@@ -1715,18 +1708,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1715 | offset, nr_segs, | 1708 | offset, nr_segs, |
1716 | ext3_get_block, NULL); | 1709 | ext3_get_block, NULL); |
1717 | 1710 | ||
1718 | /* | 1711 | if (orphan) { |
1719 | * Reacquire the handle: ext3_get_block() can restart the transaction | ||
1720 | */ | ||
1721 | handle = ext3_journal_current_handle(); | ||
1722 | |||
1723 | out_stop: | ||
1724 | if (handle) { | ||
1725 | int err; | 1712 | int err; |
1726 | 1713 | ||
1727 | if (orphan && inode->i_nlink) | 1714 | /* Credits for sb + inode write */ |
1715 | handle = ext3_journal_start(inode, 2); | ||
1716 | if (IS_ERR(handle)) { | ||
1717 | /* This is really bad luck. We've written the data | ||
1718 | * but cannot extend i_size. Bail out and pretend | ||
1719 | * the write failed... */ | ||
1720 | ret = PTR_ERR(handle); | ||
1721 | goto out; | ||
1722 | } | ||
1723 | if (inode->i_nlink) | ||
1728 | ext3_orphan_del(handle, inode); | 1724 | ext3_orphan_del(handle, inode); |
1729 | if (orphan && ret > 0) { | 1725 | if (ret > 0) { |
1730 | loff_t end = offset + ret; | 1726 | loff_t end = offset + ret; |
1731 | if (end > inode->i_size) { | 1727 | if (end > inode->i_size) { |
1732 | ei->i_disksize = end; | 1728 | ei->i_disksize = end; |
@@ -2658,21 +2654,31 @@ void ext3_get_inode_flags(struct ext3_inode_info *ei) | |||
2658 | ei->i_flags |= EXT3_DIRSYNC_FL; | 2654 | ei->i_flags |= EXT3_DIRSYNC_FL; |
2659 | } | 2655 | } |
2660 | 2656 | ||
2661 | void ext3_read_inode(struct inode * inode) | 2657 | struct inode *ext3_iget(struct super_block *sb, unsigned long ino) |
2662 | { | 2658 | { |
2663 | struct ext3_iloc iloc; | 2659 | struct ext3_iloc iloc; |
2664 | struct ext3_inode *raw_inode; | 2660 | struct ext3_inode *raw_inode; |
2665 | struct ext3_inode_info *ei = EXT3_I(inode); | 2661 | struct ext3_inode_info *ei; |
2666 | struct buffer_head *bh; | 2662 | struct buffer_head *bh; |
2663 | struct inode *inode; | ||
2664 | long ret; | ||
2667 | int block; | 2665 | int block; |
2668 | 2666 | ||
2667 | inode = iget_locked(sb, ino); | ||
2668 | if (!inode) | ||
2669 | return ERR_PTR(-ENOMEM); | ||
2670 | if (!(inode->i_state & I_NEW)) | ||
2671 | return inode; | ||
2672 | |||
2673 | ei = EXT3_I(inode); | ||
2669 | #ifdef CONFIG_EXT3_FS_POSIX_ACL | 2674 | #ifdef CONFIG_EXT3_FS_POSIX_ACL |
2670 | ei->i_acl = EXT3_ACL_NOT_CACHED; | 2675 | ei->i_acl = EXT3_ACL_NOT_CACHED; |
2671 | ei->i_default_acl = EXT3_ACL_NOT_CACHED; | 2676 | ei->i_default_acl = EXT3_ACL_NOT_CACHED; |
2672 | #endif | 2677 | #endif |
2673 | ei->i_block_alloc_info = NULL; | 2678 | ei->i_block_alloc_info = NULL; |
2674 | 2679 | ||
2675 | if (__ext3_get_inode_loc(inode, &iloc, 0)) | 2680 | ret = __ext3_get_inode_loc(inode, &iloc, 0); |
2681 | if (ret < 0) | ||
2676 | goto bad_inode; | 2682 | goto bad_inode; |
2677 | bh = iloc.bh; | 2683 | bh = iloc.bh; |
2678 | raw_inode = ext3_raw_inode(&iloc); | 2684 | raw_inode = ext3_raw_inode(&iloc); |
@@ -2703,6 +2709,7 @@ void ext3_read_inode(struct inode * inode) | |||
2703 | !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { | 2709 | !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) { |
2704 | /* this inode is deleted */ | 2710 | /* this inode is deleted */ |
2705 | brelse (bh); | 2711 | brelse (bh); |
2712 | ret = -ESTALE; | ||
2706 | goto bad_inode; | 2713 | goto bad_inode; |
2707 | } | 2714 | } |
2708 | /* The only unlinked inodes we let through here have | 2715 | /* The only unlinked inodes we let through here have |
@@ -2746,6 +2753,7 @@ void ext3_read_inode(struct inode * inode) | |||
2746 | if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > | 2753 | if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > |
2747 | EXT3_INODE_SIZE(inode->i_sb)) { | 2754 | EXT3_INODE_SIZE(inode->i_sb)) { |
2748 | brelse (bh); | 2755 | brelse (bh); |
2756 | ret = -EIO; | ||
2749 | goto bad_inode; | 2757 | goto bad_inode; |
2750 | } | 2758 | } |
2751 | if (ei->i_extra_isize == 0) { | 2759 | if (ei->i_extra_isize == 0) { |
@@ -2787,11 +2795,12 @@ void ext3_read_inode(struct inode * inode) | |||
2787 | } | 2795 | } |
2788 | brelse (iloc.bh); | 2796 | brelse (iloc.bh); |
2789 | ext3_set_inode_flags(inode); | 2797 | ext3_set_inode_flags(inode); |
2790 | return; | 2798 | unlock_new_inode(inode); |
2799 | return inode; | ||
2791 | 2800 | ||
2792 | bad_inode: | 2801 | bad_inode: |
2793 | make_bad_inode(inode); | 2802 | iget_failed(inode); |
2794 | return; | 2803 | return ERR_PTR(ret); |
2795 | } | 2804 | } |
2796 | 2805 | ||
2797 | /* | 2806 | /* |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 4ab6f76e63d0..dec3e0d88ab1 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
@@ -860,14 +860,10 @@ static struct buffer_head * ext3_find_entry (struct dentry *dentry, | |||
860 | int nblocks, i, err; | 860 | int nblocks, i, err; |
861 | struct inode *dir = dentry->d_parent->d_inode; | 861 | struct inode *dir = dentry->d_parent->d_inode; |
862 | int namelen; | 862 | int namelen; |
863 | const u8 *name; | ||
864 | unsigned blocksize; | ||
865 | 863 | ||
866 | *res_dir = NULL; | 864 | *res_dir = NULL; |
867 | sb = dir->i_sb; | 865 | sb = dir->i_sb; |
868 | blocksize = sb->s_blocksize; | ||
869 | namelen = dentry->d_name.len; | 866 | namelen = dentry->d_name.len; |
870 | name = dentry->d_name.name; | ||
871 | if (namelen > EXT3_NAME_LEN) | 867 | if (namelen > EXT3_NAME_LEN) |
872 | return NULL; | 868 | return NULL; |
873 | if (is_dx(dir)) { | 869 | if (is_dx(dir)) { |
@@ -1041,17 +1037,11 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str | |||
1041 | if (!ext3_valid_inum(dir->i_sb, ino)) { | 1037 | if (!ext3_valid_inum(dir->i_sb, ino)) { |
1042 | ext3_error(dir->i_sb, "ext3_lookup", | 1038 | ext3_error(dir->i_sb, "ext3_lookup", |
1043 | "bad inode number: %lu", ino); | 1039 | "bad inode number: %lu", ino); |
1044 | inode = NULL; | 1040 | return ERR_PTR(-EIO); |
1045 | } else | ||
1046 | inode = iget(dir->i_sb, ino); | ||
1047 | |||
1048 | if (!inode) | ||
1049 | return ERR_PTR(-EACCES); | ||
1050 | |||
1051 | if (is_bad_inode(inode)) { | ||
1052 | iput(inode); | ||
1053 | return ERR_PTR(-ENOENT); | ||
1054 | } | 1041 | } |
1042 | inode = ext3_iget(dir->i_sb, ino); | ||
1043 | if (IS_ERR(inode)) | ||
1044 | return ERR_CAST(inode); | ||
1055 | } | 1045 | } |
1056 | return d_splice_alias(inode, dentry); | 1046 | return d_splice_alias(inode, dentry); |
1057 | } | 1047 | } |
@@ -1080,18 +1070,13 @@ struct dentry *ext3_get_parent(struct dentry *child) | |||
1080 | if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { | 1070 | if (!ext3_valid_inum(child->d_inode->i_sb, ino)) { |
1081 | ext3_error(child->d_inode->i_sb, "ext3_get_parent", | 1071 | ext3_error(child->d_inode->i_sb, "ext3_get_parent", |
1082 | "bad inode number: %lu", ino); | 1072 | "bad inode number: %lu", ino); |
1083 | inode = NULL; | 1073 | return ERR_PTR(-EIO); |
1084 | } else | ||
1085 | inode = iget(child->d_inode->i_sb, ino); | ||
1086 | |||
1087 | if (!inode) | ||
1088 | return ERR_PTR(-EACCES); | ||
1089 | |||
1090 | if (is_bad_inode(inode)) { | ||
1091 | iput(inode); | ||
1092 | return ERR_PTR(-ENOENT); | ||
1093 | } | 1074 | } |
1094 | 1075 | ||
1076 | inode = ext3_iget(child->d_inode->i_sb, ino); | ||
1077 | if (IS_ERR(inode)) | ||
1078 | return ERR_CAST(inode); | ||
1079 | |||
1095 | parent = d_alloc_anon(inode); | 1080 | parent = d_alloc_anon(inode); |
1096 | if (!parent) { | 1081 | if (!parent) { |
1097 | iput(inode); | 1082 | iput(inode); |
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 44de1453c301..9397d779c43d 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
@@ -518,8 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, | |||
518 | EXT3_SB(sb)->s_gdb_count++; | 518 | EXT3_SB(sb)->s_gdb_count++; |
519 | kfree(o_group_desc); | 519 | kfree(o_group_desc); |
520 | 520 | ||
521 | es->s_reserved_gdt_blocks = | 521 | le16_add_cpu(&es->s_reserved_gdt_blocks, -1); |
522 | cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1); | ||
523 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); | 522 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); |
524 | 523 | ||
525 | return 0; | 524 | return 0; |
@@ -795,12 +794,11 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
795 | "No reserved GDT blocks, can't resize"); | 794 | "No reserved GDT blocks, can't resize"); |
796 | return -EPERM; | 795 | return -EPERM; |
797 | } | 796 | } |
798 | inode = iget(sb, EXT3_RESIZE_INO); | 797 | inode = ext3_iget(sb, EXT3_RESIZE_INO); |
799 | if (!inode || is_bad_inode(inode)) { | 798 | if (IS_ERR(inode)) { |
800 | ext3_warning(sb, __FUNCTION__, | 799 | ext3_warning(sb, __FUNCTION__, |
801 | "Error opening resize inode"); | 800 | "Error opening resize inode"); |
802 | iput(inode); | 801 | return PTR_ERR(inode); |
803 | return -ENOENT; | ||
804 | } | 802 | } |
805 | } | 803 | } |
806 | 804 | ||
@@ -891,10 +889,8 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
891 | * blocks/inodes before the group is live won't actually let us | 889 | * blocks/inodes before the group is live won't actually let us |
892 | * allocate the new space yet. | 890 | * allocate the new space yet. |
893 | */ | 891 | */ |
894 | es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) + | 892 | le32_add_cpu(&es->s_blocks_count, input->blocks_count); |
895 | input->blocks_count); | 893 | le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb)); |
896 | es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) + | ||
897 | EXT3_INODES_PER_GROUP(sb)); | ||
898 | 894 | ||
899 | /* | 895 | /* |
900 | * We need to protect s_groups_count against other CPUs seeing | 896 | * We need to protect s_groups_count against other CPUs seeing |
@@ -927,8 +923,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
927 | 923 | ||
928 | /* Update the reserved block counts only once the new group is | 924 | /* Update the reserved block counts only once the new group is |
929 | * active. */ | 925 | * active. */ |
930 | es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) + | 926 | le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks); |
931 | input->reserved_blocks); | ||
932 | 927 | ||
933 | /* Update the free space counts */ | 928 | /* Update the free space counts */ |
934 | percpu_counter_add(&sbi->s_freeblocks_counter, | 929 | percpu_counter_add(&sbi->s_freeblocks_counter, |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index f3675cc630e9..18769cc32377 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -575,16 +575,16 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
575 | le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) { | 575 | le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) { |
576 | seq_printf(seq, ",resgid=%u", sbi->s_resgid); | 576 | seq_printf(seq, ",resgid=%u", sbi->s_resgid); |
577 | } | 577 | } |
578 | if (test_opt(sb, ERRORS_CONT)) { | 578 | if (test_opt(sb, ERRORS_RO)) { |
579 | int def_errors = le16_to_cpu(es->s_errors); | 579 | int def_errors = le16_to_cpu(es->s_errors); |
580 | 580 | ||
581 | if (def_errors == EXT3_ERRORS_PANIC || | 581 | if (def_errors == EXT3_ERRORS_PANIC || |
582 | def_errors == EXT3_ERRORS_RO) { | 582 | def_errors == EXT3_ERRORS_CONTINUE) { |
583 | seq_puts(seq, ",errors=continue"); | 583 | seq_puts(seq, ",errors=remount-ro"); |
584 | } | 584 | } |
585 | } | 585 | } |
586 | if (test_opt(sb, ERRORS_RO)) | 586 | if (test_opt(sb, ERRORS_CONT)) |
587 | seq_puts(seq, ",errors=remount-ro"); | 587 | seq_puts(seq, ",errors=continue"); |
588 | if (test_opt(sb, ERRORS_PANIC)) | 588 | if (test_opt(sb, ERRORS_PANIC)) |
589 | seq_puts(seq, ",errors=panic"); | 589 | seq_puts(seq, ",errors=panic"); |
590 | if (test_opt(sb, NO_UID32)) | 590 | if (test_opt(sb, NO_UID32)) |
@@ -649,11 +649,10 @@ static struct inode *ext3_nfs_get_inode(struct super_block *sb, | |||
649 | * Currently we don't know the generation for parent directory, so | 649 | * Currently we don't know the generation for parent directory, so |
650 | * a generation of 0 means "accept any" | 650 | * a generation of 0 means "accept any" |
651 | */ | 651 | */ |
652 | inode = iget(sb, ino); | 652 | inode = ext3_iget(sb, ino); |
653 | if (inode == NULL) | 653 | if (IS_ERR(inode)) |
654 | return ERR_PTR(-ENOMEM); | 654 | return ERR_CAST(inode); |
655 | if (is_bad_inode(inode) || | 655 | if (generation && inode->i_generation != generation) { |
656 | (generation && inode->i_generation != generation)) { | ||
657 | iput(inode); | 656 | iput(inode); |
658 | return ERR_PTR(-ESTALE); | 657 | return ERR_PTR(-ESTALE); |
659 | } | 658 | } |
@@ -722,7 +721,6 @@ static struct quotactl_ops ext3_qctl_operations = { | |||
722 | static const struct super_operations ext3_sops = { | 721 | static const struct super_operations ext3_sops = { |
723 | .alloc_inode = ext3_alloc_inode, | 722 | .alloc_inode = ext3_alloc_inode, |
724 | .destroy_inode = ext3_destroy_inode, | 723 | .destroy_inode = ext3_destroy_inode, |
725 | .read_inode = ext3_read_inode, | ||
726 | .write_inode = ext3_write_inode, | 724 | .write_inode = ext3_write_inode, |
727 | .dirty_inode = ext3_dirty_inode, | 725 | .dirty_inode = ext3_dirty_inode, |
728 | .delete_inode = ext3_delete_inode, | 726 | .delete_inode = ext3_delete_inode, |
@@ -1224,7 +1222,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, | |||
1224 | #endif | 1222 | #endif |
1225 | if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) | 1223 | if (!(__s16) le16_to_cpu(es->s_max_mnt_count)) |
1226 | es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT); | 1224 | es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT); |
1227 | es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1); | 1225 | le16_add_cpu(&es->s_mnt_count, 1); |
1228 | es->s_mtime = cpu_to_le32(get_seconds()); | 1226 | es->s_mtime = cpu_to_le32(get_seconds()); |
1229 | ext3_update_dynamic_rev(sb); | 1227 | ext3_update_dynamic_rev(sb); |
1230 | EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 1228 | EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
@@ -1252,28 +1250,24 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, | |||
1252 | } | 1250 | } |
1253 | 1251 | ||
1254 | /* Called at mount-time, super-block is locked */ | 1252 | /* Called at mount-time, super-block is locked */ |
1255 | static int ext3_check_descriptors (struct super_block * sb) | 1253 | static int ext3_check_descriptors(struct super_block *sb) |
1256 | { | 1254 | { |
1257 | struct ext3_sb_info *sbi = EXT3_SB(sb); | 1255 | struct ext3_sb_info *sbi = EXT3_SB(sb); |
1258 | ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | 1256 | ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
1259 | ext3_fsblk_t last_block; | 1257 | ext3_fsblk_t last_block; |
1260 | struct ext3_group_desc * gdp = NULL; | ||
1261 | int desc_block = 0; | ||
1262 | int i; | 1258 | int i; |
1263 | 1259 | ||
1264 | ext3_debug ("Checking group descriptors"); | 1260 | ext3_debug ("Checking group descriptors"); |
1265 | 1261 | ||
1266 | for (i = 0; i < sbi->s_groups_count; i++) | 1262 | for (i = 0; i < sbi->s_groups_count; i++) { |
1267 | { | 1263 | struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL); |
1264 | |||
1268 | if (i == sbi->s_groups_count - 1) | 1265 | if (i == sbi->s_groups_count - 1) |
1269 | last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; | 1266 | last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; |
1270 | else | 1267 | else |
1271 | last_block = first_block + | 1268 | last_block = first_block + |
1272 | (EXT3_BLOCKS_PER_GROUP(sb) - 1); | 1269 | (EXT3_BLOCKS_PER_GROUP(sb) - 1); |
1273 | 1270 | ||
1274 | if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0) | ||
1275 | gdp = (struct ext3_group_desc *) | ||
1276 | sbi->s_group_desc[desc_block++]->b_data; | ||
1277 | if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || | 1271 | if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || |
1278 | le32_to_cpu(gdp->bg_block_bitmap) > last_block) | 1272 | le32_to_cpu(gdp->bg_block_bitmap) > last_block) |
1279 | { | 1273 | { |
@@ -1306,7 +1300,6 @@ static int ext3_check_descriptors (struct super_block * sb) | |||
1306 | return 0; | 1300 | return 0; |
1307 | } | 1301 | } |
1308 | first_block += EXT3_BLOCKS_PER_GROUP(sb); | 1302 | first_block += EXT3_BLOCKS_PER_GROUP(sb); |
1309 | gdp++; | ||
1310 | } | 1303 | } |
1311 | 1304 | ||
1312 | sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb)); | 1305 | sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb)); |
@@ -1383,8 +1376,8 @@ static void ext3_orphan_cleanup (struct super_block * sb, | |||
1383 | while (es->s_last_orphan) { | 1376 | while (es->s_last_orphan) { |
1384 | struct inode *inode; | 1377 | struct inode *inode; |
1385 | 1378 | ||
1386 | if (!(inode = | 1379 | inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); |
1387 | ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) { | 1380 | if (IS_ERR(inode)) { |
1388 | es->s_last_orphan = 0; | 1381 | es->s_last_orphan = 0; |
1389 | break; | 1382 | break; |
1390 | } | 1383 | } |
@@ -1513,6 +1506,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1513 | int db_count; | 1506 | int db_count; |
1514 | int i; | 1507 | int i; |
1515 | int needs_recovery; | 1508 | int needs_recovery; |
1509 | int ret = -EINVAL; | ||
1516 | __le32 features; | 1510 | __le32 features; |
1517 | int err; | 1511 | int err; |
1518 | 1512 | ||
@@ -1583,10 +1577,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1583 | 1577 | ||
1584 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) | 1578 | if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) |
1585 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); | 1579 | set_opt(sbi->s_mount_opt, ERRORS_PANIC); |
1586 | else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO) | 1580 | else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE) |
1587 | set_opt(sbi->s_mount_opt, ERRORS_RO); | ||
1588 | else | ||
1589 | set_opt(sbi->s_mount_opt, ERRORS_CONT); | 1581 | set_opt(sbi->s_mount_opt, ERRORS_CONT); |
1582 | else | ||
1583 | set_opt(sbi->s_mount_opt, ERRORS_RO); | ||
1590 | 1584 | ||
1591 | sbi->s_resuid = le16_to_cpu(es->s_def_resuid); | 1585 | sbi->s_resuid = le16_to_cpu(es->s_def_resuid); |
1592 | sbi->s_resgid = le16_to_cpu(es->s_def_resgid); | 1586 | sbi->s_resgid = le16_to_cpu(es->s_def_resgid); |
@@ -1882,19 +1876,24 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1882 | * so we can safely mount the rest of the filesystem now. | 1876 | * so we can safely mount the rest of the filesystem now. |
1883 | */ | 1877 | */ |
1884 | 1878 | ||
1885 | root = iget(sb, EXT3_ROOT_INO); | 1879 | root = ext3_iget(sb, EXT3_ROOT_INO); |
1886 | sb->s_root = d_alloc_root(root); | 1880 | if (IS_ERR(root)) { |
1887 | if (!sb->s_root) { | ||
1888 | printk(KERN_ERR "EXT3-fs: get root inode failed\n"); | 1881 | printk(KERN_ERR "EXT3-fs: get root inode failed\n"); |
1889 | iput(root); | 1882 | ret = PTR_ERR(root); |
1890 | goto failed_mount4; | 1883 | goto failed_mount4; |
1891 | } | 1884 | } |
1892 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { | 1885 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { |
1893 | dput(sb->s_root); | 1886 | iput(root); |
1894 | sb->s_root = NULL; | ||
1895 | printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n"); | 1887 | printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n"); |
1896 | goto failed_mount4; | 1888 | goto failed_mount4; |
1897 | } | 1889 | } |
1890 | sb->s_root = d_alloc_root(root); | ||
1891 | if (!sb->s_root) { | ||
1892 | printk(KERN_ERR "EXT3-fs: get root dentry failed\n"); | ||
1893 | iput(root); | ||
1894 | ret = -ENOMEM; | ||
1895 | goto failed_mount4; | ||
1896 | } | ||
1898 | 1897 | ||
1899 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); | 1898 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); |
1900 | /* | 1899 | /* |
@@ -1946,7 +1945,7 @@ out_fail: | |||
1946 | sb->s_fs_info = NULL; | 1945 | sb->s_fs_info = NULL; |
1947 | kfree(sbi); | 1946 | kfree(sbi); |
1948 | lock_kernel(); | 1947 | lock_kernel(); |
1949 | return -EINVAL; | 1948 | return ret; |
1950 | } | 1949 | } |
1951 | 1950 | ||
1952 | /* | 1951 | /* |
@@ -1982,8 +1981,8 @@ static journal_t *ext3_get_journal(struct super_block *sb, | |||
1982 | * things happen if we iget() an unused inode, as the subsequent | 1981 | * things happen if we iget() an unused inode, as the subsequent |
1983 | * iput() will try to delete it. */ | 1982 | * iput() will try to delete it. */ |
1984 | 1983 | ||
1985 | journal_inode = iget(sb, journal_inum); | 1984 | journal_inode = ext3_iget(sb, journal_inum); |
1986 | if (!journal_inode) { | 1985 | if (IS_ERR(journal_inode)) { |
1987 | printk(KERN_ERR "EXT3-fs: no journal found.\n"); | 1986 | printk(KERN_ERR "EXT3-fs: no journal found.\n"); |
1988 | return NULL; | 1987 | return NULL; |
1989 | } | 1988 | } |
@@ -1996,7 +1995,7 @@ static journal_t *ext3_get_journal(struct super_block *sb, | |||
1996 | 1995 | ||
1997 | jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", | 1996 | jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", |
1998 | journal_inode, journal_inode->i_size); | 1997 | journal_inode, journal_inode->i_size); |
1999 | if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) { | 1998 | if (!S_ISREG(journal_inode->i_mode)) { |
2000 | printk(KERN_ERR "EXT3-fs: invalid journal inode.\n"); | 1999 | printk(KERN_ERR "EXT3-fs: invalid journal inode.\n"); |
2001 | iput(journal_inode); | 2000 | iput(journal_inode); |
2002 | return NULL; | 2001 | return NULL; |
@@ -2759,16 +2758,16 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, | |||
2759 | if (err) | 2758 | if (err) |
2760 | return err; | 2759 | return err; |
2761 | /* Quotafile not on the same filesystem? */ | 2760 | /* Quotafile not on the same filesystem? */ |
2762 | if (nd.mnt->mnt_sb != sb) { | 2761 | if (nd.path.mnt->mnt_sb != sb) { |
2763 | path_release(&nd); | 2762 | path_put(&nd.path); |
2764 | return -EXDEV; | 2763 | return -EXDEV; |
2765 | } | 2764 | } |
2766 | /* Quotafile not of fs root? */ | 2765 | /* Quotafile not of fs root? */ |
2767 | if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) | 2766 | if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode) |
2768 | printk(KERN_WARNING | 2767 | printk(KERN_WARNING |
2769 | "EXT3-fs: Quota file not on filesystem root. " | 2768 | "EXT3-fs: Quota file not on filesystem root. " |
2770 | "Journalled quota will not work.\n"); | 2769 | "Journalled quota will not work.\n"); |
2771 | path_release(&nd); | 2770 | path_put(&nd.path); |
2772 | return vfs_quota_on(sb, type, format_id, path); | 2771 | return vfs_quota_on(sb, type, format_id, path); |
2773 | } | 2772 | } |
2774 | 2773 | ||
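For reference, a minimal sketch of the struct path usage the ext3_quota_on() hunk above switches to: the vfsmount/dentry pair now lives in nd.path and is dropped with path_put() instead of path_release(). The wrapper name is illustrative, error handling is abbreviated, and path_lookup() is assumed from the surrounding (unshown) context of that function.

#include <linux/fs.h>
#include <linux/namei.h>

static int quotafile_on_this_sb(struct super_block *sb, char *path)
{
	struct nameidata nd;
	int err;

	err = path_lookup(path, LOOKUP_FOLLOW, &nd);
	if (err)
		return err;
	/* Quota file must live on the filesystem being configured. */
	if (nd.path.mnt->mnt_sb != sb) {
		path_put(&nd.path);
		return -EXDEV;
	}
	path_put(&nd.path);
	return 0;
}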
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index 408373819e34..fb89c299bece 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c | |||
@@ -492,8 +492,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode, | |||
492 | get_bh(bh); | 492 | get_bh(bh); |
493 | ext3_forget(handle, 1, inode, bh, bh->b_blocknr); | 493 | ext3_forget(handle, 1, inode, bh, bh->b_blocknr); |
494 | } else { | 494 | } else { |
495 | BHDR(bh)->h_refcount = cpu_to_le32( | 495 | le32_add_cpu(&BHDR(bh)->h_refcount, -1); |
496 | le32_to_cpu(BHDR(bh)->h_refcount) - 1); | ||
497 | error = ext3_journal_dirty_metadata(handle, bh); | 496 | error = ext3_journal_dirty_metadata(handle, bh); |
498 | if (IS_SYNC(inode)) | 497 | if (IS_SYNC(inode)) |
499 | handle->h_sync = 1; | 498 | handle->h_sync = 1; |
@@ -780,8 +779,7 @@ inserted: | |||
780 | if (error) | 779 | if (error) |
781 | goto cleanup_dquot; | 780 | goto cleanup_dquot; |
782 | lock_buffer(new_bh); | 781 | lock_buffer(new_bh); |
783 | BHDR(new_bh)->h_refcount = cpu_to_le32(1 + | 782 | le32_add_cpu(&BHDR(new_bh)->h_refcount, 1); |
784 | le32_to_cpu(BHDR(new_bh)->h_refcount)); | ||
785 | ea_bdebug(new_bh, "reusing; refcount now=%d", | 783 | ea_bdebug(new_bh, "reusing; refcount now=%d", |
786 | le32_to_cpu(BHDR(new_bh)->h_refcount)); | 784 | le32_to_cpu(BHDR(new_bh)->h_refcount)); |
787 | unlock_buffer(new_bh); | 785 | unlock_buffer(new_bh); |
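The two xattr hunks above (and the earlier s_mnt_count change in ext3_setup_super) replace open-coded read-modify-write of little-endian on-disk counters with the le*_add_cpu() helpers. A userspace re-implementation, for illustration only, of what le32_add_cpu() does; the in-kernel helper lives in the byteorder headers.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t le32;	/* stand-in for the kernel's __le32 */

static uint32_t le32_to_cpu(le32 v)
{
	const uint8_t *b = (const uint8_t *)&v;
	return b[0] | (b[1] << 8) | ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static le32 cpu_to_le32(uint32_t v)
{
	le32 out;
	uint8_t *b = (uint8_t *)&out;
	b[0] = v; b[1] = v >> 8; b[2] = v >> 16; b[3] = v >> 24;
	return out;
}

/* le32_add_cpu(): add a host-order delta to a little-endian field in place. */
static void le32_add_cpu(le32 *var, uint32_t val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + val);
}

int main(void)
{
	le32 refcount = cpu_to_le32(2);

	le32_add_cpu(&refcount, -1);		/* drop one reference */
	printf("%u\n", le32_to_cpu(refcount));	/* prints 1 */
	return 0;
}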
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index ac75ea953d83..0737e05ba3dd 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -1700,7 +1700,7 @@ retry_alloc: | |||
1700 | 1700 | ||
1701 | /* | 1701 | /* |
1702 | * Now search the rest of the groups. We assume that | 1702 | * Now search the rest of the groups. We assume that |
1703 | * i and gdp correctly point to the last group visited. | 1703 | * group_no and gdp correctly point to the last group visited. |
1704 | */ | 1704 | */ |
1705 | for (bgi = 0; bgi < ngroups; bgi++) { | 1705 | for (bgi = 0; bgi < ngroups; bgi++) { |
1706 | group_no++; | 1706 | group_no++; |
@@ -2011,11 +2011,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, | |||
2011 | static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, | 2011 | static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, |
2012 | ext4_group_t group) | 2012 | ext4_group_t group) |
2013 | { | 2013 | { |
2014 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, | 2014 | return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0; |
2015 | EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) && | ||
2016 | !ext4_group_sparse(group)) | ||
2017 | return 0; | ||
2018 | return EXT4_SB(sb)->s_gdb_count; | ||
2019 | } | 2015 | } |
2020 | 2016 | ||
2021 | /** | 2017 | /** |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 575b5215c808..da18a74b966a 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -782,14 +782,15 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
782 | unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); | 782 | unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count); |
783 | ext4_group_t block_group; | 783 | ext4_group_t block_group; |
784 | int bit; | 784 | int bit; |
785 | struct buffer_head *bitmap_bh = NULL; | 785 | struct buffer_head *bitmap_bh; |
786 | struct inode *inode = NULL; | 786 | struct inode *inode = NULL; |
787 | long err = -EIO; | ||
787 | 788 | ||
788 | /* Error cases - e2fsck has already cleaned up for us */ | 789 | /* Error cases - e2fsck has already cleaned up for us */ |
789 | if (ino > max_ino) { | 790 | if (ino > max_ino) { |
790 | ext4_warning(sb, __FUNCTION__, | 791 | ext4_warning(sb, __FUNCTION__, |
791 | "bad orphan ino %lu! e2fsck was run?", ino); | 792 | "bad orphan ino %lu! e2fsck was run?", ino); |
792 | goto out; | 793 | goto error; |
793 | } | 794 | } |
794 | 795 | ||
795 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); | 796 | block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); |
@@ -798,38 +799,49 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino) | |||
798 | if (!bitmap_bh) { | 799 | if (!bitmap_bh) { |
799 | ext4_warning(sb, __FUNCTION__, | 800 | ext4_warning(sb, __FUNCTION__, |
800 | "inode bitmap error for orphan %lu", ino); | 801 | "inode bitmap error for orphan %lu", ino); |
801 | goto out; | 802 | goto error; |
802 | } | 803 | } |
803 | 804 | ||
804 | /* Having the inode bit set should be a 100% indicator that this | 805 | /* Having the inode bit set should be a 100% indicator that this |
805 | * is a valid orphan (no e2fsck run on fs). Orphans also include | 806 | * is a valid orphan (no e2fsck run on fs). Orphans also include |
806 | * inodes that were being truncated, so we can't check i_nlink==0. | 807 | * inodes that were being truncated, so we can't check i_nlink==0. |
807 | */ | 808 | */ |
808 | if (!ext4_test_bit(bit, bitmap_bh->b_data) || | 809 | if (!ext4_test_bit(bit, bitmap_bh->b_data)) |
809 | !(inode = iget(sb, ino)) || is_bad_inode(inode) || | 810 | goto bad_orphan; |
810 | NEXT_ORPHAN(inode) > max_ino) { | 811 | |
811 | ext4_warning(sb, __FUNCTION__, | 812 | inode = ext4_iget(sb, ino); |
812 | "bad orphan inode %lu! e2fsck was run?", ino); | 813 | if (IS_ERR(inode)) |
813 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", | 814 | goto iget_failed; |
814 | bit, (unsigned long long)bitmap_bh->b_blocknr, | 815 | |
815 | ext4_test_bit(bit, bitmap_bh->b_data)); | 816 | if (NEXT_ORPHAN(inode) > max_ino) |
816 | printk(KERN_NOTICE "inode=%p\n", inode); | 817 | goto bad_orphan; |
817 | if (inode) { | 818 | brelse(bitmap_bh); |
818 | printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", | 819 | return inode; |
819 | is_bad_inode(inode)); | 820 | |
820 | printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", | 821 | iget_failed: |
821 | NEXT_ORPHAN(inode)); | 822 | err = PTR_ERR(inode); |
822 | printk(KERN_NOTICE "max_ino=%lu\n", max_ino); | 823 | inode = NULL; |
823 | } | 824 | bad_orphan: |
825 | ext4_warning(sb, __FUNCTION__, | ||
826 | "bad orphan inode %lu! e2fsck was run?", ino); | ||
827 | printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n", | ||
828 | bit, (unsigned long long)bitmap_bh->b_blocknr, | ||
829 | ext4_test_bit(bit, bitmap_bh->b_data)); | ||
830 | printk(KERN_NOTICE "inode=%p\n", inode); | ||
831 | if (inode) { | ||
832 | printk(KERN_NOTICE "is_bad_inode(inode)=%d\n", | ||
833 | is_bad_inode(inode)); | ||
834 | printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n", | ||
835 | NEXT_ORPHAN(inode)); | ||
836 | printk(KERN_NOTICE "max_ino=%lu\n", max_ino); | ||
824 | /* Avoid freeing blocks if we got a bad deleted inode */ | 837 | /* Avoid freeing blocks if we got a bad deleted inode */ |
825 | if (inode && inode->i_nlink == 0) | 838 | if (inode->i_nlink == 0) |
826 | inode->i_blocks = 0; | 839 | inode->i_blocks = 0; |
827 | iput(inode); | 840 | iput(inode); |
828 | inode = NULL; | ||
829 | } | 841 | } |
830 | out: | ||
831 | brelse(bitmap_bh); | 842 | brelse(bitmap_bh); |
832 | return inode; | 843 | error: |
844 | return ERR_PTR(err); | ||
833 | } | 845 | } |
834 | 846 | ||
835 | unsigned long ext4_count_free_inodes (struct super_block * sb) | 847 | unsigned long ext4_count_free_inodes (struct super_block * sb) |
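With ext4_orphan_get() now failing via ERR_PTR() rather than NULL, callers test IS_ERR(), mirroring the ext3_orphan_cleanup hunk earlier in this patch. A hedged caller-side sketch; the wrapper name is illustrative and the loop body is elided.

#include <linux/fs.h>
#include <linux/err.h>

static void orphan_cleanup_sketch(struct super_block *sb,
				  struct ext4_super_block *es)
{
	while (es->s_last_orphan) {
		struct inode *inode;

		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {	/* carries -EIO etc. instead of NULL */
			es->s_last_orphan = 0;
			break;
		}
		/* ... resume the pending truncate or delete; doing so unlinks
		 * the inode from the on-disk orphan list ... */
		iput(inode);
	}
}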
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 05c4145dd27d..7dd9b50d5ebc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -429,16 +429,13 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) | |||
429 | * ext4_find_goal - find a prefered place for allocation. | 429 | * ext4_find_goal - find a prefered place for allocation. |
430 | * @inode: owner | 430 | * @inode: owner |
431 | * @block: block we want | 431 | * @block: block we want |
432 | * @chain: chain of indirect blocks | ||
433 | * @partial: pointer to the last triple within a chain | 432 | * @partial: pointer to the last triple within a chain |
434 | * @goal: place to store the result. | ||
435 | * | 433 | * |
436 | * Normally this function find the prefered place for block allocation, | 434 | * Normally this function find the prefered place for block allocation, |
437 | * stores it in *@goal and returns zero. | 435 | * returns it. |
438 | */ | 436 | */ |
439 | |||
440 | static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, | 437 | static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, |
441 | Indirect chain[4], Indirect *partial) | 438 | Indirect *partial) |
442 | { | 439 | { |
443 | struct ext4_block_alloc_info *block_i; | 440 | struct ext4_block_alloc_info *block_i; |
444 | 441 | ||
@@ -839,7 +836,7 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
839 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) | 836 | if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) |
840 | ext4_init_block_alloc_info(inode); | 837 | ext4_init_block_alloc_info(inode); |
841 | 838 | ||
842 | goal = ext4_find_goal(inode, iblock, chain, partial); | 839 | goal = ext4_find_goal(inode, iblock, partial); |
843 | 840 | ||
844 | /* the number of blocks need to allocate for [d,t]indirect blocks */ | 841 | /* the number of blocks need to allocate for [d,t]indirect blocks */ |
845 | indirect_blks = (chain + depth) - partial - 1; | 842 | indirect_blks = (chain + depth) - partial - 1; |
@@ -895,7 +892,16 @@ out: | |||
895 | return err; | 892 | return err; |
896 | } | 893 | } |
897 | 894 | ||
898 | #define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32) | 895 | /* Maximum number of blocks we map for direct IO at once. */ |
896 | #define DIO_MAX_BLOCKS 4096 | ||
897 | /* | ||
898 | * Number of credits we need for writing DIO_MAX_BLOCKS: | ||
899 | * We need sb + group descriptor + bitmap + inode -> 4 | ||
900 | * For B blocks with A block pointers per block we need: | ||
901 | * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect). | ||
902 | * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25. | ||
903 | */ | ||
904 | #define DIO_CREDITS 25 | ||
899 | 905 | ||
900 | int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, | 906 | int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, |
901 | unsigned long max_blocks, struct buffer_head *bh, | 907 | unsigned long max_blocks, struct buffer_head *bh, |
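The DIO_CREDITS comment above can be checked by plugging in its own numbers. A small standalone program (not kernel code) doing the arithmetic under the stated assumptions, B = 4096 mapped blocks and A = 256 block pointers per indirect block:

#include <stdio.h>

int main(void)
{
	int B = 4096, A = 256;
	int fixed  = 4;			/* sb + group desc + bitmap + inode  */
	int triple = 1;			/* one triple-indirect block         */
	int dbl    = B / A / A + 2;	/* doubly-indirect blocks (+2 slack) */
	int ind    = B / A + 2;		/* singly-indirect blocks (+2 slack) */

	printf("credits = %d\n", fixed + triple + dbl + ind);	/* prints 25 */
	return 0;
}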
@@ -942,49 +948,31 @@ static int ext4_get_block(struct inode *inode, sector_t iblock, | |||
942 | struct buffer_head *bh_result, int create) | 948 | struct buffer_head *bh_result, int create) |
943 | { | 949 | { |
944 | handle_t *handle = ext4_journal_current_handle(); | 950 | handle_t *handle = ext4_journal_current_handle(); |
945 | int ret = 0; | 951 | int ret = 0, started = 0; |
946 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | 952 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; |
947 | 953 | ||
948 | if (!create) | 954 | if (create && !handle) { |
949 | goto get_block; /* A read */ | 955 | /* Direct IO write... */ |
950 | 956 | if (max_blocks > DIO_MAX_BLOCKS) | |
951 | if (max_blocks == 1) | 957 | max_blocks = DIO_MAX_BLOCKS; |
952 | goto get_block; /* A single block get */ | 958 | handle = ext4_journal_start(inode, DIO_CREDITS + |
953 | 959 | 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)); | |
954 | if (handle->h_transaction->t_state == T_LOCKED) { | 960 | if (IS_ERR(handle)) { |
955 | /* | ||
956 | * Huge direct-io writes can hold off commits for long | ||
957 | * periods of time. Let this commit run. | ||
958 | */ | ||
959 | ext4_journal_stop(handle); | ||
960 | handle = ext4_journal_start(inode, DIO_CREDITS); | ||
961 | if (IS_ERR(handle)) | ||
962 | ret = PTR_ERR(handle); | 961 | ret = PTR_ERR(handle); |
963 | goto get_block; | 962 | goto out; |
964 | } | ||
965 | |||
966 | if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) { | ||
967 | /* | ||
968 | * Getting low on buffer credits... | ||
969 | */ | ||
970 | ret = ext4_journal_extend(handle, DIO_CREDITS); | ||
971 | if (ret > 0) { | ||
972 | /* | ||
973 | * Couldn't extend the transaction. Start a new one. | ||
974 | */ | ||
975 | ret = ext4_journal_restart(handle, DIO_CREDITS); | ||
976 | } | 963 | } |
964 | started = 1; | ||
977 | } | 965 | } |
978 | 966 | ||
979 | get_block: | 967 | ret = ext4_get_blocks_wrap(handle, inode, iblock, |
980 | if (ret == 0) { | ||
981 | ret = ext4_get_blocks_wrap(handle, inode, iblock, | ||
982 | max_blocks, bh_result, create, 0); | 968 | max_blocks, bh_result, create, 0); |
983 | if (ret > 0) { | 969 | if (ret > 0) { |
984 | bh_result->b_size = (ret << inode->i_blkbits); | 970 | bh_result->b_size = (ret << inode->i_blkbits); |
985 | ret = 0; | 971 | ret = 0; |
986 | } | ||
987 | } | 972 | } |
973 | if (started) | ||
974 | ext4_journal_stop(handle); | ||
975 | out: | ||
988 | return ret; | 976 | return ret; |
989 | } | 977 | } |
990 | 978 | ||
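Read as a whole, the new ext4_get_block() is far shorter than the old credit-juggling version: a direct-IO write that arrives without a running handle starts its own bounded transaction and stops it before returning. A consolidated view reconstructed from the hunk above; the _sketch suffix only marks it as a reading aid, not the in-tree symbol.

static int ext4_get_block_sketch(struct inode *inode, sector_t iblock,
				 struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext4_journal_current_handle();
	int ret = 0, started = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (create && !handle) {		/* direct IO write */
		if (max_blocks > DIO_MAX_BLOCKS)
			max_blocks = DIO_MAX_BLOCKS;
		handle = ext4_journal_start(inode, DIO_CREDITS +
				2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		started = 1;
	}

	ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
				   bh_result, create, 0);
	if (ret > 0) {
		bh_result->b_size = (ret << inode->i_blkbits);
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
out:
	return ret;
}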
@@ -1674,7 +1662,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait) | |||
1674 | * if the machine crashes during the write. | 1662 | * if the machine crashes during the write. |
1675 | * | 1663 | * |
1676 | * If the O_DIRECT write is intantiating holes inside i_size and the machine | 1664 | * If the O_DIRECT write is intantiating holes inside i_size and the machine |
1677 | * crashes then stale disk data _may_ be exposed inside the file. | 1665 | * crashes then stale disk data _may_ be exposed inside the file. But current |
1666 | * VFS code falls back into buffered path in that case so we are safe. | ||
1678 | */ | 1667 | */ |
1679 | static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | 1668 | static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, |
1680 | const struct iovec *iov, loff_t offset, | 1669 | const struct iovec *iov, loff_t offset, |
@@ -1683,7 +1672,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | |||
1683 | struct file *file = iocb->ki_filp; | 1672 | struct file *file = iocb->ki_filp; |
1684 | struct inode *inode = file->f_mapping->host; | 1673 | struct inode *inode = file->f_mapping->host; |
1685 | struct ext4_inode_info *ei = EXT4_I(inode); | 1674 | struct ext4_inode_info *ei = EXT4_I(inode); |
1686 | handle_t *handle = NULL; | 1675 | handle_t *handle; |
1687 | ssize_t ret; | 1676 | ssize_t ret; |
1688 | int orphan = 0; | 1677 | int orphan = 0; |
1689 | size_t count = iov_length(iov, nr_segs); | 1678 | size_t count = iov_length(iov, nr_segs); |
@@ -1691,17 +1680,21 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | |||
1691 | if (rw == WRITE) { | 1680 | if (rw == WRITE) { |
1692 | loff_t final_size = offset + count; | 1681 | loff_t final_size = offset + count; |
1693 | 1682 | ||
1694 | handle = ext4_journal_start(inode, DIO_CREDITS); | ||
1695 | if (IS_ERR(handle)) { | ||
1696 | ret = PTR_ERR(handle); | ||
1697 | goto out; | ||
1698 | } | ||
1699 | if (final_size > inode->i_size) { | 1683 | if (final_size > inode->i_size) { |
1684 | /* Credits for sb + inode write */ | ||
1685 | handle = ext4_journal_start(inode, 2); | ||
1686 | if (IS_ERR(handle)) { | ||
1687 | ret = PTR_ERR(handle); | ||
1688 | goto out; | ||
1689 | } | ||
1700 | ret = ext4_orphan_add(handle, inode); | 1690 | ret = ext4_orphan_add(handle, inode); |
1701 | if (ret) | 1691 | if (ret) { |
1702 | goto out_stop; | 1692 | ext4_journal_stop(handle); |
1693 | goto out; | ||
1694 | } | ||
1703 | orphan = 1; | 1695 | orphan = 1; |
1704 | ei->i_disksize = inode->i_size; | 1696 | ei->i_disksize = inode->i_size; |
1697 | ext4_journal_stop(handle); | ||
1705 | } | 1698 | } |
1706 | } | 1699 | } |
1707 | 1700 | ||
@@ -1709,18 +1702,21 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, | |||
1709 | offset, nr_segs, | 1702 | offset, nr_segs, |
1710 | ext4_get_block, NULL); | 1703 | ext4_get_block, NULL); |
1711 | 1704 | ||
1712 | /* | 1705 | if (orphan) { |
1713 | * Reacquire the handle: ext4_get_block() can restart the transaction | ||
1714 | */ | ||
1715 | handle = ext4_journal_current_handle(); | ||
1716 | |||
1717 | out_stop: | ||
1718 | if (handle) { | ||
1719 | int err; | 1706 | int err; |
1720 | 1707 | ||
1721 | if (orphan && inode->i_nlink) | 1708 | /* Credits for sb + inode write */ |
1709 | handle = ext4_journal_start(inode, 2); | ||
1710 | if (IS_ERR(handle)) { | ||
1711 | /* This is really bad luck. We've written the data | ||
1712 | * but cannot extend i_size. Bail out and pretend | ||
1713 | * the write failed... */ | ||
1714 | ret = PTR_ERR(handle); | ||
1715 | goto out; | ||
1716 | } | ||
1717 | if (inode->i_nlink) | ||
1722 | ext4_orphan_del(handle, inode); | 1718 | ext4_orphan_del(handle, inode); |
1723 | if (orphan && ret > 0) { | 1719 | if (ret > 0) { |
1724 | loff_t end = offset + ret; | 1720 | loff_t end = offset + ret; |
1725 | if (end > inode->i_size) { | 1721 | if (end > inode->i_size) { |
1726 | ei->i_disksize = end; | 1722 | ei->i_disksize = end; |
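The net effect of the ext4_direct_IO() hunks is that no journal handle is held across blockdev_direct_IO() any more: one short transaction brackets the orphan add before the IO, and a second one handles orphan removal and the i_size/i_disksize update afterwards. An abridged sketch of that ordering, with error paths trimmed and locals as in the hunks; the blockdev_direct_IO() argument list is reproduced from the surrounding function rather than shown in full above.

	if (rw == WRITE && final_size > inode->i_size) {
		handle = ext4_journal_start(inode, 2);	/* sb + inode write */
		ret = ext4_orphan_add(handle, inode);
		orphan = 1;
		ei->i_disksize = inode->i_size;
		ext4_journal_stop(handle);
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs, ext4_get_block, NULL);

	if (orphan) {
		handle = ext4_journal_start(inode, 2);	/* sb + inode write */
		if (inode->i_nlink)
			ext4_orphan_del(handle, inode);
		/* ... raise i_disksize/i_size if the write grew the file ... */
		ext4_journal_stop(handle);
	}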
@@ -2683,21 +2679,31 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, | |||
2683 | } | 2679 | } |
2684 | } | 2680 | } |
2685 | 2681 | ||
2686 | void ext4_read_inode(struct inode * inode) | 2682 | struct inode *ext4_iget(struct super_block *sb, unsigned long ino) |
2687 | { | 2683 | { |
2688 | struct ext4_iloc iloc; | 2684 | struct ext4_iloc iloc; |
2689 | struct ext4_inode *raw_inode; | 2685 | struct ext4_inode *raw_inode; |
2690 | struct ext4_inode_info *ei = EXT4_I(inode); | 2686 | struct ext4_inode_info *ei; |
2691 | struct buffer_head *bh; | 2687 | struct buffer_head *bh; |
2688 | struct inode *inode; | ||
2689 | long ret; | ||
2692 | int block; | 2690 | int block; |
2693 | 2691 | ||
2692 | inode = iget_locked(sb, ino); | ||
2693 | if (!inode) | ||
2694 | return ERR_PTR(-ENOMEM); | ||
2695 | if (!(inode->i_state & I_NEW)) | ||
2696 | return inode; | ||
2697 | |||
2698 | ei = EXT4_I(inode); | ||
2694 | #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL | 2699 | #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL |
2695 | ei->i_acl = EXT4_ACL_NOT_CACHED; | 2700 | ei->i_acl = EXT4_ACL_NOT_CACHED; |
2696 | ei->i_default_acl = EXT4_ACL_NOT_CACHED; | 2701 | ei->i_default_acl = EXT4_ACL_NOT_CACHED; |
2697 | #endif | 2702 | #endif |
2698 | ei->i_block_alloc_info = NULL; | 2703 | ei->i_block_alloc_info = NULL; |
2699 | 2704 | ||
2700 | if (__ext4_get_inode_loc(inode, &iloc, 0)) | 2705 | ret = __ext4_get_inode_loc(inode, &iloc, 0); |
2706 | if (ret < 0) | ||
2701 | goto bad_inode; | 2707 | goto bad_inode; |
2702 | bh = iloc.bh; | 2708 | bh = iloc.bh; |
2703 | raw_inode = ext4_raw_inode(&iloc); | 2709 | raw_inode = ext4_raw_inode(&iloc); |
@@ -2723,6 +2729,7 @@ void ext4_read_inode(struct inode * inode) | |||
2723 | !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { | 2729 | !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { |
2724 | /* this inode is deleted */ | 2730 | /* this inode is deleted */ |
2725 | brelse (bh); | 2731 | brelse (bh); |
2732 | ret = -ESTALE; | ||
2726 | goto bad_inode; | 2733 | goto bad_inode; |
2727 | } | 2734 | } |
2728 | /* The only unlinked inodes we let through here have | 2735 | /* The only unlinked inodes we let through here have |
@@ -2750,17 +2757,12 @@ void ext4_read_inode(struct inode * inode) | |||
2750 | ei->i_data[block] = raw_inode->i_block[block]; | 2757 | ei->i_data[block] = raw_inode->i_block[block]; |
2751 | INIT_LIST_HEAD(&ei->i_orphan); | 2758 | INIT_LIST_HEAD(&ei->i_orphan); |
2752 | 2759 | ||
2753 | if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 && | 2760 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { |
2754 | EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { | ||
2755 | /* | ||
2756 | * When mke2fs creates big inodes it does not zero out | ||
2757 | * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE, | ||
2758 | * so ignore those first few inodes. | ||
2759 | */ | ||
2760 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); | 2761 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); |
2761 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > | 2762 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > |
2762 | EXT4_INODE_SIZE(inode->i_sb)) { | 2763 | EXT4_INODE_SIZE(inode->i_sb)) { |
2763 | brelse (bh); | 2764 | brelse (bh); |
2765 | ret = -EIO; | ||
2764 | goto bad_inode; | 2766 | goto bad_inode; |
2765 | } | 2767 | } |
2766 | if (ei->i_extra_isize == 0) { | 2768 | if (ei->i_extra_isize == 0) { |
@@ -2814,11 +2816,12 @@ void ext4_read_inode(struct inode * inode) | |||
2814 | } | 2816 | } |
2815 | brelse (iloc.bh); | 2817 | brelse (iloc.bh); |
2816 | ext4_set_inode_flags(inode); | 2818 | ext4_set_inode_flags(inode); |
2817 | return; | 2819 | unlock_new_inode(inode); |
2820 | return inode; | ||
2818 | 2821 | ||
2819 | bad_inode: | 2822 | bad_inode: |
2820 | make_bad_inode(inode); | 2823 | iget_failed(inode); |
2821 | return; | 2824 | return ERR_PTR(ret); |
2822 | } | 2825 | } |
2823 | 2826 | ||
2824 | static int ext4_inode_blocks_set(handle_t *handle, | 2827 | static int ext4_inode_blocks_set(handle_t *handle, |
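The ext4_read_inode() to ext4_iget() conversion above is an instance of the generic iget_locked() pattern that the rest of this patch moves callers onto. A skeleton of that pattern, with read_this_inode() as a purely hypothetical stand-in for the filesystem-specific disk read:

#include <linux/fs.h>
#include <linux/err.h>

static struct inode *fs_iget_sketch(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	long ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit, already initialised */

	ret = read_this_inode(inode);	/* hypothetical: fill in from disk */
	if (ret) {
		iget_failed(inode);	/* marks it dead and releases it */
		return ERR_PTR(ret);
	}
	unlock_new_inode(inode);
	return inode;
}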
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 76e5fedc0a0b..dd0fcfcb35ce 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -420,6 +420,7 @@ | |||
420 | #define MB_DEFAULT_GROUP_PREALLOC 512 | 420 | #define MB_DEFAULT_GROUP_PREALLOC 512 |
421 | 421 | ||
422 | static struct kmem_cache *ext4_pspace_cachep; | 422 | static struct kmem_cache *ext4_pspace_cachep; |
423 | static struct kmem_cache *ext4_ac_cachep; | ||
423 | 424 | ||
424 | #ifdef EXT4_BB_MAX_BLOCKS | 425 | #ifdef EXT4_BB_MAX_BLOCKS |
425 | #undef EXT4_BB_MAX_BLOCKS | 426 | #undef EXT4_BB_MAX_BLOCKS |
@@ -680,7 +681,6 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) | |||
680 | { | 681 | { |
681 | char *bb; | 682 | char *bb; |
682 | 683 | ||
683 | /* FIXME!! is this needed */ | ||
684 | BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); | 684 | BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b)); |
685 | BUG_ON(max == NULL); | 685 | BUG_ON(max == NULL); |
686 | 686 | ||
@@ -964,7 +964,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb, | |||
964 | grp->bb_fragments = fragments; | 964 | grp->bb_fragments = fragments; |
965 | 965 | ||
966 | if (free != grp->bb_free) { | 966 | if (free != grp->bb_free) { |
967 | printk(KERN_DEBUG | 967 | ext4_error(sb, __FUNCTION__, |
968 | "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", | 968 | "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", |
969 | group, free, grp->bb_free); | 969 | group, free, grp->bb_free); |
970 | grp->bb_free = free; | 970 | grp->bb_free = free; |
@@ -1821,13 +1821,24 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, | |||
1821 | i = ext4_find_next_zero_bit(bitmap, | 1821 | i = ext4_find_next_zero_bit(bitmap, |
1822 | EXT4_BLOCKS_PER_GROUP(sb), i); | 1822 | EXT4_BLOCKS_PER_GROUP(sb), i); |
1823 | if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { | 1823 | if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { |
1824 | BUG_ON(free != 0); | 1824 | /* |
1825 | * IF we corrupt the bitmap we won't find any | ||
1826 | * free blocks even though group info says | ||
1827 | * we have free blocks | ||
1828 | */ | ||
1829 | ext4_error(sb, __FUNCTION__, "%d free blocks as per " | ||
1830 | "group info. But bitmap says 0\n", | ||
1831 | free); | ||
1825 | break; | 1832 | break; |
1826 | } | 1833 | } |
1827 | 1834 | ||
1828 | mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); | 1835 | mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex); |
1829 | BUG_ON(ex.fe_len <= 0); | 1836 | BUG_ON(ex.fe_len <= 0); |
1830 | BUG_ON(free < ex.fe_len); | 1837 | if (free < ex.fe_len) { |
1838 | ext4_error(sb, __FUNCTION__, "%d free blocks as per " | ||
1839 | "group info. But got %d blocks\n", | ||
1840 | free, ex.fe_len); | ||
1841 | } | ||
1831 | 1842 | ||
1832 | ext4_mb_measure_extent(ac, &ex, e4b); | 1843 | ext4_mb_measure_extent(ac, &ex, e4b); |
1833 | 1844 | ||
@@ -2959,12 +2970,19 @@ int __init init_ext4_mballoc(void) | |||
2959 | if (ext4_pspace_cachep == NULL) | 2970 | if (ext4_pspace_cachep == NULL) |
2960 | return -ENOMEM; | 2971 | return -ENOMEM; |
2961 | 2972 | ||
2973 | ext4_ac_cachep = | ||
2974 | kmem_cache_create("ext4_alloc_context", | ||
2975 | sizeof(struct ext4_allocation_context), | ||
2976 | 0, SLAB_RECLAIM_ACCOUNT, NULL); | ||
2977 | if (ext4_ac_cachep == NULL) { | ||
2978 | kmem_cache_destroy(ext4_pspace_cachep); | ||
2979 | return -ENOMEM; | ||
2980 | } | ||
2962 | #ifdef CONFIG_PROC_FS | 2981 | #ifdef CONFIG_PROC_FS |
2963 | proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs); | 2982 | proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs); |
2964 | if (proc_root_ext4 == NULL) | 2983 | if (proc_root_ext4 == NULL) |
2965 | printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT); | 2984 | printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT); |
2966 | #endif | 2985 | #endif |
2967 | |||
2968 | return 0; | 2986 | return 0; |
2969 | } | 2987 | } |
2970 | 2988 | ||
@@ -2972,6 +2990,7 @@ void exit_ext4_mballoc(void) | |||
2972 | { | 2990 | { |
2973 | /* XXX: synchronize_rcu(); */ | 2991 | /* XXX: synchronize_rcu(); */ |
2974 | kmem_cache_destroy(ext4_pspace_cachep); | 2992 | kmem_cache_destroy(ext4_pspace_cachep); |
2993 | kmem_cache_destroy(ext4_ac_cachep); | ||
2975 | #ifdef CONFIG_PROC_FS | 2994 | #ifdef CONFIG_PROC_FS |
2976 | remove_proc_entry(EXT4_ROOT, proc_root_fs); | 2995 | remove_proc_entry(EXT4_ROOT, proc_root_fs); |
2977 | #endif | 2996 | #endif |
@@ -3069,7 +3088,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
3069 | 3088 | ||
3070 | out_err: | 3089 | out_err: |
3071 | sb->s_dirt = 1; | 3090 | sb->s_dirt = 1; |
3072 | put_bh(bitmap_bh); | 3091 | brelse(bitmap_bh); |
3073 | return err; | 3092 | return err; |
3074 | } | 3093 | } |
3075 | 3094 | ||
@@ -3354,13 +3373,10 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, | |||
3354 | ac->ac_pa = pa; | 3373 | ac->ac_pa = pa; |
3355 | 3374 | ||
3356 | /* we don't correct pa_pstart or pa_plen here to avoid | 3375 | /* we don't correct pa_pstart or pa_plen here to avoid |
3357 | * possible race when tte group is being loaded concurrently | 3376 | * possible race when the group is being loaded concurrently |
3358 | * instead we correct pa later, after blocks are marked | 3377 | * instead we correct pa later, after blocks are marked |
3359 | * in on-disk bitmap -- see ext4_mb_release_context() */ | 3378 | * in on-disk bitmap -- see ext4_mb_release_context() |
3360 | /* | 3379 | * Other CPUs are prevented from allocating from this pa by lg_mutex |
3361 | * FIXME!! but the other CPUs can look at this particular | ||
3362 | * pa and think that it have enought free blocks if we | ||
3363 | * don't update pa_free here right ? | ||
3364 | */ | 3380 | */ |
3365 | mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); | 3381 | mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); |
3366 | } | 3382 | } |
@@ -3699,7 +3715,7 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3699 | struct buffer_head *bitmap_bh, | 3715 | struct buffer_head *bitmap_bh, |
3700 | struct ext4_prealloc_space *pa) | 3716 | struct ext4_prealloc_space *pa) |
3701 | { | 3717 | { |
3702 | struct ext4_allocation_context ac; | 3718 | struct ext4_allocation_context *ac; |
3703 | struct super_block *sb = e4b->bd_sb; | 3719 | struct super_block *sb = e4b->bd_sb; |
3704 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 3720 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
3705 | unsigned long end; | 3721 | unsigned long end; |
@@ -3715,9 +3731,13 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3715 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); | 3731 | BUG_ON(group != e4b->bd_group && pa->pa_len != 0); |
3716 | end = bit + pa->pa_len; | 3732 | end = bit + pa->pa_len; |
3717 | 3733 | ||
3718 | ac.ac_sb = sb; | 3734 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
3719 | ac.ac_inode = pa->pa_inode; | 3735 | |
3720 | ac.ac_op = EXT4_MB_HISTORY_DISCARD; | 3736 | if (ac) { |
3737 | ac->ac_sb = sb; | ||
3738 | ac->ac_inode = pa->pa_inode; | ||
3739 | ac->ac_op = EXT4_MB_HISTORY_DISCARD; | ||
3740 | } | ||
3721 | 3741 | ||
3722 | while (bit < end) { | 3742 | while (bit < end) { |
3723 | bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); | 3743 | bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); |
@@ -3733,24 +3753,28 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3733 | (unsigned) group); | 3753 | (unsigned) group); |
3734 | free += next - bit; | 3754 | free += next - bit; |
3735 | 3755 | ||
3736 | ac.ac_b_ex.fe_group = group; | 3756 | if (ac) { |
3737 | ac.ac_b_ex.fe_start = bit; | 3757 | ac->ac_b_ex.fe_group = group; |
3738 | ac.ac_b_ex.fe_len = next - bit; | 3758 | ac->ac_b_ex.fe_start = bit; |
3739 | ac.ac_b_ex.fe_logical = 0; | 3759 | ac->ac_b_ex.fe_len = next - bit; |
3740 | ext4_mb_store_history(&ac); | 3760 | ac->ac_b_ex.fe_logical = 0; |
3761 | ext4_mb_store_history(ac); | ||
3762 | } | ||
3741 | 3763 | ||
3742 | mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); | 3764 | mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); |
3743 | bit = next + 1; | 3765 | bit = next + 1; |
3744 | } | 3766 | } |
3745 | if (free != pa->pa_free) { | 3767 | if (free != pa->pa_free) { |
3746 | printk(KERN_ERR "pa %p: logic %lu, phys. %lu, len %lu\n", | 3768 | printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n", |
3747 | pa, (unsigned long) pa->pa_lstart, | 3769 | pa, (unsigned long) pa->pa_lstart, |
3748 | (unsigned long) pa->pa_pstart, | 3770 | (unsigned long) pa->pa_pstart, |
3749 | (unsigned long) pa->pa_len); | 3771 | (unsigned long) pa->pa_len); |
3750 | printk(KERN_ERR "free %u, pa_free %u\n", free, pa->pa_free); | 3772 | ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n", |
3773 | free, pa->pa_free); | ||
3751 | } | 3774 | } |
3752 | BUG_ON(free != pa->pa_free); | ||
3753 | atomic_add(free, &sbi->s_mb_discarded); | 3775 | atomic_add(free, &sbi->s_mb_discarded); |
3776 | if (ac) | ||
3777 | kmem_cache_free(ext4_ac_cachep, ac); | ||
3754 | 3778 | ||
3755 | return err; | 3779 | return err; |
3756 | } | 3780 | } |
@@ -3758,12 +3782,15 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b, | |||
3758 | static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, | 3782 | static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, |
3759 | struct ext4_prealloc_space *pa) | 3783 | struct ext4_prealloc_space *pa) |
3760 | { | 3784 | { |
3761 | struct ext4_allocation_context ac; | 3785 | struct ext4_allocation_context *ac; |
3762 | struct super_block *sb = e4b->bd_sb; | 3786 | struct super_block *sb = e4b->bd_sb; |
3763 | ext4_group_t group; | 3787 | ext4_group_t group; |
3764 | ext4_grpblk_t bit; | 3788 | ext4_grpblk_t bit; |
3765 | 3789 | ||
3766 | ac.ac_op = EXT4_MB_HISTORY_DISCARD; | 3790 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
3791 | |||
3792 | if (ac) | ||
3793 | ac->ac_op = EXT4_MB_HISTORY_DISCARD; | ||
3767 | 3794 | ||
3768 | BUG_ON(pa->pa_deleted == 0); | 3795 | BUG_ON(pa->pa_deleted == 0); |
3769 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); | 3796 | ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); |
@@ -3771,13 +3798,16 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b, | |||
3771 | mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); | 3798 | mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); |
3772 | atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); | 3799 | atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); |
3773 | 3800 | ||
3774 | ac.ac_sb = sb; | 3801 | if (ac) { |
3775 | ac.ac_inode = NULL; | 3802 | ac->ac_sb = sb; |
3776 | ac.ac_b_ex.fe_group = group; | 3803 | ac->ac_inode = NULL; |
3777 | ac.ac_b_ex.fe_start = bit; | 3804 | ac->ac_b_ex.fe_group = group; |
3778 | ac.ac_b_ex.fe_len = pa->pa_len; | 3805 | ac->ac_b_ex.fe_start = bit; |
3779 | ac.ac_b_ex.fe_logical = 0; | 3806 | ac->ac_b_ex.fe_len = pa->pa_len; |
3780 | ext4_mb_store_history(&ac); | 3807 | ac->ac_b_ex.fe_logical = 0; |
3808 | ext4_mb_store_history(ac); | ||
3809 | kmem_cache_free(ext4_ac_cachep, ac); | ||
3810 | } | ||
3781 | 3811 | ||
3782 | return 0; | 3812 | return 0; |
3783 | } | 3813 | } |
@@ -4231,7 +4261,7 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) | |||
4231 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | 4261 | ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, |
4232 | struct ext4_allocation_request *ar, int *errp) | 4262 | struct ext4_allocation_request *ar, int *errp) |
4233 | { | 4263 | { |
4234 | struct ext4_allocation_context ac; | 4264 | struct ext4_allocation_context *ac = NULL; |
4235 | struct ext4_sb_info *sbi; | 4265 | struct ext4_sb_info *sbi; |
4236 | struct super_block *sb; | 4266 | struct super_block *sb; |
4237 | ext4_fsblk_t block = 0; | 4267 | ext4_fsblk_t block = 0; |
@@ -4257,53 +4287,60 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4257 | } | 4287 | } |
4258 | inquota = ar->len; | 4288 | inquota = ar->len; |
4259 | 4289 | ||
4290 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); | ||
4291 | if (!ac) { | ||
4292 | *errp = -ENOMEM; | ||
4293 | return 0; | ||
4294 | } | ||
4295 | |||
4260 | ext4_mb_poll_new_transaction(sb, handle); | 4296 | ext4_mb_poll_new_transaction(sb, handle); |
4261 | 4297 | ||
4262 | *errp = ext4_mb_initialize_context(&ac, ar); | 4298 | *errp = ext4_mb_initialize_context(ac, ar); |
4263 | if (*errp) { | 4299 | if (*errp) { |
4264 | ar->len = 0; | 4300 | ar->len = 0; |
4265 | goto out; | 4301 | goto out; |
4266 | } | 4302 | } |
4267 | 4303 | ||
4268 | ac.ac_op = EXT4_MB_HISTORY_PREALLOC; | 4304 | ac->ac_op = EXT4_MB_HISTORY_PREALLOC; |
4269 | if (!ext4_mb_use_preallocated(&ac)) { | 4305 | if (!ext4_mb_use_preallocated(ac)) { |
4270 | 4306 | ||
4271 | ac.ac_op = EXT4_MB_HISTORY_ALLOC; | 4307 | ac->ac_op = EXT4_MB_HISTORY_ALLOC; |
4272 | ext4_mb_normalize_request(&ac, ar); | 4308 | ext4_mb_normalize_request(ac, ar); |
4273 | 4309 | ||
4274 | repeat: | 4310 | repeat: |
4275 | /* allocate space in core */ | 4311 | /* allocate space in core */ |
4276 | ext4_mb_regular_allocator(&ac); | 4312 | ext4_mb_regular_allocator(ac); |
4277 | 4313 | ||
4278 | /* as we've just preallocated more space than | 4314 | /* as we've just preallocated more space than |
4279 | * user requested orinally, we store allocated | 4315 | * user requested orinally, we store allocated |
4280 | * space in a special descriptor */ | 4316 | * space in a special descriptor */ |
4281 | if (ac.ac_status == AC_STATUS_FOUND && | 4317 | if (ac->ac_status == AC_STATUS_FOUND && |
4282 | ac.ac_o_ex.fe_len < ac.ac_b_ex.fe_len) | 4318 | ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) |
4283 | ext4_mb_new_preallocation(&ac); | 4319 | ext4_mb_new_preallocation(ac); |
4284 | } | 4320 | } |
4285 | 4321 | ||
4286 | if (likely(ac.ac_status == AC_STATUS_FOUND)) { | 4322 | if (likely(ac->ac_status == AC_STATUS_FOUND)) { |
4287 | ext4_mb_mark_diskspace_used(&ac, handle); | 4323 | ext4_mb_mark_diskspace_used(ac, handle); |
4288 | *errp = 0; | 4324 | *errp = 0; |
4289 | block = ext4_grp_offs_to_block(sb, &ac.ac_b_ex); | 4325 | block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); |
4290 | ar->len = ac.ac_b_ex.fe_len; | 4326 | ar->len = ac->ac_b_ex.fe_len; |
4291 | } else { | 4327 | } else { |
4292 | freed = ext4_mb_discard_preallocations(sb, ac.ac_o_ex.fe_len); | 4328 | freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); |
4293 | if (freed) | 4329 | if (freed) |
4294 | goto repeat; | 4330 | goto repeat; |
4295 | *errp = -ENOSPC; | 4331 | *errp = -ENOSPC; |
4296 | ac.ac_b_ex.fe_len = 0; | 4332 | ac->ac_b_ex.fe_len = 0; |
4297 | ar->len = 0; | 4333 | ar->len = 0; |
4298 | ext4_mb_show_ac(&ac); | 4334 | ext4_mb_show_ac(ac); |
4299 | } | 4335 | } |
4300 | 4336 | ||
4301 | ext4_mb_release_context(&ac); | 4337 | ext4_mb_release_context(ac); |
4302 | 4338 | ||
4303 | out: | 4339 | out: |
4304 | if (ar->len < inquota) | 4340 | if (ar->len < inquota) |
4305 | DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); | 4341 | DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len); |
4306 | 4342 | ||
4343 | kmem_cache_free(ext4_ac_cachep, ac); | ||
4307 | return block; | 4344 | return block; |
4308 | } | 4345 | } |
4309 | static void ext4_mb_poll_new_transaction(struct super_block *sb, | 4346 | static void ext4_mb_poll_new_transaction(struct super_block *sb, |
@@ -4405,9 +4442,9 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode, | |||
4405 | unsigned long block, unsigned long count, | 4442 | unsigned long block, unsigned long count, |
4406 | int metadata, unsigned long *freed) | 4443 | int metadata, unsigned long *freed) |
4407 | { | 4444 | { |
4408 | struct buffer_head *bitmap_bh = 0; | 4445 | struct buffer_head *bitmap_bh = NULL; |
4409 | struct super_block *sb = inode->i_sb; | 4446 | struct super_block *sb = inode->i_sb; |
4410 | struct ext4_allocation_context ac; | 4447 | struct ext4_allocation_context *ac = NULL; |
4411 | struct ext4_group_desc *gdp; | 4448 | struct ext4_group_desc *gdp; |
4412 | struct ext4_super_block *es; | 4449 | struct ext4_super_block *es; |
4413 | unsigned long overflow; | 4450 | unsigned long overflow; |
@@ -4436,9 +4473,12 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode, | |||
4436 | 4473 | ||
4437 | ext4_debug("freeing block %lu\n", block); | 4474 | ext4_debug("freeing block %lu\n", block); |
4438 | 4475 | ||
4439 | ac.ac_op = EXT4_MB_HISTORY_FREE; | 4476 | ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); |
4440 | ac.ac_inode = inode; | 4477 | if (ac) { |
4441 | ac.ac_sb = sb; | 4478 | ac->ac_op = EXT4_MB_HISTORY_FREE; |
4479 | ac->ac_inode = inode; | ||
4480 | ac->ac_sb = sb; | ||
4481 | } | ||
4442 | 4482 | ||
4443 | do_more: | 4483 | do_more: |
4444 | overflow = 0; | 4484 | overflow = 0; |
@@ -4504,10 +4544,12 @@ do_more: | |||
4504 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | 4544 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); |
4505 | err = ext4_journal_dirty_metadata(handle, bitmap_bh); | 4545 | err = ext4_journal_dirty_metadata(handle, bitmap_bh); |
4506 | 4546 | ||
4507 | ac.ac_b_ex.fe_group = block_group; | 4547 | if (ac) { |
4508 | ac.ac_b_ex.fe_start = bit; | 4548 | ac->ac_b_ex.fe_group = block_group; |
4509 | ac.ac_b_ex.fe_len = count; | 4549 | ac->ac_b_ex.fe_start = bit; |
4510 | ext4_mb_store_history(&ac); | 4550 | ac->ac_b_ex.fe_len = count; |
4551 | ext4_mb_store_history(ac); | ||
4552 | } | ||
4511 | 4553 | ||
4512 | if (metadata) { | 4554 | if (metadata) { |
4513 | /* blocks being freed are metadata. these blocks shouldn't | 4555 | /* blocks being freed are metadata. these blocks shouldn't |
@@ -4548,5 +4590,7 @@ do_more: | |||
4548 | error_return: | 4590 | error_return: |
4549 | brelse(bitmap_bh); | 4591 | brelse(bitmap_bh); |
4550 | ext4_std_error(sb, err); | 4592 | ext4_std_error(sb, err); |
4593 | if (ac) | ||
4594 | kmem_cache_free(ext4_ac_cachep, ac); | ||
4551 | return; | 4595 | return; |
4552 | } | 4596 | } |
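The mballoc hunks above move the large struct ext4_allocation_context off the kernel stack into a dedicated slab cache, and every user degrades gracefully (history recording is simply skipped) if the GFP_NOFS allocation fails. A minimal sketch of that pattern; the demo_* names are illustrative, only the kmem_cache_* calls and the fields touched are taken from the hunks.

#include <linux/slab.h>

static struct kmem_cache *demo_ac_cachep;

static int demo_init(void)
{
	demo_ac_cachep = kmem_cache_create("demo_alloc_context",
			sizeof(struct ext4_allocation_context),
			0, SLAB_RECLAIM_ACCOUNT, NULL);
	return demo_ac_cachep ? 0 : -ENOMEM;
}

static void demo_record(struct super_block *sb)
{
	struct ext4_allocation_context *ac;

	ac = kmem_cache_alloc(demo_ac_cachep, GFP_NOFS);
	if (ac) {			/* history is best-effort only */
		ac->ac_sb = sb;
		ac->ac_op = EXT4_MB_HISTORY_FREE;
		ext4_mb_store_history(ac);
		kmem_cache_free(demo_ac_cachep, ac);
	}
	/* the actual allocate/free work proceeds whether or not ac was obtained */
}

static void demo_exit(void)
{
	kmem_cache_destroy(demo_ac_cachep);
}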
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 3ebc2332f52e..8c6c685b9d22 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c | |||
@@ -61,10 +61,9 @@ static int finish_range(handle_t *handle, struct inode *inode, | |||
61 | retval = ext4_journal_restart(handle, needed); | 61 | retval = ext4_journal_restart(handle, needed); |
62 | if (retval) | 62 | if (retval) |
63 | goto err_out; | 63 | goto err_out; |
64 | } | 64 | } else if (needed) { |
65 | if (needed) { | ||
66 | retval = ext4_journal_extend(handle, needed); | 65 | retval = ext4_journal_extend(handle, needed); |
67 | if (retval != 0) { | 66 | if (retval) { |
68 | /* | 67 | /* |
69 | * IF not able to extend the journal restart the journal | 68 | * IF not able to extend the journal restart the journal |
70 | */ | 69 | */ |
@@ -220,6 +219,26 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode, | |||
220 | 219 | ||
221 | } | 220 | } |
222 | 221 | ||
222 | static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) | ||
223 | { | ||
224 | int retval = 0, needed; | ||
225 | |||
226 | if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS) | ||
227 | return 0; | ||
228 | /* | ||
229 | * We are freeing blocks. During this we touch | ||
230 | * superblock, group descriptor and block bitmap. | ||
231 | * So allocate a credit of 3. We may update | ||
232 | * quota (user and group). | ||
233 | */ | ||
234 | needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); | ||
235 | |||
236 | if (ext4_journal_extend(handle, needed) != 0) | ||
237 | retval = ext4_journal_restart(handle, needed); | ||
238 | |||
239 | return retval; | ||
240 | } | ||
241 | |||
223 | static int free_dind_blocks(handle_t *handle, | 242 | static int free_dind_blocks(handle_t *handle, |
224 | struct inode *inode, __le32 i_data) | 243 | struct inode *inode, __le32 i_data) |
225 | { | 244 | { |
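extend_credit_for_blkdel() above uses the same extend-or-restart idiom as finish_range() near the top of this file: ask the journal to stretch the running transaction, and only if that fails commit it and start a fresh handle of the required size. The generic shape, with an illustrative wrapper name:

static int ensure_credits(handle_t *handle, int needed)
{
	if (handle->h_buffer_credits >= needed)
		return 0;		/* enough room in this transaction */
	if (ext4_journal_extend(handle, needed) == 0)
		return 0;		/* transaction stretched in place */
	/* could not extend: commit what we have and open a new handle */
	return ext4_journal_restart(handle, needed);
}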
@@ -234,11 +253,14 @@ static int free_dind_blocks(handle_t *handle, | |||
234 | 253 | ||
235 | tmp_idata = (__le32 *)bh->b_data; | 254 | tmp_idata = (__le32 *)bh->b_data; |
236 | for (i = 0; i < max_entries; i++) { | 255 | for (i = 0; i < max_entries; i++) { |
237 | if (tmp_idata[i]) | 256 | if (tmp_idata[i]) { |
257 | extend_credit_for_blkdel(handle, inode); | ||
238 | ext4_free_blocks(handle, inode, | 258 | ext4_free_blocks(handle, inode, |
239 | le32_to_cpu(tmp_idata[i]), 1, 1); | 259 | le32_to_cpu(tmp_idata[i]), 1, 1); |
260 | } | ||
240 | } | 261 | } |
241 | put_bh(bh); | 262 | put_bh(bh); |
263 | extend_credit_for_blkdel(handle, inode); | ||
242 | ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); | 264 | ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); |
243 | return 0; | 265 | return 0; |
244 | } | 266 | } |
@@ -267,29 +289,32 @@ static int free_tind_blocks(handle_t *handle, | |||
267 | } | 289 | } |
268 | } | 290 | } |
269 | put_bh(bh); | 291 | put_bh(bh); |
292 | extend_credit_for_blkdel(handle, inode); | ||
270 | ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); | 293 | ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1); |
271 | return 0; | 294 | return 0; |
272 | } | 295 | } |
273 | 296 | ||
274 | static int free_ind_block(handle_t *handle, struct inode *inode) | 297 | static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data) |
275 | { | 298 | { |
276 | int retval; | 299 | int retval; |
277 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
278 | 300 | ||
279 | if (ei->i_data[EXT4_IND_BLOCK]) | 301 | /* ei->i_data[EXT4_IND_BLOCK] */ |
302 | if (i_data[0]) { | ||
303 | extend_credit_for_blkdel(handle, inode); | ||
280 | ext4_free_blocks(handle, inode, | 304 | ext4_free_blocks(handle, inode, |
281 | le32_to_cpu(ei->i_data[EXT4_IND_BLOCK]), 1, 1); | 305 | le32_to_cpu(i_data[0]), 1, 1); |
306 | } | ||
282 | 307 | ||
283 | if (ei->i_data[EXT4_DIND_BLOCK]) { | 308 | /* ei->i_data[EXT4_DIND_BLOCK] */ |
284 | retval = free_dind_blocks(handle, inode, | 309 | if (i_data[1]) { |
285 | ei->i_data[EXT4_DIND_BLOCK]); | 310 | retval = free_dind_blocks(handle, inode, i_data[1]); |
286 | if (retval) | 311 | if (retval) |
287 | return retval; | 312 | return retval; |
288 | } | 313 | } |
289 | 314 | ||
290 | if (ei->i_data[EXT4_TIND_BLOCK]) { | 315 | /* ei->i_data[EXT4_TIND_BLOCK] */ |
291 | retval = free_tind_blocks(handle, inode, | 316 | if (i_data[2]) { |
292 | ei->i_data[EXT4_TIND_BLOCK]); | 317 | retval = free_tind_blocks(handle, inode, i_data[2]); |
293 | if (retval) | 318 | if (retval) |
294 | return retval; | 319 | return retval; |
295 | } | 320 | } |
@@ -297,15 +322,13 @@ static int free_ind_block(handle_t *handle, struct inode *inode) | |||
297 | } | 322 | } |
298 | 323 | ||
299 | static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | 324 | static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, |
300 | struct inode *tmp_inode, int retval) | 325 | struct inode *tmp_inode) |
301 | { | 326 | { |
327 | int retval; | ||
328 | __le32 i_data[3]; | ||
302 | struct ext4_inode_info *ei = EXT4_I(inode); | 329 | struct ext4_inode_info *ei = EXT4_I(inode); |
303 | struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); | 330 | struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); |
304 | 331 | ||
305 | retval = free_ind_block(handle, inode); | ||
306 | if (retval) | ||
307 | goto err_out; | ||
308 | |||
309 | /* | 332 | /* |
310 | * One credit accounted for writing the | 333 | * One credit accounted for writing the |
311 | * i_data field of the original inode | 334 | * i_data field of the original inode |
@@ -317,6 +340,11 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | |||
317 | goto err_out; | 340 | goto err_out; |
318 | } | 341 | } |
319 | 342 | ||
343 | i_data[0] = ei->i_data[EXT4_IND_BLOCK]; | ||
344 | i_data[1] = ei->i_data[EXT4_DIND_BLOCK]; | ||
345 | i_data[2] = ei->i_data[EXT4_TIND_BLOCK]; | ||
346 | |||
347 | down_write(&EXT4_I(inode)->i_data_sem); | ||
320 | /* | 348 | /* |
321 | * We have the extent map build with the tmp inode. | 349 | * We have the extent map build with the tmp inode. |
322 | * Now copy the i_data across | 350 | * Now copy the i_data across |
@@ -336,8 +364,15 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | |||
336 | spin_lock(&inode->i_lock); | 364 | spin_lock(&inode->i_lock); |
337 | inode->i_blocks += tmp_inode->i_blocks; | 365 | inode->i_blocks += tmp_inode->i_blocks; |
338 | spin_unlock(&inode->i_lock); | 366 | spin_unlock(&inode->i_lock); |
367 | up_write(&EXT4_I(inode)->i_data_sem); | ||
339 | 368 | ||
369 | /* | ||
370 | * We mark the inode dirty after, because we decrement the | ||
371 | * i_blocks when freeing the indirect meta-data blocks | ||
372 | */ | ||
373 | retval = free_ind_block(handle, inode, i_data); | ||
340 | ext4_mark_inode_dirty(handle, inode); | 374 | ext4_mark_inode_dirty(handle, inode); |
375 | |||
341 | err_out: | 376 | err_out: |
342 | return retval; | 377 | return retval; |
343 | } | 378 | } |
@@ -365,6 +400,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode, | |||
365 | } | 400 | } |
366 | } | 401 | } |
367 | put_bh(bh); | 402 | put_bh(bh); |
403 | extend_credit_for_blkdel(handle, inode); | ||
368 | ext4_free_blocks(handle, inode, block, 1, 1); | 404 | ext4_free_blocks(handle, inode, block, 1, 1); |
369 | return retval; | 405 | return retval; |
370 | } | 406 | } |
@@ -414,7 +450,12 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp, | |||
414 | if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) | 450 | if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) |
415 | return -EINVAL; | 451 | return -EINVAL; |
416 | 452 | ||
417 | down_write(&EXT4_I(inode)->i_data_sem); | 453 | if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) |
454 | /* | ||
455 | * don't migrate fast symlink | ||
456 | */ | ||
457 | return retval; | ||
458 | |||
418 | handle = ext4_journal_start(inode, | 459 | handle = ext4_journal_start(inode, |
419 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + | 460 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + |
420 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 461 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
@@ -448,13 +489,6 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp, | |||
448 | ext4_orphan_add(handle, tmp_inode); | 489 | ext4_orphan_add(handle, tmp_inode); |
449 | ext4_journal_stop(handle); | 490 | ext4_journal_stop(handle); |
450 | 491 | ||
451 | ei = EXT4_I(inode); | ||
452 | i_data = ei->i_data; | ||
453 | memset(&lb, 0, sizeof(lb)); | ||
454 | |||
455 | /* 32 bit block address 4 bytes */ | ||
456 | max_entries = inode->i_sb->s_blocksize >> 2; | ||
457 | |||
458 | /* | 492 | /* |
459 | * start with one credit accounted for | 493 | * start with one credit accounted for |
460 | * superblock modification. | 494 | * superblock modification. |
@@ -463,7 +497,20 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp, | |||
463 | * trascation that created the inode. Later as and | 497 | * trascation that created the inode. Later as and |
464 | * when we add extents we extent the journal | 498 | * when we add extents we extent the journal |
465 | */ | 499 | */ |
500 | /* | ||
501 | * inode_mutex prevents write and truncate on the file. Read still goes | ||
502 | * through. We take i_data_sem in ext4_ext_swap_inode_data before we | ||
503 | * switch the inode format to prevent read. | ||
504 | */ | ||
505 | mutex_lock(&(inode->i_mutex)); | ||
466 | handle = ext4_journal_start(inode, 1); | 506 | handle = ext4_journal_start(inode, 1); |
507 | |||
508 | ei = EXT4_I(inode); | ||
509 | i_data = ei->i_data; | ||
510 | memset(&lb, 0, sizeof(lb)); | ||
511 | |||
512 | /* 32 bit block address 4 bytes */ | ||
513 | max_entries = inode->i_sb->s_blocksize >> 2; | ||
467 | for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) { | 514 | for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) { |
468 | if (i_data[i]) { | 515 | if (i_data[i]) { |
469 | retval = update_extent_range(handle, tmp_inode, | 516 | retval = update_extent_range(handle, tmp_inode, |
@@ -501,19 +548,6 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp, | |||
501 | */ | 548 | */ |
502 | retval = finish_range(handle, tmp_inode, &lb); | 549 | retval = finish_range(handle, tmp_inode, &lb); |
503 | err_out: | 550 | err_out: |
504 | /* | ||
505 | * We are either freeing extent information or indirect | ||
506 | * blocks. During this we touch superblock, group descriptor | ||
507 | * and block bitmap. Later we mark the tmp_inode dirty | ||
508 | * via ext4_ext_tree_init. So allocate a credit of 4 | ||
509 | * We may update quota (user and group). | ||
510 | * | ||
511 | * FIXME!! we may be touching bitmaps in different block groups. | ||
512 | */ | ||
513 | if (ext4_journal_extend(handle, | ||
514 | 4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)) != 0) | ||
515 | ext4_journal_restart(handle, | ||
516 | 4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)); | ||
517 | if (retval) | 551 | if (retval) |
518 | /* | 552 | /* |
519 | * Failure case delete the extent information with the | 553 | * Failure case delete the extent information with the |
@@ -522,7 +556,11 @@ err_out: | |||
522 | free_ext_block(handle, tmp_inode); | 556 | free_ext_block(handle, tmp_inode); |
523 | else | 557 | else |
524 | retval = ext4_ext_swap_inode_data(handle, inode, | 558 | retval = ext4_ext_swap_inode_data(handle, inode, |
525 | tmp_inode, retval); | 559 | tmp_inode); |
560 | |||
561 | /* We mark the tmp_inode dirty via ext4_ext_tree_init. */ | ||
562 | if (ext4_journal_extend(handle, 1) != 0) | ||
563 | ext4_journal_restart(handle, 1); | ||
526 | 564 | ||
527 | /* | 565 | /* |
528 | * Mark the tmp_inode as of size zero | 566 | * Mark the tmp_inode as of size zero |
@@ -550,8 +588,7 @@ err_out: | |||
550 | tmp_inode->i_nlink = 0; | 588 | tmp_inode->i_nlink = 0; |
551 | 589 | ||
552 | ext4_journal_stop(handle); | 590 | ext4_journal_stop(handle); |
553 | 591 | mutex_unlock(&(inode->i_mutex)); | |
554 | up_write(&EXT4_I(inode)->i_data_sem); | ||
555 | 592 | ||
556 | if (tmp_inode) | 593 | if (tmp_inode) |
557 | iput(tmp_inode); | 594 | iput(tmp_inode); |
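The migrate hunks above take i_mutex before the journal handle is started, so write and truncate are excluded for the whole conversion, and the old mid-loop journal extension is dropped. A minimal, non-compilable condensation of the resulting ordering; error handling is elided and migrate_order_sketch() is an invented name, not a function in the patch:

    static int migrate_order_sketch(struct inode *inode)
    {
        handle_t *handle;

        mutex_lock(&inode->i_mutex);            /* blocks write/truncate; reads still go through */
        handle = ext4_journal_start(inode, 1);  /* one credit for the superblock modification */
        /* walk EXT4_I(inode)->i_data and build the extent tree in tmp_inode */
        /* ext4_ext_swap_inode_data() takes i_data_sem before switching the inode format */
        ext4_journal_stop(handle);
        mutex_unlock(&inode->i_mutex);
        return 0;
    }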
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 67b6d8a1ceff..a9347fb43bcc 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -1039,17 +1039,11 @@ static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, str | |||
1039 | if (!ext4_valid_inum(dir->i_sb, ino)) { | 1039 | if (!ext4_valid_inum(dir->i_sb, ino)) { |
1040 | ext4_error(dir->i_sb, "ext4_lookup", | 1040 | ext4_error(dir->i_sb, "ext4_lookup", |
1041 | "bad inode number: %lu", ino); | 1041 | "bad inode number: %lu", ino); |
1042 | inode = NULL; | 1042 | return ERR_PTR(-EIO); |
1043 | } else | ||
1044 | inode = iget(dir->i_sb, ino); | ||
1045 | |||
1046 | if (!inode) | ||
1047 | return ERR_PTR(-EACCES); | ||
1048 | |||
1049 | if (is_bad_inode(inode)) { | ||
1050 | iput(inode); | ||
1051 | return ERR_PTR(-ENOENT); | ||
1052 | } | 1043 | } |
1044 | inode = ext4_iget(dir->i_sb, ino); | ||
1045 | if (IS_ERR(inode)) | ||
1046 | return ERR_CAST(inode); | ||
1053 | } | 1047 | } |
1054 | return d_splice_alias(inode, dentry); | 1048 | return d_splice_alias(inode, dentry); |
1055 | } | 1049 | } |
@@ -1078,18 +1072,13 @@ struct dentry *ext4_get_parent(struct dentry *child) | |||
1078 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { | 1072 | if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { |
1079 | ext4_error(child->d_inode->i_sb, "ext4_get_parent", | 1073 | ext4_error(child->d_inode->i_sb, "ext4_get_parent", |
1080 | "bad inode number: %lu", ino); | 1074 | "bad inode number: %lu", ino); |
1081 | inode = NULL; | 1075 | return ERR_PTR(-EIO); |
1082 | } else | ||
1083 | inode = iget(child->d_inode->i_sb, ino); | ||
1084 | |||
1085 | if (!inode) | ||
1086 | return ERR_PTR(-EACCES); | ||
1087 | |||
1088 | if (is_bad_inode(inode)) { | ||
1089 | iput(inode); | ||
1090 | return ERR_PTR(-ENOENT); | ||
1091 | } | 1076 | } |
1092 | 1077 | ||
1078 | inode = ext4_iget(child->d_inode->i_sb, ino); | ||
1079 | if (IS_ERR(inode)) | ||
1080 | return ERR_CAST(inode); | ||
1081 | |||
1093 | parent = d_alloc_anon(inode); | 1082 | parent = d_alloc_anon(inode); |
1094 | if (!parent) { | 1083 | if (!parent) { |
1095 | iput(inode); | 1084 | iput(inode); |
@@ -2234,6 +2223,7 @@ retry: | |||
2234 | inode->i_op = &ext4_fast_symlink_inode_operations; | 2223 | inode->i_op = &ext4_fast_symlink_inode_operations; |
2235 | memcpy((char*)&EXT4_I(inode)->i_data,symname,l); | 2224 | memcpy((char*)&EXT4_I(inode)->i_data,symname,l); |
2236 | inode->i_size = l-1; | 2225 | inode->i_size = l-1; |
2226 | EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL; | ||
2237 | } | 2227 | } |
2238 | EXT4_I(inode)->i_disksize = inode->i_size; | 2228 | EXT4_I(inode)->i_disksize = inode->i_size; |
2239 | err = ext4_add_nondir(handle, dentry, inode); | 2229 | err = ext4_add_nondir(handle, dentry, inode); |
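The lookup hunks above replace the iget()/is_bad_inode() pair with ext4_iget(), which reports failure through an error pointer instead of NULL. A kernel-style sketch of that pattern; lookup_by_ino_sketch() is a made-up wrapper used only to show the shape of the calls:

    static struct inode *lookup_by_ino_sketch(struct super_block *sb, unsigned long ino)
    {
        struct inode *inode;

        if (!ext4_valid_inum(sb, ino))
            return ERR_PTR(-EIO);       /* bad on-disk number: nothing to iput() */

        inode = ext4_iget(sb, ino);     /* never NULL; failures come back as ERR_PTR */
        if (IS_ERR(inode))
            return ERR_CAST(inode);     /* propagate without dereferencing */

        return inode;                   /* caller hands this to d_splice_alias() etc. */
    }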
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 4fbba60816f4..9477a2bd6ff2 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -779,12 +779,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) | |||
779 | "No reserved GDT blocks, can't resize"); | 779 | "No reserved GDT blocks, can't resize"); |
780 | return -EPERM; | 780 | return -EPERM; |
781 | } | 781 | } |
782 | inode = iget(sb, EXT4_RESIZE_INO); | 782 | inode = ext4_iget(sb, EXT4_RESIZE_INO); |
783 | if (!inode || is_bad_inode(inode)) { | 783 | if (IS_ERR(inode)) { |
784 | ext4_warning(sb, __FUNCTION__, | 784 | ext4_warning(sb, __FUNCTION__, |
785 | "Error opening resize inode"); | 785 | "Error opening resize inode"); |
786 | iput(inode); | 786 | return PTR_ERR(inode); |
787 | return -ENOENT; | ||
788 | } | 787 | } |
789 | } | 788 | } |
790 | 789 | ||
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 055a0cd0168e..13383ba18f1d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -777,11 +777,10 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb, | |||
777 | * Currently we don't know the generation for parent directory, so | 777 | * Currently we don't know the generation for parent directory, so |
778 | * a generation of 0 means "accept any" | 778 | * a generation of 0 means "accept any" |
779 | */ | 779 | */ |
780 | inode = iget(sb, ino); | 780 | inode = ext4_iget(sb, ino); |
781 | if (inode == NULL) | 781 | if (IS_ERR(inode)) |
782 | return ERR_PTR(-ENOMEM); | 782 | return ERR_CAST(inode); |
783 | if (is_bad_inode(inode) || | 783 | if (generation && inode->i_generation != generation) { |
784 | (generation && inode->i_generation != generation)) { | ||
785 | iput(inode); | 784 | iput(inode); |
786 | return ERR_PTR(-ESTALE); | 785 | return ERR_PTR(-ESTALE); |
787 | } | 786 | } |
@@ -850,7 +849,6 @@ static struct quotactl_ops ext4_qctl_operations = { | |||
850 | static const struct super_operations ext4_sops = { | 849 | static const struct super_operations ext4_sops = { |
851 | .alloc_inode = ext4_alloc_inode, | 850 | .alloc_inode = ext4_alloc_inode, |
852 | .destroy_inode = ext4_destroy_inode, | 851 | .destroy_inode = ext4_destroy_inode, |
853 | .read_inode = ext4_read_inode, | ||
854 | .write_inode = ext4_write_inode, | 852 | .write_inode = ext4_write_inode, |
855 | .dirty_inode = ext4_dirty_inode, | 853 | .dirty_inode = ext4_dirty_inode, |
856 | .delete_inode = ext4_delete_inode, | 854 | .delete_inode = ext4_delete_inode, |
@@ -1458,7 +1456,7 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, | |||
1458 | } | 1456 | } |
1459 | 1457 | ||
1460 | /* Called at mount-time, super-block is locked */ | 1458 | /* Called at mount-time, super-block is locked */ |
1461 | static int ext4_check_descriptors (struct super_block * sb) | 1459 | static int ext4_check_descriptors(struct super_block *sb) |
1462 | { | 1460 | { |
1463 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 1461 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1464 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | 1462 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
@@ -1466,8 +1464,6 @@ static int ext4_check_descriptors (struct super_block * sb) | |||
1466 | ext4_fsblk_t block_bitmap; | 1464 | ext4_fsblk_t block_bitmap; |
1467 | ext4_fsblk_t inode_bitmap; | 1465 | ext4_fsblk_t inode_bitmap; |
1468 | ext4_fsblk_t inode_table; | 1466 | ext4_fsblk_t inode_table; |
1469 | struct ext4_group_desc * gdp = NULL; | ||
1470 | int desc_block = 0; | ||
1471 | int flexbg_flag = 0; | 1467 | int flexbg_flag = 0; |
1472 | ext4_group_t i; | 1468 | ext4_group_t i; |
1473 | 1469 | ||
@@ -1476,17 +1472,15 @@ static int ext4_check_descriptors (struct super_block * sb) | |||
1476 | 1472 | ||
1477 | ext4_debug ("Checking group descriptors"); | 1473 | ext4_debug ("Checking group descriptors"); |
1478 | 1474 | ||
1479 | for (i = 0; i < sbi->s_groups_count; i++) | 1475 | for (i = 0; i < sbi->s_groups_count; i++) { |
1480 | { | 1476 | struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL); |
1477 | |||
1481 | if (i == sbi->s_groups_count - 1 || flexbg_flag) | 1478 | if (i == sbi->s_groups_count - 1 || flexbg_flag) |
1482 | last_block = ext4_blocks_count(sbi->s_es) - 1; | 1479 | last_block = ext4_blocks_count(sbi->s_es) - 1; |
1483 | else | 1480 | else |
1484 | last_block = first_block + | 1481 | last_block = first_block + |
1485 | (EXT4_BLOCKS_PER_GROUP(sb) - 1); | 1482 | (EXT4_BLOCKS_PER_GROUP(sb) - 1); |
1486 | 1483 | ||
1487 | if ((i % EXT4_DESC_PER_BLOCK(sb)) == 0) | ||
1488 | gdp = (struct ext4_group_desc *) | ||
1489 | sbi->s_group_desc[desc_block++]->b_data; | ||
1490 | block_bitmap = ext4_block_bitmap(sb, gdp); | 1484 | block_bitmap = ext4_block_bitmap(sb, gdp); |
1491 | if (block_bitmap < first_block || block_bitmap > last_block) | 1485 | if (block_bitmap < first_block || block_bitmap > last_block) |
1492 | { | 1486 | { |
@@ -1524,8 +1518,6 @@ static int ext4_check_descriptors (struct super_block * sb) | |||
1524 | } | 1518 | } |
1525 | if (!flexbg_flag) | 1519 | if (!flexbg_flag) |
1526 | first_block += EXT4_BLOCKS_PER_GROUP(sb); | 1520 | first_block += EXT4_BLOCKS_PER_GROUP(sb); |
1527 | gdp = (struct ext4_group_desc *) | ||
1528 | ((__u8 *)gdp + EXT4_DESC_SIZE(sb)); | ||
1529 | } | 1521 | } |
1530 | 1522 | ||
1531 | ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); | 1523 | ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); |
@@ -1811,6 +1803,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent) | |||
1811 | unsigned long journal_devnum = 0; | 1803 | unsigned long journal_devnum = 0; |
1812 | unsigned long def_mount_opts; | 1804 | unsigned long def_mount_opts; |
1813 | struct inode *root; | 1805 | struct inode *root; |
1806 | int ret = -EINVAL; | ||
1814 | int blocksize; | 1807 | int blocksize; |
1815 | int db_count; | 1808 | int db_count; |
1816 | int i; | 1809 | int i; |
@@ -1926,6 +1919,17 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent) | |||
1926 | printk(KERN_WARNING | 1919 | printk(KERN_WARNING |
1927 | "EXT4-fs warning: feature flags set on rev 0 fs, " | 1920 | "EXT4-fs warning: feature flags set on rev 0 fs, " |
1928 | "running e2fsck is recommended\n"); | 1921 | "running e2fsck is recommended\n"); |
1922 | |||
1923 | /* | ||
1924 | * Since ext4 is still considered development code, we require | ||
1925 | * that the TEST_FILESYS flag in s->flags be set. | ||
1926 | */ | ||
1927 | if (!(le32_to_cpu(es->s_flags) & EXT2_FLAGS_TEST_FILESYS)) { | ||
1928 | printk(KERN_WARNING "EXT4-fs: %s: not marked " | ||
1929 | "OK to use with test code.\n", sb->s_id); | ||
1930 | goto failed_mount; | ||
1931 | } | ||
1932 | |||
1929 | /* | 1933 | /* |
1930 | * Check feature flags regardless of the revision level, since we | 1934 | * Check feature flags regardless of the revision level, since we |
1931 | * previously didn't change the revision level when setting the flags, | 1935 | * previously didn't change the revision level when setting the flags, |
@@ -2243,19 +2247,24 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent) | |||
2243 | * so we can safely mount the rest of the filesystem now. | 2247 | * so we can safely mount the rest of the filesystem now. |
2244 | */ | 2248 | */ |
2245 | 2249 | ||
2246 | root = iget(sb, EXT4_ROOT_INO); | 2250 | root = ext4_iget(sb, EXT4_ROOT_INO); |
2247 | sb->s_root = d_alloc_root(root); | 2251 | if (IS_ERR(root)) { |
2248 | if (!sb->s_root) { | ||
2249 | printk(KERN_ERR "EXT4-fs: get root inode failed\n"); | 2252 | printk(KERN_ERR "EXT4-fs: get root inode failed\n"); |
2250 | iput(root); | 2253 | ret = PTR_ERR(root); |
2251 | goto failed_mount4; | 2254 | goto failed_mount4; |
2252 | } | 2255 | } |
2253 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { | 2256 | if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { |
2254 | dput(sb->s_root); | 2257 | iput(root); |
2255 | sb->s_root = NULL; | ||
2256 | printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n"); | 2258 | printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n"); |
2257 | goto failed_mount4; | 2259 | goto failed_mount4; |
2258 | } | 2260 | } |
2261 | sb->s_root = d_alloc_root(root); | ||
2262 | if (!sb->s_root) { | ||
2263 | printk(KERN_ERR "EXT4-fs: get root dentry failed\n"); | ||
2264 | iput(root); | ||
2265 | ret = -ENOMEM; | ||
2266 | goto failed_mount4; | ||
2267 | } | ||
2259 | 2268 | ||
2260 | ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY); | 2269 | ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY); |
2261 | 2270 | ||
@@ -2336,7 +2345,7 @@ out_fail: | |||
2336 | sb->s_fs_info = NULL; | 2345 | sb->s_fs_info = NULL; |
2337 | kfree(sbi); | 2346 | kfree(sbi); |
2338 | lock_kernel(); | 2347 | lock_kernel(); |
2339 | return -EINVAL; | 2348 | return ret; |
2340 | } | 2349 | } |
2341 | 2350 | ||
2342 | /* | 2351 | /* |
@@ -2372,8 +2381,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, | |||
2372 | * things happen if we iget() an unused inode, as the subsequent | 2381 | * things happen if we iget() an unused inode, as the subsequent |
2373 | * iput() will try to delete it. */ | 2382 | * iput() will try to delete it. */ |
2374 | 2383 | ||
2375 | journal_inode = iget(sb, journal_inum); | 2384 | journal_inode = ext4_iget(sb, journal_inum); |
2376 | if (!journal_inode) { | 2385 | if (IS_ERR(journal_inode)) { |
2377 | printk(KERN_ERR "EXT4-fs: no journal found.\n"); | 2386 | printk(KERN_ERR "EXT4-fs: no journal found.\n"); |
2378 | return NULL; | 2387 | return NULL; |
2379 | } | 2388 | } |
@@ -2386,7 +2395,7 @@ static journal_t *ext4_get_journal(struct super_block *sb, | |||
2386 | 2395 | ||
2387 | jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", | 2396 | jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", |
2388 | journal_inode, journal_inode->i_size); | 2397 | journal_inode, journal_inode->i_size); |
2389 | if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) { | 2398 | if (!S_ISREG(journal_inode->i_mode)) { |
2390 | printk(KERN_ERR "EXT4-fs: invalid journal inode.\n"); | 2399 | printk(KERN_ERR "EXT4-fs: invalid journal inode.\n"); |
2391 | iput(journal_inode); | 2400 | iput(journal_inode); |
2392 | return NULL; | 2401 | return NULL; |
@@ -3149,16 +3158,16 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, | |||
3149 | if (err) | 3158 | if (err) |
3150 | return err; | 3159 | return err; |
3151 | /* Quotafile not on the same filesystem? */ | 3160 | /* Quotafile not on the same filesystem? */ |
3152 | if (nd.mnt->mnt_sb != sb) { | 3161 | if (nd.path.mnt->mnt_sb != sb) { |
3153 | path_release(&nd); | 3162 | path_put(&nd.path); |
3154 | return -EXDEV; | 3163 | return -EXDEV; |
3155 | } | 3164 | } |
3156 | /* Quotafile not of fs root? */ | 3165 | /* Quotafile not of fs root? */ |
3157 | if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) | 3166 | if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode) |
3158 | printk(KERN_WARNING | 3167 | printk(KERN_WARNING |
3159 | "EXT4-fs: Quota file not on filesystem root. " | 3168 | "EXT4-fs: Quota file not on filesystem root. " |
3160 | "Journalled quota will not work.\n"); | 3169 | "Journalled quota will not work.\n"); |
3161 | path_release(&nd); | 3170 | path_put(&nd.path); |
3162 | return vfs_quota_on(sb, type, format_id, path); | 3171 | return vfs_quota_on(sb, type, format_id, path); |
3163 | } | 3172 | } |
3164 | 3173 | ||
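The ext4_quota_on() hunk reflects the VFS switch from bare dentry/vfsmount fields in struct nameidata to an embedded struct path. A rough sketch of the caller-side pattern, assuming the 2.6.25-era path_lookup() interface (the lookup call itself is outside the hunk shown above):

    struct nameidata nd;
    int err = path_lookup(path, LOOKUP_FOLLOW, &nd);
    if (err)
        return err;
    if (nd.path.mnt->mnt_sb != sb) {    /* the vfsmount now lives in nd.path */
        path_put(&nd.path);             /* replaces the old path_release(&nd) */
        return -EXDEV;
    }
    path_put(&nd.path);
    return vfs_quota_on(sb, type, format_id, path);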
diff --git a/fs/fat/file.c b/fs/fat/file.c index 69a83b59dce8..c614175876e0 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c | |||
@@ -155,6 +155,42 @@ out: | |||
155 | return err; | 155 | return err; |
156 | } | 156 | } |
157 | 157 | ||
158 | static int check_mode(const struct msdos_sb_info *sbi, mode_t mode) | ||
159 | { | ||
160 | mode_t req = mode & ~S_IFMT; | ||
161 | |||
162 | /* | ||
163 | * Of the r and x bits, all (subject to umask) must be present. Of the | ||
164 | * w bits, either all (subject to umask) or none must be present. | ||
165 | */ | ||
166 | |||
167 | if (S_ISREG(mode)) { | ||
168 | req &= ~sbi->options.fs_fmask; | ||
169 | |||
170 | if ((req & (S_IRUGO | S_IXUGO)) != | ||
171 | ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_fmask)) | ||
172 | return -EPERM; | ||
173 | |||
174 | if ((req & S_IWUGO) != 0 && | ||
175 | (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_fmask)) | ||
176 | return -EPERM; | ||
177 | } else if (S_ISDIR(mode)) { | ||
178 | req &= ~sbi->options.fs_dmask; | ||
179 | |||
180 | if ((req & (S_IRUGO | S_IXUGO)) != | ||
181 | ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_dmask)) | ||
182 | return -EPERM; | ||
183 | |||
184 | if ((req & S_IWUGO) != 0 && | ||
185 | (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_dmask)) | ||
186 | return -EPERM; | ||
187 | } else { | ||
188 | return -EPERM; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
158 | int fat_notify_change(struct dentry *dentry, struct iattr *attr) | 194 | int fat_notify_change(struct dentry *dentry, struct iattr *attr) |
159 | { | 195 | { |
160 | struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb); | 196 | struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb); |
@@ -186,9 +222,7 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr) | |||
186 | if (((attr->ia_valid & ATTR_UID) && | 222 | if (((attr->ia_valid & ATTR_UID) && |
187 | (attr->ia_uid != sbi->options.fs_uid)) || | 223 | (attr->ia_uid != sbi->options.fs_uid)) || |
188 | ((attr->ia_valid & ATTR_GID) && | 224 | ((attr->ia_valid & ATTR_GID) && |
189 | (attr->ia_gid != sbi->options.fs_gid)) || | 225 | (attr->ia_gid != sbi->options.fs_gid))) |
190 | ((attr->ia_valid & ATTR_MODE) && | ||
191 | (attr->ia_mode & ~MSDOS_VALID_MODE))) | ||
192 | error = -EPERM; | 226 | error = -EPERM; |
193 | 227 | ||
194 | if (error) { | 228 | if (error) { |
@@ -196,6 +230,13 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr) | |||
196 | error = 0; | 230 | error = 0; |
197 | goto out; | 231 | goto out; |
198 | } | 232 | } |
233 | |||
234 | if (attr->ia_valid & ATTR_MODE) { | ||
235 | error = check_mode(sbi, attr->ia_mode); | ||
236 | if (error != 0 && !sbi->options.quiet) | ||
237 | goto out; | ||
238 | } | ||
239 | |||
199 | error = inode_setattr(inode, attr); | 240 | error = inode_setattr(inode, attr); |
200 | if (error) | 241 | if (error) |
201 | goto out; | 242 | goto out; |
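check_mode() above encodes what a FAT attribute byte can actually represent: every r/x bit that survives the fmask/dmask must be requested, and the surviving w bits are all-or-nothing. A small runnable userspace restatement of that rule; fat_mode_ok() is illustrative only and not a kernel function:

    #include <stdio.h>
    #include <sys/stat.h>

    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
    #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
    #define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

    /* Same test as check_mode(), minus the S_ISREG/S_ISDIR dispatch. */
    static int fat_mode_ok(mode_t req, mode_t mask)
    {
        mode_t rx = (S_IRUGO | S_IXUGO) & ~mask;   /* r/x bits that survive the mask */
        mode_t w  = S_IWUGO & ~mask;               /* w bits that survive the mask */

        req &= ~mask;
        if ((req & (S_IRUGO | S_IXUGO)) != rx)
            return 0;                              /* every surviving r/x bit is required */
        if ((req & S_IWUGO) != 0 && (req & S_IWUGO) != w)
            return 0;                              /* w bits: all of them or none */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", fat_mode_ok(0755, 022));    /* 1: full r/x plus the whole surviving w set */
        printf("%d\n", fat_mode_ok(0555, 022));    /* 1: read-only is representable (w bits absent) */
        printf("%d\n", fat_mode_ok(0775, 0));      /* 0: a partial w set cannot be stored on FAT */
        return 0;
    }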
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 920a576e1c25..53f3cf62b7c1 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -634,8 +634,6 @@ static const struct super_operations fat_sops = { | |||
634 | .clear_inode = fat_clear_inode, | 634 | .clear_inode = fat_clear_inode, |
635 | .remount_fs = fat_remount, | 635 | .remount_fs = fat_remount, |
636 | 636 | ||
637 | .read_inode = make_bad_inode, | ||
638 | |||
639 | .show_options = fat_show_options, | 637 | .show_options = fat_show_options, |
640 | }; | 638 | }; |
641 | 639 | ||
@@ -663,8 +661,8 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, | |||
663 | if (fh_len < 5 || fh_type != 3) | 661 | if (fh_len < 5 || fh_type != 3) |
664 | return NULL; | 662 | return NULL; |
665 | 663 | ||
666 | inode = iget(sb, fh[0]); | 664 | inode = ilookup(sb, fh[0]); |
667 | if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) { | 665 | if (!inode || inode->i_generation != fh[1]) { |
668 | if (inode) | 666 | if (inode) |
669 | iput(inode); | 667 | iput(inode); |
670 | inode = NULL; | 668 | inode = NULL; |
@@ -760,7 +758,7 @@ static struct dentry *fat_get_parent(struct dentry *child) | |||
760 | inode = fat_build_inode(child->d_sb, de, i_pos); | 758 | inode = fat_build_inode(child->d_sb, de, i_pos); |
761 | brelse(bh); | 759 | brelse(bh); |
762 | if (IS_ERR(inode)) { | 760 | if (IS_ERR(inode)) { |
763 | parent = ERR_PTR(PTR_ERR(inode)); | 761 | parent = ERR_CAST(inode); |
764 | goto out; | 762 | goto out; |
765 | } | 763 | } |
766 | parent = d_alloc_anon(inode); | 764 | parent = d_alloc_anon(inode); |
@@ -839,6 +837,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt) | |||
839 | if (!opts->numtail) | 837 | if (!opts->numtail) |
840 | seq_puts(m, ",nonumtail"); | 838 | seq_puts(m, ",nonumtail"); |
841 | } | 839 | } |
840 | if (sbi->options.flush) | ||
841 | seq_puts(m, ",flush"); | ||
842 | 842 | ||
843 | return 0; | 843 | return 0; |
844 | } | 844 | } |
@@ -1295,10 +1295,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, | |||
1295 | 1295 | ||
1296 | fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data; | 1296 | fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data; |
1297 | if (!IS_FSINFO(fsinfo)) { | 1297 | if (!IS_FSINFO(fsinfo)) { |
1298 | printk(KERN_WARNING | 1298 | printk(KERN_WARNING "FAT: Invalid FSINFO signature: " |
1299 | "FAT: Did not find valid FSINFO signature.\n" | 1299 | "0x%08x, 0x%08x (sector = %lu)\n", |
1300 | " Found signature1 0x%08x signature2 0x%08x" | ||
1301 | " (sector = %lu)\n", | ||
1302 | le32_to_cpu(fsinfo->signature1), | 1300 | le32_to_cpu(fsinfo->signature1), |
1303 | le32_to_cpu(fsinfo->signature2), | 1301 | le32_to_cpu(fsinfo->signature2), |
1304 | sbi->fsinfo_sector); | 1302 | sbi->fsinfo_sector); |
diff --git a/fs/fat/misc.c b/fs/fat/misc.c index 308f2b6b5026..61f23511eacf 100644 --- a/fs/fat/misc.c +++ b/fs/fat/misc.c | |||
@@ -55,9 +55,8 @@ void fat_clusters_flush(struct super_block *sb) | |||
55 | fsinfo = (struct fat_boot_fsinfo *)bh->b_data; | 55 | fsinfo = (struct fat_boot_fsinfo *)bh->b_data; |
56 | /* Sanity check */ | 56 | /* Sanity check */ |
57 | if (!IS_FSINFO(fsinfo)) { | 57 | if (!IS_FSINFO(fsinfo)) { |
58 | printk(KERN_ERR "FAT: Did not find valid FSINFO signature.\n" | 58 | printk(KERN_ERR "FAT: Invalid FSINFO signature: " |
59 | " Found signature1 0x%08x signature2 0x%08x" | 59 | "0x%08x, 0x%08x (sector = %lu)\n", |
60 | " (sector = %lu)\n", | ||
61 | le32_to_cpu(fsinfo->signature1), | 60 | le32_to_cpu(fsinfo->signature1), |
62 | le32_to_cpu(fsinfo->signature2), | 61 | le32_to_cpu(fsinfo->signature2), |
63 | sbi->fsinfo_sector); | 62 | sbi->fsinfo_sector); |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 8685263ccc4a..e632da761fc1 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <asm/siginfo.h> | 24 | #include <asm/siginfo.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | 26 | ||
27 | void fastcall set_close_on_exec(unsigned int fd, int flag) | 27 | void set_close_on_exec(unsigned int fd, int flag) |
28 | { | 28 | { |
29 | struct files_struct *files = current->files; | 29 | struct files_struct *files = current->files; |
30 | struct fdtable *fdt; | 30 | struct fdtable *fdt; |
@@ -309,7 +309,7 @@ pid_t f_getown(struct file *filp) | |||
309 | { | 309 | { |
310 | pid_t pid; | 310 | pid_t pid; |
311 | read_lock(&filp->f_owner.lock); | 311 | read_lock(&filp->f_owner.lock); |
312 | pid = pid_nr_ns(filp->f_owner.pid, current->nsproxy->pid_ns); | 312 | pid = pid_vnr(filp->f_owner.pid); |
313 | if (filp->f_owner.pid_type == PIDTYPE_PGID) | 313 | if (filp->f_owner.pid_type == PIDTYPE_PGID) |
314 | pid = -pid; | 314 | pid = -pid; |
315 | read_unlock(&filp->f_owner.lock); | 315 | read_unlock(&filp->f_owner.lock); |
@@ -24,6 +24,8 @@ struct fdtable_defer { | |||
24 | struct fdtable *next; | 24 | struct fdtable *next; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | int sysctl_nr_open __read_mostly = 1024*1024; | ||
28 | |||
27 | /* | 29 | /* |
28 | * We use this list to defer free fdtables that have vmalloced | 30 | * We use this list to defer free fdtables that have vmalloced |
29 | * sets/arrays. By keeping a per-cpu list, we avoid having to embed | 31 | * sets/arrays. By keeping a per-cpu list, we avoid having to embed |
@@ -147,8 +149,8 @@ static struct fdtable * alloc_fdtable(unsigned int nr) | |||
147 | nr /= (1024 / sizeof(struct file *)); | 149 | nr /= (1024 / sizeof(struct file *)); |
148 | nr = roundup_pow_of_two(nr + 1); | 150 | nr = roundup_pow_of_two(nr + 1); |
149 | nr *= (1024 / sizeof(struct file *)); | 151 | nr *= (1024 / sizeof(struct file *)); |
150 | if (nr > NR_OPEN) | 152 | if (nr > sysctl_nr_open) |
151 | nr = NR_OPEN; | 153 | nr = sysctl_nr_open; |
152 | 154 | ||
153 | fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL); | 155 | fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL); |
154 | if (!fdt) | 156 | if (!fdt) |
@@ -233,7 +235,7 @@ int expand_files(struct files_struct *files, int nr) | |||
233 | if (nr < fdt->max_fds) | 235 | if (nr < fdt->max_fds) |
234 | return 0; | 236 | return 0; |
235 | /* Can we expand? */ | 237 | /* Can we expand? */ |
236 | if (nr >= NR_OPEN) | 238 | if (nr >= sysctl_nr_open) |
237 | return -EMFILE; | 239 | return -EMFILE; |
238 | 240 | ||
239 | /* All good, so we try */ | 241 | /* All good, so we try */ |
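The fdtable hunks replace the compile-time NR_OPEN ceiling with the runtime-tunable sysctl_nr_open; the sizing arithmetic itself is unchanged. A small runnable userspace sketch of that arithmetic, with roundup_pow_of_two() re-implemented locally and sizeof(void *) standing in for sizeof(struct file *):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int x)
    {
        unsigned int r = 1;

        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int sysctl_nr_open = 1024 * 1024;      /* new default, now tunable at runtime */
        unsigned int per_kb = 1024 / sizeof(void *);    /* slots per kilobyte of pointer array */
        unsigned int nr = 3000;                         /* requested highest descriptor */

        nr /= per_kb;                                   /* same steps as alloc_fdtable() */
        nr = roundup_pow_of_two(nr + 1);
        nr *= per_kb;
        if (nr > sysctl_nr_open)
            nr = sysctl_nr_open;                        /* clamp to the sysctl, not NR_OPEN */

        printf("fdtable sized for %u entries\n", nr);   /* 4096 on an LP64 box */
        return 0;
    }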
diff --git a/fs/file_table.c b/fs/file_table.c index 664e3f2309b8..6d27befe2d48 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
@@ -197,7 +197,7 @@ int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry, | |||
197 | } | 197 | } |
198 | EXPORT_SYMBOL(init_file); | 198 | EXPORT_SYMBOL(init_file); |
199 | 199 | ||
200 | void fastcall fput(struct file *file) | 200 | void fput(struct file *file) |
201 | { | 201 | { |
202 | if (atomic_dec_and_test(&file->f_count)) | 202 | if (atomic_dec_and_test(&file->f_count)) |
203 | __fput(file); | 203 | __fput(file); |
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(fput); | |||
208 | /* __fput is called from task context when aio completion releases the last | 208 | /* __fput is called from task context when aio completion releases the last |
209 | * last use of a struct file *. Do not use otherwise. | 209 | * last use of a struct file *. Do not use otherwise. |
210 | */ | 210 | */ |
211 | void fastcall __fput(struct file *file) | 211 | void __fput(struct file *file) |
212 | { | 212 | { |
213 | struct dentry *dentry = file->f_path.dentry; | 213 | struct dentry *dentry = file->f_path.dentry; |
214 | struct vfsmount *mnt = file->f_path.mnt; | 214 | struct vfsmount *mnt = file->f_path.mnt; |
@@ -241,7 +241,7 @@ void fastcall __fput(struct file *file) | |||
241 | mntput(mnt); | 241 | mntput(mnt); |
242 | } | 242 | } |
243 | 243 | ||
244 | struct file fastcall *fget(unsigned int fd) | 244 | struct file *fget(unsigned int fd) |
245 | { | 245 | { |
246 | struct file *file; | 246 | struct file *file; |
247 | struct files_struct *files = current->files; | 247 | struct files_struct *files = current->files; |
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(fget); | |||
269 | * and a flag is returned to be passed to the corresponding fput_light(). | 269 | * and a flag is returned to be passed to the corresponding fput_light(). |
270 | * There must not be a cloning between an fget_light/fput_light pair. | 270 | * There must not be a cloning between an fget_light/fput_light pair. |
271 | */ | 271 | */ |
272 | struct file fastcall *fget_light(unsigned int fd, int *fput_needed) | 272 | struct file *fget_light(unsigned int fd, int *fput_needed) |
273 | { | 273 | { |
274 | struct file *file; | 274 | struct file *file; |
275 | struct files_struct *files = current->files; | 275 | struct files_struct *files = current->files; |
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h index 91ccee8723f7..2b46064f66b2 100644 --- a/fs/freevxfs/vxfs_extern.h +++ b/fs/freevxfs/vxfs_extern.h | |||
@@ -58,7 +58,7 @@ extern struct inode * vxfs_get_fake_inode(struct super_block *, | |||
58 | extern void vxfs_put_fake_inode(struct inode *); | 58 | extern void vxfs_put_fake_inode(struct inode *); |
59 | extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t); | 59 | extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t); |
60 | extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t); | 60 | extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t); |
61 | extern void vxfs_read_inode(struct inode *); | 61 | extern struct inode * vxfs_iget(struct super_block *, ino_t); |
62 | extern void vxfs_clear_inode(struct inode *); | 62 | extern void vxfs_clear_inode(struct inode *); |
63 | 63 | ||
64 | /* vxfs_lookup.c */ | 64 | /* vxfs_lookup.c */ |
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index d1f7c5b5b3c3..ad88d2364bc2 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c | |||
@@ -129,7 +129,7 @@ fail: | |||
129 | * Description: | 129 | * Description: |
130 | * Search the for inode number @ino in the filesystem | 130 | * Search the for inode number @ino in the filesystem |
131 | * described by @sbp. Use the specified inode table (@ilistp). | 131 | * described by @sbp. Use the specified inode table (@ilistp). |
132 | * Returns the matching VxFS inode on success, else a NULL pointer. | 132 | * Returns the matching VxFS inode on success, else an error code. |
133 | */ | 133 | */ |
134 | static struct vxfs_inode_info * | 134 | static struct vxfs_inode_info * |
135 | __vxfs_iget(ino_t ino, struct inode *ilistp) | 135 | __vxfs_iget(ino_t ino, struct inode *ilistp) |
@@ -157,12 +157,12 @@ __vxfs_iget(ino_t ino, struct inode *ilistp) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | printk(KERN_WARNING "vxfs: error on page %p\n", pp); | 159 | printk(KERN_WARNING "vxfs: error on page %p\n", pp); |
160 | return NULL; | 160 | return ERR_CAST(pp); |
161 | 161 | ||
162 | fail: | 162 | fail: |
163 | printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino); | 163 | printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino); |
164 | vxfs_put_page(pp); | 164 | vxfs_put_page(pp); |
165 | return NULL; | 165 | return ERR_PTR(-ENOMEM); |
166 | } | 166 | } |
167 | 167 | ||
168 | /** | 168 | /** |
@@ -178,7 +178,10 @@ fail: | |||
178 | struct vxfs_inode_info * | 178 | struct vxfs_inode_info * |
179 | vxfs_stiget(struct super_block *sbp, ino_t ino) | 179 | vxfs_stiget(struct super_block *sbp, ino_t ino) |
180 | { | 180 | { |
181 | return __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist); | 181 | struct vxfs_inode_info *vip; |
182 | |||
183 | vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist); | ||
184 | return IS_ERR(vip) ? NULL : vip; | ||
182 | } | 185 | } |
183 | 186 | ||
184 | /** | 187 | /** |
@@ -282,23 +285,32 @@ vxfs_put_fake_inode(struct inode *ip) | |||
282 | } | 285 | } |
283 | 286 | ||
284 | /** | 287 | /** |
285 | * vxfs_read_inode - fill in inode information | 288 | * vxfs_iget - get an inode |
286 | * @ip: inode pointer to fill | 289 | * @sbp: the superblock to get the inode for |
290 | * @ino: the number of the inode to get | ||
287 | * | 291 | * |
288 | * Description: | 292 | * Description: |
289 | * vxfs_read_inode reads the disk inode for @ip and fills | 293 | * vxfs_iget creates an inode, reads the disk inode for @ino and fills |
290 | * in all relevant fields in @ip. | 294 | * in all relevant fields in the new inode. |
291 | */ | 295 | */ |
292 | void | 296 | struct inode * |
293 | vxfs_read_inode(struct inode *ip) | 297 | vxfs_iget(struct super_block *sbp, ino_t ino) |
294 | { | 298 | { |
295 | struct super_block *sbp = ip->i_sb; | ||
296 | struct vxfs_inode_info *vip; | 299 | struct vxfs_inode_info *vip; |
297 | const struct address_space_operations *aops; | 300 | const struct address_space_operations *aops; |
298 | ino_t ino = ip->i_ino; | 301 | struct inode *ip; |
299 | 302 | ||
300 | if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist))) | 303 | ip = iget_locked(sbp, ino); |
301 | return; | 304 | if (!ip) |
305 | return ERR_PTR(-ENOMEM); | ||
306 | if (!(ip->i_state & I_NEW)) | ||
307 | return ip; | ||
308 | |||
309 | vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist); | ||
310 | if (IS_ERR(vip)) { | ||
311 | iget_failed(ip); | ||
312 | return ERR_CAST(vip); | ||
313 | } | ||
302 | 314 | ||
303 | vxfs_iinit(ip, vip); | 315 | vxfs_iinit(ip, vip); |
304 | 316 | ||
@@ -323,7 +335,8 @@ vxfs_read_inode(struct inode *ip) | |||
323 | } else | 335 | } else |
324 | init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); | 336 | init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); |
325 | 337 | ||
326 | return; | 338 | unlock_new_inode(ip); |
339 | return ip; | ||
327 | } | 340 | } |
328 | 341 | ||
329 | /** | 342 | /** |
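With ->read_inode removed from vxfs_super_ops, vxfs_iget() follows the standard iget_locked() protocol. A condensed, non-compilable sketch of that protocol as used above; foo_read_disk_inode() is a hypothetical per-filesystem read step, not a real function:

    /* Generic shape of an iget_locked()-based lookup, as used by vxfs_iget(). */
    struct inode *inode = iget_locked(sb, ino);

    if (!inode)
        return ERR_PTR(-ENOMEM);        /* inode cache allocation failed */
    if (!(inode->i_state & I_NEW))
        return inode;                   /* already cached and fully initialised */

    if (foo_read_disk_inode(inode) < 0) {
        iget_failed(inode);             /* unlock, mark bad, drop the half-built inode */
        return ERR_PTR(-EIO);
    }
    unlock_new_inode(inode);            /* publish the now-initialised inode */
    return inode;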
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index bf86e5444ea6..aee049cb9f84 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c | |||
@@ -213,10 +213,10 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd) | |||
213 | lock_kernel(); | 213 | lock_kernel(); |
214 | ino = vxfs_inode_by_name(dip, dp); | 214 | ino = vxfs_inode_by_name(dip, dp); |
215 | if (ino) { | 215 | if (ino) { |
216 | ip = iget(dip->i_sb, ino); | 216 | ip = vxfs_iget(dip->i_sb, ino); |
217 | if (!ip) { | 217 | if (IS_ERR(ip)) { |
218 | unlock_kernel(); | 218 | unlock_kernel(); |
219 | return ERR_PTR(-EACCES); | 219 | return ERR_CAST(ip); |
220 | } | 220 | } |
221 | } | 221 | } |
222 | unlock_kernel(); | 222 | unlock_kernel(); |
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index 4f95572d2722..1dacda831577 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c | |||
@@ -60,7 +60,6 @@ static int vxfs_statfs(struct dentry *, struct kstatfs *); | |||
60 | static int vxfs_remount(struct super_block *, int *, char *); | 60 | static int vxfs_remount(struct super_block *, int *, char *); |
61 | 61 | ||
62 | static const struct super_operations vxfs_super_ops = { | 62 | static const struct super_operations vxfs_super_ops = { |
63 | .read_inode = vxfs_read_inode, | ||
64 | .clear_inode = vxfs_clear_inode, | 63 | .clear_inode = vxfs_clear_inode, |
65 | .put_super = vxfs_put_super, | 64 | .put_super = vxfs_put_super, |
66 | .statfs = vxfs_statfs, | 65 | .statfs = vxfs_statfs, |
@@ -153,6 +152,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) | |||
153 | struct buffer_head *bp = NULL; | 152 | struct buffer_head *bp = NULL; |
154 | u_long bsize; | 153 | u_long bsize; |
155 | struct inode *root; | 154 | struct inode *root; |
155 | int ret = -EINVAL; | ||
156 | 156 | ||
157 | sbp->s_flags |= MS_RDONLY; | 157 | sbp->s_flags |= MS_RDONLY; |
158 | 158 | ||
@@ -219,7 +219,11 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | sbp->s_op = &vxfs_super_ops; | 221 | sbp->s_op = &vxfs_super_ops; |
222 | root = iget(sbp, VXFS_ROOT_INO); | 222 | root = vxfs_iget(sbp, VXFS_ROOT_INO); |
223 | if (IS_ERR(root)) { | ||
224 | ret = PTR_ERR(root); | ||
225 | goto out; | ||
226 | } | ||
223 | sbp->s_root = d_alloc_root(root); | 227 | sbp->s_root = d_alloc_root(root); |
224 | if (!sbp->s_root) { | 228 | if (!sbp->s_root) { |
225 | iput(root); | 229 | iput(root); |
@@ -236,7 +240,7 @@ out_free_ilist: | |||
236 | out: | 240 | out: |
237 | brelse(bp); | 241 | brelse(bp); |
238 | kfree(infp); | 242 | kfree(infp); |
239 | return -EINVAL; | 243 | return ret; |
240 | } | 244 | } |
241 | 245 | ||
242 | /* | 246 | /* |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0b3064079fa5..c0076077d338 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -515,8 +515,7 @@ writeback_inodes(struct writeback_control *wbc) | |||
515 | might_sleep(); | 515 | might_sleep(); |
516 | spin_lock(&sb_lock); | 516 | spin_lock(&sb_lock); |
517 | restart: | 517 | restart: |
518 | sb = sb_entry(super_blocks.prev); | 518 | list_for_each_entry_reverse(sb, &super_blocks, s_list) { |
519 | for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) { | ||
520 | if (sb_has_dirty_inodes(sb)) { | 519 | if (sb_has_dirty_inodes(sb)) { |
521 | /* we're making our own get_super here */ | 520 | /* we're making our own get_super here */ |
522 | sb->s_count++; | 521 | sb->s_count++; |
@@ -581,10 +580,8 @@ static void set_sb_syncing(int val) | |||
581 | { | 580 | { |
582 | struct super_block *sb; | 581 | struct super_block *sb; |
583 | spin_lock(&sb_lock); | 582 | spin_lock(&sb_lock); |
584 | sb = sb_entry(super_blocks.prev); | 583 | list_for_each_entry_reverse(sb, &super_blocks, s_list) |
585 | for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) { | ||
586 | sb->s_syncing = val; | 584 | sb->s_syncing = val; |
587 | } | ||
588 | spin_unlock(&sb_lock); | 585 | spin_unlock(&sb_lock); |
589 | } | 586 | } |
590 | 587 | ||
@@ -658,7 +655,7 @@ int write_inode_now(struct inode *inode, int sync) | |||
658 | int ret; | 655 | int ret; |
659 | struct writeback_control wbc = { | 656 | struct writeback_control wbc = { |
660 | .nr_to_write = LONG_MAX, | 657 | .nr_to_write = LONG_MAX, |
661 | .sync_mode = WB_SYNC_ALL, | 658 | .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, |
662 | .range_start = 0, | 659 | .range_start = 0, |
663 | .range_end = LLONG_MAX, | 660 | .range_end = LLONG_MAX, |
664 | }; | 661 | }; |
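The writeback hunks replace the open-coded sb_entry() walk with list_for_each_entry_reverse(). To show what that macro does in isolation, here is a self-contained userspace demo with simplified copies of the kernel's list helpers; the real ones live in <linux/list.h>, and struct super_block is reduced to a stub:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each_entry_reverse(pos, head, member)                 \
        for (pos = container_of((head)->prev, typeof(*pos), member);      \
             &pos->member != (head);                                      \
             pos = container_of(pos->member.prev, typeof(*pos), member))

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    struct super_block { int s_id; struct list_head s_list; };

    int main(void)
    {
        struct list_head super_blocks = LIST_HEAD_INIT(super_blocks);
        struct super_block sbs[3] = { { .s_id = 1 }, { .s_id = 2 }, { .s_id = 3 } };
        struct super_block *sb;

        for (int i = 0; i < 3; i++)
            list_add_tail(&sbs[i].s_list, &super_blocks);

        /* Walk newest-to-oldest, as writeback_inodes()/set_sb_syncing() now do. */
        list_for_each_entry_reverse(sb, &super_blocks, s_list)
            printf("sb %d\n", sb->s_id);    /* prints 3, 2, 1 */
        return 0;
    }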
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index db534bcde45f..af639807524e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -201,6 +201,55 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) | |||
201 | } | 201 | } |
202 | } | 202 | } |
203 | 203 | ||
204 | static unsigned len_args(unsigned numargs, struct fuse_arg *args) | ||
205 | { | ||
206 | unsigned nbytes = 0; | ||
207 | unsigned i; | ||
208 | |||
209 | for (i = 0; i < numargs; i++) | ||
210 | nbytes += args[i].size; | ||
211 | |||
212 | return nbytes; | ||
213 | } | ||
214 | |||
215 | static u64 fuse_get_unique(struct fuse_conn *fc) | ||
216 | { | ||
217 | fc->reqctr++; | ||
218 | /* zero is special */ | ||
219 | if (fc->reqctr == 0) | ||
220 | fc->reqctr = 1; | ||
221 | |||
222 | return fc->reqctr; | ||
223 | } | ||
224 | |||
225 | static void queue_request(struct fuse_conn *fc, struct fuse_req *req) | ||
226 | { | ||
227 | req->in.h.unique = fuse_get_unique(fc); | ||
228 | req->in.h.len = sizeof(struct fuse_in_header) + | ||
229 | len_args(req->in.numargs, (struct fuse_arg *) req->in.args); | ||
230 | list_add_tail(&req->list, &fc->pending); | ||
231 | req->state = FUSE_REQ_PENDING; | ||
232 | if (!req->waiting) { | ||
233 | req->waiting = 1; | ||
234 | atomic_inc(&fc->num_waiting); | ||
235 | } | ||
236 | wake_up(&fc->waitq); | ||
237 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | ||
238 | } | ||
239 | |||
240 | static void flush_bg_queue(struct fuse_conn *fc) | ||
241 | { | ||
242 | while (fc->active_background < FUSE_MAX_BACKGROUND && | ||
243 | !list_empty(&fc->bg_queue)) { | ||
244 | struct fuse_req *req; | ||
245 | |||
246 | req = list_entry(fc->bg_queue.next, struct fuse_req, list); | ||
247 | list_del(&req->list); | ||
248 | fc->active_background++; | ||
249 | queue_request(fc, req); | ||
250 | } | ||
251 | } | ||
252 | |||
204 | /* | 253 | /* |
205 | * This function is called when a request is finished. Either a reply | 254 | * This function is called when a request is finished. Either a reply |
206 | * has arrived or it was aborted (and not yet sent) or some error | 255 | * has arrived or it was aborted (and not yet sent) or some error |
@@ -229,6 +278,8 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) | |||
229 | clear_bdi_congested(&fc->bdi, WRITE); | 278 | clear_bdi_congested(&fc->bdi, WRITE); |
230 | } | 279 | } |
231 | fc->num_background--; | 280 | fc->num_background--; |
281 | fc->active_background--; | ||
282 | flush_bg_queue(fc); | ||
232 | } | 283 | } |
233 | spin_unlock(&fc->lock); | 284 | spin_unlock(&fc->lock); |
234 | wake_up(&req->waitq); | 285 | wake_up(&req->waitq); |
@@ -320,42 +371,6 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | |||
320 | } | 371 | } |
321 | } | 372 | } |
322 | 373 | ||
323 | static unsigned len_args(unsigned numargs, struct fuse_arg *args) | ||
324 | { | ||
325 | unsigned nbytes = 0; | ||
326 | unsigned i; | ||
327 | |||
328 | for (i = 0; i < numargs; i++) | ||
329 | nbytes += args[i].size; | ||
330 | |||
331 | return nbytes; | ||
332 | } | ||
333 | |||
334 | static u64 fuse_get_unique(struct fuse_conn *fc) | ||
335 | { | ||
336 | fc->reqctr++; | ||
337 | /* zero is special */ | ||
338 | if (fc->reqctr == 0) | ||
339 | fc->reqctr = 1; | ||
340 | |||
341 | return fc->reqctr; | ||
342 | } | ||
343 | |||
344 | static void queue_request(struct fuse_conn *fc, struct fuse_req *req) | ||
345 | { | ||
346 | req->in.h.unique = fuse_get_unique(fc); | ||
347 | req->in.h.len = sizeof(struct fuse_in_header) + | ||
348 | len_args(req->in.numargs, (struct fuse_arg *) req->in.args); | ||
349 | list_add_tail(&req->list, &fc->pending); | ||
350 | req->state = FUSE_REQ_PENDING; | ||
351 | if (!req->waiting) { | ||
352 | req->waiting = 1; | ||
353 | atomic_inc(&fc->num_waiting); | ||
354 | } | ||
355 | wake_up(&fc->waitq); | ||
356 | kill_fasync(&fc->fasync, SIGIO, POLL_IN); | ||
357 | } | ||
358 | |||
359 | void request_send(struct fuse_conn *fc, struct fuse_req *req) | 374 | void request_send(struct fuse_conn *fc, struct fuse_req *req) |
360 | { | 375 | { |
361 | req->isreply = 1; | 376 | req->isreply = 1; |
@@ -375,20 +390,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req) | |||
375 | spin_unlock(&fc->lock); | 390 | spin_unlock(&fc->lock); |
376 | } | 391 | } |
377 | 392 | ||
393 | static void request_send_nowait_locked(struct fuse_conn *fc, | ||
394 | struct fuse_req *req) | ||
395 | { | ||
396 | req->background = 1; | ||
397 | fc->num_background++; | ||
398 | if (fc->num_background == FUSE_MAX_BACKGROUND) | ||
399 | fc->blocked = 1; | ||
400 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { | ||
401 | set_bdi_congested(&fc->bdi, READ); | ||
402 | set_bdi_congested(&fc->bdi, WRITE); | ||
403 | } | ||
404 | list_add_tail(&req->list, &fc->bg_queue); | ||
405 | flush_bg_queue(fc); | ||
406 | } | ||
407 | |||
378 | static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) | 408 | static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) |
379 | { | 409 | { |
380 | spin_lock(&fc->lock); | 410 | spin_lock(&fc->lock); |
381 | if (fc->connected) { | 411 | if (fc->connected) { |
382 | req->background = 1; | 412 | request_send_nowait_locked(fc, req); |
383 | fc->num_background++; | ||
384 | if (fc->num_background == FUSE_MAX_BACKGROUND) | ||
385 | fc->blocked = 1; | ||
386 | if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { | ||
387 | set_bdi_congested(&fc->bdi, READ); | ||
388 | set_bdi_congested(&fc->bdi, WRITE); | ||
389 | } | ||
390 | |||
391 | queue_request(fc, req); | ||
392 | spin_unlock(&fc->lock); | 413 | spin_unlock(&fc->lock); |
393 | } else { | 414 | } else { |
394 | req->out.h.error = -ENOTCONN; | 415 | req->out.h.error = -ENOTCONN; |
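The fuse/dev.c hunks add a holding queue for background requests: request_send_nowait_locked() parks them on fc->bg_queue, flush_bg_queue() promotes at most FUSE_MAX_BACKGROUND of them onto the pending list, and request_end() frees a slot and flushes again. A runnable toy model of just that accounting, with counters instead of lists and 12 used as an illustrative value for FUSE_MAX_BACKGROUND:

    #include <stdio.h>

    #define FUSE_MAX_BACKGROUND 12

    static int bg_queued;            /* requests waiting on fc->bg_queue */
    static int active_background;    /* requests currently queued for userspace */

    static void flush_bg_queue(void)
    {
        while (active_background < FUSE_MAX_BACKGROUND && bg_queued > 0) {
            bg_queued--;
            active_background++;     /* queue_request() in the real code */
        }
    }

    static void request_send_background(void)
    {
        bg_queued++;                 /* list_add_tail(&req->list, &fc->bg_queue) */
        flush_bg_queue();
    }

    static void request_end(void)
    {
        active_background--;
        flush_bg_queue();
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            request_send_background();
        printf("active=%d parked=%d\n", active_background, bg_queued);  /* 12 and 8 */
        request_end();
        printf("active=%d parked=%d\n", active_background, bg_queued);  /* 12 and 7 */
        return 0;
    }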
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 80d2f5292cf9..7fb514b6d852 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -269,12 +269,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, | |||
269 | 269 | ||
270 | req = fuse_get_req(fc); | 270 | req = fuse_get_req(fc); |
271 | if (IS_ERR(req)) | 271 | if (IS_ERR(req)) |
272 | return ERR_PTR(PTR_ERR(req)); | 272 | return ERR_CAST(req); |
273 | 273 | ||
274 | forget_req = fuse_get_req(fc); | 274 | forget_req = fuse_get_req(fc); |
275 | if (IS_ERR(forget_req)) { | 275 | if (IS_ERR(forget_req)) { |
276 | fuse_put_request(fc, req); | 276 | fuse_put_request(fc, req); |
277 | return ERR_PTR(PTR_ERR(forget_req)); | 277 | return ERR_CAST(forget_req); |
278 | } | 278 | } |
279 | 279 | ||
280 | attr_version = fuse_get_attr_version(fc); | 280 | attr_version = fuse_get_attr_version(fc); |
@@ -416,6 +416,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
416 | fuse_put_request(fc, forget_req); | 416 | fuse_put_request(fc, forget_req); |
417 | d_instantiate(entry, inode); | 417 | d_instantiate(entry, inode); |
418 | fuse_change_entry_timeout(entry, &outentry); | 418 | fuse_change_entry_timeout(entry, &outentry); |
419 | fuse_invalidate_attr(dir); | ||
419 | file = lookup_instantiate_filp(nd, entry, generic_file_open); | 420 | file = lookup_instantiate_filp(nd, entry, generic_file_open); |
420 | if (IS_ERR(file)) { | 421 | if (IS_ERR(file)) { |
421 | ff->fh = outopen.fh; | 422 | ff->fh = outopen.fh; |
@@ -1005,7 +1006,7 @@ static char *read_link(struct dentry *dentry) | |||
1005 | char *link; | 1006 | char *link; |
1006 | 1007 | ||
1007 | if (IS_ERR(req)) | 1008 | if (IS_ERR(req)) |
1008 | return ERR_PTR(PTR_ERR(req)); | 1009 | return ERR_CAST(req); |
1009 | 1010 | ||
1010 | link = (char *) __get_free_page(GFP_KERNEL); | 1011 | link = (char *) __get_free_page(GFP_KERNEL); |
1011 | if (!link) { | 1012 | if (!link) { |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index bb05d227cf30..676b0bc8a86d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -77,8 +77,8 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff) | |||
77 | 77 | ||
78 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) | 78 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
79 | { | 79 | { |
80 | dput(req->dentry); | 80 | dput(req->misc.release.dentry); |
81 | mntput(req->vfsmount); | 81 | mntput(req->misc.release.vfsmount); |
82 | fuse_put_request(fc, req); | 82 | fuse_put_request(fc, req); |
83 | } | 83 | } |
84 | 84 | ||
@@ -86,7 +86,8 @@ static void fuse_file_put(struct fuse_file *ff) | |||
86 | { | 86 | { |
87 | if (atomic_dec_and_test(&ff->count)) { | 87 | if (atomic_dec_and_test(&ff->count)) { |
88 | struct fuse_req *req = ff->reserved_req; | 88 | struct fuse_req *req = ff->reserved_req; |
89 | struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode); | 89 | struct inode *inode = req->misc.release.dentry->d_inode; |
90 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
90 | req->end = fuse_release_end; | 91 | req->end = fuse_release_end; |
91 | request_send_background(fc, req); | 92 | request_send_background(fc, req); |
92 | kfree(ff); | 93 | kfree(ff); |
@@ -137,7 +138,7 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) | |||
137 | void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) | 138 | void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) |
138 | { | 139 | { |
139 | struct fuse_req *req = ff->reserved_req; | 140 | struct fuse_req *req = ff->reserved_req; |
140 | struct fuse_release_in *inarg = &req->misc.release_in; | 141 | struct fuse_release_in *inarg = &req->misc.release.in; |
141 | 142 | ||
142 | inarg->fh = ff->fh; | 143 | inarg->fh = ff->fh; |
143 | inarg->flags = flags; | 144 | inarg->flags = flags; |
@@ -153,13 +154,14 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir) | |||
153 | struct fuse_file *ff = file->private_data; | 154 | struct fuse_file *ff = file->private_data; |
154 | if (ff) { | 155 | if (ff) { |
155 | struct fuse_conn *fc = get_fuse_conn(inode); | 156 | struct fuse_conn *fc = get_fuse_conn(inode); |
157 | struct fuse_req *req = ff->reserved_req; | ||
156 | 158 | ||
157 | fuse_release_fill(ff, get_node_id(inode), file->f_flags, | 159 | fuse_release_fill(ff, get_node_id(inode), file->f_flags, |
158 | isdir ? FUSE_RELEASEDIR : FUSE_RELEASE); | 160 | isdir ? FUSE_RELEASEDIR : FUSE_RELEASE); |
159 | 161 | ||
160 | /* Hold vfsmount and dentry until release is finished */ | 162 | /* Hold vfsmount and dentry until release is finished */ |
161 | ff->reserved_req->vfsmount = mntget(file->f_path.mnt); | 163 | req->misc.release.vfsmount = mntget(file->f_path.mnt); |
162 | ff->reserved_req->dentry = dget(file->f_path.dentry); | 164 | req->misc.release.dentry = dget(file->f_path.dentry); |
163 | 165 | ||
164 | spin_lock(&fc->lock); | 166 | spin_lock(&fc->lock); |
165 | list_del(&ff->write_entry); | 167 | list_del(&ff->write_entry); |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 3ab8a3048e8b..67aaf6ee38ea 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -215,7 +215,11 @@ struct fuse_req { | |||
215 | /** Data for asynchronous requests */ | 215 | /** Data for asynchronous requests */ |
216 | union { | 216 | union { |
217 | struct fuse_forget_in forget_in; | 217 | struct fuse_forget_in forget_in; |
218 | struct fuse_release_in release_in; | 218 | struct { |
219 | struct fuse_release_in in; | ||
220 | struct vfsmount *vfsmount; | ||
221 | struct dentry *dentry; | ||
222 | } release; | ||
219 | struct fuse_init_in init_in; | 223 | struct fuse_init_in init_in; |
220 | struct fuse_init_out init_out; | 224 | struct fuse_init_out init_out; |
221 | struct fuse_read_in read_in; | 225 | struct fuse_read_in read_in; |
@@ -238,12 +242,6 @@ struct fuse_req { | |||
238 | /** File used in the request (or NULL) */ | 242 | /** File used in the request (or NULL) */ |
239 | struct fuse_file *ff; | 243 | struct fuse_file *ff; |
240 | 244 | ||
241 | /** vfsmount used in release */ | ||
242 | struct vfsmount *vfsmount; | ||
243 | |||
244 | /** dentry used in release */ | ||
245 | struct dentry *dentry; | ||
246 | |||
247 | /** Request completion callback */ | 245 | /** Request completion callback */ |
248 | void (*end)(struct fuse_conn *, struct fuse_req *); | 246 | void (*end)(struct fuse_conn *, struct fuse_req *); |
249 | 247 | ||
@@ -298,6 +296,12 @@ struct fuse_conn { | |||
298 | /** Number of requests currently in the background */ | 296 | /** Number of requests currently in the background */ |
299 | unsigned num_background; | 297 | unsigned num_background; |
300 | 298 | ||
299 | /** Number of background requests currently queued for userspace */ | ||
300 | unsigned active_background; | ||
301 | |||
302 | /** The list of background requests set aside for later queuing */ | ||
303 | struct list_head bg_queue; | ||
304 | |||
301 | /** Pending interrupts */ | 305 | /** Pending interrupts */ |
302 | struct list_head interrupts; | 306 | struct list_head interrupts; |
303 | 307 | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index e5e80d1a4687..033f7bdd47e8 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -29,6 +29,8 @@ DEFINE_MUTEX(fuse_mutex); | |||
29 | 29 | ||
30 | #define FUSE_SUPER_MAGIC 0x65735546 | 30 | #define FUSE_SUPER_MAGIC 0x65735546 |
31 | 31 | ||
32 | #define FUSE_DEFAULT_BLKSIZE 512 | ||
33 | |||
32 | struct fuse_mount_data { | 34 | struct fuse_mount_data { |
33 | int fd; | 35 | int fd; |
34 | unsigned rootmode; | 36 | unsigned rootmode; |
@@ -76,11 +78,6 @@ static void fuse_destroy_inode(struct inode *inode) | |||
76 | kmem_cache_free(fuse_inode_cachep, inode); | 78 | kmem_cache_free(fuse_inode_cachep, inode); |
77 | } | 79 | } |
78 | 80 | ||
79 | static void fuse_read_inode(struct inode *inode) | ||
80 | { | ||
81 | /* No op */ | ||
82 | } | ||
83 | |||
84 | void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, | 81 | void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, |
85 | unsigned long nodeid, u64 nlookup) | 82 | unsigned long nodeid, u64 nlookup) |
86 | { | 83 | { |
@@ -360,7 +357,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) | |||
360 | char *p; | 357 | char *p; |
361 | memset(d, 0, sizeof(struct fuse_mount_data)); | 358 | memset(d, 0, sizeof(struct fuse_mount_data)); |
362 | d->max_read = ~0; | 359 | d->max_read = ~0; |
363 | d->blksize = 512; | 360 | d->blksize = FUSE_DEFAULT_BLKSIZE; |
364 | 361 | ||
365 | while ((p = strsep(&opt, ",")) != NULL) { | 362 | while ((p = strsep(&opt, ",")) != NULL) { |
366 | int token; | 363 | int token; |
@@ -445,6 +442,9 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt) | |||
445 | seq_puts(m, ",allow_other"); | 442 | seq_puts(m, ",allow_other"); |
446 | if (fc->max_read != ~0) | 443 | if (fc->max_read != ~0) |
447 | seq_printf(m, ",max_read=%u", fc->max_read); | 444 | seq_printf(m, ",max_read=%u", fc->max_read); |
445 | if (mnt->mnt_sb->s_bdev && | ||
446 | mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) | ||
447 | seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize); | ||
448 | return 0; | 448 | return 0; |
449 | } | 449 | } |
450 | 450 | ||
@@ -465,6 +465,7 @@ static struct fuse_conn *new_conn(void) | |||
465 | INIT_LIST_HEAD(&fc->processing); | 465 | INIT_LIST_HEAD(&fc->processing); |
466 | INIT_LIST_HEAD(&fc->io); | 466 | INIT_LIST_HEAD(&fc->io); |
467 | INIT_LIST_HEAD(&fc->interrupts); | 467 | INIT_LIST_HEAD(&fc->interrupts); |
468 | INIT_LIST_HEAD(&fc->bg_queue); | ||
468 | atomic_set(&fc->num_waiting, 0); | 469 | atomic_set(&fc->num_waiting, 0); |
469 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; | 470 | fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; |
470 | fc->bdi.unplug_io_fn = default_unplug_io_fn; | 471 | fc->bdi.unplug_io_fn = default_unplug_io_fn; |
@@ -514,7 +515,6 @@ static struct inode *get_root_inode(struct super_block *sb, unsigned mode) | |||
514 | static const struct super_operations fuse_super_operations = { | 515 | static const struct super_operations fuse_super_operations = { |
515 | .alloc_inode = fuse_alloc_inode, | 516 | .alloc_inode = fuse_alloc_inode, |
516 | .destroy_inode = fuse_destroy_inode, | 517 | .destroy_inode = fuse_destroy_inode, |
517 | .read_inode = fuse_read_inode, | ||
518 | .clear_inode = fuse_clear_inode, | 518 | .clear_inode = fuse_clear_inode, |
519 | .drop_inode = generic_delete_inode, | 519 | .drop_inode = generic_delete_inode, |
520 | .remount_fs = fuse_remount_fs, | 520 | .remount_fs = fuse_remount_fs, |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 57e2ed932adc..c34709512b19 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
@@ -1498,7 +1498,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name) | |||
1498 | dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh); | 1498 | dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh); |
1499 | if (dent) { | 1499 | if (dent) { |
1500 | if (IS_ERR(dent)) | 1500 | if (IS_ERR(dent)) |
1501 | return ERR_PTR(PTR_ERR(dent)); | 1501 | return ERR_CAST(dent); |
1502 | inode = gfs2_inode_lookup(dir->i_sb, | 1502 | inode = gfs2_inode_lookup(dir->i_sb, |
1503 | be16_to_cpu(dent->de_type), | 1503 | be16_to_cpu(dent->de_type), |
1504 | be64_to_cpu(dent->de_inum.no_addr), | 1504 | be64_to_cpu(dent->de_inum.no_addr), |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 80e09c50590a..7175a4d06435 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -334,7 +334,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, | |||
334 | gl->gl_state = LM_ST_UNLOCKED; | 334 | gl->gl_state = LM_ST_UNLOCKED; |
335 | gl->gl_demote_state = LM_ST_EXCLUSIVE; | 335 | gl->gl_demote_state = LM_ST_EXCLUSIVE; |
336 | gl->gl_hash = hash; | 336 | gl->gl_hash = hash; |
337 | gl->gl_owner_pid = 0; | 337 | gl->gl_owner_pid = NULL; |
338 | gl->gl_ip = 0; | 338 | gl->gl_ip = 0; |
339 | gl->gl_ops = glops; | 339 | gl->gl_ops = glops; |
340 | gl->gl_req_gh = NULL; | 340 | gl->gl_req_gh = NULL; |
@@ -399,7 +399,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags, | |||
399 | INIT_LIST_HEAD(&gh->gh_list); | 399 | INIT_LIST_HEAD(&gh->gh_list); |
400 | gh->gh_gl = gl; | 400 | gh->gh_gl = gl; |
401 | gh->gh_ip = (unsigned long)__builtin_return_address(0); | 401 | gh->gh_ip = (unsigned long)__builtin_return_address(0); |
402 | gh->gh_owner_pid = current->pid; | 402 | gh->gh_owner_pid = get_pid(task_pid(current)); |
403 | gh->gh_state = state; | 403 | gh->gh_state = state; |
404 | gh->gh_flags = flags; | 404 | gh->gh_flags = flags; |
405 | gh->gh_error = 0; | 405 | gh->gh_error = 0; |
@@ -433,6 +433,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder * | |||
433 | 433 | ||
434 | void gfs2_holder_uninit(struct gfs2_holder *gh) | 434 | void gfs2_holder_uninit(struct gfs2_holder *gh) |
435 | { | 435 | { |
436 | put_pid(gh->gh_owner_pid); | ||
436 | gfs2_glock_put(gh->gh_gl); | 437 | gfs2_glock_put(gh->gh_gl); |
437 | gh->gh_gl = NULL; | 438 | gh->gh_gl = NULL; |
438 | gh->gh_ip = 0; | 439 | gh->gh_ip = 0; |
@@ -631,7 +632,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl) | |||
631 | wait_on_holder(&gh); | 632 | wait_on_holder(&gh); |
632 | gfs2_holder_uninit(&gh); | 633 | gfs2_holder_uninit(&gh); |
633 | } else { | 634 | } else { |
634 | gl->gl_owner_pid = current->pid; | 635 | gl->gl_owner_pid = get_pid(task_pid(current)); |
635 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 636 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
636 | spin_unlock(&gl->gl_spin); | 637 | spin_unlock(&gl->gl_spin); |
637 | } | 638 | } |
@@ -652,7 +653,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl) | |||
652 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { | 653 | if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { |
653 | acquired = 0; | 654 | acquired = 0; |
654 | } else { | 655 | } else { |
655 | gl->gl_owner_pid = current->pid; | 656 | gl->gl_owner_pid = get_pid(task_pid(current)); |
656 | gl->gl_ip = (unsigned long)__builtin_return_address(0); | 657 | gl->gl_ip = (unsigned long)__builtin_return_address(0); |
657 | } | 658 | } |
658 | spin_unlock(&gl->gl_spin); | 659 | spin_unlock(&gl->gl_spin); |
@@ -668,12 +669,17 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl) | |||
668 | 669 | ||
669 | static void gfs2_glmutex_unlock(struct gfs2_glock *gl) | 670 | static void gfs2_glmutex_unlock(struct gfs2_glock *gl) |
670 | { | 671 | { |
672 | struct pid *pid; | ||
673 | |||
671 | spin_lock(&gl->gl_spin); | 674 | spin_lock(&gl->gl_spin); |
672 | clear_bit(GLF_LOCK, &gl->gl_flags); | 675 | clear_bit(GLF_LOCK, &gl->gl_flags); |
673 | gl->gl_owner_pid = 0; | 676 | pid = gl->gl_owner_pid; |
677 | gl->gl_owner_pid = NULL; | ||
674 | gl->gl_ip = 0; | 678 | gl->gl_ip = 0; |
675 | run_queue(gl); | 679 | run_queue(gl); |
676 | spin_unlock(&gl->gl_spin); | 680 | spin_unlock(&gl->gl_spin); |
681 | |||
682 | put_pid(pid); | ||
677 | } | 683 | } |
678 | 684 | ||
679 | /** | 685 | /** |
@@ -1045,7 +1051,7 @@ static int glock_wait_internal(struct gfs2_holder *gh) | |||
1045 | } | 1051 | } |
1046 | 1052 | ||
1047 | static inline struct gfs2_holder * | 1053 | static inline struct gfs2_holder * |
1048 | find_holder_by_owner(struct list_head *head, pid_t pid) | 1054 | find_holder_by_owner(struct list_head *head, struct pid *pid) |
1049 | { | 1055 | { |
1050 | struct gfs2_holder *gh; | 1056 | struct gfs2_holder *gh; |
1051 | 1057 | ||
@@ -1082,7 +1088,7 @@ static void add_to_queue(struct gfs2_holder *gh) | |||
1082 | struct gfs2_glock *gl = gh->gh_gl; | 1088 | struct gfs2_glock *gl = gh->gh_gl; |
1083 | struct gfs2_holder *existing; | 1089 | struct gfs2_holder *existing; |
1084 | 1090 | ||
1085 | BUG_ON(!gh->gh_owner_pid); | 1091 | BUG_ON(gh->gh_owner_pid == NULL); |
1086 | if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) | 1092 | if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) |
1087 | BUG(); | 1093 | BUG(); |
1088 | 1094 | ||
@@ -1092,12 +1098,14 @@ static void add_to_queue(struct gfs2_holder *gh) | |||
1092 | if (existing) { | 1098 | if (existing) { |
1093 | print_symbol(KERN_WARNING "original: %s\n", | 1099 | print_symbol(KERN_WARNING "original: %s\n", |
1094 | existing->gh_ip); | 1100 | existing->gh_ip); |
1095 | printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid); | 1101 | printk(KERN_INFO "pid : %d\n", |
1102 | pid_nr(existing->gh_owner_pid)); | ||
1096 | printk(KERN_INFO "lock type : %d lock state : %d\n", | 1103 | printk(KERN_INFO "lock type : %d lock state : %d\n", |
1097 | existing->gh_gl->gl_name.ln_type, | 1104 | existing->gh_gl->gl_name.ln_type, |
1098 | existing->gh_gl->gl_state); | 1105 | existing->gh_gl->gl_state); |
1099 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); | 1106 | print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); |
1100 | printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid); | 1107 | printk(KERN_INFO "pid : %d\n", |
1108 | pid_nr(gh->gh_owner_pid)); | ||
1101 | printk(KERN_INFO "lock type : %d lock state : %d\n", | 1109 | printk(KERN_INFO "lock type : %d lock state : %d\n", |
1102 | gl->gl_name.ln_type, gl->gl_state); | 1110 | gl->gl_name.ln_type, gl->gl_state); |
1103 | BUG(); | 1111 | BUG(); |
@@ -1798,8 +1806,9 @@ static int dump_holder(struct glock_iter *gi, char *str, | |||
1798 | 1806 | ||
1799 | print_dbg(gi, " %s\n", str); | 1807 | print_dbg(gi, " %s\n", str); |
1800 | if (gh->gh_owner_pid) { | 1808 | if (gh->gh_owner_pid) { |
1801 | print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid); | 1809 | print_dbg(gi, " owner = %ld ", |
1802 | gh_owner = find_task_by_pid(gh->gh_owner_pid); | 1810 | (long)pid_nr(gh->gh_owner_pid)); |
1811 | gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); | ||
1803 | if (gh_owner) | 1812 | if (gh_owner) |
1804 | print_dbg(gi, "(%s)\n", gh_owner->comm); | 1813 | print_dbg(gi, "(%s)\n", gh_owner->comm); |
1805 | else | 1814 | else |
@@ -1877,13 +1886,13 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) | |||
1877 | print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); | 1886 | print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); |
1878 | print_dbg(gi, " gl_state = %u\n", gl->gl_state); | 1887 | print_dbg(gi, " gl_state = %u\n", gl->gl_state); |
1879 | if (gl->gl_owner_pid) { | 1888 | if (gl->gl_owner_pid) { |
1880 | gl_owner = find_task_by_pid(gl->gl_owner_pid); | 1889 | gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID); |
1881 | if (gl_owner) | 1890 | if (gl_owner) |
1882 | print_dbg(gi, " gl_owner = pid %d (%s)\n", | 1891 | print_dbg(gi, " gl_owner = pid %d (%s)\n", |
1883 | gl->gl_owner_pid, gl_owner->comm); | 1892 | pid_nr(gl->gl_owner_pid), gl_owner->comm); |
1884 | else | 1893 | else |
1885 | print_dbg(gi, " gl_owner = %d (ended)\n", | 1894 | print_dbg(gi, " gl_owner = %d (ended)\n", |
1886 | gl->gl_owner_pid); | 1895 | pid_nr(gl->gl_owner_pid)); |
1887 | } else | 1896 | } else |
1888 | print_dbg(gi, " gl_owner = -1\n"); | 1897 | print_dbg(gi, " gl_owner = -1\n"); |
1889 | print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); | 1898 | print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); |
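Aside (illustrative, not part of the patch): the glock.c hunks above replace raw pid_t owner fields with reference-counted struct pid pointers. The sketch below shows that lifetime pattern in isolation; the example_lock type and its helpers are hypothetical, while get_pid(), task_pid(), put_pid(), pid_task() and pid_nr() are the real kernel pid APIs the patch uses.

    #include <linux/kernel.h>
    #include <linux/pid.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    struct example_lock {
        spinlock_t   lock;
        struct pid  *owner_pid;    /* counted reference, or NULL when unowned */
    };

    static void example_lock_acquire(struct example_lock *el)
    {
        spin_lock(&el->lock);
        /* take a reference so the struct pid stays valid even if the task exits */
        el->owner_pid = get_pid(task_pid(current));
        spin_unlock(&el->lock);
    }

    static void example_lock_release(struct example_lock *el)
    {
        struct pid *pid;

        spin_lock(&el->lock);
        pid = el->owner_pid;
        el->owner_pid = NULL;
        spin_unlock(&el->lock);
        put_pid(pid);            /* drop the reference outside the spinlock */
    }

    static void example_lock_dump(struct example_lock *el)
    {
        /* the held reference keeps the struct pid alive; the task itself may
         * have exited, in which case pid_task() returns NULL */
        struct task_struct *owner = el->owner_pid ?
                pid_task(el->owner_pid, PIDTYPE_PID) : NULL;

        printk(KERN_INFO "owner pid %d (%s)\n",
               el->owner_pid ? pid_nr(el->owner_pid) : -1,
               owner ? owner->comm : "ended");
    }

With struct pid owners, "is this lock held by me?" becomes a plain pointer comparison against task_pid(current), which is exactly what the glock.h hunk below switches to.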
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index b16f604eea9f..2f9c6d136b37 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h | |||
@@ -36,11 +36,13 @@ static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) | |||
36 | { | 36 | { |
37 | struct gfs2_holder *gh; | 37 | struct gfs2_holder *gh; |
38 | int locked = 0; | 38 | int locked = 0; |
39 | struct pid *pid; | ||
39 | 40 | ||
40 | /* Look in glock's list of holders for one with current task as owner */ | 41 | /* Look in glock's list of holders for one with current task as owner */ |
41 | spin_lock(&gl->gl_spin); | 42 | spin_lock(&gl->gl_spin); |
43 | pid = task_pid(current); | ||
42 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | 44 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { |
43 | if (gh->gh_owner_pid == current->pid) { | 45 | if (gh->gh_owner_pid == pid) { |
44 | locked = 1; | 46 | locked = 1; |
45 | break; | 47 | break; |
46 | } | 48 | } |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 513aaf0dc0ab..525dcae352d6 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -151,7 +151,7 @@ struct gfs2_holder { | |||
151 | struct list_head gh_list; | 151 | struct list_head gh_list; |
152 | 152 | ||
153 | struct gfs2_glock *gh_gl; | 153 | struct gfs2_glock *gh_gl; |
154 | pid_t gh_owner_pid; | 154 | struct pid *gh_owner_pid; |
155 | unsigned int gh_state; | 155 | unsigned int gh_state; |
156 | unsigned gh_flags; | 156 | unsigned gh_flags; |
157 | 157 | ||
@@ -182,7 +182,7 @@ struct gfs2_glock { | |||
182 | unsigned int gl_hash; | 182 | unsigned int gl_hash; |
183 | unsigned int gl_demote_state; /* state requested by remote node */ | 183 | unsigned int gl_demote_state; /* state requested by remote node */ |
184 | unsigned long gl_demote_time; /* time of first demote request */ | 184 | unsigned long gl_demote_time; /* time of first demote request */ |
185 | pid_t gl_owner_pid; | 185 | struct pid *gl_owner_pid; |
186 | unsigned long gl_ip; | 186 | unsigned long gl_ip; |
187 | struct list_head gl_holders; | 187 | struct list_head gl_holders; |
188 | struct list_head gl_waiters1; /* HIF_MUTEX */ | 188 | struct list_head gl_waiters1; /* HIF_MUTEX */ |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 728d3169e7bd..37725ade3c51 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -240,7 +240,7 @@ fail_put: | |||
240 | ip->i_gl->gl_object = NULL; | 240 | ip->i_gl->gl_object = NULL; |
241 | gfs2_glock_put(ip->i_gl); | 241 | gfs2_glock_put(ip->i_gl); |
242 | fail: | 242 | fail: |
243 | iput(inode); | 243 | iget_failed(inode); |
244 | return ERR_PTR(error); | 244 | return ERR_PTR(error); |
245 | } | 245 | } |
246 | 246 | ||
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c index b9da62348a87..334c7f85351b 100644 --- a/fs/gfs2/ops_export.c +++ b/fs/gfs2/ops_export.c | |||
@@ -143,7 +143,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child) | |||
143 | * have to return that as a(n invalid) pointer to dentry. | 143 | * have to return that as a(n invalid) pointer to dentry. |
144 | */ | 144 | */ |
145 | if (IS_ERR(inode)) | 145 | if (IS_ERR(inode)) |
146 | return ERR_PTR(PTR_ERR(inode)); | 146 | return ERR_CAST(inode); |
147 | 147 | ||
148 | dentry = d_alloc_anon(inode); | 148 | dentry = d_alloc_anon(inode); |
149 | if (!dentry) { | 149 | if (!dentry) { |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 43d511bba52d..4bee6aa845e4 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -884,12 +884,13 @@ static struct super_block* get_gfs2_sb(const char *dev_name) | |||
884 | dev_name); | 884 | dev_name); |
885 | goto out; | 885 | goto out; |
886 | } | 886 | } |
887 | error = vfs_getattr(nd.mnt, nd.dentry, &stat); | 887 | error = vfs_getattr(nd.path.mnt, nd.path.dentry, &stat); |
888 | 888 | ||
889 | fstype = get_fs_type("gfs2"); | 889 | fstype = get_fs_type("gfs2"); |
890 | list_for_each_entry(s, &fstype->fs_supers, s_instances) { | 890 | list_for_each_entry(s, &fstype->fs_supers, s_instances) { |
891 | if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) || | 891 | if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) || |
892 | (S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) { | 892 | (S_ISDIR(stat.mode) && |
893 | s == nd.path.dentry->d_inode->i_sb)) { | ||
893 | sb = s; | 894 | sb = s; |
894 | goto free_nd; | 895 | goto free_nd; |
895 | } | 896 | } |
@@ -899,7 +900,7 @@ static struct super_block* get_gfs2_sb(const char *dev_name) | |||
899 | "mount point %s\n", dev_name); | 900 | "mount point %s\n", dev_name); |
900 | 901 | ||
901 | free_nd: | 902 | free_nd: |
902 | path_release(&nd); | 903 | path_put(&nd.path); |
903 | out: | 904 | out: |
904 | return sb; | 905 | return sb; |
905 | } | 906 | } |
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 9f71372c1757..e87412902bed 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c | |||
@@ -111,7 +111,7 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, | |||
111 | 111 | ||
112 | inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd); | 112 | inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd); |
113 | if (inode && IS_ERR(inode)) | 113 | if (inode && IS_ERR(inode)) |
114 | return ERR_PTR(PTR_ERR(inode)); | 114 | return ERR_CAST(inode); |
115 | 115 | ||
116 | if (inode) { | 116 | if (inode) { |
117 | struct gfs2_glock *gl = GFS2_I(inode)->i_gl; | 117 | struct gfs2_glock *gl = GFS2_I(inode)->i_gl; |
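Aside (illustrative, not part of the patch): the two hunks above swap ERR_PTR(PTR_ERR(inode)) for ERR_CAST(inode), which carries an error pointer across pointer types in one step. A minimal sketch; example_find_inode() is a hypothetical lookup helper.

    static struct dentry *example_get_dentry(struct inode *dir, struct qstr *name)
    {
        struct inode *inode = example_find_inode(dir, name);  /* hypothetical */

        if (IS_ERR(inode))
            return ERR_CAST(inode);   /* same value as ERR_PTR(PTR_ERR(inode)) */
        return d_alloc_anon(inode);
    }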
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c index f8452a0eab56..4129cdb3f0d8 100644 --- a/fs/hfs/bfind.c +++ b/fs/hfs/bfind.c | |||
@@ -52,9 +52,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) | |||
52 | rec = (e + b) / 2; | 52 | rec = (e + b) / 2; |
53 | len = hfs_brec_lenoff(bnode, rec, &off); | 53 | len = hfs_brec_lenoff(bnode, rec, &off); |
54 | keylen = hfs_brec_keylen(bnode, rec); | 54 | keylen = hfs_brec_keylen(bnode, rec); |
55 | if (keylen == HFS_BAD_KEYLEN) { | 55 | if (keylen == 0) { |
56 | res = -EINVAL; | 56 | res = -EINVAL; |
57 | goto done; | 57 | goto fail; |
58 | } | 58 | } |
59 | hfs_bnode_read(bnode, fd->key, off, keylen); | 59 | hfs_bnode_read(bnode, fd->key, off, keylen); |
60 | cmpval = bnode->tree->keycmp(fd->key, fd->search_key); | 60 | cmpval = bnode->tree->keycmp(fd->key, fd->search_key); |
@@ -71,9 +71,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) | |||
71 | if (rec != e && e >= 0) { | 71 | if (rec != e && e >= 0) { |
72 | len = hfs_brec_lenoff(bnode, e, &off); | 72 | len = hfs_brec_lenoff(bnode, e, &off); |
73 | keylen = hfs_brec_keylen(bnode, e); | 73 | keylen = hfs_brec_keylen(bnode, e); |
74 | if (keylen == HFS_BAD_KEYLEN) { | 74 | if (keylen == 0) { |
75 | res = -EINVAL; | 75 | res = -EINVAL; |
76 | goto done; | 76 | goto fail; |
77 | } | 77 | } |
78 | hfs_bnode_read(bnode, fd->key, off, keylen); | 78 | hfs_bnode_read(bnode, fd->key, off, keylen); |
79 | } | 79 | } |
@@ -83,6 +83,7 @@ done: | |||
83 | fd->keylength = keylen; | 83 | fd->keylength = keylen; |
84 | fd->entryoffset = off + keylen; | 84 | fd->entryoffset = off + keylen; |
85 | fd->entrylength = len - keylen; | 85 | fd->entrylength = len - keylen; |
86 | fail: | ||
86 | return res; | 87 | return res; |
87 | } | 88 | } |
88 | 89 | ||
@@ -206,7 +207,7 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt) | |||
206 | 207 | ||
207 | len = hfs_brec_lenoff(bnode, fd->record, &off); | 208 | len = hfs_brec_lenoff(bnode, fd->record, &off); |
208 | keylen = hfs_brec_keylen(bnode, fd->record); | 209 | keylen = hfs_brec_keylen(bnode, fd->record); |
209 | if (keylen == HFS_BAD_KEYLEN) { | 210 | if (keylen == 0) { |
210 | res = -EINVAL; | 211 | res = -EINVAL; |
211 | goto out; | 212 | goto out; |
212 | } | 213 | } |
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c index 8626ee375ea8..878bf25dbc6a 100644 --- a/fs/hfs/brec.c +++ b/fs/hfs/brec.c | |||
@@ -49,14 +49,14 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) | |||
49 | if (retval > node->tree->max_key_len + 2) { | 49 | if (retval > node->tree->max_key_len + 2) { |
50 | printk(KERN_ERR "hfs: keylen %d too large\n", | 50 | printk(KERN_ERR "hfs: keylen %d too large\n", |
51 | retval); | 51 | retval); |
52 | retval = HFS_BAD_KEYLEN; | 52 | retval = 0; |
53 | } | 53 | } |
54 | } else { | 54 | } else { |
55 | retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; | 55 | retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; |
56 | if (retval > node->tree->max_key_len + 1) { | 56 | if (retval > node->tree->max_key_len + 1) { |
57 | printk(KERN_ERR "hfs: keylen %d too large\n", | 57 | printk(KERN_ERR "hfs: keylen %d too large\n", |
58 | retval); | 58 | retval); |
59 | retval = HFS_BAD_KEYLEN; | 59 | retval = 0; |
60 | } | 60 | } |
61 | } | 61 | } |
62 | } | 62 | } |
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 110dd3515dc8..24cf6fc43021 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c | |||
@@ -81,15 +81,23 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke | |||
81 | goto fail_page; | 81 | goto fail_page; |
82 | if (!tree->node_count) | 82 | if (!tree->node_count) |
83 | goto fail_page; | 83 | goto fail_page; |
84 | if ((id == HFS_EXT_CNID) && (tree->max_key_len != HFS_MAX_EXT_KEYLEN)) { | 84 | switch (id) { |
85 | printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", | 85 | case HFS_EXT_CNID: |
86 | tree->max_key_len); | 86 | if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) { |
87 | goto fail_page; | 87 | printk(KERN_ERR "hfs: invalid extent max_key_len %d\n", |
88 | } | 88 | tree->max_key_len); |
89 | if ((id == HFS_CAT_CNID) && (tree->max_key_len != HFS_MAX_CAT_KEYLEN)) { | 89 | goto fail_page; |
90 | printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", | 90 | } |
91 | tree->max_key_len); | 91 | break; |
92 | goto fail_page; | 92 | case HFS_CAT_CNID: |
93 | if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) { | ||
94 | printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n", | ||
95 | tree->max_key_len); | ||
96 | goto fail_page; | ||
97 | } | ||
98 | break; | ||
99 | default: | ||
100 | BUG(); | ||
93 | } | 101 | } |
94 | 102 | ||
95 | tree->node_size_shift = ffs(size) - 1; | 103 | tree->node_size_shift = ffs(size) - 1; |
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h index c6aae61adfe6..6f194d0768b6 100644 --- a/fs/hfs/hfs.h +++ b/fs/hfs/hfs.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #define HFS_MAX_NAMELEN 128 | 28 | #define HFS_MAX_NAMELEN 128 |
29 | #define HFS_MAX_VALENCE 32767U | 29 | #define HFS_MAX_VALENCE 32767U |
30 | 30 | ||
31 | #define HFS_BAD_KEYLEN 0xFF | ||
32 | |||
33 | /* Meanings of the drAtrb field of the MDB, | 31 | /* Meanings of the drAtrb field of the MDB, |
34 | * Reference: _Inside Macintosh: Files_ p. 2-61 | 32 | * Reference: _Inside Macintosh: Files_ p. 2-61 |
35 | */ | 33 | */ |
diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 16cbd902f8b9..32de44ed0021 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * This file may be distributed under the terms of the GNU General Public License. | 6 | * This file may be distributed under the terms of the GNU General Public License. |
7 | * | 7 | * |
8 | * This file contains hfs_read_super(), some of the super_ops and | 8 | * This file contains hfs_read_super(), some of the super_ops and |
9 | * init_module() and cleanup_module(). The remaining super_ops are in | 9 | * init_hfs_fs() and exit_hfs_fs(). The remaining super_ops are in |
10 | * inode.c since they deal with inodes. | 10 | * inode.c since they deal with inodes. |
11 | * | 11 | * |
12 | * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds | 12 | * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds |
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 050d29c0a5b5..bb5433608a42 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c | |||
@@ -22,6 +22,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
22 | struct hfs_btree *tree; | 22 | struct hfs_btree *tree; |
23 | struct hfs_btree_header_rec *head; | 23 | struct hfs_btree_header_rec *head; |
24 | struct address_space *mapping; | 24 | struct address_space *mapping; |
25 | struct inode *inode; | ||
25 | struct page *page; | 26 | struct page *page; |
26 | unsigned int size; | 27 | unsigned int size; |
27 | 28 | ||
@@ -33,9 +34,10 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) | |||
33 | spin_lock_init(&tree->hash_lock); | 34 | spin_lock_init(&tree->hash_lock); |
34 | tree->sb = sb; | 35 | tree->sb = sb; |
35 | tree->cnid = id; | 36 | tree->cnid = id; |
36 | tree->inode = iget(sb, id); | 37 | inode = hfsplus_iget(sb, id); |
37 | if (!tree->inode) | 38 | if (IS_ERR(inode)) |
38 | goto free_tree; | 39 | goto free_tree; |
40 | tree->inode = inode; | ||
39 | 41 | ||
40 | mapping = tree->inode->i_mapping; | 42 | mapping = tree->inode->i_mapping; |
41 | page = read_mapping_page(mapping, 0, NULL); | 43 | page = read_mapping_page(mapping, 0, NULL); |
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 1955ee61251c..29683645fa0a 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c | |||
@@ -97,9 +97,9 @@ again: | |||
97 | goto fail; | 97 | goto fail; |
98 | } | 98 | } |
99 | hfs_find_exit(&fd); | 99 | hfs_find_exit(&fd); |
100 | inode = iget(dir->i_sb, cnid); | 100 | inode = hfsplus_iget(dir->i_sb, cnid); |
101 | if (!inode) | 101 | if (IS_ERR(inode)) |
102 | return ERR_PTR(-EACCES); | 102 | return ERR_CAST(inode); |
103 | if (S_ISREG(inode->i_mode)) | 103 | if (S_ISREG(inode->i_mode)) |
104 | HFSPLUS_I(inode).dev = linkid; | 104 | HFSPLUS_I(inode).dev = linkid; |
105 | out: | 105 | out: |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index d9f5eda6d039..d72d0a8b25aa 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -345,6 +345,9 @@ int hfsplus_parse_options(char *, struct hfsplus_sb_info *); | |||
345 | void hfsplus_fill_defaults(struct hfsplus_sb_info *); | 345 | void hfsplus_fill_defaults(struct hfsplus_sb_info *); |
346 | int hfsplus_show_options(struct seq_file *, struct vfsmount *); | 346 | int hfsplus_show_options(struct seq_file *, struct vfsmount *); |
347 | 347 | ||
348 | /* super.c */ | ||
349 | struct inode *hfsplus_iget(struct super_block *, unsigned long); | ||
350 | |||
348 | /* tables.c */ | 351 | /* tables.c */ |
349 | extern u16 hfsplus_case_fold_table[]; | 352 | extern u16 hfsplus_case_fold_table[]; |
350 | extern u16 hfsplus_decompose_table[]; | 353 | extern u16 hfsplus_decompose_table[]; |
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index ecf70dafb643..b0f9ad362d1d 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c | |||
@@ -20,11 +20,18 @@ static void hfsplus_destroy_inode(struct inode *inode); | |||
20 | 20 | ||
21 | #include "hfsplus_fs.h" | 21 | #include "hfsplus_fs.h" |
22 | 22 | ||
23 | static void hfsplus_read_inode(struct inode *inode) | 23 | struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) |
24 | { | 24 | { |
25 | struct hfs_find_data fd; | 25 | struct hfs_find_data fd; |
26 | struct hfsplus_vh *vhdr; | 26 | struct hfsplus_vh *vhdr; |
27 | int err; | 27 | struct inode *inode; |
28 | long err = -EIO; | ||
29 | |||
30 | inode = iget_locked(sb, ino); | ||
31 | if (!inode) | ||
32 | return ERR_PTR(-ENOMEM); | ||
33 | if (!(inode->i_state & I_NEW)) | ||
34 | return inode; | ||
28 | 35 | ||
29 | INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); | 36 | INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); |
30 | init_MUTEX(&HFSPLUS_I(inode).extents_lock); | 37 | init_MUTEX(&HFSPLUS_I(inode).extents_lock); |
@@ -41,7 +48,7 @@ static void hfsplus_read_inode(struct inode *inode) | |||
41 | hfs_find_exit(&fd); | 48 | hfs_find_exit(&fd); |
42 | if (err) | 49 | if (err) |
43 | goto bad_inode; | 50 | goto bad_inode; |
44 | return; | 51 | goto done; |
45 | } | 52 | } |
46 | vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr; | 53 | vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr; |
47 | switch(inode->i_ino) { | 54 | switch(inode->i_ino) { |
@@ -70,10 +77,13 @@ static void hfsplus_read_inode(struct inode *inode) | |||
70 | goto bad_inode; | 77 | goto bad_inode; |
71 | } | 78 | } |
72 | 79 | ||
73 | return; | 80 | done: |
81 | unlock_new_inode(inode); | ||
82 | return inode; | ||
74 | 83 | ||
75 | bad_inode: | 84 | bad_inode: |
76 | make_bad_inode(inode); | 85 | iget_failed(inode); |
86 | return ERR_PTR(err); | ||
77 | } | 87 | } |
78 | 88 | ||
79 | static int hfsplus_write_inode(struct inode *inode, int unused) | 89 | static int hfsplus_write_inode(struct inode *inode, int unused) |
@@ -262,7 +272,6 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data) | |||
262 | static const struct super_operations hfsplus_sops = { | 272 | static const struct super_operations hfsplus_sops = { |
263 | .alloc_inode = hfsplus_alloc_inode, | 273 | .alloc_inode = hfsplus_alloc_inode, |
264 | .destroy_inode = hfsplus_destroy_inode, | 274 | .destroy_inode = hfsplus_destroy_inode, |
265 | .read_inode = hfsplus_read_inode, | ||
266 | .write_inode = hfsplus_write_inode, | 275 | .write_inode = hfsplus_write_inode, |
267 | .clear_inode = hfsplus_clear_inode, | 276 | .clear_inode = hfsplus_clear_inode, |
268 | .put_super = hfsplus_put_super, | 277 | .put_super = hfsplus_put_super, |
@@ -278,7 +287,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
278 | struct hfsplus_sb_info *sbi; | 287 | struct hfsplus_sb_info *sbi; |
279 | hfsplus_cat_entry entry; | 288 | hfsplus_cat_entry entry; |
280 | struct hfs_find_data fd; | 289 | struct hfs_find_data fd; |
281 | struct inode *root; | 290 | struct inode *root, *inode; |
282 | struct qstr str; | 291 | struct qstr str; |
283 | struct nls_table *nls = NULL; | 292 | struct nls_table *nls = NULL; |
284 | int err = -EINVAL; | 293 | int err = -EINVAL; |
@@ -366,18 +375,25 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
366 | goto cleanup; | 375 | goto cleanup; |
367 | } | 376 | } |
368 | 377 | ||
369 | HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID); | 378 | inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); |
370 | if (!HFSPLUS_SB(sb).alloc_file) { | 379 | if (IS_ERR(inode)) { |
371 | printk(KERN_ERR "hfs: failed to load allocation file\n"); | 380 | printk(KERN_ERR "hfs: failed to load allocation file\n"); |
381 | err = PTR_ERR(inode); | ||
372 | goto cleanup; | 382 | goto cleanup; |
373 | } | 383 | } |
384 | HFSPLUS_SB(sb).alloc_file = inode; | ||
374 | 385 | ||
375 | /* Load the root directory */ | 386 | /* Load the root directory */ |
376 | root = iget(sb, HFSPLUS_ROOT_CNID); | 387 | root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID); |
388 | if (IS_ERR(root)) { | ||
389 | printk(KERN_ERR "hfs: failed to load root directory\n"); | ||
390 | err = PTR_ERR(root); | ||
391 | goto cleanup; | ||
392 | } | ||
377 | sb->s_root = d_alloc_root(root); | 393 | sb->s_root = d_alloc_root(root); |
378 | if (!sb->s_root) { | 394 | if (!sb->s_root) { |
379 | printk(KERN_ERR "hfs: failed to load root directory\n"); | ||
380 | iput(root); | 395 | iput(root); |
396 | err = -ENOMEM; | ||
381 | goto cleanup; | 397 | goto cleanup; |
382 | } | 398 | } |
383 | sb->s_root->d_op = &hfsplus_dentry_operations; | 399 | sb->s_root->d_op = &hfsplus_dentry_operations; |
@@ -390,9 +406,12 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) | |||
390 | hfs_find_exit(&fd); | 406 | hfs_find_exit(&fd); |
391 | if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) | 407 | if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) |
392 | goto cleanup; | 408 | goto cleanup; |
393 | HFSPLUS_SB(sb).hidden_dir = iget(sb, be32_to_cpu(entry.folder.id)); | 409 | inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); |
394 | if (!HFSPLUS_SB(sb).hidden_dir) | 410 | if (IS_ERR(inode)) { |
411 | err = PTR_ERR(inode); | ||
395 | goto cleanup; | 412 | goto cleanup; |
413 | } | ||
414 | HFSPLUS_SB(sb).hidden_dir = inode; | ||
396 | } else | 415 | } else |
397 | hfs_find_exit(&fd); | 416 | hfs_find_exit(&fd); |
398 | 417 | ||
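Aside (illustrative, not part of the patch): the super.c hunks above retire the ->read_inode() super operation in favour of hfsplus_iget() built on iget_locked(). The general shape of such a helper is sketched here; example_read_inode() stands in for the filesystem's real on-disk initializer.

    struct inode *example_iget(struct super_block *sb, unsigned long ino)
    {
        struct inode *inode = iget_locked(sb, ino);
        long err;

        if (!inode)
            return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
            return inode;                 /* cache hit, already initialized */

        err = example_read_inode(inode);  /* hypothetical read of on-disk data */
        if (err) {
            iget_failed(inode);           /* unhashes, unlocks and drops it */
            return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
    }

Callers then test IS_ERR() rather than NULL, which is why the fill_super and lookup paths above switch from "if (!inode)" to "if (IS_ERR(inode))", and why failure paths use iget_failed() instead of iput() so that anyone waiting on the I_NEW inode is woken and sees it removed. The hostfs and hppfs conversions further down follow the same shape.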
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index 9e10f9444b64..628ccf6fa402 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c | |||
@@ -325,7 +325,7 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str) | |||
325 | struct super_block *sb = dentry->d_sb; | 325 | struct super_block *sb = dentry->d_sb; |
326 | const char *astr; | 326 | const char *astr; |
327 | const u16 *dstr; | 327 | const u16 *dstr; |
328 | int casefold, decompose, size, dsize, len; | 328 | int casefold, decompose, size, len; |
329 | unsigned long hash; | 329 | unsigned long hash; |
330 | wchar_t c; | 330 | wchar_t c; |
331 | u16 c2; | 331 | u16 c2; |
@@ -336,6 +336,7 @@ int hfsplus_hash_dentry(struct dentry *dentry, struct qstr *str) | |||
336 | astr = str->name; | 336 | astr = str->name; |
337 | len = str->len; | 337 | len = str->len; |
338 | while (len > 0) { | 338 | while (len > 0) { |
339 | int uninitialized_var(dsize); | ||
339 | size = asc2unichar(sb, astr, len, &c); | 340 | size = asc2unichar(sb, astr, len, &c); |
340 | astr += size; | 341 | astr += size; |
341 | len -= size; | 342 | len -= size; |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 8966b050196e..5222345ddccf 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
13 | #include <linux/statfs.h> | 13 | #include <linux/statfs.h> |
14 | #include <linux/seq_file.h> | ||
15 | #include <linux/mount.h> | ||
14 | #include "hostfs.h" | 16 | #include "hostfs.h" |
15 | #include "init.h" | 17 | #include "init.h" |
16 | #include "kern.h" | 18 | #include "kern.h" |
@@ -202,7 +204,7 @@ static char *follow_link(char *link) | |||
202 | return ERR_PTR(n); | 204 | return ERR_PTR(n); |
203 | } | 205 | } |
204 | 206 | ||
205 | static int read_inode(struct inode *ino) | 207 | static int hostfs_read_inode(struct inode *ino) |
206 | { | 208 | { |
207 | char *name; | 209 | char *name; |
208 | int err = 0; | 210 | int err = 0; |
@@ -233,6 +235,25 @@ static int read_inode(struct inode *ino) | |||
233 | return err; | 235 | return err; |
234 | } | 236 | } |
235 | 237 | ||
238 | static struct inode *hostfs_iget(struct super_block *sb) | ||
239 | { | ||
240 | struct inode *inode; | ||
241 | long ret; | ||
242 | |||
243 | inode = iget_locked(sb, 0); | ||
244 | if (!inode) | ||
245 | return ERR_PTR(-ENOMEM); | ||
246 | if (inode->i_state & I_NEW) { | ||
247 | ret = hostfs_read_inode(inode); | ||
248 | if (ret < 0) { | ||
249 | iget_failed(inode); | ||
250 | return ERR_PTR(ret); | ||
251 | } | ||
252 | unlock_new_inode(inode); | ||
253 | } | ||
254 | return inode; | ||
255 | } | ||
256 | |||
236 | int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf) | 257 | int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf) |
237 | { | 258 | { |
238 | /* | 259 | /* |
@@ -303,9 +324,16 @@ static void hostfs_destroy_inode(struct inode *inode) | |||
303 | kfree(HOSTFS_I(inode)); | 324 | kfree(HOSTFS_I(inode)); |
304 | } | 325 | } |
305 | 326 | ||
306 | static void hostfs_read_inode(struct inode *inode) | 327 | static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs) |
307 | { | 328 | { |
308 | read_inode(inode); | 329 | struct inode *root = vfs->mnt_sb->s_root->d_inode; |
330 | const char *root_path = HOSTFS_I(root)->host_filename; | ||
331 | size_t offset = strlen(root_ino) + 1; | ||
332 | |||
333 | if (strlen(root_path) > offset) | ||
334 | seq_printf(seq, ",%s", root_path + offset); | ||
335 | |||
336 | return 0; | ||
309 | } | 337 | } |
310 | 338 | ||
311 | static const struct super_operations hostfs_sbops = { | 339 | static const struct super_operations hostfs_sbops = { |
@@ -313,8 +341,8 @@ static const struct super_operations hostfs_sbops = { | |||
313 | .drop_inode = generic_delete_inode, | 341 | .drop_inode = generic_delete_inode, |
314 | .delete_inode = hostfs_delete_inode, | 342 | .delete_inode = hostfs_delete_inode, |
315 | .destroy_inode = hostfs_destroy_inode, | 343 | .destroy_inode = hostfs_destroy_inode, |
316 | .read_inode = hostfs_read_inode, | ||
317 | .statfs = hostfs_statfs, | 344 | .statfs = hostfs_statfs, |
345 | .show_options = hostfs_show_options, | ||
318 | }; | 346 | }; |
319 | 347 | ||
320 | int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) | 348 | int hostfs_readdir(struct file *file, void *ent, filldir_t filldir) |
@@ -571,10 +599,11 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
571 | char *name; | 599 | char *name; |
572 | int error, fd; | 600 | int error, fd; |
573 | 601 | ||
574 | error = -ENOMEM; | 602 | inode = hostfs_iget(dir->i_sb); |
575 | inode = iget(dir->i_sb, 0); | 603 | if (IS_ERR(inode)) { |
576 | if (inode == NULL) | 604 | error = PTR_ERR(inode); |
577 | goto out; | 605 | goto out; |
606 | } | ||
578 | 607 | ||
579 | error = init_inode(inode, dentry); | 608 | error = init_inode(inode, dentry); |
580 | if (error) | 609 | if (error) |
@@ -615,10 +644,11 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, | |||
615 | char *name; | 644 | char *name; |
616 | int err; | 645 | int err; |
617 | 646 | ||
618 | err = -ENOMEM; | 647 | inode = hostfs_iget(ino->i_sb); |
619 | inode = iget(ino->i_sb, 0); | 648 | if (IS_ERR(inode)) { |
620 | if (inode == NULL) | 649 | err = PTR_ERR(inode); |
621 | goto out; | 650 | goto out; |
651 | } | ||
622 | 652 | ||
623 | err = init_inode(inode, dentry); | 653 | err = init_inode(inode, dentry); |
624 | if (err) | 654 | if (err) |
@@ -736,11 +766,13 @@ int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | |||
736 | { | 766 | { |
737 | struct inode *inode; | 767 | struct inode *inode; |
738 | char *name; | 768 | char *name; |
739 | int err = -ENOMEM; | 769 | int err; |
740 | 770 | ||
741 | inode = iget(dir->i_sb, 0); | 771 | inode = hostfs_iget(dir->i_sb); |
742 | if (inode == NULL) | 772 | if (IS_ERR(inode)) { |
773 | err = PTR_ERR(inode); | ||
743 | goto out; | 774 | goto out; |
775 | } | ||
744 | 776 | ||
745 | err = init_inode(inode, dentry); | 777 | err = init_inode(inode, dentry); |
746 | if (err) | 778 | if (err) |
@@ -952,9 +984,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) | |||
952 | 984 | ||
953 | sprintf(host_root_path, "%s/%s", root_ino, req_root); | 985 | sprintf(host_root_path, "%s/%s", root_ino, req_root); |
954 | 986 | ||
955 | root_inode = iget(sb, 0); | 987 | root_inode = hostfs_iget(sb); |
956 | if (root_inode == NULL) | 988 | if (IS_ERR(root_inode)) { |
989 | err = PTR_ERR(root_inode); | ||
957 | goto out_free; | 990 | goto out_free; |
991 | } | ||
958 | 992 | ||
959 | err = init_inode(root_inode, NULL); | 993 | err = init_inode(root_inode, NULL); |
960 | if (err) | 994 | if (err) |
@@ -972,7 +1006,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) | |||
972 | if (sb->s_root == NULL) | 1006 | if (sb->s_root == NULL) |
973 | goto out_put; | 1007 | goto out_put; |
974 | 1008 | ||
975 | err = read_inode(root_inode); | 1009 | err = hostfs_read_inode(root_inode); |
976 | if (err) { | 1010 | if (err) { |
977 | /* No iput in this case because the dput does that for us */ | 1011 | /* No iput in this case because the dput does that for us */ |
978 | dput(sb->s_root); | 1012 | dput(sb->s_root); |
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 00971d999964..f63a699ec659 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c | |||
@@ -386,6 +386,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) | |||
386 | int lowercase, conv, eas, chk, errs, chkdsk, timeshift; | 386 | int lowercase, conv, eas, chk, errs, chkdsk, timeshift; |
387 | int o; | 387 | int o; |
388 | struct hpfs_sb_info *sbi = hpfs_sb(s); | 388 | struct hpfs_sb_info *sbi = hpfs_sb(s); |
389 | char *new_opts = kstrdup(data, GFP_KERNEL); | ||
389 | 390 | ||
390 | *flags |= MS_NOATIME; | 391 | *flags |= MS_NOATIME; |
391 | 392 | ||
@@ -398,15 +399,15 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) | |||
398 | if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, | 399 | if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv, |
399 | &eas, &chk, &errs, &chkdsk, &timeshift))) { | 400 | &eas, &chk, &errs, &chkdsk, &timeshift))) { |
400 | printk("HPFS: bad mount options.\n"); | 401 | printk("HPFS: bad mount options.\n"); |
401 | return 1; | 402 | goto out_err; |
402 | } | 403 | } |
403 | if (o == 2) { | 404 | if (o == 2) { |
404 | hpfs_help(); | 405 | hpfs_help(); |
405 | return 1; | 406 | goto out_err; |
406 | } | 407 | } |
407 | if (timeshift != sbi->sb_timeshift) { | 408 | if (timeshift != sbi->sb_timeshift) { |
408 | printk("HPFS: timeshift can't be changed using remount.\n"); | 409 | printk("HPFS: timeshift can't be changed using remount.\n"); |
409 | return 1; | 410 | goto out_err; |
410 | } | 411 | } |
411 | 412 | ||
412 | unmark_dirty(s); | 413 | unmark_dirty(s); |
@@ -419,7 +420,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data) | |||
419 | 420 | ||
420 | if (!(*flags & MS_RDONLY)) mark_dirty(s); | 421 | if (!(*flags & MS_RDONLY)) mark_dirty(s); |
421 | 422 | ||
423 | kfree(s->s_options); | ||
424 | s->s_options = new_opts; | ||
425 | |||
422 | return 0; | 426 | return 0; |
427 | |||
428 | out_err: | ||
429 | kfree(new_opts); | ||
430 | return -EINVAL; | ||
423 | } | 431 | } |
424 | 432 | ||
425 | /* Super operations */ | 433 | /* Super operations */ |
@@ -432,6 +440,7 @@ static const struct super_operations hpfs_sops = | |||
432 | .put_super = hpfs_put_super, | 440 | .put_super = hpfs_put_super, |
433 | .statfs = hpfs_statfs, | 441 | .statfs = hpfs_statfs, |
434 | .remount_fs = hpfs_remount_fs, | 442 | .remount_fs = hpfs_remount_fs, |
443 | .show_options = generic_show_options, | ||
435 | }; | 444 | }; |
436 | 445 | ||
437 | static int hpfs_fill_super(struct super_block *s, void *options, int silent) | 446 | static int hpfs_fill_super(struct super_block *s, void *options, int silent) |
@@ -454,6 +463,8 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) | |||
454 | 463 | ||
455 | int o; | 464 | int o; |
456 | 465 | ||
466 | save_mount_options(s, options); | ||
467 | |||
457 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); | 468 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
458 | if (!sbi) | 469 | if (!sbi) |
459 | return -ENOMEM; | 470 | return -ENOMEM; |
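Aside (illustrative, not part of the patch): the hpfs hunks above (and the hugetlbfs hunk further down) adopt the generic option-reporting helpers: save the option string at mount time, swap it on a successful remount, and let generic_show_options() print it for /proc/mounts. A hedged sketch for a hypothetical filesystem; example_parse_opts() is assumed.

    static int example_fill_super(struct super_block *sb, void *data, int silent)
    {
        save_mount_options(sb, data);   /* remembered for /proc/mounts */
        /* ... normal mount setup ... */
        return 0;
    }

    static int example_remount(struct super_block *sb, int *flags, char *data)
    {
        char *new_opts = kstrdup(data, GFP_KERNEL);

        if (example_parse_opts(data) < 0) {    /* hypothetical validation */
            kfree(new_opts);
            return -EINVAL;
        }
        kfree(sb->s_options);           /* accepted: replace the saved string */
        sb->s_options = new_opts;
        return 0;
    }

    static const struct super_operations example_sops = {
        .remount_fs   = example_remount,
        .show_options = generic_show_options,
    };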
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c index affb7412125e..a1e1f0f61aa5 100644 --- a/fs/hppfs/hppfs_kern.c +++ b/fs/hppfs/hppfs_kern.c | |||
@@ -155,6 +155,20 @@ static void hppfs_read_inode(struct inode *ino) | |||
155 | ino->i_blocks = proc_ino->i_blocks; | 155 | ino->i_blocks = proc_ino->i_blocks; |
156 | } | 156 | } |
157 | 157 | ||
158 | static struct inode *hppfs_iget(struct super_block *sb) | ||
159 | { | ||
160 | struct inode *inode; | ||
161 | |||
162 | inode = iget_locked(sb, 0); | ||
163 | if (!inode) | ||
164 | return ERR_PTR(-ENOMEM); | ||
165 | if (inode->i_state & I_NEW) { | ||
166 | hppfs_read_inode(inode); | ||
167 | unlock_new_inode(inode); | ||
168 | } | ||
169 | return inode; | ||
170 | } | ||
171 | |||
158 | static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, | 172 | static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, |
159 | struct nameidata *nd) | 173 | struct nameidata *nd) |
160 | { | 174 | { |
@@ -190,9 +204,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, | |||
190 | if(IS_ERR(proc_dentry)) | 204 | if(IS_ERR(proc_dentry)) |
191 | return(proc_dentry); | 205 | return(proc_dentry); |
192 | 206 | ||
193 | inode = iget(ino->i_sb, 0); | 207 | inode = hppfs_iget(ino->i_sb); |
194 | if(inode == NULL) | 208 | if (IS_ERR(inode)) { |
209 | err = PTR_ERR(inode); | ||
195 | goto out_dput; | 210 | goto out_dput; |
211 | } | ||
196 | 212 | ||
197 | err = init_inode(inode, proc_dentry); | 213 | err = init_inode(inode, proc_dentry); |
198 | if(err) | 214 | if(err) |
@@ -652,7 +668,6 @@ static void hppfs_destroy_inode(struct inode *inode) | |||
652 | static const struct super_operations hppfs_sbops = { | 668 | static const struct super_operations hppfs_sbops = { |
653 | .alloc_inode = hppfs_alloc_inode, | 669 | .alloc_inode = hppfs_alloc_inode, |
654 | .destroy_inode = hppfs_destroy_inode, | 670 | .destroy_inode = hppfs_destroy_inode, |
655 | .read_inode = hppfs_read_inode, | ||
656 | .delete_inode = hppfs_delete_inode, | 671 | .delete_inode = hppfs_delete_inode, |
657 | .statfs = hppfs_statfs, | 672 | .statfs = hppfs_statfs, |
658 | }; | 673 | }; |
@@ -745,9 +760,11 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent) | |||
745 | sb->s_magic = HPPFS_SUPER_MAGIC; | 760 | sb->s_magic = HPPFS_SUPER_MAGIC; |
746 | sb->s_op = &hppfs_sbops; | 761 | sb->s_op = &hppfs_sbops; |
747 | 762 | ||
748 | root_inode = iget(sb, 0); | 763 | root_inode = hppfs_iget(sb); |
749 | if(root_inode == NULL) | 764 | if (IS_ERR(root_inode)) { |
765 | err = PTR_ERR(root_inode); | ||
750 | goto out; | 766 | goto out; |
767 | } | ||
751 | 768 | ||
752 | err = init_inode(root_inode, proc_sb->s_root); | 769 | err = init_inode(root_inode, proc_sb->s_root); |
753 | if(err) | 770 | if(err) |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 3b3cc28cdefc..eee9487ae47f 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -734,6 +734,7 @@ static const struct super_operations hugetlbfs_ops = { | |||
734 | .delete_inode = hugetlbfs_delete_inode, | 734 | .delete_inode = hugetlbfs_delete_inode, |
735 | .drop_inode = hugetlbfs_drop_inode, | 735 | .drop_inode = hugetlbfs_drop_inode, |
736 | .put_super = hugetlbfs_put_super, | 736 | .put_super = hugetlbfs_put_super, |
737 | .show_options = generic_show_options, | ||
737 | }; | 738 | }; |
738 | 739 | ||
739 | static int | 740 | static int |
@@ -817,6 +818,8 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) | |||
817 | struct hugetlbfs_config config; | 818 | struct hugetlbfs_config config; |
818 | struct hugetlbfs_sb_info *sbinfo; | 819 | struct hugetlbfs_sb_info *sbinfo; |
819 | 820 | ||
821 | save_mount_options(sb, data); | ||
822 | |||
820 | config.nr_blocks = -1; /* No limit on size by default */ | 823 | config.nr_blocks = -1; /* No limit on size by default */ |
821 | config.nr_inodes = -1; /* No limit on number of inodes by default */ | 824 | config.nr_inodes = -1; /* No limit on number of inodes by default */ |
822 | config.uid = current->fsuid; | 825 | config.uid = current->fsuid; |
diff --git a/fs/inode.c b/fs/inode.c index 276ffd6b6fdd..53245ffcf93d 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -928,8 +928,6 @@ EXPORT_SYMBOL(ilookup); | |||
928 | * @set: callback used to initialize a new struct inode | 928 | * @set: callback used to initialize a new struct inode |
929 | * @data: opaque data pointer to pass to @test and @set | 929 | * @data: opaque data pointer to pass to @test and @set |
930 | * | 930 | * |
931 | * This is iget() without the read_inode() portion of get_new_inode(). | ||
932 | * | ||
933 | * iget5_locked() uses ifind() to search for the inode specified by @hashval | 931 | * iget5_locked() uses ifind() to search for the inode specified by @hashval |
934 | * and @data in the inode cache and if present it is returned with an increased | 932 | * and @data in the inode cache and if present it is returned with an increased |
935 | * reference count. This is a generalized version of iget_locked() for file | 933 | * reference count. This is a generalized version of iget_locked() for file |
@@ -966,8 +964,6 @@ EXPORT_SYMBOL(iget5_locked); | |||
966 | * @sb: super block of file system | 964 | * @sb: super block of file system |
967 | * @ino: inode number to get | 965 | * @ino: inode number to get |
968 | * | 966 | * |
969 | * This is iget() without the read_inode() portion of get_new_inode_fast(). | ||
970 | * | ||
971 | * iget_locked() uses ifind_fast() to search for the inode specified by @ino in | 967 | * iget_locked() uses ifind_fast() to search for the inode specified by @ino in |
972 | * the inode cache and if present it is returned with an increased reference | 968 | * the inode cache and if present it is returned with an increased reference |
973 | * count. This is for file systems where the inode number is sufficient for | 969 | * count. This is for file systems where the inode number is sufficient for |
diff --git a/fs/inotify.c b/fs/inotify.c index 2c5b92152876..690e72595e6e 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
@@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched) | |||
168 | struct dentry *child; | 168 | struct dentry *child; |
169 | 169 | ||
170 | list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { | 170 | list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { |
171 | if (!child->d_inode) { | 171 | if (!child->d_inode) |
172 | WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED); | ||
173 | continue; | 172 | continue; |
174 | } | 173 | |
175 | spin_lock(&child->d_lock); | 174 | spin_lock(&child->d_lock); |
176 | if (watched) { | 175 | if (watched) |
177 | WARN_ON(child->d_flags & | ||
178 | DCACHE_INOTIFY_PARENT_WATCHED); | ||
179 | child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; | 176 | child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; |
180 | } else { | 177 | else |
181 | WARN_ON(!(child->d_flags & | 178 | child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; |
182 | DCACHE_INOTIFY_PARENT_WATCHED)); | ||
183 | child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED; | ||
184 | } | ||
185 | spin_unlock(&child->d_lock); | 179 | spin_unlock(&child->d_lock); |
186 | } | 180 | } |
187 | } | 181 | } |
@@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode) | |||
253 | if (!inode) | 247 | if (!inode) |
254 | return; | 248 | return; |
255 | 249 | ||
256 | WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED); | ||
257 | spin_lock(&entry->d_lock); | 250 | spin_lock(&entry->d_lock); |
258 | parent = entry->d_parent; | 251 | parent = entry->d_parent; |
259 | if (parent->d_inode && inotify_inode_watched(parent->d_inode)) | 252 | if (parent->d_inode && inotify_inode_watched(parent->d_inode)) |
@@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, | |||
627 | struct inode *inode, u32 mask) | 620 | struct inode *inode, u32 mask) |
628 | { | 621 | { |
629 | int ret = 0; | 622 | int ret = 0; |
623 | int newly_watched; | ||
630 | 624 | ||
631 | /* don't allow invalid bits: we don't want flags set */ | 625 | /* don't allow invalid bits: we don't want flags set */ |
632 | mask &= IN_ALL_EVENTS | IN_ONESHOT; | 626 | mask &= IN_ALL_EVENTS | IN_ONESHOT; |
@@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, | |||
653 | */ | 647 | */ |
654 | watch->inode = igrab(inode); | 648 | watch->inode = igrab(inode); |
655 | 649 | ||
656 | if (!inotify_inode_watched(inode)) | ||
657 | set_dentry_child_flags(inode, 1); | ||
658 | |||
659 | /* Add the watch to the handle's and the inode's list */ | 650 | /* Add the watch to the handle's and the inode's list */ |
651 | newly_watched = !inotify_inode_watched(inode); | ||
660 | list_add(&watch->h_list, &ih->watches); | 652 | list_add(&watch->h_list, &ih->watches); |
661 | list_add(&watch->i_list, &inode->inotify_watches); | 653 | list_add(&watch->i_list, &inode->inotify_watches); |
654 | /* | ||
655 | * Set child flags _after_ adding the watch, so there is no race | ||
656 | * window where newly instantiated children could miss their parent's | ||
657 | * watched flag. | ||
658 | */ | ||
659 | if (newly_watched) | ||
660 | set_dentry_child_flags(inode, 1); | ||
661 | |||
662 | out: | 662 | out: |
663 | mutex_unlock(&ih->mutex); | 663 | mutex_unlock(&ih->mutex); |
664 | mutex_unlock(&inode->inotify_mutex); | 664 | mutex_unlock(&inode->inotify_mutex); |
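Aside (illustrative, not part of the patch): the reordering in inotify_add_watch() above matters because inotify_d_instantiate() only flags a new child dentry if its parent's inode already appears watched. The interleaving below is an assumed illustration of the window the old ordering left open, not a reproduced trace.

    /*
     *  CPU0: inotify_add_watch()             CPU1: lookup in the same directory
     *  ---------------------------           ----------------------------------
     *  set_dentry_child_flags(inode, 1);
     *                                         inotify_d_instantiate(child, ...)
     *                                           -> inode has no watches yet, so
     *                                              the child flag is NOT set
     *  list_add(&watch->i_list,
     *           &inode->inotify_watches);     (child is now silently unwatched)
     *
     * Adding the watch to inode->inotify_watches first, as the new code does,
     * means any child instantiated afterwards already sees a watched parent.
     */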
diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 5e009331c01f..7b94a1e3c015 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c | |||
@@ -41,9 +41,9 @@ static struct kmem_cache *event_cachep __read_mostly; | |||
41 | static struct vfsmount *inotify_mnt __read_mostly; | 41 | static struct vfsmount *inotify_mnt __read_mostly; |
42 | 42 | ||
43 | /* these are configurable via /proc/sys/fs/inotify/ */ | 43 | /* these are configurable via /proc/sys/fs/inotify/ */ |
44 | int inotify_max_user_instances __read_mostly; | 44 | static int inotify_max_user_instances __read_mostly; |
45 | int inotify_max_user_watches __read_mostly; | 45 | static int inotify_max_user_watches __read_mostly; |
46 | int inotify_max_queued_events __read_mostly; | 46 | static int inotify_max_queued_events __read_mostly; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Lock ordering: | 49 | * Lock ordering: |
@@ -79,6 +79,7 @@ struct inotify_device { | |||
79 | atomic_t count; /* reference count */ | 79 | atomic_t count; /* reference count */ |
80 | struct user_struct *user; /* user who opened this dev */ | 80 | struct user_struct *user; /* user who opened this dev */ |
81 | struct inotify_handle *ih; /* inotify handle */ | 81 | struct inotify_handle *ih; /* inotify handle */ |
82 | struct fasync_struct *fa; /* async notification */ | ||
82 | unsigned int queue_size; /* size of the queue (bytes) */ | 83 | unsigned int queue_size; /* size of the queue (bytes) */ |
83 | unsigned int event_count; /* number of pending events */ | 84 | unsigned int event_count; /* number of pending events */ |
84 | unsigned int max_events; /* maximum number of events */ | 85 | unsigned int max_events; /* maximum number of events */ |
@@ -248,6 +249,19 @@ inotify_dev_get_event(struct inotify_device *dev) | |||
248 | } | 249 | } |
249 | 250 | ||
250 | /* | 251 | /* |
252 | * inotify_dev_get_last_event - return the last event in the given dev's queue | ||
253 | * | ||
254 | * Caller must hold dev->ev_mutex. | ||
255 | */ | ||
256 | static inline struct inotify_kernel_event * | ||
257 | inotify_dev_get_last_event(struct inotify_device *dev) | ||
258 | { | ||
259 | if (list_empty(&dev->events)) | ||
260 | return NULL; | ||
261 | return list_entry(dev->events.prev, struct inotify_kernel_event, list); | ||
262 | } | ||
263 | |||
264 | /* | ||
251 | * inotify_dev_queue_event - event handler registered with core inotify, adds | 265 | * inotify_dev_queue_event - event handler registered with core inotify, adds |
252 | * a new event to the given device | 266 | * a new event to the given device |
253 | * | 267 | * |
@@ -269,11 +283,11 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, | |||
269 | /* we can safely put the watch as we don't reference it while | 283 | /* we can safely put the watch as we don't reference it while |
270 | * generating the event | 284 | * generating the event |
271 | */ | 285 | */ |
272 | if (mask & IN_IGNORED || mask & IN_ONESHOT) | 286 | if (mask & IN_IGNORED || w->mask & IN_ONESHOT) |
273 | put_inotify_watch(w); /* final put */ | 287 | put_inotify_watch(w); /* final put */ |
274 | 288 | ||
275 | /* coalescing: drop this event if it is a dupe of the previous */ | 289 | /* coalescing: drop this event if it is a dupe of the previous */ |
276 | last = inotify_dev_get_event(dev); | 290 | last = inotify_dev_get_last_event(dev); |
277 | if (last && last->event.mask == mask && last->event.wd == wd && | 291 | if (last && last->event.mask == mask && last->event.wd == wd && |
278 | last->event.cookie == cookie) { | 292 | last->event.cookie == cookie) { |
279 | const char *lastname = last->name; | 293 | const char *lastname = last->name; |
@@ -302,6 +316,7 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, | |||
302 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; | 316 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; |
303 | list_add_tail(&kevent->list, &dev->events); | 317 | list_add_tail(&kevent->list, &dev->events); |
304 | wake_up_interruptible(&dev->wq); | 318 | wake_up_interruptible(&dev->wq); |
319 | kill_fasync(&dev->fa, SIGIO, POLL_IN); | ||
305 | 320 | ||
306 | out: | 321 | out: |
307 | mutex_unlock(&dev->ev_mutex); | 322 | mutex_unlock(&dev->ev_mutex); |
@@ -352,7 +367,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, | |||
352 | /* you can only watch an inode if you have read permissions on it */ | 367 | /* you can only watch an inode if you have read permissions on it */ |
353 | error = vfs_permission(nd, MAY_READ); | 368 | error = vfs_permission(nd, MAY_READ); |
354 | if (error) | 369 | if (error) |
355 | path_release(nd); | 370 | path_put(&nd->path); |
356 | return error; | 371 | return error; |
357 | } | 372 | } |
358 | 373 | ||
@@ -490,6 +505,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
490 | return ret; | 505 | return ret; |
491 | } | 506 | } |
492 | 507 | ||
508 | static int inotify_fasync(int fd, struct file *file, int on) | ||
509 | { | ||
510 | struct inotify_device *dev = file->private_data; | ||
511 | |||
512 | return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; | ||
513 | } | ||
514 | |||
493 | static int inotify_release(struct inode *ignored, struct file *file) | 515 | static int inotify_release(struct inode *ignored, struct file *file) |
494 | { | 516 | { |
495 | struct inotify_device *dev = file->private_data; | 517 | struct inotify_device *dev = file->private_data; |
@@ -502,6 +524,9 @@ static int inotify_release(struct inode *ignored, struct file *file) | |||
502 | inotify_dev_event_dequeue(dev); | 524 | inotify_dev_event_dequeue(dev); |
503 | mutex_unlock(&dev->ev_mutex); | 525 | mutex_unlock(&dev->ev_mutex); |
504 | 526 | ||
527 | if (file->f_flags & FASYNC) | ||
528 | inotify_fasync(-1, file, 0); | ||
529 | |||
505 | /* free this device: the put matching the get in inotify_init() */ | 530 | /* free this device: the put matching the get in inotify_init() */ |
506 | put_inotify_dev(dev); | 531 | put_inotify_dev(dev); |
507 | 532 | ||
@@ -530,6 +555,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd, | |||
530 | static const struct file_operations inotify_fops = { | 555 | static const struct file_operations inotify_fops = { |
531 | .poll = inotify_poll, | 556 | .poll = inotify_poll, |
532 | .read = inotify_read, | 557 | .read = inotify_read, |
558 | .fasync = inotify_fasync, | ||
533 | .release = inotify_release, | 559 | .release = inotify_release, |
534 | .unlocked_ioctl = inotify_ioctl, | 560 | .unlocked_ioctl = inotify_ioctl, |
535 | .compat_ioctl = inotify_ioctl, | 561 | .compat_ioctl = inotify_ioctl, |
@@ -577,6 +603,7 @@ asmlinkage long sys_inotify_init(void) | |||
577 | goto out_free_dev; | 603 | goto out_free_dev; |
578 | } | 604 | } |
579 | dev->ih = ih; | 605 | dev->ih = ih; |
606 | dev->fa = NULL; | ||
580 | 607 | ||
581 | filp->f_op = &inotify_fops; | 608 | filp->f_op = &inotify_fops; |
582 | filp->f_path.mnt = mntget(inotify_mnt); | 609 | filp->f_path.mnt = mntget(inotify_mnt); |
@@ -640,7 +667,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | |||
640 | goto fput_and_out; | 667 | goto fput_and_out; |
641 | 668 | ||
642 | /* inode held in place by reference to nd; dev by fget on fd */ | 669 | /* inode held in place by reference to nd; dev by fget on fd */ |
643 | inode = nd.dentry->d_inode; | 670 | inode = nd.path.dentry->d_inode; |
644 | dev = filp->private_data; | 671 | dev = filp->private_data; |
645 | 672 | ||
646 | mutex_lock(&dev->up_mutex); | 673 | mutex_lock(&dev->up_mutex); |
@@ -649,7 +676,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | |||
649 | ret = create_watch(dev, inode, mask); | 676 | ret = create_watch(dev, inode, mask); |
650 | mutex_unlock(&dev->up_mutex); | 677 | mutex_unlock(&dev->up_mutex); |
651 | 678 | ||
652 | path_release(&nd); | 679 | path_put(&nd.path); |
653 | fput_and_out: | 680 | fput_and_out: |
654 | fput_light(filp, fput_needed); | 681 | fput_light(filp, fput_needed); |
655 | return ret; | 682 | return ret; |
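Aside (illustrative, not part of the patch): the inotify_user.c hunks above wire up SIGIO delivery with the standard fasync trio: a fasync_struct in the per-open device, a .fasync file operation built on fasync_helper(), and a kill_fasync() call whenever an event is queued. A minimal sketch for a hypothetical device; example_dev and its helpers are assumptions, while fasync_helper(), kill_fasync() and wake_up_interruptible() are the real kernel APIs.

    struct example_dev {
        wait_queue_head_t     wq;
        struct fasync_struct *fa;
    };

    static int example_fasync(int fd, struct file *file, int on)
    {
        struct example_dev *dev = file->private_data;

        /* add or remove this file on the async notification list */
        return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
    }

    static void example_queue_event(struct example_dev *dev)
    {
        wake_up_interruptible(&dev->wq);        /* wake blocked readers/pollers */
        kill_fasync(&dev->fa, SIGIO, POLL_IN);  /* signal O_ASYNC listeners */
    }

    static int example_release(struct inode *inode, struct file *file)
    {
        if (file->f_flags & FASYNC)
            example_fasync(-1, file, 0);        /* drop the fasync entry */
        return 0;
    }

The .fasync hook then goes into the file_operations, exactly as the diff does for inotify_fops, and release() drops the entry as inotify_release() does above.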
diff --git a/fs/ioctl.c b/fs/ioctl.c index c2a773e8620b..f32fbde2175e 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
@@ -12,12 +12,24 @@ | |||
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | #include <linux/security.h> | 13 | #include <linux/security.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/uaccess.h> | ||
15 | 16 | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <asm/ioctls.h> | 17 | #include <asm/ioctls.h> |
18 | 18 | ||
19 | static long do_ioctl(struct file *filp, unsigned int cmd, | 19 | /** |
20 | unsigned long arg) | 20 | * vfs_ioctl - call filesystem specific ioctl methods |
21 | * @filp: open file to invoke ioctl method on | ||
22 | * @cmd: ioctl command to execute | ||
23 | * @arg: command-specific argument for ioctl | ||
24 | * | ||
25 | * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise | ||
26 | * invokes filesystem specific ->ioctl method. If neither method exists, | ||
27 | * returns -ENOTTY. | ||
28 | * | ||
29 | * Returns 0 on success, -errno on error. | ||
30 | */ | ||
31 | long vfs_ioctl(struct file *filp, unsigned int cmd, | ||
32 | unsigned long arg) | ||
21 | { | 33 | { |
22 | int error = -ENOTTY; | 34 | int error = -ENOTTY; |
23 | 35 | ||
@@ -40,123 +52,148 @@ static long do_ioctl(struct file *filp, unsigned int cmd, | |||
40 | return error; | 52 | return error; |
41 | } | 53 | } |
42 | 54 | ||
55 | static int ioctl_fibmap(struct file *filp, int __user *p) | ||
56 | { | ||
57 | struct address_space *mapping = filp->f_mapping; | ||
58 | int res, block; | ||
59 | |||
60 | /* do we support this mess? */ | ||
61 | if (!mapping->a_ops->bmap) | ||
62 | return -EINVAL; | ||
63 | if (!capable(CAP_SYS_RAWIO)) | ||
64 | return -EPERM; | ||
65 | res = get_user(block, p); | ||
66 | if (res) | ||
67 | return res; | ||
68 | lock_kernel(); | ||
69 | res = mapping->a_ops->bmap(mapping, block); | ||
70 | unlock_kernel(); | ||
71 | return put_user(res, p); | ||
72 | } | ||
73 | |||
43 | static int file_ioctl(struct file *filp, unsigned int cmd, | 74 | static int file_ioctl(struct file *filp, unsigned int cmd, |
44 | unsigned long arg) | 75 | unsigned long arg) |
45 | { | 76 | { |
46 | int error; | 77 | struct inode *inode = filp->f_path.dentry->d_inode; |
47 | int block; | ||
48 | struct inode * inode = filp->f_path.dentry->d_inode; | ||
49 | int __user *p = (int __user *)arg; | 78 | int __user *p = (int __user *)arg; |
50 | 79 | ||
51 | switch (cmd) { | 80 | switch (cmd) { |
52 | case FIBMAP: | 81 | case FIBMAP: |
53 | { | 82 | return ioctl_fibmap(filp, p); |
54 | struct address_space *mapping = filp->f_mapping; | 83 | case FIGETBSZ: |
55 | int res; | 84 | return put_user(inode->i_sb->s_blocksize, p); |
56 | /* do we support this mess? */ | 85 | case FIONREAD: |
57 | if (!mapping->a_ops->bmap) | 86 | return put_user(i_size_read(inode) - filp->f_pos, p); |
58 | return -EINVAL; | 87 | } |
59 | if (!capable(CAP_SYS_RAWIO)) | ||
60 | return -EPERM; | ||
61 | if ((error = get_user(block, p)) != 0) | ||
62 | return error; | ||
63 | 88 | ||
89 | return vfs_ioctl(filp, cmd, arg); | ||
90 | } | ||
91 | |||
92 | static int ioctl_fionbio(struct file *filp, int __user *argp) | ||
93 | { | ||
94 | unsigned int flag; | ||
95 | int on, error; | ||
96 | |||
97 | error = get_user(on, argp); | ||
98 | if (error) | ||
99 | return error; | ||
100 | flag = O_NONBLOCK; | ||
101 | #ifdef __sparc__ | ||
102 | /* SunOS compatibility item. */ | ||
103 | if (O_NONBLOCK != O_NDELAY) | ||
104 | flag |= O_NDELAY; | ||
105 | #endif | ||
106 | if (on) | ||
107 | filp->f_flags |= flag; | ||
108 | else | ||
109 | filp->f_flags &= ~flag; | ||
110 | return error; | ||
111 | } | ||
112 | |||
113 | static int ioctl_fioasync(unsigned int fd, struct file *filp, | ||
114 | int __user *argp) | ||
115 | { | ||
116 | unsigned int flag; | ||
117 | int on, error; | ||
118 | |||
119 | error = get_user(on, argp); | ||
120 | if (error) | ||
121 | return error; | ||
122 | flag = on ? FASYNC : 0; | ||
123 | |||
124 | /* Did FASYNC state change ? */ | ||
125 | if ((flag ^ filp->f_flags) & FASYNC) { | ||
126 | if (filp->f_op && filp->f_op->fasync) { | ||
64 | lock_kernel(); | 127 | lock_kernel(); |
65 | res = mapping->a_ops->bmap(mapping, block); | 128 | error = filp->f_op->fasync(fd, filp, on); |
66 | unlock_kernel(); | 129 | unlock_kernel(); |
67 | return put_user(res, p); | 130 | } else |
68 | } | 131 | error = -ENOTTY; |
69 | case FIGETBSZ: | ||
70 | return put_user(inode->i_sb->s_blocksize, p); | ||
71 | case FIONREAD: | ||
72 | return put_user(i_size_read(inode) - filp->f_pos, p); | ||
73 | } | 132 | } |
133 | if (error) | ||
134 | return error; | ||
74 | 135 | ||
75 | return do_ioctl(filp, cmd, arg); | 136 | if (on) |
137 | filp->f_flags |= FASYNC; | ||
138 | else | ||
139 | filp->f_flags &= ~FASYNC; | ||
140 | return error; | ||
76 | } | 141 | } |
77 | 142 | ||
78 | /* | 143 | /* |
79 | * When you add any new common ioctls to the switches above and below | 144 | * When you add any new common ioctls to the switches above and below |
80 | * please update compat_sys_ioctl() too. | 145 | * please update compat_sys_ioctl() too. |
81 | * | 146 | * |
82 | * vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. | 147 | * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. |
83 | * It's just a simple helper for sys_ioctl and compat_sys_ioctl. | 148 | * It's just a simple helper for sys_ioctl and compat_sys_ioctl. |
84 | */ | 149 | */ |
85 | int vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg) | 150 | int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
151 | unsigned long arg) | ||
86 | { | 152 | { |
87 | unsigned int flag; | 153 | int error = 0; |
88 | int on, error = 0; | 154 | int __user *argp = (int __user *)arg; |
89 | 155 | ||
90 | switch (cmd) { | 156 | switch (cmd) { |
91 | case FIOCLEX: | 157 | case FIOCLEX: |
92 | set_close_on_exec(fd, 1); | 158 | set_close_on_exec(fd, 1); |
93 | break; | 159 | break; |
94 | 160 | ||
95 | case FIONCLEX: | 161 | case FIONCLEX: |
96 | set_close_on_exec(fd, 0); | 162 | set_close_on_exec(fd, 0); |
97 | break; | 163 | break; |
98 | 164 | ||
99 | case FIONBIO: | 165 | case FIONBIO: |
100 | if ((error = get_user(on, (int __user *)arg)) != 0) | 166 | error = ioctl_fionbio(filp, argp); |
101 | break; | 167 | break; |
102 | flag = O_NONBLOCK; | 168 | |
103 | #ifdef __sparc__ | 169 | case FIOASYNC: |
104 | /* SunOS compatibility item. */ | 170 | error = ioctl_fioasync(fd, filp, argp); |
105 | if(O_NONBLOCK != O_NDELAY) | 171 | break; |
106 | flag |= O_NDELAY; | 172 | |
107 | #endif | 173 | case FIOQSIZE: |
108 | if (on) | 174 | if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) || |
109 | filp->f_flags |= flag; | 175 | S_ISREG(filp->f_path.dentry->d_inode->i_mode) || |
110 | else | 176 | S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) { |
111 | filp->f_flags &= ~flag; | 177 | loff_t res = |
112 | break; | 178 | inode_get_bytes(filp->f_path.dentry->d_inode); |
113 | 179 | error = copy_to_user((loff_t __user *)arg, &res, | |
114 | case FIOASYNC: | 180 | sizeof(res)) ? -EFAULT : 0; |
115 | if ((error = get_user(on, (int __user *)arg)) != 0) | 181 | } else |
116 | break; | 182 | error = -ENOTTY; |
117 | flag = on ? FASYNC : 0; | 183 | break; |
118 | 184 | default: | |
119 | /* Did FASYNC state change ? */ | 185 | if (S_ISREG(filp->f_path.dentry->d_inode->i_mode)) |
120 | if ((flag ^ filp->f_flags) & FASYNC) { | 186 | error = file_ioctl(filp, cmd, arg); |
121 | if (filp->f_op && filp->f_op->fasync) { | 187 | else |
122 | lock_kernel(); | 188 | error = vfs_ioctl(filp, cmd, arg); |
123 | error = filp->f_op->fasync(fd, filp, on); | 189 | break; |
124 | unlock_kernel(); | ||
125 | } | ||
126 | else error = -ENOTTY; | ||
127 | } | ||
128 | if (error != 0) | ||
129 | break; | ||
130 | |||
131 | if (on) | ||
132 | filp->f_flags |= FASYNC; | ||
133 | else | ||
134 | filp->f_flags &= ~FASYNC; | ||
135 | break; | ||
136 | |||
137 | case FIOQSIZE: | ||
138 | if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) || | ||
139 | S_ISREG(filp->f_path.dentry->d_inode->i_mode) || | ||
140 | S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) { | ||
141 | loff_t res = inode_get_bytes(filp->f_path.dentry->d_inode); | ||
142 | error = copy_to_user((loff_t __user *)arg, &res, sizeof(res)) ? -EFAULT : 0; | ||
143 | } | ||
144 | else | ||
145 | error = -ENOTTY; | ||
146 | break; | ||
147 | default: | ||
148 | if (S_ISREG(filp->f_path.dentry->d_inode->i_mode)) | ||
149 | error = file_ioctl(filp, cmd, arg); | ||
150 | else | ||
151 | error = do_ioctl(filp, cmd, arg); | ||
152 | break; | ||
153 | } | 190 | } |
154 | return error; | 191 | return error; |
155 | } | 192 | } |
156 | 193 | ||
157 | asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) | 194 | asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) |
158 | { | 195 | { |
159 | struct file * filp; | 196 | struct file *filp; |
160 | int error = -EBADF; | 197 | int error = -EBADF; |
161 | int fput_needed; | 198 | int fput_needed; |
162 | 199 | ||
@@ -168,7 +205,7 @@ asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) | |||
168 | if (error) | 205 | if (error) |
169 | goto out_fput; | 206 | goto out_fput; |
170 | 207 | ||
171 | error = vfs_ioctl(filp, fd, cmd, arg); | 208 | error = do_vfs_ioctl(filp, fd, cmd, arg); |
172 | out_fput: | 209 | out_fput: |
173 | fput_light(filp, fput_needed); | 210 | fput_light(filp, fput_needed); |
174 | out: | 211 | out: |
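
Context for the fs/ioctl.c hunks above (illustrative, not part of the patch): the new ioctl_fibmap() helper copies a logical block number from an int in userspace, maps it through the address_space ->bmap() method under the BKL, and writes the physical block number back through the same pointer, while FIGETBSZ and FIONREAD remain one-line put_user() cases. A rough userspace sketch of driving those requests, assuming a regular file on a filesystem that implements ->bmap() and a process holding CAP_SYS_RAWIO:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

	int main(int argc, char **argv)
	{
		int fd, blksz, blk = 0;	/* logical block 0 of the file */

		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;
		if (ioctl(fd, FIGETBSZ, &blksz) == 0 &&
		    ioctl(fd, FIBMAP, &blk) == 0)	/* FIBMAP needs CAP_SYS_RAWIO */
			printf("block size %d, logical block 0 -> physical block %d\n",
			       blksz, blk);
		close(fd);
		return 0;
	}
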
diff --git a/fs/isofs/export.c b/fs/isofs/export.c index 29f9753ae5e5..bb219138331a 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c | |||
@@ -26,11 +26,9 @@ isofs_export_iget(struct super_block *sb, | |||
26 | if (block == 0) | 26 | if (block == 0) |
27 | return ERR_PTR(-ESTALE); | 27 | return ERR_PTR(-ESTALE); |
28 | inode = isofs_iget(sb, block, offset); | 28 | inode = isofs_iget(sb, block, offset); |
29 | if (inode == NULL) | 29 | if (IS_ERR(inode)) |
30 | return ERR_PTR(-ENOMEM); | 30 | return ERR_CAST(inode); |
31 | if (is_bad_inode(inode) | 31 | if (generation && inode->i_generation != generation) { |
32 | || (generation && inode->i_generation != generation)) | ||
33 | { | ||
34 | iput(inode); | 32 | iput(inode); |
35 | return ERR_PTR(-ESTALE); | 33 | return ERR_PTR(-ESTALE); |
36 | } | 34 | } |
@@ -110,8 +108,10 @@ static struct dentry *isofs_export_get_parent(struct dentry *child) | |||
110 | parent_inode = isofs_iget(child_inode->i_sb, | 108 | parent_inode = isofs_iget(child_inode->i_sb, |
111 | parent_block, | 109 | parent_block, |
112 | parent_offset); | 110 | parent_offset); |
113 | if (parent_inode == NULL) { | 111 | if (IS_ERR(parent_inode)) { |
114 | rv = ERR_PTR(-EACCES); | 112 | rv = ERR_CAST(parent_inode); |
113 | if (rv != ERR_PTR(-ENOMEM)) | ||
114 | rv = ERR_PTR(-EACCES); | ||
115 | goto out; | 115 | goto out; |
116 | } | 116 | } |
117 | 117 | ||
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 09e3d306e96f..044a254d526b 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c | |||
@@ -54,7 +54,7 @@ static void isofs_put_super(struct super_block *sb) | |||
54 | return; | 54 | return; |
55 | } | 55 | } |
56 | 56 | ||
57 | static void isofs_read_inode(struct inode *); | 57 | static int isofs_read_inode(struct inode *); |
58 | static int isofs_statfs (struct dentry *, struct kstatfs *); | 58 | static int isofs_statfs (struct dentry *, struct kstatfs *); |
59 | 59 | ||
60 | static struct kmem_cache *isofs_inode_cachep; | 60 | static struct kmem_cache *isofs_inode_cachep; |
@@ -107,10 +107,10 @@ static int isofs_remount(struct super_block *sb, int *flags, char *data) | |||
107 | static const struct super_operations isofs_sops = { | 107 | static const struct super_operations isofs_sops = { |
108 | .alloc_inode = isofs_alloc_inode, | 108 | .alloc_inode = isofs_alloc_inode, |
109 | .destroy_inode = isofs_destroy_inode, | 109 | .destroy_inode = isofs_destroy_inode, |
110 | .read_inode = isofs_read_inode, | ||
111 | .put_super = isofs_put_super, | 110 | .put_super = isofs_put_super, |
112 | .statfs = isofs_statfs, | 111 | .statfs = isofs_statfs, |
113 | .remount_fs = isofs_remount, | 112 | .remount_fs = isofs_remount, |
113 | .show_options = generic_show_options, | ||
114 | }; | 114 | }; |
115 | 115 | ||
116 | 116 | ||
@@ -145,7 +145,8 @@ struct iso9660_options{ | |||
145 | char nocompress; | 145 | char nocompress; |
146 | unsigned char check; | 146 | unsigned char check; |
147 | unsigned int blocksize; | 147 | unsigned int blocksize; |
148 | mode_t mode; | 148 | mode_t fmode; |
149 | mode_t dmode; | ||
149 | gid_t gid; | 150 | gid_t gid; |
150 | uid_t uid; | 151 | uid_t uid; |
151 | char *iocharset; | 152 | char *iocharset; |
@@ -306,7 +307,7 @@ enum { | |||
306 | Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore, | 307 | Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore, |
307 | Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet, | 308 | Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet, |
308 | Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err, | 309 | Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err, |
309 | Opt_nocompress, Opt_hide, Opt_showassoc, | 310 | Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, |
310 | }; | 311 | }; |
311 | 312 | ||
312 | static match_table_t tokens = { | 313 | static match_table_t tokens = { |
@@ -333,6 +334,7 @@ static match_table_t tokens = { | |||
333 | {Opt_uid, "uid=%u"}, | 334 | {Opt_uid, "uid=%u"}, |
334 | {Opt_gid, "gid=%u"}, | 335 | {Opt_gid, "gid=%u"}, |
335 | {Opt_mode, "mode=%u"}, | 336 | {Opt_mode, "mode=%u"}, |
337 | {Opt_dmode, "dmode=%u"}, | ||
336 | {Opt_block, "block=%u"}, | 338 | {Opt_block, "block=%u"}, |
337 | {Opt_ignore, "conv=binary"}, | 339 | {Opt_ignore, "conv=binary"}, |
338 | {Opt_ignore, "conv=b"}, | 340 | {Opt_ignore, "conv=b"}, |
@@ -360,7 +362,7 @@ static int parse_options(char *options, struct iso9660_options *popt) | |||
360 | popt->check = 'u'; /* unset */ | 362 | popt->check = 'u'; /* unset */ |
361 | popt->nocompress = 0; | 363 | popt->nocompress = 0; |
362 | popt->blocksize = 1024; | 364 | popt->blocksize = 1024; |
363 | popt->mode = S_IRUGO | S_IXUGO; /* | 365 | popt->fmode = popt->dmode = S_IRUGO | S_IXUGO; /* |
364 | * r-x for all. The disc could | 366 | * r-x for all. The disc could |
365 | * be shared with DOS machines so | 367 | * be shared with DOS machines so |
366 | * virtually anything could be | 368 | * virtually anything could be |
@@ -452,7 +454,12 @@ static int parse_options(char *options, struct iso9660_options *popt) | |||
452 | case Opt_mode: | 454 | case Opt_mode: |
453 | if (match_int(&args[0], &option)) | 455 | if (match_int(&args[0], &option)) |
454 | return 0; | 456 | return 0; |
455 | popt->mode = option; | 457 | popt->fmode = option; |
458 | break; | ||
459 | case Opt_dmode: | ||
460 | if (match_int(&args[0], &option)) | ||
461 | return 0; | ||
462 | popt->dmode = option; | ||
456 | break; | 463 | break; |
457 | case Opt_block: | 464 | case Opt_block: |
458 | if (match_int(&args[0], &option)) | 465 | if (match_int(&args[0], &option)) |
@@ -552,9 +559,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) | |||
552 | int joliet_level = 0; | 559 | int joliet_level = 0; |
553 | int iso_blknum, block; | 560 | int iso_blknum, block; |
554 | int orig_zonesize; | 561 | int orig_zonesize; |
555 | int table; | 562 | int table, error = -EINVAL; |
556 | unsigned int vol_desc_start; | 563 | unsigned int vol_desc_start; |
557 | 564 | ||
565 | save_mount_options(s, data); | ||
566 | |||
558 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); | 567 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
559 | if (!sbi) | 568 | if (!sbi) |
560 | return -ENOMEM; | 569 | return -ENOMEM; |
@@ -802,7 +811,8 @@ root_found: | |||
802 | * on the disk as suid, so we merely allow them to set the default | 811 | * on the disk as suid, so we merely allow them to set the default |
803 | * permissions. | 812 | * permissions. |
804 | */ | 813 | */ |
805 | sbi->s_mode = opt.mode & 0777; | 814 | sbi->s_fmode = opt.fmode & 0777; |
815 | sbi->s_dmode = opt.dmode & 0777; | ||
806 | 816 | ||
807 | /* | 817 | /* |
808 | * Read the root inode, which _may_ result in changing | 818 | * Read the root inode, which _may_ result in changing |
@@ -810,6 +820,8 @@ root_found: | |||
810 | * we then decide whether to use the Joliet descriptor. | 820 | * we then decide whether to use the Joliet descriptor. |
811 | */ | 821 | */ |
812 | inode = isofs_iget(s, sbi->s_firstdatazone, 0); | 822 | inode = isofs_iget(s, sbi->s_firstdatazone, 0); |
823 | if (IS_ERR(inode)) | ||
824 | goto out_no_root; | ||
813 | 825 | ||
814 | /* | 826 | /* |
815 | * If this disk has both Rock Ridge and Joliet on it, then we | 827 | * If this disk has both Rock Ridge and Joliet on it, then we |
@@ -829,6 +841,8 @@ root_found: | |||
829 | "ISOFS: changing to secondary root\n"); | 841 | "ISOFS: changing to secondary root\n"); |
830 | iput(inode); | 842 | iput(inode); |
831 | inode = isofs_iget(s, sbi->s_firstdatazone, 0); | 843 | inode = isofs_iget(s, sbi->s_firstdatazone, 0); |
844 | if (IS_ERR(inode)) | ||
845 | goto out_no_root; | ||
832 | } | 846 | } |
833 | } | 847 | } |
834 | 848 | ||
@@ -842,8 +856,6 @@ root_found: | |||
842 | sbi->s_joliet_level = joliet_level; | 856 | sbi->s_joliet_level = joliet_level; |
843 | 857 | ||
844 | /* check the root inode */ | 858 | /* check the root inode */ |
845 | if (!inode) | ||
846 | goto out_no_root; | ||
847 | if (!inode->i_op) | 859 | if (!inode->i_op) |
848 | goto out_bad_root; | 860 | goto out_bad_root; |
849 | 861 | ||
@@ -876,11 +888,14 @@ root_found: | |||
876 | */ | 888 | */ |
877 | out_bad_root: | 889 | out_bad_root: |
878 | printk(KERN_WARNING "%s: root inode not initialized\n", __func__); | 890 | printk(KERN_WARNING "%s: root inode not initialized\n", __func__); |
879 | goto out_iput; | ||
880 | out_no_root: | ||
881 | printk(KERN_WARNING "%s: get root inode failed\n", __func__); | ||
882 | out_iput: | 891 | out_iput: |
883 | iput(inode); | 892 | iput(inode); |
893 | goto out_no_inode; | ||
894 | out_no_root: | ||
895 | error = PTR_ERR(inode); | ||
896 | if (error != -ENOMEM) | ||
897 | printk(KERN_WARNING "%s: get root inode failed\n", __func__); | ||
898 | out_no_inode: | ||
884 | #ifdef CONFIG_JOLIET | 899 | #ifdef CONFIG_JOLIET |
885 | if (sbi->s_nls_iocharset) | 900 | if (sbi->s_nls_iocharset) |
886 | unload_nls(sbi->s_nls_iocharset); | 901 | unload_nls(sbi->s_nls_iocharset); |
@@ -908,7 +923,7 @@ out_freesbi: | |||
908 | kfree(opt.iocharset); | 923 | kfree(opt.iocharset); |
909 | kfree(sbi); | 924 | kfree(sbi); |
910 | s->s_fs_info = NULL; | 925 | s->s_fs_info = NULL; |
911 | return -EINVAL; | 926 | return error; |
912 | } | 927 | } |
913 | 928 | ||
914 | static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf) | 929 | static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf) |
@@ -930,7 +945,7 @@ static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf) | |||
930 | /* | 945 | /* |
931 | * Get a set of blocks; filling in buffer_heads if already allocated | 946 | * Get a set of blocks; filling in buffer_heads if already allocated |
932 | * or getblk() if they are not. Returns the number of blocks inserted | 947 | * or getblk() if they are not. Returns the number of blocks inserted |
933 | * (0 == error.) | 948 | * (-ve == error.) |
934 | */ | 949 | */ |
935 | int isofs_get_blocks(struct inode *inode, sector_t iblock_s, | 950 | int isofs_get_blocks(struct inode *inode, sector_t iblock_s, |
936 | struct buffer_head **bh, unsigned long nblocks) | 951 | struct buffer_head **bh, unsigned long nblocks) |
@@ -940,11 +955,12 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s, | |||
940 | unsigned int firstext; | 955 | unsigned int firstext; |
941 | unsigned long nextblk, nextoff; | 956 | unsigned long nextblk, nextoff; |
942 | long iblock = (long)iblock_s; | 957 | long iblock = (long)iblock_s; |
943 | int section, rv; | 958 | int section, rv, error; |
944 | struct iso_inode_info *ei = ISOFS_I(inode); | 959 | struct iso_inode_info *ei = ISOFS_I(inode); |
945 | 960 | ||
946 | lock_kernel(); | 961 | lock_kernel(); |
947 | 962 | ||
963 | error = -EIO; | ||
948 | rv = 0; | 964 | rv = 0; |
949 | if (iblock < 0 || iblock != iblock_s) { | 965 | if (iblock < 0 || iblock != iblock_s) { |
950 | printk(KERN_DEBUG "%s: block number too large\n", __func__); | 966 | printk(KERN_DEBUG "%s: block number too large\n", __func__); |
@@ -983,8 +999,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s, | |||
983 | 999 | ||
984 | offset += sect_size; | 1000 | offset += sect_size; |
985 | ninode = isofs_iget(inode->i_sb, nextblk, nextoff); | 1001 | ninode = isofs_iget(inode->i_sb, nextblk, nextoff); |
986 | if (!ninode) | 1002 | if (IS_ERR(ninode)) { |
1003 | error = PTR_ERR(ninode); | ||
987 | goto abort; | 1004 | goto abort; |
1005 | } | ||
988 | firstext = ISOFS_I(ninode)->i_first_extent; | 1006 | firstext = ISOFS_I(ninode)->i_first_extent; |
989 | sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode); | 1007 | sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode); |
990 | nextblk = ISOFS_I(ninode)->i_next_section_block; | 1008 | nextblk = ISOFS_I(ninode)->i_next_section_block; |
@@ -1015,9 +1033,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s, | |||
1015 | rv++; | 1033 | rv++; |
1016 | } | 1034 | } |
1017 | 1035 | ||
1036 | error = 0; | ||
1018 | abort: | 1037 | abort: |
1019 | unlock_kernel(); | 1038 | unlock_kernel(); |
1020 | return rv; | 1039 | return rv != 0 ? rv : error; |
1021 | } | 1040 | } |
1022 | 1041 | ||
1023 | /* | 1042 | /* |
@@ -1026,12 +1045,15 @@ abort: | |||
1026 | static int isofs_get_block(struct inode *inode, sector_t iblock, | 1045 | static int isofs_get_block(struct inode *inode, sector_t iblock, |
1027 | struct buffer_head *bh_result, int create) | 1046 | struct buffer_head *bh_result, int create) |
1028 | { | 1047 | { |
1048 | int ret; | ||
1049 | |||
1029 | if (create) { | 1050 | if (create) { |
1030 | printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__); | 1051 | printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__); |
1031 | return -EROFS; | 1052 | return -EROFS; |
1032 | } | 1053 | } |
1033 | 1054 | ||
1034 | return isofs_get_blocks(inode, iblock, &bh_result, 1) ? 0 : -EIO; | 1055 | ret = isofs_get_blocks(inode, iblock, &bh_result, 1); |
1056 | return ret < 0 ? ret : 0; | ||
1035 | } | 1057 | } |
1036 | 1058 | ||
1037 | static int isofs_bmap(struct inode *inode, sector_t block) | 1059 | static int isofs_bmap(struct inode *inode, sector_t block) |
@@ -1186,7 +1208,7 @@ out_toomany: | |||
1186 | goto out; | 1208 | goto out; |
1187 | } | 1209 | } |
1188 | 1210 | ||
1189 | static void isofs_read_inode(struct inode *inode) | 1211 | static int isofs_read_inode(struct inode *inode) |
1190 | { | 1212 | { |
1191 | struct super_block *sb = inode->i_sb; | 1213 | struct super_block *sb = inode->i_sb; |
1192 | struct isofs_sb_info *sbi = ISOFS_SB(sb); | 1214 | struct isofs_sb_info *sbi = ISOFS_SB(sb); |
@@ -1199,6 +1221,7 @@ static void isofs_read_inode(struct inode *inode) | |||
1199 | unsigned int de_len; | 1221 | unsigned int de_len; |
1200 | unsigned long offset; | 1222 | unsigned long offset; |
1201 | struct iso_inode_info *ei = ISOFS_I(inode); | 1223 | struct iso_inode_info *ei = ISOFS_I(inode); |
1224 | int ret = -EIO; | ||
1202 | 1225 | ||
1203 | block = ei->i_iget5_block; | 1226 | block = ei->i_iget5_block; |
1204 | bh = sb_bread(inode->i_sb, block); | 1227 | bh = sb_bread(inode->i_sb, block); |
@@ -1216,6 +1239,7 @@ static void isofs_read_inode(struct inode *inode) | |||
1216 | tmpde = kmalloc(de_len, GFP_KERNEL); | 1239 | tmpde = kmalloc(de_len, GFP_KERNEL); |
1217 | if (tmpde == NULL) { | 1240 | if (tmpde == NULL) { |
1218 | printk(KERN_INFO "%s: out of memory\n", __func__); | 1241 | printk(KERN_INFO "%s: out of memory\n", __func__); |
1242 | ret = -ENOMEM; | ||
1219 | goto fail; | 1243 | goto fail; |
1220 | } | 1244 | } |
1221 | memcpy(tmpde, bh->b_data + offset, frag1); | 1245 | memcpy(tmpde, bh->b_data + offset, frag1); |
@@ -1235,7 +1259,7 @@ static void isofs_read_inode(struct inode *inode) | |||
1235 | ei->i_file_format = isofs_file_normal; | 1259 | ei->i_file_format = isofs_file_normal; |
1236 | 1260 | ||
1237 | if (de->flags[-high_sierra] & 2) { | 1261 | if (de->flags[-high_sierra] & 2) { |
1238 | inode->i_mode = S_IRUGO | S_IXUGO | S_IFDIR; | 1262 | inode->i_mode = sbi->s_dmode | S_IFDIR; |
1239 | inode->i_nlink = 1; /* | 1263 | inode->i_nlink = 1; /* |
1240 | * Set to 1. We know there are 2, but | 1264 | * Set to 1. We know there are 2, but |
1241 | * the find utility tries to optimize | 1265 | * the find utility tries to optimize |
@@ -1245,9 +1269,8 @@ static void isofs_read_inode(struct inode *inode) | |||
1245 | */ | 1269 | */ |
1246 | } else { | 1270 | } else { |
1247 | /* Everybody gets to read the file. */ | 1271 | /* Everybody gets to read the file. */ |
1248 | inode->i_mode = sbi->s_mode; | 1272 | inode->i_mode = sbi->s_fmode | S_IFREG; |
1249 | inode->i_nlink = 1; | 1273 | inode->i_nlink = 1; |
1250 | inode->i_mode |= S_IFREG; | ||
1251 | } | 1274 | } |
1252 | inode->i_uid = sbi->s_uid; | 1275 | inode->i_uid = sbi->s_uid; |
1253 | inode->i_gid = sbi->s_gid; | 1276 | inode->i_gid = sbi->s_gid; |
@@ -1259,8 +1282,10 @@ static void isofs_read_inode(struct inode *inode) | |||
1259 | 1282 | ||
1260 | ei->i_section_size = isonum_733(de->size); | 1283 | ei->i_section_size = isonum_733(de->size); |
1261 | if (de->flags[-high_sierra] & 0x80) { | 1284 | if (de->flags[-high_sierra] & 0x80) { |
1262 | if(isofs_read_level3_size(inode)) | 1285 | ret = isofs_read_level3_size(inode); |
1286 | if (ret < 0) | ||
1263 | goto fail; | 1287 | goto fail; |
1288 | ret = -EIO; | ||
1264 | } else { | 1289 | } else { |
1265 | ei->i_next_section_block = 0; | 1290 | ei->i_next_section_block = 0; |
1266 | ei->i_next_section_offset = 0; | 1291 | ei->i_next_section_offset = 0; |
@@ -1346,16 +1371,16 @@ static void isofs_read_inode(struct inode *inode) | |||
1346 | /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ | 1371 | /* XXX - parse_rock_ridge_inode() had already set i_rdev. */ |
1347 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | 1372 | init_special_inode(inode, inode->i_mode, inode->i_rdev); |
1348 | 1373 | ||
1374 | ret = 0; | ||
1349 | out: | 1375 | out: |
1350 | kfree(tmpde); | 1376 | kfree(tmpde); |
1351 | if (bh) | 1377 | if (bh) |
1352 | brelse(bh); | 1378 | brelse(bh); |
1353 | return; | 1379 | return ret; |
1354 | 1380 | ||
1355 | out_badread: | 1381 | out_badread: |
1356 | printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); | 1382 | printk(KERN_WARNING "ISOFS: unable to read i-node block\n"); |
1357 | fail: | 1383 | fail: |
1358 | make_bad_inode(inode); | ||
1359 | goto out; | 1384 | goto out; |
1360 | } | 1385 | } |
1361 | 1386 | ||
@@ -1394,9 +1419,10 @@ struct inode *isofs_iget(struct super_block *sb, | |||
1394 | unsigned long hashval; | 1419 | unsigned long hashval; |
1395 | struct inode *inode; | 1420 | struct inode *inode; |
1396 | struct isofs_iget5_callback_data data; | 1421 | struct isofs_iget5_callback_data data; |
1422 | long ret; | ||
1397 | 1423 | ||
1398 | if (offset >= 1ul << sb->s_blocksize_bits) | 1424 | if (offset >= 1ul << sb->s_blocksize_bits) |
1399 | return NULL; | 1425 | return ERR_PTR(-EINVAL); |
1400 | 1426 | ||
1401 | data.block = block; | 1427 | data.block = block; |
1402 | data.offset = offset; | 1428 | data.offset = offset; |
@@ -1406,9 +1432,17 @@ struct inode *isofs_iget(struct super_block *sb, | |||
1406 | inode = iget5_locked(sb, hashval, &isofs_iget5_test, | 1432 | inode = iget5_locked(sb, hashval, &isofs_iget5_test, |
1407 | &isofs_iget5_set, &data); | 1433 | &isofs_iget5_set, &data); |
1408 | 1434 | ||
1409 | if (inode && (inode->i_state & I_NEW)) { | 1435 | if (!inode) |
1410 | sb->s_op->read_inode(inode); | 1436 | return ERR_PTR(-ENOMEM); |
1411 | unlock_new_inode(inode); | 1437 | |
1438 | if (inode->i_state & I_NEW) { | ||
1439 | ret = isofs_read_inode(inode); | ||
1440 | if (ret < 0) { | ||
1441 | iget_failed(inode); | ||
1442 | inode = ERR_PTR(ret); | ||
1443 | } else { | ||
1444 | unlock_new_inode(inode); | ||
1445 | } | ||
1412 | } | 1446 | } |
1413 | 1447 | ||
1414 | return inode; | 1448 | return inode; |
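
The isofs_iget() rework above changes its failure convention from returning NULL to returning ERR_PTR() codes (-EINVAL for an out-of-range offset, -ENOMEM from iget5_locked(), or whatever isofs_read_inode() reports), which is why every caller touched in this series switches from NULL checks to IS_ERR(). A minimal sketch of the calling pattern the patch establishes, not an additional hunk:

	struct inode *inode;

	inode = isofs_iget(sb, block, offset);
	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* propagate -EINVAL, -ENOMEM, -EIO, ... */
	/* ... use the inode, then iput(inode) when finished ... */
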
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index f3213f9f89af..d1bdf8adb351 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h | |||
@@ -51,7 +51,8 @@ struct isofs_sb_info { | |||
51 | unsigned char s_hide; | 51 | unsigned char s_hide; |
52 | unsigned char s_showassoc; | 52 | unsigned char s_showassoc; |
53 | 53 | ||
54 | mode_t s_mode; | 54 | mode_t s_fmode; |
55 | mode_t s_dmode; | ||
55 | gid_t s_gid; | 56 | gid_t s_gid; |
56 | uid_t s_uid; | 57 | uid_t s_uid; |
57 | struct nls_table *s_nls_iocharset; /* Native language support table */ | 58 | struct nls_table *s_nls_iocharset; /* Native language support table */ |
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index e2b4dad39ca9..344b247bc29a 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c | |||
@@ -179,9 +179,9 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam | |||
179 | inode = NULL; | 179 | inode = NULL; |
180 | if (found) { | 180 | if (found) { |
181 | inode = isofs_iget(dir->i_sb, block, offset); | 181 | inode = isofs_iget(dir->i_sb, block, offset); |
182 | if (!inode) { | 182 | if (IS_ERR(inode)) { |
183 | unlock_kernel(); | 183 | unlock_kernel(); |
184 | return ERR_PTR(-EACCES); | 184 | return ERR_CAST(inode); |
185 | } | 185 | } |
186 | } | 186 | } |
187 | unlock_kernel(); | 187 | unlock_kernel(); |
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index f3a1db3098de..6bd48f0a7047 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c | |||
@@ -474,8 +474,10 @@ repeat: | |||
474 | isofs_iget(inode->i_sb, | 474 | isofs_iget(inode->i_sb, |
475 | ISOFS_I(inode)->i_first_extent, | 475 | ISOFS_I(inode)->i_first_extent, |
476 | 0); | 476 | 0); |
477 | if (!reloc) | 477 | if (IS_ERR(reloc)) { |
478 | ret = PTR_ERR(reloc); | ||
478 | goto out; | 479 | goto out; |
480 | } | ||
479 | inode->i_mode = reloc->i_mode; | 481 | inode->i_mode = reloc->i_mode; |
480 | inode->i_nlink = reloc->i_nlink; | 482 | inode->i_nlink = reloc->i_nlink; |
481 | inode->i_uid = reloc->i_uid; | 483 | inode->i_uid = reloc->i_uid; |
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 31853eb65b4c..a38c7186c570 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c | |||
@@ -104,7 +104,8 @@ static int journal_write_commit_record(journal_t *journal, | |||
104 | { | 104 | { |
105 | struct journal_head *descriptor; | 105 | struct journal_head *descriptor; |
106 | struct buffer_head *bh; | 106 | struct buffer_head *bh; |
107 | int i, ret; | 107 | journal_header_t *header; |
108 | int ret; | ||
108 | int barrier_done = 0; | 109 | int barrier_done = 0; |
109 | 110 | ||
110 | if (is_journal_aborted(journal)) | 111 | if (is_journal_aborted(journal)) |
@@ -116,13 +117,10 @@ static int journal_write_commit_record(journal_t *journal, | |||
116 | 117 | ||
117 | bh = jh2bh(descriptor); | 118 | bh = jh2bh(descriptor); |
118 | 119 | ||
119 | /* AKPM: buglet - add `i' to tmp! */ | 120 | header = (journal_header_t *)(bh->b_data); |
120 | for (i = 0; i < bh->b_size; i += 512) { | 121 | header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); |
121 | journal_header_t *tmp = (journal_header_t*)bh->b_data; | 122 | header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK); |
122 | tmp->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); | 123 | header->h_sequence = cpu_to_be32(commit_transaction->t_tid); |
123 | tmp->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK); | ||
124 | tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid); | ||
125 | } | ||
126 | 124 | ||
127 | JBUFFER_TRACE(descriptor, "write commit block"); | 125 | JBUFFER_TRACE(descriptor, "write commit block"); |
128 | set_buffer_dirty(bh); | 126 | set_buffer_dirty(bh); |
@@ -131,6 +129,8 @@ static int journal_write_commit_record(journal_t *journal, | |||
131 | barrier_done = 1; | 129 | barrier_done = 1; |
132 | } | 130 | } |
133 | ret = sync_dirty_buffer(bh); | 131 | ret = sync_dirty_buffer(bh); |
132 | if (barrier_done) | ||
133 | clear_buffer_ordered(bh); | ||
134 | /* is it possible for another commit to fail at roughly | 134 | /* is it possible for another commit to fail at roughly |
135 | * the same time as this one? If so, we don't want to | 135 | * the same time as this one? If so, we don't want to |
136 | * trust the barrier flag in the super, but instead want | 136 | * trust the barrier flag in the super, but instead want |
@@ -148,7 +148,6 @@ static int journal_write_commit_record(journal_t *journal, | |||
148 | spin_unlock(&journal->j_state_lock); | 148 | spin_unlock(&journal->j_state_lock); |
149 | 149 | ||
150 | /* And try again, without the barrier */ | 150 | /* And try again, without the barrier */ |
151 | clear_buffer_ordered(bh); | ||
152 | set_buffer_uptodate(bh); | 151 | set_buffer_uptodate(bh); |
153 | set_buffer_dirty(bh); | 152 | set_buffer_dirty(bh); |
154 | ret = sync_dirty_buffer(bh); | 153 | ret = sync_dirty_buffer(bh); |
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 5d14243499d4..3943a8905eb2 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
@@ -1457,7 +1457,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer) | |||
1457 | * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, | 1457 | * Aborts hard --- we mark the abort as occurred, but do _nothing_ else, |
1458 | * and don't attempt to make any other journal updates. | 1458 | * and don't attempt to make any other journal updates. |
1459 | */ | 1459 | */ |
1460 | void __journal_abort_hard(journal_t *journal) | 1460 | static void __journal_abort_hard(journal_t *journal) |
1461 | { | 1461 | { |
1462 | transaction_t *transaction; | 1462 | transaction_t *transaction; |
1463 | char b[BDEVNAME_SIZE]; | 1463 | char b[BDEVNAME_SIZE]; |
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index c5d9694b6a2f..2b8edf4d6eaa 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c | |||
@@ -354,7 +354,7 @@ static int do_one_pass(journal_t *journal, | |||
354 | struct buffer_head * obh; | 354 | struct buffer_head * obh; |
355 | struct buffer_head * nbh; | 355 | struct buffer_head * nbh; |
356 | 356 | ||
357 | cond_resched(); /* We're under lock_kernel() */ | 357 | cond_resched(); |
358 | 358 | ||
359 | /* If we already know where to stop the log traversal, | 359 | /* If we already know where to stop the log traversal, |
360 | * check right now that we haven't gone past the end of | 360 | * check right now that we haven't gone past the end of |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 4f302d279279..a8173081f831 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
@@ -136,18 +136,20 @@ static int journal_submit_commit_record(journal_t *journal, | |||
136 | 136 | ||
137 | JBUFFER_TRACE(descriptor, "submit commit block"); | 137 | JBUFFER_TRACE(descriptor, "submit commit block"); |
138 | lock_buffer(bh); | 138 | lock_buffer(bh); |
139 | 139 | get_bh(bh); | |
140 | set_buffer_dirty(bh); | 140 | set_buffer_dirty(bh); |
141 | set_buffer_uptodate(bh); | 141 | set_buffer_uptodate(bh); |
142 | bh->b_end_io = journal_end_buffer_io_sync; | 142 | bh->b_end_io = journal_end_buffer_io_sync; |
143 | 143 | ||
144 | if (journal->j_flags & JBD2_BARRIER && | 144 | if (journal->j_flags & JBD2_BARRIER && |
145 | !JBD2_HAS_COMPAT_FEATURE(journal, | 145 | !JBD2_HAS_INCOMPAT_FEATURE(journal, |
146 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { | 146 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { |
147 | set_buffer_ordered(bh); | 147 | set_buffer_ordered(bh); |
148 | barrier_done = 1; | 148 | barrier_done = 1; |
149 | } | 149 | } |
150 | ret = submit_bh(WRITE, bh); | 150 | ret = submit_bh(WRITE, bh); |
151 | if (barrier_done) | ||
152 | clear_buffer_ordered(bh); | ||
151 | 153 | ||
152 | /* is it possible for another commit to fail at roughly | 154 | /* is it possible for another commit to fail at roughly |
153 | * the same time as this one? If so, we don't want to | 155 | * the same time as this one? If so, we don't want to |
@@ -166,7 +168,6 @@ static int journal_submit_commit_record(journal_t *journal, | |||
166 | spin_unlock(&journal->j_state_lock); | 168 | spin_unlock(&journal->j_state_lock); |
167 | 169 | ||
168 | /* And try again, without the barrier */ | 170 | /* And try again, without the barrier */ |
169 | clear_buffer_ordered(bh); | ||
170 | set_buffer_uptodate(bh); | 171 | set_buffer_uptodate(bh); |
171 | set_buffer_dirty(bh); | 172 | set_buffer_dirty(bh); |
172 | ret = submit_bh(WRITE, bh); | 173 | ret = submit_bh(WRITE, bh); |
@@ -872,7 +873,8 @@ wait_for_iobuf: | |||
872 | if (err) | 873 | if (err) |
873 | __jbd2_journal_abort_hard(journal); | 874 | __jbd2_journal_abort_hard(journal); |
874 | } | 875 | } |
875 | err = journal_wait_on_commit_record(cbh); | 876 | if (!err && !is_journal_aborted(journal)) |
877 | err = journal_wait_on_commit_record(cbh); | ||
876 | 878 | ||
877 | if (err) | 879 | if (err) |
878 | jbd2_journal_abort(journal, err); | 880 | jbd2_journal_abort(journal, err); |
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 921680663fa2..146411387ada 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c | |||
@@ -397,7 +397,7 @@ static int do_one_pass(journal_t *journal, | |||
397 | struct buffer_head * obh; | 397 | struct buffer_head * obh; |
398 | struct buffer_head * nbh; | 398 | struct buffer_head * nbh; |
399 | 399 | ||
400 | cond_resched(); /* We're under lock_kernel() */ | 400 | cond_resched(); |
401 | 401 | ||
402 | /* If we already know where to stop the log traversal, | 402 | /* If we already know where to stop the log traversal, |
403 | * check right now that we haven't gone past the end of | 403 | * check right now that we haven't gone past the end of |
@@ -641,7 +641,7 @@ static int do_one_pass(journal_t *journal, | |||
641 | if (chksum_err) { | 641 | if (chksum_err) { |
642 | info->end_transaction = next_commit_ID; | 642 | info->end_transaction = next_commit_ID; |
643 | 643 | ||
644 | if (!JBD2_HAS_COMPAT_FEATURE(journal, | 644 | if (!JBD2_HAS_INCOMPAT_FEATURE(journal, |
645 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){ | 645 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){ |
646 | printk(KERN_ERR | 646 | printk(KERN_ERR |
647 | "JBD: Transaction %u " | 647 | "JBD: Transaction %u " |
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 77fc5838609c..4c80404a9aba 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c | |||
@@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct | |||
176 | spin_unlock(&inode->i_lock); | 176 | spin_unlock(&inode->i_lock); |
177 | } | 177 | } |
178 | 178 | ||
179 | struct posix_acl *jffs2_get_acl(struct inode *inode, int type) | 179 | static struct posix_acl *jffs2_get_acl(struct inode *inode, int type) |
180 | { | 180 | { |
181 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 181 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
182 | struct posix_acl *acl; | 182 | struct posix_acl *acl; |
@@ -345,8 +345,10 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode) | |||
345 | if (!clone) | 345 | if (!clone) |
346 | return -ENOMEM; | 346 | return -ENOMEM; |
347 | rc = posix_acl_create_masq(clone, (mode_t *)i_mode); | 347 | rc = posix_acl_create_masq(clone, (mode_t *)i_mode); |
348 | if (rc < 0) | 348 | if (rc < 0) { |
349 | posix_acl_release(clone); | ||
349 | return rc; | 350 | return rc; |
351 | } | ||
350 | if (rc > 0) | 352 | if (rc > 0) |
351 | jffs2_iset_acl(inode, &f->i_acl_access, clone); | 353 | jffs2_iset_acl(inode, &f->i_acl_access, clone); |
352 | 354 | ||
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h index 76c6ebd1acd9..0bb7f003fd80 100644 --- a/fs/jffs2/acl.h +++ b/fs/jffs2/acl.h | |||
@@ -28,7 +28,6 @@ struct jffs2_acl_header { | |||
28 | 28 | ||
29 | #define JFFS2_ACL_NOT_CACHED ((void *)-1) | 29 | #define JFFS2_ACL_NOT_CACHED ((void *)-1) |
30 | 30 | ||
31 | extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type); | ||
32 | extern int jffs2_permission(struct inode *, int, struct nameidata *); | 31 | extern int jffs2_permission(struct inode *, int, struct nameidata *); |
33 | extern int jffs2_acl_chmod(struct inode *); | 32 | extern int jffs2_acl_chmod(struct inode *); |
34 | extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); | 33 | extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); |
@@ -40,7 +39,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler; | |||
40 | 39 | ||
41 | #else | 40 | #else |
42 | 41 | ||
43 | #define jffs2_get_acl(inode, type) (NULL) | ||
44 | #define jffs2_permission (NULL) | 42 | #define jffs2_permission (NULL) |
45 | #define jffs2_acl_chmod(inode) (0) | 43 | #define jffs2_acl_chmod(inode) (0) |
46 | #define jffs2_init_acl_pre(dir_i,inode,mode) (0) | 44 | #define jffs2_init_acl_pre(dir_i,inode,mode) (0) |
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 787e392ffd41..f948f7e6ec82 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -101,10 +101,10 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
101 | ino = fd->ino; | 101 | ino = fd->ino; |
102 | up(&dir_f->sem); | 102 | up(&dir_f->sem); |
103 | if (ino) { | 103 | if (ino) { |
104 | inode = iget(dir_i->i_sb, ino); | 104 | inode = jffs2_iget(dir_i->i_sb, ino); |
105 | if (!inode) { | 105 | if (IS_ERR(inode)) { |
106 | printk(KERN_WARNING "iget() failed for ino #%u\n", ino); | 106 | printk(KERN_WARNING "iget() failed for ino #%u\n", ino); |
107 | return (ERR_PTR(-EIO)); | 107 | return ERR_CAST(inode); |
108 | } | 108 | } |
109 | } | 109 | } |
110 | 110 | ||
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index d2e06f7ea96f..e26ea78c7892 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -97,11 +97,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
97 | ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); | 97 | ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); |
98 | 98 | ||
99 | if (ivalid & ATTR_MODE) | 99 | if (ivalid & ATTR_MODE) |
100 | if (iattr->ia_mode & S_ISGID && | 100 | ri->mode = cpu_to_jemode(iattr->ia_mode); |
101 | !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) | ||
102 | ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); | ||
103 | else | ||
104 | ri->mode = cpu_to_jemode(iattr->ia_mode); | ||
105 | else | 101 | else |
106 | ri->mode = cpu_to_jemode(inode->i_mode); | 102 | ri->mode = cpu_to_jemode(inode->i_mode); |
107 | 103 | ||
@@ -230,16 +226,23 @@ void jffs2_clear_inode (struct inode *inode) | |||
230 | jffs2_do_clear_inode(c, f); | 226 | jffs2_do_clear_inode(c, f); |
231 | } | 227 | } |
232 | 228 | ||
233 | void jffs2_read_inode (struct inode *inode) | 229 | struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) |
234 | { | 230 | { |
235 | struct jffs2_inode_info *f; | 231 | struct jffs2_inode_info *f; |
236 | struct jffs2_sb_info *c; | 232 | struct jffs2_sb_info *c; |
237 | struct jffs2_raw_inode latest_node; | 233 | struct jffs2_raw_inode latest_node; |
238 | union jffs2_device_node jdev; | 234 | union jffs2_device_node jdev; |
235 | struct inode *inode; | ||
239 | dev_t rdev = 0; | 236 | dev_t rdev = 0; |
240 | int ret; | 237 | int ret; |
241 | 238 | ||
242 | D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); | 239 | D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino)); |
240 | |||
241 | inode = iget_locked(sb, ino); | ||
242 | if (!inode) | ||
243 | return ERR_PTR(-ENOMEM); | ||
244 | if (!(inode->i_state & I_NEW)) | ||
245 | return inode; | ||
243 | 246 | ||
244 | f = JFFS2_INODE_INFO(inode); | 247 | f = JFFS2_INODE_INFO(inode); |
245 | c = JFFS2_SB_INFO(inode->i_sb); | 248 | c = JFFS2_SB_INFO(inode->i_sb); |
@@ -250,9 +253,9 @@ void jffs2_read_inode (struct inode *inode) | |||
250 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); | 253 | ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); |
251 | 254 | ||
252 | if (ret) { | 255 | if (ret) { |
253 | make_bad_inode(inode); | ||
254 | up(&f->sem); | 256 | up(&f->sem); |
255 | return; | 257 | iget_failed(inode); |
258 | return ERR_PTR(ret); | ||
256 | } | 259 | } |
257 | inode->i_mode = jemode_to_cpu(latest_node.mode); | 260 | inode->i_mode = jemode_to_cpu(latest_node.mode); |
258 | inode->i_uid = je16_to_cpu(latest_node.uid); | 261 | inode->i_uid = je16_to_cpu(latest_node.uid); |
@@ -303,19 +306,14 @@ void jffs2_read_inode (struct inode *inode) | |||
303 | if (f->metadata->size != sizeof(jdev.old) && | 306 | if (f->metadata->size != sizeof(jdev.old) && |
304 | f->metadata->size != sizeof(jdev.new)) { | 307 | f->metadata->size != sizeof(jdev.new)) { |
305 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); | 308 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); |
306 | up(&f->sem); | 309 | goto error_io; |
307 | jffs2_do_clear_inode(c, f); | ||
308 | make_bad_inode(inode); | ||
309 | return; | ||
310 | } | 310 | } |
311 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); | 311 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); |
312 | if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) { | 312 | ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); |
313 | if (ret < 0) { | ||
313 | /* Eep */ | 314 | /* Eep */ |
314 | printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); | 315 | printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); |
315 | up(&f->sem); | 316 | goto error; |
316 | jffs2_do_clear_inode(c, f); | ||
317 | make_bad_inode(inode); | ||
318 | return; | ||
319 | } | 317 | } |
320 | if (f->metadata->size == sizeof(jdev.old)) | 318 | if (f->metadata->size == sizeof(jdev.old)) |
321 | rdev = old_decode_dev(je16_to_cpu(jdev.old)); | 319 | rdev = old_decode_dev(je16_to_cpu(jdev.old)); |
@@ -335,6 +333,16 @@ void jffs2_read_inode (struct inode *inode) | |||
335 | up(&f->sem); | 333 | up(&f->sem); |
336 | 334 | ||
337 | D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); | 335 | D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); |
336 | unlock_new_inode(inode); | ||
337 | return inode; | ||
338 | |||
339 | error_io: | ||
340 | ret = -EIO; | ||
341 | error: | ||
342 | up(&f->sem); | ||
343 | jffs2_do_clear_inode(c, f); | ||
344 | iget_failed(inode); | ||
345 | return ERR_PTR(ret); | ||
338 | } | 346 | } |
339 | 347 | ||
340 | void jffs2_dirty_inode(struct inode *inode) | 348 | void jffs2_dirty_inode(struct inode *inode) |
@@ -522,15 +530,16 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
522 | if ((ret = jffs2_do_mount_fs(c))) | 530 | if ((ret = jffs2_do_mount_fs(c))) |
523 | goto out_inohash; | 531 | goto out_inohash; |
524 | 532 | ||
525 | ret = -EINVAL; | ||
526 | |||
527 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); | 533 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); |
528 | root_i = iget(sb, 1); | 534 | root_i = jffs2_iget(sb, 1); |
529 | if (is_bad_inode(root_i)) { | 535 | if (IS_ERR(root_i)) { |
530 | D1(printk(KERN_WARNING "get root inode failed\n")); | 536 | D1(printk(KERN_WARNING "get root inode failed\n")); |
531 | goto out_root_i; | 537 | ret = PTR_ERR(root_i); |
538 | goto out_root; | ||
532 | } | 539 | } |
533 | 540 | ||
541 | ret = -ENOMEM; | ||
542 | |||
534 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); | 543 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); |
535 | sb->s_root = d_alloc_root(root_i); | 544 | sb->s_root = d_alloc_root(root_i); |
536 | if (!sb->s_root) | 545 | if (!sb->s_root) |
@@ -546,6 +555,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
546 | 555 | ||
547 | out_root_i: | 556 | out_root_i: |
548 | iput(root_i); | 557 | iput(root_i); |
558 | out_root: | ||
549 | jffs2_free_ino_caches(c); | 559 | jffs2_free_ino_caches(c); |
550 | jffs2_free_raw_node_refs(c); | 560 | jffs2_free_raw_node_refs(c); |
551 | if (jffs2_blocks_use_vmalloc(c)) | 561 | if (jffs2_blocks_use_vmalloc(c)) |
@@ -615,9 +625,9 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
615 | jffs2_do_unlink() would need the alloc_sem and we have it. | 625 | jffs2_do_unlink() would need the alloc_sem and we have it. |
616 | Just iget() it, and if read_inode() is necessary that's OK. | 626 | Just iget() it, and if read_inode() is necessary that's OK. |
617 | */ | 627 | */ |
618 | inode = iget(OFNI_BS_2SFFJ(c), inum); | 628 | inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); |
619 | if (!inode) | 629 | if (IS_ERR(inode)) |
620 | return ERR_PTR(-ENOMEM); | 630 | return ERR_CAST(inode); |
621 | } | 631 | } |
622 | if (is_bad_inode(inode)) { | 632 | if (is_bad_inode(inode)) { |
623 | printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n", | 633 | printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n", |
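
The jffs2 conversion above follows the same shape used for isofs and jfs in this merge: the ->read_inode() super operation goes away and each filesystem gains an <fs>_iget() helper built on iget_locked(). A condensed sketch of that skeleton, with example_read_inode() standing in as a hypothetical per-filesystem reader that returns 0 or a negative errno:

	struct inode *example_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = iget_locked(sb, ino);
		int err;

		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;		/* already cached and initialised */

		err = example_read_inode(inode);	/* hypothetical fs-specific reader */
		if (err) {
			iget_failed(inode);	/* unlock and drop the half-built inode */
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
		return inode;
	}
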
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 4bf86088b3ae..87c6f555e1a0 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -32,15 +32,18 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new | |||
32 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { | 32 | if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { |
33 | /* Duplicate. Free one */ | 33 | /* Duplicate. Free one */ |
34 | if (new->version < (*prev)->version) { | 34 | if (new->version < (*prev)->version) { |
35 | dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", | 35 | dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", |
36 | (*prev)->name, (*prev)->ino); | 36 | (*prev)->name, (*prev)->ino); |
37 | jffs2_mark_node_obsolete(c, new->raw); | 37 | jffs2_mark_node_obsolete(c, new->raw); |
38 | jffs2_free_full_dirent(new); | 38 | jffs2_free_full_dirent(new); |
39 | } else { | 39 | } else { |
40 | dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", | 40 | dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", |
41 | (*prev)->name, (*prev)->ino); | 41 | (*prev)->name, (*prev)->ino); |
42 | new->next = (*prev)->next; | 42 | new->next = (*prev)->next; |
43 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); | 43 | /* It may have been a 'placeholder' deletion dirent, |
44 | if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ | ||
45 | if ((*prev)->raw) | ||
46 | jffs2_mark_node_obsolete(c, ((*prev)->raw)); | ||
44 | jffs2_free_full_dirent(*prev); | 47 | jffs2_free_full_dirent(*prev); |
45 | *prev = new; | 48 | *prev = new; |
46 | } | 49 | } |
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index bf64686cf098..1b10d2594092 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
@@ -175,7 +175,7 @@ extern const struct inode_operations jffs2_symlink_inode_operations; | |||
175 | /* fs.c */ | 175 | /* fs.c */ |
176 | int jffs2_setattr (struct dentry *, struct iattr *); | 176 | int jffs2_setattr (struct dentry *, struct iattr *); |
177 | int jffs2_do_setattr (struct inode *, struct iattr *); | 177 | int jffs2_do_setattr (struct inode *, struct iattr *); |
178 | void jffs2_read_inode (struct inode *); | 178 | struct inode *jffs2_iget(struct super_block *, unsigned long); |
179 | void jffs2_clear_inode (struct inode *); | 179 | void jffs2_clear_inode (struct inode *); |
180 | void jffs2_dirty_inode(struct inode *inode); | 180 | void jffs2_dirty_inode(struct inode *inode); |
181 | struct inode *jffs2_new_inode (struct inode *dir_i, int mode, | 181 | struct inode *jffs2_new_inode (struct inode *dir_i, int mode, |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 6c1ba3566f58..e512a93d6249 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -37,23 +37,24 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
37 | 37 | ||
38 | BUG_ON(tn->csize == 0); | 38 | BUG_ON(tn->csize == 0); |
39 | 39 | ||
40 | if (!jffs2_is_writebuffered(c)) | ||
41 | goto adj_acc; | ||
42 | |||
43 | /* Calculate how many bytes were already checked */ | 40 | /* Calculate how many bytes were already checked */ |
44 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); | 41 | ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); |
45 | len = ofs % c->wbuf_pagesize; | 42 | len = tn->csize; |
46 | if (likely(len)) | 43 | |
47 | len = c->wbuf_pagesize - len; | 44 | if (jffs2_is_writebuffered(c)) { |
48 | 45 | int adj = ofs % c->wbuf_pagesize; | |
49 | if (len >= tn->csize) { | 46 | if (likely(adj)) |
50 | dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", | 47 | adj = c->wbuf_pagesize - adj; |
51 | ref_offset(ref), tn->csize, ofs); | 48 | |
52 | goto adj_acc; | 49 | if (adj >= tn->csize) { |
53 | } | 50 | dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", |
51 | ref_offset(ref), tn->csize, ofs); | ||
52 | goto adj_acc; | ||
53 | } | ||
54 | 54 | ||
55 | ofs += len; | 55 | ofs += adj; |
56 | len = tn->csize - len; | 56 | len -= adj; |
57 | } | ||
57 | 58 | ||
58 | dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", | 59 | dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", |
59 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); | 60 | ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); |
@@ -63,7 +64,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
63 | * adding and jffs2_flash_read_end() interface. */ | 64 | * adding and jffs2_flash_read_end() interface. */ |
64 | if (c->mtd->point) { | 65 | if (c->mtd->point) { |
65 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | 66 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); |
66 | if (!err && retlen < tn->csize) { | 67 | if (!err && retlen < len) { |
67 | JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); | 68 | JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); |
68 | c->mtd->unpoint(c->mtd, buffer, ofs, retlen); | 69 | c->mtd->unpoint(c->mtd, buffer, ofs, retlen); |
69 | } else if (err) | 70 | } else if (err) |
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index ffa447511e6a..4677355996cc 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -65,7 +65,6 @@ static const struct super_operations jffs2_super_operations = | |||
65 | { | 65 | { |
66 | .alloc_inode = jffs2_alloc_inode, | 66 | .alloc_inode = jffs2_alloc_inode, |
67 | .destroy_inode =jffs2_destroy_inode, | 67 | .destroy_inode =jffs2_destroy_inode, |
68 | .read_inode = jffs2_read_inode, | ||
69 | .put_super = jffs2_put_super, | 68 | .put_super = jffs2_put_super, |
70 | .write_super = jffs2_write_super, | 69 | .write_super = jffs2_write_super, |
71 | .statfs = jffs2_statfs, | 70 | .statfs = jffs2_statfs, |
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 147e2cbee9e4..776f13cbf2b5 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -177,7 +177,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
177 | void *hold_err = fn->raw; | 177 | void *hold_err = fn->raw; |
178 | /* Release the full_dnode which is now useless, and return */ | 178 | /* Release the full_dnode which is now useless, and return */ |
179 | jffs2_free_full_dnode(fn); | 179 | jffs2_free_full_dnode(fn); |
180 | return ERR_PTR(PTR_ERR(hold_err)); | 180 | return ERR_CAST(hold_err); |
181 | } | 181 | } |
182 | fn->ofs = je32_to_cpu(ri->offset); | 182 | fn->ofs = je32_to_cpu(ri->offset); |
183 | fn->size = je32_to_cpu(ri->dsize); | 183 | fn->size = je32_to_cpu(ri->dsize); |
@@ -313,7 +313,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
313 | void *hold_err = fd->raw; | 313 | void *hold_err = fd->raw; |
314 | /* Release the full_dirent which is now useless, and return */ | 314 | /* Release the full_dirent which is now useless, and return */ |
315 | jffs2_free_full_dirent(fd); | 315 | jffs2_free_full_dirent(fd); |
316 | return ERR_PTR(PTR_ERR(hold_err)); | 316 | return ERR_CAST(hold_err); |
317 | } | 317 | } |
318 | 318 | ||
319 | if (retried) { | 319 | if (retried) { |
@@ -582,7 +582,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
582 | jffs2_add_fd_to_list(c, fd, &dir_f->dents); | 582 | jffs2_add_fd_to_list(c, fd, &dir_f->dents); |
583 | up(&dir_f->sem); | 583 | up(&dir_f->sem); |
584 | } else { | 584 | } else { |
585 | struct jffs2_full_dirent **prev = &dir_f->dents; | 585 | struct jffs2_full_dirent *fd = dir_f->dents; |
586 | uint32_t nhash = full_name_hash(name, namelen); | 586 | uint32_t nhash = full_name_hash(name, namelen); |
587 | 587 | ||
588 | /* We don't actually want to reserve any space, but we do | 588 | /* We don't actually want to reserve any space, but we do |
@@ -590,21 +590,22 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
590 | down(&c->alloc_sem); | 590 | down(&c->alloc_sem); |
591 | down(&dir_f->sem); | 591 | down(&dir_f->sem); |
592 | 592 | ||
593 | while ((*prev) && (*prev)->nhash <= nhash) { | 593 | for (fd = dir_f->dents; fd; fd = fd->next) { |
594 | if ((*prev)->nhash == nhash && | 594 | if (fd->nhash == nhash && |
595 | !memcmp((*prev)->name, name, namelen) && | 595 | !memcmp(fd->name, name, namelen) && |
596 | !(*prev)->name[namelen]) { | 596 | !fd->name[namelen]) { |
597 | struct jffs2_full_dirent *this = *prev; | ||
598 | 597 | ||
599 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", | 598 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", |
600 | this->ino, ref_offset(this->raw))); | 599 | fd->ino, ref_offset(fd->raw))); |
601 | 600 | jffs2_mark_node_obsolete(c, fd->raw); | |
602 | *prev = this->next; | 601 | /* We don't want to remove it from the list immediately, |
603 | jffs2_mark_node_obsolete(c, (this->raw)); | 602 | because that screws up getdents()/seek() semantics even |
604 | jffs2_free_full_dirent(this); | 603 | more than they're screwed already. Turn it into a |
604 | node-less deletion dirent instead -- a placeholder */ | ||
605 | fd->raw = NULL; | ||
606 | fd->ino = 0; | ||
605 | break; | 607 | break; |
606 | } | 608 | } |
607 | prev = &((*prev)->next); | ||
608 | } | 609 | } |
609 | up(&dir_f->sem); | 610 | up(&dir_f->sem); |
610 | } | 611 | } |
@@ -630,7 +631,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
630 | D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", | 631 | D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", |
631 | fd->name, dead_f->inocache->ino)); | 632 | fd->name, dead_f->inocache->ino)); |
632 | } | 633 | } |
633 | jffs2_mark_node_obsolete(c, fd->raw); | 634 | if (fd->raw) |
635 | jffs2_mark_node_obsolete(c, fd->raw); | ||
634 | jffs2_free_full_dirent(fd); | 636 | jffs2_free_full_dirent(fd); |
635 | } | 637 | } |
636 | } | 638 | } |
diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 87eb93694af7..7f6063acaa3b 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c | |||
@@ -112,5 +112,8 @@ const struct file_operations jfs_file_operations = { | |||
112 | .splice_write = generic_file_splice_write, | 112 | .splice_write = generic_file_splice_write, |
113 | .fsync = jfs_fsync, | 113 | .fsync = jfs_fsync, |
114 | .release = jfs_release, | 114 | .release = jfs_release, |
115 | .ioctl = jfs_ioctl, | 115 | .unlocked_ioctl = jfs_ioctl, |
116 | #ifdef CONFIG_COMPAT | ||
117 | .compat_ioctl = jfs_compat_ioctl, | ||
118 | #endif | ||
116 | }; | 119 | }; |
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 4672013802e1..210339784b56 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -31,11 +31,21 @@ | |||
31 | #include "jfs_debug.h" | 31 | #include "jfs_debug.h" |
32 | 32 | ||
33 | 33 | ||
34 | void jfs_read_inode(struct inode *inode) | 34 | struct inode *jfs_iget(struct super_block *sb, unsigned long ino) |
35 | { | 35 | { |
36 | if (diRead(inode)) { | 36 | struct inode *inode; |
37 | make_bad_inode(inode); | 37 | int ret; |
38 | return; | 38 | |
39 | inode = iget_locked(sb, ino); | ||
40 | if (!inode) | ||
41 | return ERR_PTR(-ENOMEM); | ||
42 | if (!(inode->i_state & I_NEW)) | ||
43 | return inode; | ||
44 | |||
45 | ret = diRead(inode); | ||
46 | if (ret < 0) { | ||
47 | iget_failed(inode); | ||
48 | return ERR_PTR(ret); | ||
39 | } | 49 | } |
40 | 50 | ||
41 | if (S_ISREG(inode->i_mode)) { | 51 | if (S_ISREG(inode->i_mode)) { |
@@ -55,6 +65,8 @@ void jfs_read_inode(struct inode *inode) | |||
55 | inode->i_op = &jfs_file_inode_operations; | 65 | inode->i_op = &jfs_file_inode_operations; |
56 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | 66 | init_special_inode(inode, inode->i_mode, inode->i_rdev); |
57 | } | 67 | } |
68 | unlock_new_inode(inode); | ||
69 | return inode; | ||
58 | } | 70 | } |
59 | 71 | ||
60 | /* | 72 | /* |
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c index dfda12a073e1..a1f8e375ad21 100644 --- a/fs/jfs/ioctl.c +++ b/fs/jfs/ioctl.c | |||
@@ -51,9 +51,9 @@ static long jfs_map_ext2(unsigned long flags, int from) | |||
51 | } | 51 | } |
52 | 52 | ||
53 | 53 | ||
54 | int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | 54 | long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
55 | unsigned long arg) | ||
56 | { | 55 | { |
56 | struct inode *inode = filp->f_dentry->d_inode; | ||
57 | struct jfs_inode_info *jfs_inode = JFS_IP(inode); | 57 | struct jfs_inode_info *jfs_inode = JFS_IP(inode); |
58 | unsigned int flags; | 58 | unsigned int flags; |
59 | 59 | ||
@@ -82,6 +82,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | |||
82 | /* Is it quota file? Do not allow user to mess with it */ | 82 | /* Is it quota file? Do not allow user to mess with it */ |
83 | if (IS_NOQUOTA(inode)) | 83 | if (IS_NOQUOTA(inode)) |
84 | return -EPERM; | 84 | return -EPERM; |
85 | |||
86 | /* Lock against other parallel changes of flags */ | ||
87 | mutex_lock(&inode->i_mutex); | ||
88 | |||
85 | jfs_get_inode_flags(jfs_inode); | 89 | jfs_get_inode_flags(jfs_inode); |
86 | oldflags = jfs_inode->mode2; | 90 | oldflags = jfs_inode->mode2; |
87 | 91 | ||
@@ -92,8 +96,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | |||
92 | if ((oldflags & JFS_IMMUTABLE_FL) || | 96 | if ((oldflags & JFS_IMMUTABLE_FL) || |
93 | ((flags ^ oldflags) & | 97 | ((flags ^ oldflags) & |
94 | (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { | 98 | (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { |
95 | if (!capable(CAP_LINUX_IMMUTABLE)) | 99 | if (!capable(CAP_LINUX_IMMUTABLE)) { |
100 | mutex_unlock(&inode->i_mutex); | ||
96 | return -EPERM; | 101 | return -EPERM; |
102 | } | ||
97 | } | 103 | } |
98 | 104 | ||
99 | flags = flags & JFS_FL_USER_MODIFIABLE; | 105 | flags = flags & JFS_FL_USER_MODIFIABLE; |
@@ -101,6 +107,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | |||
101 | jfs_inode->mode2 = flags; | 107 | jfs_inode->mode2 = flags; |
102 | 108 | ||
103 | jfs_set_inode_flags(inode); | 109 | jfs_set_inode_flags(inode); |
110 | mutex_unlock(&inode->i_mutex); | ||
104 | inode->i_ctime = CURRENT_TIME_SEC; | 111 | inode->i_ctime = CURRENT_TIME_SEC; |
105 | mark_inode_dirty(inode); | 112 | mark_inode_dirty(inode); |
106 | return 0; | 113 | return 0; |
@@ -110,3 +117,21 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, | |||
110 | } | 117 | } |
111 | } | 118 | } |
112 | 119 | ||
120 | #ifdef CONFIG_COMPAT | ||
121 | long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
122 | { | ||
123 | /* While these ioctl numbers are defined with 'long' and have different | ||
124 | * numbers than the 64bit ABI, | ||
125 | * the actual implementation only deals with ints and is compatible. | ||
126 | */ | ||
127 | switch (cmd) { | ||
128 | case JFS_IOC_GETFLAGS32: | ||
129 | cmd = JFS_IOC_GETFLAGS; | ||
130 | break; | ||
131 | case JFS_IOC_SETFLAGS32: | ||
132 | cmd = JFS_IOC_SETFLAGS; | ||
133 | break; | ||
134 | } | ||
135 | return jfs_ioctl(filp, cmd, arg); | ||
136 | } | ||
137 | #endif | ||
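fs/jfs/ioctl.c also moves from the legacy ->ioctl entry point (called with the BKL held and handed the inode) to ->unlocked_ioctl, which receives only the file and no implicit locking; that is why the flag update above now takes i_mutex explicitly, and why the inode is fetched from the file (f_dentry is the compatibility alias for f_path.dentry). A minimal sketch of the new calling convention (demo_* names are hypothetical, not JFS functions):

```c
#include <linux/fs.h>
#include <linux/mutex.h>

static long demo_apply_flags(struct inode *inode, unsigned long arg)
{
	return 0;	/* placeholder for the real flag update */
}

static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	/* No inode argument and no BKL: derive the inode from the file and
	 * serialize explicitly against concurrent flag changes. */
	struct inode *inode = filp->f_path.dentry->d_inode;
	long ret;

	mutex_lock(&inode->i_mutex);
	ret = demo_apply_flags(inode, arg);
	mutex_unlock(&inode->i_mutex);
	return ret;
}
```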
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h index c387540d3425..395c4c0d0f06 100644 --- a/fs/jfs/jfs_dinode.h +++ b/fs/jfs/jfs_dinode.h | |||
@@ -170,5 +170,7 @@ struct dinode { | |||
170 | #define JFS_IOC_GETFLAGS _IOR('f', 1, long) | 170 | #define JFS_IOC_GETFLAGS _IOR('f', 1, long) |
171 | #define JFS_IOC_SETFLAGS _IOW('f', 2, long) | 171 | #define JFS_IOC_SETFLAGS _IOW('f', 2, long) |
172 | 172 | ||
173 | #define JFS_IOC_GETFLAGS32 _IOR('f', 1, int) | ||
174 | #define JFS_IOC_SETFLAGS32 _IOW('f', 2, int) | ||
173 | 175 | ||
174 | #endif /*_H_JFS_DINODE */ | 176 | #endif /*_H_JFS_DINODE */ |
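The new *32 definitions exist because _IOR()/_IOW() encode sizeof(type) in the command number: a 32-bit process builds JFS_IOC_GETFLAGS with a 4-byte long, so on a 64-bit kernel the request arrives as a different number and jfs_compat_ioctl() maps it back to the native one. A small userspace check of the encodings (values redefined locally just for printing; run on a 64-bit build to see the size field differ):

```c
#include <stdio.h>
#include <linux/ioctl.h>

#define JFS_IOC_GETFLAGS	_IOR('f', 1, long)
#define JFS_IOC_SETFLAGS	_IOW('f', 2, long)
#define JFS_IOC_GETFLAGS32	_IOR('f', 1, int)
#define JFS_IOC_SETFLAGS32	_IOW('f', 2, int)

int main(void)
{
	/* On a 64-bit build the 'long' variants encode size 8 and the 'int'
	 * variants size 4, so the command numbers differ. */
	printf("GETFLAGS   = %#x\n", (unsigned int)JFS_IOC_GETFLAGS);
	printf("GETFLAGS32 = %#x\n", (unsigned int)JFS_IOC_GETFLAGS32);
	printf("SETFLAGS   = %#x\n", (unsigned int)JFS_IOC_SETFLAGS);
	printf("SETFLAGS32 = %#x\n", (unsigned int)JFS_IOC_SETFLAGS32);
	return 0;
}
```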
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 8e2cf2cde185..adb2fafcc544 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h | |||
@@ -22,9 +22,9 @@ struct fid; | |||
22 | 22 | ||
23 | extern struct inode *ialloc(struct inode *, umode_t); | 23 | extern struct inode *ialloc(struct inode *, umode_t); |
24 | extern int jfs_fsync(struct file *, struct dentry *, int); | 24 | extern int jfs_fsync(struct file *, struct dentry *, int); |
25 | extern int jfs_ioctl(struct inode *, struct file *, | 25 | extern long jfs_ioctl(struct file *, unsigned int, unsigned long); |
26 | unsigned int, unsigned long); | 26 | extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); |
27 | extern void jfs_read_inode(struct inode *); | 27 | extern struct inode *jfs_iget(struct super_block *, unsigned long); |
28 | extern int jfs_commit_inode(struct inode *, int); | 28 | extern int jfs_commit_inode(struct inode *, int); |
29 | extern int jfs_write_inode(struct inode*, int); | 29 | extern int jfs_write_inode(struct inode*, int); |
30 | extern void jfs_delete_inode(struct inode *); | 30 | extern void jfs_delete_inode(struct inode *); |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index f8718de3505e..0ba6778edaa2 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -1462,12 +1462,10 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc | |||
1462 | } | 1462 | } |
1463 | } | 1463 | } |
1464 | 1464 | ||
1465 | ip = iget(dip->i_sb, inum); | 1465 | ip = jfs_iget(dip->i_sb, inum); |
1466 | if (ip == NULL || is_bad_inode(ip)) { | 1466 | if (IS_ERR(ip)) { |
1467 | jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); | 1467 | jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); |
1468 | if (ip) | 1468 | return ERR_CAST(ip); |
1469 | iput(ip); | ||
1470 | return ERR_PTR(-EACCES); | ||
1471 | } | 1469 | } |
1472 | 1470 | ||
1473 | dentry = d_splice_alias(ip, dentry); | 1471 | dentry = d_splice_alias(ip, dentry); |
@@ -1485,12 +1483,11 @@ static struct inode *jfs_nfs_get_inode(struct super_block *sb, | |||
1485 | 1483 | ||
1486 | if (ino == 0) | 1484 | if (ino == 0) |
1487 | return ERR_PTR(-ESTALE); | 1485 | return ERR_PTR(-ESTALE); |
1488 | inode = iget(sb, ino); | 1486 | inode = jfs_iget(sb, ino); |
1489 | if (inode == NULL) | 1487 | if (IS_ERR(inode)) |
1490 | return ERR_PTR(-ENOMEM); | 1488 | return ERR_CAST(inode); |
1491 | 1489 | ||
1492 | if (is_bad_inode(inode) || | 1490 | if (generation && inode->i_generation != generation) { |
1493 | (generation && inode->i_generation != generation)) { | ||
1494 | iput(inode); | 1491 | iput(inode); |
1495 | return ERR_PTR(-ESTALE); | 1492 | return ERR_PTR(-ESTALE); |
1496 | } | 1493 | } |
@@ -1521,17 +1518,14 @@ struct dentry *jfs_get_parent(struct dentry *dentry) | |||
1521 | 1518 | ||
1522 | parent_ino = | 1519 | parent_ino = |
1523 | le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); | 1520 | le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot); |
1524 | inode = iget(sb, parent_ino); | 1521 | inode = jfs_iget(sb, parent_ino); |
1525 | if (inode) { | 1522 | if (IS_ERR(inode)) { |
1526 | if (is_bad_inode(inode)) { | 1523 | parent = ERR_CAST(inode); |
1524 | } else { | ||
1525 | parent = d_alloc_anon(inode); | ||
1526 | if (!parent) { | ||
1527 | parent = ERR_PTR(-ENOMEM); | ||
1527 | iput(inode); | 1528 | iput(inode); |
1528 | parent = ERR_PTR(-EACCES); | ||
1529 | } else { | ||
1530 | parent = d_alloc_anon(inode); | ||
1531 | if (!parent) { | ||
1532 | parent = ERR_PTR(-ENOMEM); | ||
1533 | iput(inode); | ||
1534 | } | ||
1535 | } | 1529 | } |
1536 | } | 1530 | } |
1537 | 1531 | ||
@@ -1562,7 +1556,10 @@ const struct file_operations jfs_dir_operations = { | |||
1562 | .read = generic_read_dir, | 1556 | .read = generic_read_dir, |
1563 | .readdir = jfs_readdir, | 1557 | .readdir = jfs_readdir, |
1564 | .fsync = jfs_fsync, | 1558 | .fsync = jfs_fsync, |
1565 | .ioctl = jfs_ioctl, | 1559 | .unlocked_ioctl = jfs_ioctl, |
1560 | #ifdef CONFIG_COMPAT | ||
1561 | .compat_ioctl = jfs_compat_ioctl, | ||
1562 | #endif | ||
1566 | }; | 1563 | }; |
1567 | 1564 | ||
1568 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) | 1565 | static int jfs_ci_hash(struct dentry *dir, struct qstr *this) |
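The fs/jfs/namei.c conversions replace NULL-plus-is_bad_inode() checks with the ERR_PTR family: jfs_iget() encodes the errno in the returned pointer, callers test it with IS_ERR() and, when they return a different pointer type themselves, forward it with ERR_CAST(). A generic reminder of the idiom (sketch only):

```c
#include <linux/err.h>
#include <linux/fs.h>

/* Encode an errno in a pointer, test for it, and forward it retyped. */
static struct dentry *demo_forward_error(struct inode *inode)
{
	if (IS_ERR(inode))		/* e.g. ERR_PTR(-ENOMEM) or ERR_PTR(-EIO) */
		return ERR_CAST(inode);	/* same errno, as a struct dentry pointer */
	return NULL;			/* success path elided in this sketch */
}
```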
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 70a14001c98f..50ea65451732 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
@@ -414,7 +414,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
414 | struct inode *inode; | 414 | struct inode *inode; |
415 | int rc; | 415 | int rc; |
416 | s64 newLVSize = 0; | 416 | s64 newLVSize = 0; |
417 | int flag; | 417 | int flag, ret = -EINVAL; |
418 | 418 | ||
419 | jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); | 419 | jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); |
420 | 420 | ||
@@ -461,8 +461,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
461 | * Initialize direct-mapping inode/address-space | 461 | * Initialize direct-mapping inode/address-space |
462 | */ | 462 | */ |
463 | inode = new_inode(sb); | 463 | inode = new_inode(sb); |
464 | if (inode == NULL) | 464 | if (inode == NULL) { |
465 | ret = -ENOMEM; | ||
465 | goto out_kfree; | 466 | goto out_kfree; |
467 | } | ||
466 | inode->i_ino = 0; | 468 | inode->i_ino = 0; |
467 | inode->i_nlink = 1; | 469 | inode->i_nlink = 1; |
468 | inode->i_size = sb->s_bdev->bd_inode->i_size; | 470 | inode->i_size = sb->s_bdev->bd_inode->i_size; |
@@ -494,9 +496,11 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
494 | 496 | ||
495 | sb->s_magic = JFS_SUPER_MAGIC; | 497 | sb->s_magic = JFS_SUPER_MAGIC; |
496 | 498 | ||
497 | inode = iget(sb, ROOT_I); | 499 | inode = jfs_iget(sb, ROOT_I); |
498 | if (!inode || is_bad_inode(inode)) | 500 | if (IS_ERR(inode)) { |
501 | ret = PTR_ERR(inode); | ||
499 | goto out_no_root; | 502 | goto out_no_root; |
503 | } | ||
500 | sb->s_root = d_alloc_root(inode); | 504 | sb->s_root = d_alloc_root(inode); |
501 | if (!sb->s_root) | 505 | if (!sb->s_root) |
502 | goto out_no_root; | 506 | goto out_no_root; |
@@ -536,7 +540,7 @@ out_kfree: | |||
536 | if (sbi->nls_tab) | 540 | if (sbi->nls_tab) |
537 | unload_nls(sbi->nls_tab); | 541 | unload_nls(sbi->nls_tab); |
538 | kfree(sbi); | 542 | kfree(sbi); |
539 | return -EINVAL; | 543 | return ret; |
540 | } | 544 | } |
541 | 545 | ||
542 | static void jfs_write_super_lockfs(struct super_block *sb) | 546 | static void jfs_write_super_lockfs(struct super_block *sb) |
@@ -726,7 +730,6 @@ out: | |||
726 | static const struct super_operations jfs_super_operations = { | 730 | static const struct super_operations jfs_super_operations = { |
727 | .alloc_inode = jfs_alloc_inode, | 731 | .alloc_inode = jfs_alloc_inode, |
728 | .destroy_inode = jfs_destroy_inode, | 732 | .destroy_inode = jfs_destroy_inode, |
729 | .read_inode = jfs_read_inode, | ||
730 | .dirty_inode = jfs_dirty_inode, | 733 | .dirty_inode = jfs_dirty_inode, |
731 | .write_inode = jfs_write_inode, | 734 | .write_inode = jfs_write_inode, |
732 | .delete_inode = jfs_delete_inode, | 735 | .delete_inode = jfs_delete_inode, |
diff --git a/fs/libfs.c b/fs/libfs.c index 5523bde96387..b004dfadd891 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -583,8 +583,8 @@ int simple_transaction_release(struct inode *inode, struct file *file) | |||
583 | /* Simple attribute files */ | 583 | /* Simple attribute files */ |
584 | 584 | ||
585 | struct simple_attr { | 585 | struct simple_attr { |
586 | u64 (*get)(void *); | 586 | int (*get)(void *, u64 *); |
587 | void (*set)(void *, u64); | 587 | int (*set)(void *, u64); |
588 | char get_buf[24]; /* enough to store a u64 and "\n\0" */ | 588 | char get_buf[24]; /* enough to store a u64 and "\n\0" */ |
589 | char set_buf[24]; | 589 | char set_buf[24]; |
590 | void *data; | 590 | void *data; |
@@ -595,7 +595,7 @@ struct simple_attr { | |||
595 | /* simple_attr_open is called by an actual attribute open file operation | 595 | /* simple_attr_open is called by an actual attribute open file operation |
596 | * to set the attribute specific access operations. */ | 596 | * to set the attribute specific access operations. */ |
597 | int simple_attr_open(struct inode *inode, struct file *file, | 597 | int simple_attr_open(struct inode *inode, struct file *file, |
598 | u64 (*get)(void *), void (*set)(void *, u64), | 598 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
599 | const char *fmt) | 599 | const char *fmt) |
600 | { | 600 | { |
601 | struct simple_attr *attr; | 601 | struct simple_attr *attr; |
@@ -615,7 +615,7 @@ int simple_attr_open(struct inode *inode, struct file *file, | |||
615 | return nonseekable_open(inode, file); | 615 | return nonseekable_open(inode, file); |
616 | } | 616 | } |
617 | 617 | ||
618 | int simple_attr_close(struct inode *inode, struct file *file) | 618 | int simple_attr_release(struct inode *inode, struct file *file) |
619 | { | 619 | { |
620 | kfree(file->private_data); | 620 | kfree(file->private_data); |
621 | return 0; | 621 | return 0; |
@@ -634,15 +634,24 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, | |||
634 | if (!attr->get) | 634 | if (!attr->get) |
635 | return -EACCES; | 635 | return -EACCES; |
636 | 636 | ||
637 | mutex_lock(&attr->mutex); | 637 | ret = mutex_lock_interruptible(&attr->mutex); |
638 | if (*ppos) /* continued read */ | 638 | if (ret) |
639 | return ret; | ||
640 | |||
641 | if (*ppos) { /* continued read */ | ||
639 | size = strlen(attr->get_buf); | 642 | size = strlen(attr->get_buf); |
640 | else /* first read */ | 643 | } else { /* first read */ |
644 | u64 val; | ||
645 | ret = attr->get(attr->data, &val); | ||
646 | if (ret) | ||
647 | goto out; | ||
648 | |||
641 | size = scnprintf(attr->get_buf, sizeof(attr->get_buf), | 649 | size = scnprintf(attr->get_buf, sizeof(attr->get_buf), |
642 | attr->fmt, | 650 | attr->fmt, (unsigned long long)val); |
643 | (unsigned long long)attr->get(attr->data)); | 651 | } |
644 | 652 | ||
645 | ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); | 653 | ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); |
654 | out: | ||
646 | mutex_unlock(&attr->mutex); | 655 | mutex_unlock(&attr->mutex); |
647 | return ret; | 656 | return ret; |
648 | } | 657 | } |
@@ -657,11 +666,13 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, | |||
657 | ssize_t ret; | 666 | ssize_t ret; |
658 | 667 | ||
659 | attr = file->private_data; | 668 | attr = file->private_data; |
660 | |||
661 | if (!attr->set) | 669 | if (!attr->set) |
662 | return -EACCES; | 670 | return -EACCES; |
663 | 671 | ||
664 | mutex_lock(&attr->mutex); | 672 | ret = mutex_lock_interruptible(&attr->mutex); |
673 | if (ret) | ||
674 | return ret; | ||
675 | |||
665 | ret = -EFAULT; | 676 | ret = -EFAULT; |
666 | size = min(sizeof(attr->set_buf) - 1, len); | 677 | size = min(sizeof(attr->set_buf) - 1, len); |
667 | if (copy_from_user(attr->set_buf, buf, size)) | 678 | if (copy_from_user(attr->set_buf, buf, size)) |
@@ -793,6 +804,6 @@ EXPORT_SYMBOL(simple_transaction_get); | |||
793 | EXPORT_SYMBOL(simple_transaction_read); | 804 | EXPORT_SYMBOL(simple_transaction_read); |
794 | EXPORT_SYMBOL(simple_transaction_release); | 805 | EXPORT_SYMBOL(simple_transaction_release); |
795 | EXPORT_SYMBOL_GPL(simple_attr_open); | 806 | EXPORT_SYMBOL_GPL(simple_attr_open); |
796 | EXPORT_SYMBOL_GPL(simple_attr_close); | 807 | EXPORT_SYMBOL_GPL(simple_attr_release); |
797 | EXPORT_SYMBOL_GPL(simple_attr_read); | 808 | EXPORT_SYMBOL_GPL(simple_attr_read); |
798 | EXPORT_SYMBOL_GPL(simple_attr_write); | 809 | EXPORT_SYMBOL_GPL(simple_attr_write); |
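With this libfs change the simple-attribute callbacks can fail: ->get now fills a u64 through a pointer, both callbacks return an int, and the attribute mutex is taken interruptibly. A sketch of a provider under the new signatures (demo_* names are hypothetical; DEFINE_SIMPLE_ATTRIBUTE from <linux/fs.h> generates the open/release/read/write glue around simple_attr_open() and the renamed simple_attr_release()):

```c
#include <linux/fs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = demo_value;
	return 0;		/* may now return -EBUSY, -EIO, ... instead */
}

static int demo_set(void *data, u64 val)
{
	demo_value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");
```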
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index ca6b16fc3101..f1ef49fff118 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
@@ -243,10 +243,18 @@ nlm_bind_host(struct nlm_host *host) | |||
243 | .program = &nlm_program, | 243 | .program = &nlm_program, |
244 | .version = host->h_version, | 244 | .version = host->h_version, |
245 | .authflavor = RPC_AUTH_UNIX, | 245 | .authflavor = RPC_AUTH_UNIX, |
246 | .flags = (RPC_CLNT_CREATE_HARDRTRY | | 246 | .flags = (RPC_CLNT_CREATE_NOPING | |
247 | RPC_CLNT_CREATE_AUTOBIND), | 247 | RPC_CLNT_CREATE_AUTOBIND), |
248 | }; | 248 | }; |
249 | 249 | ||
250 | /* | ||
251 | * lockd retries server side blocks automatically so we want | ||
252 | * those to be soft RPC calls. Client side calls need to be | ||
253 | * hard RPC tasks. | ||
254 | */ | ||
255 | if (!host->h_server) | ||
256 | args.flags |= RPC_CLNT_CREATE_HARDRTRY; | ||
257 | |||
250 | clnt = rpc_create(&args); | 258 | clnt = rpc_create(&args); |
251 | if (!IS_ERR(clnt)) | 259 | if (!IS_ERR(clnt)) |
252 | host->h_rpcclnt = clnt; | 260 | host->h_rpcclnt = clnt; |
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 2f4d8fa66689..fe9bdb4a220c 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c | |||
@@ -763,11 +763,20 @@ callback: | |||
763 | dprintk("lockd: GRANTing blocked lock.\n"); | 763 | dprintk("lockd: GRANTing blocked lock.\n"); |
764 | block->b_granted = 1; | 764 | block->b_granted = 1; |
765 | 765 | ||
766 | /* Schedule next grant callback in 30 seconds */ | 766 | /* keep block on the list, but don't reattempt until the RPC |
767 | nlmsvc_insert_block(block, 30 * HZ); | 767 | * completes or the submission fails |
768 | */ | ||
769 | nlmsvc_insert_block(block, NLM_NEVER); | ||
770 | |||
771 | /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked | ||
772 | * will queue up a new one if this one times out | ||
773 | */ | ||
774 | error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, | ||
775 | &nlmsvc_grant_ops); | ||
768 | 776 | ||
769 | /* Call the client */ | 777 | /* RPC submission failed, wait a bit and retry */ |
770 | nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); | 778 | if (error < 0) |
779 | nlmsvc_insert_block(block, 10 * HZ); | ||
771 | } | 780 | } |
772 | 781 | ||
773 | /* | 782 | /* |
@@ -786,6 +795,17 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) | |||
786 | 795 | ||
787 | dprintk("lockd: GRANT_MSG RPC callback\n"); | 796 | dprintk("lockd: GRANT_MSG RPC callback\n"); |
788 | 797 | ||
798 | /* if the block is not on a list at this point then it has | ||
799 | * been invalidated. Don't try to requeue it. | ||
800 | * | ||
801 | * FIXME: it's possible that the block is removed from the list | ||
802 | * after this check but before the nlmsvc_insert_block. In that | ||
803 | * case it will be added back. Perhaps we need better locking | ||
804 | * for nlm_blocked? | ||
805 | */ | ||
806 | if (list_empty(&block->b_list)) | ||
807 | return; | ||
808 | |||
789 | /* Technically, we should down the file semaphore here. Since we | 809 | /* Technically, we should down the file semaphore here. Since we |
790 | * move the block towards the head of the queue only, no harm | 810 | * move the block towards the head of the queue only, no harm |
791 | * can be done, though. */ | 811 | * can be done, though. */ |
diff --git a/fs/locks.c b/fs/locks.c index 49354b9c7dc1..f36f0e61558d 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -658,8 +658,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl) | |||
658 | if (cfl) { | 658 | if (cfl) { |
659 | __locks_copy_lock(fl, cfl); | 659 | __locks_copy_lock(fl, cfl); |
660 | if (cfl->fl_nspid) | 660 | if (cfl->fl_nspid) |
661 | fl->fl_pid = pid_nr_ns(cfl->fl_nspid, | 661 | fl->fl_pid = pid_vnr(cfl->fl_nspid); |
662 | task_active_pid_ns(current)); | ||
663 | } else | 662 | } else |
664 | fl->fl_type = F_UNLCK; | 663 | fl->fl_type = F_UNLCK; |
665 | unlock_kernel(); | 664 | unlock_kernel(); |
@@ -2084,7 +2083,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl, | |||
2084 | unsigned int fl_pid; | 2083 | unsigned int fl_pid; |
2085 | 2084 | ||
2086 | if (fl->fl_nspid) | 2085 | if (fl->fl_nspid) |
2087 | fl_pid = pid_nr_ns(fl->fl_nspid, task_active_pid_ns(current)); | 2086 | fl_pid = pid_vnr(fl->fl_nspid); |
2088 | else | 2087 | else |
2089 | fl_pid = fl->fl_pid; | 2088 | fl_pid = fl->fl_pid; |
2090 | 2089 | ||
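The fs/locks.c change is pure shorthand: pid_vnr() reports a struct pid as seen from the caller's namespace, which is exactly what the removed pid_nr_ns(..., task_active_pid_ns(current)) spelled out. Roughly (a sketch of the equivalence under the pid-namespace API of this kernel era):

```c
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

/* Sketch: the pid number of @pid as seen from the current task's namespace. */
static inline pid_t demo_pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
```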
diff --git a/fs/minix/inode.c b/fs/minix/inode.c index bf4cd316af81..84f6242ba6fc 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/highuid.h> | 18 | #include <linux/highuid.h> |
19 | #include <linux/vfs.h> | 19 | #include <linux/vfs.h> |
20 | 20 | ||
21 | static void minix_read_inode(struct inode * inode); | ||
22 | static int minix_write_inode(struct inode * inode, int wait); | 21 | static int minix_write_inode(struct inode * inode, int wait); |
23 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); | 22 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf); |
24 | static int minix_remount (struct super_block * sb, int * flags, char * data); | 23 | static int minix_remount (struct super_block * sb, int * flags, char * data); |
@@ -96,7 +95,6 @@ static void destroy_inodecache(void) | |||
96 | static const struct super_operations minix_sops = { | 95 | static const struct super_operations minix_sops = { |
97 | .alloc_inode = minix_alloc_inode, | 96 | .alloc_inode = minix_alloc_inode, |
98 | .destroy_inode = minix_destroy_inode, | 97 | .destroy_inode = minix_destroy_inode, |
99 | .read_inode = minix_read_inode, | ||
100 | .write_inode = minix_write_inode, | 98 | .write_inode = minix_write_inode, |
101 | .delete_inode = minix_delete_inode, | 99 | .delete_inode = minix_delete_inode, |
102 | .put_super = minix_put_super, | 100 | .put_super = minix_put_super, |
@@ -149,6 +147,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
149 | unsigned long i, block; | 147 | unsigned long i, block; |
150 | struct inode *root_inode; | 148 | struct inode *root_inode; |
151 | struct minix_sb_info *sbi; | 149 | struct minix_sb_info *sbi; |
150 | int ret = -EINVAL; | ||
152 | 151 | ||
153 | sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL); | 152 | sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL); |
154 | if (!sbi) | 153 | if (!sbi) |
@@ -246,10 +245,13 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) | |||
246 | 245 | ||
247 | /* set up enough so that it can read an inode */ | 246 | /* set up enough so that it can read an inode */ |
248 | s->s_op = &minix_sops; | 247 | s->s_op = &minix_sops; |
249 | root_inode = iget(s, MINIX_ROOT_INO); | 248 | root_inode = minix_iget(s, MINIX_ROOT_INO); |
250 | if (!root_inode || is_bad_inode(root_inode)) | 249 | if (IS_ERR(root_inode)) { |
250 | ret = PTR_ERR(root_inode); | ||
251 | goto out_no_root; | 251 | goto out_no_root; |
252 | } | ||
252 | 253 | ||
254 | ret = -ENOMEM; | ||
253 | s->s_root = d_alloc_root(root_inode); | 255 | s->s_root = d_alloc_root(root_inode); |
254 | if (!s->s_root) | 256 | if (!s->s_root) |
255 | goto out_iput; | 257 | goto out_iput; |
@@ -290,6 +292,7 @@ out_freemap: | |||
290 | goto out_release; | 292 | goto out_release; |
291 | 293 | ||
292 | out_no_map: | 294 | out_no_map: |
295 | ret = -ENOMEM; | ||
293 | if (!silent) | 296 | if (!silent) |
294 | printk("MINIX-fs: can't allocate map\n"); | 297 | printk("MINIX-fs: can't allocate map\n"); |
295 | goto out_release; | 298 | goto out_release; |
@@ -316,7 +319,7 @@ out_bad_sb: | |||
316 | out: | 319 | out: |
317 | s->s_fs_info = NULL; | 320 | s->s_fs_info = NULL; |
318 | kfree(sbi); | 321 | kfree(sbi); |
319 | return -EINVAL; | 322 | return ret; |
320 | } | 323 | } |
321 | 324 | ||
322 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) | 325 | static int minix_statfs(struct dentry *dentry, struct kstatfs *buf) |
@@ -409,7 +412,7 @@ void minix_set_inode(struct inode *inode, dev_t rdev) | |||
409 | /* | 412 | /* |
410 | * The minix V1 function to read an inode. | 413 | * The minix V1 function to read an inode. |
411 | */ | 414 | */ |
412 | static void V1_minix_read_inode(struct inode * inode) | 415 | static struct inode *V1_minix_iget(struct inode *inode) |
413 | { | 416 | { |
414 | struct buffer_head * bh; | 417 | struct buffer_head * bh; |
415 | struct minix_inode * raw_inode; | 418 | struct minix_inode * raw_inode; |
@@ -418,8 +421,8 @@ static void V1_minix_read_inode(struct inode * inode) | |||
418 | 421 | ||
419 | raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); | 422 | raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh); |
420 | if (!raw_inode) { | 423 | if (!raw_inode) { |
421 | make_bad_inode(inode); | 424 | iget_failed(inode); |
422 | return; | 425 | return ERR_PTR(-EIO); |
423 | } | 426 | } |
424 | inode->i_mode = raw_inode->i_mode; | 427 | inode->i_mode = raw_inode->i_mode; |
425 | inode->i_uid = (uid_t)raw_inode->i_uid; | 428 | inode->i_uid = (uid_t)raw_inode->i_uid; |
@@ -435,12 +438,14 @@ static void V1_minix_read_inode(struct inode * inode) | |||
435 | minix_inode->u.i1_data[i] = raw_inode->i_zone[i]; | 438 | minix_inode->u.i1_data[i] = raw_inode->i_zone[i]; |
436 | minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); | 439 | minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); |
437 | brelse(bh); | 440 | brelse(bh); |
441 | unlock_new_inode(inode); | ||
442 | return inode; | ||
438 | } | 443 | } |
439 | 444 | ||
440 | /* | 445 | /* |
441 | * The minix V2 function to read an inode. | 446 | * The minix V2 function to read an inode. |
442 | */ | 447 | */ |
443 | static void V2_minix_read_inode(struct inode * inode) | 448 | static struct inode *V2_minix_iget(struct inode *inode) |
444 | { | 449 | { |
445 | struct buffer_head * bh; | 450 | struct buffer_head * bh; |
446 | struct minix2_inode * raw_inode; | 451 | struct minix2_inode * raw_inode; |
@@ -449,8 +454,8 @@ static void V2_minix_read_inode(struct inode * inode) | |||
449 | 454 | ||
450 | raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); | 455 | raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh); |
451 | if (!raw_inode) { | 456 | if (!raw_inode) { |
452 | make_bad_inode(inode); | 457 | iget_failed(inode); |
453 | return; | 458 | return ERR_PTR(-EIO); |
454 | } | 459 | } |
455 | inode->i_mode = raw_inode->i_mode; | 460 | inode->i_mode = raw_inode->i_mode; |
456 | inode->i_uid = (uid_t)raw_inode->i_uid; | 461 | inode->i_uid = (uid_t)raw_inode->i_uid; |
@@ -468,17 +473,27 @@ static void V2_minix_read_inode(struct inode * inode) | |||
468 | minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; | 473 | minix_inode->u.i2_data[i] = raw_inode->i_zone[i]; |
469 | minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); | 474 | minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0])); |
470 | brelse(bh); | 475 | brelse(bh); |
476 | unlock_new_inode(inode); | ||
477 | return inode; | ||
471 | } | 478 | } |
472 | 479 | ||
473 | /* | 480 | /* |
474 | * The global function to read an inode. | 481 | * The global function to read an inode. |
475 | */ | 482 | */ |
476 | static void minix_read_inode(struct inode * inode) | 483 | struct inode *minix_iget(struct super_block *sb, unsigned long ino) |
477 | { | 484 | { |
485 | struct inode *inode; | ||
486 | |||
487 | inode = iget_locked(sb, ino); | ||
488 | if (!inode) | ||
489 | return ERR_PTR(-ENOMEM); | ||
490 | if (!(inode->i_state & I_NEW)) | ||
491 | return inode; | ||
492 | |||
478 | if (INODE_VERSION(inode) == MINIX_V1) | 493 | if (INODE_VERSION(inode) == MINIX_V1) |
479 | V1_minix_read_inode(inode); | 494 | return V1_minix_iget(inode); |
480 | else | 495 | else |
481 | V2_minix_read_inode(inode); | 496 | return V2_minix_iget(inode); |
482 | } | 497 | } |
483 | 498 | ||
484 | /* | 499 | /* |
diff --git a/fs/minix/minix.h b/fs/minix/minix.h index ac5d3a75cb0d..326edfe96108 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h | |||
@@ -45,6 +45,7 @@ struct minix_sb_info { | |||
45 | unsigned short s_version; | 45 | unsigned short s_version; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | extern struct inode *minix_iget(struct super_block *, unsigned long); | ||
48 | extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 49 | extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
49 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); | 50 | extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **); |
50 | extern struct inode * minix_new_inode(const struct inode * dir, int * error); | 51 | extern struct inode * minix_new_inode(const struct inode * dir, int * error); |
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index f4aa7a939040..102241bc9c79 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
@@ -54,10 +54,9 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st | |||
54 | 54 | ||
55 | ino = minix_inode_by_name(dentry); | 55 | ino = minix_inode_by_name(dentry); |
56 | if (ino) { | 56 | if (ino) { |
57 | inode = iget(dir->i_sb, ino); | 57 | inode = minix_iget(dir->i_sb, ino); |
58 | 58 | if (IS_ERR(inode)) | |
59 | if (!inode) | 59 | return ERR_CAST(inode); |
60 | return ERR_PTR(-EACCES); | ||
61 | } | 60 | } |
62 | d_add(dentry, inode); | 61 | d_add(dentry, inode); |
63 | return NULL; | 62 | return NULL; |
diff --git a/fs/namei.c b/fs/namei.c index 73e2e665817a..941c8e8228c0 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -106,7 +106,7 @@ | |||
106 | * any extra contention... | 106 | * any extra contention... |
107 | */ | 107 | */ |
108 | 108 | ||
109 | static int fastcall link_path_walk(const char *name, struct nameidata *nd); | 109 | static int link_path_walk(const char *name, struct nameidata *nd); |
110 | 110 | ||
111 | /* In order to reduce some races, while at the same time doing additional | 111 | /* In order to reduce some races, while at the same time doing additional |
112 | * checking and hopefully speeding things up, we copy filenames to the | 112 | * checking and hopefully speeding things up, we copy filenames to the |
@@ -231,7 +231,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) | |||
231 | struct vfsmount *mnt = NULL; | 231 | struct vfsmount *mnt = NULL; |
232 | 232 | ||
233 | if (nd) | 233 | if (nd) |
234 | mnt = nd->mnt; | 234 | mnt = nd->path.mnt; |
235 | 235 | ||
236 | if (mask & MAY_WRITE) { | 236 | if (mask & MAY_WRITE) { |
237 | umode_t mode = inode->i_mode; | 237 | umode_t mode = inode->i_mode; |
@@ -296,7 +296,7 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) | |||
296 | */ | 296 | */ |
297 | int vfs_permission(struct nameidata *nd, int mask) | 297 | int vfs_permission(struct nameidata *nd, int mask) |
298 | { | 298 | { |
299 | return permission(nd->dentry->d_inode, mask, nd); | 299 | return permission(nd->path.dentry->d_inode, mask, nd); |
300 | } | 300 | } |
301 | 301 | ||
302 | /** | 302 | /** |
@@ -362,21 +362,31 @@ int deny_write_access(struct file * file) | |||
362 | return 0; | 362 | return 0; |
363 | } | 363 | } |
364 | 364 | ||
365 | void path_release(struct nameidata *nd) | 365 | /** |
366 | * path_get - get a reference to a path | ||
367 | * @path: path to get the reference to | ||
368 | * | ||
369 | * Given a path increment the reference count to the dentry and the vfsmount. | ||
370 | */ | ||
371 | void path_get(struct path *path) | ||
366 | { | 372 | { |
367 | dput(nd->dentry); | 373 | mntget(path->mnt); |
368 | mntput(nd->mnt); | 374 | dget(path->dentry); |
369 | } | 375 | } |
376 | EXPORT_SYMBOL(path_get); | ||
370 | 377 | ||
371 | /* | 378 | /** |
372 | * umount() mustn't call path_release()/mntput() as that would clear | 379 | * path_put - put a reference to a path |
373 | * mnt_expiry_mark | 380 | * @path: path to put the reference to |
381 | * | ||
382 | * Given a path decrement the reference count to the dentry and the vfsmount. | ||
374 | */ | 383 | */ |
375 | void path_release_on_umount(struct nameidata *nd) | 384 | void path_put(struct path *path) |
376 | { | 385 | { |
377 | dput(nd->dentry); | 386 | dput(path->dentry); |
378 | mntput_no_expire(nd->mnt); | 387 | mntput(path->mnt); |
379 | } | 388 | } |
389 | EXPORT_SYMBOL(path_put); | ||
380 | 390 | ||
381 | /** | 391 | /** |
382 | * release_open_intent - free up open intent resources | 392 | * release_open_intent - free up open intent resources |
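These helpers anchor the conversion running through the rest of fs/namei.c: the nameidata's separate ->mnt and ->dentry fields become a single embedded struct path, and paired dget()/mntget() and dput()/mntput() calls collapse into path_get()/path_put(). A sketch of the caller-side change (assuming path_get()/path_put() are visible through the namei/path headers introduced alongside this series):

```c
#include <linux/namei.h>

/* Sketch: take and drop a reference on the dentry+vfsmount pair as a unit. */
static void demo_hold_and_release(struct nameidata *nd)
{
	path_get(&nd->path);	/* was: dget(nd->dentry); mntget(nd->mnt); */
	path_put(&nd->path);	/* was: dput(nd->dentry); mntput(nd->mnt); */
}
```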
@@ -539,16 +549,16 @@ walk_init_root(const char *name, struct nameidata *nd) | |||
539 | struct fs_struct *fs = current->fs; | 549 | struct fs_struct *fs = current->fs; |
540 | 550 | ||
541 | read_lock(&fs->lock); | 551 | read_lock(&fs->lock); |
542 | if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) { | 552 | if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) { |
543 | nd->mnt = mntget(fs->altrootmnt); | 553 | nd->path = fs->altroot; |
544 | nd->dentry = dget(fs->altroot); | 554 | path_get(&fs->altroot); |
545 | read_unlock(&fs->lock); | 555 | read_unlock(&fs->lock); |
546 | if (__emul_lookup_dentry(name,nd)) | 556 | if (__emul_lookup_dentry(name,nd)) |
547 | return 0; | 557 | return 0; |
548 | read_lock(&fs->lock); | 558 | read_lock(&fs->lock); |
549 | } | 559 | } |
550 | nd->mnt = mntget(fs->rootmnt); | 560 | nd->path = fs->root; |
551 | nd->dentry = dget(fs->root); | 561 | path_get(&fs->root); |
552 | read_unlock(&fs->lock); | 562 | read_unlock(&fs->lock); |
553 | return 1; | 563 | return 1; |
554 | } | 564 | } |
@@ -561,7 +571,7 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l | |||
561 | goto fail; | 571 | goto fail; |
562 | 572 | ||
563 | if (*link == '/') { | 573 | if (*link == '/') { |
564 | path_release(nd); | 574 | path_put(&nd->path); |
565 | if (!walk_init_root(link, nd)) | 575 | if (!walk_init_root(link, nd)) |
566 | /* weird __emul_prefix() stuff did it */ | 576 | /* weird __emul_prefix() stuff did it */ |
567 | goto out; | 577 | goto out; |
@@ -577,31 +587,31 @@ out: | |||
577 | */ | 587 | */ |
578 | name = __getname(); | 588 | name = __getname(); |
579 | if (unlikely(!name)) { | 589 | if (unlikely(!name)) { |
580 | path_release(nd); | 590 | path_put(&nd->path); |
581 | return -ENOMEM; | 591 | return -ENOMEM; |
582 | } | 592 | } |
583 | strcpy(name, nd->last.name); | 593 | strcpy(name, nd->last.name); |
584 | nd->last.name = name; | 594 | nd->last.name = name; |
585 | return 0; | 595 | return 0; |
586 | fail: | 596 | fail: |
587 | path_release(nd); | 597 | path_put(&nd->path); |
588 | return PTR_ERR(link); | 598 | return PTR_ERR(link); |
589 | } | 599 | } |
590 | 600 | ||
591 | static inline void dput_path(struct path *path, struct nameidata *nd) | 601 | static void path_put_conditional(struct path *path, struct nameidata *nd) |
592 | { | 602 | { |
593 | dput(path->dentry); | 603 | dput(path->dentry); |
594 | if (path->mnt != nd->mnt) | 604 | if (path->mnt != nd->path.mnt) |
595 | mntput(path->mnt); | 605 | mntput(path->mnt); |
596 | } | 606 | } |
597 | 607 | ||
598 | static inline void path_to_nameidata(struct path *path, struct nameidata *nd) | 608 | static inline void path_to_nameidata(struct path *path, struct nameidata *nd) |
599 | { | 609 | { |
600 | dput(nd->dentry); | 610 | dput(nd->path.dentry); |
601 | if (nd->mnt != path->mnt) | 611 | if (nd->path.mnt != path->mnt) |
602 | mntput(nd->mnt); | 612 | mntput(nd->path.mnt); |
603 | nd->mnt = path->mnt; | 613 | nd->path.mnt = path->mnt; |
604 | nd->dentry = path->dentry; | 614 | nd->path.dentry = path->dentry; |
605 | } | 615 | } |
606 | 616 | ||
607 | static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) | 617 | static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) |
@@ -613,7 +623,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata | |||
613 | touch_atime(path->mnt, dentry); | 623 | touch_atime(path->mnt, dentry); |
614 | nd_set_link(nd, NULL); | 624 | nd_set_link(nd, NULL); |
615 | 625 | ||
616 | if (path->mnt != nd->mnt) { | 626 | if (path->mnt != nd->path.mnt) { |
617 | path_to_nameidata(path, nd); | 627 | path_to_nameidata(path, nd); |
618 | dget(dentry); | 628 | dget(dentry); |
619 | } | 629 | } |
@@ -628,8 +638,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata | |||
628 | if (dentry->d_inode->i_op->put_link) | 638 | if (dentry->d_inode->i_op->put_link) |
629 | dentry->d_inode->i_op->put_link(dentry, nd, cookie); | 639 | dentry->d_inode->i_op->put_link(dentry, nd, cookie); |
630 | } | 640 | } |
631 | dput(dentry); | 641 | path_put(path); |
632 | mntput(path->mnt); | ||
633 | 642 | ||
634 | return error; | 643 | return error; |
635 | } | 644 | } |
@@ -661,8 +670,8 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) | |||
661 | nd->depth--; | 670 | nd->depth--; |
662 | return err; | 671 | return err; |
663 | loop: | 672 | loop: |
664 | dput_path(path, nd); | 673 | path_put_conditional(path, nd); |
665 | path_release(nd); | 674 | path_put(&nd->path); |
666 | return err; | 675 | return err; |
667 | } | 676 | } |
668 | 677 | ||
@@ -743,37 +752,37 @@ static __always_inline void follow_dotdot(struct nameidata *nd) | |||
743 | 752 | ||
744 | while(1) { | 753 | while(1) { |
745 | struct vfsmount *parent; | 754 | struct vfsmount *parent; |
746 | struct dentry *old = nd->dentry; | 755 | struct dentry *old = nd->path.dentry; |
747 | 756 | ||
748 | read_lock(&fs->lock); | 757 | read_lock(&fs->lock); |
749 | if (nd->dentry == fs->root && | 758 | if (nd->path.dentry == fs->root.dentry && |
750 | nd->mnt == fs->rootmnt) { | 759 | nd->path.mnt == fs->root.mnt) { |
751 | read_unlock(&fs->lock); | 760 | read_unlock(&fs->lock); |
752 | break; | 761 | break; |
753 | } | 762 | } |
754 | read_unlock(&fs->lock); | 763 | read_unlock(&fs->lock); |
755 | spin_lock(&dcache_lock); | 764 | spin_lock(&dcache_lock); |
756 | if (nd->dentry != nd->mnt->mnt_root) { | 765 | if (nd->path.dentry != nd->path.mnt->mnt_root) { |
757 | nd->dentry = dget(nd->dentry->d_parent); | 766 | nd->path.dentry = dget(nd->path.dentry->d_parent); |
758 | spin_unlock(&dcache_lock); | 767 | spin_unlock(&dcache_lock); |
759 | dput(old); | 768 | dput(old); |
760 | break; | 769 | break; |
761 | } | 770 | } |
762 | spin_unlock(&dcache_lock); | 771 | spin_unlock(&dcache_lock); |
763 | spin_lock(&vfsmount_lock); | 772 | spin_lock(&vfsmount_lock); |
764 | parent = nd->mnt->mnt_parent; | 773 | parent = nd->path.mnt->mnt_parent; |
765 | if (parent == nd->mnt) { | 774 | if (parent == nd->path.mnt) { |
766 | spin_unlock(&vfsmount_lock); | 775 | spin_unlock(&vfsmount_lock); |
767 | break; | 776 | break; |
768 | } | 777 | } |
769 | mntget(parent); | 778 | mntget(parent); |
770 | nd->dentry = dget(nd->mnt->mnt_mountpoint); | 779 | nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint); |
771 | spin_unlock(&vfsmount_lock); | 780 | spin_unlock(&vfsmount_lock); |
772 | dput(old); | 781 | dput(old); |
773 | mntput(nd->mnt); | 782 | mntput(nd->path.mnt); |
774 | nd->mnt = parent; | 783 | nd->path.mnt = parent; |
775 | } | 784 | } |
776 | follow_mount(&nd->mnt, &nd->dentry); | 785 | follow_mount(&nd->path.mnt, &nd->path.dentry); |
777 | } | 786 | } |
778 | 787 | ||
779 | /* | 788 | /* |
@@ -784,8 +793,8 @@ static __always_inline void follow_dotdot(struct nameidata *nd) | |||
784 | static int do_lookup(struct nameidata *nd, struct qstr *name, | 793 | static int do_lookup(struct nameidata *nd, struct qstr *name, |
785 | struct path *path) | 794 | struct path *path) |
786 | { | 795 | { |
787 | struct vfsmount *mnt = nd->mnt; | 796 | struct vfsmount *mnt = nd->path.mnt; |
788 | struct dentry *dentry = __d_lookup(nd->dentry, name); | 797 | struct dentry *dentry = __d_lookup(nd->path.dentry, name); |
789 | 798 | ||
790 | if (!dentry) | 799 | if (!dentry) |
791 | goto need_lookup; | 800 | goto need_lookup; |
@@ -798,7 +807,7 @@ done: | |||
798 | return 0; | 807 | return 0; |
799 | 808 | ||
800 | need_lookup: | 809 | need_lookup: |
801 | dentry = real_lookup(nd->dentry, name, nd); | 810 | dentry = real_lookup(nd->path.dentry, name, nd); |
802 | if (IS_ERR(dentry)) | 811 | if (IS_ERR(dentry)) |
803 | goto fail; | 812 | goto fail; |
804 | goto done; | 813 | goto done; |
@@ -823,7 +832,7 @@ fail: | |||
823 | * Returns 0 and nd will have valid dentry and mnt on success. | 832 | * Returns 0 and nd will have valid dentry and mnt on success. |
824 | * Returns error and drops reference to input namei data on failure. | 833 | * Returns error and drops reference to input namei data on failure. |
825 | */ | 834 | */ |
826 | static fastcall int __link_path_walk(const char * name, struct nameidata *nd) | 835 | static int __link_path_walk(const char *name, struct nameidata *nd) |
827 | { | 836 | { |
828 | struct path next; | 837 | struct path next; |
829 | struct inode *inode; | 838 | struct inode *inode; |
@@ -835,7 +844,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) | |||
835 | if (!*name) | 844 | if (!*name) |
836 | goto return_reval; | 845 | goto return_reval; |
837 | 846 | ||
838 | inode = nd->dentry->d_inode; | 847 | inode = nd->path.dentry->d_inode; |
839 | if (nd->depth) | 848 | if (nd->depth) |
840 | lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE); | 849 | lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE); |
841 | 850 | ||
@@ -883,7 +892,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) | |||
883 | if (this.name[1] != '.') | 892 | if (this.name[1] != '.') |
884 | break; | 893 | break; |
885 | follow_dotdot(nd); | 894 | follow_dotdot(nd); |
886 | inode = nd->dentry->d_inode; | 895 | inode = nd->path.dentry->d_inode; |
887 | /* fallthrough */ | 896 | /* fallthrough */ |
888 | case 1: | 897 | case 1: |
889 | continue; | 898 | continue; |
@@ -892,8 +901,9 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) | |||
892 | * See if the low-level filesystem might want | 901 | * See if the low-level filesystem might want |
893 | * to use its own hash.. | 902 | * to use its own hash.. |
894 | */ | 903 | */ |
895 | if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { | 904 | if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) { |
896 | err = nd->dentry->d_op->d_hash(nd->dentry, &this); | 905 | err = nd->path.dentry->d_op->d_hash(nd->path.dentry, |
906 | &this); | ||
897 | if (err < 0) | 907 | if (err < 0) |
898 | break; | 908 | break; |
899 | } | 909 | } |
@@ -915,7 +925,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) | |||
915 | if (err) | 925 | if (err) |
916 | goto return_err; | 926 | goto return_err; |
917 | err = -ENOENT; | 927 | err = -ENOENT; |
918 | inode = nd->dentry->d_inode; | 928 | inode = nd->path.dentry->d_inode; |
919 | if (!inode) | 929 | if (!inode) |
920 | break; | 930 | break; |
921 | err = -ENOTDIR; | 931 | err = -ENOTDIR; |
@@ -943,13 +953,14 @@ last_component: | |||
943 | if (this.name[1] != '.') | 953 | if (this.name[1] != '.') |
944 | break; | 954 | break; |
945 | follow_dotdot(nd); | 955 | follow_dotdot(nd); |
946 | inode = nd->dentry->d_inode; | 956 | inode = nd->path.dentry->d_inode; |
947 | /* fallthrough */ | 957 | /* fallthrough */ |
948 | case 1: | 958 | case 1: |
949 | goto return_reval; | 959 | goto return_reval; |
950 | } | 960 | } |
951 | if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { | 961 | if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) { |
952 | err = nd->dentry->d_op->d_hash(nd->dentry, &this); | 962 | err = nd->path.dentry->d_op->d_hash(nd->path.dentry, |
963 | &this); | ||
953 | if (err < 0) | 964 | if (err < 0) |
954 | break; | 965 | break; |
955 | } | 966 | } |
@@ -962,7 +973,7 @@ last_component: | |||
962 | err = do_follow_link(&next, nd); | 973 | err = do_follow_link(&next, nd); |
963 | if (err) | 974 | if (err) |
964 | goto return_err; | 975 | goto return_err; |
965 | inode = nd->dentry->d_inode; | 976 | inode = nd->path.dentry->d_inode; |
966 | } else | 977 | } else |
967 | path_to_nameidata(&next, nd); | 978 | path_to_nameidata(&next, nd); |
968 | err = -ENOENT; | 979 | err = -ENOENT; |
@@ -990,20 +1001,21 @@ return_reval: | |||
990 | * We bypassed the ordinary revalidation routines. | 1001 | * We bypassed the ordinary revalidation routines. |
991 | * We may need to check the cached dentry for staleness. | 1002 | * We may need to check the cached dentry for staleness. |
992 | */ | 1003 | */ |
993 | if (nd->dentry && nd->dentry->d_sb && | 1004 | if (nd->path.dentry && nd->path.dentry->d_sb && |
994 | (nd->dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) { | 1005 | (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) { |
995 | err = -ESTALE; | 1006 | err = -ESTALE; |
996 | /* Note: we do not d_invalidate() */ | 1007 | /* Note: we do not d_invalidate() */ |
997 | if (!nd->dentry->d_op->d_revalidate(nd->dentry, nd)) | 1008 | if (!nd->path.dentry->d_op->d_revalidate( |
1009 | nd->path.dentry, nd)) | ||
998 | break; | 1010 | break; |
999 | } | 1011 | } |
1000 | return_base: | 1012 | return_base: |
1001 | return 0; | 1013 | return 0; |
1002 | out_dput: | 1014 | out_dput: |
1003 | dput_path(&next, nd); | 1015 | path_put_conditional(&next, nd); |
1004 | break; | 1016 | break; |
1005 | } | 1017 | } |
1006 | path_release(nd); | 1018 | path_put(&nd->path); |
1007 | return_err: | 1019 | return_err: |
1008 | return err; | 1020 | return err; |
1009 | } | 1021 | } |
@@ -1015,31 +1027,30 @@ return_err: | |||
1015 | * Retry the whole path once, forcing real lookup requests | 1027 | * Retry the whole path once, forcing real lookup requests |
1016 | * instead of relying on the dcache. | 1028 | * instead of relying on the dcache. |
1017 | */ | 1029 | */ |
1018 | static int fastcall link_path_walk(const char *name, struct nameidata *nd) | 1030 | static int link_path_walk(const char *name, struct nameidata *nd) |
1019 | { | 1031 | { |
1020 | struct nameidata save = *nd; | 1032 | struct nameidata save = *nd; |
1021 | int result; | 1033 | int result; |
1022 | 1034 | ||
1023 | /* make sure the stuff we saved doesn't go away */ | 1035 | /* make sure the stuff we saved doesn't go away */ |
1024 | dget(save.dentry); | 1036 | dget(save.path.dentry); |
1025 | mntget(save.mnt); | 1037 | mntget(save.path.mnt); |
1026 | 1038 | ||
1027 | result = __link_path_walk(name, nd); | 1039 | result = __link_path_walk(name, nd); |
1028 | if (result == -ESTALE) { | 1040 | if (result == -ESTALE) { |
1029 | *nd = save; | 1041 | *nd = save; |
1030 | dget(nd->dentry); | 1042 | dget(nd->path.dentry); |
1031 | mntget(nd->mnt); | 1043 | mntget(nd->path.mnt); |
1032 | nd->flags |= LOOKUP_REVAL; | 1044 | nd->flags |= LOOKUP_REVAL; |
1033 | result = __link_path_walk(name, nd); | 1045 | result = __link_path_walk(name, nd); |
1034 | } | 1046 | } |
1035 | 1047 | ||
1036 | dput(save.dentry); | 1048 | path_put(&save.path); |
1037 | mntput(save.mnt); | ||
1038 | 1049 | ||
1039 | return result; | 1050 | return result; |
1040 | } | 1051 | } |
1041 | 1052 | ||
1042 | static int fastcall path_walk(const char * name, struct nameidata *nd) | 1053 | static int path_walk(const char *name, struct nameidata *nd) |
1043 | { | 1054 | { |
1044 | current->total_link_count = 0; | 1055 | current->total_link_count = 0; |
1045 | return link_path_walk(name, nd); | 1056 | return link_path_walk(name, nd); |
@@ -1054,9 +1065,9 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd) | |||
1054 | if (path_walk(name, nd)) | 1065 | if (path_walk(name, nd)) |
1055 | return 0; /* something went wrong... */ | 1066 | return 0; /* something went wrong... */ |
1056 | 1067 | ||
1057 | if (!nd->dentry->d_inode || S_ISDIR(nd->dentry->d_inode->i_mode)) { | 1068 | if (!nd->path.dentry->d_inode || |
1058 | struct dentry *old_dentry = nd->dentry; | 1069 | S_ISDIR(nd->path.dentry->d_inode->i_mode)) { |
1059 | struct vfsmount *old_mnt = nd->mnt; | 1070 | struct path old_path = nd->path; |
1060 | struct qstr last = nd->last; | 1071 | struct qstr last = nd->last; |
1061 | int last_type = nd->last_type; | 1072 | int last_type = nd->last_type; |
1062 | struct fs_struct *fs = current->fs; | 1073 | struct fs_struct *fs = current->fs; |
@@ -1067,19 +1078,17 @@ static int __emul_lookup_dentry(const char *name, struct nameidata *nd) | |||
1067 | */ | 1078 | */ |
1068 | nd->last_type = LAST_ROOT; | 1079 | nd->last_type = LAST_ROOT; |
1069 | read_lock(&fs->lock); | 1080 | read_lock(&fs->lock); |
1070 | nd->mnt = mntget(fs->rootmnt); | 1081 | nd->path = fs->root; |
1071 | nd->dentry = dget(fs->root); | 1082 | path_get(&fs->root); |
1072 | read_unlock(&fs->lock); | 1083 | read_unlock(&fs->lock); |
1073 | if (path_walk(name, nd) == 0) { | 1084 | if (path_walk(name, nd) == 0) { |
1074 | if (nd->dentry->d_inode) { | 1085 | if (nd->path.dentry->d_inode) { |
1075 | dput(old_dentry); | 1086 | path_put(&old_path); |
1076 | mntput(old_mnt); | ||
1077 | return 1; | 1087 | return 1; |
1078 | } | 1088 | } |
1079 | path_release(nd); | 1089 | path_put(&nd->path); |
1080 | } | 1090 | } |
1081 | nd->dentry = old_dentry; | 1091 | nd->path = old_path; |
1082 | nd->mnt = old_mnt; | ||
1083 | nd->last = last; | 1092 | nd->last = last; |
1084 | nd->last_type = last_type; | 1093 | nd->last_type = last_type; |
1085 | } | 1094 | } |
@@ -1090,33 +1099,26 @@ void set_fs_altroot(void) | |||
1090 | { | 1099 | { |
1091 | char *emul = __emul_prefix(); | 1100 | char *emul = __emul_prefix(); |
1092 | struct nameidata nd; | 1101 | struct nameidata nd; |
1093 | struct vfsmount *mnt = NULL, *oldmnt; | 1102 | struct path path = {}, old_path; |
1094 | struct dentry *dentry = NULL, *olddentry; | ||
1095 | int err; | 1103 | int err; |
1096 | struct fs_struct *fs = current->fs; | 1104 | struct fs_struct *fs = current->fs; |
1097 | 1105 | ||
1098 | if (!emul) | 1106 | if (!emul) |
1099 | goto set_it; | 1107 | goto set_it; |
1100 | err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd); | 1108 | err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd); |
1101 | if (!err) { | 1109 | if (!err) |
1102 | mnt = nd.mnt; | 1110 | path = nd.path; |
1103 | dentry = nd.dentry; | ||
1104 | } | ||
1105 | set_it: | 1111 | set_it: |
1106 | write_lock(&fs->lock); | 1112 | write_lock(&fs->lock); |
1107 | oldmnt = fs->altrootmnt; | 1113 | old_path = fs->altroot; |
1108 | olddentry = fs->altroot; | 1114 | fs->altroot = path; |
1109 | fs->altrootmnt = mnt; | ||
1110 | fs->altroot = dentry; | ||
1111 | write_unlock(&fs->lock); | 1115 | write_unlock(&fs->lock); |
1112 | if (olddentry) { | 1116 | if (old_path.dentry) |
1113 | dput(olddentry); | 1117 | path_put(&old_path); |
1114 | mntput(oldmnt); | ||
1115 | } | ||
1116 | } | 1118 | } |
1117 | 1119 | ||
1118 | /* Returns 0 and nd will be valid on success; Returns error, otherwise. */ | 1120 | /* Returns 0 and nd will be valid on success; Returns error, otherwise. */ |
1119 | static int fastcall do_path_lookup(int dfd, const char *name, | 1121 | static int do_path_lookup(int dfd, const char *name, |
1120 | unsigned int flags, struct nameidata *nd) | 1122 | unsigned int flags, struct nameidata *nd) |
1121 | { | 1123 | { |
1122 | int retval = 0; | 1124 | int retval = 0; |
@@ -1130,21 +1132,21 @@ static int fastcall do_path_lookup(int dfd, const char *name, | |||
1130 | 1132 | ||
1131 | if (*name=='/') { | 1133 | if (*name=='/') { |
1132 | read_lock(&fs->lock); | 1134 | read_lock(&fs->lock); |
1133 | if (fs->altroot && !(nd->flags & LOOKUP_NOALT)) { | 1135 | if (fs->altroot.dentry && !(nd->flags & LOOKUP_NOALT)) { |
1134 | nd->mnt = mntget(fs->altrootmnt); | 1136 | nd->path = fs->altroot; |
1135 | nd->dentry = dget(fs->altroot); | 1137 | path_get(&fs->altroot); |
1136 | read_unlock(&fs->lock); | 1138 | read_unlock(&fs->lock); |
1137 | if (__emul_lookup_dentry(name,nd)) | 1139 | if (__emul_lookup_dentry(name,nd)) |
1138 | goto out; /* found in altroot */ | 1140 | goto out; /* found in altroot */ |
1139 | read_lock(&fs->lock); | 1141 | read_lock(&fs->lock); |
1140 | } | 1142 | } |
1141 | nd->mnt = mntget(fs->rootmnt); | 1143 | nd->path = fs->root; |
1142 | nd->dentry = dget(fs->root); | 1144 | path_get(&fs->root); |
1143 | read_unlock(&fs->lock); | 1145 | read_unlock(&fs->lock); |
1144 | } else if (dfd == AT_FDCWD) { | 1146 | } else if (dfd == AT_FDCWD) { |
1145 | read_lock(&fs->lock); | 1147 | read_lock(&fs->lock); |
1146 | nd->mnt = mntget(fs->pwdmnt); | 1148 | nd->path = fs->pwd; |
1147 | nd->dentry = dget(fs->pwd); | 1149 | path_get(&fs->pwd); |
1148 | read_unlock(&fs->lock); | 1150 | read_unlock(&fs->lock); |
1149 | } else { | 1151 | } else { |
1150 | struct dentry *dentry; | 1152 | struct dentry *dentry; |
@@ -1164,17 +1166,17 @@ static int fastcall do_path_lookup(int dfd, const char *name, | |||
1164 | if (retval) | 1166 | if (retval) |
1165 | goto fput_fail; | 1167 | goto fput_fail; |
1166 | 1168 | ||
1167 | nd->mnt = mntget(file->f_path.mnt); | 1169 | nd->path = file->f_path; |
1168 | nd->dentry = dget(dentry); | 1170 | path_get(&file->f_path); |
1169 | 1171 | ||
1170 | fput_light(file, fput_needed); | 1172 | fput_light(file, fput_needed); |
1171 | } | 1173 | } |
1172 | 1174 | ||
1173 | retval = path_walk(name, nd); | 1175 | retval = path_walk(name, nd); |
1174 | out: | 1176 | out: |
1175 | if (unlikely(!retval && !audit_dummy_context() && nd->dentry && | 1177 | if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && |
1176 | nd->dentry->d_inode)) | 1178 | nd->path.dentry->d_inode)) |
1177 | audit_inode(name, nd->dentry); | 1179 | audit_inode(name, nd->path.dentry); |
1178 | out_fail: | 1180 | out_fail: |
1179 | return retval; | 1181 | return retval; |
1180 | 1182 | ||
@@ -1183,7 +1185,7 @@ fput_fail: | |||
1183 | goto out_fail; | 1185 | goto out_fail; |
1184 | } | 1186 | } |
1185 | 1187 | ||
1186 | int fastcall path_lookup(const char *name, unsigned int flags, | 1188 | int path_lookup(const char *name, unsigned int flags, |
1187 | struct nameidata *nd) | 1189 | struct nameidata *nd) |
1188 | { | 1190 | { |
1189 | return do_path_lookup(AT_FDCWD, name, flags, nd); | 1191 | return do_path_lookup(AT_FDCWD, name, flags, nd); |
@@ -1208,13 +1210,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, | |||
1208 | nd->flags = flags; | 1210 | nd->flags = flags; |
1209 | nd->depth = 0; | 1211 | nd->depth = 0; |
1210 | 1212 | ||
1211 | nd->mnt = mntget(mnt); | 1213 | nd->path.mnt = mntget(mnt); |
1212 | nd->dentry = dget(dentry); | 1214 | nd->path.dentry = dget(dentry); |
1213 | 1215 | ||
1214 | retval = path_walk(name, nd); | 1216 | retval = path_walk(name, nd); |
1215 | if (unlikely(!retval && !audit_dummy_context() && nd->dentry && | 1217 | if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && |
1216 | nd->dentry->d_inode)) | 1218 | nd->path.dentry->d_inode)) |
1217 | audit_inode(name, nd->dentry); | 1219 | audit_inode(name, nd->path.dentry); |
1218 | 1220 | ||
1219 | return retval; | 1221 | return retval; |
1220 | 1222 | ||
@@ -1236,7 +1238,7 @@ static int __path_lookup_intent_open(int dfd, const char *name, | |||
1236 | if (IS_ERR(nd->intent.open.file)) { | 1238 | if (IS_ERR(nd->intent.open.file)) { |
1237 | if (err == 0) { | 1239 | if (err == 0) { |
1238 | err = PTR_ERR(nd->intent.open.file); | 1240 | err = PTR_ERR(nd->intent.open.file); |
1239 | path_release(nd); | 1241 | path_put(&nd->path); |
1240 | } | 1242 | } |
1241 | } else if (err != 0) | 1243 | } else if (err != 0) |
1242 | release_open_intent(nd); | 1244 | release_open_intent(nd); |
@@ -1333,10 +1335,10 @@ static struct dentry *lookup_hash(struct nameidata *nd) | |||
1333 | { | 1335 | { |
1334 | int err; | 1336 | int err; |
1335 | 1337 | ||
1336 | err = permission(nd->dentry->d_inode, MAY_EXEC, nd); | 1338 | err = permission(nd->path.dentry->d_inode, MAY_EXEC, nd); |
1337 | if (err) | 1339 | if (err) |
1338 | return ERR_PTR(err); | 1340 | return ERR_PTR(err); |
1339 | return __lookup_hash(&nd->last, nd->dentry, nd); | 1341 | return __lookup_hash(&nd->last, nd->path.dentry, nd); |
1340 | } | 1342 | } |
1341 | 1343 | ||
1342 | static int __lookup_one_len(const char *name, struct qstr *this, | 1344 | static int __lookup_one_len(const char *name, struct qstr *this, |
@@ -1409,7 +1411,7 @@ struct dentry *lookup_one_noperm(const char *name, struct dentry *base) | |||
1409 | return __lookup_hash(&this, base, NULL); | 1411 | return __lookup_hash(&this, base, NULL); |
1410 | } | 1412 | } |
1411 | 1413 | ||
1412 | int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags, | 1414 | int __user_walk_fd(int dfd, const char __user *name, unsigned flags, |
1413 | struct nameidata *nd) | 1415 | struct nameidata *nd) |
1414 | { | 1416 | { |
1415 | char *tmp = getname(name); | 1417 | char *tmp = getname(name); |
@@ -1422,7 +1424,7 @@ int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags, | |||
1422 | return err; | 1424 | return err; |
1423 | } | 1425 | } |
1424 | 1426 | ||
1425 | int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) | 1427 | int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) |
1426 | { | 1428 | { |
1427 | return __user_walk_fd(AT_FDCWD, name, flags, nd); | 1429 | return __user_walk_fd(AT_FDCWD, name, flags, nd); |
1428 | } | 1430 | } |
@@ -1595,7 +1597,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, | |||
1595 | 1597 | ||
1596 | int may_open(struct nameidata *nd, int acc_mode, int flag) | 1598 | int may_open(struct nameidata *nd, int acc_mode, int flag) |
1597 | { | 1599 | { |
1598 | struct dentry *dentry = nd->dentry; | 1600 | struct dentry *dentry = nd->path.dentry; |
1599 | struct inode *inode = dentry->d_inode; | 1601 | struct inode *inode = dentry->d_inode; |
1600 | int error; | 1602 | int error; |
1601 | 1603 | ||
@@ -1616,7 +1618,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) | |||
1616 | if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { | 1618 | if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { |
1617 | flag &= ~O_TRUNC; | 1619 | flag &= ~O_TRUNC; |
1618 | } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { | 1620 | } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { |
1619 | if (nd->mnt->mnt_flags & MNT_NODEV) | 1621 | if (nd->path.mnt->mnt_flags & MNT_NODEV) |
1620 | return -EACCES; | 1622 | return -EACCES; |
1621 | 1623 | ||
1622 | flag &= ~O_TRUNC; | 1624 | flag &= ~O_TRUNC; |
@@ -1678,14 +1680,14 @@ static int open_namei_create(struct nameidata *nd, struct path *path, | |||
1678 | int flag, int mode) | 1680 | int flag, int mode) |
1679 | { | 1681 | { |
1680 | int error; | 1682 | int error; |
1681 | struct dentry *dir = nd->dentry; | 1683 | struct dentry *dir = nd->path.dentry; |
1682 | 1684 | ||
1683 | if (!IS_POSIXACL(dir->d_inode)) | 1685 | if (!IS_POSIXACL(dir->d_inode)) |
1684 | mode &= ~current->fs->umask; | 1686 | mode &= ~current->fs->umask; |
1685 | error = vfs_create(dir->d_inode, path->dentry, mode, nd); | 1687 | error = vfs_create(dir->d_inode, path->dentry, mode, nd); |
1686 | mutex_unlock(&dir->d_inode->i_mutex); | 1688 | mutex_unlock(&dir->d_inode->i_mutex); |
1687 | dput(nd->dentry); | 1689 | dput(nd->path.dentry); |
1688 | nd->dentry = path->dentry; | 1690 | nd->path.dentry = path->dentry; |
1689 | if (error) | 1691 | if (error) |
1690 | return error; | 1692 | return error; |
1691 | /* Don't check for write permission, don't truncate */ | 1693 | /* Don't check for write permission, don't truncate */ |
@@ -1752,11 +1754,11 @@ int open_namei(int dfd, const char *pathname, int flag, | |||
1752 | if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len]) | 1754 | if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len]) |
1753 | goto exit; | 1755 | goto exit; |
1754 | 1756 | ||
1755 | dir = nd->dentry; | 1757 | dir = nd->path.dentry; |
1756 | nd->flags &= ~LOOKUP_PARENT; | 1758 | nd->flags &= ~LOOKUP_PARENT; |
1757 | mutex_lock(&dir->d_inode->i_mutex); | 1759 | mutex_lock(&dir->d_inode->i_mutex); |
1758 | path.dentry = lookup_hash(nd); | 1760 | path.dentry = lookup_hash(nd); |
1759 | path.mnt = nd->mnt; | 1761 | path.mnt = nd->path.mnt; |
1760 | 1762 | ||
1761 | do_last: | 1763 | do_last: |
1762 | error = PTR_ERR(path.dentry); | 1764 | error = PTR_ERR(path.dentry); |
@@ -1812,11 +1814,11 @@ ok: | |||
1812 | return 0; | 1814 | return 0; |
1813 | 1815 | ||
1814 | exit_dput: | 1816 | exit_dput: |
1815 | dput_path(&path, nd); | 1817 | path_put_conditional(&path, nd); |
1816 | exit: | 1818 | exit: |
1817 | if (!IS_ERR(nd->intent.open.file)) | 1819 | if (!IS_ERR(nd->intent.open.file)) |
1818 | release_open_intent(nd); | 1820 | release_open_intent(nd); |
1819 | path_release(nd); | 1821 | path_put(&nd->path); |
1820 | return error; | 1822 | return error; |
1821 | 1823 | ||
1822 | do_link: | 1824 | do_link: |
@@ -1861,10 +1863,10 @@ do_link: | |||
1861 | __putname(nd->last.name); | 1863 | __putname(nd->last.name); |
1862 | goto exit; | 1864 | goto exit; |
1863 | } | 1865 | } |
1864 | dir = nd->dentry; | 1866 | dir = nd->path.dentry; |
1865 | mutex_lock(&dir->d_inode->i_mutex); | 1867 | mutex_lock(&dir->d_inode->i_mutex); |
1866 | path.dentry = lookup_hash(nd); | 1868 | path.dentry = lookup_hash(nd); |
1867 | path.mnt = nd->mnt; | 1869 | path.mnt = nd->path.mnt; |
1868 | __putname(nd->last.name); | 1870 | __putname(nd->last.name); |
1869 | goto do_last; | 1871 | goto do_last; |
1870 | } | 1872 | } |
@@ -1877,13 +1879,13 @@ do_link: | |||
1877 | * Simple function to lookup and return a dentry and create it | 1879 | * Simple function to lookup and return a dentry and create it |
1878 | * if it doesn't exist. Is SMP-safe. | 1880 | * if it doesn't exist. Is SMP-safe. |
1879 | * | 1881 | * |
1880 | * Returns with nd->dentry->d_inode->i_mutex locked. | 1882 | * Returns with nd->path.dentry->d_inode->i_mutex locked. |
1881 | */ | 1883 | */ |
1882 | struct dentry *lookup_create(struct nameidata *nd, int is_dir) | 1884 | struct dentry *lookup_create(struct nameidata *nd, int is_dir) |
1883 | { | 1885 | { |
1884 | struct dentry *dentry = ERR_PTR(-EEXIST); | 1886 | struct dentry *dentry = ERR_PTR(-EEXIST); |
1885 | 1887 | ||
1886 | mutex_lock_nested(&nd->dentry->d_inode->i_mutex, I_MUTEX_PARENT); | 1888 | mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); |
1887 | /* | 1889 | /* |
1888 | * Yucky last component or no last component at all? | 1890 | * Yucky last component or no last component at all? |
1889 | * (foo/., foo/.., /////) | 1891 | * (foo/., foo/.., /////) |
@@ -1962,19 +1964,19 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, | |||
1962 | dentry = lookup_create(&nd, 0); | 1964 | dentry = lookup_create(&nd, 0); |
1963 | error = PTR_ERR(dentry); | 1965 | error = PTR_ERR(dentry); |
1964 | 1966 | ||
1965 | if (!IS_POSIXACL(nd.dentry->d_inode)) | 1967 | if (!IS_POSIXACL(nd.path.dentry->d_inode)) |
1966 | mode &= ~current->fs->umask; | 1968 | mode &= ~current->fs->umask; |
1967 | if (!IS_ERR(dentry)) { | 1969 | if (!IS_ERR(dentry)) { |
1968 | switch (mode & S_IFMT) { | 1970 | switch (mode & S_IFMT) { |
1969 | case 0: case S_IFREG: | 1971 | case 0: case S_IFREG: |
1970 | error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd); | 1972 | error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd); |
1971 | break; | 1973 | break; |
1972 | case S_IFCHR: case S_IFBLK: | 1974 | case S_IFCHR: case S_IFBLK: |
1973 | error = vfs_mknod(nd.dentry->d_inode,dentry,mode, | 1975 | error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode, |
1974 | new_decode_dev(dev)); | 1976 | new_decode_dev(dev)); |
1975 | break; | 1977 | break; |
1976 | case S_IFIFO: case S_IFSOCK: | 1978 | case S_IFIFO: case S_IFSOCK: |
1977 | error = vfs_mknod(nd.dentry->d_inode,dentry,mode,0); | 1979 | error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0); |
1978 | break; | 1980 | break; |
1979 | case S_IFDIR: | 1981 | case S_IFDIR: |
1980 | error = -EPERM; | 1982 | error = -EPERM; |
@@ -1984,8 +1986,8 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, | |||
1984 | } | 1986 | } |
1985 | dput(dentry); | 1987 | dput(dentry); |
1986 | } | 1988 | } |
1987 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 1989 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
1988 | path_release(&nd); | 1990 | path_put(&nd.path); |
1989 | out: | 1991 | out: |
1990 | putname(tmp); | 1992 | putname(tmp); |
1991 | 1993 | ||
@@ -2039,13 +2041,13 @@ asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode) | |||
2039 | if (IS_ERR(dentry)) | 2041 | if (IS_ERR(dentry)) |
2040 | goto out_unlock; | 2042 | goto out_unlock; |
2041 | 2043 | ||
2042 | if (!IS_POSIXACL(nd.dentry->d_inode)) | 2044 | if (!IS_POSIXACL(nd.path.dentry->d_inode)) |
2043 | mode &= ~current->fs->umask; | 2045 | mode &= ~current->fs->umask; |
2044 | error = vfs_mkdir(nd.dentry->d_inode, dentry, mode); | 2046 | error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); |
2045 | dput(dentry); | 2047 | dput(dentry); |
2046 | out_unlock: | 2048 | out_unlock: |
2047 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 2049 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
2048 | path_release(&nd); | 2050 | path_put(&nd.path); |
2049 | out: | 2051 | out: |
2050 | putname(tmp); | 2052 | putname(tmp); |
2051 | out_err: | 2053 | out_err: |
@@ -2143,17 +2145,17 @@ static long do_rmdir(int dfd, const char __user *pathname) | |||
2143 | error = -EBUSY; | 2145 | error = -EBUSY; |
2144 | goto exit1; | 2146 | goto exit1; |
2145 | } | 2147 | } |
2146 | mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); | 2148 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); |
2147 | dentry = lookup_hash(&nd); | 2149 | dentry = lookup_hash(&nd); |
2148 | error = PTR_ERR(dentry); | 2150 | error = PTR_ERR(dentry); |
2149 | if (IS_ERR(dentry)) | 2151 | if (IS_ERR(dentry)) |
2150 | goto exit2; | 2152 | goto exit2; |
2151 | error = vfs_rmdir(nd.dentry->d_inode, dentry); | 2153 | error = vfs_rmdir(nd.path.dentry->d_inode, dentry); |
2152 | dput(dentry); | 2154 | dput(dentry); |
2153 | exit2: | 2155 | exit2: |
2154 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 2156 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
2155 | exit1: | 2157 | exit1: |
2156 | path_release(&nd); | 2158 | path_put(&nd.path); |
2157 | exit: | 2159 | exit: |
2158 | putname(name); | 2160 | putname(name); |
2159 | return error; | 2161 | return error; |
@@ -2188,6 +2190,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry) | |||
2188 | 2190 | ||
2189 | /* We don't d_delete() NFS sillyrenamed files--they still exist. */ | 2191 | /* We don't d_delete() NFS sillyrenamed files--they still exist. */ |
2190 | if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { | 2192 | if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { |
2193 | fsnotify_link_count(dentry->d_inode); | ||
2191 | d_delete(dentry); | 2194 | d_delete(dentry); |
2192 | } | 2195 | } |
2193 | 2196 | ||
@@ -2218,7 +2221,7 @@ static long do_unlinkat(int dfd, const char __user *pathname) | |||
2218 | error = -EISDIR; | 2221 | error = -EISDIR; |
2219 | if (nd.last_type != LAST_NORM) | 2222 | if (nd.last_type != LAST_NORM) |
2220 | goto exit1; | 2223 | goto exit1; |
2221 | mutex_lock_nested(&nd.dentry->d_inode->i_mutex, I_MUTEX_PARENT); | 2224 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); |
2222 | dentry = lookup_hash(&nd); | 2225 | dentry = lookup_hash(&nd); |
2223 | error = PTR_ERR(dentry); | 2226 | error = PTR_ERR(dentry); |
2224 | if (!IS_ERR(dentry)) { | 2227 | if (!IS_ERR(dentry)) { |
@@ -2228,15 +2231,15 @@ static long do_unlinkat(int dfd, const char __user *pathname) | |||
2228 | inode = dentry->d_inode; | 2231 | inode = dentry->d_inode; |
2229 | if (inode) | 2232 | if (inode) |
2230 | atomic_inc(&inode->i_count); | 2233 | atomic_inc(&inode->i_count); |
2231 | error = vfs_unlink(nd.dentry->d_inode, dentry); | 2234 | error = vfs_unlink(nd.path.dentry->d_inode, dentry); |
2232 | exit2: | 2235 | exit2: |
2233 | dput(dentry); | 2236 | dput(dentry); |
2234 | } | 2237 | } |
2235 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 2238 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
2236 | if (inode) | 2239 | if (inode) |
2237 | iput(inode); /* truncate the inode here */ | 2240 | iput(inode); /* truncate the inode here */ |
2238 | exit1: | 2241 | exit1: |
2239 | path_release(&nd); | 2242 | path_put(&nd.path); |
2240 | exit: | 2243 | exit: |
2241 | putname(name); | 2244 | putname(name); |
2242 | return error; | 2245 | return error; |
@@ -2309,11 +2312,11 @@ asmlinkage long sys_symlinkat(const char __user *oldname, | |||
2309 | if (IS_ERR(dentry)) | 2312 | if (IS_ERR(dentry)) |
2310 | goto out_unlock; | 2313 | goto out_unlock; |
2311 | 2314 | ||
2312 | error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO); | 2315 | error = vfs_symlink(nd.path.dentry->d_inode, dentry, from, S_IALLUGO); |
2313 | dput(dentry); | 2316 | dput(dentry); |
2314 | out_unlock: | 2317 | out_unlock: |
2315 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 2318 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
2316 | path_release(&nd); | 2319 | path_put(&nd.path); |
2317 | out: | 2320 | out: |
2318 | putname(to); | 2321 | putname(to); |
2319 | out_putname: | 2322 | out_putname: |
@@ -2360,7 +2363,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de | |||
2360 | error = dir->i_op->link(old_dentry, dir, new_dentry); | 2363 | error = dir->i_op->link(old_dentry, dir, new_dentry); |
2361 | mutex_unlock(&old_dentry->d_inode->i_mutex); | 2364 | mutex_unlock(&old_dentry->d_inode->i_mutex); |
2362 | if (!error) | 2365 | if (!error) |
2363 | fsnotify_create(dir, new_dentry); | 2366 | fsnotify_link(dir, old_dentry->d_inode, new_dentry); |
2364 | return error; | 2367 | return error; |
2365 | } | 2368 | } |
2366 | 2369 | ||
@@ -2398,20 +2401,20 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname, | |||
2398 | if (error) | 2401 | if (error) |
2399 | goto out; | 2402 | goto out; |
2400 | error = -EXDEV; | 2403 | error = -EXDEV; |
2401 | if (old_nd.mnt != nd.mnt) | 2404 | if (old_nd.path.mnt != nd.path.mnt) |
2402 | goto out_release; | 2405 | goto out_release; |
2403 | new_dentry = lookup_create(&nd, 0); | 2406 | new_dentry = lookup_create(&nd, 0); |
2404 | error = PTR_ERR(new_dentry); | 2407 | error = PTR_ERR(new_dentry); |
2405 | if (IS_ERR(new_dentry)) | 2408 | if (IS_ERR(new_dentry)) |
2406 | goto out_unlock; | 2409 | goto out_unlock; |
2407 | error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry); | 2410 | error = vfs_link(old_nd.path.dentry, nd.path.dentry->d_inode, new_dentry); |
2408 | dput(new_dentry); | 2411 | dput(new_dentry); |
2409 | out_unlock: | 2412 | out_unlock: |
2410 | mutex_unlock(&nd.dentry->d_inode->i_mutex); | 2413 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); |
2411 | out_release: | 2414 | out_release: |
2412 | path_release(&nd); | 2415 | path_put(&nd.path); |
2413 | out: | 2416 | out: |
2414 | path_release(&old_nd); | 2417 | path_put(&old_nd.path); |
2415 | exit: | 2418 | exit: |
2416 | putname(to); | 2419 | putname(to); |
2417 | 2420 | ||
@@ -2587,15 +2590,15 @@ static int do_rename(int olddfd, const char *oldname, | |||
2587 | goto exit1; | 2590 | goto exit1; |
2588 | 2591 | ||
2589 | error = -EXDEV; | 2592 | error = -EXDEV; |
2590 | if (oldnd.mnt != newnd.mnt) | 2593 | if (oldnd.path.mnt != newnd.path.mnt) |
2591 | goto exit2; | 2594 | goto exit2; |
2592 | 2595 | ||
2593 | old_dir = oldnd.dentry; | 2596 | old_dir = oldnd.path.dentry; |
2594 | error = -EBUSY; | 2597 | error = -EBUSY; |
2595 | if (oldnd.last_type != LAST_NORM) | 2598 | if (oldnd.last_type != LAST_NORM) |
2596 | goto exit2; | 2599 | goto exit2; |
2597 | 2600 | ||
2598 | new_dir = newnd.dentry; | 2601 | new_dir = newnd.path.dentry; |
2599 | if (newnd.last_type != LAST_NORM) | 2602 | if (newnd.last_type != LAST_NORM) |
2600 | goto exit2; | 2603 | goto exit2; |
2601 | 2604 | ||
@@ -2639,9 +2642,9 @@ exit4: | |||
2639 | exit3: | 2642 | exit3: |
2640 | unlock_rename(new_dir, old_dir); | 2643 | unlock_rename(new_dir, old_dir); |
2641 | exit2: | 2644 | exit2: |
2642 | path_release(&newnd); | 2645 | path_put(&newnd.path); |
2643 | exit1: | 2646 | exit1: |
2644 | path_release(&oldnd); | 2647 | path_put(&oldnd.path); |
2645 | exit: | 2648 | exit: |
2646 | return error; | 2649 | return error; |
2647 | } | 2650 | } |
@@ -2815,7 +2818,6 @@ EXPORT_SYMBOL(page_symlink); | |||
2815 | EXPORT_SYMBOL(page_symlink_inode_operations); | 2818 | EXPORT_SYMBOL(page_symlink_inode_operations); |
2816 | EXPORT_SYMBOL(path_lookup); | 2819 | EXPORT_SYMBOL(path_lookup); |
2817 | EXPORT_SYMBOL(vfs_path_lookup); | 2820 | EXPORT_SYMBOL(vfs_path_lookup); |
2818 | EXPORT_SYMBOL(path_release); | ||
2819 | EXPORT_SYMBOL(permission); | 2821 | EXPORT_SYMBOL(permission); |
2820 | EXPORT_SYMBOL(vfs_permission); | 2822 | EXPORT_SYMBOL(vfs_permission); |
2821 | EXPORT_SYMBOL(file_permission); | 2823 | EXPORT_SYMBOL(file_permission); |
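Taken together, the fs/namei.c hunks above are one mechanical conversion: the separate nd->mnt / nd->dentry fields become the embedded nd->path, and path_release(nd) becomes path_put(&nd->path). A minimal sketch of the pair of helpers this relies on, assuming struct path is just the (vfsmount, dentry) pair the hunks dereference; the _sketch names are illustrative, the real helpers live in fs/namei.c:

    struct path {
            struct vfsmount *mnt;
            struct dentry *dentry;
    };

    /* take both references at once, instead of separate mntget() + dget() */
    static void path_get_sketch(struct path *path)
    {
            mntget(path->mnt);
            dget(path->dentry);
    }

    /* drop both references, replacing the old dput() + mntput() pairs */
    static void path_put_sketch(struct path *path)
    {
            dput(path->dentry);
            mntput(path->mnt);
    }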
diff --git a/fs/namespace.c b/fs/namespace.c index 61bf376e29e8..7953c96a2071 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -25,18 +25,21 @@ | |||
25 | #include <linux/security.h> | 25 | #include <linux/security.h> |
26 | #include <linux/mount.h> | 26 | #include <linux/mount.h> |
27 | #include <linux/ramfs.h> | 27 | #include <linux/ramfs.h> |
28 | #include <linux/log2.h> | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/unistd.h> | 30 | #include <asm/unistd.h> |
30 | #include "pnode.h" | 31 | #include "pnode.h" |
31 | #include "internal.h" | 32 | #include "internal.h" |
32 | 33 | ||
34 | #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) | ||
35 | #define HASH_SIZE (1UL << HASH_SHIFT) | ||
36 | |||
33 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ | 37 | /* spinlock for vfsmount related operations, inplace of dcache_lock */ |
34 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); | 38 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); |
35 | 39 | ||
36 | static int event; | 40 | static int event; |
37 | 41 | ||
38 | static struct list_head *mount_hashtable __read_mostly; | 42 | static struct list_head *mount_hashtable __read_mostly; |
39 | static int hash_mask __read_mostly, hash_bits __read_mostly; | ||
40 | static struct kmem_cache *mnt_cache __read_mostly; | 43 | static struct kmem_cache *mnt_cache __read_mostly; |
41 | static struct rw_semaphore namespace_sem; | 44 | static struct rw_semaphore namespace_sem; |
42 | 45 | ||
@@ -48,8 +51,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | |||
48 | { | 51 | { |
49 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); | 52 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
50 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); | 53 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
51 | tmp = tmp + (tmp >> hash_bits); | 54 | tmp = tmp + (tmp >> HASH_SHIFT); |
52 | return tmp & hash_mask; | 55 | return tmp & (HASH_SIZE - 1); |
53 | } | 56 | } |
54 | 57 | ||
55 | struct vfsmount *alloc_vfsmnt(const char *name) | 58 | struct vfsmount *alloc_vfsmnt(const char *name) |
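The runtime sizing loop (removed from mnt_init() further down) is replaced by a compile-time constant here. Worked through for one common configuration, assuming 4 KiB pages and a 64-bit build where struct list_head is two pointers; other configurations simply yield a different power of two:

    /* Illustrative arithmetic, not measured on any particular machine:
     *   PAGE_SIZE / sizeof(struct list_head) = 4096 / 16 = 256
     *   HASH_SHIFT = ilog2(256)              = 8
     *   HASH_SIZE  = 1UL << HASH_SHIFT       = 256 buckets (one page of list heads)
     *   HASH_SIZE - 1                        = 0xff, taking over from the old hash_mask
     */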
@@ -154,13 +157,13 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns) | |||
154 | 157 | ||
155 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) | 158 | static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd) |
156 | { | 159 | { |
157 | old_nd->dentry = mnt->mnt_mountpoint; | 160 | old_nd->path.dentry = mnt->mnt_mountpoint; |
158 | old_nd->mnt = mnt->mnt_parent; | 161 | old_nd->path.mnt = mnt->mnt_parent; |
159 | mnt->mnt_parent = mnt; | 162 | mnt->mnt_parent = mnt; |
160 | mnt->mnt_mountpoint = mnt->mnt_root; | 163 | mnt->mnt_mountpoint = mnt->mnt_root; |
161 | list_del_init(&mnt->mnt_child); | 164 | list_del_init(&mnt->mnt_child); |
162 | list_del_init(&mnt->mnt_hash); | 165 | list_del_init(&mnt->mnt_hash); |
163 | old_nd->dentry->d_mounted--; | 166 | old_nd->path.dentry->d_mounted--; |
164 | } | 167 | } |
165 | 168 | ||
166 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | 169 | void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, |
@@ -173,10 +176,10 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry, | |||
173 | 176 | ||
174 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) | 177 | static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd) |
175 | { | 178 | { |
176 | mnt_set_mountpoint(nd->mnt, nd->dentry, mnt); | 179 | mnt_set_mountpoint(nd->path.mnt, nd->path.dentry, mnt); |
177 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | 180 | list_add_tail(&mnt->mnt_hash, mount_hashtable + |
178 | hash(nd->mnt, nd->dentry)); | 181 | hash(nd->path.mnt, nd->path.dentry)); |
179 | list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts); | 182 | list_add_tail(&mnt->mnt_child, &nd->path.mnt->mnt_mounts); |
180 | } | 183 | } |
181 | 184 | ||
182 | /* | 185 | /* |
@@ -317,6 +320,50 @@ void mnt_unpin(struct vfsmount *mnt) | |||
317 | 320 | ||
318 | EXPORT_SYMBOL(mnt_unpin); | 321 | EXPORT_SYMBOL(mnt_unpin); |
319 | 322 | ||
323 | static inline void mangle(struct seq_file *m, const char *s) | ||
324 | { | ||
325 | seq_escape(m, s, " \t\n\\"); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Simple .show_options callback for filesystems which don't want to | ||
330 | * implement more complex mount option showing. | ||
331 | * | ||
332 | * See also save_mount_options(). | ||
333 | */ | ||
334 | int generic_show_options(struct seq_file *m, struct vfsmount *mnt) | ||
335 | { | ||
336 | const char *options = mnt->mnt_sb->s_options; | ||
337 | |||
338 | if (options != NULL && options[0]) { | ||
339 | seq_putc(m, ','); | ||
340 | mangle(m, options); | ||
341 | } | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | EXPORT_SYMBOL(generic_show_options); | ||
346 | |||
347 | /* | ||
348 | * If filesystem uses generic_show_options(), this function should be | ||
349 | * called from the fill_super() callback. | ||
350 | * | ||
351 | * The .remount_fs callback usually needs to be handled in a special | ||
352 | * way, to make sure, that previous options are not overwritten if the | ||
353 | * remount fails. | ||
354 | * | ||
355 | * Also note, that if the filesystem's .remount_fs function doesn't | ||
356 | * reset all options to their default value, but changes only newly | ||
357 | * given options, then the displayed options will not reflect reality | ||
358 | * any more. | ||
359 | */ | ||
360 | void save_mount_options(struct super_block *sb, char *options) | ||
361 | { | ||
362 | kfree(sb->s_options); | ||
363 | sb->s_options = kstrdup(options, GFP_KERNEL); | ||
364 | } | ||
365 | EXPORT_SYMBOL(save_mount_options); | ||
366 | |||
320 | /* iterator */ | 367 | /* iterator */ |
321 | static void *m_start(struct seq_file *m, loff_t *pos) | 368 | static void *m_start(struct seq_file *m, loff_t *pos) |
322 | { | 369 | { |
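The two new helpers are meant to be used as a pair, as their comments say: save_mount_options() from a filesystem's fill_super(), generic_show_options() as its .show_options. A hedged sketch of that wiring; foofs and the surrounding details are invented for illustration, not part of this patch:

    static int foofs_fill_super(struct super_block *sb, void *data, int silent)
    {
            save_mount_options(sb, data);   /* stash the raw option string */
            /* ... normal superblock setup ... */
            return 0;
    }

    static const struct super_operations foofs_sops = {
            /* ... */
            .show_options   = generic_show_options, /* echoes ",<saved options>" */
    };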
@@ -338,11 +385,6 @@ static void m_stop(struct seq_file *m, void *v) | |||
338 | up_read(&namespace_sem); | 385 | up_read(&namespace_sem); |
339 | } | 386 | } |
340 | 387 | ||
341 | static inline void mangle(struct seq_file *m, const char *s) | ||
342 | { | ||
343 | seq_escape(m, s, " \t\n\\"); | ||
344 | } | ||
345 | |||
346 | static int show_vfsmnt(struct seq_file *m, void *v) | 388 | static int show_vfsmnt(struct seq_file *m, void *v) |
347 | { | 389 | { |
348 | struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); | 390 | struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); |
@@ -366,10 +408,11 @@ static int show_vfsmnt(struct seq_file *m, void *v) | |||
366 | { 0, NULL } | 408 | { 0, NULL } |
367 | }; | 409 | }; |
368 | struct proc_fs_info *fs_infop; | 410 | struct proc_fs_info *fs_infop; |
411 | struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; | ||
369 | 412 | ||
370 | mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); | 413 | mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none"); |
371 | seq_putc(m, ' '); | 414 | seq_putc(m, ' '); |
372 | seq_path(m, mnt, mnt->mnt_root, " \t\n\\"); | 415 | seq_path(m, &mnt_path, " \t\n\\"); |
373 | seq_putc(m, ' '); | 416 | seq_putc(m, ' '); |
374 | mangle(m, mnt->mnt_sb->s_type->name); | 417 | mangle(m, mnt->mnt_sb->s_type->name); |
375 | if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) { | 418 | if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) { |
@@ -401,6 +444,7 @@ struct seq_operations mounts_op = { | |||
401 | static int show_vfsstat(struct seq_file *m, void *v) | 444 | static int show_vfsstat(struct seq_file *m, void *v) |
402 | { | 445 | { |
403 | struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); | 446 | struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list); |
447 | struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; | ||
404 | int err = 0; | 448 | int err = 0; |
405 | 449 | ||
406 | /* device */ | 450 | /* device */ |
@@ -412,7 +456,7 @@ static int show_vfsstat(struct seq_file *m, void *v) | |||
412 | 456 | ||
413 | /* mount point */ | 457 | /* mount point */ |
414 | seq_puts(m, " mounted on "); | 458 | seq_puts(m, " mounted on "); |
415 | seq_path(m, mnt, mnt->mnt_root, " \t\n\\"); | 459 | seq_path(m, &mnt_path, " \t\n\\"); |
416 | seq_putc(m, ' '); | 460 | seq_putc(m, ' '); |
417 | 461 | ||
418 | /* file system type */ | 462 | /* file system type */ |
@@ -551,7 +595,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
551 | * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] | 595 | * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] |
552 | */ | 596 | */ |
553 | if (flags & MNT_EXPIRE) { | 597 | if (flags & MNT_EXPIRE) { |
554 | if (mnt == current->fs->rootmnt || | 598 | if (mnt == current->fs->root.mnt || |
555 | flags & (MNT_FORCE | MNT_DETACH)) | 599 | flags & (MNT_FORCE | MNT_DETACH)) |
556 | return -EINVAL; | 600 | return -EINVAL; |
557 | 601 | ||
@@ -586,7 +630,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
586 | * /reboot - static binary that would close all descriptors and | 630 | * /reboot - static binary that would close all descriptors and |
587 | * call reboot(9). Then init(8) could umount root and exec /reboot. | 631 | * call reboot(9). Then init(8) could umount root and exec /reboot. |
588 | */ | 632 | */ |
589 | if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) { | 633 | if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { |
590 | /* | 634 | /* |
591 | * Special case for "unmounting" root ... | 635 | * Special case for "unmounting" root ... |
592 | * we just try to remount it readonly. | 636 | * we just try to remount it readonly. |
@@ -637,18 +681,20 @@ asmlinkage long sys_umount(char __user * name, int flags) | |||
637 | if (retval) | 681 | if (retval) |
638 | goto out; | 682 | goto out; |
639 | retval = -EINVAL; | 683 | retval = -EINVAL; |
640 | if (nd.dentry != nd.mnt->mnt_root) | 684 | if (nd.path.dentry != nd.path.mnt->mnt_root) |
641 | goto dput_and_out; | 685 | goto dput_and_out; |
642 | if (!check_mnt(nd.mnt)) | 686 | if (!check_mnt(nd.path.mnt)) |
643 | goto dput_and_out; | 687 | goto dput_and_out; |
644 | 688 | ||
645 | retval = -EPERM; | 689 | retval = -EPERM; |
646 | if (!capable(CAP_SYS_ADMIN)) | 690 | if (!capable(CAP_SYS_ADMIN)) |
647 | goto dput_and_out; | 691 | goto dput_and_out; |
648 | 692 | ||
649 | retval = do_umount(nd.mnt, flags); | 693 | retval = do_umount(nd.path.mnt, flags); |
650 | dput_and_out: | 694 | dput_and_out: |
651 | path_release_on_umount(&nd); | 695 | /* we mustn't call path_put() as that would clear mnt_expiry_mark */ |
696 | dput(nd.path.dentry); | ||
697 | mntput_no_expire(nd.path.mnt); | ||
652 | out: | 698 | out: |
653 | return retval; | 699 | return retval; |
654 | } | 700 | } |
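The open-coded dput() + mntput_no_expire() above exists because, as the new comment notes, path_put() ends in a plain mntput(), and mntput() in kernels of this era is roughly the wrapper below, so going through it would wipe the expiry mark the umount path needs to preserve (sketch from memory, not quoted from this tree):

    static inline void mntput(struct vfsmount *mnt)
    {
            if (mnt) {
                    mnt->mnt_expiry_mark = 0;       /* the side effect sys_umount() must avoid */
                    mntput_no_expire(mnt);
            }
    }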
@@ -671,10 +717,10 @@ static int mount_is_safe(struct nameidata *nd) | |||
671 | return 0; | 717 | return 0; |
672 | return -EPERM; | 718 | return -EPERM; |
673 | #ifdef notyet | 719 | #ifdef notyet |
674 | if (S_ISLNK(nd->dentry->d_inode->i_mode)) | 720 | if (S_ISLNK(nd->path.dentry->d_inode->i_mode)) |
675 | return -EPERM; | 721 | return -EPERM; |
676 | if (nd->dentry->d_inode->i_mode & S_ISVTX) { | 722 | if (nd->path.dentry->d_inode->i_mode & S_ISVTX) { |
677 | if (current->uid != nd->dentry->d_inode->i_uid) | 723 | if (current->uid != nd->path.dentry->d_inode->i_uid) |
678 | return -EPERM; | 724 | return -EPERM; |
679 | } | 725 | } |
680 | if (vfs_permission(nd, MAY_WRITE)) | 726 | if (vfs_permission(nd, MAY_WRITE)) |
@@ -723,8 +769,8 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, | |||
723 | q = q->mnt_parent; | 769 | q = q->mnt_parent; |
724 | } | 770 | } |
725 | p = s; | 771 | p = s; |
726 | nd.mnt = q; | 772 | nd.path.mnt = q; |
727 | nd.dentry = p->mnt_mountpoint; | 773 | nd.path.dentry = p->mnt_mountpoint; |
728 | q = clone_mnt(p, p->mnt_root, flag); | 774 | q = clone_mnt(p, p->mnt_root, flag); |
729 | if (!q) | 775 | if (!q) |
730 | goto Enomem; | 776 | goto Enomem; |
@@ -833,8 +879,8 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt, | |||
833 | struct nameidata *nd, struct nameidata *parent_nd) | 879 | struct nameidata *nd, struct nameidata *parent_nd) |
834 | { | 880 | { |
835 | LIST_HEAD(tree_list); | 881 | LIST_HEAD(tree_list); |
836 | struct vfsmount *dest_mnt = nd->mnt; | 882 | struct vfsmount *dest_mnt = nd->path.mnt; |
837 | struct dentry *dest_dentry = nd->dentry; | 883 | struct dentry *dest_dentry = nd->path.dentry; |
838 | struct vfsmount *child, *p; | 884 | struct vfsmount *child, *p; |
839 | 885 | ||
840 | if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) | 886 | if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list)) |
@@ -869,13 +915,13 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | |||
869 | if (mnt->mnt_sb->s_flags & MS_NOUSER) | 915 | if (mnt->mnt_sb->s_flags & MS_NOUSER) |
870 | return -EINVAL; | 916 | return -EINVAL; |
871 | 917 | ||
872 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != | 918 | if (S_ISDIR(nd->path.dentry->d_inode->i_mode) != |
873 | S_ISDIR(mnt->mnt_root->d_inode->i_mode)) | 919 | S_ISDIR(mnt->mnt_root->d_inode->i_mode)) |
874 | return -ENOTDIR; | 920 | return -ENOTDIR; |
875 | 921 | ||
876 | err = -ENOENT; | 922 | err = -ENOENT; |
877 | mutex_lock(&nd->dentry->d_inode->i_mutex); | 923 | mutex_lock(&nd->path.dentry->d_inode->i_mutex); |
878 | if (IS_DEADDIR(nd->dentry->d_inode)) | 924 | if (IS_DEADDIR(nd->path.dentry->d_inode)) |
879 | goto out_unlock; | 925 | goto out_unlock; |
880 | 926 | ||
881 | err = security_sb_check_sb(mnt, nd); | 927 | err = security_sb_check_sb(mnt, nd); |
@@ -883,10 +929,10 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd) | |||
883 | goto out_unlock; | 929 | goto out_unlock; |
884 | 930 | ||
885 | err = -ENOENT; | 931 | err = -ENOENT; |
886 | if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) | 932 | if (IS_ROOT(nd->path.dentry) || !d_unhashed(nd->path.dentry)) |
887 | err = attach_recursive_mnt(mnt, nd, NULL); | 933 | err = attach_recursive_mnt(mnt, nd, NULL); |
888 | out_unlock: | 934 | out_unlock: |
889 | mutex_unlock(&nd->dentry->d_inode->i_mutex); | 935 | mutex_unlock(&nd->path.dentry->d_inode->i_mutex); |
890 | if (!err) | 936 | if (!err) |
891 | security_sb_post_addmount(mnt, nd); | 937 | security_sb_post_addmount(mnt, nd); |
892 | return err; | 938 | return err; |
@@ -894,17 +940,18 @@ out_unlock: | |||
894 | 940 | ||
895 | /* | 941 | /* |
896 | * recursively change the type of the mountpoint. | 942 | * recursively change the type of the mountpoint. |
943 | * noinline this do_mount helper to save do_mount stack space. | ||
897 | */ | 944 | */ |
898 | static int do_change_type(struct nameidata *nd, int flag) | 945 | static noinline int do_change_type(struct nameidata *nd, int flag) |
899 | { | 946 | { |
900 | struct vfsmount *m, *mnt = nd->mnt; | 947 | struct vfsmount *m, *mnt = nd->path.mnt; |
901 | int recurse = flag & MS_REC; | 948 | int recurse = flag & MS_REC; |
902 | int type = flag & ~MS_REC; | 949 | int type = flag & ~MS_REC; |
903 | 950 | ||
904 | if (!capable(CAP_SYS_ADMIN)) | 951 | if (!capable(CAP_SYS_ADMIN)) |
905 | return -EPERM; | 952 | return -EPERM; |
906 | 953 | ||
907 | if (nd->dentry != nd->mnt->mnt_root) | 954 | if (nd->path.dentry != nd->path.mnt->mnt_root) |
908 | return -EINVAL; | 955 | return -EINVAL; |
909 | 956 | ||
910 | down_write(&namespace_sem); | 957 | down_write(&namespace_sem); |
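The same "noinline ... to save do_mount stack space" note recurs on do_loopback(), do_remount(), do_move_mount() and do_new_mount() below. The presumed rationale: each helper keeps a struct nameidata (now carrying a struct path) among its locals, and do_mount() takes at most one of these branches per call, so forcing separate frames keeps those locals from all accumulating in do_mount()'s own frame. Shapes below are illustrative only:

    /*
     * do_mount()
     *   -> do_change_type()  \
     *   -> do_loopback()      |  noinline: each branch pays for its own
     *   -> do_remount()       |  nameidata-sized locals only when that
     *   -> do_move_mount()    |  branch is actually taken
     *   -> do_new_mount()    /
     */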
@@ -918,8 +965,10 @@ static int do_change_type(struct nameidata *nd, int flag) | |||
918 | 965 | ||
919 | /* | 966 | /* |
920 | * do loopback mount. | 967 | * do loopback mount. |
968 | * noinline this do_mount helper to save do_mount stack space. | ||
921 | */ | 969 | */ |
922 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | 970 | static noinline int do_loopback(struct nameidata *nd, char *old_name, |
971 | int recurse) | ||
923 | { | 972 | { |
924 | struct nameidata old_nd; | 973 | struct nameidata old_nd; |
925 | struct vfsmount *mnt = NULL; | 974 | struct vfsmount *mnt = NULL; |
@@ -934,17 +983,17 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
934 | 983 | ||
935 | down_write(&namespace_sem); | 984 | down_write(&namespace_sem); |
936 | err = -EINVAL; | 985 | err = -EINVAL; |
937 | if (IS_MNT_UNBINDABLE(old_nd.mnt)) | 986 | if (IS_MNT_UNBINDABLE(old_nd.path.mnt)) |
938 | goto out; | 987 | goto out; |
939 | 988 | ||
940 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) | 989 | if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt)) |
941 | goto out; | 990 | goto out; |
942 | 991 | ||
943 | err = -ENOMEM; | 992 | err = -ENOMEM; |
944 | if (recurse) | 993 | if (recurse) |
945 | mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0); | 994 | mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0); |
946 | else | 995 | else |
947 | mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0); | 996 | mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0); |
948 | 997 | ||
949 | if (!mnt) | 998 | if (!mnt) |
950 | goto out; | 999 | goto out; |
@@ -960,7 +1009,7 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse) | |||
960 | 1009 | ||
961 | out: | 1010 | out: |
962 | up_write(&namespace_sem); | 1011 | up_write(&namespace_sem); |
963 | path_release(&old_nd); | 1012 | path_put(&old_nd.path); |
964 | return err; | 1013 | return err; |
965 | } | 1014 | } |
966 | 1015 | ||
@@ -968,29 +1017,30 @@ out: | |||
968 | * change filesystem flags. dir should be a physical root of filesystem. | 1017 | * change filesystem flags. dir should be a physical root of filesystem. |
969 | * If you've mounted a non-root directory somewhere and want to do remount | 1018 | * If you've mounted a non-root directory somewhere and want to do remount |
970 | * on it - tough luck. | 1019 | * on it - tough luck. |
1020 | * noinline this do_mount helper to save do_mount stack space. | ||
971 | */ | 1021 | */ |
972 | static int do_remount(struct nameidata *nd, int flags, int mnt_flags, | 1022 | static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags, |
973 | void *data) | 1023 | void *data) |
974 | { | 1024 | { |
975 | int err; | 1025 | int err; |
976 | struct super_block *sb = nd->mnt->mnt_sb; | 1026 | struct super_block *sb = nd->path.mnt->mnt_sb; |
977 | 1027 | ||
978 | if (!capable(CAP_SYS_ADMIN)) | 1028 | if (!capable(CAP_SYS_ADMIN)) |
979 | return -EPERM; | 1029 | return -EPERM; |
980 | 1030 | ||
981 | if (!check_mnt(nd->mnt)) | 1031 | if (!check_mnt(nd->path.mnt)) |
982 | return -EINVAL; | 1032 | return -EINVAL; |
983 | 1033 | ||
984 | if (nd->dentry != nd->mnt->mnt_root) | 1034 | if (nd->path.dentry != nd->path.mnt->mnt_root) |
985 | return -EINVAL; | 1035 | return -EINVAL; |
986 | 1036 | ||
987 | down_write(&sb->s_umount); | 1037 | down_write(&sb->s_umount); |
988 | err = do_remount_sb(sb, flags, data, 0); | 1038 | err = do_remount_sb(sb, flags, data, 0); |
989 | if (!err) | 1039 | if (!err) |
990 | nd->mnt->mnt_flags = mnt_flags; | 1040 | nd->path.mnt->mnt_flags = mnt_flags; |
991 | up_write(&sb->s_umount); | 1041 | up_write(&sb->s_umount); |
992 | if (!err) | 1042 | if (!err) |
993 | security_sb_post_remount(nd->mnt, flags, data); | 1043 | security_sb_post_remount(nd->path.mnt, flags, data); |
994 | return err; | 1044 | return err; |
995 | } | 1045 | } |
996 | 1046 | ||
@@ -1004,7 +1054,10 @@ static inline int tree_contains_unbindable(struct vfsmount *mnt) | |||
1004 | return 0; | 1054 | return 0; |
1005 | } | 1055 | } |
1006 | 1056 | ||
1007 | static int do_move_mount(struct nameidata *nd, char *old_name) | 1057 | /* |
1058 | * noinline this do_mount helper to save do_mount stack space. | ||
1059 | */ | ||
1060 | static noinline int do_move_mount(struct nameidata *nd, char *old_name) | ||
1008 | { | 1061 | { |
1009 | struct nameidata old_nd, parent_nd; | 1062 | struct nameidata old_nd, parent_nd; |
1010 | struct vfsmount *p; | 1063 | struct vfsmount *p; |
@@ -1018,69 +1071,74 @@ static int do_move_mount(struct nameidata *nd, char *old_name) | |||
1018 | return err; | 1071 | return err; |
1019 | 1072 | ||
1020 | down_write(&namespace_sem); | 1073 | down_write(&namespace_sem); |
1021 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 1074 | while (d_mountpoint(nd->path.dentry) && |
1075 | follow_down(&nd->path.mnt, &nd->path.dentry)) | ||
1022 | ; | 1076 | ; |
1023 | err = -EINVAL; | 1077 | err = -EINVAL; |
1024 | if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt)) | 1078 | if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt)) |
1025 | goto out; | 1079 | goto out; |
1026 | 1080 | ||
1027 | err = -ENOENT; | 1081 | err = -ENOENT; |
1028 | mutex_lock(&nd->dentry->d_inode->i_mutex); | 1082 | mutex_lock(&nd->path.dentry->d_inode->i_mutex); |
1029 | if (IS_DEADDIR(nd->dentry->d_inode)) | 1083 | if (IS_DEADDIR(nd->path.dentry->d_inode)) |
1030 | goto out1; | 1084 | goto out1; |
1031 | 1085 | ||
1032 | if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry)) | 1086 | if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry)) |
1033 | goto out1; | 1087 | goto out1; |
1034 | 1088 | ||
1035 | err = -EINVAL; | 1089 | err = -EINVAL; |
1036 | if (old_nd.dentry != old_nd.mnt->mnt_root) | 1090 | if (old_nd.path.dentry != old_nd.path.mnt->mnt_root) |
1037 | goto out1; | 1091 | goto out1; |
1038 | 1092 | ||
1039 | if (old_nd.mnt == old_nd.mnt->mnt_parent) | 1093 | if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent) |
1040 | goto out1; | 1094 | goto out1; |
1041 | 1095 | ||
1042 | if (S_ISDIR(nd->dentry->d_inode->i_mode) != | 1096 | if (S_ISDIR(nd->path.dentry->d_inode->i_mode) != |
1043 | S_ISDIR(old_nd.dentry->d_inode->i_mode)) | 1097 | S_ISDIR(old_nd.path.dentry->d_inode->i_mode)) |
1044 | goto out1; | 1098 | goto out1; |
1045 | /* | 1099 | /* |
1046 | * Don't move a mount residing in a shared parent. | 1100 | * Don't move a mount residing in a shared parent. |
1047 | */ | 1101 | */ |
1048 | if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent)) | 1102 | if (old_nd.path.mnt->mnt_parent && |
1103 | IS_MNT_SHARED(old_nd.path.mnt->mnt_parent)) | ||
1049 | goto out1; | 1104 | goto out1; |
1050 | /* | 1105 | /* |
1051 | * Don't move a mount tree containing unbindable mounts to a destination | 1106 | * Don't move a mount tree containing unbindable mounts to a destination |
1052 | * mount which is shared. | 1107 | * mount which is shared. |
1053 | */ | 1108 | */ |
1054 | if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt)) | 1109 | if (IS_MNT_SHARED(nd->path.mnt) && |
1110 | tree_contains_unbindable(old_nd.path.mnt)) | ||
1055 | goto out1; | 1111 | goto out1; |
1056 | err = -ELOOP; | 1112 | err = -ELOOP; |
1057 | for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent) | 1113 | for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent) |
1058 | if (p == old_nd.mnt) | 1114 | if (p == old_nd.path.mnt) |
1059 | goto out1; | 1115 | goto out1; |
1060 | 1116 | ||
1061 | if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd))) | 1117 | err = attach_recursive_mnt(old_nd.path.mnt, nd, &parent_nd); |
1118 | if (err) | ||
1062 | goto out1; | 1119 | goto out1; |
1063 | 1120 | ||
1064 | spin_lock(&vfsmount_lock); | 1121 | spin_lock(&vfsmount_lock); |
1065 | /* if the mount is moved, it should no longer be expire | 1122 | /* if the mount is moved, it should no longer be expire |
1066 | * automatically */ | 1123 | * automatically */ |
1067 | list_del_init(&old_nd.mnt->mnt_expire); | 1124 | list_del_init(&old_nd.path.mnt->mnt_expire); |
1068 | spin_unlock(&vfsmount_lock); | 1125 | spin_unlock(&vfsmount_lock); |
1069 | out1: | 1126 | out1: |
1070 | mutex_unlock(&nd->dentry->d_inode->i_mutex); | 1127 | mutex_unlock(&nd->path.dentry->d_inode->i_mutex); |
1071 | out: | 1128 | out: |
1072 | up_write(&namespace_sem); | 1129 | up_write(&namespace_sem); |
1073 | if (!err) | 1130 | if (!err) |
1074 | path_release(&parent_nd); | 1131 | path_put(&parent_nd.path); |
1075 | path_release(&old_nd); | 1132 | path_put(&old_nd.path); |
1076 | return err; | 1133 | return err; |
1077 | } | 1134 | } |
1078 | 1135 | ||
1079 | /* | 1136 | /* |
1080 | * create a new mount for userspace and request it to be added into the | 1137 | * create a new mount for userspace and request it to be added into the |
1081 | * namespace's tree | 1138 | * namespace's tree |
1139 | * noinline this do_mount helper to save do_mount stack space. | ||
1082 | */ | 1140 | */ |
1083 | static int do_new_mount(struct nameidata *nd, char *type, int flags, | 1141 | static noinline int do_new_mount(struct nameidata *nd, char *type, int flags, |
1084 | int mnt_flags, char *name, void *data) | 1142 | int mnt_flags, char *name, void *data) |
1085 | { | 1143 | { |
1086 | struct vfsmount *mnt; | 1144 | struct vfsmount *mnt; |
@@ -1110,16 +1168,17 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, | |||
1110 | 1168 | ||
1111 | down_write(&namespace_sem); | 1169 | down_write(&namespace_sem); |
1112 | /* Something was mounted here while we slept */ | 1170 | /* Something was mounted here while we slept */ |
1113 | while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 1171 | while (d_mountpoint(nd->path.dentry) && |
1172 | follow_down(&nd->path.mnt, &nd->path.dentry)) | ||
1114 | ; | 1173 | ; |
1115 | err = -EINVAL; | 1174 | err = -EINVAL; |
1116 | if (!check_mnt(nd->mnt)) | 1175 | if (!check_mnt(nd->path.mnt)) |
1117 | goto unlock; | 1176 | goto unlock; |
1118 | 1177 | ||
1119 | /* Refuse the same filesystem on the same mount point */ | 1178 | /* Refuse the same filesystem on the same mount point */ |
1120 | err = -EBUSY; | 1179 | err = -EBUSY; |
1121 | if (nd->mnt->mnt_sb == newmnt->mnt_sb && | 1180 | if (nd->path.mnt->mnt_sb == newmnt->mnt_sb && |
1122 | nd->mnt->mnt_root == nd->dentry) | 1181 | nd->path.mnt->mnt_root == nd->path.dentry) |
1123 | goto unlock; | 1182 | goto unlock; |
1124 | 1183 | ||
1125 | err = -EINVAL; | 1184 | err = -EINVAL; |
@@ -1455,7 +1514,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, | |||
1455 | retval = do_new_mount(&nd, type_page, flags, mnt_flags, | 1514 | retval = do_new_mount(&nd, type_page, flags, mnt_flags, |
1456 | dev_name, data_page); | 1515 | dev_name, data_page); |
1457 | dput_out: | 1516 | dput_out: |
1458 | path_release(&nd); | 1517 | path_put(&nd.path); |
1459 | return retval; | 1518 | return retval; |
1460 | } | 1519 | } |
1461 | 1520 | ||
@@ -1502,17 +1561,17 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, | |||
1502 | while (p) { | 1561 | while (p) { |
1503 | q->mnt_ns = new_ns; | 1562 | q->mnt_ns = new_ns; |
1504 | if (fs) { | 1563 | if (fs) { |
1505 | if (p == fs->rootmnt) { | 1564 | if (p == fs->root.mnt) { |
1506 | rootmnt = p; | 1565 | rootmnt = p; |
1507 | fs->rootmnt = mntget(q); | 1566 | fs->root.mnt = mntget(q); |
1508 | } | 1567 | } |
1509 | if (p == fs->pwdmnt) { | 1568 | if (p == fs->pwd.mnt) { |
1510 | pwdmnt = p; | 1569 | pwdmnt = p; |
1511 | fs->pwdmnt = mntget(q); | 1570 | fs->pwd.mnt = mntget(q); |
1512 | } | 1571 | } |
1513 | if (p == fs->altrootmnt) { | 1572 | if (p == fs->altroot.mnt) { |
1514 | altrootmnt = p; | 1573 | altrootmnt = p; |
1515 | fs->altrootmnt = mntget(q); | 1574 | fs->altroot.mnt = mntget(q); |
1516 | } | 1575 | } |
1517 | } | 1576 | } |
1518 | p = next_mnt(p, mnt_ns->root); | 1577 | p = next_mnt(p, mnt_ns->root); |
@@ -1593,44 +1652,35 @@ out1: | |||
1593 | * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. | 1652 | * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. |
1594 | * It can block. Requires the big lock held. | 1653 | * It can block. Requires the big lock held. |
1595 | */ | 1654 | */ |
1596 | void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt, | 1655 | void set_fs_root(struct fs_struct *fs, struct path *path) |
1597 | struct dentry *dentry) | ||
1598 | { | 1656 | { |
1599 | struct dentry *old_root; | 1657 | struct path old_root; |
1600 | struct vfsmount *old_rootmnt; | 1658 | |
1601 | write_lock(&fs->lock); | 1659 | write_lock(&fs->lock); |
1602 | old_root = fs->root; | 1660 | old_root = fs->root; |
1603 | old_rootmnt = fs->rootmnt; | 1661 | fs->root = *path; |
1604 | fs->rootmnt = mntget(mnt); | 1662 | path_get(path); |
1605 | fs->root = dget(dentry); | ||
1606 | write_unlock(&fs->lock); | 1663 | write_unlock(&fs->lock); |
1607 | if (old_root) { | 1664 | if (old_root.dentry) |
1608 | dput(old_root); | 1665 | path_put(&old_root); |
1609 | mntput(old_rootmnt); | ||
1610 | } | ||
1611 | } | 1666 | } |
1612 | 1667 | ||
1613 | /* | 1668 | /* |
1614 | * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. | 1669 | * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. |
1615 | * It can block. Requires the big lock held. | 1670 | * It can block. Requires the big lock held. |
1616 | */ | 1671 | */ |
1617 | void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt, | 1672 | void set_fs_pwd(struct fs_struct *fs, struct path *path) |
1618 | struct dentry *dentry) | ||
1619 | { | 1673 | { |
1620 | struct dentry *old_pwd; | 1674 | struct path old_pwd; |
1621 | struct vfsmount *old_pwdmnt; | ||
1622 | 1675 | ||
1623 | write_lock(&fs->lock); | 1676 | write_lock(&fs->lock); |
1624 | old_pwd = fs->pwd; | 1677 | old_pwd = fs->pwd; |
1625 | old_pwdmnt = fs->pwdmnt; | 1678 | fs->pwd = *path; |
1626 | fs->pwdmnt = mntget(mnt); | 1679 | path_get(path); |
1627 | fs->pwd = dget(dentry); | ||
1628 | write_unlock(&fs->lock); | 1680 | write_unlock(&fs->lock); |
1629 | 1681 | ||
1630 | if (old_pwd) { | 1682 | if (old_pwd.dentry) |
1631 | dput(old_pwd); | 1683 | path_put(&old_pwd); |
1632 | mntput(old_pwdmnt); | ||
1633 | } | ||
1634 | } | 1684 | } |
1635 | 1685 | ||
1636 | static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | 1686 | static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) |
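Callers of set_fs_root()/set_fs_pwd() change shape accordingly: they now hand over a struct path and the helper takes its own reference with path_get(). A before/after sketch (the nd variable is illustrative; the converted in-tree callers, such as init_mount_tree() below, follow the same pattern):

    /* before: caller passed the pieces, helper did mntget() + dget() */
    set_fs_pwd(current->fs, nd.mnt, nd.dentry);

    /* after: caller passes the pair, helper does path_get(&nd.path) */
    set_fs_pwd(current->fs, &nd.path);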
@@ -1645,12 +1695,12 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd) | |||
1645 | if (fs) { | 1695 | if (fs) { |
1646 | atomic_inc(&fs->count); | 1696 | atomic_inc(&fs->count); |
1647 | task_unlock(p); | 1697 | task_unlock(p); |
1648 | if (fs->root == old_nd->dentry | 1698 | if (fs->root.dentry == old_nd->path.dentry |
1649 | && fs->rootmnt == old_nd->mnt) | 1699 | && fs->root.mnt == old_nd->path.mnt) |
1650 | set_fs_root(fs, new_nd->mnt, new_nd->dentry); | 1700 | set_fs_root(fs, &new_nd->path); |
1651 | if (fs->pwd == old_nd->dentry | 1701 | if (fs->pwd.dentry == old_nd->path.dentry |
1652 | && fs->pwdmnt == old_nd->mnt) | 1702 | && fs->pwd.mnt == old_nd->path.mnt) |
1653 | set_fs_pwd(fs, new_nd->mnt, new_nd->dentry); | 1703 | set_fs_pwd(fs, &new_nd->path); |
1654 | put_fs_struct(fs); | 1704 | put_fs_struct(fs); |
1655 | } else | 1705 | } else |
1656 | task_unlock(p); | 1706 | task_unlock(p); |
@@ -1700,7 +1750,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root, | |||
1700 | if (error) | 1750 | if (error) |
1701 | goto out0; | 1751 | goto out0; |
1702 | error = -EINVAL; | 1752 | error = -EINVAL; |
1703 | if (!check_mnt(new_nd.mnt)) | 1753 | if (!check_mnt(new_nd.path.mnt)) |
1704 | goto out1; | 1754 | goto out1; |
1705 | 1755 | ||
1706 | error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd); | 1756 | error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd); |
@@ -1709,74 +1759,78 @@ asmlinkage long sys_pivot_root(const char __user * new_root, | |||
1709 | 1759 | ||
1710 | error = security_sb_pivotroot(&old_nd, &new_nd); | 1760 | error = security_sb_pivotroot(&old_nd, &new_nd); |
1711 | if (error) { | 1761 | if (error) { |
1712 | path_release(&old_nd); | 1762 | path_put(&old_nd.path); |
1713 | goto out1; | 1763 | goto out1; |
1714 | } | 1764 | } |
1715 | 1765 | ||
1716 | read_lock(¤t->fs->lock); | 1766 | read_lock(¤t->fs->lock); |
1717 | user_nd.mnt = mntget(current->fs->rootmnt); | 1767 | user_nd.path = current->fs->root; |
1718 | user_nd.dentry = dget(current->fs->root); | 1768 | path_get(¤t->fs->root); |
1719 | read_unlock(¤t->fs->lock); | 1769 | read_unlock(¤t->fs->lock); |
1720 | down_write(&namespace_sem); | 1770 | down_write(&namespace_sem); |
1721 | mutex_lock(&old_nd.dentry->d_inode->i_mutex); | 1771 | mutex_lock(&old_nd.path.dentry->d_inode->i_mutex); |
1722 | error = -EINVAL; | 1772 | error = -EINVAL; |
1723 | if (IS_MNT_SHARED(old_nd.mnt) || | 1773 | if (IS_MNT_SHARED(old_nd.path.mnt) || |
1724 | IS_MNT_SHARED(new_nd.mnt->mnt_parent) || | 1774 | IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) || |
1725 | IS_MNT_SHARED(user_nd.mnt->mnt_parent)) | 1775 | IS_MNT_SHARED(user_nd.path.mnt->mnt_parent)) |
1726 | goto out2; | 1776 | goto out2; |
1727 | if (!check_mnt(user_nd.mnt)) | 1777 | if (!check_mnt(user_nd.path.mnt)) |
1728 | goto out2; | 1778 | goto out2; |
1729 | error = -ENOENT; | 1779 | error = -ENOENT; |
1730 | if (IS_DEADDIR(new_nd.dentry->d_inode)) | 1780 | if (IS_DEADDIR(new_nd.path.dentry->d_inode)) |
1731 | goto out2; | 1781 | goto out2; |
1732 | if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry)) | 1782 | if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry)) |
1733 | goto out2; | 1783 | goto out2; |
1734 | if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry)) | 1784 | if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry)) |
1735 | goto out2; | 1785 | goto out2; |
1736 | error = -EBUSY; | 1786 | error = -EBUSY; |
1737 | if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt) | 1787 | if (new_nd.path.mnt == user_nd.path.mnt || |
1788 | old_nd.path.mnt == user_nd.path.mnt) | ||
1738 | goto out2; /* loop, on the same file system */ | 1789 | goto out2; /* loop, on the same file system */ |
1739 | error = -EINVAL; | 1790 | error = -EINVAL; |
1740 | if (user_nd.mnt->mnt_root != user_nd.dentry) | 1791 | if (user_nd.path.mnt->mnt_root != user_nd.path.dentry) |
1741 | goto out2; /* not a mountpoint */ | 1792 | goto out2; /* not a mountpoint */ |
1742 | if (user_nd.mnt->mnt_parent == user_nd.mnt) | 1793 | if (user_nd.path.mnt->mnt_parent == user_nd.path.mnt) |
1743 | goto out2; /* not attached */ | 1794 | goto out2; /* not attached */ |
1744 | if (new_nd.mnt->mnt_root != new_nd.dentry) | 1795 | if (new_nd.path.mnt->mnt_root != new_nd.path.dentry) |
1745 | goto out2; /* not a mountpoint */ | 1796 | goto out2; /* not a mountpoint */ |
1746 | if (new_nd.mnt->mnt_parent == new_nd.mnt) | 1797 | if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt) |
1747 | goto out2; /* not attached */ | 1798 | goto out2; /* not attached */ |
1748 | tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */ | 1799 | /* make sure we can reach put_old from new_root */ |
1800 | tmp = old_nd.path.mnt; | ||
1749 | spin_lock(&vfsmount_lock); | 1801 | spin_lock(&vfsmount_lock); |
1750 | if (tmp != new_nd.mnt) { | 1802 | if (tmp != new_nd.path.mnt) { |
1751 | for (;;) { | 1803 | for (;;) { |
1752 | if (tmp->mnt_parent == tmp) | 1804 | if (tmp->mnt_parent == tmp) |
1753 | goto out3; /* already mounted on put_old */ | 1805 | goto out3; /* already mounted on put_old */ |
1754 | if (tmp->mnt_parent == new_nd.mnt) | 1806 | if (tmp->mnt_parent == new_nd.path.mnt) |
1755 | break; | 1807 | break; |
1756 | tmp = tmp->mnt_parent; | 1808 | tmp = tmp->mnt_parent; |
1757 | } | 1809 | } |
1758 | if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry)) | 1810 | if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry)) |
1759 | goto out3; | 1811 | goto out3; |
1760 | } else if (!is_subdir(old_nd.dentry, new_nd.dentry)) | 1812 | } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry)) |
1761 | goto out3; | 1813 | goto out3; |
1762 | detach_mnt(new_nd.mnt, &parent_nd); | 1814 | detach_mnt(new_nd.path.mnt, &parent_nd); |
1763 | detach_mnt(user_nd.mnt, &root_parent); | 1815 | detach_mnt(user_nd.path.mnt, &root_parent); |
1764 | attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */ | 1816 | /* mount old root on put_old */ |
1765 | attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */ | 1817 | attach_mnt(user_nd.path.mnt, &old_nd); |
1818 | /* mount new_root on / */ | ||
1819 | attach_mnt(new_nd.path.mnt, &root_parent); | ||
1766 | touch_mnt_namespace(current->nsproxy->mnt_ns); | 1820 | touch_mnt_namespace(current->nsproxy->mnt_ns); |
1767 | spin_unlock(&vfsmount_lock); | 1821 | spin_unlock(&vfsmount_lock); |
1768 | chroot_fs_refs(&user_nd, &new_nd); | 1822 | chroot_fs_refs(&user_nd, &new_nd); |
1769 | security_sb_post_pivotroot(&user_nd, &new_nd); | 1823 | security_sb_post_pivotroot(&user_nd, &new_nd); |
1770 | error = 0; | 1824 | error = 0; |
1771 | path_release(&root_parent); | 1825 | path_put(&root_parent.path); |
1772 | path_release(&parent_nd); | 1826 | path_put(&parent_nd.path); |
1773 | out2: | 1827 | out2: |
1774 | mutex_unlock(&old_nd.dentry->d_inode->i_mutex); | 1828 | mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex); |
1775 | up_write(&namespace_sem); | 1829 | up_write(&namespace_sem); |
1776 | path_release(&user_nd); | 1830 | path_put(&user_nd.path); |
1777 | path_release(&old_nd); | 1831 | path_put(&old_nd.path); |
1778 | out1: | 1832 | out1: |
1779 | path_release(&new_nd); | 1833 | path_put(&new_nd.path); |
1780 | out0: | 1834 | out0: |
1781 | unlock_kernel(); | 1835 | unlock_kernel(); |
1782 | return error; | 1836 | return error; |
@@ -1789,6 +1843,7 @@ static void __init init_mount_tree(void) | |||
1789 | { | 1843 | { |
1790 | struct vfsmount *mnt; | 1844 | struct vfsmount *mnt; |
1791 | struct mnt_namespace *ns; | 1845 | struct mnt_namespace *ns; |
1846 | struct path root; | ||
1792 | 1847 | ||
1793 | mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); | 1848 | mnt = do_kern_mount("rootfs", 0, "rootfs", NULL); |
1794 | if (IS_ERR(mnt)) | 1849 | if (IS_ERR(mnt)) |
@@ -1807,15 +1862,16 @@ static void __init init_mount_tree(void) | |||
1807 | init_task.nsproxy->mnt_ns = ns; | 1862 | init_task.nsproxy->mnt_ns = ns; |
1808 | get_mnt_ns(ns); | 1863 | get_mnt_ns(ns); |
1809 | 1864 | ||
1810 | set_fs_pwd(current->fs, ns->root, ns->root->mnt_root); | 1865 | root.mnt = ns->root; |
1811 | set_fs_root(current->fs, ns->root, ns->root->mnt_root); | 1866 | root.dentry = ns->root->mnt_root; |
1867 | |||
1868 | set_fs_pwd(current->fs, &root); | ||
1869 | set_fs_root(current->fs, &root); | ||
1812 | } | 1870 | } |
1813 | 1871 | ||
1814 | void __init mnt_init(void) | 1872 | void __init mnt_init(void) |
1815 | { | 1873 | { |
1816 | struct list_head *d; | 1874 | unsigned u; |
1817 | unsigned int nr_hash; | ||
1818 | int i; | ||
1819 | int err; | 1875 | int err; |
1820 | 1876 | ||
1821 | init_rwsem(&namespace_sem); | 1877 | init_rwsem(&namespace_sem); |
@@ -1828,35 +1884,11 @@ void __init mnt_init(void) | |||
1828 | if (!mount_hashtable) | 1884 | if (!mount_hashtable) |
1829 | panic("Failed to allocate mount hash table\n"); | 1885 | panic("Failed to allocate mount hash table\n"); |
1830 | 1886 | ||
1831 | /* | 1887 | printk("Mount-cache hash table entries: %lu\n", HASH_SIZE); |
1832 | * Find the power-of-two list-heads that can fit into the allocation.. | 1888 | |
1833 | * We don't guarantee that "sizeof(struct list_head)" is necessarily | 1889 | for (u = 0; u < HASH_SIZE; u++) |
1834 | * a power-of-two. | 1890 | INIT_LIST_HEAD(&mount_hashtable[u]); |
1835 | */ | ||
1836 | nr_hash = PAGE_SIZE / sizeof(struct list_head); | ||
1837 | hash_bits = 0; | ||
1838 | do { | ||
1839 | hash_bits++; | ||
1840 | } while ((nr_hash >> hash_bits) != 0); | ||
1841 | hash_bits--; | ||
1842 | 1891 | ||
1843 | /* | ||
1844 | * Re-calculate the actual number of entries and the mask | ||
1845 | * from the number of bits we can fit. | ||
1846 | */ | ||
1847 | nr_hash = 1UL << hash_bits; | ||
1848 | hash_mask = nr_hash - 1; | ||
1849 | |||
1850 | printk("Mount-cache hash table entries: %d\n", nr_hash); | ||
1851 | |||
1852 | /* And initialize the newly allocated array */ | ||
1853 | d = mount_hashtable; | ||
1854 | i = nr_hash; | ||
1855 | do { | ||
1856 | INIT_LIST_HEAD(d); | ||
1857 | d++; | ||
1858 | i--; | ||
1859 | } while (i); | ||
1860 | err = sysfs_init(); | 1892 | err = sysfs_init(); |
1861 | if (err) | 1893 | if (err) |
1862 | printk(KERN_WARNING "%s: sysfs_init error: %d\n", | 1894 | printk(KERN_WARNING "%s: sysfs_init error: %d\n", |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index e1cb70c643f8..fbbb9f7afa1a 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/smp_lock.h> | 29 | #include <linux/smp_lock.h> |
30 | #include <linux/vfs.h> | 30 | #include <linux/vfs.h> |
31 | #include <linux/mount.h> | ||
32 | #include <linux/seq_file.h> | ||
31 | 33 | ||
32 | #include <linux/ncp_fs.h> | 34 | #include <linux/ncp_fs.h> |
33 | 35 | ||
@@ -36,9 +38,15 @@ | |||
36 | #include "ncplib_kernel.h" | 38 | #include "ncplib_kernel.h" |
37 | #include "getopt.h" | 39 | #include "getopt.h" |
38 | 40 | ||
41 | #define NCP_DEFAULT_FILE_MODE 0600 | ||
42 | #define NCP_DEFAULT_DIR_MODE 0700 | ||
43 | #define NCP_DEFAULT_TIME_OUT 10 | ||
44 | #define NCP_DEFAULT_RETRY_COUNT 20 | ||
45 | |||
39 | static void ncp_delete_inode(struct inode *); | 46 | static void ncp_delete_inode(struct inode *); |
40 | static void ncp_put_super(struct super_block *); | 47 | static void ncp_put_super(struct super_block *); |
41 | static int ncp_statfs(struct dentry *, struct kstatfs *); | 48 | static int ncp_statfs(struct dentry *, struct kstatfs *); |
49 | static int ncp_show_options(struct seq_file *, struct vfsmount *); | ||
42 | 50 | ||
43 | static struct kmem_cache * ncp_inode_cachep; | 51 | static struct kmem_cache * ncp_inode_cachep; |
44 | 52 | ||
@@ -96,6 +104,7 @@ static const struct super_operations ncp_sops = | |||
96 | .put_super = ncp_put_super, | 104 | .put_super = ncp_put_super, |
97 | .statfs = ncp_statfs, | 105 | .statfs = ncp_statfs, |
98 | .remount_fs = ncp_remount, | 106 | .remount_fs = ncp_remount, |
107 | .show_options = ncp_show_options, | ||
99 | }; | 108 | }; |
100 | 109 | ||
101 | extern struct dentry_operations ncp_root_dentry_operations; | 110 | extern struct dentry_operations ncp_root_dentry_operations; |
@@ -304,6 +313,37 @@ static void ncp_stop_tasks(struct ncp_server *server) { | |||
304 | flush_scheduled_work(); | 313 | flush_scheduled_work(); |
305 | } | 314 | } |
306 | 315 | ||
316 | static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt) | ||
317 | { | ||
318 | struct ncp_server *server = NCP_SBP(mnt->mnt_sb); | ||
319 | unsigned int tmp; | ||
320 | |||
321 | if (server->m.uid != 0) | ||
322 | seq_printf(seq, ",uid=%u", server->m.uid); | ||
323 | if (server->m.gid != 0) | ||
324 | seq_printf(seq, ",gid=%u", server->m.gid); | ||
325 | if (server->m.mounted_uid != 0) | ||
326 | seq_printf(seq, ",owner=%u", server->m.mounted_uid); | ||
327 | tmp = server->m.file_mode & S_IALLUGO; | ||
328 | if (tmp != NCP_DEFAULT_FILE_MODE) | ||
329 | seq_printf(seq, ",mode=0%o", tmp); | ||
330 | tmp = server->m.dir_mode & S_IALLUGO; | ||
331 | if (tmp != NCP_DEFAULT_DIR_MODE) | ||
332 | seq_printf(seq, ",dirmode=0%o", tmp); | ||
333 | if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) { | ||
334 | tmp = server->m.time_out * 100 / HZ; | ||
335 | seq_printf(seq, ",timeout=%u", tmp); | ||
336 | } | ||
337 | if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT) | ||
338 | seq_printf(seq, ",retry=%u", server->m.retry_count); | ||
339 | if (server->m.flags != 0) | ||
340 | seq_printf(seq, ",flags=%lu", server->m.flags); | ||
341 | if (server->m.wdog_pid != NULL) | ||
342 | seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid)); | ||
343 | |||
344 | return 0; | ||
345 | } | ||
346 | |||
307 | static const struct ncp_option ncp_opts[] = { | 347 | static const struct ncp_option ncp_opts[] = { |
308 | { "uid", OPT_INT, 'u' }, | 348 | { "uid", OPT_INT, 'u' }, |
309 | { "gid", OPT_INT, 'g' }, | 349 | { "gid", OPT_INT, 'g' }, |
@@ -331,12 +371,12 @@ static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) | |||
331 | data->mounted_uid = 0; | 371 | data->mounted_uid = 0; |
332 | data->wdog_pid = NULL; | 372 | data->wdog_pid = NULL; |
333 | data->ncp_fd = ~0; | 373 | data->ncp_fd = ~0; |
334 | data->time_out = 10; | 374 | data->time_out = NCP_DEFAULT_TIME_OUT; |
335 | data->retry_count = 20; | 375 | data->retry_count = NCP_DEFAULT_RETRY_COUNT; |
336 | data->uid = 0; | 376 | data->uid = 0; |
337 | data->gid = 0; | 377 | data->gid = 0; |
338 | data->file_mode = 0600; | 378 | data->file_mode = NCP_DEFAULT_FILE_MODE; |
339 | data->dir_mode = 0700; | 379 | data->dir_mode = NCP_DEFAULT_DIR_MODE; |
340 | data->info_fd = -1; | 380 | data->info_fd = -1; |
341 | data->mounted_vol[0] = 0; | 381 | data->mounted_vol[0] = 0; |
342 | 382 | ||
@@ -982,12 +1022,13 @@ static struct file_system_type ncp_fs_type = { | |||
982 | .name = "ncpfs", | 1022 | .name = "ncpfs", |
983 | .get_sb = ncp_get_sb, | 1023 | .get_sb = ncp_get_sb, |
984 | .kill_sb = kill_anon_super, | 1024 | .kill_sb = kill_anon_super, |
1025 | .fs_flags = FS_BINARY_MOUNTDATA, | ||
985 | }; | 1026 | }; |
986 | 1027 | ||
987 | static int __init init_ncp_fs(void) | 1028 | static int __init init_ncp_fs(void) |
988 | { | 1029 | { |
989 | int err; | 1030 | int err; |
990 | DPRINTK("ncpfs: init_module called\n"); | 1031 | DPRINTK("ncpfs: init_ncp_fs called\n"); |
991 | 1032 | ||
992 | err = init_inodecache(); | 1033 | err = init_inodecache(); |
993 | if (err) | 1034 | if (err) |
@@ -1004,7 +1045,7 @@ out1: | |||
1004 | 1045 | ||
1005 | static void __exit exit_ncp_fs(void) | 1046 | static void __exit exit_ncp_fs(void) |
1006 | { | 1047 | { |
1007 | DPRINTK("ncpfs: cleanup_module called\n"); | 1048 | DPRINTK("ncpfs: exit_ncp_fs called\n"); |
1008 | unregister_filesystem(&ncp_fs_type); | 1049 | unregister_filesystem(&ncp_fs_type); |
1009 | destroy_inodecache(); | 1050 | destroy_inodecache(); |
1010 | } | 1051 | } |
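
The new ncp_show_options() callback prints only the mount options whose current value differs from the defaults it names, which keeps /proc/mounts output short. A hedged userspace sketch of that compare-against-defaults pattern; the option struct and default values here are illustrative, not ncpfs's real mount data:

#include <stdio.h>

#define DEF_FILE_MODE 0600
#define DEF_DIR_MODE  0700
#define DEF_RETRY     20

struct opts { unsigned uid, gid, file_mode, dir_mode, retry; };

/* emit only the options that deviate from their defaults */
static void show_options(const struct opts *o)
{
	if (o->uid != 0)
		printf(",uid=%u", o->uid);
	if (o->gid != 0)
		printf(",gid=%u", o->gid);
	if (o->file_mode != DEF_FILE_MODE)
		printf(",mode=0%o", o->file_mode);
	if (o->dir_mode != DEF_DIR_MODE)
		printf(",dirmode=0%o", o->dir_mode);
	if (o->retry != DEF_RETRY)
		printf(",retry=%u", o->retry);
}

int main(void)
{
	struct opts o = { 1000, 0, 0600, 0750, 20 };

	printf("/mnt ncpfs rw");
	show_options(&o);          /* prints ",uid=1000,dirmode=0750" only */
	printf("\n");
	return 0;
}
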
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index bd185a572a23..ecc06c619494 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -105,7 +105,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp) | |||
105 | */ | 105 | */ |
106 | int nfs_callback_up(void) | 106 | int nfs_callback_up(void) |
107 | { | 107 | { |
108 | struct svc_serv *serv; | 108 | struct svc_serv *serv = NULL; |
109 | int ret = 0; | 109 | int ret = 0; |
110 | 110 | ||
111 | lock_kernel(); | 111 | lock_kernel(); |
@@ -122,24 +122,30 @@ int nfs_callback_up(void) | |||
122 | ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport, | 122 | ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport, |
123 | SVC_SOCK_ANONYMOUS); | 123 | SVC_SOCK_ANONYMOUS); |
124 | if (ret <= 0) | 124 | if (ret <= 0) |
125 | goto out_destroy; | 125 | goto out_err; |
126 | nfs_callback_tcpport = ret; | 126 | nfs_callback_tcpport = ret; |
127 | dprintk("Callback port = 0x%x\n", nfs_callback_tcpport); | 127 | dprintk("Callback port = 0x%x\n", nfs_callback_tcpport); |
128 | 128 | ||
129 | ret = svc_create_thread(nfs_callback_svc, serv); | 129 | ret = svc_create_thread(nfs_callback_svc, serv); |
130 | if (ret < 0) | 130 | if (ret < 0) |
131 | goto out_destroy; | 131 | goto out_err; |
132 | nfs_callback_info.serv = serv; | 132 | nfs_callback_info.serv = serv; |
133 | wait_for_completion(&nfs_callback_info.started); | 133 | wait_for_completion(&nfs_callback_info.started); |
134 | out: | 134 | out: |
135 | /* | ||
136 | * svc_create creates the svc_serv with sv_nrthreads == 1, and then | ||
137 | * svc_create_thread increments that. So we need to call svc_destroy | ||
138 | * on both success and failure so that the refcount is 1 when the | ||
139 | * thread exits. | ||
140 | */ | ||
141 | if (serv) | ||
142 | svc_destroy(serv); | ||
135 | mutex_unlock(&nfs_callback_mutex); | 143 | mutex_unlock(&nfs_callback_mutex); |
136 | unlock_kernel(); | 144 | unlock_kernel(); |
137 | return ret; | 145 | return ret; |
138 | out_destroy: | 146 | out_err: |
139 | dprintk("Couldn't create callback socket or server thread; err = %d\n", | 147 | dprintk("Couldn't create callback socket or server thread; err = %d\n", |
140 | ret); | 148 | ret); |
141 | svc_destroy(serv); | ||
142 | out_err: | ||
143 | nfs_callback_info.users--; | 149 | nfs_callback_info.users--; |
144 | goto out; | 150 | goto out; |
145 | } | 151 | } |
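
The callback.c rework hinges on reference counting: svc_create() leaves the service with one reference and svc_create_thread() adds the thread's own, so the creator must drop its reference with svc_destroy() on success as well as on failure. A toy sketch of that discipline; struct serv and the helpers below are stand-ins, not the sunrpc API:

#include <stdio.h>
#include <stdlib.h>

struct serv { int refs; };                 /* toy stand-in for svc_serv */

static struct serv *serv_create(void)
{
	struct serv *s = malloc(sizeof(*s));
	if (s)
		s->refs = 1;                       /* creator holds one reference */
	return s;
}

static int start_thread(struct serv *s)
{
	s->refs++;                             /* the thread takes its own reference */
	return 0;                              /* pretend the thread started fine */
}

static void serv_put(struct serv *s)
{
	if (--s->refs == 0)
		free(s);
}

static int callback_up(void)
{
	struct serv *serv = serv_create();
	int ret = -1;

	if (!serv)
		goto out;
	ret = start_thread(serv);
out:
	if (serv)
		serv_put(serv);                    /* drop the creator's reference on every path */
	return ret;                            /* any remaining reference belongs to the
	                                          thread, which drops it when it exits */
}

int main(void)
{
	return callback_up() == 0 ? 0 : 1;
}
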
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 476cb0f837fd..ae04892a5e5d 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -154,7 +154,6 @@ typedef struct { | |||
154 | struct nfs_entry *entry; | 154 | struct nfs_entry *entry; |
155 | decode_dirent_t decode; | 155 | decode_dirent_t decode; |
156 | int plus; | 156 | int plus; |
157 | int error; | ||
158 | unsigned long timestamp; | 157 | unsigned long timestamp; |
159 | int timestamp_valid; | 158 | int timestamp_valid; |
160 | } nfs_readdir_descriptor_t; | 159 | } nfs_readdir_descriptor_t; |
@@ -213,7 +212,6 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page) | |||
213 | return 0; | 212 | return 0; |
214 | error: | 213 | error: |
215 | unlock_page(page); | 214 | unlock_page(page); |
216 | desc->error = error; | ||
217 | return -EIO; | 215 | return -EIO; |
218 | } | 216 | } |
219 | 217 | ||
@@ -483,13 +481,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
483 | goto out; | 481 | goto out; |
484 | } | 482 | } |
485 | timestamp = jiffies; | 483 | timestamp = jiffies; |
486 | desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie, | 484 | status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, |
487 | page, | 485 | *desc->dir_cookie, page, |
488 | NFS_SERVER(inode)->dtsize, | 486 | NFS_SERVER(inode)->dtsize, |
489 | desc->plus); | 487 | desc->plus); |
490 | desc->page = page; | 488 | desc->page = page; |
491 | desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ | 489 | desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ |
492 | if (desc->error >= 0) { | 490 | if (status >= 0) { |
493 | desc->timestamp = timestamp; | 491 | desc->timestamp = timestamp; |
494 | desc->timestamp_valid = 1; | 492 | desc->timestamp_valid = 1; |
495 | if ((status = dir_decode(desc)) == 0) | 493 | if ((status = dir_decode(desc)) == 0) |
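
The readdir descriptor loses its error field; the outcome of the readdir call is kept in a local status and returned directly instead of being stashed in the long-lived descriptor. A small sketch of that preference for a local status; the structs and the fake RPC below are illustrative:

#include <stdio.h>

struct desc { int page_ready; };    /* no error member any more */

static int fetch_page(void)
{
	return -5;                      /* pretend the readdir RPC failed */
}

static int uncached_readdir(struct desc *d)
{
	int status = fetch_page();      /* local status, not d->error */

	if (status >= 0)
		d->page_ready = 1;
	return status;                  /* caller sees the result directly */
}

int main(void)
{
	struct desc d = { 0 };

	printf("status=%d\n", uncached_readdir(&d));
	return 0;
}
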
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index e6242cdbaf91..fae97196daad 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c | |||
@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh) | |||
96 | inode = nfs_fhget(sb, mntfh, fsinfo.fattr); | 96 | inode = nfs_fhget(sb, mntfh, fsinfo.fattr); |
97 | if (IS_ERR(inode)) { | 97 | if (IS_ERR(inode)) { |
98 | dprintk("nfs_get_root: get root inode failed\n"); | 98 | dprintk("nfs_get_root: get root inode failed\n"); |
99 | return ERR_PTR(PTR_ERR(inode)); | 99 | return ERR_CAST(inode); |
100 | } | 100 | } |
101 | 101 | ||
102 | error = nfs_superblock_set_dummy_root(sb, inode); | 102 | error = nfs_superblock_set_dummy_root(sb, inode); |
@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh) | |||
266 | inode = nfs_fhget(sb, mntfh, &fattr); | 266 | inode = nfs_fhget(sb, mntfh, &fattr); |
267 | if (IS_ERR(inode)) { | 267 | if (IS_ERR(inode)) { |
268 | dprintk("nfs_get_root: get root inode failed\n"); | 268 | dprintk("nfs_get_root: get root inode failed\n"); |
269 | return ERR_PTR(PTR_ERR(inode)); | 269 | return ERR_CAST(inode); |
270 | } | 270 | } |
271 | 271 | ||
272 | error = nfs_superblock_set_dummy_root(sb, inode); | 272 | error = nfs_superblock_set_dummy_root(sb, inode); |
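
ERR_CAST(inode) says the same thing as ERR_PTR(PTR_ERR(inode)): forward an error-carrying pointer under a different pointer type, without bouncing it through a long. A minimal userspace imitation of the idiom; these macros mirror the shape of <linux/err.h> but are not the kernel's versions:

#include <stdio.h>
#include <errno.h>

#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)
#define ERR_CAST(ptr) ((void *)(ptr))      /* just re-type the error pointer */

struct inode  { int dummy; };
struct dentry { int dummy; };

static struct inode *get_inode(void)
{
	return ERR_PTR(-ENOMEM);               /* simulate a failed inode lookup */
}

static struct dentry *get_root(void)
{
	struct inode *inode = get_inode();

	if (IS_ERR(inode))
		return ERR_CAST(inode);            /* forward the error as a dentry pointer */
	return NULL;                           /* real lookup elided */
}

int main(void)
{
	struct dentry *d = get_root();

	if (IS_ERR(d))
		printf("error: %ld\n", PTR_ERR(d));
	return 0;
}
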
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index be4ce1c3a3d8..607f6eb9cdb5 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -107,38 +107,40 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
107 | 107 | ||
108 | BUG_ON(IS_ROOT(dentry)); | 108 | BUG_ON(IS_ROOT(dentry)); |
109 | dprintk("%s: enter\n", __FUNCTION__); | 109 | dprintk("%s: enter\n", __FUNCTION__); |
110 | dput(nd->dentry); | 110 | dput(nd->path.dentry); |
111 | nd->dentry = dget(dentry); | 111 | nd->path.dentry = dget(dentry); |
112 | 112 | ||
113 | /* Look it up again */ | 113 | /* Look it up again */ |
114 | parent = dget_parent(nd->dentry); | 114 | parent = dget_parent(nd->path.dentry); |
115 | err = server->nfs_client->rpc_ops->lookup(parent->d_inode, | 115 | err = server->nfs_client->rpc_ops->lookup(parent->d_inode, |
116 | &nd->dentry->d_name, | 116 | &nd->path.dentry->d_name, |
117 | &fh, &fattr); | 117 | &fh, &fattr); |
118 | dput(parent); | 118 | dput(parent); |
119 | if (err != 0) | 119 | if (err != 0) |
120 | goto out_err; | 120 | goto out_err; |
121 | 121 | ||
122 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) | 122 | if (fattr.valid & NFS_ATTR_FATTR_V4_REFERRAL) |
123 | mnt = nfs_do_refmount(nd->mnt, nd->dentry); | 123 | mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry); |
124 | else | 124 | else |
125 | mnt = nfs_do_submount(nd->mnt, nd->dentry, &fh, &fattr); | 125 | mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, &fh, |
126 | &fattr); | ||
126 | err = PTR_ERR(mnt); | 127 | err = PTR_ERR(mnt); |
127 | if (IS_ERR(mnt)) | 128 | if (IS_ERR(mnt)) |
128 | goto out_err; | 129 | goto out_err; |
129 | 130 | ||
130 | mntget(mnt); | 131 | mntget(mnt); |
131 | err = do_add_mount(mnt, nd, nd->mnt->mnt_flags|MNT_SHRINKABLE, &nfs_automount_list); | 132 | err = do_add_mount(mnt, nd, nd->path.mnt->mnt_flags|MNT_SHRINKABLE, |
133 | &nfs_automount_list); | ||
132 | if (err < 0) { | 134 | if (err < 0) { |
133 | mntput(mnt); | 135 | mntput(mnt); |
134 | if (err == -EBUSY) | 136 | if (err == -EBUSY) |
135 | goto out_follow; | 137 | goto out_follow; |
136 | goto out_err; | 138 | goto out_err; |
137 | } | 139 | } |
138 | mntput(nd->mnt); | 140 | mntput(nd->path.mnt); |
139 | dput(nd->dentry); | 141 | dput(nd->path.dentry); |
140 | nd->mnt = mnt; | 142 | nd->path.mnt = mnt; |
141 | nd->dentry = dget(mnt->mnt_root); | 143 | nd->path.dentry = dget(mnt->mnt_root); |
142 | schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); | 144 | schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); |
143 | out: | 145 | out: |
144 | dprintk("%s: done, returned %d\n", __FUNCTION__, err); | 146 | dprintk("%s: done, returned %d\n", __FUNCTION__, err); |
@@ -146,10 +148,11 @@ out: | |||
146 | dprintk("<-- nfs_follow_mountpoint() = %d\n", err); | 148 | dprintk("<-- nfs_follow_mountpoint() = %d\n", err); |
147 | return ERR_PTR(err); | 149 | return ERR_PTR(err); |
148 | out_err: | 150 | out_err: |
149 | path_release(nd); | 151 | path_put(&nd->path); |
150 | goto out; | 152 | goto out; |
151 | out_follow: | 153 | out_follow: |
152 | while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry)) | 154 | while (d_mountpoint(nd->path.dentry) && |
155 | follow_down(&nd->path.mnt, &nd->path.dentry)) | ||
153 | ; | 156 | ; |
154 | err = 0; | 157 | err = 0; |
155 | goto out; | 158 | goto out; |
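
This hunk is part of the tree-wide move from nd->mnt/nd->dentry to an embedded struct path, with path_put() releasing both references in one call. A hedged sketch of why bundling the pair helps; the refcount bodies below are simplified placeholders:

#include <stdio.h>

struct vfsmount { int count; };
struct dentry   { int count; };

/* the pair that is always taken and released together */
struct path {
	struct vfsmount *mnt;
	struct dentry   *dentry;
};

static void path_get(struct path *p) { p->mnt->count++; p->dentry->count++; }
static void path_put(struct path *p) { p->mnt->count--; p->dentry->count--; }

int main(void)
{
	struct vfsmount m = { 1 };
	struct dentry   d = { 1 };
	struct path root = { .mnt = &m, .dentry = &d };

	path_get(&root);                /* one call instead of mntget() + dget()  */
	path_put(&root);                /* one call instead of dput() + mntput()  */
	printf("%d %d\n", m.count, d.count);
	return 0;
}
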
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 027e1095256e..7ce07862c2fb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1384,11 +1384,11 @@ out_close: | |||
1384 | struct dentry * | 1384 | struct dentry * |
1385 | nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | 1385 | nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) |
1386 | { | 1386 | { |
1387 | struct dentry *parent; | ||
1388 | struct path path = { | 1387 | struct path path = { |
1389 | .mnt = nd->mnt, | 1388 | .mnt = nd->path.mnt, |
1390 | .dentry = dentry, | 1389 | .dentry = dentry, |
1391 | }; | 1390 | }; |
1391 | struct dentry *parent; | ||
1392 | struct iattr attr; | 1392 | struct iattr attr; |
1393 | struct rpc_cred *cred; | 1393 | struct rpc_cred *cred; |
1394 | struct nfs4_state *state; | 1394 | struct nfs4_state *state; |
@@ -1433,7 +1433,7 @@ int | |||
1433 | nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) | 1433 | nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) |
1434 | { | 1434 | { |
1435 | struct path path = { | 1435 | struct path path = { |
1436 | .mnt = nd->mnt, | 1436 | .mnt = nd->path.mnt, |
1437 | .dentry = dentry, | 1437 | .dentry = dentry, |
1438 | }; | 1438 | }; |
1439 | struct rpc_cred *cred; | 1439 | struct rpc_cred *cred; |
@@ -1885,7 +1885,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, | |||
1885 | int flags, struct nameidata *nd) | 1885 | int flags, struct nameidata *nd) |
1886 | { | 1886 | { |
1887 | struct path path = { | 1887 | struct path path = { |
1888 | .mnt = nd->mnt, | 1888 | .mnt = nd->path.mnt, |
1889 | .dentry = dentry, | 1889 | .dentry = dentry, |
1890 | }; | 1890 | }; |
1891 | struct nfs4_state *state; | 1891 | struct nfs4_state *state; |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f9c7432471dc..6233eb5e98c1 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -682,8 +682,8 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | |||
682 | if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) | 682 | if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) |
683 | return; | 683 | return; |
684 | printk(KERN_WARNING "NFS: v4 server returned a bad" | 684 | printk(KERN_WARNING "NFS: v4 server returned a bad" |
685 | "sequence-id error on an" | 685 | " sequence-id error on an" |
686 | "unconfirmed sequence %p!\n", | 686 | " unconfirmed sequence %p!\n", |
687 | seqid->sequence); | 687 | seqid->sequence); |
688 | case -NFS4ERR_STALE_CLIENTID: | 688 | case -NFS4ERR_STALE_CLIENTID: |
689 | case -NFS4ERR_STALE_STATEID: | 689 | case -NFS4ERR_STALE_STATEID: |
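
The nfs4state.c fix is purely about C string-literal concatenation: adjacent literals are glued together with no implicit space, so a continuation line has to carry the separating space itself. A tiny illustration:

#include <stdio.h>

int main(void)
{
	/* concatenation inserts nothing between the pieces */
	printf("NFS: v4 server returned a bad"
	       "sequence-id error\n");      /* -> "...a badsequence-id error" */
	printf("NFS: v4 server returned a bad"
	       " sequence-id error\n");     /* leading space restores the gap */
	return 0;
}
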
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 7f4505f6ac6f..1fb381843650 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -190,6 +190,10 @@ static match_table_t nfs_secflavor_tokens = { | |||
190 | { Opt_sec_lkeyi, "lkeyi" }, | 190 | { Opt_sec_lkeyi, "lkeyi" }, |
191 | { Opt_sec_lkeyp, "lkeyp" }, | 191 | { Opt_sec_lkeyp, "lkeyp" }, |
192 | 192 | ||
193 | { Opt_sec_spkm, "spkm3" }, | ||
194 | { Opt_sec_spkmi, "spkm3i" }, | ||
195 | { Opt_sec_spkmp, "spkm3p" }, | ||
196 | |||
193 | { Opt_sec_err, NULL } | 197 | { Opt_sec_err, NULL } |
194 | }; | 198 | }; |
195 | 199 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index b144b1957dd9..f55c437124a2 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -697,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page) | |||
697 | } | 697 | } |
698 | 698 | ||
699 | /* | 699 | /* |
700 | * If the page cache is marked as unsafe or invalid, then we can't rely on | ||
701 | * the PageUptodate() flag. In this case, we will need to turn off | ||
702 | * write optimisations that depend on the page contents being correct. | ||
703 | */ | ||
704 | static int nfs_write_pageuptodate(struct page *page, struct inode *inode) | ||
705 | { | ||
706 | return PageUptodate(page) && | ||
707 | !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA)); | ||
708 | } | ||
709 | |||
710 | /* | ||
700 | * Update and possibly write a cached page of an NFS file. | 711 | * Update and possibly write a cached page of an NFS file. |
701 | * | 712 | * |
702 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad | 713 | * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad |
@@ -717,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
717 | (long long)(page_offset(page) +offset)); | 728 | (long long)(page_offset(page) +offset)); |
718 | 729 | ||
719 | /* If we're not using byte range locks, and we know the page | 730 | /* If we're not using byte range locks, and we know the page |
720 | * is entirely in cache, it may be more efficient to avoid | 731 | * is up to date, it may be more efficient to extend the write |
721 | * fragmenting write requests. | 732 | * to cover the entire page in order to avoid fragmentation |
733 | * inefficiencies. | ||
722 | */ | 734 | */ |
723 | if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { | 735 | if (nfs_write_pageuptodate(page, inode) && |
736 | inode->i_flock == NULL && | ||
737 | !(file->f_mode & O_SYNC)) { | ||
724 | count = max(count + offset, nfs_page_length(page)); | 738 | count = max(count + offset, nfs_page_length(page)); |
725 | offset = 0; | 739 | offset = 0; |
726 | } | 740 | } |
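
nfs_write_pageuptodate() tightens the old PageUptodate() test: the cached page is trusted for the whole-page write extension only when the inode's cache-validity flags are clear. A userspace sketch of the gating logic; the flag names and the page/inode structs are simplified stand-ins:

#include <stdio.h>

#define INO_REVAL_PAGECACHE 0x1
#define INO_INVALID_DATA    0x2

struct page  { int uptodate; };
struct inode { unsigned cache_validity; };

/* trust the cached page only if nothing marked the cache suspect */
static int write_pageuptodate(struct page *pg, struct inode *ino)
{
	return pg->uptodate &&
	       !(ino->cache_validity & (INO_REVAL_PAGECACHE | INO_INVALID_DATA));
}

int main(void)
{
	struct page  pg  = { 1 };
	struct inode ino = { INO_INVALID_DATA };

	/* extend the write to the full page only when the check passes */
	printf(write_pageuptodate(&pg, &ino)
	       ? "extend write to whole page\n"
	       : "write only the dirtied range\n");
	return 0;
}
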
diff --git a/fs/nfsctl.c b/fs/nfsctl.c index 51f1b31acbf6..aed8145d9087 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c | |||
@@ -41,9 +41,9 @@ static struct file *do_open(char *name, int flags) | |||
41 | error = may_open(&nd, MAY_WRITE, FMODE_WRITE); | 41 | error = may_open(&nd, MAY_WRITE, FMODE_WRITE); |
42 | 42 | ||
43 | if (!error) | 43 | if (!error) |
44 | return dentry_open(nd.dentry, nd.mnt, flags); | 44 | return dentry_open(nd.path.dentry, nd.path.mnt, flags); |
45 | 45 | ||
46 | path_release(&nd); | 46 | path_put(&nd.path); |
47 | return ERR_PTR(error); | 47 | return ERR_PTR(error); |
48 | } | 48 | } |
49 | 49 | ||
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 79b4bf812960..8a6f7c924c75 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c | |||
@@ -63,10 +63,8 @@ static void expkey_put(struct kref *ref) | |||
63 | struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); | 63 | struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); |
64 | 64 | ||
65 | if (test_bit(CACHE_VALID, &key->h.flags) && | 65 | if (test_bit(CACHE_VALID, &key->h.flags) && |
66 | !test_bit(CACHE_NEGATIVE, &key->h.flags)) { | 66 | !test_bit(CACHE_NEGATIVE, &key->h.flags)) |
67 | dput(key->ek_dentry); | 67 | path_put(&key->ek_path); |
68 | mntput(key->ek_mnt); | ||
69 | } | ||
70 | auth_domain_put(key->ek_client); | 68 | auth_domain_put(key->ek_client); |
71 | kfree(key); | 69 | kfree(key); |
72 | } | 70 | } |
@@ -169,15 +167,14 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
169 | goto out; | 167 | goto out; |
170 | 168 | ||
171 | dprintk("Found the path %s\n", buf); | 169 | dprintk("Found the path %s\n", buf); |
172 | key.ek_mnt = nd.mnt; | 170 | key.ek_path = nd.path; |
173 | key.ek_dentry = nd.dentry; | 171 | |
174 | |||
175 | ek = svc_expkey_update(&key, ek); | 172 | ek = svc_expkey_update(&key, ek); |
176 | if (ek) | 173 | if (ek) |
177 | cache_put(&ek->h, &svc_expkey_cache); | 174 | cache_put(&ek->h, &svc_expkey_cache); |
178 | else | 175 | else |
179 | err = -ENOMEM; | 176 | err = -ENOMEM; |
180 | path_release(&nd); | 177 | path_put(&nd.path); |
181 | } | 178 | } |
182 | cache_flush(); | 179 | cache_flush(); |
183 | out: | 180 | out: |
@@ -206,7 +203,7 @@ static int expkey_show(struct seq_file *m, | |||
206 | if (test_bit(CACHE_VALID, &h->flags) && | 203 | if (test_bit(CACHE_VALID, &h->flags) && |
207 | !test_bit(CACHE_NEGATIVE, &h->flags)) { | 204 | !test_bit(CACHE_NEGATIVE, &h->flags)) { |
208 | seq_printf(m, " "); | 205 | seq_printf(m, " "); |
209 | seq_path(m, ek->ek_mnt, ek->ek_dentry, "\\ \t\n"); | 206 | seq_path(m, &ek->ek_path, "\\ \t\n"); |
210 | } | 207 | } |
211 | seq_printf(m, "\n"); | 208 | seq_printf(m, "\n"); |
212 | return 0; | 209 | return 0; |
@@ -243,8 +240,8 @@ static inline void expkey_update(struct cache_head *cnew, | |||
243 | struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); | 240 | struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); |
244 | struct svc_expkey *item = container_of(citem, struct svc_expkey, h); | 241 | struct svc_expkey *item = container_of(citem, struct svc_expkey, h); |
245 | 242 | ||
246 | new->ek_mnt = mntget(item->ek_mnt); | 243 | new->ek_path = item->ek_path; |
247 | new->ek_dentry = dget(item->ek_dentry); | 244 | path_get(&item->ek_path); |
248 | } | 245 | } |
249 | 246 | ||
250 | static struct cache_head *expkey_alloc(void) | 247 | static struct cache_head *expkey_alloc(void) |
@@ -332,10 +329,9 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc) | |||
332 | static void svc_export_put(struct kref *ref) | 329 | static void svc_export_put(struct kref *ref) |
333 | { | 330 | { |
334 | struct svc_export *exp = container_of(ref, struct svc_export, h.ref); | 331 | struct svc_export *exp = container_of(ref, struct svc_export, h.ref); |
335 | dput(exp->ex_dentry); | 332 | path_put(&exp->ex_path); |
336 | mntput(exp->ex_mnt); | ||
337 | auth_domain_put(exp->ex_client); | 333 | auth_domain_put(exp->ex_client); |
338 | kfree(exp->ex_path); | 334 | kfree(exp->ex_pathname); |
339 | nfsd4_fslocs_free(&exp->ex_fslocs); | 335 | nfsd4_fslocs_free(&exp->ex_fslocs); |
340 | kfree(exp); | 336 | kfree(exp); |
341 | } | 337 | } |
@@ -349,7 +345,7 @@ static void svc_export_request(struct cache_detail *cd, | |||
349 | char *pth; | 345 | char *pth; |
350 | 346 | ||
351 | qword_add(bpp, blen, exp->ex_client->name); | 347 | qword_add(bpp, blen, exp->ex_client->name); |
352 | pth = d_path(exp->ex_dentry, exp->ex_mnt, *bpp, *blen); | 348 | pth = d_path(&exp->ex_path, *bpp, *blen); |
353 | if (IS_ERR(pth)) { | 349 | if (IS_ERR(pth)) { |
354 | /* is this correct? */ | 350 | /* is this correct? */ |
355 | (*bpp)[0] = '\n'; | 351 | (*bpp)[0] = '\n'; |
@@ -507,8 +503,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
507 | struct svc_export exp, *expp; | 503 | struct svc_export exp, *expp; |
508 | int an_int; | 504 | int an_int; |
509 | 505 | ||
510 | nd.dentry = NULL; | 506 | nd.path.dentry = NULL; |
511 | exp.ex_path = NULL; | 507 | exp.ex_pathname = NULL; |
512 | 508 | ||
513 | /* fs locations */ | 509 | /* fs locations */ |
514 | exp.ex_fslocs.locations = NULL; | 510 | exp.ex_fslocs.locations = NULL; |
@@ -547,11 +543,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
547 | 543 | ||
548 | exp.h.flags = 0; | 544 | exp.h.flags = 0; |
549 | exp.ex_client = dom; | 545 | exp.ex_client = dom; |
550 | exp.ex_mnt = nd.mnt; | 546 | exp.ex_path.mnt = nd.path.mnt; |
551 | exp.ex_dentry = nd.dentry; | 547 | exp.ex_path.dentry = nd.path.dentry; |
552 | exp.ex_path = kstrdup(buf, GFP_KERNEL); | 548 | exp.ex_pathname = kstrdup(buf, GFP_KERNEL); |
553 | err = -ENOMEM; | 549 | err = -ENOMEM; |
554 | if (!exp.ex_path) | 550 | if (!exp.ex_pathname) |
555 | goto out; | 551 | goto out; |
556 | 552 | ||
557 | /* expiry */ | 553 | /* expiry */ |
@@ -610,7 +606,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
610 | goto out; | 606 | goto out; |
611 | } | 607 | } |
612 | 608 | ||
613 | err = check_export(nd.dentry->d_inode, exp.ex_flags, | 609 | err = check_export(nd.path.dentry->d_inode, exp.ex_flags, |
614 | exp.ex_uuid); | 610 | exp.ex_uuid); |
615 | if (err) goto out; | 611 | if (err) goto out; |
616 | } | 612 | } |
@@ -628,9 +624,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) | |||
628 | out: | 624 | out: |
629 | nfsd4_fslocs_free(&exp.ex_fslocs); | 625 | nfsd4_fslocs_free(&exp.ex_fslocs); |
630 | kfree(exp.ex_uuid); | 626 | kfree(exp.ex_uuid); |
631 | kfree(exp.ex_path); | 627 | kfree(exp.ex_pathname); |
632 | if (nd.dentry) | 628 | if (nd.path.dentry) |
633 | path_release(&nd); | 629 | path_put(&nd.path); |
634 | out_no_path: | 630 | out_no_path: |
635 | if (dom) | 631 | if (dom) |
636 | auth_domain_put(dom); | 632 | auth_domain_put(dom); |
@@ -653,7 +649,7 @@ static int svc_export_show(struct seq_file *m, | |||
653 | return 0; | 649 | return 0; |
654 | } | 650 | } |
655 | exp = container_of(h, struct svc_export, h); | 651 | exp = container_of(h, struct svc_export, h); |
656 | seq_path(m, exp->ex_mnt, exp->ex_dentry, " \t\n\\"); | 652 | seq_path(m, &exp->ex_path, " \t\n\\"); |
657 | seq_putc(m, '\t'); | 653 | seq_putc(m, '\t'); |
658 | seq_escape(m, exp->ex_client->name, " \t\n\\"); | 654 | seq_escape(m, exp->ex_client->name, " \t\n\\"); |
659 | seq_putc(m, '('); | 655 | seq_putc(m, '('); |
@@ -680,8 +676,8 @@ static int svc_export_match(struct cache_head *a, struct cache_head *b) | |||
680 | struct svc_export *orig = container_of(a, struct svc_export, h); | 676 | struct svc_export *orig = container_of(a, struct svc_export, h); |
681 | struct svc_export *new = container_of(b, struct svc_export, h); | 677 | struct svc_export *new = container_of(b, struct svc_export, h); |
682 | return orig->ex_client == new->ex_client && | 678 | return orig->ex_client == new->ex_client && |
683 | orig->ex_dentry == new->ex_dentry && | 679 | orig->ex_path.dentry == new->ex_path.dentry && |
684 | orig->ex_mnt == new->ex_mnt; | 680 | orig->ex_path.mnt == new->ex_path.mnt; |
685 | } | 681 | } |
686 | 682 | ||
687 | static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) | 683 | static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) |
@@ -691,9 +687,9 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) | |||
691 | 687 | ||
692 | kref_get(&item->ex_client->ref); | 688 | kref_get(&item->ex_client->ref); |
693 | new->ex_client = item->ex_client; | 689 | new->ex_client = item->ex_client; |
694 | new->ex_dentry = dget(item->ex_dentry); | 690 | new->ex_path.dentry = dget(item->ex_path.dentry); |
695 | new->ex_mnt = mntget(item->ex_mnt); | 691 | new->ex_path.mnt = mntget(item->ex_path.mnt); |
696 | new->ex_path = NULL; | 692 | new->ex_pathname = NULL; |
697 | new->ex_fslocs.locations = NULL; | 693 | new->ex_fslocs.locations = NULL; |
698 | new->ex_fslocs.locations_count = 0; | 694 | new->ex_fslocs.locations_count = 0; |
699 | new->ex_fslocs.migrated = 0; | 695 | new->ex_fslocs.migrated = 0; |
@@ -711,8 +707,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem) | |||
711 | new->ex_fsid = item->ex_fsid; | 707 | new->ex_fsid = item->ex_fsid; |
712 | new->ex_uuid = item->ex_uuid; | 708 | new->ex_uuid = item->ex_uuid; |
713 | item->ex_uuid = NULL; | 709 | item->ex_uuid = NULL; |
714 | new->ex_path = item->ex_path; | 710 | new->ex_pathname = item->ex_pathname; |
715 | item->ex_path = NULL; | 711 | item->ex_pathname = NULL; |
716 | new->ex_fslocs.locations = item->ex_fslocs.locations; | 712 | new->ex_fslocs.locations = item->ex_fslocs.locations; |
717 | item->ex_fslocs.locations = NULL; | 713 | item->ex_fslocs.locations = NULL; |
718 | new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; | 714 | new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; |
@@ -755,8 +751,8 @@ svc_export_lookup(struct svc_export *exp) | |||
755 | struct cache_head *ch; | 751 | struct cache_head *ch; |
756 | int hash; | 752 | int hash; |
757 | hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); | 753 | hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); |
758 | hash ^= hash_ptr(exp->ex_dentry, EXPORT_HASHBITS); | 754 | hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS); |
759 | hash ^= hash_ptr(exp->ex_mnt, EXPORT_HASHBITS); | 755 | hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS); |
760 | 756 | ||
761 | ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h, | 757 | ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h, |
762 | hash); | 758 | hash); |
@@ -772,8 +768,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old) | |||
772 | struct cache_head *ch; | 768 | struct cache_head *ch; |
773 | int hash; | 769 | int hash; |
774 | hash = hash_ptr(old->ex_client, EXPORT_HASHBITS); | 770 | hash = hash_ptr(old->ex_client, EXPORT_HASHBITS); |
775 | hash ^= hash_ptr(old->ex_dentry, EXPORT_HASHBITS); | 771 | hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS); |
776 | hash ^= hash_ptr(old->ex_mnt, EXPORT_HASHBITS); | 772 | hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS); |
777 | 773 | ||
778 | ch = sunrpc_cache_update(&svc_export_cache, &new->h, | 774 | ch = sunrpc_cache_update(&svc_export_cache, &new->h, |
779 | &old->h, | 775 | &old->h, |
@@ -815,8 +811,7 @@ static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv, | |||
815 | key.ek_client = clp; | 811 | key.ek_client = clp; |
816 | key.ek_fsidtype = fsid_type; | 812 | key.ek_fsidtype = fsid_type; |
817 | memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); | 813 | memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); |
818 | key.ek_mnt = exp->ex_mnt; | 814 | key.ek_path = exp->ex_path; |
819 | key.ek_dentry = exp->ex_dentry; | ||
820 | key.h.expiry_time = NEVER; | 815 | key.h.expiry_time = NEVER; |
821 | key.h.flags = 0; | 816 | key.h.flags = 0; |
822 | 817 | ||
@@ -865,13 +860,13 @@ static svc_export *exp_get_by_name(svc_client *clp, struct vfsmount *mnt, | |||
865 | { | 860 | { |
866 | struct svc_export *exp, key; | 861 | struct svc_export *exp, key; |
867 | int err; | 862 | int err; |
868 | 863 | ||
869 | if (!clp) | 864 | if (!clp) |
870 | return ERR_PTR(-ENOENT); | 865 | return ERR_PTR(-ENOENT); |
871 | 866 | ||
872 | key.ex_client = clp; | 867 | key.ex_client = clp; |
873 | key.ex_mnt = mnt; | 868 | key.ex_path.mnt = mnt; |
874 | key.ex_dentry = dentry; | 869 | key.ex_path.dentry = dentry; |
875 | 870 | ||
876 | exp = svc_export_lookup(&key); | 871 | exp = svc_export_lookup(&key); |
877 | if (exp == NULL) | 872 | if (exp == NULL) |
@@ -968,7 +963,7 @@ static int exp_fsid_hash(svc_client *clp, struct svc_export *exp) | |||
968 | static int exp_hash(struct auth_domain *clp, struct svc_export *exp) | 963 | static int exp_hash(struct auth_domain *clp, struct svc_export *exp) |
969 | { | 964 | { |
970 | u32 fsid[2]; | 965 | u32 fsid[2]; |
971 | struct inode *inode = exp->ex_dentry->d_inode; | 966 | struct inode *inode = exp->ex_path.dentry->d_inode; |
972 | dev_t dev = inode->i_sb->s_dev; | 967 | dev_t dev = inode->i_sb->s_dev; |
973 | 968 | ||
974 | if (old_valid_dev(dev)) { | 969 | if (old_valid_dev(dev)) { |
@@ -982,7 +977,7 @@ static int exp_hash(struct auth_domain *clp, struct svc_export *exp) | |||
982 | static void exp_unhash(struct svc_export *exp) | 977 | static void exp_unhash(struct svc_export *exp) |
983 | { | 978 | { |
984 | struct svc_expkey *ek; | 979 | struct svc_expkey *ek; |
985 | struct inode *inode = exp->ex_dentry->d_inode; | 980 | struct inode *inode = exp->ex_path.dentry->d_inode; |
986 | 981 | ||
987 | ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); | 982 | ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); |
988 | if (!IS_ERR(ek)) { | 983 | if (!IS_ERR(ek)) { |
@@ -1030,15 +1025,16 @@ exp_export(struct nfsctl_export *nxp) | |||
1030 | goto out_unlock; | 1025 | goto out_unlock; |
1031 | err = -EINVAL; | 1026 | err = -EINVAL; |
1032 | 1027 | ||
1033 | exp = exp_get_by_name(clp, nd.mnt, nd.dentry, NULL); | 1028 | exp = exp_get_by_name(clp, nd.path.mnt, nd.path.dentry, NULL); |
1034 | 1029 | ||
1035 | memset(&new, 0, sizeof(new)); | 1030 | memset(&new, 0, sizeof(new)); |
1036 | 1031 | ||
1037 | /* must make sure there won't be an ex_fsid clash */ | 1032 | /* must make sure there won't be an ex_fsid clash */ |
1038 | if ((nxp->ex_flags & NFSEXP_FSID) && | 1033 | if ((nxp->ex_flags & NFSEXP_FSID) && |
1039 | (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) && | 1034 | (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) && |
1040 | fsid_key->ek_mnt && | 1035 | fsid_key->ek_path.mnt && |
1041 | (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) ) | 1036 | (fsid_key->ek_path.mnt != nd.path.mnt || |
1037 | fsid_key->ek_path.dentry != nd.path.dentry)) | ||
1042 | goto finish; | 1038 | goto finish; |
1043 | 1039 | ||
1044 | if (!IS_ERR(exp)) { | 1040 | if (!IS_ERR(exp)) { |
@@ -1054,7 +1050,7 @@ exp_export(struct nfsctl_export *nxp) | |||
1054 | goto finish; | 1050 | goto finish; |
1055 | } | 1051 | } |
1056 | 1052 | ||
1057 | err = check_export(nd.dentry->d_inode, nxp->ex_flags, NULL); | 1053 | err = check_export(nd.path.dentry->d_inode, nxp->ex_flags, NULL); |
1058 | if (err) goto finish; | 1054 | if (err) goto finish; |
1059 | 1055 | ||
1060 | err = -ENOMEM; | 1056 | err = -ENOMEM; |
@@ -1063,12 +1059,11 @@ exp_export(struct nfsctl_export *nxp) | |||
1063 | 1059 | ||
1064 | new.h.expiry_time = NEVER; | 1060 | new.h.expiry_time = NEVER; |
1065 | new.h.flags = 0; | 1061 | new.h.flags = 0; |
1066 | new.ex_path = kstrdup(nxp->ex_path, GFP_KERNEL); | 1062 | new.ex_pathname = kstrdup(nxp->ex_path, GFP_KERNEL); |
1067 | if (!new.ex_path) | 1063 | if (!new.ex_pathname) |
1068 | goto finish; | 1064 | goto finish; |
1069 | new.ex_client = clp; | 1065 | new.ex_client = clp; |
1070 | new.ex_mnt = nd.mnt; | 1066 | new.ex_path = nd.path; |
1071 | new.ex_dentry = nd.dentry; | ||
1072 | new.ex_flags = nxp->ex_flags; | 1067 | new.ex_flags = nxp->ex_flags; |
1073 | new.ex_anon_uid = nxp->ex_anon_uid; | 1068 | new.ex_anon_uid = nxp->ex_anon_uid; |
1074 | new.ex_anon_gid = nxp->ex_anon_gid; | 1069 | new.ex_anon_gid = nxp->ex_anon_gid; |
@@ -1089,15 +1084,14 @@ exp_export(struct nfsctl_export *nxp) | |||
1089 | } else | 1084 | } else |
1090 | err = 0; | 1085 | err = 0; |
1091 | finish: | 1086 | finish: |
1092 | if (new.ex_path) | 1087 | kfree(new.ex_pathname); |
1093 | kfree(new.ex_path); | ||
1094 | if (exp) | 1088 | if (exp) |
1095 | exp_put(exp); | 1089 | exp_put(exp); |
1096 | if (fsid_key && !IS_ERR(fsid_key)) | 1090 | if (fsid_key && !IS_ERR(fsid_key)) |
1097 | cache_put(&fsid_key->h, &svc_expkey_cache); | 1091 | cache_put(&fsid_key->h, &svc_expkey_cache); |
1098 | if (clp) | 1092 | if (clp) |
1099 | auth_domain_put(clp); | 1093 | auth_domain_put(clp); |
1100 | path_release(&nd); | 1094 | path_put(&nd.path); |
1101 | out_unlock: | 1095 | out_unlock: |
1102 | exp_writeunlock(); | 1096 | exp_writeunlock(); |
1103 | out: | 1097 | out: |
@@ -1148,8 +1142,8 @@ exp_unexport(struct nfsctl_export *nxp) | |||
1148 | goto out_domain; | 1142 | goto out_domain; |
1149 | 1143 | ||
1150 | err = -EINVAL; | 1144 | err = -EINVAL; |
1151 | exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL); | 1145 | exp = exp_get_by_name(dom, nd.path.mnt, nd.path.dentry, NULL); |
1152 | path_release(&nd); | 1146 | path_put(&nd.path); |
1153 | if (IS_ERR(exp)) | 1147 | if (IS_ERR(exp)) |
1154 | goto out_domain; | 1148 | goto out_domain; |
1155 | 1149 | ||
@@ -1185,12 +1179,12 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize) | |||
1185 | printk("nfsd: exp_rootfh path not found %s", path); | 1179 | printk("nfsd: exp_rootfh path not found %s", path); |
1186 | return err; | 1180 | return err; |
1187 | } | 1181 | } |
1188 | inode = nd.dentry->d_inode; | 1182 | inode = nd.path.dentry->d_inode; |
1189 | 1183 | ||
1190 | dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", | 1184 | dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", |
1191 | path, nd.dentry, clp->name, | 1185 | path, nd.path.dentry, clp->name, |
1192 | inode->i_sb->s_id, inode->i_ino); | 1186 | inode->i_sb->s_id, inode->i_ino); |
1193 | exp = exp_parent(clp, nd.mnt, nd.dentry, NULL); | 1187 | exp = exp_parent(clp, nd.path.mnt, nd.path.dentry, NULL); |
1194 | if (IS_ERR(exp)) { | 1188 | if (IS_ERR(exp)) { |
1195 | err = PTR_ERR(exp); | 1189 | err = PTR_ERR(exp); |
1196 | goto out; | 1190 | goto out; |
@@ -1200,7 +1194,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize) | |||
1200 | * fh must be initialized before calling fh_compose | 1194 | * fh must be initialized before calling fh_compose |
1201 | */ | 1195 | */ |
1202 | fh_init(&fh, maxsize); | 1196 | fh_init(&fh, maxsize); |
1203 | if (fh_compose(&fh, exp, nd.dentry, NULL)) | 1197 | if (fh_compose(&fh, exp, nd.path.dentry, NULL)) |
1204 | err = -EINVAL; | 1198 | err = -EINVAL; |
1205 | else | 1199 | else |
1206 | err = 0; | 1200 | err = 0; |
@@ -1208,7 +1202,7 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize) | |||
1208 | fh_put(&fh); | 1202 | fh_put(&fh); |
1209 | exp_put(exp); | 1203 | exp_put(exp); |
1210 | out: | 1204 | out: |
1211 | path_release(&nd); | 1205 | path_put(&nd.path); |
1212 | return err; | 1206 | return err; |
1213 | } | 1207 | } |
1214 | 1208 | ||
@@ -1218,13 +1212,13 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type, | |||
1218 | struct svc_export *exp; | 1212 | struct svc_export *exp; |
1219 | struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); | 1213 | struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); |
1220 | if (IS_ERR(ek)) | 1214 | if (IS_ERR(ek)) |
1221 | return ERR_PTR(PTR_ERR(ek)); | 1215 | return ERR_CAST(ek); |
1222 | 1216 | ||
1223 | exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp); | 1217 | exp = exp_get_by_name(clp, ek->ek_path.mnt, ek->ek_path.dentry, reqp); |
1224 | cache_put(&ek->h, &svc_expkey_cache); | 1218 | cache_put(&ek->h, &svc_expkey_cache); |
1225 | 1219 | ||
1226 | if (IS_ERR(exp)) | 1220 | if (IS_ERR(exp)) |
1227 | return ERR_PTR(PTR_ERR(exp)); | 1221 | return ERR_CAST(exp); |
1228 | return exp; | 1222 | return exp; |
1229 | } | 1223 | } |
1230 | 1224 | ||
@@ -1359,7 +1353,7 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp) | |||
1359 | exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); | 1353 | exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); |
1360 | if (IS_ERR(exp)) | 1354 | if (IS_ERR(exp)) |
1361 | return nfserrno(PTR_ERR(exp)); | 1355 | return nfserrno(PTR_ERR(exp)); |
1362 | rv = fh_compose(fhp, exp, exp->ex_dentry, NULL); | 1356 | rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL); |
1363 | if (rv) | 1357 | if (rv) |
1364 | goto out; | 1358 | goto out; |
1365 | rv = check_nfsd_access(exp, rqstp); | 1359 | rv = check_nfsd_access(exp, rqstp); |
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index eac82830bfd7..c721a1e6e9dd 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c | |||
@@ -67,7 +67,7 @@ nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, | |||
67 | if (nfserr) | 67 | if (nfserr) |
68 | RETURN_STATUS(nfserr); | 68 | RETURN_STATUS(nfserr); |
69 | 69 | ||
70 | err = vfs_getattr(resp->fh.fh_export->ex_mnt, | 70 | err = vfs_getattr(resp->fh.fh_export->ex_path.mnt, |
71 | resp->fh.fh_dentry, &resp->stat); | 71 | resp->fh.fh_dentry, &resp->stat); |
72 | nfserr = nfserrno(err); | 72 | nfserr = nfserrno(err); |
73 | 73 | ||
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index d7647f70e02b..17d0dd997204 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c | |||
@@ -218,7 +218,7 @@ encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) | |||
218 | int err; | 218 | int err; |
219 | struct kstat stat; | 219 | struct kstat stat; |
220 | 220 | ||
221 | err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat); | 221 | err = vfs_getattr(fhp->fh_export->ex_path.mnt, dentry, &stat); |
222 | if (!err) { | 222 | if (!err) { |
223 | *p++ = xdr_one; /* attributes follow */ | 223 | *p++ = xdr_one; /* attributes follow */ |
224 | lease_get_mtime(dentry->d_inode, &stat.mtime); | 224 | lease_get_mtime(dentry->d_inode, &stat.mtime); |
@@ -270,7 +270,7 @@ void fill_post_wcc(struct svc_fh *fhp) | |||
270 | if (fhp->fh_post_saved) | 270 | if (fhp->fh_post_saved) |
271 | printk("nfsd: inode locked twice during operation.\n"); | 271 | printk("nfsd: inode locked twice during operation.\n"); |
272 | 272 | ||
273 | err = vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, | 273 | err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, |
274 | &fhp->fh_post_attr); | 274 | &fhp->fh_post_attr); |
275 | if (err) | 275 | if (err) |
276 | fhp->fh_post_saved = 0; | 276 | fhp->fh_post_saved = 0; |
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 1602cd00dd45..1ff90625860f 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c | |||
@@ -120,9 +120,9 @@ out_no_tfm: | |||
120 | static void | 120 | static void |
121 | nfsd4_sync_rec_dir(void) | 121 | nfsd4_sync_rec_dir(void) |
122 | { | 122 | { |
123 | mutex_lock(&rec_dir.dentry->d_inode->i_mutex); | 123 | mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex); |
124 | nfsd_sync_dir(rec_dir.dentry); | 124 | nfsd_sync_dir(rec_dir.path.dentry); |
125 | mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); | 125 | mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex); |
126 | } | 126 | } |
127 | 127 | ||
128 | int | 128 | int |
@@ -142,9 +142,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) | |||
142 | nfs4_save_user(&uid, &gid); | 142 | nfs4_save_user(&uid, &gid); |
143 | 143 | ||
144 | /* lock the parent */ | 144 | /* lock the parent */ |
145 | mutex_lock(&rec_dir.dentry->d_inode->i_mutex); | 145 | mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex); |
146 | 146 | ||
147 | dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1); | 147 | dentry = lookup_one_len(dname, rec_dir.path.dentry, HEXDIR_LEN-1); |
148 | if (IS_ERR(dentry)) { | 148 | if (IS_ERR(dentry)) { |
149 | status = PTR_ERR(dentry); | 149 | status = PTR_ERR(dentry); |
150 | goto out_unlock; | 150 | goto out_unlock; |
@@ -154,11 +154,11 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) | |||
154 | dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n"); | 154 | dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n"); |
155 | goto out_put; | 155 | goto out_put; |
156 | } | 156 | } |
157 | status = vfs_mkdir(rec_dir.dentry->d_inode, dentry, S_IRWXU); | 157 | status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU); |
158 | out_put: | 158 | out_put: |
159 | dput(dentry); | 159 | dput(dentry); |
160 | out_unlock: | 160 | out_unlock: |
161 | mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); | 161 | mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex); |
162 | if (status == 0) { | 162 | if (status == 0) { |
163 | clp->cl_firststate = 1; | 163 | clp->cl_firststate = 1; |
164 | nfsd4_sync_rec_dir(); | 164 | nfsd4_sync_rec_dir(); |
@@ -221,7 +221,7 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) | |||
221 | 221 | ||
222 | nfs4_save_user(&uid, &gid); | 222 | nfs4_save_user(&uid, &gid); |
223 | 223 | ||
224 | filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY); | 224 | filp = dentry_open(dget(dir), mntget(rec_dir.path.mnt), O_RDONLY); |
225 | status = PTR_ERR(filp); | 225 | status = PTR_ERR(filp); |
226 | if (IS_ERR(filp)) | 226 | if (IS_ERR(filp)) |
227 | goto out; | 227 | goto out; |
@@ -286,9 +286,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen) | |||
286 | 286 | ||
287 | dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name); | 287 | dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name); |
288 | 288 | ||
289 | mutex_lock(&rec_dir.dentry->d_inode->i_mutex); | 289 | mutex_lock(&rec_dir.path.dentry->d_inode->i_mutex); |
290 | dentry = lookup_one_len(name, rec_dir.dentry, namlen); | 290 | dentry = lookup_one_len(name, rec_dir.path.dentry, namlen); |
291 | mutex_unlock(&rec_dir.dentry->d_inode->i_mutex); | 291 | mutex_unlock(&rec_dir.path.dentry->d_inode->i_mutex); |
292 | if (IS_ERR(dentry)) { | 292 | if (IS_ERR(dentry)) { |
293 | status = PTR_ERR(dentry); | 293 | status = PTR_ERR(dentry); |
294 | return status; | 294 | return status; |
@@ -297,7 +297,7 @@ nfsd4_unlink_clid_dir(char *name, int namlen) | |||
297 | if (!dentry->d_inode) | 297 | if (!dentry->d_inode) |
298 | goto out; | 298 | goto out; |
299 | 299 | ||
300 | status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry); | 300 | status = nfsd4_clear_clid_dir(rec_dir.path.dentry, dentry); |
301 | out: | 301 | out: |
302 | dput(dentry); | 302 | dput(dentry); |
303 | return status; | 303 | return status; |
@@ -347,12 +347,12 @@ nfsd4_recdir_purge_old(void) { | |||
347 | 347 | ||
348 | if (!rec_dir_init) | 348 | if (!rec_dir_init) |
349 | return; | 349 | return; |
350 | status = nfsd4_list_rec_dir(rec_dir.dentry, purge_old); | 350 | status = nfsd4_list_rec_dir(rec_dir.path.dentry, purge_old); |
351 | if (status == 0) | 351 | if (status == 0) |
352 | nfsd4_sync_rec_dir(); | 352 | nfsd4_sync_rec_dir(); |
353 | if (status) | 353 | if (status) |
354 | printk("nfsd4: failed to purge old clients from recovery" | 354 | printk("nfsd4: failed to purge old clients from recovery" |
355 | " directory %s\n", rec_dir.dentry->d_name.name); | 355 | " directory %s\n", rec_dir.path.dentry->d_name.name); |
356 | return; | 356 | return; |
357 | } | 357 | } |
358 | 358 | ||
@@ -373,10 +373,10 @@ int | |||
373 | nfsd4_recdir_load(void) { | 373 | nfsd4_recdir_load(void) { |
374 | int status; | 374 | int status; |
375 | 375 | ||
376 | status = nfsd4_list_rec_dir(rec_dir.dentry, load_recdir); | 376 | status = nfsd4_list_rec_dir(rec_dir.path.dentry, load_recdir); |
377 | if (status) | 377 | if (status) |
378 | printk("nfsd4: failed loading clients from recovery" | 378 | printk("nfsd4: failed loading clients from recovery" |
379 | " directory %s\n", rec_dir.dentry->d_name.name); | 379 | " directory %s\n", rec_dir.path.dentry->d_name.name); |
380 | return status; | 380 | return status; |
381 | } | 381 | } |
382 | 382 | ||
@@ -415,5 +415,5 @@ nfsd4_shutdown_recdir(void) | |||
415 | if (!rec_dir_init) | 415 | if (!rec_dir_init) |
416 | return; | 416 | return; |
417 | rec_dir_init = 0; | 417 | rec_dir_init = 0; |
418 | path_release(&rec_dir); | 418 | path_put(&rec_dir.path); |
419 | } | 419 | } |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f6744bc03dae..bcb97d8e8b8b 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -3261,11 +3261,11 @@ nfs4_reset_recoverydir(char *recdir) | |||
3261 | if (status) | 3261 | if (status) |
3262 | return status; | 3262 | return status; |
3263 | status = -ENOTDIR; | 3263 | status = -ENOTDIR; |
3264 | if (S_ISDIR(nd.dentry->d_inode->i_mode)) { | 3264 | if (S_ISDIR(nd.path.dentry->d_inode->i_mode)) { |
3265 | nfs4_set_recdir(recdir); | 3265 | nfs4_set_recdir(recdir); |
3266 | status = 0; | 3266 | status = 0; |
3267 | } | 3267 | } |
3268 | path_release(&nd); | 3268 | path_put(&nd.path); |
3269 | return status; | 3269 | return status; |
3270 | } | 3270 | } |
3271 | 3271 | ||
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b0592e7c378d..0e6a179eccaf 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -1330,9 +1330,9 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 * | |||
1330 | *stat = exp_pseudoroot(rqstp, &tmp_fh); | 1330 | *stat = exp_pseudoroot(rqstp, &tmp_fh); |
1331 | if (*stat) | 1331 | if (*stat) |
1332 | return NULL; | 1332 | return NULL; |
1333 | rootpath = tmp_fh.fh_export->ex_path; | 1333 | rootpath = tmp_fh.fh_export->ex_pathname; |
1334 | 1334 | ||
1335 | path = exp->ex_path; | 1335 | path = exp->ex_pathname; |
1336 | 1336 | ||
1337 | if (strncmp(path, rootpath, strlen(rootpath))) { | 1337 | if (strncmp(path, rootpath, strlen(rootpath))) { |
1338 | dprintk("nfsd: fs_locations failed;" | 1338 | dprintk("nfsd: fs_locations failed;" |
@@ -1481,7 +1481,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, | |||
1481 | goto out; | 1481 | goto out; |
1482 | } | 1482 | } |
1483 | 1483 | ||
1484 | err = vfs_getattr(exp->ex_mnt, dentry, &stat); | 1484 | err = vfs_getattr(exp->ex_path.mnt, dentry, &stat); |
1485 | if (err) | 1485 | if (err) |
1486 | goto out_nfserr; | 1486 | goto out_nfserr; |
1487 | if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | | 1487 | if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | |
@@ -1838,9 +1838,9 @@ out_acl: | |||
1838 | * and this is the root of a cross-mounted filesystem. | 1838 | * and this is the root of a cross-mounted filesystem. |
1839 | */ | 1839 | */ |
1840 | if (ignore_crossmnt == 0 && | 1840 | if (ignore_crossmnt == 0 && |
1841 | exp->ex_mnt->mnt_root->d_inode == dentry->d_inode) { | 1841 | exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) { |
1842 | err = vfs_getattr(exp->ex_mnt->mnt_parent, | 1842 | err = vfs_getattr(exp->ex_path.mnt->mnt_parent, |
1843 | exp->ex_mnt->mnt_mountpoint, &stat); | 1843 | exp->ex_path.mnt->mnt_mountpoint, &stat); |
1844 | if (err) | 1844 | if (err) |
1845 | goto out_nfserr; | 1845 | goto out_nfserr; |
1846 | } | 1846 | } |
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 8fbd2dc08a92..0130b345234d 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c | |||
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry) | |||
47 | return 1; | 47 | return 1; |
48 | 48 | ||
49 | tdentry = dget(dentry); | 49 | tdentry = dget(dentry); |
50 | while (tdentry != exp->ex_dentry && ! IS_ROOT(tdentry)) { | 50 | while (tdentry != exp->ex_path.dentry && !IS_ROOT(tdentry)) { |
51 | /* make sure parents give x permission to user */ | 51 | /* make sure parents give x permission to user */ |
52 | int err; | 52 | int err; |
53 | parent = dget_parent(tdentry); | 53 | parent = dget_parent(tdentry); |
@@ -59,9 +59,9 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry) | |||
59 | dput(tdentry); | 59 | dput(tdentry); |
60 | tdentry = parent; | 60 | tdentry = parent; |
61 | } | 61 | } |
62 | if (tdentry != exp->ex_dentry) | 62 | if (tdentry != exp->ex_path.dentry) |
63 | dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name); | 63 | dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name); |
64 | rv = (tdentry == exp->ex_dentry); | 64 | rv = (tdentry == exp->ex_path.dentry); |
65 | dput(tdentry); | 65 | dput(tdentry); |
66 | return rv; | 66 | return rv; |
67 | } | 67 | } |
@@ -209,9 +209,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access) | |||
209 | fileid_type = fh->fh_fileid_type; | 209 | fileid_type = fh->fh_fileid_type; |
210 | 210 | ||
211 | if (fileid_type == FILEID_ROOT) | 211 | if (fileid_type == FILEID_ROOT) |
212 | dentry = dget(exp->ex_dentry); | 212 | dentry = dget(exp->ex_path.dentry); |
213 | else { | 213 | else { |
214 | dentry = exportfs_decode_fh(exp->ex_mnt, fid, | 214 | dentry = exportfs_decode_fh(exp->ex_path.mnt, fid, |
215 | data_left, fileid_type, | 215 | data_left, fileid_type, |
216 | nfsd_acceptable, exp); | 216 | nfsd_acceptable, exp); |
217 | } | 217 | } |
@@ -299,7 +299,7 @@ out: | |||
299 | static void _fh_update(struct svc_fh *fhp, struct svc_export *exp, | 299 | static void _fh_update(struct svc_fh *fhp, struct svc_export *exp, |
300 | struct dentry *dentry) | 300 | struct dentry *dentry) |
301 | { | 301 | { |
302 | if (dentry != exp->ex_dentry) { | 302 | if (dentry != exp->ex_path.dentry) { |
303 | struct fid *fid = (struct fid *) | 303 | struct fid *fid = (struct fid *) |
304 | (fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1); | 304 | (fhp->fh_handle.fh_auth + fhp->fh_handle.fh_size/4 - 1); |
305 | int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4; | 305 | int maxsize = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4; |
@@ -344,12 +344,12 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, | |||
344 | struct inode * inode = dentry->d_inode; | 344 | struct inode * inode = dentry->d_inode; |
345 | struct dentry *parent = dentry->d_parent; | 345 | struct dentry *parent = dentry->d_parent; |
346 | __u32 *datap; | 346 | __u32 *datap; |
347 | dev_t ex_dev = exp->ex_dentry->d_inode->i_sb->s_dev; | 347 | dev_t ex_dev = exp->ex_path.dentry->d_inode->i_sb->s_dev; |
348 | int root_export = (exp->ex_dentry == exp->ex_dentry->d_sb->s_root); | 348 | int root_export = (exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root); |
349 | 349 | ||
350 | dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n", | 350 | dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n", |
351 | MAJOR(ex_dev), MINOR(ex_dev), | 351 | MAJOR(ex_dev), MINOR(ex_dev), |
352 | (long) exp->ex_dentry->d_inode->i_ino, | 352 | (long) exp->ex_path.dentry->d_inode->i_ino, |
353 | parent->d_name.name, dentry->d_name.name, | 353 | parent->d_name.name, dentry->d_name.name, |
354 | (inode ? inode->i_ino : 0)); | 354 | (inode ? inode->i_ino : 0)); |
355 | 355 | ||
@@ -391,7 +391,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, | |||
391 | /* FALL THROUGH */ | 391 | /* FALL THROUGH */ |
392 | case FSID_MAJOR_MINOR: | 392 | case FSID_MAJOR_MINOR: |
393 | case FSID_ENCODE_DEV: | 393 | case FSID_ENCODE_DEV: |
394 | if (!(exp->ex_dentry->d_inode->i_sb->s_type->fs_flags | 394 | if (!(exp->ex_path.dentry->d_inode->i_sb->s_type->fs_flags |
395 | & FS_REQUIRES_DEV)) | 395 | & FS_REQUIRES_DEV)) |
396 | goto retry; | 396 | goto retry; |
397 | break; | 397 | break; |
@@ -454,7 +454,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, | |||
454 | fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev); | 454 | fhp->fh_handle.ofh_dev = old_encode_dev(ex_dev); |
455 | fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev; | 455 | fhp->fh_handle.ofh_xdev = fhp->fh_handle.ofh_dev; |
456 | fhp->fh_handle.ofh_xino = | 456 | fhp->fh_handle.ofh_xino = |
457 | ino_t_to_u32(exp->ex_dentry->d_inode->i_ino); | 457 | ino_t_to_u32(exp->ex_path.dentry->d_inode->i_ino); |
458 | fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry)); | 458 | fhp->fh_handle.ofh_dirino = ino_t_to_u32(parent_ino(dentry)); |
459 | if (inode) | 459 | if (inode) |
460 | _fh_update_old(dentry, exp, &fhp->fh_handle); | 460 | _fh_update_old(dentry, exp, &fhp->fh_handle); |
@@ -465,7 +465,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, | |||
465 | datap = fhp->fh_handle.fh_auth+0; | 465 | datap = fhp->fh_handle.fh_auth+0; |
466 | fhp->fh_handle.fh_fsid_type = fsid_type; | 466 | fhp->fh_handle.fh_fsid_type = fsid_type; |
467 | mk_fsid(fsid_type, datap, ex_dev, | 467 | mk_fsid(fsid_type, datap, ex_dev, |
468 | exp->ex_dentry->d_inode->i_ino, | 468 | exp->ex_path.dentry->d_inode->i_ino, |
469 | exp->ex_fsid, exp->ex_uuid); | 469 | exp->ex_fsid, exp->ex_uuid); |
470 | 470 | ||
471 | len = key_len(fsid_type); | 471 | len = key_len(fsid_type); |
@@ -571,7 +571,7 @@ enum fsid_source fsid_source(struct svc_fh *fhp) | |||
571 | case FSID_DEV: | 571 | case FSID_DEV: |
572 | case FSID_ENCODE_DEV: | 572 | case FSID_ENCODE_DEV: |
573 | case FSID_MAJOR_MINOR: | 573 | case FSID_MAJOR_MINOR: |
574 | if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags | 574 | if (fhp->fh_export->ex_path.dentry->d_inode->i_sb->s_type->fs_flags |
575 | & FS_REQUIRES_DEV) | 575 | & FS_REQUIRES_DEV) |
576 | return FSIDSOURCE_DEV; | 576 | return FSIDSOURCE_DEV; |
577 | break; | 577 | break; |
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 977a71f64e19..6cfc96a12483 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c | |||
@@ -41,7 +41,7 @@ static __be32 | |||
41 | nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp) | 41 | nfsd_return_attrs(__be32 err, struct nfsd_attrstat *resp) |
42 | { | 42 | { |
43 | if (err) return err; | 43 | if (err) return err; |
44 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, | 44 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt, |
45 | resp->fh.fh_dentry, | 45 | resp->fh.fh_dentry, |
46 | &resp->stat)); | 46 | &resp->stat)); |
47 | } | 47 | } |
@@ -49,7 +49,7 @@ static __be32 | |||
49 | nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp) | 49 | nfsd_return_dirop(__be32 err, struct nfsd_diropres *resp) |
50 | { | 50 | { |
51 | if (err) return err; | 51 | if (err) return err; |
52 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, | 52 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt, |
53 | resp->fh.fh_dentry, | 53 | resp->fh.fh_dentry, |
54 | &resp->stat)); | 54 | &resp->stat)); |
55 | } | 55 | } |
@@ -164,7 +164,7 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp, | |||
164 | &resp->count); | 164 | &resp->count); |
165 | 165 | ||
166 | if (nfserr) return nfserr; | 166 | if (nfserr) return nfserr; |
167 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_mnt, | 167 | return nfserrno(vfs_getattr(resp->fh.fh_export->ex_path.mnt, |
168 | resp->fh.fh_dentry, | 168 | resp->fh.fh_dentry, |
169 | &resp->stat)); | 169 | &resp->stat)); |
170 | } | 170 | } |
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 61ad61743d94..afd08e2c90a5 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c | |||
@@ -207,7 +207,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, | |||
207 | __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) | 207 | __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) |
208 | { | 208 | { |
209 | struct kstat stat; | 209 | struct kstat stat; |
210 | vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat); | 210 | vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat); |
211 | return encode_fattr(rqstp, p, fhp, &stat); | 211 | return encode_fattr(rqstp, p, fhp, &stat); |
212 | } | 212 | } |
213 | 213 | ||
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index cc75e4fcd02b..46f59d5365a0 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -101,7 +101,7 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, | |||
101 | { | 101 | { |
102 | struct svc_export *exp = *expp, *exp2 = NULL; | 102 | struct svc_export *exp = *expp, *exp2 = NULL; |
103 | struct dentry *dentry = *dpp; | 103 | struct dentry *dentry = *dpp; |
104 | struct vfsmount *mnt = mntget(exp->ex_mnt); | 104 | struct vfsmount *mnt = mntget(exp->ex_path.mnt); |
105 | struct dentry *mounts = dget(dentry); | 105 | struct dentry *mounts = dget(dentry); |
106 | int err = 0; | 106 | int err = 0; |
107 | 107 | ||
@@ -156,15 +156,15 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
156 | if (isdotent(name, len)) { | 156 | if (isdotent(name, len)) { |
157 | if (len==1) | 157 | if (len==1) |
158 | dentry = dget(dparent); | 158 | dentry = dget(dparent); |
159 | else if (dparent != exp->ex_dentry) { | 159 | else if (dparent != exp->ex_path.dentry) |
160 | dentry = dget_parent(dparent); | 160 | dentry = dget_parent(dparent); |
161 | } else if (!EX_NOHIDE(exp)) | 161 | else if (!EX_NOHIDE(exp)) |
162 | dentry = dget(dparent); /* .. == . just like at / */ | 162 | dentry = dget(dparent); /* .. == . just like at / */ |
163 | else { | 163 | else { |
164 | /* checking mountpoint crossing is very different when stepping up */ | 164 | /* checking mountpoint crossing is very different when stepping up */ |
165 | struct svc_export *exp2 = NULL; | 165 | struct svc_export *exp2 = NULL; |
166 | struct dentry *dp; | 166 | struct dentry *dp; |
167 | struct vfsmount *mnt = mntget(exp->ex_mnt); | 167 | struct vfsmount *mnt = mntget(exp->ex_path.mnt); |
168 | dentry = dget(dparent); | 168 | dentry = dget(dparent); |
169 | while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry)) | 169 | while(dentry == mnt->mnt_root && follow_up(&mnt, &dentry)) |
170 | ; | 170 | ; |
@@ -721,7 +721,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
721 | 721 | ||
722 | DQUOT_INIT(inode); | 722 | DQUOT_INIT(inode); |
723 | } | 723 | } |
724 | *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_mnt), flags); | 724 | *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), |
725 | flags); | ||
725 | if (IS_ERR(*filp)) | 726 | if (IS_ERR(*filp)) |
726 | host_err = PTR_ERR(*filp); | 727 | host_err = PTR_ERR(*filp); |
727 | out_nfserr: | 728 | out_nfserr: |
@@ -1462,7 +1463,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp) | |||
1462 | if (!inode->i_op || !inode->i_op->readlink) | 1463 | if (!inode->i_op || !inode->i_op->readlink) |
1463 | goto out; | 1464 | goto out; |
1464 | 1465 | ||
1465 | touch_atime(fhp->fh_export->ex_mnt, dentry); | 1466 | touch_atime(fhp->fh_export->ex_path.mnt, dentry); |
1466 | /* N.B. Why does this call need a get_fs()?? | 1467 | /* N.B. Why does this call need a get_fs()?? |
1467 | * Remove the set_fs and watch the fireworks:-) --okir | 1468 | * Remove the set_fs and watch the fireworks:-) --okir |
1468 | */ | 1469 | */ |
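The ex_mnt/ex_dentry to ex_path conversions in the nfsd hunks above (and the nd.dentry/nd.mnt to nd.path conversions further down in the fs/open.c hunks) all follow from one data-structure change: the export and the nameidata now embed a struct path instead of carrying the vfsmount and dentry as two separate fields. A minimal compile-able sketch of the shape involved (field names as used in the hunks; the surrounding struct layout is illustrative, not the full kernel definition):

	struct vfsmount;
	struct dentry;

	struct path {
		struct vfsmount *mnt;
		struct dentry *dentry;
	};

	struct svc_export_sketch {
		struct path ex_path;	/* replaces: struct vfsmount *ex_mnt;
					 *           struct dentry   *ex_dentry; */
		/* ... other export fields unchanged ... */
	};

Call sites then spell exp->ex_dentry as exp->ex_path.dentry and exp->ex_mnt as exp->ex_path.mnt, which is all these hunks do.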
diff --git a/fs/ocfs2/cluster/endian.h b/fs/ocfs2/cluster/endian.h deleted file mode 100644 index 2df9082f4e35..000000000000 --- a/fs/ocfs2/cluster/endian.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * Copyright (C) 2005 Oracle. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public | ||
17 | * License along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
19 | * Boston, MA 021110-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef OCFS2_CLUSTER_ENDIAN_H | ||
23 | #define OCFS2_CLUSTER_ENDIAN_H | ||
24 | |||
25 | static inline void be32_add_cpu(__be32 *var, u32 val) | ||
26 | { | ||
27 | *var = cpu_to_be32(be32_to_cpu(*var) + val); | ||
28 | } | ||
29 | |||
30 | #endif /* OCFS2_CLUSTER_ENDIAN_H */ | ||
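The helper removed here is small enough to restate; the removal presumably relies on equivalent *_add_cpu helpers being available from the shared byteorder headers (an assumption, since the replacement is not shown in this diff). A user-space illustration of what the dropped inline did, using htonl/ntohl as stand-ins for cpu_to_be32/be32_to_cpu:

	#include <arpa/inet.h>
	#include <assert.h>
	#include <stdint.h>

	/* Same read-modify-write as the deleted be32_add_cpu(). */
	static void be32_add_cpu_demo(uint32_t *var, uint32_t val)
	{
		*var = htonl(ntohl(*var) + val);
	}

	int main(void)
	{
		uint32_t counter = htonl(41);	/* big-endian on-disk/on-wire value */

		be32_add_cpu_demo(&counter, 1);
		assert(ntohl(counter) == 42);
		return 0;
	}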
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index af2070da308b..709fba25bf7e 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/sysctl.h> | 24 | #include <linux/sysctl.h> |
25 | #include <linux/configfs.h> | 25 | #include <linux/configfs.h> |
26 | 26 | ||
27 | #include "endian.h" | ||
28 | #include "tcp.h" | 27 | #include "tcp.h" |
29 | #include "nodemanager.h" | 28 | #include "nodemanager.h" |
30 | #include "heartbeat.h" | 29 | #include "heartbeat.h" |
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index b2e832aca567..d25b9af28500 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h | |||
@@ -38,6 +38,15 @@ | |||
38 | * locking semantics of the file system using the protocol. It should | 38 | * locking semantics of the file system using the protocol. It should |
39 | * be somewhere else, I'm sure, but right now it isn't. | 39 | * be somewhere else, I'm sure, but right now it isn't. |
40 | * | 40 | * |
41 | * With version 11, we separate out the filesystem locking portion. The | ||
42 | * filesystem now has a major.minor version it negotiates. Version 11 | ||
43 | * introduces this negotiation to the o2dlm protocol, and as such the | ||
44 | * version here in tcp_internal.h should not need to be bumped for | ||
45 | * filesystem locking changes. | ||
46 | * | ||
47 | * New in version 11 | ||
48 | * - Negotiation of filesystem locking in the dlm join. | ||
49 | * | ||
41 | * New in version 10: | 50 | * New in version 10: |
42 | * - Meta/data locks combined | 51 | * - Meta/data locks combined |
43 | * | 52 | * |
@@ -66,7 +75,7 @@ | |||
66 | * - full 64 bit i_size in the metadata lock lvbs | 75 | * - full 64 bit i_size in the metadata lock lvbs |
67 | * - introduction of "rw" lock and pushing meta/data locking down | 76 | * - introduction of "rw" lock and pushing meta/data locking down |
68 | */ | 77 | */ |
69 | #define O2NET_PROTOCOL_VERSION 10ULL | 78 | #define O2NET_PROTOCOL_VERSION 11ULL |
70 | struct o2net_handshake { | 79 | struct o2net_handshake { |
71 | __be64 protocol_version; | 80 | __be64 protocol_version; |
72 | __be64 connector_id; | 81 | __be64 connector_id; |
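The comment added above stresses that this should be the last time a filesystem locking change forces an O2NET_PROTOCOL_VERSION bump. As far as these hunks show, the handshake value is a single exact-match number, so nodes on 10 and 11 simply refuse to pair up; the major.minor fields added to the dlm join below are what allow partial compatibility. A trivial stand-alone contrast of the old-style check (illustrative only, not the o2net code):

	#include <stdint.h>
	#include <stdio.h>

	/* Exact-match check in the style of the o2net handshake version. */
	static int handshake_compatible(uint64_t ours, uint64_t theirs)
	{
		return ours == theirs;
	}

	int main(void)
	{
		/* A version 10 node meeting a version 11 node: no connection. */
		printf("10 vs 11: %s\n",
		       handshake_compatible(10, 11) ? "connect" : "reject");
		return 0;
	}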
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index cfd5cb65cab0..b5786a787fab 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h | |||
@@ -193,7 +193,12 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm, | |||
193 | dlm_astunlockfunc_t *unlockast, | 193 | dlm_astunlockfunc_t *unlockast, |
194 | void *data); | 194 | void *data); |
195 | 195 | ||
196 | struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key); | 196 | struct dlm_protocol_version { |
197 | u8 pv_major; | ||
198 | u8 pv_minor; | ||
199 | }; | ||
200 | struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key, | ||
201 | struct dlm_protocol_version *fs_proto); | ||
197 | 202 | ||
198 | void dlm_unregister_domain(struct dlm_ctxt *dlm); | 203 | void dlm_unregister_domain(struct dlm_ctxt *dlm); |
199 | 204 | ||
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 2fd8bded38f3..644bee55d8ba 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c | |||
@@ -43,7 +43,6 @@ | |||
43 | #include "cluster/heartbeat.h" | 43 | #include "cluster/heartbeat.h" |
44 | #include "cluster/nodemanager.h" | 44 | #include "cluster/nodemanager.h" |
45 | #include "cluster/tcp.h" | 45 | #include "cluster/tcp.h" |
46 | #include "cluster/endian.h" | ||
47 | 46 | ||
48 | #include "dlmapi.h" | 47 | #include "dlmapi.h" |
49 | #include "dlmcommon.h" | 48 | #include "dlmcommon.h" |
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index e90b92f9ece1..9843ee17ea27 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -142,6 +142,12 @@ struct dlm_ctxt | |||
142 | spinlock_t work_lock; | 142 | spinlock_t work_lock; |
143 | struct list_head dlm_domain_handlers; | 143 | struct list_head dlm_domain_handlers; |
144 | struct list_head dlm_eviction_callbacks; | 144 | struct list_head dlm_eviction_callbacks; |
145 | |||
146 | /* The filesystem specifies this at domain registration. We | ||
147 | * cache it here to know what to tell other nodes. */ | ||
148 | struct dlm_protocol_version fs_locking_proto; | ||
149 | /* This is the inter-dlm communication version */ | ||
150 | struct dlm_protocol_version dlm_locking_proto; | ||
145 | }; | 151 | }; |
146 | 152 | ||
147 | static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) | 153 | static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) |
@@ -589,10 +595,24 @@ struct dlm_proxy_ast | |||
589 | #define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) | 595 | #define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) |
590 | 596 | ||
591 | #define DLM_MOD_KEY (0x666c6172) | 597 | #define DLM_MOD_KEY (0x666c6172) |
592 | enum dlm_query_join_response { | 598 | enum dlm_query_join_response_code { |
593 | JOIN_DISALLOW = 0, | 599 | JOIN_DISALLOW = 0, |
594 | JOIN_OK, | 600 | JOIN_OK, |
595 | JOIN_OK_NO_MAP, | 601 | JOIN_OK_NO_MAP, |
602 | JOIN_PROTOCOL_MISMATCH, | ||
603 | }; | ||
604 | |||
605 | union dlm_query_join_response { | ||
606 | u32 intval; | ||
607 | struct { | ||
608 | u8 code; /* Response code. dlm_minor and fs_minor | ||
609 | are only valid if this is JOIN_OK */ | ||
610 | u8 dlm_minor; /* The minor version of the protocol the | ||
611 | dlm is speaking. */ | ||
612 | u8 fs_minor; /* The minor version of the protocol the | ||
613 | filesystem is speaking. */ | ||
614 | u8 reserved; | ||
615 | } packet; | ||
596 | }; | 616 | }; |
597 | 617 | ||
598 | struct dlm_lock_request | 618 | struct dlm_lock_request |
@@ -633,6 +653,8 @@ struct dlm_query_join_request | |||
633 | u8 node_idx; | 653 | u8 node_idx; |
634 | u8 pad1[2]; | 654 | u8 pad1[2]; |
635 | u8 name_len; | 655 | u8 name_len; |
656 | struct dlm_protocol_version dlm_proto; | ||
657 | struct dlm_protocol_version fs_proto; | ||
636 | u8 domain[O2NM_MAX_NAME_LEN]; | 658 | u8 domain[O2NM_MAX_NAME_LEN]; |
637 | u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; | 659 | u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; |
638 | }; | 660 | }; |
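The new union above is the whole trick for carrying the negotiated minors back to the joining node: the reply still travels as the single 32-bit o2net status word (response.intval in the dlmdomain.c hunks below), but it can now be unpacked into a response code plus the two minor versions. A user-space mock of the same layout (it mirrors the patch; it is not the kernel definition):

	#include <stdint.h>
	#include <stdio.h>

	enum { JOIN_DISALLOW, JOIN_OK, JOIN_OK_NO_MAP, JOIN_PROTOCOL_MISMATCH };

	union join_response_mock {
		uint32_t intval;		/* the value actually returned over o2net */
		struct {
			uint8_t code;		/* one of the enum values above */
			uint8_t dlm_minor;	/* valid only when code == JOIN_OK */
			uint8_t fs_minor;	/* valid only when code == JOIN_OK */
			uint8_t reserved;
		} packet;
	};

	int main(void)
	{
		union join_response_mock r = { .packet = { JOIN_OK, 0, 0, 0 } };

		printf("status word 0x%08x -> code %u, dlm minor %u, fs minor %u\n",
		       r.intval, r.packet.code, r.packet.dlm_minor, r.packet.fs_minor);
		return 0;
	}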
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 6954565b8ccb..638d2ebb892b 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -123,6 +123,17 @@ DEFINE_SPINLOCK(dlm_domain_lock); | |||
123 | LIST_HEAD(dlm_domains); | 123 | LIST_HEAD(dlm_domains); |
124 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); | 124 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); |
125 | 125 | ||
126 | /* | ||
127 | * The supported protocol version for DLM communication. Running domains | ||
128 | * will have a negotiated version with the same major number and a minor | ||
129 | * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should | ||
130 | * be used to determine what a running domain is actually using. | ||
131 | */ | ||
132 | static const struct dlm_protocol_version dlm_protocol = { | ||
133 | .pv_major = 1, | ||
134 | .pv_minor = 0, | ||
135 | }; | ||
136 | |||
126 | #define DLM_DOMAIN_BACKOFF_MS 200 | 137 | #define DLM_DOMAIN_BACKOFF_MS 200 |
127 | 138 | ||
128 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, | 139 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, |
@@ -133,6 +144,8 @@ static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data, | |||
133 | void **ret_data); | 144 | void **ret_data); |
134 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | 145 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, |
135 | void **ret_data); | 146 | void **ret_data); |
147 | static int dlm_protocol_compare(struct dlm_protocol_version *existing, | ||
148 | struct dlm_protocol_version *request); | ||
136 | 149 | ||
137 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); | 150 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); |
138 | 151 | ||
@@ -668,11 +681,45 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
668 | } | 681 | } |
669 | EXPORT_SYMBOL_GPL(dlm_unregister_domain); | 682 | EXPORT_SYMBOL_GPL(dlm_unregister_domain); |
670 | 683 | ||
684 | static int dlm_query_join_proto_check(char *proto_type, int node, | ||
685 | struct dlm_protocol_version *ours, | ||
686 | struct dlm_protocol_version *request) | ||
687 | { | ||
688 | int rc; | ||
689 | struct dlm_protocol_version proto = *request; | ||
690 | |||
691 | if (!dlm_protocol_compare(ours, &proto)) { | ||
692 | mlog(0, | ||
693 | "node %u wanted to join with %s locking protocol " | ||
694 | "%u.%u, we respond with %u.%u\n", | ||
695 | node, proto_type, | ||
696 | request->pv_major, | ||
697 | request->pv_minor, | ||
698 | proto.pv_major, proto.pv_minor); | ||
699 | request->pv_minor = proto.pv_minor; | ||
700 | rc = 0; | ||
701 | } else { | ||
702 | mlog(ML_NOTICE, | ||
703 | "Node %u wanted to join with %s locking " | ||
704 | "protocol %u.%u, but we have %u.%u, disallowing\n", | ||
705 | node, proto_type, | ||
706 | request->pv_major, | ||
707 | request->pv_minor, | ||
708 | ours->pv_major, | ||
709 | ours->pv_minor); | ||
710 | rc = 1; | ||
711 | } | ||
712 | |||
713 | return rc; | ||
714 | } | ||
715 | |||
671 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, | 716 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, |
672 | void **ret_data) | 717 | void **ret_data) |
673 | { | 718 | { |
674 | struct dlm_query_join_request *query; | 719 | struct dlm_query_join_request *query; |
675 | enum dlm_query_join_response response; | 720 | union dlm_query_join_response response = { |
721 | .packet.code = JOIN_DISALLOW, | ||
722 | }; | ||
676 | struct dlm_ctxt *dlm = NULL; | 723 | struct dlm_ctxt *dlm = NULL; |
677 | u8 nodenum; | 724 | u8 nodenum; |
678 | 725 | ||
@@ -690,11 +737,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, | |||
690 | mlog(0, "node %u is not in our live map yet\n", | 737 | mlog(0, "node %u is not in our live map yet\n", |
691 | query->node_idx); | 738 | query->node_idx); |
692 | 739 | ||
693 | response = JOIN_DISALLOW; | 740 | response.packet.code = JOIN_DISALLOW; |
694 | goto respond; | 741 | goto respond; |
695 | } | 742 | } |
696 | 743 | ||
697 | response = JOIN_OK_NO_MAP; | 744 | response.packet.code = JOIN_OK_NO_MAP; |
698 | 745 | ||
699 | spin_lock(&dlm_domain_lock); | 746 | spin_lock(&dlm_domain_lock); |
700 | dlm = __dlm_lookup_domain_full(query->domain, query->name_len); | 747 | dlm = __dlm_lookup_domain_full(query->domain, query->name_len); |
@@ -713,7 +760,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, | |||
713 | mlog(0, "disallow join as node %u does not " | 760 | mlog(0, "disallow join as node %u does not " |
714 | "have node %u in its nodemap\n", | 761 | "have node %u in its nodemap\n", |
715 | query->node_idx, nodenum); | 762 | query->node_idx, nodenum); |
716 | response = JOIN_DISALLOW; | 763 | response.packet.code = JOIN_DISALLOW; |
717 | goto unlock_respond; | 764 | goto unlock_respond; |
718 | } | 765 | } |
719 | } | 766 | } |
@@ -733,30 +780,48 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, | |||
733 | /*If this is a brand new context and we | 780 | /*If this is a brand new context and we |
734 | * haven't started our join process yet, then | 781 | * haven't started our join process yet, then |
735 | * the other node won the race. */ | 782 | * the other node won the race. */ |
736 | response = JOIN_OK_NO_MAP; | 783 | response.packet.code = JOIN_OK_NO_MAP; |
737 | } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { | 784 | } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { |
738 | /* Disallow parallel joins. */ | 785 | /* Disallow parallel joins. */ |
739 | response = JOIN_DISALLOW; | 786 | response.packet.code = JOIN_DISALLOW; |
740 | } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { | 787 | } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { |
741 | mlog(0, "node %u trying to join, but recovery " | 788 | mlog(0, "node %u trying to join, but recovery " |
742 | "is ongoing.\n", bit); | 789 | "is ongoing.\n", bit); |
743 | response = JOIN_DISALLOW; | 790 | response.packet.code = JOIN_DISALLOW; |
744 | } else if (test_bit(bit, dlm->recovery_map)) { | 791 | } else if (test_bit(bit, dlm->recovery_map)) { |
745 | mlog(0, "node %u trying to join, but it " | 792 | mlog(0, "node %u trying to join, but it " |
746 | "still needs recovery.\n", bit); | 793 | "still needs recovery.\n", bit); |
747 | response = JOIN_DISALLOW; | 794 | response.packet.code = JOIN_DISALLOW; |
748 | } else if (test_bit(bit, dlm->domain_map)) { | 795 | } else if (test_bit(bit, dlm->domain_map)) { |
749 | mlog(0, "node %u trying to join, but it " | 796 | mlog(0, "node %u trying to join, but it " |
750 | "is still in the domain! needs recovery?\n", | 797 | "is still in the domain! needs recovery?\n", |
751 | bit); | 798 | bit); |
752 | response = JOIN_DISALLOW; | 799 | response.packet.code = JOIN_DISALLOW; |
753 | } else { | 800 | } else { |
754 | /* Alright we're fully a part of this domain | 801 | /* Alright we're fully a part of this domain |
755 | * so we keep some state as to who's joining | 802 | * so we keep some state as to who's joining |
756 | * and indicate to him that needs to be fixed | 803 | * and indicate to him that needs to be fixed |
757 | * up. */ | 804 | * up. */ |
758 | response = JOIN_OK; | 805 | |
759 | __dlm_set_joining_node(dlm, query->node_idx); | 806 | /* Make sure we speak compatible locking protocols. */ |
807 | if (dlm_query_join_proto_check("DLM", bit, | ||
808 | &dlm->dlm_locking_proto, | ||
809 | &query->dlm_proto)) { | ||
810 | response.packet.code = | ||
811 | JOIN_PROTOCOL_MISMATCH; | ||
812 | } else if (dlm_query_join_proto_check("fs", bit, | ||
813 | &dlm->fs_locking_proto, | ||
814 | &query->fs_proto)) { | ||
815 | response.packet.code = | ||
816 | JOIN_PROTOCOL_MISMATCH; | ||
817 | } else { | ||
818 | response.packet.dlm_minor = | ||
819 | query->dlm_proto.pv_minor; | ||
820 | response.packet.fs_minor = | ||
821 | query->fs_proto.pv_minor; | ||
822 | response.packet.code = JOIN_OK; | ||
823 | __dlm_set_joining_node(dlm, query->node_idx); | ||
824 | } | ||
760 | } | 825 | } |
761 | 826 | ||
762 | spin_unlock(&dlm->spinlock); | 827 | spin_unlock(&dlm->spinlock); |
@@ -765,9 +830,9 @@ unlock_respond: | |||
765 | spin_unlock(&dlm_domain_lock); | 830 | spin_unlock(&dlm_domain_lock); |
766 | 831 | ||
767 | respond: | 832 | respond: |
768 | mlog(0, "We respond with %u\n", response); | 833 | mlog(0, "We respond with %u\n", response.packet.code); |
769 | 834 | ||
770 | return response; | 835 | return response.intval; |
771 | } | 836 | } |
772 | 837 | ||
773 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, | 838 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, |
@@ -899,10 +964,11 @@ static int dlm_send_join_cancels(struct dlm_ctxt *dlm, | |||
899 | 964 | ||
900 | static int dlm_request_join(struct dlm_ctxt *dlm, | 965 | static int dlm_request_join(struct dlm_ctxt *dlm, |
901 | int node, | 966 | int node, |
902 | enum dlm_query_join_response *response) | 967 | enum dlm_query_join_response_code *response) |
903 | { | 968 | { |
904 | int status, retval; | 969 | int status; |
905 | struct dlm_query_join_request join_msg; | 970 | struct dlm_query_join_request join_msg; |
971 | union dlm_query_join_response join_resp; | ||
906 | 972 | ||
907 | mlog(0, "querying node %d\n", node); | 973 | mlog(0, "querying node %d\n", node); |
908 | 974 | ||
@@ -910,12 +976,15 @@ static int dlm_request_join(struct dlm_ctxt *dlm, | |||
910 | join_msg.node_idx = dlm->node_num; | 976 | join_msg.node_idx = dlm->node_num; |
911 | join_msg.name_len = strlen(dlm->name); | 977 | join_msg.name_len = strlen(dlm->name); |
912 | memcpy(join_msg.domain, dlm->name, join_msg.name_len); | 978 | memcpy(join_msg.domain, dlm->name, join_msg.name_len); |
979 | join_msg.dlm_proto = dlm->dlm_locking_proto; | ||
980 | join_msg.fs_proto = dlm->fs_locking_proto; | ||
913 | 981 | ||
914 | /* copy live node map to join message */ | 982 | /* copy live node map to join message */ |
915 | byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); | 983 | byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); |
916 | 984 | ||
917 | status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, | 985 | status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, |
918 | sizeof(join_msg), node, &retval); | 986 | sizeof(join_msg), node, |
987 | &join_resp.intval); | ||
919 | if (status < 0 && status != -ENOPROTOOPT) { | 988 | if (status < 0 && status != -ENOPROTOOPT) { |
920 | mlog_errno(status); | 989 | mlog_errno(status); |
921 | goto bail; | 990 | goto bail; |
@@ -928,14 +997,41 @@ static int dlm_request_join(struct dlm_ctxt *dlm, | |||
928 | if (status == -ENOPROTOOPT) { | 997 | if (status == -ENOPROTOOPT) { |
929 | status = 0; | 998 | status = 0; |
930 | *response = JOIN_OK_NO_MAP; | 999 | *response = JOIN_OK_NO_MAP; |
931 | } else if (retval == JOIN_DISALLOW || | 1000 | } else if (join_resp.packet.code == JOIN_DISALLOW || |
932 | retval == JOIN_OK || | 1001 | join_resp.packet.code == JOIN_OK_NO_MAP) { |
933 | retval == JOIN_OK_NO_MAP) { | 1002 | *response = join_resp.packet.code; |
934 | *response = retval; | 1003 | } else if (join_resp.packet.code == JOIN_PROTOCOL_MISMATCH) { |
1004 | mlog(ML_NOTICE, | ||
1005 | "This node requested DLM locking protocol %u.%u and " | ||
1006 | "filesystem locking protocol %u.%u. At least one of " | ||
1007 | "the protocol versions on node %d is not compatible, " | ||
1008 | "disconnecting\n", | ||
1009 | dlm->dlm_locking_proto.pv_major, | ||
1010 | dlm->dlm_locking_proto.pv_minor, | ||
1011 | dlm->fs_locking_proto.pv_major, | ||
1012 | dlm->fs_locking_proto.pv_minor, | ||
1013 | node); | ||
1014 | status = -EPROTO; | ||
1015 | *response = join_resp.packet.code; | ||
1016 | } else if (join_resp.packet.code == JOIN_OK) { | ||
1017 | *response = join_resp.packet.code; | ||
1018 | /* Use the same locking protocol as the remote node */ | ||
1019 | dlm->dlm_locking_proto.pv_minor = | ||
1020 | join_resp.packet.dlm_minor; | ||
1021 | dlm->fs_locking_proto.pv_minor = | ||
1022 | join_resp.packet.fs_minor; | ||
1023 | mlog(0, | ||
1024 | "Node %d responds JOIN_OK with DLM locking protocol " | ||
1025 | "%u.%u and fs locking protocol %u.%u\n", | ||
1026 | node, | ||
1027 | dlm->dlm_locking_proto.pv_major, | ||
1028 | dlm->dlm_locking_proto.pv_minor, | ||
1029 | dlm->fs_locking_proto.pv_major, | ||
1030 | dlm->fs_locking_proto.pv_minor); | ||
935 | } else { | 1031 | } else { |
936 | status = -EINVAL; | 1032 | status = -EINVAL; |
937 | mlog(ML_ERROR, "invalid response %d from node %u\n", retval, | 1033 | mlog(ML_ERROR, "invalid response %d from node %u\n", |
938 | node); | 1034 | join_resp.packet.code, node); |
939 | } | 1035 | } |
940 | 1036 | ||
941 | mlog(0, "status %d, node %d response is %d\n", status, node, | 1037 | mlog(0, "status %d, node %d response is %d\n", status, node, |
@@ -1008,7 +1104,7 @@ struct domain_join_ctxt { | |||
1008 | 1104 | ||
1009 | static int dlm_should_restart_join(struct dlm_ctxt *dlm, | 1105 | static int dlm_should_restart_join(struct dlm_ctxt *dlm, |
1010 | struct domain_join_ctxt *ctxt, | 1106 | struct domain_join_ctxt *ctxt, |
1011 | enum dlm_query_join_response response) | 1107 | enum dlm_query_join_response_code response) |
1012 | { | 1108 | { |
1013 | int ret; | 1109 | int ret; |
1014 | 1110 | ||
@@ -1034,7 +1130,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | |||
1034 | { | 1130 | { |
1035 | int status = 0, tmpstat, node; | 1131 | int status = 0, tmpstat, node; |
1036 | struct domain_join_ctxt *ctxt; | 1132 | struct domain_join_ctxt *ctxt; |
1037 | enum dlm_query_join_response response = JOIN_DISALLOW; | 1133 | enum dlm_query_join_response_code response = JOIN_DISALLOW; |
1038 | 1134 | ||
1039 | mlog_entry("%p", dlm); | 1135 | mlog_entry("%p", dlm); |
1040 | 1136 | ||
@@ -1450,10 +1546,38 @@ leave: | |||
1450 | } | 1546 | } |
1451 | 1547 | ||
1452 | /* | 1548 | /* |
1453 | * dlm_register_domain: one-time setup per "domain" | 1549 | * Compare a requested locking protocol version against the current one. |
1550 | * | ||
1551 | * If the major numbers are different, they are incompatible. | ||
1552 | * If the current minor is greater than the request, they are incompatible. | ||
1553 | * If the current minor is less than or equal to the request, they are | ||
1554 | * compatible, and the requester should run at the current minor version. | ||
1555 | */ | ||
1556 | static int dlm_protocol_compare(struct dlm_protocol_version *existing, | ||
1557 | struct dlm_protocol_version *request) | ||
1558 | { | ||
1559 | if (existing->pv_major != request->pv_major) | ||
1560 | return 1; | ||
1561 | |||
1562 | if (existing->pv_minor > request->pv_minor) | ||
1563 | return 1; | ||
1564 | |||
1565 | if (existing->pv_minor < request->pv_minor) | ||
1566 | request->pv_minor = existing->pv_minor; | ||
1567 | |||
1568 | return 0; | ||
1569 | } | ||
1570 | |||
1571 | /* | ||
1572 | * dlm_register_domain: one-time setup per "domain". | ||
1573 | * | ||
1574 | * The filesystem passes in the requested locking version via proto. | ||
1575 | * If registration was successful, proto will contain the negotiated | ||
1576 | * locking protocol. | ||
1454 | */ | 1577 | */ |
1455 | struct dlm_ctxt * dlm_register_domain(const char *domain, | 1578 | struct dlm_ctxt * dlm_register_domain(const char *domain, |
1456 | u32 key) | 1579 | u32 key, |
1580 | struct dlm_protocol_version *fs_proto) | ||
1457 | { | 1581 | { |
1458 | int ret; | 1582 | int ret; |
1459 | struct dlm_ctxt *dlm = NULL; | 1583 | struct dlm_ctxt *dlm = NULL; |
@@ -1496,6 +1620,15 @@ retry: | |||
1496 | goto retry; | 1620 | goto retry; |
1497 | } | 1621 | } |
1498 | 1622 | ||
1623 | if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) { | ||
1624 | mlog(ML_ERROR, | ||
1625 | "Requested locking protocol version is not " | ||
1626 | "compatible with already registered domain " | ||
1627 | "\"%s\"\n", domain); | ||
1628 | ret = -EPROTO; | ||
1629 | goto leave; | ||
1630 | } | ||
1631 | |||
1499 | __dlm_get(dlm); | 1632 | __dlm_get(dlm); |
1500 | dlm->num_joins++; | 1633 | dlm->num_joins++; |
1501 | 1634 | ||
@@ -1526,6 +1659,13 @@ retry: | |||
1526 | list_add_tail(&dlm->list, &dlm_domains); | 1659 | list_add_tail(&dlm->list, &dlm_domains); |
1527 | spin_unlock(&dlm_domain_lock); | 1660 | spin_unlock(&dlm_domain_lock); |
1528 | 1661 | ||
1662 | /* | ||
1663 | * Pass the locking protocol version into the join. If the join | ||
1664 | * succeeds, it will have the negotiated protocol set. | ||
1665 | */ | ||
1666 | dlm->dlm_locking_proto = dlm_protocol; | ||
1667 | dlm->fs_locking_proto = *fs_proto; | ||
1668 | |||
1529 | ret = dlm_join_domain(dlm); | 1669 | ret = dlm_join_domain(dlm); |
1530 | if (ret) { | 1670 | if (ret) { |
1531 | mlog_errno(ret); | 1671 | mlog_errno(ret); |
@@ -1533,6 +1673,9 @@ retry: | |||
1533 | goto leave; | 1673 | goto leave; |
1534 | } | 1674 | } |
1535 | 1675 | ||
1676 | /* Tell the caller what locking protocol we negotiated */ | ||
1677 | *fs_proto = dlm->fs_locking_proto; | ||
1678 | |||
1536 | ret = 0; | 1679 | ret = 0; |
1537 | leave: | 1680 | leave: |
1538 | if (new_ctxt) | 1681 | if (new_ctxt) |
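dlm_protocol_compare() above encodes the rule spelled out in its comment: the majors must be identical, a joiner whose minor is lower than the domain's is refused, and a joiner whose minor is higher drops down to the domain's minor. A stand-alone rerun of that logic with a couple of concrete pairs (the struct and function simply mirror the hunk; this is a sketch, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	struct proto { uint8_t major, minor; };

	/* Mirrors dlm_protocol_compare(): 0 = compatible (request may be
	 * lowered in place), nonzero = incompatible. */
	static int proto_compare(const struct proto *existing, struct proto *request)
	{
		if (existing->major != request->major)
			return 1;
		if (existing->minor > request->minor)
			return 1;
		if (existing->minor < request->minor)
			request->minor = existing->minor;
		return 0;
	}

	int main(void)
	{
		struct proto domain   = { 1, 0 };
		struct proto newer    = { 1, 2 };	/* joins, but runs at 1.0 */
		struct proto othermaj = { 2, 0 };	/* different major: refused */

		printf("1.2 against 1.0 -> %s (now %u.%u)\n",
		       proto_compare(&domain, &newer) ? "refused" : "ok",
		       newer.major, newer.minor);
		printf("2.0 against 1.0 -> %s\n",
		       proto_compare(&domain, &othermaj) ? "refused" : "ok");
		return 0;
	}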
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 6639baab0798..61a000f8524c 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c | |||
@@ -60,6 +60,8 @@ | |||
60 | #define MLOG_MASK_PREFIX ML_DLMFS | 60 | #define MLOG_MASK_PREFIX ML_DLMFS |
61 | #include "cluster/masklog.h" | 61 | #include "cluster/masklog.h" |
62 | 62 | ||
63 | #include "ocfs2_lockingver.h" | ||
64 | |||
63 | static const struct super_operations dlmfs_ops; | 65 | static const struct super_operations dlmfs_ops; |
64 | static const struct file_operations dlmfs_file_operations; | 66 | static const struct file_operations dlmfs_file_operations; |
65 | static const struct inode_operations dlmfs_dir_inode_operations; | 67 | static const struct inode_operations dlmfs_dir_inode_operations; |
@@ -70,6 +72,16 @@ static struct kmem_cache *dlmfs_inode_cache; | |||
70 | struct workqueue_struct *user_dlm_worker; | 72 | struct workqueue_struct *user_dlm_worker; |
71 | 73 | ||
72 | /* | 74 | /* |
75 | * This is the userdlmfs locking protocol version. | ||
76 | * | ||
77 | * See fs/ocfs2/dlmglue.c for more details on locking versions. | ||
78 | */ | ||
79 | static const struct dlm_protocol_version user_locking_protocol = { | ||
80 | .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, | ||
81 | .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, | ||
82 | }; | ||
83 | |||
84 | /* | ||
73 | * decodes a set of open flags into a valid lock level and a set of flags. | 85 | * decodes a set of open flags into a valid lock level and a set of flags. |
74 | * returns < 0 if we have invalid flags | 86 | * returns < 0 if we have invalid flags |
75 | * flags which mean something to us: | 87 | * flags which mean something to us: |
@@ -416,6 +428,7 @@ static int dlmfs_mkdir(struct inode * dir, | |||
416 | struct qstr *domain = &dentry->d_name; | 428 | struct qstr *domain = &dentry->d_name; |
417 | struct dlmfs_inode_private *ip; | 429 | struct dlmfs_inode_private *ip; |
418 | struct dlm_ctxt *dlm; | 430 | struct dlm_ctxt *dlm; |
431 | struct dlm_protocol_version proto = user_locking_protocol; | ||
419 | 432 | ||
420 | mlog(0, "mkdir %.*s\n", domain->len, domain->name); | 433 | mlog(0, "mkdir %.*s\n", domain->len, domain->name); |
421 | 434 | ||
@@ -435,7 +448,7 @@ static int dlmfs_mkdir(struct inode * dir, | |||
435 | 448 | ||
436 | ip = DLMFS_I(inode); | 449 | ip = DLMFS_I(inode); |
437 | 450 | ||
438 | dlm = user_dlm_register_context(domain); | 451 | dlm = user_dlm_register_context(domain, &proto); |
439 | if (IS_ERR(dlm)) { | 452 | if (IS_ERR(dlm)) { |
440 | status = PTR_ERR(dlm); | 453 | status = PTR_ERR(dlm); |
441 | mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", | 454 | mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", |
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c index 7d2f578b267d..4cb1d3dae250 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlm/userdlm.c | |||
@@ -645,7 +645,8 @@ bail: | |||
645 | return status; | 645 | return status; |
646 | } | 646 | } |
647 | 647 | ||
648 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name) | 648 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name, |
649 | struct dlm_protocol_version *proto) | ||
649 | { | 650 | { |
650 | struct dlm_ctxt *dlm; | 651 | struct dlm_ctxt *dlm; |
651 | u32 dlm_key; | 652 | u32 dlm_key; |
@@ -661,7 +662,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name) | |||
661 | 662 | ||
662 | snprintf(domain, name->len + 1, "%.*s", name->len, name->name); | 663 | snprintf(domain, name->len + 1, "%.*s", name->len, name->name); |
663 | 664 | ||
664 | dlm = dlm_register_domain(domain, dlm_key); | 665 | dlm = dlm_register_domain(domain, dlm_key, proto); |
665 | if (IS_ERR(dlm)) | 666 | if (IS_ERR(dlm)) |
666 | mlog_errno(PTR_ERR(dlm)); | 667 | mlog_errno(PTR_ERR(dlm)); |
667 | 668 | ||
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h index c400e93bbf79..39ec27738499 100644 --- a/fs/ocfs2/dlm/userdlm.h +++ b/fs/ocfs2/dlm/userdlm.h | |||
@@ -83,7 +83,8 @@ void user_dlm_write_lvb(struct inode *inode, | |||
83 | void user_dlm_read_lvb(struct inode *inode, | 83 | void user_dlm_read_lvb(struct inode *inode, |
84 | char *val, | 84 | char *val, |
85 | unsigned int len); | 85 | unsigned int len); |
86 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name); | 86 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name, |
87 | struct dlm_protocol_version *proto); | ||
87 | void user_dlm_unregister_context(struct dlm_ctxt *dlm); | 88 | void user_dlm_unregister_context(struct dlm_ctxt *dlm); |
88 | 89 | ||
89 | struct dlmfs_inode_private { | 90 | struct dlmfs_inode_private { |
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 3867244fb144..351130c9b734 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <cluster/masklog.h> | 43 | #include <cluster/masklog.h> |
44 | 44 | ||
45 | #include "ocfs2.h" | 45 | #include "ocfs2.h" |
46 | #include "ocfs2_lockingver.h" | ||
46 | 47 | ||
47 | #include "alloc.h" | 48 | #include "alloc.h" |
48 | #include "dcache.h" | 49 | #include "dcache.h" |
@@ -258,6 +259,31 @@ static struct ocfs2_lock_res_ops ocfs2_flock_lops = { | |||
258 | .flags = 0, | 259 | .flags = 0, |
259 | }; | 260 | }; |
260 | 261 | ||
262 | /* | ||
263 | * This is the filesystem locking protocol version. | ||
264 | * | ||
265 | * Whenever the filesystem does new things with locks (adds or removes a | ||
266 | * lock, orders them differently, does different things underneath a lock), | ||
267 | * the version must be changed. The protocol is negotiated when joining | ||
268 | * the dlm domain. A node may join the domain if its major version is | ||
269 | * identical to all other nodes and its minor version is greater than | ||
270 | * or equal to all other nodes. When its minor version is greater than | ||
271 | * the other nodes, it will run at the minor version specified by the | ||
272 | * other nodes. | ||
273 | * | ||
274 | * If a locking change is made that will not be compatible with older | ||
275 | * versions, the major number must be increased and the minor version set | ||
276 | * to zero. If a change merely adds a behavior that can be disabled when | ||
277 | * speaking to older versions, the minor version must be increased. If a | ||
278 | * change adds a fully backwards compatible change (eg, LVB changes that | ||
279 | * are just ignored by older versions), the version does not need to be | ||
280 | * updated. | ||
281 | */ | ||
282 | const struct dlm_protocol_version ocfs2_locking_protocol = { | ||
283 | .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, | ||
284 | .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, | ||
285 | }; | ||
286 | |||
261 | static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) | 287 | static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) |
262 | { | 288 | { |
263 | return lockres->l_type == OCFS2_LOCK_TYPE_META || | 289 | return lockres->l_type == OCFS2_LOCK_TYPE_META || |
@@ -2506,7 +2532,8 @@ int ocfs2_dlm_init(struct ocfs2_super *osb) | |||
2506 | dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str)); | 2532 | dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str)); |
2507 | 2533 | ||
2508 | /* for now, uuid == domain */ | 2534 | /* for now, uuid == domain */ |
2509 | dlm = dlm_register_domain(osb->uuid_str, dlm_key); | 2535 | dlm = dlm_register_domain(osb->uuid_str, dlm_key, |
2536 | &osb->osb_locking_proto); | ||
2510 | if (IS_ERR(dlm)) { | 2537 | if (IS_ERR(dlm)) { |
2511 | status = PTR_ERR(dlm); | 2538 | status = PTR_ERR(dlm); |
2512 | mlog_errno(status); | 2539 | mlog_errno(status); |
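On the caller side, both dlmfs_mkdir() (which copies the const user_locking_protocol into a local) and ocfs2_dlm_init() (which passes &osb->osb_locking_proto, seeded from ocfs2_locking_protocol in super.c) hand dlm_register_domain() a writable copy precisely because the negotiated minor is written back through that pointer on success. A stand-alone mock of that round trip (the names and the register function here are illustrative, not the kernel API):

	#include <stdint.h>
	#include <stdio.h>

	struct proto { uint8_t major, minor; };

	static const struct proto compiled_in = { 1, 0 };	/* like ocfs2_locking_protocol */

	/* Pretend the running domain has already settled on 1.0. */
	static int mock_register_domain(struct proto *fs_proto)
	{
		const struct proto domain = { 1, 0 };

		if (fs_proto->major != domain.major || fs_proto->minor < domain.minor)
			return -1;			/* incompatible */
		fs_proto->minor = domain.minor;		/* negotiated result written back */
		return 0;
	}

	int main(void)
	{
		struct proto proto = compiled_in;	/* writable per-mount copy */

		if (mock_register_domain(&proto) == 0)
			printf("running fs locking protocol %u.%u\n",
			       proto.major, proto.minor);
		return 0;
	}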
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 5f17243ba501..1d5b0699d0a9 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h | |||
@@ -116,4 +116,5 @@ void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb); | |||
116 | struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); | 116 | struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); |
117 | void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug); | 117 | void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug); |
118 | 118 | ||
119 | extern const struct dlm_protocol_version ocfs2_locking_protocol; | ||
119 | #endif /* DLMGLUE_H */ | 120 | #endif /* DLMGLUE_H */ |
diff --git a/fs/ocfs2/endian.h b/fs/ocfs2/endian.h deleted file mode 100644 index 1942e09f6ee5..000000000000 --- a/fs/ocfs2/endian.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * Copyright (C) 2005 Oracle. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public | ||
8 | * License as published by the Free Software Foundation; either | ||
9 | * version 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public | ||
17 | * License along with this program; if not, write to the | ||
18 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
19 | * Boston, MA 021110-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #ifndef OCFS2_ENDIAN_H | ||
23 | #define OCFS2_ENDIAN_H | ||
24 | |||
25 | static inline void le16_add_cpu(__le16 *var, u16 val) | ||
26 | { | ||
27 | *var = cpu_to_le16(le16_to_cpu(*var) + val); | ||
28 | } | ||
29 | |||
30 | static inline void le32_add_cpu(__le32 *var, u32 val) | ||
31 | { | ||
32 | *var = cpu_to_le32(le32_to_cpu(*var) + val); | ||
33 | } | ||
34 | |||
35 | static inline void le64_add_cpu(__le64 *var, u64 val) | ||
36 | { | ||
37 | *var = cpu_to_le64(le64_to_cpu(*var) + val); | ||
38 | } | ||
39 | |||
40 | static inline void be32_add_cpu(__be32 *var, u32 val) | ||
41 | { | ||
42 | *var = cpu_to_be32(be32_to_cpu(*var) + val); | ||
43 | } | ||
44 | |||
45 | #endif /* OCFS2_ENDIAN_H */ | ||
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index d08480580470..6546cef212e3 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -43,7 +43,6 @@ | |||
43 | #include "dlm/dlmapi.h" | 43 | #include "dlm/dlmapi.h" |
44 | 44 | ||
45 | #include "ocfs2_fs.h" | 45 | #include "ocfs2_fs.h" |
46 | #include "endian.h" | ||
47 | #include "ocfs2_lockid.h" | 46 | #include "ocfs2_lockid.h" |
48 | 47 | ||
49 | /* Most user visible OCFS2 inodes will have very few pieces of | 48 | /* Most user visible OCFS2 inodes will have very few pieces of |
@@ -251,6 +250,7 @@ struct ocfs2_super | |||
251 | struct ocfs2_lock_res osb_rename_lockres; | 250 | struct ocfs2_lock_res osb_rename_lockres; |
252 | struct dlm_eviction_cb osb_eviction_cb; | 251 | struct dlm_eviction_cb osb_eviction_cb; |
253 | struct ocfs2_dlm_debug *osb_dlm_debug; | 252 | struct ocfs2_dlm_debug *osb_dlm_debug; |
253 | struct dlm_protocol_version osb_locking_proto; | ||
254 | 254 | ||
255 | struct dentry *osb_debug_root; | 255 | struct dentry *osb_debug_root; |
256 | 256 | ||
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h new file mode 100644 index 000000000000..82d5eeac0fff --- /dev/null +++ b/fs/ocfs2/ocfs2_lockingver.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * ocfs2_lockingver.h | ||
5 | * | ||
6 | * Defines OCFS2 Locking version values. | ||
7 | * | ||
8 | * Copyright (C) 2008 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License, version 2, as published by the Free Software Foundation. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | */ | ||
19 | |||
20 | #ifndef OCFS2_LOCKINGVER_H | ||
21 | #define OCFS2_LOCKINGVER_H | ||
22 | |||
23 | /* | ||
24 | * The protocol version for ocfs2 cluster locking. See dlmglue.c for | ||
25 | * more details. | ||
26 | */ | ||
27 | #define OCFS2_LOCKING_PROTOCOL_MAJOR 1 | ||
28 | #define OCFS2_LOCKING_PROTOCOL_MINOR 0 | ||
29 | |||
30 | #endif /* OCFS2_LOCKINGVER_H */ | ||
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 01fe40ee5ea9..bec75aff3d9f 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1355,6 +1355,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1355 | sb->s_fs_info = osb; | 1355 | sb->s_fs_info = osb; |
1356 | sb->s_op = &ocfs2_sops; | 1356 | sb->s_op = &ocfs2_sops; |
1357 | sb->s_export_op = &ocfs2_export_ops; | 1357 | sb->s_export_op = &ocfs2_export_ops; |
1358 | osb->osb_locking_proto = ocfs2_locking_protocol; | ||
1358 | sb->s_time_gran = 1; | 1359 | sb->s_time_gran = 1; |
1359 | sb->s_flags |= MS_NOATIME; | 1360 | sb->s_flags |= MS_NOATIME; |
1360 | /* this is needed to support O_LARGEFILE */ | 1361 | /* this is needed to support O_LARGEFILE */ |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -127,10 +127,10 @@ asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf) | |||
127 | error = user_path_walk(path, &nd); | 127 | error = user_path_walk(path, &nd); |
128 | if (!error) { | 128 | if (!error) { |
129 | struct statfs tmp; | 129 | struct statfs tmp; |
130 | error = vfs_statfs_native(nd.dentry, &tmp); | 130 | error = vfs_statfs_native(nd.path.dentry, &tmp); |
131 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) | 131 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) |
132 | error = -EFAULT; | 132 | error = -EFAULT; |
133 | path_release(&nd); | 133 | path_put(&nd.path); |
134 | } | 134 | } |
135 | return error; | 135 | return error; |
136 | } | 136 | } |
@@ -146,10 +146,10 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64 | |||
146 | error = user_path_walk(path, &nd); | 146 | error = user_path_walk(path, &nd); |
147 | if (!error) { | 147 | if (!error) { |
148 | struct statfs64 tmp; | 148 | struct statfs64 tmp; |
149 | error = vfs_statfs64(nd.dentry, &tmp); | 149 | error = vfs_statfs64(nd.path.dentry, &tmp); |
150 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) | 150 | if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) |
151 | error = -EFAULT; | 151 | error = -EFAULT; |
152 | path_release(&nd); | 152 | path_put(&nd.path); |
153 | } | 153 | } |
154 | return error; | 154 | return error; |
155 | } | 155 | } |
@@ -233,7 +233,7 @@ static long do_sys_truncate(const char __user * path, loff_t length) | |||
233 | error = user_path_walk(path, &nd); | 233 | error = user_path_walk(path, &nd); |
234 | if (error) | 234 | if (error) |
235 | goto out; | 235 | goto out; |
236 | inode = nd.dentry->d_inode; | 236 | inode = nd.path.dentry->d_inode; |
237 | 237 | ||
238 | /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ | 238 | /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ |
239 | error = -EISDIR; | 239 | error = -EISDIR; |
@@ -271,13 +271,13 @@ static long do_sys_truncate(const char __user * path, loff_t length) | |||
271 | error = locks_verify_truncate(inode, NULL, length); | 271 | error = locks_verify_truncate(inode, NULL, length); |
272 | if (!error) { | 272 | if (!error) { |
273 | DQUOT_INIT(inode); | 273 | DQUOT_INIT(inode); |
274 | error = do_truncate(nd.dentry, length, 0, NULL); | 274 | error = do_truncate(nd.path.dentry, length, 0, NULL); |
275 | } | 275 | } |
276 | 276 | ||
277 | put_write_and_out: | 277 | put_write_and_out: |
278 | put_write_access(inode); | 278 | put_write_access(inode); |
279 | dput_and_out: | 279 | dput_and_out: |
280 | path_release(&nd); | 280 | path_put(&nd.path); |
281 | out: | 281 | out: |
282 | return error; | 282 | return error; |
283 | } | 283 | } |
@@ -455,14 +455,14 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) | |||
455 | res = vfs_permission(&nd, mode); | 455 | res = vfs_permission(&nd, mode); |
456 | /* SuS v2 requires we report a read only fs too */ | 456 | /* SuS v2 requires we report a read only fs too */ |
457 | if(res || !(mode & S_IWOTH) || | 457 | if(res || !(mode & S_IWOTH) || |
458 | special_file(nd.dentry->d_inode->i_mode)) | 458 | special_file(nd.path.dentry->d_inode->i_mode)) |
459 | goto out_path_release; | 459 | goto out_path_release; |
460 | 460 | ||
461 | if(IS_RDONLY(nd.dentry->d_inode)) | 461 | if(IS_RDONLY(nd.path.dentry->d_inode)) |
462 | res = -EROFS; | 462 | res = -EROFS; |
463 | 463 | ||
464 | out_path_release: | 464 | out_path_release: |
465 | path_release(&nd); | 465 | path_put(&nd.path); |
466 | out: | 466 | out: |
467 | current->fsuid = old_fsuid; | 467 | current->fsuid = old_fsuid; |
468 | current->fsgid = old_fsgid; | 468 | current->fsgid = old_fsgid; |
@@ -490,10 +490,10 @@ asmlinkage long sys_chdir(const char __user * filename) | |||
490 | if (error) | 490 | if (error) |
491 | goto dput_and_out; | 491 | goto dput_and_out; |
492 | 492 | ||
493 | set_fs_pwd(current->fs, nd.mnt, nd.dentry); | 493 | set_fs_pwd(current->fs, &nd.path); |
494 | 494 | ||
495 | dput_and_out: | 495 | dput_and_out: |
496 | path_release(&nd); | 496 | path_put(&nd.path); |
497 | out: | 497 | out: |
498 | return error; | 498 | return error; |
499 | } | 499 | } |
@@ -501,9 +501,7 @@ out: | |||
501 | asmlinkage long sys_fchdir(unsigned int fd) | 501 | asmlinkage long sys_fchdir(unsigned int fd) |
502 | { | 502 | { |
503 | struct file *file; | 503 | struct file *file; |
504 | struct dentry *dentry; | ||
505 | struct inode *inode; | 504 | struct inode *inode; |
506 | struct vfsmount *mnt; | ||
507 | int error; | 505 | int error; |
508 | 506 | ||
509 | error = -EBADF; | 507 | error = -EBADF; |
@@ -511,9 +509,7 @@ asmlinkage long sys_fchdir(unsigned int fd) | |||
511 | if (!file) | 509 | if (!file) |
512 | goto out; | 510 | goto out; |
513 | 511 | ||
514 | dentry = file->f_path.dentry; | 512 | inode = file->f_path.dentry->d_inode; |
515 | mnt = file->f_path.mnt; | ||
516 | inode = dentry->d_inode; | ||
517 | 513 | ||
518 | error = -ENOTDIR; | 514 | error = -ENOTDIR; |
519 | if (!S_ISDIR(inode->i_mode)) | 515 | if (!S_ISDIR(inode->i_mode)) |
@@ -521,7 +517,7 @@ asmlinkage long sys_fchdir(unsigned int fd) | |||
521 | 517 | ||
522 | error = file_permission(file, MAY_EXEC); | 518 | error = file_permission(file, MAY_EXEC); |
523 | if (!error) | 519 | if (!error) |
524 | set_fs_pwd(current->fs, mnt, dentry); | 520 | set_fs_pwd(current->fs, &file->f_path); |
525 | out_putf: | 521 | out_putf: |
526 | fput(file); | 522 | fput(file); |
527 | out: | 523 | out: |
@@ -545,11 +541,11 @@ asmlinkage long sys_chroot(const char __user * filename) | |||
545 | if (!capable(CAP_SYS_CHROOT)) | 541 | if (!capable(CAP_SYS_CHROOT)) |
546 | goto dput_and_out; | 542 | goto dput_and_out; |
547 | 543 | ||
548 | set_fs_root(current->fs, nd.mnt, nd.dentry); | 544 | set_fs_root(current->fs, &nd.path); |
549 | set_fs_altroot(); | 545 | set_fs_altroot(); |
550 | error = 0; | 546 | error = 0; |
551 | dput_and_out: | 547 | dput_and_out: |
552 | path_release(&nd); | 548 | path_put(&nd.path); |
553 | out: | 549 | out: |
554 | return error; | 550 | return error; |
555 | } | 551 | } |
@@ -602,7 +598,7 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename, | |||
602 | error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd); | 598 | error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd); |
603 | if (error) | 599 | if (error) |
604 | goto out; | 600 | goto out; |
605 | inode = nd.dentry->d_inode; | 601 | inode = nd.path.dentry->d_inode; |
606 | 602 | ||
607 | error = -EROFS; | 603 | error = -EROFS; |
608 | if (IS_RDONLY(inode)) | 604 | if (IS_RDONLY(inode)) |
@@ -617,11 +613,11 @@ asmlinkage long sys_fchmodat(int dfd, const char __user *filename, | |||
617 | mode = inode->i_mode; | 613 | mode = inode->i_mode; |
618 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); | 614 | newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); |
619 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; | 615 | newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; |
620 | error = notify_change(nd.dentry, &newattrs); | 616 | error = notify_change(nd.path.dentry, &newattrs); |
621 | mutex_unlock(&inode->i_mutex); | 617 | mutex_unlock(&inode->i_mutex); |
622 | 618 | ||
623 | dput_and_out: | 619 | dput_and_out: |
624 | path_release(&nd); | 620 | path_put(&nd.path); |
625 | out: | 621 | out: |
626 | return error; | 622 | return error; |
627 | } | 623 | } |
@@ -675,8 +671,8 @@ asmlinkage long sys_chown(const char __user * filename, uid_t user, gid_t group) | |||
675 | error = user_path_walk(filename, &nd); | 671 | error = user_path_walk(filename, &nd); |
676 | if (error) | 672 | if (error) |
677 | goto out; | 673 | goto out; |
678 | error = chown_common(nd.dentry, user, group); | 674 | error = chown_common(nd.path.dentry, user, group); |
679 | path_release(&nd); | 675 | path_put(&nd.path); |
680 | out: | 676 | out: |
681 | return error; | 677 | return error; |
682 | } | 678 | } |
@@ -695,8 +691,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user, | |||
695 | error = __user_walk_fd(dfd, filename, follow, &nd); | 691 | error = __user_walk_fd(dfd, filename, follow, &nd); |
696 | if (error) | 692 | if (error) |
697 | goto out; | 693 | goto out; |
698 | error = chown_common(nd.dentry, user, group); | 694 | error = chown_common(nd.path.dentry, user, group); |
699 | path_release(&nd); | 695 | path_put(&nd.path); |
700 | out: | 696 | out: |
701 | return error; | 697 | return error; |
702 | } | 698 | } |
@@ -709,8 +705,8 @@ asmlinkage long sys_lchown(const char __user * filename, uid_t user, gid_t group | |||
709 | error = user_path_walk_link(filename, &nd); | 705 | error = user_path_walk_link(filename, &nd); |
710 | if (error) | 706 | if (error) |
711 | goto out; | 707 | goto out; |
712 | error = chown_common(nd.dentry, user, group); | 708 | error = chown_common(nd.path.dentry, user, group); |
713 | path_release(&nd); | 709 | path_put(&nd.path); |
714 | out: | 710 | out: |
715 | return error; | 711 | return error; |
716 | } | 712 | } |
@@ -863,7 +859,7 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry | |||
863 | goto out; | 859 | goto out; |
864 | if (IS_ERR(dentry)) | 860 | if (IS_ERR(dentry)) |
865 | goto out_err; | 861 | goto out_err; |
866 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt), | 862 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), |
867 | nd->intent.open.flags - 1, | 863 | nd->intent.open.flags - 1, |
868 | nd->intent.open.file, | 864 | nd->intent.open.file, |
869 | open); | 865 | open); |
@@ -891,9 +887,10 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) | |||
891 | filp = nd->intent.open.file; | 887 | filp = nd->intent.open.file; |
892 | /* Has the filesystem initialised the file for us? */ | 888 | /* Has the filesystem initialised the file for us? */ |
893 | if (filp->f_path.dentry == NULL) | 889 | if (filp->f_path.dentry == NULL) |
894 | filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL); | 890 | filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp, |
891 | NULL); | ||
895 | else | 892 | else |
896 | path_release(nd); | 893 | path_put(&nd->path); |
897 | return filp; | 894 | return filp; |
898 | } | 895 | } |
899 | 896 | ||
@@ -991,7 +988,7 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) | |||
991 | files->next_fd = fd; | 988 | files->next_fd = fd; |
992 | } | 989 | } |
993 | 990 | ||
994 | void fastcall put_unused_fd(unsigned int fd) | 991 | void put_unused_fd(unsigned int fd) |
995 | { | 992 | { |
996 | struct files_struct *files = current->files; | 993 | struct files_struct *files = current->files; |
997 | spin_lock(&files->file_lock); | 994 | spin_lock(&files->file_lock); |
@@ -1014,7 +1011,7 @@ EXPORT_SYMBOL(put_unused_fd); | |||
1014 | * will follow. | 1011 | * will follow. |
1015 | */ | 1012 | */ |
1016 | 1013 | ||
1017 | void fastcall fd_install(unsigned int fd, struct file * file) | 1014 | void fd_install(unsigned int fd, struct file *file) |
1018 | { | 1015 | { |
1019 | struct files_struct *files = current->files; | 1016 | struct files_struct *files = current->files; |
1020 | struct fdtable *fdt; | 1017 | struct fdtable *fdt; |
@@ -1061,7 +1058,6 @@ asmlinkage long sys_open(const char __user *filename, int flags, int mode) | |||
1061 | prevent_tail_call(ret); | 1058 | prevent_tail_call(ret); |
1062 | return ret; | 1059 | return ret; |
1063 | } | 1060 | } |
1064 | EXPORT_UNUSED_SYMBOL_GPL(sys_open); /* To be deleted for 2.6.25 */ | ||
1065 | 1061 | ||
1066 | asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, | 1062 | asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, |
1067 | int mode) | 1063 | int mode) |
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index 6b7ff1618945..d17b4fd204e1 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c | |||
@@ -38,6 +38,8 @@ struct op_inode_info { | |||
38 | union op_inode_data u; | 38 | union op_inode_data u; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | static struct inode *openprom_iget(struct super_block *sb, ino_t ino); | ||
42 | |||
41 | static inline struct op_inode_info *OP_I(struct inode *inode) | 43 | static inline struct op_inode_info *OP_I(struct inode *inode) |
42 | { | 44 | { |
43 | return container_of(inode, struct op_inode_info, vfs_inode); | 45 | return container_of(inode, struct op_inode_info, vfs_inode); |
@@ -226,10 +228,10 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry | |||
226 | return ERR_PTR(-ENOENT); | 228 | return ERR_PTR(-ENOENT); |
227 | 229 | ||
228 | found: | 230 | found: |
229 | inode = iget(dir->i_sb, ino); | 231 | inode = openprom_iget(dir->i_sb, ino); |
230 | mutex_unlock(&op_mutex); | 232 | mutex_unlock(&op_mutex); |
231 | if (!inode) | 233 | if (IS_ERR(inode)) |
232 | return ERR_PTR(-EINVAL); | 234 | return ERR_CAST(inode); |
233 | ent_oi = OP_I(inode); | 235 | ent_oi = OP_I(inode); |
234 | ent_oi->type = ent_type; | 236 | ent_oi->type = ent_type; |
235 | ent_oi->u = ent_data; | 237 | ent_oi->u = ent_data; |
@@ -348,14 +350,23 @@ static void openprom_destroy_inode(struct inode *inode) | |||
348 | kmem_cache_free(op_inode_cachep, OP_I(inode)); | 350 | kmem_cache_free(op_inode_cachep, OP_I(inode)); |
349 | } | 351 | } |
350 | 352 | ||
351 | static void openprom_read_inode(struct inode * inode) | 353 | static struct inode *openprom_iget(struct super_block *sb, ino_t ino) |
352 | { | 354 | { |
353 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 355 | struct inode *inode; |
354 | if (inode->i_ino == OPENPROM_ROOT_INO) { | 356 | |
355 | inode->i_op = &openprom_inode_operations; | 357 | inode = iget_locked(sb, ino); |
356 | inode->i_fop = &openprom_operations; | 358 | if (!inode) |
357 | inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; | 359 | return ERR_PTR(-ENOMEM); |
360 | if (inode->i_state & I_NEW) { | ||
361 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | ||
362 | if (inode->i_ino == OPENPROM_ROOT_INO) { | ||
363 | inode->i_op = &openprom_inode_operations; | ||
364 | inode->i_fop = &openprom_operations; | ||
365 | inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; | ||
366 | } | ||
367 | unlock_new_inode(inode); | ||
358 | } | 368 | } |
369 | return inode; | ||
359 | } | 370 | } |
360 | 371 | ||
361 | static int openprom_remount(struct super_block *sb, int *flags, char *data) | 372 | static int openprom_remount(struct super_block *sb, int *flags, char *data) |
@@ -367,7 +378,6 @@ static int openprom_remount(struct super_block *sb, int *flags, char *data) | |||
367 | static const struct super_operations openprom_sops = { | 378 | static const struct super_operations openprom_sops = { |
368 | .alloc_inode = openprom_alloc_inode, | 379 | .alloc_inode = openprom_alloc_inode, |
369 | .destroy_inode = openprom_destroy_inode, | 380 | .destroy_inode = openprom_destroy_inode, |
370 | .read_inode = openprom_read_inode, | ||
371 | .statfs = simple_statfs, | 381 | .statfs = simple_statfs, |
372 | .remount_fs = openprom_remount, | 382 | .remount_fs = openprom_remount, |
373 | }; | 383 | }; |
@@ -376,6 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent) | |||
376 | { | 386 | { |
377 | struct inode *root_inode; | 387 | struct inode *root_inode; |
378 | struct op_inode_info *oi; | 388 | struct op_inode_info *oi; |
389 | int ret; | ||
379 | 390 | ||
380 | s->s_flags |= MS_NOATIME; | 391 | s->s_flags |= MS_NOATIME; |
381 | s->s_blocksize = 1024; | 392 | s->s_blocksize = 1024; |
@@ -383,9 +394,11 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent) | |||
383 | s->s_magic = OPENPROM_SUPER_MAGIC; | 394 | s->s_magic = OPENPROM_SUPER_MAGIC; |
384 | s->s_op = &openprom_sops; | 395 | s->s_op = &openprom_sops; |
385 | s->s_time_gran = 1; | 396 | s->s_time_gran = 1; |
386 | root_inode = iget(s, OPENPROM_ROOT_INO); | 397 | root_inode = openprom_iget(s, OPENPROM_ROOT_INO); |
387 | if (!root_inode) | 398 | if (IS_ERR(root_inode)) { |
399 | ret = PTR_ERR(root_inode); | ||
388 | goto out_no_root; | 400 | goto out_no_root; |
401 | } | ||
389 | 402 | ||
390 | oi = OP_I(root_inode); | 403 | oi = OP_I(root_inode); |
391 | oi->type = op_inode_node; | 404 | oi->type = op_inode_node; |
@@ -393,13 +406,15 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent) | |||
393 | 406 | ||
394 | s->s_root = d_alloc_root(root_inode); | 407 | s->s_root = d_alloc_root(root_inode); |
395 | if (!s->s_root) | 408 | if (!s->s_root) |
396 | goto out_no_root; | 409 | goto out_no_root_dentry; |
397 | return 0; | 410 | return 0; |
398 | 411 | ||
412 | out_no_root_dentry: | ||
413 | iput(root_inode); | ||
414 | ret = -ENOMEM; | ||
399 | out_no_root: | 415 | out_no_root: |
400 | printk("openprom_fill_super: get root inode failed\n"); | 416 | printk("openprom_fill_super: get root inode failed\n"); |
401 | iput(root_inode); | 417 | return ret; |
402 | return -ENOMEM; | ||
403 | } | 418 | } |
404 | 419 | ||
405 | static int openprom_get_sb(struct file_system_type *fs_type, | 420 | static int openprom_get_sb(struct file_system_type *fs_type, |
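The openpromfs hunks above retire the ->read_inode() super operation in favour of an explicit openprom_iget() built on iget_locked(): a freshly allocated inode carries I_NEW, is initialised once, and is released with unlock_new_inode(), while callers switch from NULL checks to IS_ERR()/ERR_CAST(). A minimal sketch of that pattern for a made-up filesystem follows; foofs_iget() is a hypothetical name and the initialisation body is only indicated by a comment.

#include <linux/fs.h>
#include <linux/err.h>

static struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode;

        inode = iget_locked(sb, ino);   /* existing inode, or a new locked one */
        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                /* first use of this inode: fill it in exactly once */
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                /* ... set i_mode, i_op, i_fop from per-fs data here ... */
                unlock_new_inode(inode);        /* clears I_NEW, wakes waiters */
        }
        return inode;
}

Callers then test the result with IS_ERR() rather than comparing against NULL, exactly as openpromfs_lookup() and openprom_fill_super() now do.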
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig index a99acd8de353..cb5f0a3f1b03 100644 --- a/fs/partitions/Kconfig +++ b/fs/partitions/Kconfig | |||
@@ -198,7 +198,7 @@ config LDM_DEBUG | |||
198 | 198 | ||
199 | config SGI_PARTITION | 199 | config SGI_PARTITION |
200 | bool "SGI partition support" if PARTITION_ADVANCED | 200 | bool "SGI partition support" if PARTITION_ADVANCED |
201 | default y if (SGI_IP22 || SGI_IP27 || ((MACH_JAZZ || SNI_RM) && !CPU_LITTLE_ENDIAN)) | 201 | default y if DEFAULT_SGI_PARTITION |
202 | help | 202 | help |
203 | Say Y here if you would like to be able to read the hard disk | 203 | Say Y here if you would like to be able to read the hard disk |
204 | partition table format used by SGI machines. | 204 | partition table format used by SGI machines. |
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 739da701ae7b..03f808c5b79d 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/kmod.h> | 19 | #include <linux/kmod.h> |
20 | #include <linux/ctype.h> | 20 | #include <linux/ctype.h> |
21 | #include <linux/genhd.h> | ||
21 | 22 | ||
22 | #include "check.h" | 23 | #include "check.h" |
23 | 24 | ||
@@ -215,9 +216,25 @@ static ssize_t part_stat_show(struct device *dev, | |||
215 | { | 216 | { |
216 | struct hd_struct *p = dev_to_part(dev); | 217 | struct hd_struct *p = dev_to_part(dev); |
217 | 218 | ||
218 | return sprintf(buf, "%8u %8llu %8u %8llu\n", | 219 | preempt_disable(); |
219 | p->ios[0], (unsigned long long)p->sectors[0], | 220 | part_round_stats(p); |
220 | p->ios[1], (unsigned long long)p->sectors[1]); | 221 | preempt_enable(); |
222 | return sprintf(buf, | ||
223 | "%8lu %8lu %8llu %8u " | ||
224 | "%8lu %8lu %8llu %8u " | ||
225 | "%8u %8u %8u" | ||
226 | "\n", | ||
227 | part_stat_read(p, ios[READ]), | ||
228 | part_stat_read(p, merges[READ]), | ||
229 | (unsigned long long)part_stat_read(p, sectors[READ]), | ||
230 | jiffies_to_msecs(part_stat_read(p, ticks[READ])), | ||
231 | part_stat_read(p, ios[WRITE]), | ||
232 | part_stat_read(p, merges[WRITE]), | ||
233 | (unsigned long long)part_stat_read(p, sectors[WRITE]), | ||
234 | jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), | ||
235 | p->in_flight, | ||
236 | jiffies_to_msecs(part_stat_read(p, io_ticks)), | ||
237 | jiffies_to_msecs(part_stat_read(p, time_in_queue))); | ||
221 | } | 238 | } |
222 | 239 | ||
223 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 240 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
@@ -273,6 +290,7 @@ static struct attribute_group *part_attr_groups[] = { | |||
273 | static void part_release(struct device *dev) | 290 | static void part_release(struct device *dev) |
274 | { | 291 | { |
275 | struct hd_struct *p = dev_to_part(dev); | 292 | struct hd_struct *p = dev_to_part(dev); |
293 | free_part_stats(p); | ||
276 | kfree(p); | 294 | kfree(p); |
277 | } | 295 | } |
278 | 296 | ||
@@ -312,13 +330,20 @@ void delete_partition(struct gendisk *disk, int part) | |||
312 | disk->part[part-1] = NULL; | 330 | disk->part[part-1] = NULL; |
313 | p->start_sect = 0; | 331 | p->start_sect = 0; |
314 | p->nr_sects = 0; | 332 | p->nr_sects = 0; |
315 | p->ios[0] = p->ios[1] = 0; | 333 | part_stat_set_all(p, 0); |
316 | p->sectors[0] = p->sectors[1] = 0; | ||
317 | kobject_put(p->holder_dir); | 334 | kobject_put(p->holder_dir); |
318 | device_del(&p->dev); | 335 | device_del(&p->dev); |
319 | put_device(&p->dev); | 336 | put_device(&p->dev); |
320 | } | 337 | } |
321 | 338 | ||
339 | static ssize_t whole_disk_show(struct device *dev, | ||
340 | struct device_attribute *attr, char *buf) | ||
341 | { | ||
342 | return 0; | ||
343 | } | ||
344 | static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH, | ||
345 | whole_disk_show, NULL); | ||
346 | |||
322 | void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) | 347 | void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) |
323 | { | 348 | { |
324 | struct hd_struct *p; | 349 | struct hd_struct *p; |
@@ -328,6 +353,10 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, | |||
328 | if (!p) | 353 | if (!p) |
329 | return; | 354 | return; |
330 | 355 | ||
356 | if (!init_part_stats(p)) { | ||
357 | kfree(p); | ||
358 | return; | ||
359 | } | ||
331 | p->start_sect = start; | 360 | p->start_sect = start; |
332 | p->nr_sects = len; | 361 | p->nr_sects = len; |
333 | p->partno = part; | 362 | p->partno = part; |
@@ -352,13 +381,8 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, | |||
352 | device_add(&p->dev); | 381 | device_add(&p->dev); |
353 | partition_sysfs_add_subdir(p); | 382 | partition_sysfs_add_subdir(p); |
354 | p->dev.uevent_suppress = 0; | 383 | p->dev.uevent_suppress = 0; |
355 | if (flags & ADDPART_FLAG_WHOLEDISK) { | 384 | if (flags & ADDPART_FLAG_WHOLEDISK) |
356 | static struct attribute addpartattr = { | 385 | err = device_create_file(&p->dev, &dev_attr_whole_disk); |
357 | .name = "whole_disk", | ||
358 | .mode = S_IRUSR | S_IRGRP | S_IROTH, | ||
359 | }; | ||
360 | err = sysfs_create_file(&p->dev.kobj, &addpartattr); | ||
361 | } | ||
362 | 386 | ||
363 | /* suppress uevent if the disk supresses it */ | 387 | /* suppress uevent if the disk supresses it */ |
364 | if (!disk->dev.uevent_suppress) | 388 | if (!disk->dev.uevent_suppress) |
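The check.c hunks above move the per-partition counters behind the part_stat_read()/part_stat_set_all() accessors, with init_part_stats()/free_part_stats() bracketing the partition's lifetime, and replace the open-coded sysfs attribute for whole-disk partitions with the DEVICE_ATTR() helper plus device_create_file(). A hedged sketch of that attribute pattern is below; the attribute name "example" and the value printed are invented.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

/* Illustrative only: a read-only sysfs attribute in the DEVICE_ATTR() style. */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* report some per-device value */
}
static DEVICE_ATTR(example, S_IRUSR | S_IRGRP | S_IROTH, example_show, NULL);

/* typically called from the code that registers the device */
static int example_add_attr(struct device *dev)
{
        return device_create_file(dev, &dev_attr_example);
}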
@@ -171,7 +171,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, | |||
171 | * | 171 | * |
172 | * Description: | 172 | * Description: |
173 | * This function returns a kernel virtual address mapping for the | 173 | * This function returns a kernel virtual address mapping for the |
174 | * passed in @pipe_buffer. If @atomic is set, an atomic map is provided | 174 | * pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided |
175 | * and the caller has to be careful not to fault before calling | 175 | * and the caller has to be careful not to fault before calling |
176 | * the unmap function. | 176 | * the unmap function. |
177 | * | 177 | * |
@@ -208,15 +208,15 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe, | |||
208 | } | 208 | } |
209 | 209 | ||
210 | /** | 210 | /** |
211 | * generic_pipe_buf_steal - attempt to take ownership of a @pipe_buffer | 211 | * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer |
212 | * @pipe: the pipe that the buffer belongs to | 212 | * @pipe: the pipe that the buffer belongs to |
213 | * @buf: the buffer to attempt to steal | 213 | * @buf: the buffer to attempt to steal |
214 | * | 214 | * |
215 | * Description: | 215 | * Description: |
216 | * This function attempts to steal the @struct page attached to | 216 | * This function attempts to steal the &struct page attached to |
217 | * @buf. If successful, this function returns 0 and returns with | 217 | * @buf. If successful, this function returns 0 and returns with |
218 | * the page locked. The caller may then reuse the page for whatever | 218 | * the page locked. The caller may then reuse the page for whatever |
219 | * he wishes, the typical use is insertion into a different file | 219 | * he wishes; the typical use is insertion into a different file |
220 | * page cache. | 220 | * page cache. |
221 | */ | 221 | */ |
222 | int generic_pipe_buf_steal(struct pipe_inode_info *pipe, | 222 | int generic_pipe_buf_steal(struct pipe_inode_info *pipe, |
@@ -238,7 +238,7 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /** | 240 | /** |
241 | * generic_pipe_buf_get - get a reference to a @struct pipe_buffer | 241 | * generic_pipe_buf_get - get a reference to a &struct pipe_buffer |
242 | * @pipe: the pipe that the buffer belongs to | 242 | * @pipe: the pipe that the buffer belongs to |
243 | * @buf: the buffer to get a reference to | 243 | * @buf: the buffer to get a reference to |
244 | * | 244 | * |
@@ -576,9 +576,7 @@ bad_pipe_w(struct file *filp, const char __user *buf, size_t count, | |||
576 | return -EBADF; | 576 | return -EBADF; |
577 | } | 577 | } |
578 | 578 | ||
579 | static int | 579 | static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
580 | pipe_ioctl(struct inode *pino, struct file *filp, | ||
581 | unsigned int cmd, unsigned long arg) | ||
582 | { | 580 | { |
583 | struct inode *inode = filp->f_path.dentry->d_inode; | 581 | struct inode *inode = filp->f_path.dentry->d_inode; |
584 | struct pipe_inode_info *pipe; | 582 | struct pipe_inode_info *pipe; |
@@ -785,7 +783,7 @@ const struct file_operations read_fifo_fops = { | |||
785 | .aio_read = pipe_read, | 783 | .aio_read = pipe_read, |
786 | .write = bad_pipe_w, | 784 | .write = bad_pipe_w, |
787 | .poll = pipe_poll, | 785 | .poll = pipe_poll, |
788 | .ioctl = pipe_ioctl, | 786 | .unlocked_ioctl = pipe_ioctl, |
789 | .open = pipe_read_open, | 787 | .open = pipe_read_open, |
790 | .release = pipe_read_release, | 788 | .release = pipe_read_release, |
791 | .fasync = pipe_read_fasync, | 789 | .fasync = pipe_read_fasync, |
@@ -797,7 +795,7 @@ const struct file_operations write_fifo_fops = { | |||
797 | .write = do_sync_write, | 795 | .write = do_sync_write, |
798 | .aio_write = pipe_write, | 796 | .aio_write = pipe_write, |
799 | .poll = pipe_poll, | 797 | .poll = pipe_poll, |
800 | .ioctl = pipe_ioctl, | 798 | .unlocked_ioctl = pipe_ioctl, |
801 | .open = pipe_write_open, | 799 | .open = pipe_write_open, |
802 | .release = pipe_write_release, | 800 | .release = pipe_write_release, |
803 | .fasync = pipe_write_fasync, | 801 | .fasync = pipe_write_fasync, |
@@ -810,7 +808,7 @@ const struct file_operations rdwr_fifo_fops = { | |||
810 | .write = do_sync_write, | 808 | .write = do_sync_write, |
811 | .aio_write = pipe_write, | 809 | .aio_write = pipe_write, |
812 | .poll = pipe_poll, | 810 | .poll = pipe_poll, |
813 | .ioctl = pipe_ioctl, | 811 | .unlocked_ioctl = pipe_ioctl, |
814 | .open = pipe_rdwr_open, | 812 | .open = pipe_rdwr_open, |
815 | .release = pipe_rdwr_release, | 813 | .release = pipe_rdwr_release, |
816 | .fasync = pipe_rdwr_fasync, | 814 | .fasync = pipe_rdwr_fasync, |
@@ -822,7 +820,7 @@ static const struct file_operations read_pipe_fops = { | |||
822 | .aio_read = pipe_read, | 820 | .aio_read = pipe_read, |
823 | .write = bad_pipe_w, | 821 | .write = bad_pipe_w, |
824 | .poll = pipe_poll, | 822 | .poll = pipe_poll, |
825 | .ioctl = pipe_ioctl, | 823 | .unlocked_ioctl = pipe_ioctl, |
826 | .open = pipe_read_open, | 824 | .open = pipe_read_open, |
827 | .release = pipe_read_release, | 825 | .release = pipe_read_release, |
828 | .fasync = pipe_read_fasync, | 826 | .fasync = pipe_read_fasync, |
@@ -834,7 +832,7 @@ static const struct file_operations write_pipe_fops = { | |||
834 | .write = do_sync_write, | 832 | .write = do_sync_write, |
835 | .aio_write = pipe_write, | 833 | .aio_write = pipe_write, |
836 | .poll = pipe_poll, | 834 | .poll = pipe_poll, |
837 | .ioctl = pipe_ioctl, | 835 | .unlocked_ioctl = pipe_ioctl, |
838 | .open = pipe_write_open, | 836 | .open = pipe_write_open, |
839 | .release = pipe_write_release, | 837 | .release = pipe_write_release, |
840 | .fasync = pipe_write_fasync, | 838 | .fasync = pipe_write_fasync, |
@@ -847,7 +845,7 @@ static const struct file_operations rdwr_pipe_fops = { | |||
847 | .write = do_sync_write, | 845 | .write = do_sync_write, |
848 | .aio_write = pipe_write, | 846 | .aio_write = pipe_write, |
849 | .poll = pipe_poll, | 847 | .poll = pipe_poll, |
850 | .ioctl = pipe_ioctl, | 848 | .unlocked_ioctl = pipe_ioctl, |
851 | .open = pipe_rdwr_open, | 849 | .open = pipe_rdwr_open, |
852 | .release = pipe_rdwr_release, | 850 | .release = pipe_rdwr_release, |
853 | .fasync = pipe_rdwr_fasync, | 851 | .fasync = pipe_rdwr_fasync, |
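The hunks above, from fs/pipe.c, convert the pipe and FIFO file_operations from the old inode-taking ->ioctl() method to ->unlocked_ioctl(), which runs without the big kernel lock and recovers the inode from the file itself. A minimal sketch of the converted handler shape follows; EXAMPLE_IOC_GET and example_fops are invented for illustration.

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#define EXAMPLE_IOC_GET _IOR('E', 0x01, int)    /* hypothetical command */

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;    /* no inode argument any more */

        switch (cmd) {
        case EXAMPLE_IOC_GET:
                return put_user((int)inode->i_nlink, (int __user *)arg);
        default:
                return -ENOTTY;
        }
}

static const struct file_operations example_fops = {
        .unlocked_ioctl = example_ioctl,
};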
diff --git a/fs/pnode.c b/fs/pnode.c index 89940f243fc2..05ba692bc540 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -83,6 +83,8 @@ void change_mnt_propagation(struct vfsmount *mnt, int type) | |||
83 | mnt->mnt_master = NULL; | 83 | mnt->mnt_master = NULL; |
84 | if (type == MS_UNBINDABLE) | 84 | if (type == MS_UNBINDABLE) |
85 | mnt->mnt_flags |= MNT_UNBINDABLE; | 85 | mnt->mnt_flags |= MNT_UNBINDABLE; |
86 | else | ||
87 | mnt->mnt_flags &= ~MNT_UNBINDABLE; | ||
86 | } | 88 | } |
87 | } | 89 | } |
88 | 90 | ||
diff --git a/fs/proc/array.c b/fs/proc/array.c index 6ba2746e4517..07d6c4853fe8 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -77,6 +77,7 @@ | |||
77 | #include <linux/cpuset.h> | 77 | #include <linux/cpuset.h> |
78 | #include <linux/rcupdate.h> | 78 | #include <linux/rcupdate.h> |
79 | #include <linux/delayacct.h> | 79 | #include <linux/delayacct.h> |
80 | #include <linux/seq_file.h> | ||
80 | #include <linux/pid_namespace.h> | 81 | #include <linux/pid_namespace.h> |
81 | 82 | ||
82 | #include <asm/pgtable.h> | 83 | #include <asm/pgtable.h> |
@@ -88,18 +89,21 @@ | |||
88 | do { memcpy(buffer, string, strlen(string)); \ | 89 | do { memcpy(buffer, string, strlen(string)); \ |
89 | buffer += strlen(string); } while (0) | 90 | buffer += strlen(string); } while (0) |
90 | 91 | ||
91 | static inline char *task_name(struct task_struct *p, char *buf) | 92 | static inline void task_name(struct seq_file *m, struct task_struct *p) |
92 | { | 93 | { |
93 | int i; | 94 | int i; |
95 | char *buf, *end; | ||
94 | char *name; | 96 | char *name; |
95 | char tcomm[sizeof(p->comm)]; | 97 | char tcomm[sizeof(p->comm)]; |
96 | 98 | ||
97 | get_task_comm(tcomm, p); | 99 | get_task_comm(tcomm, p); |
98 | 100 | ||
99 | ADDBUF(buf, "Name:\t"); | 101 | seq_printf(m, "Name:\t"); |
102 | end = m->buf + m->size; | ||
103 | buf = m->buf + m->count; | ||
100 | name = tcomm; | 104 | name = tcomm; |
101 | i = sizeof(tcomm); | 105 | i = sizeof(tcomm); |
102 | do { | 106 | while (i && (buf < end)) { |
103 | unsigned char c = *name; | 107 | unsigned char c = *name; |
104 | name++; | 108 | name++; |
105 | i--; | 109 | i--; |
@@ -107,20 +111,21 @@ static inline char *task_name(struct task_struct *p, char *buf) | |||
107 | if (!c) | 111 | if (!c) |
108 | break; | 112 | break; |
109 | if (c == '\\') { | 113 | if (c == '\\') { |
110 | buf[1] = c; | 114 | buf++; |
111 | buf += 2; | 115 | if (buf < end) |
116 | *buf++ = c; | ||
112 | continue; | 117 | continue; |
113 | } | 118 | } |
114 | if (c == '\n') { | 119 | if (c == '\n') { |
115 | buf[0] = '\\'; | 120 | *buf++ = '\\'; |
116 | buf[1] = 'n'; | 121 | if (buf < end) |
117 | buf += 2; | 122 | *buf++ = 'n'; |
118 | continue; | 123 | continue; |
119 | } | 124 | } |
120 | buf++; | 125 | buf++; |
121 | } while (i); | 126 | } |
122 | *buf = '\n'; | 127 | m->count = buf - m->buf; |
123 | return buf+1; | 128 | seq_printf(m, "\n"); |
124 | } | 129 | } |
125 | 130 | ||
126 | /* | 131 | /* |
@@ -151,21 +156,20 @@ static inline const char *get_task_state(struct task_struct *tsk) | |||
151 | return *p; | 156 | return *p; |
152 | } | 157 | } |
153 | 158 | ||
154 | static inline char *task_state(struct task_struct *p, char *buffer) | 159 | static inline void task_state(struct seq_file *m, struct pid_namespace *ns, |
160 | struct pid *pid, struct task_struct *p) | ||
155 | { | 161 | { |
156 | struct group_info *group_info; | 162 | struct group_info *group_info; |
157 | int g; | 163 | int g; |
158 | struct fdtable *fdt = NULL; | 164 | struct fdtable *fdt = NULL; |
159 | struct pid_namespace *ns; | ||
160 | pid_t ppid, tpid; | 165 | pid_t ppid, tpid; |
161 | 166 | ||
162 | ns = current->nsproxy->pid_ns; | ||
163 | rcu_read_lock(); | 167 | rcu_read_lock(); |
164 | ppid = pid_alive(p) ? | 168 | ppid = pid_alive(p) ? |
165 | task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; | 169 | task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; |
166 | tpid = pid_alive(p) && p->ptrace ? | 170 | tpid = pid_alive(p) && p->ptrace ? |
167 | task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0; | 171 | task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0; |
168 | buffer += sprintf(buffer, | 172 | seq_printf(m, |
169 | "State:\t%s\n" | 173 | "State:\t%s\n" |
170 | "Tgid:\t%d\n" | 174 | "Tgid:\t%d\n" |
171 | "Pid:\t%d\n" | 175 | "Pid:\t%d\n" |
@@ -175,7 +179,7 @@ static inline char *task_state(struct task_struct *p, char *buffer) | |||
175 | "Gid:\t%d\t%d\t%d\t%d\n", | 179 | "Gid:\t%d\t%d\t%d\t%d\n", |
176 | get_task_state(p), | 180 | get_task_state(p), |
177 | task_tgid_nr_ns(p, ns), | 181 | task_tgid_nr_ns(p, ns), |
178 | task_pid_nr_ns(p, ns), | 182 | pid_nr_ns(pid, ns), |
179 | ppid, tpid, | 183 | ppid, tpid, |
180 | p->uid, p->euid, p->suid, p->fsuid, | 184 | p->uid, p->euid, p->suid, p->fsuid, |
181 | p->gid, p->egid, p->sgid, p->fsgid); | 185 | p->gid, p->egid, p->sgid, p->fsgid); |
@@ -183,7 +187,7 @@ static inline char *task_state(struct task_struct *p, char *buffer) | |||
183 | task_lock(p); | 187 | task_lock(p); |
184 | if (p->files) | 188 | if (p->files) |
185 | fdt = files_fdtable(p->files); | 189 | fdt = files_fdtable(p->files); |
186 | buffer += sprintf(buffer, | 190 | seq_printf(m, |
187 | "FDSize:\t%d\n" | 191 | "FDSize:\t%d\n" |
188 | "Groups:\t", | 192 | "Groups:\t", |
189 | fdt ? fdt->max_fds : 0); | 193 | fdt ? fdt->max_fds : 0); |
@@ -194,20 +198,18 @@ static inline char *task_state(struct task_struct *p, char *buffer) | |||
194 | task_unlock(p); | 198 | task_unlock(p); |
195 | 199 | ||
196 | for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++) | 200 | for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++) |
197 | buffer += sprintf(buffer, "%d ", GROUP_AT(group_info, g)); | 201 | seq_printf(m, "%d ", GROUP_AT(group_info, g)); |
198 | put_group_info(group_info); | 202 | put_group_info(group_info); |
199 | 203 | ||
200 | buffer += sprintf(buffer, "\n"); | 204 | seq_printf(m, "\n"); |
201 | return buffer; | ||
202 | } | 205 | } |
203 | 206 | ||
204 | static char *render_sigset_t(const char *header, sigset_t *set, char *buffer) | 207 | static void render_sigset_t(struct seq_file *m, const char *header, |
208 | sigset_t *set) | ||
205 | { | 209 | { |
206 | int i, len; | 210 | int i; |
207 | 211 | ||
208 | len = strlen(header); | 212 | seq_printf(m, "%s", header); |
209 | memcpy(buffer, header, len); | ||
210 | buffer += len; | ||
211 | 213 | ||
212 | i = _NSIG; | 214 | i = _NSIG; |
213 | do { | 215 | do { |
@@ -218,12 +220,10 @@ static char *render_sigset_t(const char *header, sigset_t *set, char *buffer) | |||
218 | if (sigismember(set, i+2)) x |= 2; | 220 | if (sigismember(set, i+2)) x |= 2; |
219 | if (sigismember(set, i+3)) x |= 4; | 221 | if (sigismember(set, i+3)) x |= 4; |
220 | if (sigismember(set, i+4)) x |= 8; | 222 | if (sigismember(set, i+4)) x |= 8; |
221 | *buffer++ = (x < 10 ? '0' : 'a' - 10) + x; | 223 | seq_printf(m, "%x", x); |
222 | } while (i >= 4); | 224 | } while (i >= 4); |
223 | 225 | ||
224 | *buffer++ = '\n'; | 226 | seq_printf(m, "\n"); |
225 | *buffer = 0; | ||
226 | return buffer; | ||
227 | } | 227 | } |
228 | 228 | ||
229 | static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, | 229 | static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, |
@@ -241,7 +241,7 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, | |||
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
244 | static inline char *task_sig(struct task_struct *p, char *buffer) | 244 | static inline void task_sig(struct seq_file *m, struct task_struct *p) |
245 | { | 245 | { |
246 | unsigned long flags; | 246 | unsigned long flags; |
247 | sigset_t pending, shpending, blocked, ignored, caught; | 247 | sigset_t pending, shpending, blocked, ignored, caught; |
@@ -268,67 +268,66 @@ static inline char *task_sig(struct task_struct *p, char *buffer) | |||
268 | } | 268 | } |
269 | rcu_read_unlock(); | 269 | rcu_read_unlock(); |
270 | 270 | ||
271 | buffer += sprintf(buffer, "Threads:\t%d\n", num_threads); | 271 | seq_printf(m, "Threads:\t%d\n", num_threads); |
272 | buffer += sprintf(buffer, "SigQ:\t%lu/%lu\n", qsize, qlim); | 272 | seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim); |
273 | 273 | ||
274 | /* render them all */ | 274 | /* render them all */ |
275 | buffer = render_sigset_t("SigPnd:\t", &pending, buffer); | 275 | render_sigset_t(m, "SigPnd:\t", &pending); |
276 | buffer = render_sigset_t("ShdPnd:\t", &shpending, buffer); | 276 | render_sigset_t(m, "ShdPnd:\t", &shpending); |
277 | buffer = render_sigset_t("SigBlk:\t", &blocked, buffer); | 277 | render_sigset_t(m, "SigBlk:\t", &blocked); |
278 | buffer = render_sigset_t("SigIgn:\t", &ignored, buffer); | 278 | render_sigset_t(m, "SigIgn:\t", &ignored); |
279 | buffer = render_sigset_t("SigCgt:\t", &caught, buffer); | 279 | render_sigset_t(m, "SigCgt:\t", &caught); |
280 | |||
281 | return buffer; | ||
282 | } | 280 | } |
283 | 281 | ||
284 | static char *render_cap_t(const char *header, kernel_cap_t *a, char *buffer) | 282 | static void render_cap_t(struct seq_file *m, const char *header, |
283 | kernel_cap_t *a) | ||
285 | { | 284 | { |
286 | unsigned __capi; | 285 | unsigned __capi; |
287 | 286 | ||
288 | buffer += sprintf(buffer, "%s", header); | 287 | seq_printf(m, "%s", header); |
289 | CAP_FOR_EACH_U32(__capi) { | 288 | CAP_FOR_EACH_U32(__capi) { |
290 | buffer += sprintf(buffer, "%08x", | 289 | seq_printf(m, "%08x", |
291 | a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]); | 290 | a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]); |
292 | } | 291 | } |
293 | return buffer + sprintf(buffer, "\n"); | 292 | seq_printf(m, "\n"); |
294 | } | 293 | } |
295 | 294 | ||
296 | static inline char *task_cap(struct task_struct *p, char *buffer) | 295 | static inline void task_cap(struct seq_file *m, struct task_struct *p) |
297 | { | 296 | { |
298 | buffer = render_cap_t("CapInh:\t", &p->cap_inheritable, buffer); | 297 | render_cap_t(m, "CapInh:\t", &p->cap_inheritable); |
299 | buffer = render_cap_t("CapPrm:\t", &p->cap_permitted, buffer); | 298 | render_cap_t(m, "CapPrm:\t", &p->cap_permitted); |
300 | return render_cap_t("CapEff:\t", &p->cap_effective, buffer); | 299 | render_cap_t(m, "CapEff:\t", &p->cap_effective); |
301 | } | 300 | } |
302 | 301 | ||
303 | static inline char *task_context_switch_counts(struct task_struct *p, | 302 | static inline void task_context_switch_counts(struct seq_file *m, |
304 | char *buffer) | 303 | struct task_struct *p) |
305 | { | 304 | { |
306 | return buffer + sprintf(buffer, "voluntary_ctxt_switches:\t%lu\n" | 305 | seq_printf(m, "voluntary_ctxt_switches:\t%lu\n" |
307 | "nonvoluntary_ctxt_switches:\t%lu\n", | 306 | "nonvoluntary_ctxt_switches:\t%lu\n", |
308 | p->nvcsw, | 307 | p->nvcsw, |
309 | p->nivcsw); | 308 | p->nivcsw); |
310 | } | 309 | } |
311 | 310 | ||
312 | int proc_pid_status(struct task_struct *task, char *buffer) | 311 | int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, |
312 | struct pid *pid, struct task_struct *task) | ||
313 | { | 313 | { |
314 | char *orig = buffer; | ||
315 | struct mm_struct *mm = get_task_mm(task); | 314 | struct mm_struct *mm = get_task_mm(task); |
316 | 315 | ||
317 | buffer = task_name(task, buffer); | 316 | task_name(m, task); |
318 | buffer = task_state(task, buffer); | 317 | task_state(m, ns, pid, task); |
319 | 318 | ||
320 | if (mm) { | 319 | if (mm) { |
321 | buffer = task_mem(mm, buffer); | 320 | task_mem(m, mm); |
322 | mmput(mm); | 321 | mmput(mm); |
323 | } | 322 | } |
324 | buffer = task_sig(task, buffer); | 323 | task_sig(m, task); |
325 | buffer = task_cap(task, buffer); | 324 | task_cap(m, task); |
326 | buffer = cpuset_task_status_allowed(task, buffer); | 325 | cpuset_task_status_allowed(m, task); |
327 | #if defined(CONFIG_S390) | 326 | #if defined(CONFIG_S390) |
328 | buffer = task_show_regs(task, buffer); | 327 | task_show_regs(m, task); |
329 | #endif | 328 | #endif |
330 | buffer = task_context_switch_counts(task, buffer); | 329 | task_context_switch_counts(m, task); |
331 | return buffer - orig; | 330 | return 0; |
332 | } | 331 | } |
333 | 332 | ||
334 | /* | 333 | /* |
@@ -390,14 +389,14 @@ static cputime_t task_gtime(struct task_struct *p) | |||
390 | return p->gtime; | 389 | return p->gtime; |
391 | } | 390 | } |
392 | 391 | ||
393 | static int do_task_stat(struct task_struct *task, char *buffer, int whole) | 392 | static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, |
393 | struct pid *pid, struct task_struct *task, int whole) | ||
394 | { | 394 | { |
395 | unsigned long vsize, eip, esp, wchan = ~0UL; | 395 | unsigned long vsize, eip, esp, wchan = ~0UL; |
396 | long priority, nice; | 396 | long priority, nice; |
397 | int tty_pgrp = -1, tty_nr = 0; | 397 | int tty_pgrp = -1, tty_nr = 0; |
398 | sigset_t sigign, sigcatch; | 398 | sigset_t sigign, sigcatch; |
399 | char state; | 399 | char state; |
400 | int res; | ||
401 | pid_t ppid = 0, pgid = -1, sid = -1; | 400 | pid_t ppid = 0, pgid = -1, sid = -1; |
402 | int num_threads = 0; | 401 | int num_threads = 0; |
403 | struct mm_struct *mm; | 402 | struct mm_struct *mm; |
@@ -409,9 +408,6 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole) | |||
409 | unsigned long rsslim = 0; | 408 | unsigned long rsslim = 0; |
410 | char tcomm[sizeof(task->comm)]; | 409 | char tcomm[sizeof(task->comm)]; |
411 | unsigned long flags; | 410 | unsigned long flags; |
412 | struct pid_namespace *ns; | ||
413 | |||
414 | ns = current->nsproxy->pid_ns; | ||
415 | 411 | ||
416 | state = *get_task_state(task); | 412 | state = *get_task_state(task); |
417 | vsize = eip = esp = 0; | 413 | vsize = eip = esp = 0; |
@@ -498,10 +494,10 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole) | |||
498 | /* convert nsec -> ticks */ | 494 | /* convert nsec -> ticks */ |
499 | start_time = nsec_to_clock_t(start_time); | 495 | start_time = nsec_to_clock_t(start_time); |
500 | 496 | ||
501 | res = sprintf(buffer, "%d (%s) %c %d %d %d %d %d %u %lu \ | 497 | seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \ |
502 | %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ | 498 | %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ |
503 | %lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n", | 499 | %lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n", |
504 | task_pid_nr_ns(task, ns), | 500 | pid_nr_ns(pid, ns), |
505 | tcomm, | 501 | tcomm, |
506 | state, | 502 | state, |
507 | ppid, | 503 | ppid, |
@@ -550,20 +546,23 @@ static int do_task_stat(struct task_struct *task, char *buffer, int whole) | |||
550 | cputime_to_clock_t(cgtime)); | 546 | cputime_to_clock_t(cgtime)); |
551 | if (mm) | 547 | if (mm) |
552 | mmput(mm); | 548 | mmput(mm); |
553 | return res; | 549 | return 0; |
554 | } | 550 | } |
555 | 551 | ||
556 | int proc_tid_stat(struct task_struct *task, char *buffer) | 552 | int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, |
553 | struct pid *pid, struct task_struct *task) | ||
557 | { | 554 | { |
558 | return do_task_stat(task, buffer, 0); | 555 | return do_task_stat(m, ns, pid, task, 0); |
559 | } | 556 | } |
560 | 557 | ||
561 | int proc_tgid_stat(struct task_struct *task, char *buffer) | 558 | int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, |
559 | struct pid *pid, struct task_struct *task) | ||
562 | { | 560 | { |
563 | return do_task_stat(task, buffer, 1); | 561 | return do_task_stat(m, ns, pid, task, 1); |
564 | } | 562 | } |
565 | 563 | ||
566 | int proc_pid_statm(struct task_struct *task, char *buffer) | 564 | int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, |
565 | struct pid *pid, struct task_struct *task) | ||
567 | { | 566 | { |
568 | int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; | 567 | int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; |
569 | struct mm_struct *mm = get_task_mm(task); | 568 | struct mm_struct *mm = get_task_mm(task); |
@@ -572,7 +571,8 @@ int proc_pid_statm(struct task_struct *task, char *buffer) | |||
572 | size = task_statm(mm, &shared, &text, &data, &resident); | 571 | size = task_statm(mm, &shared, &text, &data, &resident); |
573 | mmput(mm); | 572 | mmput(mm); |
574 | } | 573 | } |
574 | seq_printf(m, "%d %d %d %d %d %d %d\n", | ||
575 | size, resident, shared, text, lib, data, 0); | ||
575 | 576 | ||
576 | return sprintf(buffer, "%d %d %d %d %d %d %d\n", | 577 | return 0; |
577 | size, resident, shared, text, lib, data, 0); | ||
578 | } | 578 | } |
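The array.c hunks above convert the /proc/&lt;pid&gt;/status, stat and statm generators from hand-managed sprintf() buffers to seq_file output: every helper now takes a struct seq_file *, emits with seq_printf(), and the entry points return 0 instead of a byte count, with the pid namespace and struct pid passed in explicitly rather than derived from current. A reduced before/after sketch follows; the function names and the single field printed are placeholders.

#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>

/* old style: caller supplies a raw page buffer, helper returns bytes written */
static int show_example_old(struct task_struct *task, char *buffer)
{
        char *orig = buffer;

        buffer += sprintf(buffer, "Pid:\t%d\n", task->pid);
        return buffer - orig;
}

/* new style: helper writes into the seq_file and returns 0 on success */
static int show_example_new(struct seq_file *m, struct task_struct *task)
{
        seq_printf(m, "Pid:\t%d\n", task->pid);
        return 0;
}

The seq_file layer then handles buffer sizing and partial reads for the caller.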
diff --git a/fs/proc/base.c b/fs/proc/base.c index c59852b38787..88f8edf18258 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -121,6 +121,10 @@ struct pid_entry { | |||
121 | NOD(NAME, (S_IFREG|(MODE)), \ | 121 | NOD(NAME, (S_IFREG|(MODE)), \ |
122 | NULL, &proc_info_file_operations, \ | 122 | NULL, &proc_info_file_operations, \ |
123 | { .proc_read = &proc_##OTYPE } ) | 123 | { .proc_read = &proc_##OTYPE } ) |
124 | #define ONE(NAME, MODE, OTYPE) \ | ||
125 | NOD(NAME, (S_IFREG|(MODE)), \ | ||
126 | NULL, &proc_single_file_operations, \ | ||
127 | { .proc_show = &proc_##OTYPE } ) | ||
124 | 128 | ||
125 | int maps_protect; | 129 | int maps_protect; |
126 | EXPORT_SYMBOL(maps_protect); | 130 | EXPORT_SYMBOL(maps_protect); |
@@ -149,7 +153,7 @@ static int get_nr_threads(struct task_struct *tsk) | |||
149 | return count; | 153 | return count; |
150 | } | 154 | } |
151 | 155 | ||
152 | static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) | 156 | static int proc_cwd_link(struct inode *inode, struct path *path) |
153 | { | 157 | { |
154 | struct task_struct *task = get_proc_task(inode); | 158 | struct task_struct *task = get_proc_task(inode); |
155 | struct fs_struct *fs = NULL; | 159 | struct fs_struct *fs = NULL; |
@@ -161,8 +165,8 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs | |||
161 | } | 165 | } |
162 | if (fs) { | 166 | if (fs) { |
163 | read_lock(&fs->lock); | 167 | read_lock(&fs->lock); |
164 | *mnt = mntget(fs->pwdmnt); | 168 | *path = fs->pwd; |
165 | *dentry = dget(fs->pwd); | 169 | path_get(&fs->pwd); |
166 | read_unlock(&fs->lock); | 170 | read_unlock(&fs->lock); |
167 | result = 0; | 171 | result = 0; |
168 | put_fs_struct(fs); | 172 | put_fs_struct(fs); |
@@ -170,7 +174,7 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs | |||
170 | return result; | 174 | return result; |
171 | } | 175 | } |
172 | 176 | ||
173 | static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) | 177 | static int proc_root_link(struct inode *inode, struct path *path) |
174 | { | 178 | { |
175 | struct task_struct *task = get_proc_task(inode); | 179 | struct task_struct *task = get_proc_task(inode); |
176 | struct fs_struct *fs = NULL; | 180 | struct fs_struct *fs = NULL; |
@@ -182,8 +186,8 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf | |||
182 | } | 186 | } |
183 | if (fs) { | 187 | if (fs) { |
184 | read_lock(&fs->lock); | 188 | read_lock(&fs->lock); |
185 | *mnt = mntget(fs->rootmnt); | 189 | *path = fs->root; |
186 | *dentry = dget(fs->root); | 190 | path_get(&fs->root); |
187 | read_unlock(&fs->lock); | 191 | read_unlock(&fs->lock); |
188 | result = 0; | 192 | result = 0; |
189 | put_fs_struct(fs); | 193 | put_fs_struct(fs); |
@@ -502,7 +506,7 @@ static const struct inode_operations proc_def_inode_operations = { | |||
502 | .setattr = proc_setattr, | 506 | .setattr = proc_setattr, |
503 | }; | 507 | }; |
504 | 508 | ||
505 | extern struct seq_operations mounts_op; | 509 | extern const struct seq_operations mounts_op; |
506 | struct proc_mounts { | 510 | struct proc_mounts { |
507 | struct seq_file m; | 511 | struct seq_file m; |
508 | int event; | 512 | int event; |
@@ -581,7 +585,7 @@ static const struct file_operations proc_mounts_operations = { | |||
581 | .poll = mounts_poll, | 585 | .poll = mounts_poll, |
582 | }; | 586 | }; |
583 | 587 | ||
584 | extern struct seq_operations mountstats_op; | 588 | extern const struct seq_operations mountstats_op; |
585 | static int mountstats_open(struct inode *inode, struct file *file) | 589 | static int mountstats_open(struct inode *inode, struct file *file) |
586 | { | 590 | { |
587 | int ret = seq_open(file, &mountstats_op); | 591 | int ret = seq_open(file, &mountstats_op); |
@@ -658,6 +662,45 @@ static const struct file_operations proc_info_file_operations = { | |||
658 | .read = proc_info_read, | 662 | .read = proc_info_read, |
659 | }; | 663 | }; |
660 | 664 | ||
665 | static int proc_single_show(struct seq_file *m, void *v) | ||
666 | { | ||
667 | struct inode *inode = m->private; | ||
668 | struct pid_namespace *ns; | ||
669 | struct pid *pid; | ||
670 | struct task_struct *task; | ||
671 | int ret; | ||
672 | |||
673 | ns = inode->i_sb->s_fs_info; | ||
674 | pid = proc_pid(inode); | ||
675 | task = get_pid_task(pid, PIDTYPE_PID); | ||
676 | if (!task) | ||
677 | return -ESRCH; | ||
678 | |||
679 | ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); | ||
680 | |||
681 | put_task_struct(task); | ||
682 | return ret; | ||
683 | } | ||
684 | |||
685 | static int proc_single_open(struct inode *inode, struct file *filp) | ||
686 | { | ||
687 | int ret; | ||
688 | ret = single_open(filp, proc_single_show, NULL); | ||
689 | if (!ret) { | ||
690 | struct seq_file *m = filp->private_data; | ||
691 | |||
692 | m->private = inode; | ||
693 | } | ||
694 | return ret; | ||
695 | } | ||
696 | |||
697 | static const struct file_operations proc_single_file_operations = { | ||
698 | .open = proc_single_open, | ||
699 | .read = seq_read, | ||
700 | .llseek = seq_lseek, | ||
701 | .release = single_release, | ||
702 | }; | ||
703 | |||
661 | static int mem_open(struct inode* inode, struct file* file) | 704 | static int mem_open(struct inode* inode, struct file* file) |
662 | { | 705 | { |
663 | file->private_data = (void*)((long)current->self_exec_id); | 706 | file->private_data = (void*)((long)current->self_exec_id); |
@@ -1121,39 +1164,36 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
1121 | int error = -EACCES; | 1164 | int error = -EACCES; |
1122 | 1165 | ||
1123 | /* We don't need a base pointer in the /proc filesystem */ | 1166 | /* We don't need a base pointer in the /proc filesystem */ |
1124 | path_release(nd); | 1167 | path_put(&nd->path); |
1125 | 1168 | ||
1126 | /* Are we allowed to snoop on the tasks file descriptors? */ | 1169 | /* Are we allowed to snoop on the tasks file descriptors? */ |
1127 | if (!proc_fd_access_allowed(inode)) | 1170 | if (!proc_fd_access_allowed(inode)) |
1128 | goto out; | 1171 | goto out; |
1129 | 1172 | ||
1130 | error = PROC_I(inode)->op.proc_get_link(inode, &nd->dentry, &nd->mnt); | 1173 | error = PROC_I(inode)->op.proc_get_link(inode, &nd->path); |
1131 | nd->last_type = LAST_BIND; | 1174 | nd->last_type = LAST_BIND; |
1132 | out: | 1175 | out: |
1133 | return ERR_PTR(error); | 1176 | return ERR_PTR(error); |
1134 | } | 1177 | } |
1135 | 1178 | ||
1136 | static int do_proc_readlink(struct dentry *dentry, struct vfsmount *mnt, | 1179 | static int do_proc_readlink(struct path *path, char __user *buffer, int buflen) |
1137 | char __user *buffer, int buflen) | ||
1138 | { | 1180 | { |
1139 | struct inode * inode; | ||
1140 | char *tmp = (char*)__get_free_page(GFP_TEMPORARY); | 1181 | char *tmp = (char*)__get_free_page(GFP_TEMPORARY); |
1141 | char *path; | 1182 | char *pathname; |
1142 | int len; | 1183 | int len; |
1143 | 1184 | ||
1144 | if (!tmp) | 1185 | if (!tmp) |
1145 | return -ENOMEM; | 1186 | return -ENOMEM; |
1146 | 1187 | ||
1147 | inode = dentry->d_inode; | 1188 | pathname = d_path(path, tmp, PAGE_SIZE); |
1148 | path = d_path(dentry, mnt, tmp, PAGE_SIZE); | 1189 | len = PTR_ERR(pathname); |
1149 | len = PTR_ERR(path); | 1190 | if (IS_ERR(pathname)) |
1150 | if (IS_ERR(path)) | ||
1151 | goto out; | 1191 | goto out; |
1152 | len = tmp + PAGE_SIZE - 1 - path; | 1192 | len = tmp + PAGE_SIZE - 1 - pathname; |
1153 | 1193 | ||
1154 | if (len > buflen) | 1194 | if (len > buflen) |
1155 | len = buflen; | 1195 | len = buflen; |
1156 | if (copy_to_user(buffer, path, len)) | 1196 | if (copy_to_user(buffer, pathname, len)) |
1157 | len = -EFAULT; | 1197 | len = -EFAULT; |
1158 | out: | 1198 | out: |
1159 | free_page((unsigned long)tmp); | 1199 | free_page((unsigned long)tmp); |
@@ -1164,20 +1204,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b | |||
1164 | { | 1204 | { |
1165 | int error = -EACCES; | 1205 | int error = -EACCES; |
1166 | struct inode *inode = dentry->d_inode; | 1206 | struct inode *inode = dentry->d_inode; |
1167 | struct dentry *de; | 1207 | struct path path; |
1168 | struct vfsmount *mnt = NULL; | ||
1169 | 1208 | ||
1170 | /* Are we allowed to snoop on the tasks file descriptors? */ | 1209 | /* Are we allowed to snoop on the tasks file descriptors? */ |
1171 | if (!proc_fd_access_allowed(inode)) | 1210 | if (!proc_fd_access_allowed(inode)) |
1172 | goto out; | 1211 | goto out; |
1173 | 1212 | ||
1174 | error = PROC_I(inode)->op.proc_get_link(inode, &de, &mnt); | 1213 | error = PROC_I(inode)->op.proc_get_link(inode, &path); |
1175 | if (error) | 1214 | if (error) |
1176 | goto out; | 1215 | goto out; |
1177 | 1216 | ||
1178 | error = do_proc_readlink(de, mnt, buffer, buflen); | 1217 | error = do_proc_readlink(&path, buffer, buflen); |
1179 | dput(de); | 1218 | path_put(&path); |
1180 | mntput(mnt); | ||
1181 | out: | 1219 | out: |
1182 | return error; | 1220 | return error; |
1183 | } | 1221 | } |
@@ -1404,8 +1442,7 @@ out: | |||
1404 | 1442 | ||
1405 | #define PROC_FDINFO_MAX 64 | 1443 | #define PROC_FDINFO_MAX 64 |
1406 | 1444 | ||
1407 | static int proc_fd_info(struct inode *inode, struct dentry **dentry, | 1445 | static int proc_fd_info(struct inode *inode, struct path *path, char *info) |
1408 | struct vfsmount **mnt, char *info) | ||
1409 | { | 1446 | { |
1410 | struct task_struct *task = get_proc_task(inode); | 1447 | struct task_struct *task = get_proc_task(inode); |
1411 | struct files_struct *files = NULL; | 1448 | struct files_struct *files = NULL; |
@@ -1424,10 +1461,10 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry, | |||
1424 | spin_lock(&files->file_lock); | 1461 | spin_lock(&files->file_lock); |
1425 | file = fcheck_files(files, fd); | 1462 | file = fcheck_files(files, fd); |
1426 | if (file) { | 1463 | if (file) { |
1427 | if (mnt) | 1464 | if (path) { |
1428 | *mnt = mntget(file->f_path.mnt); | 1465 | *path = file->f_path; |
1429 | if (dentry) | 1466 | path_get(&file->f_path); |
1430 | *dentry = dget(file->f_path.dentry); | 1467 | } |
1431 | if (info) | 1468 | if (info) |
1432 | snprintf(info, PROC_FDINFO_MAX, | 1469 | snprintf(info, PROC_FDINFO_MAX, |
1433 | "pos:\t%lli\n" | 1470 | "pos:\t%lli\n" |
@@ -1444,10 +1481,9 @@ static int proc_fd_info(struct inode *inode, struct dentry **dentry, | |||
1444 | return -ENOENT; | 1481 | return -ENOENT; |
1445 | } | 1482 | } |
1446 | 1483 | ||
1447 | static int proc_fd_link(struct inode *inode, struct dentry **dentry, | 1484 | static int proc_fd_link(struct inode *inode, struct path *path) |
1448 | struct vfsmount **mnt) | ||
1449 | { | 1485 | { |
1450 | return proc_fd_info(inode, dentry, mnt, NULL); | 1486 | return proc_fd_info(inode, path, NULL); |
1451 | } | 1487 | } |
1452 | 1488 | ||
1453 | static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) | 1489 | static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) |
@@ -1641,7 +1677,7 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf, | |||
1641 | size_t len, loff_t *ppos) | 1677 | size_t len, loff_t *ppos) |
1642 | { | 1678 | { |
1643 | char tmp[PROC_FDINFO_MAX]; | 1679 | char tmp[PROC_FDINFO_MAX]; |
1644 | int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, NULL, tmp); | 1680 | int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp); |
1645 | if (!err) | 1681 | if (!err) |
1646 | err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp)); | 1682 | err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp)); |
1647 | return err; | 1683 | return err; |
@@ -2058,15 +2094,23 @@ static const struct file_operations proc_coredump_filter_operations = { | |||
2058 | static int proc_self_readlink(struct dentry *dentry, char __user *buffer, | 2094 | static int proc_self_readlink(struct dentry *dentry, char __user *buffer, |
2059 | int buflen) | 2095 | int buflen) |
2060 | { | 2096 | { |
2097 | struct pid_namespace *ns = dentry->d_sb->s_fs_info; | ||
2098 | pid_t tgid = task_tgid_nr_ns(current, ns); | ||
2061 | char tmp[PROC_NUMBUF]; | 2099 | char tmp[PROC_NUMBUF]; |
2062 | sprintf(tmp, "%d", task_tgid_vnr(current)); | 2100 | if (!tgid) |
2101 | return -ENOENT; | ||
2102 | sprintf(tmp, "%d", tgid); | ||
2063 | return vfs_readlink(dentry,buffer,buflen,tmp); | 2103 | return vfs_readlink(dentry,buffer,buflen,tmp); |
2064 | } | 2104 | } |
2065 | 2105 | ||
2066 | static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) | 2106 | static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) |
2067 | { | 2107 | { |
2108 | struct pid_namespace *ns = dentry->d_sb->s_fs_info; | ||
2109 | pid_t tgid = task_tgid_nr_ns(current, ns); | ||
2068 | char tmp[PROC_NUMBUF]; | 2110 | char tmp[PROC_NUMBUF]; |
2069 | sprintf(tmp, "%d", task_tgid_vnr(current)); | 2111 | if (!tgid) |
2112 | return ERR_PTR(-ENOENT); | ||
2113 | sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); | ||
2070 | return ERR_PTR(vfs_follow_link(nd,tmp)); | 2114 | return ERR_PTR(vfs_follow_link(nd,tmp)); |
2071 | } | 2115 | } |
2072 | 2116 | ||
@@ -2231,14 +2275,14 @@ static const struct pid_entry tgid_base_stuff[] = { | |||
2231 | DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), | 2275 | DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), |
2232 | REG("environ", S_IRUSR, environ), | 2276 | REG("environ", S_IRUSR, environ), |
2233 | INF("auxv", S_IRUSR, pid_auxv), | 2277 | INF("auxv", S_IRUSR, pid_auxv), |
2234 | INF("status", S_IRUGO, pid_status), | 2278 | ONE("status", S_IRUGO, pid_status), |
2235 | INF("limits", S_IRUSR, pid_limits), | 2279 | INF("limits", S_IRUSR, pid_limits), |
2236 | #ifdef CONFIG_SCHED_DEBUG | 2280 | #ifdef CONFIG_SCHED_DEBUG |
2237 | REG("sched", S_IRUGO|S_IWUSR, pid_sched), | 2281 | REG("sched", S_IRUGO|S_IWUSR, pid_sched), |
2238 | #endif | 2282 | #endif |
2239 | INF("cmdline", S_IRUGO, pid_cmdline), | 2283 | INF("cmdline", S_IRUGO, pid_cmdline), |
2240 | INF("stat", S_IRUGO, tgid_stat), | 2284 | ONE("stat", S_IRUGO, tgid_stat), |
2241 | INF("statm", S_IRUGO, pid_statm), | 2285 | ONE("statm", S_IRUGO, pid_statm), |
2242 | REG("maps", S_IRUGO, maps), | 2286 | REG("maps", S_IRUGO, maps), |
2243 | #ifdef CONFIG_NUMA | 2287 | #ifdef CONFIG_NUMA |
2244 | REG("numa_maps", S_IRUGO, numa_maps), | 2288 | REG("numa_maps", S_IRUGO, numa_maps), |
@@ -2562,14 +2606,14 @@ static const struct pid_entry tid_base_stuff[] = { | |||
2562 | DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), | 2606 | DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), |
2563 | REG("environ", S_IRUSR, environ), | 2607 | REG("environ", S_IRUSR, environ), |
2564 | INF("auxv", S_IRUSR, pid_auxv), | 2608 | INF("auxv", S_IRUSR, pid_auxv), |
2565 | INF("status", S_IRUGO, pid_status), | 2609 | ONE("status", S_IRUGO, pid_status), |
2566 | INF("limits", S_IRUSR, pid_limits), | 2610 | INF("limits", S_IRUSR, pid_limits), |
2567 | #ifdef CONFIG_SCHED_DEBUG | 2611 | #ifdef CONFIG_SCHED_DEBUG |
2568 | REG("sched", S_IRUGO|S_IWUSR, pid_sched), | 2612 | REG("sched", S_IRUGO|S_IWUSR, pid_sched), |
2569 | #endif | 2613 | #endif |
2570 | INF("cmdline", S_IRUGO, pid_cmdline), | 2614 | INF("cmdline", S_IRUGO, pid_cmdline), |
2571 | INF("stat", S_IRUGO, tid_stat), | 2615 | ONE("stat", S_IRUGO, tid_stat), |
2572 | INF("statm", S_IRUGO, pid_statm), | 2616 | ONE("statm", S_IRUGO, pid_statm), |
2573 | REG("maps", S_IRUGO, maps), | 2617 | REG("maps", S_IRUGO, maps), |
2574 | #ifdef CONFIG_NUMA | 2618 | #ifdef CONFIG_NUMA |
2575 | REG("numa_maps", S_IRUGO, numa_maps), | 2619 | REG("numa_maps", S_IRUGO, numa_maps), |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 6a2fe5187b62..68971e66cd41 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -25,12 +25,6 @@ | |||
25 | 25 | ||
26 | #include "internal.h" | 26 | #include "internal.h" |
27 | 27 | ||
28 | static ssize_t proc_file_read(struct file *file, char __user *buf, | ||
29 | size_t nbytes, loff_t *ppos); | ||
30 | static ssize_t proc_file_write(struct file *file, const char __user *buffer, | ||
31 | size_t count, loff_t *ppos); | ||
32 | static loff_t proc_file_lseek(struct file *, loff_t, int); | ||
33 | |||
34 | DEFINE_SPINLOCK(proc_subdir_lock); | 28 | DEFINE_SPINLOCK(proc_subdir_lock); |
35 | 29 | ||
36 | static int proc_match(int len, const char *name, struct proc_dir_entry *de) | 30 | static int proc_match(int len, const char *name, struct proc_dir_entry *de) |
@@ -40,12 +34,6 @@ static int proc_match(int len, const char *name, struct proc_dir_entry *de) | |||
40 | return !memcmp(name, de->name, len); | 34 | return !memcmp(name, de->name, len); |
41 | } | 35 | } |
42 | 36 | ||
43 | static const struct file_operations proc_file_operations = { | ||
44 | .llseek = proc_file_lseek, | ||
45 | .read = proc_file_read, | ||
46 | .write = proc_file_write, | ||
47 | }; | ||
48 | |||
49 | /* buffer size is one page but our output routines use some slack for overruns */ | 37 | /* buffer size is one page but our output routines use some slack for overruns */ |
50 | #define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) | 38 | #define PROC_BLOCK_SIZE (PAGE_SIZE - 1024) |
51 | 39 | ||
@@ -233,6 +221,12 @@ proc_file_lseek(struct file *file, loff_t offset, int orig) | |||
233 | return retval; | 221 | return retval; |
234 | } | 222 | } |
235 | 223 | ||
224 | static const struct file_operations proc_file_operations = { | ||
225 | .llseek = proc_file_lseek, | ||
226 | .read = proc_file_read, | ||
227 | .write = proc_file_write, | ||
228 | }; | ||
229 | |||
236 | static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) | 230 | static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) |
237 | { | 231 | { |
238 | struct inode *inode = dentry->d_inode; | 232 | struct inode *inode = dentry->d_inode; |
@@ -406,12 +400,12 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam | |||
406 | spin_unlock(&proc_subdir_lock); | 400 | spin_unlock(&proc_subdir_lock); |
407 | error = -EINVAL; | 401 | error = -EINVAL; |
408 | inode = proc_get_inode(dir->i_sb, ino, de); | 402 | inode = proc_get_inode(dir->i_sb, ino, de); |
409 | spin_lock(&proc_subdir_lock); | 403 | goto out_unlock; |
410 | break; | ||
411 | } | 404 | } |
412 | } | 405 | } |
413 | } | 406 | } |
414 | spin_unlock(&proc_subdir_lock); | 407 | spin_unlock(&proc_subdir_lock); |
408 | out_unlock: | ||
415 | unlock_kernel(); | 409 | unlock_kernel(); |
416 | 410 | ||
417 | if (inode) { | 411 | if (inode) { |
@@ -527,6 +521,7 @@ static const struct inode_operations proc_dir_inode_operations = { | |||
527 | static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) | 521 | static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp) |
528 | { | 522 | { |
529 | unsigned int i; | 523 | unsigned int i; |
524 | struct proc_dir_entry *tmp; | ||
530 | 525 | ||
531 | i = get_inode_number(); | 526 | i = get_inode_number(); |
532 | if (i == 0) | 527 | if (i == 0) |
@@ -550,6 +545,15 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp | |||
550 | } | 545 | } |
551 | 546 | ||
552 | spin_lock(&proc_subdir_lock); | 547 | spin_lock(&proc_subdir_lock); |
548 | |||
549 | for (tmp = dir->subdir; tmp; tmp = tmp->next) | ||
550 | if (strcmp(tmp->name, dp->name) == 0) { | ||
551 | printk(KERN_WARNING "proc_dir_entry '%s' already " | ||
552 | "registered\n", dp->name); | ||
553 | dump_stack(); | ||
554 | break; | ||
555 | } | ||
556 | |||
553 | dp->next = dir->subdir; | 557 | dp->next = dir->subdir; |
554 | dp->parent = dir; | 558 | dp->parent = dir; |
555 | dir->subdir = dp; | 559 | dir->subdir = dp; |
@@ -558,7 +562,7 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp | |||
558 | return 0; | 562 | return 0; |
559 | } | 563 | } |
560 | 564 | ||
561 | static struct proc_dir_entry *proc_create(struct proc_dir_entry **parent, | 565 | static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, |
562 | const char *name, | 566 | const char *name, |
563 | mode_t mode, | 567 | mode_t mode, |
564 | nlink_t nlink) | 568 | nlink_t nlink) |
@@ -601,7 +605,7 @@ struct proc_dir_entry *proc_symlink(const char *name, | |||
601 | { | 605 | { |
602 | struct proc_dir_entry *ent; | 606 | struct proc_dir_entry *ent; |
603 | 607 | ||
604 | ent = proc_create(&parent,name, | 608 | ent = __proc_create(&parent, name, |
605 | (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); | 609 | (S_IFLNK | S_IRUGO | S_IWUGO | S_IXUGO),1); |
606 | 610 | ||
607 | if (ent) { | 611 | if (ent) { |
@@ -626,7 +630,7 @@ struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, | |||
626 | { | 630 | { |
627 | struct proc_dir_entry *ent; | 631 | struct proc_dir_entry *ent; |
628 | 632 | ||
629 | ent = proc_create(&parent, name, S_IFDIR | mode, 2); | 633 | ent = __proc_create(&parent, name, S_IFDIR | mode, 2); |
630 | if (ent) { | 634 | if (ent) { |
631 | if (proc_register(parent, ent) < 0) { | 635 | if (proc_register(parent, ent) < 0) { |
632 | kfree(ent); | 636 | kfree(ent); |
@@ -660,7 +664,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | |||
660 | nlink = 1; | 664 | nlink = 1; |
661 | } | 665 | } |
662 | 666 | ||
663 | ent = proc_create(&parent,name,mode,nlink); | 667 | ent = __proc_create(&parent, name, mode, nlink); |
664 | if (ent) { | 668 | if (ent) { |
665 | if (proc_register(parent, ent) < 0) { | 669 | if (proc_register(parent, ent) < 0) { |
666 | kfree(ent); | 670 | kfree(ent); |
@@ -670,6 +674,38 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | |||
670 | return ent; | 674 | return ent; |
671 | } | 675 | } |
672 | 676 | ||
677 | struct proc_dir_entry *proc_create(const char *name, mode_t mode, | ||
678 | struct proc_dir_entry *parent, | ||
679 | const struct file_operations *proc_fops) | ||
680 | { | ||
681 | struct proc_dir_entry *pde; | ||
682 | nlink_t nlink; | ||
683 | |||
684 | if (S_ISDIR(mode)) { | ||
685 | if ((mode & S_IALLUGO) == 0) | ||
686 | mode |= S_IRUGO | S_IXUGO; | ||
687 | nlink = 2; | ||
688 | } else { | ||
689 | if ((mode & S_IFMT) == 0) | ||
690 | mode |= S_IFREG; | ||
691 | if ((mode & S_IALLUGO) == 0) | ||
692 | mode |= S_IRUGO; | ||
693 | nlink = 1; | ||
694 | } | ||
695 | |||
696 | pde = __proc_create(&parent, name, mode, nlink); | ||
697 | if (!pde) | ||
698 | goto out; | ||
699 | pde->proc_fops = proc_fops; | ||
700 | if (proc_register(parent, pde) < 0) | ||
701 | goto out_free; | ||
702 | return pde; | ||
703 | out_free: | ||
704 | kfree(pde); | ||
705 | out: | ||
706 | return NULL; | ||
707 | } | ||
708 | |||
673 | void free_proc_entry(struct proc_dir_entry *de) | 709 | void free_proc_entry(struct proc_dir_entry *de) |
674 | { | 710 | { |
675 | unsigned int ino = de->low_ino; | 711 | unsigned int ino = de->low_ino; |
@@ -679,7 +715,7 @@ void free_proc_entry(struct proc_dir_entry *de) | |||
679 | 715 | ||
680 | release_inode_number(ino); | 716 | release_inode_number(ino); |
681 | 717 | ||
682 | if (S_ISLNK(de->mode) && de->data) | 718 | if (S_ISLNK(de->mode)) |
683 | kfree(de->data); | 719 | kfree(de->data); |
684 | kfree(de); | 720 | kfree(de); |
685 | } | 721 | } |
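The generic.c hunks above rename the internal allocator to __proc_create() and add a public proc_create(name, mode, parent, fops) that fills in sensible default modes, attaches the file_operations and registers the entry in one step, so callers no longer poke ->proc_fops into the result of create_proc_entry() by hand. A hedged usage sketch built on the seq_file single_open() helpers (the same combination used by proc_single_file_operations in base.c above) follows; the "example" entry is invented.

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "hello from procfs\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
        .open           = example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init example_proc_init(void)
{
        /* one call allocates, wires up example_fops and registers the entry */
        if (!proc_create("example", S_IRUGO, NULL, &example_fops))
                return -ENOMEM;
        return 0;
}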
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 1a551d92e1d8..82b3a1b5a70b 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -73,11 +73,6 @@ static void proc_delete_inode(struct inode *inode) | |||
73 | 73 | ||
74 | struct vfsmount *proc_mnt; | 74 | struct vfsmount *proc_mnt; |
75 | 75 | ||
76 | static void proc_read_inode(struct inode * inode) | ||
77 | { | ||
78 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | ||
79 | } | ||
80 | |||
81 | static struct kmem_cache * proc_inode_cachep; | 76 | static struct kmem_cache * proc_inode_cachep; |
82 | 77 | ||
83 | static struct inode *proc_alloc_inode(struct super_block *sb) | 78 | static struct inode *proc_alloc_inode(struct super_block *sb) |
@@ -128,7 +123,6 @@ static int proc_remount(struct super_block *sb, int *flags, char *data) | |||
128 | static const struct super_operations proc_sops = { | 123 | static const struct super_operations proc_sops = { |
129 | .alloc_inode = proc_alloc_inode, | 124 | .alloc_inode = proc_alloc_inode, |
130 | .destroy_inode = proc_destroy_inode, | 125 | .destroy_inode = proc_destroy_inode, |
131 | .read_inode = proc_read_inode, | ||
132 | .drop_inode = generic_delete_inode, | 126 | .drop_inode = generic_delete_inode, |
133 | .delete_inode = proc_delete_inode, | 127 | .delete_inode = proc_delete_inode, |
134 | .statfs = simple_statfs, | 128 | .statfs = simple_statfs, |
@@ -401,39 +395,41 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, | |||
401 | if (de != NULL && !try_module_get(de->owner)) | 395 | if (de != NULL && !try_module_get(de->owner)) |
402 | goto out_mod; | 396 | goto out_mod; |
403 | 397 | ||
404 | inode = iget(sb, ino); | 398 | inode = iget_locked(sb, ino); |
405 | if (!inode) | 399 | if (!inode) |
406 | goto out_ino; | 400 | goto out_ino; |
407 | 401 | if (inode->i_state & I_NEW) { | |
408 | PROC_I(inode)->fd = 0; | 402 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
409 | PROC_I(inode)->pde = de; | 403 | PROC_I(inode)->fd = 0; |
410 | if (de) { | 404 | PROC_I(inode)->pde = de; |
411 | if (de->mode) { | 405 | if (de) { |
412 | inode->i_mode = de->mode; | 406 | if (de->mode) { |
413 | inode->i_uid = de->uid; | 407 | inode->i_mode = de->mode; |
414 | inode->i_gid = de->gid; | 408 | inode->i_uid = de->uid; |
415 | } | 409 | inode->i_gid = de->gid; |
416 | if (de->size) | 410 | } |
417 | inode->i_size = de->size; | 411 | if (de->size) |
418 | if (de->nlink) | 412 | inode->i_size = de->size; |
419 | inode->i_nlink = de->nlink; | 413 | if (de->nlink) |
420 | if (de->proc_iops) | 414 | inode->i_nlink = de->nlink; |
421 | inode->i_op = de->proc_iops; | 415 | if (de->proc_iops) |
422 | if (de->proc_fops) { | 416 | inode->i_op = de->proc_iops; |
423 | if (S_ISREG(inode->i_mode)) { | 417 | if (de->proc_fops) { |
418 | if (S_ISREG(inode->i_mode)) { | ||
424 | #ifdef CONFIG_COMPAT | 419 | #ifdef CONFIG_COMPAT |
425 | if (!de->proc_fops->compat_ioctl) | 420 | if (!de->proc_fops->compat_ioctl) |
426 | inode->i_fop = | 421 | inode->i_fop = |
427 | &proc_reg_file_ops_no_compat; | 422 | &proc_reg_file_ops_no_compat; |
428 | else | 423 | else |
429 | #endif | 424 | #endif |
430 | inode->i_fop = &proc_reg_file_ops; | 425 | inode->i_fop = &proc_reg_file_ops; |
426 | } else { | ||
427 | inode->i_fop = de->proc_fops; | ||
428 | } | ||
431 | } | 429 | } |
432 | else | ||
433 | inode->i_fop = de->proc_fops; | ||
434 | } | 430 | } |
431 | unlock_new_inode(inode); | ||
435 | } | 432 | } |
436 | |||
437 | return inode; | 433 | return inode; |
438 | 434 | ||
439 | out_ino: | 435 | out_ino: |
@@ -471,4 +467,3 @@ out_no_root: | |||
471 | de_put(&proc_root); | 467 | de_put(&proc_root); |
472 | return -ENOMEM; | 468 | return -ENOMEM; |
473 | } | 469 | } |
474 | MODULE_LICENSE("GPL"); | ||
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 7d57e8069924..1c81c8f1aeed 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -46,12 +46,17 @@ extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *); | |||
46 | 46 | ||
47 | extern int maps_protect; | 47 | extern int maps_protect; |
48 | 48 | ||
49 | extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f); | 49 | extern void create_seq_entry(char *name, mode_t mode, |
50 | extern int proc_exe_link(struct inode *, struct dentry **, struct vfsmount **); | 50 | const struct file_operations *f); |
51 | extern int proc_tid_stat(struct task_struct *, char *); | 51 | extern int proc_exe_link(struct inode *, struct path *); |
52 | extern int proc_tgid_stat(struct task_struct *, char *); | 52 | extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, |
53 | extern int proc_pid_status(struct task_struct *, char *); | 53 | struct pid *pid, struct task_struct *task); |
54 | extern int proc_pid_statm(struct task_struct *, char *); | 54 | extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, |
55 | struct pid *pid, struct task_struct *task); | ||
56 | extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | ||
57 | struct pid *pid, struct task_struct *task); | ||
58 | extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, | ||
59 | struct pid *pid, struct task_struct *task); | ||
55 | extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); | 60 | extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); |
56 | 61 | ||
57 | extern const struct file_operations proc_maps_operations; | 62 | extern const struct file_operations proc_maps_operations; |
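
The internal.h changes switch the per-task status/stat/statm generators from "fill this page buffer and return a length" to the seq_file interface, with the pid namespace and struct pid passed explicitly. A hedged sketch of a routine with the new prototype; the fields printed are illustrative, not the real /proc output:

#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/pid.h>

/* illustrative per-task show routine using the new prototype */
static int example_pid_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *task)
{
        /* seq_printf() replaces sprintf()-into-a-page plus manual length
         * accounting; the pid is reported relative to the given namespace */
        seq_printf(m, "Name:\t%s\nPid:\t%d\n",
                   task->comm, pid_nr_ns(pid, ns));
        return 0;
}
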
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 7dd26e18cbfd..e78c81fcf547 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
14 | #include <linux/user.h> | 14 | #include <linux/user.h> |
15 | #include <linux/a.out.h> | ||
16 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
17 | #include <linux/elf.h> | 16 | #include <linux/elf.h> |
18 | #include <linux/elfcore.h> | 17 | #include <linux/elfcore.h> |
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 22f789de3909..941e95114b5a 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -67,7 +67,7 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
67 | if (len < 1) | 67 | if (len < 1) |
68 | len = 1; | 68 | len = 1; |
69 | seq_printf(m, "%*c", len, ' '); | 69 | seq_printf(m, "%*c", len, ' '); |
70 | seq_path(m, file->f_path.mnt, file->f_path.dentry, ""); | 70 | seq_path(m, &file->f_path, ""); |
71 | } | 71 | } |
72 | 72 | ||
73 | seq_putc(m, '\n'); | 73 | seq_putc(m, '\n'); |
@@ -116,7 +116,7 @@ static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos) | |||
116 | return rb_next((struct rb_node *) v); | 116 | return rb_next((struct rb_node *) v); |
117 | } | 117 | } |
118 | 118 | ||
119 | static struct seq_operations proc_nommu_vma_list_seqop = { | 119 | static const struct seq_operations proc_nommu_vma_list_seqop = { |
120 | .start = nommu_vma_list_start, | 120 | .start = nommu_vma_list_start, |
121 | .next = nommu_vma_list_next, | 121 | .next = nommu_vma_list_next, |
122 | .stop = nommu_vma_list_stop, | 122 | .stop = nommu_vma_list_stop, |
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 51288db37a0c..468805d40e2b 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/mm.h> | 29 | #include <linux/mm.h> |
30 | #include <linux/mmzone.h> | 30 | #include <linux/mmzone.h> |
31 | #include <linux/pagemap.h> | 31 | #include <linux/pagemap.h> |
32 | #include <linux/interrupt.h> | ||
32 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
34 | #include <linux/smp.h> | 35 | #include <linux/smp.h> |
@@ -64,7 +65,6 @@ | |||
64 | */ | 65 | */ |
65 | extern int get_hardware_list(char *); | 66 | extern int get_hardware_list(char *); |
66 | extern int get_stram_list(char *); | 67 | extern int get_stram_list(char *); |
67 | extern int get_filesystem_list(char *); | ||
68 | extern int get_exec_domain_list(char *); | 68 | extern int get_exec_domain_list(char *); |
69 | extern int get_dma_list(char *); | 69 | extern int get_dma_list(char *); |
70 | 70 | ||
@@ -84,10 +84,15 @@ static int loadavg_read_proc(char *page, char **start, off_t off, | |||
84 | { | 84 | { |
85 | int a, b, c; | 85 | int a, b, c; |
86 | int len; | 86 | int len; |
87 | unsigned long seq; | ||
88 | |||
89 | do { | ||
90 | seq = read_seqbegin(&xtime_lock); | ||
91 | a = avenrun[0] + (FIXED_1/200); | ||
92 | b = avenrun[1] + (FIXED_1/200); | ||
93 | c = avenrun[2] + (FIXED_1/200); | ||
94 | } while (read_seqretry(&xtime_lock, seq)); | ||
87 | 95 | ||
88 | a = avenrun[0] + (FIXED_1/200); | ||
89 | b = avenrun[1] + (FIXED_1/200); | ||
90 | c = avenrun[2] + (FIXED_1/200); | ||
91 | len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", | 96 | len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n", |
92 | LOAD_INT(a), LOAD_FRAC(a), | 97 | LOAD_INT(a), LOAD_FRAC(a), |
93 | LOAD_INT(b), LOAD_FRAC(b), | 98 | LOAD_INT(b), LOAD_FRAC(b), |
@@ -217,7 +222,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, | |||
217 | #undef K | 222 | #undef K |
218 | } | 223 | } |
219 | 224 | ||
220 | extern struct seq_operations fragmentation_op; | 225 | extern const struct seq_operations fragmentation_op; |
221 | static int fragmentation_open(struct inode *inode, struct file *file) | 226 | static int fragmentation_open(struct inode *inode, struct file *file) |
222 | { | 227 | { |
223 | (void)inode; | 228 | (void)inode; |
@@ -231,7 +236,7 @@ static const struct file_operations fragmentation_file_operations = { | |||
231 | .release = seq_release, | 236 | .release = seq_release, |
232 | }; | 237 | }; |
233 | 238 | ||
234 | extern struct seq_operations pagetypeinfo_op; | 239 | extern const struct seq_operations pagetypeinfo_op; |
235 | static int pagetypeinfo_open(struct inode *inode, struct file *file) | 240 | static int pagetypeinfo_open(struct inode *inode, struct file *file) |
236 | { | 241 | { |
237 | return seq_open(file, &pagetypeinfo_op); | 242 | return seq_open(file, &pagetypeinfo_op); |
@@ -244,7 +249,7 @@ static const struct file_operations pagetypeinfo_file_ops = { | |||
244 | .release = seq_release, | 249 | .release = seq_release, |
245 | }; | 250 | }; |
246 | 251 | ||
247 | extern struct seq_operations zoneinfo_op; | 252 | extern const struct seq_operations zoneinfo_op; |
248 | static int zoneinfo_open(struct inode *inode, struct file *file) | 253 | static int zoneinfo_open(struct inode *inode, struct file *file) |
249 | { | 254 | { |
250 | return seq_open(file, &zoneinfo_op); | 255 | return seq_open(file, &zoneinfo_op); |
@@ -269,7 +274,7 @@ static int version_read_proc(char *page, char **start, off_t off, | |||
269 | return proc_calc_metrics(page, start, off, count, eof, len); | 274 | return proc_calc_metrics(page, start, off, count, eof, len); |
270 | } | 275 | } |
271 | 276 | ||
272 | extern struct seq_operations cpuinfo_op; | 277 | extern const struct seq_operations cpuinfo_op; |
273 | static int cpuinfo_open(struct inode *inode, struct file *file) | 278 | static int cpuinfo_open(struct inode *inode, struct file *file) |
274 | { | 279 | { |
275 | return seq_open(file, &cpuinfo_op); | 280 | return seq_open(file, &cpuinfo_op); |
@@ -322,7 +327,7 @@ static void devinfo_stop(struct seq_file *f, void *v) | |||
322 | /* Nothing to do */ | 327 | /* Nothing to do */ |
323 | } | 328 | } |
324 | 329 | ||
325 | static struct seq_operations devinfo_ops = { | 330 | static const struct seq_operations devinfo_ops = { |
326 | .start = devinfo_start, | 331 | .start = devinfo_start, |
327 | .next = devinfo_next, | 332 | .next = devinfo_next, |
328 | .stop = devinfo_stop, | 333 | .stop = devinfo_stop, |
@@ -341,7 +346,7 @@ static const struct file_operations proc_devinfo_operations = { | |||
341 | .release = seq_release, | 346 | .release = seq_release, |
342 | }; | 347 | }; |
343 | 348 | ||
344 | extern struct seq_operations vmstat_op; | 349 | extern const struct seq_operations vmstat_op; |
345 | static int vmstat_open(struct inode *inode, struct file *file) | 350 | static int vmstat_open(struct inode *inode, struct file *file) |
346 | { | 351 | { |
347 | return seq_open(file, &vmstat_op); | 352 | return seq_open(file, &vmstat_op); |
@@ -372,7 +377,7 @@ static int stram_read_proc(char *page, char **start, off_t off, | |||
372 | #endif | 377 | #endif |
373 | 378 | ||
374 | #ifdef CONFIG_BLOCK | 379 | #ifdef CONFIG_BLOCK |
375 | extern struct seq_operations partitions_op; | 380 | extern const struct seq_operations partitions_op; |
376 | static int partitions_open(struct inode *inode, struct file *file) | 381 | static int partitions_open(struct inode *inode, struct file *file) |
377 | { | 382 | { |
378 | return seq_open(file, &partitions_op); | 383 | return seq_open(file, &partitions_op); |
@@ -384,7 +389,7 @@ static const struct file_operations proc_partitions_operations = { | |||
384 | .release = seq_release, | 389 | .release = seq_release, |
385 | }; | 390 | }; |
386 | 391 | ||
387 | extern struct seq_operations diskstats_op; | 392 | extern const struct seq_operations diskstats_op; |
388 | static int diskstats_open(struct inode *inode, struct file *file) | 393 | static int diskstats_open(struct inode *inode, struct file *file) |
389 | { | 394 | { |
390 | return seq_open(file, &diskstats_op); | 395 | return seq_open(file, &diskstats_op); |
@@ -398,7 +403,7 @@ static const struct file_operations proc_diskstats_operations = { | |||
398 | #endif | 403 | #endif |
399 | 404 | ||
400 | #ifdef CONFIG_MODULES | 405 | #ifdef CONFIG_MODULES |
401 | extern struct seq_operations modules_op; | 406 | extern const struct seq_operations modules_op; |
402 | static int modules_open(struct inode *inode, struct file *file) | 407 | static int modules_open(struct inode *inode, struct file *file) |
403 | { | 408 | { |
404 | return seq_open(file, &modules_op); | 409 | return seq_open(file, &modules_op); |
@@ -425,7 +430,7 @@ static const struct file_operations proc_slabinfo_operations = { | |||
425 | }; | 430 | }; |
426 | 431 | ||
427 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 432 | #ifdef CONFIG_DEBUG_SLAB_LEAK |
428 | extern struct seq_operations slabstats_op; | 433 | extern const struct seq_operations slabstats_op; |
429 | static int slabstats_open(struct inode *inode, struct file *file) | 434 | static int slabstats_open(struct inode *inode, struct file *file) |
430 | { | 435 | { |
431 | unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); | 436 | unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); |
@@ -599,8 +604,7 @@ static void int_seq_stop(struct seq_file *f, void *v) | |||
599 | } | 604 | } |
600 | 605 | ||
601 | 606 | ||
602 | extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */ | 607 | static const struct seq_operations int_seq_ops = { |
603 | static struct seq_operations int_seq_ops = { | ||
604 | .start = int_seq_start, | 608 | .start = int_seq_start, |
605 | .next = int_seq_next, | 609 | .next = int_seq_next, |
606 | .stop = int_seq_stop, | 610 | .stop = int_seq_stop, |
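
The loadavg_read_proc() change samples avenrun[] inside a seqlock read section so all three averages come from one consistent update. The reader side of that pattern in isolation, as a hedged sketch (the helper name is invented):

#include <linux/seqlock.h>
#include <linux/time.h>         /* xtime_lock */
#include <linux/sched.h>        /* avenrun[], FIXED_1 */

/* invented helper: take a consistent snapshot of the load averages */
static void snapshot_loadavg(int out[3])
{
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);       /* open read section */
                out[0] = avenrun[0] + (FIXED_1 / 200);
                out[1] = avenrun[1] + (FIXED_1 / 200);
                out[2] = avenrun[2] + (FIXED_1 / 200);
                /* a writer got in between: discard the values and retry */
        } while (read_seqretry(&xtime_lock, seq));
}
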
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 4823c9677fac..14e9b5aaf863 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -67,12 +67,7 @@ EXPORT_SYMBOL_GPL(seq_release_net); | |||
67 | struct proc_dir_entry *proc_net_fops_create(struct net *net, | 67 | struct proc_dir_entry *proc_net_fops_create(struct net *net, |
68 | const char *name, mode_t mode, const struct file_operations *fops) | 68 | const char *name, mode_t mode, const struct file_operations *fops) |
69 | { | 69 | { |
70 | struct proc_dir_entry *res; | 70 | return proc_create(name, mode, net->proc_net, fops); |
71 | |||
72 | res = create_proc_entry(name, mode, net->proc_net); | ||
73 | if (res) | ||
74 | res->proc_fops = fops; | ||
75 | return res; | ||
76 | } | 71 | } |
77 | EXPORT_SYMBOL_GPL(proc_net_fops_create); | 72 | EXPORT_SYMBOL_GPL(proc_net_fops_create); |
78 | 73 | ||
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 4e57fcf85982..614c34b6d1c2 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | static struct dentry_operations proc_sys_dentry_operations; | 10 | static struct dentry_operations proc_sys_dentry_operations; |
11 | static const struct file_operations proc_sys_file_operations; | 11 | static const struct file_operations proc_sys_file_operations; |
12 | static struct inode_operations proc_sys_inode_operations; | 12 | static const struct inode_operations proc_sys_inode_operations; |
13 | 13 | ||
14 | static void proc_sys_refresh_inode(struct inode *inode, struct ctl_table *table) | 14 | static void proc_sys_refresh_inode(struct inode *inode, struct ctl_table *table) |
15 | { | 15 | { |
@@ -407,7 +407,7 @@ static int proc_sys_permission(struct inode *inode, int mask, struct nameidata * | |||
407 | if (!nd || !depth) | 407 | if (!nd || !depth) |
408 | goto out; | 408 | goto out; |
409 | 409 | ||
410 | dentry = nd->dentry; | 410 | dentry = nd->path.dentry; |
411 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); | 411 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); |
412 | 412 | ||
413 | /* If the entry does not exist deny permission */ | 413 | /* If the entry does not exist deny permission */ |
@@ -446,7 +446,7 @@ static const struct file_operations proc_sys_file_operations = { | |||
446 | .readdir = proc_sys_readdir, | 446 | .readdir = proc_sys_readdir, |
447 | }; | 447 | }; |
448 | 448 | ||
449 | static struct inode_operations proc_sys_inode_operations = { | 449 | static const struct inode_operations proc_sys_inode_operations = { |
450 | .lookup = proc_sys_lookup, | 450 | .lookup = proc_sys_lookup, |
451 | .permission = proc_sys_permission, | 451 | .permission = proc_sys_permission, |
452 | .setattr = proc_sys_setattr, | 452 | .setattr = proc_sys_setattr, |
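
The nd->path.dentry switch in proc_sys_permission() is one instance of the tree-wide nameidata conversion in this merge: the dentry and vfsmount now live in an embedded struct path, and path_put() takes over from path_release() and from open-coded dput()/mntput() pairs. A hedged sketch of the new calling convention, with an invented helper:

#include <linux/namei.h>
#include <linux/path.h>
#include <linux/fs.h>
#include <linux/err.h>

/* invented helper: look a name up and return a pinned inode */
static struct inode *example_name_to_inode(const char *name)
{
        struct nameidata nd;
        struct inode *inode;
        int err;

        err = path_lookup(name, LOOKUP_FOLLOW, &nd);
        if (err)
                return ERR_PTR(err);

        /* was nd.dentry / nd.mnt, now a single embedded struct path */
        inode = igrab(nd.path.dentry->d_inode);

        /* was path_release(&nd), now a struct path primitive */
        path_put(&nd.path);

        return inode ? inode : ERR_PTR(-ENOENT);
}
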
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c index 22846225acfa..49816e00b51a 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | 17 | ||
18 | static int tty_ldiscs_read_proc(char *page, char **start, off_t off, | ||
19 | int count, int *eof, void *data); | ||
20 | |||
21 | /* | 18 | /* |
22 | * The /proc/tty directory inodes... | 19 | * The /proc/tty directory inodes... |
23 | */ | 20 | */ |
@@ -120,7 +117,7 @@ static void t_stop(struct seq_file *m, void *v) | |||
120 | mutex_unlock(&tty_mutex); | 117 | mutex_unlock(&tty_mutex); |
121 | } | 118 | } |
122 | 119 | ||
123 | static struct seq_operations tty_drivers_op = { | 120 | static const struct seq_operations tty_drivers_op = { |
124 | .start = t_start, | 121 | .start = t_start, |
125 | .next = t_next, | 122 | .next = t_next, |
126 | .stop = t_stop, | 123 | .stop = t_stop, |
diff --git a/fs/proc/root.c b/fs/proc/root.c index 81f99e691f99..ef0fb57fc9ef 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -232,6 +232,7 @@ void pid_ns_release_proc(struct pid_namespace *ns) | |||
232 | EXPORT_SYMBOL(proc_symlink); | 232 | EXPORT_SYMBOL(proc_symlink); |
233 | EXPORT_SYMBOL(proc_mkdir); | 233 | EXPORT_SYMBOL(proc_mkdir); |
234 | EXPORT_SYMBOL(create_proc_entry); | 234 | EXPORT_SYMBOL(create_proc_entry); |
235 | EXPORT_SYMBOL(proc_create); | ||
235 | EXPORT_SYMBOL(remove_proc_entry); | 236 | EXPORT_SYMBOL(remove_proc_entry); |
236 | EXPORT_SYMBOL(proc_root); | 237 | EXPORT_SYMBOL(proc_root); |
237 | EXPORT_SYMBOL(proc_root_fs); | 238 | EXPORT_SYMBOL(proc_root_fs); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 38338ed98cc6..49958cffbd8d 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -9,13 +9,14 @@ | |||
9 | #include <linux/mempolicy.h> | 9 | #include <linux/mempolicy.h> |
10 | #include <linux/swap.h> | 10 | #include <linux/swap.h> |
11 | #include <linux/swapops.h> | 11 | #include <linux/swapops.h> |
12 | #include <linux/seq_file.h> | ||
12 | 13 | ||
13 | #include <asm/elf.h> | 14 | #include <asm/elf.h> |
14 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
15 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
16 | #include "internal.h" | 17 | #include "internal.h" |
17 | 18 | ||
18 | char *task_mem(struct mm_struct *mm, char *buffer) | 19 | void task_mem(struct seq_file *m, struct mm_struct *mm) |
19 | { | 20 | { |
20 | unsigned long data, text, lib; | 21 | unsigned long data, text, lib; |
21 | unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; | 22 | unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; |
@@ -37,7 +38,7 @@ char *task_mem(struct mm_struct *mm, char *buffer) | |||
37 | data = mm->total_vm - mm->shared_vm - mm->stack_vm; | 38 | data = mm->total_vm - mm->shared_vm - mm->stack_vm; |
38 | text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; | 39 | text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; |
39 | lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; | 40 | lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; |
40 | buffer += sprintf(buffer, | 41 | seq_printf(m, |
41 | "VmPeak:\t%8lu kB\n" | 42 | "VmPeak:\t%8lu kB\n" |
42 | "VmSize:\t%8lu kB\n" | 43 | "VmSize:\t%8lu kB\n" |
43 | "VmLck:\t%8lu kB\n" | 44 | "VmLck:\t%8lu kB\n" |
@@ -56,7 +57,6 @@ char *task_mem(struct mm_struct *mm, char *buffer) | |||
56 | data << (PAGE_SHIFT-10), | 57 | data << (PAGE_SHIFT-10), |
57 | mm->stack_vm << (PAGE_SHIFT-10), text, lib, | 58 | mm->stack_vm << (PAGE_SHIFT-10), text, lib, |
58 | (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); | 59 | (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10); |
59 | return buffer; | ||
60 | } | 60 | } |
61 | 61 | ||
62 | unsigned long task_vsize(struct mm_struct *mm) | 62 | unsigned long task_vsize(struct mm_struct *mm) |
@@ -75,7 +75,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
75 | return mm->total_vm; | 75 | return mm->total_vm; |
76 | } | 76 | } |
77 | 77 | ||
78 | int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) | 78 | int proc_exe_link(struct inode *inode, struct path *path) |
79 | { | 79 | { |
80 | struct vm_area_struct * vma; | 80 | struct vm_area_struct * vma; |
81 | int result = -ENOENT; | 81 | int result = -ENOENT; |
@@ -98,8 +98,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount * | |||
98 | } | 98 | } |
99 | 99 | ||
100 | if (vma) { | 100 | if (vma) { |
101 | *mnt = mntget(vma->vm_file->f_path.mnt); | 101 | *path = vma->vm_file->f_path; |
102 | *dentry = dget(vma->vm_file->f_path.dentry); | 102 | path_get(&vma->vm_file->f_path); |
103 | result = 0; | 103 | result = 0; |
104 | } | 104 | } |
105 | 105 | ||
@@ -216,7 +216,7 @@ static void m_stop(struct seq_file *m, void *v) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | static int do_maps_open(struct inode *inode, struct file *file, | 218 | static int do_maps_open(struct inode *inode, struct file *file, |
219 | struct seq_operations *ops) | 219 | const struct seq_operations *ops) |
220 | { | 220 | { |
221 | struct proc_maps_private *priv; | 221 | struct proc_maps_private *priv; |
222 | int ret = -ENOMEM; | 222 | int ret = -ENOMEM; |
@@ -271,7 +271,7 @@ static int show_map(struct seq_file *m, void *v) | |||
271 | */ | 271 | */ |
272 | if (file) { | 272 | if (file) { |
273 | pad_len_spaces(m, len); | 273 | pad_len_spaces(m, len); |
274 | seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n"); | 274 | seq_path(m, &file->f_path, "\n"); |
275 | } else { | 275 | } else { |
276 | const char *name = arch_vma_name(vma); | 276 | const char *name = arch_vma_name(vma); |
277 | if (!name) { | 277 | if (!name) { |
@@ -299,7 +299,7 @@ static int show_map(struct seq_file *m, void *v) | |||
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
302 | static struct seq_operations proc_pid_maps_op = { | 302 | static const struct seq_operations proc_pid_maps_op = { |
303 | .start = m_start, | 303 | .start = m_start, |
304 | .next = m_next, | 304 | .next = m_next, |
305 | .stop = m_stop, | 305 | .stop = m_stop, |
@@ -434,7 +434,7 @@ static int show_smap(struct seq_file *m, void *v) | |||
434 | return ret; | 434 | return ret; |
435 | } | 435 | } |
436 | 436 | ||
437 | static struct seq_operations proc_pid_smaps_op = { | 437 | static const struct seq_operations proc_pid_smaps_op = { |
438 | .start = m_start, | 438 | .start = m_start, |
439 | .next = m_next, | 439 | .next = m_next, |
440 | .stop = m_stop, | 440 | .stop = m_stop, |
@@ -734,7 +734,7 @@ static int show_numa_map_checked(struct seq_file *m, void *v) | |||
734 | return show_numa_map(m, v); | 734 | return show_numa_map(m, v); |
735 | } | 735 | } |
736 | 736 | ||
737 | static struct seq_operations proc_pid_numa_maps_op = { | 737 | static const struct seq_operations proc_pid_numa_maps_op = { |
738 | .start = m_start, | 738 | .start = m_start, |
739 | .next = m_next, | 739 | .next = m_next, |
740 | .stop = m_stop, | 740 | .stop = m_stop, |
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 1932c2ca3457..8011528518bd 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -12,7 +12,7 @@ | |||
12 | * each process that owns it. Non-shared memory is counted | 12 | * each process that owns it. Non-shared memory is counted |
13 | * accurately. | 13 | * accurately. |
14 | */ | 14 | */ |
15 | char *task_mem(struct mm_struct *mm, char *buffer) | 15 | void task_mem(struct seq_file *m, struct mm_struct *mm) |
16 | { | 16 | { |
17 | struct vm_list_struct *vml; | 17 | struct vm_list_struct *vml; |
18 | unsigned long bytes = 0, sbytes = 0, slack = 0; | 18 | unsigned long bytes = 0, sbytes = 0, slack = 0; |
@@ -58,14 +58,13 @@ char *task_mem(struct mm_struct *mm, char *buffer) | |||
58 | 58 | ||
59 | bytes += kobjsize(current); /* includes kernel stack */ | 59 | bytes += kobjsize(current); /* includes kernel stack */ |
60 | 60 | ||
61 | buffer += sprintf(buffer, | 61 | seq_printf(m, |
62 | "Mem:\t%8lu bytes\n" | 62 | "Mem:\t%8lu bytes\n" |
63 | "Slack:\t%8lu bytes\n" | 63 | "Slack:\t%8lu bytes\n" |
64 | "Shared:\t%8lu bytes\n", | 64 | "Shared:\t%8lu bytes\n", |
65 | bytes, slack, sbytes); | 65 | bytes, slack, sbytes); |
66 | 66 | ||
67 | up_read(&mm->mmap_sem); | 67 | up_read(&mm->mmap_sem); |
68 | return buffer; | ||
69 | } | 68 | } |
70 | 69 | ||
71 | unsigned long task_vsize(struct mm_struct *mm) | 70 | unsigned long task_vsize(struct mm_struct *mm) |
@@ -104,7 +103,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
104 | return size; | 103 | return size; |
105 | } | 104 | } |
106 | 105 | ||
107 | int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) | 106 | int proc_exe_link(struct inode *inode, struct path *path) |
108 | { | 107 | { |
109 | struct vm_list_struct *vml; | 108 | struct vm_list_struct *vml; |
110 | struct vm_area_struct *vma; | 109 | struct vm_area_struct *vma; |
@@ -127,8 +126,8 @@ int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount * | |||
127 | } | 126 | } |
128 | 127 | ||
129 | if (vma) { | 128 | if (vma) { |
130 | *mnt = mntget(vma->vm_file->f_path.mnt); | 129 | *path = vma->vm_file->f_path; |
131 | *dentry = dget(vma->vm_file->f_path.dentry); | 130 | path_get(&vma->vm_file->f_path); |
132 | result = 0; | 131 | result = 0; |
133 | } | 132 | } |
134 | 133 | ||
@@ -199,7 +198,7 @@ static void *m_next(struct seq_file *m, void *_vml, loff_t *pos) | |||
199 | return vml ? vml->next : NULL; | 198 | return vml ? vml->next : NULL; |
200 | } | 199 | } |
201 | 200 | ||
202 | static struct seq_operations proc_pid_maps_ops = { | 201 | static const struct seq_operations proc_pid_maps_ops = { |
203 | .start = m_start, | 202 | .start = m_start, |
204 | .next = m_next, | 203 | .next = m_next, |
205 | .stop = m_stop, | 204 | .stop = m_stop, |
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 523e1098ae88..9ac0f5e064e0 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/user.h> | 12 | #include <linux/user.h> |
13 | #include <linux/a.out.h> | ||
14 | #include <linux/elf.h> | 13 | #include <linux/elf.h> |
15 | #include <linux/elfcore.h> | 14 | #include <linux/elfcore.h> |
16 | #include <linux/highmem.h> | 15 | #include <linux/highmem.h> |
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 638bdb963213..b31ab78052b3 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c | |||
@@ -125,7 +125,6 @@ static int qnx4_write_inode(struct inode *inode, int unused) | |||
125 | static void qnx4_put_super(struct super_block *sb); | 125 | static void qnx4_put_super(struct super_block *sb); |
126 | static struct inode *qnx4_alloc_inode(struct super_block *sb); | 126 | static struct inode *qnx4_alloc_inode(struct super_block *sb); |
127 | static void qnx4_destroy_inode(struct inode *inode); | 127 | static void qnx4_destroy_inode(struct inode *inode); |
128 | static void qnx4_read_inode(struct inode *); | ||
129 | static int qnx4_remount(struct super_block *sb, int *flags, char *data); | 128 | static int qnx4_remount(struct super_block *sb, int *flags, char *data); |
130 | static int qnx4_statfs(struct dentry *, struct kstatfs *); | 129 | static int qnx4_statfs(struct dentry *, struct kstatfs *); |
131 | 130 | ||
@@ -133,7 +132,6 @@ static const struct super_operations qnx4_sops = | |||
133 | { | 132 | { |
134 | .alloc_inode = qnx4_alloc_inode, | 133 | .alloc_inode = qnx4_alloc_inode, |
135 | .destroy_inode = qnx4_destroy_inode, | 134 | .destroy_inode = qnx4_destroy_inode, |
136 | .read_inode = qnx4_read_inode, | ||
137 | .put_super = qnx4_put_super, | 135 | .put_super = qnx4_put_super, |
138 | .statfs = qnx4_statfs, | 136 | .statfs = qnx4_statfs, |
139 | .remount_fs = qnx4_remount, | 137 | .remount_fs = qnx4_remount, |
@@ -357,6 +355,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
357 | struct inode *root; | 355 | struct inode *root; |
358 | const char *errmsg; | 356 | const char *errmsg; |
359 | struct qnx4_sb_info *qs; | 357 | struct qnx4_sb_info *qs; |
358 | int ret = -EINVAL; | ||
360 | 359 | ||
361 | qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); | 360 | qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL); |
362 | if (!qs) | 361 | if (!qs) |
@@ -396,12 +395,14 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
396 | } | 395 | } |
397 | 396 | ||
398 | /* does root not have inode number QNX4_ROOT_INO ?? */ | 397 | /* does root not have inode number QNX4_ROOT_INO ?? */ |
399 | root = iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK); | 398 | root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK); |
400 | if (!root) { | 399 | if (IS_ERR(root)) { |
401 | printk("qnx4: get inode failed\n"); | 400 | printk("qnx4: get inode failed\n"); |
401 | ret = PTR_ERR(root); | ||
402 | goto out; | 402 | goto out; |
403 | } | 403 | } |
404 | 404 | ||
405 | ret = -ENOMEM; | ||
405 | s->s_root = d_alloc_root(root); | 406 | s->s_root = d_alloc_root(root); |
406 | if (s->s_root == NULL) | 407 | if (s->s_root == NULL) |
407 | goto outi; | 408 | goto outi; |
@@ -417,7 +418,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
417 | outnobh: | 418 | outnobh: |
418 | kfree(qs); | 419 | kfree(qs); |
419 | s->s_fs_info = NULL; | 420 | s->s_fs_info = NULL; |
420 | return -EINVAL; | 421 | return ret; |
421 | } | 422 | } |
422 | 423 | ||
423 | static void qnx4_put_super(struct super_block *sb) | 424 | static void qnx4_put_super(struct super_block *sb) |
@@ -462,29 +463,38 @@ static const struct address_space_operations qnx4_aops = { | |||
462 | .bmap = qnx4_bmap | 463 | .bmap = qnx4_bmap |
463 | }; | 464 | }; |
464 | 465 | ||
465 | static void qnx4_read_inode(struct inode *inode) | 466 | struct inode *qnx4_iget(struct super_block *sb, unsigned long ino) |
466 | { | 467 | { |
467 | struct buffer_head *bh; | 468 | struct buffer_head *bh; |
468 | struct qnx4_inode_entry *raw_inode; | 469 | struct qnx4_inode_entry *raw_inode; |
469 | int block, ino; | 470 | int block; |
470 | struct super_block *sb = inode->i_sb; | 471 | struct qnx4_inode_entry *qnx4_inode; |
471 | struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode); | 472 | struct inode *inode; |
472 | 473 | ||
473 | ino = inode->i_ino; | 474 | inode = iget_locked(sb, ino); |
475 | if (!inode) | ||
476 | return ERR_PTR(-ENOMEM); | ||
477 | if (!(inode->i_state & I_NEW)) | ||
478 | return inode; | ||
479 | |||
480 | qnx4_inode = qnx4_raw_inode(inode); | ||
474 | inode->i_mode = 0; | 481 | inode->i_mode = 0; |
475 | 482 | ||
476 | QNX4DEBUG(("Reading inode : [%d]\n", ino)); | 483 | QNX4DEBUG(("Reading inode : [%d]\n", ino)); |
477 | if (!ino) { | 484 | if (!ino) { |
478 | printk("qnx4: bad inode number on dev %s: %d is out of range\n", | 485 | printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is " |
486 | "out of range\n", | ||
479 | sb->s_id, ino); | 487 | sb->s_id, ino); |
480 | return; | 488 | iget_failed(inode); |
489 | return ERR_PTR(-EIO); | ||
481 | } | 490 | } |
482 | block = ino / QNX4_INODES_PER_BLOCK; | 491 | block = ino / QNX4_INODES_PER_BLOCK; |
483 | 492 | ||
484 | if (!(bh = sb_bread(sb, block))) { | 493 | if (!(bh = sb_bread(sb, block))) { |
485 | printk("qnx4: major problem: unable to read inode from dev " | 494 | printk("qnx4: major problem: unable to read inode from dev " |
486 | "%s\n", sb->s_id); | 495 | "%s\n", sb->s_id); |
487 | return; | 496 | iget_failed(inode); |
497 | return ERR_PTR(-EIO); | ||
488 | } | 498 | } |
489 | raw_inode = ((struct qnx4_inode_entry *) bh->b_data) + | 499 | raw_inode = ((struct qnx4_inode_entry *) bh->b_data) + |
490 | (ino % QNX4_INODES_PER_BLOCK); | 500 | (ino % QNX4_INODES_PER_BLOCK); |
@@ -515,9 +525,16 @@ static void qnx4_read_inode(struct inode *inode) | |||
515 | inode->i_op = &page_symlink_inode_operations; | 525 | inode->i_op = &page_symlink_inode_operations; |
516 | inode->i_mapping->a_ops = &qnx4_aops; | 526 | inode->i_mapping->a_ops = &qnx4_aops; |
517 | qnx4_i(inode)->mmu_private = inode->i_size; | 527 | qnx4_i(inode)->mmu_private = inode->i_size; |
518 | } else | 528 | } else { |
519 | printk("qnx4: bad inode %d on dev %s\n",ino,sb->s_id); | 529 | printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n", |
530 | ino, sb->s_id); | ||
531 | iget_failed(inode); | ||
532 | brelse(bh); | ||
533 | return ERR_PTR(-EIO); | ||
534 | } | ||
520 | brelse(bh); | 535 | brelse(bh); |
536 | unlock_new_inode(inode); | ||
537 | return inode; | ||
521 | } | 538 | } |
522 | 539 | ||
523 | static struct kmem_cache *qnx4_inode_cachep; | 540 | static struct kmem_cache *qnx4_inode_cachep; |
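
qnx4_iget() above is the template for retiring a ->read_inode() method: the filesystem-specific helper owns the error path, so a short read or corrupt on-disk inode can be reported with iget_failed() and an ERR_PTR instead of leaving a half-built inode in the cache. A skeleton of that shape, with placeholder names and a stubbed-out disk read:

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/err.h>

/* stand-in for the filesystem's real on-disk read; always succeeds here */
static int foo_read_raw_inode(struct inode *inode)
{
        inode->i_mode = S_IFREG | 0444;
        inode->i_nlink = 1;
        return 0;
}

/* skeleton of a foo_iget() that replaces ->read_inode() */
static struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;                   /* already cached and set up */

        if (foo_read_raw_inode(inode)) {
                iget_failed(inode);             /* marks it bad and releases it */
                return ERR_PTR(-EIO);
        }

        unlock_new_inode(inode);                /* clears I_NEW, wakes waiters */
        return inode;
}
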
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c index 733cdf01d645..775eed3a4085 100644 --- a/fs/qnx4/namei.c +++ b/fs/qnx4/namei.c | |||
@@ -128,10 +128,12 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam | |||
128 | } | 128 | } |
129 | brelse(bh); | 129 | brelse(bh); |
130 | 130 | ||
131 | if ((foundinode = iget(dir->i_sb, ino)) == NULL) { | 131 | foundinode = qnx4_iget(dir->i_sb, ino); |
132 | if (IS_ERR(foundinode)) { | ||
132 | unlock_kernel(); | 133 | unlock_kernel(); |
133 | QNX4DEBUG(("qnx4: lookup->iget -> NULL\n")); | 134 | QNX4DEBUG(("qnx4: lookup->iget -> error %ld\n", |
134 | return ERR_PTR(-EACCES); | 135 | PTR_ERR(foundinode))); |
136 | return ERR_CAST(foundinode); | ||
135 | } | 137 | } |
136 | out: | 138 | out: |
137 | unlock_kernel(); | 139 | unlock_kernel(); |
diff --git a/fs/quota.c b/fs/quota.c index 99b24b52bfc8..84f28dd72116 100644 --- a/fs/quota.c +++ b/fs/quota.c | |||
@@ -341,11 +341,11 @@ static inline struct super_block *quotactl_block(const char __user *special) | |||
341 | char *tmp = getname(special); | 341 | char *tmp = getname(special); |
342 | 342 | ||
343 | if (IS_ERR(tmp)) | 343 | if (IS_ERR(tmp)) |
344 | return ERR_PTR(PTR_ERR(tmp)); | 344 | return ERR_CAST(tmp); |
345 | bdev = lookup_bdev(tmp); | 345 | bdev = lookup_bdev(tmp); |
346 | putname(tmp); | 346 | putname(tmp); |
347 | if (IS_ERR(bdev)) | 347 | if (IS_ERR(bdev)) |
348 | return ERR_PTR(PTR_ERR(bdev)); | 348 | return ERR_CAST(bdev); |
349 | sb = get_super(bdev); | 349 | sb = get_super(bdev); |
350 | bdput(bdev); | 350 | bdput(bdev); |
351 | if (!sb) | 351 | if (!sb) |
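
ERR_PTR(PTR_ERR(x)) exists only to move an error value from one pointer type to another, and the new ERR_CAST() states that intent directly. A hedged paraphrase of quotactl_block() above, under an invented name:

#include <linux/fs.h>
#include <linux/err.h>

/* invented name; follows the shape of quotactl_block() above */
static struct super_block *example_find_super(const char __user *special)
{
        struct block_device *bdev;
        struct super_block *sb;
        char *tmp = getname(special);

        if (IS_ERR(tmp))
                return ERR_CAST(tmp);   /* was ERR_PTR(PTR_ERR(tmp)) */
        bdev = lookup_bdev(tmp);
        putname(tmp);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);  /* same errno, different pointer type */
        sb = get_super(bdev);
        bdput(bdev);
        return sb ? sb : ERR_PTR(-ENODEV);
}
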
diff --git a/fs/read_write.c b/fs/read_write.c index 1c177f29e1b7..49a98718ecdf 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -366,7 +366,6 @@ asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count) | |||
366 | 366 | ||
367 | return ret; | 367 | return ret; |
368 | } | 368 | } |
369 | EXPORT_UNUSED_SYMBOL_GPL(sys_read); /* to be deleted for 2.6.25 */ | ||
370 | 369 | ||
371 | asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count) | 370 | asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count) |
372 | { | 371 | { |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 195309857e63..57917932212e 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -1536,7 +1536,7 @@ static struct dentry *reiserfs_get_dentry(struct super_block *sb, | |||
1536 | if (!inode) | 1536 | if (!inode) |
1537 | inode = ERR_PTR(-ESTALE); | 1537 | inode = ERR_PTR(-ESTALE); |
1538 | if (IS_ERR(inode)) | 1538 | if (IS_ERR(inode)) |
1539 | return ERR_PTR(PTR_ERR(inode)); | 1539 | return ERR_CAST(inode); |
1540 | result = d_alloc_anon(inode); | 1540 | result = d_alloc_anon(inode); |
1541 | if (!result) { | 1541 | if (!result) { |
1542 | iput(inode); | 1542 | iput(inode); |
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 5e7388b32d02..740bb8c0c1ae 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c | |||
@@ -575,6 +575,8 @@ void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int l | |||
575 | printk | 575 | printk |
576 | ("Block %llu contains unformatted data\n", | 576 | ("Block %llu contains unformatted data\n", |
577 | (unsigned long long)bh->b_blocknr); | 577 | (unsigned long long)bh->b_blocknr); |
578 | |||
579 | va_end(args); | ||
578 | } | 580 | } |
579 | 581 | ||
580 | static char print_tb_buf[2048]; | 582 | static char print_tb_buf[2048]; |
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 001144621672..8f86c52b30d8 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c | |||
@@ -444,7 +444,7 @@ static int r_show(struct seq_file *m, void *v) | |||
444 | return show(m, v); | 444 | return show(m, v); |
445 | } | 445 | } |
446 | 446 | ||
447 | static struct seq_operations r_ops = { | 447 | static const struct seq_operations r_ops = { |
448 | .start = r_start, | 448 | .start = r_start, |
449 | .next = r_next, | 449 | .next = r_next, |
450 | .stop = r_stop, | 450 | .stop = r_stop, |
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 5cd85fe5df5d..6841452e0dea 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -617,6 +617,7 @@ static const struct super_operations reiserfs_sops = { | |||
617 | .unlockfs = reiserfs_unlockfs, | 617 | .unlockfs = reiserfs_unlockfs, |
618 | .statfs = reiserfs_statfs, | 618 | .statfs = reiserfs_statfs, |
619 | .remount_fs = reiserfs_remount, | 619 | .remount_fs = reiserfs_remount, |
620 | .show_options = generic_show_options, | ||
620 | #ifdef CONFIG_QUOTA | 621 | #ifdef CONFIG_QUOTA |
621 | .quota_read = reiserfs_quota_read, | 622 | .quota_read = reiserfs_quota_read, |
622 | .quota_write = reiserfs_quota_write, | 623 | .quota_write = reiserfs_quota_write, |
@@ -1138,6 +1139,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1138 | unsigned long safe_mask = 0; | 1139 | unsigned long safe_mask = 0; |
1139 | unsigned int commit_max_age = (unsigned int)-1; | 1140 | unsigned int commit_max_age = (unsigned int)-1; |
1140 | struct reiserfs_journal *journal = SB_JOURNAL(s); | 1141 | struct reiserfs_journal *journal = SB_JOURNAL(s); |
1142 | char *new_opts = kstrdup(arg, GFP_KERNEL); | ||
1141 | int err; | 1143 | int err; |
1142 | #ifdef CONFIG_QUOTA | 1144 | #ifdef CONFIG_QUOTA |
1143 | int i; | 1145 | int i; |
@@ -1153,7 +1155,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1153 | REISERFS_SB(s)->s_qf_names[i] = NULL; | 1155 | REISERFS_SB(s)->s_qf_names[i] = NULL; |
1154 | } | 1156 | } |
1155 | #endif | 1157 | #endif |
1156 | return -EINVAL; | 1158 | err = -EINVAL; |
1159 | goto out_err; | ||
1157 | } | 1160 | } |
1158 | 1161 | ||
1159 | handle_attrs(s); | 1162 | handle_attrs(s); |
@@ -1191,9 +1194,9 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1191 | } | 1194 | } |
1192 | 1195 | ||
1193 | if (blocks) { | 1196 | if (blocks) { |
1194 | int rc = reiserfs_resize(s, blocks); | 1197 | err = reiserfs_resize(s, blocks); |
1195 | if (rc != 0) | 1198 | if (err != 0) |
1196 | return rc; | 1199 | goto out_err; |
1197 | } | 1200 | } |
1198 | 1201 | ||
1199 | if (*mount_flags & MS_RDONLY) { | 1202 | if (*mount_flags & MS_RDONLY) { |
@@ -1201,16 +1204,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1201 | /* remount read-only */ | 1204 | /* remount read-only */ |
1202 | if (s->s_flags & MS_RDONLY) | 1205 | if (s->s_flags & MS_RDONLY) |
1203 | /* it is read-only already */ | 1206 | /* it is read-only already */ |
1204 | return 0; | 1207 | goto out_ok; |
1205 | /* try to remount file system with read-only permissions */ | 1208 | /* try to remount file system with read-only permissions */ |
1206 | if (sb_umount_state(rs) == REISERFS_VALID_FS | 1209 | if (sb_umount_state(rs) == REISERFS_VALID_FS |
1207 | || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { | 1210 | || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { |
1208 | return 0; | 1211 | goto out_ok; |
1209 | } | 1212 | } |
1210 | 1213 | ||
1211 | err = journal_begin(&th, s, 10); | 1214 | err = journal_begin(&th, s, 10); |
1212 | if (err) | 1215 | if (err) |
1213 | return err; | 1216 | goto out_err; |
1214 | 1217 | ||
1215 | /* Mounting a rw partition read-only. */ | 1218 | /* Mounting a rw partition read-only. */ |
1216 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); | 1219 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); |
@@ -1220,11 +1223,13 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1220 | /* remount read-write */ | 1223 | /* remount read-write */ |
1221 | if (!(s->s_flags & MS_RDONLY)) { | 1224 | if (!(s->s_flags & MS_RDONLY)) { |
1222 | reiserfs_xattr_init(s, *mount_flags); | 1225 | reiserfs_xattr_init(s, *mount_flags); |
1223 | return 0; /* We are read-write already */ | 1226 | goto out_ok; /* We are read-write already */ |
1224 | } | 1227 | } |
1225 | 1228 | ||
1226 | if (reiserfs_is_journal_aborted(journal)) | 1229 | if (reiserfs_is_journal_aborted(journal)) { |
1227 | return journal->j_errno; | 1230 | err = journal->j_errno; |
1231 | goto out_err; | ||
1232 | } | ||
1228 | 1233 | ||
1229 | handle_data_mode(s, mount_options); | 1234 | handle_data_mode(s, mount_options); |
1230 | handle_barrier_mode(s, mount_options); | 1235 | handle_barrier_mode(s, mount_options); |
@@ -1232,7 +1237,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1232 | s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ | 1237 | s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ |
1233 | err = journal_begin(&th, s, 10); | 1238 | err = journal_begin(&th, s, 10); |
1234 | if (err) | 1239 | if (err) |
1235 | return err; | 1240 | goto out_err; |
1236 | 1241 | ||
1237 | /* Mount a partition which is read-only, read-write */ | 1242 | /* Mount a partition which is read-only, read-write */ |
1238 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); | 1243 | reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); |
@@ -1247,7 +1252,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1247 | SB_JOURNAL(s)->j_must_wait = 1; | 1252 | SB_JOURNAL(s)->j_must_wait = 1; |
1248 | err = journal_end(&th, s, 10); | 1253 | err = journal_end(&th, s, 10); |
1249 | if (err) | 1254 | if (err) |
1250 | return err; | 1255 | goto out_err; |
1251 | s->s_dirt = 0; | 1256 | s->s_dirt = 0; |
1252 | 1257 | ||
1253 | if (!(*mount_flags & MS_RDONLY)) { | 1258 | if (!(*mount_flags & MS_RDONLY)) { |
@@ -1255,7 +1260,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
1255 | reiserfs_xattr_init(s, *mount_flags); | 1260 | reiserfs_xattr_init(s, *mount_flags); |
1256 | } | 1261 | } |
1257 | 1262 | ||
1263 | out_ok: | ||
1264 | kfree(s->s_options); | ||
1265 | s->s_options = new_opts; | ||
1258 | return 0; | 1266 | return 0; |
1267 | |||
1268 | out_err: | ||
1269 | kfree(new_opts); | ||
1270 | return err; | ||
1259 | } | 1271 | } |
1260 | 1272 | ||
1261 | static int read_super_block(struct super_block *s, int offset) | 1273 | static int read_super_block(struct super_block *s, int offset) |
@@ -1559,6 +1571,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) | |||
1559 | struct reiserfs_sb_info *sbi; | 1571 | struct reiserfs_sb_info *sbi; |
1560 | int errval = -EINVAL; | 1572 | int errval = -EINVAL; |
1561 | 1573 | ||
1574 | save_mount_options(s, data); | ||
1575 | |||
1562 | sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); | 1576 | sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL); |
1563 | if (!sbi) { | 1577 | if (!sbi) { |
1564 | errval = -ENOMEM; | 1578 | errval = -ENOMEM; |
@@ -2012,29 +2026,29 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, | |||
2012 | if (err) | 2026 | if (err) |
2013 | return err; | 2027 | return err; |
2014 | /* Quotafile not on the same filesystem? */ | 2028 | /* Quotafile not on the same filesystem? */ |
2015 | if (nd.mnt->mnt_sb != sb) { | 2029 | if (nd.path.mnt->mnt_sb != sb) { |
2016 | path_release(&nd); | 2030 | path_put(&nd.path); |
2017 | return -EXDEV; | 2031 | return -EXDEV; |
2018 | } | 2032 | } |
2019 | /* We must not pack tails for quota files on reiserfs for quota IO to work */ | 2033 | /* We must not pack tails for quota files on reiserfs for quota IO to work */ |
2020 | if (!REISERFS_I(nd.dentry->d_inode)->i_flags & i_nopack_mask) { | 2034 | if (!REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask) { |
2021 | reiserfs_warning(sb, | 2035 | reiserfs_warning(sb, |
2022 | "reiserfs: Quota file must have tail packing disabled."); | 2036 | "reiserfs: Quota file must have tail packing disabled."); |
2023 | path_release(&nd); | 2037 | path_put(&nd.path); |
2024 | return -EINVAL; | 2038 | return -EINVAL; |
2025 | } | 2039 | } |
2026 | /* Not journalling quota? No more tests needed... */ | 2040 | /* Not journalling quota? No more tests needed... */ |
2027 | if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] && | 2041 | if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] && |
2028 | !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) { | 2042 | !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) { |
2029 | path_release(&nd); | 2043 | path_put(&nd.path); |
2030 | return vfs_quota_on(sb, type, format_id, path); | 2044 | return vfs_quota_on(sb, type, format_id, path); |
2031 | } | 2045 | } |
2032 | /* Quotafile not of fs root? */ | 2046 | /* Quotafile not of fs root? */ |
2033 | if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode) | 2047 | if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode) |
2034 | reiserfs_warning(sb, | 2048 | reiserfs_warning(sb, |
2035 | "reiserfs: Quota file not on filesystem root. " | 2049 | "reiserfs: Quota file not on filesystem root. " |
2036 | "Journalled quota will not work."); | 2050 | "Journalled quota will not work."); |
2037 | path_release(&nd); | 2051 | path_put(&nd.path); |
2038 | return vfs_quota_on(sb, type, format_id, path); | 2052 | return vfs_quota_on(sb, type, format_id, path); |
2039 | } | 2053 | } |
2040 | 2054 | ||
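
The reiserfs_remount() rework above is largely bookkeeping for .show_options = generic_show_options: fill_super saves the original option string with save_mount_options(), and remount keeps a kstrdup()'d copy alive only long enough to publish it on success, funnelling every failure through one label so the copy is not leaked. A much-reduced sketch of that pattern; the parsing step is stubbed out and the names are invented:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* stand-in for the real option parsing/application; always succeeds here */
static int example_apply_options(struct super_block *s, int *flags, char *arg)
{
        return 0;
}

static int example_remount(struct super_block *s, int *flags, char *arg)
{
        char *new_opts = kstrdup(arg, GFP_KERNEL);
        int err;

        err = example_apply_options(s, flags, arg);
        if (err)
                goto out_err;

        /* success: swap in the new option string for generic_show_options() */
        kfree(s->s_options);
        s->s_options = new_opts;
        return 0;

out_err:
        kfree(new_opts);        /* every failure path frees the unused copy */
        return err;
}
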
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 1597f6b649e0..eba037b3338f 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -155,7 +155,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode, | |||
155 | 155 | ||
156 | xadir = open_xa_dir(inode, flags); | 156 | xadir = open_xa_dir(inode, flags); |
157 | if (IS_ERR(xadir)) { | 157 | if (IS_ERR(xadir)) { |
158 | return ERR_PTR(PTR_ERR(xadir)); | 158 | return ERR_CAST(xadir); |
159 | } else if (xadir && !xadir->d_inode) { | 159 | } else if (xadir && !xadir->d_inode) { |
160 | dput(xadir); | 160 | dput(xadir); |
161 | return ERR_PTR(-ENODATA); | 161 | return ERR_PTR(-ENODATA); |
@@ -164,7 +164,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode, | |||
164 | xafile = lookup_one_len(name, xadir, strlen(name)); | 164 | xafile = lookup_one_len(name, xadir, strlen(name)); |
165 | if (IS_ERR(xafile)) { | 165 | if (IS_ERR(xafile)) { |
166 | dput(xadir); | 166 | dput(xadir); |
167 | return ERR_PTR(PTR_ERR(xafile)); | 167 | return ERR_CAST(xafile); |
168 | } | 168 | } |
169 | 169 | ||
170 | if (xafile->d_inode) { /* file exists */ | 170 | if (xafile->d_inode) { /* file exists */ |
@@ -1084,7 +1084,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size) | |||
1084 | } | 1084 | } |
1085 | 1085 | ||
1086 | /* This is the implementation for the xattr plugin infrastructure */ | 1086 | /* This is the implementation for the xattr plugin infrastructure */ |
1087 | static struct list_head xattr_handlers = LIST_HEAD_INIT(xattr_handlers); | 1087 | static LIST_HEAD(xattr_handlers); |
1088 | static DEFINE_RWLOCK(handler_lock); | 1088 | static DEFINE_RWLOCK(handler_lock); |
1089 | 1089 | ||
1090 | static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char | 1090 | static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char |
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index a49cf5b9a195..00b6f0a518c8 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c | |||
@@ -84,6 +84,8 @@ struct romfs_inode_info { | |||
84 | struct inode vfs_inode; | 84 | struct inode vfs_inode; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static struct inode *romfs_iget(struct super_block *, unsigned long); | ||
88 | |||
87 | /* instead of private superblock data */ | 89 | /* instead of private superblock data */ |
88 | static inline unsigned long romfs_maxsize(struct super_block *sb) | 90 | static inline unsigned long romfs_maxsize(struct super_block *sb) |
89 | { | 91 | { |
@@ -117,7 +119,7 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent) | |||
117 | struct buffer_head *bh; | 119 | struct buffer_head *bh; |
118 | struct romfs_super_block *rsb; | 120 | struct romfs_super_block *rsb; |
119 | struct inode *root; | 121 | struct inode *root; |
120 | int sz; | 122 | int sz, ret = -EINVAL; |
121 | 123 | ||
122 | /* I would parse the options here, but there are none.. :) */ | 124 | /* I would parse the options here, but there are none.. :) */ |
123 | 125 | ||
@@ -157,10 +159,13 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent) | |||
157 | & ROMFH_MASK; | 159 | & ROMFH_MASK; |
158 | 160 | ||
159 | s->s_op = &romfs_ops; | 161 | s->s_op = &romfs_ops; |
160 | root = iget(s, sz); | 162 | root = romfs_iget(s, sz); |
161 | if (!root) | 163 | if (IS_ERR(root)) { |
164 | ret = PTR_ERR(root); | ||
162 | goto out; | 165 | goto out; |
166 | } | ||
163 | 167 | ||
168 | ret = -ENOMEM; | ||
164 | s->s_root = d_alloc_root(root); | 169 | s->s_root = d_alloc_root(root); |
165 | if (!s->s_root) | 170 | if (!s->s_root) |
166 | goto outiput; | 171 | goto outiput; |
@@ -173,7 +178,7 @@ outiput: | |||
173 | out: | 178 | out: |
174 | brelse(bh); | 179 | brelse(bh); |
175 | outnobh: | 180 | outnobh: |
176 | return -EINVAL; | 181 | return ret; |
177 | } | 182 | } |
178 | 183 | ||
179 | /* That's simple too. */ | 184 | /* That's simple too. */ |
@@ -389,8 +394,11 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
389 | if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD) | 394 | if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD) |
390 | offset = be32_to_cpu(ri.spec) & ROMFH_MASK; | 395 | offset = be32_to_cpu(ri.spec) & ROMFH_MASK; |
391 | 396 | ||
392 | if ((inode = iget(dir->i_sb, offset))) | 397 | inode = romfs_iget(dir->i_sb, offset); |
393 | goto outi; | 398 | if (IS_ERR(inode)) { |
399 | res = PTR_ERR(inode); | ||
400 | goto out; | ||
401 | } | ||
394 | 402 | ||
395 | /* | 403 | /* |
396 | * it's a bit funky, _lookup needs to return an error code | 404 | * it's a bit funky, _lookup needs to return an error code |
@@ -402,7 +410,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
402 | */ | 410 | */ |
403 | 411 | ||
404 | out0: inode = NULL; | 412 | out0: inode = NULL; |
405 | outi: res = 0; | 413 | res = 0; |
406 | d_add (dentry, inode); | 414 | d_add (dentry, inode); |
407 | 415 | ||
408 | out: unlock_kernel(); | 416 | out: unlock_kernel(); |
@@ -478,20 +486,29 @@ static mode_t romfs_modemap[] = | |||
478 | S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644 | 486 | S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644 |
479 | }; | 487 | }; |
480 | 488 | ||
481 | static void | 489 | static struct inode * |
482 | romfs_read_inode(struct inode *i) | 490 | romfs_iget(struct super_block *sb, unsigned long ino) |
483 | { | 491 | { |
484 | int nextfh, ino; | 492 | int nextfh; |
485 | struct romfs_inode ri; | 493 | struct romfs_inode ri; |
494 | struct inode *i; | ||
495 | |||
496 | ino &= ROMFH_MASK; | ||
497 | i = iget_locked(sb, ino); | ||
498 | if (!i) | ||
499 | return ERR_PTR(-ENOMEM); | ||
500 | if (!(i->i_state & I_NEW)) | ||
501 | return i; | ||
486 | 502 | ||
487 | ino = i->i_ino & ROMFH_MASK; | ||
488 | i->i_mode = 0; | 503 | i->i_mode = 0; |
489 | 504 | ||
490 | /* Loop for finding the real hard link */ | 505 | /* Loop for finding the real hard link */ |
491 | for(;;) { | 506 | for(;;) { |
492 | if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) { | 507 | if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) { |
493 | printk("romfs: read error for inode 0x%x\n", ino); | 508 | printk(KERN_ERR "romfs: read error for inode 0x%lx\n", |
494 | return; | 509 | ino); |
510 | iget_failed(i); | ||
511 | return ERR_PTR(-EIO); | ||
495 | } | 512 | } |
496 | /* XXX: do romfs_checksum here too (with name) */ | 513 | /* XXX: do romfs_checksum here too (with name) */ |
497 | 514 | ||
@@ -548,6 +565,8 @@ romfs_read_inode(struct inode *i) | |||
548 | init_special_inode(i, ino, | 565 | init_special_inode(i, ino, |
549 | MKDEV(nextfh>>16,nextfh&0xffff)); | 566 | MKDEV(nextfh>>16,nextfh&0xffff)); |
550 | } | 567 | } |
568 | unlock_new_inode(i); | ||
569 | return i; | ||
551 | } | 570 | } |
552 | 571 | ||
553 | static struct kmem_cache * romfs_inode_cachep; | 572 | static struct kmem_cache * romfs_inode_cachep; |
@@ -599,7 +618,6 @@ static int romfs_remount(struct super_block *sb, int *flags, char *data) | |||
599 | static const struct super_operations romfs_ops = { | 618 | static const struct super_operations romfs_ops = { |
600 | .alloc_inode = romfs_alloc_inode, | 619 | .alloc_inode = romfs_alloc_inode, |
601 | .destroy_inode = romfs_destroy_inode, | 620 | .destroy_inode = romfs_destroy_inode, |
602 | .read_inode = romfs_read_inode, | ||
603 | .statfs = romfs_statfs, | 621 | .statfs = romfs_statfs, |
604 | .remount_fs = romfs_remount, | 622 | .remount_fs = romfs_remount, |
605 | }; | 623 | }; |
diff --git a/fs/select.c b/fs/select.c index 47f47925aea2..5633fe980781 100644 --- a/fs/select.c +++ b/fs/select.c | |||
@@ -739,7 +739,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, | |||
739 | timeout_jiffies = -1; | 739 | timeout_jiffies = -1; |
740 | else | 740 | else |
741 | #endif | 741 | #endif |
742 | timeout_jiffies = msecs_to_jiffies(timeout_msecs); | 742 | timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1; |
743 | } else { | 743 | } else { |
744 | /* Infinite (< 0) or no (0) timeout */ | 744 | /* Infinite (< 0) or no (0) timeout */ |
745 | timeout_jiffies = timeout_msecs; | 745 | timeout_jiffies = timeout_msecs; |
diff --git a/fs/seq_file.c b/fs/seq_file.c index ca71c115bdaa..853770274f20 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -342,13 +342,11 @@ int seq_printf(struct seq_file *m, const char *f, ...) | |||
342 | } | 342 | } |
343 | EXPORT_SYMBOL(seq_printf); | 343 | EXPORT_SYMBOL(seq_printf); |
344 | 344 | ||
345 | int seq_path(struct seq_file *m, | 345 | int seq_path(struct seq_file *m, struct path *path, char *esc) |
346 | struct vfsmount *mnt, struct dentry *dentry, | ||
347 | char *esc) | ||
348 | { | 346 | { |
349 | if (m->count < m->size) { | 347 | if (m->count < m->size) { |
350 | char *s = m->buf + m->count; | 348 | char *s = m->buf + m->count; |
351 | char *p = d_path(dentry, mnt, s, m->size - m->count); | 349 | char *p = d_path(path, s, m->size - m->count); |
352 | if (!IS_ERR(p)) { | 350 | if (!IS_ERR(p)) { |
353 | while (s <= p) { | 351 | while (s <= p) { |
354 | char c = *p++; | 352 | char c = *p++; |
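
seq_path() now takes the struct path itself rather than a separate vfsmount/dentry pair, matching d_path() and the fs/proc callers converted earlier in this diff. A hedged sketch of a show routine using the new signature; the iterator handing out struct file pointers is assumed, not real:

#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/path.h>

/* assumed: the seq_file iterator yields a struct file in v */
static int example_show_file(struct seq_file *m, void *v)
{
        struct file *file = v;

        seq_puts(m, "backing file: ");
        /* old: seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
         * new: one struct path argument, same escape string */
        seq_path(m, &file->f_path, "\n");
        seq_putc(m, '\n');
        return 0;
}
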
diff --git a/fs/signalfd.c b/fs/signalfd.c index 2d3e107da2d3..cb2b63ae0bf4 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/anon_inodes.h> | 28 | #include <linux/anon_inodes.h> |
29 | #include <linux/signalfd.h> | 29 | #include <linux/signalfd.h> |
30 | #include <linux/syscalls.h> | ||
30 | 31 | ||
31 | struct signalfd_ctx { | 32 | struct signalfd_ctx { |
32 | sigset_t sigmask; | 33 | sigset_t sigmask; |
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 4e5c22ca802e..376ef3ee6ed7 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c | |||
@@ -505,7 +505,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
505 | if (warn_count < 5) { | 505 | if (warn_count < 5) { |
506 | warn_count++; | 506 | warn_count++; |
507 | printk(KERN_EMERG "smbfs is deprecated and will be removed" | 507 | printk(KERN_EMERG "smbfs is deprecated and will be removed" |
508 | "from the 2.6.27 kernel. Please migrate to cifs\n"); | 508 | " from the 2.6.27 kernel. Please migrate to cifs\n"); |
509 | } | 509 | } |
510 | 510 | ||
511 | if (!raw_data) | 511 | if (!raw_data) |
diff --git a/fs/smbfs/sock.c b/fs/smbfs/sock.c index e48bd8235a8e..e37fe4deebd0 100644 --- a/fs/smbfs/sock.c +++ b/fs/smbfs/sock.c | |||
@@ -329,9 +329,8 @@ smb_receive(struct smb_sb_info *server, struct smb_request *req) | |||
329 | msg.msg_control = NULL; | 329 | msg.msg_control = NULL; |
330 | 330 | ||
331 | /* Dont repeat bytes and count available bufferspace */ | 331 | /* Dont repeat bytes and count available bufferspace */ |
332 | rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd); | 332 | rlen = min_t(int, smb_move_iov(&p, &num, iov, req->rq_bytes_recvd), |
333 | if (req->rq_rlen < rlen) | 333 | (req->rq_rlen - req->rq_bytes_recvd)); |
334 | rlen = req->rq_rlen; | ||
335 | 334 | ||
336 | result = kernel_recvmsg(sock, &msg, p, num, rlen, flags); | 335 | result = kernel_recvmsg(sock, &msg, p, num, rlen, flags); |
337 | 336 | ||
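The `smb_receive()` hunk clamps the receive length to the bytes still outstanding for the request (`rq_rlen - rq_bytes_recvd`) rather than the full reply length, so a partially received reply is never over-read. Expressed as a hypothetical helper:

```c
#include <linux/kernel.h>

/* Request at most the bytes still missing from this SMB reply. */
static int clamp_recv_len(int avail_in_iov, int reply_len, int already_recvd)
{
	return min_t(int, avail_in_iov, reply_len - already_recvd);
}
```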
diff --git a/fs/splice.c b/fs/splice.c index 4ee49e86edde..9b559ee711a8 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1179,6 +1179,9 @@ static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n) | |||
1179 | { | 1179 | { |
1180 | int partial; | 1180 | int partial; |
1181 | 1181 | ||
1182 | if (!access_ok(VERIFY_READ, src, n)) | ||
1183 | return -EFAULT; | ||
1184 | |||
1182 | pagefault_disable(); | 1185 | pagefault_disable(); |
1183 | partial = __copy_from_user_inatomic(dst, src, n); | 1186 | partial = __copy_from_user_inatomic(dst, src, n); |
1184 | pagefault_enable(); | 1187 | pagefault_enable(); |
@@ -1231,7 +1234,7 @@ static int get_iovec_page_array(const struct iovec __user *iov, | |||
1231 | if (unlikely(!len)) | 1234 | if (unlikely(!len)) |
1232 | break; | 1235 | break; |
1233 | error = -EFAULT; | 1236 | error = -EFAULT; |
1234 | if (unlikely(!base)) | 1237 | if (!access_ok(VERIFY_READ, base, len)) |
1235 | break; | 1238 | break; |
1236 | 1239 | ||
1237 | /* | 1240 | /* |
@@ -1387,6 +1390,11 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, | |||
1387 | break; | 1390 | break; |
1388 | } | 1391 | } |
1389 | 1392 | ||
1393 | if (unlikely(!access_ok(VERIFY_WRITE, base, len))) { | ||
1394 | error = -EFAULT; | ||
1395 | break; | ||
1396 | } | ||
1397 | |||
1390 | sd.len = 0; | 1398 | sd.len = 0; |
1391 | sd.total_len = len; | 1399 | sd.total_len = len; |
1392 | sd.flags = flags; | 1400 | sd.flags = flags; |
diff --git a/fs/stat.c b/fs/stat.c --- a/fs/stat.c +++ b/fs/stat.c | |||
@@ -62,8 +62,8 @@ int vfs_stat_fd(int dfd, char __user *name, struct kstat *stat) | |||
62 | 62 | ||
63 | error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd); | 63 | error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd); |
64 | if (!error) { | 64 | if (!error) { |
65 | error = vfs_getattr(nd.mnt, nd.dentry, stat); | 65 | error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat); |
66 | path_release(&nd); | 66 | path_put(&nd.path); |
67 | } | 67 | } |
68 | return error; | 68 | return error; |
69 | } | 69 | } |
@@ -82,8 +82,8 @@ int vfs_lstat_fd(int dfd, char __user *name, struct kstat *stat) | |||
82 | 82 | ||
83 | error = __user_walk_fd(dfd, name, 0, &nd); | 83 | error = __user_walk_fd(dfd, name, 0, &nd); |
84 | if (!error) { | 84 | if (!error) { |
85 | error = vfs_getattr(nd.mnt, nd.dentry, stat); | 85 | error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat); |
86 | path_release(&nd); | 86 | path_put(&nd.path); |
87 | } | 87 | } |
88 | return error; | 88 | return error; |
89 | } | 89 | } |
@@ -302,17 +302,18 @@ asmlinkage long sys_readlinkat(int dfd, const char __user *path, | |||
302 | 302 | ||
303 | error = __user_walk_fd(dfd, path, 0, &nd); | 303 | error = __user_walk_fd(dfd, path, 0, &nd); |
304 | if (!error) { | 304 | if (!error) { |
305 | struct inode * inode = nd.dentry->d_inode; | 305 | struct inode *inode = nd.path.dentry->d_inode; |
306 | 306 | ||
307 | error = -EINVAL; | 307 | error = -EINVAL; |
308 | if (inode->i_op && inode->i_op->readlink) { | 308 | if (inode->i_op && inode->i_op->readlink) { |
309 | error = security_inode_readlink(nd.dentry); | 309 | error = security_inode_readlink(nd.path.dentry); |
310 | if (!error) { | 310 | if (!error) { |
311 | touch_atime(nd.mnt, nd.dentry); | 311 | touch_atime(nd.path.mnt, nd.path.dentry); |
312 | error = inode->i_op->readlink(nd.dentry, buf, bufsiz); | 312 | error = inode->i_op->readlink(nd.path.dentry, |
313 | buf, bufsiz); | ||
313 | } | 314 | } |
314 | } | 315 | } |
315 | path_release(&nd); | 316 | path_put(&nd.path); |
316 | } | 317 | } |
317 | return error; | 318 | return error; |
318 | } | 319 | } |
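The fs/stat.c hunks are part of the `struct path` conversion: lookups hand back `nd.path`, and the reference is dropped with `path_put()` instead of the old `nd.mnt`/`nd.dentry` pair and `path_release()`. The resulting shape, mirroring the patched `vfs_stat_fd()`:

```c
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/stat.h>

static int example_stat(int dfd, char __user *name, struct kstat *stat)
{
	struct nameidata nd;
	int error;

	error = __user_walk_fd(dfd, name, LOOKUP_FOLLOW, &nd);
	if (!error) {
		error = vfs_getattr(nd.path.mnt, nd.path.dentry, stat);
		path_put(&nd.path);	/* releases both mnt and dentry */
	}
	return error;
}
```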
diff --git a/fs/super.c b/fs/super.c index ceaf2e3d594c..88811f60c8de 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -105,6 +105,7 @@ static inline void destroy_super(struct super_block *s) | |||
105 | { | 105 | { |
106 | security_sb_free(s); | 106 | security_sb_free(s); |
107 | kfree(s->s_subtype); | 107 | kfree(s->s_subtype); |
108 | kfree(s->s_options); | ||
108 | kfree(s); | 109 | kfree(s); |
109 | } | 110 | } |
110 | 111 | ||
@@ -603,6 +604,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) | |||
603 | mark_files_ro(sb); | 604 | mark_files_ro(sb); |
604 | else if (!fs_may_remount_ro(sb)) | 605 | else if (!fs_may_remount_ro(sb)) |
605 | return -EBUSY; | 606 | return -EBUSY; |
607 | DQUOT_OFF(sb); | ||
606 | } | 608 | } |
607 | 609 | ||
608 | if (sb->s_op->remount_fs) { | 610 | if (sb->s_op->remount_fs) { |
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 0871c3dadce1..477904915032 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c | |||
@@ -77,7 +77,12 @@ void sysfs_remove_group(struct kobject * kobj, | |||
77 | 77 | ||
78 | if (grp->name) { | 78 | if (grp->name) { |
79 | sd = sysfs_get_dirent(dir_sd, grp->name); | 79 | sd = sysfs_get_dirent(dir_sd, grp->name); |
80 | BUG_ON(!sd); | 80 | if (!sd) { |
81 | printk(KERN_WARNING "sysfs group %p not found for " | ||
82 | "kobject '%s'\n", grp, kobject_name(kobj)); | ||
83 | WARN_ON(!sd); | ||
84 | return; | ||
85 | } | ||
81 | } else | 86 | } else |
82 | sd = sysfs_get(dir_sd); | 87 | sd = sysfs_get(dir_sd); |
83 | 88 | ||
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 81ec6c548c07..c5d60de0658f 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
@@ -169,20 +169,27 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) | |||
169 | init_special_inode(inode, inode->i_mode, rdev); | 169 | init_special_inode(inode, inode->i_mode, rdev); |
170 | } | 170 | } |
171 | 171 | ||
172 | static void sysv_read_inode(struct inode *inode) | 172 | struct inode *sysv_iget(struct super_block *sb, unsigned int ino) |
173 | { | 173 | { |
174 | struct super_block * sb = inode->i_sb; | ||
175 | struct sysv_sb_info * sbi = SYSV_SB(sb); | 174 | struct sysv_sb_info * sbi = SYSV_SB(sb); |
176 | struct buffer_head * bh; | 175 | struct buffer_head * bh; |
177 | struct sysv_inode * raw_inode; | 176 | struct sysv_inode * raw_inode; |
178 | struct sysv_inode_info * si; | 177 | struct sysv_inode_info * si; |
179 | unsigned int block, ino = inode->i_ino; | 178 | struct inode *inode; |
179 | unsigned int block; | ||
180 | 180 | ||
181 | if (!ino || ino > sbi->s_ninodes) { | 181 | if (!ino || ino > sbi->s_ninodes) { |
182 | printk("Bad inode number on dev %s: %d is out of range\n", | 182 | printk("Bad inode number on dev %s: %d is out of range\n", |
183 | inode->i_sb->s_id, ino); | 183 | sb->s_id, ino); |
184 | goto bad_inode; | 184 | return ERR_PTR(-EIO); |
185 | } | 185 | } |
186 | |||
187 | inode = iget_locked(sb, ino); | ||
188 | if (!inode) | ||
189 | return ERR_PTR(-ENOMEM); | ||
190 | if (!(inode->i_state & I_NEW)) | ||
191 | return inode; | ||
192 | |||
186 | raw_inode = sysv_raw_inode(sb, ino, &bh); | 193 | raw_inode = sysv_raw_inode(sb, ino, &bh); |
187 | if (!raw_inode) { | 194 | if (!raw_inode) { |
188 | printk("Major problem: unable to read inode from dev %s\n", | 195 | printk("Major problem: unable to read inode from dev %s\n", |
@@ -214,11 +221,12 @@ static void sysv_read_inode(struct inode *inode) | |||
214 | old_decode_dev(fs32_to_cpu(sbi, si->i_data[0]))); | 221 | old_decode_dev(fs32_to_cpu(sbi, si->i_data[0]))); |
215 | else | 222 | else |
216 | sysv_set_inode(inode, 0); | 223 | sysv_set_inode(inode, 0); |
217 | return; | 224 | unlock_new_inode(inode); |
225 | return inode; | ||
218 | 226 | ||
219 | bad_inode: | 227 | bad_inode: |
220 | make_bad_inode(inode); | 228 | iget_failed(inode); |
221 | return; | 229 | return ERR_PTR(-EIO); |
222 | } | 230 | } |
223 | 231 | ||
224 | static struct buffer_head * sysv_update_inode(struct inode * inode) | 232 | static struct buffer_head * sysv_update_inode(struct inode * inode) |
@@ -328,7 +336,6 @@ static void init_once(struct kmem_cache *cachep, void *p) | |||
328 | const struct super_operations sysv_sops = { | 336 | const struct super_operations sysv_sops = { |
329 | .alloc_inode = sysv_alloc_inode, | 337 | .alloc_inode = sysv_alloc_inode, |
330 | .destroy_inode = sysv_destroy_inode, | 338 | .destroy_inode = sysv_destroy_inode, |
331 | .read_inode = sysv_read_inode, | ||
332 | .write_inode = sysv_write_inode, | 339 | .write_inode = sysv_write_inode, |
333 | .delete_inode = sysv_delete_inode, | 340 | .delete_inode = sysv_delete_inode, |
334 | .put_super = sysv_put_super, | 341 | .put_super = sysv_put_super, |
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 6bd850b7641a..a1f1ef33e81c 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c | |||
@@ -53,9 +53,9 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st | |||
53 | ino = sysv_inode_by_name(dentry); | 53 | ino = sysv_inode_by_name(dentry); |
54 | 54 | ||
55 | if (ino) { | 55 | if (ino) { |
56 | inode = iget(dir->i_sb, ino); | 56 | inode = sysv_iget(dir->i_sb, ino); |
57 | if (!inode) | 57 | if (IS_ERR(inode)) |
58 | return ERR_PTR(-EACCES); | 58 | return ERR_CAST(inode); |
59 | } | 59 | } |
60 | d_add(dentry, inode); | 60 | d_add(dentry, inode); |
61 | return NULL; | 61 | return NULL; |
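With `sysv_iget()` returning `ERR_PTR()` codes, the lookup path propagates the real error with `ERR_CAST()` instead of mapping every failure to `-EACCES`. A simplified sketch of that caller (the `nameidata` argument and the sysv-private headers are omitted for brevity):

```c
#include <linux/fs.h>
#include <linux/err.h>

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = NULL;
	unsigned long ino = sysv_inode_by_name(dentry);	/* 0 when absent */

	if (ino) {
		inode = sysv_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* pass the error upward */
	}
	d_add(dentry, inode);			/* negative dentry when no inode */
	return NULL;
}
```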
diff --git a/fs/sysv/super.c b/fs/sysv/super.c index 6f9707a1b954..5a903da54551 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c | |||
@@ -332,8 +332,8 @@ static int complete_read_super(struct super_block *sb, int silent, int size) | |||
332 | sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; | 332 | sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; |
333 | /* set up enough so that it can read an inode */ | 333 | /* set up enough so that it can read an inode */ |
334 | sb->s_op = &sysv_sops; | 334 | sb->s_op = &sysv_sops; |
335 | root_inode = iget(sb,SYSV_ROOT_INO); | 335 | root_inode = sysv_iget(sb, SYSV_ROOT_INO); |
336 | if (!root_inode || is_bad_inode(root_inode)) { | 336 | if (IS_ERR(root_inode)) { |
337 | printk("SysV FS: get root inode failed\n"); | 337 | printk("SysV FS: get root inode failed\n"); |
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 64c03bdf06a5..42d51d1c05cd 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h | |||
@@ -141,6 +141,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping, | |||
141 | struct page **pagep, void **fsdata); | 141 | struct page **pagep, void **fsdata); |
142 | 142 | ||
143 | /* inode.c */ | 143 | /* inode.c */ |
144 | extern struct inode *sysv_iget(struct super_block *, unsigned int); | ||
144 | extern int sysv_write_inode(struct inode *, int); | 145 | extern int sysv_write_inode(struct inode *, int); |
145 | extern int sysv_sync_inode(struct inode *); | 146 | extern int sysv_sync_inode(struct inode *); |
146 | extern int sysv_sync_file(struct file *, struct dentry *, int); | 147 | extern int sysv_sync_file(struct file *, struct dentry *, int); |
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index ab26176f6b91..f855dcbbdfb8 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c | |||
@@ -28,15 +28,16 @@ | |||
28 | #include "udf_i.h" | 28 | #include "udf_i.h" |
29 | #include "udf_sb.h" | 29 | #include "udf_sb.h" |
30 | 30 | ||
31 | #define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | 31 | #define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr) |
32 | #define udf_set_bit(nr,addr) ext2_set_bit(nr,addr) | 32 | #define udf_set_bit(nr, addr) ext2_set_bit(nr, addr) |
33 | #define udf_test_bit(nr, addr) ext2_test_bit(nr, addr) | 33 | #define udf_test_bit(nr, addr) ext2_test_bit(nr, addr) |
34 | #define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size) | 34 | #define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size) |
35 | #define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset) | 35 | #define udf_find_next_one_bit(addr, size, offset) \ |
36 | find_next_one_bit(addr, size, offset) | ||
36 | 37 | ||
37 | #define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x) | 38 | #define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x) |
38 | #define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y) | 39 | #define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y) |
39 | #define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y)) | 40 | #define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y)) |
40 | #define uintBPL_t uint(BITS_PER_LONG) | 41 | #define uintBPL_t uint(BITS_PER_LONG) |
41 | #define uint(x) xuint(x) | 42 | #define uint(x) xuint(x) |
42 | #define xuint(x) __le ## x | 43 | #define xuint(x) __le ## x |
@@ -62,7 +63,8 @@ static inline int find_next_one_bit(void *addr, int size, int offset) | |||
62 | result += BITS_PER_LONG; | 63 | result += BITS_PER_LONG; |
63 | } | 64 | } |
64 | while (size & ~(BITS_PER_LONG - 1)) { | 65 | while (size & ~(BITS_PER_LONG - 1)) { |
65 | if ((tmp = leBPL_to_cpup(p++))) | 66 | tmp = leBPL_to_cpup(p++); |
67 | if (tmp) | ||
66 | goto found_middle; | 68 | goto found_middle; |
67 | result += BITS_PER_LONG; | 69 | result += BITS_PER_LONG; |
68 | size -= BITS_PER_LONG; | 70 | size -= BITS_PER_LONG; |
@@ -88,12 +90,12 @@ static int read_block_bitmap(struct super_block *sb, | |||
88 | kernel_lb_addr loc; | 90 | kernel_lb_addr loc; |
89 | 91 | ||
90 | loc.logicalBlockNum = bitmap->s_extPosition; | 92 | loc.logicalBlockNum = bitmap->s_extPosition; |
91 | loc.partitionReferenceNum = UDF_SB_PARTITION(sb); | 93 | loc.partitionReferenceNum = UDF_SB(sb)->s_partition; |
92 | 94 | ||
93 | bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block)); | 95 | bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block)); |
94 | if (!bh) { | 96 | if (!bh) |
95 | retval = -EIO; | 97 | retval = -EIO; |
96 | } | 98 | |
97 | bitmap->s_block_bitmap[bitmap_nr] = bh; | 99 | bitmap->s_block_bitmap[bitmap_nr] = bh; |
98 | return retval; | 100 | return retval; |
99 | } | 101 | } |
@@ -138,6 +140,20 @@ static inline int load_block_bitmap(struct super_block *sb, | |||
138 | return slot; | 140 | return slot; |
139 | } | 141 | } |
140 | 142 | ||
143 | static bool udf_add_free_space(struct udf_sb_info *sbi, | ||
144 | u16 partition, u32 cnt) | ||
145 | { | ||
146 | struct logicalVolIntegrityDesc *lvid; | ||
147 | |||
148 | if (sbi->s_lvid_bh == NULL) | ||
149 | return false; | ||
150 | |||
151 | lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; | ||
152 | lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu( | ||
153 | lvid->freeSpaceTable[partition]) + cnt); | ||
154 | return true; | ||
155 | } | ||
156 | |||
141 | static void udf_bitmap_free_blocks(struct super_block *sb, | 157 | static void udf_bitmap_free_blocks(struct super_block *sb, |
142 | struct inode *inode, | 158 | struct inode *inode, |
143 | struct udf_bitmap *bitmap, | 159 | struct udf_bitmap *bitmap, |
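The new `udf_add_free_space()` helper centralises the logical volume integrity descriptor bookkeeping that the following hunks strip out of each allocator: frees pass a positive count, allocations a negative one, and the return value says whether the LVID buffer needs to be marked dirty. Call sites collapse to roughly this shape (the wrapper name is illustrative; the types come from the udf_sb.h definitions used in this file):

```c
/* Illustrative wrapper: adjust the LVID free-space counter and dirty
 * its buffer only when the descriptor is actually present. */
static void example_record_blocks(struct udf_sb_info *sbi, u16 partition,
				  u32 count)
{
	if (udf_add_free_space(sbi, partition, count))
		mark_buffer_dirty(sbi->s_lvid_bh);
}
```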
@@ -155,57 +171,58 @@ static void udf_bitmap_free_blocks(struct super_block *sb, | |||
155 | 171 | ||
156 | mutex_lock(&sbi->s_alloc_mutex); | 172 | mutex_lock(&sbi->s_alloc_mutex); |
157 | if (bloc.logicalBlockNum < 0 || | 173 | if (bloc.logicalBlockNum < 0 || |
158 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { | 174 | (bloc.logicalBlockNum + count) > |
175 | sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) { | ||
159 | udf_debug("%d < %d || %d + %d > %d\n", | 176 | udf_debug("%d < %d || %d + %d > %d\n", |
160 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, | 177 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, |
161 | UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); | 178 | sbi->s_partmaps[bloc.partitionReferenceNum]. |
179 | s_partition_len); | ||
162 | goto error_return; | 180 | goto error_return; |
163 | } | 181 | } |
164 | 182 | ||
165 | block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3); | 183 | block = bloc.logicalBlockNum + offset + |
184 | (sizeof(struct spaceBitmapDesc) << 3); | ||
166 | 185 | ||
167 | do_more: | 186 | do { |
168 | overflow = 0; | 187 | overflow = 0; |
169 | block_group = block >> (sb->s_blocksize_bits + 3); | 188 | block_group = block >> (sb->s_blocksize_bits + 3); |
170 | bit = block % (sb->s_blocksize << 3); | 189 | bit = block % (sb->s_blocksize << 3); |
171 | 190 | ||
172 | /* | 191 | /* |
173 | * Check to see if we are freeing blocks across a group boundary. | 192 | * Check to see if we are freeing blocks across a group boundary. |
174 | */ | 193 | */ |
175 | if (bit + count > (sb->s_blocksize << 3)) { | 194 | if (bit + count > (sb->s_blocksize << 3)) { |
176 | overflow = bit + count - (sb->s_blocksize << 3); | 195 | overflow = bit + count - (sb->s_blocksize << 3); |
177 | count -= overflow; | 196 | count -= overflow; |
178 | } | 197 | } |
179 | bitmap_nr = load_block_bitmap(sb, bitmap, block_group); | 198 | bitmap_nr = load_block_bitmap(sb, bitmap, block_group); |
180 | if (bitmap_nr < 0) | 199 | if (bitmap_nr < 0) |
181 | goto error_return; | 200 | goto error_return; |
182 | 201 | ||
183 | bh = bitmap->s_block_bitmap[bitmap_nr]; | 202 | bh = bitmap->s_block_bitmap[bitmap_nr]; |
184 | for (i = 0; i < count; i++) { | 203 | for (i = 0; i < count; i++) { |
185 | if (udf_set_bit(bit + i, bh->b_data)) { | 204 | if (udf_set_bit(bit + i, bh->b_data)) { |
186 | udf_debug("bit %ld already set\n", bit + i); | 205 | udf_debug("bit %ld already set\n", bit + i); |
187 | udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]); | 206 | udf_debug("byte=%2x\n", |
188 | } else { | 207 | ((char *)bh->b_data)[(bit + i) >> 3]); |
189 | if (inode) | 208 | } else { |
190 | DQUOT_FREE_BLOCK(inode, 1); | 209 | if (inode) |
191 | if (UDF_SB_LVIDBH(sb)) { | 210 | DQUOT_FREE_BLOCK(inode, 1); |
192 | UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = | 211 | udf_add_free_space(sbi, sbi->s_partition, 1); |
193 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1); | ||
194 | } | 212 | } |
195 | } | 213 | } |
196 | } | 214 | mark_buffer_dirty(bh); |
197 | mark_buffer_dirty(bh); | 215 | if (overflow) { |
198 | if (overflow) { | 216 | block += count; |
199 | block += count; | 217 | count = overflow; |
200 | count = overflow; | 218 | } |
201 | goto do_more; | 219 | } while (overflow); |
202 | } | 220 | |
203 | error_return: | 221 | error_return: |
204 | sb->s_dirt = 1; | 222 | sb->s_dirt = 1; |
205 | if (UDF_SB_LVIDBH(sb)) | 223 | if (sbi->s_lvid_bh) |
206 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 224 | mark_buffer_dirty(sbi->s_lvid_bh); |
207 | mutex_unlock(&sbi->s_alloc_mutex); | 225 | mutex_unlock(&sbi->s_alloc_mutex); |
208 | return; | ||
209 | } | 226 | } |
210 | 227 | ||
211 | static int udf_bitmap_prealloc_blocks(struct super_block *sb, | 228 | static int udf_bitmap_prealloc_blocks(struct super_block *sb, |
@@ -219,53 +236,50 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb, | |||
219 | int bit, block, block_group, group_start; | 236 | int bit, block, block_group, group_start; |
220 | int nr_groups, bitmap_nr; | 237 | int nr_groups, bitmap_nr; |
221 | struct buffer_head *bh; | 238 | struct buffer_head *bh; |
239 | __u32 part_len; | ||
222 | 240 | ||
223 | mutex_lock(&sbi->s_alloc_mutex); | 241 | mutex_lock(&sbi->s_alloc_mutex); |
224 | if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) | 242 | part_len = sbi->s_partmaps[partition].s_partition_len; |
243 | if (first_block < 0 || first_block >= part_len) | ||
225 | goto out; | 244 | goto out; |
226 | 245 | ||
227 | if (first_block + block_count > UDF_SB_PARTLEN(sb, partition)) | 246 | if (first_block + block_count > part_len) |
228 | block_count = UDF_SB_PARTLEN(sb, partition) - first_block; | 247 | block_count = part_len - first_block; |
229 | 248 | ||
230 | repeat: | 249 | do { |
231 | nr_groups = (UDF_SB_PARTLEN(sb, partition) + | 250 | nr_groups = udf_compute_nr_groups(sb, partition); |
232 | (sizeof(struct spaceBitmapDesc) << 3) + | 251 | block = first_block + (sizeof(struct spaceBitmapDesc) << 3); |
233 | (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8); | 252 | block_group = block >> (sb->s_blocksize_bits + 3); |
234 | block = first_block + (sizeof(struct spaceBitmapDesc) << 3); | 253 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); |
235 | block_group = block >> (sb->s_blocksize_bits + 3); | ||
236 | group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc); | ||
237 | 254 | ||
238 | bitmap_nr = load_block_bitmap(sb, bitmap, block_group); | 255 | bitmap_nr = load_block_bitmap(sb, bitmap, block_group); |
239 | if (bitmap_nr < 0) | 256 | if (bitmap_nr < 0) |
240 | goto out; | 257 | goto out; |
241 | bh = bitmap->s_block_bitmap[bitmap_nr]; | 258 | bh = bitmap->s_block_bitmap[bitmap_nr]; |
242 | 259 | ||
243 | bit = block % (sb->s_blocksize << 3); | 260 | bit = block % (sb->s_blocksize << 3); |
244 | 261 | ||
245 | while (bit < (sb->s_blocksize << 3) && block_count > 0) { | 262 | while (bit < (sb->s_blocksize << 3) && block_count > 0) { |
246 | if (!udf_test_bit(bit, bh->b_data)) { | 263 | if (!udf_test_bit(bit, bh->b_data)) |
247 | goto out; | 264 | goto out; |
248 | } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) { | 265 | else if (DQUOT_PREALLOC_BLOCK(inode, 1)) |
249 | goto out; | 266 | goto out; |
250 | } else if (!udf_clear_bit(bit, bh->b_data)) { | 267 | else if (!udf_clear_bit(bit, bh->b_data)) { |
251 | udf_debug("bit already cleared for block %d\n", bit); | 268 | udf_debug("bit already cleared for block %d\n", bit); |
252 | DQUOT_FREE_BLOCK(inode, 1); | 269 | DQUOT_FREE_BLOCK(inode, 1); |
253 | goto out; | 270 | goto out; |
271 | } | ||
272 | block_count--; | ||
273 | alloc_count++; | ||
274 | bit++; | ||
275 | block++; | ||
254 | } | 276 | } |
255 | block_count--; | 277 | mark_buffer_dirty(bh); |
256 | alloc_count++; | 278 | } while (block_count > 0); |
257 | bit++; | 279 | |
258 | block++; | ||
259 | } | ||
260 | mark_buffer_dirty(bh); | ||
261 | if (block_count > 0) | ||
262 | goto repeat; | ||
263 | out: | 280 | out: |
264 | if (UDF_SB_LVIDBH(sb)) { | 281 | if (udf_add_free_space(sbi, partition, -alloc_count)) |
265 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 282 | mark_buffer_dirty(sbi->s_lvid_bh); |
266 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count); | ||
267 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
268 | } | ||
269 | sb->s_dirt = 1; | 283 | sb->s_dirt = 1; |
270 | mutex_unlock(&sbi->s_alloc_mutex); | 284 | mutex_unlock(&sbi->s_alloc_mutex); |
271 | return alloc_count; | 285 | return alloc_count; |
@@ -287,7 +301,7 @@ static int udf_bitmap_new_block(struct super_block *sb, | |||
287 | mutex_lock(&sbi->s_alloc_mutex); | 301 | mutex_lock(&sbi->s_alloc_mutex); |
288 | 302 | ||
289 | repeat: | 303 | repeat: |
290 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) | 304 | if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len) |
291 | goal = 0; | 305 | goal = 0; |
292 | 306 | ||
293 | nr_groups = bitmap->s_nr_groups; | 307 | nr_groups = bitmap->s_nr_groups; |
@@ -312,14 +326,16 @@ repeat: | |||
312 | if (bit < end_goal) | 326 | if (bit < end_goal) |
313 | goto got_block; | 327 | goto got_block; |
314 | 328 | ||
315 | ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3)); | 329 | ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, |
330 | sb->s_blocksize - ((bit + 7) >> 3)); | ||
316 | newbit = (ptr - ((char *)bh->b_data)) << 3; | 331 | newbit = (ptr - ((char *)bh->b_data)) << 3; |
317 | if (newbit < sb->s_blocksize << 3) { | 332 | if (newbit < sb->s_blocksize << 3) { |
318 | bit = newbit; | 333 | bit = newbit; |
319 | goto search_back; | 334 | goto search_back; |
320 | } | 335 | } |
321 | 336 | ||
322 | newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit); | 337 | newbit = udf_find_next_one_bit(bh->b_data, |
338 | sb->s_blocksize << 3, bit); | ||
323 | if (newbit < sb->s_blocksize << 3) { | 339 | if (newbit < sb->s_blocksize << 3) { |
324 | bit = newbit; | 340 | bit = newbit; |
325 | goto got_block; | 341 | goto got_block; |
@@ -358,15 +374,20 @@ repeat: | |||
358 | if (bit < sb->s_blocksize << 3) | 374 | if (bit < sb->s_blocksize << 3) |
359 | goto search_back; | 375 | goto search_back; |
360 | else | 376 | else |
361 | bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); | 377 | bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, |
378 | group_start << 3); | ||
362 | if (bit >= sb->s_blocksize << 3) { | 379 | if (bit >= sb->s_blocksize << 3) { |
363 | mutex_unlock(&sbi->s_alloc_mutex); | 380 | mutex_unlock(&sbi->s_alloc_mutex); |
364 | return 0; | 381 | return 0; |
365 | } | 382 | } |
366 | 383 | ||
367 | search_back: | 384 | search_back: |
368 | for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--) | 385 | i = 0; |
369 | ; /* empty loop */ | 386 | while (i < 7 && bit > (group_start << 3) && |
387 | udf_test_bit(bit - 1, bh->b_data)) { | ||
388 | ++i; | ||
389 | --bit; | ||
390 | } | ||
370 | 391 | ||
371 | got_block: | 392 | got_block: |
372 | 393 | ||
@@ -389,11 +410,8 @@ got_block: | |||
389 | 410 | ||
390 | mark_buffer_dirty(bh); | 411 | mark_buffer_dirty(bh); |
391 | 412 | ||
392 | if (UDF_SB_LVIDBH(sb)) { | 413 | if (udf_add_free_space(sbi, partition, -1)) |
393 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 414 | mark_buffer_dirty(sbi->s_lvid_bh); |
394 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1); | ||
395 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
396 | } | ||
397 | sb->s_dirt = 1; | 415 | sb->s_dirt = 1; |
398 | mutex_unlock(&sbi->s_alloc_mutex); | 416 | mutex_unlock(&sbi->s_alloc_mutex); |
399 | *err = 0; | 417 | *err = 0; |
@@ -418,56 +436,70 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
418 | struct extent_position oepos, epos; | 436 | struct extent_position oepos, epos; |
419 | int8_t etype; | 437 | int8_t etype; |
420 | int i; | 438 | int i; |
439 | struct udf_inode_info *iinfo; | ||
421 | 440 | ||
422 | mutex_lock(&sbi->s_alloc_mutex); | 441 | mutex_lock(&sbi->s_alloc_mutex); |
423 | if (bloc.logicalBlockNum < 0 || | 442 | if (bloc.logicalBlockNum < 0 || |
424 | (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { | 443 | (bloc.logicalBlockNum + count) > |
444 | sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) { | ||
425 | udf_debug("%d < %d || %d + %d > %d\n", | 445 | udf_debug("%d < %d || %d + %d > %d\n", |
426 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, | 446 | bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count, |
427 | UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)); | 447 | sbi->s_partmaps[bloc.partitionReferenceNum]. |
448 | s_partition_len); | ||
428 | goto error_return; | 449 | goto error_return; |
429 | } | 450 | } |
430 | 451 | ||
431 | /* We do this up front - There are some error conditions that could occure, | 452 | iinfo = UDF_I(table); |
432 | but.. oh well */ | 453 | /* We do this up front - There are some error conditions that |
454 | could occure, but.. oh well */ | ||
433 | if (inode) | 455 | if (inode) |
434 | DQUOT_FREE_BLOCK(inode, count); | 456 | DQUOT_FREE_BLOCK(inode, count); |
435 | if (UDF_SB_LVIDBH(sb)) { | 457 | if (udf_add_free_space(sbi, sbi->s_partition, count)) |
436 | UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] = | 458 | mark_buffer_dirty(sbi->s_lvid_bh); |
437 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count); | ||
438 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
439 | } | ||
440 | 459 | ||
441 | start = bloc.logicalBlockNum + offset; | 460 | start = bloc.logicalBlockNum + offset; |
442 | end = bloc.logicalBlockNum + offset + count - 1; | 461 | end = bloc.logicalBlockNum + offset + count - 1; |
443 | 462 | ||
444 | epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry); | 463 | epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry); |
445 | elen = 0; | 464 | elen = 0; |
446 | epos.block = oepos.block = UDF_I_LOCATION(table); | 465 | epos.block = oepos.block = iinfo->i_location; |
447 | epos.bh = oepos.bh = NULL; | 466 | epos.bh = oepos.bh = NULL; |
448 | 467 | ||
449 | while (count && | 468 | while (count && |
450 | (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { | 469 | (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { |
451 | if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) { | 470 | if (((eloc.logicalBlockNum + |
452 | if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { | 471 | (elen >> sb->s_blocksize_bits)) == start)) { |
453 | count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 472 | if ((0x3FFFFFFF - elen) < |
454 | start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 473 | (count << sb->s_blocksize_bits)) { |
455 | elen = (etype << 30) | (0x40000000 - sb->s_blocksize); | 474 | uint32_t tmp = ((0x3FFFFFFF - elen) >> |
475 | sb->s_blocksize_bits); | ||
476 | count -= tmp; | ||
477 | start += tmp; | ||
478 | elen = (etype << 30) | | ||
479 | (0x40000000 - sb->s_blocksize); | ||
456 | } else { | 480 | } else { |
457 | elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); | 481 | elen = (etype << 30) | |
482 | (elen + | ||
483 | (count << sb->s_blocksize_bits)); | ||
458 | start += count; | 484 | start += count; |
459 | count = 0; | 485 | count = 0; |
460 | } | 486 | } |
461 | udf_write_aext(table, &oepos, eloc, elen, 1); | 487 | udf_write_aext(table, &oepos, eloc, elen, 1); |
462 | } else if (eloc.logicalBlockNum == (end + 1)) { | 488 | } else if (eloc.logicalBlockNum == (end + 1)) { |
463 | if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) { | 489 | if ((0x3FFFFFFF - elen) < |
464 | count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 490 | (count << sb->s_blocksize_bits)) { |
465 | end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 491 | uint32_t tmp = ((0x3FFFFFFF - elen) >> |
466 | eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits); | 492 | sb->s_blocksize_bits); |
467 | elen = (etype << 30) | (0x40000000 - sb->s_blocksize); | 493 | count -= tmp; |
494 | end -= tmp; | ||
495 | eloc.logicalBlockNum -= tmp; | ||
496 | elen = (etype << 30) | | ||
497 | (0x40000000 - sb->s_blocksize); | ||
468 | } else { | 498 | } else { |
469 | eloc.logicalBlockNum = start; | 499 | eloc.logicalBlockNum = start; |
470 | elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits)); | 500 | elen = (etype << 30) | |
501 | (elen + | ||
502 | (count << sb->s_blocksize_bits)); | ||
471 | end -= count; | 503 | end -= count; |
472 | count = 0; | 504 | count = 0; |
473 | } | 505 | } |
@@ -488,9 +520,9 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
488 | 520 | ||
489 | if (count) { | 521 | if (count) { |
490 | /* | 522 | /* |
491 | * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate | 523 | * NOTE: we CANNOT use udf_add_aext here, as it can try to |
492 | * a new block, and since we hold the super block lock already | 524 | * allocate a new block, and since we hold the super block |
493 | * very bad things would happen :) | 525 | * lock already very bad things would happen :) |
494 | * | 526 | * |
495 | * We copy the behavior of udf_add_aext, but instead of | 527 | * We copy the behavior of udf_add_aext, but instead of |
496 | * trying to allocate a new block close to the existing one, | 528 | * trying to allocate a new block close to the existing one, |
@@ -509,11 +541,11 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
509 | elen = EXT_RECORDED_ALLOCATED | | 541 | elen = EXT_RECORDED_ALLOCATED | |
510 | (count << sb->s_blocksize_bits); | 542 | (count << sb->s_blocksize_bits); |
511 | 543 | ||
512 | if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) { | 544 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
513 | adsize = sizeof(short_ad); | 545 | adsize = sizeof(short_ad); |
514 | } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) { | 546 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
515 | adsize = sizeof(long_ad); | 547 | adsize = sizeof(long_ad); |
516 | } else { | 548 | else { |
517 | brelse(oepos.bh); | 549 | brelse(oepos.bh); |
518 | brelse(epos.bh); | 550 | brelse(epos.bh); |
519 | goto error_return; | 551 | goto error_return; |
@@ -531,56 +563,70 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
531 | eloc.logicalBlockNum++; | 563 | eloc.logicalBlockNum++; |
532 | elen -= sb->s_blocksize; | 564 | elen -= sb->s_blocksize; |
533 | 565 | ||
534 | if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) { | 566 | epos.bh = udf_tread(sb, |
567 | udf_get_lb_pblock(sb, epos.block, 0)); | ||
568 | if (!epos.bh) { | ||
535 | brelse(oepos.bh); | 569 | brelse(oepos.bh); |
536 | goto error_return; | 570 | goto error_return; |
537 | } | 571 | } |
538 | aed = (struct allocExtDesc *)(epos.bh->b_data); | 572 | aed = (struct allocExtDesc *)(epos.bh->b_data); |
539 | aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum); | 573 | aed->previousAllocExtLocation = |
574 | cpu_to_le32(oepos.block.logicalBlockNum); | ||
540 | if (epos.offset + adsize > sb->s_blocksize) { | 575 | if (epos.offset + adsize > sb->s_blocksize) { |
541 | loffset = epos.offset; | 576 | loffset = epos.offset; |
542 | aed->lengthAllocDescs = cpu_to_le32(adsize); | 577 | aed->lengthAllocDescs = cpu_to_le32(adsize); |
543 | sptr = UDF_I_DATA(table) + epos.offset - adsize; | 578 | sptr = iinfo->i_ext.i_data + epos.offset |
544 | dptr = epos.bh->b_data + sizeof(struct allocExtDesc); | 579 | - adsize; |
580 | dptr = epos.bh->b_data + | ||
581 | sizeof(struct allocExtDesc); | ||
545 | memcpy(dptr, sptr, adsize); | 582 | memcpy(dptr, sptr, adsize); |
546 | epos.offset = sizeof(struct allocExtDesc) + adsize; | 583 | epos.offset = sizeof(struct allocExtDesc) + |
584 | adsize; | ||
547 | } else { | 585 | } else { |
548 | loffset = epos.offset + adsize; | 586 | loffset = epos.offset + adsize; |
549 | aed->lengthAllocDescs = cpu_to_le32(0); | 587 | aed->lengthAllocDescs = cpu_to_le32(0); |
550 | if (oepos.bh) { | 588 | if (oepos.bh) { |
551 | sptr = oepos.bh->b_data + epos.offset; | 589 | sptr = oepos.bh->b_data + epos.offset; |
552 | aed = (struct allocExtDesc *)oepos.bh->b_data; | 590 | aed = (struct allocExtDesc *) |
591 | oepos.bh->b_data; | ||
553 | aed->lengthAllocDescs = | 592 | aed->lengthAllocDescs = |
554 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 593 | cpu_to_le32(le32_to_cpu( |
594 | aed->lengthAllocDescs) + | ||
595 | adsize); | ||
555 | } else { | 596 | } else { |
556 | sptr = UDF_I_DATA(table) + epos.offset; | 597 | sptr = iinfo->i_ext.i_data + |
557 | UDF_I_LENALLOC(table) += adsize; | 598 | epos.offset; |
599 | iinfo->i_lenAlloc += adsize; | ||
558 | mark_inode_dirty(table); | 600 | mark_inode_dirty(table); |
559 | } | 601 | } |
560 | epos.offset = sizeof(struct allocExtDesc); | 602 | epos.offset = sizeof(struct allocExtDesc); |
561 | } | 603 | } |
562 | if (UDF_SB_UDFREV(sb) >= 0x0200) | 604 | if (sbi->s_udfrev >= 0x0200) |
563 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1, | 605 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, |
564 | epos.block.logicalBlockNum, sizeof(tag)); | 606 | 3, 1, epos.block.logicalBlockNum, |
607 | sizeof(tag)); | ||
565 | else | 608 | else |
566 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1, | 609 | udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, |
567 | epos.block.logicalBlockNum, sizeof(tag)); | 610 | 2, 1, epos.block.logicalBlockNum, |
568 | 611 | sizeof(tag)); | |
569 | switch (UDF_I_ALLOCTYPE(table)) { | 612 | |
570 | case ICBTAG_FLAG_AD_SHORT: | 613 | switch (iinfo->i_alloc_type) { |
571 | sad = (short_ad *)sptr; | 614 | case ICBTAG_FLAG_AD_SHORT: |
572 | sad->extLength = cpu_to_le32( | 615 | sad = (short_ad *)sptr; |
573 | EXT_NEXT_EXTENT_ALLOCDECS | | 616 | sad->extLength = cpu_to_le32( |
574 | sb->s_blocksize); | 617 | EXT_NEXT_EXTENT_ALLOCDECS | |
575 | sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum); | 618 | sb->s_blocksize); |
576 | break; | 619 | sad->extPosition = |
577 | case ICBTAG_FLAG_AD_LONG: | 620 | cpu_to_le32(epos.block.logicalBlockNum); |
578 | lad = (long_ad *)sptr; | 621 | break; |
579 | lad->extLength = cpu_to_le32( | 622 | case ICBTAG_FLAG_AD_LONG: |
580 | EXT_NEXT_EXTENT_ALLOCDECS | | 623 | lad = (long_ad *)sptr; |
581 | sb->s_blocksize); | 624 | lad->extLength = cpu_to_le32( |
582 | lad->extLocation = cpu_to_lelb(epos.block); | 625 | EXT_NEXT_EXTENT_ALLOCDECS | |
583 | break; | 626 | sb->s_blocksize); |
627 | lad->extLocation = | ||
628 | cpu_to_lelb(epos.block); | ||
629 | break; | ||
584 | } | 630 | } |
585 | if (oepos.bh) { | 631 | if (oepos.bh) { |
586 | udf_update_tag(oepos.bh->b_data, loffset); | 632 | udf_update_tag(oepos.bh->b_data, loffset); |
@@ -590,16 +636,18 @@ static void udf_table_free_blocks(struct super_block *sb, | |||
590 | } | 636 | } |
591 | } | 637 | } |
592 | 638 | ||
593 | if (elen) { /* It's possible that stealing the block emptied the extent */ | 639 | /* It's possible that stealing the block emptied the extent */ |
640 | if (elen) { | ||
594 | udf_write_aext(table, &epos, eloc, elen, 1); | 641 | udf_write_aext(table, &epos, eloc, elen, 1); |
595 | 642 | ||
596 | if (!epos.bh) { | 643 | if (!epos.bh) { |
597 | UDF_I_LENALLOC(table) += adsize; | 644 | iinfo->i_lenAlloc += adsize; |
598 | mark_inode_dirty(table); | 645 | mark_inode_dirty(table); |
599 | } else { | 646 | } else { |
600 | aed = (struct allocExtDesc *)epos.bh->b_data; | 647 | aed = (struct allocExtDesc *)epos.bh->b_data; |
601 | aed->lengthAllocDescs = | 648 | aed->lengthAllocDescs = |
602 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 649 | cpu_to_le32(le32_to_cpu( |
650 | aed->lengthAllocDescs) + adsize); | ||
603 | udf_update_tag(epos.bh->b_data, epos.offset); | 651 | udf_update_tag(epos.bh->b_data, epos.offset); |
604 | mark_buffer_dirty(epos.bh); | 652 | mark_buffer_dirty(epos.bh); |
605 | } | 653 | } |
@@ -626,20 +674,23 @@ static int udf_table_prealloc_blocks(struct super_block *sb, | |||
626 | kernel_lb_addr eloc; | 674 | kernel_lb_addr eloc; |
627 | struct extent_position epos; | 675 | struct extent_position epos; |
628 | int8_t etype = -1; | 676 | int8_t etype = -1; |
677 | struct udf_inode_info *iinfo; | ||
629 | 678 | ||
630 | if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) | 679 | if (first_block < 0 || |
680 | first_block >= sbi->s_partmaps[partition].s_partition_len) | ||
631 | return 0; | 681 | return 0; |
632 | 682 | ||
633 | if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) | 683 | iinfo = UDF_I(table); |
684 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) | ||
634 | adsize = sizeof(short_ad); | 685 | adsize = sizeof(short_ad); |
635 | else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) | 686 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
636 | adsize = sizeof(long_ad); | 687 | adsize = sizeof(long_ad); |
637 | else | 688 | else |
638 | return 0; | 689 | return 0; |
639 | 690 | ||
640 | mutex_lock(&sbi->s_alloc_mutex); | 691 | mutex_lock(&sbi->s_alloc_mutex); |
641 | epos.offset = sizeof(struct unallocSpaceEntry); | 692 | epos.offset = sizeof(struct unallocSpaceEntry); |
642 | epos.block = UDF_I_LOCATION(table); | 693 | epos.block = iinfo->i_location; |
643 | epos.bh = NULL; | 694 | epos.bh = NULL; |
644 | eloc.logicalBlockNum = 0xFFFFFFFF; | 695 | eloc.logicalBlockNum = 0xFFFFFFFF; |
645 | 696 | ||
@@ -654,26 +705,26 @@ static int udf_table_prealloc_blocks(struct super_block *sb, | |||
654 | epos.offset -= adsize; | 705 | epos.offset -= adsize; |
655 | 706 | ||
656 | alloc_count = (elen >> sb->s_blocksize_bits); | 707 | alloc_count = (elen >> sb->s_blocksize_bits); |
657 | if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) { | 708 | if (inode && DQUOT_PREALLOC_BLOCK(inode, |
709 | alloc_count > block_count ? block_count : alloc_count)) | ||
658 | alloc_count = 0; | 710 | alloc_count = 0; |
659 | } else if (alloc_count > block_count) { | 711 | else if (alloc_count > block_count) { |
660 | alloc_count = block_count; | 712 | alloc_count = block_count; |
661 | eloc.logicalBlockNum += alloc_count; | 713 | eloc.logicalBlockNum += alloc_count; |
662 | elen -= (alloc_count << sb->s_blocksize_bits); | 714 | elen -= (alloc_count << sb->s_blocksize_bits); |
663 | udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1); | 715 | udf_write_aext(table, &epos, eloc, |
664 | } else { | 716 | (etype << 30) | elen, 1); |
665 | udf_delete_aext(table, epos, eloc, (etype << 30) | elen); | 717 | } else |
666 | } | 718 | udf_delete_aext(table, epos, eloc, |
719 | (etype << 30) | elen); | ||
667 | } else { | 720 | } else { |
668 | alloc_count = 0; | 721 | alloc_count = 0; |
669 | } | 722 | } |
670 | 723 | ||
671 | brelse(epos.bh); | 724 | brelse(epos.bh); |
672 | 725 | ||
673 | if (alloc_count && UDF_SB_LVIDBH(sb)) { | 726 | if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) { |
674 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 727 | mark_buffer_dirty(sbi->s_lvid_bh); |
675 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count); | ||
676 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
677 | sb->s_dirt = 1; | 728 | sb->s_dirt = 1; |
678 | } | 729 | } |
679 | mutex_unlock(&sbi->s_alloc_mutex); | 730 | mutex_unlock(&sbi->s_alloc_mutex); |
@@ -692,33 +743,35 @@ static int udf_table_new_block(struct super_block *sb, | |||
692 | kernel_lb_addr eloc, uninitialized_var(goal_eloc); | 743 | kernel_lb_addr eloc, uninitialized_var(goal_eloc); |
693 | struct extent_position epos, goal_epos; | 744 | struct extent_position epos, goal_epos; |
694 | int8_t etype; | 745 | int8_t etype; |
746 | struct udf_inode_info *iinfo = UDF_I(table); | ||
695 | 747 | ||
696 | *err = -ENOSPC; | 748 | *err = -ENOSPC; |
697 | 749 | ||
698 | if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) | 750 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
699 | adsize = sizeof(short_ad); | 751 | adsize = sizeof(short_ad); |
700 | else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) | 752 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
701 | adsize = sizeof(long_ad); | 753 | adsize = sizeof(long_ad); |
702 | else | 754 | else |
703 | return newblock; | 755 | return newblock; |
704 | 756 | ||
705 | mutex_lock(&sbi->s_alloc_mutex); | 757 | mutex_lock(&sbi->s_alloc_mutex); |
706 | if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) | 758 | if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len) |
707 | goal = 0; | 759 | goal = 0; |
708 | 760 | ||
709 | /* We search for the closest matching block to goal. If we find a exact hit, | 761 | /* We search for the closest matching block to goal. If we find |
710 | we stop. Otherwise we keep going till we run out of extents. | 762 | a exact hit, we stop. Otherwise we keep going till we run out |
711 | We store the buffer_head, bloc, and extoffset of the current closest | 763 | of extents. We store the buffer_head, bloc, and extoffset |
712 | match and use that when we are done. | 764 | of the current closest match and use that when we are done. |
713 | */ | 765 | */ |
714 | epos.offset = sizeof(struct unallocSpaceEntry); | 766 | epos.offset = sizeof(struct unallocSpaceEntry); |
715 | epos.block = UDF_I_LOCATION(table); | 767 | epos.block = iinfo->i_location; |
716 | epos.bh = goal_epos.bh = NULL; | 768 | epos.bh = goal_epos.bh = NULL; |
717 | 769 | ||
718 | while (spread && | 770 | while (spread && |
719 | (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { | 771 | (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { |
720 | if (goal >= eloc.logicalBlockNum) { | 772 | if (goal >= eloc.logicalBlockNum) { |
721 | if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) | 773 | if (goal < eloc.logicalBlockNum + |
774 | (elen >> sb->s_blocksize_bits)) | ||
722 | nspread = 0; | 775 | nspread = 0; |
723 | else | 776 | else |
724 | nspread = goal - eloc.logicalBlockNum - | 777 | nspread = goal - eloc.logicalBlockNum - |
@@ -771,11 +824,8 @@ static int udf_table_new_block(struct super_block *sb, | |||
771 | udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); | 824 | udf_delete_aext(table, goal_epos, goal_eloc, goal_elen); |
772 | brelse(goal_epos.bh); | 825 | brelse(goal_epos.bh); |
773 | 826 | ||
774 | if (UDF_SB_LVIDBH(sb)) { | 827 | if (udf_add_free_space(sbi, partition, -1)) |
775 | UDF_SB_LVID(sb)->freeSpaceTable[partition] = | 828 | mark_buffer_dirty(sbi->s_lvid_bh); |
776 | cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1); | ||
777 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
778 | } | ||
779 | 829 | ||
780 | sb->s_dirt = 1; | 830 | sb->s_dirt = 1; |
781 | mutex_unlock(&sbi->s_alloc_mutex); | 831 | mutex_unlock(&sbi->s_alloc_mutex); |
@@ -789,22 +839,23 @@ inline void udf_free_blocks(struct super_block *sb, | |||
789 | uint32_t count) | 839 | uint32_t count) |
790 | { | 840 | { |
791 | uint16_t partition = bloc.partitionReferenceNum; | 841 | uint16_t partition = bloc.partitionReferenceNum; |
842 | struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; | ||
792 | 843 | ||
793 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { | 844 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { |
794 | return udf_bitmap_free_blocks(sb, inode, | 845 | return udf_bitmap_free_blocks(sb, inode, |
795 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 846 | map->s_uspace.s_bitmap, |
796 | bloc, offset, count); | 847 | bloc, offset, count); |
797 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) { | 848 | } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { |
798 | return udf_table_free_blocks(sb, inode, | 849 | return udf_table_free_blocks(sb, inode, |
799 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 850 | map->s_uspace.s_table, |
800 | bloc, offset, count); | 851 | bloc, offset, count); |
801 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { | 852 | } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) { |
802 | return udf_bitmap_free_blocks(sb, inode, | 853 | return udf_bitmap_free_blocks(sb, inode, |
803 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 854 | map->s_fspace.s_bitmap, |
804 | bloc, offset, count); | 855 | bloc, offset, count); |
805 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { | 856 | } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) { |
806 | return udf_table_free_blocks(sb, inode, | 857 | return udf_table_free_blocks(sb, inode, |
807 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 858 | map->s_fspace.s_table, |
808 | bloc, offset, count); | 859 | bloc, offset, count); |
809 | } else { | 860 | } else { |
810 | return; | 861 | return; |
@@ -816,51 +867,55 @@ inline int udf_prealloc_blocks(struct super_block *sb, | |||
816 | uint16_t partition, uint32_t first_block, | 867 | uint16_t partition, uint32_t first_block, |
817 | uint32_t block_count) | 868 | uint32_t block_count) |
818 | { | 869 | { |
819 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { | 870 | struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; |
871 | |||
872 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) | ||
820 | return udf_bitmap_prealloc_blocks(sb, inode, | 873 | return udf_bitmap_prealloc_blocks(sb, inode, |
821 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 874 | map->s_uspace.s_bitmap, |
822 | partition, first_block, block_count); | 875 | partition, first_block, |
823 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) { | 876 | block_count); |
877 | else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) | ||
824 | return udf_table_prealloc_blocks(sb, inode, | 878 | return udf_table_prealloc_blocks(sb, inode, |
825 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 879 | map->s_uspace.s_table, |
826 | partition, first_block, block_count); | 880 | partition, first_block, |
827 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { | 881 | block_count); |
882 | else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) | ||
828 | return udf_bitmap_prealloc_blocks(sb, inode, | 883 | return udf_bitmap_prealloc_blocks(sb, inode, |
829 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 884 | map->s_fspace.s_bitmap, |
830 | partition, first_block, block_count); | 885 | partition, first_block, |
831 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { | 886 | block_count); |
887 | else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) | ||
832 | return udf_table_prealloc_blocks(sb, inode, | 888 | return udf_table_prealloc_blocks(sb, inode, |
833 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 889 | map->s_fspace.s_table, |
834 | partition, first_block, block_count); | 890 | partition, first_block, |
835 | } else { | 891 | block_count); |
892 | else | ||
836 | return 0; | 893 | return 0; |
837 | } | ||
838 | } | 894 | } |
839 | 895 | ||
840 | inline int udf_new_block(struct super_block *sb, | 896 | inline int udf_new_block(struct super_block *sb, |
841 | struct inode *inode, | 897 | struct inode *inode, |
842 | uint16_t partition, uint32_t goal, int *err) | 898 | uint16_t partition, uint32_t goal, int *err) |
843 | { | 899 | { |
844 | int ret; | 900 | struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; |
845 | 901 | ||
846 | if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) { | 902 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) |
847 | ret = udf_bitmap_new_block(sb, inode, | 903 | return udf_bitmap_new_block(sb, inode, |
848 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap, | 904 | map->s_uspace.s_bitmap, |
849 | partition, goal, err); | 905 | partition, goal, err); |
850 | return ret; | 906 | else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) |
851 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) { | ||
852 | return udf_table_new_block(sb, inode, | 907 | return udf_table_new_block(sb, inode, |
853 | UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table, | 908 | map->s_uspace.s_table, |
854 | partition, goal, err); | 909 | partition, goal, err); |
855 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) { | 910 | else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) |
856 | return udf_bitmap_new_block(sb, inode, | 911 | return udf_bitmap_new_block(sb, inode, |
857 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap, | 912 | map->s_fspace.s_bitmap, |
858 | partition, goal, err); | 913 | partition, goal, err); |
859 | } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) { | 914 | else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) |
860 | return udf_table_new_block(sb, inode, | 915 | return udf_table_new_block(sb, inode, |
861 | UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table, | 916 | map->s_fspace.s_table, |
862 | partition, goal, err); | 917 | partition, goal, err); |
863 | } else { | 918 | else { |
864 | *err = -EIO; | 919 | *err = -EIO; |
865 | return 0; | 920 | return 0; |
866 | } | 921 | } |
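The `udf_free_blocks()`/`udf_prealloc_blocks()`/`udf_new_block()` wrappers at the end of balloc.c now look the partition map up once into a local `struct udf_part_map *` instead of expanding the old `UDF_SB_PARTMAPS()`/`UDF_SB_PARTFLAGS()` macros at every branch. A condensed sketch of the dispatch, with the freed-space variants omitted:

```c
static inline int example_new_block(struct super_block *sb, struct inode *inode,
				    uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode, map->s_uspace.s_bitmap,
					    partition, goal, err);
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode, map->s_uspace.s_table,
					    partition, goal, err);
	*err = -EIO;
	return 0;
}
```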
diff --git a/fs/udf/crc.c b/fs/udf/crc.c index 85aaee5fab26..b1661296e786 100644 --- a/fs/udf/crc.c +++ b/fs/udf/crc.c | |||
@@ -79,7 +79,7 @@ static uint16_t crc_table[256] = { | |||
79 | * July 21, 1997 - Andrew E. Mileski | 79 | * July 21, 1997 - Andrew E. Mileski |
80 | * Adapted from OSTA-UDF(tm) 1.50 standard. | 80 | * Adapted from OSTA-UDF(tm) 1.50 standard. |
81 | */ | 81 | */ |
82 | uint16_t udf_crc(uint8_t * data, uint32_t size, uint16_t crc) | 82 | uint16_t udf_crc(uint8_t *data, uint32_t size, uint16_t crc) |
83 | { | 83 | { |
84 | while (size--) | 84 | while (size--) |
85 | crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8); | 85 | crc = crc_table[(crc >> 8 ^ *(data++)) & 0xffU] ^ (crc << 8); |
diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 9e3b9f97ddbc..8d8643ada199 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c | |||
@@ -36,80 +36,20 @@ | |||
36 | #include "udf_i.h" | 36 | #include "udf_i.h" |
37 | #include "udf_sb.h" | 37 | #include "udf_sb.h" |
38 | 38 | ||
39 | /* Prototypes for file operations */ | 39 | static int do_udf_readdir(struct inode *dir, struct file *filp, |
40 | static int udf_readdir(struct file *, void *, filldir_t); | 40 | filldir_t filldir, void *dirent) |
41 | static int do_udf_readdir(struct inode *, struct file *, filldir_t, void *); | ||
42 | |||
43 | /* readdir and lookup functions */ | ||
44 | |||
45 | const struct file_operations udf_dir_operations = { | ||
46 | .read = generic_read_dir, | ||
47 | .readdir = udf_readdir, | ||
48 | .ioctl = udf_ioctl, | ||
49 | .fsync = udf_fsync_file, | ||
50 | }; | ||
51 | |||
52 | /* | ||
53 | * udf_readdir | ||
54 | * | ||
55 | * PURPOSE | ||
56 | * Read a directory entry. | ||
57 | * | ||
58 | * DESCRIPTION | ||
59 | * Optional - sys_getdents() will return -ENOTDIR if this routine is not | ||
60 | * available. | ||
61 | * | ||
62 | * Refer to sys_getdents() in fs/readdir.c | ||
63 | * sys_getdents() -> . | ||
64 | * | ||
65 | * PRE-CONDITIONS | ||
66 | * filp Pointer to directory file. | ||
67 | * buf Pointer to directory entry buffer. | ||
68 | * filldir Pointer to filldir function. | ||
69 | * | ||
70 | * POST-CONDITIONS | ||
71 | * <return> >=0 on success. | ||
72 | * | ||
73 | * HISTORY | ||
74 | * July 1, 1997 - Andrew E. Mileski | ||
75 | * Written, tested, and released. | ||
76 | */ | ||
77 | |||
78 | int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
79 | { | ||
80 | struct inode *dir = filp->f_path.dentry->d_inode; | ||
81 | int result; | ||
82 | |||
83 | lock_kernel(); | ||
84 | |||
85 | if (filp->f_pos == 0) { | ||
86 | if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) { | ||
87 | unlock_kernel(); | ||
88 | return 0; | ||
89 | } | ||
90 | filp->f_pos++; | ||
91 | } | ||
92 | |||
93 | result = do_udf_readdir(dir, filp, filldir, dirent); | ||
94 | unlock_kernel(); | ||
95 | return result; | ||
96 | } | ||
97 | |||
98 | static int | ||
99 | do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir, | ||
100 | void *dirent) | ||
101 | { | 41 | { |
102 | struct udf_fileident_bh fibh; | 42 | struct udf_fileident_bh fibh; |
103 | struct fileIdentDesc *fi = NULL; | 43 | struct fileIdentDesc *fi = NULL; |
104 | struct fileIdentDesc cfi; | 44 | struct fileIdentDesc cfi; |
105 | int block, iblock; | 45 | int block, iblock; |
106 | loff_t nf_pos = filp->f_pos - 1; | 46 | loff_t nf_pos = (filp->f_pos - 1) << 2; |
107 | int flen; | 47 | int flen; |
108 | char fname[UDF_NAME_LEN]; | 48 | char fname[UDF_NAME_LEN]; |
109 | char *nameptr; | 49 | char *nameptr; |
110 | uint16_t liu; | 50 | uint16_t liu; |
111 | uint8_t lfi; | 51 | uint8_t lfi; |
112 | loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; | 52 | loff_t size = udf_ext0_offset(dir) + dir->i_size; |
113 | struct buffer_head *tmp, *bha[16]; | 53 | struct buffer_head *tmp, *bha[16]; |
114 | kernel_lb_addr eloc; | 54 | kernel_lb_addr eloc; |
115 | uint32_t elen; | 55 | uint32_t elen; |
@@ -117,23 +57,26 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir, | |||
117 | int i, num; | 57 | int i, num; |
118 | unsigned int dt_type; | 58 | unsigned int dt_type; |
119 | struct extent_position epos = { NULL, 0, {0, 0} }; | 59 | struct extent_position epos = { NULL, 0, {0, 0} }; |
60 | struct udf_inode_info *iinfo; | ||
120 | 61 | ||
121 | if (nf_pos >= size) | 62 | if (nf_pos >= size) |
122 | return 0; | 63 | return 0; |
123 | 64 | ||
124 | if (nf_pos == 0) | 65 | if (nf_pos == 0) |
125 | nf_pos = (udf_ext0_offset(dir) >> 2); | 66 | nf_pos = udf_ext0_offset(dir); |
126 | 67 | ||
127 | fibh.soffset = fibh.eoffset = (nf_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; | 68 | fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1); |
128 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 69 | iinfo = UDF_I(dir); |
70 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { | ||
129 | fibh.sbh = fibh.ebh = NULL; | 71 | fibh.sbh = fibh.ebh = NULL; |
130 | } else if (inode_bmap(dir, nf_pos >> (dir->i_sb->s_blocksize_bits - 2), | 72 | } else if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits, |
131 | &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { | 73 | &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { |
132 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); | 74 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); |
133 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { | 75 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { |
134 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) | 76 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
135 | epos.offset -= sizeof(short_ad); | 77 | epos.offset -= sizeof(short_ad); |
136 | else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) | 78 | else if (iinfo->i_alloc_type == |
79 | ICBTAG_FLAG_AD_LONG) | ||
137 | epos.offset -= sizeof(long_ad); | 80 | epos.offset -= sizeof(long_ad); |
138 | } else { | 81 | } else { |
139 | offset = 0; | 82 | offset = 0; |
@@ -168,7 +111,7 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir, | |||
168 | } | 111 | } |
169 | 112 | ||
170 | while (nf_pos < size) { | 113 | while (nf_pos < size) { |
171 | filp->f_pos = nf_pos + 1; | 114 | filp->f_pos = (nf_pos >> 2) + 1; |
172 | 115 | ||
173 | fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, | 116 | fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc, |
174 | &elen, &offset); | 117 | &elen, &offset); |
@@ -235,7 +178,7 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir, | |||
235 | } | 178 | } |
236 | } /* end while */ | 179 | } /* end while */ |
237 | 180 | ||
238 | filp->f_pos = nf_pos + 1; | 181 | filp->f_pos = (nf_pos >> 2) + 1; |
239 | 182 | ||
240 | if (fibh.sbh != fibh.ebh) | 183 | if (fibh.sbh != fibh.ebh) |
241 | brelse(fibh.ebh); | 184 | brelse(fibh.ebh); |
@@ -244,3 +187,57 @@ do_udf_readdir(struct inode *dir, struct file *filp, filldir_t filldir, | |||
244 | 187 | ||
245 | return 0; | 188 | return 0; |
246 | } | 189 | } |
190 | |||
191 | /* | ||
192 | * udf_readdir | ||
193 | * | ||
194 | * PURPOSE | ||
195 | * Read a directory entry. | ||
196 | * | ||
197 | * DESCRIPTION | ||
198 | * Optional - sys_getdents() will return -ENOTDIR if this routine is not | ||
199 | * available. | ||
200 | * | ||
201 | * Refer to sys_getdents() in fs/readdir.c | ||
202 | * sys_getdents() -> . | ||
203 | * | ||
204 | * PRE-CONDITIONS | ||
205 | * filp Pointer to directory file. | ||
206 | * buf Pointer to directory entry buffer. | ||
207 | * filldir Pointer to filldir function. | ||
208 | * | ||
209 | * POST-CONDITIONS | ||
210 | * <return> >=0 on success. | ||
211 | * | ||
212 | * HISTORY | ||
213 | * July 1, 1997 - Andrew E. Mileski | ||
214 | * Written, tested, and released. | ||
215 | */ | ||
216 | |||
217 | static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
218 | { | ||
219 | struct inode *dir = filp->f_path.dentry->d_inode; | ||
220 | int result; | ||
221 | |||
222 | lock_kernel(); | ||
223 | |||
224 | if (filp->f_pos == 0) { | ||
225 | if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) { | ||
226 | unlock_kernel(); | ||
227 | return 0; | ||
228 | } | ||
229 | filp->f_pos++; | ||
230 | } | ||
231 | |||
232 | result = do_udf_readdir(dir, filp, filldir, dirent); | ||
233 | unlock_kernel(); | ||
234 | return result; | ||
235 | } | ||
236 | |||
237 | /* readdir and lookup functions */ | ||
238 | const struct file_operations udf_dir_operations = { | ||
239 | .read = generic_read_dir, | ||
240 | .readdir = udf_readdir, | ||
241 | .ioctl = udf_ioctl, | ||
242 | .fsync = udf_fsync_file, | ||
243 | }; | ||
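The reworked do_udf_readdir() above keeps its internal cursor, nf_pos, in plain bytes, while the value stored in filp->f_pos stays in the historical 4-byte units with slot 0 reserved for the "." entry that udf_readdir() emits itself. That is why the function converts with (filp->f_pos - 1) << 2 on entry and (nf_pos >> 2) + 1 before returning. A minimal sketch of that round trip, using hypothetical helper names (the in-tree code open-codes the shifts):

    #include <linux/types.h>

    /* Sketch only - helper names are not from the udf tree.
     * f_pos == 0 means "." has not been returned yet; afterwards f_pos
     * holds the directory byte offset divided by 4, plus 1 for the "."
     * slot.
     */
    static inline loff_t udf_fpos_to_bytes(loff_t f_pos)
    {
            return (f_pos - 1) << 2;        /* drop the "." slot, scale to bytes */
    }

    static inline loff_t udf_bytes_to_fpos(loff_t byte_off)
    {
            return (byte_off >> 2) + 1;     /* back to 4-byte units, re-add "." */
    }

For example, a directory byte offset of 0x800 maps to an f_pos of 0x201 and converts back to 0x800; since file identifier descriptors are padded to 4-byte boundaries (UDF_NAME_PAD), the low two bits carry no position information and the shift is lossless.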
diff --git a/fs/udf/directory.c b/fs/udf/directory.c index ff8c08fd7bf5..2820f8fcf4cc 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/buffer_head.h> | 19 | #include <linux/buffer_head.h> |
20 | 20 | ||
21 | #if 0 | 21 | #if 0 |
22 | static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad, | 22 | static uint8_t *udf_filead_read(struct inode *dir, uint8_t *tmpad, |
23 | uint8_t ad_size, kernel_lb_addr fe_loc, | 23 | uint8_t ad_size, kernel_lb_addr fe_loc, |
24 | int *pos, int *offset, struct buffer_head **bh, | 24 | int *pos, int *offset, struct buffer_head **bh, |
25 | int *error) | 25 | int *error) |
@@ -45,7 +45,8 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad, | |||
45 | block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); | 45 | block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); |
46 | if (!block) | 46 | if (!block) |
47 | return NULL; | 47 | return NULL; |
48 | if (!(*bh = udf_tread(dir->i_sb, block))) | 48 | *bh = udf_tread(dir->i_sb, block); |
49 | if (!*bh) | ||
49 | return NULL; | 50 | return NULL; |
50 | } else if (*offset > dir->i_sb->s_blocksize) { | 51 | } else if (*offset > dir->i_sb->s_blocksize) { |
51 | ad = tmpad; | 52 | ad = tmpad; |
@@ -57,10 +58,12 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad, | |||
57 | block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); | 58 | block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos); |
58 | if (!block) | 59 | if (!block) |
59 | return NULL; | 60 | return NULL; |
60 | if (!((*bh) = udf_tread(dir->i_sb, block))) | 61 | (*bh) = udf_tread(dir->i_sb, block); |
62 | if (!*bh) | ||
61 | return NULL; | 63 | return NULL; |
62 | 64 | ||
63 | memcpy((uint8_t *)ad + remainder, (*bh)->b_data, ad_size - remainder); | 65 | memcpy((uint8_t *)ad + remainder, (*bh)->b_data, |
66 | ad_size - remainder); | ||
64 | *offset = ad_size - remainder; | 67 | *offset = ad_size - remainder; |
65 | } | 68 | } |
66 | 69 | ||
@@ -68,29 +71,31 @@ static uint8_t *udf_filead_read(struct inode *dir, uint8_t * tmpad, | |||
68 | } | 71 | } |
69 | #endif | 72 | #endif |
70 | 73 | ||
71 | struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, | 74 | struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos, |
72 | struct udf_fileident_bh *fibh, | 75 | struct udf_fileident_bh *fibh, |
73 | struct fileIdentDesc *cfi, | 76 | struct fileIdentDesc *cfi, |
74 | struct extent_position *epos, | 77 | struct extent_position *epos, |
75 | kernel_lb_addr * eloc, uint32_t * elen, | 78 | kernel_lb_addr *eloc, uint32_t *elen, |
76 | sector_t * offset) | 79 | sector_t *offset) |
77 | { | 80 | { |
78 | struct fileIdentDesc *fi; | 81 | struct fileIdentDesc *fi; |
79 | int i, num, block; | 82 | int i, num, block; |
80 | struct buffer_head *tmp, *bha[16]; | 83 | struct buffer_head *tmp, *bha[16]; |
84 | struct udf_inode_info *iinfo = UDF_I(dir); | ||
81 | 85 | ||
82 | fibh->soffset = fibh->eoffset; | 86 | fibh->soffset = fibh->eoffset; |
83 | 87 | ||
84 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 88 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
85 | fi = udf_get_fileident(UDF_I_DATA(dir) - | 89 | fi = udf_get_fileident(iinfo->i_ext.i_data - |
86 | (UDF_I_EFE(dir) ? | 90 | (iinfo->i_efe ? |
87 | sizeof(struct extendedFileEntry) : | 91 | sizeof(struct extendedFileEntry) : |
88 | sizeof(struct fileEntry)), | 92 | sizeof(struct fileEntry)), |
89 | dir->i_sb->s_blocksize, &(fibh->eoffset)); | 93 | dir->i_sb->s_blocksize, |
94 | &(fibh->eoffset)); | ||
90 | if (!fi) | 95 | if (!fi) |
91 | return NULL; | 96 | return NULL; |
92 | 97 | ||
93 | *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); | 98 | *nf_pos += fibh->eoffset - fibh->soffset; |
94 | 99 | ||
95 | memcpy((uint8_t *)cfi, (uint8_t *)fi, | 100 | memcpy((uint8_t *)cfi, (uint8_t *)fi, |
96 | sizeof(struct fileIdentDesc)); | 101 | sizeof(struct fileIdentDesc)); |
@@ -100,6 +105,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, | |||
100 | 105 | ||
101 | if (fibh->eoffset == dir->i_sb->s_blocksize) { | 106 | if (fibh->eoffset == dir->i_sb->s_blocksize) { |
102 | int lextoffset = epos->offset; | 107 | int lextoffset = epos->offset; |
108 | unsigned char blocksize_bits = dir->i_sb->s_blocksize_bits; | ||
103 | 109 | ||
104 | if (udf_next_aext(dir, epos, eloc, elen, 1) != | 110 | if (udf_next_aext(dir, epos, eloc, elen, 1) != |
105 | (EXT_RECORDED_ALLOCATED >> 30)) | 111 | (EXT_RECORDED_ALLOCATED >> 30)) |
@@ -109,24 +115,27 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, | |||
109 | 115 | ||
110 | (*offset)++; | 116 | (*offset)++; |
111 | 117 | ||
112 | if ((*offset << dir->i_sb->s_blocksize_bits) >= *elen) | 118 | if ((*offset << blocksize_bits) >= *elen) |
113 | *offset = 0; | 119 | *offset = 0; |
114 | else | 120 | else |
115 | epos->offset = lextoffset; | 121 | epos->offset = lextoffset; |
116 | 122 | ||
117 | brelse(fibh->sbh); | 123 | brelse(fibh->sbh); |
118 | if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) | 124 | fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); |
125 | if (!fibh->sbh) | ||
119 | return NULL; | 126 | return NULL; |
120 | fibh->soffset = fibh->eoffset = 0; | 127 | fibh->soffset = fibh->eoffset = 0; |
121 | 128 | ||
122 | if (!(*offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) { | 129 | if (!(*offset & ((16 >> (blocksize_bits - 9)) - 1))) { |
123 | i = 16 >> (dir->i_sb->s_blocksize_bits - 9); | 130 | i = 16 >> (blocksize_bits - 9); |
124 | if (i + *offset > (*elen >> dir->i_sb->s_blocksize_bits)) | 131 | if (i + *offset > (*elen >> blocksize_bits)) |
125 | i = (*elen >> dir->i_sb->s_blocksize_bits)-*offset; | 132 | i = (*elen >> blocksize_bits)-*offset; |
126 | for (num = 0; i > 0; i--) { | 133 | for (num = 0; i > 0; i--) { |
127 | block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset + i); | 134 | block = udf_get_lb_pblock(dir->i_sb, *eloc, |
135 | *offset + i); | ||
128 | tmp = udf_tgetblk(dir->i_sb, block); | 136 | tmp = udf_tgetblk(dir->i_sb, block); |
129 | if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp)) | 137 | if (tmp && !buffer_uptodate(tmp) && |
138 | !buffer_locked(tmp)) | ||
130 | bha[num++] = tmp; | 139 | bha[num++] = tmp; |
131 | else | 140 | else |
132 | brelse(tmp); | 141 | brelse(tmp); |
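The batching code in udf_fileident_read() above collects up to 16 sectors' worth of directory blocks before queueing them for read-ahead: 16 >> (blocksize_bits - 9) is the number of filesystem blocks that fit in 16 512-byte sectors, and the offset mask fires once per such window. A quick arithmetic check, assuming a 2048-byte block size (the value is illustrative, not taken from a real mount):

    #include <stdio.h>

    /* Stand-alone check of the read-ahead window computed above. */
    int main(void)
    {
            unsigned int blocksize_bits = 11;                /* 2048-byte blocks */
            unsigned int window = 16 >> (blocksize_bits - 9);

            printf("blocks read ahead per batch: %u\n", window);  /* 4 */
            return 0;
    }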
@@ -148,7 +157,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, | |||
148 | if (!fi) | 157 | if (!fi) |
149 | return NULL; | 158 | return NULL; |
150 | 159 | ||
151 | *nf_pos += ((fibh->eoffset - fibh->soffset) >> 2); | 160 | *nf_pos += fibh->eoffset - fibh->soffset; |
152 | 161 | ||
153 | if (fibh->eoffset <= dir->i_sb->s_blocksize) { | 162 | if (fibh->eoffset <= dir->i_sb->s_blocksize) { |
154 | memcpy((uint8_t *)cfi, (uint8_t *)fi, | 163 | memcpy((uint8_t *)cfi, (uint8_t *)fi, |
@@ -172,20 +181,23 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t * nf_pos, | |||
172 | fibh->soffset -= dir->i_sb->s_blocksize; | 181 | fibh->soffset -= dir->i_sb->s_blocksize; |
173 | fibh->eoffset -= dir->i_sb->s_blocksize; | 182 | fibh->eoffset -= dir->i_sb->s_blocksize; |
174 | 183 | ||
175 | if (!(fibh->ebh = udf_tread(dir->i_sb, block))) | 184 | fibh->ebh = udf_tread(dir->i_sb, block); |
185 | if (!fibh->ebh) | ||
176 | return NULL; | 186 | return NULL; |
177 | 187 | ||
178 | if (sizeof(struct fileIdentDesc) > -fibh->soffset) { | 188 | if (sizeof(struct fileIdentDesc) > -fibh->soffset) { |
179 | int fi_len; | 189 | int fi_len; |
180 | 190 | ||
181 | memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset); | 191 | memcpy((uint8_t *)cfi, (uint8_t *)fi, -fibh->soffset); |
182 | memcpy((uint8_t *)cfi - fibh->soffset, fibh->ebh->b_data, | 192 | memcpy((uint8_t *)cfi - fibh->soffset, |
193 | fibh->ebh->b_data, | ||
183 | sizeof(struct fileIdentDesc) + fibh->soffset); | 194 | sizeof(struct fileIdentDesc) + fibh->soffset); |
184 | 195 | ||
185 | fi_len = (sizeof(struct fileIdentDesc) + cfi->lengthFileIdent + | 196 | fi_len = (sizeof(struct fileIdentDesc) + |
197 | cfi->lengthFileIdent + | ||
186 | le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3; | 198 | le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3; |
187 | 199 | ||
188 | *nf_pos += ((fi_len - (fibh->eoffset - fibh->soffset)) >> 2); | 200 | *nf_pos += fi_len - (fibh->eoffset - fibh->soffset); |
189 | fibh->eoffset = fibh->soffset + fi_len; | 201 | fibh->eoffset = fibh->soffset + fi_len; |
190 | } else { | 202 | } else { |
191 | memcpy((uint8_t *)cfi, (uint8_t *)fi, | 203 | memcpy((uint8_t *)cfi, (uint8_t *)fi, |
@@ -210,11 +222,10 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset) | |||
210 | 222 | ||
211 | ptr = buffer; | 223 | ptr = buffer; |
212 | 224 | ||
213 | if ((*offset > 0) && (*offset < bufsize)) { | 225 | if ((*offset > 0) && (*offset < bufsize)) |
214 | ptr += *offset; | 226 | ptr += *offset; |
215 | } | ||
216 | fi = (struct fileIdentDesc *)ptr; | 227 | fi = (struct fileIdentDesc *)ptr; |
217 | if (le16_to_cpu(fi->descTag.tagIdent) != TAG_IDENT_FID) { | 228 | if (fi->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) { |
218 | udf_debug("0x%x != TAG_IDENT_FID\n", | 229 | udf_debug("0x%x != TAG_IDENT_FID\n", |
219 | le16_to_cpu(fi->descTag.tagIdent)); | 230 | le16_to_cpu(fi->descTag.tagIdent)); |
220 | udf_debug("offset: %u sizeof: %lu bufsize: %u\n", | 231 | udf_debug("offset: %u sizeof: %lu bufsize: %u\n", |
@@ -222,12 +233,11 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset) | |||
222 | bufsize); | 233 | bufsize); |
223 | return NULL; | 234 | return NULL; |
224 | } | 235 | } |
225 | if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) { | 236 | if ((*offset + sizeof(struct fileIdentDesc)) > bufsize) |
226 | lengthThisIdent = sizeof(struct fileIdentDesc); | 237 | lengthThisIdent = sizeof(struct fileIdentDesc); |
227 | } else { | 238 | else |
228 | lengthThisIdent = sizeof(struct fileIdentDesc) + | 239 | lengthThisIdent = sizeof(struct fileIdentDesc) + |
229 | fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse); | 240 | fi->lengthFileIdent + le16_to_cpu(fi->lengthOfImpUse); |
230 | } | ||
231 | 241 | ||
232 | /* we need to figure padding, too! */ | 242 | /* we need to figure padding, too! */ |
233 | padlen = lengthThisIdent % UDF_NAME_PAD; | 243 | padlen = lengthThisIdent % UDF_NAME_PAD; |
@@ -252,17 +262,17 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset) | |||
252 | 262 | ||
253 | fe = (struct fileEntry *)buffer; | 263 | fe = (struct fileEntry *)buffer; |
254 | 264 | ||
255 | if (le16_to_cpu(fe->descTag.tagIdent) != TAG_IDENT_FE) { | 265 | if (fe->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FE)) { |
256 | udf_debug("0x%x != TAG_IDENT_FE\n", | 266 | udf_debug("0x%x != TAG_IDENT_FE\n", |
257 | le16_to_cpu(fe->descTag.tagIdent)); | 267 | le16_to_cpu(fe->descTag.tagIdent)); |
258 | return NULL; | 268 | return NULL; |
259 | } | 269 | } |
260 | 270 | ||
261 | ptr = (uint8_t *)(fe->extendedAttr) + le32_to_cpu(fe->lengthExtendedAttr); | 271 | ptr = (uint8_t *)(fe->extendedAttr) + |
272 | le32_to_cpu(fe->lengthExtendedAttr); | ||
262 | 273 | ||
263 | if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) { | 274 | if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs))) |
264 | ptr += *offset; | 275 | ptr += *offset; |
265 | } | ||
266 | 276 | ||
267 | ext = (extent_ad *)ptr; | 277 | ext = (extent_ad *)ptr; |
268 | 278 | ||
@@ -271,7 +281,7 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset) | |||
271 | } | 281 | } |
272 | #endif | 282 | #endif |
273 | 283 | ||
274 | short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, | 284 | short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, uint32_t *offset, |
275 | int inc) | 285 | int inc) |
276 | { | 286 | { |
277 | short_ad *sa; | 287 | short_ad *sa; |
@@ -281,17 +291,20 @@ short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, int *offset, | |||
281 | return NULL; | 291 | return NULL; |
282 | } | 292 | } |
283 | 293 | ||
284 | if ((*offset < 0) || ((*offset + sizeof(short_ad)) > maxoffset)) | 294 | if ((*offset + sizeof(short_ad)) > maxoffset) |
285 | return NULL; | ||
286 | else if ((sa = (short_ad *)ptr)->extLength == 0) | ||
287 | return NULL; | 295 | return NULL; |
296 | else { | ||
297 | sa = (short_ad *)ptr; | ||
298 | if (sa->extLength == 0) | ||
299 | return NULL; | ||
300 | } | ||
288 | 301 | ||
289 | if (inc) | 302 | if (inc) |
290 | *offset += sizeof(short_ad); | 303 | *offset += sizeof(short_ad); |
291 | return sa; | 304 | return sa; |
292 | } | 305 | } |
293 | 306 | ||
294 | long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc) | 307 | long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc) |
295 | { | 308 | { |
296 | long_ad *la; | 309 | long_ad *la; |
297 | 310 | ||
@@ -300,10 +313,13 @@ long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, int *offset, int inc) | |||
300 | return NULL; | 313 | return NULL; |
301 | } | 314 | } |
302 | 315 | ||
303 | if ((*offset < 0) || ((*offset + sizeof(long_ad)) > maxoffset)) | 316 | if ((*offset + sizeof(long_ad)) > maxoffset) |
304 | return NULL; | ||
305 | else if ((la = (long_ad *)ptr)->extLength == 0) | ||
306 | return NULL; | 317 | return NULL; |
318 | else { | ||
319 | la = (long_ad *)ptr; | ||
320 | if (la->extLength == 0) | ||
321 | return NULL; | ||
322 | } | ||
307 | 323 | ||
308 | if (inc) | 324 | if (inc) |
309 | *offset += sizeof(long_ad); | 325 | *offset += sizeof(long_ad); |
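Several checks in directory.c now compare the raw little-endian on-disk field against a constant converted once (fi->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) instead of byte-swapping the field with le16_to_cpu() on every call; cpu_to_le16() of a constant folds at compile time, so the test is a plain comparison on either endianness. A minimal sketch of the idiom, with a locally defined tag value standing in for the ecma_167.h constant:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define EXAMPLE_TAG_IDENT_FID 0x0101    /* stand-in value, not the header's */

    /* Compare an on-disk __le16 field without converting it: the constant
     * is swapped (if needed) at compile time instead.
     */
    static inline int example_tag_is_fid(__le16 raw_ident)
    {
            return raw_ident == cpu_to_le16(EXAMPLE_TAG_IDENT_FID);
    }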
diff --git a/fs/udf/file.c b/fs/udf/file.c index 7c7a1b39d56c..97c71ae7c689 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -45,12 +45,13 @@ static int udf_adinicb_readpage(struct file *file, struct page *page) | |||
45 | { | 45 | { |
46 | struct inode *inode = page->mapping->host; | 46 | struct inode *inode = page->mapping->host; |
47 | char *kaddr; | 47 | char *kaddr; |
48 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
48 | 49 | ||
49 | BUG_ON(!PageLocked(page)); | 50 | BUG_ON(!PageLocked(page)); |
50 | 51 | ||
51 | kaddr = kmap(page); | 52 | kaddr = kmap(page); |
52 | memset(kaddr, 0, PAGE_CACHE_SIZE); | 53 | memset(kaddr, 0, PAGE_CACHE_SIZE); |
53 | memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), inode->i_size); | 54 | memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); |
54 | flush_dcache_page(page); | 55 | flush_dcache_page(page); |
55 | SetPageUptodate(page); | 56 | SetPageUptodate(page); |
56 | kunmap(page); | 57 | kunmap(page); |
@@ -59,15 +60,17 @@ static int udf_adinicb_readpage(struct file *file, struct page *page) | |||
59 | return 0; | 60 | return 0; |
60 | } | 61 | } |
61 | 62 | ||
62 | static int udf_adinicb_writepage(struct page *page, struct writeback_control *wbc) | 63 | static int udf_adinicb_writepage(struct page *page, |
64 | struct writeback_control *wbc) | ||
63 | { | 65 | { |
64 | struct inode *inode = page->mapping->host; | 66 | struct inode *inode = page->mapping->host; |
65 | char *kaddr; | 67 | char *kaddr; |
68 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
66 | 69 | ||
67 | BUG_ON(!PageLocked(page)); | 70 | BUG_ON(!PageLocked(page)); |
68 | 71 | ||
69 | kaddr = kmap(page); | 72 | kaddr = kmap(page); |
70 | memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size); | 73 | memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); |
71 | mark_inode_dirty(inode); | 74 | mark_inode_dirty(inode); |
72 | SetPageUptodate(page); | 75 | SetPageUptodate(page); |
73 | kunmap(page); | 76 | kunmap(page); |
@@ -84,9 +87,10 @@ static int udf_adinicb_write_end(struct file *file, | |||
84 | struct inode *inode = mapping->host; | 87 | struct inode *inode = mapping->host; |
85 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); | 88 | unsigned offset = pos & (PAGE_CACHE_SIZE - 1); |
86 | char *kaddr; | 89 | char *kaddr; |
90 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
87 | 91 | ||
88 | kaddr = kmap_atomic(page, KM_USER0); | 92 | kaddr = kmap_atomic(page, KM_USER0); |
89 | memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, | 93 | memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, |
90 | kaddr + offset, copied); | 94 | kaddr + offset, copied); |
91 | kunmap_atomic(kaddr, KM_USER0); | 95 | kunmap_atomic(kaddr, KM_USER0); |
92 | 96 | ||
@@ -109,25 +113,27 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
109 | struct inode *inode = file->f_path.dentry->d_inode; | 113 | struct inode *inode = file->f_path.dentry->d_inode; |
110 | int err, pos; | 114 | int err, pos; |
111 | size_t count = iocb->ki_left; | 115 | size_t count = iocb->ki_left; |
116 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
112 | 117 | ||
113 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { | 118 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
114 | if (file->f_flags & O_APPEND) | 119 | if (file->f_flags & O_APPEND) |
115 | pos = inode->i_size; | 120 | pos = inode->i_size; |
116 | else | 121 | else |
117 | pos = ppos; | 122 | pos = ppos; |
118 | 123 | ||
119 | if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + | 124 | if (inode->i_sb->s_blocksize < |
125 | (udf_file_entry_alloc_offset(inode) + | ||
120 | pos + count)) { | 126 | pos + count)) { |
121 | udf_expand_file_adinicb(inode, pos + count, &err); | 127 | udf_expand_file_adinicb(inode, pos + count, &err); |
122 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { | 128 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
123 | udf_debug("udf_expand_adinicb: err=%d\n", err); | 129 | udf_debug("udf_expand_adinicb: err=%d\n", err); |
124 | return err; | 130 | return err; |
125 | } | 131 | } |
126 | } else { | 132 | } else { |
127 | if (pos + count > inode->i_size) | 133 | if (pos + count > inode->i_size) |
128 | UDF_I_LENALLOC(inode) = pos + count; | 134 | iinfo->i_lenAlloc = pos + count; |
129 | else | 135 | else |
130 | UDF_I_LENALLOC(inode) = inode->i_size; | 136 | iinfo->i_lenAlloc = inode->i_size; |
131 | } | 137 | } |
132 | } | 138 | } |
133 | 139 | ||
@@ -191,23 +197,28 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, | |||
191 | 197 | ||
192 | switch (cmd) { | 198 | switch (cmd) { |
193 | case UDF_GETVOLIDENT: | 199 | case UDF_GETVOLIDENT: |
194 | return copy_to_user((char __user *)arg, | 200 | if (copy_to_user((char __user *)arg, |
195 | UDF_SB_VOLIDENT(inode->i_sb), 32) ? -EFAULT : 0; | 201 | UDF_SB(inode->i_sb)->s_volume_ident, 32)) |
202 | return -EFAULT; | ||
203 | else | ||
204 | return 0; | ||
196 | case UDF_RELOCATE_BLOCKS: | 205 | case UDF_RELOCATE_BLOCKS: |
197 | if (!capable(CAP_SYS_ADMIN)) | 206 | if (!capable(CAP_SYS_ADMIN)) |
198 | return -EACCES; | 207 | return -EACCES; |
199 | if (get_user(old_block, (long __user *)arg)) | 208 | if (get_user(old_block, (long __user *)arg)) |
200 | return -EFAULT; | 209 | return -EFAULT; |
201 | if ((result = udf_relocate_blocks(inode->i_sb, | 210 | result = udf_relocate_blocks(inode->i_sb, |
202 | old_block, &new_block)) == 0) | 211 | old_block, &new_block); |
212 | if (result == 0) | ||
203 | result = put_user(new_block, (long __user *)arg); | 213 | result = put_user(new_block, (long __user *)arg); |
204 | return result; | 214 | return result; |
205 | case UDF_GETEASIZE: | 215 | case UDF_GETEASIZE: |
206 | result = put_user(UDF_I_LENEATTR(inode), (int __user *)arg); | 216 | result = put_user(UDF_I(inode)->i_lenEAttr, (int __user *)arg); |
207 | break; | 217 | break; |
208 | case UDF_GETEABLOCK: | 218 | case UDF_GETEABLOCK: |
209 | result = copy_to_user((char __user *)arg, UDF_I_DATA(inode), | 219 | result = copy_to_user((char __user *)arg, |
210 | UDF_I_LENEATTR(inode)) ? -EFAULT : 0; | 220 | UDF_I(inode)->i_ext.i_data, |
221 | UDF_I(inode)->i_lenEAttr) ? -EFAULT : 0; | ||
211 | break; | 222 | break; |
212 | } | 223 | } |
213 | 224 | ||
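The UDF_GETVOLIDENT branch above spells out the usual copy_to_user() contract: the call returns the number of bytes it failed to copy, so any nonzero result is reported as -EFAULT and zero means success. A sketch of the same pattern in isolation (the identifier buffer is a stand-in for the superblock member used above):

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Copy a fixed 32-byte identifier out to user space; a nonzero return
     * from copy_to_user() means part of the destination was unwritable.
     */
    static int example_copy_ident(char __user *dst, const char *ident)
    {
            if (copy_to_user(dst, ident, 32))
                    return -EFAULT;
            return 0;
    }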
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 636d8f613929..84360315aca2 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
@@ -43,19 +43,21 @@ void udf_free_inode(struct inode *inode) | |||
43 | clear_inode(inode); | 43 | clear_inode(inode); |
44 | 44 | ||
45 | mutex_lock(&sbi->s_alloc_mutex); | 45 | mutex_lock(&sbi->s_alloc_mutex); |
46 | if (sbi->s_lvidbh) { | 46 | if (sbi->s_lvid_bh) { |
47 | struct logicalVolIntegrityDescImpUse *lvidiu = | ||
48 | udf_sb_lvidiu(sbi); | ||
47 | if (S_ISDIR(inode->i_mode)) | 49 | if (S_ISDIR(inode->i_mode)) |
48 | UDF_SB_LVIDIU(sb)->numDirs = | 50 | lvidiu->numDirs = |
49 | cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) - 1); | 51 | cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1); |
50 | else | 52 | else |
51 | UDF_SB_LVIDIU(sb)->numFiles = | 53 | lvidiu->numFiles = |
52 | cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) - 1); | 54 | cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1); |
53 | 55 | ||
54 | mark_buffer_dirty(sbi->s_lvidbh); | 56 | mark_buffer_dirty(sbi->s_lvid_bh); |
55 | } | 57 | } |
56 | mutex_unlock(&sbi->s_alloc_mutex); | 58 | mutex_unlock(&sbi->s_alloc_mutex); |
57 | 59 | ||
58 | udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); | 60 | udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1); |
59 | } | 61 | } |
60 | 62 | ||
61 | struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | 63 | struct inode *udf_new_inode(struct inode *dir, int mode, int *err) |
@@ -64,7 +66,9 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
64 | struct udf_sb_info *sbi = UDF_SB(sb); | 66 | struct udf_sb_info *sbi = UDF_SB(sb); |
65 | struct inode *inode; | 67 | struct inode *inode; |
66 | int block; | 68 | int block; |
67 | uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum; | 69 | uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; |
70 | struct udf_inode_info *iinfo; | ||
71 | struct udf_inode_info *dinfo = UDF_I(dir); | ||
68 | 72 | ||
69 | inode = new_inode(sb); | 73 | inode = new_inode(sb); |
70 | 74 | ||
@@ -74,13 +78,15 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
74 | } | 78 | } |
75 | *err = -ENOSPC; | 79 | *err = -ENOSPC; |
76 | 80 | ||
77 | UDF_I_UNIQUE(inode) = 0; | 81 | iinfo = UDF_I(inode); |
78 | UDF_I_LENEXTENTS(inode) = 0; | 82 | iinfo->i_unique = 0; |
79 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | 83 | iinfo->i_lenExtents = 0; |
80 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | 84 | iinfo->i_next_alloc_block = 0; |
81 | UDF_I_STRAT4096(inode) = 0; | 85 | iinfo->i_next_alloc_goal = 0; |
86 | iinfo->i_strat4096 = 0; | ||
82 | 87 | ||
83 | block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum, | 88 | block = udf_new_block(dir->i_sb, NULL, |
89 | dinfo->i_location.partitionReferenceNum, | ||
84 | start, err); | 90 | start, err); |
85 | if (*err) { | 91 | if (*err) { |
86 | iput(inode); | 92 | iput(inode); |
@@ -88,21 +94,27 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
88 | } | 94 | } |
89 | 95 | ||
90 | mutex_lock(&sbi->s_alloc_mutex); | 96 | mutex_lock(&sbi->s_alloc_mutex); |
91 | if (UDF_SB_LVIDBH(sb)) { | 97 | if (sbi->s_lvid_bh) { |
98 | struct logicalVolIntegrityDesc *lvid = | ||
99 | (struct logicalVolIntegrityDesc *) | ||
100 | sbi->s_lvid_bh->b_data; | ||
101 | struct logicalVolIntegrityDescImpUse *lvidiu = | ||
102 | udf_sb_lvidiu(sbi); | ||
92 | struct logicalVolHeaderDesc *lvhd; | 103 | struct logicalVolHeaderDesc *lvhd; |
93 | uint64_t uniqueID; | 104 | uint64_t uniqueID; |
94 | lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(sb)->logicalVolContentsUse); | 105 | lvhd = (struct logicalVolHeaderDesc *) |
106 | (lvid->logicalVolContentsUse); | ||
95 | if (S_ISDIR(mode)) | 107 | if (S_ISDIR(mode)) |
96 | UDF_SB_LVIDIU(sb)->numDirs = | 108 | lvidiu->numDirs = |
97 | cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs) + 1); | 109 | cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1); |
98 | else | 110 | else |
99 | UDF_SB_LVIDIU(sb)->numFiles = | 111 | lvidiu->numFiles = |
100 | cpu_to_le32(le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + 1); | 112 | cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1); |
101 | UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID); | 113 | iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID); |
102 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) | 114 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) |
103 | uniqueID += 16; | 115 | uniqueID += 16; |
104 | lvhd->uniqueID = cpu_to_le64(uniqueID); | 116 | lvhd->uniqueID = cpu_to_le64(uniqueID); |
105 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 117 | mark_buffer_dirty(sbi->s_lvid_bh); |
106 | } | 118 | } |
107 | inode->i_mode = mode; | 119 | inode->i_mode = mode; |
108 | inode->i_uid = current->fsuid; | 120 | inode->i_uid = current->fsuid; |
@@ -114,35 +126,41 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) | |||
114 | inode->i_gid = current->fsgid; | 126 | inode->i_gid = current->fsgid; |
115 | } | 127 | } |
116 | 128 | ||
117 | UDF_I_LOCATION(inode).logicalBlockNum = block; | 129 | iinfo->i_location.logicalBlockNum = block; |
118 | UDF_I_LOCATION(inode).partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum; | 130 | iinfo->i_location.partitionReferenceNum = |
119 | inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0); | 131 | dinfo->i_location.partitionReferenceNum; |
132 | inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0); | ||
120 | inode->i_blocks = 0; | 133 | inode->i_blocks = 0; |
121 | UDF_I_LENEATTR(inode) = 0; | 134 | iinfo->i_lenEAttr = 0; |
122 | UDF_I_LENALLOC(inode) = 0; | 135 | iinfo->i_lenAlloc = 0; |
123 | UDF_I_USE(inode) = 0; | 136 | iinfo->i_use = 0; |
124 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { | 137 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { |
125 | UDF_I_EFE(inode) = 1; | 138 | iinfo->i_efe = 1; |
126 | UDF_UPDATE_UDFREV(inode->i_sb, UDF_VERS_USE_EXTENDED_FE); | 139 | if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev) |
127 | UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL); | 140 | sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE; |
141 | iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize - | ||
142 | sizeof(struct extendedFileEntry), | ||
143 | GFP_KERNEL); | ||
128 | } else { | 144 | } else { |
129 | UDF_I_EFE(inode) = 0; | 145 | iinfo->i_efe = 0; |
130 | UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL); | 146 | iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize - |
147 | sizeof(struct fileEntry), | ||
148 | GFP_KERNEL); | ||
131 | } | 149 | } |
132 | if (!UDF_I_DATA(inode)) { | 150 | if (!iinfo->i_ext.i_data) { |
133 | iput(inode); | 151 | iput(inode); |
134 | *err = -ENOMEM; | 152 | *err = -ENOMEM; |
135 | mutex_unlock(&sbi->s_alloc_mutex); | 153 | mutex_unlock(&sbi->s_alloc_mutex); |
136 | return NULL; | 154 | return NULL; |
137 | } | 155 | } |
138 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB)) | 156 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB)) |
139 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; | 157 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; |
140 | else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) | 158 | else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) |
141 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; | 159 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; |
142 | else | 160 | else |
143 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; | 161 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; |
144 | inode->i_mtime = inode->i_atime = inode->i_ctime = | 162 | inode->i_mtime = inode->i_atime = inode->i_ctime = |
145 | UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); | 163 | iinfo->i_crtime = current_fs_time(inode->i_sb); |
146 | insert_inode_hash(inode); | 164 | insert_inode_hash(inode); |
147 | mark_inode_dirty(inode); | 165 | mark_inode_dirty(inode); |
148 | mutex_unlock(&sbi->s_alloc_mutex); | 166 | mutex_unlock(&sbi->s_alloc_mutex); |
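ialloc.c, like the other files in this series, stops going through the UDF_I_* macros and instead caches a struct udf_inode_info pointer once per function with UDF_I(inode), then dereferences its fields (i_unique, i_location, i_lenEAttr, and so on). The accessor follows the embedded-inode pattern most filesystems use; a generic sketch with example names, not the udf tree's exact definition:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    /* The VFS inode is embedded inside the per-filesystem inode, so the
     * outer structure can be recovered from the inode pointer.
     */
    struct example_inode_info {
            unsigned long long      i_unique;
            struct inode            vfs_inode;      /* embedded, never a pointer */
    };

    static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
    {
            return container_of(inode, struct example_inode_info, vfs_inode);
    }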
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 6ff8151984cf..24cfa55d0fdc 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -19,7 +19,8 @@ | |||
19 | * 10/04/98 dgb Added rudimentary directory functions | 19 | * 10/04/98 dgb Added rudimentary directory functions |
20 | * 10/07/98 Fully working udf_block_map! It works! | 20 | * 10/07/98 Fully working udf_block_map! It works! |
21 | * 11/25/98 bmap altered to better support extents | 21 | * 11/25/98 bmap altered to better support extents |
22 | * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode | 22 | * 12/06/98 blf partition support in udf_iget, udf_block_map |
23 | * and udf_read_inode | ||
23 | * 12/12/98 rewrote udf_block_map to handle next extents and descs across | 24 | * 12/12/98 rewrote udf_block_map to handle next extents and descs across |
24 | * block boundaries (which is not actually allowed) | 25 | * block boundaries (which is not actually allowed) |
25 | * 12/20/98 added support for strategy 4096 | 26 | * 12/20/98 added support for strategy 4096 |
@@ -51,7 +52,7 @@ static int udf_update_inode(struct inode *, int); | |||
51 | static void udf_fill_inode(struct inode *, struct buffer_head *); | 52 | static void udf_fill_inode(struct inode *, struct buffer_head *); |
52 | static int udf_alloc_i_data(struct inode *inode, size_t size); | 53 | static int udf_alloc_i_data(struct inode *inode, size_t size); |
53 | static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, | 54 | static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, |
54 | long *, int *); | 55 | sector_t *, int *); |
55 | static int8_t udf_insert_aext(struct inode *, struct extent_position, | 56 | static int8_t udf_insert_aext(struct inode *, struct extent_position, |
56 | kernel_lb_addr, uint32_t); | 57 | kernel_lb_addr, uint32_t); |
57 | static void udf_split_extents(struct inode *, int *, int, int, | 58 | static void udf_split_extents(struct inode *, int *, int, int, |
@@ -111,16 +112,18 @@ no_delete: | |||
111 | */ | 112 | */ |
112 | void udf_clear_inode(struct inode *inode) | 113 | void udf_clear_inode(struct inode *inode) |
113 | { | 114 | { |
115 | struct udf_inode_info *iinfo; | ||
114 | if (!(inode->i_sb->s_flags & MS_RDONLY)) { | 116 | if (!(inode->i_sb->s_flags & MS_RDONLY)) { |
115 | lock_kernel(); | 117 | lock_kernel(); |
116 | /* Discard preallocation for directories, symlinks, etc. */ | 118 | /* Discard preallocation for directories, symlinks, etc. */ |
117 | udf_discard_prealloc(inode); | 119 | udf_discard_prealloc(inode); |
118 | udf_truncate_tail_extent(inode); | 120 | udf_truncate_tail_extent(inode); |
119 | unlock_kernel(); | 121 | unlock_kernel(); |
120 | write_inode_now(inode, 1); | 122 | write_inode_now(inode, 0); |
121 | } | 123 | } |
122 | kfree(UDF_I_DATA(inode)); | 124 | iinfo = UDF_I(inode); |
123 | UDF_I_DATA(inode) = NULL; | 125 | kfree(iinfo->i_ext.i_data); |
126 | iinfo->i_ext.i_data = NULL; | ||
124 | } | 127 | } |
125 | 128 | ||
126 | static int udf_writepage(struct page *page, struct writeback_control *wbc) | 129 | static int udf_writepage(struct page *page, struct writeback_control *wbc) |
@@ -160,6 +163,7 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err) | |||
160 | { | 163 | { |
161 | struct page *page; | 164 | struct page *page; |
162 | char *kaddr; | 165 | char *kaddr; |
166 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
163 | struct writeback_control udf_wbc = { | 167 | struct writeback_control udf_wbc = { |
164 | .sync_mode = WB_SYNC_NONE, | 168 | .sync_mode = WB_SYNC_NONE, |
165 | .nr_to_write = 1, | 169 | .nr_to_write = 1, |
@@ -168,11 +172,11 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err) | |||
168 | /* from now on we have normal address_space methods */ | 172 | /* from now on we have normal address_space methods */ |
169 | inode->i_data.a_ops = &udf_aops; | 173 | inode->i_data.a_ops = &udf_aops; |
170 | 174 | ||
171 | if (!UDF_I_LENALLOC(inode)) { | 175 | if (!iinfo->i_lenAlloc) { |
172 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) | 176 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) |
173 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; | 177 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; |
174 | else | 178 | else |
175 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; | 179 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; |
176 | mark_inode_dirty(inode); | 180 | mark_inode_dirty(inode); |
177 | return; | 181 | return; |
178 | } | 182 | } |
@@ -182,21 +186,21 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err) | |||
182 | 186 | ||
183 | if (!PageUptodate(page)) { | 187 | if (!PageUptodate(page)) { |
184 | kaddr = kmap(page); | 188 | kaddr = kmap(page); |
185 | memset(kaddr + UDF_I_LENALLOC(inode), 0x00, | 189 | memset(kaddr + iinfo->i_lenAlloc, 0x00, |
186 | PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode)); | 190 | PAGE_CACHE_SIZE - iinfo->i_lenAlloc); |
187 | memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), | 191 | memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, |
188 | UDF_I_LENALLOC(inode)); | 192 | iinfo->i_lenAlloc); |
189 | flush_dcache_page(page); | 193 | flush_dcache_page(page); |
190 | SetPageUptodate(page); | 194 | SetPageUptodate(page); |
191 | kunmap(page); | 195 | kunmap(page); |
192 | } | 196 | } |
193 | memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00, | 197 | memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, |
194 | UDF_I_LENALLOC(inode)); | 198 | iinfo->i_lenAlloc); |
195 | UDF_I_LENALLOC(inode) = 0; | 199 | iinfo->i_lenAlloc = 0; |
196 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) | 200 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) |
197 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT; | 201 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT; |
198 | else | 202 | else |
199 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG; | 203 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; |
200 | 204 | ||
201 | inode->i_data.a_ops->writepage(page, &udf_wbc); | 205 | inode->i_data.a_ops->writepage(page, &udf_wbc); |
202 | page_cache_release(page); | 206 | page_cache_release(page); |
@@ -215,9 +219,10 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, | |||
215 | struct extent_position epos; | 219 | struct extent_position epos; |
216 | 220 | ||
217 | struct udf_fileident_bh sfibh, dfibh; | 221 | struct udf_fileident_bh sfibh, dfibh; |
218 | loff_t f_pos = udf_ext0_offset(inode) >> 2; | 222 | loff_t f_pos = udf_ext0_offset(inode); |
219 | int size = (udf_ext0_offset(inode) + inode->i_size) >> 2; | 223 | int size = udf_ext0_offset(inode) + inode->i_size; |
220 | struct fileIdentDesc cfi, *sfi, *dfi; | 224 | struct fileIdentDesc cfi, *sfi, *dfi; |
225 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
221 | 226 | ||
222 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) | 227 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD)) |
223 | alloctype = ICBTAG_FLAG_AD_SHORT; | 228 | alloctype = ICBTAG_FLAG_AD_SHORT; |
@@ -225,19 +230,20 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, | |||
225 | alloctype = ICBTAG_FLAG_AD_LONG; | 230 | alloctype = ICBTAG_FLAG_AD_LONG; |
226 | 231 | ||
227 | if (!inode->i_size) { | 232 | if (!inode->i_size) { |
228 | UDF_I_ALLOCTYPE(inode) = alloctype; | 233 | iinfo->i_alloc_type = alloctype; |
229 | mark_inode_dirty(inode); | 234 | mark_inode_dirty(inode); |
230 | return NULL; | 235 | return NULL; |
231 | } | 236 | } |
232 | 237 | ||
233 | /* alloc block, and copy data to it */ | 238 | /* alloc block, and copy data to it */ |
234 | *block = udf_new_block(inode->i_sb, inode, | 239 | *block = udf_new_block(inode->i_sb, inode, |
235 | UDF_I_LOCATION(inode).partitionReferenceNum, | 240 | iinfo->i_location.partitionReferenceNum, |
236 | UDF_I_LOCATION(inode).logicalBlockNum, err); | 241 | iinfo->i_location.logicalBlockNum, err); |
237 | if (!(*block)) | 242 | if (!(*block)) |
238 | return NULL; | 243 | return NULL; |
239 | newblock = udf_get_pblock(inode->i_sb, *block, | 244 | newblock = udf_get_pblock(inode->i_sb, *block, |
240 | UDF_I_LOCATION(inode).partitionReferenceNum, 0); | 245 | iinfo->i_location.partitionReferenceNum, |
246 | 0); | ||
241 | if (!newblock) | 247 | if (!newblock) |
242 | return NULL; | 248 | return NULL; |
243 | dbh = udf_tgetblk(inode->i_sb, newblock); | 249 | dbh = udf_tgetblk(inode->i_sb, newblock); |
@@ -249,39 +255,44 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block, | |||
249 | unlock_buffer(dbh); | 255 | unlock_buffer(dbh); |
250 | mark_buffer_dirty_inode(dbh, inode); | 256 | mark_buffer_dirty_inode(dbh, inode); |
251 | 257 | ||
252 | sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2; | 258 | sfibh.soffset = sfibh.eoffset = |
259 | f_pos & (inode->i_sb->s_blocksize - 1); | ||
253 | sfibh.sbh = sfibh.ebh = NULL; | 260 | sfibh.sbh = sfibh.ebh = NULL; |
254 | dfibh.soffset = dfibh.eoffset = 0; | 261 | dfibh.soffset = dfibh.eoffset = 0; |
255 | dfibh.sbh = dfibh.ebh = dbh; | 262 | dfibh.sbh = dfibh.ebh = dbh; |
256 | while ((f_pos < size)) { | 263 | while (f_pos < size) { |
257 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; | 264 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; |
258 | sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL); | 265 | sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, |
266 | NULL, NULL, NULL); | ||
259 | if (!sfi) { | 267 | if (!sfi) { |
260 | brelse(dbh); | 268 | brelse(dbh); |
261 | return NULL; | 269 | return NULL; |
262 | } | 270 | } |
263 | UDF_I_ALLOCTYPE(inode) = alloctype; | 271 | iinfo->i_alloc_type = alloctype; |
264 | sfi->descTag.tagLocation = cpu_to_le32(*block); | 272 | sfi->descTag.tagLocation = cpu_to_le32(*block); |
265 | dfibh.soffset = dfibh.eoffset; | 273 | dfibh.soffset = dfibh.eoffset; |
266 | dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); | 274 | dfibh.eoffset += (sfibh.eoffset - sfibh.soffset); |
267 | dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); | 275 | dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset); |
268 | if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, | 276 | if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse, |
269 | sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse))) { | 277 | sfi->fileIdent + |
270 | UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB; | 278 | le16_to_cpu(sfi->lengthOfImpUse))) { |
279 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; | ||
271 | brelse(dbh); | 280 | brelse(dbh); |
272 | return NULL; | 281 | return NULL; |
273 | } | 282 | } |
274 | } | 283 | } |
275 | mark_buffer_dirty_inode(dbh, inode); | 284 | mark_buffer_dirty_inode(dbh, inode); |
276 | 285 | ||
277 | memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode)); | 286 | memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0, |
278 | UDF_I_LENALLOC(inode) = 0; | 287 | iinfo->i_lenAlloc); |
288 | iinfo->i_lenAlloc = 0; | ||
279 | eloc.logicalBlockNum = *block; | 289 | eloc.logicalBlockNum = *block; |
280 | eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; | 290 | eloc.partitionReferenceNum = |
281 | elen = inode->i_size; | 291 | iinfo->i_location.partitionReferenceNum; |
282 | UDF_I_LENEXTENTS(inode) = elen; | 292 | elen = inode->i_sb->s_blocksize; |
293 | iinfo->i_lenExtents = elen; | ||
283 | epos.bh = NULL; | 294 | epos.bh = NULL; |
284 | epos.block = UDF_I_LOCATION(inode); | 295 | epos.block = iinfo->i_location; |
285 | epos.offset = udf_file_entry_alloc_offset(inode); | 296 | epos.offset = udf_file_entry_alloc_offset(inode); |
286 | udf_add_aext(inode, &epos, eloc, elen, 0); | 297 | udf_add_aext(inode, &epos, eloc, elen, 0); |
287 | /* UniqueID stuff */ | 298 | /* UniqueID stuff */ |
@@ -296,7 +307,8 @@ static int udf_get_block(struct inode *inode, sector_t block, | |||
296 | { | 307 | { |
297 | int err, new; | 308 | int err, new; |
298 | struct buffer_head *bh; | 309 | struct buffer_head *bh; |
299 | unsigned long phys; | 310 | sector_t phys = 0; |
311 | struct udf_inode_info *iinfo; | ||
300 | 312 | ||
301 | if (!create) { | 313 | if (!create) { |
302 | phys = udf_block_map(inode, block); | 314 | phys = udf_block_map(inode, block); |
@@ -314,9 +326,10 @@ static int udf_get_block(struct inode *inode, sector_t block, | |||
314 | if (block < 0) | 326 | if (block < 0) |
315 | goto abort_negative; | 327 | goto abort_negative; |
316 | 328 | ||
317 | if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) { | 329 | iinfo = UDF_I(inode); |
318 | UDF_I_NEXT_ALLOC_BLOCK(inode)++; | 330 | if (block == iinfo->i_next_alloc_block + 1) { |
319 | UDF_I_NEXT_ALLOC_GOAL(inode)++; | 331 | iinfo->i_next_alloc_block++; |
332 | iinfo->i_next_alloc_goal++; | ||
320 | } | 333 | } |
321 | 334 | ||
322 | err = 0; | 335 | err = 0; |
@@ -366,32 +379,35 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block, | |||
366 | 379 | ||
367 | /* Extend the file by 'blocks' blocks, return the number of extents added */ | 380 | /* Extend the file by 'blocks' blocks, return the number of extents added */ |
368 | int udf_extend_file(struct inode *inode, struct extent_position *last_pos, | 381 | int udf_extend_file(struct inode *inode, struct extent_position *last_pos, |
369 | kernel_long_ad * last_ext, sector_t blocks) | 382 | kernel_long_ad *last_ext, sector_t blocks) |
370 | { | 383 | { |
371 | sector_t add; | 384 | sector_t add; |
372 | int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); | 385 | int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK); |
373 | struct super_block *sb = inode->i_sb; | 386 | struct super_block *sb = inode->i_sb; |
374 | kernel_lb_addr prealloc_loc = {}; | 387 | kernel_lb_addr prealloc_loc = {}; |
375 | int prealloc_len = 0; | 388 | int prealloc_len = 0; |
389 | struct udf_inode_info *iinfo; | ||
376 | 390 | ||
377 | /* The previous extent is fake and we should not extend by anything | 391 | /* The previous extent is fake and we should not extend by anything |
378 | * - there's nothing to do... */ | 392 | * - there's nothing to do... */ |
379 | if (!blocks && fake) | 393 | if (!blocks && fake) |
380 | return 0; | 394 | return 0; |
381 | 395 | ||
396 | iinfo = UDF_I(inode); | ||
382 | /* Round the last extent up to a multiple of block size */ | 397 | /* Round the last extent up to a multiple of block size */ |
383 | if (last_ext->extLength & (sb->s_blocksize - 1)) { | 398 | if (last_ext->extLength & (sb->s_blocksize - 1)) { |
384 | last_ext->extLength = | 399 | last_ext->extLength = |
385 | (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | | 400 | (last_ext->extLength & UDF_EXTENT_FLAG_MASK) | |
386 | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + | 401 | (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) + |
387 | sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); | 402 | sb->s_blocksize - 1) & ~(sb->s_blocksize - 1)); |
388 | UDF_I_LENEXTENTS(inode) = | 403 | iinfo->i_lenExtents = |
389 | (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) & | 404 | (iinfo->i_lenExtents + sb->s_blocksize - 1) & |
390 | ~(sb->s_blocksize - 1); | 405 | ~(sb->s_blocksize - 1); |
391 | } | 406 | } |
392 | 407 | ||
393 | /* Last extent are just preallocated blocks? */ | 408 | /* Last extent are just preallocated blocks? */ |
394 | if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) { | 409 | if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == |
410 | EXT_NOT_RECORDED_ALLOCATED) { | ||
395 | /* Save the extent so that we can reattach it to the end */ | 411 | /* Save the extent so that we can reattach it to the end */ |
396 | prealloc_loc = last_ext->extLocation; | 412 | prealloc_loc = last_ext->extLocation; |
397 | prealloc_len = last_ext->extLength; | 413 | prealloc_len = last_ext->extLength; |
@@ -399,13 +415,15 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos, | |||
399 | last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | | 415 | last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | |
400 | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); | 416 | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK); |
401 | last_ext->extLocation.logicalBlockNum = 0; | 417 | last_ext->extLocation.logicalBlockNum = 0; |
402 | last_ext->extLocation.partitionReferenceNum = 0; | 418 | last_ext->extLocation.partitionReferenceNum = 0; |
403 | } | 419 | } |
404 | 420 | ||
405 | /* Can we merge with the previous extent? */ | 421 | /* Can we merge with the previous extent? */ |
406 | if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) { | 422 | if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == |
407 | add = ((1 << 30) - sb->s_blocksize - (last_ext->extLength & | 423 | EXT_NOT_RECORDED_NOT_ALLOCATED) { |
408 | UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits; | 424 | add = ((1 << 30) - sb->s_blocksize - |
425 | (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >> | ||
426 | sb->s_blocksize_bits; | ||
409 | if (add > blocks) | 427 | if (add > blocks) |
410 | add = blocks; | 428 | add = blocks; |
411 | blocks -= add; | 429 | blocks -= add; |
@@ -416,9 +434,9 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos, | |||
416 | udf_add_aext(inode, last_pos, last_ext->extLocation, | 434 | udf_add_aext(inode, last_pos, last_ext->extLocation, |
417 | last_ext->extLength, 1); | 435 | last_ext->extLength, 1); |
418 | count++; | 436 | count++; |
419 | } else { | 437 | } else |
420 | udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1); | 438 | udf_write_aext(inode, last_pos, last_ext->extLocation, |
421 | } | 439 | last_ext->extLength, 1); |
422 | 440 | ||
423 | /* Managed to do everything necessary? */ | 441 | /* Managed to do everything necessary? */ |
424 | if (!blocks) | 442 | if (!blocks) |
@@ -426,9 +444,10 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos, | |||
426 | 444 | ||
427 | /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ | 445 | /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */ |
428 | last_ext->extLocation.logicalBlockNum = 0; | 446 | last_ext->extLocation.logicalBlockNum = 0; |
429 | last_ext->extLocation.partitionReferenceNum = 0; | 447 | last_ext->extLocation.partitionReferenceNum = 0; |
430 | add = (1 << (30-sb->s_blocksize_bits)) - 1; | 448 | add = (1 << (30-sb->s_blocksize_bits)) - 1; |
431 | last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits); | 449 | last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | |
450 | (add << sb->s_blocksize_bits); | ||
432 | 451 | ||
433 | /* Create enough extents to cover the whole hole */ | 452 | /* Create enough extents to cover the whole hole */ |
434 | while (blocks > add) { | 453 | while (blocks > add) { |
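The hole-filling loop above is bounded by the 30-bit extent length field: one extent can describe at most (1 << 30) - block_size bytes, which is why add is computed as (1 << (30 - s_blocksize_bits)) - 1 blocks and why extending across a large hole produces several NOT_RECORDED_NOT_ALLOCATED extents. A quick arithmetic check of that limit, with the block size assumed rather than read from a real superblock:

    #include <stdio.h>

    /* Worked example of the extent sizing used above. */
    int main(void)
    {
            unsigned int blocksize_bits = 11;       /* 2048-byte blocks */
            unsigned int add = (1u << (30 - blocksize_bits)) - 1;

            printf("blocks per extent: %u\n", add);                   /* 524287 */
            printf("bytes per extent:  %u\n", add << blocksize_bits); /* 1073739776, just under 1 GiB */
            return 0;
    }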
@@ -450,7 +469,8 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos, | |||
450 | out: | 469 | out: |
451 | /* Do we have some preallocated blocks saved? */ | 470 | /* Do we have some preallocated blocks saved? */ |
452 | if (prealloc_len) { | 471 | if (prealloc_len) { |
453 | if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1) | 472 | if (udf_add_aext(inode, last_pos, prealloc_loc, |
473 | prealloc_len, 1) == -1) | ||
454 | return -1; | 474 | return -1; |
455 | last_ext->extLocation = prealloc_loc; | 475 | last_ext->extLocation = prealloc_loc; |
456 | last_ext->extLength = prealloc_len; | 476 | last_ext->extLength = prealloc_len; |
@@ -458,9 +478,9 @@ out: | |||
458 | } | 478 | } |
459 | 479 | ||
460 | /* last_pos should point to the last written extent... */ | 480 | /* last_pos should point to the last written extent... */ |
461 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 481 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
462 | last_pos->offset -= sizeof(short_ad); | 482 | last_pos->offset -= sizeof(short_ad); |
463 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 483 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
464 | last_pos->offset -= sizeof(long_ad); | 484 | last_pos->offset -= sizeof(long_ad); |
465 | else | 485 | else |
466 | return -1; | 486 | return -1; |
@@ -469,7 +489,7 @@ out: | |||
469 | } | 489 | } |
470 | 490 | ||
471 | static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | 491 | static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, |
472 | int *err, long *phys, int *new) | 492 | int *err, sector_t *phys, int *new) |
473 | { | 493 | { |
474 | static sector_t last_block; | 494 | static sector_t last_block; |
475 | struct buffer_head *result = NULL; | 495 | struct buffer_head *result = NULL; |
@@ -483,11 +503,12 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
483 | uint32_t newblocknum, newblock; | 503 | uint32_t newblocknum, newblock; |
484 | sector_t offset = 0; | 504 | sector_t offset = 0; |
485 | int8_t etype; | 505 | int8_t etype; |
486 | int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum; | 506 | struct udf_inode_info *iinfo = UDF_I(inode); |
507 | int goal = 0, pgoal = iinfo->i_location.logicalBlockNum; | ||
487 | int lastblock = 0; | 508 | int lastblock = 0; |
488 | 509 | ||
489 | prev_epos.offset = udf_file_entry_alloc_offset(inode); | 510 | prev_epos.offset = udf_file_entry_alloc_offset(inode); |
490 | prev_epos.block = UDF_I_LOCATION(inode); | 511 | prev_epos.block = iinfo->i_location; |
491 | prev_epos.bh = NULL; | 512 | prev_epos.bh = NULL; |
492 | cur_epos = next_epos = prev_epos; | 513 | cur_epos = next_epos = prev_epos; |
493 | b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; | 514 | b_off = (loff_t)block << inode->i_sb->s_blocksize_bits; |
@@ -515,7 +536,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
515 | prev_epos.offset = cur_epos.offset; | 536 | prev_epos.offset = cur_epos.offset; |
516 | cur_epos.offset = next_epos.offset; | 537 | cur_epos.offset = next_epos.offset; |
517 | 538 | ||
518 | if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1) | 539 | etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1); |
540 | if (etype == -1) | ||
519 | break; | 541 | break; |
520 | 542 | ||
521 | c = !c; | 543 | c = !c; |
@@ -569,9 +591,11 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
569 | startnum = 1; | 591 | startnum = 1; |
570 | } else { | 592 | } else { |
571 | /* Create a fake extent when there's not one */ | 593 | /* Create a fake extent when there's not one */ |
572 | memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr)); | 594 | memset(&laarr[0].extLocation, 0x00, |
595 | sizeof(kernel_lb_addr)); | ||
573 | laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; | 596 | laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; |
574 | /* Will udf_extend_file() create real extent from a fake one? */ | 597 | /* Will udf_extend_file() create real extent from |
598 | a fake one? */ | ||
575 | startnum = (offset > 0); | 599 | startnum = (offset > 0); |
576 | } | 600 | } |
577 | /* Create extents for the hole between EOF and offset */ | 601 | /* Create extents for the hole between EOF and offset */ |
@@ -589,14 +613,16 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
589 | offset = 0; | 613 | offset = 0; |
590 | count += ret; | 614 | count += ret; |
591 | /* We are not covered by a preallocated extent? */ | 615 | /* We are not covered by a preallocated extent? */ |
592 | if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) { | 616 | if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != |
617 | EXT_NOT_RECORDED_ALLOCATED) { | ||
593 | /* Is there any real extent? - otherwise we overwrite | 618 | /* Is there any real extent? - otherwise we overwrite |
594 | * the fake one... */ | 619 | * the fake one... */ |
595 | if (count) | 620 | if (count) |
596 | c = !c; | 621 | c = !c; |
597 | laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | | 622 | laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | |
598 | inode->i_sb->s_blocksize; | 623 | inode->i_sb->s_blocksize; |
599 | memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr)); | 624 | memset(&laarr[c].extLocation, 0x00, |
625 | sizeof(kernel_lb_addr)); | ||
600 | count++; | 626 | count++; |
601 | endnum++; | 627 | endnum++; |
602 | } | 628 | } |
@@ -605,7 +631,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
605 | } else { | 631 | } else { |
606 | endnum = startnum = ((count > 2) ? 2 : count); | 632 | endnum = startnum = ((count > 2) ? 2 : count); |
607 | 633 | ||
608 | /* if the current extent is in position 0, swap it with the previous */ | 634 | /* if the current extent is in position 0, |
635 | swap it with the previous */ | ||
609 | if (!c && count != 1) { | 636 | if (!c && count != 1) { |
610 | laarr[2] = laarr[0]; | 637 | laarr[2] = laarr[0]; |
611 | laarr[0] = laarr[1]; | 638 | laarr[0] = laarr[1]; |
@@ -613,44 +640,47 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
613 | c = 1; | 640 | c = 1; |
614 | } | 641 | } |
615 | 642 | ||
616 | /* if the current block is located in an extent, read the next extent */ | 643 | /* if the current block is located in an extent, |
617 | if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1) { | 644 | read the next extent */ |
645 | etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0); | ||
646 | if (etype != -1) { | ||
618 | laarr[c + 1].extLength = (etype << 30) | elen; | 647 | laarr[c + 1].extLength = (etype << 30) | elen; |
619 | laarr[c + 1].extLocation = eloc; | 648 | laarr[c + 1].extLocation = eloc; |
620 | count++; | 649 | count++; |
621 | startnum++; | 650 | startnum++; |
622 | endnum++; | 651 | endnum++; |
623 | } else { | 652 | } else |
624 | lastblock = 1; | 653 | lastblock = 1; |
625 | } | ||
626 | } | 654 | } |
627 | 655 | ||
628 | /* if the current extent is not recorded but allocated, get the | 656 | /* if the current extent is not recorded but allocated, get the |
629 | * block in the extent corresponding to the requested block */ | 657 | * block in the extent corresponding to the requested block */ |
630 | if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { | 658 | if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) |
631 | newblocknum = laarr[c].extLocation.logicalBlockNum + offset; | 659 | newblocknum = laarr[c].extLocation.logicalBlockNum + offset; |
632 | } else { /* otherwise, allocate a new block */ | 660 | else { /* otherwise, allocate a new block */ |
633 | if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block) | 661 | if (iinfo->i_next_alloc_block == block) |
634 | goal = UDF_I_NEXT_ALLOC_GOAL(inode); | 662 | goal = iinfo->i_next_alloc_goal; |
635 | 663 | ||
636 | if (!goal) { | 664 | if (!goal) { |
637 | if (!(goal = pgoal)) | 665 | if (!(goal = pgoal)) /* XXX: what was intended here? */ |
638 | goal = UDF_I_LOCATION(inode).logicalBlockNum + 1; | 666 | goal = iinfo->i_location.logicalBlockNum + 1; |
639 | } | 667 | } |
640 | 668 | ||
641 | if (!(newblocknum = udf_new_block(inode->i_sb, inode, | 669 | newblocknum = udf_new_block(inode->i_sb, inode, |
642 | UDF_I_LOCATION(inode).partitionReferenceNum, | 670 | iinfo->i_location.partitionReferenceNum, |
643 | goal, err))) { | 671 | goal, err); |
672 | if (!newblocknum) { | ||
644 | brelse(prev_epos.bh); | 673 | brelse(prev_epos.bh); |
645 | *err = -ENOSPC; | 674 | *err = -ENOSPC; |
646 | return NULL; | 675 | return NULL; |
647 | } | 676 | } |
648 | UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize; | 677 | iinfo->i_lenExtents += inode->i_sb->s_blocksize; |
649 | } | 678 | } |
650 | 679 | ||
651 | /* if the extent the requsted block is located in contains multiple blocks, | 680 | /* if the extent the requsted block is located in contains multiple |
652 | * split the extent into at most three extents. blocks prior to requested | 681 | * blocks, split the extent into at most three extents. blocks prior |
653 | * block, requested block, and blocks after requested block */ | 682 | * to requested block, requested block, and blocks after requested |
683 | * block */ | ||
654 | udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); | 684 | udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum); |
655 | 685 | ||
656 | #ifdef UDF_PREALLOCATE | 686 | #ifdef UDF_PREALLOCATE |
@@ -668,15 +698,15 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block, | |||
668 | 698 | ||
669 | brelse(prev_epos.bh); | 699 | brelse(prev_epos.bh); |
670 | 700 | ||
671 | if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum, | 701 | newblock = udf_get_pblock(inode->i_sb, newblocknum, |
672 | UDF_I_LOCATION(inode).partitionReferenceNum, 0))) { | 702 | iinfo->i_location.partitionReferenceNum, 0); |
703 | if (!newblock) | ||
673 | return NULL; | 704 | return NULL; |
674 | } | ||
675 | *phys = newblock; | 705 | *phys = newblock; |
676 | *err = 0; | 706 | *err = 0; |
677 | *new = 1; | 707 | *new = 1; |
678 | UDF_I_NEXT_ALLOC_BLOCK(inode) = block; | 708 | iinfo->i_next_alloc_block = block; |
679 | UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum; | 709 | iinfo->i_next_alloc_goal = newblocknum; |
680 | inode->i_ctime = current_fs_time(inode->i_sb); | 710 | inode->i_ctime = current_fs_time(inode->i_sb); |
681 | 711 | ||
682 | if (IS_SYNC(inode)) | 712 | if (IS_SYNC(inode)) |
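The tail of inode_getblk() above records the logical block just mapped and the physical block it received in i_next_alloc_block/i_next_alloc_goal; the goal-selection code earlier in the function (around the udf_new_block() call) reads them back, so a sequential writer keeps allocating near its previous block. A rough standalone sketch of that hint logic, with hypothetical type names standing in for udf_inode_info:

	/* Sketch (not the kernel code) of the allocation-goal hint visible
	 * in inode_getblk(): struct names here are illustrative only. */
	struct lb_addr { unsigned int logicalBlockNum; };

	struct udf_alloc_hint {
		struct lb_addr	i_location;
		unsigned int	i_next_alloc_block;	/* last logical block mapped */
		unsigned int	i_next_alloc_goal;	/* physical block it received */
	};

	static unsigned int pick_goal(const struct udf_alloc_hint *h,
				      unsigned int block, unsigned int pgoal)
	{
		unsigned int goal = 0;

		if (h->i_next_alloc_block == block)	/* sequential access */
			goal = h->i_next_alloc_goal;
		if (!goal)
			goal = pgoal ? pgoal : h->i_location.logicalBlockNum + 1;
		return goal;
	}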
@@ -692,16 +722,20 @@ static void udf_split_extents(struct inode *inode, int *c, int offset, | |||
692 | kernel_long_ad laarr[EXTENT_MERGE_SIZE], | 722 | kernel_long_ad laarr[EXTENT_MERGE_SIZE], |
693 | int *endnum) | 723 | int *endnum) |
694 | { | 724 | { |
725 | unsigned long blocksize = inode->i_sb->s_blocksize; | ||
726 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; | ||
727 | |||
695 | if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || | 728 | if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) || |
696 | (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { | 729 | (laarr[*c].extLength >> 30) == |
730 | (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { | ||
697 | int curr = *c; | 731 | int curr = *c; |
698 | int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + | 732 | int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + |
699 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; | 733 | blocksize - 1) >> blocksize_bits; |
700 | int8_t etype = (laarr[curr].extLength >> 30); | 734 | int8_t etype = (laarr[curr].extLength >> 30); |
701 | 735 | ||
702 | if (blen == 1) { | 736 | if (blen == 1) |
703 | ; | 737 | ; |
704 | } else if (!offset || blen == offset + 1) { | 738 | else if (!offset || blen == offset + 1) { |
705 | laarr[curr + 2] = laarr[curr + 1]; | 739 | laarr[curr + 2] = laarr[curr + 1]; |
706 | laarr[curr + 1] = laarr[curr]; | 740 | laarr[curr + 1] = laarr[curr]; |
707 | } else { | 741 | } else { |
@@ -711,15 +745,18 @@ static void udf_split_extents(struct inode *inode, int *c, int offset, | |||
711 | 745 | ||
712 | if (offset) { | 746 | if (offset) { |
713 | if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { | 747 | if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { |
714 | udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset); | 748 | udf_free_blocks(inode->i_sb, inode, |
715 | laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | | 749 | laarr[curr].extLocation, |
716 | (offset << inode->i_sb->s_blocksize_bits); | 750 | 0, offset); |
751 | laarr[curr].extLength = | ||
752 | EXT_NOT_RECORDED_NOT_ALLOCATED | | ||
753 | (offset << blocksize_bits); | ||
717 | laarr[curr].extLocation.logicalBlockNum = 0; | 754 | laarr[curr].extLocation.logicalBlockNum = 0; |
718 | laarr[curr].extLocation.partitionReferenceNum = 0; | 755 | laarr[curr].extLocation. |
719 | } else { | 756 | partitionReferenceNum = 0; |
757 | } else | ||
720 | laarr[curr].extLength = (etype << 30) | | 758 | laarr[curr].extLength = (etype << 30) | |
721 | (offset << inode->i_sb->s_blocksize_bits); | 759 | (offset << blocksize_bits); |
722 | } | ||
723 | curr++; | 760 | curr++; |
724 | (*c)++; | 761 | (*c)++; |
725 | (*endnum)++; | 762 | (*endnum)++; |
@@ -728,16 +765,17 @@ static void udf_split_extents(struct inode *inode, int *c, int offset, | |||
728 | laarr[curr].extLocation.logicalBlockNum = newblocknum; | 765 | laarr[curr].extLocation.logicalBlockNum = newblocknum; |
729 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) | 766 | if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) |
730 | laarr[curr].extLocation.partitionReferenceNum = | 767 | laarr[curr].extLocation.partitionReferenceNum = |
731 | UDF_I_LOCATION(inode).partitionReferenceNum; | 768 | UDF_I(inode)->i_location.partitionReferenceNum; |
732 | laarr[curr].extLength = EXT_RECORDED_ALLOCATED | | 769 | laarr[curr].extLength = EXT_RECORDED_ALLOCATED | |
733 | inode->i_sb->s_blocksize; | 770 | blocksize; |
734 | curr++; | 771 | curr++; |
735 | 772 | ||
736 | if (blen != offset + 1) { | 773 | if (blen != offset + 1) { |
737 | if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) | 774 | if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) |
738 | laarr[curr].extLocation.logicalBlockNum += (offset + 1); | 775 | laarr[curr].extLocation.logicalBlockNum += |
776 | offset + 1; | ||
739 | laarr[curr].extLength = (etype << 30) | | 777 | laarr[curr].extLength = (etype << 30) | |
740 | ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits); | 778 | ((blen - (offset + 1)) << blocksize_bits); |
741 | curr++; | 779 | curr++; |
742 | (*endnum)++; | 780 | (*endnum)++; |
743 | } | 781 | } |
@@ -756,69 +794,86 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock, | |||
756 | else | 794 | else |
757 | start = c; | 795 | start = c; |
758 | } else { | 796 | } else { |
759 | if ((laarr[c + 1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { | 797 | if ((laarr[c + 1].extLength >> 30) == |
798 | (EXT_NOT_RECORDED_ALLOCATED >> 30)) { | ||
760 | start = c + 1; | 799 | start = c + 1; |
761 | length = currlength = (((laarr[c + 1].extLength & UDF_EXTENT_LENGTH_MASK) + | 800 | length = currlength = |
762 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); | 801 | (((laarr[c + 1].extLength & |
763 | } else { | 802 | UDF_EXTENT_LENGTH_MASK) + |
803 | inode->i_sb->s_blocksize - 1) >> | ||
804 | inode->i_sb->s_blocksize_bits); | ||
805 | } else | ||
764 | start = c; | 806 | start = c; |
765 | } | ||
766 | } | 807 | } |
767 | 808 | ||
768 | for (i = start + 1; i <= *endnum; i++) { | 809 | for (i = start + 1; i <= *endnum; i++) { |
769 | if (i == *endnum) { | 810 | if (i == *endnum) { |
770 | if (lastblock) | 811 | if (lastblock) |
771 | length += UDF_DEFAULT_PREALLOC_BLOCKS; | 812 | length += UDF_DEFAULT_PREALLOC_BLOCKS; |
772 | } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { | 813 | } else if ((laarr[i].extLength >> 30) == |
773 | length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 814 | (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) { |
774 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); | 815 | length += (((laarr[i].extLength & |
775 | } else { | 816 | UDF_EXTENT_LENGTH_MASK) + |
817 | inode->i_sb->s_blocksize - 1) >> | ||
818 | inode->i_sb->s_blocksize_bits); | ||
819 | } else | ||
776 | break; | 820 | break; |
777 | } | ||
778 | } | 821 | } |
779 | 822 | ||
780 | if (length) { | 823 | if (length) { |
781 | int next = laarr[start].extLocation.logicalBlockNum + | 824 | int next = laarr[start].extLocation.logicalBlockNum + |
782 | (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + | 825 | (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) + |
783 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); | 826 | inode->i_sb->s_blocksize - 1) >> |
827 | inode->i_sb->s_blocksize_bits); | ||
784 | int numalloc = udf_prealloc_blocks(inode->i_sb, inode, | 828 | int numalloc = udf_prealloc_blocks(inode->i_sb, inode, |
785 | laarr[start].extLocation.partitionReferenceNum, | 829 | laarr[start].extLocation.partitionReferenceNum, |
786 | next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length : | 830 | next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? |
787 | UDF_DEFAULT_PREALLOC_BLOCKS) - currlength); | 831 | length : UDF_DEFAULT_PREALLOC_BLOCKS) - |
832 | currlength); | ||
788 | if (numalloc) { | 833 | if (numalloc) { |
789 | if (start == (c + 1)) { | 834 | if (start == (c + 1)) |
790 | laarr[start].extLength += | 835 | laarr[start].extLength += |
791 | (numalloc << inode->i_sb->s_blocksize_bits); | 836 | (numalloc << |
792 | } else { | 837 | inode->i_sb->s_blocksize_bits); |
838 | else { | ||
793 | memmove(&laarr[c + 2], &laarr[c + 1], | 839 | memmove(&laarr[c + 2], &laarr[c + 1], |
794 | sizeof(long_ad) * (*endnum - (c + 1))); | 840 | sizeof(long_ad) * (*endnum - (c + 1))); |
795 | (*endnum)++; | 841 | (*endnum)++; |
796 | laarr[c + 1].extLocation.logicalBlockNum = next; | 842 | laarr[c + 1].extLocation.logicalBlockNum = next; |
797 | laarr[c + 1].extLocation.partitionReferenceNum = | 843 | laarr[c + 1].extLocation.partitionReferenceNum = |
798 | laarr[c].extLocation.partitionReferenceNum; | 844 | laarr[c].extLocation. |
799 | laarr[c + 1].extLength = EXT_NOT_RECORDED_ALLOCATED | | 845 | partitionReferenceNum; |
800 | (numalloc << inode->i_sb->s_blocksize_bits); | 846 | laarr[c + 1].extLength = |
847 | EXT_NOT_RECORDED_ALLOCATED | | ||
848 | (numalloc << | ||
849 | inode->i_sb->s_blocksize_bits); | ||
801 | start = c + 1; | 850 | start = c + 1; |
802 | } | 851 | } |
803 | 852 | ||
804 | for (i = start + 1; numalloc && i < *endnum; i++) { | 853 | for (i = start + 1; numalloc && i < *endnum; i++) { |
805 | int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 854 | int elen = ((laarr[i].extLength & |
806 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; | 855 | UDF_EXTENT_LENGTH_MASK) + |
856 | inode->i_sb->s_blocksize - 1) >> | ||
857 | inode->i_sb->s_blocksize_bits; | ||
807 | 858 | ||
808 | if (elen > numalloc) { | 859 | if (elen > numalloc) { |
809 | laarr[i].extLength -= | 860 | laarr[i].extLength -= |
810 | (numalloc << inode->i_sb->s_blocksize_bits); | 861 | (numalloc << |
862 | inode->i_sb->s_blocksize_bits); | ||
811 | numalloc = 0; | 863 | numalloc = 0; |
812 | } else { | 864 | } else { |
813 | numalloc -= elen; | 865 | numalloc -= elen; |
814 | if (*endnum > (i + 1)) | 866 | if (*endnum > (i + 1)) |
815 | memmove(&laarr[i], &laarr[i + 1], | 867 | memmove(&laarr[i], |
816 | sizeof(long_ad) * (*endnum - (i + 1))); | 868 | &laarr[i + 1], |
869 | sizeof(long_ad) * | ||
870 | (*endnum - (i + 1))); | ||
817 | i--; | 871 | i--; |
818 | (*endnum)--; | 872 | (*endnum)--; |
819 | } | 873 | } |
820 | } | 874 | } |
821 | UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits; | 875 | UDF_I(inode)->i_lenExtents += |
876 | numalloc << inode->i_sb->s_blocksize_bits; | ||
822 | } | 877 | } |
823 | } | 878 | } |
824 | } | 879 | } |
@@ -828,70 +883,97 @@ static void udf_merge_extents(struct inode *inode, | |||
828 | int *endnum) | 883 | int *endnum) |
829 | { | 884 | { |
830 | int i; | 885 | int i; |
886 | unsigned long blocksize = inode->i_sb->s_blocksize; | ||
887 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; | ||
831 | 888 | ||
832 | for (i = 0; i < (*endnum - 1); i++) { | 889 | for (i = 0; i < (*endnum - 1); i++) { |
833 | if ((laarr[i].extLength >> 30) == (laarr[i + 1].extLength >> 30)) { | 890 | kernel_long_ad *li /*l[i]*/ = &laarr[i]; |
834 | if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || | 891 | kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1]; |
835 | ((laarr[i + 1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) == | 892 | |
836 | (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 893 | if (((li->extLength >> 30) == (lip1->extLength >> 30)) && |
837 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits))) { | 894 | (((li->extLength >> 30) == |
838 | if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 895 | (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) || |
839 | (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) + | 896 | ((lip1->extLocation.logicalBlockNum - |
840 | inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { | 897 | li->extLocation.logicalBlockNum) == |
841 | laarr[i + 1].extLength = (laarr[i + 1].extLength - | 898 | (((li->extLength & UDF_EXTENT_LENGTH_MASK) + |
842 | (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 899 | blocksize - 1) >> blocksize_bits)))) { |
843 | UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1); | 900 | |
844 | laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) + | 901 | if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + |
845 | (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize; | 902 | (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + |
846 | laarr[i + 1].extLocation.logicalBlockNum = | 903 | blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { |
847 | laarr[i].extLocation.logicalBlockNum + | 904 | lip1->extLength = (lip1->extLength - |
848 | ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >> | 905 | (li->extLength & |
849 | inode->i_sb->s_blocksize_bits); | 906 | UDF_EXTENT_LENGTH_MASK) + |
850 | } else { | 907 | UDF_EXTENT_LENGTH_MASK) & |
851 | laarr[i].extLength = laarr[i + 1].extLength + | 908 | ~(blocksize - 1); |
852 | (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 909 | li->extLength = (li->extLength & |
853 | inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); | 910 | UDF_EXTENT_FLAG_MASK) + |
854 | if (*endnum > (i + 2)) | 911 | (UDF_EXTENT_LENGTH_MASK + 1) - |
855 | memmove(&laarr[i + 1], &laarr[i + 2], | 912 | blocksize; |
856 | sizeof(long_ad) * (*endnum - (i + 2))); | 913 | lip1->extLocation.logicalBlockNum = |
857 | i--; | 914 | li->extLocation.logicalBlockNum + |
858 | (*endnum)--; | 915 | ((li->extLength & |
859 | } | 916 | UDF_EXTENT_LENGTH_MASK) >> |
917 | blocksize_bits); | ||
918 | } else { | ||
919 | li->extLength = lip1->extLength + | ||
920 | (((li->extLength & | ||
921 | UDF_EXTENT_LENGTH_MASK) + | ||
922 | blocksize - 1) & ~(blocksize - 1)); | ||
923 | if (*endnum > (i + 2)) | ||
924 | memmove(&laarr[i + 1], &laarr[i + 2], | ||
925 | sizeof(long_ad) * | ||
926 | (*endnum - (i + 2))); | ||
927 | i--; | ||
928 | (*endnum)--; | ||
860 | } | 929 | } |
861 | } else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) && | 930 | } else if (((li->extLength >> 30) == |
862 | ((laarr[i + 1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { | 931 | (EXT_NOT_RECORDED_ALLOCATED >> 30)) && |
863 | udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0, | 932 | ((lip1->extLength >> 30) == |
864 | ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 933 | (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) { |
865 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); | 934 | udf_free_blocks(inode->i_sb, inode, li->extLocation, 0, |
866 | laarr[i].extLocation.logicalBlockNum = 0; | 935 | ((li->extLength & |
867 | laarr[i].extLocation.partitionReferenceNum = 0; | 936 | UDF_EXTENT_LENGTH_MASK) + |
868 | 937 | blocksize - 1) >> blocksize_bits); | |
869 | if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 938 | li->extLocation.logicalBlockNum = 0; |
870 | (laarr[i + 1].extLength & UDF_EXTENT_LENGTH_MASK) + | 939 | li->extLocation.partitionReferenceNum = 0; |
871 | inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { | 940 | |
872 | laarr[i + 1].extLength = (laarr[i + 1].extLength - | 941 | if (((li->extLength & UDF_EXTENT_LENGTH_MASK) + |
873 | (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 942 | (lip1->extLength & UDF_EXTENT_LENGTH_MASK) + |
874 | UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize - 1); | 943 | blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) { |
875 | laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) + | 944 | lip1->extLength = (lip1->extLength - |
876 | (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize; | 945 | (li->extLength & |
946 | UDF_EXTENT_LENGTH_MASK) + | ||
947 | UDF_EXTENT_LENGTH_MASK) & | ||
948 | ~(blocksize - 1); | ||
949 | li->extLength = (li->extLength & | ||
950 | UDF_EXTENT_FLAG_MASK) + | ||
951 | (UDF_EXTENT_LENGTH_MASK + 1) - | ||
952 | blocksize; | ||
877 | } else { | 953 | } else { |
878 | laarr[i].extLength = laarr[i + 1].extLength + | 954 | li->extLength = lip1->extLength + |
879 | (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 955 | (((li->extLength & |
880 | inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize - 1)); | 956 | UDF_EXTENT_LENGTH_MASK) + |
957 | blocksize - 1) & ~(blocksize - 1)); | ||
881 | if (*endnum > (i + 2)) | 958 | if (*endnum > (i + 2)) |
882 | memmove(&laarr[i + 1], &laarr[i + 2], | 959 | memmove(&laarr[i + 1], &laarr[i + 2], |
883 | sizeof(long_ad) * (*endnum - (i + 2))); | 960 | sizeof(long_ad) * |
961 | (*endnum - (i + 2))); | ||
884 | i--; | 962 | i--; |
885 | (*endnum)--; | 963 | (*endnum)--; |
886 | } | 964 | } |
887 | } else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) { | 965 | } else if ((li->extLength >> 30) == |
888 | udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0, | 966 | (EXT_NOT_RECORDED_ALLOCATED >> 30)) { |
889 | ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) + | 967 | udf_free_blocks(inode->i_sb, inode, |
890 | inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits); | 968 | li->extLocation, 0, |
891 | laarr[i].extLocation.logicalBlockNum = 0; | 969 | ((li->extLength & |
892 | laarr[i].extLocation.partitionReferenceNum = 0; | 970 | UDF_EXTENT_LENGTH_MASK) + |
893 | laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) | | 971 | blocksize - 1) >> blocksize_bits); |
894 | EXT_NOT_RECORDED_NOT_ALLOCATED; | 972 | li->extLocation.logicalBlockNum = 0; |
973 | li->extLocation.partitionReferenceNum = 0; | ||
974 | li->extLength = (li->extLength & | ||
975 | UDF_EXTENT_LENGTH_MASK) | | ||
976 | EXT_NOT_RECORDED_NOT_ALLOCATED; | ||
895 | } | 977 | } |
896 | } | 978 | } |
897 | } | 979 | } |
@@ -953,6 +1035,7 @@ void udf_truncate(struct inode *inode) | |||
953 | { | 1035 | { |
954 | int offset; | 1036 | int offset; |
955 | int err; | 1037 | int err; |
1038 | struct udf_inode_info *iinfo; | ||
956 | 1039 | ||
957 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1040 | if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
958 | S_ISLNK(inode->i_mode))) | 1041 | S_ISLNK(inode->i_mode))) |
@@ -961,25 +1044,28 @@ void udf_truncate(struct inode *inode) | |||
961 | return; | 1044 | return; |
962 | 1045 | ||
963 | lock_kernel(); | 1046 | lock_kernel(); |
964 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { | 1047 | iinfo = UDF_I(inode); |
965 | if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + | 1048 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
966 | inode->i_size)) { | 1049 | if (inode->i_sb->s_blocksize < |
1050 | (udf_file_entry_alloc_offset(inode) + | ||
1051 | inode->i_size)) { | ||
967 | udf_expand_file_adinicb(inode, inode->i_size, &err); | 1052 | udf_expand_file_adinicb(inode, inode->i_size, &err); |
968 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { | 1053 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
969 | inode->i_size = UDF_I_LENALLOC(inode); | 1054 | inode->i_size = iinfo->i_lenAlloc; |
970 | unlock_kernel(); | 1055 | unlock_kernel(); |
971 | return; | 1056 | return; |
972 | } else { | 1057 | } else |
973 | udf_truncate_extents(inode); | 1058 | udf_truncate_extents(inode); |
974 | } | ||
975 | } else { | 1059 | } else { |
976 | offset = inode->i_size & (inode->i_sb->s_blocksize - 1); | 1060 | offset = inode->i_size & (inode->i_sb->s_blocksize - 1); |
977 | memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, | 1061 | memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, |
978 | inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode)); | 1062 | 0x00, inode->i_sb->s_blocksize - |
979 | UDF_I_LENALLOC(inode) = inode->i_size; | 1063 | offset - udf_file_entry_alloc_offset(inode)); |
1064 | iinfo->i_lenAlloc = inode->i_size; | ||
980 | } | 1065 | } |
981 | } else { | 1066 | } else { |
982 | block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block); | 1067 | block_truncate_page(inode->i_mapping, inode->i_size, |
1068 | udf_get_block); | ||
983 | udf_truncate_extents(inode); | 1069 | udf_truncate_extents(inode); |
984 | } | 1070 | } |
985 | 1071 | ||
@@ -996,6 +1082,7 @@ static void __udf_read_inode(struct inode *inode) | |||
996 | struct buffer_head *bh = NULL; | 1082 | struct buffer_head *bh = NULL; |
997 | struct fileEntry *fe; | 1083 | struct fileEntry *fe; |
998 | uint16_t ident; | 1084 | uint16_t ident; |
1085 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
999 | 1086 | ||
1000 | /* | 1087 | /* |
1001 | * Set defaults, but the inode is still incomplete! | 1088 | * Set defaults, but the inode is still incomplete! |
@@ -1009,7 +1096,7 @@ static void __udf_read_inode(struct inode *inode) | |||
1009 | * i_nlink = 1 | 1096 | * i_nlink = 1 |
1010 | * i_op = NULL; | 1097 | * i_op = NULL; |
1011 | */ | 1098 | */ |
1012 | bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident); | 1099 | bh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 0, &ident); |
1013 | if (!bh) { | 1100 | if (!bh) { |
1014 | printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n", | 1101 | printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n", |
1015 | inode->i_ino); | 1102 | inode->i_ino); |
@@ -1019,8 +1106,8 @@ static void __udf_read_inode(struct inode *inode) | |||
1019 | 1106 | ||
1020 | if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && | 1107 | if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && |
1021 | ident != TAG_IDENT_USE) { | 1108 | ident != TAG_IDENT_USE) { |
1022 | printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n", | 1109 | printk(KERN_ERR "udf: udf_read_inode(ino %ld) " |
1023 | inode->i_ino, ident); | 1110 | "failed ident=%d\n", inode->i_ino, ident); |
1024 | brelse(bh); | 1111 | brelse(bh); |
1025 | make_bad_inode(inode); | 1112 | make_bad_inode(inode); |
1026 | return; | 1113 | return; |
@@ -1028,11 +1115,12 @@ static void __udf_read_inode(struct inode *inode) | |||
1028 | 1115 | ||
1029 | fe = (struct fileEntry *)bh->b_data; | 1116 | fe = (struct fileEntry *)bh->b_data; |
1030 | 1117 | ||
1031 | if (le16_to_cpu(fe->icbTag.strategyType) == 4096) { | 1118 | if (fe->icbTag.strategyType == cpu_to_le16(4096)) { |
1032 | struct buffer_head *ibh = NULL, *nbh = NULL; | 1119 | struct buffer_head *ibh = NULL, *nbh = NULL; |
1033 | struct indirectEntry *ie; | 1120 | struct indirectEntry *ie; |
1034 | 1121 | ||
1035 | ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident); | 1122 | ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1, |
1123 | &ident); | ||
1036 | if (ident == TAG_IDENT_IE) { | 1124 | if (ident == TAG_IDENT_IE) { |
1037 | if (ibh) { | 1125 | if (ibh) { |
1038 | kernel_lb_addr loc; | 1126 | kernel_lb_addr loc; |
@@ -1041,10 +1129,12 @@ static void __udf_read_inode(struct inode *inode) | |||
1041 | loc = lelb_to_cpu(ie->indirectICB.extLocation); | 1129 | loc = lelb_to_cpu(ie->indirectICB.extLocation); |
1042 | 1130 | ||
1043 | if (ie->indirectICB.extLength && | 1131 | if (ie->indirectICB.extLength && |
1044 | (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident))) { | 1132 | (nbh = udf_read_ptagged(inode->i_sb, loc, 0, |
1133 | &ident))) { | ||
1045 | if (ident == TAG_IDENT_FE || | 1134 | if (ident == TAG_IDENT_FE || |
1046 | ident == TAG_IDENT_EFE) { | 1135 | ident == TAG_IDENT_EFE) { |
1047 | memcpy(&UDF_I_LOCATION(inode), &loc, | 1136 | memcpy(&iinfo->i_location, |
1137 | &loc, | ||
1048 | sizeof(kernel_lb_addr)); | 1138 | sizeof(kernel_lb_addr)); |
1049 | brelse(bh); | 1139 | brelse(bh); |
1050 | brelse(ibh); | 1140 | brelse(ibh); |
@@ -1062,7 +1152,7 @@ static void __udf_read_inode(struct inode *inode) | |||
1062 | } else { | 1152 | } else { |
1063 | brelse(ibh); | 1153 | brelse(ibh); |
1064 | } | 1154 | } |
1065 | } else if (le16_to_cpu(fe->icbTag.strategyType) != 4) { | 1155 | } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { |
1066 | printk(KERN_ERR "udf: unsupported strategy type: %d\n", | 1156 | printk(KERN_ERR "udf: unsupported strategy type: %d\n", |
1067 | le16_to_cpu(fe->icbTag.strategyType)); | 1157 | le16_to_cpu(fe->icbTag.strategyType)); |
1068 | brelse(bh); | 1158 | brelse(bh); |
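The strategyType checks above (and the tagIdent checks further down) are rewritten from le16_to_cpu(field) == N to field == cpu_to_le16(N). The two forms are equivalent; moving the conversion onto the constant lets the compiler fold the byte swap at build time, so the raw on-disk value is compared directly even on big-endian machines. A small user-space illustration (cpu_to_le16_sketch is a stand-in, not the kernel helper):

	#include <stdint.h>

	/* Stand-in for the kernel's cpu_to_le16(): identity on little-endian,
	 * a byte swap on big-endian hosts. */
	static inline uint16_t cpu_to_le16_sketch(uint16_t x)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return (uint16_t)((x << 8) | (x >> 8));
	#else
		return x;
	#endif
	}

	/* Compare the on-disk little-endian field against a build-time
	 * constant; no runtime conversion of the field is needed. */
	static int is_strategy_4096(uint16_t on_disk_le)
	{
		return on_disk_le == cpu_to_le16_sketch(4096);
	}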
@@ -1081,51 +1171,63 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1081 | time_t convtime; | 1171 | time_t convtime; |
1082 | long convtime_usec; | 1172 | long convtime_usec; |
1083 | int offset; | 1173 | int offset; |
1174 | struct udf_sb_info *sbi = UDF_SB(inode->i_sb); | ||
1175 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
1084 | 1176 | ||
1085 | fe = (struct fileEntry *)bh->b_data; | 1177 | fe = (struct fileEntry *)bh->b_data; |
1086 | efe = (struct extendedFileEntry *)bh->b_data; | 1178 | efe = (struct extendedFileEntry *)bh->b_data; |
1087 | 1179 | ||
1088 | if (le16_to_cpu(fe->icbTag.strategyType) == 4) | 1180 | if (fe->icbTag.strategyType == cpu_to_le16(4)) |
1089 | UDF_I_STRAT4096(inode) = 0; | 1181 | iinfo->i_strat4096 = 0; |
1090 | else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */ | 1182 | else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ |
1091 | UDF_I_STRAT4096(inode) = 1; | 1183 | iinfo->i_strat4096 = 1; |
1092 | 1184 | ||
1093 | UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK; | 1185 | iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) & |
1094 | UDF_I_UNIQUE(inode) = 0; | 1186 | ICBTAG_FLAG_AD_MASK; |
1095 | UDF_I_LENEATTR(inode) = 0; | 1187 | iinfo->i_unique = 0; |
1096 | UDF_I_LENEXTENTS(inode) = 0; | 1188 | iinfo->i_lenEAttr = 0; |
1097 | UDF_I_LENALLOC(inode) = 0; | 1189 | iinfo->i_lenExtents = 0; |
1098 | UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; | 1190 | iinfo->i_lenAlloc = 0; |
1099 | UDF_I_NEXT_ALLOC_GOAL(inode) = 0; | 1191 | iinfo->i_next_alloc_block = 0; |
1100 | if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE) { | 1192 | iinfo->i_next_alloc_goal = 0; |
1101 | UDF_I_EFE(inode) = 1; | 1193 | if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { |
1102 | UDF_I_USE(inode) = 0; | 1194 | iinfo->i_efe = 1; |
1103 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry))) { | 1195 | iinfo->i_use = 0; |
1196 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | ||
1197 | sizeof(struct extendedFileEntry))) { | ||
1104 | make_bad_inode(inode); | 1198 | make_bad_inode(inode); |
1105 | return; | 1199 | return; |
1106 | } | 1200 | } |
1107 | memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), | 1201 | memcpy(iinfo->i_ext.i_data, |
1108 | inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); | 1202 | bh->b_data + sizeof(struct extendedFileEntry), |
1109 | } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE) { | 1203 | inode->i_sb->s_blocksize - |
1110 | UDF_I_EFE(inode) = 0; | 1204 | sizeof(struct extendedFileEntry)); |
1111 | UDF_I_USE(inode) = 0; | 1205 | } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { |
1112 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct fileEntry))) { | 1206 | iinfo->i_efe = 0; |
1207 | iinfo->i_use = 0; | ||
1208 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | ||
1209 | sizeof(struct fileEntry))) { | ||
1113 | make_bad_inode(inode); | 1210 | make_bad_inode(inode); |
1114 | return; | 1211 | return; |
1115 | } | 1212 | } |
1116 | memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), | 1213 | memcpy(iinfo->i_ext.i_data, |
1214 | bh->b_data + sizeof(struct fileEntry), | ||
1117 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); | 1215 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); |
1118 | } else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) { | 1216 | } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { |
1119 | UDF_I_EFE(inode) = 0; | 1217 | iinfo->i_efe = 0; |
1120 | UDF_I_USE(inode) = 1; | 1218 | iinfo->i_use = 1; |
1121 | UDF_I_LENALLOC(inode) = | 1219 | iinfo->i_lenAlloc = le32_to_cpu( |
1122 | le32_to_cpu(((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs); | 1220 | ((struct unallocSpaceEntry *)bh->b_data)-> |
1123 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry))) { | 1221 | lengthAllocDescs); |
1222 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | ||
1223 | sizeof(struct unallocSpaceEntry))) { | ||
1124 | make_bad_inode(inode); | 1224 | make_bad_inode(inode); |
1125 | return; | 1225 | return; |
1126 | } | 1226 | } |
1127 | memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), | 1227 | memcpy(iinfo->i_ext.i_data, |
1128 | inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); | 1228 | bh->b_data + sizeof(struct unallocSpaceEntry), |
1229 | inode->i_sb->s_blocksize - | ||
1230 | sizeof(struct unallocSpaceEntry)); | ||
1129 | return; | 1231 | return; |
1130 | } | 1232 | } |
1131 | 1233 | ||
@@ -1146,12 +1248,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1146 | inode->i_nlink = 1; | 1248 | inode->i_nlink = 1; |
1147 | 1249 | ||
1148 | inode->i_size = le64_to_cpu(fe->informationLength); | 1250 | inode->i_size = le64_to_cpu(fe->informationLength); |
1149 | UDF_I_LENEXTENTS(inode) = inode->i_size; | 1251 | iinfo->i_lenExtents = inode->i_size; |
1150 | 1252 | ||
1151 | inode->i_mode = udf_convert_permissions(fe); | 1253 | inode->i_mode = udf_convert_permissions(fe); |
1152 | inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask; | 1254 | inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask; |
1153 | 1255 | ||
1154 | if (UDF_I_EFE(inode) == 0) { | 1256 | if (iinfo->i_efe == 0) { |
1155 | inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << | 1257 | inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << |
1156 | (inode->i_sb->s_blocksize_bits - 9); | 1258 | (inode->i_sb->s_blocksize_bits - 9); |
1157 | 1259 | ||
@@ -1160,7 +1262,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1160 | inode->i_atime.tv_sec = convtime; | 1262 | inode->i_atime.tv_sec = convtime; |
1161 | inode->i_atime.tv_nsec = convtime_usec * 1000; | 1263 | inode->i_atime.tv_nsec = convtime_usec * 1000; |
1162 | } else { | 1264 | } else { |
1163 | inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb); | 1265 | inode->i_atime = sbi->s_record_time; |
1164 | } | 1266 | } |
1165 | 1267 | ||
1166 | if (udf_stamp_to_time(&convtime, &convtime_usec, | 1268 | if (udf_stamp_to_time(&convtime, &convtime_usec, |
@@ -1168,7 +1270,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1168 | inode->i_mtime.tv_sec = convtime; | 1270 | inode->i_mtime.tv_sec = convtime; |
1169 | inode->i_mtime.tv_nsec = convtime_usec * 1000; | 1271 | inode->i_mtime.tv_nsec = convtime_usec * 1000; |
1170 | } else { | 1272 | } else { |
1171 | inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb); | 1273 | inode->i_mtime = sbi->s_record_time; |
1172 | } | 1274 | } |
1173 | 1275 | ||
1174 | if (udf_stamp_to_time(&convtime, &convtime_usec, | 1276 | if (udf_stamp_to_time(&convtime, &convtime_usec, |
@@ -1176,13 +1278,13 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1176 | inode->i_ctime.tv_sec = convtime; | 1278 | inode->i_ctime.tv_sec = convtime; |
1177 | inode->i_ctime.tv_nsec = convtime_usec * 1000; | 1279 | inode->i_ctime.tv_nsec = convtime_usec * 1000; |
1178 | } else { | 1280 | } else { |
1179 | inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb); | 1281 | inode->i_ctime = sbi->s_record_time; |
1180 | } | 1282 | } |
1181 | 1283 | ||
1182 | UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID); | 1284 | iinfo->i_unique = le64_to_cpu(fe->uniqueID); |
1183 | UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr); | 1285 | iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr); |
1184 | UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs); | 1286 | iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs); |
1185 | offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode); | 1287 | offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr; |
1186 | } else { | 1288 | } else { |
1187 | inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << | 1289 | inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) << |
1188 | (inode->i_sb->s_blocksize_bits - 9); | 1290 | (inode->i_sb->s_blocksize_bits - 9); |
@@ -1192,7 +1294,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1192 | inode->i_atime.tv_sec = convtime; | 1294 | inode->i_atime.tv_sec = convtime; |
1193 | inode->i_atime.tv_nsec = convtime_usec * 1000; | 1295 | inode->i_atime.tv_nsec = convtime_usec * 1000; |
1194 | } else { | 1296 | } else { |
1195 | inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb); | 1297 | inode->i_atime = sbi->s_record_time; |
1196 | } | 1298 | } |
1197 | 1299 | ||
1198 | if (udf_stamp_to_time(&convtime, &convtime_usec, | 1300 | if (udf_stamp_to_time(&convtime, &convtime_usec, |
@@ -1200,15 +1302,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1200 | inode->i_mtime.tv_sec = convtime; | 1302 | inode->i_mtime.tv_sec = convtime; |
1201 | inode->i_mtime.tv_nsec = convtime_usec * 1000; | 1303 | inode->i_mtime.tv_nsec = convtime_usec * 1000; |
1202 | } else { | 1304 | } else { |
1203 | inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb); | 1305 | inode->i_mtime = sbi->s_record_time; |
1204 | } | 1306 | } |
1205 | 1307 | ||
1206 | if (udf_stamp_to_time(&convtime, &convtime_usec, | 1308 | if (udf_stamp_to_time(&convtime, &convtime_usec, |
1207 | lets_to_cpu(efe->createTime))) { | 1309 | lets_to_cpu(efe->createTime))) { |
1208 | UDF_I_CRTIME(inode).tv_sec = convtime; | 1310 | iinfo->i_crtime.tv_sec = convtime; |
1209 | UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000; | 1311 | iinfo->i_crtime.tv_nsec = convtime_usec * 1000; |
1210 | } else { | 1312 | } else { |
1211 | UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb); | 1313 | iinfo->i_crtime = sbi->s_record_time; |
1212 | } | 1314 | } |
1213 | 1315 | ||
1214 | if (udf_stamp_to_time(&convtime, &convtime_usec, | 1316 | if (udf_stamp_to_time(&convtime, &convtime_usec, |
@@ -1216,13 +1318,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1216 | inode->i_ctime.tv_sec = convtime; | 1318 | inode->i_ctime.tv_sec = convtime; |
1217 | inode->i_ctime.tv_nsec = convtime_usec * 1000; | 1319 | inode->i_ctime.tv_nsec = convtime_usec * 1000; |
1218 | } else { | 1320 | } else { |
1219 | inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb); | 1321 | inode->i_ctime = sbi->s_record_time; |
1220 | } | 1322 | } |
1221 | 1323 | ||
1222 | UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID); | 1324 | iinfo->i_unique = le64_to_cpu(efe->uniqueID); |
1223 | UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr); | 1325 | iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr); |
1224 | UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs); | 1326 | iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); |
1225 | offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode); | 1327 | offset = sizeof(struct extendedFileEntry) + |
1328 | iinfo->i_lenEAttr; | ||
1226 | } | 1329 | } |
1227 | 1330 | ||
1228 | switch (fe->icbTag.fileType) { | 1331 | switch (fe->icbTag.fileType) { |
@@ -1235,7 +1338,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1235 | case ICBTAG_FILE_TYPE_REALTIME: | 1338 | case ICBTAG_FILE_TYPE_REALTIME: |
1236 | case ICBTAG_FILE_TYPE_REGULAR: | 1339 | case ICBTAG_FILE_TYPE_REGULAR: |
1237 | case ICBTAG_FILE_TYPE_UNDEF: | 1340 | case ICBTAG_FILE_TYPE_UNDEF: |
1238 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) | 1341 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
1239 | inode->i_data.a_ops = &udf_adinicb_aops; | 1342 | inode->i_data.a_ops = &udf_adinicb_aops; |
1240 | else | 1343 | else |
1241 | inode->i_data.a_ops = &udf_aops; | 1344 | inode->i_data.a_ops = &udf_aops; |
@@ -1261,31 +1364,33 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1261 | inode->i_mode = S_IFLNK | S_IRWXUGO; | 1364 | inode->i_mode = S_IFLNK | S_IRWXUGO; |
1262 | break; | 1365 | break; |
1263 | default: | 1366 | default: |
1264 | printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n", | 1367 | printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown " |
1265 | inode->i_ino, fe->icbTag.fileType); | 1368 | "file type=%d\n", inode->i_ino, |
1369 | fe->icbTag.fileType); | ||
1266 | make_bad_inode(inode); | 1370 | make_bad_inode(inode); |
1267 | return; | 1371 | return; |
1268 | } | 1372 | } |
1269 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { | 1373 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { |
1270 | struct deviceSpec *dsea = (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); | 1374 | struct deviceSpec *dsea = |
1375 | (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1); | ||
1271 | if (dsea) { | 1376 | if (dsea) { |
1272 | init_special_inode(inode, inode->i_mode, | 1377 | init_special_inode(inode, inode->i_mode, |
1273 | MKDEV(le32_to_cpu(dsea->majorDeviceIdent), | 1378 | MKDEV(le32_to_cpu(dsea->majorDeviceIdent), |
1274 | le32_to_cpu(dsea->minorDeviceIdent))); | 1379 | le32_to_cpu(dsea->minorDeviceIdent))); |
1275 | /* Developer ID ??? */ | 1380 | /* Developer ID ??? */ |
1276 | } else { | 1381 | } else |
1277 | make_bad_inode(inode); | 1382 | make_bad_inode(inode); |
1278 | } | ||
1279 | } | 1383 | } |
1280 | } | 1384 | } |
1281 | 1385 | ||
1282 | static int udf_alloc_i_data(struct inode *inode, size_t size) | 1386 | static int udf_alloc_i_data(struct inode *inode, size_t size) |
1283 | { | 1387 | { |
1284 | UDF_I_DATA(inode) = kmalloc(size, GFP_KERNEL); | 1388 | struct udf_inode_info *iinfo = UDF_I(inode); |
1389 | iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL); | ||
1285 | 1390 | ||
1286 | if (!UDF_I_DATA(inode)) { | 1391 | if (!iinfo->i_ext.i_data) { |
1287 | printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) no free memory\n", | 1392 | printk(KERN_ERR "udf:udf_alloc_i_data (ino %ld) " |
1288 | inode->i_ino); | 1393 | "no free memory\n", inode->i_ino); |
1289 | return -ENOMEM; | 1394 | return -ENOMEM; |
1290 | } | 1395 | } |
1291 | 1396 | ||
@@ -1301,12 +1406,12 @@ static mode_t udf_convert_permissions(struct fileEntry *fe) | |||
1301 | permissions = le32_to_cpu(fe->permissions); | 1406 | permissions = le32_to_cpu(fe->permissions); |
1302 | flags = le16_to_cpu(fe->icbTag.flags); | 1407 | flags = le16_to_cpu(fe->icbTag.flags); |
1303 | 1408 | ||
1304 | mode = (( permissions ) & S_IRWXO) | | 1409 | mode = ((permissions) & S_IRWXO) | |
1305 | (( permissions >> 2 ) & S_IRWXG) | | 1410 | ((permissions >> 2) & S_IRWXG) | |
1306 | (( permissions >> 4 ) & S_IRWXU) | | 1411 | ((permissions >> 4) & S_IRWXU) | |
1307 | (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | | 1412 | ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) | |
1308 | (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | | 1413 | ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) | |
1309 | (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0); | 1414 | ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0); |
1310 | 1415 | ||
1311 | return mode; | 1416 | return mode; |
1312 | } | 1417 | } |
@@ -1350,11 +1455,15 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1350 | uint32_t udfperms; | 1455 | uint32_t udfperms; |
1351 | uint16_t icbflags; | 1456 | uint16_t icbflags; |
1352 | uint16_t crclen; | 1457 | uint16_t crclen; |
1353 | int i; | ||
1354 | kernel_timestamp cpu_time; | 1458 | kernel_timestamp cpu_time; |
1355 | int err = 0; | 1459 | int err = 0; |
1460 | struct udf_sb_info *sbi = UDF_SB(inode->i_sb); | ||
1461 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; | ||
1462 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
1356 | 1463 | ||
1357 | bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0)); | 1464 | bh = udf_tread(inode->i_sb, |
1465 | udf_get_lb_pblock(inode->i_sb, | ||
1466 | iinfo->i_location, 0)); | ||
1358 | if (!bh) { | 1467 | if (!bh) { |
1359 | udf_debug("bread failure\n"); | 1468 | udf_debug("bread failure\n"); |
1360 | return -EIO; | 1469 | return -EIO; |
@@ -1365,23 +1474,24 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1365 | fe = (struct fileEntry *)bh->b_data; | 1474 | fe = (struct fileEntry *)bh->b_data; |
1366 | efe = (struct extendedFileEntry *)bh->b_data; | 1475 | efe = (struct extendedFileEntry *)bh->b_data; |
1367 | 1476 | ||
1368 | if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE) { | 1477 | if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) { |
1369 | struct unallocSpaceEntry *use = | 1478 | struct unallocSpaceEntry *use = |
1370 | (struct unallocSpaceEntry *)bh->b_data; | 1479 | (struct unallocSpaceEntry *)bh->b_data; |
1371 | 1480 | ||
1372 | use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); | 1481 | use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); |
1373 | memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), | 1482 | memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), |
1374 | inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); | 1483 | iinfo->i_ext.i_data, inode->i_sb->s_blocksize - |
1375 | crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) - sizeof(tag); | 1484 | sizeof(struct unallocSpaceEntry)); |
1376 | use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); | 1485 | crclen = sizeof(struct unallocSpaceEntry) + |
1486 | iinfo->i_lenAlloc - sizeof(tag); | ||
1487 | use->descTag.tagLocation = cpu_to_le32( | ||
1488 | iinfo->i_location. | ||
1489 | logicalBlockNum); | ||
1377 | use->descTag.descCRCLength = cpu_to_le16(crclen); | 1490 | use->descTag.descCRCLength = cpu_to_le16(crclen); |
1378 | use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0)); | 1491 | use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + |
1379 | 1492 | sizeof(tag), crclen, | |
1380 | use->descTag.tagChecksum = 0; | 1493 | 0)); |
1381 | for (i = 0; i < 16; i++) { | 1494 | use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); |
1382 | if (i != 4) | ||
1383 | use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i]; | ||
1384 | } | ||
1385 | 1495 | ||
1386 | mark_buffer_dirty(bh); | 1496 | mark_buffer_dirty(bh); |
1387 | brelse(bh); | 1497 | brelse(bh); |
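The open-coded loop removed above (sum the 16 descriptor-tag bytes while skipping byte 4, which holds the checksum itself) is replaced by a call to the new udf_tag_checksum() helper. Reconstructed from that loop, the helper presumably amounts to:

	#include <stdint.h>
	#include <stddef.h>

	/* Reconstruction of what udf_tag_checksum() must compute, taken
	 * directly from the loop being removed: a UDF descriptor tag is
	 * 16 bytes and byte 4 is the checksum field, so it is skipped. */
	#define UDF_TAG_SIZE 16

	static uint8_t udf_tag_checksum_sketch(const void *tag)
	{
		const uint8_t *data = tag;
		uint8_t checksum = 0;
		size_t i;

		for (i = 0; i < UDF_TAG_SIZE; i++)
			if (i != 4)	/* byte 4 holds the checksum itself */
				checksum += data[i];
		return checksum;
	}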
@@ -1398,14 +1508,14 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1398 | else | 1508 | else |
1399 | fe->gid = cpu_to_le32(inode->i_gid); | 1509 | fe->gid = cpu_to_le32(inode->i_gid); |
1400 | 1510 | ||
1401 | udfperms = ((inode->i_mode & S_IRWXO) ) | | 1511 | udfperms = ((inode->i_mode & S_IRWXO)) | |
1402 | ((inode->i_mode & S_IRWXG) << 2) | | 1512 | ((inode->i_mode & S_IRWXG) << 2) | |
1403 | ((inode->i_mode & S_IRWXU) << 4); | 1513 | ((inode->i_mode & S_IRWXU) << 4); |
1404 | 1514 | ||
1405 | udfperms |= (le32_to_cpu(fe->permissions) & | 1515 | udfperms |= (le32_to_cpu(fe->permissions) & |
1406 | (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | | 1516 | (FE_PERM_O_DELETE | FE_PERM_O_CHATTR | |
1407 | FE_PERM_G_DELETE | FE_PERM_G_CHATTR | | 1517 | FE_PERM_G_DELETE | FE_PERM_G_CHATTR | |
1408 | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); | 1518 | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); |
1409 | fe->permissions = cpu_to_le32(udfperms); | 1519 | fe->permissions = cpu_to_le32(udfperms); |
1410 | 1520 | ||
1411 | if (S_ISDIR(inode->i_mode)) | 1521 | if (S_ISDIR(inode->i_mode)) |
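udf_convert_permissions() (a few hunks up) and the udfperms expression above are inverse mappings: UDF keeps the other/group/owner rwx triples at bits 0-2, 5-7 and 10-12, so converting is just the pair of shifts shown in the diff. A self-contained round-trip example (the setuid/setgid/sticky flags and the UDF DELETE/CHATTR bits are deliberately left out):

	#include <stdint.h>
	#include <assert.h>

	#define S_IRWXU 0700
	#define S_IRWXG 0070
	#define S_IRWXO 0007

	/* POSIX mode -> UDF permission word (matches the shifts above). */
	static uint32_t mode_to_udfperms(uint32_t mode)
	{
		return (mode & S_IRWXO) |
		       ((mode & S_IRWXG) << 2) |
		       ((mode & S_IRWXU) << 4);
	}

	/* UDF permission word -> POSIX mode (matches udf_convert_permissions). */
	static uint32_t udfperms_to_mode(uint32_t perms)
	{
		return (perms & S_IRWXO) |
		       ((perms >> 2) & S_IRWXG) |
		       ((perms >> 4) & S_IRWXU);
	}

	int main(void)
	{
		assert(mode_to_udfperms(0755) == 0x1ca5);
		assert(udfperms_to_mode(0x1ca5) == 0755);
		return 0;
	}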
@@ -1426,8 +1536,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1426 | sizeof(regid), 12, 0x3); | 1536 | sizeof(regid), 12, 0x3); |
1427 | dsea->attrType = cpu_to_le32(12); | 1537 | dsea->attrType = cpu_to_le32(12); |
1428 | dsea->attrSubtype = 1; | 1538 | dsea->attrSubtype = 1; |
1429 | dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) + | 1539 | dsea->attrLength = cpu_to_le32( |
1430 | sizeof(regid)); | 1540 | sizeof(struct deviceSpec) + |
1541 | sizeof(regid)); | ||
1431 | dsea->impUseLength = cpu_to_le32(sizeof(regid)); | 1542 | dsea->impUseLength = cpu_to_le32(sizeof(regid)); |
1432 | } | 1543 | } |
1433 | eid = (regid *)dsea->impUse; | 1544 | eid = (regid *)dsea->impUse; |
@@ -1439,12 +1550,13 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1439 | dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); | 1550 | dsea->minorDeviceIdent = cpu_to_le32(iminor(inode)); |
1440 | } | 1551 | } |
1441 | 1552 | ||
1442 | if (UDF_I_EFE(inode) == 0) { | 1553 | if (iinfo->i_efe == 0) { |
1443 | memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), | 1554 | memcpy(bh->b_data + sizeof(struct fileEntry), |
1555 | iinfo->i_ext.i_data, | ||
1444 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); | 1556 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); |
1445 | fe->logicalBlocksRecorded = cpu_to_le64( | 1557 | fe->logicalBlocksRecorded = cpu_to_le64( |
1446 | (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >> | 1558 | (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> |
1447 | (inode->i_sb->s_blocksize_bits - 9)); | 1559 | (blocksize_bits - 9)); |
1448 | 1560 | ||
1449 | if (udf_time_to_stamp(&cpu_time, inode->i_atime)) | 1561 | if (udf_time_to_stamp(&cpu_time, inode->i_atime)) |
1450 | fe->accessTime = cpu_to_lets(cpu_time); | 1562 | fe->accessTime = cpu_to_lets(cpu_time); |
@@ -1456,40 +1568,41 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1456 | strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); | 1568 | strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER); |
1457 | fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 1569 | fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
1458 | fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 1570 | fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; |
1459 | fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode)); | 1571 | fe->uniqueID = cpu_to_le64(iinfo->i_unique); |
1460 | fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode)); | 1572 | fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); |
1461 | fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); | 1573 | fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); |
1462 | fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); | 1574 | fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE); |
1463 | crclen = sizeof(struct fileEntry); | 1575 | crclen = sizeof(struct fileEntry); |
1464 | } else { | 1576 | } else { |
1465 | memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), | 1577 | memcpy(bh->b_data + sizeof(struct extendedFileEntry), |
1466 | inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry)); | 1578 | iinfo->i_ext.i_data, |
1579 | inode->i_sb->s_blocksize - | ||
1580 | sizeof(struct extendedFileEntry)); | ||
1467 | efe->objectSize = cpu_to_le64(inode->i_size); | 1581 | efe->objectSize = cpu_to_le64(inode->i_size); |
1468 | efe->logicalBlocksRecorded = cpu_to_le64( | 1582 | efe->logicalBlocksRecorded = cpu_to_le64( |
1469 | (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >> | 1583 | (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >> |
1470 | (inode->i_sb->s_blocksize_bits - 9)); | 1584 | (blocksize_bits - 9)); |
1471 | 1585 | ||
1472 | if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec || | 1586 | if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec || |
1473 | (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec && | 1587 | (iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec && |
1474 | UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec)) { | 1588 | iinfo->i_crtime.tv_nsec > inode->i_atime.tv_nsec)) |
1475 | UDF_I_CRTIME(inode) = inode->i_atime; | 1589 | iinfo->i_crtime = inode->i_atime; |
1476 | } | 1590 | |
1477 | if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec || | 1591 | if (iinfo->i_crtime.tv_sec > inode->i_mtime.tv_sec || |
1478 | (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec && | 1592 | (iinfo->i_crtime.tv_sec == inode->i_mtime.tv_sec && |
1479 | UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec)) { | 1593 | iinfo->i_crtime.tv_nsec > inode->i_mtime.tv_nsec)) |
1480 | UDF_I_CRTIME(inode) = inode->i_mtime; | 1594 | iinfo->i_crtime = inode->i_mtime; |
1481 | } | 1595 | |
1482 | if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec || | 1596 | if (iinfo->i_crtime.tv_sec > inode->i_ctime.tv_sec || |
1483 | (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec && | 1597 | (iinfo->i_crtime.tv_sec == inode->i_ctime.tv_sec && |
1484 | UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec)) { | 1598 | iinfo->i_crtime.tv_nsec > inode->i_ctime.tv_nsec)) |
1485 | UDF_I_CRTIME(inode) = inode->i_ctime; | 1599 | iinfo->i_crtime = inode->i_ctime; |
1486 | } | ||
1487 | 1600 | ||
1488 | if (udf_time_to_stamp(&cpu_time, inode->i_atime)) | 1601 | if (udf_time_to_stamp(&cpu_time, inode->i_atime)) |
1489 | efe->accessTime = cpu_to_lets(cpu_time); | 1602 | efe->accessTime = cpu_to_lets(cpu_time); |
1490 | if (udf_time_to_stamp(&cpu_time, inode->i_mtime)) | 1603 | if (udf_time_to_stamp(&cpu_time, inode->i_mtime)) |
1491 | efe->modificationTime = cpu_to_lets(cpu_time); | 1604 | efe->modificationTime = cpu_to_lets(cpu_time); |
1492 | if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode))) | 1605 | if (udf_time_to_stamp(&cpu_time, iinfo->i_crtime)) |
1493 | efe->createTime = cpu_to_lets(cpu_time); | 1606 | efe->createTime = cpu_to_lets(cpu_time); |
1494 | if (udf_time_to_stamp(&cpu_time, inode->i_ctime)) | 1607 | if (udf_time_to_stamp(&cpu_time, inode->i_ctime)) |
1495 | efe->attrTime = cpu_to_lets(cpu_time); | 1608 | efe->attrTime = cpu_to_lets(cpu_time); |
@@ -1498,13 +1611,13 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1498 | strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); | 1611 | strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER); |
1499 | efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 1612 | efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
1500 | efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 1613 | efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; |
1501 | efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode)); | 1614 | efe->uniqueID = cpu_to_le64(iinfo->i_unique); |
1502 | efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode)); | 1615 | efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr); |
1503 | efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode)); | 1616 | efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc); |
1504 | efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); | 1617 | efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); |
1505 | crclen = sizeof(struct extendedFileEntry); | 1618 | crclen = sizeof(struct extendedFileEntry); |
1506 | } | 1619 | } |
1507 | if (UDF_I_STRAT4096(inode)) { | 1620 | if (iinfo->i_strat4096) { |
1508 | fe->icbTag.strategyType = cpu_to_le16(4096); | 1621 | fe->icbTag.strategyType = cpu_to_le16(4096); |
1509 | fe->icbTag.strategyParameter = cpu_to_le16(1); | 1622 | fe->icbTag.strategyParameter = cpu_to_le16(1); |
1510 | fe->icbTag.numEntries = cpu_to_le16(2); | 1623 | fe->icbTag.numEntries = cpu_to_le16(2); |
@@ -1528,7 +1641,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1528 | else if (S_ISSOCK(inode->i_mode)) | 1641 | else if (S_ISSOCK(inode->i_mode)) |
1529 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; | 1642 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET; |
1530 | 1643 | ||
1531 | icbflags = UDF_I_ALLOCTYPE(inode) | | 1644 | icbflags = iinfo->i_alloc_type | |
1532 | ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | | 1645 | ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) | |
1533 | ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | | 1646 | ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) | |
1534 | ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) | | 1647 | ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) | |
@@ -1537,29 +1650,28 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1537 | ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); | 1650 | ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY)); |
1538 | 1651 | ||
1539 | fe->icbTag.flags = cpu_to_le16(icbflags); | 1652 | fe->icbTag.flags = cpu_to_le16(icbflags); |
1540 | if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) | 1653 | if (sbi->s_udfrev >= 0x0200) |
1541 | fe->descTag.descVersion = cpu_to_le16(3); | 1654 | fe->descTag.descVersion = cpu_to_le16(3); |
1542 | else | 1655 | else |
1543 | fe->descTag.descVersion = cpu_to_le16(2); | 1656 | fe->descTag.descVersion = cpu_to_le16(2); |
1544 | fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); | 1657 | fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); |
1545 | fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); | 1658 | fe->descTag.tagLocation = cpu_to_le32( |
1546 | crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag); | 1659 | iinfo->i_location.logicalBlockNum); |
1660 | crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - | ||
1661 | sizeof(tag); | ||
1547 | fe->descTag.descCRCLength = cpu_to_le16(crclen); | 1662 | fe->descTag.descCRCLength = cpu_to_le16(crclen); |
1548 | fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0)); | 1663 | fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), |
1549 | 1664 | crclen, 0)); | |
1550 | fe->descTag.tagChecksum = 0; | 1665 | fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); |
1551 | for (i = 0; i < 16; i++) { | ||
1552 | if (i != 4) | ||
1553 | fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i]; | ||
1554 | } | ||
1555 | 1666 | ||
1556 | /* write the data blocks */ | 1667 | /* write the data blocks */ |
1557 | mark_buffer_dirty(bh); | 1668 | mark_buffer_dirty(bh); |
1558 | if (do_sync) { | 1669 | if (do_sync) { |
1559 | sync_dirty_buffer(bh); | 1670 | sync_dirty_buffer(bh); |
1560 | if (buffer_req(bh) && !buffer_uptodate(bh)) { | 1671 | if (buffer_req(bh) && !buffer_uptodate(bh)) { |
1561 | printk("IO error syncing udf inode [%s:%08lx]\n", | 1672 | printk(KERN_WARNING "IO error syncing udf inode " |
1562 | inode->i_sb->s_id, inode->i_ino); | 1673 | "[%s:%08lx]\n", inode->i_sb->s_id, |
1674 | inode->i_ino); | ||
1563 | err = -EIO; | 1675 | err = -EIO; |
1564 | } | 1676 | } |
1565 | } | 1677 | } |
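Note on the hunks above: the open-coded 16-byte checksum loop in udf_update_inode() is replaced by the udf_tag_checksum() helper this patch adds to fs/udf/misc.c (see the end of that file below), the remaining UDF_I_*() and UDF_SB_*() accessor macros give way to fields on the cached iinfo and sbi pointers, and the I/O error printk gains an explicit KERN_WARNING level. The removed loop and the helper compute the same value, the sum of the sixteen tag-header bytes with byte 4 (the checksum field itself) skipped; a minimal restatement of what both do:

    /* equivalent of the removed loop; byte 4 holds the checksum itself */
    uint8_t sum = 0;
    int i;

    for (i = 0; i < sizeof(tag); i++)       /* sizeof(tag) == 16 */
            if (i != 4)
                    sum += ((uint8_t *)&(fe->descTag))[i];
    fe->descTag.tagChecksum = sum;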
@@ -1577,7 +1689,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino) | |||
1577 | return NULL; | 1689 | return NULL; |
1578 | 1690 | ||
1579 | if (inode->i_state & I_NEW) { | 1691 | if (inode->i_state & I_NEW) { |
1580 | memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr)); | 1692 | memcpy(&UDF_I(inode)->i_location, &ino, sizeof(kernel_lb_addr)); |
1581 | __udf_read_inode(inode); | 1693 | __udf_read_inode(inode); |
1582 | unlock_new_inode(inode); | 1694 | unlock_new_inode(inode); |
1583 | } | 1695 | } |
@@ -1585,7 +1697,8 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino) | |||
1585 | if (is_bad_inode(inode)) | 1697 | if (is_bad_inode(inode)) |
1586 | goto out_iput; | 1698 | goto out_iput; |
1587 | 1699 | ||
1588 | if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) { | 1700 | if (ino.logicalBlockNum >= UDF_SB(sb)-> |
1701 | s_partmaps[ino.partitionReferenceNum].s_partition_len) { | ||
1589 | udf_debug("block=%d, partition=%d out of range\n", | 1702 | udf_debug("block=%d, partition=%d out of range\n", |
1590 | ino.logicalBlockNum, ino.partitionReferenceNum); | 1703 | ino.logicalBlockNum, ino.partitionReferenceNum); |
1591 | make_bad_inode(inode); | 1704 | make_bad_inode(inode); |
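The range check in udf_iget() now reaches into the superblock info directly rather than through the UDF_SB_PARTLEN() macro that this series removes. Assuming the old macro simply hid the same indexing (its definition lives in udf_sb.h and is not shown here), the two spellings are:

    /* old, via accessor macro (assumed expansion) */
    if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum))

    /* new, explicit field access */
    if (ino.logicalBlockNum >=
        UDF_SB(sb)->s_partmaps[ino.partitionReferenceNum].s_partition_len)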
@@ -1599,7 +1712,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino) | |||
1599 | return NULL; | 1712 | return NULL; |
1600 | } | 1713 | } |
1601 | 1714 | ||
1602 | int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | 1715 | int8_t udf_add_aext(struct inode *inode, struct extent_position *epos, |
1603 | kernel_lb_addr eloc, uint32_t elen, int inc) | 1716 | kernel_lb_addr eloc, uint32_t elen, int inc) |
1604 | { | 1717 | { |
1605 | int adsize; | 1718 | int adsize; |
@@ -1608,15 +1721,18 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1608 | struct allocExtDesc *aed; | 1721 | struct allocExtDesc *aed; |
1609 | int8_t etype; | 1722 | int8_t etype; |
1610 | uint8_t *ptr; | 1723 | uint8_t *ptr; |
1724 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
1611 | 1725 | ||
1612 | if (!epos->bh) | 1726 | if (!epos->bh) |
1613 | ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); | 1727 | ptr = iinfo->i_ext.i_data + epos->offset - |
1728 | udf_file_entry_alloc_offset(inode) + | ||
1729 | iinfo->i_lenEAttr; | ||
1614 | else | 1730 | else |
1615 | ptr = epos->bh->b_data + epos->offset; | 1731 | ptr = epos->bh->b_data + epos->offset; |
1616 | 1732 | ||
1617 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 1733 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
1618 | adsize = sizeof(short_ad); | 1734 | adsize = sizeof(short_ad); |
1619 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 1735 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
1620 | adsize = sizeof(long_ad); | 1736 | adsize = sizeof(long_ad); |
1621 | else | 1737 | else |
1622 | return -1; | 1738 | return -1; |
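The ptr computation at the top of udf_add_aext() (and the identical ones in udf_write_aext() and udf_current_aext() further down) is only re-wrapped here, but the new iinfo spelling makes it easier to follow. Reading it as a re-basing of epos->offset, an offset within the on-disk file entry, into i_ext.i_data, which holds only the extended-attribute and allocation-descriptor bytes, it is roughly:

    /* sketch of the in-ICB case (epos->bh == NULL); an interpretation, not new code */
    ptr = iinfo->i_ext.i_data                        /* EA + alloc-desc bytes of the FE */
          + (epos->offset
             - udf_file_entry_alloc_offset(inode))   /* offset relative to the alloc descs */
          + iinfo->i_lenEAttr;                       /* rebased past the extended attributes */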
@@ -1627,15 +1743,16 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1627 | int err, loffset; | 1743 | int err, loffset; |
1628 | kernel_lb_addr obloc = epos->block; | 1744 | kernel_lb_addr obloc = epos->block; |
1629 | 1745 | ||
1630 | if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, | 1746 | epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL, |
1631 | obloc.partitionReferenceNum, | 1747 | obloc.partitionReferenceNum, |
1632 | obloc.logicalBlockNum, &err))) { | 1748 | obloc.logicalBlockNum, &err); |
1749 | if (!epos->block.logicalBlockNum) | ||
1633 | return -1; | 1750 | return -1; |
1634 | } | 1751 | nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, |
1635 | if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb, | 1752 | epos->block, |
1636 | epos->block, 0)))) { | 1753 | 0)); |
1754 | if (!nbh) | ||
1637 | return -1; | 1755 | return -1; |
1638 | } | ||
1639 | lock_buffer(nbh); | 1756 | lock_buffer(nbh); |
1640 | memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); | 1757 | memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); |
1641 | set_buffer_uptodate(nbh); | 1758 | set_buffer_uptodate(nbh); |
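Several hunks in udf_add_aext() apply the same mechanical transformation: assignments are pulled out of if () conditions so the test reads on its own line, in line with the usual kernel CodingStyle preference. Using the buffer-head lookup above as the example:

    /* before: the allocation is buried inside the condition */
    if (!(nbh = udf_tgetblk(inode->i_sb,
                            udf_get_lb_pblock(inode->i_sb, epos->block, 0)))) {
            return -1;
    }

    /* after: assign first, then test */
    nbh = udf_tgetblk(inode->i_sb,
                      udf_get_lb_pblock(inode->i_sb, epos->block, 0));
    if (!nbh)
            return -1;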
@@ -1644,7 +1761,8 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1644 | 1761 | ||
1645 | aed = (struct allocExtDesc *)(nbh->b_data); | 1762 | aed = (struct allocExtDesc *)(nbh->b_data); |
1646 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) | 1763 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) |
1647 | aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum); | 1764 | aed->previousAllocExtLocation = |
1765 | cpu_to_le32(obloc.logicalBlockNum); | ||
1648 | if (epos->offset + adsize > inode->i_sb->s_blocksize) { | 1766 | if (epos->offset + adsize > inode->i_sb->s_blocksize) { |
1649 | loffset = epos->offset; | 1767 | loffset = epos->offset; |
1650 | aed->lengthAllocDescs = cpu_to_le32(adsize); | 1768 | aed->lengthAllocDescs = cpu_to_le32(adsize); |
@@ -1661,24 +1779,26 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1661 | if (epos->bh) { | 1779 | if (epos->bh) { |
1662 | aed = (struct allocExtDesc *)epos->bh->b_data; | 1780 | aed = (struct allocExtDesc *)epos->bh->b_data; |
1663 | aed->lengthAllocDescs = | 1781 | aed->lengthAllocDescs = |
1664 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 1782 | cpu_to_le32(le32_to_cpu( |
1783 | aed->lengthAllocDescs) + adsize); | ||
1665 | } else { | 1784 | } else { |
1666 | UDF_I_LENALLOC(inode) += adsize; | 1785 | iinfo->i_lenAlloc += adsize; |
1667 | mark_inode_dirty(inode); | 1786 | mark_inode_dirty(inode); |
1668 | } | 1787 | } |
1669 | } | 1788 | } |
1670 | if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) | 1789 | if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200) |
1671 | udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1, | 1790 | udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1, |
1672 | epos->block.logicalBlockNum, sizeof(tag)); | 1791 | epos->block.logicalBlockNum, sizeof(tag)); |
1673 | else | 1792 | else |
1674 | udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, | 1793 | udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1, |
1675 | epos->block.logicalBlockNum, sizeof(tag)); | 1794 | epos->block.logicalBlockNum, sizeof(tag)); |
1676 | switch (UDF_I_ALLOCTYPE(inode)) { | 1795 | switch (iinfo->i_alloc_type) { |
1677 | case ICBTAG_FLAG_AD_SHORT: | 1796 | case ICBTAG_FLAG_AD_SHORT: |
1678 | sad = (short_ad *)sptr; | 1797 | sad = (short_ad *)sptr; |
1679 | sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | | 1798 | sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS | |
1680 | inode->i_sb->s_blocksize); | 1799 | inode->i_sb->s_blocksize); |
1681 | sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum); | 1800 | sad->extPosition = |
1801 | cpu_to_le32(epos->block.logicalBlockNum); | ||
1682 | break; | 1802 | break; |
1683 | case ICBTAG_FLAG_AD_LONG: | 1803 | case ICBTAG_FLAG_AD_LONG: |
1684 | lad = (long_ad *)sptr; | 1804 | lad = (long_ad *)sptr; |
@@ -1690,10 +1810,11 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1690 | } | 1810 | } |
1691 | if (epos->bh) { | 1811 | if (epos->bh) { |
1692 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || | 1812 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
1693 | UDF_SB_UDFREV(inode->i_sb) >= 0x0201) | 1813 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) |
1694 | udf_update_tag(epos->bh->b_data, loffset); | 1814 | udf_update_tag(epos->bh->b_data, loffset); |
1695 | else | 1815 | else |
1696 | udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); | 1816 | udf_update_tag(epos->bh->b_data, |
1817 | sizeof(struct allocExtDesc)); | ||
1697 | mark_buffer_dirty_inode(epos->bh, inode); | 1818 | mark_buffer_dirty_inode(epos->bh, inode); |
1698 | brelse(epos->bh); | 1819 | brelse(epos->bh); |
1699 | } else { | 1820 | } else { |
@@ -1705,36 +1826,43 @@ int8_t udf_add_aext(struct inode * inode, struct extent_position * epos, | |||
1705 | etype = udf_write_aext(inode, epos, eloc, elen, inc); | 1826 | etype = udf_write_aext(inode, epos, eloc, elen, inc); |
1706 | 1827 | ||
1707 | if (!epos->bh) { | 1828 | if (!epos->bh) { |
1708 | UDF_I_LENALLOC(inode) += adsize; | 1829 | iinfo->i_lenAlloc += adsize; |
1709 | mark_inode_dirty(inode); | 1830 | mark_inode_dirty(inode); |
1710 | } else { | 1831 | } else { |
1711 | aed = (struct allocExtDesc *)epos->bh->b_data; | 1832 | aed = (struct allocExtDesc *)epos->bh->b_data; |
1712 | aed->lengthAllocDescs = | 1833 | aed->lengthAllocDescs = |
1713 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize); | 1834 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + |
1714 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201) | 1835 | adsize); |
1715 | udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize)); | 1836 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
1837 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) | ||
1838 | udf_update_tag(epos->bh->b_data, | ||
1839 | epos->offset + (inc ? 0 : adsize)); | ||
1716 | else | 1840 | else |
1717 | udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc)); | 1841 | udf_update_tag(epos->bh->b_data, |
1842 | sizeof(struct allocExtDesc)); | ||
1718 | mark_buffer_dirty_inode(epos->bh, inode); | 1843 | mark_buffer_dirty_inode(epos->bh, inode); |
1719 | } | 1844 | } |
1720 | 1845 | ||
1721 | return etype; | 1846 | return etype; |
1722 | } | 1847 | } |
1723 | 1848 | ||
1724 | int8_t udf_write_aext(struct inode * inode, struct extent_position * epos, | 1849 | int8_t udf_write_aext(struct inode *inode, struct extent_position *epos, |
1725 | kernel_lb_addr eloc, uint32_t elen, int inc) | 1850 | kernel_lb_addr eloc, uint32_t elen, int inc) |
1726 | { | 1851 | { |
1727 | int adsize; | 1852 | int adsize; |
1728 | uint8_t *ptr; | 1853 | uint8_t *ptr; |
1729 | short_ad *sad; | 1854 | short_ad *sad; |
1730 | long_ad *lad; | 1855 | long_ad *lad; |
1856 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
1731 | 1857 | ||
1732 | if (!epos->bh) | 1858 | if (!epos->bh) |
1733 | ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); | 1859 | ptr = iinfo->i_ext.i_data + epos->offset - |
1860 | udf_file_entry_alloc_offset(inode) + | ||
1861 | iinfo->i_lenEAttr; | ||
1734 | else | 1862 | else |
1735 | ptr = epos->bh->b_data + epos->offset; | 1863 | ptr = epos->bh->b_data + epos->offset; |
1736 | 1864 | ||
1737 | switch (UDF_I_ALLOCTYPE(inode)) { | 1865 | switch (iinfo->i_alloc_type) { |
1738 | case ICBTAG_FLAG_AD_SHORT: | 1866 | case ICBTAG_FLAG_AD_SHORT: |
1739 | sad = (short_ad *)ptr; | 1867 | sad = (short_ad *)ptr; |
1740 | sad->extLength = cpu_to_le32(elen); | 1868 | sad->extLength = cpu_to_le32(elen); |
@@ -1754,10 +1882,12 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos, | |||
1754 | 1882 | ||
1755 | if (epos->bh) { | 1883 | if (epos->bh) { |
1756 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || | 1884 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
1757 | UDF_SB_UDFREV(inode->i_sb) >= 0x0201) { | 1885 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) { |
1758 | struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data; | 1886 | struct allocExtDesc *aed = |
1887 | (struct allocExtDesc *)epos->bh->b_data; | ||
1759 | udf_update_tag(epos->bh->b_data, | 1888 | udf_update_tag(epos->bh->b_data, |
1760 | le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc)); | 1889 | le32_to_cpu(aed->lengthAllocDescs) + |
1890 | sizeof(struct allocExtDesc)); | ||
1761 | } | 1891 | } |
1762 | mark_buffer_dirty_inode(epos->bh, inode); | 1892 | mark_buffer_dirty_inode(epos->bh, inode); |
1763 | } else { | 1893 | } else { |
@@ -1770,19 +1900,21 @@ int8_t udf_write_aext(struct inode * inode, struct extent_position * epos, | |||
1770 | return (elen >> 30); | 1900 | return (elen >> 30); |
1771 | } | 1901 | } |
1772 | 1902 | ||
1773 | int8_t udf_next_aext(struct inode * inode, struct extent_position * epos, | 1903 | int8_t udf_next_aext(struct inode *inode, struct extent_position *epos, |
1774 | kernel_lb_addr * eloc, uint32_t * elen, int inc) | 1904 | kernel_lb_addr *eloc, uint32_t *elen, int inc) |
1775 | { | 1905 | { |
1776 | int8_t etype; | 1906 | int8_t etype; |
1777 | 1907 | ||
1778 | while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == | 1908 | while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) == |
1779 | (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { | 1909 | (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { |
1910 | int block; | ||
1780 | epos->block = *eloc; | 1911 | epos->block = *eloc; |
1781 | epos->offset = sizeof(struct allocExtDesc); | 1912 | epos->offset = sizeof(struct allocExtDesc); |
1782 | brelse(epos->bh); | 1913 | brelse(epos->bh); |
1783 | if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0)))) { | 1914 | block = udf_get_lb_pblock(inode->i_sb, epos->block, 0); |
1784 | udf_debug("reading block %d failed!\n", | 1915 | epos->bh = udf_tread(inode->i_sb, block); |
1785 | udf_get_lb_pblock(inode->i_sb, epos->block, 0)); | 1916 | if (!epos->bh) { |
1917 | udf_debug("reading block %d failed!\n", block); | ||
1786 | return -1; | 1918 | return -1; |
1787 | } | 1919 | } |
1788 | } | 1920 | } |
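In udf_next_aext() the physical block number is now computed once into a local, so the failure path's udf_debug() reports exactly the block that udf_tread() failed on instead of recomputing it. The reshaped loop body ends with:

    block = udf_get_lb_pblock(inode->i_sb, epos->block, 0);
    epos->bh = udf_tread(inode->i_sb, block);
    if (!epos->bh) {
            udf_debug("reading block %d failed!\n", block);
            return -1;
    }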
@@ -1790,47 +1922,55 @@ int8_t udf_next_aext(struct inode * inode, struct extent_position * epos, | |||
1790 | return etype; | 1922 | return etype; |
1791 | } | 1923 | } |
1792 | 1924 | ||
1793 | int8_t udf_current_aext(struct inode * inode, struct extent_position * epos, | 1925 | int8_t udf_current_aext(struct inode *inode, struct extent_position *epos, |
1794 | kernel_lb_addr * eloc, uint32_t * elen, int inc) | 1926 | kernel_lb_addr *eloc, uint32_t *elen, int inc) |
1795 | { | 1927 | { |
1796 | int alen; | 1928 | int alen; |
1797 | int8_t etype; | 1929 | int8_t etype; |
1798 | uint8_t *ptr; | 1930 | uint8_t *ptr; |
1799 | short_ad *sad; | 1931 | short_ad *sad; |
1800 | long_ad *lad; | 1932 | long_ad *lad; |
1801 | 1933 | struct udf_inode_info *iinfo = UDF_I(inode); | |
1802 | 1934 | ||
1803 | if (!epos->bh) { | 1935 | if (!epos->bh) { |
1804 | if (!epos->offset) | 1936 | if (!epos->offset) |
1805 | epos->offset = udf_file_entry_alloc_offset(inode); | 1937 | epos->offset = udf_file_entry_alloc_offset(inode); |
1806 | ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode); | 1938 | ptr = iinfo->i_ext.i_data + epos->offset - |
1807 | alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode); | 1939 | udf_file_entry_alloc_offset(inode) + |
1940 | iinfo->i_lenEAttr; | ||
1941 | alen = udf_file_entry_alloc_offset(inode) + | ||
1942 | iinfo->i_lenAlloc; | ||
1808 | } else { | 1943 | } else { |
1809 | if (!epos->offset) | 1944 | if (!epos->offset) |
1810 | epos->offset = sizeof(struct allocExtDesc); | 1945 | epos->offset = sizeof(struct allocExtDesc); |
1811 | ptr = epos->bh->b_data + epos->offset; | 1946 | ptr = epos->bh->b_data + epos->offset; |
1812 | alen = sizeof(struct allocExtDesc) + | 1947 | alen = sizeof(struct allocExtDesc) + |
1813 | le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs); | 1948 | le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)-> |
1949 | lengthAllocDescs); | ||
1814 | } | 1950 | } |
1815 | 1951 | ||
1816 | switch (UDF_I_ALLOCTYPE(inode)) { | 1952 | switch (iinfo->i_alloc_type) { |
1817 | case ICBTAG_FLAG_AD_SHORT: | 1953 | case ICBTAG_FLAG_AD_SHORT: |
1818 | if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc))) | 1954 | sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc); |
1955 | if (!sad) | ||
1819 | return -1; | 1956 | return -1; |
1820 | etype = le32_to_cpu(sad->extLength) >> 30; | 1957 | etype = le32_to_cpu(sad->extLength) >> 30; |
1821 | eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); | 1958 | eloc->logicalBlockNum = le32_to_cpu(sad->extPosition); |
1822 | eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; | 1959 | eloc->partitionReferenceNum = |
1960 | iinfo->i_location.partitionReferenceNum; | ||
1823 | *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; | 1961 | *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK; |
1824 | break; | 1962 | break; |
1825 | case ICBTAG_FLAG_AD_LONG: | 1963 | case ICBTAG_FLAG_AD_LONG: |
1826 | if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc))) | 1964 | lad = udf_get_filelongad(ptr, alen, &epos->offset, inc); |
1965 | if (!lad) | ||
1827 | return -1; | 1966 | return -1; |
1828 | etype = le32_to_cpu(lad->extLength) >> 30; | 1967 | etype = le32_to_cpu(lad->extLength) >> 30; |
1829 | *eloc = lelb_to_cpu(lad->extLocation); | 1968 | *eloc = lelb_to_cpu(lad->extLocation); |
1830 | *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; | 1969 | *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK; |
1831 | break; | 1970 | break; |
1832 | default: | 1971 | default: |
1833 | udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode)); | 1972 | udf_debug("alloc_type = %d unsupported\n", |
1973 | iinfo->i_alloc_type); | ||
1834 | return -1; | 1974 | return -1; |
1835 | } | 1975 | } |
1836 | 1976 | ||
@@ -1858,22 +1998,24 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos, | |||
1858 | return (nelen >> 30); | 1998 | return (nelen >> 30); |
1859 | } | 1999 | } |
1860 | 2000 | ||
1861 | int8_t udf_delete_aext(struct inode * inode, struct extent_position epos, | 2001 | int8_t udf_delete_aext(struct inode *inode, struct extent_position epos, |
1862 | kernel_lb_addr eloc, uint32_t elen) | 2002 | kernel_lb_addr eloc, uint32_t elen) |
1863 | { | 2003 | { |
1864 | struct extent_position oepos; | 2004 | struct extent_position oepos; |
1865 | int adsize; | 2005 | int adsize; |
1866 | int8_t etype; | 2006 | int8_t etype; |
1867 | struct allocExtDesc *aed; | 2007 | struct allocExtDesc *aed; |
2008 | struct udf_inode_info *iinfo; | ||
1868 | 2009 | ||
1869 | if (epos.bh) { | 2010 | if (epos.bh) { |
1870 | get_bh(epos.bh); | 2011 | get_bh(epos.bh); |
1871 | get_bh(epos.bh); | 2012 | get_bh(epos.bh); |
1872 | } | 2013 | } |
1873 | 2014 | ||
1874 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 2015 | iinfo = UDF_I(inode); |
2016 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) | ||
1875 | adsize = sizeof(short_ad); | 2017 | adsize = sizeof(short_ad); |
1876 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 2018 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
1877 | adsize = sizeof(long_ad); | 2019 | adsize = sizeof(long_ad); |
1878 | else | 2020 | else |
1879 | adsize = 0; | 2021 | adsize = 0; |
@@ -1900,33 +2042,39 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos, | |||
1900 | udf_write_aext(inode, &oepos, eloc, elen, 1); | 2042 | udf_write_aext(inode, &oepos, eloc, elen, 1); |
1901 | udf_write_aext(inode, &oepos, eloc, elen, 1); | 2043 | udf_write_aext(inode, &oepos, eloc, elen, 1); |
1902 | if (!oepos.bh) { | 2044 | if (!oepos.bh) { |
1903 | UDF_I_LENALLOC(inode) -= (adsize * 2); | 2045 | iinfo->i_lenAlloc -= (adsize * 2); |
1904 | mark_inode_dirty(inode); | 2046 | mark_inode_dirty(inode); |
1905 | } else { | 2047 | } else { |
1906 | aed = (struct allocExtDesc *)oepos.bh->b_data; | 2048 | aed = (struct allocExtDesc *)oepos.bh->b_data; |
1907 | aed->lengthAllocDescs = | 2049 | aed->lengthAllocDescs = |
1908 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2 * adsize)); | 2050 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - |
2051 | (2 * adsize)); | ||
1909 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || | 2052 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
1910 | UDF_SB_UDFREV(inode->i_sb) >= 0x0201) | 2053 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) |
1911 | udf_update_tag(oepos.bh->b_data, oepos.offset - (2 * adsize)); | 2054 | udf_update_tag(oepos.bh->b_data, |
2055 | oepos.offset - (2 * adsize)); | ||
1912 | else | 2056 | else |
1913 | udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); | 2057 | udf_update_tag(oepos.bh->b_data, |
2058 | sizeof(struct allocExtDesc)); | ||
1914 | mark_buffer_dirty_inode(oepos.bh, inode); | 2059 | mark_buffer_dirty_inode(oepos.bh, inode); |
1915 | } | 2060 | } |
1916 | } else { | 2061 | } else { |
1917 | udf_write_aext(inode, &oepos, eloc, elen, 1); | 2062 | udf_write_aext(inode, &oepos, eloc, elen, 1); |
1918 | if (!oepos.bh) { | 2063 | if (!oepos.bh) { |
1919 | UDF_I_LENALLOC(inode) -= adsize; | 2064 | iinfo->i_lenAlloc -= adsize; |
1920 | mark_inode_dirty(inode); | 2065 | mark_inode_dirty(inode); |
1921 | } else { | 2066 | } else { |
1922 | aed = (struct allocExtDesc *)oepos.bh->b_data; | 2067 | aed = (struct allocExtDesc *)oepos.bh->b_data; |
1923 | aed->lengthAllocDescs = | 2068 | aed->lengthAllocDescs = |
1924 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize); | 2069 | cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - |
2070 | adsize); | ||
1925 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || | 2071 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
1926 | UDF_SB_UDFREV(inode->i_sb) >= 0x0201) | 2072 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) |
1927 | udf_update_tag(oepos.bh->b_data, epos.offset - adsize); | 2073 | udf_update_tag(oepos.bh->b_data, |
2074 | epos.offset - adsize); | ||
1928 | else | 2075 | else |
1929 | udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc)); | 2076 | udf_update_tag(oepos.bh->b_data, |
2077 | sizeof(struct allocExtDesc)); | ||
1930 | mark_buffer_dirty_inode(oepos.bh, inode); | 2078 | mark_buffer_dirty_inode(oepos.bh, inode); |
1931 | } | 2079 | } |
1932 | } | 2080 | } |
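udf_delete_aext() keeps adjusting lengthAllocDescs with the usual little-endian read-modify-write: convert to CPU order, add or subtract, convert back. A tiny helper would make the intent explicit; the patch keeps the open-coded form, and the helper below is purely illustrative (the name is made up):

    /* hypothetical helper, not part of this patch */
    static inline void udf_le32_add(__le32 *v, int delta)
    {
            *v = cpu_to_le32(le32_to_cpu(*v) + delta);
    }

    /* would replace e.g. the two-descriptor case above */
    udf_le32_add(&aed->lengthAllocDescs, -(2 * adsize));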
@@ -1937,34 +2085,38 @@ int8_t udf_delete_aext(struct inode * inode, struct extent_position epos, | |||
1937 | return (elen >> 30); | 2085 | return (elen >> 30); |
1938 | } | 2086 | } |
1939 | 2087 | ||
1940 | int8_t inode_bmap(struct inode * inode, sector_t block, | 2088 | int8_t inode_bmap(struct inode *inode, sector_t block, |
1941 | struct extent_position * pos, kernel_lb_addr * eloc, | 2089 | struct extent_position *pos, kernel_lb_addr *eloc, |
1942 | uint32_t * elen, sector_t * offset) | 2090 | uint32_t *elen, sector_t *offset) |
1943 | { | 2091 | { |
2092 | unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; | ||
1944 | loff_t lbcount = 0, bcount = | 2093 | loff_t lbcount = 0, bcount = |
1945 | (loff_t) block << inode->i_sb->s_blocksize_bits; | 2094 | (loff_t) block << blocksize_bits; |
1946 | int8_t etype; | 2095 | int8_t etype; |
2096 | struct udf_inode_info *iinfo; | ||
1947 | 2097 | ||
1948 | if (block < 0) { | 2098 | if (block < 0) { |
1949 | printk(KERN_ERR "udf: inode_bmap: block < 0\n"); | 2099 | printk(KERN_ERR "udf: inode_bmap: block < 0\n"); |
1950 | return -1; | 2100 | return -1; |
1951 | } | 2101 | } |
1952 | 2102 | ||
2103 | iinfo = UDF_I(inode); | ||
1953 | pos->offset = 0; | 2104 | pos->offset = 0; |
1954 | pos->block = UDF_I_LOCATION(inode); | 2105 | pos->block = iinfo->i_location; |
1955 | pos->bh = NULL; | 2106 | pos->bh = NULL; |
1956 | *elen = 0; | 2107 | *elen = 0; |
1957 | 2108 | ||
1958 | do { | 2109 | do { |
1959 | if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1) { | 2110 | etype = udf_next_aext(inode, pos, eloc, elen, 1); |
1960 | *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits; | 2111 | if (etype == -1) { |
1961 | UDF_I_LENEXTENTS(inode) = lbcount; | 2112 | *offset = (bcount - lbcount) >> blocksize_bits; |
2113 | iinfo->i_lenExtents = lbcount; | ||
1962 | return -1; | 2114 | return -1; |
1963 | } | 2115 | } |
1964 | lbcount += *elen; | 2116 | lbcount += *elen; |
1965 | } while (lbcount <= bcount); | 2117 | } while (lbcount <= bcount); |
1966 | 2118 | ||
1967 | *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits; | 2119 | *offset = (bcount + *elen - lbcount) >> blocksize_bits; |
1968 | 2120 | ||
1969 | return etype; | 2121 | return etype; |
1970 | } | 2122 | } |
@@ -1979,7 +2131,8 @@ long udf_block_map(struct inode *inode, sector_t block) | |||
1979 | 2131 | ||
1980 | lock_kernel(); | 2132 | lock_kernel(); |
1981 | 2133 | ||
1982 | if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) | 2134 | if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == |
2135 | (EXT_RECORDED_ALLOCATED >> 30)) | ||
1983 | ret = udf_get_lb_pblock(inode->i_sb, eloc, offset); | 2136 | ret = udf_get_lb_pblock(inode->i_sb, eloc, offset); |
1984 | else | 2137 | else |
1985 | ret = 0; | 2138 | ret = 0; |
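That closes the fs/udf/inode.c changes. For reference, the mapping inode_bmap() performs (now with blocksize_bits cached in a local) can be followed with illustrative numbers: with 2048-byte blocks (blocksize_bits = 11), asking for logical block 5 sets bcount = 5 << 11 = 10240; the loop accumulates extent lengths into lbcount until it exceeds bcount, and *offset = (bcount + *elen - lbcount) >> 11 is then the block index inside the final extent. With a first extent of 8192 bytes and a second of 16384, the walk stops at lbcount = 24576 with *elen = 16384, giving *offset = (10240 + 16384 - 24576) >> 11 = 1, i.e. the second block of the second extent, which udf_block_map() then turns into a physical block via udf_get_lb_pblock().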
diff --git a/fs/udf/misc.c b/fs/udf/misc.c index 15297deb5051..a1d6da0caf71 100644 --- a/fs/udf/misc.c +++ b/fs/udf/misc.c | |||
@@ -51,18 +51,18 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, | |||
51 | uint8_t *ea = NULL, *ad = NULL; | 51 | uint8_t *ea = NULL, *ad = NULL; |
52 | int offset; | 52 | int offset; |
53 | uint16_t crclen; | 53 | uint16_t crclen; |
54 | int i; | 54 | struct udf_inode_info *iinfo = UDF_I(inode); |
55 | 55 | ||
56 | ea = UDF_I_DATA(inode); | 56 | ea = iinfo->i_ext.i_data; |
57 | if (UDF_I_LENEATTR(inode)) { | 57 | if (iinfo->i_lenEAttr) { |
58 | ad = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); | 58 | ad = iinfo->i_ext.i_data + iinfo->i_lenEAttr; |
59 | } else { | 59 | } else { |
60 | ad = ea; | 60 | ad = ea; |
61 | size += sizeof(struct extendedAttrHeaderDesc); | 61 | size += sizeof(struct extendedAttrHeaderDesc); |
62 | } | 62 | } |
63 | 63 | ||
64 | offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - | 64 | offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - |
65 | UDF_I_LENALLOC(inode); | 65 | iinfo->i_lenAlloc; |
66 | 66 | ||
67 | /* TODO - Check for FreeEASpace */ | 67 | /* TODO - Check for FreeEASpace */ |
68 | 68 | ||
@@ -70,69 +70,80 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, | |||
70 | struct extendedAttrHeaderDesc *eahd; | 70 | struct extendedAttrHeaderDesc *eahd; |
71 | eahd = (struct extendedAttrHeaderDesc *)ea; | 71 | eahd = (struct extendedAttrHeaderDesc *)ea; |
72 | 72 | ||
73 | if (UDF_I_LENALLOC(inode)) { | 73 | if (iinfo->i_lenAlloc) |
74 | memmove(&ad[size], ad, UDF_I_LENALLOC(inode)); | 74 | memmove(&ad[size], ad, iinfo->i_lenAlloc); |
75 | } | ||
76 | 75 | ||
77 | if (UDF_I_LENEATTR(inode)) { | 76 | if (iinfo->i_lenEAttr) { |
78 | /* check checksum/crc */ | 77 | /* check checksum/crc */ |
79 | if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD || | 78 | if (eahd->descTag.tagIdent != |
80 | le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) { | 79 | cpu_to_le16(TAG_IDENT_EAHD) || |
80 | le32_to_cpu(eahd->descTag.tagLocation) != | ||
81 | iinfo->i_location.logicalBlockNum) | ||
81 | return NULL; | 82 | return NULL; |
82 | } | ||
83 | } else { | 83 | } else { |
84 | struct udf_sb_info *sbi = UDF_SB(inode->i_sb); | ||
85 | |||
84 | size -= sizeof(struct extendedAttrHeaderDesc); | 86 | size -= sizeof(struct extendedAttrHeaderDesc); |
85 | UDF_I_LENEATTR(inode) += sizeof(struct extendedAttrHeaderDesc); | 87 | iinfo->i_lenEAttr += |
88 | sizeof(struct extendedAttrHeaderDesc); | ||
86 | eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); | 89 | eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); |
87 | if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200) | 90 | if (sbi->s_udfrev >= 0x0200) |
88 | eahd->descTag.descVersion = cpu_to_le16(3); | 91 | eahd->descTag.descVersion = cpu_to_le16(3); |
89 | else | 92 | else |
90 | eahd->descTag.descVersion = cpu_to_le16(2); | 93 | eahd->descTag.descVersion = cpu_to_le16(2); |
91 | eahd->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb)); | 94 | eahd->descTag.tagSerialNum = |
92 | eahd->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum); | 95 | cpu_to_le16(sbi->s_serial_number); |
96 | eahd->descTag.tagLocation = cpu_to_le32( | ||
97 | iinfo->i_location.logicalBlockNum); | ||
93 | eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); | 98 | eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); |
94 | eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); | 99 | eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); |
95 | } | 100 | } |
96 | 101 | ||
97 | offset = UDF_I_LENEATTR(inode); | 102 | offset = iinfo->i_lenEAttr; |
98 | if (type < 2048) { | 103 | if (type < 2048) { |
99 | if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) { | 104 | if (le32_to_cpu(eahd->appAttrLocation) < |
100 | uint32_t aal = le32_to_cpu(eahd->appAttrLocation); | 105 | iinfo->i_lenEAttr) { |
106 | uint32_t aal = | ||
107 | le32_to_cpu(eahd->appAttrLocation); | ||
101 | memmove(&ea[offset - aal + size], | 108 | memmove(&ea[offset - aal + size], |
102 | &ea[aal], offset - aal); | 109 | &ea[aal], offset - aal); |
103 | offset -= aal; | 110 | offset -= aal; |
104 | eahd->appAttrLocation = cpu_to_le32(aal + size); | 111 | eahd->appAttrLocation = |
112 | cpu_to_le32(aal + size); | ||
105 | } | 113 | } |
106 | if (le32_to_cpu(eahd->impAttrLocation) < UDF_I_LENEATTR(inode)) { | 114 | if (le32_to_cpu(eahd->impAttrLocation) < |
107 | uint32_t ial = le32_to_cpu(eahd->impAttrLocation); | 115 | iinfo->i_lenEAttr) { |
116 | uint32_t ial = | ||
117 | le32_to_cpu(eahd->impAttrLocation); | ||
108 | memmove(&ea[offset - ial + size], | 118 | memmove(&ea[offset - ial + size], |
109 | &ea[ial], offset - ial); | 119 | &ea[ial], offset - ial); |
110 | offset -= ial; | 120 | offset -= ial; |
111 | eahd->impAttrLocation = cpu_to_le32(ial + size); | 121 | eahd->impAttrLocation = |
122 | cpu_to_le32(ial + size); | ||
112 | } | 123 | } |
113 | } else if (type < 65536) { | 124 | } else if (type < 65536) { |
114 | if (le32_to_cpu(eahd->appAttrLocation) < UDF_I_LENEATTR(inode)) { | 125 | if (le32_to_cpu(eahd->appAttrLocation) < |
115 | uint32_t aal = le32_to_cpu(eahd->appAttrLocation); | 126 | iinfo->i_lenEAttr) { |
127 | uint32_t aal = | ||
128 | le32_to_cpu(eahd->appAttrLocation); | ||
116 | memmove(&ea[offset - aal + size], | 129 | memmove(&ea[offset - aal + size], |
117 | &ea[aal], offset - aal); | 130 | &ea[aal], offset - aal); |
118 | offset -= aal; | 131 | offset -= aal; |
119 | eahd->appAttrLocation = cpu_to_le32(aal + size); | 132 | eahd->appAttrLocation = |
133 | cpu_to_le32(aal + size); | ||
120 | } | 134 | } |
121 | } | 135 | } |
122 | /* rewrite CRC + checksum of eahd */ | 136 | /* rewrite CRC + checksum of eahd */ |
123 | crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag); | 137 | crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag); |
124 | eahd->descTag.descCRCLength = cpu_to_le16(crclen); | 138 | eahd->descTag.descCRCLength = cpu_to_le16(crclen); |
125 | eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + | 139 | eahd->descTag.descCRC = cpu_to_le16(udf_crc((char *)eahd + |
126 | sizeof(tag), crclen, 0)); | 140 | sizeof(tag), crclen, 0)); |
127 | eahd->descTag.tagChecksum = 0; | 141 | eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag); |
128 | for (i = 0; i < 16; i++) | 142 | iinfo->i_lenEAttr += size; |
129 | if (i != 4) | ||
130 | eahd->descTag.tagChecksum += ((uint8_t *)&(eahd->descTag))[i]; | ||
131 | UDF_I_LENEATTR(inode) += size; | ||
132 | return (struct genericFormat *)&ea[offset]; | 143 | return (struct genericFormat *)&ea[offset]; |
133 | } | 144 | } |
134 | if (loc & 0x02) { | 145 | if (loc & 0x02) |
135 | } | 146 | ; |
136 | 147 | ||
137 | return NULL; | 148 | return NULL; |
138 | } | 149 | } |
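Two checks in udf_add_extendedattr() (and the matching one in udf_get_extendedattr() below) are flipped so that the constant, not the on-disk field, is byte-swapped: comparing the raw __le16 field against cpu_to_le16(TAG_IDENT_EAHD) is equivalent to converting the field with le16_to_cpu() first, but swapping a constant folds away at compile time. The idiom:

    /* old: swap the field on every check */
    if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD)
            return NULL;

    /* new: swap the constant, which the compiler folds */
    if (eahd->descTag.tagIdent != cpu_to_le16(TAG_IDENT_EAHD))
            return NULL;

The odd-looking "if (loc & 0x02) ;" at the end simply replaces an empty braced body with a bare semicolon; that branch remains a placeholder.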
@@ -143,18 +154,20 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, | |||
143 | struct genericFormat *gaf; | 154 | struct genericFormat *gaf; |
144 | uint8_t *ea = NULL; | 155 | uint8_t *ea = NULL; |
145 | uint32_t offset; | 156 | uint32_t offset; |
157 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
146 | 158 | ||
147 | ea = UDF_I_DATA(inode); | 159 | ea = iinfo->i_ext.i_data; |
148 | 160 | ||
149 | if (UDF_I_LENEATTR(inode)) { | 161 | if (iinfo->i_lenEAttr) { |
150 | struct extendedAttrHeaderDesc *eahd; | 162 | struct extendedAttrHeaderDesc *eahd; |
151 | eahd = (struct extendedAttrHeaderDesc *)ea; | 163 | eahd = (struct extendedAttrHeaderDesc *)ea; |
152 | 164 | ||
153 | /* check checksum/crc */ | 165 | /* check checksum/crc */ |
154 | if (le16_to_cpu(eahd->descTag.tagIdent) != TAG_IDENT_EAHD || | 166 | if (eahd->descTag.tagIdent != |
155 | le32_to_cpu(eahd->descTag.tagLocation) != UDF_I_LOCATION(inode).logicalBlockNum) { | 167 | cpu_to_le16(TAG_IDENT_EAHD) || |
168 | le32_to_cpu(eahd->descTag.tagLocation) != | ||
169 | iinfo->i_location.logicalBlockNum) | ||
156 | return NULL; | 170 | return NULL; |
157 | } | ||
158 | 171 | ||
159 | if (type < 2048) | 172 | if (type < 2048) |
160 | offset = sizeof(struct extendedAttrHeaderDesc); | 173 | offset = sizeof(struct extendedAttrHeaderDesc); |
@@ -163,9 +176,10 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, | |||
163 | else | 176 | else |
164 | offset = le32_to_cpu(eahd->appAttrLocation); | 177 | offset = le32_to_cpu(eahd->appAttrLocation); |
165 | 178 | ||
166 | while (offset < UDF_I_LENEATTR(inode)) { | 179 | while (offset < iinfo->i_lenEAttr) { |
167 | gaf = (struct genericFormat *)&ea[offset]; | 180 | gaf = (struct genericFormat *)&ea[offset]; |
168 | if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) | 181 | if (le32_to_cpu(gaf->attrType) == type && |
182 | gaf->attrSubtype == subtype) | ||
169 | return gaf; | 183 | return gaf; |
170 | else | 184 | else |
171 | offset += le32_to_cpu(gaf->attrLength); | 185 | offset += le32_to_cpu(gaf->attrLength); |
@@ -186,21 +200,20 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, | |||
186 | * Written, tested, and released. | 200 | * Written, tested, and released. |
187 | */ | 201 | */ |
188 | struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, | 202 | struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, |
189 | uint32_t location, uint16_t * ident) | 203 | uint32_t location, uint16_t *ident) |
190 | { | 204 | { |
191 | tag *tag_p; | 205 | tag *tag_p; |
192 | struct buffer_head *bh = NULL; | 206 | struct buffer_head *bh = NULL; |
193 | register uint8_t checksum; | 207 | struct udf_sb_info *sbi = UDF_SB(sb); |
194 | register int i; | ||
195 | 208 | ||
196 | /* Read the block */ | 209 | /* Read the block */ |
197 | if (block == 0xFFFFFFFF) | 210 | if (block == 0xFFFFFFFF) |
198 | return NULL; | 211 | return NULL; |
199 | 212 | ||
200 | bh = udf_tread(sb, block + UDF_SB_SESSION(sb)); | 213 | bh = udf_tread(sb, block + sbi->s_session); |
201 | if (!bh) { | 214 | if (!bh) { |
202 | udf_debug("block=%d, location=%d: read failed\n", | 215 | udf_debug("block=%d, location=%d: read failed\n", |
203 | block + UDF_SB_SESSION(sb), location); | 216 | block + sbi->s_session, location); |
204 | return NULL; | 217 | return NULL; |
205 | } | 218 | } |
206 | 219 | ||
@@ -210,24 +223,20 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, | |||
210 | 223 | ||
211 | if (location != le32_to_cpu(tag_p->tagLocation)) { | 224 | if (location != le32_to_cpu(tag_p->tagLocation)) { |
212 | udf_debug("location mismatch block %u, tag %u != %u\n", | 225 | udf_debug("location mismatch block %u, tag %u != %u\n", |
213 | block + UDF_SB_SESSION(sb), le32_to_cpu(tag_p->tagLocation), location); | 226 | block + sbi->s_session, |
227 | le32_to_cpu(tag_p->tagLocation), location); | ||
214 | goto error_out; | 228 | goto error_out; |
215 | } | 229 | } |
216 | 230 | ||
217 | /* Verify the tag checksum */ | 231 | /* Verify the tag checksum */ |
218 | checksum = 0U; | 232 | if (udf_tag_checksum(tag_p) != tag_p->tagChecksum) { |
219 | for (i = 0; i < 4; i++) | ||
220 | checksum += (uint8_t)(bh->b_data[i]); | ||
221 | for (i = 5; i < 16; i++) | ||
222 | checksum += (uint8_t)(bh->b_data[i]); | ||
223 | if (checksum != tag_p->tagChecksum) { | ||
224 | printk(KERN_ERR "udf: tag checksum failed block %d\n", block); | 233 | printk(KERN_ERR "udf: tag checksum failed block %d\n", block); |
225 | goto error_out; | 234 | goto error_out; |
226 | } | 235 | } |
227 | 236 | ||
228 | /* Verify the tag version */ | 237 | /* Verify the tag version */ |
229 | if (le16_to_cpu(tag_p->descVersion) != 0x0002U && | 238 | if (tag_p->descVersion != cpu_to_le16(0x0002U) && |
230 | le16_to_cpu(tag_p->descVersion) != 0x0003U) { | 239 | tag_p->descVersion != cpu_to_le16(0x0003U)) { |
231 | udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n", | 240 | udf_debug("tag version 0x%04x != 0x0002 || 0x0003 block %d\n", |
232 | le16_to_cpu(tag_p->descVersion), block); | 241 | le16_to_cpu(tag_p->descVersion), block); |
233 | goto error_out; | 242 | goto error_out; |
@@ -236,11 +245,11 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, | |||
236 | /* Verify the descriptor CRC */ | 245 | /* Verify the descriptor CRC */ |
237 | if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize || | 246 | if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize || |
238 | le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag), | 247 | le16_to_cpu(tag_p->descCRC) == udf_crc(bh->b_data + sizeof(tag), |
239 | le16_to_cpu(tag_p->descCRCLength), 0)) { | 248 | le16_to_cpu(tag_p->descCRCLength), 0)) |
240 | return bh; | 249 | return bh; |
241 | } | 250 | |
242 | udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", | 251 | udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", |
243 | block + UDF_SB_SESSION(sb), le16_to_cpu(tag_p->descCRC), | 252 | block + sbi->s_session, le16_to_cpu(tag_p->descCRC), |
244 | le16_to_cpu(tag_p->descCRCLength)); | 253 | le16_to_cpu(tag_p->descCRCLength)); |
245 | 254 | ||
246 | error_out: | 255 | error_out: |
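With its open-coded checksum loop gone, udf_read_tagged() performs four checks in sequence before handing back the buffer: (1) the recorded tagLocation must match the expected location, (2) udf_tag_checksum() over the 16-byte header must equal the stored tagChecksum, (3) descVersion must be 2 or 3 (compared against cpu_to_le16 constants, the same idiom as above), and (4) the descriptor CRC is verified when descCRCLength fits inside the block; an oversized descCRCLength skips the comparison and the buffer is returned as-is. Any failed check branches to error_out instead of returning the buffer.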
@@ -249,7 +258,7 @@ error_out: | |||
249 | } | 258 | } |
250 | 259 | ||
251 | struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, | 260 | struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, |
252 | uint32_t offset, uint16_t * ident) | 261 | uint32_t offset, uint16_t *ident) |
253 | { | 262 | { |
254 | return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset), | 263 | return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset), |
255 | loc.logicalBlockNum + offset, ident); | 264 | loc.logicalBlockNum + offset, ident); |
@@ -258,17 +267,11 @@ struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc, | |||
258 | void udf_update_tag(char *data, int length) | 267 | void udf_update_tag(char *data, int length) |
259 | { | 268 | { |
260 | tag *tptr = (tag *)data; | 269 | tag *tptr = (tag *)data; |
261 | int i; | ||
262 | |||
263 | length -= sizeof(tag); | 270 | length -= sizeof(tag); |
264 | 271 | ||
265 | tptr->tagChecksum = 0; | ||
266 | tptr->descCRCLength = cpu_to_le16(length); | 272 | tptr->descCRCLength = cpu_to_le16(length); |
267 | tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0)); | 273 | tptr->descCRC = cpu_to_le16(udf_crc(data + sizeof(tag), length, 0)); |
268 | 274 | tptr->tagChecksum = udf_tag_checksum(tptr); | |
269 | for (i = 0; i < 16; i++) | ||
270 | if (i != 4) | ||
271 | tptr->tagChecksum += (uint8_t)(data[i]); | ||
272 | } | 275 | } |
273 | 276 | ||
274 | void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, | 277 | void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, |
@@ -281,3 +284,14 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, | |||
281 | tptr->tagLocation = cpu_to_le32(loc); | 284 | tptr->tagLocation = cpu_to_le32(loc); |
282 | udf_update_tag(data, length); | 285 | udf_update_tag(data, length); |
283 | } | 286 | } |
287 | |||
288 | u8 udf_tag_checksum(const tag *t) | ||
289 | { | ||
290 | u8 *data = (u8 *)t; | ||
291 | u8 checksum = 0; | ||
292 | int i; | ||
293 | for (i = 0; i < sizeof(tag); ++i) | ||
294 | if (i != 4) /* position of checksum */ | ||
295 | checksum += data[i]; | ||
296 | return checksum; | ||
297 | } | ||
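The new udf_tag_checksum() helper closes out fs/udf/misc.c: it sums all sixteen bytes of the tag except byte 4, where the checksum itself lives. Its callers split into two groups, writers that seal a freshly built descriptor and readers that verify one; a minimal usage sketch (variable names are illustrative):

    tag *t = (tag *)bh->b_data;

    /* reader: verify a descriptor header */
    if (udf_tag_checksum(t) != t->tagChecksum)
            goto error_out;

    /* writer: fill the CRC fields first, then seal the header */
    t->descCRCLength = cpu_to_le16(crclen);
    t->descCRC = cpu_to_le16(udf_crc((char *)t + sizeof(tag), crclen, 0));
    t->tagChecksum = udf_tag_checksum(t);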
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index bec96a6b3343..112a5fb0b27b 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
@@ -43,12 +43,10 @@ static inline int udf_match(int len1, const char *name1, int len2, | |||
43 | 43 | ||
44 | int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, | 44 | int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, |
45 | struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh, | 45 | struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh, |
46 | uint8_t * impuse, uint8_t * fileident) | 46 | uint8_t *impuse, uint8_t *fileident) |
47 | { | 47 | { |
48 | uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag); | 48 | uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag); |
49 | uint16_t crc; | 49 | uint16_t crc; |
50 | uint8_t checksum = 0; | ||
51 | int i; | ||
52 | int offset; | 50 | int offset; |
53 | uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); | 51 | uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); |
54 | uint8_t lfi = cfi->lengthFileIdent; | 52 | uint8_t lfi = cfi->lengthFileIdent; |
@@ -56,7 +54,7 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, | |||
56 | sizeof(struct fileIdentDesc); | 54 | sizeof(struct fileIdentDesc); |
57 | int adinicb = 0; | 55 | int adinicb = 0; |
58 | 56 | ||
59 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) | 57 | if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
60 | adinicb = 1; | 58 | adinicb = 1; |
61 | 59 | ||
62 | offset = fibh->soffset + sizeof(struct fileIdentDesc); | 60 | offset = fibh->soffset + sizeof(struct fileIdentDesc); |
@@ -68,7 +66,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, | |||
68 | memcpy(fibh->ebh->b_data + offset, impuse, liu); | 66 | memcpy(fibh->ebh->b_data + offset, impuse, liu); |
69 | } else { | 67 | } else { |
70 | memcpy((uint8_t *)sfi->impUse, impuse, -offset); | 68 | memcpy((uint8_t *)sfi->impUse, impuse, -offset); |
71 | memcpy(fibh->ebh->b_data, impuse - offset, liu + offset); | 69 | memcpy(fibh->ebh->b_data, impuse - offset, |
70 | liu + offset); | ||
72 | } | 71 | } |
73 | } | 72 | } |
74 | 73 | ||
@@ -80,8 +79,10 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, | |||
80 | } else if (offset >= 0) { | 79 | } else if (offset >= 0) { |
81 | memcpy(fibh->ebh->b_data + offset, fileident, lfi); | 80 | memcpy(fibh->ebh->b_data + offset, fileident, lfi); |
82 | } else { | 81 | } else { |
83 | memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset); | 82 | memcpy((uint8_t *)sfi->fileIdent + liu, fileident, |
84 | memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset); | 83 | -offset); |
84 | memcpy(fibh->ebh->b_data, fileident - offset, | ||
85 | lfi + offset); | ||
85 | } | 86 | } |
86 | } | 87 | } |
87 | 88 | ||
@@ -101,27 +102,29 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, | |||
101 | 102 | ||
102 | if (fibh->sbh == fibh->ebh) { | 103 | if (fibh->sbh == fibh->ebh) { |
103 | crc = udf_crc((uint8_t *)sfi->impUse, | 104 | crc = udf_crc((uint8_t *)sfi->impUse, |
104 | crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc); | 105 | crclen + sizeof(tag) - |
106 | sizeof(struct fileIdentDesc), crc); | ||
105 | } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { | 107 | } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { |
106 | crc = udf_crc(fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset, | 108 | crc = udf_crc(fibh->ebh->b_data + |
107 | crclen + sizeof(tag) - sizeof(struct fileIdentDesc), crc); | 109 | sizeof(struct fileIdentDesc) + |
110 | fibh->soffset, | ||
111 | crclen + sizeof(tag) - | ||
112 | sizeof(struct fileIdentDesc), | ||
113 | crc); | ||
108 | } else { | 114 | } else { |
109 | crc = udf_crc((uint8_t *)sfi->impUse, | 115 | crc = udf_crc((uint8_t *)sfi->impUse, |
110 | -fibh->soffset - sizeof(struct fileIdentDesc), crc); | 116 | -fibh->soffset - sizeof(struct fileIdentDesc), |
117 | crc); | ||
111 | crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc); | 118 | crc = udf_crc(fibh->ebh->b_data, fibh->eoffset, crc); |
112 | } | 119 | } |
113 | 120 | ||
114 | cfi->descTag.descCRC = cpu_to_le16(crc); | 121 | cfi->descTag.descCRC = cpu_to_le16(crc); |
115 | cfi->descTag.descCRCLength = cpu_to_le16(crclen); | 122 | cfi->descTag.descCRCLength = cpu_to_le16(crclen); |
123 | cfi->descTag.tagChecksum = udf_tag_checksum(&cfi->descTag); | ||
116 | 124 | ||
117 | for (i = 0; i < 16; i++) { | ||
118 | if (i != 4) | ||
119 | checksum += ((uint8_t *)&cfi->descTag)[i]; | ||
120 | } | ||
121 | |||
122 | cfi->descTag.tagChecksum = checksum; | ||
123 | if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) { | 125 | if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) { |
124 | memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc)); | 126 | memcpy((uint8_t *)sfi, (uint8_t *)cfi, |
127 | sizeof(struct fileIdentDesc)); | ||
125 | } else { | 128 | } else { |
126 | memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset); | 129 | memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset); |
127 | memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset, | 130 | memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset, |
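The CRC computation in udf_write_fi() has three cases because a file identifier descriptor may straddle two buffer heads: offsets are measured relative to fibh->ebh, and fibh->soffset is negative when the descriptor starts in fibh->sbh and spills over. The re-wrapped code keeps that logic unchanged, and the header checksum is now produced by udf_tag_checksum() instead of the removed local loop and variables:

    /*
     * sbh: [ ............ | FID starts here ]   soffset < 0
     * ebh: [ FID continues | .............. ]   eoffset marks its end
     */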
@@ -155,26 +158,28 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, | |||
155 | uint32_t elen; | 158 | uint32_t elen; |
156 | sector_t offset; | 159 | sector_t offset; |
157 | struct extent_position epos = {}; | 160 | struct extent_position epos = {}; |
161 | struct udf_inode_info *dinfo = UDF_I(dir); | ||
158 | 162 | ||
159 | size = (udf_ext0_offset(dir) + dir->i_size) >> 2; | 163 | size = udf_ext0_offset(dir) + dir->i_size; |
160 | f_pos = (udf_ext0_offset(dir) >> 2); | 164 | f_pos = udf_ext0_offset(dir); |
161 | 165 | ||
162 | fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; | 166 | fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); |
163 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 167 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
164 | fibh->sbh = fibh->ebh = NULL; | 168 | fibh->sbh = fibh->ebh = NULL; |
165 | } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), | 169 | else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, |
166 | &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { | 170 | &epos, &eloc, &elen, &offset) == |
171 | (EXT_RECORDED_ALLOCATED >> 30)) { | ||
167 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); | 172 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); |
168 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { | 173 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { |
169 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) | 174 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
170 | epos.offset -= sizeof(short_ad); | 175 | epos.offset -= sizeof(short_ad); |
171 | else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) | 176 | else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
172 | epos.offset -= sizeof(long_ad); | 177 | epos.offset -= sizeof(long_ad); |
173 | } else { | 178 | } else |
174 | offset = 0; | 179 | offset = 0; |
175 | } | ||
176 | 180 | ||
177 | if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { | 181 | fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); |
182 | if (!fibh->sbh) { | ||
178 | brelse(epos.bh); | 183 | brelse(epos.bh); |
179 | return NULL; | 184 | return NULL; |
180 | } | 185 | } |
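The setup of udf_find_entry() drops the long-standing trick of tracking directory positions in 4-byte units: f_pos and size are now plain byte offsets, so the >> 2 and << 2 shifts and the (s_blocksize_bits - 2) adjustment disappear. Side by side (excerpted from the hunk above):

    /* old: positions counted in 4-byte units */
    size  = (udf_ext0_offset(dir) + dir->i_size) >> 2;
    f_pos = udf_ext0_offset(dir) >> 2;
    fibh->soffset = fibh->eoffset =
            (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2;
    /* ... inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), ...) */

    /* new: positions counted in bytes */
    size  = udf_ext0_offset(dir) + dir->i_size;
    f_pos = udf_ext0_offset(dir);
    fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
    /* ... inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, ...) */

udf_add_entry() further down gets the identical treatment.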
@@ -183,7 +188,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, | |||
183 | return NULL; | 188 | return NULL; |
184 | } | 189 | } |
185 | 190 | ||
186 | while ((f_pos < size)) { | 191 | while (f_pos < size) { |
187 | fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, | 192 | fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, |
188 | &elen, &offset); | 193 | &elen, &offset); |
189 | if (!fi) { | 194 | if (!fi) { |
@@ -202,14 +207,18 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, | |||
202 | } else { | 207 | } else { |
203 | int poffset; /* Unpaded ending offset */ | 208 | int poffset; /* Unpaded ending offset */ |
204 | 209 | ||
205 | poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; | 210 | poffset = fibh->soffset + sizeof(struct fileIdentDesc) + |
211 | liu + lfi; | ||
206 | 212 | ||
207 | if (poffset >= lfi) { | 213 | if (poffset >= lfi) |
208 | nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); | 214 | nameptr = (uint8_t *)(fibh->ebh->b_data + |
209 | } else { | 215 | poffset - lfi); |
216 | else { | ||
210 | nameptr = fname; | 217 | nameptr = fname; |
211 | memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); | 218 | memcpy(nameptr, fi->fileIdent + liu, |
212 | memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); | 219 | lfi - poffset); |
220 | memcpy(nameptr + lfi - poffset, | ||
221 | fibh->ebh->b_data, poffset); | ||
213 | } | 222 | } |
214 | } | 223 | } |
215 | 224 | ||
@@ -226,11 +235,11 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, | |||
226 | if (!lfi) | 235 | if (!lfi) |
227 | continue; | 236 | continue; |
228 | 237 | ||
229 | if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi))) { | 238 | flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); |
230 | if (udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) { | 239 | if (flen && udf_match(flen, fname, dentry->d_name.len, |
231 | brelse(epos.bh); | 240 | dentry->d_name.name)) { |
232 | return fi; | 241 | brelse(epos.bh); |
233 | } | 242 | return fi; |
234 | } | 243 | } |
235 | } | 244 | } |
236 | 245 | ||
@@ -291,16 +300,16 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, | |||
291 | if (!strncmp(dentry->d_name.name, ".B=", 3)) { | 300 | if (!strncmp(dentry->d_name.name, ".B=", 3)) { |
292 | kernel_lb_addr lb = { | 301 | kernel_lb_addr lb = { |
293 | .logicalBlockNum = 0, | 302 | .logicalBlockNum = 0, |
294 | .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3, | 303 | .partitionReferenceNum = |
295 | NULL, 0), | 304 | simple_strtoul(dentry->d_name.name + 3, |
305 | NULL, 0), | ||
296 | }; | 306 | }; |
297 | inode = udf_iget(dir->i_sb, lb); | 307 | inode = udf_iget(dir->i_sb, lb); |
298 | if (!inode) { | 308 | if (!inode) { |
299 | unlock_kernel(); | 309 | unlock_kernel(); |
300 | return ERR_PTR(-EACCES); | 310 | return ERR_PTR(-EACCES); |
301 | } | 311 | } |
302 | } | 312 | } else |
303 | else | ||
304 | #endif /* UDF_RECOVERY */ | 313 | #endif /* UDF_RECOVERY */ |
305 | 314 | ||
306 | if (udf_find_entry(dir, dentry, &fibh, &cfi)) { | 315 | if (udf_find_entry(dir, dentry, &fibh, &cfi)) { |
@@ -325,14 +334,14 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
325 | struct udf_fileident_bh *fibh, | 334 | struct udf_fileident_bh *fibh, |
326 | struct fileIdentDesc *cfi, int *err) | 335 | struct fileIdentDesc *cfi, int *err) |
327 | { | 336 | { |
328 | struct super_block *sb; | 337 | struct super_block *sb = dir->i_sb; |
329 | struct fileIdentDesc *fi = NULL; | 338 | struct fileIdentDesc *fi = NULL; |
330 | char name[UDF_NAME_LEN], fname[UDF_NAME_LEN]; | 339 | char name[UDF_NAME_LEN], fname[UDF_NAME_LEN]; |
331 | int namelen; | 340 | int namelen; |
332 | loff_t f_pos; | 341 | loff_t f_pos; |
333 | int flen; | 342 | int flen; |
334 | char *nameptr; | 343 | char *nameptr; |
335 | loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; | 344 | loff_t size = udf_ext0_offset(dir) + dir->i_size; |
336 | int nfidlen; | 345 | int nfidlen; |
337 | uint8_t lfi; | 346 | uint8_t lfi; |
338 | uint16_t liu; | 347 | uint16_t liu; |
@@ -341,16 +350,16 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
341 | uint32_t elen; | 350 | uint32_t elen; |
342 | sector_t offset; | 351 | sector_t offset; |
343 | struct extent_position epos = {}; | 352 | struct extent_position epos = {}; |
344 | 353 | struct udf_inode_info *dinfo; | |
345 | sb = dir->i_sb; | ||
346 | 354 | ||
347 | if (dentry) { | 355 | if (dentry) { |
348 | if (!dentry->d_name.len) { | 356 | if (!dentry->d_name.len) { |
349 | *err = -EINVAL; | 357 | *err = -EINVAL; |
350 | return NULL; | 358 | return NULL; |
351 | } | 359 | } |
352 | if (!(namelen = udf_put_filename(sb, dentry->d_name.name, name, | 360 | namelen = udf_put_filename(sb, dentry->d_name.name, name, |
353 | dentry->d_name.len))) { | 361 | dentry->d_name.len); |
362 | if (!namelen) { | ||
354 | *err = -ENAMETOOLONG; | 363 | *err = -ENAMETOOLONG; |
355 | return NULL; | 364 | return NULL; |
356 | } | 365 | } |
@@ -360,39 +369,40 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
360 | 369 | ||
361 | nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; | 370 | nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; |
362 | 371 | ||
363 | f_pos = (udf_ext0_offset(dir) >> 2); | 372 | f_pos = udf_ext0_offset(dir); |
364 | 373 | ||
365 | fibh->soffset = fibh->eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; | 374 | fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); |
366 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 375 | dinfo = UDF_I(dir); |
376 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
367 | fibh->sbh = fibh->ebh = NULL; | 377 | fibh->sbh = fibh->ebh = NULL; |
368 | } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), | 378 | else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, |
369 | &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { | 379 | &epos, &eloc, &elen, &offset) == |
380 | (EXT_RECORDED_ALLOCATED >> 30)) { | ||
370 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); | 381 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); |
371 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { | 382 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { |
372 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) | 383 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
373 | epos.offset -= sizeof(short_ad); | 384 | epos.offset -= sizeof(short_ad); |
374 | else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) | 385 | else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
375 | epos.offset -= sizeof(long_ad); | 386 | epos.offset -= sizeof(long_ad); |
376 | } else { | 387 | } else |
377 | offset = 0; | 388 | offset = 0; |
378 | } | ||
379 | 389 | ||
380 | if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block))) { | 390 | fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); |
391 | if (!fibh->sbh) { | ||
381 | brelse(epos.bh); | 392 | brelse(epos.bh); |
382 | *err = -EIO; | 393 | *err = -EIO; |
383 | return NULL; | 394 | return NULL; |
384 | } | 395 | } |
385 | 396 | ||
386 | block = UDF_I_LOCATION(dir).logicalBlockNum; | 397 | block = dinfo->i_location.logicalBlockNum; |
387 | |||
388 | } else { | 398 | } else { |
389 | block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0); | 399 | block = udf_get_lb_pblock(dir->i_sb, dinfo->i_location, 0); |
390 | fibh->sbh = fibh->ebh = NULL; | 400 | fibh->sbh = fibh->ebh = NULL; |
391 | fibh->soffset = fibh->eoffset = sb->s_blocksize; | 401 | fibh->soffset = fibh->eoffset = sb->s_blocksize; |
392 | goto add; | 402 | goto add; |
393 | } | 403 | } |
394 | 404 | ||
395 | while ((f_pos < size)) { | 405 | while (f_pos < size) { |
396 | fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, | 406 | fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, |
397 | &elen, &offset); | 407 | &elen, &offset); |
398 | 408 | ||
@@ -408,33 +418,39 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
408 | liu = le16_to_cpu(cfi->lengthOfImpUse); | 418 | liu = le16_to_cpu(cfi->lengthOfImpUse); |
409 | lfi = cfi->lengthFileIdent; | 419 | lfi = cfi->lengthFileIdent; |
410 | 420 | ||
411 | if (fibh->sbh == fibh->ebh) { | 421 | if (fibh->sbh == fibh->ebh) |
412 | nameptr = fi->fileIdent + liu; | 422 | nameptr = fi->fileIdent + liu; |
413 | } else { | 423 | else { |
414 | int poffset; /* Unpaded ending offset */ | 424 | int poffset; /* Unpaded ending offset */ |
415 | 425 | ||
416 | poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; | 426 | poffset = fibh->soffset + sizeof(struct fileIdentDesc) + |
427 | liu + lfi; | ||
417 | 428 | ||
418 | if (poffset >= lfi) { | 429 | if (poffset >= lfi) |
419 | nameptr = (char *)(fibh->ebh->b_data + poffset - lfi); | 430 | nameptr = (char *)(fibh->ebh->b_data + |
420 | } else { | 431 | poffset - lfi); |
432 | else { | ||
421 | nameptr = fname; | 433 | nameptr = fname; |
422 | memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); | 434 | memcpy(nameptr, fi->fileIdent + liu, |
423 | memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); | 435 | lfi - poffset); |
436 | memcpy(nameptr + lfi - poffset, | ||
437 | fibh->ebh->b_data, poffset); | ||
424 | } | 438 | } |
425 | } | 439 | } |
426 | 440 | ||
427 | if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { | 441 | if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { |
428 | if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) { | 442 | if (((sizeof(struct fileIdentDesc) + |
443 | liu + lfi + 3) & ~3) == nfidlen) { | ||
429 | brelse(epos.bh); | 444 | brelse(epos.bh); |
430 | cfi->descTag.tagSerialNum = cpu_to_le16(1); | 445 | cfi->descTag.tagSerialNum = cpu_to_le16(1); |
431 | cfi->fileVersionNum = cpu_to_le16(1); | 446 | cfi->fileVersionNum = cpu_to_le16(1); |
432 | cfi->fileCharacteristics = 0; | 447 | cfi->fileCharacteristics = 0; |
433 | cfi->lengthFileIdent = namelen; | 448 | cfi->lengthFileIdent = namelen; |
434 | cfi->lengthOfImpUse = cpu_to_le16(0); | 449 | cfi->lengthOfImpUse = cpu_to_le16(0); |
435 | if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { | 450 | if (!udf_write_fi(dir, cfi, fi, fibh, NULL, |
451 | name)) | ||
436 | return fi; | 452 | return fi; |
437 | } else { | 453 | else { |
438 | *err = -EIO; | 454 | *err = -EIO; |
439 | return NULL; | 455 | return NULL; |
440 | } | 456 | } |
@@ -444,8 +460,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
444 | if (!lfi || !dentry) | 460 | if (!lfi || !dentry) |
445 | continue; | 461 | continue; |
446 | 462 | ||
447 | if ((flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi)) && | 463 | flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); |
448 | udf_match(flen, fname, dentry->d_name.len, dentry->d_name.name)) { | 464 | if (flen && udf_match(flen, fname, dentry->d_name.len, |
465 | dentry->d_name.name)) { | ||
449 | if (fibh->sbh != fibh->ebh) | 466 | if (fibh->sbh != fibh->ebh) |
450 | brelse(fibh->ebh); | 467 | brelse(fibh->ebh); |
451 | brelse(fibh->sbh); | 468 | brelse(fibh->sbh); |
@@ -456,29 +473,34 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir, | |||
456 | } | 473 | } |
457 | 474 | ||
458 | add: | 475 | add: |
476 | if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { | ||
477 | elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); | ||
478 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) | ||
479 | epos.offset -= sizeof(short_ad); | ||
480 | else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) | ||
481 | epos.offset -= sizeof(long_ad); | ||
482 | udf_write_aext(dir, &epos, eloc, elen, 1); | ||
483 | } | ||
459 | f_pos += nfidlen; | 484 | f_pos += nfidlen; |
460 | 485 | ||
461 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB && | 486 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && |
462 | sb->s_blocksize - fibh->eoffset < nfidlen) { | 487 | sb->s_blocksize - fibh->eoffset < nfidlen) { |
463 | brelse(epos.bh); | 488 | brelse(epos.bh); |
464 | epos.bh = NULL; | 489 | epos.bh = NULL; |
465 | fibh->soffset -= udf_ext0_offset(dir); | 490 | fibh->soffset -= udf_ext0_offset(dir); |
466 | fibh->eoffset -= udf_ext0_offset(dir); | 491 | fibh->eoffset -= udf_ext0_offset(dir); |
467 | f_pos -= (udf_ext0_offset(dir) >> 2); | 492 | f_pos -= udf_ext0_offset(dir); |
468 | if (fibh->sbh != fibh->ebh) | 493 | if (fibh->sbh != fibh->ebh) |
469 | brelse(fibh->ebh); | 494 | brelse(fibh->ebh); |
470 | brelse(fibh->sbh); | 495 | brelse(fibh->sbh); |
471 | if (!(fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err))) | 496 | fibh->sbh = fibh->ebh = |
497 | udf_expand_dir_adinicb(dir, &block, err); | ||
498 | if (!fibh->sbh) | ||
472 | return NULL; | 499 | return NULL; |
473 | epos.block = UDF_I_LOCATION(dir); | 500 | epos.block = dinfo->i_location; |
474 | eloc.logicalBlockNum = block; | ||
475 | eloc.partitionReferenceNum = UDF_I_LOCATION(dir).partitionReferenceNum; | ||
476 | elen = dir->i_sb->s_blocksize; | ||
477 | epos.offset = udf_file_entry_alloc_offset(dir); | 501 | epos.offset = udf_file_entry_alloc_offset(dir); |
478 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) | 502 | /* Load extent udf_expand_dir_adinicb() has created */ |
479 | epos.offset += sizeof(short_ad); | 503 | udf_current_aext(dir, &epos, &eloc, &elen, 1); |
480 | else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) | ||
481 | epos.offset += sizeof(long_ad); | ||
482 | } | 504 | } |
483 | 505 | ||
484 | if (sb->s_blocksize - fibh->eoffset >= nfidlen) { | 506 | if (sb->s_blocksize - fibh->eoffset >= nfidlen) { |
@@ -489,15 +511,19 @@ add: | |||
489 | fibh->sbh = fibh->ebh; | 511 | fibh->sbh = fibh->ebh; |
490 | } | 512 | } |
491 | 513 | ||
492 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 514 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
493 | block = UDF_I_LOCATION(dir).logicalBlockNum; | 515 | block = dinfo->i_location.logicalBlockNum; |
494 | fi = (struct fileIdentDesc *)(UDF_I_DATA(dir) + fibh->soffset - | 516 | fi = (struct fileIdentDesc *) |
495 | udf_ext0_offset(dir) + | 517 | (dinfo->i_ext.i_data + |
496 | UDF_I_LENEATTR(dir)); | 518 | fibh->soffset - |
519 | udf_ext0_offset(dir) + | ||
520 | dinfo->i_lenEAttr); | ||
497 | } else { | 521 | } else { |
498 | block = eloc.logicalBlockNum + ((elen - 1) >> | 522 | block = eloc.logicalBlockNum + |
499 | dir->i_sb->s_blocksize_bits); | 523 | ((elen - 1) >> |
500 | fi = (struct fileIdentDesc *)(fibh->sbh->b_data + fibh->soffset); | 524 | dir->i_sb->s_blocksize_bits); |
525 | fi = (struct fileIdentDesc *) | ||
526 | (fibh->sbh->b_data + fibh->soffset); | ||
501 | } | 527 | } |
502 | } else { | 528 | } else { |
503 | fibh->soffset = fibh->eoffset - sb->s_blocksize; | 529 | fibh->soffset = fibh->eoffset - sb->s_blocksize; |
@@ -509,7 +535,8 @@ add: | |||
509 | 535 | ||
510 | block = eloc.logicalBlockNum + ((elen - 1) >> | 536 | block = eloc.logicalBlockNum + ((elen - 1) >> |
511 | dir->i_sb->s_blocksize_bits); | 537 | dir->i_sb->s_blocksize_bits); |
512 | fibh->ebh = udf_bread(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), 1, err); | 538 | fibh->ebh = udf_bread(dir, |
539 | f_pos >> dir->i_sb->s_blocksize_bits, 1, err); | ||
513 | if (!fibh->ebh) { | 540 | if (!fibh->ebh) { |
514 | brelse(epos.bh); | 541 | brelse(epos.bh); |
515 | brelse(fibh->sbh); | 542 | brelse(fibh->sbh); |
@@ -521,32 +548,34 @@ add: | |||
521 | (EXT_RECORDED_ALLOCATED >> 30)) { | 548 | (EXT_RECORDED_ALLOCATED >> 30)) { |
522 | block = eloc.logicalBlockNum + ((elen - 1) >> | 549 | block = eloc.logicalBlockNum + ((elen - 1) >> |
523 | dir->i_sb->s_blocksize_bits); | 550 | dir->i_sb->s_blocksize_bits); |
524 | } else { | 551 | } else |
525 | block++; | 552 | block++; |
526 | } | ||
527 | 553 | ||
528 | brelse(fibh->sbh); | 554 | brelse(fibh->sbh); |
529 | fibh->sbh = fibh->ebh; | 555 | fibh->sbh = fibh->ebh; |
530 | fi = (struct fileIdentDesc *)(fibh->sbh->b_data); | 556 | fi = (struct fileIdentDesc *)(fibh->sbh->b_data); |
531 | } else { | 557 | } else { |
532 | fi = (struct fileIdentDesc *) | 558 | fi = (struct fileIdentDesc *) |
533 | (fibh->sbh->b_data + sb->s_blocksize + fibh->soffset); | 559 | (fibh->sbh->b_data + sb->s_blocksize + |
560 | fibh->soffset); | ||
534 | } | 561 | } |
535 | } | 562 | } |
536 | 563 | ||
537 | memset(cfi, 0, sizeof(struct fileIdentDesc)); | 564 | memset(cfi, 0, sizeof(struct fileIdentDesc)); |
538 | if (UDF_SB_UDFREV(sb) >= 0x0200) | 565 | if (UDF_SB(sb)->s_udfrev >= 0x0200) |
539 | udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(tag)); | 566 | udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, |
567 | sizeof(tag)); | ||
540 | else | 568 | else |
541 | udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(tag)); | 569 | udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, |
570 | sizeof(tag)); | ||
542 | cfi->fileVersionNum = cpu_to_le16(1); | 571 | cfi->fileVersionNum = cpu_to_le16(1); |
543 | cfi->lengthFileIdent = namelen; | 572 | cfi->lengthFileIdent = namelen; |
544 | cfi->lengthOfImpUse = cpu_to_le16(0); | 573 | cfi->lengthOfImpUse = cpu_to_le16(0); |
545 | if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { | 574 | if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { |
546 | brelse(epos.bh); | 575 | brelse(epos.bh); |
547 | dir->i_size += nfidlen; | 576 | dir->i_size += nfidlen; |
548 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) | 577 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
549 | UDF_I_LENALLOC(dir) += nfidlen; | 578 | dinfo->i_lenAlloc += nfidlen; |
550 | mark_inode_dirty(dir); | 579 | mark_inode_dirty(dir); |
551 | return fi; | 580 | return fi; |
552 | } else { | 581 | } else { |
@@ -578,6 +607,7 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, | |||
578 | struct inode *inode; | 607 | struct inode *inode; |
579 | struct fileIdentDesc cfi, *fi; | 608 | struct fileIdentDesc cfi, *fi; |
580 | int err; | 609 | int err; |
610 | struct udf_inode_info *iinfo; | ||
581 | 611 | ||
582 | lock_kernel(); | 612 | lock_kernel(); |
583 | inode = udf_new_inode(dir, mode, &err); | 613 | inode = udf_new_inode(dir, mode, &err); |
@@ -586,7 +616,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, | |||
586 | return err; | 616 | return err; |
587 | } | 617 | } |
588 | 618 | ||
589 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) | 619 | iinfo = UDF_I(inode); |
620 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
590 | inode->i_data.a_ops = &udf_adinicb_aops; | 621 | inode->i_data.a_ops = &udf_adinicb_aops; |
591 | else | 622 | else |
592 | inode->i_data.a_ops = &udf_aops; | 623 | inode->i_data.a_ops = &udf_aops; |
@@ -595,7 +626,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, | |||
595 | inode->i_mode = mode; | 626 | inode->i_mode = mode; |
596 | mark_inode_dirty(inode); | 627 | mark_inode_dirty(inode); |
597 | 628 | ||
598 | if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { | 629 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
630 | if (!fi) { | ||
599 | inode->i_nlink--; | 631 | inode->i_nlink--; |
600 | mark_inode_dirty(inode); | 632 | mark_inode_dirty(inode); |
601 | iput(inode); | 633 | iput(inode); |
@@ -603,13 +635,12 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, | |||
603 | return err; | 635 | return err; |
604 | } | 636 | } |
605 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 637 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
606 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); | 638 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); |
607 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 639 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
608 | cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); | 640 | cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); |
609 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 641 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
610 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 642 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
611 | mark_inode_dirty(dir); | 643 | mark_inode_dirty(dir); |
612 | } | ||
613 | if (fibh.sbh != fibh.ebh) | 644 | if (fibh.sbh != fibh.ebh) |
614 | brelse(fibh.ebh); | 645 | brelse(fibh.ebh); |
615 | brelse(fibh.sbh); | 646 | brelse(fibh.sbh); |
@@ -626,6 +657,7 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, | |||
626 | struct udf_fileident_bh fibh; | 657 | struct udf_fileident_bh fibh; |
627 | struct fileIdentDesc cfi, *fi; | 658 | struct fileIdentDesc cfi, *fi; |
628 | int err; | 659 | int err; |
660 | struct udf_inode_info *iinfo; | ||
629 | 661 | ||
630 | if (!old_valid_dev(rdev)) | 662 | if (!old_valid_dev(rdev)) |
631 | return -EINVAL; | 663 | return -EINVAL; |
@@ -636,9 +668,11 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, | |||
636 | if (!inode) | 668 | if (!inode) |
637 | goto out; | 669 | goto out; |
638 | 670 | ||
671 | iinfo = UDF_I(inode); | ||
639 | inode->i_uid = current->fsuid; | 672 | inode->i_uid = current->fsuid; |
640 | init_special_inode(inode, mode, rdev); | 673 | init_special_inode(inode, mode, rdev); |
641 | if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { | 674 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
675 | if (!fi) { | ||
642 | inode->i_nlink--; | 676 | inode->i_nlink--; |
643 | mark_inode_dirty(inode); | 677 | mark_inode_dirty(inode); |
644 | iput(inode); | 678 | iput(inode); |
@@ -646,13 +680,12 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, | |||
646 | return err; | 680 | return err; |
647 | } | 681 | } |
648 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 682 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
649 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); | 683 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); |
650 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 684 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
651 | cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); | 685 | cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); |
652 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 686 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
653 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 687 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
654 | mark_inode_dirty(dir); | 688 | mark_inode_dirty(dir); |
655 | } | ||
656 | mark_inode_dirty(inode); | 689 | mark_inode_dirty(inode); |
657 | 690 | ||
658 | if (fibh.sbh != fibh.ebh) | 691 | if (fibh.sbh != fibh.ebh) |
@@ -672,6 +705,8 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
672 | struct udf_fileident_bh fibh; | 705 | struct udf_fileident_bh fibh; |
673 | struct fileIdentDesc cfi, *fi; | 706 | struct fileIdentDesc cfi, *fi; |
674 | int err; | 707 | int err; |
708 | struct udf_inode_info *dinfo = UDF_I(dir); | ||
709 | struct udf_inode_info *iinfo; | ||
675 | 710 | ||
676 | lock_kernel(); | 711 | lock_kernel(); |
677 | err = -EMLINK; | 712 | err = -EMLINK; |
@@ -683,9 +718,11 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
683 | if (!inode) | 718 | if (!inode) |
684 | goto out; | 719 | goto out; |
685 | 720 | ||
721 | iinfo = UDF_I(inode); | ||
686 | inode->i_op = &udf_dir_inode_operations; | 722 | inode->i_op = &udf_dir_inode_operations; |
687 | inode->i_fop = &udf_dir_operations; | 723 | inode->i_fop = &udf_dir_operations; |
688 | if (!(fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err))) { | 724 | fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err); |
725 | if (!fi) { | ||
689 | inode->i_nlink--; | 726 | inode->i_nlink--; |
690 | mark_inode_dirty(inode); | 727 | mark_inode_dirty(inode); |
691 | iput(inode); | 728 | iput(inode); |
@@ -693,10 +730,11 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
693 | } | 730 | } |
694 | inode->i_nlink = 2; | 731 | inode->i_nlink = 2; |
695 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 732 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
696 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(dir)); | 733 | cfi.icb.extLocation = cpu_to_lelb(dinfo->i_location); |
697 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 734 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
698 | cpu_to_le32(UDF_I_UNIQUE(dir) & 0x00000000FFFFFFFFUL); | 735 | cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL); |
699 | cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT; | 736 | cfi.fileCharacteristics = |
737 | FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT; | ||
700 | udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); | 738 | udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); |
701 | brelse(fibh.sbh); | 739 | brelse(fibh.sbh); |
702 | inode->i_mode = S_IFDIR | mode; | 740 | inode->i_mode = S_IFDIR | mode; |
@@ -704,16 +742,17 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
704 | inode->i_mode |= S_ISGID; | 742 | inode->i_mode |= S_ISGID; |
705 | mark_inode_dirty(inode); | 743 | mark_inode_dirty(inode); |
706 | 744 | ||
707 | if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { | 745 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
746 | if (!fi) { | ||
708 | inode->i_nlink = 0; | 747 | inode->i_nlink = 0; |
709 | mark_inode_dirty(inode); | 748 | mark_inode_dirty(inode); |
710 | iput(inode); | 749 | iput(inode); |
711 | goto out; | 750 | goto out; |
712 | } | 751 | } |
713 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 752 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
714 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); | 753 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); |
715 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 754 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
716 | cpu_to_le32(UDF_I_UNIQUE(inode) & 0x00000000FFFFFFFFUL); | 755 | cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); |
717 | cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; | 756 | cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; |
718 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 757 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
719 | inc_nlink(dir); | 758 | inc_nlink(dir); |
@@ -734,32 +773,33 @@ static int empty_dir(struct inode *dir) | |||
734 | struct fileIdentDesc *fi, cfi; | 773 | struct fileIdentDesc *fi, cfi; |
735 | struct udf_fileident_bh fibh; | 774 | struct udf_fileident_bh fibh; |
736 | loff_t f_pos; | 775 | loff_t f_pos; |
737 | loff_t size = (udf_ext0_offset(dir) + dir->i_size) >> 2; | 776 | loff_t size = udf_ext0_offset(dir) + dir->i_size; |
738 | int block; | 777 | int block; |
739 | kernel_lb_addr eloc; | 778 | kernel_lb_addr eloc; |
740 | uint32_t elen; | 779 | uint32_t elen; |
741 | sector_t offset; | 780 | sector_t offset; |
742 | struct extent_position epos = {}; | 781 | struct extent_position epos = {}; |
782 | struct udf_inode_info *dinfo = UDF_I(dir); | ||
743 | 783 | ||
744 | f_pos = (udf_ext0_offset(dir) >> 2); | 784 | f_pos = udf_ext0_offset(dir); |
785 | fibh.soffset = fibh.eoffset = f_pos & (dir->i_sb->s_blocksize - 1); | ||
745 | 786 | ||
746 | fibh.soffset = fibh.eoffset = (f_pos & ((dir->i_sb->s_blocksize - 1) >> 2)) << 2; | 787 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
747 | |||
748 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | ||
749 | fibh.sbh = fibh.ebh = NULL; | 788 | fibh.sbh = fibh.ebh = NULL; |
750 | } else if (inode_bmap(dir, f_pos >> (dir->i_sb->s_blocksize_bits - 2), | 789 | else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, |
751 | &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { | 790 | &epos, &eloc, &elen, &offset) == |
791 | (EXT_RECORDED_ALLOCATED >> 30)) { | ||
752 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); | 792 | block = udf_get_lb_pblock(dir->i_sb, eloc, offset); |
753 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { | 793 | if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { |
754 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_SHORT) | 794 | if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
755 | epos.offset -= sizeof(short_ad); | 795 | epos.offset -= sizeof(short_ad); |
756 | else if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_LONG) | 796 | else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
757 | epos.offset -= sizeof(long_ad); | 797 | epos.offset -= sizeof(long_ad); |
758 | } else { | 798 | } else |
759 | offset = 0; | 799 | offset = 0; |
760 | } | ||
761 | 800 | ||
762 | if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) { | 801 | fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block); |
802 | if (!fibh.sbh) { | ||
763 | brelse(epos.bh); | 803 | brelse(epos.bh); |
764 | return 0; | 804 | return 0; |
765 | } | 805 | } |
@@ -768,7 +808,7 @@ static int empty_dir(struct inode *dir) | |||
768 | return 0; | 808 | return 0; |
769 | } | 809 | } |
770 | 810 | ||
771 | while ((f_pos < size)) { | 811 | while (f_pos < size) { |
772 | fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, | 812 | fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, |
773 | &elen, &offset); | 813 | &elen, &offset); |
774 | if (!fi) { | 814 | if (!fi) { |
@@ -828,7 +868,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) | |||
828 | clear_nlink(inode); | 868 | clear_nlink(inode); |
829 | inode->i_size = 0; | 869 | inode->i_size = 0; |
830 | inode_dec_link_count(dir); | 870 | inode_dec_link_count(dir); |
831 | inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); | 871 | inode->i_ctime = dir->i_ctime = dir->i_mtime = |
872 | current_fs_time(dir->i_sb); | ||
832 | mark_inode_dirty(dir); | 873 | mark_inode_dirty(dir); |
833 | 874 | ||
834 | end_rmdir: | 875 | end_rmdir: |
@@ -901,36 +942,42 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
901 | int block; | 942 | int block; |
902 | char name[UDF_NAME_LEN]; | 943 | char name[UDF_NAME_LEN]; |
903 | int namelen; | 944 | int namelen; |
945 | struct buffer_head *bh; | ||
946 | struct udf_inode_info *iinfo; | ||
904 | 947 | ||
905 | lock_kernel(); | 948 | lock_kernel(); |
906 | if (!(inode = udf_new_inode(dir, S_IFLNK, &err))) | 949 | inode = udf_new_inode(dir, S_IFLNK, &err); |
950 | if (!inode) | ||
907 | goto out; | 951 | goto out; |
908 | 952 | ||
953 | iinfo = UDF_I(inode); | ||
909 | inode->i_mode = S_IFLNK | S_IRWXUGO; | 954 | inode->i_mode = S_IFLNK | S_IRWXUGO; |
910 | inode->i_data.a_ops = &udf_symlink_aops; | 955 | inode->i_data.a_ops = &udf_symlink_aops; |
911 | inode->i_op = &page_symlink_inode_operations; | 956 | inode->i_op = &page_symlink_inode_operations; |
912 | 957 | ||
913 | if (UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB) { | 958 | if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { |
914 | kernel_lb_addr eloc; | 959 | kernel_lb_addr eloc; |
915 | uint32_t elen; | 960 | uint32_t elen; |
916 | 961 | ||
917 | block = udf_new_block(inode->i_sb, inode, | 962 | block = udf_new_block(inode->i_sb, inode, |
918 | UDF_I_LOCATION(inode).partitionReferenceNum, | 963 | iinfo->i_location.partitionReferenceNum, |
919 | UDF_I_LOCATION(inode).logicalBlockNum, &err); | 964 | iinfo->i_location.logicalBlockNum, &err); |
920 | if (!block) | 965 | if (!block) |
921 | goto out_no_entry; | 966 | goto out_no_entry; |
922 | epos.block = UDF_I_LOCATION(inode); | 967 | epos.block = iinfo->i_location; |
923 | epos.offset = udf_file_entry_alloc_offset(inode); | 968 | epos.offset = udf_file_entry_alloc_offset(inode); |
924 | epos.bh = NULL; | 969 | epos.bh = NULL; |
925 | eloc.logicalBlockNum = block; | 970 | eloc.logicalBlockNum = block; |
926 | eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum; | 971 | eloc.partitionReferenceNum = |
972 | iinfo->i_location.partitionReferenceNum; | ||
927 | elen = inode->i_sb->s_blocksize; | 973 | elen = inode->i_sb->s_blocksize; |
928 | UDF_I_LENEXTENTS(inode) = elen; | 974 | iinfo->i_lenExtents = elen; |
929 | udf_add_aext(inode, &epos, eloc, elen, 0); | 975 | udf_add_aext(inode, &epos, eloc, elen, 0); |
930 | brelse(epos.bh); | 976 | brelse(epos.bh); |
931 | 977 | ||
932 | block = udf_get_pblock(inode->i_sb, block, | 978 | block = udf_get_pblock(inode->i_sb, block, |
933 | UDF_I_LOCATION(inode).partitionReferenceNum, 0); | 979 | iinfo->i_location.partitionReferenceNum, |
980 | 0); | ||
934 | epos.bh = udf_tread(inode->i_sb, block); | 981 | epos.bh = udf_tread(inode->i_sb, block); |
935 | lock_buffer(epos.bh); | 982 | lock_buffer(epos.bh); |
936 | memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); | 983 | memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); |
@@ -938,9 +985,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
938 | unlock_buffer(epos.bh); | 985 | unlock_buffer(epos.bh); |
939 | mark_buffer_dirty_inode(epos.bh, inode); | 986 | mark_buffer_dirty_inode(epos.bh, inode); |
940 | ea = epos.bh->b_data + udf_ext0_offset(inode); | 987 | ea = epos.bh->b_data + udf_ext0_offset(inode); |
941 | } else { | 988 | } else |
942 | ea = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); | 989 | ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; |
943 | } | ||
944 | 990 | ||
945 | eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); | 991 | eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); |
946 | pc = (struct pathComponent *)ea; | 992 | pc = (struct pathComponent *)ea; |
@@ -977,7 +1023,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
977 | if (compstart[0] == '.') { | 1023 | if (compstart[0] == '.') { |
978 | if ((symname - compstart) == 1) | 1024 | if ((symname - compstart) == 1) |
979 | pc->componentType = 4; | 1025 | pc->componentType = 4; |
980 | else if ((symname - compstart) == 2 && compstart[1] == '.') | 1026 | else if ((symname - compstart) == 2 && |
1027 | compstart[1] == '.') | ||
981 | pc->componentType = 3; | 1028 | pc->componentType = 3; |
982 | } | 1029 | } |
983 | 1030 | ||
@@ -987,7 +1034,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
987 | if (!namelen) | 1034 | if (!namelen) |
988 | goto out_no_entry; | 1035 | goto out_no_entry; |
989 | 1036 | ||
990 | if (elen + sizeof(struct pathComponent) + namelen > eoffset) | 1037 | if (elen + sizeof(struct pathComponent) + namelen > |
1038 | eoffset) | ||
991 | goto out_no_entry; | 1039 | goto out_no_entry; |
992 | else | 1040 | else |
993 | pc->lengthComponentIdent = namelen; | 1041 | pc->lengthComponentIdent = namelen; |
@@ -1006,30 +1054,34 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
1006 | 1054 | ||
1007 | brelse(epos.bh); | 1055 | brelse(epos.bh); |
1008 | inode->i_size = elen; | 1056 | inode->i_size = elen; |
1009 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) | 1057 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
1010 | UDF_I_LENALLOC(inode) = inode->i_size; | 1058 | iinfo->i_lenAlloc = inode->i_size; |
1011 | mark_inode_dirty(inode); | 1059 | mark_inode_dirty(inode); |
1012 | 1060 | ||
1013 | if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) | 1061 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
1062 | if (!fi) | ||
1014 | goto out_no_entry; | 1063 | goto out_no_entry; |
1015 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 1064 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
1016 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); | 1065 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); |
1017 | if (UDF_SB_LVIDBH(inode->i_sb)) { | 1066 | bh = UDF_SB(inode->i_sb)->s_lvid_bh; |
1067 | if (bh) { | ||
1068 | struct logicalVolIntegrityDesc *lvid = | ||
1069 | (struct logicalVolIntegrityDesc *)bh->b_data; | ||
1018 | struct logicalVolHeaderDesc *lvhd; | 1070 | struct logicalVolHeaderDesc *lvhd; |
1019 | uint64_t uniqueID; | 1071 | uint64_t uniqueID; |
1020 | lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse); | 1072 | lvhd = (struct logicalVolHeaderDesc *) |
1073 | lvid->logicalVolContentsUse; | ||
1021 | uniqueID = le64_to_cpu(lvhd->uniqueID); | 1074 | uniqueID = le64_to_cpu(lvhd->uniqueID); |
1022 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 1075 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
1023 | cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); | 1076 | cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); |
1024 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) | 1077 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) |
1025 | uniqueID += 16; | 1078 | uniqueID += 16; |
1026 | lvhd->uniqueID = cpu_to_le64(uniqueID); | 1079 | lvhd->uniqueID = cpu_to_le64(uniqueID); |
1027 | mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb)); | 1080 | mark_buffer_dirty(bh); |
1028 | } | 1081 | } |
1029 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 1082 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
1030 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 1083 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
1031 | mark_inode_dirty(dir); | 1084 | mark_inode_dirty(dir); |
1032 | } | ||
1033 | if (fibh.sbh != fibh.ebh) | 1085 | if (fibh.sbh != fibh.ebh) |
1034 | brelse(fibh.ebh); | 1086 | brelse(fibh.ebh); |
1035 | brelse(fibh.sbh); | 1087 | brelse(fibh.sbh); |
@@ -1053,6 +1105,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, | |||
1053 | struct udf_fileident_bh fibh; | 1105 | struct udf_fileident_bh fibh; |
1054 | struct fileIdentDesc cfi, *fi; | 1106 | struct fileIdentDesc cfi, *fi; |
1055 | int err; | 1107 | int err; |
1108 | struct buffer_head *bh; | ||
1056 | 1109 | ||
1057 | lock_kernel(); | 1110 | lock_kernel(); |
1058 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { | 1111 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { |
@@ -1060,28 +1113,32 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, | |||
1060 | return -EMLINK; | 1113 | return -EMLINK; |
1061 | } | 1114 | } |
1062 | 1115 | ||
1063 | if (!(fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err))) { | 1116 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
1117 | if (!fi) { | ||
1064 | unlock_kernel(); | 1118 | unlock_kernel(); |
1065 | return err; | 1119 | return err; |
1066 | } | 1120 | } |
1067 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | 1121 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); |
1068 | cfi.icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(inode)); | 1122 | cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location); |
1069 | if (UDF_SB_LVIDBH(inode->i_sb)) { | 1123 | bh = UDF_SB(inode->i_sb)->s_lvid_bh; |
1124 | if (bh) { | ||
1125 | struct logicalVolIntegrityDesc *lvid = | ||
1126 | (struct logicalVolIntegrityDesc *)bh->b_data; | ||
1070 | struct logicalVolHeaderDesc *lvhd; | 1127 | struct logicalVolHeaderDesc *lvhd; |
1071 | uint64_t uniqueID; | 1128 | uint64_t uniqueID; |
1072 | lvhd = (struct logicalVolHeaderDesc *)(UDF_SB_LVID(inode->i_sb)->logicalVolContentsUse); | 1129 | lvhd = (struct logicalVolHeaderDesc *) |
1130 | (lvid->logicalVolContentsUse); | ||
1073 | uniqueID = le64_to_cpu(lvhd->uniqueID); | 1131 | uniqueID = le64_to_cpu(lvhd->uniqueID); |
1074 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | 1132 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = |
1075 | cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); | 1133 | cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); |
1076 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) | 1134 | if (!(++uniqueID & 0x00000000FFFFFFFFUL)) |
1077 | uniqueID += 16; | 1135 | uniqueID += 16; |
1078 | lvhd->uniqueID = cpu_to_le64(uniqueID); | 1136 | lvhd->uniqueID = cpu_to_le64(uniqueID); |
1079 | mark_buffer_dirty(UDF_SB_LVIDBH(inode->i_sb)); | 1137 | mark_buffer_dirty(bh); |
1080 | } | 1138 | } |
1081 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 1139 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
1082 | if (UDF_I_ALLOCTYPE(dir) == ICBTAG_FLAG_AD_IN_ICB) { | 1140 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
1083 | mark_inode_dirty(dir); | 1141 | mark_inode_dirty(dir); |
1084 | } | ||
1085 | 1142 | ||
1086 | if (fibh.sbh != fibh.ebh) | 1143 | if (fibh.sbh != fibh.ebh) |
1087 | brelse(fibh.ebh); | 1144 | brelse(fibh.ebh); |
@@ -1105,13 +1162,16 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1105 | struct inode *old_inode = old_dentry->d_inode; | 1162 | struct inode *old_inode = old_dentry->d_inode; |
1106 | struct inode *new_inode = new_dentry->d_inode; | 1163 | struct inode *new_inode = new_dentry->d_inode; |
1107 | struct udf_fileident_bh ofibh, nfibh; | 1164 | struct udf_fileident_bh ofibh, nfibh; |
1108 | struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL, ocfi, ncfi; | 1165 | struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL; |
1166 | struct fileIdentDesc ocfi, ncfi; | ||
1109 | struct buffer_head *dir_bh = NULL; | 1167 | struct buffer_head *dir_bh = NULL; |
1110 | int retval = -ENOENT; | 1168 | int retval = -ENOENT; |
1111 | kernel_lb_addr tloc; | 1169 | kernel_lb_addr tloc; |
1170 | struct udf_inode_info *old_iinfo = UDF_I(old_inode); | ||
1112 | 1171 | ||
1113 | lock_kernel(); | 1172 | lock_kernel(); |
1114 | if ((ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi))) { | 1173 | ofi = udf_find_entry(old_dir, old_dentry, &ofibh, &ocfi); |
1174 | if (ofi) { | ||
1115 | if (ofibh.sbh != ofibh.ebh) | 1175 | if (ofibh.sbh != ofibh.ebh) |
1116 | brelse(ofibh.ebh); | 1176 | brelse(ofibh.ebh); |
1117 | brelse(ofibh.sbh); | 1177 | brelse(ofibh.sbh); |
@@ -1131,7 +1191,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1131 | } | 1191 | } |
1132 | } | 1192 | } |
1133 | if (S_ISDIR(old_inode->i_mode)) { | 1193 | if (S_ISDIR(old_inode->i_mode)) { |
1134 | uint32_t offset = udf_ext0_offset(old_inode); | 1194 | int offset = udf_ext0_offset(old_inode); |
1135 | 1195 | ||
1136 | if (new_inode) { | 1196 | if (new_inode) { |
1137 | retval = -ENOTEMPTY; | 1197 | retval = -ENOTEMPTY; |
@@ -1139,30 +1199,36 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1139 | goto end_rename; | 1199 | goto end_rename; |
1140 | } | 1200 | } |
1141 | retval = -EIO; | 1201 | retval = -EIO; |
1142 | if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { | 1202 | if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
1143 | dir_fi = udf_get_fileident(UDF_I_DATA(old_inode) - | 1203 | dir_fi = udf_get_fileident( |
1144 | (UDF_I_EFE(old_inode) ? | 1204 | old_iinfo->i_ext.i_data - |
1145 | sizeof(struct extendedFileEntry) : | 1205 | (old_iinfo->i_efe ? |
1146 | sizeof(struct fileEntry)), | 1206 | sizeof(struct extendedFileEntry) : |
1147 | old_inode->i_sb->s_blocksize, &offset); | 1207 | sizeof(struct fileEntry)), |
1208 | old_inode->i_sb->s_blocksize, &offset); | ||
1148 | } else { | 1209 | } else { |
1149 | dir_bh = udf_bread(old_inode, 0, 0, &retval); | 1210 | dir_bh = udf_bread(old_inode, 0, 0, &retval); |
1150 | if (!dir_bh) | 1211 | if (!dir_bh) |
1151 | goto end_rename; | 1212 | goto end_rename; |
1152 | dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset); | 1213 | dir_fi = udf_get_fileident(dir_bh->b_data, |
1214 | old_inode->i_sb->s_blocksize, &offset); | ||
1153 | } | 1215 | } |
1154 | if (!dir_fi) | 1216 | if (!dir_fi) |
1155 | goto end_rename; | 1217 | goto end_rename; |
1156 | tloc = lelb_to_cpu(dir_fi->icb.extLocation); | 1218 | tloc = lelb_to_cpu(dir_fi->icb.extLocation); |
1157 | if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) != old_dir->i_ino) | 1219 | if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) != |
1220 | old_dir->i_ino) | ||
1158 | goto end_rename; | 1221 | goto end_rename; |
1159 | 1222 | ||
1160 | retval = -EMLINK; | 1223 | retval = -EMLINK; |
1161 | if (!new_inode && new_dir->i_nlink >= (256 << sizeof(new_dir->i_nlink)) - 1) | 1224 | if (!new_inode && |
1225 | new_dir->i_nlink >= | ||
1226 | (256 << sizeof(new_dir->i_nlink)) - 1) | ||
1162 | goto end_rename; | 1227 | goto end_rename; |
1163 | } | 1228 | } |
1164 | if (!nfi) { | 1229 | if (!nfi) { |
1165 | nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval); | 1230 | nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, |
1231 | &retval); | ||
1166 | if (!nfi) | 1232 | if (!nfi) |
1167 | goto end_rename; | 1233 | goto end_rename; |
1168 | } | 1234 | } |
@@ -1194,18 +1260,19 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1194 | mark_inode_dirty(old_dir); | 1260 | mark_inode_dirty(old_dir); |
1195 | 1261 | ||
1196 | if (dir_fi) { | 1262 | if (dir_fi) { |
1197 | dir_fi->icb.extLocation = cpu_to_lelb(UDF_I_LOCATION(new_dir)); | 1263 | dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); |
1198 | udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) + | 1264 | udf_update_tag((char *)dir_fi, |
1199 | le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); | 1265 | (sizeof(struct fileIdentDesc) + |
1200 | if (UDF_I_ALLOCTYPE(old_inode) == ICBTAG_FLAG_AD_IN_ICB) { | 1266 | le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); |
1267 | if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
1201 | mark_inode_dirty(old_inode); | 1268 | mark_inode_dirty(old_inode); |
1202 | } else { | 1269 | else |
1203 | mark_buffer_dirty_inode(dir_bh, old_inode); | 1270 | mark_buffer_dirty_inode(dir_bh, old_inode); |
1204 | } | 1271 | |
1205 | inode_dec_link_count(old_dir); | 1272 | inode_dec_link_count(old_dir); |
1206 | if (new_inode) { | 1273 | if (new_inode) |
1207 | inode_dec_link_count(new_inode); | 1274 | inode_dec_link_count(new_inode); |
1208 | } else { | 1275 | else { |
1209 | inc_nlink(new_dir); | 1276 | inc_nlink(new_dir); |
1210 | mark_inode_dirty(new_dir); | 1277 | mark_inode_dirty(new_dir); |
1211 | } | 1278 | } |
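Across the namei.c hunks above, the old UDF_I_ALLOCTYPE()/UDF_I_LOCATION()/UDF_I_DATA() macro accesses are replaced by a struct udf_inode_info pointer obtained once through UDF_I() and cached in a local (dinfo for the directory, iinfo for the new inode). A minimal user-space sketch of that accessor idiom, assuming an embedded VFS inode; the real kernel helper presumably uses container_of(), and the field layout here is illustrative only:

/*
 * Sketch of the accessor pattern the patch migrates to (illustrative,
 * user-space approximation; field names mirror the diff, the container
 * layout is an assumption).
 */
#include <stddef.h>
#include <stdio.h>

struct inode { long i_size; };

struct udf_inode_info {
	int i_alloc_type;
	int i_lenAlloc;
	struct inode vfs_inode;		/* generic inode embedded in the fs-private info */
};

/* UDF_I(): map the generic inode back to the fs-private info */
static inline struct udf_inode_info *UDF_I(struct inode *inode)
{
	return (struct udf_inode_info *)
		((char *)inode - offsetof(struct udf_inode_info, vfs_inode));
}

int main(void)
{
	struct udf_inode_info info = { .i_alloc_type = 1, .i_lenAlloc = 0 };
	struct inode *dir = &info.vfs_inode;

	/* fetch once, then use plain field accesses (dinfo->... in the patch) */
	struct udf_inode_info *dinfo = UDF_I(dir);
	dinfo->i_lenAlloc += 40;
	printf("alloc_type=%d lenAlloc=%d\n", dinfo->i_alloc_type, dinfo->i_lenAlloc);
	return 0;
}

Fetching the pointer once keeps the hot paths free of repeated macro expansion and lets the field accesses in the diff read as ordinary struct dereferences.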
diff --git a/fs/udf/partition.c b/fs/udf/partition.c index aaab24c8c498..fc533345ab89 100644 --- a/fs/udf/partition.c +++ b/fs/udf/partition.c | |||
@@ -31,15 +31,18 @@ | |||
31 | inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, | 31 | inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, |
32 | uint16_t partition, uint32_t offset) | 32 | uint16_t partition, uint32_t offset) |
33 | { | 33 | { |
34 | if (partition >= UDF_SB_NUMPARTS(sb)) { | 34 | struct udf_sb_info *sbi = UDF_SB(sb); |
35 | udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n", | 35 | struct udf_part_map *map; |
36 | block, partition, offset); | 36 | if (partition >= sbi->s_partitions) { |
37 | udf_debug("block=%d, partition=%d, offset=%d: " | ||
38 | "invalid partition\n", block, partition, offset); | ||
37 | return 0xFFFFFFFF; | 39 | return 0xFFFFFFFF; |
38 | } | 40 | } |
39 | if (UDF_SB_PARTFUNC(sb, partition)) | 41 | map = &sbi->s_partmaps[partition]; |
40 | return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset); | 42 | if (map->s_partition_func) |
43 | return map->s_partition_func(sb, block, partition, offset); | ||
41 | else | 44 | else |
42 | return UDF_SB_PARTROOT(sb, partition) + block + offset; | 45 | return map->s_partition_root + block + offset; |
43 | } | 46 | } |
44 | 47 | ||
45 | uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, | 48 | uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, |
@@ -49,12 +52,18 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, | |||
49 | uint32_t newblock; | 52 | uint32_t newblock; |
50 | uint32_t index; | 53 | uint32_t index; |
51 | uint32_t loc; | 54 | uint32_t loc; |
55 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
56 | struct udf_part_map *map; | ||
57 | struct udf_virtual_data *vdata; | ||
58 | struct udf_inode_info *iinfo; | ||
52 | 59 | ||
53 | index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t); | 60 | map = &sbi->s_partmaps[partition]; |
61 | vdata = &map->s_type_specific.s_virtual; | ||
62 | index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t); | ||
54 | 63 | ||
55 | if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) { | 64 | if (block > vdata->s_num_entries) { |
56 | udf_debug("Trying to access block beyond end of VAT (%d max %d)\n", | 65 | udf_debug("Trying to access block beyond end of VAT " |
57 | block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries); | 66 | "(%d max %d)\n", block, vdata->s_num_entries); |
58 | return 0xFFFFFFFF; | 67 | return 0xFFFFFFFF; |
59 | } | 68 | } |
60 | 69 | ||
@@ -64,12 +73,13 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, | |||
64 | index = block % (sb->s_blocksize / sizeof(uint32_t)); | 73 | index = block % (sb->s_blocksize / sizeof(uint32_t)); |
65 | } else { | 74 | } else { |
66 | newblock = 0; | 75 | newblock = 0; |
67 | index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block; | 76 | index = vdata->s_start_offset / sizeof(uint32_t) + block; |
68 | } | 77 | } |
69 | 78 | ||
70 | loc = udf_block_map(UDF_SB_VAT(sb), newblock); | 79 | loc = udf_block_map(sbi->s_vat_inode, newblock); |
71 | 80 | ||
72 | if (!(bh = sb_bread(sb, loc))) { | 81 | bh = sb_bread(sb, loc); |
82 | if (!bh) { | ||
73 | udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n", | 83 | udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n", |
74 | sb, block, partition, loc, index); | 84 | sb, block, partition, loc, index); |
75 | return 0xFFFFFFFF; | 85 | return 0xFFFFFFFF; |
@@ -79,50 +89,61 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, | |||
79 | 89 | ||
80 | brelse(bh); | 90 | brelse(bh); |
81 | 91 | ||
82 | if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) { | 92 | iinfo = UDF_I(sbi->s_vat_inode); |
93 | if (iinfo->i_location.partitionReferenceNum == partition) { | ||
83 | udf_debug("recursive call to udf_get_pblock!\n"); | 94 | udf_debug("recursive call to udf_get_pblock!\n"); |
84 | return 0xFFFFFFFF; | 95 | return 0xFFFFFFFF; |
85 | } | 96 | } |
86 | 97 | ||
87 | return udf_get_pblock(sb, loc, | 98 | return udf_get_pblock(sb, loc, |
88 | UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum, | 99 | iinfo->i_location.partitionReferenceNum, |
89 | offset); | 100 | offset); |
90 | } | 101 | } |
91 | 102 | ||
92 | inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block, | 103 | inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, |
93 | uint16_t partition, uint32_t offset) | 104 | uint16_t partition, uint32_t offset) |
94 | { | 105 | { |
95 | return udf_get_pblock_virt15(sb, block, partition, offset); | 106 | return udf_get_pblock_virt15(sb, block, partition, offset); |
96 | } | 107 | } |
97 | 108 | ||
98 | uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block, | 109 | uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, |
99 | uint16_t partition, uint32_t offset) | 110 | uint16_t partition, uint32_t offset) |
100 | { | 111 | { |
101 | int i; | 112 | int i; |
102 | struct sparingTable *st = NULL; | 113 | struct sparingTable *st = NULL; |
103 | uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1); | 114 | struct udf_sb_info *sbi = UDF_SB(sb); |
115 | struct udf_part_map *map; | ||
116 | uint32_t packet; | ||
117 | struct udf_sparing_data *sdata; | ||
118 | |||
119 | map = &sbi->s_partmaps[partition]; | ||
120 | sdata = &map->s_type_specific.s_sparing; | ||
121 | packet = (block + offset) & ~(sdata->s_packet_len - 1); | ||
104 | 122 | ||
105 | for (i = 0; i < 4; i++) { | 123 | for (i = 0; i < 4; i++) { |
106 | if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) { | 124 | if (sdata->s_spar_map[i] != NULL) { |
107 | st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data; | 125 | st = (struct sparingTable *) |
126 | sdata->s_spar_map[i]->b_data; | ||
108 | break; | 127 | break; |
109 | } | 128 | } |
110 | } | 129 | } |
111 | 130 | ||
112 | if (st) { | 131 | if (st) { |
113 | for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { | 132 | for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { |
114 | if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) { | 133 | struct sparingEntry *entry = &st->mapEntry[i]; |
134 | u32 origLoc = le32_to_cpu(entry->origLocation); | ||
135 | if (origLoc >= 0xFFFFFFF0) | ||
115 | break; | 136 | break; |
116 | } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) { | 137 | else if (origLoc == packet) |
117 | return le32_to_cpu(st->mapEntry[i].mappedLocation) + | 138 | return le32_to_cpu(entry->mappedLocation) + |
118 | ((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1)); | 139 | ((block + offset) & |
119 | } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) { | 140 | (sdata->s_packet_len - 1)); |
141 | else if (origLoc > packet) | ||
120 | break; | 142 | break; |
121 | } | ||
122 | } | 143 | } |
123 | } | 144 | } |
124 | 145 | ||
125 | return UDF_SB_PARTROOT(sb,partition) + block + offset; | 146 | return map->s_partition_root + block + offset; |
126 | } | 147 | } |
127 | 148 | ||
128 | int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) | 149 | int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) |
@@ -132,69 +153,109 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) | |||
132 | struct sparingEntry mapEntry; | 153 | struct sparingEntry mapEntry; |
133 | uint32_t packet; | 154 | uint32_t packet; |
134 | int i, j, k, l; | 155 | int i, j, k, l; |
156 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
157 | u16 reallocationTableLen; | ||
158 | struct buffer_head *bh; | ||
135 | 159 | ||
136 | for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { | 160 | for (i = 0; i < sbi->s_partitions; i++) { |
137 | if (old_block > UDF_SB_PARTROOT(sb,i) && | 161 | struct udf_part_map *map = &sbi->s_partmaps[i]; |
138 | old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) { | 162 | if (old_block > map->s_partition_root && |
139 | sdata = &UDF_SB_TYPESPAR(sb,i); | 163 | old_block < map->s_partition_root + map->s_partition_len) { |
140 | packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1); | 164 | sdata = &map->s_type_specific.s_sparing; |
165 | packet = (old_block - map->s_partition_root) & | ||
166 | ~(sdata->s_packet_len - 1); | ||
141 | 167 | ||
142 | for (j = 0; j < 4; j++) { | 168 | for (j = 0; j < 4; j++) |
143 | if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) { | 169 | if (sdata->s_spar_map[j] != NULL) { |
144 | st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; | 170 | st = (struct sparingTable *) |
171 | sdata->s_spar_map[j]->b_data; | ||
145 | break; | 172 | break; |
146 | } | 173 | } |
147 | } | ||
148 | 174 | ||
149 | if (!st) | 175 | if (!st) |
150 | return 1; | 176 | return 1; |
151 | 177 | ||
152 | for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) { | 178 | reallocationTableLen = |
153 | if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) { | 179 | le16_to_cpu(st->reallocationTableLen); |
180 | for (k = 0; k < reallocationTableLen; k++) { | ||
181 | struct sparingEntry *entry = &st->mapEntry[k]; | ||
182 | u32 origLoc = le32_to_cpu(entry->origLocation); | ||
183 | |||
184 | if (origLoc == 0xFFFFFFFF) { | ||
154 | for (; j < 4; j++) { | 185 | for (; j < 4; j++) { |
155 | if (sdata->s_spar_map[j]) { | 186 | int len; |
156 | st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; | 187 | bh = sdata->s_spar_map[j]; |
157 | st->mapEntry[k].origLocation = cpu_to_le32(packet); | 188 | if (!bh) |
158 | udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry)); | 189 | continue; |
159 | mark_buffer_dirty(sdata->s_spar_map[j]); | 190 | |
160 | } | 191 | st = (struct sparingTable *) |
192 | bh->b_data; | ||
193 | entry->origLocation = | ||
194 | cpu_to_le32(packet); | ||
195 | len = | ||
196 | sizeof(struct sparingTable) + | ||
197 | reallocationTableLen * | ||
198 | sizeof(struct sparingEntry); | ||
199 | udf_update_tag((char *)st, len); | ||
200 | mark_buffer_dirty(bh); | ||
161 | } | 201 | } |
162 | *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + | 202 | *new_block = le32_to_cpu( |
163 | ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); | 203 | entry->mappedLocation) + |
204 | ((old_block - | ||
205 | map->s_partition_root) & | ||
206 | (sdata->s_packet_len - 1)); | ||
164 | return 0; | 207 | return 0; |
165 | } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) { | 208 | } else if (origLoc == packet) { |
166 | *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + | 209 | *new_block = le32_to_cpu( |
167 | ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); | 210 | entry->mappedLocation) + |
211 | ((old_block - | ||
212 | map->s_partition_root) & | ||
213 | (sdata->s_packet_len - 1)); | ||
168 | return 0; | 214 | return 0; |
169 | } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) { | 215 | } else if (origLoc > packet) |
170 | break; | 216 | break; |
171 | } | ||
172 | } | 217 | } |
173 | 218 | ||
174 | for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) { | 219 | for (l = k; l < reallocationTableLen; l++) { |
175 | if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) { | 220 | struct sparingEntry *entry = &st->mapEntry[l]; |
176 | for (; j < 4; j++) { | 221 | u32 origLoc = le32_to_cpu(entry->origLocation); |
177 | if (sdata->s_spar_map[j]) { | 222 | |
178 | st = (struct sparingTable *)sdata->s_spar_map[j]->b_data; | 223 | if (origLoc != 0xFFFFFFFF) |
179 | mapEntry = st->mapEntry[l]; | 224 | continue; |
180 | mapEntry.origLocation = cpu_to_le32(packet); | 225 | |
181 | memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry)); | 226 | for (; j < 4; j++) { |
182 | st->mapEntry[k] = mapEntry; | 227 | bh = sdata->s_spar_map[j]; |
183 | udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry)); | 228 | if (!bh) |
184 | mark_buffer_dirty(sdata->s_spar_map[j]); | 229 | continue; |
185 | } | 230 | |
186 | } | 231 | st = (struct sparingTable *)bh->b_data; |
187 | *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) + | 232 | mapEntry = st->mapEntry[l]; |
188 | ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1)); | 233 | mapEntry.origLocation = |
189 | return 0; | 234 | cpu_to_le32(packet); |
235 | memmove(&st->mapEntry[k + 1], | ||
236 | &st->mapEntry[k], | ||
237 | (l - k) * | ||
238 | sizeof(struct sparingEntry)); | ||
239 | st->mapEntry[k] = mapEntry; | ||
240 | udf_update_tag((char *)st, | ||
241 | sizeof(struct sparingTable) + | ||
242 | reallocationTableLen * | ||
243 | sizeof(struct sparingEntry)); | ||
244 | mark_buffer_dirty(bh); | ||
190 | } | 245 | } |
246 | *new_block = | ||
247 | le32_to_cpu( | ||
248 | st->mapEntry[k].mappedLocation) + | ||
249 | ((old_block - map->s_partition_root) & | ||
250 | (sdata->s_packet_len - 1)); | ||
251 | return 0; | ||
191 | } | 252 | } |
192 | 253 | ||
193 | return 1; | 254 | return 1; |
194 | } /* if old_block */ | 255 | } /* if old_block */ |
195 | } | 256 | } |
196 | 257 | ||
197 | if (i == UDF_SB_NUMPARTS(sb)) { | 258 | if (i == sbi->s_partitions) { |
198 | /* outside of partitions */ | 259 | /* outside of partitions */ |
199 | /* for now, fail =) */ | 260 | /* for now, fail =) */ |
200 | return 1; | 261 | return 1; |
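udf_get_pblock_spar15() above resolves a block through the sparing table: the request is rounded down to a packet boundary, the sorted mapEntry[] list is scanned for a matching origLocation, and on a hit the mappedLocation plus the in-packet offset is returned; otherwise the plain partition-root mapping applies. A self-contained sketch of that lookup, assuming a power-of-two packet length; struct and function names are illustrative, not the kernel's:

/* Illustrative packet-remapping lookup (not the kernel implementation). */
#include <stdint.h>
#include <stdio.h>

struct spar_entry { uint32_t orig; uint32_t mapped; };

static uint32_t remap_block(uint32_t block, uint32_t part_root,
			    uint32_t packet_len,		/* power of two */
			    const struct spar_entry *tab, int n)
{
	uint32_t packet = block & ~(packet_len - 1);		/* packet start */
	for (int i = 0; i < n; i++) {
		if (tab[i].orig >= 0xFFFFFFF0 || tab[i].orig > packet)
			break;					/* table is sorted */
		if (tab[i].orig == packet)
			return tab[i].mapped + (block & (packet_len - 1));
	}
	return part_root + block;				/* no remapping */
}

int main(void)
{
	struct spar_entry tab[] = { { 32, 9000 }, { 0xFFFFFFFF, 0 } };
	printf("%u\n", remap_block(35, 256, 16, tab, 2));	/* 9003: block in a spared packet */
	printf("%u\n", remap_block(70, 256, 16, tab, 2));	/* 326: untouched block */
	return 0;
}

udf_relocate_blocks() in the same file applies the same packet arithmetic when it installs a new mapping for a defective packet.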
diff --git a/fs/udf/super.c b/fs/udf/super.c index 4360c7a05743..f3ac4abfc946 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -33,8 +33,8 @@ | |||
33 | * 10/17/98 added freespace count for "df" | 33 | * 10/17/98 added freespace count for "df" |
34 | * 11/11/98 gr added novrs option | 34 | * 11/11/98 gr added novrs option |
35 | * 11/26/98 dgb added fileset,anchor mount options | 35 | * 11/26/98 dgb added fileset,anchor mount options |
36 | * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced vol descs | 36 | * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced |
37 | * rewrote option handling based on isofs | 37 | * vol descs. rewrote option handling based on isofs |
38 | * 12/20/98 find the free space bitmap (if it exists) | 38 | * 12/20/98 find the free space bitmap (if it exists) |
39 | */ | 39 | */ |
40 | 40 | ||
@@ -52,6 +52,9 @@ | |||
52 | #include <linux/buffer_head.h> | 52 | #include <linux/buffer_head.h> |
53 | #include <linux/vfs.h> | 53 | #include <linux/vfs.h> |
54 | #include <linux/vmalloc.h> | 54 | #include <linux/vmalloc.h> |
55 | #include <linux/errno.h> | ||
56 | #include <linux/mount.h> | ||
57 | #include <linux/seq_file.h> | ||
55 | #include <asm/byteorder.h> | 58 | #include <asm/byteorder.h> |
56 | 59 | ||
57 | #include <linux/udf_fs.h> | 60 | #include <linux/udf_fs.h> |
@@ -70,6 +73,8 @@ | |||
70 | #define VDS_POS_TERMINATING_DESC 6 | 73 | #define VDS_POS_TERMINATING_DESC 6 |
71 | #define VDS_POS_LENGTH 7 | 74 | #define VDS_POS_LENGTH 7 |
72 | 75 | ||
76 | #define UDF_DEFAULT_BLOCKSIZE 2048 | ||
77 | |||
73 | static char error_buf[1024]; | 78 | static char error_buf[1024]; |
74 | 79 | ||
75 | /* These are the "meat" - everything else is stuffing */ | 80 | /* These are the "meat" - everything else is stuffing */ |
@@ -94,6 +99,17 @@ static void udf_open_lvid(struct super_block *); | |||
94 | static void udf_close_lvid(struct super_block *); | 99 | static void udf_close_lvid(struct super_block *); |
95 | static unsigned int udf_count_free(struct super_block *); | 100 | static unsigned int udf_count_free(struct super_block *); |
96 | static int udf_statfs(struct dentry *, struct kstatfs *); | 101 | static int udf_statfs(struct dentry *, struct kstatfs *); |
102 | static int udf_show_options(struct seq_file *, struct vfsmount *); | ||
103 | |||
104 | struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi) | ||
105 | { | ||
106 | struct logicalVolIntegrityDesc *lvid = | ||
107 | (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data; | ||
108 | __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions); | ||
109 | __u32 offset = number_of_partitions * 2 * | ||
110 | sizeof(uint32_t)/sizeof(uint8_t); | ||
111 | return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]); | ||
112 | } | ||
97 | 113 | ||
98 | /* UDF filesystem type */ | 114 | /* UDF filesystem type */ |
99 | static int udf_get_sb(struct file_system_type *fs_type, | 115 | static int udf_get_sb(struct file_system_type *fs_type, |
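The new udf_sb_lvidiu() helper above locates the implementation-use area of the logical volume integrity descriptor by skipping the two per-partition uint32_t tables that precede it (the free-space and size tables), hence the numOfPartitions * 2 * sizeof(uint32_t) offset. A quick worked example, assuming a single partition:

	offset = 1 * 2 * sizeof(uint32_t) = 8 bytes
	=> the returned pointer is (struct logicalVolIntegrityDescImpUse *)&lvid->impUse[8]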
@@ -116,7 +132,7 @@ static struct kmem_cache *udf_inode_cachep; | |||
116 | static struct inode *udf_alloc_inode(struct super_block *sb) | 132 | static struct inode *udf_alloc_inode(struct super_block *sb) |
117 | { | 133 | { |
118 | struct udf_inode_info *ei; | 134 | struct udf_inode_info *ei; |
119 | ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); | 135 | ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); |
120 | if (!ei) | 136 | if (!ei) |
121 | return NULL; | 137 | return NULL; |
122 | 138 | ||
@@ -170,6 +186,7 @@ static const struct super_operations udf_sb_ops = { | |||
170 | .write_super = udf_write_super, | 186 | .write_super = udf_write_super, |
171 | .statfs = udf_statfs, | 187 | .statfs = udf_statfs, |
172 | .remount_fs = udf_remount_fs, | 188 | .remount_fs = udf_remount_fs, |
189 | .show_options = udf_show_options, | ||
173 | }; | 190 | }; |
174 | 191 | ||
175 | struct udf_options { | 192 | struct udf_options { |
@@ -218,6 +235,79 @@ static void __exit exit_udf_fs(void) | |||
218 | module_init(init_udf_fs) | 235 | module_init(init_udf_fs) |
219 | module_exit(exit_udf_fs) | 236 | module_exit(exit_udf_fs) |
220 | 237 | ||
238 | static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count) | ||
239 | { | ||
240 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
241 | |||
242 | sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map), | ||
243 | GFP_KERNEL); | ||
244 | if (!sbi->s_partmaps) { | ||
245 | udf_error(sb, __FUNCTION__, | ||
246 | "Unable to allocate space for %d partition maps", | ||
247 | count); | ||
248 | sbi->s_partitions = 0; | ||
249 | return -ENOMEM; | ||
250 | } | ||
251 | |||
252 | sbi->s_partitions = count; | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt) | ||
257 | { | ||
258 | struct super_block *sb = mnt->mnt_sb; | ||
259 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
260 | |||
261 | if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) | ||
262 | seq_puts(seq, ",nostrict"); | ||
263 | if (sb->s_blocksize != UDF_DEFAULT_BLOCKSIZE) | ||
264 | seq_printf(seq, ",bs=%lu", sb->s_blocksize); | ||
265 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE)) | ||
266 | seq_puts(seq, ",unhide"); | ||
267 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE)) | ||
268 | seq_puts(seq, ",undelete"); | ||
269 | if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB)) | ||
270 | seq_puts(seq, ",noadinicb"); | ||
271 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD)) | ||
272 | seq_puts(seq, ",shortad"); | ||
273 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET)) | ||
274 | seq_puts(seq, ",uid=forget"); | ||
275 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE)) | ||
276 | seq_puts(seq, ",uid=ignore"); | ||
277 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET)) | ||
278 | seq_puts(seq, ",gid=forget"); | ||
279 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE)) | ||
280 | seq_puts(seq, ",gid=ignore"); | ||
281 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET)) | ||
282 | seq_printf(seq, ",uid=%u", sbi->s_uid); | ||
283 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET)) | ||
284 | seq_printf(seq, ",gid=%u", sbi->s_gid); | ||
285 | if (sbi->s_umask != 0) | ||
286 | seq_printf(seq, ",umask=%o", sbi->s_umask); | ||
287 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET)) | ||
288 | seq_printf(seq, ",session=%u", sbi->s_session); | ||
289 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET)) | ||
290 | seq_printf(seq, ",lastblock=%u", sbi->s_last_block); | ||
291 | /* | ||
292 | * s_anchor[2] could be zeroed out in case there is no anchor | ||
293 | * in the specified block, but then the "anchor=N" option | ||
294 | * originally given by the user wasn't effective, so it's OK | ||
295 | * if we don't show it. | ||
296 | */ | ||
297 | if (sbi->s_anchor[2] != 0) | ||
298 | seq_printf(seq, ",anchor=%u", sbi->s_anchor[2]); | ||
299 | /* | ||
300 | * volume, partition, fileset and rootdir seem to be ignored | ||
301 | * currently | ||
302 | */ | ||
303 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) | ||
304 | seq_puts(seq, ",utf8"); | ||
305 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map) | ||
306 | seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
221 | /* | 311 | /* |
222 | * udf_parse_options | 312 | * udf_parse_options |
223 | * | 313 | * |
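Note: the udf_show_options() added above wires a .show_options callback into udf_sb_ops so the effective UDF mount options appear in /proc/mounts; it only emits options that differ from the defaults (blocksize only when it is not UDF_DEFAULT_BLOCKSIZE, uid=/gid= only when the corresponding *_SET flag was recorded while parsing, and so on). A rough userspace sketch of that "print only non-default options" pattern, using a plain bitmask and printf instead of the kernel's flag helpers and seq_file; the flag names and the 2048-byte default are illustrative:

/* Sketch of the emit-only-non-defaults pattern in udf_show_options(). */
#include <stdio.h>

#define OPT_UNHIDE   (1u << 0)
#define OPT_UID_SET  (1u << 1)
#define OPT_GID_SET  (1u << 2)

struct opts {
	unsigned flags;
	unsigned uid, gid, umask;
	unsigned long blocksize;
};

static void show_options(const struct opts *o)
{
	if (o->blocksize != 2048)            /* assumed default block size */
		printf(",bs=%lu", o->blocksize);
	if (o->flags & OPT_UNHIDE)
		printf(",unhide");
	if (o->flags & OPT_UID_SET)
		printf(",uid=%u", o->uid);
	if (o->flags & OPT_GID_SET)
		printf(",gid=%u", o->gid);
	if (o->umask != 0)
		printf(",umask=%o", o->umask);
	printf("\n");
}

int main(void)
{
	struct opts o = { OPT_UNHIDE | OPT_UID_SET, 1000, 0, 022, 512 };
	show_options(&o);   /* -> ,bs=512,unhide,uid=1000,umask=22 */
	return 0;
}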
@@ -310,13 +400,14 @@ static match_table_t tokens = { | |||
310 | {Opt_err, NULL} | 400 | {Opt_err, NULL} |
311 | }; | 401 | }; |
312 | 402 | ||
313 | static int udf_parse_options(char *options, struct udf_options *uopt) | 403 | static int udf_parse_options(char *options, struct udf_options *uopt, |
404 | bool remount) | ||
314 | { | 405 | { |
315 | char *p; | 406 | char *p; |
316 | int option; | 407 | int option; |
317 | 408 | ||
318 | uopt->novrs = 0; | 409 | uopt->novrs = 0; |
319 | uopt->blocksize = 2048; | 410 | uopt->blocksize = UDF_DEFAULT_BLOCKSIZE; |
320 | uopt->partition = 0xFFFF; | 411 | uopt->partition = 0xFFFF; |
321 | uopt->session = 0xFFFFFFFF; | 412 | uopt->session = 0xFFFFFFFF; |
322 | uopt->lastblock = 0; | 413 | uopt->lastblock = 0; |
@@ -386,11 +477,15 @@ static int udf_parse_options(char *options, struct udf_options *uopt) | |||
386 | if (match_int(args, &option)) | 477 | if (match_int(args, &option)) |
387 | return 0; | 478 | return 0; |
388 | uopt->session = option; | 479 | uopt->session = option; |
480 | if (!remount) | ||
481 | uopt->flags |= (1 << UDF_FLAG_SESSION_SET); | ||
389 | break; | 482 | break; |
390 | case Opt_lastblock: | 483 | case Opt_lastblock: |
391 | if (match_int(args, &option)) | 484 | if (match_int(args, &option)) |
392 | return 0; | 485 | return 0; |
393 | uopt->lastblock = option; | 486 | uopt->lastblock = option; |
487 | if (!remount) | ||
488 | uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); | ||
394 | break; | 489 | break; |
395 | case Opt_anchor: | 490 | case Opt_anchor: |
396 | if (match_int(args, &option)) | 491 | if (match_int(args, &option)) |
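Note: udf_parse_options() now takes a remount argument, and the UDF_FLAG_SESSION_SET / UDF_FLAG_LASTBLOCK_SET bits are only recorded on the initial mount; session= and lastblock= cannot re-shape an already-mounted volume, and udf_show_options() above uses these bits to decide whether to print the options at all. A minimal sketch of that guard, with made-up flag values:

/* Sketch of the record-the-flag-only-at-mount-time guard. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_SESSION_SET (1u << 0)

struct uopt {
	unsigned flags;
	unsigned session;
};

static void parse_session(struct uopt *u, unsigned value, bool remount)
{
	u->session = value;
	if (!remount)                        /* only remember it at mount time */
		u->flags |= FLAG_SESSION_SET;
}

int main(void)
{
	struct uopt u = { 0, 0 };
	parse_session(&u, 1, false);
	printf("mount:   flags=%#x session=%u\n", u.flags, u.session);
	parse_session(&u, 2, true);
	printf("remount: flags=%#x session=%u\n", u.flags, u.session);
	return 0;
}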
@@ -447,7 +542,7 @@ static int udf_parse_options(char *options, struct udf_options *uopt) | |||
447 | return 1; | 542 | return 1; |
448 | } | 543 | } |
449 | 544 | ||
450 | void udf_write_super(struct super_block *sb) | 545 | static void udf_write_super(struct super_block *sb) |
451 | { | 546 | { |
452 | lock_kernel(); | 547 | lock_kernel(); |
453 | 548 | ||
@@ -461,22 +556,23 @@ void udf_write_super(struct super_block *sb) | |||
461 | static int udf_remount_fs(struct super_block *sb, int *flags, char *options) | 556 | static int udf_remount_fs(struct super_block *sb, int *flags, char *options) |
462 | { | 557 | { |
463 | struct udf_options uopt; | 558 | struct udf_options uopt; |
559 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
464 | 560 | ||
465 | uopt.flags = UDF_SB(sb)->s_flags; | 561 | uopt.flags = sbi->s_flags; |
466 | uopt.uid = UDF_SB(sb)->s_uid; | 562 | uopt.uid = sbi->s_uid; |
467 | uopt.gid = UDF_SB(sb)->s_gid; | 563 | uopt.gid = sbi->s_gid; |
468 | uopt.umask = UDF_SB(sb)->s_umask; | 564 | uopt.umask = sbi->s_umask; |
469 | 565 | ||
470 | if (!udf_parse_options(options, &uopt)) | 566 | if (!udf_parse_options(options, &uopt, true)) |
471 | return -EINVAL; | 567 | return -EINVAL; |
472 | 568 | ||
473 | UDF_SB(sb)->s_flags = uopt.flags; | 569 | sbi->s_flags = uopt.flags; |
474 | UDF_SB(sb)->s_uid = uopt.uid; | 570 | sbi->s_uid = uopt.uid; |
475 | UDF_SB(sb)->s_gid = uopt.gid; | 571 | sbi->s_gid = uopt.gid; |
476 | UDF_SB(sb)->s_umask = uopt.umask; | 572 | sbi->s_umask = uopt.umask; |
477 | 573 | ||
478 | if (UDF_SB_LVIDBH(sb)) { | 574 | if (sbi->s_lvid_bh) { |
479 | int write_rev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev); | 575 | int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); |
480 | if (write_rev > UDF_MAX_WRITE_VERSION) | 576 | if (write_rev > UDF_MAX_WRITE_VERSION) |
481 | *flags |= MS_RDONLY; | 577 | *flags |= MS_RDONLY; |
482 | } | 578 | } |
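Note: the remount path now caches UDF_SB(sb) in a local sbi and reads minUDFWriteRev from the LVID implementation-use area through the new udf_sb_lvidiu() helper; if the medium requires a newer UDF write revision than the driver supports (UDF_MAX_WRITE_VERSION), the remount is degraded to read-only via MS_RDONLY. A tiny sketch of that comparison; the 0x0201 (UDF 2.01) maximum used here is an assumption for the example, not taken from this diff:

/* Sketch of the force-read-only-on-too-new-media check in udf_remount_fs(). */
#include <stdio.h>

#define MAX_WRITE_VERSION 0x0201   /* assumed supported maximum */
#define FLAG_RDONLY 0x1

static void check_write_rev(unsigned min_udf_write_rev, int *flags)
{
	if (min_udf_write_rev > MAX_WRITE_VERSION)
		*flags |= FLAG_RDONLY;
}

int main(void)
{
	int flags = 0;
	check_write_rev(0x0250, &flags);   /* disc needs a newer revision */
	printf("flags=%#x (read-only forced: %s)\n",
	       flags, (flags & FLAG_RDONLY) ? "yes" : "no");
	return 0;
}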
@@ -538,17 +634,19 @@ static int udf_vrs(struct super_block *sb, int silent) | |||
538 | int iso9660 = 0; | 634 | int iso9660 = 0; |
539 | int nsr02 = 0; | 635 | int nsr02 = 0; |
540 | int nsr03 = 0; | 636 | int nsr03 = 0; |
637 | struct udf_sb_info *sbi; | ||
541 | 638 | ||
542 | /* Block size must be a multiple of 512 */ | 639 | /* Block size must be a multiple of 512 */ |
543 | if (sb->s_blocksize & 511) | 640 | if (sb->s_blocksize & 511) |
544 | return 0; | 641 | return 0; |
642 | sbi = UDF_SB(sb); | ||
545 | 643 | ||
546 | if (sb->s_blocksize < sizeof(struct volStructDesc)) | 644 | if (sb->s_blocksize < sizeof(struct volStructDesc)) |
547 | sectorsize = sizeof(struct volStructDesc); | 645 | sectorsize = sizeof(struct volStructDesc); |
548 | else | 646 | else |
549 | sectorsize = sb->s_blocksize; | 647 | sectorsize = sb->s_blocksize; |
550 | 648 | ||
551 | sector += (UDF_SB_SESSION(sb) << sb->s_blocksize_bits); | 649 | sector += (sbi->s_session << sb->s_blocksize_bits); |
552 | 650 | ||
553 | udf_debug("Starting at sector %u (%ld byte sectors)\n", | 651 | udf_debug("Starting at sector %u (%ld byte sectors)\n", |
554 | (sector >> sb->s_blocksize_bits), sb->s_blocksize); | 652 | (sector >> sb->s_blocksize_bits), sb->s_blocksize); |
@@ -561,47 +659,52 @@ static int udf_vrs(struct super_block *sb, int silent) | |||
561 | 659 | ||
562 | /* Look for ISO descriptors */ | 660 | /* Look for ISO descriptors */ |
563 | vsd = (struct volStructDesc *)(bh->b_data + | 661 | vsd = (struct volStructDesc *)(bh->b_data + |
564 | (sector & (sb->s_blocksize - 1))); | 662 | (sector & (sb->s_blocksize - 1))); |
565 | 663 | ||
566 | if (vsd->stdIdent[0] == 0) { | 664 | if (vsd->stdIdent[0] == 0) { |
567 | brelse(bh); | 665 | brelse(bh); |
568 | break; | 666 | break; |
569 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) { | 667 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001, |
668 | VSD_STD_ID_LEN)) { | ||
570 | iso9660 = sector; | 669 | iso9660 = sector; |
571 | switch (vsd->structType) { | 670 | switch (vsd->structType) { |
572 | case 0: | 671 | case 0: |
573 | udf_debug("ISO9660 Boot Record found\n"); | 672 | udf_debug("ISO9660 Boot Record found\n"); |
574 | break; | 673 | break; |
575 | case 1: | 674 | case 1: |
576 | udf_debug | 675 | udf_debug("ISO9660 Primary Volume Descriptor " |
577 | ("ISO9660 Primary Volume Descriptor found\n"); | 676 | "found\n"); |
578 | break; | 677 | break; |
579 | case 2: | 678 | case 2: |
580 | udf_debug | 679 | udf_debug("ISO9660 Supplementary Volume " |
581 | ("ISO9660 Supplementary Volume Descriptor found\n"); | 680 | "Descriptor found\n"); |
582 | break; | 681 | break; |
583 | case 3: | 682 | case 3: |
584 | udf_debug | 683 | udf_debug("ISO9660 Volume Partition Descriptor " |
585 | ("ISO9660 Volume Partition Descriptor found\n"); | 684 | "found\n"); |
586 | break; | 685 | break; |
587 | case 255: | 686 | case 255: |
588 | udf_debug | 687 | udf_debug("ISO9660 Volume Descriptor Set " |
589 | ("ISO9660 Volume Descriptor Set Terminator found\n"); | 688 | "Terminator found\n"); |
590 | break; | 689 | break; |
591 | default: | 690 | default: |
592 | udf_debug("ISO9660 VRS (%u) found\n", | 691 | udf_debug("ISO9660 VRS (%u) found\n", |
593 | vsd->structType); | 692 | vsd->structType); |
594 | break; | 693 | break; |
595 | } | 694 | } |
596 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN)) { | 695 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01, |
597 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, VSD_STD_ID_LEN)) { | 696 | VSD_STD_ID_LEN)) |
697 | ; /* nothing */ | ||
698 | else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01, | ||
699 | VSD_STD_ID_LEN)) { | ||
598 | brelse(bh); | 700 | brelse(bh); |
599 | break; | 701 | break; |
600 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN)) { | 702 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02, |
703 | VSD_STD_ID_LEN)) | ||
601 | nsr02 = sector; | 704 | nsr02 = sector; |
602 | } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN)) { | 705 | else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03, |
706 | VSD_STD_ID_LEN)) | ||
603 | nsr03 = sector; | 707 | nsr03 = sector; |
604 | } | ||
605 | brelse(bh); | 708 | brelse(bh); |
606 | } | 709 | } |
607 | 710 | ||
@@ -609,7 +712,7 @@ static int udf_vrs(struct super_block *sb, int silent) | |||
609 | return nsr03; | 712 | return nsr03; |
610 | else if (nsr02) | 713 | else if (nsr02) |
611 | return nsr02; | 714 | return nsr02; |
612 | else if (sector - (UDF_SB_SESSION(sb) << sb->s_blocksize_bits) == 32768) | 715 | else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768) |
613 | return -1; | 716 | return -1; |
614 | else | 717 | else |
615 | return 0; | 718 | return 0; |
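Note: udf_vrs() walks the Volume Recognition Sequence starting one session offset into the device and classifies each volume structure descriptor by its 5-byte standard identifier: CD001 marks ISO 9660 descriptors, BEA01/TEA01 bracket the extended area, and NSR02/NSR03 mark an ECMA-167 (UDF) file system; the last NSR descriptor found determines the return value, and reaching byte 32768 with nothing found yields -1. A rough userspace sketch of that identifier classification (the identifier strings are the standard ECMA-167/ISO 9660 ones; everything else is simplified):

/* Sketch of the descriptor-identifier matching done in udf_vrs(). */
#include <stdio.h>
#include <string.h>

#define VSD_ID_LEN 5

static const char *classify(const char *ident)
{
	if (!strncmp(ident, "CD001", VSD_ID_LEN))
		return "ISO 9660 descriptor";
	if (!strncmp(ident, "BEA01", VSD_ID_LEN))
		return "begin extended area";
	if (!strncmp(ident, "TEA01", VSD_ID_LEN))
		return "terminate extended area (stop scanning)";
	if (!strncmp(ident, "NSR02", VSD_ID_LEN))
		return "ECMA-167 2nd edition (UDF) marker";
	if (!strncmp(ident, "NSR03", VSD_ID_LEN))
		return "ECMA-167 3rd edition (UDF) marker";
	return "unknown";
}

int main(void)
{
	const char *samples[] = { "CD001", "BEA01", "NSR02", "TEA01" };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%s -> %s\n", samples[i], classify(samples[i]));
	return 0;
}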
@@ -634,11 +737,15 @@ static int udf_vrs(struct super_block *sb, int silent) | |||
634 | */ | 737 | */ |
635 | static void udf_find_anchor(struct super_block *sb) | 738 | static void udf_find_anchor(struct super_block *sb) |
636 | { | 739 | { |
637 | int lastblock = UDF_SB_LASTBLOCK(sb); | 740 | int lastblock; |
638 | struct buffer_head *bh = NULL; | 741 | struct buffer_head *bh = NULL; |
639 | uint16_t ident; | 742 | uint16_t ident; |
640 | uint32_t location; | 743 | uint32_t location; |
641 | int i; | 744 | int i; |
745 | struct udf_sb_info *sbi; | ||
746 | |||
747 | sbi = UDF_SB(sb); | ||
748 | lastblock = sbi->s_last_block; | ||
642 | 749 | ||
643 | if (lastblock) { | 750 | if (lastblock) { |
644 | int varlastblock = udf_variable_to_fixed(lastblock); | 751 | int varlastblock = udf_variable_to_fixed(lastblock); |
@@ -658,57 +765,83 @@ static void udf_find_anchor(struct super_block *sb) | |||
658 | * however, if the disc isn't closed, it could be 512 */ | 765 | * however, if the disc isn't closed, it could be 512 */ |
659 | 766 | ||
660 | for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) { | 767 | for (i = 0; !lastblock && i < ARRAY_SIZE(last); i++) { |
661 | if (last[i] < 0 || !(bh = sb_bread(sb, last[i]))) { | 768 | ident = location = 0; |
662 | ident = location = 0; | 769 | if (last[i] >= 0) { |
663 | } else { | 770 | bh = sb_bread(sb, last[i]); |
664 | ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); | 771 | if (bh) { |
665 | location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); | 772 | tag *t = (tag *)bh->b_data; |
666 | brelse(bh); | 773 | ident = le16_to_cpu(t->tagIdent); |
774 | location = le32_to_cpu(t->tagLocation); | ||
775 | brelse(bh); | ||
776 | } | ||
667 | } | 777 | } |
668 | 778 | ||
669 | if (ident == TAG_IDENT_AVDP) { | 779 | if (ident == TAG_IDENT_AVDP) { |
670 | if (location == last[i] - UDF_SB_SESSION(sb)) { | 780 | if (location == last[i] - sbi->s_session) { |
671 | lastblock = UDF_SB_ANCHOR(sb)[0] = last[i] - UDF_SB_SESSION(sb); | 781 | lastblock = last[i] - sbi->s_session; |
672 | UDF_SB_ANCHOR(sb)[1] = last[i] - 256 - UDF_SB_SESSION(sb); | 782 | sbi->s_anchor[0] = lastblock; |
673 | } else if (location == udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb)) { | 783 | sbi->s_anchor[1] = lastblock - 256; |
784 | } else if (location == | ||
785 | udf_variable_to_fixed(last[i]) - | ||
786 | sbi->s_session) { | ||
674 | UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); | 787 | UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); |
675 | lastblock = UDF_SB_ANCHOR(sb)[0] = udf_variable_to_fixed(last[i]) - UDF_SB_SESSION(sb); | 788 | lastblock = |
676 | UDF_SB_ANCHOR(sb)[1] = lastblock - 256 - UDF_SB_SESSION(sb); | 789 | udf_variable_to_fixed(last[i]) - |
790 | sbi->s_session; | ||
791 | sbi->s_anchor[0] = lastblock; | ||
792 | sbi->s_anchor[1] = lastblock - 256 - | ||
793 | sbi->s_session; | ||
677 | } else { | 794 | } else { |
678 | udf_debug("Anchor found at block %d, location mismatch %d.\n", | 795 | udf_debug("Anchor found at block %d, " |
796 | "location mismatch %d.\n", | ||
679 | last[i], location); | 797 | last[i], location); |
680 | } | 798 | } |
681 | } else if (ident == TAG_IDENT_FE || ident == TAG_IDENT_EFE) { | 799 | } else if (ident == TAG_IDENT_FE || |
800 | ident == TAG_IDENT_EFE) { | ||
682 | lastblock = last[i]; | 801 | lastblock = last[i]; |
683 | UDF_SB_ANCHOR(sb)[3] = 512; | 802 | sbi->s_anchor[3] = 512; |
684 | } else { | 803 | } else { |
685 | if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256))) { | 804 | ident = location = 0; |
686 | ident = location = 0; | 805 | if (last[i] >= 256) { |
687 | } else { | 806 | bh = sb_bread(sb, last[i] - 256); |
688 | ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); | 807 | if (bh) { |
689 | location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); | 808 | tag *t = (tag *)bh->b_data; |
690 | brelse(bh); | 809 | ident = le16_to_cpu( |
810 | t->tagIdent); | ||
811 | location = le32_to_cpu( | ||
812 | t->tagLocation); | ||
813 | brelse(bh); | ||
814 | } | ||
691 | } | 815 | } |
692 | 816 | ||
693 | if (ident == TAG_IDENT_AVDP && | 817 | if (ident == TAG_IDENT_AVDP && |
694 | location == last[i] - 256 - UDF_SB_SESSION(sb)) { | 818 | location == last[i] - 256 - |
819 | sbi->s_session) { | ||
695 | lastblock = last[i]; | 820 | lastblock = last[i]; |
696 | UDF_SB_ANCHOR(sb)[1] = last[i] - 256; | 821 | sbi->s_anchor[1] = last[i] - 256; |
697 | } else { | 822 | } else { |
698 | if (last[i] < 312 + UDF_SB_SESSION(sb) || | 823 | ident = location = 0; |
699 | !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb)))) { | 824 | if (last[i] >= 312 + sbi->s_session) { |
700 | ident = location = 0; | 825 | bh = sb_bread(sb, |
701 | } else { | 826 | last[i] - 312 - |
702 | ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); | 827 | sbi->s_session); |
703 | location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); | 828 | if (bh) { |
704 | brelse(bh); | 829 | tag *t = (tag *) |
830 | bh->b_data; | ||
831 | ident = le16_to_cpu( | ||
832 | t->tagIdent); | ||
833 | location = le32_to_cpu( | ||
834 | t->tagLocation); | ||
835 | brelse(bh); | ||
836 | } | ||
705 | } | 837 | } |
706 | 838 | ||
707 | if (ident == TAG_IDENT_AVDP && | 839 | if (ident == TAG_IDENT_AVDP && |
708 | location == udf_variable_to_fixed(last[i]) - 256) { | 840 | location == udf_variable_to_fixed(last[i]) - 256) { |
709 | UDF_SET_FLAG(sb, UDF_FLAG_VARCONV); | 841 | UDF_SET_FLAG(sb, |
842 | UDF_FLAG_VARCONV); | ||
710 | lastblock = udf_variable_to_fixed(last[i]); | 843 | lastblock = udf_variable_to_fixed(last[i]); |
711 | UDF_SB_ANCHOR(sb)[1] = lastblock - 256; | 844 | sbi->s_anchor[1] = lastblock - 256; |
712 | } | 845 | } |
713 | } | 846 | } |
714 | } | 847 | } |
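Note: this is the core of the udf_find_anchor() restructuring. Each candidate last block is probed for an Anchor Volume Descriptor Pointer whose recorded tagLocation matches the block it was read from (possibly after udf_variable_to_fixed() translation for variable-packet media), and confirmed anchors are cached in sbi->s_anchor[]. The candidates follow the usual UDF placement, an anchor at block 256 and at or near the end of the medium (last block and last block - 256). A small sketch of deriving those candidate locations from a known last block; the session offset is assumed to be zero and no device I/O is modelled:

/* Sketch of the AVDP candidate blocks probed by udf_find_anchor(). */
#include <stdio.h>

int main(void)
{
	unsigned long lastblock = 1188420;   /* example medium capacity */
	unsigned long candidates[] = {
		256,               /* fixed anchor near the start */
		lastblock - 256,   /* anchor 256 blocks before the end */
		lastblock,         /* anchor in the very last block */
	};

	for (unsigned i = 0; i < 3; i++)
		printf("probe block %lu for an AVDP\n", candidates[i]);
	return 0;
}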
@@ -716,10 +849,12 @@ static void udf_find_anchor(struct super_block *sb) | |||
716 | } | 849 | } |
717 | 850 | ||
718 | if (!lastblock) { | 851 | if (!lastblock) { |
719 | /* We havn't found the lastblock. check 312 */ | 852 | /* We haven't found the lastblock. check 312 */ |
720 | if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb)))) { | 853 | bh = sb_bread(sb, 312 + sbi->s_session); |
721 | ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent); | 854 | if (bh) { |
722 | location = le32_to_cpu(((tag *)bh->b_data)->tagLocation); | 855 | tag *t = (tag *)bh->b_data; |
856 | ident = le16_to_cpu(t->tagIdent); | ||
857 | location = le32_to_cpu(t->tagLocation); | ||
723 | brelse(bh); | 858 | brelse(bh); |
724 | 859 | ||
725 | if (ident == TAG_IDENT_AVDP && location == 256) | 860 | if (ident == TAG_IDENT_AVDP && location == 256) |
@@ -727,29 +862,33 @@ static void udf_find_anchor(struct super_block *sb) | |||
727 | } | 862 | } |
728 | } | 863 | } |
729 | 864 | ||
730 | for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { | 865 | for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) { |
731 | if (UDF_SB_ANCHOR(sb)[i]) { | 866 | if (sbi->s_anchor[i]) { |
732 | if (!(bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i], | 867 | bh = udf_read_tagged(sb, sbi->s_anchor[i], |
733 | UDF_SB_ANCHOR(sb)[i], &ident))) { | 868 | sbi->s_anchor[i], &ident); |
734 | UDF_SB_ANCHOR(sb)[i] = 0; | 869 | if (!bh) |
735 | } else { | 870 | sbi->s_anchor[i] = 0; |
871 | else { | ||
736 | brelse(bh); | 872 | brelse(bh); |
737 | if ((ident != TAG_IDENT_AVDP) && | 873 | if ((ident != TAG_IDENT_AVDP) && |
738 | (i || (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE))) { | 874 | (i || (ident != TAG_IDENT_FE && |
739 | UDF_SB_ANCHOR(sb)[i] = 0; | 875 | ident != TAG_IDENT_EFE))) |
740 | } | 876 | sbi->s_anchor[i] = 0; |
741 | } | 877 | } |
742 | } | 878 | } |
743 | } | 879 | } |
744 | 880 | ||
745 | UDF_SB_LASTBLOCK(sb) = lastblock; | 881 | sbi->s_last_block = lastblock; |
746 | } | 882 | } |
747 | 883 | ||
748 | static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, kernel_lb_addr *root) | 884 | static int udf_find_fileset(struct super_block *sb, |
885 | kernel_lb_addr *fileset, | ||
886 | kernel_lb_addr *root) | ||
749 | { | 887 | { |
750 | struct buffer_head *bh = NULL; | 888 | struct buffer_head *bh = NULL; |
751 | long lastblock; | 889 | long lastblock; |
752 | uint16_t ident; | 890 | uint16_t ident; |
891 | struct udf_sb_info *sbi; | ||
753 | 892 | ||
754 | if (fileset->logicalBlockNum != 0xFFFFFFFF || | 893 | if (fileset->logicalBlockNum != 0xFFFFFFFF || |
755 | fileset->partitionReferenceNum != 0xFFFF) { | 894 | fileset->partitionReferenceNum != 0xFFFF) { |
@@ -764,22 +903,27 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker | |||
764 | 903 | ||
765 | } | 904 | } |
766 | 905 | ||
767 | if (!bh) { /* Search backwards through the partitions */ | 906 | sbi = UDF_SB(sb); |
907 | if (!bh) { | ||
908 | /* Search backwards through the partitions */ | ||
768 | kernel_lb_addr newfileset; | 909 | kernel_lb_addr newfileset; |
769 | 910 | ||
770 | /* --> cvg: FIXME - is it reasonable? */ | 911 | /* --> cvg: FIXME - is it reasonable? */ |
771 | return 1; | 912 | return 1; |
772 | 913 | ||
773 | for (newfileset.partitionReferenceNum = UDF_SB_NUMPARTS(sb) - 1; | 914 | for (newfileset.partitionReferenceNum = sbi->s_partitions - 1; |
774 | (newfileset.partitionReferenceNum != 0xFFFF && | 915 | (newfileset.partitionReferenceNum != 0xFFFF && |
775 | fileset->logicalBlockNum == 0xFFFFFFFF && | 916 | fileset->logicalBlockNum == 0xFFFFFFFF && |
776 | fileset->partitionReferenceNum == 0xFFFF); | 917 | fileset->partitionReferenceNum == 0xFFFF); |
777 | newfileset.partitionReferenceNum--) { | 918 | newfileset.partitionReferenceNum--) { |
778 | lastblock = UDF_SB_PARTLEN(sb, newfileset.partitionReferenceNum); | 919 | lastblock = sbi->s_partmaps |
920 | [newfileset.partitionReferenceNum] | ||
921 | .s_partition_len; | ||
779 | newfileset.logicalBlockNum = 0; | 922 | newfileset.logicalBlockNum = 0; |
780 | 923 | ||
781 | do { | 924 | do { |
782 | bh = udf_read_ptagged(sb, newfileset, 0, &ident); | 925 | bh = udf_read_ptagged(sb, newfileset, 0, |
926 | &ident); | ||
783 | if (!bh) { | 927 | if (!bh) { |
784 | newfileset.logicalBlockNum++; | 928 | newfileset.logicalBlockNum++; |
785 | continue; | 929 | continue; |
@@ -789,11 +933,12 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker | |||
789 | case TAG_IDENT_SBD: | 933 | case TAG_IDENT_SBD: |
790 | { | 934 | { |
791 | struct spaceBitmapDesc *sp; | 935 | struct spaceBitmapDesc *sp; |
792 | sp = (struct spaceBitmapDesc *)bh->b_data; | 936 | sp = (struct spaceBitmapDesc *) |
937 | bh->b_data; | ||
793 | newfileset.logicalBlockNum += 1 + | 938 | newfileset.logicalBlockNum += 1 + |
794 | ((le32_to_cpu(sp->numOfBytes) + | 939 | ((le32_to_cpu(sp->numOfBytes) + |
795 | sizeof(struct spaceBitmapDesc) - 1) | 940 | sizeof(struct spaceBitmapDesc) |
796 | >> sb->s_blocksize_bits); | 941 | - 1) >> sb->s_blocksize_bits); |
797 | brelse(bh); | 942 | brelse(bh); |
798 | break; | 943 | break; |
799 | } | 944 | } |
@@ -818,7 +963,7 @@ static int udf_find_fileset(struct super_block *sb, kernel_lb_addr *fileset, ker | |||
818 | fileset->logicalBlockNum, | 963 | fileset->logicalBlockNum, |
819 | fileset->partitionReferenceNum); | 964 | fileset->partitionReferenceNum); |
820 | 965 | ||
821 | UDF_SB_PARTITION(sb) = fileset->partitionReferenceNum; | 966 | sbi->s_partition = fileset->partitionReferenceNum; |
822 | udf_load_fileset(sb, bh, root); | 967 | udf_load_fileset(sb, bh, root); |
823 | brelse(bh); | 968 | brelse(bh); |
824 | return 0; | 969 | return 0; |
@@ -840,26 +985,26 @@ static void udf_load_pvoldesc(struct super_block *sb, struct buffer_head *bh) | |||
840 | lets_to_cpu(pvoldesc->recordingDateAndTime))) { | 985 | lets_to_cpu(pvoldesc->recordingDateAndTime))) { |
841 | kernel_timestamp ts; | 986 | kernel_timestamp ts; |
842 | ts = lets_to_cpu(pvoldesc->recordingDateAndTime); | 987 | ts = lets_to_cpu(pvoldesc->recordingDateAndTime); |
843 | udf_debug("recording time %ld/%ld, %04u/%02u/%02u %02u:%02u (%x)\n", | 988 | udf_debug("recording time %ld/%ld, %04u/%02u/%02u" |
989 | " %02u:%02u (%x)\n", | ||
844 | recording, recording_usec, | 990 | recording, recording_usec, |
845 | ts.year, ts.month, ts.day, ts.hour, | 991 | ts.year, ts.month, ts.day, ts.hour, |
846 | ts.minute, ts.typeAndTimezone); | 992 | ts.minute, ts.typeAndTimezone); |
847 | UDF_SB_RECORDTIME(sb).tv_sec = recording; | 993 | UDF_SB(sb)->s_record_time.tv_sec = recording; |
848 | UDF_SB_RECORDTIME(sb).tv_nsec = recording_usec * 1000; | 994 | UDF_SB(sb)->s_record_time.tv_nsec = recording_usec * 1000; |
849 | } | 995 | } |
850 | 996 | ||
851 | if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) { | 997 | if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32)) |
852 | if (udf_CS0toUTF8(&outstr, &instr)) { | 998 | if (udf_CS0toUTF8(&outstr, &instr)) { |
853 | strncpy(UDF_SB_VOLIDENT(sb), outstr.u_name, | 999 | strncpy(UDF_SB(sb)->s_volume_ident, outstr.u_name, |
854 | outstr.u_len > 31 ? 31 : outstr.u_len); | 1000 | outstr.u_len > 31 ? 31 : outstr.u_len); |
855 | udf_debug("volIdent[] = '%s'\n", UDF_SB_VOLIDENT(sb)); | 1001 | udf_debug("volIdent[] = '%s'\n", |
1002 | UDF_SB(sb)->s_volume_ident); | ||
856 | } | 1003 | } |
857 | } | ||
858 | 1004 | ||
859 | if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) { | 1005 | if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128)) |
860 | if (udf_CS0toUTF8(&outstr, &instr)) | 1006 | if (udf_CS0toUTF8(&outstr, &instr)) |
861 | udf_debug("volSetIdent[] = '%s'\n", outstr.u_name); | 1007 | udf_debug("volSetIdent[] = '%s'\n", outstr.u_name); |
862 | } | ||
863 | } | 1008 | } |
864 | 1009 | ||
865 | static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, | 1010 | static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, |
@@ -871,65 +1016,124 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh, | |||
871 | 1016 | ||
872 | *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); | 1017 | *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation); |
873 | 1018 | ||
874 | UDF_SB_SERIALNUM(sb) = le16_to_cpu(fset->descTag.tagSerialNum); | 1019 | UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum); |
875 | 1020 | ||
876 | udf_debug("Rootdir at block=%d, partition=%d\n", | 1021 | udf_debug("Rootdir at block=%d, partition=%d\n", |
877 | root->logicalBlockNum, root->partitionReferenceNum); | 1022 | root->logicalBlockNum, root->partitionReferenceNum); |
878 | } | 1023 | } |
879 | 1024 | ||
1025 | int udf_compute_nr_groups(struct super_block *sb, u32 partition) | ||
1026 | { | ||
1027 | struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; | ||
1028 | return (map->s_partition_len + | ||
1029 | (sizeof(struct spaceBitmapDesc) << 3) + | ||
1030 | (sb->s_blocksize * 8) - 1) / | ||
1031 | (sb->s_blocksize * 8); | ||
1032 | } | ||
1033 | |||
1034 | static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) | ||
1035 | { | ||
1036 | struct udf_bitmap *bitmap; | ||
1037 | int nr_groups; | ||
1038 | int size; | ||
1039 | |||
1040 | nr_groups = udf_compute_nr_groups(sb, index); | ||
1041 | size = sizeof(struct udf_bitmap) + | ||
1042 | (sizeof(struct buffer_head *) * nr_groups); | ||
1043 | |||
1044 | if (size <= PAGE_SIZE) | ||
1045 | bitmap = kmalloc(size, GFP_KERNEL); | ||
1046 | else | ||
1047 | bitmap = vmalloc(size); /* TODO: get rid of vmalloc */ | ||
1048 | |||
1049 | if (bitmap == NULL) { | ||
1050 | udf_error(sb, __FUNCTION__, | ||
1051 | "Unable to allocate space for bitmap " | ||
1052 | "and %d buffer_head pointers", nr_groups); | ||
1053 | return NULL; | ||
1054 | } | ||
1055 | |||
1056 | memset(bitmap, 0x00, size); | ||
1057 | bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1); | ||
1058 | bitmap->s_nr_groups = nr_groups; | ||
1059 | return bitmap; | ||
1060 | } | ||
1061 | |||
880 | static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) | 1062 | static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) |
881 | { | 1063 | { |
882 | struct partitionDesc *p; | 1064 | struct partitionDesc *p; |
883 | int i; | 1065 | int i; |
1066 | struct udf_part_map *map; | ||
1067 | struct udf_sb_info *sbi; | ||
884 | 1068 | ||
885 | p = (struct partitionDesc *)bh->b_data; | 1069 | p = (struct partitionDesc *)bh->b_data; |
1070 | sbi = UDF_SB(sb); | ||
886 | 1071 | ||
887 | for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { | 1072 | for (i = 0; i < sbi->s_partitions; i++) { |
1073 | map = &sbi->s_partmaps[i]; | ||
888 | udf_debug("Searching map: (%d == %d)\n", | 1074 | udf_debug("Searching map: (%d == %d)\n", |
889 | UDF_SB_PARTMAPS(sb)[i].s_partition_num, le16_to_cpu(p->partitionNumber)); | 1075 | map->s_partition_num, |
890 | if (UDF_SB_PARTMAPS(sb)[i].s_partition_num == le16_to_cpu(p->partitionNumber)) { | 1076 | le16_to_cpu(p->partitionNumber)); |
891 | UDF_SB_PARTLEN(sb,i) = le32_to_cpu(p->partitionLength); /* blocks */ | 1077 | if (map->s_partition_num == |
892 | UDF_SB_PARTROOT(sb,i) = le32_to_cpu(p->partitionStartingLocation); | 1078 | le16_to_cpu(p->partitionNumber)) { |
893 | if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_READ_ONLY) | 1079 | map->s_partition_len = |
894 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_READ_ONLY; | 1080 | le32_to_cpu(p->partitionLength); /* blocks */ |
895 | if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_WRITE_ONCE) | 1081 | map->s_partition_root = |
896 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_WRITE_ONCE; | 1082 | le32_to_cpu(p->partitionStartingLocation); |
897 | if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_REWRITABLE) | 1083 | if (p->accessType == |
898 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_REWRITABLE; | 1084 | cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY)) |
899 | if (le32_to_cpu(p->accessType) == PD_ACCESS_TYPE_OVERWRITABLE) | 1085 | map->s_partition_flags |= |
900 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_OVERWRITABLE; | 1086 | UDF_PART_FLAG_READ_ONLY; |
901 | 1087 | if (p->accessType == | |
902 | if (!strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) || | 1088 | cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE)) |
903 | !strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03)) { | 1089 | map->s_partition_flags |= |
1090 | UDF_PART_FLAG_WRITE_ONCE; | ||
1091 | if (p->accessType == | ||
1092 | cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE)) | ||
1093 | map->s_partition_flags |= | ||
1094 | UDF_PART_FLAG_REWRITABLE; | ||
1095 | if (p->accessType == | ||
1096 | cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE)) | ||
1097 | map->s_partition_flags |= | ||
1098 | UDF_PART_FLAG_OVERWRITABLE; | ||
1099 | |||
1100 | if (!strcmp(p->partitionContents.ident, | ||
1101 | PD_PARTITION_CONTENTS_NSR02) || | ||
1102 | !strcmp(p->partitionContents.ident, | ||
1103 | PD_PARTITION_CONTENTS_NSR03)) { | ||
904 | struct partitionHeaderDesc *phd; | 1104 | struct partitionHeaderDesc *phd; |
905 | 1105 | ||
906 | phd = (struct partitionHeaderDesc *)(p->partitionContentsUse); | 1106 | phd = (struct partitionHeaderDesc *) |
1107 | (p->partitionContentsUse); | ||
907 | if (phd->unallocSpaceTable.extLength) { | 1108 | if (phd->unallocSpaceTable.extLength) { |
908 | kernel_lb_addr loc = { | 1109 | kernel_lb_addr loc = { |
909 | .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition), | 1110 | .logicalBlockNum = le32_to_cpu(phd->unallocSpaceTable.extPosition), |
910 | .partitionReferenceNum = i, | 1111 | .partitionReferenceNum = i, |
911 | }; | 1112 | }; |
912 | 1113 | ||
913 | UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table = | 1114 | map->s_uspace.s_table = |
914 | udf_iget(sb, loc); | 1115 | udf_iget(sb, loc); |
915 | if (!UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table) { | 1116 | if (!map->s_uspace.s_table) { |
916 | udf_debug("cannot load unallocSpaceTable (part %d)\n", i); | 1117 | udf_debug("cannot load unallocSpaceTable (part %d)\n", i); |
917 | return 1; | 1118 | return 1; |
918 | } | 1119 | } |
919 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_TABLE; | 1120 | map->s_partition_flags |= |
1121 | UDF_PART_FLAG_UNALLOC_TABLE; | ||
920 | udf_debug("unallocSpaceTable (part %d) @ %ld\n", | 1122 | udf_debug("unallocSpaceTable (part %d) @ %ld\n", |
921 | i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_table->i_ino); | 1123 | i, map->s_uspace.s_table->i_ino); |
922 | } | 1124 | } |
923 | if (phd->unallocSpaceBitmap.extLength) { | 1125 | if (phd->unallocSpaceBitmap.extLength) { |
924 | UDF_SB_ALLOC_BITMAP(sb, i, s_uspace); | 1126 | struct udf_bitmap *bitmap = |
925 | if (UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap != NULL) { | 1127 | udf_sb_alloc_bitmap(sb, i); |
926 | UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extLength = | 1128 | map->s_uspace.s_bitmap = bitmap; |
1129 | if (bitmap != NULL) { | ||
1130 | bitmap->s_extLength = | ||
927 | le32_to_cpu(phd->unallocSpaceBitmap.extLength); | 1131 | le32_to_cpu(phd->unallocSpaceBitmap.extLength); |
928 | UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition = | 1132 | bitmap->s_extPosition = |
929 | le32_to_cpu(phd->unallocSpaceBitmap.extPosition); | 1133 | le32_to_cpu(phd->unallocSpaceBitmap.extPosition); |
930 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_UNALLOC_BITMAP; | 1134 | map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; |
931 | udf_debug("unallocSpaceBitmap (part %d) @ %d\n", | 1135 | udf_debug("unallocSpaceBitmap (part %d) @ %d\n", |
932 | i, UDF_SB_PARTMAPS(sb)[i].s_uspace.s_bitmap->s_extPosition); | 1136 | i, bitmap->s_extPosition); |
933 | } | 1137 | } |
934 | } | 1138 | } |
935 | if (phd->partitionIntegrityTable.extLength) | 1139 | if (phd->partitionIntegrityTable.extLength) |
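Note: the hunk above also introduces udf_compute_nr_groups() and udf_sb_alloc_bitmap() in place of the old UDF_SB_ALLOC_BITMAP() macro. The number of bitmap groups is the partition length plus the spaceBitmapDesc header (converted to bits), divided by the bits in one block and rounded up; the bitmap plus its array of buffer_head pointers is then kmalloc'ed when it fits in a page and vmalloc'ed otherwise. A standalone sketch of the rounding-up arithmetic; the 24-byte header and the block/partition sizes are example values only (the kernel uses sizeof(struct spaceBitmapDesc)):

/* Sketch of the group-count arithmetic in udf_compute_nr_groups():
 * ceil((partition_len + header_bits) / bits_per_block). */
#include <stdio.h>

static unsigned long nr_groups(unsigned long partition_len,
			       unsigned long blocksize,
			       unsigned long header_bytes)
{
	unsigned long bits_per_block = blocksize * 8;

	return (partition_len + header_bytes * 8 + bits_per_block - 1) /
	       bits_per_block;
}

int main(void)
{
	/* 1,000,000-block partition, 2048-byte blocks, 24-byte header */
	printf("nr_groups = %lu\n", nr_groups(1000000, 2048, 24));
	return 0;
}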
@@ -940,40 +1144,45 @@ static int udf_load_partdesc(struct super_block *sb, struct buffer_head *bh) | |||
940 | .partitionReferenceNum = i, | 1144 | .partitionReferenceNum = i, |
941 | }; | 1145 | }; |
942 | 1146 | ||
943 | UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table = | 1147 | map->s_fspace.s_table = |
944 | udf_iget(sb, loc); | 1148 | udf_iget(sb, loc); |
945 | if (!UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table) { | 1149 | if (!map->s_fspace.s_table) { |
946 | udf_debug("cannot load freedSpaceTable (part %d)\n", i); | 1150 | udf_debug("cannot load freedSpaceTable (part %d)\n", i); |
947 | return 1; | 1151 | return 1; |
948 | } | 1152 | } |
949 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_TABLE; | 1153 | map->s_partition_flags |= |
1154 | UDF_PART_FLAG_FREED_TABLE; | ||
950 | udf_debug("freedSpaceTable (part %d) @ %ld\n", | 1155 | udf_debug("freedSpaceTable (part %d) @ %ld\n", |
951 | i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_table->i_ino); | 1156 | i, map->s_fspace.s_table->i_ino); |
952 | } | 1157 | } |
953 | if (phd->freedSpaceBitmap.extLength) { | 1158 | if (phd->freedSpaceBitmap.extLength) { |
954 | UDF_SB_ALLOC_BITMAP(sb, i, s_fspace); | 1159 | struct udf_bitmap *bitmap = |
955 | if (UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap != NULL) { | 1160 | udf_sb_alloc_bitmap(sb, i); |
956 | UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extLength = | 1161 | map->s_fspace.s_bitmap = bitmap; |
1162 | if (bitmap != NULL) { | ||
1163 | bitmap->s_extLength = | ||
957 | le32_to_cpu(phd->freedSpaceBitmap.extLength); | 1164 | le32_to_cpu(phd->freedSpaceBitmap.extLength); |
958 | UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition = | 1165 | bitmap->s_extPosition = |
959 | le32_to_cpu(phd->freedSpaceBitmap.extPosition); | 1166 | le32_to_cpu(phd->freedSpaceBitmap.extPosition); |
960 | UDF_SB_PARTFLAGS(sb,i) |= UDF_PART_FLAG_FREED_BITMAP; | 1167 | map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP; |
961 | udf_debug("freedSpaceBitmap (part %d) @ %d\n", | 1168 | udf_debug("freedSpaceBitmap (part %d) @ %d\n", |
962 | i, UDF_SB_PARTMAPS(sb)[i].s_fspace.s_bitmap->s_extPosition); | 1169 | i, bitmap->s_extPosition); |
963 | } | 1170 | } |
964 | } | 1171 | } |
965 | } | 1172 | } |
966 | break; | 1173 | break; |
967 | } | 1174 | } |
968 | } | 1175 | } |
969 | if (i == UDF_SB_NUMPARTS(sb)) { | 1176 | if (i == sbi->s_partitions) |
970 | udf_debug("Partition (%d) not found in partition map\n", | 1177 | udf_debug("Partition (%d) not found in partition map\n", |
971 | le16_to_cpu(p->partitionNumber)); | 1178 | le16_to_cpu(p->partitionNumber)); |
972 | } else { | 1179 | else |
973 | udf_debug("Partition (%d:%d type %x) starts at physical %d, block length %d\n", | 1180 | udf_debug("Partition (%d:%d type %x) starts at physical %d, " |
974 | le16_to_cpu(p->partitionNumber), i, UDF_SB_PARTTYPE(sb,i), | 1181 | "block length %d\n", |
975 | UDF_SB_PARTROOT(sb,i), UDF_SB_PARTLEN(sb,i)); | 1182 | le16_to_cpu(p->partitionNumber), i, |
976 | } | 1183 | map->s_partition_type, |
1184 | map->s_partition_root, | ||
1185 | map->s_partition_len); | ||
977 | return 0; | 1186 | return 0; |
978 | } | 1187 | } |
979 | 1188 | ||
@@ -983,70 +1192,105 @@ static int udf_load_logicalvol(struct super_block *sb, struct buffer_head *bh, | |||
983 | struct logicalVolDesc *lvd; | 1192 | struct logicalVolDesc *lvd; |
984 | int i, j, offset; | 1193 | int i, j, offset; |
985 | uint8_t type; | 1194 | uint8_t type; |
1195 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
1196 | struct genericPartitionMap *gpm; | ||
986 | 1197 | ||
987 | lvd = (struct logicalVolDesc *)bh->b_data; | 1198 | lvd = (struct logicalVolDesc *)bh->b_data; |
988 | 1199 | ||
989 | UDF_SB_ALLOC_PARTMAPS(sb, le32_to_cpu(lvd->numPartitionMaps)); | 1200 | i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); |
1201 | if (i != 0) | ||
1202 | return i; | ||
990 | 1203 | ||
991 | for (i = 0, offset = 0; | 1204 | for (i = 0, offset = 0; |
992 | i < UDF_SB_NUMPARTS(sb) && offset < le32_to_cpu(lvd->mapTableLength); | 1205 | i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength); |
993 | i++, offset += ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapLength) { | 1206 | i++, offset += gpm->partitionMapLength) { |
994 | type = ((struct genericPartitionMap *)&(lvd->partitionMaps[offset]))->partitionMapType; | 1207 | struct udf_part_map *map = &sbi->s_partmaps[i]; |
1208 | gpm = (struct genericPartitionMap *) | ||
1209 | &(lvd->partitionMaps[offset]); | ||
1210 | type = gpm->partitionMapType; | ||
995 | if (type == 1) { | 1211 | if (type == 1) { |
996 | struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)&(lvd->partitionMaps[offset]); | 1212 | struct genericPartitionMap1 *gpm1 = |
997 | UDF_SB_PARTTYPE(sb,i) = UDF_TYPE1_MAP15; | 1213 | (struct genericPartitionMap1 *)gpm; |
998 | UDF_SB_PARTVSN(sb,i) = le16_to_cpu(gpm1->volSeqNum); | 1214 | map->s_partition_type = UDF_TYPE1_MAP15; |
999 | UDF_SB_PARTNUM(sb,i) = le16_to_cpu(gpm1->partitionNum); | 1215 | map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); |
1000 | UDF_SB_PARTFUNC(sb,i) = NULL; | 1216 | map->s_partition_num = le16_to_cpu(gpm1->partitionNum); |
1217 | map->s_partition_func = NULL; | ||
1001 | } else if (type == 2) { | 1218 | } else if (type == 2) { |
1002 | struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)&(lvd->partitionMaps[offset]); | 1219 | struct udfPartitionMap2 *upm2 = |
1003 | if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) { | 1220 | (struct udfPartitionMap2 *)gpm; |
1004 | if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0150) { | 1221 | if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, |
1005 | UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP15; | 1222 | strlen(UDF_ID_VIRTUAL))) { |
1006 | UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt15; | 1223 | u16 suf = |
1007 | } else if (le16_to_cpu(((__le16 *)upm2->partIdent.identSuffix)[0]) == 0x0200) { | 1224 | le16_to_cpu(((__le16 *)upm2->partIdent. |
1008 | UDF_SB_PARTTYPE(sb,i) = UDF_VIRTUAL_MAP20; | 1225 | identSuffix)[0]); |
1009 | UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_virt20; | 1226 | if (suf == 0x0150) { |
1227 | map->s_partition_type = | ||
1228 | UDF_VIRTUAL_MAP15; | ||
1229 | map->s_partition_func = | ||
1230 | udf_get_pblock_virt15; | ||
1231 | } else if (suf == 0x0200) { | ||
1232 | map->s_partition_type = | ||
1233 | UDF_VIRTUAL_MAP20; | ||
1234 | map->s_partition_func = | ||
1235 | udf_get_pblock_virt20; | ||
1010 | } | 1236 | } |
1011 | } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { | 1237 | } else if (!strncmp(upm2->partIdent.ident, |
1238 | UDF_ID_SPARABLE, | ||
1239 | strlen(UDF_ID_SPARABLE))) { | ||
1012 | uint32_t loc; | 1240 | uint32_t loc; |
1013 | uint16_t ident; | 1241 | uint16_t ident; |
1014 | struct sparingTable *st; | 1242 | struct sparingTable *st; |
1015 | struct sparablePartitionMap *spm = (struct sparablePartitionMap *)&(lvd->partitionMaps[offset]); | 1243 | struct sparablePartitionMap *spm = |
1244 | (struct sparablePartitionMap *)gpm; | ||
1016 | 1245 | ||
1017 | UDF_SB_PARTTYPE(sb,i) = UDF_SPARABLE_MAP15; | 1246 | map->s_partition_type = UDF_SPARABLE_MAP15; |
1018 | UDF_SB_TYPESPAR(sb,i).s_packet_len = le16_to_cpu(spm->packetLength); | 1247 | map->s_type_specific.s_sparing.s_packet_len = |
1248 | le16_to_cpu(spm->packetLength); | ||
1019 | for (j = 0; j < spm->numSparingTables; j++) { | 1249 | for (j = 0; j < spm->numSparingTables; j++) { |
1020 | loc = le32_to_cpu(spm->locSparingTable[j]); | 1250 | struct buffer_head *bh2; |
1021 | UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = | 1251 | |
1022 | udf_read_tagged(sb, loc, loc, &ident); | 1252 | loc = le32_to_cpu( |
1023 | if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) { | 1253 | spm->locSparingTable[j]); |
1024 | st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,i).s_spar_map[j]->b_data; | 1254 | bh2 = udf_read_tagged(sb, loc, loc, |
1025 | if (ident != 0 || | 1255 | &ident); |
1026 | strncmp(st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING))) { | 1256 | map->s_type_specific.s_sparing. |
1027 | brelse(UDF_SB_TYPESPAR(sb,i).s_spar_map[j]); | 1257 | s_spar_map[j] = bh2; |
1028 | UDF_SB_TYPESPAR(sb,i).s_spar_map[j] = NULL; | 1258 | |
1259 | if (bh2 != NULL) { | ||
1260 | st = (struct sparingTable *) | ||
1261 | bh2->b_data; | ||
1262 | if (ident != 0 || strncmp( | ||
1263 | st->sparingIdent.ident, | ||
1264 | UDF_ID_SPARING, | ||
1265 | strlen(UDF_ID_SPARING))) { | ||
1266 | brelse(bh2); | ||
1267 | map->s_type_specific. | ||
1268 | s_sparing. | ||
1269 | s_spar_map[j] = | ||
1270 | NULL; | ||
1029 | } | 1271 | } |
1030 | } | 1272 | } |
1031 | } | 1273 | } |
1032 | UDF_SB_PARTFUNC(sb,i) = udf_get_pblock_spar15; | 1274 | map->s_partition_func = udf_get_pblock_spar15; |
1033 | } else { | 1275 | } else { |
1034 | udf_debug("Unknown ident: %s\n", upm2->partIdent.ident); | 1276 | udf_debug("Unknown ident: %s\n", |
1277 | upm2->partIdent.ident); | ||
1035 | continue; | 1278 | continue; |
1036 | } | 1279 | } |
1037 | UDF_SB_PARTVSN(sb,i) = le16_to_cpu(upm2->volSeqNum); | 1280 | map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); |
1038 | UDF_SB_PARTNUM(sb,i) = le16_to_cpu(upm2->partitionNum); | 1281 | map->s_partition_num = le16_to_cpu(upm2->partitionNum); |
1039 | } | 1282 | } |
1040 | udf_debug("Partition (%d:%d) type %d on volume %d\n", | 1283 | udf_debug("Partition (%d:%d) type %d on volume %d\n", |
1041 | i, UDF_SB_PARTNUM(sb,i), type, UDF_SB_PARTVSN(sb,i)); | 1284 | i, map->s_partition_num, type, |
1285 | map->s_volumeseqnum); | ||
1042 | } | 1286 | } |
1043 | 1287 | ||
1044 | if (fileset) { | 1288 | if (fileset) { |
1045 | long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]); | 1289 | long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]); |
1046 | 1290 | ||
1047 | *fileset = lelb_to_cpu(la->extLocation); | 1291 | *fileset = lelb_to_cpu(la->extLocation); |
1048 | udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", | 1292 | udf_debug("FileSet found in LogicalVolDesc at block=%d, " |
1049 | fileset->logicalBlockNum, | 1293 | "partition=%d\n", fileset->logicalBlockNum, |
1050 | fileset->partitionReferenceNum); | 1294 | fileset->partitionReferenceNum); |
1051 | } | 1295 | } |
1052 | if (lvd->integritySeqExt.extLength) | 1296 | if (lvd->integritySeqExt.extLength) |
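Note: udf_load_logicalvol() now walks the partition map area through a local genericPartitionMap pointer and a per-iteration udf_part_map instead of the old UDF_SB_* accessor macros; the logic is unchanged. Type 1 maps become plain UDF_TYPE1_MAP15 partitions; type 2 maps are told apart by their identifier string, and for virtual partitions the first 16-bit word of the identifier suffix selects UDF 1.50 (0x0150) or 2.00 (0x0200) virtual addressing. A compact sketch of that dispatch; the type values and suffixes mirror the diff, while the identifier strings and structures here are simplified stand-ins for the on-disk layout:

/* Sketch of the partition-map dispatch in udf_load_logicalvol(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum map_kind { TYPE1_MAP15, VIRTUAL_MAP15, VIRTUAL_MAP20,
		SPARABLE_MAP15, UNKNOWN };

static enum map_kind classify_map(uint8_t type, const char *ident,
				  uint16_t suffix)
{
	if (type == 1)
		return TYPE1_MAP15;
	if (type == 2) {
		if (!strncmp(ident, "*UDF Virtual Partition", 22))
			return suffix == 0x0150 ? VIRTUAL_MAP15 :
			       suffix == 0x0200 ? VIRTUAL_MAP20 : UNKNOWN;
		if (!strncmp(ident, "*UDF Sparable Partition", 23))
			return SPARABLE_MAP15;
	}
	return UNKNOWN;
}

int main(void)
{
	printf("%d\n", classify_map(1, "", 0));
	printf("%d\n", classify_map(2, "*UDF Virtual Partition", 0x0200));
	printf("%d\n", classify_map(2, "*UDF Sparable Partition", 0));
	return 0;
}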
@@ -1063,22 +1307,26 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc) | |||
1063 | { | 1307 | { |
1064 | struct buffer_head *bh = NULL; | 1308 | struct buffer_head *bh = NULL; |
1065 | uint16_t ident; | 1309 | uint16_t ident; |
1310 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
1311 | struct logicalVolIntegrityDesc *lvid; | ||
1066 | 1312 | ||
1067 | while (loc.extLength > 0 && | 1313 | while (loc.extLength > 0 && |
1068 | (bh = udf_read_tagged(sb, loc.extLocation, | 1314 | (bh = udf_read_tagged(sb, loc.extLocation, |
1069 | loc.extLocation, &ident)) && | 1315 | loc.extLocation, &ident)) && |
1070 | ident == TAG_IDENT_LVID) { | 1316 | ident == TAG_IDENT_LVID) { |
1071 | UDF_SB_LVIDBH(sb) = bh; | 1317 | sbi->s_lvid_bh = bh; |
1318 | lvid = (struct logicalVolIntegrityDesc *)bh->b_data; | ||
1072 | 1319 | ||
1073 | if (UDF_SB_LVID(sb)->nextIntegrityExt.extLength) | 1320 | if (lvid->nextIntegrityExt.extLength) |
1074 | udf_load_logicalvolint(sb, leea_to_cpu(UDF_SB_LVID(sb)->nextIntegrityExt)); | 1321 | udf_load_logicalvolint(sb, |
1322 | leea_to_cpu(lvid->nextIntegrityExt)); | ||
1075 | 1323 | ||
1076 | if (UDF_SB_LVIDBH(sb) != bh) | 1324 | if (sbi->s_lvid_bh != bh) |
1077 | brelse(bh); | 1325 | brelse(bh); |
1078 | loc.extLength -= sb->s_blocksize; | 1326 | loc.extLength -= sb->s_blocksize; |
1079 | loc.extLocation++; | 1327 | loc.extLocation++; |
1080 | } | 1328 | } |
1081 | if (UDF_SB_LVIDBH(sb) != bh) | 1329 | if (sbi->s_lvid_bh != bh) |
1082 | brelse(bh); | 1330 | brelse(bh); |
1083 | } | 1331 | } |
1084 | 1332 | ||
@@ -1097,11 +1345,12 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc) | |||
1097 | * July 1, 1997 - Andrew E. Mileski | 1345 | * July 1, 1997 - Andrew E. Mileski |
1098 | * Written, tested, and released. | 1346 | * Written, tested, and released. |
1099 | */ | 1347 | */ |
1100 | static int udf_process_sequence(struct super_block *sb, long block, long lastblock, | 1348 | static int udf_process_sequence(struct super_block *sb, long block, |
1101 | kernel_lb_addr *fileset) | 1349 | long lastblock, kernel_lb_addr *fileset) |
1102 | { | 1350 | { |
1103 | struct buffer_head *bh = NULL; | 1351 | struct buffer_head *bh = NULL; |
1104 | struct udf_vds_record vds[VDS_POS_LENGTH]; | 1352 | struct udf_vds_record vds[VDS_POS_LENGTH]; |
1353 | struct udf_vds_record *curr; | ||
1105 | struct generic_desc *gd; | 1354 | struct generic_desc *gd; |
1106 | struct volDescPtr *vdp; | 1355 | struct volDescPtr *vdp; |
1107 | int done = 0; | 1356 | int done = 0; |
@@ -1124,43 +1373,51 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo | |||
1124 | vdsn = le32_to_cpu(gd->volDescSeqNum); | 1373 | vdsn = le32_to_cpu(gd->volDescSeqNum); |
1125 | switch (ident) { | 1374 | switch (ident) { |
1126 | case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ | 1375 | case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */ |
1127 | if (vdsn >= vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum) { | 1376 | curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; |
1128 | vds[VDS_POS_PRIMARY_VOL_DESC].volDescSeqNum = vdsn; | 1377 | if (vdsn >= curr->volDescSeqNum) { |
1129 | vds[VDS_POS_PRIMARY_VOL_DESC].block = block; | 1378 | curr->volDescSeqNum = vdsn; |
1379 | curr->block = block; | ||
1130 | } | 1380 | } |
1131 | break; | 1381 | break; |
1132 | case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ | 1382 | case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */ |
1133 | if (vdsn >= vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum) { | 1383 | curr = &vds[VDS_POS_VOL_DESC_PTR]; |
1134 | vds[VDS_POS_VOL_DESC_PTR].volDescSeqNum = vdsn; | 1384 | if (vdsn >= curr->volDescSeqNum) { |
1135 | vds[VDS_POS_VOL_DESC_PTR].block = block; | 1385 | curr->volDescSeqNum = vdsn; |
1386 | curr->block = block; | ||
1136 | 1387 | ||
1137 | vdp = (struct volDescPtr *)bh->b_data; | 1388 | vdp = (struct volDescPtr *)bh->b_data; |
1138 | next_s = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation); | 1389 | next_s = le32_to_cpu( |
1139 | next_e = le32_to_cpu(vdp->nextVolDescSeqExt.extLength); | 1390 | vdp->nextVolDescSeqExt.extLocation); |
1391 | next_e = le32_to_cpu( | ||
1392 | vdp->nextVolDescSeqExt.extLength); | ||
1140 | next_e = next_e >> sb->s_blocksize_bits; | 1393 | next_e = next_e >> sb->s_blocksize_bits; |
1141 | next_e += next_s; | 1394 | next_e += next_s; |
1142 | } | 1395 | } |
1143 | break; | 1396 | break; |
1144 | case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ | 1397 | case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */ |
1145 | if (vdsn >= vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum) { | 1398 | curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; |
1146 | vds[VDS_POS_IMP_USE_VOL_DESC].volDescSeqNum = vdsn; | 1399 | if (vdsn >= curr->volDescSeqNum) { |
1147 | vds[VDS_POS_IMP_USE_VOL_DESC].block = block; | 1400 | curr->volDescSeqNum = vdsn; |
1401 | curr->block = block; | ||
1148 | } | 1402 | } |
1149 | break; | 1403 | break; |
1150 | case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ | 1404 | case TAG_IDENT_PD: /* ISO 13346 3/10.5 */ |
1151 | if (!vds[VDS_POS_PARTITION_DESC].block) | 1405 | curr = &vds[VDS_POS_PARTITION_DESC]; |
1152 | vds[VDS_POS_PARTITION_DESC].block = block; | 1406 | if (!curr->block) |
1407 | curr->block = block; | ||
1153 | break; | 1408 | break; |
1154 | case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ | 1409 | case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */ |
1155 | if (vdsn >= vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum) { | 1410 | curr = &vds[VDS_POS_LOGICAL_VOL_DESC]; |
1156 | vds[VDS_POS_LOGICAL_VOL_DESC].volDescSeqNum = vdsn; | 1411 | if (vdsn >= curr->volDescSeqNum) { |
1157 | vds[VDS_POS_LOGICAL_VOL_DESC].block = block; | 1412 | curr->volDescSeqNum = vdsn; |
1413 | curr->block = block; | ||
1158 | } | 1414 | } |
1159 | break; | 1415 | break; |
1160 | case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ | 1416 | case TAG_IDENT_USD: /* ISO 13346 3/10.8 */ |
1161 | if (vdsn >= vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum) { | 1417 | curr = &vds[VDS_POS_UNALLOC_SPACE_DESC]; |
1162 | vds[VDS_POS_UNALLOC_SPACE_DESC].volDescSeqNum = vdsn; | 1418 | if (vdsn >= curr->volDescSeqNum) { |
1163 | vds[VDS_POS_UNALLOC_SPACE_DESC].block = block; | 1419 | curr->volDescSeqNum = vdsn; |
1420 | curr->block = block; | ||
1164 | } | 1421 | } |
1165 | break; | 1422 | break; |
1166 | case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ | 1423 | case TAG_IDENT_TD: /* ISO 13346 3/10.9 */ |
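Note: udf_process_sequence() now works through a curr pointer into the vds[] bookkeeping array instead of repeating the long vds[VDS_POS_...] expressions; behaviour is unchanged. For each tag in the Volume Descriptor Sequence it remembers the block of the descriptor with the highest volDescSeqNum seen so far (the prevailing copy), except for partition descriptors, where only the first block is kept. A minimal sketch of that prevailing-descriptor rule:

/* Sketch of the keep-the-highest-sequence-number rule used per vds[] slot. */
#include <stdint.h>
#include <stdio.h>

struct vds_record {
	uint32_t seqnum;
	uint32_t block;
};

static void note_descriptor(struct vds_record *curr, uint32_t seqnum,
			    uint32_t block)
{
	if (seqnum >= curr->seqnum) {     /* later copies prevail */
		curr->seqnum = seqnum;
		curr->block = block;
	}
}

int main(void)
{
	struct vds_record pvd = { 0, 0 };
	note_descriptor(&pvd, 3, 20);   /* first PVD copy */
	note_descriptor(&pvd, 7, 25);   /* newer copy prevails */
	note_descriptor(&pvd, 5, 30);   /* older copy is ignored */
	printf("use PVD at block %u (seq %u)\n", pvd.block, pvd.seqnum);
	return 0;
}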
@@ -1169,32 +1426,38 @@ static int udf_process_sequence(struct super_block *sb, long block, long lastblo | |||
1169 | block = next_s; | 1426 | block = next_s; |
1170 | lastblock = next_e; | 1427 | lastblock = next_e; |
1171 | next_s = next_e = 0; | 1428 | next_s = next_e = 0; |
1172 | } else { | 1429 | } else |
1173 | done = 1; | 1430 | done = 1; |
1174 | } | ||
1175 | break; | 1431 | break; |
1176 | } | 1432 | } |
1177 | brelse(bh); | 1433 | brelse(bh); |
1178 | } | 1434 | } |
1179 | for (i = 0; i < VDS_POS_LENGTH; i++) { | 1435 | for (i = 0; i < VDS_POS_LENGTH; i++) { |
1180 | if (vds[i].block) { | 1436 | if (vds[i].block) { |
1181 | bh = udf_read_tagged(sb, vds[i].block, vds[i].block, &ident); | 1437 | bh = udf_read_tagged(sb, vds[i].block, vds[i].block, |
1438 | &ident); | ||
1182 | 1439 | ||
1183 | if (i == VDS_POS_PRIMARY_VOL_DESC) { | 1440 | if (i == VDS_POS_PRIMARY_VOL_DESC) { |
1184 | udf_load_pvoldesc(sb, bh); | 1441 | udf_load_pvoldesc(sb, bh); |
1185 | } else if (i == VDS_POS_LOGICAL_VOL_DESC) { | 1442 | } else if (i == VDS_POS_LOGICAL_VOL_DESC) { |
1186 | udf_load_logicalvol(sb, bh, fileset); | 1443 | if (udf_load_logicalvol(sb, bh, fileset)) { |
1444 | brelse(bh); | ||
1445 | return 1; | ||
1446 | } | ||
1187 | } else if (i == VDS_POS_PARTITION_DESC) { | 1447 | } else if (i == VDS_POS_PARTITION_DESC) { |
1188 | struct buffer_head *bh2 = NULL; | 1448 | struct buffer_head *bh2 = NULL; |
1189 | if (udf_load_partdesc(sb, bh)) { | 1449 | if (udf_load_partdesc(sb, bh)) { |
1190 | brelse(bh); | 1450 | brelse(bh); |
1191 | return 1; | 1451 | return 1; |
1192 | } | 1452 | } |
1193 | for (j = vds[i].block + 1; j < vds[VDS_POS_TERMINATING_DESC].block; j++) { | 1453 | for (j = vds[i].block + 1; |
1454 | j < vds[VDS_POS_TERMINATING_DESC].block; | ||
1455 | j++) { | ||
1194 | bh2 = udf_read_tagged(sb, j, j, &ident); | 1456 | bh2 = udf_read_tagged(sb, j, j, &ident); |
1195 | gd = (struct generic_desc *)bh2->b_data; | 1457 | gd = (struct generic_desc *)bh2->b_data; |
1196 | if (ident == TAG_IDENT_PD) | 1458 | if (ident == TAG_IDENT_PD) |
1197 | if (udf_load_partdesc(sb, bh2)) { | 1459 | if (udf_load_partdesc(sb, |
1460 | bh2)) { | ||
1198 | brelse(bh); | 1461 | brelse(bh); |
1199 | brelse(bh2); | 1462 | brelse(bh2); |
1200 | return 1; | 1463 | return 1; |
@@ -1222,14 +1485,17 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent) | |||
1222 | } | 1485 | } |
1223 | /* Check that it is NSR02 compliant */ | 1486 | /* Check that it is NSR02 compliant */ |
1224 | /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ | 1487 | /* Process any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */ |
1225 | else if ((block = udf_vrs(sb, silent)) == -1) { | 1488 | else { |
1226 | udf_debug("Failed to read byte 32768. Assuming open disc. " | 1489 | block = udf_vrs(sb, silent); |
1227 | "Skipping validity check\n"); | 1490 | if (block == -1) { |
1228 | if (!UDF_SB_LASTBLOCK(sb)) | 1491 | struct udf_sb_info *sbi = UDF_SB(sb); |
1229 | UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb); | 1492 | udf_debug("Failed to read byte 32768. Assuming open " |
1230 | return 0; | 1493 | "disc. Skipping validity check\n"); |
1231 | } else { | 1494 | if (!sbi->s_last_block) |
1232 | return !block; | 1495 | sbi->s_last_block = udf_get_last_block(sb); |
1496 | return 0; | ||
1497 | } else | ||
1498 | return !block; | ||
1233 | } | 1499 | } |
1234 | } | 1500 | } |
1235 | 1501 | ||
@@ -1240,100 +1506,121 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset) | |||
1240 | struct buffer_head *bh; | 1506 | struct buffer_head *bh; |
1241 | long main_s, main_e, reserve_s, reserve_e; | 1507 | long main_s, main_e, reserve_s, reserve_e; |
1242 | int i, j; | 1508 | int i, j; |
1509 | struct udf_sb_info *sbi; | ||
1243 | 1510 | ||
1244 | if (!sb) | 1511 | if (!sb) |
1245 | return 1; | 1512 | return 1; |
1513 | sbi = UDF_SB(sb); | ||
1246 | 1514 | ||
1247 | for (i = 0; i < ARRAY_SIZE(UDF_SB_ANCHOR(sb)); i++) { | 1515 | for (i = 0; i < ARRAY_SIZE(sbi->s_anchor); i++) { |
1248 | if (UDF_SB_ANCHOR(sb)[i] && | 1516 | if (!sbi->s_anchor[i]) |
1249 | (bh = udf_read_tagged(sb, UDF_SB_ANCHOR(sb)[i], | 1517 | continue; |
1250 | UDF_SB_ANCHOR(sb)[i], &ident))) { | 1518 | bh = udf_read_tagged(sb, sbi->s_anchor[i], sbi->s_anchor[i], |
1251 | anchor = (struct anchorVolDescPtr *)bh->b_data; | 1519 | &ident); |
1520 | if (!bh) | ||
1521 | continue; | ||
1252 | 1522 | ||
1253 | /* Locate the main sequence */ | 1523 | anchor = (struct anchorVolDescPtr *)bh->b_data; |
1254 | main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); | ||
1255 | main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength ); | ||
1256 | main_e = main_e >> sb->s_blocksize_bits; | ||
1257 | main_e += main_s; | ||
1258 | 1524 | ||
1259 | /* Locate the reserve sequence */ | 1525 | /* Locate the main sequence */ |
1260 | reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation); | 1526 | main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation); |
1261 | reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength); | 1527 | main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength); |
1262 | reserve_e = reserve_e >> sb->s_blocksize_bits; | 1528 | main_e = main_e >> sb->s_blocksize_bits; |
1263 | reserve_e += reserve_s; | 1529 | main_e += main_s; |
1264 | 1530 | ||
1265 | brelse(bh); | 1531 | /* Locate the reserve sequence */ |
1532 | reserve_s = le32_to_cpu( | ||
1533 | anchor->reserveVolDescSeqExt.extLocation); | ||
1534 | reserve_e = le32_to_cpu( | ||
1535 | anchor->reserveVolDescSeqExt.extLength); | ||
1536 | reserve_e = reserve_e >> sb->s_blocksize_bits; | ||
1537 | reserve_e += reserve_s; | ||
1266 | 1538 | ||
1267 | /* Process the main & reserve sequences */ | 1539 | brelse(bh); |
1268 | /* responsible for finding the PartitionDesc(s) */ | 1540 | |
1269 | if (!(udf_process_sequence(sb, main_s, main_e, fileset) && | 1541 | /* Process the main & reserve sequences */ |
1270 | udf_process_sequence(sb, reserve_s, reserve_e, fileset))) { | 1542 | /* responsible for finding the PartitionDesc(s) */ |
1271 | break; | 1543 | if (!(udf_process_sequence(sb, main_s, main_e, |
1272 | } | 1544 | fileset) && |
1273 | } | 1545 | udf_process_sequence(sb, reserve_s, reserve_e, |
1546 | fileset))) | ||
1547 | break; | ||
1274 | } | 1548 | } |
1275 | 1549 | ||
1276 | if (i == ARRAY_SIZE(UDF_SB_ANCHOR(sb))) { | 1550 | if (i == ARRAY_SIZE(sbi->s_anchor)) { |
1277 | udf_debug("No Anchor block found\n"); | 1551 | udf_debug("No Anchor block found\n"); |
1278 | return 1; | 1552 | return 1; |
1279 | } else | 1553 | } |
1280 | udf_debug("Using anchor in block %d\n", UDF_SB_ANCHOR(sb)[i]); | 1554 | udf_debug("Using anchor in block %d\n", sbi->s_anchor[i]); |
1281 | 1555 | ||
1282 | for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) { | 1556 | for (i = 0; i < sbi->s_partitions; i++) { |
1283 | kernel_lb_addr uninitialized_var(ino); | 1557 | kernel_lb_addr uninitialized_var(ino); |
1284 | switch (UDF_SB_PARTTYPE(sb, i)) { | 1558 | struct udf_part_map *map = &sbi->s_partmaps[i]; |
1559 | switch (map->s_partition_type) { | ||
1285 | case UDF_VIRTUAL_MAP15: | 1560 | case UDF_VIRTUAL_MAP15: |
1286 | case UDF_VIRTUAL_MAP20: | 1561 | case UDF_VIRTUAL_MAP20: |
1287 | if (!UDF_SB_LASTBLOCK(sb)) { | 1562 | if (!sbi->s_last_block) { |
1288 | UDF_SB_LASTBLOCK(sb) = udf_get_last_block(sb); | 1563 | sbi->s_last_block = udf_get_last_block(sb); |
1289 | udf_find_anchor(sb); | 1564 | udf_find_anchor(sb); |
1290 | } | 1565 | } |
1291 | 1566 | ||
1292 | if (!UDF_SB_LASTBLOCK(sb)) { | 1567 | if (!sbi->s_last_block) { |
1293 | udf_debug("Unable to determine Lastblock (For " | 1568 | udf_debug("Unable to determine Lastblock (For " |
1294 | "Virtual Partition)\n"); | 1569 | "Virtual Partition)\n"); |
1295 | return 1; | 1570 | return 1; |
1296 | } | 1571 | } |
1297 | 1572 | ||
1298 | for (j = 0; j < UDF_SB_NUMPARTS(sb); j++) { | 1573 | for (j = 0; j < sbi->s_partitions; j++) { |
1574 | struct udf_part_map *map2 = &sbi->s_partmaps[j]; | ||
1299 | if (j != i && | 1575 | if (j != i && |
1300 | UDF_SB_PARTVSN(sb, i) == UDF_SB_PARTVSN(sb, j) && | 1576 | map->s_volumeseqnum == |
1301 | UDF_SB_PARTNUM(sb, i) == UDF_SB_PARTNUM(sb, j)) { | 1577 | map2->s_volumeseqnum && |
1578 | map->s_partition_num == | ||
1579 | map2->s_partition_num) { | ||
1302 | ino.partitionReferenceNum = j; | 1580 | ino.partitionReferenceNum = j; |
1303 | ino.logicalBlockNum = UDF_SB_LASTBLOCK(sb) - UDF_SB_PARTROOT(sb, j); | 1581 | ino.logicalBlockNum = |
1582 | sbi->s_last_block - | ||
1583 | map2->s_partition_root; | ||
1304 | break; | 1584 | break; |
1305 | } | 1585 | } |
1306 | } | 1586 | } |
1307 | 1587 | ||
1308 | if (j == UDF_SB_NUMPARTS(sb)) | 1588 | if (j == sbi->s_partitions) |
1309 | return 1; | 1589 | return 1; |
1310 | 1590 | ||
1311 | if (!(UDF_SB_VAT(sb) = udf_iget(sb, ino))) | 1591 | sbi->s_vat_inode = udf_iget(sb, ino); |
1592 | if (!sbi->s_vat_inode) | ||
1312 | return 1; | 1593 | return 1; |
1313 | 1594 | ||
1314 | if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP15) { | 1595 | if (map->s_partition_type == UDF_VIRTUAL_MAP15) { |
1315 | UDF_SB_TYPEVIRT(sb, i).s_start_offset = | 1596 | map->s_type_specific.s_virtual.s_start_offset = |
1316 | udf_ext0_offset(UDF_SB_VAT(sb)); | 1597 | udf_ext0_offset(sbi->s_vat_inode); |
1317 | UDF_SB_TYPEVIRT(sb, i).s_num_entries = | 1598 | map->s_type_specific.s_virtual.s_num_entries = |
1318 | (UDF_SB_VAT(sb)->i_size - 36) >> 2; | 1599 | (sbi->s_vat_inode->i_size - 36) >> 2; |
1319 | } else if (UDF_SB_PARTTYPE(sb, i) == UDF_VIRTUAL_MAP20) { | 1600 | } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { |
1320 | struct buffer_head *bh = NULL; | ||
1321 | uint32_t pos; | 1601 | uint32_t pos; |
1602 | struct virtualAllocationTable20 *vat20; | ||
1322 | 1603 | ||
1323 | pos = udf_block_map(UDF_SB_VAT(sb), 0); | 1604 | pos = udf_block_map(sbi->s_vat_inode, 0); |
1324 | bh = sb_bread(sb, pos); | 1605 | bh = sb_bread(sb, pos); |
1325 | if (!bh) | 1606 | if (!bh) |
1326 | return 1; | 1607 | return 1; |
1327 | UDF_SB_TYPEVIRT(sb, i).s_start_offset = | 1608 | vat20 = (struct virtualAllocationTable20 *) |
1328 | le16_to_cpu(((struct virtualAllocationTable20 *)bh->b_data + | 1609 | bh->b_data + |
1329 | udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) + | 1610 | udf_ext0_offset(sbi->s_vat_inode); |
1330 | udf_ext0_offset(UDF_SB_VAT(sb)); | 1611 | map->s_type_specific.s_virtual.s_start_offset = |
1331 | UDF_SB_TYPEVIRT(sb, i).s_num_entries = (UDF_SB_VAT(sb)->i_size - | 1612 | le16_to_cpu(vat20->lengthHeader) + |
1332 | UDF_SB_TYPEVIRT(sb, i).s_start_offset) >> 2; | 1613 | udf_ext0_offset(sbi->s_vat_inode); |
1614 | map->s_type_specific.s_virtual.s_num_entries = | ||
1615 | (sbi->s_vat_inode->i_size - | ||
1616 | map->s_type_specific.s_virtual. | ||
1617 | s_start_offset) >> 2; | ||
1333 | brelse(bh); | 1618 | brelse(bh); |
1334 | } | 1619 | } |
1335 | UDF_SB_PARTROOT(sb, i) = udf_get_pblock(sb, 0, i, 0); | 1620 | map->s_partition_root = udf_get_pblock(sb, 0, i, 0); |
1336 | UDF_SB_PARTLEN(sb, i) = UDF_SB_PARTLEN(sb, ino.partitionReferenceNum); | 1621 | map->s_partition_len = |
1622 | sbi->s_partmaps[ino.partitionReferenceNum]. | ||
1623 | s_partition_len; | ||
1337 | } | 1624 | } |
1338 | } | 1625 | } |
1339 | return 0; | 1626 | return 0; |
@@ -1341,62 +1628,86 @@ static int udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset) | |||
1341 | 1628 | ||
1342 | static void udf_open_lvid(struct super_block *sb) | 1629 | static void udf_open_lvid(struct super_block *sb) |
1343 | { | 1630 | { |
1344 | if (UDF_SB_LVIDBH(sb)) { | 1631 | struct udf_sb_info *sbi = UDF_SB(sb); |
1345 | int i; | 1632 | struct buffer_head *bh = sbi->s_lvid_bh; |
1633 | if (bh) { | ||
1346 | kernel_timestamp cpu_time; | 1634 | kernel_timestamp cpu_time; |
1635 | struct logicalVolIntegrityDesc *lvid = | ||
1636 | (struct logicalVolIntegrityDesc *)bh->b_data; | ||
1637 | struct logicalVolIntegrityDescImpUse *lvidiu = | ||
1638 | udf_sb_lvidiu(sbi); | ||
1347 | 1639 | ||
1348 | UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 1640 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
1349 | UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 1641 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; |
1350 | if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) | 1642 | if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) |
1351 | UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time); | 1643 | lvid->recordingDateAndTime = cpu_to_lets(cpu_time); |
1352 | UDF_SB_LVID(sb)->integrityType = LVID_INTEGRITY_TYPE_OPEN; | 1644 | lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN; |
1353 | |||
1354 | UDF_SB_LVID(sb)->descTag.descCRC = cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag), | ||
1355 | le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0)); | ||
1356 | 1645 | ||
1357 | UDF_SB_LVID(sb)->descTag.tagChecksum = 0; | 1646 | lvid->descTag.descCRC = cpu_to_le16( |
1358 | for (i = 0; i < 16; i++) | 1647 | udf_crc((char *)lvid + sizeof(tag), |
1359 | if (i != 4) | 1648 | le16_to_cpu(lvid->descTag.descCRCLength), |
1360 | UDF_SB_LVID(sb)->descTag.tagChecksum += | 1649 | 0)); |
1361 | ((uint8_t *) &(UDF_SB_LVID(sb)->descTag))[i]; | ||
1362 | 1650 | ||
1363 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | 1651 | lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); |
1652 | mark_buffer_dirty(bh); | ||
1364 | } | 1653 | } |
1365 | } | 1654 | } |
1366 | 1655 | ||
1367 | static void udf_close_lvid(struct super_block *sb) | 1656 | static void udf_close_lvid(struct super_block *sb) |
1368 | { | 1657 | { |
1369 | kernel_timestamp cpu_time; | 1658 | kernel_timestamp cpu_time; |
1370 | int i; | 1659 | struct udf_sb_info *sbi = UDF_SB(sb); |
1660 | struct buffer_head *bh = sbi->s_lvid_bh; | ||
1661 | struct logicalVolIntegrityDesc *lvid; | ||
1662 | |||
1663 | if (!bh) | ||
1664 | return; | ||
1665 | |||
1666 | lvid = (struct logicalVolIntegrityDesc *)bh->b_data; | ||
1371 | 1667 | ||
1372 | if (UDF_SB_LVIDBH(sb) && | 1668 | if (lvid->integrityType == LVID_INTEGRITY_TYPE_OPEN) { |
1373 | UDF_SB_LVID(sb)->integrityType == LVID_INTEGRITY_TYPE_OPEN) { | 1669 | struct logicalVolIntegrityDescImpUse *lvidiu = |
1374 | UDF_SB_LVIDIU(sb)->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; | 1670 | udf_sb_lvidiu(sbi); |
1375 | UDF_SB_LVIDIU(sb)->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | 1671 | lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; |
1672 | lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX; | ||
1376 | if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) | 1673 | if (udf_time_to_stamp(&cpu_time, CURRENT_TIME)) |
1377 | UDF_SB_LVID(sb)->recordingDateAndTime = cpu_to_lets(cpu_time); | 1674 | lvid->recordingDateAndTime = cpu_to_lets(cpu_time); |
1378 | if (UDF_MAX_WRITE_VERSION > le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev)) | 1675 | if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev)) |
1379 | UDF_SB_LVIDIU(sb)->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION); | 1676 | lvidiu->maxUDFWriteRev = |
1380 | if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev)) | 1677 | cpu_to_le16(UDF_MAX_WRITE_VERSION); |
1381 | UDF_SB_LVIDIU(sb)->minUDFReadRev = cpu_to_le16(UDF_SB_UDFREV(sb)); | 1678 | if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev)) |
1382 | if (UDF_SB_UDFREV(sb) > le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev)) | 1679 | lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev); |
1383 | UDF_SB_LVIDIU(sb)->minUDFWriteRev = cpu_to_le16(UDF_SB_UDFREV(sb)); | 1680 | if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev)) |
1384 | UDF_SB_LVID(sb)->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); | 1681 | lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev); |
1385 | 1682 | lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE); | |
1386 | UDF_SB_LVID(sb)->descTag.descCRC = | 1683 | |
1387 | cpu_to_le16(udf_crc((char *)UDF_SB_LVID(sb) + sizeof(tag), | 1684 | lvid->descTag.descCRC = cpu_to_le16( |
1388 | le16_to_cpu(UDF_SB_LVID(sb)->descTag.descCRCLength), 0)); | 1685 | udf_crc((char *)lvid + sizeof(tag), |
1389 | 1686 | le16_to_cpu(lvid->descTag.descCRCLength), | |
1390 | UDF_SB_LVID(sb)->descTag.tagChecksum = 0; | 1687 | 0)); |
1391 | for (i = 0; i < 16; i++) | 1688 | |
1392 | if (i != 4) | 1689 | lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); |
1393 | UDF_SB_LVID(sb)->descTag.tagChecksum += | 1690 | mark_buffer_dirty(bh); |
1394 | ((uint8_t *)&(UDF_SB_LVID(sb)->descTag))[i]; | ||
1395 | |||
1396 | mark_buffer_dirty(UDF_SB_LVIDBH(sb)); | ||
1397 | } | 1691 | } |
1398 | } | 1692 | } |
1399 | 1693 | ||
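Both udf_open_lvid() and udf_close_lvid() above now call udf_tag_checksum() instead of open-coding the byte loop over the descriptor tag. The helper's body is not part of the hunks shown here; judging from the removed loops and the declaration added to udfdecl.h further down, a minimal sketch would be:

	/* Sketch only -- the real definition lives elsewhere in the patch.
	 * Sum every byte of the 16-byte tag except the tagChecksum byte
	 * itself, which sits at offset 4, exactly as the removed loops did. */
	u8 udf_tag_checksum(const tag *t)
	{
		const u8 *data = (const u8 *)t;
		u8 checksum = 0;
		int i;

		for (i = 0; i < sizeof(tag); i++)
			if (i != 4)	/* position 4 holds the checksum field */
				checksum += data[i];

		return checksum;
	}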
1694 | static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) | ||
1695 | { | ||
1696 | int i; | ||
1697 | int nr_groups = bitmap->s_nr_groups; | ||
1698 | int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * | ||
1699 | nr_groups); | ||
1700 | |||
1701 | for (i = 0; i < nr_groups; i++) | ||
1702 | if (bitmap->s_block_bitmap[i]) | ||
1703 | brelse(bitmap->s_block_bitmap[i]); | ||
1704 | |||
1705 | if (size <= PAGE_SIZE) | ||
1706 | kfree(bitmap); | ||
1707 | else | ||
1708 | vfree(bitmap); | ||
1709 | } | ||
1710 | |||
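udf_sb_free_bitmap() above is new in this patch and replaces the UDF_SB_FREE_BITMAP macro removed from udf_sb.h further down: it releases the per-group buffer heads, then frees the bitmap with kfree() or vfree() depending on whether the original allocation fit into a page. The matching allocation helper is not in the hunks shown here; based on the removed UDF_SB_ALLOC_BITMAP macro and the udf_compute_nr_groups() declaration added to udf_sb.h, it presumably looks roughly like the sketch below (the name udf_sb_alloc_bitmap is an assumption):

	static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
	{
		struct udf_bitmap *bitmap;
		int nr_groups = udf_compute_nr_groups(sb, index);
		int size = sizeof(struct udf_bitmap) +
			   (sizeof(struct buffer_head *) * nr_groups);

		/* mirror the kmalloc/vmalloc split that udf_sb_free_bitmap() undoes */
		if (size <= PAGE_SIZE)
			bitmap = kmalloc(size, GFP_KERNEL);
		else
			bitmap = vmalloc(size);
		if (!bitmap)
			return NULL;

		memset(bitmap, 0, size);
		/* the buffer_head pointer array lives directly behind the struct */
		bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
		bitmap->s_nr_groups = nr_groups;
		return bitmap;
	}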
1400 | /* | 1711 | /* |
1401 | * udf_read_super | 1712 | * udf_read_super |
1402 | * | 1713 | * |
@@ -1426,16 +1737,15 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1426 | uopt.gid = -1; | 1737 | uopt.gid = -1; |
1427 | uopt.umask = 0; | 1738 | uopt.umask = 0; |
1428 | 1739 | ||
1429 | sbi = kmalloc(sizeof(struct udf_sb_info), GFP_KERNEL); | 1740 | sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL); |
1430 | if (!sbi) | 1741 | if (!sbi) |
1431 | return -ENOMEM; | 1742 | return -ENOMEM; |
1432 | 1743 | ||
1433 | sb->s_fs_info = sbi; | 1744 | sb->s_fs_info = sbi; |
1434 | memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); | ||
1435 | 1745 | ||
1436 | mutex_init(&sbi->s_alloc_mutex); | 1746 | mutex_init(&sbi->s_alloc_mutex); |
1437 | 1747 | ||
1438 | if (!udf_parse_options((char *)options, &uopt)) | 1748 | if (!udf_parse_options((char *)options, &uopt, false)) |
1439 | goto error_out; | 1749 | goto error_out; |
1440 | 1750 | ||
1441 | if (uopt.flags & (1 << UDF_FLAG_UTF8) && | 1751 | if (uopt.flags & (1 << UDF_FLAG_UTF8) && |
@@ -1459,30 +1769,31 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1459 | fileset.logicalBlockNum = 0xFFFFFFFF; | 1769 | fileset.logicalBlockNum = 0xFFFFFFFF; |
1460 | fileset.partitionReferenceNum = 0xFFFF; | 1770 | fileset.partitionReferenceNum = 0xFFFF; |
1461 | 1771 | ||
1462 | UDF_SB(sb)->s_flags = uopt.flags; | 1772 | sbi->s_flags = uopt.flags; |
1463 | UDF_SB(sb)->s_uid = uopt.uid; | 1773 | sbi->s_uid = uopt.uid; |
1464 | UDF_SB(sb)->s_gid = uopt.gid; | 1774 | sbi->s_gid = uopt.gid; |
1465 | UDF_SB(sb)->s_umask = uopt.umask; | 1775 | sbi->s_umask = uopt.umask; |
1466 | UDF_SB(sb)->s_nls_map = uopt.nls_map; | 1776 | sbi->s_nls_map = uopt.nls_map; |
1467 | 1777 | ||
1468 | /* Set the block size for all transfers */ | 1778 | /* Set the block size for all transfers */ |
1469 | if (!udf_set_blocksize(sb, uopt.blocksize)) | 1779 | if (!udf_set_blocksize(sb, uopt.blocksize)) |
1470 | goto error_out; | 1780 | goto error_out; |
1471 | 1781 | ||
1472 | if (uopt.session == 0xFFFFFFFF) | 1782 | if (uopt.session == 0xFFFFFFFF) |
1473 | UDF_SB_SESSION(sb) = udf_get_last_session(sb); | 1783 | sbi->s_session = udf_get_last_session(sb); |
1474 | else | 1784 | else |
1475 | UDF_SB_SESSION(sb) = uopt.session; | 1785 | sbi->s_session = uopt.session; |
1476 | 1786 | ||
1477 | udf_debug("Multi-session=%d\n", UDF_SB_SESSION(sb)); | 1787 | udf_debug("Multi-session=%d\n", sbi->s_session); |
1478 | 1788 | ||
1479 | UDF_SB_LASTBLOCK(sb) = uopt.lastblock; | 1789 | sbi->s_last_block = uopt.lastblock; |
1480 | UDF_SB_ANCHOR(sb)[0] = UDF_SB_ANCHOR(sb)[1] = 0; | 1790 | sbi->s_anchor[0] = sbi->s_anchor[1] = 0; |
1481 | UDF_SB_ANCHOR(sb)[2] = uopt.anchor; | 1791 | sbi->s_anchor[2] = uopt.anchor; |
1482 | UDF_SB_ANCHOR(sb)[3] = 256; | 1792 | sbi->s_anchor[3] = 256; |
1483 | 1793 | ||
1484 | if (udf_check_valid(sb, uopt.novrs, silent)) { /* read volume recognition sequences */ | 1794 | if (udf_check_valid(sb, uopt.novrs, silent)) { |
1485 | printk("UDF-fs: No VRS found\n"); | 1795 | /* read volume recognition sequences */ |
1796 | printk(KERN_WARNING "UDF-fs: No VRS found\n"); | ||
1486 | goto error_out; | 1797 | goto error_out; |
1487 | } | 1798 | } |
1488 | 1799 | ||
@@ -1496,27 +1807,30 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1496 | sb->s_time_gran = 1000; | 1807 | sb->s_time_gran = 1000; |
1497 | 1808 | ||
1498 | if (udf_load_partition(sb, &fileset)) { | 1809 | if (udf_load_partition(sb, &fileset)) { |
1499 | printk("UDF-fs: No partition found (1)\n"); | 1810 | printk(KERN_WARNING "UDF-fs: No partition found (1)\n"); |
1500 | goto error_out; | 1811 | goto error_out; |
1501 | } | 1812 | } |
1502 | 1813 | ||
1503 | udf_debug("Lastblock=%d\n", UDF_SB_LASTBLOCK(sb)); | 1814 | udf_debug("Lastblock=%d\n", sbi->s_last_block); |
1504 | 1815 | ||
1505 | if (UDF_SB_LVIDBH(sb)) { | 1816 | if (sbi->s_lvid_bh) { |
1506 | uint16_t minUDFReadRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev); | 1817 | struct logicalVolIntegrityDescImpUse *lvidiu = |
1507 | uint16_t minUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFWriteRev); | 1818 | udf_sb_lvidiu(sbi); |
1508 | /* uint16_t maxUDFWriteRev = le16_to_cpu(UDF_SB_LVIDIU(sb)->maxUDFWriteRev); */ | 1819 | uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev); |
1820 | uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev); | ||
1821 | /* uint16_t maxUDFWriteRev = | ||
1822 | le16_to_cpu(lvidiu->maxUDFWriteRev); */ | ||
1509 | 1823 | ||
1510 | if (minUDFReadRev > UDF_MAX_READ_VERSION) { | 1824 | if (minUDFReadRev > UDF_MAX_READ_VERSION) { |
1511 | printk("UDF-fs: minUDFReadRev=%x (max is %x)\n", | 1825 | printk(KERN_ERR "UDF-fs: minUDFReadRev=%x " |
1512 | le16_to_cpu(UDF_SB_LVIDIU(sb)->minUDFReadRev), | 1826 | "(max is %x)\n", |
1827 | le16_to_cpu(lvidiu->minUDFReadRev), | ||
1513 | UDF_MAX_READ_VERSION); | 1828 | UDF_MAX_READ_VERSION); |
1514 | goto error_out; | 1829 | goto error_out; |
1515 | } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) { | 1830 | } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) |
1516 | sb->s_flags |= MS_RDONLY; | 1831 | sb->s_flags |= MS_RDONLY; |
1517 | } | ||
1518 | 1832 | ||
1519 | UDF_SB_UDFREV(sb) = minUDFWriteRev; | 1833 | sbi->s_udfrev = minUDFWriteRev; |
1520 | 1834 | ||
1521 | if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) | 1835 | if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE) |
1522 | UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); | 1836 | UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE); |
@@ -1524,29 +1838,30 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1524 | UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); | 1838 | UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS); |
1525 | } | 1839 | } |
1526 | 1840 | ||
1527 | if (!UDF_SB_NUMPARTS(sb)) { | 1841 | if (!sbi->s_partitions) { |
1528 | printk("UDF-fs: No partition found (2)\n"); | 1842 | printk(KERN_WARNING "UDF-fs: No partition found (2)\n"); |
1529 | goto error_out; | 1843 | goto error_out; |
1530 | } | 1844 | } |
1531 | 1845 | ||
1532 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_READ_ONLY) { | 1846 | if (sbi->s_partmaps[sbi->s_partition].s_partition_flags & |
1533 | printk("UDF-fs: Partition marked readonly; forcing readonly mount\n"); | 1847 | UDF_PART_FLAG_READ_ONLY) { |
1848 | printk(KERN_NOTICE "UDF-fs: Partition marked readonly; " | ||
1849 | "forcing readonly mount\n"); | ||
1534 | sb->s_flags |= MS_RDONLY; | 1850 | sb->s_flags |= MS_RDONLY; |
1535 | } | 1851 | } |
1536 | 1852 | ||
1537 | if (udf_find_fileset(sb, &fileset, &rootdir)) { | 1853 | if (udf_find_fileset(sb, &fileset, &rootdir)) { |
1538 | printk("UDF-fs: No fileset found\n"); | 1854 | printk(KERN_WARNING "UDF-fs: No fileset found\n"); |
1539 | goto error_out; | 1855 | goto error_out; |
1540 | } | 1856 | } |
1541 | 1857 | ||
1542 | if (!silent) { | 1858 | if (!silent) { |
1543 | kernel_timestamp ts; | 1859 | kernel_timestamp ts; |
1544 | udf_time_to_stamp(&ts, UDF_SB_RECORDTIME(sb)); | 1860 | udf_time_to_stamp(&ts, sbi->s_record_time); |
1545 | udf_info("UDF %s (%s) Mounting volume '%s', " | 1861 | udf_info("UDF: Mounting volume '%s', " |
1546 | "timestamp %04u/%02u/%02u %02u:%02u (%x)\n", | 1862 | "timestamp %04u/%02u/%02u %02u:%02u (%x)\n", |
1547 | UDFFS_VERSION, UDFFS_DATE, | 1863 | sbi->s_volume_ident, ts.year, ts.month, ts.day, |
1548 | UDF_SB_VOLIDENT(sb), ts.year, ts.month, ts.day, ts.hour, ts.minute, | 1864 | ts.hour, ts.minute, ts.typeAndTimezone); |
1549 | ts.typeAndTimezone); | ||
1550 | } | 1865 | } |
1551 | if (!(sb->s_flags & MS_RDONLY)) | 1866 | if (!(sb->s_flags & MS_RDONLY)) |
1552 | udf_open_lvid(sb); | 1867 | udf_open_lvid(sb); |
@@ -1556,7 +1871,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1556 | /* perhaps it's not extensible enough, but for now ... */ | 1871 | /* perhaps it's not extensible enough, but for now ... */ |
1557 | inode = udf_iget(sb, rootdir); | 1872 | inode = udf_iget(sb, rootdir); |
1558 | if (!inode) { | 1873 | if (!inode) { |
1559 | printk("UDF-fs: Error in udf_iget, block=%d, partition=%d\n", | 1874 | printk(KERN_ERR "UDF-fs: Error in udf_iget, block=%d, " |
1875 | "partition=%d\n", | ||
1560 | rootdir.logicalBlockNum, rootdir.partitionReferenceNum); | 1876 | rootdir.logicalBlockNum, rootdir.partitionReferenceNum); |
1561 | goto error_out; | 1877 | goto error_out; |
1562 | } | 1878 | } |
@@ -1564,7 +1880,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1564 | /* Allocate a dentry for the root inode */ | 1880 | /* Allocate a dentry for the root inode */ |
1565 | sb->s_root = d_alloc_root(inode); | 1881 | sb->s_root = d_alloc_root(inode); |
1566 | if (!sb->s_root) { | 1882 | if (!sb->s_root) { |
1567 | printk("UDF-fs: Couldn't allocate root dentry\n"); | 1883 | printk(KERN_ERR "UDF-fs: Couldn't allocate root dentry\n"); |
1568 | iput(inode); | 1884 | iput(inode); |
1569 | goto error_out; | 1885 | goto error_out; |
1570 | } | 1886 | } |
@@ -1572,30 +1888,32 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
1572 | return 0; | 1888 | return 0; |
1573 | 1889 | ||
1574 | error_out: | 1890 | error_out: |
1575 | if (UDF_SB_VAT(sb)) | 1891 | if (sbi->s_vat_inode) |
1576 | iput(UDF_SB_VAT(sb)); | 1892 | iput(sbi->s_vat_inode); |
1577 | if (UDF_SB_NUMPARTS(sb)) { | 1893 | if (sbi->s_partitions) { |
1578 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) | 1894 | struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition]; |
1579 | iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); | 1895 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) |
1580 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) | 1896 | iput(map->s_uspace.s_table); |
1581 | iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); | 1897 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) |
1582 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) | 1898 | iput(map->s_fspace.s_table); |
1583 | UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace); | 1899 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) |
1584 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) | 1900 | udf_sb_free_bitmap(map->s_uspace.s_bitmap); |
1585 | UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace); | 1901 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) |
1586 | if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) { | 1902 | udf_sb_free_bitmap(map->s_fspace.s_bitmap); |
1903 | if (map->s_partition_type == UDF_SPARABLE_MAP15) | ||
1587 | for (i = 0; i < 4; i++) | 1904 | for (i = 0; i < 4; i++) |
1588 | brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]); | 1905 | brelse(map->s_type_specific.s_sparing. |
1589 | } | 1906 | s_spar_map[i]); |
1590 | } | 1907 | } |
1591 | #ifdef CONFIG_UDF_NLS | 1908 | #ifdef CONFIG_UDF_NLS |
1592 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) | 1909 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) |
1593 | unload_nls(UDF_SB(sb)->s_nls_map); | 1910 | unload_nls(sbi->s_nls_map); |
1594 | #endif | 1911 | #endif |
1595 | if (!(sb->s_flags & MS_RDONLY)) | 1912 | if (!(sb->s_flags & MS_RDONLY)) |
1596 | udf_close_lvid(sb); | 1913 | udf_close_lvid(sb); |
1597 | brelse(UDF_SB_LVIDBH(sb)); | 1914 | brelse(sbi->s_lvid_bh); |
1598 | UDF_SB_FREE(sb); | 1915 | |
1916 | kfree(sbi->s_partmaps); | ||
1599 | kfree(sbi); | 1917 | kfree(sbi); |
1600 | sb->s_fs_info = NULL; | 1918 | sb->s_fs_info = NULL; |
1601 | 1919 | ||
@@ -1614,7 +1932,7 @@ void udf_error(struct super_block *sb, const char *function, | |||
1614 | va_start(args, fmt); | 1932 | va_start(args, fmt); |
1615 | vsnprintf(error_buf, sizeof(error_buf), fmt, args); | 1933 | vsnprintf(error_buf, sizeof(error_buf), fmt, args); |
1616 | va_end(args); | 1934 | va_end(args); |
1617 | printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n", | 1935 | printk(KERN_CRIT "UDF-fs error (device %s): %s: %s\n", |
1618 | sb->s_id, function, error_buf); | 1936 | sb->s_id, function, error_buf); |
1619 | } | 1937 | } |
1620 | 1938 | ||
@@ -1646,31 +1964,34 @@ void udf_warning(struct super_block *sb, const char *function, | |||
1646 | static void udf_put_super(struct super_block *sb) | 1964 | static void udf_put_super(struct super_block *sb) |
1647 | { | 1965 | { |
1648 | int i; | 1966 | int i; |
1967 | struct udf_sb_info *sbi; | ||
1649 | 1968 | ||
1650 | if (UDF_SB_VAT(sb)) | 1969 | sbi = UDF_SB(sb); |
1651 | iput(UDF_SB_VAT(sb)); | 1970 | if (sbi->s_vat_inode) |
1652 | if (UDF_SB_NUMPARTS(sb)) { | 1971 | iput(sbi->s_vat_inode); |
1653 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) | 1972 | if (sbi->s_partitions) { |
1654 | iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); | 1973 | struct udf_part_map *map = &sbi->s_partmaps[sbi->s_partition]; |
1655 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) | 1974 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) |
1656 | iput(UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); | 1975 | iput(map->s_uspace.s_table); |
1657 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) | 1976 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) |
1658 | UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_uspace); | 1977 | iput(map->s_fspace.s_table); |
1659 | if (UDF_SB_PARTFLAGS(sb, UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) | 1978 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) |
1660 | UDF_SB_FREE_BITMAP(sb,UDF_SB_PARTITION(sb), s_fspace); | 1979 | udf_sb_free_bitmap(map->s_uspace.s_bitmap); |
1661 | if (UDF_SB_PARTTYPE(sb, UDF_SB_PARTITION(sb)) == UDF_SPARABLE_MAP15) { | 1980 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) |
1981 | udf_sb_free_bitmap(map->s_fspace.s_bitmap); | ||
1982 | if (map->s_partition_type == UDF_SPARABLE_MAP15) | ||
1662 | for (i = 0; i < 4; i++) | 1983 | for (i = 0; i < 4; i++) |
1663 | brelse(UDF_SB_TYPESPAR(sb, UDF_SB_PARTITION(sb)).s_spar_map[i]); | 1984 | brelse(map->s_type_specific.s_sparing. |
1664 | } | 1985 | s_spar_map[i]); |
1665 | } | 1986 | } |
1666 | #ifdef CONFIG_UDF_NLS | 1987 | #ifdef CONFIG_UDF_NLS |
1667 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) | 1988 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) |
1668 | unload_nls(UDF_SB(sb)->s_nls_map); | 1989 | unload_nls(sbi->s_nls_map); |
1669 | #endif | 1990 | #endif |
1670 | if (!(sb->s_flags & MS_RDONLY)) | 1991 | if (!(sb->s_flags & MS_RDONLY)) |
1671 | udf_close_lvid(sb); | 1992 | udf_close_lvid(sb); |
1672 | brelse(UDF_SB_LVIDBH(sb)); | 1993 | brelse(sbi->s_lvid_bh); |
1673 | UDF_SB_FREE(sb); | 1994 | kfree(sbi->s_partmaps); |
1674 | kfree(sb->s_fs_info); | 1995 | kfree(sb->s_fs_info); |
1675 | sb->s_fs_info = NULL; | 1996 | sb->s_fs_info = NULL; |
1676 | } | 1997 | } |
@@ -1691,15 +2012,22 @@ static void udf_put_super(struct super_block *sb) | |||
1691 | static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) | 2012 | static int udf_statfs(struct dentry *dentry, struct kstatfs *buf) |
1692 | { | 2013 | { |
1693 | struct super_block *sb = dentry->d_sb; | 2014 | struct super_block *sb = dentry->d_sb; |
2015 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
2016 | struct logicalVolIntegrityDescImpUse *lvidiu; | ||
2017 | |||
2018 | if (sbi->s_lvid_bh != NULL) | ||
2019 | lvidiu = udf_sb_lvidiu(sbi); | ||
2020 | else | ||
2021 | lvidiu = NULL; | ||
1694 | 2022 | ||
1695 | buf->f_type = UDF_SUPER_MAGIC; | 2023 | buf->f_type = UDF_SUPER_MAGIC; |
1696 | buf->f_bsize = sb->s_blocksize; | 2024 | buf->f_bsize = sb->s_blocksize; |
1697 | buf->f_blocks = UDF_SB_PARTLEN(sb, UDF_SB_PARTITION(sb)); | 2025 | buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len; |
1698 | buf->f_bfree = udf_count_free(sb); | 2026 | buf->f_bfree = udf_count_free(sb); |
1699 | buf->f_bavail = buf->f_bfree; | 2027 | buf->f_bavail = buf->f_bfree; |
1700 | buf->f_files = (UDF_SB_LVIDBH(sb) ? | 2028 | buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) + |
1701 | (le32_to_cpu(UDF_SB_LVIDIU(sb)->numFiles) + | 2029 | le32_to_cpu(lvidiu->numDirs)) : 0) |
1702 | le32_to_cpu(UDF_SB_LVIDIU(sb)->numDirs)) : 0) + buf->f_bfree; | 2030 | + buf->f_bfree; |
1703 | buf->f_ffree = buf->f_bfree; | 2031 | buf->f_ffree = buf->f_bfree; |
1704 | /* __kernel_fsid_t f_fsid */ | 2032 | /* __kernel_fsid_t f_fsid */ |
1705 | buf->f_namelen = UDF_NAME_LEN - 2; | 2033 | buf->f_namelen = UDF_NAME_LEN - 2; |
@@ -1711,7 +2039,8 @@ static unsigned char udf_bitmap_lookup[16] = { | |||
1711 | 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 | 2039 | 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 |
1712 | }; | 2040 | }; |
1713 | 2041 | ||
1714 | static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap) | 2042 | static unsigned int udf_count_free_bitmap(struct super_block *sb, |
2043 | struct udf_bitmap *bitmap) | ||
1715 | { | 2044 | { |
1716 | struct buffer_head *bh = NULL; | 2045 | struct buffer_head *bh = NULL; |
1717 | unsigned int accum = 0; | 2046 | unsigned int accum = 0; |
@@ -1727,7 +2056,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb, struct udf_bit | |||
1727 | lock_kernel(); | 2056 | lock_kernel(); |
1728 | 2057 | ||
1729 | loc.logicalBlockNum = bitmap->s_extPosition; | 2058 | loc.logicalBlockNum = bitmap->s_extPosition; |
1730 | loc.partitionReferenceNum = UDF_SB_PARTITION(sb); | 2059 | loc.partitionReferenceNum = UDF_SB(sb)->s_partition; |
1731 | bh = udf_read_ptagged(sb, loc, 0, &ident); | 2060 | bh = udf_read_ptagged(sb, loc, 0, &ident); |
1732 | 2061 | ||
1733 | if (!bh) { | 2062 | if (!bh) { |
@@ -1772,7 +2101,8 @@ out: | |||
1772 | return accum; | 2101 | return accum; |
1773 | } | 2102 | } |
1774 | 2103 | ||
1775 | static unsigned int udf_count_free_table(struct super_block *sb, struct inode *table) | 2104 | static unsigned int udf_count_free_table(struct super_block *sb, |
2105 | struct inode *table) | ||
1776 | { | 2106 | { |
1777 | unsigned int accum = 0; | 2107 | unsigned int accum = 0; |
1778 | uint32_t elen; | 2108 | uint32_t elen; |
@@ -1782,13 +2112,13 @@ static unsigned int udf_count_free_table(struct super_block *sb, struct inode *t | |||
1782 | 2112 | ||
1783 | lock_kernel(); | 2113 | lock_kernel(); |
1784 | 2114 | ||
1785 | epos.block = UDF_I_LOCATION(table); | 2115 | epos.block = UDF_I(table)->i_location; |
1786 | epos.offset = sizeof(struct unallocSpaceEntry); | 2116 | epos.offset = sizeof(struct unallocSpaceEntry); |
1787 | epos.bh = NULL; | 2117 | epos.bh = NULL; |
1788 | 2118 | ||
1789 | while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) { | 2119 | while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) |
1790 | accum += (elen >> table->i_sb->s_blocksize_bits); | 2120 | accum += (elen >> table->i_sb->s_blocksize_bits); |
1791 | } | 2121 | |
1792 | brelse(epos.bh); | 2122 | brelse(epos.bh); |
1793 | 2123 | ||
1794 | unlock_kernel(); | 2124 | unlock_kernel(); |
@@ -1799,10 +2129,17 @@ static unsigned int udf_count_free_table(struct super_block *sb, struct inode *t | |||
1799 | static unsigned int udf_count_free(struct super_block *sb) | 2129 | static unsigned int udf_count_free(struct super_block *sb) |
1800 | { | 2130 | { |
1801 | unsigned int accum = 0; | 2131 | unsigned int accum = 0; |
1802 | 2132 | struct udf_sb_info *sbi; | |
1803 | if (UDF_SB_LVIDBH(sb)) { | 2133 | struct udf_part_map *map; |
1804 | if (le32_to_cpu(UDF_SB_LVID(sb)->numOfPartitions) > UDF_SB_PARTITION(sb)) { | 2134 | |
1805 | accum = le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]); | 2135 | sbi = UDF_SB(sb); |
2136 | if (sbi->s_lvid_bh) { | ||
2137 | struct logicalVolIntegrityDesc *lvid = | ||
2138 | (struct logicalVolIntegrityDesc *) | ||
2139 | sbi->s_lvid_bh->b_data; | ||
2140 | if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) { | ||
2141 | accum = le32_to_cpu( | ||
2142 | lvid->freeSpaceTable[sbi->s_partition]); | ||
1806 | if (accum == 0xFFFFFFFF) | 2143 | if (accum == 0xFFFFFFFF) |
1807 | accum = 0; | 2144 | accum = 0; |
1808 | } | 2145 | } |
@@ -1811,24 +2148,25 @@ static unsigned int udf_count_free(struct super_block *sb) | |||
1811 | if (accum) | 2148 | if (accum) |
1812 | return accum; | 2149 | return accum; |
1813 | 2150 | ||
1814 | if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_BITMAP) { | 2151 | map = &sbi->s_partmaps[sbi->s_partition]; |
2152 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { | ||
1815 | accum += udf_count_free_bitmap(sb, | 2153 | accum += udf_count_free_bitmap(sb, |
1816 | UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_bitmap); | 2154 | map->s_uspace.s_bitmap); |
1817 | } | 2155 | } |
1818 | if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_BITMAP) { | 2156 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) { |
1819 | accum += udf_count_free_bitmap(sb, | 2157 | accum += udf_count_free_bitmap(sb, |
1820 | UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_bitmap); | 2158 | map->s_fspace.s_bitmap); |
1821 | } | 2159 | } |
1822 | if (accum) | 2160 | if (accum) |
1823 | return accum; | 2161 | return accum; |
1824 | 2162 | ||
1825 | if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_UNALLOC_TABLE) { | 2163 | if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { |
1826 | accum += udf_count_free_table(sb, | 2164 | accum += udf_count_free_table(sb, |
1827 | UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_uspace.s_table); | 2165 | map->s_uspace.s_table); |
1828 | } | 2166 | } |
1829 | if (UDF_SB_PARTFLAGS(sb,UDF_SB_PARTITION(sb)) & UDF_PART_FLAG_FREED_TABLE) { | 2167 | if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) { |
1830 | accum += udf_count_free_table(sb, | 2168 | accum += udf_count_free_table(sb, |
1831 | UDF_SB_PARTMAPS(sb)[UDF_SB_PARTITION(sb)].s_fspace.s_table); | 2169 | map->s_fspace.s_table); |
1832 | } | 2170 | } |
1833 | 2171 | ||
1834 | return accum; | 2172 | return accum; |
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index e6f933dd6a7b..6ec99221e50c 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c | |||
@@ -33,7 +33,8 @@ | |||
33 | #include <linux/buffer_head.h> | 33 | #include <linux/buffer_head.h> |
34 | #include "udf_i.h" | 34 | #include "udf_i.h" |
35 | 35 | ||
36 | static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, char *to) | 36 | static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, |
37 | char *to) | ||
37 | { | 38 | { |
38 | struct pathComponent *pc; | 39 | struct pathComponent *pc; |
39 | int elen = 0; | 40 | int elen = 0; |
@@ -78,10 +79,12 @@ static int udf_symlink_filler(struct file *file, struct page *page) | |||
78 | char *symlink; | 79 | char *symlink; |
79 | int err = -EIO; | 80 | int err = -EIO; |
80 | char *p = kmap(page); | 81 | char *p = kmap(page); |
82 | struct udf_inode_info *iinfo; | ||
81 | 83 | ||
82 | lock_kernel(); | 84 | lock_kernel(); |
83 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB) { | 85 | iinfo = UDF_I(inode); |
84 | symlink = UDF_I_DATA(inode) + UDF_I_LENEATTR(inode); | 86 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { |
87 | symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr; | ||
85 | } else { | 88 | } else { |
86 | bh = sb_bread(inode->i_sb, udf_block_map(inode, 0)); | 89 | bh = sb_bread(inode->i_sb, udf_block_map(inode, 0)); |
87 | 90 | ||
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c index 7fc3912885a5..fe61be17cdab 100644 --- a/fs/udf/truncate.c +++ b/fs/udf/truncate.c | |||
@@ -74,17 +74,18 @@ void udf_truncate_tail_extent(struct inode *inode) | |||
74 | uint64_t lbcount = 0; | 74 | uint64_t lbcount = 0; |
75 | int8_t etype = -1, netype; | 75 | int8_t etype = -1, netype; |
76 | int adsize; | 76 | int adsize; |
77 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
77 | 78 | ||
78 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB || | 79 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || |
79 | inode->i_size == UDF_I_LENEXTENTS(inode)) | 80 | inode->i_size == iinfo->i_lenExtents) |
80 | return; | 81 | return; |
81 | /* Are we going to delete the file anyway? */ | 82 | /* Are we going to delete the file anyway? */ |
82 | if (inode->i_nlink == 0) | 83 | if (inode->i_nlink == 0) |
83 | return; | 84 | return; |
84 | 85 | ||
85 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 86 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
86 | adsize = sizeof(short_ad); | 87 | adsize = sizeof(short_ad); |
87 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 88 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
88 | adsize = sizeof(long_ad); | 89 | adsize = sizeof(long_ad); |
89 | else | 90 | else |
90 | BUG(); | 91 | BUG(); |
@@ -117,7 +118,7 @@ void udf_truncate_tail_extent(struct inode *inode) | |||
117 | } | 118 | } |
118 | /* This inode entry is in-memory only and thus we don't have to mark | 119 | /* This inode entry is in-memory only and thus we don't have to mark |
119 | * the inode dirty */ | 120 | * the inode dirty */ |
120 | UDF_I_LENEXTENTS(inode) = inode->i_size; | 121 | iinfo->i_lenExtents = inode->i_size; |
121 | brelse(epos.bh); | 122 | brelse(epos.bh); |
122 | } | 123 | } |
123 | 124 | ||
@@ -129,19 +130,20 @@ void udf_discard_prealloc(struct inode *inode) | |||
129 | uint64_t lbcount = 0; | 130 | uint64_t lbcount = 0; |
130 | int8_t etype = -1, netype; | 131 | int8_t etype = -1, netype; |
131 | int adsize; | 132 | int adsize; |
133 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
132 | 134 | ||
133 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB || | 135 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB || |
134 | inode->i_size == UDF_I_LENEXTENTS(inode)) | 136 | inode->i_size == iinfo->i_lenExtents) |
135 | return; | 137 | return; |
136 | 138 | ||
137 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 139 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
138 | adsize = sizeof(short_ad); | 140 | adsize = sizeof(short_ad); |
139 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 141 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
140 | adsize = sizeof(long_ad); | 142 | adsize = sizeof(long_ad); |
141 | else | 143 | else |
142 | adsize = 0; | 144 | adsize = 0; |
143 | 145 | ||
144 | epos.block = UDF_I_LOCATION(inode); | 146 | epos.block = iinfo->i_location; |
145 | 147 | ||
146 | /* Find the last extent in the file */ | 148 | /* Find the last extent in the file */ |
147 | while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { | 149 | while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) { |
@@ -153,8 +155,9 @@ void udf_discard_prealloc(struct inode *inode) | |||
153 | lbcount -= elen; | 155 | lbcount -= elen; |
154 | extent_trunc(inode, &epos, eloc, etype, elen, 0); | 156 | extent_trunc(inode, &epos, eloc, etype, elen, 0); |
155 | if (!epos.bh) { | 157 | if (!epos.bh) { |
156 | UDF_I_LENALLOC(inode) = | 158 | iinfo->i_lenAlloc = |
157 | epos.offset - udf_file_entry_alloc_offset(inode); | 159 | epos.offset - |
160 | udf_file_entry_alloc_offset(inode); | ||
158 | mark_inode_dirty(inode); | 161 | mark_inode_dirty(inode); |
159 | } else { | 162 | } else { |
160 | struct allocExtDesc *aed = | 163 | struct allocExtDesc *aed = |
@@ -163,7 +166,7 @@ void udf_discard_prealloc(struct inode *inode) | |||
163 | cpu_to_le32(epos.offset - | 166 | cpu_to_le32(epos.offset - |
164 | sizeof(struct allocExtDesc)); | 167 | sizeof(struct allocExtDesc)); |
165 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || | 168 | if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || |
166 | UDF_SB_UDFREV(inode->i_sb) >= 0x0201) | 169 | UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) |
167 | udf_update_tag(epos.bh->b_data, epos.offset); | 170 | udf_update_tag(epos.bh->b_data, epos.offset); |
168 | else | 171 | else |
169 | udf_update_tag(epos.bh->b_data, | 172 | udf_update_tag(epos.bh->b_data, |
@@ -173,7 +176,7 @@ void udf_discard_prealloc(struct inode *inode) | |||
173 | } | 176 | } |
174 | /* This inode entry is in-memory only and thus we don't have to mark | 177 | /* This inode entry is in-memory only and thus we don't have to mark |
175 | * the inode dirty */ | 178 | * the inode dirty */ |
176 | UDF_I_LENEXTENTS(inode) = lbcount; | 179 | iinfo->i_lenExtents = lbcount; |
177 | brelse(epos.bh); | 180 | brelse(epos.bh); |
178 | } | 181 | } |
179 | 182 | ||
@@ -184,13 +187,15 @@ void udf_truncate_extents(struct inode *inode) | |||
184 | uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; | 187 | uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc; |
185 | int8_t etype; | 188 | int8_t etype; |
186 | struct super_block *sb = inode->i_sb; | 189 | struct super_block *sb = inode->i_sb; |
190 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
187 | sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; | 191 | sector_t first_block = inode->i_size >> sb->s_blocksize_bits, offset; |
188 | loff_t byte_offset; | 192 | loff_t byte_offset; |
189 | int adsize; | 193 | int adsize; |
194 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
190 | 195 | ||
191 | if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT) | 196 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) |
192 | adsize = sizeof(short_ad); | 197 | adsize = sizeof(short_ad); |
193 | else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG) | 198 | else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) |
194 | adsize = sizeof(long_ad); | 199 | adsize = sizeof(long_ad); |
195 | else | 200 | else |
196 | BUG(); | 201 | BUG(); |
@@ -212,7 +217,8 @@ void udf_truncate_extents(struct inode *inode) | |||
212 | else | 217 | else |
213 | lenalloc -= sizeof(struct allocExtDesc); | 218 | lenalloc -= sizeof(struct allocExtDesc); |
214 | 219 | ||
215 | while ((etype = udf_current_aext(inode, &epos, &eloc, &elen, 0)) != -1) { | 220 | while ((etype = udf_current_aext(inode, &epos, &eloc, |
221 | &elen, 0)) != -1) { | ||
216 | if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { | 222 | if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) { |
217 | udf_write_aext(inode, &epos, neloc, nelen, 0); | 223 | udf_write_aext(inode, &epos, neloc, nelen, 0); |
218 | if (indirect_ext_len) { | 224 | if (indirect_ext_len) { |
@@ -224,35 +230,43 @@ void udf_truncate_extents(struct inode *inode) | |||
224 | 0, indirect_ext_len); | 230 | 0, indirect_ext_len); |
225 | } else { | 231 | } else { |
226 | if (!epos.bh) { | 232 | if (!epos.bh) { |
227 | UDF_I_LENALLOC(inode) = lenalloc; | 233 | iinfo->i_lenAlloc = |
234 | lenalloc; | ||
228 | mark_inode_dirty(inode); | 235 | mark_inode_dirty(inode); |
229 | } else { | 236 | } else { |
230 | struct allocExtDesc *aed = | 237 | struct allocExtDesc *aed = |
231 | (struct allocExtDesc *)(epos.bh->b_data); | 238 | (struct allocExtDesc *) |
239 | (epos.bh->b_data); | ||
240 | int len = | ||
241 | sizeof(struct allocExtDesc); | ||
242 | |||
232 | aed->lengthAllocDescs = | 243 | aed->lengthAllocDescs = |
233 | cpu_to_le32(lenalloc); | 244 | cpu_to_le32(lenalloc); |
234 | if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || | 245 | if (!UDF_QUERY_FLAG(sb, |
235 | UDF_SB_UDFREV(sb) >= 0x0201) | 246 | UDF_FLAG_STRICT) || |
236 | udf_update_tag(epos.bh->b_data, | 247 | sbi->s_udfrev >= 0x0201) |
237 | lenalloc + | 248 | len += lenalloc; |
238 | sizeof(struct allocExtDesc)); | 249 | |
239 | else | 250 | udf_update_tag(epos.bh->b_data, |
240 | udf_update_tag(epos.bh->b_data, | 251 | len); |
241 | sizeof(struct allocExtDesc)); | 252 | mark_buffer_dirty_inode( |
242 | mark_buffer_dirty_inode(epos.bh, inode); | 253 | epos.bh, inode); |
243 | } | 254 | } |
244 | } | 255 | } |
245 | brelse(epos.bh); | 256 | brelse(epos.bh); |
246 | epos.offset = sizeof(struct allocExtDesc); | 257 | epos.offset = sizeof(struct allocExtDesc); |
247 | epos.block = eloc; | 258 | epos.block = eloc; |
248 | epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, eloc, 0)); | 259 | epos.bh = udf_tread(sb, |
260 | udf_get_lb_pblock(sb, eloc, 0)); | ||
249 | if (elen) | 261 | if (elen) |
250 | indirect_ext_len = (elen + sb->s_blocksize -1) >> | 262 | indirect_ext_len = |
263 | (elen + sb->s_blocksize - 1) >> | ||
251 | sb->s_blocksize_bits; | 264 | sb->s_blocksize_bits; |
252 | else | 265 | else |
253 | indirect_ext_len = 1; | 266 | indirect_ext_len = 1; |
254 | } else { | 267 | } else { |
255 | extent_trunc(inode, &epos, eloc, etype, elen, 0); | 268 | extent_trunc(inode, &epos, eloc, etype, |
269 | elen, 0); | ||
256 | epos.offset += adsize; | 270 | epos.offset += adsize; |
257 | } | 271 | } |
258 | } | 272 | } |
@@ -264,19 +278,20 @@ void udf_truncate_extents(struct inode *inode) | |||
264 | indirect_ext_len); | 278 | indirect_ext_len); |
265 | } else { | 279 | } else { |
266 | if (!epos.bh) { | 280 | if (!epos.bh) { |
267 | UDF_I_LENALLOC(inode) = lenalloc; | 281 | iinfo->i_lenAlloc = lenalloc; |
268 | mark_inode_dirty(inode); | 282 | mark_inode_dirty(inode); |
269 | } else { | 283 | } else { |
270 | struct allocExtDesc *aed = | 284 | struct allocExtDesc *aed = |
271 | (struct allocExtDesc *)(epos.bh->b_data); | 285 | (struct allocExtDesc *)(epos.bh->b_data); |
272 | aed->lengthAllocDescs = cpu_to_le32(lenalloc); | 286 | aed->lengthAllocDescs = cpu_to_le32(lenalloc); |
273 | if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || | 287 | if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) || |
274 | UDF_SB_UDFREV(sb) >= 0x0201) | 288 | sbi->s_udfrev >= 0x0201) |
275 | udf_update_tag(epos.bh->b_data, | 289 | udf_update_tag(epos.bh->b_data, |
276 | lenalloc + sizeof(struct allocExtDesc)); | 290 | lenalloc + |
291 | sizeof(struct allocExtDesc)); | ||
277 | else | 292 | else |
278 | udf_update_tag(epos.bh->b_data, | 293 | udf_update_tag(epos.bh->b_data, |
279 | sizeof(struct allocExtDesc)); | 294 | sizeof(struct allocExtDesc)); |
280 | mark_buffer_dirty_inode(epos.bh, inode); | 295 | mark_buffer_dirty_inode(epos.bh, inode); |
281 | } | 296 | } |
282 | } | 297 | } |
@@ -290,13 +305,16 @@ void udf_truncate_extents(struct inode *inode) | |||
290 | * extending the file by 'offset' blocks. | 305 | * extending the file by 'offset' blocks. |
291 | */ | 306 | */ |
292 | if ((!epos.bh && | 307 | if ((!epos.bh && |
293 | epos.offset == udf_file_entry_alloc_offset(inode)) || | 308 | epos.offset == |
294 | (epos.bh && epos.offset == sizeof(struct allocExtDesc))) { | 309 | udf_file_entry_alloc_offset(inode)) || |
310 | (epos.bh && epos.offset == | ||
311 | sizeof(struct allocExtDesc))) { | ||
295 | /* File has no extents at all or has empty last | 312 | /* File has no extents at all or has empty last |
296 | * indirect extent! Create a fake extent... */ | 313 | * indirect extent! Create a fake extent... */ |
297 | extent.extLocation.logicalBlockNum = 0; | 314 | extent.extLocation.logicalBlockNum = 0; |
298 | extent.extLocation.partitionReferenceNum = 0; | 315 | extent.extLocation.partitionReferenceNum = 0; |
299 | extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED; | 316 | extent.extLength = |
317 | EXT_NOT_RECORDED_NOT_ALLOCATED; | ||
300 | } else { | 318 | } else { |
301 | epos.offset -= adsize; | 319 | epos.offset -= adsize; |
302 | etype = udf_next_aext(inode, &epos, | 320 | etype = udf_next_aext(inode, &epos, |
@@ -305,10 +323,12 @@ void udf_truncate_extents(struct inode *inode) | |||
305 | extent.extLength |= etype << 30; | 323 | extent.extLength |= etype << 30; |
306 | } | 324 | } |
307 | udf_extend_file(inode, &epos, &extent, | 325 | udf_extend_file(inode, &epos, &extent, |
308 | offset + ((inode->i_size & (sb->s_blocksize - 1)) != 0)); | 326 | offset + |
327 | ((inode->i_size & | ||
328 | (sb->s_blocksize - 1)) != 0)); | ||
309 | } | 329 | } |
310 | } | 330 | } |
311 | UDF_I_LENEXTENTS(inode) = inode->i_size; | 331 | iinfo->i_lenExtents = inode->i_size; |
312 | 332 | ||
313 | brelse(epos.bh); | 333 | brelse(epos.bh); |
314 | } | 334 | } |
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index d7dbe6f3ba0c..ccc52f16bf7d 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h | |||
@@ -7,20 +7,4 @@ static inline struct udf_inode_info *UDF_I(struct inode *inode) | |||
7 | return list_entry(inode, struct udf_inode_info, vfs_inode); | 7 | return list_entry(inode, struct udf_inode_info, vfs_inode); |
8 | } | 8 | } |
9 | 9 | ||
10 | #define UDF_I_LOCATION(X) ( UDF_I(X)->i_location ) | ||
11 | #define UDF_I_LENEATTR(X) ( UDF_I(X)->i_lenEAttr ) | ||
12 | #define UDF_I_LENALLOC(X) ( UDF_I(X)->i_lenAlloc ) | ||
13 | #define UDF_I_LENEXTENTS(X) ( UDF_I(X)->i_lenExtents ) | ||
14 | #define UDF_I_UNIQUE(X) ( UDF_I(X)->i_unique ) | ||
15 | #define UDF_I_ALLOCTYPE(X) ( UDF_I(X)->i_alloc_type ) | ||
16 | #define UDF_I_EFE(X) ( UDF_I(X)->i_efe ) | ||
17 | #define UDF_I_USE(X) ( UDF_I(X)->i_use ) | ||
18 | #define UDF_I_STRAT4096(X) ( UDF_I(X)->i_strat4096 ) | ||
19 | #define UDF_I_NEXT_ALLOC_BLOCK(X) ( UDF_I(X)->i_next_alloc_block ) | ||
20 | #define UDF_I_NEXT_ALLOC_GOAL(X) ( UDF_I(X)->i_next_alloc_goal ) | ||
21 | #define UDF_I_CRTIME(X) ( UDF_I(X)->i_crtime ) | ||
22 | #define UDF_I_SAD(X) ( UDF_I(X)->i_ext.i_sad ) | ||
23 | #define UDF_I_LAD(X) ( UDF_I(X)->i_ext.i_lad ) | ||
24 | #define UDF_I_DATA(X) ( UDF_I(X)->i_ext.i_data ) | ||
25 | |||
26 | #endif /* !defined(_LINUX_UDF_I_H) */ | 10 | #endif /* !defined(_LINUX_UDF_I_H) */ |
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 3c2982017c6d..737d1c604eea 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #define UDF_FLAG_GID_IGNORE 14 | 26 | #define UDF_FLAG_GID_IGNORE 14 |
27 | #define UDF_FLAG_UID_SET 15 | 27 | #define UDF_FLAG_UID_SET 15 |
28 | #define UDF_FLAG_GID_SET 16 | 28 | #define UDF_FLAG_GID_SET 16 |
29 | #define UDF_FLAG_SESSION_SET 17 | ||
30 | #define UDF_FLAG_LASTBLOCK_SET 18 | ||
29 | 31 | ||
30 | #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 | 32 | #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 |
31 | #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002 | 33 | #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002 |
@@ -41,96 +43,12 @@ static inline struct udf_sb_info *UDF_SB(struct super_block *sb) | |||
41 | return sb->s_fs_info; | 43 | return sb->s_fs_info; |
42 | } | 44 | } |
43 | 45 | ||
44 | #define UDF_SB_FREE(X)\ | 46 | struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); |
45 | {\ | ||
46 | if (UDF_SB(X)) {\ | ||
47 | kfree(UDF_SB_PARTMAPS(X));\ | ||
48 | UDF_SB_PARTMAPS(X) = NULL;\ | ||
49 | }\ | ||
50 | } | ||
51 | |||
52 | #define UDF_SB_ALLOC_PARTMAPS(X,Y)\ | ||
53 | {\ | ||
54 | UDF_SB_PARTMAPS(X) = kmalloc(sizeof(struct udf_part_map) * Y, GFP_KERNEL);\ | ||
55 | if (UDF_SB_PARTMAPS(X) != NULL) {\ | ||
56 | UDF_SB_NUMPARTS(X) = Y;\ | ||
57 | memset(UDF_SB_PARTMAPS(X), 0x00, sizeof(struct udf_part_map) * Y);\ | ||
58 | } else {\ | ||
59 | UDF_SB_NUMPARTS(X) = 0;\ | ||
60 | udf_error(X, __FUNCTION__, "Unable to allocate space for %d partition maps", Y);\ | ||
61 | }\ | ||
62 | } | ||
63 | |||
64 | #define UDF_SB_ALLOC_BITMAP(X,Y,Z)\ | ||
65 | {\ | ||
66 | int nr_groups = ((UDF_SB_PARTLEN((X),(Y)) + (sizeof(struct spaceBitmapDesc) << 3) +\ | ||
67 | ((X)->s_blocksize * 8) - 1) / ((X)->s_blocksize * 8));\ | ||
68 | int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\ | ||
69 | if (size <= PAGE_SIZE)\ | ||
70 | UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = kmalloc(size, GFP_KERNEL);\ | ||
71 | else\ | ||
72 | UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap = vmalloc(size);\ | ||
73 | if (UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap != NULL) {\ | ||
74 | memset(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap, 0x00, size);\ | ||
75 | UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap =\ | ||
76 | (struct buffer_head **)(UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap + 1);\ | ||
77 | UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups = nr_groups;\ | ||
78 | } else {\ | ||
79 | udf_error(X, __FUNCTION__, "Unable to allocate space for bitmap and %d buffer_head pointers", nr_groups);\ | ||
80 | }\ | ||
81 | } | ||
82 | 47 | ||
83 | #define UDF_SB_FREE_BITMAP(X,Y,Z)\ | 48 | int udf_compute_nr_groups(struct super_block *sb, u32 partition); |
84 | {\ | ||
85 | int i;\ | ||
86 | int nr_groups = UDF_SB_BITMAP_NR_GROUPS(X,Y,Z);\ | ||
87 | int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) * nr_groups);\ | ||
88 | for (i = 0; i < nr_groups; i++) {\ | ||
89 | if (UDF_SB_BITMAP(X,Y,Z,i))\ | ||
90 | brelse(UDF_SB_BITMAP(X,Y,Z,i));\ | ||
91 | }\ | ||
92 | if (size <= PAGE_SIZE)\ | ||
93 | kfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\ | ||
94 | else\ | ||
95 | vfree(UDF_SB_PARTMAPS(X)[Y].Z.s_bitmap);\ | ||
96 | } | ||
97 | 49 | ||
98 | #define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) ) | 50 | #define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) ) |
99 | #define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) ) | 51 | #define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) ) |
100 | #define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) ) | 52 | #define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) ) |
101 | 53 | ||
102 | #define UDF_UPDATE_UDFREV(X,Y) ( ((Y) > UDF_SB_UDFREV(X)) ? UDF_SB_UDFREV(X) = (Y) : UDF_SB_UDFREV(X) ) | ||
103 | |||
104 | #define UDF_SB_PARTMAPS(X) ( UDF_SB(X)->s_partmaps ) | ||
105 | #define UDF_SB_PARTTYPE(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_type ) | ||
106 | #define UDF_SB_PARTROOT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_root ) | ||
107 | #define UDF_SB_PARTLEN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_len ) | ||
108 | #define UDF_SB_PARTVSN(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_volumeseqnum ) | ||
109 | #define UDF_SB_PARTNUM(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_num ) | ||
110 | #define UDF_SB_TYPESPAR(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_sparing ) | ||
111 | #define UDF_SB_TYPEVIRT(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_type_specific.s_virtual ) | ||
112 | #define UDF_SB_PARTFUNC(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_func ) | ||
113 | #define UDF_SB_PARTFLAGS(X,Y) ( UDF_SB_PARTMAPS(X)[(Y)].s_partition_flags ) | ||
114 | #define UDF_SB_BITMAP(X,Y,Z,I) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_block_bitmap[I] ) | ||
115 | #define UDF_SB_BITMAP_NR_GROUPS(X,Y,Z) ( UDF_SB_PARTMAPS(X)[(Y)].Z.s_bitmap->s_nr_groups ) | ||
116 | |||
117 | #define UDF_SB_VOLIDENT(X) ( UDF_SB(X)->s_volident ) | ||
118 | #define UDF_SB_NUMPARTS(X) ( UDF_SB(X)->s_partitions ) | ||
119 | #define UDF_SB_PARTITION(X) ( UDF_SB(X)->s_partition ) | ||
120 | #define UDF_SB_SESSION(X) ( UDF_SB(X)->s_session ) | ||
121 | #define UDF_SB_ANCHOR(X) ( UDF_SB(X)->s_anchor ) | ||
122 | #define UDF_SB_LASTBLOCK(X) ( UDF_SB(X)->s_lastblock ) | ||
123 | #define UDF_SB_LVIDBH(X) ( UDF_SB(X)->s_lvidbh ) | ||
124 | #define UDF_SB_LVID(X) ( (struct logicalVolIntegrityDesc *)UDF_SB_LVIDBH(X)->b_data ) | ||
125 | #define UDF_SB_LVIDIU(X) ( (struct logicalVolIntegrityDescImpUse *)&(UDF_SB_LVID(X)->impUse[le32_to_cpu(UDF_SB_LVID(X)->numOfPartitions) * 2 * sizeof(uint32_t)/sizeof(uint8_t)]) ) | ||
126 | |||
127 | #define UDF_SB_UMASK(X) ( UDF_SB(X)->s_umask ) | ||
128 | #define UDF_SB_GID(X) ( UDF_SB(X)->s_gid ) | ||
129 | #define UDF_SB_UID(X) ( UDF_SB(X)->s_uid ) | ||
130 | #define UDF_SB_RECORDTIME(X) ( UDF_SB(X)->s_recordtime ) | ||
131 | #define UDF_SB_SERIALNUM(X) ( UDF_SB(X)->s_serialnum ) | ||
132 | #define UDF_SB_UDFREV(X) ( UDF_SB(X)->s_udfrev ) | ||
133 | #define UDF_SB_FLAGS(X) ( UDF_SB(X)->s_flags ) | ||
134 | #define UDF_SB_VAT(X) ( UDF_SB(X)->s_vat ) | ||
135 | |||
136 | #endif /* __LINUX_UDF_SB_H */ | 54 | #endif /* __LINUX_UDF_SB_H */ |
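The bulk of this udf_sb.h hunk deletes the UDF_SB_* accessor macros in favour of direct udf_sb_info field access, keeping only udf_sb_lvidiu() and udf_compute_nr_groups() as declared helpers. The definition of udf_sb_lvidiu() is not in the hunks shown here, but the removed UDF_SB_LVIDIU macro spells out the computation, so the replacement function presumably reduces to something like:

	struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
	{
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
		u32 partnum = le32_to_cpu(lvid->numOfPartitions);
		/* skip numOfPartitions * 2 32-bit entries, exactly as the
		 * removed UDF_SB_LVIDIU macro indexed into impUse[] */
		u32 offset = partnum * 2 * sizeof(uint32_t);

		return (struct logicalVolIntegrityDescImpUse *)&lvid->impUse[offset];
	}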
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index c8016cc9e7e6..681dc2b66cdb 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
@@ -24,18 +24,21 @@ | |||
24 | #define UDF_PATH_LEN 1023 | 24 | #define UDF_PATH_LEN 1023 |
25 | 25 | ||
26 | #define udf_file_entry_alloc_offset(inode)\ | 26 | #define udf_file_entry_alloc_offset(inode)\ |
27 | (UDF_I_USE(inode) ?\ | 27 | (UDF_I(inode)->i_use ?\ |
28 | sizeof(struct unallocSpaceEntry) :\ | 28 | sizeof(struct unallocSpaceEntry) :\ |
29 | ((UDF_I_EFE(inode) ?\ | 29 | ((UDF_I(inode)->i_efe ?\ |
30 | sizeof(struct extendedFileEntry) :\ | 30 | sizeof(struct extendedFileEntry) :\ |
31 | sizeof(struct fileEntry)) + UDF_I_LENEATTR(inode))) | 31 | sizeof(struct fileEntry)) + UDF_I(inode)->i_lenEAttr)) |
32 | 32 | ||
33 | #define udf_ext0_offset(inode)\ | 33 | #define udf_ext0_offset(inode)\ |
34 | (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB ?\ | 34 | (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ?\ |
35 | udf_file_entry_alloc_offset(inode) : 0) | 35 | udf_file_entry_alloc_offset(inode) : 0) |
36 | 36 | ||
37 | #define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset)) | 37 | #define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset)) |
38 | 38 | ||
39 | /* computes tag checksum */ | ||
40 | u8 udf_tag_checksum(const tag *t); | ||
41 | |||
39 | struct dentry; | 42 | struct dentry; |
40 | struct inode; | 43 | struct inode; |
41 | struct task_struct; | 44 | struct task_struct; |
@@ -185,8 +188,8 @@ extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *, | |||
185 | sector_t *); | 188 | sector_t *); |
186 | extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, | 189 | extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, |
187 | int *offset); | 190 | int *offset); |
188 | extern long_ad *udf_get_filelongad(uint8_t *, int, int *, int); | 191 | extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int); |
189 | extern short_ad *udf_get_fileshortad(uint8_t *, int, int *, int); | 192 | extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int); |
190 | 193 | ||
191 | /* crc.c */ | 194 | /* crc.c */ |
192 | extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t); | 195 | extern uint16_t udf_crc(uint8_t *, uint32_t, uint16_t); |
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c index adcb87c2da7e..ce595732ba6f 100644 --- a/fs/udf/udftime.c +++ b/fs/udf/udftime.c | |||
@@ -18,8 +18,10 @@ | |||
18 | Boston, MA 02111-1307, USA. */ | 18 | Boston, MA 02111-1307, USA. */ |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * dgb 10/02/98: ripped this from glibc source to help convert timestamps to unix time | 21 | * dgb 10/02/98: ripped this from glibc source to help convert timestamps |
22 | * 10/04/98: added new table-based lookup after seeing how ugly the gnu code is | 22 | * to unix time |
23 | * 10/04/98: added new table-based lookup after seeing how ugly | ||
24 | * the gnu code is | ||
23 | * blf 09/27/99: ripped out all the old code and inserted new table from | 25 | * blf 09/27/99: ripped out all the old code and inserted new table from |
24 | * John Brockmeyer (without leap second corrections) | 26 | * John Brockmeyer (without leap second corrections) |
25 | * rewrote udf_stamp_to_time and fixed timezone accounting in | 27 | * rewrote udf_stamp_to_time and fixed timezone accounting in |
@@ -55,27 +57,27 @@ static const unsigned short int __mon_yday[2][13] = { | |||
55 | 57 | ||
56 | #define MAX_YEAR_SECONDS 69 | 58 | #define MAX_YEAR_SECONDS 69 |
57 | #define SPD 0x15180 /*3600*24 */ | 59 | #define SPD 0x15180 /*3600*24 */ |
58 | #define SPY(y,l,s) (SPD * (365*y+l)+s) | 60 | #define SPY(y, l, s) (SPD * (365 * y + l) + s) |
59 | 61 | ||
60 | static time_t year_seconds[MAX_YEAR_SECONDS]= { | 62 | static time_t year_seconds[MAX_YEAR_SECONDS] = { |
61 | /*1970*/ SPY( 0, 0,0), SPY( 1, 0,0), SPY( 2, 0,0), SPY( 3, 1,0), | 63 | /*1970*/ SPY(0, 0, 0), SPY(1, 0, 0), SPY(2, 0, 0), SPY(3, 1, 0), |
62 | /*1974*/ SPY( 4, 1,0), SPY( 5, 1,0), SPY( 6, 1,0), SPY( 7, 2,0), | 64 | /*1974*/ SPY(4, 1, 0), SPY(5, 1, 0), SPY(6, 1, 0), SPY(7, 2, 0), |
63 | /*1978*/ SPY( 8, 2,0), SPY( 9, 2,0), SPY(10, 2,0), SPY(11, 3,0), | 65 | /*1978*/ SPY(8, 2, 0), SPY(9, 2, 0), SPY(10, 2, 0), SPY(11, 3, 0), |
64 | /*1982*/ SPY(12, 3,0), SPY(13, 3,0), SPY(14, 3,0), SPY(15, 4,0), | 66 | /*1982*/ SPY(12, 3, 0), SPY(13, 3, 0), SPY(14, 3, 0), SPY(15, 4, 0), |
65 | /*1986*/ SPY(16, 4,0), SPY(17, 4,0), SPY(18, 4,0), SPY(19, 5,0), | 67 | /*1986*/ SPY(16, 4, 0), SPY(17, 4, 0), SPY(18, 4, 0), SPY(19, 5, 0), |
66 | /*1990*/ SPY(20, 5,0), SPY(21, 5,0), SPY(22, 5,0), SPY(23, 6,0), | 68 | /*1990*/ SPY(20, 5, 0), SPY(21, 5, 0), SPY(22, 5, 0), SPY(23, 6, 0), |
67 | /*1994*/ SPY(24, 6,0), SPY(25, 6,0), SPY(26, 6,0), SPY(27, 7,0), | 69 | /*1994*/ SPY(24, 6, 0), SPY(25, 6, 0), SPY(26, 6, 0), SPY(27, 7, 0), |
68 | /*1998*/ SPY(28, 7,0), SPY(29, 7,0), SPY(30, 7,0), SPY(31, 8,0), | 70 | /*1998*/ SPY(28, 7, 0), SPY(29, 7, 0), SPY(30, 7, 0), SPY(31, 8, 0), |
69 | /*2002*/ SPY(32, 8,0), SPY(33, 8,0), SPY(34, 8,0), SPY(35, 9,0), | 71 | /*2002*/ SPY(32, 8, 0), SPY(33, 8, 0), SPY(34, 8, 0), SPY(35, 9, 0), |
70 | /*2006*/ SPY(36, 9,0), SPY(37, 9,0), SPY(38, 9,0), SPY(39,10,0), | 72 | /*2006*/ SPY(36, 9, 0), SPY(37, 9, 0), SPY(38, 9, 0), SPY(39, 10, 0), |
71 | /*2010*/ SPY(40,10,0), SPY(41,10,0), SPY(42,10,0), SPY(43,11,0), | 73 | /*2010*/ SPY(40, 10, 0), SPY(41, 10, 0), SPY(42, 10, 0), SPY(43, 11, 0), |
72 | /*2014*/ SPY(44,11,0), SPY(45,11,0), SPY(46,11,0), SPY(47,12,0), | 74 | /*2014*/ SPY(44, 11, 0), SPY(45, 11, 0), SPY(46, 11, 0), SPY(47, 12, 0), |
73 | /*2018*/ SPY(48,12,0), SPY(49,12,0), SPY(50,12,0), SPY(51,13,0), | 75 | /*2018*/ SPY(48, 12, 0), SPY(49, 12, 0), SPY(50, 12, 0), SPY(51, 13, 0), |
74 | /*2022*/ SPY(52,13,0), SPY(53,13,0), SPY(54,13,0), SPY(55,14,0), | 76 | /*2022*/ SPY(52, 13, 0), SPY(53, 13, 0), SPY(54, 13, 0), SPY(55, 14, 0), |
75 | /*2026*/ SPY(56,14,0), SPY(57,14,0), SPY(58,14,0), SPY(59,15,0), | 77 | /*2026*/ SPY(56, 14, 0), SPY(57, 14, 0), SPY(58, 14, 0), SPY(59, 15, 0), |
76 | /*2030*/ SPY(60,15,0), SPY(61,15,0), SPY(62,15,0), SPY(63,16,0), | 78 | /*2030*/ SPY(60, 15, 0), SPY(61, 15, 0), SPY(62, 15, 0), SPY(63, 16, 0), |
77 | /*2034*/ SPY(64,16,0), SPY(65,16,0), SPY(66,16,0), SPY(67,17,0), | 79 | /*2034*/ SPY(64, 16, 0), SPY(65, 16, 0), SPY(66, 16, 0), SPY(67, 17, 0), |
78 | /*2038*/ SPY(68,17,0) | 80 | /*2038*/ SPY(68, 17, 0) |
79 | }; | 81 | }; |
80 | 82 | ||
81 | extern struct timezone sys_tz; | 83 | extern struct timezone sys_tz; |
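[Editor's note] The reformatted SPY() macro and year_seconds[] table above precompute the seconds from the Unix epoch to 1 January of each year between 1970 and 2038; the second SPY argument is the count of leap days accumulated before that year. A minimal stand-alone sketch that regenerates the same values (only the SPD/SPY arithmetic comes from the patch; the verification program itself is an assumption for illustration):

#include <stdio.h>
#include <time.h>

/* Sketch: rebuild year_seconds[] outside the kernel.  SPD and SPY
 * mirror the macros in the patch; (y + 1) / 4 reproduces the leap-day
 * column because every fourth year from 1972 through 2036 (including
 * 2000, which is divisible by 400) is a leap year. */
#define SPD (3600 * 24)
#define SPY(y, l, s) ((time_t)SPD * (365 * (y) + (l)) + (s))

int main(void)
{
	int y;

	for (y = 0; y < 69; y++)	/* MAX_YEAR_SECONDS entries */
		printf("/*%d*/ %lld\n", 1970 + y,
		       (long long)SPY(y, (y + 1) / 4, 0));
	return 0;
}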
@@ -115,7 +117,7 @@ time_t *udf_stamp_to_time(time_t *dest, long *dest_usec, kernel_timestamp src) | |||
115 | return dest; | 117 | return dest; |
116 | } | 118 | } |
117 | 119 | ||
118 | kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts) | 120 | kernel_timestamp *udf_time_to_stamp(kernel_timestamp *dest, struct timespec ts) |
119 | { | 121 | { |
120 | long int days, rem, y; | 122 | long int days, rem, y; |
121 | const unsigned short int *ip; | 123 | const unsigned short int *ip; |
@@ -137,7 +139,7 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts) | |||
137 | dest->second = rem % 60; | 139 | dest->second = rem % 60; |
138 | y = 1970; | 140 | y = 1970; |
139 | 141 | ||
140 | #define DIV(a,b) ((a) / (b) - ((a) % (b) < 0)) | 142 | #define DIV(a, b) ((a) / (b) - ((a) % (b) < 0)) |
141 | #define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) | 143 | #define LEAPS_THRU_END_OF(y) (DIV (y, 4) - DIV (y, 100) + DIV (y, 400)) |
142 | 144 | ||
143 | while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { | 145 | while (days < 0 || days >= (__isleap(y) ? 366 : 365)) { |
@@ -145,8 +147,8 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts) | |||
145 | 147 | ||
146 | /* Adjust DAYS and Y to match the guessed year. */ | 148 | /* Adjust DAYS and Y to match the guessed year. */ |
147 | days -= ((yg - y) * 365 | 149 | days -= ((yg - y) * 365 |
148 | + LEAPS_THRU_END_OF (yg - 1) | 150 | + LEAPS_THRU_END_OF(yg - 1) |
149 | - LEAPS_THRU_END_OF (y - 1)); | 151 | - LEAPS_THRU_END_OF(y - 1)); |
150 | y = yg; | 152 | y = yg; |
151 | } | 153 | } |
152 | dest->year = y; | 154 | dest->year = y; |
@@ -158,7 +160,8 @@ kernel_timestamp *udf_time_to_stamp(kernel_timestamp * dest, struct timespec ts) | |||
158 | dest->day = days + 1; | 160 | dest->day = days + 1; |
159 | 161 | ||
160 | dest->centiseconds = ts.tv_nsec / 10000000; | 162 | dest->centiseconds = ts.tv_nsec / 10000000; |
161 | dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000) / 100; | 163 | dest->hundredsOfMicroseconds = (ts.tv_nsec / 1000 - |
164 | dest->centiseconds * 10000) / 100; | ||
162 | dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 - | 165 | dest->microseconds = (ts.tv_nsec / 1000 - dest->centiseconds * 10000 - |
163 | dest->hundredsOfMicroseconds * 100); | 166 | dest->hundredsOfMicroseconds * 100); |
164 | return dest; | 167 | return dest; |
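[Editor's note] udf_time_to_stamp() splits tv_nsec into centiseconds, hundreds of microseconds, and microseconds; the re-wrapped lines above are exactly that split. A small check of the arithmetic (the helper and its names are illustrative assumptions, not kernel code):

#include <assert.h>

/* Illustrative check of the tv_nsec split used by udf_time_to_stamp():
 * centiseconds (1/100 s), then hundreds of microseconds, then the
 * remaining microseconds. */
static void split_nsec(long tv_nsec, int *cs, int *hmu, int *us)
{
	*cs  = tv_nsec / 10000000;                    /* 10^7 ns = 1 cs */
	*hmu = (tv_nsec / 1000 - *cs * 10000) / 100;  /* 100 us units   */
	*us  = tv_nsec / 1000 - *cs * 10000 - *hmu * 100;
}

int main(void)
{
	int cs, hmu, us;

	split_nsec(123456789, &cs, &hmu, &us);	/* 0.123456789 s */
	assert(cs == 12 && hmu == 34 && us == 56);
	return 0;
}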
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 9e6099c26c27..e533b11703bf 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c | |||
@@ -136,12 +136,18 @@ int udf_CS0toUTF8(struct ustr *utf_o, struct ustr *ocu_i) | |||
136 | if (c < 0x80U) { | 136 | if (c < 0x80U) { |
137 | utf_o->u_name[utf_o->u_len++] = (uint8_t)c; | 137 | utf_o->u_name[utf_o->u_len++] = (uint8_t)c; |
138 | } else if (c < 0x800U) { | 138 | } else if (c < 0x800U) { |
139 | utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xc0 | (c >> 6)); | 139 | utf_o->u_name[utf_o->u_len++] = |
140 | utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f)); | 140 | (uint8_t)(0xc0 | (c >> 6)); |
141 | utf_o->u_name[utf_o->u_len++] = | ||
142 | (uint8_t)(0x80 | (c & 0x3f)); | ||
141 | } else { | 143 | } else { |
142 | utf_o->u_name[utf_o->u_len++] = (uint8_t)(0xe0 | (c >> 12)); | 144 | utf_o->u_name[utf_o->u_len++] = |
143 | utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | ((c >> 6) & 0x3f)); | 145 | (uint8_t)(0xe0 | (c >> 12)); |
144 | utf_o->u_name[utf_o->u_len++] = (uint8_t)(0x80 | (c & 0x3f)); | 146 | utf_o->u_name[utf_o->u_len++] = |
147 | (uint8_t)(0x80 | | ||
148 | ((c >> 6) & 0x3f)); | ||
149 | utf_o->u_name[utf_o->u_len++] = | ||
150 | (uint8_t)(0x80 | (c & 0x3f)); | ||
145 | } | 151 | } |
146 | } | 152 | } |
147 | utf_o->u_cmpID = 8; | 153 | utf_o->u_cmpID = 8; |
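[Editor's note] The re-indented branches in udf_CS0toUTF8() emit the 1-, 2-, or 3-byte UTF-8 sequence for each 16-bit CS0 character. A stand-alone sketch of the same encoding (the helper name and return convention are assumptions for illustration):

#include <stdint.h>
#include <stddef.h>

/* Emit the 1-, 2- or 3-byte UTF-8 sequence for a 16-bit code point,
 * mirroring the branches above.  Returns the number of bytes written. */
static size_t put_utf8(uint8_t *out, uint16_t c)
{
	if (c < 0x80U) {
		out[0] = (uint8_t)c;
		return 1;
	}
	if (c < 0x800U) {
		out[0] = (uint8_t)(0xc0 | (c >> 6));
		out[1] = (uint8_t)(0x80 | (c & 0x3f));
		return 2;
	}
	out[0] = (uint8_t)(0xe0 | (c >> 12));
	out[1] = (uint8_t)(0x80 | ((c >> 6) & 0x3f));
	out[2] = (uint8_t)(0x80 | (c & 0x3f));
	return 3;
}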
@@ -232,9 +238,8 @@ try_again: | |||
232 | goto error_out; | 238 | goto error_out; |
233 | } | 239 | } |
234 | 240 | ||
235 | if (max_val == 0xffffU) { | 241 | if (max_val == 0xffffU) |
236 | ocu[++u_len] = (uint8_t)(utf_char >> 8); | 242 | ocu[++u_len] = (uint8_t)(utf_char >> 8); |
237 | } | ||
238 | ocu[++u_len] = (uint8_t)(utf_char & 0xffU); | 243 | ocu[++u_len] = (uint8_t)(utf_char & 0xffU); |
239 | } | 244 | } |
240 | 245 | ||
@@ -330,29 +335,29 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname, | |||
330 | struct ustr filename, unifilename; | 335 | struct ustr filename, unifilename; |
331 | int len; | 336 | int len; |
332 | 337 | ||
333 | if (udf_build_ustr_exact(&unifilename, sname, flen)) { | 338 | if (udf_build_ustr_exact(&unifilename, sname, flen)) |
334 | return 0; | 339 | return 0; |
335 | } | ||
336 | 340 | ||
337 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { | 341 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { |
338 | if (!udf_CS0toUTF8(&filename, &unifilename)) { | 342 | if (!udf_CS0toUTF8(&filename, &unifilename)) { |
339 | udf_debug("Failed in udf_get_filename: sname = %s\n", sname); | 343 | udf_debug("Failed in udf_get_filename: sname = %s\n", |
344 | sname); | ||
340 | return 0; | 345 | return 0; |
341 | } | 346 | } |
342 | } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { | 347 | } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { |
343 | if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, &unifilename)) { | 348 | if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename, |
344 | udf_debug("Failed in udf_get_filename: sname = %s\n", sname); | 349 | &unifilename)) { |
350 | udf_debug("Failed in udf_get_filename: sname = %s\n", | ||
351 | sname); | ||
345 | return 0; | 352 | return 0; |
346 | } | 353 | } |
347 | } else { | 354 | } else |
348 | return 0; | 355 | return 0; |
349 | } | ||
350 | 356 | ||
351 | len = udf_translate_to_linux(dname, filename.u_name, filename.u_len, | 357 | len = udf_translate_to_linux(dname, filename.u_name, filename.u_len, |
352 | unifilename.u_name, unifilename.u_len); | 358 | unifilename.u_name, unifilename.u_len); |
353 | if (len) { | 359 | if (len) |
354 | return len; | 360 | return len; |
355 | } | ||
356 | 361 | ||
357 | return 0; | 362 | return 0; |
358 | } | 363 | } |
@@ -363,23 +368,20 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname, | |||
363 | struct ustr unifilename; | 368 | struct ustr unifilename; |
364 | int namelen; | 369 | int namelen; |
365 | 370 | ||
366 | if (!(udf_char_to_ustr(&unifilename, sname, flen))) { | 371 | if (!udf_char_to_ustr(&unifilename, sname, flen)) |
367 | return 0; | 372 | return 0; |
368 | } | ||
369 | 373 | ||
370 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { | 374 | if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) { |
371 | namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN); | 375 | namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN); |
372 | if (!namelen) { | 376 | if (!namelen) |
373 | return 0; | 377 | return 0; |
374 | } | ||
375 | } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { | 378 | } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) { |
376 | namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, &unifilename, UDF_NAME_LEN); | 379 | namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname, |
377 | if (!namelen) { | 380 | &unifilename, UDF_NAME_LEN); |
381 | if (!namelen) | ||
378 | return 0; | 382 | return 0; |
379 | } | 383 | } else |
380 | } else { | ||
381 | return 0; | 384 | return 0; |
382 | } | ||
383 | 385 | ||
384 | return namelen; | 386 | return namelen; |
385 | } | 387 | } |
@@ -389,8 +391,9 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname, | |||
389 | #define CRC_MARK '#' | 391 | #define CRC_MARK '#' |
390 | #define EXT_SIZE 5 | 392 | #define EXT_SIZE 5 |
391 | 393 | ||
392 | static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen, | 394 | static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, |
393 | uint8_t *fidName, int fidNameLen) | 395 | int udfLen, uint8_t *fidName, |
396 | int fidNameLen) | ||
394 | { | 397 | { |
395 | int index, newIndex = 0, needsCRC = 0; | 398 | int index, newIndex = 0, needsCRC = 0; |
396 | int extIndex = 0, newExtIndex = 0, hasExt = 0; | 399 | int extIndex = 0, newExtIndex = 0, hasExt = 0; |
@@ -409,13 +412,16 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen | |||
409 | if (curr == '/' || curr == 0) { | 412 | if (curr == '/' || curr == 0) { |
410 | needsCRC = 1; | 413 | needsCRC = 1; |
411 | curr = ILLEGAL_CHAR_MARK; | 414 | curr = ILLEGAL_CHAR_MARK; |
412 | while (index + 1 < udfLen && (udfName[index + 1] == '/' || | 415 | while (index + 1 < udfLen && |
413 | udfName[index + 1] == 0)) | 416 | (udfName[index + 1] == '/' || |
417 | udfName[index + 1] == 0)) | ||
414 | index++; | 418 | index++; |
415 | } if (curr == EXT_MARK && (udfLen - index - 1) <= EXT_SIZE) { | 419 | } |
416 | if (udfLen == index + 1) { | 420 | if (curr == EXT_MARK && |
421 | (udfLen - index - 1) <= EXT_SIZE) { | ||
422 | if (udfLen == index + 1) | ||
417 | hasExt = 0; | 423 | hasExt = 0; |
418 | } else { | 424 | else { |
419 | hasExt = 1; | 425 | hasExt = 1; |
420 | extIndex = index; | 426 | extIndex = index; |
421 | newExtIndex = newIndex; | 427 | newExtIndex = newIndex; |
@@ -433,16 +439,18 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen | |||
433 | 439 | ||
434 | if (hasExt) { | 440 | if (hasExt) { |
435 | int maxFilenameLen; | 441 | int maxFilenameLen; |
436 | for(index = 0; index < EXT_SIZE && extIndex + index + 1 < udfLen; index++) { | 442 | for (index = 0; |
443 | index < EXT_SIZE && extIndex + index + 1 < udfLen; | ||
444 | index++) { | ||
437 | curr = udfName[extIndex + index + 1]; | 445 | curr = udfName[extIndex + index + 1]; |
438 | 446 | ||
439 | if (curr == '/' || curr == 0) { | 447 | if (curr == '/' || curr == 0) { |
440 | needsCRC = 1; | 448 | needsCRC = 1; |
441 | curr = ILLEGAL_CHAR_MARK; | 449 | curr = ILLEGAL_CHAR_MARK; |
442 | while(extIndex + index + 2 < udfLen && | 450 | while (extIndex + index + 2 < udfLen && |
443 | (index + 1 < EXT_SIZE | 451 | (index + 1 < EXT_SIZE && |
444 | && (udfName[extIndex + index + 2] == '/' || | 452 | (udfName[extIndex + index + 2] == '/' || |
445 | udfName[extIndex + index + 2] == 0))) | 453 | udfName[extIndex + index + 2] == 0))) |
446 | index++; | 454 | index++; |
447 | } | 455 | } |
448 | ext[localExtIndex++] = curr; | 456 | ext[localExtIndex++] = curr; |
@@ -452,9 +460,8 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName, int udfLen | |||
452 | newIndex = maxFilenameLen; | 460 | newIndex = maxFilenameLen; |
453 | else | 461 | else |
454 | newIndex = newExtIndex; | 462 | newIndex = newExtIndex; |
455 | } else if (newIndex > 250) { | 463 | } else if (newIndex > 250) |
456 | newIndex = 250; | 464 | newIndex = 250; |
457 | } | ||
458 | newName[newIndex++] = CRC_MARK; | 465 | newName[newIndex++] = CRC_MARK; |
459 | valueCRC = udf_crc(fidName, fidNameLen, 0); | 466 | valueCRC = udf_crc(fidName, fidNameLen, 0); |
460 | newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; | 467 | newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; |
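[Editor's note] When udf_translate_to_linux() has mangled or truncated a name, it appends CRC_MARK ('#') followed by four hex digits of udf_crc() over the raw FID name; only the first digit is visible in the hunk above. An illustrative sketch of that suffix step (the helper, and the assumption that hexChar[] holds "0123456789ABCDEF", are mine rather than taken from the patch):

#include <stdint.h>

static const char hexChar[] = "0123456789ABCDEF";	/* assumed contents */

/* Append "#xxxx" to name at position idx, given a 16-bit CRC already
 * computed over the on-disk FID name; returns the new length. */
static int append_crc_mark(uint8_t *name, int idx, uint16_t crc)
{
	name[idx++] = '#';
	name[idx++] = hexChar[(crc & 0xf000) >> 12];
	name[idx++] = hexChar[(crc & 0x0f00) >> 8];
	name[idx++] = hexChar[(crc & 0x00f0) >> 4];
	name[idx++] = hexChar[crc & 0x000f];
	return idx;
}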
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index f63a09ce8683..1fca381f0ce2 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/ufs_fs.h> | ||
13 | #include <linux/stat.h> | 12 | #include <linux/stat.h> |
14 | #include <linux/time.h> | 13 | #include <linux/time.h> |
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
@@ -19,6 +18,7 @@ | |||
19 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
20 | #include <asm/byteorder.h> | 19 | #include <asm/byteorder.h> |
21 | 20 | ||
21 | #include "ufs_fs.h" | ||
22 | #include "ufs.h" | 22 | #include "ufs.h" |
23 | #include "swab.h" | 23 | #include "swab.h" |
24 | #include "util.h" | 24 | #include "util.h" |
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c index 2a815665644f..b4676322ddb6 100644 --- a/fs/ufs/cylinder.c +++ b/fs/ufs/cylinder.c | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/ufs_fs.h> | ||
13 | #include <linux/time.h> | 12 | #include <linux/time.h> |
14 | #include <linux/stat.h> | 13 | #include <linux/stat.h> |
15 | #include <linux/string.h> | 14 | #include <linux/string.h> |
@@ -17,6 +16,7 @@ | |||
17 | 16 | ||
18 | #include <asm/byteorder.h> | 17 | #include <asm/byteorder.h> |
19 | 18 | ||
19 | #include "ufs_fs.h" | ||
20 | #include "ufs.h" | 20 | #include "ufs.h" |
21 | #include "swab.h" | 21 | #include "swab.h" |
22 | #include "util.h" | 22 | #include "util.h" |
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index aaf2878305ce..ef563fc8d72c 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c | |||
@@ -18,9 +18,9 @@ | |||
18 | 18 | ||
19 | #include <linux/time.h> | 19 | #include <linux/time.h> |
20 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
21 | #include <linux/ufs_fs.h> | ||
22 | #include <linux/swap.h> | 21 | #include <linux/swap.h> |
23 | 22 | ||
23 | #include "ufs_fs.h" | ||
24 | #include "ufs.h" | 24 | #include "ufs.h" |
25 | #include "swab.h" | 25 | #include "swab.h" |
26 | #include "util.h" | 26 | #include "util.h" |
diff --git a/fs/ufs/file.c b/fs/ufs/file.c index a46c97bf023f..625ef17c6f83 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c | |||
@@ -24,9 +24,9 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/ufs_fs.h> | ||
28 | #include <linux/buffer_head.h> /* for sync_mapping_buffers() */ | 27 | #include <linux/buffer_head.h> /* for sync_mapping_buffers() */ |
29 | 28 | ||
29 | #include "ufs_fs.h" | ||
30 | #include "ufs.h" | 30 | #include "ufs.h" |
31 | 31 | ||
32 | 32 | ||
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 7e260bc0d94f..ac181f6806a3 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
@@ -24,7 +24,6 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/ufs_fs.h> | ||
28 | #include <linux/time.h> | 27 | #include <linux/time.h> |
29 | #include <linux/stat.h> | 28 | #include <linux/stat.h> |
30 | #include <linux/string.h> | 29 | #include <linux/string.h> |
@@ -34,6 +33,7 @@ | |||
34 | #include <linux/bitops.h> | 33 | #include <linux/bitops.h> |
35 | #include <asm/byteorder.h> | 34 | #include <asm/byteorder.h> |
36 | 35 | ||
36 | #include "ufs_fs.h" | ||
37 | #include "ufs.h" | 37 | #include "ufs.h" |
38 | #include "swab.h" | 38 | #include "swab.h" |
39 | #include "util.h" | 39 | #include "util.h" |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 4320782761ae..5446b888fc8e 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -30,7 +30,6 @@ | |||
30 | 30 | ||
31 | #include <linux/errno.h> | 31 | #include <linux/errno.h> |
32 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
33 | #include <linux/ufs_fs.h> | ||
34 | #include <linux/time.h> | 33 | #include <linux/time.h> |
35 | #include <linux/stat.h> | 34 | #include <linux/stat.h> |
36 | #include <linux/string.h> | 35 | #include <linux/string.h> |
@@ -38,6 +37,7 @@ | |||
38 | #include <linux/smp_lock.h> | 37 | #include <linux/smp_lock.h> |
39 | #include <linux/buffer_head.h> | 38 | #include <linux/buffer_head.h> |
40 | 39 | ||
40 | #include "ufs_fs.h" | ||
41 | #include "ufs.h" | 41 | #include "ufs.h" |
42 | #include "swab.h" | 42 | #include "swab.h" |
43 | #include "util.h" | 43 | #include "util.h" |
@@ -714,26 +714,30 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode) | |||
714 | return 0; | 714 | return 0; |
715 | } | 715 | } |
716 | 716 | ||
717 | void ufs_read_inode(struct inode * inode) | 717 | struct inode *ufs_iget(struct super_block *sb, unsigned long ino) |
718 | { | 718 | { |
719 | struct ufs_inode_info *ufsi = UFS_I(inode); | 719 | struct ufs_inode_info *ufsi; |
720 | struct super_block * sb; | 720 | struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; |
721 | struct ufs_sb_private_info * uspi; | ||
722 | struct buffer_head * bh; | 721 | struct buffer_head * bh; |
722 | struct inode *inode; | ||
723 | int err; | 723 | int err; |
724 | 724 | ||
725 | UFSD("ENTER, ino %lu\n", inode->i_ino); | 725 | UFSD("ENTER, ino %lu\n", ino); |
726 | |||
727 | sb = inode->i_sb; | ||
728 | uspi = UFS_SB(sb)->s_uspi; | ||
729 | 726 | ||
730 | if (inode->i_ino < UFS_ROOTINO || | 727 | if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) { |
731 | inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) { | ||
732 | ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", | 728 | ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n", |
733 | inode->i_ino); | 729 | ino); |
734 | goto bad_inode; | 730 | return ERR_PTR(-EIO); |
735 | } | 731 | } |
736 | 732 | ||
733 | inode = iget_locked(sb, ino); | ||
734 | if (!inode) | ||
735 | return ERR_PTR(-ENOMEM); | ||
736 | if (!(inode->i_state & I_NEW)) | ||
737 | return inode; | ||
738 | |||
739 | ufsi = UFS_I(inode); | ||
740 | |||
737 | bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); | 741 | bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino)); |
738 | if (!bh) { | 742 | if (!bh) { |
739 | ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", | 743 | ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n", |
@@ -765,10 +769,12 @@ void ufs_read_inode(struct inode * inode) | |||
765 | brelse(bh); | 769 | brelse(bh); |
766 | 770 | ||
767 | UFSD("EXIT\n"); | 771 | UFSD("EXIT\n"); |
768 | return; | 772 | unlock_new_inode(inode); |
773 | return inode; | ||
769 | 774 | ||
770 | bad_inode: | 775 | bad_inode: |
771 | make_bad_inode(inode); | 776 | iget_failed(inode); |
777 | return ERR_PTR(-EIO); | ||
772 | } | 778 | } |
773 | 779 | ||
774 | static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) | 780 | static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) |
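[Editor's note] The hunk above replaces the ->read_inode() superblock operation with ufs_iget(), following the iget_locked() conversion being applied across filesystems in this merge. A generic sketch of that pattern (the function name and placeholder body are assumptions; the API calls are the ones visible in the hunk):

#include <linux/fs.h>
#include <linux/err.h>

/* Look the inode up in the icache, read it from disk only when it is
 * new, and return an ERR_PTR on failure instead of make_bad_inode(). */
static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cached and already initialised */

	/* ... fill the inode from disk here; on a read error do:
	 *	iget_failed(inode);
	 *	return ERR_PTR(-EIO);
	 */

	unlock_new_inode(inode);
	return inode;
}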
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index d8bfbee2fe2b..e3a9b1fac75a 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -29,8 +29,9 @@ | |||
29 | 29 | ||
30 | #include <linux/time.h> | 30 | #include <linux/time.h> |
31 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
32 | #include <linux/ufs_fs.h> | ||
33 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
33 | |||
34 | #include "ufs_fs.h" | ||
34 | #include "ufs.h" | 35 | #include "ufs.h" |
35 | #include "util.h" | 36 | #include "util.h" |
36 | 37 | ||
@@ -57,10 +58,10 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru | |||
57 | lock_kernel(); | 58 | lock_kernel(); |
58 | ino = ufs_inode_by_name(dir, dentry); | 59 | ino = ufs_inode_by_name(dir, dentry); |
59 | if (ino) { | 60 | if (ino) { |
60 | inode = iget(dir->i_sb, ino); | 61 | inode = ufs_iget(dir->i_sb, ino); |
61 | if (!inode) { | 62 | if (IS_ERR(inode)) { |
62 | unlock_kernel(); | 63 | unlock_kernel(); |
63 | return ERR_PTR(-EACCES); | 64 | return ERR_CAST(inode); |
64 | } | 65 | } |
65 | } | 66 | } |
66 | unlock_kernel(); | 67 | unlock_kernel(); |
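[Editor's note] On the caller side, ufs_lookup() now treats the return value of ufs_iget() as either a valid inode or an ERR_PTR-encoded errno and forwards it with ERR_CAST() instead of mapping every failure to -EACCES. A hedged sketch of that convention (not the verbatim ufs code):

#include <linux/fs.h>
#include <linux/err.h>
#include "ufs.h"	/* ufs_iget(), ufs_inode_by_name() */

/* Forward the encoded errno with ERR_CAST() rather than inventing one. */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = NULL;
	ino_t ino = ufs_inode_by_name(dir, dentry);

	if (ino) {
		inode = ufs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* propagate the real error */
	}
	d_add(dentry, inode);
	return NULL;
}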
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 0072cb33ebec..85b22b5977fa 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -76,7 +76,6 @@ | |||
76 | 76 | ||
77 | #include <linux/errno.h> | 77 | #include <linux/errno.h> |
78 | #include <linux/fs.h> | 78 | #include <linux/fs.h> |
79 | #include <linux/ufs_fs.h> | ||
80 | #include <linux/slab.h> | 79 | #include <linux/slab.h> |
81 | #include <linux/time.h> | 80 | #include <linux/time.h> |
82 | #include <linux/stat.h> | 81 | #include <linux/stat.h> |
@@ -91,6 +90,7 @@ | |||
91 | #include <linux/mount.h> | 90 | #include <linux/mount.h> |
92 | #include <linux/seq_file.h> | 91 | #include <linux/seq_file.h> |
93 | 92 | ||
93 | #include "ufs_fs.h" | ||
94 | #include "ufs.h" | 94 | #include "ufs.h" |
95 | #include "swab.h" | 95 | #include "swab.h" |
96 | #include "util.h" | 96 | #include "util.h" |
@@ -131,6 +131,8 @@ static void ufs_print_super_stuff(struct super_block *sb, | |||
131 | printk(KERN_INFO" cs_nffree(Num of free frags): %llu\n", | 131 | printk(KERN_INFO" cs_nffree(Num of free frags): %llu\n", |
132 | (unsigned long long) | 132 | (unsigned long long) |
133 | fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree)); | 133 | fs64_to_cpu(sb, usb3->fs_un1.fs_u2.cs_nffree)); |
134 | printk(KERN_INFO" fs_maxsymlinklen: %u\n", | ||
135 | fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen)); | ||
134 | } else { | 136 | } else { |
135 | printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno)); | 137 | printk(" sblkno: %u\n", fs32_to_cpu(sb, usb1->fs_sblkno)); |
136 | printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno)); | 138 | printk(" cblkno: %u\n", fs32_to_cpu(sb, usb1->fs_cblkno)); |
@@ -633,6 +635,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) | |||
633 | unsigned block_size, super_block_size; | 635 | unsigned block_size, super_block_size; |
634 | unsigned flags; | 636 | unsigned flags; |
635 | unsigned super_block_offset; | 637 | unsigned super_block_offset; |
638 | int ret = -EINVAL; | ||
636 | 639 | ||
637 | uspi = NULL; | 640 | uspi = NULL; |
638 | ubh = NULL; | 641 | ubh = NULL; |
@@ -1060,17 +1063,21 @@ magic_found: | |||
1060 | uspi->s_bpf = uspi->s_fsize << 3; | 1063 | uspi->s_bpf = uspi->s_fsize << 3; |
1061 | uspi->s_bpfshift = uspi->s_fshift + 3; | 1064 | uspi->s_bpfshift = uspi->s_fshift + 3; |
1062 | uspi->s_bpfmask = uspi->s_bpf - 1; | 1065 | uspi->s_bpfmask = uspi->s_bpf - 1; |
1063 | if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == | 1066 | if ((sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_44BSD || |
1064 | UFS_MOUNT_UFSTYPE_44BSD) | 1067 | (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) == UFS_MOUNT_UFSTYPE_UFS2) |
1065 | uspi->s_maxsymlinklen = | 1068 | uspi->s_maxsymlinklen = |
1066 | fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); | 1069 | fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen); |
1067 | 1070 | ||
1068 | inode = iget(sb, UFS_ROOTINO); | 1071 | inode = ufs_iget(sb, UFS_ROOTINO); |
1069 | if (!inode || is_bad_inode(inode)) | 1072 | if (IS_ERR(inode)) { |
1073 | ret = PTR_ERR(inode); | ||
1070 | goto failed; | 1074 | goto failed; |
1075 | } | ||
1071 | sb->s_root = d_alloc_root(inode); | 1076 | sb->s_root = d_alloc_root(inode); |
1072 | if (!sb->s_root) | 1077 | if (!sb->s_root) { |
1078 | ret = -ENOMEM; | ||
1073 | goto dalloc_failed; | 1079 | goto dalloc_failed; |
1080 | } | ||
1074 | 1081 | ||
1075 | ufs_setup_cstotal(sb); | 1082 | ufs_setup_cstotal(sb); |
1076 | /* | 1083 | /* |
@@ -1092,7 +1099,7 @@ failed: | |||
1092 | kfree(sbi); | 1099 | kfree(sbi); |
1093 | sb->s_fs_info = NULL; | 1100 | sb->s_fs_info = NULL; |
1094 | UFSD("EXIT (FAILED)\n"); | 1101 | UFSD("EXIT (FAILED)\n"); |
1095 | return -EINVAL; | 1102 | return ret; |
1096 | 1103 | ||
1097 | failed_nomem: | 1104 | failed_nomem: |
1098 | UFSD("EXIT (NOMEM)\n"); | 1105 | UFSD("EXIT (NOMEM)\n"); |
@@ -1326,7 +1333,6 @@ static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, | |||
1326 | static const struct super_operations ufs_super_ops = { | 1333 | static const struct super_operations ufs_super_ops = { |
1327 | .alloc_inode = ufs_alloc_inode, | 1334 | .alloc_inode = ufs_alloc_inode, |
1328 | .destroy_inode = ufs_destroy_inode, | 1335 | .destroy_inode = ufs_destroy_inode, |
1329 | .read_inode = ufs_read_inode, | ||
1330 | .write_inode = ufs_write_inode, | 1336 | .write_inode = ufs_write_inode, |
1331 | .delete_inode = ufs_delete_inode, | 1337 | .delete_inode = ufs_delete_inode, |
1332 | .put_super = ufs_put_super, | 1338 | .put_super = ufs_put_super, |
diff --git a/fs/ufs/symlink.c b/fs/ufs/symlink.c index 43ac10e75a4a..c0156eda44bc 100644 --- a/fs/ufs/symlink.c +++ b/fs/ufs/symlink.c | |||
@@ -27,7 +27,8 @@ | |||
27 | 27 | ||
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | #include <linux/namei.h> | 29 | #include <linux/namei.h> |
30 | #include <linux/ufs_fs.h> | 30 | |
31 | #include "ufs_fs.h" | ||
31 | #include "ufs.h" | 32 | #include "ufs.h" |
32 | 33 | ||
33 | 34 | ||
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 311ded34c2b2..41dd431ce228 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
@@ -36,7 +36,6 @@ | |||
36 | 36 | ||
37 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/ufs_fs.h> | ||
40 | #include <linux/fcntl.h> | 39 | #include <linux/fcntl.h> |
41 | #include <linux/time.h> | 40 | #include <linux/time.h> |
42 | #include <linux/stat.h> | 41 | #include <linux/stat.h> |
@@ -46,6 +45,7 @@ | |||
46 | #include <linux/blkdev.h> | 45 | #include <linux/blkdev.h> |
47 | #include <linux/sched.h> | 46 | #include <linux/sched.h> |
48 | 47 | ||
48 | #include "ufs_fs.h" | ||
49 | #include "ufs.h" | 49 | #include "ufs.h" |
50 | #include "swab.h" | 50 | #include "swab.h" |
51 | #include "util.h" | 51 | #include "util.h" |
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 7faa4cd71a27..fcb9231bb9ed 100644 --- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h | |||
@@ -106,7 +106,7 @@ extern void ufs_free_inode (struct inode *inode); | |||
106 | extern struct inode * ufs_new_inode (struct inode *, int); | 106 | extern struct inode * ufs_new_inode (struct inode *, int); |
107 | 107 | ||
108 | /* inode.c */ | 108 | /* inode.c */ |
109 | extern void ufs_read_inode (struct inode *); | 109 | extern struct inode *ufs_iget(struct super_block *, unsigned long); |
110 | extern void ufs_put_inode (struct inode *); | 110 | extern void ufs_put_inode (struct inode *); |
111 | extern int ufs_write_inode (struct inode *, int); | 111 | extern int ufs_write_inode (struct inode *, int); |
112 | extern int ufs_sync_inode (struct inode *); | 112 | extern int ufs_sync_inode (struct inode *); |
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h new file mode 100644 index 000000000000..54bde1895a80 --- /dev/null +++ b/fs/ufs/ufs_fs.h | |||
@@ -0,0 +1,947 @@ | |||
1 | /* | ||
2 | * linux/include/linux/ufs_fs.h | ||
3 | * | ||
4 | * Copyright (C) 1996 | ||
5 | * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu) | ||
6 | * Laboratory for Computer Science Research Computing Facility | ||
7 | * Rutgers, The State University of New Jersey | ||
8 | * | ||
9 | * Clean swab support by Fare <fare@tunes.org> | ||
10 | * just hope no one is using NNUUXXI on __?64 structure elements | ||
11 | * 64-bit clean thanks to Maciej W. Rozycki <macro@ds2.pg.gda.pl> | ||
12 | * | ||
13 | * 4.4BSD (FreeBSD) support added on February 1st 1998 by | ||
14 | * Niels Kristian Bech Jensen <nkbj@image.dk> partially based | ||
15 | * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>. | ||
16 | * | ||
17 | * NeXTstep support added on February 5th 1998 by | ||
18 | * Niels Kristian Bech Jensen <nkbj@image.dk>. | ||
19 | * | ||
20 | * Write support by Daniel Pirkl <daniel.pirkl@email.cz> | ||
21 | * | ||
22 | * HP/UX hfs filesystem support added by | ||
23 | * Martin K. Petersen <mkp@mkp.net>, August 1999 | ||
24 | * | ||
25 | * UFS2 (of FreeBSD 5.x) support added by | ||
26 | * Niraj Kumar <niraj17@iitbombay.org> , Jan 2004 | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | #ifndef __LINUX_UFS_FS_H | ||
31 | #define __LINUX_UFS_FS_H | ||
32 | |||
33 | #include <linux/types.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/stat.h> | ||
36 | #include <linux/fs.h> | ||
37 | |||
38 | #include <asm/div64.h> | ||
39 | typedef __u64 __bitwise __fs64; | ||
40 | typedef __u32 __bitwise __fs32; | ||
41 | typedef __u16 __bitwise __fs16; | ||
42 | |||
43 | #define UFS_BBLOCK 0 | ||
44 | #define UFS_BBSIZE 8192 | ||
45 | #define UFS_SBLOCK 8192 | ||
46 | #define UFS_SBSIZE 8192 | ||
47 | |||
48 | #define UFS_SECTOR_SIZE 512 | ||
49 | #define UFS_SECTOR_BITS 9 | ||
50 | #define UFS_MAGIC 0x00011954 | ||
51 | #define UFS2_MAGIC 0x19540119 | ||
52 | #define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */ | ||
53 | |||
54 | /* Copied from FreeBSD */ | ||
55 | /* | ||
56 | * Each disk drive contains some number of filesystems. | ||
57 | * A filesystem consists of a number of cylinder groups. | ||
58 | * Each cylinder group has inodes and data. | ||
59 | * | ||
60 | * A filesystem is described by its super-block, which in turn | ||
61 | * describes the cylinder groups. The super-block is critical | ||
62 | * data and is replicated in each cylinder group to protect against | ||
63 | * catastrophic loss. This is done at `newfs' time and the critical | ||
64 | * super-block data does not change, so the copies need not be | ||
65 | * referenced further unless disaster strikes. | ||
66 | * | ||
67 | * For filesystem fs, the offsets of the various blocks of interest | ||
68 | * are given in the super block as: | ||
69 | * [fs->fs_sblkno] Super-block | ||
70 | * [fs->fs_cblkno] Cylinder group block | ||
71 | * [fs->fs_iblkno] Inode blocks | ||
72 | * [fs->fs_dblkno] Data blocks | ||
73 | * The beginning of cylinder group cg in fs, is given by | ||
74 | * the ``cgbase(fs, cg)'' macro. | ||
75 | * | ||
76 | * Depending on the architecture and the media, the superblock may | ||
77 | * reside in any one of four places. For tiny media where every block | ||
78 | * counts, it is placed at the very front of the partition. Historically, | ||
79 | * UFS1 placed it 8K from the front to leave room for the disk label and | ||
80 | * a small bootstrap. For UFS2 it got moved to 64K from the front to leave | ||
81 | * room for the disk label and a bigger bootstrap, and for really piggy | ||
82 | * systems we check at 256K from the front if the first three fail. In | ||
83 | * all cases the size of the superblock will be SBLOCKSIZE. All values are | ||
84 | * given in byte-offset form, so they do not imply a sector size. The | ||
85 | * SBLOCKSEARCH specifies the order in which the locations should be searched. | ||
86 | */ | ||
87 | #define SBLOCK_FLOPPY 0 | ||
88 | #define SBLOCK_UFS1 8192 | ||
89 | #define SBLOCK_UFS2 65536 | ||
90 | #define SBLOCK_PIGGY 262144 | ||
91 | #define SBLOCKSIZE 8192 | ||
92 | #define SBLOCKSEARCH \ | ||
93 | { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 } | ||
94 | |||
95 | |||
96 | /* HP specific MAGIC values */ | ||
97 | |||
98 | #define UFS_MAGIC_LFN 0x00095014 /* fs supports filenames > 14 chars */ | ||
99 | #define UFS_CIGAM_LFN 0x14500900 /* srahc 41 < semanelif stroppus sf */ | ||
100 | |||
101 | #define UFS_MAGIC_SEC 0x00612195 /* B1 security fs */ | ||
102 | #define UFS_CIGAM_SEC 0x95216100 | ||
103 | |||
104 | #define UFS_MAGIC_FEA 0x00195612 /* fs_featurebits supported */ | ||
105 | #define UFS_CIGAM_FEA 0x12561900 | ||
106 | |||
107 | #define UFS_MAGIC_4GB 0x05231994 /* fs > 4 GB && fs_featurebits */ | ||
108 | #define UFS_CIGAM_4GB 0x94192305 | ||
109 | |||
110 | /* Seems somebody at HP goofed here. B1 and lfs are both 0x2 !?! */ | ||
111 | #define UFS_FSF_LFN 0x00000001 /* long file names */ | ||
112 | #define UFS_FSF_B1 0x00000002 /* B1 security */ | ||
113 | #define UFS_FSF_LFS 0x00000002 /* large files */ | ||
114 | #define UFS_FSF_LUID 0x00000004 /* large UIDs */ | ||
115 | |||
116 | /* End of HP stuff */ | ||
117 | |||
118 | |||
119 | #define UFS_BSIZE 8192 | ||
120 | #define UFS_MINBSIZE 4096 | ||
121 | #define UFS_FSIZE 1024 | ||
122 | #define UFS_MAXFRAG (UFS_BSIZE / UFS_FSIZE) | ||
123 | |||
124 | #define UFS_NDADDR 12 | ||
125 | #define UFS_NINDIR 3 | ||
126 | |||
127 | #define UFS_IND_BLOCK (UFS_NDADDR + 0) | ||
128 | #define UFS_DIND_BLOCK (UFS_NDADDR + 1) | ||
129 | #define UFS_TIND_BLOCK (UFS_NDADDR + 2) | ||
130 | |||
131 | #define UFS_NDIR_FRAGMENT (UFS_NDADDR << uspi->s_fpbshift) | ||
132 | #define UFS_IND_FRAGMENT (UFS_IND_BLOCK << uspi->s_fpbshift) | ||
133 | #define UFS_DIND_FRAGMENT (UFS_DIND_BLOCK << uspi->s_fpbshift) | ||
134 | #define UFS_TIND_FRAGMENT (UFS_TIND_BLOCK << uspi->s_fpbshift) | ||
135 | |||
136 | #define UFS_ROOTINO 2 | ||
137 | #define UFS_FIRST_INO (UFS_ROOTINO + 1) | ||
138 | |||
139 | #define UFS_USEEFT ((__u16)65535) | ||
140 | |||
141 | #define UFS_FSOK 0x7c269d38 | ||
142 | #define UFS_FSACTIVE ((__s8)0x00) | ||
143 | #define UFS_FSCLEAN ((__s8)0x01) | ||
144 | #define UFS_FSSTABLE ((__s8)0x02) | ||
145 | #define UFS_FSOSF1 ((__s8)0x03) /* is this correct for DEC OSF/1? */ | ||
146 | #define UFS_FSBAD ((__s8)0xff) | ||
147 | |||
148 | /* From here to next blank line, s_flags for ufs_sb_info */ | ||
149 | /* directory entry encoding */ | ||
150 | #define UFS_DE_MASK 0x00000010 /* mask for the following */ | ||
151 | #define UFS_DE_OLD 0x00000000 | ||
152 | #define UFS_DE_44BSD 0x00000010 | ||
153 | /* uid encoding */ | ||
154 | #define UFS_UID_MASK 0x00000060 /* mask for the following */ | ||
155 | #define UFS_UID_OLD 0x00000000 | ||
156 | #define UFS_UID_44BSD 0x00000020 | ||
157 | #define UFS_UID_EFT 0x00000040 | ||
158 | /* superblock state encoding */ | ||
159 | #define UFS_ST_MASK 0x00000700 /* mask for the following */ | ||
160 | #define UFS_ST_OLD 0x00000000 | ||
161 | #define UFS_ST_44BSD 0x00000100 | ||
162 | #define UFS_ST_SUN 0x00000200 /* Solaris */ | ||
163 | #define UFS_ST_SUNOS 0x00000300 | ||
164 | #define UFS_ST_SUNx86 0x00000400 /* Solaris x86 */ | ||
165 | /*cylinder group encoding */ | ||
166 | #define UFS_CG_MASK 0x00003000 /* mask for the following */ | ||
167 | #define UFS_CG_OLD 0x00000000 | ||
168 | #define UFS_CG_44BSD 0x00002000 | ||
169 | #define UFS_CG_SUN 0x00001000 | ||
170 | /* filesystem type encoding */ | ||
171 | #define UFS_TYPE_MASK 0x00010000 /* mask for the following */ | ||
172 | #define UFS_TYPE_UFS1 0x00000000 | ||
173 | #define UFS_TYPE_UFS2 0x00010000 | ||
174 | |||
175 | |||
176 | /* fs_inodefmt options */ | ||
177 | #define UFS_42INODEFMT -1 | ||
178 | #define UFS_44INODEFMT 2 | ||
179 | |||
180 | /* | ||
181 | * MINFREE gives the minimum acceptable percentage of file system | ||
182 | * blocks which may be free. If the freelist drops below this level | ||
183 | * only the superuser may continue to allocate blocks. This may | ||
184 | * be set to 0 if no reserve of free blocks is deemed necessary, | ||
185 | * however throughput drops by fifty percent if the file system | ||
186 | * is run at between 95% and 100% full; thus the minimum default | ||
187 | * value of fs_minfree is 5%. However, to get good clustering | ||
188 | * performance, 10% is a better choice. hence we use 10% as our | ||
189 | * default value. With 10% free space, fragmentation is not a | ||
190 | * problem, so we choose to optimize for time. | ||
191 | */ | ||
192 | #define UFS_MINFREE 5 | ||
193 | #define UFS_DEFAULTOPT UFS_OPTTIME | ||
194 | |||
195 | /* | ||
196 | * Turn file system block numbers into disk block addresses. | ||
197 | * This maps file system blocks to device size blocks. | ||
198 | */ | ||
199 | #define ufs_fsbtodb(uspi, b) ((b) << (uspi)->s_fsbtodb) | ||
200 | #define ufs_dbtofsb(uspi, b) ((b) >> (uspi)->s_fsbtodb) | ||
201 | |||
202 | /* | ||
203 | * Cylinder group macros to locate things in cylinder groups. | ||
204 | * They calc file system addresses of cylinder group data structures. | ||
205 | */ | ||
206 | #define ufs_cgbase(c) (uspi->s_fpg * (c)) | ||
207 | #define ufs_cgstart(c) ((uspi)->fs_magic == UFS2_MAGIC ? ufs_cgbase(c) : \ | ||
208 | (ufs_cgbase(c) + uspi->s_cgoffset * ((c) & ~uspi->s_cgmask))) | ||
209 | #define ufs_cgsblock(c) (ufs_cgstart(c) + uspi->s_sblkno) /* super blk */ | ||
210 | #define ufs_cgcmin(c) (ufs_cgstart(c) + uspi->s_cblkno) /* cg block */ | ||
211 | #define ufs_cgimin(c) (ufs_cgstart(c) + uspi->s_iblkno) /* inode blk */ | ||
212 | #define ufs_cgdmin(c) (ufs_cgstart(c) + uspi->s_dblkno) /* 1st data */ | ||
213 | |||
214 | /* | ||
215 | * Macros for handling inode numbers: | ||
216 | * inode number to file system block offset. | ||
217 | * inode number to cylinder group number. | ||
218 | * inode number to file system block address. | ||
219 | */ | ||
220 | #define ufs_inotocg(x) ((x) / uspi->s_ipg) | ||
221 | #define ufs_inotocgoff(x) ((x) % uspi->s_ipg) | ||
222 | #define ufs_inotofsba(x) (((u64)ufs_cgimin(ufs_inotocg(x))) + ufs_inotocgoff(x) / uspi->s_inopf) | ||
223 | #define ufs_inotofsbo(x) ((x) % uspi->s_inopf) | ||
224 | |||
225 | /* | ||
226 | * Compute the cylinder and rotational position of a cyl block addr. | ||
227 | */ | ||
228 | #define ufs_cbtocylno(bno) \ | ||
229 | ((bno) * uspi->s_nspf / uspi->s_spc) | ||
230 | #define ufs_cbtorpos(bno) \ | ||
231 | ((((bno) * uspi->s_nspf % uspi->s_spc / uspi->s_nsect \ | ||
232 | * uspi->s_trackskew + (bno) * uspi->s_nspf % uspi->s_spc \ | ||
233 | % uspi->s_nsect * uspi->s_interleave) % uspi->s_nsect \ | ||
234 | * uspi->s_nrpos) / uspi->s_npsect) | ||
235 | |||
236 | /* | ||
237 | * The following macros optimize certain frequently calculated | ||
238 | * quantities by using shifts and masks in place of divisions | ||
239 | * modulos and multiplications. | ||
240 | */ | ||
241 | #define ufs_blkoff(loc) ((loc) & uspi->s_qbmask) | ||
242 | #define ufs_fragoff(loc) ((loc) & uspi->s_qfmask) | ||
243 | #define ufs_lblktosize(blk) ((blk) << uspi->s_bshift) | ||
244 | #define ufs_lblkno(loc) ((loc) >> uspi->s_bshift) | ||
245 | #define ufs_numfrags(loc) ((loc) >> uspi->s_fshift) | ||
246 | #define ufs_blkroundup(size) (((size) + uspi->s_qbmask) & uspi->s_bmask) | ||
247 | #define ufs_fragroundup(size) (((size) + uspi->s_qfmask) & uspi->s_fmask) | ||
248 | #define ufs_fragstoblks(frags) ((frags) >> uspi->s_fpbshift) | ||
249 | #define ufs_blkstofrags(blks) ((blks) << uspi->s_fpbshift) | ||
250 | #define ufs_fragnum(fsb) ((fsb) & uspi->s_fpbmask) | ||
251 | #define ufs_blknum(fsb) ((fsb) & ~uspi->s_fpbmask) | ||
252 | |||
253 | #define UFS_MAXNAMLEN 255 | ||
254 | #define UFS_MAXMNTLEN 512 | ||
255 | #define UFS2_MAXMNTLEN 468 | ||
256 | #define UFS2_MAXVOLLEN 32 | ||
257 | #define UFS_MAXCSBUFS 31 | ||
258 | #define UFS_LINK_MAX 32000 | ||
259 | /* | ||
260 | #define UFS2_NOCSPTRS ((128 / sizeof(void *)) - 4) | ||
261 | */ | ||
262 | #define UFS2_NOCSPTRS 28 | ||
263 | |||
264 | /* | ||
265 | * UFS_DIR_PAD defines the directory entries boundaries | ||
266 | * (must be a multiple of 4) | ||
267 | */ | ||
268 | #define UFS_DIR_PAD 4 | ||
269 | #define UFS_DIR_ROUND (UFS_DIR_PAD - 1) | ||
270 | #define UFS_DIR_REC_LEN(name_len) (((name_len) + 1 + 8 + UFS_DIR_ROUND) & ~UFS_DIR_ROUND) | ||
271 | |||
272 | struct ufs_timeval { | ||
273 | __fs32 tv_sec; | ||
274 | __fs32 tv_usec; | ||
275 | }; | ||
276 | |||
277 | struct ufs_dir_entry { | ||
278 | __fs32 d_ino; /* inode number of this entry */ | ||
279 | __fs16 d_reclen; /* length of this entry */ | ||
280 | union { | ||
281 | __fs16 d_namlen; /* actual length of d_name */ | ||
282 | struct { | ||
283 | __u8 d_type; /* file type */ | ||
284 | __u8 d_namlen; /* length of string in d_name */ | ||
285 | } d_44; | ||
286 | } d_u; | ||
287 | __u8 d_name[UFS_MAXNAMLEN + 1]; /* file name */ | ||
288 | }; | ||
289 | |||
290 | struct ufs_csum { | ||
291 | __fs32 cs_ndir; /* number of directories */ | ||
292 | __fs32 cs_nbfree; /* number of free blocks */ | ||
293 | __fs32 cs_nifree; /* number of free inodes */ | ||
294 | __fs32 cs_nffree; /* number of free frags */ | ||
295 | }; | ||
296 | struct ufs2_csum_total { | ||
297 | __fs64 cs_ndir; /* number of directories */ | ||
298 | __fs64 cs_nbfree; /* number of free blocks */ | ||
299 | __fs64 cs_nifree; /* number of free inodes */ | ||
300 | __fs64 cs_nffree; /* number of free frags */ | ||
301 | __fs64 cs_numclusters; /* number of free clusters */ | ||
302 | __fs64 cs_spare[3]; /* future expansion */ | ||
303 | }; | ||
304 | |||
305 | struct ufs_csum_core { | ||
306 | __u64 cs_ndir; /* number of directories */ | ||
307 | __u64 cs_nbfree; /* number of free blocks */ | ||
308 | __u64 cs_nifree; /* number of free inodes */ | ||
309 | __u64 cs_nffree; /* number of free frags */ | ||
310 | __u64 cs_numclusters; /* number of free clusters */ | ||
311 | }; | ||
312 | |||
313 | /* | ||
314 | * File system flags | ||
315 | */ | ||
316 | #define UFS_UNCLEAN 0x01 /* file system not clean at mount (unused) */ | ||
317 | #define UFS_DOSOFTDEP 0x02 /* file system using soft dependencies */ | ||
318 | #define UFS_NEEDSFSCK 0x04 /* needs sync fsck (FreeBSD compat, unused) */ | ||
319 | #define UFS_INDEXDIRS 0x08 /* kernel supports indexed directories */ | ||
320 | #define UFS_ACLS 0x10 /* file system has ACLs enabled */ | ||
321 | #define UFS_MULTILABEL 0x20 /* file system is MAC multi-label */ | ||
322 | #define UFS_FLAGS_UPDATED 0x80 /* flags have been moved to new location */ | ||
323 | |||
324 | #if 0 | ||
325 | /* | ||
326 | * This is the actual superblock, as it is laid out on the disk. | ||
327 | * Do NOT use this structure, because of sizeof(ufs_super_block) > 512 and | ||
328 | * it may occupy several blocks, use | ||
329 | * struct ufs_super_block_(first,second,third) instead. | ||
330 | */ | ||
331 | struct ufs_super_block { | ||
332 | union { | ||
333 | struct { | ||
334 | __fs32 fs_link; /* UNUSED */ | ||
335 | } fs_42; | ||
336 | struct { | ||
337 | __fs32 fs_state; /* file system state flag */ | ||
338 | } fs_sun; | ||
339 | } fs_u0; | ||
340 | __fs32 fs_rlink; /* UNUSED */ | ||
341 | __fs32 fs_sblkno; /* addr of super-block in filesys */ | ||
342 | __fs32 fs_cblkno; /* offset of cyl-block in filesys */ | ||
343 | __fs32 fs_iblkno; /* offset of inode-blocks in filesys */ | ||
344 | __fs32 fs_dblkno; /* offset of first data after cg */ | ||
345 | __fs32 fs_cgoffset; /* cylinder group offset in cylinder */ | ||
346 | __fs32 fs_cgmask; /* used to calc mod fs_ntrak */ | ||
347 | __fs32 fs_time; /* last time written -- time_t */ | ||
348 | __fs32 fs_size; /* number of blocks in fs */ | ||
349 | __fs32 fs_dsize; /* number of data blocks in fs */ | ||
350 | __fs32 fs_ncg; /* number of cylinder groups */ | ||
351 | __fs32 fs_bsize; /* size of basic blocks in fs */ | ||
352 | __fs32 fs_fsize; /* size of frag blocks in fs */ | ||
353 | __fs32 fs_frag; /* number of frags in a block in fs */ | ||
354 | /* these are configuration parameters */ | ||
355 | __fs32 fs_minfree; /* minimum percentage of free blocks */ | ||
356 | __fs32 fs_rotdelay; /* num of ms for optimal next block */ | ||
357 | __fs32 fs_rps; /* disk revolutions per second */ | ||
358 | /* these fields can be computed from the others */ | ||
359 | __fs32 fs_bmask; /* ``blkoff'' calc of blk offsets */ | ||
360 | __fs32 fs_fmask; /* ``fragoff'' calc of frag offsets */ | ||
361 | __fs32 fs_bshift; /* ``lblkno'' calc of logical blkno */ | ||
362 | __fs32 fs_fshift; /* ``numfrags'' calc number of frags */ | ||
363 | /* these are configuration parameters */ | ||
364 | __fs32 fs_maxcontig; /* max number of contiguous blks */ | ||
365 | __fs32 fs_maxbpg; /* max number of blks per cyl group */ | ||
366 | /* these fields can be computed from the others */ | ||
367 | __fs32 fs_fragshift; /* block to frag shift */ | ||
368 | __fs32 fs_fsbtodb; /* fsbtodb and dbtofsb shift constant */ | ||
369 | __fs32 fs_sbsize; /* actual size of super block */ | ||
370 | __fs32 fs_csmask; /* csum block offset */ | ||
371 | __fs32 fs_csshift; /* csum block number */ | ||
372 | __fs32 fs_nindir; /* value of NINDIR */ | ||
373 | __fs32 fs_inopb; /* value of INOPB */ | ||
374 | __fs32 fs_nspf; /* value of NSPF */ | ||
375 | /* yet another configuration parameter */ | ||
376 | __fs32 fs_optim; /* optimization preference, see below */ | ||
377 | /* these fields are derived from the hardware */ | ||
378 | union { | ||
379 | struct { | ||
380 | __fs32 fs_npsect; /* # sectors/track including spares */ | ||
381 | } fs_sun; | ||
382 | struct { | ||
383 | __fs32 fs_state; /* file system state time stamp */ | ||
384 | } fs_sunx86; | ||
385 | } fs_u1; | ||
386 | __fs32 fs_interleave; /* hardware sector interleave */ | ||
387 | __fs32 fs_trackskew; /* sector 0 skew, per track */ | ||
388 | /* a unique id for this filesystem (currently unused and unmaintained) */ | ||
389 | /* In 4.3 Tahoe this space is used by fs_headswitch and fs_trkseek */ | ||
390 | /* Neither of those fields is used in the Tahoe code right now but */ | ||
391 | /* there could be problems if they are. */ | ||
392 | __fs32 fs_id[2]; /* file system id */ | ||
393 | /* sizes determined by number of cylinder groups and their sizes */ | ||
394 | __fs32 fs_csaddr; /* blk addr of cyl grp summary area */ | ||
395 | __fs32 fs_cssize; /* size of cyl grp summary area */ | ||
396 | __fs32 fs_cgsize; /* cylinder group size */ | ||
397 | /* these fields are derived from the hardware */ | ||
398 | __fs32 fs_ntrak; /* tracks per cylinder */ | ||
399 | __fs32 fs_nsect; /* sectors per track */ | ||
400 | __fs32 fs_spc; /* sectors per cylinder */ | ||
401 | /* this comes from the disk driver partitioning */ | ||
402 | __fs32 fs_ncyl; /* cylinders in file system */ | ||
403 | /* these fields can be computed from the others */ | ||
404 | __fs32 fs_cpg; /* cylinders per group */ | ||
405 | __fs32 fs_ipg; /* inodes per cylinder group */ | ||
406 | __fs32 fs_fpg; /* blocks per group * fs_frag */ | ||
407 | /* this data must be re-computed after crashes */ | ||
408 | struct ufs_csum fs_cstotal; /* cylinder summary information */ | ||
409 | /* these fields are cleared at mount time */ | ||
410 | __s8 fs_fmod; /* super block modified flag */ | ||
411 | __s8 fs_clean; /* file system is clean flag */ | ||
412 | __s8 fs_ronly; /* mounted read-only flag */ | ||
413 | __s8 fs_flags; | ||
414 | union { | ||
415 | struct { | ||
416 | __s8 fs_fsmnt[UFS_MAXMNTLEN];/* name mounted on */ | ||
417 | __fs32 fs_cgrotor; /* last cg searched */ | ||
418 | __fs32 fs_csp[UFS_MAXCSBUFS];/*list of fs_cs info buffers */ | ||
419 | __fs32 fs_maxcluster; | ||
420 | __fs32 fs_cpc; /* cyl per cycle in postbl */ | ||
421 | __fs16 fs_opostbl[16][8]; /* old rotation block list head */ | ||
422 | } fs_u1; | ||
423 | struct { | ||
424 | __s8 fs_fsmnt[UFS2_MAXMNTLEN]; /* name mounted on */ | ||
425 | __u8 fs_volname[UFS2_MAXVOLLEN]; /* volume name */ | ||
426 | __fs64 fs_swuid; /* system-wide uid */ | ||
427 | __fs32 fs_pad; /* due to alignment of fs_swuid */ | ||
428 | __fs32 fs_cgrotor; /* last cg searched */ | ||
429 | __fs32 fs_ocsp[UFS2_NOCSPTRS]; /*list of fs_cs info buffers */ | ||
430 | __fs32 fs_contigdirs;/*# of contiguously allocated dirs */ | ||
431 | __fs32 fs_csp; /* cg summary info buffer for fs_cs */ | ||
432 | __fs32 fs_maxcluster; | ||
433 | __fs32 fs_active;/* used by snapshots to track fs */ | ||
434 | __fs32 fs_old_cpc; /* cyl per cycle in postbl */ | ||
435 | __fs32 fs_maxbsize;/*maximum blocking factor permitted */ | ||
436 | __fs64 fs_sparecon64[17];/*old rotation block list head */ | ||
437 | __fs64 fs_sblockloc; /* byte offset of standard superblock */ | ||
438 | struct ufs2_csum_total fs_cstotal;/*cylinder summary information*/ | ||
439 | struct ufs_timeval fs_time; /* last time written */ | ||
440 | __fs64 fs_size; /* number of blocks in fs */ | ||
441 | __fs64 fs_dsize; /* number of data blocks in fs */ | ||
442 | __fs64 fs_csaddr; /* blk addr of cyl grp summary area */ | ||
443 | __fs64 fs_pendingblocks;/* blocks in process of being freed */ | ||
444 | __fs32 fs_pendinginodes;/*inodes in process of being freed */ | ||
445 | } fs_u2; | ||
446 | } fs_u11; | ||
447 | union { | ||
448 | struct { | ||
449 | __fs32 fs_sparecon[53];/* reserved for future constants */ | ||
450 | __fs32 fs_reclaim; | ||
451 | __fs32 fs_sparecon2[1]; | ||
452 | __fs32 fs_state; /* file system state time stamp */ | ||
453 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
454 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
455 | } fs_sun; | ||
456 | struct { | ||
457 | __fs32 fs_sparecon[53];/* reserved for future constants */ | ||
458 | __fs32 fs_reclaim; | ||
459 | __fs32 fs_sparecon2[1]; | ||
460 | __fs32 fs_npsect; /* # sectors/track including spares */ | ||
461 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
462 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
463 | } fs_sunx86; | ||
464 | struct { | ||
465 | __fs32 fs_sparecon[50];/* reserved for future constants */ | ||
466 | __fs32 fs_contigsumsize;/* size of cluster summary array */ | ||
467 | __fs32 fs_maxsymlinklen;/* max length of an internal symlink */ | ||
468 | __fs32 fs_inodefmt; /* format of on-disk inodes */ | ||
469 | __fs32 fs_maxfilesize[2]; /* max representable file size */ | ||
470 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
471 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
472 | __fs32 fs_state; /* file system state time stamp */ | ||
473 | } fs_44; | ||
474 | } fs_u2; | ||
475 | __fs32 fs_postblformat; /* format of positional layout tables */ | ||
476 | __fs32 fs_nrpos; /* number of rotational positions */ | ||
477 | __fs32 fs_postbloff; /* (__s16) rotation block list head */ | ||
478 | __fs32 fs_rotbloff; /* (__u8) blocks for each rotation */ | ||
479 | __fs32 fs_magic; /* magic number */ | ||
480 | __u8 fs_space[1]; /* list of blocks for each rotation */ | ||
481 | }; | ||
482 | #endif/*struct ufs_super_block*/ | ||
483 | |||
484 | /* | ||
485 | * Preference for optimization. | ||
486 | */ | ||
487 | #define UFS_OPTTIME 0 /* minimize allocation time */ | ||
488 | #define UFS_OPTSPACE 1 /* minimize disk fragmentation */ | ||
489 | |||
490 | /* | ||
491 | * Rotational layout table format types | ||
492 | */ | ||
493 | #define UFS_42POSTBLFMT -1 /* 4.2BSD rotational table format */ | ||
494 | #define UFS_DYNAMICPOSTBLFMT 1 /* dynamic rotational table format */ | ||
495 | |||
496 | /* | ||
497 | * Convert cylinder group to base address of its global summary info. | ||
498 | */ | ||
499 | #define fs_cs(indx) s_csp[(indx)] | ||
500 | |||
501 | /* | ||
502 | * Cylinder group block for a file system. | ||
503 | * | ||
504 | * Writable fields in the cylinder group are protected by the associated | ||
505 | * super block lock fs->fs_lock. | ||
506 | */ | ||
507 | #define CG_MAGIC 0x090255 | ||
508 | #define ufs_cg_chkmagic(sb, ucg) \ | ||
509 | (fs32_to_cpu((sb), (ucg)->cg_magic) == CG_MAGIC) | ||
510 | /* | ||
511 | * Macros for access to old cylinder group array structures | ||
512 | */ | ||
513 | #define ufs_ocg_blktot(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_btot) | ||
514 | #define ufs_ocg_blks(sb, ucg, cylno) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_b[cylno]) | ||
515 | #define ufs_ocg_inosused(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_iused) | ||
516 | #define ufs_ocg_blksfree(sb, ucg) fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_free) | ||
517 | #define ufs_ocg_chkmagic(sb, ucg) \ | ||
518 | (fs32_to_cpu((sb), ((struct ufs_old_cylinder_group *)(ucg))->cg_magic) == CG_MAGIC) | ||
519 | |||
520 | /* | ||
521 | * size of this structure is 172 B | ||
522 | */ | ||
523 | struct ufs_cylinder_group { | ||
524 | __fs32 cg_link; /* linked list of cyl groups */ | ||
525 | __fs32 cg_magic; /* magic number */ | ||
526 | __fs32 cg_time; /* time last written */ | ||
527 | __fs32 cg_cgx; /* we are the cgx'th cylinder group */ | ||
528 | __fs16 cg_ncyl; /* number of cyl's this cg */ | ||
529 | __fs16 cg_niblk; /* number of inode blocks this cg */ | ||
530 | __fs32 cg_ndblk; /* number of data blocks this cg */ | ||
531 | struct ufs_csum cg_cs; /* cylinder summary information */ | ||
532 | __fs32 cg_rotor; /* position of last used block */ | ||
533 | __fs32 cg_frotor; /* position of last used frag */ | ||
534 | __fs32 cg_irotor; /* position of last used inode */ | ||
535 | __fs32 cg_frsum[UFS_MAXFRAG]; /* counts of available frags */ | ||
536 | __fs32 cg_btotoff; /* (__u32) block totals per cylinder */ | ||
537 | __fs32 cg_boff; /* (short) free block positions */ | ||
538 | __fs32 cg_iusedoff; /* (char) used inode map */ | ||
539 | __fs32 cg_freeoff; /* (u_char) free block map */ | ||
540 | __fs32 cg_nextfreeoff; /* (u_char) next available space */ | ||
541 | union { | ||
542 | struct { | ||
543 | __fs32 cg_clustersumoff; /* (u_int32) counts of avail clusters */ | ||
544 | __fs32 cg_clusteroff; /* (u_int8) free cluster map */ | ||
545 | __fs32 cg_nclusterblks; /* number of clusters this cg */ | ||
546 | __fs32 cg_sparecon[13]; /* reserved for future use */ | ||
547 | } cg_44; | ||
548 | struct { | ||
549 | __fs32 cg_clustersumoff;/* (u_int32) counts of avail clusters */ | ||
550 | __fs32 cg_clusteroff; /* (u_int8) free cluster map */ | ||
551 | __fs32 cg_nclusterblks;/* number of clusters this cg */ | ||
552 | __fs32 cg_niblk; /* number of inode blocks this cg */ | ||
553 | __fs32 cg_initediblk; /* last initialized inode */ | ||
554 | __fs32 cg_sparecon32[3];/* reserved for future use */ | ||
555 | __fs64 cg_time; /* time last written */ | ||
556 | __fs64 cg_sparecon[3]; /* reserved for future use */ | ||
557 | } cg_u2; | ||
558 | __fs32 cg_sparecon[16]; /* reserved for future use */ | ||
559 | } cg_u; | ||
560 | __u8 cg_space[1]; /* space for cylinder group maps */ | ||
561 | /* actually longer */ | ||
562 | }; | ||
563 | |||
564 | /* Historic Cylinder group info */ | ||
565 | struct ufs_old_cylinder_group { | ||
566 | __fs32 cg_link; /* linked list of cyl groups */ | ||
567 | __fs32 cg_rlink; /* for incore cyl groups */ | ||
568 | __fs32 cg_time; /* time last written */ | ||
569 | __fs32 cg_cgx; /* we are the cgx'th cylinder group */ | ||
570 | __fs16 cg_ncyl; /* number of cyl's this cg */ | ||
571 | __fs16 cg_niblk; /* number of inode blocks this cg */ | ||
572 | __fs32 cg_ndblk; /* number of data blocks this cg */ | ||
573 | struct ufs_csum cg_cs; /* cylinder summary information */ | ||
574 | __fs32 cg_rotor; /* position of last used block */ | ||
575 | __fs32 cg_frotor; /* position of last used frag */ | ||
576 | __fs32 cg_irotor; /* position of last used inode */ | ||
577 | __fs32 cg_frsum[8]; /* counts of available frags */ | ||
578 | __fs32 cg_btot[32]; /* block totals per cylinder */ | ||
579 | __fs16 cg_b[32][8]; /* positions of free blocks */ | ||
580 | __u8 cg_iused[256]; /* used inode map */ | ||
581 | __fs32 cg_magic; /* magic number */ | ||
582 | __u8 cg_free[1]; /* free block map */ | ||
583 | /* actually longer */ | ||
584 | }; | ||
585 | |||
586 | /* | ||
587 | * structure of an on-disk inode | ||
588 | */ | ||
589 | struct ufs_inode { | ||
590 | __fs16 ui_mode; /* 0x0 */ | ||
591 | __fs16 ui_nlink; /* 0x2 */ | ||
592 | union { | ||
593 | struct { | ||
594 | __fs16 ui_suid; /* 0x4 */ | ||
595 | __fs16 ui_sgid; /* 0x6 */ | ||
596 | } oldids; | ||
597 | __fs32 ui_inumber; /* 0x4 lsf: inode number */ | ||
598 | __fs32 ui_author; /* 0x4 GNU HURD: author */ | ||
599 | } ui_u1; | ||
600 | __fs64 ui_size; /* 0x8 */ | ||
601 | struct ufs_timeval ui_atime; /* 0x10 access */ | ||
602 | struct ufs_timeval ui_mtime; /* 0x18 modification */ | ||
603 | struct ufs_timeval ui_ctime; /* 0x20 creation */ | ||
604 | union { | ||
605 | struct { | ||
606 | __fs32 ui_db[UFS_NDADDR];/* 0x28 data blocks */ | ||
607 | __fs32 ui_ib[UFS_NINDIR];/* 0x58 indirect blocks */ | ||
608 | } ui_addr; | ||
609 | __u8 ui_symlink[4*(UFS_NDADDR+UFS_NINDIR)];/* 0x28 fast symlink */ | ||
610 | } ui_u2; | ||
611 | __fs32 ui_flags; /* 0x64 immutable, append-only... */ | ||
612 | __fs32 ui_blocks; /* 0x68 blocks in use */ | ||
613 | __fs32 ui_gen; /* 0x6c like ext2 i_version, for NFS support */ | ||
614 | union { | ||
615 | struct { | ||
616 | __fs32 ui_shadow; /* 0x70 shadow inode with security data */ | ||
617 | __fs32 ui_uid; /* 0x74 long EFT version of uid */ | ||
618 | __fs32 ui_gid; /* 0x78 long EFT version of gid */ | ||
619 | __fs32 ui_oeftflag; /* 0x7c reserved */ | ||
620 | } ui_sun; | ||
621 | struct { | ||
622 | __fs32 ui_uid; /* 0x70 File owner */ | ||
623 | __fs32 ui_gid; /* 0x74 File group */ | ||
624 | __fs32 ui_spare[2]; /* 0x78 reserved */ | ||
625 | } ui_44; | ||
626 | struct { | ||
627 | __fs32 ui_uid; /* 0x70 */ | ||
628 | __fs32 ui_gid; /* 0x74 */ | ||
629 | __fs16 ui_modeh; /* 0x78 mode high bits */ | ||
630 | __fs16 ui_spare; /* 0x7A unused */ | ||
631 | __fs32 ui_trans; /* 0x7c filesystem translator */ | ||
632 | } ui_hurd; | ||
633 | } ui_u3; | ||
634 | }; | ||
635 | |||
636 | #define UFS_NXADDR 2 /* External addresses in inode. */ | ||
637 | struct ufs2_inode { | ||
638 | __fs16 ui_mode; /* 0: IFMT, permissions; see below. */ | ||
639 | __fs16 ui_nlink; /* 2: File link count. */ | ||
640 | __fs32 ui_uid; /* 4: File owner. */ | ||
641 | __fs32 ui_gid; /* 8: File group. */ | ||
642 | __fs32 ui_blksize; /* 12: Inode blocksize. */ | ||
643 | __fs64 ui_size; /* 16: File byte count. */ | ||
644 | __fs64 ui_blocks; /* 24: Bytes actually held. */ | ||
645 | __fs64 ui_atime; /* 32: Last access time. */ | ||
646 | __fs64 ui_mtime; /* 40: Last modified time. */ | ||
647 | __fs64 ui_ctime; /* 48: Last inode change time. */ | ||
648 | __fs64 ui_birthtime; /* 56: Inode creation time. */ | ||
649 | __fs32 ui_mtimensec; /* 64: Last modified time. */ | ||
650 | __fs32 ui_atimensec; /* 68: Last access time. */ | ||
651 | __fs32 ui_ctimensec; /* 72: Last inode change time. */ | ||
652 | __fs32 ui_birthnsec; /* 76: Inode creation time. */ | ||
653 | __fs32 ui_gen; /* 80: Generation number. */ | ||
654 | __fs32 ui_kernflags; /* 84: Kernel flags. */ | ||
655 | __fs32 ui_flags; /* 88: Status flags (chflags). */ | ||
656 | __fs32 ui_extsize; /* 92: External attributes block. */ | ||
657 | __fs64 ui_extb[UFS_NXADDR];/* 96: External attributes block. */ | ||
658 | union { | ||
659 | struct { | ||
660 | __fs64 ui_db[UFS_NDADDR]; /* 112: Direct disk blocks. */ | ||
661 | __fs64 ui_ib[UFS_NINDIR];/* 208: Indirect disk blocks.*/ | ||
662 | } ui_addr; | ||
663 | __u8 ui_symlink[2*4*(UFS_NDADDR+UFS_NINDIR)];/* 112: Fast symlink. */ | ||
664 | } ui_u2; | ||
665 | __fs64 ui_spare[3]; /* 232: Reserved; currently unused */ | ||
666 | }; | ||
667 | |||
668 | |||
669 | /* FreeBSD has these in sys/stat.h */ | ||
670 | /* ui_flags that can be set by a file owner */ | ||
671 | #define UFS_UF_SETTABLE 0x0000ffff | ||
672 | #define UFS_UF_NODUMP 0x00000001 /* do not dump */ | ||
673 | #define UFS_UF_IMMUTABLE 0x00000002 /* immutable (can't "change") */ | ||
674 | #define UFS_UF_APPEND 0x00000004 /* append-only */ | ||
675 | #define UFS_UF_OPAQUE 0x00000008 /* directory is opaque (unionfs) */ | ||
676 | #define UFS_UF_NOUNLINK 0x00000010 /* can't be removed or renamed */ | ||
677 | /* ui_flags that only root can set */ | ||
678 | #define UFS_SF_SETTABLE 0xffff0000 | ||
679 | #define UFS_SF_ARCHIVED 0x00010000 /* archived */ | ||
680 | #define UFS_SF_IMMUTABLE 0x00020000 /* immutable (can't "change") */ | ||
681 | #define UFS_SF_APPEND 0x00040000 /* append-only */ | ||
682 | #define UFS_SF_NOUNLINK 0x00100000 /* can't be removed or renamed */ | ||
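The chflags-style bits above split into an owner-settable low half (UFS_UF_*) and a superuser-only high half (UFS_SF_*). A small, self-contained sketch of how ui_flags would be inspected against those masks (userspace C; the macro values are copied from the defines above for illustration):

    #include <stdio.h>

    #define UFS_UF_SETTABLE   0x0000ffff
    #define UFS_UF_IMMUTABLE  0x00000002
    #define UFS_SF_SETTABLE   0xffff0000
    #define UFS_SF_IMMUTABLE  0x00020000

    /* Report which half of ui_flags a given bit lives in. */
    static const char *flag_owner(unsigned int bit)
    {
            if (bit & UFS_UF_SETTABLE)
                    return "file owner";
            if (bit & UFS_SF_SETTABLE)
                    return "superuser only";
            return "unknown";
    }

    int main(void)
    {
            unsigned int ui_flags = UFS_UF_IMMUTABLE | UFS_SF_IMMUTABLE;

            printf("UF_IMMUTABLE set: %d (%s)\n",
                   !!(ui_flags & UFS_UF_IMMUTABLE), flag_owner(UFS_UF_IMMUTABLE));
            printf("SF_IMMUTABLE set: %d (%s)\n",
                   !!(ui_flags & UFS_SF_IMMUTABLE), flag_owner(UFS_SF_IMMUTABLE));
            return 0;
    }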
683 | |||
684 | /* | ||
685 | * This structure is used for reading disk structures larger | ||
686 | * than the size of fragment. | ||
687 | */ | ||
688 | struct ufs_buffer_head { | ||
689 | __u64 fragment; /* first fragment */ | ||
690 | __u64 count; /* number of fragments */ | ||
691 | struct buffer_head * bh[UFS_MAXFRAG]; /* buffers */ | ||
692 | }; | ||
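As the comment says, this head describes a run of `count` consecutive fragments starting at `fragment`, with one buffer_head per fragment. A hedged sketch of the access pattern (the helper below is hypothetical, not part of this patch; kernel context assumed):

    /* Hypothetical helper: copy 'len' bytes starting 'off' bytes into the
     * fragment run described by 'ubh'; assumes each bh[] covers one
     * fragment of 'fsize' bytes. */
    static void ubh_copy(struct ufs_buffer_head *ubh, unsigned fsize,
                         unsigned off, char *dst, unsigned len)
    {
            while (len) {
                    unsigned frag = off / fsize;            /* which buffer      */
                    unsigned in_frag = off % fsize;         /* offset within it  */
                    unsigned n = min(len, fsize - in_frag);

                    memcpy(dst, ubh->bh[frag]->b_data + in_frag, n);
                    dst += n; off += n; len -= n;
            }
    }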
693 | |||
694 | struct ufs_cg_private_info { | ||
695 | struct ufs_buffer_head c_ubh; | ||
696 | __u32 c_cgx; /* cylinder group number */ | ||
697 | __u16 c_ncyl; /* number of cyl's this cg */ | ||
698 | __u16 c_niblk; /* number of inode blocks this cg */ | ||
699 | __u32 c_ndblk; /* number of data blocks this cg */ | ||
700 | __u32 c_rotor; /* position of last used block */ | ||
701 | __u32 c_frotor; /* position of last used frag */ | ||
702 | __u32 c_irotor; /* position of last used inode */ | ||
703 | __u32 c_btotoff; /* (__u32) block totals per cylinder */ | ||
704 | __u32 c_boff; /* (short) free block positions */ | ||
705 | __u32 c_iusedoff; /* (char) used inode map */ | ||
706 | __u32 c_freeoff; /* (u_char) free block map */ | ||
707 | __u32 c_nextfreeoff; /* (u_char) next available space */ | ||
708 | __u32 c_clustersumoff;/* (u_int32) counts of avail clusters */ | ||
709 | __u32 c_clusteroff; /* (u_int8) free cluster map */ | ||
710 | __u32 c_nclusterblks; /* number of clusters this cg */ | ||
711 | }; | ||
712 | |||
713 | |||
714 | struct ufs_sb_private_info { | ||
715 | struct ufs_buffer_head s_ubh; /* buffer containing super block */ | ||
716 | struct ufs_csum_core cs_total; | ||
717 | __u32 s_sblkno; /* offset of super-blocks in filesys */ | ||
718 | __u32 s_cblkno; /* offset of cg-block in filesys */ | ||
719 | __u32 s_iblkno; /* offset of inode-blocks in filesys */ | ||
720 | __u32 s_dblkno; /* offset of first data after cg */ | ||
721 | __u32 s_cgoffset; /* cylinder group offset in cylinder */ | ||
722 | __u32 s_cgmask; /* used to calc mod fs_ntrak */ | ||
723 | __u32 s_size; /* number of blocks (fragments) in fs */ | ||
724 | __u32 s_dsize; /* number of data blocks in fs */ | ||
725 | __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */ | ||
725 | __u64 s_u2_dsize; /* ufs2: number of data blocks in fs */ | ||
727 | __u32 s_ncg; /* number of cylinder groups */ | ||
728 | __u32 s_bsize; /* size of basic blocks */ | ||
729 | __u32 s_fsize; /* size of fragments */ | ||
730 | __u32 s_fpb; /* fragments per block */ | ||
731 | __u32 s_minfree; /* minimum percentage of free blocks */ | ||
732 | __u32 s_bmask; /* `blkoff'' calc of blk offsets */ | ||
733 | __u32 s_fmask; /* s_fsize mask */ | ||
734 | __u32 s_bshift; /* `lblkno'' calc of logical blkno */ | ||
735 | __u32 s_fshift; /* s_fsize shift */ | ||
736 | __u32 s_fpbshift; /* fragments per block shift */ | ||
737 | __u32 s_fsbtodb; /* fsbtodb and dbtofsb shift constant */ | ||
738 | __u32 s_sbsize; /* actual size of super block */ | ||
739 | __u32 s_csmask; /* csum block offset */ | ||
740 | __u32 s_csshift; /* csum block number */ | ||
741 | __u32 s_nindir; /* value of NINDIR */ | ||
742 | __u32 s_inopb; /* value of INOPB */ | ||
743 | __u32 s_nspf; /* value of NSPF */ | ||
744 | __u32 s_npsect; /* # sectors/track including spares */ | ||
745 | __u32 s_interleave; /* hardware sector interleave */ | ||
746 | __u32 s_trackskew; /* sector 0 skew, per track */ | ||
747 | __u64 s_csaddr; /* blk addr of cyl grp summary area */ | ||
748 | __u32 s_cssize; /* size of cyl grp summary area */ | ||
749 | __u32 s_cgsize; /* cylinder group size */ | ||
750 | __u32 s_ntrak; /* tracks per cylinder */ | ||
751 | __u32 s_nsect; /* sectors per track */ | ||
752 | __u32 s_spc; /* sectors per cylinder */ | ||
753 | __u32 s_ipg; /* inodes per cylinder group */ | ||
754 | __u32 s_fpg; /* fragments per group */ | ||
755 | __u32 s_cpc; /* cyl per cycle in postbl */ | ||
756 | __s32 s_contigsumsize;/* size of cluster summary array, 44bsd */ | ||
757 | __s64 s_qbmask; /* ~usb_bmask */ | ||
758 | __s64 s_qfmask; /* ~usb_fmask */ | ||
759 | __s32 s_postblformat; /* format of positional layout tables */ | ||
760 | __s32 s_nrpos; /* number of rotational positions */ | ||
761 | __s32 s_postbloff; /* (__s16) rotation block list head */ | ||
762 | __s32 s_rotbloff; /* (__u8) blocks for each rotation */ | ||
763 | |||
764 | __u32 s_fpbmask; /* fragments per block mask */ | ||
765 | __u32 s_apb; /* address per block */ | ||
766 | __u32 s_2apb; /* address per block^2 */ | ||
767 | __u32 s_3apb; /* address per block^3 */ | ||
768 | __u32 s_apbmask; /* address per block mask */ | ||
769 | __u32 s_apbshift; /* address per block shift */ | ||
770 | __u32 s_2apbshift; /* address per block shift * 2 */ | ||
771 | __u32 s_3apbshift; /* address per block shift * 3 */ | ||
772 | __u32 s_nspfshift; /* number of sector per fragment shift */ | ||
773 | __u32 s_nspb; /* number of sector per block */ | ||
774 | __u32 s_inopf; /* inodes per fragment */ | ||
775 | __u32 s_sbbase; /* offset of NeXTstep superblock */ | ||
776 | __u32 s_bpf; /* bits per fragment */ | ||
777 | __u32 s_bpfshift; /* bits per fragment shift*/ | ||
778 | __u32 s_bpfmask; /* bits per fragment mask */ | ||
779 | |||
780 | __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ | ||
781 | __s32 fs_magic; /* filesystem magic */ | ||
782 | unsigned int s_dirblksize; | ||
783 | }; | ||
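The shift/mask fields above (s_bshift, s_fshift, s_fpbshift, ...) exist so that byte offsets can be turned into block and fragment indices with shifts instead of divisions. A minimal worked example in plain C, using assumed geometry of 8 KiB blocks and 1 KiB fragments:

    #include <stdio.h>

    int main(void)
    {
            /* assumed example geometry: 8 KiB blocks, 1 KiB fragments */
            unsigned s_bshift = 13, s_fshift = 10;
            unsigned s_fpbshift = s_bshift - s_fshift;      /* fragments-per-block shift */
            unsigned long long pos = 100000;                /* byte offset in a file     */

            unsigned long long blk  = pos >> s_bshift;      /* logical block number      */
            unsigned long long frag = pos >> s_fshift;      /* logical fragment number   */
            unsigned frag_in_blk = frag & ((1u << s_fpbshift) - 1);

            printf("block %llu, fragment %llu (%u within block)\n",
                   blk, frag, frag_in_blk);
            return 0;
    }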
784 | |||
785 | /* | ||
786 | * Sizes of these structures are: | ||
787 | * ufs_super_block_first 512 | ||
788 | * ufs_super_block_second 512 | ||
789 | * ufs_super_block_third 356 | ||
790 | */ | ||
791 | struct ufs_super_block_first { | ||
792 | union { | ||
793 | struct { | ||
794 | __fs32 fs_link; /* UNUSED */ | ||
795 | } fs_42; | ||
796 | struct { | ||
797 | __fs32 fs_state; /* file system state flag */ | ||
798 | } fs_sun; | ||
799 | } fs_u0; | ||
800 | __fs32 fs_rlink; | ||
801 | __fs32 fs_sblkno; | ||
802 | __fs32 fs_cblkno; | ||
803 | __fs32 fs_iblkno; | ||
804 | __fs32 fs_dblkno; | ||
805 | __fs32 fs_cgoffset; | ||
806 | __fs32 fs_cgmask; | ||
807 | __fs32 fs_time; | ||
808 | __fs32 fs_size; | ||
809 | __fs32 fs_dsize; | ||
810 | __fs32 fs_ncg; | ||
811 | __fs32 fs_bsize; | ||
812 | __fs32 fs_fsize; | ||
813 | __fs32 fs_frag; | ||
814 | __fs32 fs_minfree; | ||
815 | __fs32 fs_rotdelay; | ||
816 | __fs32 fs_rps; | ||
817 | __fs32 fs_bmask; | ||
818 | __fs32 fs_fmask; | ||
819 | __fs32 fs_bshift; | ||
820 | __fs32 fs_fshift; | ||
821 | __fs32 fs_maxcontig; | ||
822 | __fs32 fs_maxbpg; | ||
823 | __fs32 fs_fragshift; | ||
824 | __fs32 fs_fsbtodb; | ||
825 | __fs32 fs_sbsize; | ||
826 | __fs32 fs_csmask; | ||
827 | __fs32 fs_csshift; | ||
828 | __fs32 fs_nindir; | ||
829 | __fs32 fs_inopb; | ||
830 | __fs32 fs_nspf; | ||
831 | __fs32 fs_optim; | ||
832 | union { | ||
833 | struct { | ||
834 | __fs32 fs_npsect; | ||
835 | } fs_sun; | ||
836 | struct { | ||
837 | __fs32 fs_state; | ||
838 | } fs_sunx86; | ||
839 | } fs_u1; | ||
840 | __fs32 fs_interleave; | ||
841 | __fs32 fs_trackskew; | ||
842 | __fs32 fs_id[2]; | ||
843 | __fs32 fs_csaddr; | ||
844 | __fs32 fs_cssize; | ||
845 | __fs32 fs_cgsize; | ||
846 | __fs32 fs_ntrak; | ||
847 | __fs32 fs_nsect; | ||
848 | __fs32 fs_spc; | ||
849 | __fs32 fs_ncyl; | ||
850 | __fs32 fs_cpg; | ||
851 | __fs32 fs_ipg; | ||
852 | __fs32 fs_fpg; | ||
853 | struct ufs_csum fs_cstotal; | ||
854 | __s8 fs_fmod; | ||
855 | __s8 fs_clean; | ||
856 | __s8 fs_ronly; | ||
857 | __s8 fs_flags; | ||
858 | __s8 fs_fsmnt[UFS_MAXMNTLEN - 212]; | ||
859 | |||
860 | }; | ||
861 | |||
862 | struct ufs_super_block_second { | ||
863 | union { | ||
864 | struct { | ||
865 | __s8 fs_fsmnt[212]; | ||
866 | __fs32 fs_cgrotor; | ||
867 | __fs32 fs_csp[UFS_MAXCSBUFS]; | ||
868 | __fs32 fs_maxcluster; | ||
869 | __fs32 fs_cpc; | ||
870 | __fs16 fs_opostbl[82]; | ||
871 | } fs_u1; | ||
872 | struct { | ||
873 | __s8 fs_fsmnt[UFS2_MAXMNTLEN - UFS_MAXMNTLEN + 212]; | ||
874 | __u8 fs_volname[UFS2_MAXVOLLEN]; | ||
875 | __fs64 fs_swuid; | ||
876 | __fs32 fs_pad; | ||
877 | __fs32 fs_cgrotor; | ||
878 | __fs32 fs_ocsp[UFS2_NOCSPTRS]; | ||
879 | __fs32 fs_contigdirs; | ||
880 | __fs32 fs_csp; | ||
881 | __fs32 fs_maxcluster; | ||
882 | __fs32 fs_active; | ||
883 | __fs32 fs_old_cpc; | ||
884 | __fs32 fs_maxbsize; | ||
885 | __fs64 fs_sparecon64[17]; | ||
886 | __fs64 fs_sblockloc; | ||
887 | __fs64 cs_ndir; | ||
888 | __fs64 cs_nbfree; | ||
889 | } fs_u2; | ||
890 | } fs_un; | ||
891 | }; | ||
892 | |||
893 | struct ufs_super_block_third { | ||
894 | union { | ||
895 | struct { | ||
896 | __fs16 fs_opostbl[46]; | ||
897 | } fs_u1; | ||
898 | struct { | ||
899 | __fs64 cs_nifree; /* number of free inodes */ | ||
900 | __fs64 cs_nffree; /* number of free frags */ | ||
901 | __fs64 cs_numclusters; /* number of free clusters */ | ||
902 | __fs64 cs_spare[3]; /* future expansion */ | ||
903 | struct ufs_timeval fs_time; /* last time written */ | ||
904 | __fs64 fs_size; /* number of blocks in fs */ | ||
905 | __fs64 fs_dsize; /* number of data blocks in fs */ | ||
906 | __fs64 fs_csaddr; /* blk addr of cyl grp summary area */ | ||
907 | __fs64 fs_pendingblocks;/* blocks in process of being freed */ | ||
908 | __fs32 fs_pendinginodes;/* inodes in process of being freed */ | ||
909 | } __attribute__ ((packed)) fs_u2; | ||
910 | } fs_un1; | ||
911 | union { | ||
912 | struct { | ||
913 | __fs32 fs_sparecon[53];/* reserved for future constants */ | ||
914 | __fs32 fs_reclaim; | ||
915 | __fs32 fs_sparecon2[1]; | ||
916 | __fs32 fs_state; /* file system state time stamp */ | ||
917 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
918 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
919 | } fs_sun; | ||
920 | struct { | ||
921 | __fs32 fs_sparecon[53];/* reserved for future constants */ | ||
922 | __fs32 fs_reclaim; | ||
923 | __fs32 fs_sparecon2[1]; | ||
924 | __fs32 fs_npsect; /* # sectors/track including spares */ | ||
925 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
926 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
927 | } fs_sunx86; | ||
928 | struct { | ||
929 | __fs32 fs_sparecon[50];/* reserved for future constants */ | ||
930 | __fs32 fs_contigsumsize;/* size of cluster summary array */ | ||
931 | __fs32 fs_maxsymlinklen;/* max length of an internal symlink */ | ||
932 | __fs32 fs_inodefmt; /* format of on-disk inodes */ | ||
933 | __fs32 fs_maxfilesize[2]; /* max representable file size */ | ||
934 | __fs32 fs_qbmask[2]; /* ~usb_bmask */ | ||
935 | __fs32 fs_qfmask[2]; /* ~usb_fmask */ | ||
936 | __fs32 fs_state; /* file system state time stamp */ | ||
937 | } fs_44; | ||
938 | } fs_un2; | ||
939 | __fs32 fs_postblformat; | ||
940 | __fs32 fs_nrpos; | ||
941 | __fs32 fs_postbloff; | ||
942 | __fs32 fs_rotbloff; | ||
943 | __fs32 fs_magic; | ||
944 | __u8 fs_space[1]; | ||
945 | }; | ||
946 | |||
947 | #endif /* __LINUX_UFS_FS_H */ | ||
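The comment above the three superblock pieces pins their on-disk sizes at 512/512/356 bytes. A hedged sketch (not part of this patch) of how that layout promise could be asserted at build time from the mount path:

    /* Sketch only: sanity-check the sizes the comment above promises,
     * e.g. called from ufs_fill_super(). */
    static inline void ufs_check_sb_layout(void)
    {
            BUILD_BUG_ON(sizeof(struct ufs_super_block_first)  != 512);
            BUILD_BUG_ON(sizeof(struct ufs_super_block_second) != 512);
            BUILD_BUG_ON(sizeof(struct ufs_super_block_third)  != 356);
    }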
diff --git a/fs/ufs/util.c b/fs/ufs/util.c index 410084dae389..85a7fc9e4a4e 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c | |||
@@ -8,9 +8,9 @@ | |||
8 | 8 | ||
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/ufs_fs.h> | ||
12 | #include <linux/buffer_head.h> | 11 | #include <linux/buffer_head.h> |
13 | 12 | ||
13 | #include "ufs_fs.h" | ||
14 | #include "ufs.h" | 14 | #include "ufs.h" |
15 | #include "swab.h" | 15 | #include "swab.h" |
16 | #include "util.h" | 16 | #include "util.h" |
diff --git a/fs/utimes.c b/fs/utimes.c index b9912ecbee24..b18da9c0b97f 100644 --- a/fs/utimes.c +++ b/fs/utimes.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/stat.h> | 7 | #include <linux/stat.h> |
8 | #include <linux/utime.h> | 8 | #include <linux/utime.h> |
9 | #include <linux/syscalls.h> | ||
9 | #include <asm/uaccess.h> | 10 | #include <asm/uaccess.h> |
10 | #include <asm/unistd.h> | 11 | #include <asm/unistd.h> |
11 | 12 | ||
@@ -83,7 +84,7 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags | |||
83 | if (error) | 84 | if (error) |
84 | goto out; | 85 | goto out; |
85 | 86 | ||
86 | dentry = nd.dentry; | 87 | dentry = nd.path.dentry; |
87 | } | 88 | } |
88 | 89 | ||
89 | inode = dentry->d_inode; | 90 | inode = dentry->d_inode; |
@@ -137,7 +138,7 @@ dput_and_out: | |||
137 | if (f) | 138 | if (f) |
138 | fput(f); | 139 | fput(f); |
139 | else | 140 | else |
140 | path_release(&nd); | 141 | path_put(&nd.path); |
141 | out: | 142 | out: |
142 | return error; | 143 | return error; |
143 | } | 144 | } |
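The utimes change is part of the wider nameidata conversion in this merge: the dentry/vfsmount pair now lives in an embedded struct path, and a single path_put() drops both references that path_release() used to drop. A sketch of the resulting pattern (kernel context, 2.6.25-era API assumed):

    /* Sketch of the post-conversion lookup/release pattern. */
    struct nameidata nd;
    struct inode *inode;
    int error;

    error = user_path_walk(filename, &nd);  /* filename: user pointer from the syscall */
    if (error)
            return error;

    inode = nd.path.dentry->d_inode;
    /* ... operate on inode ... */
    path_put(&nd.path);                     /* drops dentry and vfsmount together */
    return 0;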
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c index c28add2fbe95..cd450bea9f1a 100644 --- a/fs/vfat/namei.c +++ b/fs/vfat/namei.c | |||
@@ -705,7 +705,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, | |||
705 | brelse(sinfo.bh); | 705 | brelse(sinfo.bh); |
706 | if (IS_ERR(inode)) { | 706 | if (IS_ERR(inode)) { |
707 | unlock_kernel(); | 707 | unlock_kernel(); |
708 | return ERR_PTR(PTR_ERR(inode)); | 708 | return ERR_CAST(inode); |
709 | } | 709 | } |
710 | alias = d_find_alias(inode); | 710 | alias = d_find_alias(inode); |
711 | if (alias) { | 711 | if (alias) { |
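ERR_CAST() is the new helper used here: it propagates an error-valued pointer of one type as another without decoding and re-encoding the errno. A minimal sketch (the function name is illustrative, not from this patch):

    #include <linux/err.h>

    /* Sketch: ERR_CAST(p) is essentially (void *)p with type checking,
     * so it replaces the ERR_PTR(PTR_ERR(p)) round trip. */
    static struct dentry *lookup_alias(struct inode *inode)
    {
            if (IS_ERR(inode))
                    return ERR_CAST(inode);  /* was: ERR_PTR(PTR_ERR(inode)) */
            /* ... */
            return NULL;
    }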
diff --git a/fs/xattr.c b/fs/xattr.c index f7c8f87bb390..3acab1615460 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -262,8 +262,8 @@ sys_setxattr(char __user *path, char __user *name, void __user *value, | |||
262 | error = user_path_walk(path, &nd); | 262 | error = user_path_walk(path, &nd); |
263 | if (error) | 263 | if (error) |
264 | return error; | 264 | return error; |
265 | error = setxattr(nd.dentry, name, value, size, flags); | 265 | error = setxattr(nd.path.dentry, name, value, size, flags); |
266 | path_release(&nd); | 266 | path_put(&nd.path); |
267 | return error; | 267 | return error; |
268 | } | 268 | } |
269 | 269 | ||
@@ -277,8 +277,8 @@ sys_lsetxattr(char __user *path, char __user *name, void __user *value, | |||
277 | error = user_path_walk_link(path, &nd); | 277 | error = user_path_walk_link(path, &nd); |
278 | if (error) | 278 | if (error) |
279 | return error; | 279 | return error; |
280 | error = setxattr(nd.dentry, name, value, size, flags); | 280 | error = setxattr(nd.path.dentry, name, value, size, flags); |
281 | path_release(&nd); | 281 | path_put(&nd.path); |
282 | return error; | 282 | return error; |
283 | } | 283 | } |
284 | 284 | ||
@@ -347,8 +347,8 @@ sys_getxattr(char __user *path, char __user *name, void __user *value, | |||
347 | error = user_path_walk(path, &nd); | 347 | error = user_path_walk(path, &nd); |
348 | if (error) | 348 | if (error) |
349 | return error; | 349 | return error; |
350 | error = getxattr(nd.dentry, name, value, size); | 350 | error = getxattr(nd.path.dentry, name, value, size); |
351 | path_release(&nd); | 351 | path_put(&nd.path); |
352 | return error; | 352 | return error; |
353 | } | 353 | } |
354 | 354 | ||
@@ -362,8 +362,8 @@ sys_lgetxattr(char __user *path, char __user *name, void __user *value, | |||
362 | error = user_path_walk_link(path, &nd); | 362 | error = user_path_walk_link(path, &nd); |
363 | if (error) | 363 | if (error) |
364 | return error; | 364 | return error; |
365 | error = getxattr(nd.dentry, name, value, size); | 365 | error = getxattr(nd.path.dentry, name, value, size); |
366 | path_release(&nd); | 366 | path_put(&nd.path); |
367 | return error; | 367 | return error; |
368 | } | 368 | } |
369 | 369 | ||
@@ -421,8 +421,8 @@ sys_listxattr(char __user *path, char __user *list, size_t size) | |||
421 | error = user_path_walk(path, &nd); | 421 | error = user_path_walk(path, &nd); |
422 | if (error) | 422 | if (error) |
423 | return error; | 423 | return error; |
424 | error = listxattr(nd.dentry, list, size); | 424 | error = listxattr(nd.path.dentry, list, size); |
425 | path_release(&nd); | 425 | path_put(&nd.path); |
426 | return error; | 426 | return error; |
427 | } | 427 | } |
428 | 428 | ||
@@ -435,8 +435,8 @@ sys_llistxattr(char __user *path, char __user *list, size_t size) | |||
435 | error = user_path_walk_link(path, &nd); | 435 | error = user_path_walk_link(path, &nd); |
436 | if (error) | 436 | if (error) |
437 | return error; | 437 | return error; |
438 | error = listxattr(nd.dentry, list, size); | 438 | error = listxattr(nd.path.dentry, list, size); |
439 | path_release(&nd); | 439 | path_put(&nd.path); |
440 | return error; | 440 | return error; |
441 | } | 441 | } |
442 | 442 | ||
@@ -482,8 +482,8 @@ sys_removexattr(char __user *path, char __user *name) | |||
482 | error = user_path_walk(path, &nd); | 482 | error = user_path_walk(path, &nd); |
483 | if (error) | 483 | if (error) |
484 | return error; | 484 | return error; |
485 | error = removexattr(nd.dentry, name); | 485 | error = removexattr(nd.path.dentry, name); |
486 | path_release(&nd); | 486 | path_put(&nd.path); |
487 | return error; | 487 | return error; |
488 | } | 488 | } |
489 | 489 | ||
@@ -496,8 +496,8 @@ sys_lremovexattr(char __user *path, char __user *name) | |||
496 | error = user_path_walk_link(path, &nd); | 496 | error = user_path_walk_link(path, &nd); |
497 | if (error) | 497 | if (error) |
498 | return error; | 498 | return error; |
499 | error = removexattr(nd.dentry, name); | 499 | error = removexattr(nd.path.dentry, name); |
500 | path_release(&nd); | 500 | path_put(&nd.path); |
501 | return error; | 501 | return error; |
502 | } | 502 | } |
503 | 503 | ||
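Each xattr syscall above comes in a follow-the-link variant (user_path_walk) and an operate-on-the-link variant (user_path_walk_link). From userspace that is the getxattr()/lgetxattr() distinction; a small runnable illustration, assuming "link" is a symlink with a user.comment attribute somewhere on the path:

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(void)
    {
            char buf[256];
            ssize_t n;

            n = getxattr("link", "user.comment", buf, sizeof(buf));   /* follows the link  */
            printf("target xattr length: %zd\n", n);

            n = lgetxattr("link", "user.comment", buf, sizeof(buf));  /* the link itself   */
            printf("link xattr length:   %zd\n", n);
            return 0;
    }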
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6 index d1491aa7a0e2..97316451fc6d 100644 --- a/fs/xfs/Makefile-linux-2.6 +++ b/fs/xfs/Makefile-linux-2.6 | |||
@@ -70,7 +70,6 @@ xfs-y += xfs_alloc.o \ | |||
70 | xfs_iget.o \ | 70 | xfs_iget.o \ |
71 | xfs_inode.o \ | 71 | xfs_inode.o \ |
72 | xfs_inode_item.o \ | 72 | xfs_inode_item.o \ |
73 | xfs_iocore.o \ | ||
74 | xfs_iomap.o \ | 73 | xfs_iomap.o \ |
75 | xfs_itable.o \ | 74 | xfs_itable.o \ |
76 | xfs_dfrag.o \ | 75 | xfs_dfrag.o \ |
diff --git a/fs/xfs/linux-2.6/spin.h b/fs/xfs/linux-2.6/spin.h deleted file mode 100644 index 50a6191178f4..000000000000 --- a/fs/xfs/linux-2.6/spin.h +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_SUPPORT_SPIN_H__ | ||
19 | #define __XFS_SUPPORT_SPIN_H__ | ||
20 | |||
21 | #include <linux/sched.h> /* preempt needs this */ | ||
22 | #include <linux/spinlock.h> | ||
23 | |||
24 | /* | ||
25 | * Map lock_t from IRIX to Linux spinlocks. | ||
26 | * | ||
27 | * We do not make use of lock_t from interrupt context, so we do not | ||
28 | * have to worry about disabling interrupts at all (unlike IRIX). | ||
29 | */ | ||
30 | |||
31 | typedef spinlock_t lock_t; | ||
32 | |||
33 | #define SPLDECL(s) unsigned long s | ||
34 | #ifndef DEFINE_SPINLOCK | ||
35 | #define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED | ||
36 | #endif | ||
37 | |||
38 | #define spinlock_init(lock, name) spin_lock_init(lock) | ||
39 | #define spinlock_destroy(lock) | ||
40 | #define mutex_spinlock(lock) ({ spin_lock(lock); 0; }) | ||
41 | #define mutex_spinunlock(lock, s) do { spin_unlock(lock); (void)s; } while (0) | ||
42 | #define nested_spinlock(lock) spin_lock(lock) | ||
43 | #define nested_spinunlock(lock) spin_unlock(lock) | ||
44 | |||
45 | #endif /* __XFS_SUPPORT_SPIN_H__ */ | ||
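With the IRIX compatibility wrappers deleted, XFS callers use the stock Linux spinlock API directly (as the xfs_buf.c hunk further down does with spin_lock_init()). A short sketch of the replacement pattern (kernel context assumed; example_lock is an illustrative name):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);       /* was: lock_t + spinlock_init() */

    static void example(void)
    {
            spin_lock(&example_lock);           /* was: mutex_spinlock()         */
            /* ... critical section ... */
            spin_unlock(&example_lock);         /* was: mutex_spinunlock()       */
    }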
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 2e34b104107c..e0519529c26c 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -107,6 +107,18 @@ xfs_page_trace( | |||
107 | #define xfs_page_trace(tag, inode, page, pgoff) | 107 | #define xfs_page_trace(tag, inode, page, pgoff) |
108 | #endif | 108 | #endif |
109 | 109 | ||
110 | STATIC struct block_device * | ||
111 | xfs_find_bdev_for_inode( | ||
112 | struct xfs_inode *ip) | ||
113 | { | ||
114 | struct xfs_mount *mp = ip->i_mount; | ||
115 | |||
116 | if (XFS_IS_REALTIME_INODE(ip)) | ||
117 | return mp->m_rtdev_targp->bt_bdev; | ||
118 | else | ||
119 | return mp->m_ddev_targp->bt_bdev; | ||
120 | } | ||
121 | |||
110 | /* | 122 | /* |
111 | * Schedule IO completion handling on a xfsdatad if this was | 123 | * Schedule IO completion handling on a xfsdatad if this was |
112 | * the final hold on this ioend. If we are asked to wait, | 124 | * the final hold on this ioend. If we are asked to wait, |
@@ -151,7 +163,7 @@ xfs_destroy_ioend( | |||
151 | /* | 163 | /* |
152 | * Update on-disk file size now that data has been written to disk. | 164 | * Update on-disk file size now that data has been written to disk. |
153 | * The current in-memory file size is i_size. If a write is beyond | 165 | * The current in-memory file size is i_size. If a write is beyond |
154 | * eof io_new_size will be the intended file size until i_size is | 166 | * eof i_new_size will be the intended file size until i_size is |
155 | * updated. If this write does not extend all the way to the valid | 167 | * updated. If this write does not extend all the way to the valid |
156 | * file size then restrict this update to the end of the write. | 168 | * file size then restrict this update to the end of the write. |
157 | */ | 169 | */ |
@@ -173,7 +185,7 @@ xfs_setfilesize( | |||
173 | 185 | ||
174 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 186 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
175 | 187 | ||
176 | isize = MAX(ip->i_size, ip->i_iocore.io_new_size); | 188 | isize = MAX(ip->i_size, ip->i_new_size); |
177 | isize = MIN(isize, bsize); | 189 | isize = MIN(isize, bsize); |
178 | 190 | ||
179 | if (ip->i_d.di_size < isize) { | 191 | if (ip->i_d.di_size < isize) { |
@@ -226,12 +238,13 @@ xfs_end_bio_unwritten( | |||
226 | { | 238 | { |
227 | xfs_ioend_t *ioend = | 239 | xfs_ioend_t *ioend = |
228 | container_of(work, xfs_ioend_t, io_work); | 240 | container_of(work, xfs_ioend_t, io_work); |
241 | struct xfs_inode *ip = XFS_I(ioend->io_inode); | ||
229 | xfs_off_t offset = ioend->io_offset; | 242 | xfs_off_t offset = ioend->io_offset; |
230 | size_t size = ioend->io_size; | 243 | size_t size = ioend->io_size; |
231 | 244 | ||
232 | if (likely(!ioend->io_error)) { | 245 | if (likely(!ioend->io_error)) { |
233 | xfs_bmap(XFS_I(ioend->io_inode), offset, size, | 246 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) |
234 | BMAPI_UNWRITTEN, NULL, NULL); | 247 | xfs_iomap_write_unwritten(ip, offset, size); |
235 | xfs_setfilesize(ioend); | 248 | xfs_setfilesize(ioend); |
236 | } | 249 | } |
237 | xfs_destroy_ioend(ioend); | 250 | xfs_destroy_ioend(ioend); |
@@ -304,7 +317,7 @@ xfs_map_blocks( | |||
304 | xfs_inode_t *ip = XFS_I(inode); | 317 | xfs_inode_t *ip = XFS_I(inode); |
305 | int error, nmaps = 1; | 318 | int error, nmaps = 1; |
306 | 319 | ||
307 | error = xfs_bmap(ip, offset, count, | 320 | error = xfs_iomap(ip, offset, count, |
308 | flags, mapp, &nmaps); | 321 | flags, mapp, &nmaps); |
309 | if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE))) | 322 | if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE))) |
310 | xfs_iflags_set(ip, XFS_IMODIFIED); | 323 | xfs_iflags_set(ip, XFS_IMODIFIED); |
@@ -1323,7 +1336,7 @@ __xfs_get_blocks( | |||
1323 | offset = (xfs_off_t)iblock << inode->i_blkbits; | 1336 | offset = (xfs_off_t)iblock << inode->i_blkbits; |
1324 | ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); | 1337 | ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); |
1325 | size = bh_result->b_size; | 1338 | size = bh_result->b_size; |
1326 | error = xfs_bmap(XFS_I(inode), offset, size, | 1339 | error = xfs_iomap(XFS_I(inode), offset, size, |
1327 | create ? flags : BMAPI_READ, &iomap, &niomap); | 1340 | create ? flags : BMAPI_READ, &iomap, &niomap); |
1328 | if (error) | 1341 | if (error) |
1329 | return -error; | 1342 | return -error; |
@@ -1471,28 +1484,21 @@ xfs_vm_direct_IO( | |||
1471 | { | 1484 | { |
1472 | struct file *file = iocb->ki_filp; | 1485 | struct file *file = iocb->ki_filp; |
1473 | struct inode *inode = file->f_mapping->host; | 1486 | struct inode *inode = file->f_mapping->host; |
1474 | xfs_iomap_t iomap; | 1487 | struct block_device *bdev; |
1475 | int maps = 1; | ||
1476 | int error; | ||
1477 | ssize_t ret; | 1488 | ssize_t ret; |
1478 | 1489 | ||
1479 | error = xfs_bmap(XFS_I(inode), offset, 0, | 1490 | bdev = xfs_find_bdev_for_inode(XFS_I(inode)); |
1480 | BMAPI_DEVICE, &iomap, &maps); | ||
1481 | if (error) | ||
1482 | return -error; | ||
1483 | 1491 | ||
1484 | if (rw == WRITE) { | 1492 | if (rw == WRITE) { |
1485 | iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); | 1493 | iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); |
1486 | ret = blockdev_direct_IO_own_locking(rw, iocb, inode, | 1494 | ret = blockdev_direct_IO_own_locking(rw, iocb, inode, |
1487 | iomap.iomap_target->bt_bdev, | 1495 | bdev, iov, offset, nr_segs, |
1488 | iov, offset, nr_segs, | ||
1489 | xfs_get_blocks_direct, | 1496 | xfs_get_blocks_direct, |
1490 | xfs_end_io_direct); | 1497 | xfs_end_io_direct); |
1491 | } else { | 1498 | } else { |
1492 | iocb->private = xfs_alloc_ioend(inode, IOMAP_READ); | 1499 | iocb->private = xfs_alloc_ioend(inode, IOMAP_READ); |
1493 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | 1500 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, |
1494 | iomap.iomap_target->bt_bdev, | 1501 | bdev, iov, offset, nr_segs, |
1495 | iov, offset, nr_segs, | ||
1496 | xfs_get_blocks_direct, | 1502 | xfs_get_blocks_direct, |
1497 | xfs_end_io_direct); | 1503 | xfs_end_io_direct); |
1498 | } | 1504 | } |
@@ -1525,8 +1531,7 @@ xfs_vm_bmap( | |||
1525 | struct inode *inode = (struct inode *)mapping->host; | 1531 | struct inode *inode = (struct inode *)mapping->host; |
1526 | struct xfs_inode *ip = XFS_I(inode); | 1532 | struct xfs_inode *ip = XFS_I(inode); |
1527 | 1533 | ||
1528 | vn_trace_entry(XFS_I(inode), __FUNCTION__, | 1534 | xfs_itrace_entry(XFS_I(inode)); |
1529 | (inst_t *)__return_address); | ||
1530 | xfs_rwlock(ip, VRWLOCK_READ); | 1535 | xfs_rwlock(ip, VRWLOCK_READ); |
1531 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); | 1536 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); |
1532 | xfs_rwunlock(ip, VRWLOCK_READ); | 1537 | xfs_rwunlock(ip, VRWLOCK_READ); |
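The xfs_setfilesize() hunk above grows the on-disk size to the intended new size (i_new_size) but never past the end of the write that just completed. A worked example of that clamping in plain C, with assumed values:

    #include <stdio.h>

    int main(void)
    {
            /* assumed example values, in bytes */
            unsigned long long i_size = 4096, i_new_size = 16384;
            unsigned long long io_offset = 4096, io_size = 8192;
            unsigned long long bsize = io_offset + io_size;   /* end of the completed write */

            unsigned long long isize = i_size > i_new_size ? i_size : i_new_size;
            if (isize > bsize)
                    isize = bsize;

            printf("on-disk size update: %llu\n", isize);     /* 12288 here */
            return 0;
    }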
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 0382c19d6523..e347bfd47c91 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -387,8 +387,6 @@ _xfs_buf_lookup_pages( | |||
387 | if (unlikely(page == NULL)) { | 387 | if (unlikely(page == NULL)) { |
388 | if (flags & XBF_READ_AHEAD) { | 388 | if (flags & XBF_READ_AHEAD) { |
389 | bp->b_page_count = i; | 389 | bp->b_page_count = i; |
390 | for (i = 0; i < bp->b_page_count; i++) | ||
391 | unlock_page(bp->b_pages[i]); | ||
392 | return -ENOMEM; | 390 | return -ENOMEM; |
393 | } | 391 | } |
394 | 392 | ||
@@ -418,24 +416,17 @@ _xfs_buf_lookup_pages( | |||
418 | ASSERT(!PagePrivate(page)); | 416 | ASSERT(!PagePrivate(page)); |
419 | if (!PageUptodate(page)) { | 417 | if (!PageUptodate(page)) { |
420 | page_count--; | 418 | page_count--; |
421 | if (blocksize >= PAGE_CACHE_SIZE) { | 419 | if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) { |
422 | if (flags & XBF_READ) | ||
423 | bp->b_locked = 1; | ||
424 | } else if (!PagePrivate(page)) { | ||
425 | if (test_page_region(page, offset, nbytes)) | 420 | if (test_page_region(page, offset, nbytes)) |
426 | page_count++; | 421 | page_count++; |
427 | } | 422 | } |
428 | } | 423 | } |
429 | 424 | ||
425 | unlock_page(page); | ||
430 | bp->b_pages[i] = page; | 426 | bp->b_pages[i] = page; |
431 | offset = 0; | 427 | offset = 0; |
432 | } | 428 | } |
433 | 429 | ||
434 | if (!bp->b_locked) { | ||
435 | for (i = 0; i < bp->b_page_count; i++) | ||
436 | unlock_page(bp->b_pages[i]); | ||
437 | } | ||
438 | |||
439 | if (page_count == bp->b_page_count) | 430 | if (page_count == bp->b_page_count) |
440 | bp->b_flags |= XBF_DONE; | 431 | bp->b_flags |= XBF_DONE; |
441 | 432 | ||
@@ -751,7 +742,6 @@ xfs_buf_associate_memory( | |||
751 | bp->b_pages[i] = mem_to_page((void *)pageaddr); | 742 | bp->b_pages[i] = mem_to_page((void *)pageaddr); |
752 | pageaddr += PAGE_CACHE_SIZE; | 743 | pageaddr += PAGE_CACHE_SIZE; |
753 | } | 744 | } |
754 | bp->b_locked = 0; | ||
755 | 745 | ||
756 | bp->b_count_desired = len; | 746 | bp->b_count_desired = len; |
757 | bp->b_buffer_length = buflen; | 747 | bp->b_buffer_length = buflen; |
@@ -1098,25 +1088,13 @@ xfs_buf_iostart( | |||
1098 | return status; | 1088 | return status; |
1099 | } | 1089 | } |
1100 | 1090 | ||
1101 | STATIC_INLINE int | ||
1102 | _xfs_buf_iolocked( | ||
1103 | xfs_buf_t *bp) | ||
1104 | { | ||
1105 | ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE)); | ||
1106 | if (bp->b_flags & XBF_READ) | ||
1107 | return bp->b_locked; | ||
1108 | return 0; | ||
1109 | } | ||
1110 | |||
1111 | STATIC_INLINE void | 1091 | STATIC_INLINE void |
1112 | _xfs_buf_ioend( | 1092 | _xfs_buf_ioend( |
1113 | xfs_buf_t *bp, | 1093 | xfs_buf_t *bp, |
1114 | int schedule) | 1094 | int schedule) |
1115 | { | 1095 | { |
1116 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { | 1096 | if (atomic_dec_and_test(&bp->b_io_remaining) == 1) |
1117 | bp->b_locked = 0; | ||
1118 | xfs_buf_ioend(bp, schedule); | 1097 | xfs_buf_ioend(bp, schedule); |
1119 | } | ||
1120 | } | 1098 | } |
1121 | 1099 | ||
1122 | STATIC void | 1100 | STATIC void |
@@ -1147,10 +1125,6 @@ xfs_buf_bio_end_io( | |||
1147 | 1125 | ||
1148 | if (--bvec >= bio->bi_io_vec) | 1126 | if (--bvec >= bio->bi_io_vec) |
1149 | prefetchw(&bvec->bv_page->flags); | 1127 | prefetchw(&bvec->bv_page->flags); |
1150 | |||
1151 | if (_xfs_buf_iolocked(bp)) { | ||
1152 | unlock_page(page); | ||
1153 | } | ||
1154 | } while (bvec >= bio->bi_io_vec); | 1128 | } while (bvec >= bio->bi_io_vec); |
1155 | 1129 | ||
1156 | _xfs_buf_ioend(bp, 1); | 1130 | _xfs_buf_ioend(bp, 1); |
@@ -1161,13 +1135,12 @@ STATIC void | |||
1161 | _xfs_buf_ioapply( | 1135 | _xfs_buf_ioapply( |
1162 | xfs_buf_t *bp) | 1136 | xfs_buf_t *bp) |
1163 | { | 1137 | { |
1164 | int i, rw, map_i, total_nr_pages, nr_pages; | 1138 | int rw, map_i, total_nr_pages, nr_pages; |
1165 | struct bio *bio; | 1139 | struct bio *bio; |
1166 | int offset = bp->b_offset; | 1140 | int offset = bp->b_offset; |
1167 | int size = bp->b_count_desired; | 1141 | int size = bp->b_count_desired; |
1168 | sector_t sector = bp->b_bn; | 1142 | sector_t sector = bp->b_bn; |
1169 | unsigned int blocksize = bp->b_target->bt_bsize; | 1143 | unsigned int blocksize = bp->b_target->bt_bsize; |
1170 | int locking = _xfs_buf_iolocked(bp); | ||
1171 | 1144 | ||
1172 | total_nr_pages = bp->b_page_count; | 1145 | total_nr_pages = bp->b_page_count; |
1173 | map_i = 0; | 1146 | map_i = 0; |
@@ -1190,7 +1163,7 @@ _xfs_buf_ioapply( | |||
1190 | * filesystem block size is not smaller than the page size. | 1163 | * filesystem block size is not smaller than the page size. |
1191 | */ | 1164 | */ |
1192 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && | 1165 | if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && |
1193 | (bp->b_flags & XBF_READ) && locking && | 1166 | (bp->b_flags & XBF_READ) && |
1194 | (blocksize >= PAGE_CACHE_SIZE)) { | 1167 | (blocksize >= PAGE_CACHE_SIZE)) { |
1195 | bio = bio_alloc(GFP_NOIO, 1); | 1168 | bio = bio_alloc(GFP_NOIO, 1); |
1196 | 1169 | ||
@@ -1207,24 +1180,6 @@ _xfs_buf_ioapply( | |||
1207 | goto submit_io; | 1180 | goto submit_io; |
1208 | } | 1181 | } |
1209 | 1182 | ||
1210 | /* Lock down the pages which we need to for the request */ | ||
1211 | if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) { | ||
1212 | for (i = 0; size; i++) { | ||
1213 | int nbytes = PAGE_CACHE_SIZE - offset; | ||
1214 | struct page *page = bp->b_pages[i]; | ||
1215 | |||
1216 | if (nbytes > size) | ||
1217 | nbytes = size; | ||
1218 | |||
1219 | lock_page(page); | ||
1220 | |||
1221 | size -= nbytes; | ||
1222 | offset = 0; | ||
1223 | } | ||
1224 | offset = bp->b_offset; | ||
1225 | size = bp->b_count_desired; | ||
1226 | } | ||
1227 | |||
1228 | next_chunk: | 1183 | next_chunk: |
1229 | atomic_inc(&bp->b_io_remaining); | 1184 | atomic_inc(&bp->b_io_remaining); |
1230 | nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); | 1185 | nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); |
@@ -1571,7 +1526,7 @@ xfs_alloc_delwrite_queue( | |||
1571 | 1526 | ||
1572 | INIT_LIST_HEAD(&btp->bt_list); | 1527 | INIT_LIST_HEAD(&btp->bt_list); |
1573 | INIT_LIST_HEAD(&btp->bt_delwrite_queue); | 1528 | INIT_LIST_HEAD(&btp->bt_delwrite_queue); |
1574 | spinlock_init(&btp->bt_delwrite_lock, "delwri_lock"); | 1529 | spin_lock_init(&btp->bt_delwrite_lock); |
1575 | btp->bt_flags = 0; | 1530 | btp->bt_flags = 0; |
1576 | btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd"); | 1531 | btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd"); |
1577 | if (IS_ERR(btp->bt_task)) { | 1532 | if (IS_ERR(btp->bt_task)) { |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index b5908a34b15d..a3d207de48b8 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -143,7 +143,6 @@ typedef struct xfs_buf { | |||
143 | void *b_fspriv2; | 143 | void *b_fspriv2; |
144 | void *b_fspriv3; | 144 | void *b_fspriv3; |
145 | unsigned short b_error; /* error code on I/O */ | 145 | unsigned short b_error; /* error code on I/O */ |
146 | unsigned short b_locked; /* page array is locked */ | ||
147 | unsigned int b_page_count; /* size of page array */ | 146 | unsigned int b_page_count; /* size of page array */ |
148 | unsigned int b_offset; /* page offset in first page */ | 147 | unsigned int b_offset; /* page offset in first page */ |
149 | struct page **b_pages; /* array of page pointers */ | 148 | struct page **b_pages; /* array of page pointers */ |
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 15bd4948832c..ca4f66c4de16 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c | |||
@@ -118,20 +118,29 @@ xfs_nfs_get_inode( | |||
118 | u64 ino, | 118 | u64 ino, |
119 | u32 generation) | 119 | u32 generation) |
120 | { | 120 | { |
121 | xfs_fid_t xfid; | 121 | xfs_mount_t *mp = XFS_M(sb); |
122 | bhv_vnode_t *vp; | 122 | xfs_inode_t *ip; |
123 | int error; | 123 | int error; |
124 | 124 | ||
125 | xfid.fid_len = sizeof(xfs_fid_t) - sizeof(xfid.fid_len); | 125 | /* |
126 | xfid.fid_pad = 0; | 126 | * NFS can sometimes send requests for ino 0. Fail them gracefully. |
127 | xfid.fid_ino = ino; | 127 | */ |
128 | xfid.fid_gen = generation; | 128 | if (ino == 0) |
129 | return ERR_PTR(-ESTALE); | ||
129 | 130 | ||
130 | error = xfs_vget(XFS_M(sb), &vp, &xfid); | 131 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); |
131 | if (error) | 132 | if (error) |
132 | return ERR_PTR(-error); | 133 | return ERR_PTR(-error); |
134 | if (!ip) | ||
135 | return ERR_PTR(-EIO); | ||
136 | |||
137 | if (!ip->i_d.di_mode || ip->i_d.di_gen != generation) { | ||
138 | xfs_iput_new(ip, XFS_ILOCK_SHARED); | ||
139 | return ERR_PTR(-ENOENT); | ||
140 | } | ||
133 | 141 | ||
134 | return vp ? vn_to_inode(vp) : NULL; | 142 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
143 | return ip->i_vnode; | ||
135 | } | 144 | } |
136 | 145 | ||
137 | STATIC struct dentry * | 146 | STATIC struct dentry * |
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 21a1c2b1c5fc..edab1ffbb163 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c | |||
@@ -350,8 +350,8 @@ xfs_file_readdir( | |||
350 | 350 | ||
351 | size = buf.used; | 351 | size = buf.used; |
352 | de = (struct hack_dirent *)buf.dirent; | 352 | de = (struct hack_dirent *)buf.dirent; |
353 | curr_offset = de->offset /* & 0x7fffffff */; | ||
354 | while (size > 0) { | 353 | while (size > 0) { |
354 | curr_offset = de->offset /* & 0x7fffffff */; | ||
355 | if (filldir(dirent, de->name, de->namlen, | 355 | if (filldir(dirent, de->name, de->namlen, |
356 | curr_offset & 0x7fffffff, | 356 | curr_offset & 0x7fffffff, |
357 | de->ino, de->d_type)) { | 357 | de->ino, de->d_type)) { |
@@ -362,7 +362,6 @@ xfs_file_readdir( | |||
362 | sizeof(u64)); | 362 | sizeof(u64)); |
363 | size -= reclen; | 363 | size -= reclen; |
364 | de = (struct hack_dirent *)((char *)de + reclen); | 364 | de = (struct hack_dirent *)((char *)de + reclen); |
365 | curr_offset = de->offset /* & 0x7fffffff */; | ||
366 | } | 365 | } |
367 | } | 366 | } |
368 | 367 | ||
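The readdir hunk moves the curr_offset read inside the loop so it is taken from the current record before `de` is advanced; with the old placement the final iteration read de->offset from one record past the end of the buffer. A small sketch of the corrected walk over packed records (the record layout below is an assumption for illustration):

    #include <stdio.h>
    #include <string.h>

    struct rec { unsigned short reclen; long offset; };   /* assumed layout */

    int main(void)
    {
            struct rec buf[3];
            size_t size = sizeof(buf);
            struct rec *de = buf;
            int i;

            memset(buf, 0, sizeof(buf));
            for (i = 0; i < 3; i++)
                    buf[i].reclen = sizeof(struct rec);

            while (size > 0) {
                    long curr_offset = de->offset;  /* read the current record first...   */
                    printf("entry at offset %ld\n", curr_offset);
                    size -= de->reclen;             /* ...then advance; the old code read */
                    de = (struct rec *)((char *)de + de->reclen);  /* de->offset here     */
            }
            return 0;
    }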
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c index 9febf9dc999d..ef90e64641e6 100644 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ b/fs/xfs/linux-2.6/xfs_globals.c | |||
@@ -47,5 +47,6 @@ xfs_param_t xfs_params = { | |||
47 | /* | 47 | /* |
48 | * Global system credential structure. | 48 | * Global system credential structure. |
49 | */ | 49 | */ |
50 | cred_t sys_cred_val, *sys_cred = &sys_cred_val; | 50 | static cred_t sys_cred_val; |
51 | cred_t *sys_cred = &sys_cred_val; | ||
51 | 52 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 98a56568bb24..a9952e490ac9 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -75,7 +75,6 @@ xfs_find_handle( | |||
75 | xfs_handle_t handle; | 75 | xfs_handle_t handle; |
76 | xfs_fsop_handlereq_t hreq; | 76 | xfs_fsop_handlereq_t hreq; |
77 | struct inode *inode; | 77 | struct inode *inode; |
78 | bhv_vnode_t *vp; | ||
79 | 78 | ||
80 | if (copy_from_user(&hreq, arg, sizeof(hreq))) | 79 | if (copy_from_user(&hreq, arg, sizeof(hreq))) |
81 | return -XFS_ERROR(EFAULT); | 80 | return -XFS_ERROR(EFAULT); |
@@ -92,10 +91,10 @@ xfs_find_handle( | |||
92 | if (error) | 91 | if (error) |
93 | return error; | 92 | return error; |
94 | 93 | ||
95 | ASSERT(nd.dentry); | 94 | ASSERT(nd.path.dentry); |
96 | ASSERT(nd.dentry->d_inode); | 95 | ASSERT(nd.path.dentry->d_inode); |
97 | inode = igrab(nd.dentry->d_inode); | 96 | inode = igrab(nd.path.dentry->d_inode); |
98 | path_release(&nd); | 97 | path_put(&nd.path); |
99 | break; | 98 | break; |
100 | } | 99 | } |
101 | 100 | ||
@@ -134,21 +133,16 @@ xfs_find_handle( | |||
134 | return -XFS_ERROR(EBADF); | 133 | return -XFS_ERROR(EBADF); |
135 | } | 134 | } |
136 | 135 | ||
137 | /* we need the vnode */ | ||
138 | vp = vn_from_inode(inode); | ||
139 | |||
140 | /* now we can grab the fsid */ | 136 | /* now we can grab the fsid */ |
141 | memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid, | 137 | memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid, |
142 | sizeof(xfs_fsid_t)); | 138 | sizeof(xfs_fsid_t)); |
143 | hsize = sizeof(xfs_fsid_t); | 139 | hsize = sizeof(xfs_fsid_t); |
144 | 140 | ||
145 | if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { | 141 | if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { |
146 | xfs_inode_t *ip; | 142 | xfs_inode_t *ip = XFS_I(inode); |
147 | int lock_mode; | 143 | int lock_mode; |
148 | 144 | ||
149 | /* need to get access to the xfs_inode to read the generation */ | 145 | /* need to get access to the xfs_inode to read the generation */ |
150 | ip = xfs_vtoi(vp); | ||
151 | ASSERT(ip); | ||
152 | lock_mode = xfs_ilock_map_shared(ip); | 146 | lock_mode = xfs_ilock_map_shared(ip); |
153 | 147 | ||
154 | /* fill in fid section of handle from inode */ | 148 | /* fill in fid section of handle from inode */ |
@@ -176,21 +170,19 @@ xfs_find_handle( | |||
176 | 170 | ||
177 | 171 | ||
178 | /* | 172 | /* |
179 | * Convert userspace handle data into vnode (and inode). | 173 | * Convert userspace handle data into inode. |
180 | * We [ab]use the fact that all the fsop_handlereq ioctl calls | 174 | * |
181 | * have a data structure argument whose first component is always | 175 | * We use the fact that all the fsop_handlereq ioctl calls have a data |
182 | * a xfs_fsop_handlereq_t, so we can cast to and from this type. | 176 | * structure argument whose first component is always a xfs_fsop_handlereq_t, |
183 | * This allows us to optimise the copy_from_user calls and gives | 177 | * so we can pass that sub structure into this handy, shared routine. |
184 | * a handy, shared routine. | ||
185 | * | 178 | * |
186 | * If no error, caller must always VN_RELE the returned vp. | 179 | * If no error, caller must always iput the returned inode. |
187 | */ | 180 | */ |
188 | STATIC int | 181 | STATIC int |
189 | xfs_vget_fsop_handlereq( | 182 | xfs_vget_fsop_handlereq( |
190 | xfs_mount_t *mp, | 183 | xfs_mount_t *mp, |
191 | struct inode *parinode, /* parent inode pointer */ | 184 | struct inode *parinode, /* parent inode pointer */ |
192 | xfs_fsop_handlereq_t *hreq, | 185 | xfs_fsop_handlereq_t *hreq, |
193 | bhv_vnode_t **vp, | ||
194 | struct inode **inode) | 186 | struct inode **inode) |
195 | { | 187 | { |
196 | void __user *hanp; | 188 | void __user *hanp; |
@@ -199,8 +191,6 @@ xfs_vget_fsop_handlereq( | |||
199 | xfs_handle_t *handlep; | 191 | xfs_handle_t *handlep; |
200 | xfs_handle_t handle; | 192 | xfs_handle_t handle; |
201 | xfs_inode_t *ip; | 193 | xfs_inode_t *ip; |
202 | struct inode *inodep; | ||
203 | bhv_vnode_t *vpp; | ||
204 | xfs_ino_t ino; | 194 | xfs_ino_t ino; |
205 | __u32 igen; | 195 | __u32 igen; |
206 | int error; | 196 | int error; |
@@ -241,7 +231,7 @@ xfs_vget_fsop_handlereq( | |||
241 | } | 231 | } |
242 | 232 | ||
243 | /* | 233 | /* |
244 | * Get the XFS inode, building a vnode to go with it. | 234 | * Get the XFS inode, building a Linux inode to go with it. |
245 | */ | 235 | */ |
246 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); | 236 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); |
247 | if (error) | 237 | if (error) |
@@ -253,12 +243,9 @@ xfs_vget_fsop_handlereq( | |||
253 | return XFS_ERROR(ENOENT); | 243 | return XFS_ERROR(ENOENT); |
254 | } | 244 | } |
255 | 245 | ||
256 | vpp = XFS_ITOV(ip); | ||
257 | inodep = vn_to_inode(vpp); | ||
258 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 246 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
259 | 247 | ||
260 | *vp = vpp; | 248 | *inode = XFS_ITOV(ip); |
261 | *inode = inodep; | ||
262 | return 0; | 249 | return 0; |
263 | } | 250 | } |
264 | 251 | ||
@@ -275,7 +262,6 @@ xfs_open_by_handle( | |||
275 | struct file *filp; | 262 | struct file *filp; |
276 | struct inode *inode; | 263 | struct inode *inode; |
277 | struct dentry *dentry; | 264 | struct dentry *dentry; |
278 | bhv_vnode_t *vp; | ||
279 | xfs_fsop_handlereq_t hreq; | 265 | xfs_fsop_handlereq_t hreq; |
280 | 266 | ||
281 | if (!capable(CAP_SYS_ADMIN)) | 267 | if (!capable(CAP_SYS_ADMIN)) |
@@ -283,7 +269,7 @@ xfs_open_by_handle( | |||
283 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 269 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
284 | return -XFS_ERROR(EFAULT); | 270 | return -XFS_ERROR(EFAULT); |
285 | 271 | ||
286 | error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode); | 272 | error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); |
287 | if (error) | 273 | if (error) |
288 | return -error; | 274 | return -error; |
289 | 275 | ||
@@ -385,7 +371,6 @@ xfs_readlink_by_handle( | |||
385 | { | 371 | { |
386 | struct inode *inode; | 372 | struct inode *inode; |
387 | xfs_fsop_handlereq_t hreq; | 373 | xfs_fsop_handlereq_t hreq; |
388 | bhv_vnode_t *vp; | ||
389 | __u32 olen; | 374 | __u32 olen; |
390 | void *link; | 375 | void *link; |
391 | int error; | 376 | int error; |
@@ -395,7 +380,7 @@ xfs_readlink_by_handle( | |||
395 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) | 380 | if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) |
396 | return -XFS_ERROR(EFAULT); | 381 | return -XFS_ERROR(EFAULT); |
397 | 382 | ||
398 | error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode); | 383 | error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); |
399 | if (error) | 384 | if (error) |
400 | return -error; | 385 | return -error; |
401 | 386 | ||
@@ -438,34 +423,32 @@ xfs_fssetdm_by_handle( | |||
438 | struct fsdmidata fsd; | 423 | struct fsdmidata fsd; |
439 | xfs_fsop_setdm_handlereq_t dmhreq; | 424 | xfs_fsop_setdm_handlereq_t dmhreq; |
440 | struct inode *inode; | 425 | struct inode *inode; |
441 | bhv_vnode_t *vp; | ||
442 | 426 | ||
443 | if (!capable(CAP_MKNOD)) | 427 | if (!capable(CAP_MKNOD)) |
444 | return -XFS_ERROR(EPERM); | 428 | return -XFS_ERROR(EPERM); |
445 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) | 429 | if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) |
446 | return -XFS_ERROR(EFAULT); | 430 | return -XFS_ERROR(EFAULT); |
447 | 431 | ||
448 | error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode); | 432 | error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode); |
449 | if (error) | 433 | if (error) |
450 | return -error; | 434 | return -error; |
451 | 435 | ||
452 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { | 436 | if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { |
453 | VN_RELE(vp); | 437 | error = -XFS_ERROR(EPERM); |
454 | return -XFS_ERROR(EPERM); | 438 | goto out; |
455 | } | 439 | } |
456 | 440 | ||
457 | if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { | 441 | if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { |
458 | VN_RELE(vp); | 442 | error = -XFS_ERROR(EFAULT); |
459 | return -XFS_ERROR(EFAULT); | 443 | goto out; |
460 | } | 444 | } |
461 | 445 | ||
462 | error = xfs_set_dmattrs(xfs_vtoi(vp), | 446 | error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, |
463 | fsd.fsd_dmevmask, fsd.fsd_dmstate); | 447 | fsd.fsd_dmstate); |
464 | 448 | ||
465 | VN_RELE(vp); | 449 | out: |
466 | if (error) | 450 | iput(inode); |
467 | return -error; | 451 | return error; |
468 | return 0; | ||
469 | } | 452 | } |
470 | 453 | ||
471 | STATIC int | 454 | STATIC int |
@@ -478,7 +461,6 @@ xfs_attrlist_by_handle( | |||
478 | attrlist_cursor_kern_t *cursor; | 461 | attrlist_cursor_kern_t *cursor; |
479 | xfs_fsop_attrlist_handlereq_t al_hreq; | 462 | xfs_fsop_attrlist_handlereq_t al_hreq; |
480 | struct inode *inode; | 463 | struct inode *inode; |
481 | bhv_vnode_t *vp; | ||
482 | char *kbuf; | 464 | char *kbuf; |
483 | 465 | ||
484 | if (!capable(CAP_SYS_ADMIN)) | 466 | if (!capable(CAP_SYS_ADMIN)) |
@@ -488,8 +470,7 @@ xfs_attrlist_by_handle( | |||
488 | if (al_hreq.buflen > XATTR_LIST_MAX) | 470 | if (al_hreq.buflen > XATTR_LIST_MAX) |
489 | return -XFS_ERROR(EINVAL); | 471 | return -XFS_ERROR(EINVAL); |
490 | 472 | ||
491 | error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, | 473 | error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode); |
492 | &vp, &inode); | ||
493 | if (error) | 474 | if (error) |
494 | goto out; | 475 | goto out; |
495 | 476 | ||
@@ -509,7 +490,7 @@ xfs_attrlist_by_handle( | |||
509 | out_kfree: | 490 | out_kfree: |
510 | kfree(kbuf); | 491 | kfree(kbuf); |
511 | out_vn_rele: | 492 | out_vn_rele: |
512 | VN_RELE(vp); | 493 | iput(inode); |
513 | out: | 494 | out: |
514 | return -error; | 495 | return -error; |
515 | } | 496 | } |
@@ -531,7 +512,7 @@ xfs_attrmulti_attr_get( | |||
531 | if (!kbuf) | 512 | if (!kbuf) |
532 | return ENOMEM; | 513 | return ENOMEM; |
533 | 514 | ||
534 | error = xfs_attr_get(XFS_I(inode), name, kbuf, len, flags, NULL); | 515 | error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL); |
535 | if (error) | 516 | if (error) |
536 | goto out_kfree; | 517 | goto out_kfree; |
537 | 518 | ||
@@ -598,7 +579,6 @@ xfs_attrmulti_by_handle( | |||
598 | xfs_attr_multiop_t *ops; | 579 | xfs_attr_multiop_t *ops; |
599 | xfs_fsop_attrmulti_handlereq_t am_hreq; | 580 | xfs_fsop_attrmulti_handlereq_t am_hreq; |
600 | struct inode *inode; | 581 | struct inode *inode; |
601 | bhv_vnode_t *vp; | ||
602 | unsigned int i, size; | 582 | unsigned int i, size; |
603 | char *attr_name; | 583 | char *attr_name; |
604 | 584 | ||
@@ -607,7 +587,7 @@ xfs_attrmulti_by_handle( | |||
607 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) | 587 | if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) |
608 | return -XFS_ERROR(EFAULT); | 588 | return -XFS_ERROR(EFAULT); |
609 | 589 | ||
610 | error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode); | 590 | error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode); |
611 | if (error) | 591 | if (error) |
612 | goto out; | 592 | goto out; |
613 | 593 | ||
@@ -666,7 +646,7 @@ xfs_attrmulti_by_handle( | |||
666 | out_kfree_ops: | 646 | out_kfree_ops: |
667 | kfree(ops); | 647 | kfree(ops); |
668 | out_vn_rele: | 648 | out_vn_rele: |
669 | VN_RELE(vp); | 649 | iput(inode); |
670 | out: | 650 | out: |
671 | return -error; | 651 | return -error; |
672 | } | 652 | } |
@@ -702,7 +682,6 @@ xfs_ioc_fsgeometry( | |||
702 | 682 | ||
703 | STATIC int | 683 | STATIC int |
704 | xfs_ioc_xattr( | 684 | xfs_ioc_xattr( |
705 | bhv_vnode_t *vp, | ||
706 | xfs_inode_t *ip, | 685 | xfs_inode_t *ip, |
707 | struct file *filp, | 686 | struct file *filp, |
708 | unsigned int cmd, | 687 | unsigned int cmd, |
@@ -735,12 +714,10 @@ xfs_ioctl( | |||
735 | void __user *arg) | 714 | void __user *arg) |
736 | { | 715 | { |
737 | struct inode *inode = filp->f_path.dentry->d_inode; | 716 | struct inode *inode = filp->f_path.dentry->d_inode; |
738 | bhv_vnode_t *vp = vn_from_inode(inode); | ||
739 | xfs_mount_t *mp = ip->i_mount; | 717 | xfs_mount_t *mp = ip->i_mount; |
740 | int error; | 718 | int error; |
741 | 719 | ||
742 | vn_trace_entry(XFS_I(inode), "xfs_ioctl", (inst_t *)__return_address); | 720 | xfs_itrace_entry(XFS_I(inode)); |
743 | |||
744 | switch (cmd) { | 721 | switch (cmd) { |
745 | 722 | ||
746 | case XFS_IOC_ALLOCSP: | 723 | case XFS_IOC_ALLOCSP: |
@@ -764,7 +741,7 @@ xfs_ioctl( | |||
764 | case XFS_IOC_DIOINFO: { | 741 | case XFS_IOC_DIOINFO: { |
765 | struct dioattr da; | 742 | struct dioattr da; |
766 | xfs_buftarg_t *target = | 743 | xfs_buftarg_t *target = |
767 | (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? | 744 | XFS_IS_REALTIME_INODE(ip) ? |
768 | mp->m_rtdev_targp : mp->m_ddev_targp; | 745 | mp->m_rtdev_targp : mp->m_ddev_targp; |
769 | 746 | ||
770 | da.d_mem = da.d_miniosz = 1 << target->bt_sshift; | 747 | da.d_mem = da.d_miniosz = 1 << target->bt_sshift; |
@@ -796,7 +773,7 @@ xfs_ioctl( | |||
796 | case XFS_IOC_GETXFLAGS: | 773 | case XFS_IOC_GETXFLAGS: |
797 | case XFS_IOC_SETXFLAGS: | 774 | case XFS_IOC_SETXFLAGS: |
798 | case XFS_IOC_FSSETXATTR: | 775 | case XFS_IOC_FSSETXATTR: |
799 | return xfs_ioc_xattr(vp, ip, filp, cmd, arg); | 776 | return xfs_ioc_xattr(ip, filp, cmd, arg); |
800 | 777 | ||
801 | case XFS_IOC_FSSETDM: { | 778 | case XFS_IOC_FSSETDM: { |
802 | struct fsdmidata dmi; | 779 | struct fsdmidata dmi; |
@@ -1203,7 +1180,6 @@ xfs_ioc_fsgetxattr( | |||
1203 | 1180 | ||
1204 | STATIC int | 1181 | STATIC int |
1205 | xfs_ioc_xattr( | 1182 | xfs_ioc_xattr( |
1206 | bhv_vnode_t *vp, | ||
1207 | xfs_inode_t *ip, | 1183 | xfs_inode_t *ip, |
1208 | struct file *filp, | 1184 | struct file *filp, |
1209 | unsigned int cmd, | 1185 | unsigned int cmd, |
@@ -1237,7 +1213,7 @@ xfs_ioc_xattr( | |||
1237 | 1213 | ||
1238 | error = xfs_setattr(ip, vattr, attr_flags, NULL); | 1214 | error = xfs_setattr(ip, vattr, attr_flags, NULL); |
1239 | if (likely(!error)) | 1215 | if (likely(!error)) |
1240 | __vn_revalidate(vp, vattr); /* update flags */ | 1216 | vn_revalidate(XFS_ITOV(ip)); /* update flags */ |
1241 | error = -error; | 1217 | error = -error; |
1242 | break; | 1218 | break; |
1243 | } | 1219 | } |
@@ -1272,7 +1248,7 @@ xfs_ioc_xattr( | |||
1272 | 1248 | ||
1273 | error = xfs_setattr(ip, vattr, attr_flags, NULL); | 1249 | error = xfs_setattr(ip, vattr, attr_flags, NULL); |
1274 | if (likely(!error)) | 1250 | if (likely(!error)) |
1275 | __vn_revalidate(vp, vattr); /* update flags */ | 1251 | vn_revalidate(XFS_ITOV(ip)); /* update flags */ |
1276 | error = -error; | 1252 | error = -error; |
1277 | break; | 1253 | break; |
1278 | } | 1254 | } |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index bf2a956b63c2..a4b254eb43b2 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include "xfs_error.h" | 44 | #include "xfs_error.h" |
45 | #include "xfs_dfrag.h" | 45 | #include "xfs_dfrag.h" |
46 | #include "xfs_vnodeops.h" | 46 | #include "xfs_vnodeops.h" |
47 | #include "xfs_ioctl32.h" | ||
47 | 48 | ||
48 | #define _NATIVE_IOC(cmd, type) \ | 49 | #define _NATIVE_IOC(cmd, type) \ |
49 | _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) | 50 | _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) |
@@ -379,9 +380,6 @@ xfs_compat_ioctl( | |||
379 | switch (cmd) { | 380 | switch (cmd) { |
380 | case XFS_IOC_DIOINFO: | 381 | case XFS_IOC_DIOINFO: |
381 | case XFS_IOC_FSGEOMETRY: | 382 | case XFS_IOC_FSGEOMETRY: |
382 | case XFS_IOC_GETVERSION: | ||
383 | case XFS_IOC_GETXFLAGS: | ||
384 | case XFS_IOC_SETXFLAGS: | ||
385 | case XFS_IOC_FSGETXATTR: | 383 | case XFS_IOC_FSGETXATTR: |
386 | case XFS_IOC_FSSETXATTR: | 384 | case XFS_IOC_FSSETXATTR: |
387 | case XFS_IOC_FSGETXATTRA: | 385 | case XFS_IOC_FSGETXATTRA: |
@@ -407,6 +405,11 @@ xfs_compat_ioctl( | |||
407 | case XFS_IOC_ERROR_CLEARALL: | 405 | case XFS_IOC_ERROR_CLEARALL: |
408 | break; | 406 | break; |
409 | 407 | ||
408 | case XFS_IOC32_GETXFLAGS: | ||
409 | case XFS_IOC32_SETXFLAGS: | ||
410 | case XFS_IOC32_GETVERSION: | ||
411 | cmd = _NATIVE_IOC(cmd, long); | ||
412 | break; | ||
410 | #ifdef BROKEN_X86_ALIGNMENT | 413 | #ifdef BROKEN_X86_ALIGNMENT |
411 | /* xfs_flock_t has wrong u32 vs u64 alignment */ | 414 | /* xfs_flock_t has wrong u32 vs u64 alignment */ |
412 | case XFS_IOC_ALLOCSP_32: | 415 | case XFS_IOC_ALLOCSP_32: |
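
The compat change above stops passing GETXFLAGS/SETXFLAGS/GETVERSION straight through and instead re-encodes them with _NATIVE_IOC(cmd, long), because the ioctl number embeds sizeof(long) and therefore differs between a 32-bit caller and a 64-bit kernel. A small standalone sketch of that re-encoding, using a hypothetical DEMO_GETFLAGS command rather than the real XFS numbers:

    #include <stdio.h>
    #include <linux/ioctl.h>

    /* same helper as in xfs_ioctl32.c: rebuild the number with the native size */
    #define _NATIVE_IOC(cmd, type) \
    	_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))

    /* hypothetical command for illustration; the real values live in xfs_fs.h */
    #define DEMO_GETFLAGS(type)	_IOR('f', 1, type)

    int main(void)
    {
    	unsigned int cmd32 = DEMO_GETFLAGS(int);	/* as encoded by a 32-bit caller */
    	unsigned int native = _NATIVE_IOC(cmd32, long);	/* what a 64-bit kernel expects */

    	printf("compat 0x%x -> native 0x%x\n", cmd32, native);
    	return 0;
    }
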
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 5e8bb7f71b5a..cc4abd3daa49 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/xattr.h> | 52 | #include <linux/xattr.h> |
53 | #include <linux/namei.h> | 53 | #include <linux/namei.h> |
54 | #include <linux/security.h> | 54 | #include <linux/security.h> |
55 | #include <linux/falloc.h> | ||
55 | 56 | ||
56 | /* | 57 | /* |
57 | * Bring the atime in the XFS inode uptodate. | 58 | * Bring the atime in the XFS inode uptodate. |
@@ -71,6 +72,22 @@ xfs_synchronize_atime( | |||
71 | } | 72 | } |
72 | 73 | ||
73 | /* | 74 | /* |
75 | * If the linux inode exists, mark it dirty. | ||
76 | * Used when committing a dirty inode into a transaction so that | ||
77 | * the inode will get written back by the linux code | ||
78 | */ | ||
79 | void | ||
80 | xfs_mark_inode_dirty_sync( | ||
81 | xfs_inode_t *ip) | ||
82 | { | ||
83 | bhv_vnode_t *vp; | ||
84 | |||
85 | vp = XFS_ITOV_NULL(ip); | ||
86 | if (vp) | ||
87 | mark_inode_dirty_sync(vn_to_inode(vp)); | ||
88 | } | ||
89 | |||
90 | /* | ||
74 | * Change the requested timestamp in the given inode. | 91 | * Change the requested timestamp in the given inode. |
75 | * We don't lock across timestamp updates, and we don't log them but | 92 | * We don't lock across timestamp updates, and we don't log them but |
76 | * we do record the fact that there is dirty information in core. | 93 | * we do record the fact that there is dirty information in core. |
@@ -184,10 +201,6 @@ xfs_validate_fields( | |||
184 | struct xfs_inode *ip = XFS_I(inode); | 201 | struct xfs_inode *ip = XFS_I(inode); |
185 | loff_t size; | 202 | loff_t size; |
186 | 203 | ||
187 | inode->i_nlink = ip->i_d.di_nlink; | ||
188 | inode->i_blocks = | ||
189 | XFS_FSB_TO_BB(ip->i_mount, ip->i_d.di_nblocks + | ||
190 | ip->i_delayed_blks); | ||
191 | /* we're under i_sem so i_size can't change under us */ | 204 | /* we're under i_sem so i_size can't change under us */ |
192 | size = XFS_ISIZE(ip); | 205 | size = XFS_ISIZE(ip); |
193 | if (i_size_read(inode) != size) | 206 | if (i_size_read(inode) != size) |
@@ -542,12 +555,31 @@ xfs_vn_put_link( | |||
542 | 555 | ||
543 | #ifdef CONFIG_XFS_POSIX_ACL | 556 | #ifdef CONFIG_XFS_POSIX_ACL |
544 | STATIC int | 557 | STATIC int |
558 | xfs_check_acl( | ||
559 | struct inode *inode, | ||
560 | int mask) | ||
561 | { | ||
562 | struct xfs_inode *ip = XFS_I(inode); | ||
563 | int error; | ||
564 | |||
565 | xfs_itrace_entry(ip); | ||
566 | |||
567 | if (XFS_IFORK_Q(ip)) { | ||
568 | error = xfs_acl_iaccess(ip, mask, NULL); | ||
569 | if (error != -1) | ||
570 | return -error; | ||
571 | } | ||
572 | |||
573 | return -EAGAIN; | ||
574 | } | ||
575 | |||
576 | STATIC int | ||
545 | xfs_vn_permission( | 577 | xfs_vn_permission( |
546 | struct inode *inode, | 578 | struct inode *inode, |
547 | int mode, | 579 | int mask, |
548 | struct nameidata *nd) | 580 | struct nameidata *nd) |
549 | { | 581 | { |
550 | return -xfs_access(XFS_I(inode), mode << 6, NULL); | 582 | return generic_permission(inode, mask, xfs_check_acl); |
551 | } | 583 | } |
552 | #else | 584 | #else |
553 | #define xfs_vn_permission NULL | 585 | #define xfs_vn_permission NULL |
@@ -555,33 +587,61 @@ xfs_vn_permission( | |||
555 | 587 | ||
556 | STATIC int | 588 | STATIC int |
557 | xfs_vn_getattr( | 589 | xfs_vn_getattr( |
558 | struct vfsmount *mnt, | 590 | struct vfsmount *mnt, |
559 | struct dentry *dentry, | 591 | struct dentry *dentry, |
560 | struct kstat *stat) | 592 | struct kstat *stat) |
561 | { | 593 | { |
562 | struct inode *inode = dentry->d_inode; | 594 | struct inode *inode = dentry->d_inode; |
563 | bhv_vattr_t vattr = { .va_mask = XFS_AT_STAT }; | 595 | struct xfs_inode *ip = XFS_I(inode); |
564 | int error; | 596 | struct xfs_mount *mp = ip->i_mount; |
565 | 597 | ||
566 | error = xfs_getattr(XFS_I(inode), &vattr, ATTR_LAZY); | 598 | xfs_itrace_entry(ip); |
567 | if (likely(!error)) { | 599 | |
568 | stat->size = i_size_read(inode); | 600 | if (XFS_FORCED_SHUTDOWN(mp)) |
569 | stat->dev = inode->i_sb->s_dev; | 601 | return XFS_ERROR(EIO); |
570 | stat->rdev = (vattr.va_rdev == 0) ? 0 : | 602 | |
571 | MKDEV(sysv_major(vattr.va_rdev) & 0x1ff, | 603 | stat->size = XFS_ISIZE(ip); |
572 | sysv_minor(vattr.va_rdev)); | 604 | stat->dev = inode->i_sb->s_dev; |
573 | stat->mode = vattr.va_mode; | 605 | stat->mode = ip->i_d.di_mode; |
574 | stat->nlink = vattr.va_nlink; | 606 | stat->nlink = ip->i_d.di_nlink; |
575 | stat->uid = vattr.va_uid; | 607 | stat->uid = ip->i_d.di_uid; |
576 | stat->gid = vattr.va_gid; | 608 | stat->gid = ip->i_d.di_gid; |
577 | stat->ino = vattr.va_nodeid; | 609 | stat->ino = ip->i_ino; |
578 | stat->atime = vattr.va_atime; | 610 | #if XFS_BIG_INUMS |
579 | stat->mtime = vattr.va_mtime; | 611 | stat->ino += mp->m_inoadd; |
580 | stat->ctime = vattr.va_ctime; | 612 | #endif |
581 | stat->blocks = vattr.va_nblocks; | 613 | stat->atime = inode->i_atime; |
582 | stat->blksize = vattr.va_blocksize; | 614 | stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec; |
615 | stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; | ||
616 | stat->ctime.tv_sec = ip->i_d.di_ctime.t_sec; | ||
617 | stat->ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; | ||
618 | stat->blocks = | ||
619 | XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); | ||
620 | |||
621 | |||
622 | switch (inode->i_mode & S_IFMT) { | ||
623 | case S_IFBLK: | ||
624 | case S_IFCHR: | ||
625 | stat->blksize = BLKDEV_IOSIZE; | ||
626 | stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, | ||
627 | sysv_minor(ip->i_df.if_u2.if_rdev)); | ||
628 | break; | ||
629 | default: | ||
630 | if (XFS_IS_REALTIME_INODE(ip)) { | ||
631 | /* | ||
632 | * If the file blocks are being allocated from a | ||
633 | * realtime volume, then return the inode's realtime | ||
634 | * extent size or the realtime volume's extent size. | ||
635 | */ | ||
636 | stat->blksize = | ||
637 | xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; | ||
638 | } else | ||
639 | stat->blksize = xfs_preferred_iosize(mp); | ||
640 | stat->rdev = 0; | ||
641 | break; | ||
583 | } | 642 | } |
584 | return -error; | 643 | |
644 | return 0; | ||
585 | } | 645 | } |
586 | 646 | ||
587 | STATIC int | 647 | STATIC int |
@@ -636,7 +696,7 @@ xfs_vn_setattr( | |||
636 | 696 | ||
637 | error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL); | 697 | error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL); |
638 | if (likely(!error)) | 698 | if (likely(!error)) |
639 | __vn_revalidate(vn_from_inode(inode), &vattr); | 699 | vn_revalidate(vn_from_inode(inode)); |
640 | return -error; | 700 | return -error; |
641 | } | 701 | } |
642 | 702 | ||
@@ -750,6 +810,47 @@ xfs_vn_removexattr( | |||
750 | return namesp->attr_remove(vp, attr, xflags); | 810 | return namesp->attr_remove(vp, attr, xflags); |
751 | } | 811 | } |
752 | 812 | ||
813 | STATIC long | ||
814 | xfs_vn_fallocate( | ||
815 | struct inode *inode, | ||
816 | int mode, | ||
817 | loff_t offset, | ||
818 | loff_t len) | ||
819 | { | ||
820 | long error; | ||
821 | loff_t new_size = 0; | ||
822 | xfs_flock64_t bf; | ||
823 | xfs_inode_t *ip = XFS_I(inode); | ||
824 | |||
825 | /* preallocation on directories not yet supported */ | ||
826 | error = -ENODEV; | ||
827 | if (S_ISDIR(inode->i_mode)) | ||
828 | goto out_error; | ||
829 | |||
830 | bf.l_whence = 0; | ||
831 | bf.l_start = offset; | ||
832 | bf.l_len = len; | ||
833 | |||
834 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | ||
835 | error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, | ||
836 | 0, NULL, ATTR_NOLOCK); | ||
837 | if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && | ||
838 | offset + len > i_size_read(inode)) | ||
839 | new_size = offset + len; | ||
840 | |||
841 | /* Change file size if needed */ | ||
842 | if (new_size) { | ||
843 | bhv_vattr_t va; | ||
844 | |||
845 | va.va_mask = XFS_AT_SIZE; | ||
846 | va.va_size = new_size; | ||
847 | error = xfs_setattr(ip, &va, ATTR_NOLOCK, NULL); | ||
848 | } | ||
849 | |||
850 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
851 | out_error: | ||
852 | return error; | ||
853 | } | ||
753 | 854 | ||
754 | const struct inode_operations xfs_inode_operations = { | 855 | const struct inode_operations xfs_inode_operations = { |
755 | .permission = xfs_vn_permission, | 856 | .permission = xfs_vn_permission, |
@@ -760,6 +861,7 @@ const struct inode_operations xfs_inode_operations = { | |||
760 | .getxattr = xfs_vn_getxattr, | 861 | .getxattr = xfs_vn_getxattr, |
761 | .listxattr = xfs_vn_listxattr, | 862 | .listxattr = xfs_vn_listxattr, |
762 | .removexattr = xfs_vn_removexattr, | 863 | .removexattr = xfs_vn_removexattr, |
864 | .fallocate = xfs_vn_fallocate, | ||
763 | }; | 865 | }; |
764 | 866 | ||
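
The new xfs_vn_fallocate above wires the VFS ->fallocate operation to XFS_IOC_RESVSP-style space reservation, growing the file only when FALLOC_FL_KEEP_SIZE is not set. A minimal userspace sketch that exercises it, assuming a libc exposing the fallocate(2) wrapper; the file name is made up for the example:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("testfile", O_RDWR | O_CREAT, 0644);	/* hypothetical path */

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* reserve 1 MiB of blocks without moving i_size (XFS_IOC_RESVSP under the hood) */
    	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
    		perror("fallocate");
    	close(fd);
    	return 0;
    }
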
765 | const struct inode_operations xfs_dir_inode_operations = { | 867 | const struct inode_operations xfs_dir_inode_operations = { |
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index dc3752de22da..3ca39c4e5d2a 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h | |||
@@ -43,7 +43,6 @@ | |||
43 | 43 | ||
44 | #include <kmem.h> | 44 | #include <kmem.h> |
45 | #include <mrlock.h> | 45 | #include <mrlock.h> |
46 | #include <spin.h> | ||
47 | #include <sv.h> | 46 | #include <sv.h> |
48 | #include <mutex.h> | 47 | #include <mutex.h> |
49 | #include <sema.h> | 48 | #include <sema.h> |
@@ -75,6 +74,7 @@ | |||
75 | #include <linux/notifier.h> | 74 | #include <linux/notifier.h> |
76 | #include <linux/delay.h> | 75 | #include <linux/delay.h> |
77 | #include <linux/log2.h> | 76 | #include <linux/log2.h> |
77 | #include <linux/spinlock.h> | ||
78 | 78 | ||
79 | #include <asm/page.h> | 79 | #include <asm/page.h> |
80 | #include <asm/div64.h> | 80 | #include <asm/div64.h> |
@@ -136,43 +136,19 @@ | |||
136 | #define current_restore_flags_nested(sp, f) \ | 136 | #define current_restore_flags_nested(sp, f) \ |
137 | (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) | 137 | (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) |
138 | 138 | ||
139 | #define NBPP PAGE_SIZE | 139 | #define spinlock_destroy(lock) |
140 | #define NDPP (1 << (PAGE_SHIFT - 9)) | ||
141 | 140 | ||
142 | #define NBBY 8 /* number of bits per byte */ | 141 | #define NBBY 8 /* number of bits per byte */ |
143 | #define NBPC PAGE_SIZE /* Number of bytes per click */ | ||
144 | #define BPCSHIFT PAGE_SHIFT /* LOG2(NBPC) if exact */ | ||
145 | 142 | ||
146 | /* | 143 | /* |
147 | * Size of block device i/o is parameterized here. | 144 | * Size of block device i/o is parameterized here. |
148 | * Currently the system supports page-sized i/o. | 145 | * Currently the system supports page-sized i/o. |
149 | */ | 146 | */ |
150 | #define BLKDEV_IOSHIFT BPCSHIFT | 147 | #define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT |
151 | #define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) | 148 | #define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) |
152 | /* number of BB's per block device block */ | 149 | /* number of BB's per block device block */ |
153 | #define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) | 150 | #define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) |
154 | 151 | ||
155 | /* bytes to clicks */ | ||
156 | #define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT) | ||
157 | #define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) | ||
158 | #define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) | ||
159 | #define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT) | ||
160 | |||
161 | /* off_t bytes to clicks */ | ||
162 | #define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) | ||
163 | #define offtoct(x) ((xfs_off_t)(x)>>BPCSHIFT) | ||
164 | |||
165 | /* clicks to off_t bytes */ | ||
166 | #define ctooff(x) ((xfs_off_t)(x)<<BPCSHIFT) | ||
167 | |||
168 | /* clicks to bytes */ | ||
169 | #define ctob(x) ((__psunsigned_t)(x)<<BPCSHIFT) | ||
170 | #define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) | ||
171 | #define ctob64(x) ((__uint64_t)(x)<<BPCSHIFT) | ||
172 | |||
173 | /* bytes to clicks */ | ||
174 | #define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT) | ||
175 | |||
176 | #define ENOATTR ENODATA /* Attribute not found */ | 152 | #define ENOATTR ENODATA /* Attribute not found */ |
177 | #define EWRONGFS EINVAL /* Mount with wrong filesystem type */ | 153 | #define EWRONGFS EINVAL /* Mount with wrong filesystem type */ |
178 | #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ | 154 | #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ |
@@ -205,10 +181,6 @@ | |||
205 | #define xfs_stack_trace() dump_stack() | 181 | #define xfs_stack_trace() dump_stack() |
206 | #define xfs_itruncate_data(ip, off) \ | 182 | #define xfs_itruncate_data(ip, off) \ |
207 | (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) | 183 | (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) |
208 | #define xfs_statvfs_fsid(statp, mp) \ | ||
209 | ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ | ||
210 | __kernel_fsid_t *fsid = &(statp)->f_fsid; \ | ||
211 | (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); }) | ||
212 | 184 | ||
213 | 185 | ||
214 | /* Move the kernel do_div definition off to one side */ | 186 | /* Move the kernel do_div definition off to one side */ |
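
The retired byte/click macros above (btoc, offtoct, ctooff and friends) were only used to round file offsets down to a page boundary; the xfs_lrw.c hunks below now spell that directly as (pos & PAGE_CACHE_MASK). A tiny sketch showing the two forms agree, assuming 4 KiB pages and demo_ stand-ins for the kernel constants:

    #include <stdio.h>

    #define DEMO_PAGE_SHIFT	12			/* stand-in for PAGE_SHIFT */
    #define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
    #define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))	/* stand-in for PAGE_CACHE_MASK */

    /* old style: bytes -> clicks -> bytes */
    #define demo_offtoct(x)	((unsigned long)(x) >> DEMO_PAGE_SHIFT)
    #define demo_ctooff(x)	((unsigned long)(x) << DEMO_PAGE_SHIFT)

    int main(void)
    {
    	unsigned long pos = 123456;

    	printf("old: %lu  new: %lu\n",
    	       demo_ctooff(demo_offtoct(pos)),	/* retired xfs_linux.h macros */
    	       pos & DEMO_PAGE_MASK);		/* replacement expression */
    	return 0;
    }
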
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 6f614f35f650..166353388490 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c | |||
@@ -58,14 +58,12 @@ | |||
58 | void | 58 | void |
59 | xfs_rw_enter_trace( | 59 | xfs_rw_enter_trace( |
60 | int tag, | 60 | int tag, |
61 | xfs_iocore_t *io, | 61 | xfs_inode_t *ip, |
62 | void *data, | 62 | void *data, |
63 | size_t segs, | 63 | size_t segs, |
64 | loff_t offset, | 64 | loff_t offset, |
65 | int ioflags) | 65 | int ioflags) |
66 | { | 66 | { |
67 | xfs_inode_t *ip = XFS_IO_INODE(io); | ||
68 | |||
69 | if (ip->i_rwtrace == NULL) | 67 | if (ip->i_rwtrace == NULL) |
70 | return; | 68 | return; |
71 | ktrace_enter(ip->i_rwtrace, | 69 | ktrace_enter(ip->i_rwtrace, |
@@ -78,8 +76,8 @@ xfs_rw_enter_trace( | |||
78 | (void *)((unsigned long)((offset >> 32) & 0xffffffff)), | 76 | (void *)((unsigned long)((offset >> 32) & 0xffffffff)), |
79 | (void *)((unsigned long)(offset & 0xffffffff)), | 77 | (void *)((unsigned long)(offset & 0xffffffff)), |
80 | (void *)((unsigned long)ioflags), | 78 | (void *)((unsigned long)ioflags), |
81 | (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), | 79 | (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), |
82 | (void *)((unsigned long)(io->io_new_size & 0xffffffff)), | 80 | (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), |
83 | (void *)((unsigned long)current_pid()), | 81 | (void *)((unsigned long)current_pid()), |
84 | (void *)NULL, | 82 | (void *)NULL, |
85 | (void *)NULL, | 83 | (void *)NULL, |
@@ -89,13 +87,12 @@ xfs_rw_enter_trace( | |||
89 | 87 | ||
90 | void | 88 | void |
91 | xfs_inval_cached_trace( | 89 | xfs_inval_cached_trace( |
92 | xfs_iocore_t *io, | 90 | xfs_inode_t *ip, |
93 | xfs_off_t offset, | 91 | xfs_off_t offset, |
94 | xfs_off_t len, | 92 | xfs_off_t len, |
95 | xfs_off_t first, | 93 | xfs_off_t first, |
96 | xfs_off_t last) | 94 | xfs_off_t last) |
97 | { | 95 | { |
98 | xfs_inode_t *ip = XFS_IO_INODE(io); | ||
99 | 96 | ||
100 | if (ip->i_rwtrace == NULL) | 97 | if (ip->i_rwtrace == NULL) |
101 | return; | 98 | return; |
@@ -131,7 +128,7 @@ xfs_inval_cached_trace( | |||
131 | */ | 128 | */ |
132 | STATIC int | 129 | STATIC int |
133 | xfs_iozero( | 130 | xfs_iozero( |
134 | struct inode *ip, /* inode */ | 131 | struct xfs_inode *ip, /* inode */ |
135 | loff_t pos, /* offset in file */ | 132 | loff_t pos, /* offset in file */ |
136 | size_t count) /* size of data to zero */ | 133 | size_t count) /* size of data to zero */ |
137 | { | 134 | { |
@@ -139,7 +136,7 @@ xfs_iozero( | |||
139 | struct address_space *mapping; | 136 | struct address_space *mapping; |
140 | int status; | 137 | int status; |
141 | 138 | ||
142 | mapping = ip->i_mapping; | 139 | mapping = ip->i_vnode->i_mapping; |
143 | do { | 140 | do { |
144 | unsigned offset, bytes; | 141 | unsigned offset, bytes; |
145 | void *fsdata; | 142 | void *fsdata; |
@@ -205,7 +202,7 @@ xfs_read( | |||
205 | 202 | ||
206 | if (unlikely(ioflags & IO_ISDIRECT)) { | 203 | if (unlikely(ioflags & IO_ISDIRECT)) { |
207 | xfs_buftarg_t *target = | 204 | xfs_buftarg_t *target = |
208 | (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? | 205 | XFS_IS_REALTIME_INODE(ip) ? |
209 | mp->m_rtdev_targp : mp->m_ddev_targp; | 206 | mp->m_rtdev_targp : mp->m_ddev_targp; |
210 | if ((*offset & target->bt_smask) || | 207 | if ((*offset & target->bt_smask) || |
211 | (size & target->bt_smask)) { | 208 | (size & target->bt_smask)) { |
@@ -246,9 +243,8 @@ xfs_read( | |||
246 | 243 | ||
247 | if (unlikely(ioflags & IO_ISDIRECT)) { | 244 | if (unlikely(ioflags & IO_ISDIRECT)) { |
248 | if (VN_CACHED(vp)) | 245 | if (VN_CACHED(vp)) |
249 | ret = xfs_flushinval_pages(ip, | 246 | ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), |
250 | ctooff(offtoct(*offset)), | 247 | -1, FI_REMAPF_LOCKED); |
251 | -1, FI_REMAPF_LOCKED); | ||
252 | mutex_unlock(&inode->i_mutex); | 248 | mutex_unlock(&inode->i_mutex); |
253 | if (ret) { | 249 | if (ret) { |
254 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 250 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
@@ -256,7 +252,7 @@ xfs_read( | |||
256 | } | 252 | } |
257 | } | 253 | } |
258 | 254 | ||
259 | xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, | 255 | xfs_rw_enter_trace(XFS_READ_ENTER, ip, |
260 | (void *)iovp, segs, *offset, ioflags); | 256 | (void *)iovp, segs, *offset, ioflags); |
261 | 257 | ||
262 | iocb->ki_pos = *offset; | 258 | iocb->ki_pos = *offset; |
@@ -301,7 +297,7 @@ xfs_splice_read( | |||
301 | return -error; | 297 | return -error; |
302 | } | 298 | } |
303 | } | 299 | } |
304 | xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore, | 300 | xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip, |
305 | pipe, count, *ppos, ioflags); | 301 | pipe, count, *ppos, ioflags); |
306 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); | 302 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); |
307 | if (ret > 0) | 303 | if (ret > 0) |
@@ -323,7 +319,6 @@ xfs_splice_write( | |||
323 | { | 319 | { |
324 | bhv_vnode_t *vp = XFS_ITOV(ip); | 320 | bhv_vnode_t *vp = XFS_ITOV(ip); |
325 | xfs_mount_t *mp = ip->i_mount; | 321 | xfs_mount_t *mp = ip->i_mount; |
326 | xfs_iocore_t *io = &ip->i_iocore; | ||
327 | ssize_t ret; | 322 | ssize_t ret; |
328 | struct inode *inode = outfilp->f_mapping->host; | 323 | struct inode *inode = outfilp->f_mapping->host; |
329 | xfs_fsize_t isize, new_size; | 324 | xfs_fsize_t isize, new_size; |
@@ -350,10 +345,10 @@ xfs_splice_write( | |||
350 | 345 | ||
351 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 346 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
352 | if (new_size > ip->i_size) | 347 | if (new_size > ip->i_size) |
353 | io->io_new_size = new_size; | 348 | ip->i_new_size = new_size; |
354 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 349 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
355 | 350 | ||
356 | xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore, | 351 | xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip, |
357 | pipe, count, *ppos, ioflags); | 352 | pipe, count, *ppos, ioflags); |
358 | ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); | 353 | ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); |
359 | if (ret > 0) | 354 | if (ret > 0) |
@@ -370,9 +365,9 @@ xfs_splice_write( | |||
370 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 365 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
371 | } | 366 | } |
372 | 367 | ||
373 | if (io->io_new_size) { | 368 | if (ip->i_new_size) { |
374 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 369 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
375 | io->io_new_size = 0; | 370 | ip->i_new_size = 0; |
376 | if (ip->i_d.di_size > ip->i_size) | 371 | if (ip->i_d.di_size > ip->i_size) |
377 | ip->i_d.di_size = ip->i_size; | 372 | ip->i_d.di_size = ip->i_size; |
378 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 373 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
@@ -389,20 +384,19 @@ xfs_splice_write( | |||
389 | */ | 384 | */ |
390 | STATIC int /* error (positive) */ | 385 | STATIC int /* error (positive) */ |
391 | xfs_zero_last_block( | 386 | xfs_zero_last_block( |
392 | struct inode *ip, | 387 | xfs_inode_t *ip, |
393 | xfs_iocore_t *io, | ||
394 | xfs_fsize_t offset, | 388 | xfs_fsize_t offset, |
395 | xfs_fsize_t isize) | 389 | xfs_fsize_t isize) |
396 | { | 390 | { |
397 | xfs_fileoff_t last_fsb; | 391 | xfs_fileoff_t last_fsb; |
398 | xfs_mount_t *mp = io->io_mount; | 392 | xfs_mount_t *mp = ip->i_mount; |
399 | int nimaps; | 393 | int nimaps; |
400 | int zero_offset; | 394 | int zero_offset; |
401 | int zero_len; | 395 | int zero_len; |
402 | int error = 0; | 396 | int error = 0; |
403 | xfs_bmbt_irec_t imap; | 397 | xfs_bmbt_irec_t imap; |
404 | 398 | ||
405 | ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); | 399 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); |
406 | 400 | ||
407 | zero_offset = XFS_B_FSB_OFFSET(mp, isize); | 401 | zero_offset = XFS_B_FSB_OFFSET(mp, isize); |
408 | if (zero_offset == 0) { | 402 | if (zero_offset == 0) { |
@@ -415,7 +409,7 @@ xfs_zero_last_block( | |||
415 | 409 | ||
416 | last_fsb = XFS_B_TO_FSBT(mp, isize); | 410 | last_fsb = XFS_B_TO_FSBT(mp, isize); |
417 | nimaps = 1; | 411 | nimaps = 1; |
418 | error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap, | 412 | error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, |
419 | &nimaps, NULL, NULL); | 413 | &nimaps, NULL, NULL); |
420 | if (error) { | 414 | if (error) { |
421 | return error; | 415 | return error; |
@@ -433,14 +427,14 @@ xfs_zero_last_block( | |||
433 | * out sync. We need to drop the ilock while we do this so we | 427 | * out sync. We need to drop the ilock while we do this so we |
434 | * don't deadlock when the buffer cache calls back to us. | 428 | * don't deadlock when the buffer cache calls back to us. |
435 | */ | 429 | */ |
436 | XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); | 430 | xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); |
437 | 431 | ||
438 | zero_len = mp->m_sb.sb_blocksize - zero_offset; | 432 | zero_len = mp->m_sb.sb_blocksize - zero_offset; |
439 | if (isize + zero_len > offset) | 433 | if (isize + zero_len > offset) |
440 | zero_len = offset - isize; | 434 | zero_len = offset - isize; |
441 | error = xfs_iozero(ip, isize, zero_len); | 435 | error = xfs_iozero(ip, isize, zero_len); |
442 | 436 | ||
443 | XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); | 437 | xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); |
444 | ASSERT(error >= 0); | 438 | ASSERT(error >= 0); |
445 | return error; | 439 | return error; |
446 | } | 440 | } |
@@ -458,35 +452,33 @@ xfs_zero_last_block( | |||
458 | 452 | ||
459 | int /* error (positive) */ | 453 | int /* error (positive) */ |
460 | xfs_zero_eof( | 454 | xfs_zero_eof( |
461 | bhv_vnode_t *vp, | 455 | xfs_inode_t *ip, |
462 | xfs_iocore_t *io, | ||
463 | xfs_off_t offset, /* starting I/O offset */ | 456 | xfs_off_t offset, /* starting I/O offset */ |
464 | xfs_fsize_t isize) /* current inode size */ | 457 | xfs_fsize_t isize) /* current inode size */ |
465 | { | 458 | { |
466 | struct inode *ip = vn_to_inode(vp); | 459 | xfs_mount_t *mp = ip->i_mount; |
467 | xfs_fileoff_t start_zero_fsb; | 460 | xfs_fileoff_t start_zero_fsb; |
468 | xfs_fileoff_t end_zero_fsb; | 461 | xfs_fileoff_t end_zero_fsb; |
469 | xfs_fileoff_t zero_count_fsb; | 462 | xfs_fileoff_t zero_count_fsb; |
470 | xfs_fileoff_t last_fsb; | 463 | xfs_fileoff_t last_fsb; |
471 | xfs_fileoff_t zero_off; | 464 | xfs_fileoff_t zero_off; |
472 | xfs_fsize_t zero_len; | 465 | xfs_fsize_t zero_len; |
473 | xfs_mount_t *mp = io->io_mount; | ||
474 | int nimaps; | 466 | int nimaps; |
475 | int error = 0; | 467 | int error = 0; |
476 | xfs_bmbt_irec_t imap; | 468 | xfs_bmbt_irec_t imap; |
477 | 469 | ||
478 | ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); | 470 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); |
479 | ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); | 471 | ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); |
480 | ASSERT(offset > isize); | 472 | ASSERT(offset > isize); |
481 | 473 | ||
482 | /* | 474 | /* |
483 | * First handle zeroing the block on which isize resides. | 475 | * First handle zeroing the block on which isize resides. |
484 | * We only zero a part of that block so it is handled specially. | 476 | * We only zero a part of that block so it is handled specially. |
485 | */ | 477 | */ |
486 | error = xfs_zero_last_block(ip, io, offset, isize); | 478 | error = xfs_zero_last_block(ip, offset, isize); |
487 | if (error) { | 479 | if (error) { |
488 | ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); | 480 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); |
489 | ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); | 481 | ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); |
490 | return error; | 482 | return error; |
491 | } | 483 | } |
492 | 484 | ||
@@ -514,11 +506,11 @@ xfs_zero_eof( | |||
514 | while (start_zero_fsb <= end_zero_fsb) { | 506 | while (start_zero_fsb <= end_zero_fsb) { |
515 | nimaps = 1; | 507 | nimaps = 1; |
516 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; | 508 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; |
517 | error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb, | 509 | error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, |
518 | 0, NULL, 0, &imap, &nimaps, NULL, NULL); | 510 | 0, NULL, 0, &imap, &nimaps, NULL, NULL); |
519 | if (error) { | 511 | if (error) { |
520 | ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); | 512 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); |
521 | ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); | 513 | ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); |
522 | return error; | 514 | return error; |
523 | } | 515 | } |
524 | ASSERT(nimaps > 0); | 516 | ASSERT(nimaps > 0); |
@@ -542,7 +534,7 @@ xfs_zero_eof( | |||
542 | * Drop the inode lock while we're doing the I/O. | 534 | * Drop the inode lock while we're doing the I/O. |
543 | * We'll still have the iolock to protect us. | 535 | * We'll still have the iolock to protect us. |
544 | */ | 536 | */ |
545 | XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); | 537 | xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); |
546 | 538 | ||
547 | zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); | 539 | zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); |
548 | zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); | 540 | zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); |
@@ -558,14 +550,13 @@ xfs_zero_eof( | |||
558 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | 550 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; |
559 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | 551 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); |
560 | 552 | ||
561 | XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); | 553 | xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); |
562 | } | 554 | } |
563 | 555 | ||
564 | return 0; | 556 | return 0; |
565 | 557 | ||
566 | out_lock: | 558 | out_lock: |
567 | 559 | xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); | |
568 | XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); | ||
569 | ASSERT(error >= 0); | 560 | ASSERT(error >= 0); |
570 | return error; | 561 | return error; |
571 | } | 562 | } |
@@ -587,7 +578,6 @@ xfs_write( | |||
587 | xfs_mount_t *mp; | 578 | xfs_mount_t *mp; |
588 | ssize_t ret = 0, error = 0; | 579 | ssize_t ret = 0, error = 0; |
589 | xfs_fsize_t isize, new_size; | 580 | xfs_fsize_t isize, new_size; |
590 | xfs_iocore_t *io; | ||
591 | int iolock; | 581 | int iolock; |
592 | int eventsent = 0; | 582 | int eventsent = 0; |
593 | bhv_vrwlock_t locktype; | 583 | bhv_vrwlock_t locktype; |
@@ -607,8 +597,7 @@ xfs_write( | |||
607 | if (count == 0) | 597 | if (count == 0) |
608 | return 0; | 598 | return 0; |
609 | 599 | ||
610 | io = &xip->i_iocore; | 600 | mp = xip->i_mount; |
611 | mp = io->io_mount; | ||
612 | 601 | ||
613 | xfs_wait_for_freeze(mp, SB_FREEZE_WRITE); | 602 | xfs_wait_for_freeze(mp, SB_FREEZE_WRITE); |
614 | 603 | ||
@@ -667,7 +656,7 @@ start: | |||
667 | 656 | ||
668 | if (ioflags & IO_ISDIRECT) { | 657 | if (ioflags & IO_ISDIRECT) { |
669 | xfs_buftarg_t *target = | 658 | xfs_buftarg_t *target = |
670 | (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? | 659 | XFS_IS_REALTIME_INODE(xip) ? |
671 | mp->m_rtdev_targp : mp->m_ddev_targp; | 660 | mp->m_rtdev_targp : mp->m_ddev_targp; |
672 | 661 | ||
673 | if ((pos & target->bt_smask) || (count & target->bt_smask)) { | 662 | if ((pos & target->bt_smask) || (count & target->bt_smask)) { |
@@ -688,7 +677,7 @@ start: | |||
688 | 677 | ||
689 | new_size = pos + count; | 678 | new_size = pos + count; |
690 | if (new_size > xip->i_size) | 679 | if (new_size > xip->i_size) |
691 | io->io_new_size = new_size; | 680 | xip->i_new_size = new_size; |
692 | 681 | ||
693 | if (likely(!(ioflags & IO_INVIS))) { | 682 | if (likely(!(ioflags & IO_INVIS))) { |
694 | file_update_time(file); | 683 | file_update_time(file); |
@@ -706,7 +695,7 @@ start: | |||
706 | */ | 695 | */ |
707 | 696 | ||
708 | if (pos > xip->i_size) { | 697 | if (pos > xip->i_size) { |
709 | error = xfs_zero_eof(vp, io, pos, xip->i_size); | 698 | error = xfs_zero_eof(xip, pos, xip->i_size); |
710 | if (error) { | 699 | if (error) { |
711 | xfs_iunlock(xip, XFS_ILOCK_EXCL); | 700 | xfs_iunlock(xip, XFS_ILOCK_EXCL); |
712 | goto out_unlock_internal; | 701 | goto out_unlock_internal; |
@@ -740,10 +729,10 @@ retry: | |||
740 | if ((ioflags & IO_ISDIRECT)) { | 729 | if ((ioflags & IO_ISDIRECT)) { |
741 | if (VN_CACHED(vp)) { | 730 | if (VN_CACHED(vp)) { |
742 | WARN_ON(need_i_mutex == 0); | 731 | WARN_ON(need_i_mutex == 0); |
743 | xfs_inval_cached_trace(io, pos, -1, | 732 | xfs_inval_cached_trace(xip, pos, -1, |
744 | ctooff(offtoct(pos)), -1); | 733 | (pos & PAGE_CACHE_MASK), -1); |
745 | error = xfs_flushinval_pages(xip, | 734 | error = xfs_flushinval_pages(xip, |
746 | ctooff(offtoct(pos)), | 735 | (pos & PAGE_CACHE_MASK), |
747 | -1, FI_REMAPF_LOCKED); | 736 | -1, FI_REMAPF_LOCKED); |
748 | if (error) | 737 | if (error) |
749 | goto out_unlock_internal; | 738 | goto out_unlock_internal; |
@@ -751,7 +740,7 @@ retry: | |||
751 | 740 | ||
752 | if (need_i_mutex) { | 741 | if (need_i_mutex) { |
753 | /* demote the lock now the cached pages are gone */ | 742 | /* demote the lock now the cached pages are gone */ |
754 | XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); | 743 | xfs_ilock_demote(xip, XFS_IOLOCK_EXCL); |
755 | mutex_unlock(&inode->i_mutex); | 744 | mutex_unlock(&inode->i_mutex); |
756 | 745 | ||
757 | iolock = XFS_IOLOCK_SHARED; | 746 | iolock = XFS_IOLOCK_SHARED; |
@@ -759,7 +748,7 @@ retry: | |||
759 | need_i_mutex = 0; | 748 | need_i_mutex = 0; |
760 | } | 749 | } |
761 | 750 | ||
762 | xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, | 751 | xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs, |
763 | *offset, ioflags); | 752 | *offset, ioflags); |
764 | ret = generic_file_direct_write(iocb, iovp, | 753 | ret = generic_file_direct_write(iocb, iovp, |
765 | &segs, pos, offset, count, ocount); | 754 | &segs, pos, offset, count, ocount); |
@@ -779,7 +768,7 @@ retry: | |||
779 | goto relock; | 768 | goto relock; |
780 | } | 769 | } |
781 | } else { | 770 | } else { |
782 | xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs, | 771 | xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, |
783 | *offset, ioflags); | 772 | *offset, ioflags); |
784 | ret = generic_file_buffered_write(iocb, iovp, segs, | 773 | ret = generic_file_buffered_write(iocb, iovp, segs, |
785 | pos, offset, count, ret); | 774 | pos, offset, count, ret); |
@@ -843,9 +832,9 @@ retry: | |||
843 | } | 832 | } |
844 | 833 | ||
845 | out_unlock_internal: | 834 | out_unlock_internal: |
846 | if (io->io_new_size) { | 835 | if (xip->i_new_size) { |
847 | xfs_ilock(xip, XFS_ILOCK_EXCL); | 836 | xfs_ilock(xip, XFS_ILOCK_EXCL); |
848 | io->io_new_size = 0; | 837 | xip->i_new_size = 0; |
849 | /* | 838 | /* |
850 | * If this was a direct or synchronous I/O that failed (such | 839 | * If this was a direct or synchronous I/O that failed (such |
851 | * as ENOSPC) then part of the I/O may have been written to | 840 | * as ENOSPC) then part of the I/O may have been written to |
@@ -894,25 +883,6 @@ xfs_bdstrat_cb(struct xfs_buf *bp) | |||
894 | } | 883 | } |
895 | } | 884 | } |
896 | 885 | ||
897 | |||
898 | int | ||
899 | xfs_bmap( | ||
900 | xfs_inode_t *ip, | ||
901 | xfs_off_t offset, | ||
902 | ssize_t count, | ||
903 | int flags, | ||
904 | xfs_iomap_t *iomapp, | ||
905 | int *niomaps) | ||
906 | { | ||
907 | xfs_iocore_t *io = &ip->i_iocore; | ||
908 | |||
909 | ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); | ||
910 | ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == | ||
911 | ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); | ||
912 | |||
913 | return xfs_iomap(io, offset, count, flags, iomapp, niomaps); | ||
914 | } | ||
915 | |||
916 | /* | 886 | /* |
917 | * Wrapper around bdstrat so that we can stop data | 887 | * Wrapper around bdstrat so that we can stop data |
918 | * from going to disk in case we are shutting down the filesystem. | 888 | * from going to disk in case we are shutting down the filesystem. |
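
The direct-I/O paths in xfs_read() and xfs_write() above reject requests whose offset or length is not aligned to the buftarg's sector mask (bt_smask, i.e. sector size minus one). A short sketch of that check, with a demo_ helper standing in for the kernel code:

    #include <stdio.h>

    static int demo_dio_aligned(unsigned long long off, unsigned long len,
    			    unsigned int sector_size)
    {
    	unsigned int smask = sector_size - 1;	/* stand-in for target->bt_smask */

    	return !((off & smask) || (len & smask));
    }

    int main(void)
    {
    	printf("%d\n", demo_dio_aligned(4096, 8192, 512));	/* 1: aligned */
    	printf("%d\n", demo_dio_aligned(100, 512, 512));	/* 0: misaligned offset */
    	return 0;
    }
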
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h index 4b7747a828d9..e200253139cf 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.h +++ b/fs/xfs/linux-2.6/xfs_lrw.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #define __XFS_LRW_H__ | 19 | #define __XFS_LRW_H__ |
20 | 20 | ||
21 | struct xfs_mount; | 21 | struct xfs_mount; |
22 | struct xfs_iocore; | ||
23 | struct xfs_inode; | 22 | struct xfs_inode; |
24 | struct xfs_bmbt_irec; | 23 | struct xfs_bmbt_irec; |
25 | struct xfs_buf; | 24 | struct xfs_buf; |
@@ -60,20 +59,19 @@ struct xfs_iomap; | |||
60 | #define XFS_IOMAP_UNWRITTEN 27 | 59 | #define XFS_IOMAP_UNWRITTEN 27 |
61 | #define XFS_SPLICE_READ_ENTER 28 | 60 | #define XFS_SPLICE_READ_ENTER 28 |
62 | #define XFS_SPLICE_WRITE_ENTER 29 | 61 | #define XFS_SPLICE_WRITE_ENTER 29 |
63 | extern void xfs_rw_enter_trace(int, struct xfs_iocore *, | 62 | extern void xfs_rw_enter_trace(int, struct xfs_inode *, |
64 | void *, size_t, loff_t, int); | 63 | void *, size_t, loff_t, int); |
65 | extern void xfs_inval_cached_trace(struct xfs_iocore *, | 64 | extern void xfs_inval_cached_trace(struct xfs_inode *, |
66 | xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t); | 65 | xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t); |
67 | #else | 66 | #else |
68 | #define xfs_rw_enter_trace(tag, io, data, size, offset, ioflags) | 67 | #define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags) |
69 | #define xfs_inval_cached_trace(io, offset, len, first, last) | 68 | #define xfs_inval_cached_trace(ip, offset, len, first, last) |
70 | #endif | 69 | #endif |
71 | 70 | ||
72 | extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *); | 71 | extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *); |
73 | extern int xfs_bdstrat_cb(struct xfs_buf *); | 72 | extern int xfs_bdstrat_cb(struct xfs_buf *); |
74 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); | 73 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); |
75 | 74 | ||
76 | extern int xfs_zero_eof(struct inode *, struct xfs_iocore *, xfs_off_t, | 75 | extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); |
77 | xfs_fsize_t); | ||
78 | 76 | ||
79 | #endif /* __XFS_LRW_H__ */ | 77 | #endif /* __XFS_LRW_H__ */ |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 8cb63c60c048..21dfc9da235e 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include "xfs_rtalloc.h" | 41 | #include "xfs_rtalloc.h" |
42 | #include "xfs_error.h" | 42 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 43 | #include "xfs_itable.h" |
44 | #include "xfs_fsops.h" | ||
44 | #include "xfs_rw.h" | 45 | #include "xfs_rw.h" |
45 | #include "xfs_acl.h" | 46 | #include "xfs_acl.h" |
46 | #include "xfs_attr.h" | 47 | #include "xfs_attr.h" |
@@ -49,6 +50,8 @@ | |||
49 | #include "xfs_vnodeops.h" | 50 | #include "xfs_vnodeops.h" |
50 | #include "xfs_vfsops.h" | 51 | #include "xfs_vfsops.h" |
51 | #include "xfs_version.h" | 52 | #include "xfs_version.h" |
53 | #include "xfs_log_priv.h" | ||
54 | #include "xfs_trans_priv.h" | ||
52 | 55 | ||
53 | #include <linux/namei.h> | 56 | #include <linux/namei.h> |
54 | #include <linux/init.h> | 57 | #include <linux/init.h> |
@@ -87,6 +90,435 @@ xfs_args_allocate( | |||
87 | return args; | 90 | return args; |
88 | } | 91 | } |
89 | 92 | ||
93 | #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ | ||
94 | #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ | ||
95 | #define MNTOPT_LOGDEV "logdev" /* log device */ | ||
96 | #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ | ||
97 | #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ | ||
98 | #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ | ||
99 | #define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ | ||
100 | #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ | ||
101 | #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ | ||
102 | #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ | ||
103 | #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ | ||
104 | #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ | ||
105 | #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ | ||
106 | #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ | ||
107 | #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ | ||
108 | #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ | ||
109 | #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ | ||
110 | #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ | ||
111 | #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ | ||
112 | #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and | ||
113 | * unwritten extent conversion */ | ||
114 | #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ | ||
115 | #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ | ||
116 | #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ | ||
117 | #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ | ||
118 | #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ | ||
119 | #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ | ||
120 | #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes | ||
121 | * in stat(). */ | ||
122 | #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ | ||
123 | #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ | ||
124 | #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ | ||
125 | #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ | ||
126 | #define MNTOPT_NOQUOTA "noquota" /* no quotas */ | ||
127 | #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ | ||
128 | #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ | ||
129 | #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ | ||
130 | #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ | ||
131 | #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ | ||
132 | #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ | ||
133 | #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ | ||
134 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ | ||
135 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ | ||
136 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ | ||
137 | #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ | ||
138 | #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ | ||
139 | #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ | ||
140 | |||
141 | STATIC unsigned long | ||
142 | suffix_strtoul(char *s, char **endp, unsigned int base) | ||
143 | { | ||
144 | int last, shift_left_factor = 0; | ||
145 | char *value = s; | ||
146 | |||
147 | last = strlen(value) - 1; | ||
148 | if (value[last] == 'K' || value[last] == 'k') { | ||
149 | shift_left_factor = 10; | ||
150 | value[last] = '\0'; | ||
151 | } | ||
152 | if (value[last] == 'M' || value[last] == 'm') { | ||
153 | shift_left_factor = 20; | ||
154 | value[last] = '\0'; | ||
155 | } | ||
156 | if (value[last] == 'G' || value[last] == 'g') { | ||
157 | shift_left_factor = 30; | ||
158 | value[last] = '\0'; | ||
159 | } | ||
160 | |||
161 | return simple_strtoul((const char *)s, endp, base) << shift_left_factor; | ||
162 | } | ||
163 | |||
164 | STATIC int | ||
165 | xfs_parseargs( | ||
166 | struct xfs_mount *mp, | ||
167 | char *options, | ||
168 | struct xfs_mount_args *args, | ||
169 | int update) | ||
170 | { | ||
171 | char *this_char, *value, *eov; | ||
172 | int dsunit, dswidth, vol_dsunit, vol_dswidth; | ||
173 | int iosize; | ||
174 | int ikeep = 0; | ||
175 | |||
176 | args->flags |= XFSMNT_BARRIER; | ||
177 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; | ||
178 | |||
179 | if (!options) | ||
180 | goto done; | ||
181 | |||
182 | iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; | ||
183 | |||
184 | while ((this_char = strsep(&options, ",")) != NULL) { | ||
185 | if (!*this_char) | ||
186 | continue; | ||
187 | if ((value = strchr(this_char, '=')) != NULL) | ||
188 | *value++ = 0; | ||
189 | |||
190 | if (!strcmp(this_char, MNTOPT_LOGBUFS)) { | ||
191 | if (!value || !*value) { | ||
192 | cmn_err(CE_WARN, | ||
193 | "XFS: %s option requires an argument", | ||
194 | this_char); | ||
195 | return EINVAL; | ||
196 | } | ||
197 | args->logbufs = simple_strtoul(value, &eov, 10); | ||
198 | } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { | ||
199 | if (!value || !*value) { | ||
200 | cmn_err(CE_WARN, | ||
201 | "XFS: %s option requires an argument", | ||
202 | this_char); | ||
203 | return EINVAL; | ||
204 | } | ||
205 | args->logbufsize = suffix_strtoul(value, &eov, 10); | ||
206 | } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { | ||
207 | if (!value || !*value) { | ||
208 | cmn_err(CE_WARN, | ||
209 | "XFS: %s option requires an argument", | ||
210 | this_char); | ||
211 | return EINVAL; | ||
212 | } | ||
213 | strncpy(args->logname, value, MAXNAMELEN); | ||
214 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { | ||
215 | if (!value || !*value) { | ||
216 | cmn_err(CE_WARN, | ||
217 | "XFS: %s option requires an argument", | ||
218 | this_char); | ||
219 | return EINVAL; | ||
220 | } | ||
221 | strncpy(args->mtpt, value, MAXNAMELEN); | ||
222 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { | ||
223 | if (!value || !*value) { | ||
224 | cmn_err(CE_WARN, | ||
225 | "XFS: %s option requires an argument", | ||
226 | this_char); | ||
227 | return EINVAL; | ||
228 | } | ||
229 | strncpy(args->rtname, value, MAXNAMELEN); | ||
230 | } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { | ||
231 | if (!value || !*value) { | ||
232 | cmn_err(CE_WARN, | ||
233 | "XFS: %s option requires an argument", | ||
234 | this_char); | ||
235 | return EINVAL; | ||
236 | } | ||
237 | iosize = simple_strtoul(value, &eov, 10); | ||
238 | args->flags |= XFSMNT_IOSIZE; | ||
239 | args->iosizelog = (uint8_t) iosize; | ||
240 | } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { | ||
241 | if (!value || !*value) { | ||
242 | cmn_err(CE_WARN, | ||
243 | "XFS: %s option requires an argument", | ||
244 | this_char); | ||
245 | return EINVAL; | ||
246 | } | ||
247 | iosize = suffix_strtoul(value, &eov, 10); | ||
248 | args->flags |= XFSMNT_IOSIZE; | ||
249 | args->iosizelog = ffs(iosize) - 1; | ||
250 | } else if (!strcmp(this_char, MNTOPT_GRPID) || | ||
251 | !strcmp(this_char, MNTOPT_BSDGROUPS)) { | ||
252 | mp->m_flags |= XFS_MOUNT_GRPID; | ||
253 | } else if (!strcmp(this_char, MNTOPT_NOGRPID) || | ||
254 | !strcmp(this_char, MNTOPT_SYSVGROUPS)) { | ||
255 | mp->m_flags &= ~XFS_MOUNT_GRPID; | ||
256 | } else if (!strcmp(this_char, MNTOPT_WSYNC)) { | ||
257 | args->flags |= XFSMNT_WSYNC; | ||
258 | } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { | ||
259 | args->flags |= XFSMNT_OSYNCISOSYNC; | ||
260 | } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { | ||
261 | args->flags |= XFSMNT_NORECOVERY; | ||
262 | } else if (!strcmp(this_char, MNTOPT_INO64)) { | ||
263 | args->flags |= XFSMNT_INO64; | ||
264 | #if !XFS_BIG_INUMS | ||
265 | cmn_err(CE_WARN, | ||
266 | "XFS: %s option not allowed on this system", | ||
267 | this_char); | ||
268 | return EINVAL; | ||
269 | #endif | ||
270 | } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { | ||
271 | args->flags |= XFSMNT_NOALIGN; | ||
272 | } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { | ||
273 | args->flags |= XFSMNT_SWALLOC; | ||
274 | } else if (!strcmp(this_char, MNTOPT_SUNIT)) { | ||
275 | if (!value || !*value) { | ||
276 | cmn_err(CE_WARN, | ||
277 | "XFS: %s option requires an argument", | ||
278 | this_char); | ||
279 | return EINVAL; | ||
280 | } | ||
281 | dsunit = simple_strtoul(value, &eov, 10); | ||
282 | } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { | ||
283 | if (!value || !*value) { | ||
284 | cmn_err(CE_WARN, | ||
285 | "XFS: %s option requires an argument", | ||
286 | this_char); | ||
287 | return EINVAL; | ||
288 | } | ||
289 | dswidth = simple_strtoul(value, &eov, 10); | ||
290 | } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { | ||
291 | args->flags &= ~XFSMNT_32BITINODES; | ||
292 | #if !XFS_BIG_INUMS | ||
293 | cmn_err(CE_WARN, | ||
294 | "XFS: %s option not allowed on this system", | ||
295 | this_char); | ||
296 | return EINVAL; | ||
297 | #endif | ||
298 | } else if (!strcmp(this_char, MNTOPT_NOUUID)) { | ||
299 | args->flags |= XFSMNT_NOUUID; | ||
300 | } else if (!strcmp(this_char, MNTOPT_BARRIER)) { | ||
301 | args->flags |= XFSMNT_BARRIER; | ||
302 | } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { | ||
303 | args->flags &= ~XFSMNT_BARRIER; | ||
304 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { | ||
305 | ikeep = 1; | ||
306 | args->flags &= ~XFSMNT_IDELETE; | ||
307 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { | ||
308 | args->flags |= XFSMNT_IDELETE; | ||
309 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { | ||
310 | args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; | ||
311 | } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { | ||
312 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; | ||
313 | } else if (!strcmp(this_char, MNTOPT_ATTR2)) { | ||
314 | args->flags |= XFSMNT_ATTR2; | ||
315 | } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { | ||
316 | args->flags &= ~XFSMNT_ATTR2; | ||
317 | } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { | ||
318 | args->flags2 |= XFSMNT2_FILESTREAMS; | ||
319 | } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { | ||
320 | args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); | ||
321 | args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); | ||
322 | } else if (!strcmp(this_char, MNTOPT_QUOTA) || | ||
323 | !strcmp(this_char, MNTOPT_UQUOTA) || | ||
324 | !strcmp(this_char, MNTOPT_USRQUOTA)) { | ||
325 | args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; | ||
326 | } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || | ||
327 | !strcmp(this_char, MNTOPT_UQUOTANOENF)) { | ||
328 | args->flags |= XFSMNT_UQUOTA; | ||
329 | args->flags &= ~XFSMNT_UQUOTAENF; | ||
330 | } else if (!strcmp(this_char, MNTOPT_PQUOTA) || | ||
331 | !strcmp(this_char, MNTOPT_PRJQUOTA)) { | ||
332 | args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF; | ||
333 | } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { | ||
334 | args->flags |= XFSMNT_PQUOTA; | ||
335 | args->flags &= ~XFSMNT_PQUOTAENF; | ||
336 | } else if (!strcmp(this_char, MNTOPT_GQUOTA) || | ||
337 | !strcmp(this_char, MNTOPT_GRPQUOTA)) { | ||
338 | args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; | ||
339 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { | ||
340 | args->flags |= XFSMNT_GQUOTA; | ||
341 | args->flags &= ~XFSMNT_GQUOTAENF; | ||
342 | } else if (!strcmp(this_char, MNTOPT_DMAPI)) { | ||
343 | args->flags |= XFSMNT_DMAPI; | ||
344 | } else if (!strcmp(this_char, MNTOPT_XDSM)) { | ||
345 | args->flags |= XFSMNT_DMAPI; | ||
346 | } else if (!strcmp(this_char, MNTOPT_DMI)) { | ||
347 | args->flags |= XFSMNT_DMAPI; | ||
348 | } else if (!strcmp(this_char, "ihashsize")) { | ||
349 | cmn_err(CE_WARN, | ||
350 | "XFS: ihashsize no longer used, option is deprecated."); | ||
351 | } else if (!strcmp(this_char, "osyncisdsync")) { | ||
352 | /* no-op, this is now the default */ | ||
353 | cmn_err(CE_WARN, | ||
354 | "XFS: osyncisdsync is now the default, option is deprecated."); | ||
355 | } else if (!strcmp(this_char, "irixsgid")) { | ||
356 | cmn_err(CE_WARN, | ||
357 | "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); | ||
358 | } else { | ||
359 | cmn_err(CE_WARN, | ||
360 | "XFS: unknown mount option [%s].", this_char); | ||
361 | return EINVAL; | ||
362 | } | ||
363 | } | ||
364 | |||
365 | if (args->flags & XFSMNT_NORECOVERY) { | ||
366 | if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) { | ||
367 | cmn_err(CE_WARN, | ||
368 | "XFS: no-recovery mounts must be read-only."); | ||
369 | return EINVAL; | ||
370 | } | ||
371 | } | ||
372 | |||
373 | if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { | ||
374 | cmn_err(CE_WARN, | ||
375 | "XFS: sunit and swidth options incompatible with the noalign option"); | ||
376 | return EINVAL; | ||
377 | } | ||
378 | |||
379 | if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) { | ||
380 | cmn_err(CE_WARN, | ||
381 | "XFS: cannot mount with both project and group quota"); | ||
382 | return EINVAL; | ||
383 | } | ||
384 | |||
385 | if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') { | ||
386 | printk("XFS: %s option needs the mount point option as well\n", | ||
387 | MNTOPT_DMAPI); | ||
388 | return EINVAL; | ||
389 | } | ||
390 | |||
391 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { | ||
392 | cmn_err(CE_WARN, | ||
393 | "XFS: sunit and swidth must be specified together"); | ||
394 | return EINVAL; | ||
395 | } | ||
396 | |||
397 | if (dsunit && (dswidth % dsunit != 0)) { | ||
398 | cmn_err(CE_WARN, | ||
399 | "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", | ||
400 | dswidth, dsunit); | ||
401 | return EINVAL; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * Applications using DMI filesystems often expect the | ||
406 | * inode generation number to be monotonically increasing. | ||
407 | * If we delete inode chunks we break this assumption, so | ||
408 | * keep unused inode chunks on disk for DMI filesystems | ||
409 | * until we come up with a better solution. | ||
410 | * Note that if "ikeep" or "noikeep" mount options are | ||
411 | * supplied, then they are honored. | ||
412 | */ | ||
413 | if (!(args->flags & XFSMNT_DMAPI) && !ikeep) | ||
414 | args->flags |= XFSMNT_IDELETE; | ||
415 | |||
416 | if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { | ||
417 | if (dsunit) { | ||
418 | args->sunit = dsunit; | ||
419 | args->flags |= XFSMNT_RETERR; | ||
420 | } else { | ||
421 | args->sunit = vol_dsunit; | ||
422 | } | ||
423 | dswidth ? (args->swidth = dswidth) : | ||
424 | (args->swidth = vol_dswidth); | ||
425 | } else { | ||
426 | args->sunit = args->swidth = 0; | ||
427 | } | ||
428 | |||
429 | done: | ||
430 | if (args->flags & XFSMNT_32BITINODES) | ||
431 | mp->m_flags |= XFS_MOUNT_SMALL_INUMS; | ||
432 | if (args->flags2) | ||
433 | args->flags |= XFSMNT_FLAGS2; | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | struct proc_xfs_info { | ||
438 | int flag; | ||
439 | char *str; | ||
440 | }; | ||
441 | |||
442 | STATIC int | ||
443 | xfs_showargs( | ||
444 | struct xfs_mount *mp, | ||
445 | struct seq_file *m) | ||
446 | { | ||
447 | static struct proc_xfs_info xfs_info_set[] = { | ||
448 | /* the few simple ones we can get from the mount struct */ | ||
449 | { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, | ||
450 | { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, | ||
451 | { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, | ||
452 | { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, | ||
453 | { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, | ||
454 | { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, | ||
455 | { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, | ||
456 | { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, | ||
457 | { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, | ||
458 | { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI }, | ||
459 | { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, | ||
460 | { 0, NULL } | ||
461 | }; | ||
462 | static struct proc_xfs_info xfs_info_unset[] = { | ||
463 | /* the few simple ones we can get from the mount struct */ | ||
464 | { XFS_MOUNT_IDELETE, "," MNTOPT_IKEEP }, | ||
465 | { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, | ||
466 | { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, | ||
467 | { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, | ||
468 | { 0, NULL } | ||
469 | }; | ||
470 | struct proc_xfs_info *xfs_infop; | ||
471 | |||
472 | for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { | ||
473 | if (mp->m_flags & xfs_infop->flag) | ||
474 | seq_puts(m, xfs_infop->str); | ||
475 | } | ||
476 | for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) { | ||
477 | if (!(mp->m_flags & xfs_infop->flag)) | ||
478 | seq_puts(m, xfs_infop->str); | ||
479 | } | ||
480 | |||
481 | if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) | ||
482 | seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", | ||
483 | (int)(1 << mp->m_writeio_log) >> 10); | ||
484 | |||
485 | if (mp->m_logbufs > 0) | ||
486 | seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); | ||
487 | if (mp->m_logbsize > 0) | ||
488 | seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); | ||
489 | |||
490 | if (mp->m_logname) | ||
491 | seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); | ||
492 | if (mp->m_rtname) | ||
493 | seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); | ||
494 | |||
495 | if (mp->m_dalign > 0) | ||
496 | seq_printf(m, "," MNTOPT_SUNIT "=%d", | ||
497 | (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); | ||
498 | if (mp->m_swidth > 0) | ||
499 | seq_printf(m, "," MNTOPT_SWIDTH "=%d", | ||
500 | (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); | ||
501 | |||
502 | if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) | ||
503 | seq_puts(m, "," MNTOPT_USRQUOTA); | ||
504 | else if (mp->m_qflags & XFS_UQUOTA_ACCT) | ||
505 | seq_puts(m, "," MNTOPT_UQUOTANOENF); | ||
506 | |||
507 | if (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) | ||
508 | seq_puts(m, "," MNTOPT_PRJQUOTA); | ||
509 | else if (mp->m_qflags & XFS_PQUOTA_ACCT) | ||
510 | seq_puts(m, "," MNTOPT_PQUOTANOENF); | ||
511 | |||
512 | if (mp->m_qflags & (XFS_GQUOTA_ACCT|XFS_OQUOTA_ENFD)) | ||
513 | seq_puts(m, "," MNTOPT_GRPQUOTA); | ||
514 | else if (mp->m_qflags & XFS_GQUOTA_ACCT) | ||
515 | seq_puts(m, "," MNTOPT_GQUOTANOENF); | ||
516 | |||
517 | if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) | ||
518 | seq_puts(m, "," MNTOPT_NOQUOTA); | ||
519 | |||
520 | return 0; | ||
521 | } | ||
90 | __uint64_t | 522 | __uint64_t |
91 | xfs_max_file_offset( | 523 | xfs_max_file_offset( |
92 | unsigned int blockshift) | 524 | unsigned int blockshift) |
@@ -137,7 +569,7 @@ xfs_set_inodeops( | |||
137 | break; | 569 | break; |
138 | case S_IFLNK: | 570 | case S_IFLNK: |
139 | inode->i_op = &xfs_symlink_inode_operations; | 571 | inode->i_op = &xfs_symlink_inode_operations; |
140 | if (inode->i_blocks) | 572 | if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE)) |
141 | inode->i_mapping->a_ops = &xfs_address_space_operations; | 573 | inode->i_mapping->a_ops = &xfs_address_space_operations; |
142 | break; | 574 | break; |
143 | default: | 575 | default: |
@@ -174,8 +606,6 @@ xfs_revalidate_inode( | |||
174 | 606 | ||
175 | inode->i_generation = ip->i_d.di_gen; | 607 | inode->i_generation = ip->i_d.di_gen; |
176 | i_size_write(inode, ip->i_d.di_size); | 608 | i_size_write(inode, ip->i_d.di_size); |
177 | inode->i_blocks = | ||
178 | XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); | ||
179 | inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; | 609 | inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; |
180 | inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; | 610 | inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; |
181 | inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; | 611 | inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; |
@@ -334,6 +764,64 @@ xfs_blkdev_issue_flush( | |||
334 | blkdev_issue_flush(buftarg->bt_bdev, NULL); | 764 | blkdev_issue_flush(buftarg->bt_bdev, NULL); |
335 | } | 765 | } |
336 | 766 | ||
767 | /* | ||
768 | * XFS AIL push thread support | ||
769 | */ | ||
770 | void | ||
771 | xfsaild_wakeup( | ||
772 | xfs_mount_t *mp, | ||
773 | xfs_lsn_t threshold_lsn) | ||
774 | { | ||
775 | mp->m_ail.xa_target = threshold_lsn; | ||
776 | wake_up_process(mp->m_ail.xa_task); | ||
777 | } | ||
778 | |||
779 | int | ||
780 | xfsaild( | ||
781 | void *data) | ||
782 | { | ||
783 | xfs_mount_t *mp = (xfs_mount_t *)data; | ||
784 | xfs_lsn_t last_pushed_lsn = 0; | ||
785 | long tout = 0; | ||
786 | |||
787 | while (!kthread_should_stop()) { | ||
788 | if (tout) | ||
789 | schedule_timeout_interruptible(msecs_to_jiffies(tout)); | ||
790 | tout = 1000; | ||
791 | |||
792 | /* swsusp */ | ||
793 | try_to_freeze(); | ||
794 | |||
795 | ASSERT(mp->m_log); | ||
796 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
797 | continue; | ||
798 | |||
799 | tout = xfsaild_push(mp, &last_pushed_lsn); | ||
800 | } | ||
801 | |||
802 | return 0; | ||
803 | } /* xfsaild */ | ||
804 | |||
805 | int | ||
806 | xfsaild_start( | ||
807 | xfs_mount_t *mp) | ||
808 | { | ||
809 | mp->m_ail.xa_target = 0; | ||
810 | mp->m_ail.xa_task = kthread_run(xfsaild, mp, "xfsaild"); | ||
811 | if (IS_ERR(mp->m_ail.xa_task)) | ||
812 | return -PTR_ERR(mp->m_ail.xa_task); | ||
813 | return 0; | ||
814 | } | ||
815 | |||
816 | void | ||
817 | xfsaild_stop( | ||
818 | xfs_mount_t *mp) | ||
819 | { | ||
820 | kthread_stop(mp->m_ail.xa_task); | ||
821 | } | ||
822 | |||
823 | |||
824 | |||
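The AIL push thread added above is a plain kernel thread: started with kthread_run(), it loops until kthread_should_stop(), sleeps interruptibly between pushes, cooperates with suspend via try_to_freeze(), and is torn down with kthread_stop(). A hedged sketch of that lifecycle, with an illustrative worker name and a fixed 1000 ms idle period:

```c
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Minimal kthread lifecycle sketch in the style of xfsaild above. */
static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(msecs_to_jiffies(1000));
		try_to_freeze();		/* cooperate with suspend */
		/* ... do one round of pushing work here ... */
	}
	return 0;
}

static struct task_struct *worker_start(void *data)
{
	/* caller should IS_ERR() check the result, as xfsaild_start() does */
	return kthread_run(worker_fn, data, "sketch_worker");
}

static void worker_stop(struct task_struct *task)
{
	kthread_stop(task);	/* wakes the thread and waits for it to exit */
}
```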
337 | STATIC struct inode * | 825 | STATIC struct inode * |
338 | xfs_fs_alloc_inode( | 826 | xfs_fs_alloc_inode( |
339 | struct super_block *sb) | 827 | struct super_block *sb) |
@@ -361,7 +849,7 @@ xfs_fs_inode_init_once( | |||
361 | inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); | 849 | inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); |
362 | } | 850 | } |
363 | 851 | ||
364 | STATIC int | 852 | STATIC int __init |
365 | xfs_init_zones(void) | 853 | xfs_init_zones(void) |
366 | { | 854 | { |
367 | xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", | 855 | xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", |
@@ -410,8 +898,7 @@ xfs_fs_write_inode( | |||
410 | { | 898 | { |
411 | int error = 0, flags = FLUSH_INODE; | 899 | int error = 0, flags = FLUSH_INODE; |
412 | 900 | ||
413 | vn_trace_entry(XFS_I(inode), __FUNCTION__, | 901 | xfs_itrace_entry(XFS_I(inode)); |
414 | (inst_t *)__return_address); | ||
415 | if (sync) { | 902 | if (sync) { |
416 | filemap_fdatawait(inode->i_mapping); | 903 | filemap_fdatawait(inode->i_mapping); |
417 | flags |= FLUSH_SYNC; | 904 | flags |= FLUSH_SYNC; |
@@ -438,8 +925,7 @@ xfs_fs_clear_inode( | |||
438 | * find an inode with di_mode == 0 but without IGET_CREATE set. | 925 | * find an inode with di_mode == 0 but without IGET_CREATE set. |
439 | */ | 926 | */ |
440 | if (ip) { | 927 | if (ip) { |
441 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 928 | xfs_itrace_entry(ip); |
442 | |||
443 | XFS_STATS_INC(vn_rele); | 929 | XFS_STATS_INC(vn_rele); |
444 | XFS_STATS_INC(vn_remove); | 930 | XFS_STATS_INC(vn_remove); |
445 | XFS_STATS_INC(vn_reclaim); | 931 | XFS_STATS_INC(vn_reclaim); |
@@ -683,8 +1169,44 @@ xfs_fs_statfs( | |||
683 | struct dentry *dentry, | 1169 | struct dentry *dentry, |
684 | struct kstatfs *statp) | 1170 | struct kstatfs *statp) |
685 | { | 1171 | { |
686 | return -xfs_statvfs(XFS_M(dentry->d_sb), statp, | 1172 | struct xfs_mount *mp = XFS_M(dentry->d_sb); |
687 | vn_from_inode(dentry->d_inode)); | 1173 | xfs_sb_t *sbp = &mp->m_sb; |
1174 | __uint64_t fakeinos, id; | ||
1175 | xfs_extlen_t lsize; | ||
1176 | |||
1177 | statp->f_type = XFS_SB_MAGIC; | ||
1178 | statp->f_namelen = MAXNAMELEN - 1; | ||
1179 | |||
1180 | id = huge_encode_dev(mp->m_ddev_targp->bt_dev); | ||
1181 | statp->f_fsid.val[0] = (u32)id; | ||
1182 | statp->f_fsid.val[1] = (u32)(id >> 32); | ||
1183 | |||
1184 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); | ||
1185 | |||
1186 | spin_lock(&mp->m_sb_lock); | ||
1187 | statp->f_bsize = sbp->sb_blocksize; | ||
1188 | lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; | ||
1189 | statp->f_blocks = sbp->sb_dblocks - lsize; | ||
1190 | statp->f_bfree = statp->f_bavail = | ||
1191 | sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); | ||
1192 | fakeinos = statp->f_bfree << sbp->sb_inopblog; | ||
1193 | #if XFS_BIG_INUMS | ||
1194 | fakeinos += mp->m_inoadd; | ||
1195 | #endif | ||
1196 | statp->f_files = | ||
1197 | MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); | ||
1198 | if (mp->m_maxicount) | ||
1199 | #if XFS_BIG_INUMS | ||
1200 | if (!mp->m_inoadd) | ||
1201 | #endif | ||
1202 | statp->f_files = min_t(typeof(statp->f_files), | ||
1203 | statp->f_files, | ||
1204 | mp->m_maxicount); | ||
1205 | statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); | ||
1206 | spin_unlock(&mp->m_sb_lock); | ||
1207 | |||
1208 | XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp); | ||
1209 | return 0; | ||
688 | } | 1210 | } |
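xfs_fs_statfs() above now fills struct kstatfs directly under m_sb_lock instead of going through xfs_statvfs(). One small piece worth calling out is the f_fsid encoding, which splits the 64-bit encoded data-device number into the two 32-bit fsid words; a sketch of just that step, assuming only the generic huge_encode_dev() helper:

```c
#include <linux/statfs.h>
#include <linux/kdev_t.h>

/* Sketch: pack a 64-bit encoded device number into the two 32-bit
 * f_fsid words, as xfs_fs_statfs() does above. */
static void fill_fsid(struct kstatfs *statp, dev_t dev)
{
	u64 id = huge_encode_dev(dev);

	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);
}
```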
689 | 1211 | ||
690 | STATIC int | 1212 | STATIC int |
@@ -704,11 +1226,19 @@ xfs_fs_remount( | |||
704 | return -error; | 1226 | return -error; |
705 | } | 1227 | } |
706 | 1228 | ||
1229 | /* | ||
1230 | * Second stage of a freeze. The data is already frozen so we only | ||
1231 | * need to take care of the metadata. Once that's done write a dummy | ||
1232 | * record to dirty the log in case of a crash while frozen. | ||
1233 | */ | ||
707 | STATIC void | 1234 | STATIC void |
708 | xfs_fs_lockfs( | 1235 | xfs_fs_lockfs( |
709 | struct super_block *sb) | 1236 | struct super_block *sb) |
710 | { | 1237 | { |
711 | xfs_freeze(XFS_M(sb)); | 1238 | struct xfs_mount *mp = XFS_M(sb); |
1239 | |||
1240 | xfs_attr_quiesce(mp); | ||
1241 | xfs_fs_log_dummy(mp); | ||
712 | } | 1242 | } |
713 | 1243 | ||
714 | STATIC int | 1244 | STATIC int |
@@ -779,7 +1309,6 @@ xfs_fs_fill_super( | |||
779 | struct inode *rootvp; | 1309 | struct inode *rootvp; |
780 | struct xfs_mount *mp = NULL; | 1310 | struct xfs_mount *mp = NULL; |
781 | struct xfs_mount_args *args = xfs_args_allocate(sb, silent); | 1311 | struct xfs_mount_args *args = xfs_args_allocate(sb, silent); |
782 | struct kstatfs statvfs; | ||
783 | int error; | 1312 | int error; |
784 | 1313 | ||
785 | mp = xfs_mount_init(); | 1314 | mp = xfs_mount_init(); |
@@ -807,21 +1336,19 @@ xfs_fs_fill_super( | |||
807 | if (error) | 1336 | if (error) |
808 | goto fail_vfsop; | 1337 | goto fail_vfsop; |
809 | 1338 | ||
810 | error = xfs_statvfs(mp, &statvfs, NULL); | ||
811 | if (error) | ||
812 | goto fail_unmount; | ||
813 | |||
814 | sb->s_dirt = 1; | 1339 | sb->s_dirt = 1; |
815 | sb->s_magic = statvfs.f_type; | 1340 | sb->s_magic = XFS_SB_MAGIC; |
816 | sb->s_blocksize = statvfs.f_bsize; | 1341 | sb->s_blocksize = mp->m_sb.sb_blocksize; |
817 | sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1; | 1342 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; |
818 | sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); | 1343 | sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); |
819 | sb->s_time_gran = 1; | 1344 | sb->s_time_gran = 1; |
820 | set_posix_acl_flag(sb); | 1345 | set_posix_acl_flag(sb); |
821 | 1346 | ||
822 | error = xfs_root(mp, &rootvp); | 1347 | rootvp = igrab(mp->m_rootip->i_vnode); |
823 | if (error) | 1348 | if (!rootvp) { |
1349 | error = ENOENT; | ||
824 | goto fail_unmount; | 1350 | goto fail_unmount; |
1351 | } | ||
825 | 1352 | ||
826 | sb->s_root = d_alloc_root(vn_to_inode(rootvp)); | 1353 | sb->s_root = d_alloc_root(vn_to_inode(rootvp)); |
827 | if (!sb->s_root) { | 1354 | if (!sb->s_root) { |
@@ -841,8 +1368,7 @@ xfs_fs_fill_super( | |||
841 | goto fail_vnrele; | 1368 | goto fail_vnrele; |
842 | } | 1369 | } |
843 | 1370 | ||
844 | vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__, | 1371 | xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); |
845 | (inst_t *)__return_address); | ||
846 | 1372 | ||
847 | kmem_free(args, sizeof(*args)); | 1373 | kmem_free(args, sizeof(*args)); |
848 | return 0; | 1374 | return 0; |
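fill_super no longer calls xfs_root(); it takes a reference on the already-instantiated root inode with igrab(), which returns NULL if the inode is being torn down, hence the ENOENT path. A sketch of that idiom in generic VFS style (the function name and negative errno convention are illustrative, not how this patch reports errors internally):

```c
#include <linux/fs.h>

/* Sketch: take a reference on a known root inode and attach it to the
 * superblock, mirroring the igrab()/d_alloc_root() sequence above. */
static int attach_root(struct super_block *sb, struct inode *root_inode)
{
	struct inode *inode = igrab(root_inode);

	if (!inode)
		return -ENOENT;		/* inode was being freed */

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);		/* drop the reference igrab() took */
		return -ENOMEM;
	}
	return 0;
}
```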
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index 814169fd7e1e..bc7afe007338 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) | 40 | #define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) |
41 | static wait_queue_head_t vsync[NVSYNC]; | 41 | static wait_queue_head_t vsync[NVSYNC]; |
42 | 42 | ||
43 | void | 43 | void __init |
44 | vn_init(void) | 44 | vn_init(void) |
45 | { | 45 | { |
46 | int i; | 46 | int i; |
@@ -82,84 +82,55 @@ vn_ioerror( | |||
82 | xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); | 82 | xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); |
83 | } | 83 | } |
84 | 84 | ||
85 | bhv_vnode_t * | ||
86 | vn_initialize( | ||
87 | struct inode *inode) | ||
88 | { | ||
89 | bhv_vnode_t *vp = vn_from_inode(inode); | ||
90 | |||
91 | XFS_STATS_INC(vn_active); | ||
92 | XFS_STATS_INC(vn_alloc); | ||
93 | |||
94 | ASSERT(VN_CACHED(vp) == 0); | ||
95 | |||
96 | return vp; | ||
97 | } | ||
98 | |||
99 | /* | 85 | /* |
100 | * Revalidate the Linux inode from the vattr. | 86 | * Revalidate the Linux inode from the XFS inode. |
101 | * Note: i_size _not_ updated; we must hold the inode | 87 | * Note: i_size _not_ updated; we must hold the inode |
102 | * semaphore when doing that - callers responsibility. | 88 | * semaphore when doing that - callers responsibility. |
103 | */ | 89 | */ |
104 | void | 90 | int |
105 | vn_revalidate_core( | 91 | vn_revalidate( |
106 | bhv_vnode_t *vp, | 92 | bhv_vnode_t *vp) |
107 | bhv_vattr_t *vap) | ||
108 | { | 93 | { |
109 | struct inode *inode = vn_to_inode(vp); | 94 | struct inode *inode = vn_to_inode(vp); |
110 | 95 | struct xfs_inode *ip = XFS_I(inode); | |
111 | inode->i_mode = vap->va_mode; | 96 | struct xfs_mount *mp = ip->i_mount; |
112 | inode->i_nlink = vap->va_nlink; | 97 | unsigned long xflags; |
113 | inode->i_uid = vap->va_uid; | 98 | |
114 | inode->i_gid = vap->va_gid; | 99 | xfs_itrace_entry(ip); |
115 | inode->i_blocks = vap->va_nblocks; | 100 | |
116 | inode->i_mtime = vap->va_mtime; | 101 | if (XFS_FORCED_SHUTDOWN(mp)) |
117 | inode->i_ctime = vap->va_ctime; | 102 | return -EIO; |
118 | if (vap->va_xflags & XFS_XFLAG_IMMUTABLE) | 103 | |
104 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
105 | inode->i_mode = ip->i_d.di_mode; | ||
106 | inode->i_uid = ip->i_d.di_uid; | ||
107 | inode->i_gid = ip->i_d.di_gid; | ||
108 | inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; | ||
109 | inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; | ||
110 | inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; | ||
111 | inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; | ||
112 | |||
113 | xflags = xfs_ip2xflags(ip); | ||
114 | if (xflags & XFS_XFLAG_IMMUTABLE) | ||
119 | inode->i_flags |= S_IMMUTABLE; | 115 | inode->i_flags |= S_IMMUTABLE; |
120 | else | 116 | else |
121 | inode->i_flags &= ~S_IMMUTABLE; | 117 | inode->i_flags &= ~S_IMMUTABLE; |
122 | if (vap->va_xflags & XFS_XFLAG_APPEND) | 118 | if (xflags & XFS_XFLAG_APPEND) |
123 | inode->i_flags |= S_APPEND; | 119 | inode->i_flags |= S_APPEND; |
124 | else | 120 | else |
125 | inode->i_flags &= ~S_APPEND; | 121 | inode->i_flags &= ~S_APPEND; |
126 | if (vap->va_xflags & XFS_XFLAG_SYNC) | 122 | if (xflags & XFS_XFLAG_SYNC) |
127 | inode->i_flags |= S_SYNC; | 123 | inode->i_flags |= S_SYNC; |
128 | else | 124 | else |
129 | inode->i_flags &= ~S_SYNC; | 125 | inode->i_flags &= ~S_SYNC; |
130 | if (vap->va_xflags & XFS_XFLAG_NOATIME) | 126 | if (xflags & XFS_XFLAG_NOATIME) |
131 | inode->i_flags |= S_NOATIME; | 127 | inode->i_flags |= S_NOATIME; |
132 | else | 128 | else |
133 | inode->i_flags &= ~S_NOATIME; | 129 | inode->i_flags &= ~S_NOATIME; |
134 | } | 130 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
135 | |||
136 | /* | ||
137 | * Revalidate the Linux inode from the vnode. | ||
138 | */ | ||
139 | int | ||
140 | __vn_revalidate( | ||
141 | bhv_vnode_t *vp, | ||
142 | bhv_vattr_t *vattr) | ||
143 | { | ||
144 | int error; | ||
145 | |||
146 | vn_trace_entry(xfs_vtoi(vp), __FUNCTION__, (inst_t *)__return_address); | ||
147 | vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS; | ||
148 | error = xfs_getattr(xfs_vtoi(vp), vattr, 0); | ||
149 | if (likely(!error)) { | ||
150 | vn_revalidate_core(vp, vattr); | ||
151 | xfs_iflags_clear(xfs_vtoi(vp), XFS_IMODIFIED); | ||
152 | } | ||
153 | return -error; | ||
154 | } | ||
155 | |||
156 | int | ||
157 | vn_revalidate( | ||
158 | bhv_vnode_t *vp) | ||
159 | { | ||
160 | bhv_vattr_t vattr; | ||
161 | 131 | ||
162 | return __vn_revalidate(vp, &vattr); | 132 | xfs_iflags_clear(ip, XFS_IMODIFIED); |
133 | return 0; | ||
163 | } | 134 | } |
164 | 135 | ||
165 | /* | 136 | /* |
@@ -179,7 +150,7 @@ vn_hold( | |||
179 | return vp; | 150 | return vp; |
180 | } | 151 | } |
181 | 152 | ||
182 | #ifdef XFS_VNODE_TRACE | 153 | #ifdef XFS_INODE_TRACE |
183 | 154 | ||
184 | /* | 155 | /* |
185 | * Reference count of Linux inode if present, -1 if the xfs_inode | 156 | * Reference count of Linux inode if present, -1 if the xfs_inode |
@@ -211,32 +182,32 @@ static inline int xfs_icount(struct xfs_inode *ip) | |||
211 | * Vnode tracing code. | 182 | * Vnode tracing code. |
212 | */ | 183 | */ |
213 | void | 184 | void |
214 | vn_trace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) | 185 | _xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) |
215 | { | 186 | { |
216 | KTRACE_ENTER(ip, VNODE_KTRACE_ENTRY, func, 0, ra); | 187 | KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra); |
217 | } | 188 | } |
218 | 189 | ||
219 | void | 190 | void |
220 | vn_trace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) | 191 | _xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) |
221 | { | 192 | { |
222 | KTRACE_ENTER(ip, VNODE_KTRACE_EXIT, func, 0, ra); | 193 | KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra); |
223 | } | 194 | } |
224 | 195 | ||
225 | void | 196 | void |
226 | vn_trace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) | 197 | xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) |
227 | { | 198 | { |
228 | KTRACE_ENTER(ip, VNODE_KTRACE_HOLD, file, line, ra); | 199 | KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra); |
229 | } | 200 | } |
230 | 201 | ||
231 | void | 202 | void |
232 | vn_trace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) | 203 | _xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) |
233 | { | 204 | { |
234 | KTRACE_ENTER(ip, VNODE_KTRACE_REF, file, line, ra); | 205 | KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra); |
235 | } | 206 | } |
236 | 207 | ||
237 | void | 208 | void |
238 | vn_trace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) | 209 | xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) |
239 | { | 210 | { |
240 | KTRACE_ENTER(ip, VNODE_KTRACE_RELE, file, line, ra); | 211 | KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra); |
241 | } | 212 | } |
242 | #endif /* XFS_VNODE_TRACE */ | 213 | #endif /* XFS_INODE_TRACE */ |
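The rewritten vn_revalidate() maps on-disk XFS_XFLAG_* bits onto the generic S_IMMUTABLE/S_APPEND/S_SYNC/S_NOATIME inode flags with repeated if/else pairs while holding XFS_ILOCK_SHARED. A hypothetical helper (not part of the patch) showing the single set-or-clear step it repeats:

```c
#include <linux/fs.h>

/* Hypothetical helper: set or clear one S_* bit in i_flags depending on
 * whether the matching XFS_XFLAG_* bit is present in xflags. */
static inline void map_iflag(struct inode *inode, unsigned long xflag_mask,
			     unsigned long xflags, unsigned int s_flag)
{
	if (xflags & xflag_mask)
		inode->i_flags |= s_flag;
	else
		inode->i_flags &= ~s_flag;
}

/* Usage, mirroring the body of vn_revalidate() above:
 *	map_iflag(inode, XFS_XFLAG_IMMUTABLE, xflags, S_IMMUTABLE);
 *	map_iflag(inode, XFS_XFLAG_APPEND,    xflags, S_APPEND);
 */
```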
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 55fb46948589..b5ea418693b1 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h | |||
@@ -187,10 +187,7 @@ typedef struct bhv_vattr { | |||
187 | (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) | 187 | (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) |
188 | 188 | ||
189 | extern void vn_init(void); | 189 | extern void vn_init(void); |
190 | extern bhv_vnode_t *vn_initialize(struct inode *); | ||
191 | extern int vn_revalidate(bhv_vnode_t *); | 190 | extern int vn_revalidate(bhv_vnode_t *); |
192 | extern int __vn_revalidate(bhv_vnode_t *, bhv_vattr_t *); | ||
193 | extern void vn_revalidate_core(bhv_vnode_t *, bhv_vattr_t *); | ||
194 | 191 | ||
195 | /* | 192 | /* |
196 | * Yeah, these don't take vnode anymore at all, all this should be | 193 | * Yeah, these don't take vnode anymore at all, all this should be |
@@ -210,12 +207,12 @@ static inline int vn_count(bhv_vnode_t *vp) | |||
210 | */ | 207 | */ |
211 | extern bhv_vnode_t *vn_hold(bhv_vnode_t *); | 208 | extern bhv_vnode_t *vn_hold(bhv_vnode_t *); |
212 | 209 | ||
213 | #if defined(XFS_VNODE_TRACE) | 210 | #if defined(XFS_INODE_TRACE) |
214 | #define VN_HOLD(vp) \ | 211 | #define VN_HOLD(vp) \ |
215 | ((void)vn_hold(vp), \ | 212 | ((void)vn_hold(vp), \ |
216 | vn_trace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) | 213 | xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) |
217 | #define VN_RELE(vp) \ | 214 | #define VN_RELE(vp) \ |
218 | (vn_trace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ | 215 | (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ |
219 | iput(vn_to_inode(vp))) | 216 | iput(vn_to_inode(vp))) |
220 | #else | 217 | #else |
221 | #define VN_HOLD(vp) ((void)vn_hold(vp)) | 218 | #define VN_HOLD(vp) ((void)vn_hold(vp)) |
@@ -238,11 +235,6 @@ static inline bhv_vnode_t *vn_grab(bhv_vnode_t *vp) | |||
238 | /* | 235 | /* |
239 | * Dealing with bad inodes | 236 | * Dealing with bad inodes |
240 | */ | 237 | */ |
241 | static inline void vn_mark_bad(bhv_vnode_t *vp) | ||
242 | { | ||
243 | make_bad_inode(vn_to_inode(vp)); | ||
244 | } | ||
245 | |||
246 | static inline int VN_BAD(bhv_vnode_t *vp) | 238 | static inline int VN_BAD(bhv_vnode_t *vp) |
247 | { | 239 | { |
248 | return is_bad_inode(vn_to_inode(vp)); | 240 | return is_bad_inode(vn_to_inode(vp)); |
@@ -296,26 +288,36 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt) | |||
296 | /* | 288 | /* |
297 | * Tracking vnode activity. | 289 | * Tracking vnode activity. |
298 | */ | 290 | */ |
299 | #if defined(XFS_VNODE_TRACE) | 291 | #if defined(XFS_INODE_TRACE) |
300 | 292 | ||
301 | #define VNODE_TRACE_SIZE 16 /* number of trace entries */ | 293 | #define INODE_TRACE_SIZE 16 /* number of trace entries */ |
302 | #define VNODE_KTRACE_ENTRY 1 | 294 | #define INODE_KTRACE_ENTRY 1 |
303 | #define VNODE_KTRACE_EXIT 2 | 295 | #define INODE_KTRACE_EXIT 2 |
304 | #define VNODE_KTRACE_HOLD 3 | 296 | #define INODE_KTRACE_HOLD 3 |
305 | #define VNODE_KTRACE_REF 4 | 297 | #define INODE_KTRACE_REF 4 |
306 | #define VNODE_KTRACE_RELE 5 | 298 | #define INODE_KTRACE_RELE 5 |
307 | 299 | ||
308 | extern void vn_trace_entry(struct xfs_inode *, const char *, inst_t *); | 300 | extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *); |
309 | extern void vn_trace_exit(struct xfs_inode *, const char *, inst_t *); | 301 | extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *); |
310 | extern void vn_trace_hold(struct xfs_inode *, char *, int, inst_t *); | 302 | extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); |
311 | extern void vn_trace_ref(struct xfs_inode *, char *, int, inst_t *); | 303 | extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); |
312 | extern void vn_trace_rele(struct xfs_inode *, char *, int, inst_t *); | 304 | extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); |
305 | #define xfs_itrace_entry(ip) \ | ||
306 | _xfs_itrace_entry(ip, __FUNCTION__, (inst_t *)__return_address) | ||
307 | #define xfs_itrace_exit(ip) \ | ||
308 | _xfs_itrace_exit(ip, __FUNCTION__, (inst_t *)__return_address) | ||
309 | #define xfs_itrace_exit_tag(ip, tag) \ | ||
310 | _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) | ||
311 | #define xfs_itrace_ref(ip) \ | ||
312 | _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address) | ||
313 | |||
313 | #else | 314 | #else |
314 | #define vn_trace_entry(a,b,c) | 315 | #define xfs_itrace_entry(a) |
315 | #define vn_trace_exit(a,b,c) | 316 | #define xfs_itrace_exit(a) |
316 | #define vn_trace_hold(a,b,c,d) | 317 | #define xfs_itrace_exit_tag(a, b) |
317 | #define vn_trace_ref(a,b,c,d) | 318 | #define xfs_itrace_hold(a, b, c, d) |
318 | #define vn_trace_rele(a,b,c,d) | 319 | #define xfs_itrace_ref(a) |
320 | #define xfs_itrace_rele(a, b, c, d) | ||
319 | #endif | 321 | #endif |
320 | 322 | ||
321 | #endif /* __XFS_VNODE_H__ */ | 323 | #endif /* __XFS_VNODE_H__ */ |
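The renamed xfs_itrace_* hooks follow the usual compile-out pattern: when XFS_INODE_TRACE is configured the macros expand to real calls that record the caller, otherwise they expand to nothing. A generic sketch of that pattern (all names here are placeholders):

```c
/* Generic sketch of the compile-out tracing pattern used for xfs_itrace_*:
 * real calls when tracing is configured, empty macros otherwise. */
#ifdef MY_TRACE
extern void _my_trace_entry(void *obj, const char *func, void *ra);
#define my_trace_entry(obj) \
	_my_trace_entry(obj, __FUNCTION__, __builtin_return_address(0))
#else
#define my_trace_entry(obj)	do { } while (0)
#endif
```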
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index cfdd35ee9f7a..665babcca6a6 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
@@ -1209,7 +1209,6 @@ xfs_qm_dqflush( | |||
1209 | xfs_buf_t *bp; | 1209 | xfs_buf_t *bp; |
1210 | xfs_disk_dquot_t *ddqp; | 1210 | xfs_disk_dquot_t *ddqp; |
1211 | int error; | 1211 | int error; |
1212 | SPLDECL(s); | ||
1213 | 1212 | ||
1214 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 1213 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
1215 | ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); | 1214 | ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); |
@@ -1270,9 +1269,9 @@ xfs_qm_dqflush( | |||
1270 | mp = dqp->q_mount; | 1269 | mp = dqp->q_mount; |
1271 | 1270 | ||
1272 | /* lsn is 64 bits */ | 1271 | /* lsn is 64 bits */ |
1273 | AIL_LOCK(mp, s); | 1272 | spin_lock(&mp->m_ail_lock); |
1274 | dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; | 1273 | dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; |
1275 | AIL_UNLOCK(mp, s); | 1274 | spin_unlock(&mp->m_ail_lock); |
1276 | 1275 | ||
1277 | /* | 1276 | /* |
1278 | * Attach an iodone routine so that we can remove this dquot from the | 1277 | * Attach an iodone routine so that we can remove this dquot from the |
@@ -1318,7 +1317,6 @@ xfs_qm_dqflush_done( | |||
1318 | xfs_dq_logitem_t *qip) | 1317 | xfs_dq_logitem_t *qip) |
1319 | { | 1318 | { |
1320 | xfs_dquot_t *dqp; | 1319 | xfs_dquot_t *dqp; |
1321 | SPLDECL(s); | ||
1322 | 1320 | ||
1323 | dqp = qip->qli_dquot; | 1321 | dqp = qip->qli_dquot; |
1324 | 1322 | ||
@@ -1333,15 +1331,15 @@ xfs_qm_dqflush_done( | |||
1333 | if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && | 1331 | if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && |
1334 | qip->qli_item.li_lsn == qip->qli_flush_lsn) { | 1332 | qip->qli_item.li_lsn == qip->qli_flush_lsn) { |
1335 | 1333 | ||
1336 | AIL_LOCK(dqp->q_mount, s); | 1334 | spin_lock(&dqp->q_mount->m_ail_lock); |
1337 | /* | 1335 | /* |
1338 | * xfs_trans_delete_ail() drops the AIL lock. | 1336 | * xfs_trans_delete_ail() drops the AIL lock. |
1339 | */ | 1337 | */ |
1340 | if (qip->qli_item.li_lsn == qip->qli_flush_lsn) | 1338 | if (qip->qli_item.li_lsn == qip->qli_flush_lsn) |
1341 | xfs_trans_delete_ail(dqp->q_mount, | 1339 | xfs_trans_delete_ail(dqp->q_mount, |
1342 | (xfs_log_item_t*)qip, s); | 1340 | (xfs_log_item_t*)qip); |
1343 | else | 1341 | else |
1344 | AIL_UNLOCK(dqp->q_mount, s); | 1342 | spin_unlock(&dqp->q_mount->m_ail_lock); |
1345 | } | 1343 | } |
1346 | 1344 | ||
1347 | /* | 1345 | /* |
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index 78d3ab95c5fd..5c371a92e3e2 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h | |||
@@ -123,11 +123,6 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) | |||
123 | vsema(&((dqp)->q_flock)); \ | 123 | vsema(&((dqp)->q_flock)); \ |
124 | (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } | 124 | (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } |
125 | 125 | ||
126 | #define XFS_DQ_PINLOCK(dqp) mutex_spinlock( \ | ||
127 | &(XFS_DQ_TO_QINF(dqp)->qi_pinlock)) | ||
128 | #define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \ | ||
129 | &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s) | ||
130 | |||
131 | #define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock))) | 126 | #define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock))) |
132 | #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) | 127 | #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) |
133 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) | 128 | #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) |
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index ddb61fe22a5c..1800e8d1f646 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c | |||
@@ -94,14 +94,13 @@ STATIC void | |||
94 | xfs_qm_dquot_logitem_pin( | 94 | xfs_qm_dquot_logitem_pin( |
95 | xfs_dq_logitem_t *logitem) | 95 | xfs_dq_logitem_t *logitem) |
96 | { | 96 | { |
97 | unsigned long s; | ||
98 | xfs_dquot_t *dqp; | 97 | xfs_dquot_t *dqp; |
99 | 98 | ||
100 | dqp = logitem->qli_dquot; | 99 | dqp = logitem->qli_dquot; |
101 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 100 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
102 | s = XFS_DQ_PINLOCK(dqp); | 101 | spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
103 | dqp->q_pincount++; | 102 | dqp->q_pincount++; |
104 | XFS_DQ_PINUNLOCK(dqp, s); | 103 | spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
105 | } | 104 | } |
106 | 105 | ||
107 | /* | 106 | /* |
@@ -115,17 +114,16 @@ xfs_qm_dquot_logitem_unpin( | |||
115 | xfs_dq_logitem_t *logitem, | 114 | xfs_dq_logitem_t *logitem, |
116 | int stale) | 115 | int stale) |
117 | { | 116 | { |
118 | unsigned long s; | ||
119 | xfs_dquot_t *dqp; | 117 | xfs_dquot_t *dqp; |
120 | 118 | ||
121 | dqp = logitem->qli_dquot; | 119 | dqp = logitem->qli_dquot; |
122 | ASSERT(dqp->q_pincount > 0); | 120 | ASSERT(dqp->q_pincount > 0); |
123 | s = XFS_DQ_PINLOCK(dqp); | 121 | spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
124 | dqp->q_pincount--; | 122 | dqp->q_pincount--; |
125 | if (dqp->q_pincount == 0) { | 123 | if (dqp->q_pincount == 0) { |
126 | sv_broadcast(&dqp->q_pinwait); | 124 | sv_broadcast(&dqp->q_pinwait); |
127 | } | 125 | } |
128 | XFS_DQ_PINUNLOCK(dqp, s); | 126 | spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
129 | } | 127 | } |
130 | 128 | ||
131 | /* ARGSUSED */ | 129 | /* ARGSUSED */ |
@@ -189,8 +187,6 @@ void | |||
189 | xfs_qm_dqunpin_wait( | 187 | xfs_qm_dqunpin_wait( |
190 | xfs_dquot_t *dqp) | 188 | xfs_dquot_t *dqp) |
191 | { | 189 | { |
192 | SPLDECL(s); | ||
193 | |||
194 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 190 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
195 | if (dqp->q_pincount == 0) { | 191 | if (dqp->q_pincount == 0) { |
196 | return; | 192 | return; |
@@ -200,9 +196,9 @@ xfs_qm_dqunpin_wait( | |||
200 | * Give the log a push so we don't wait here too long. | 196 | * Give the log a push so we don't wait here too long. |
201 | */ | 197 | */ |
202 | xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); | 198 | xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); |
203 | s = XFS_DQ_PINLOCK(dqp); | 199 | spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
204 | if (dqp->q_pincount == 0) { | 200 | if (dqp->q_pincount == 0) { |
205 | XFS_DQ_PINUNLOCK(dqp, s); | 201 | spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); |
206 | return; | 202 | return; |
207 | } | 203 | } |
208 | sv_wait(&(dqp->q_pinwait), PINOD, | 204 | sv_wait(&(dqp->q_pinwait), PINOD, |
@@ -216,8 +212,8 @@ xfs_qm_dqunpin_wait( | |||
216 | * If so, we want to push it out to help us take this item off the AIL as soon | 212 | * If so, we want to push it out to help us take this item off the AIL as soon |
217 | * as possible. | 213 | * as possible. |
218 | * | 214 | * |
219 | * We must not be holding the AIL_LOCK at this point. Calling incore() to | 215 | * We must not be holding the AIL lock at this point. Calling incore() to |
220 | * search the buffer cache can be a time consuming thing, and AIL_LOCK is a | 216 | * search the buffer cache can be a time consuming thing, and AIL lock is a |
221 | * spinlock. | 217 | * spinlock. |
222 | */ | 218 | */ |
223 | STATIC void | 219 | STATIC void |
@@ -322,7 +318,7 @@ xfs_qm_dquot_logitem_trylock( | |||
322 | * want to do that now since we might sleep in the device | 318 | * want to do that now since we might sleep in the device |
323 | * strategy routine. We also don't want to grab the buffer lock | 319 | * strategy routine. We also don't want to grab the buffer lock |
324 | * here because we'd like not to call into the buffer cache | 320 | * here because we'd like not to call into the buffer cache |
325 | * while holding the AIL_LOCK. | 321 | * while holding the AIL lock. |
326 | * Make sure to only return PUSHBUF if we set pushbuf_flag | 322 | * Make sure to only return PUSHBUF if we set pushbuf_flag |
327 | * ourselves. If someone else is doing it then we don't | 323 | * ourselves. If someone else is doing it then we don't |
328 | * want to go to the push routine and duplicate their efforts. | 324 | * want to go to the push routine and duplicate their efforts. |
@@ -562,15 +558,14 @@ xfs_qm_qoffend_logitem_committed( | |||
562 | xfs_lsn_t lsn) | 558 | xfs_lsn_t lsn) |
563 | { | 559 | { |
564 | xfs_qoff_logitem_t *qfs; | 560 | xfs_qoff_logitem_t *qfs; |
565 | SPLDECL(s); | ||
566 | 561 | ||
567 | qfs = qfe->qql_start_lip; | 562 | qfs = qfe->qql_start_lip; |
568 | AIL_LOCK(qfs->qql_item.li_mountp,s); | 563 | spin_lock(&qfs->qql_item.li_mountp->m_ail_lock); |
569 | /* | 564 | /* |
570 | * Delete the qoff-start logitem from the AIL. | 565 | * Delete the qoff-start logitem from the AIL. |
571 | * xfs_trans_delete_ail() drops the AIL lock. | 566 | * xfs_trans_delete_ail() drops the AIL lock. |
572 | */ | 567 | */ |
573 | xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s); | 568 | xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs); |
574 | kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); | 569 | kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); |
575 | kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); | 570 | kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); |
576 | return (xfs_lsn_t)-1; | 571 | return (xfs_lsn_t)-1; |
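The dquot pin/unpin hunks above drop the IRIX-era SPLDECL()/XFS_DQ_PINLOCK() wrappers in favour of plain spin_lock()/spin_unlock() on a spinlock_t. Stripped of the quota specifics, the counting pattern looks roughly like this (structure and names are illustrative; the lock would be initialised with spin_lock_init() at setup, as xfs_qm_init_quotainfo() now does):

```c
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sketch of the pin/unpin counting above using a bare spinlock_t instead
 * of the removed lock_t/SPLDECL wrappers. */
struct pinned_obj {
	spinlock_t		pinlock;	/* spin_lock_init() at setup */
	unsigned int		pincount;
	wait_queue_head_t	pinwait;
};

static void obj_pin(struct pinned_obj *obj)
{
	spin_lock(&obj->pinlock);
	obj->pincount++;
	spin_unlock(&obj->pinlock);
}

static void obj_unpin(struct pinned_obj *obj)
{
	spin_lock(&obj->pinlock);
	if (--obj->pincount == 0)
		wake_up_all(&obj->pinwait);	/* like sv_broadcast() above */
	spin_unlock(&obj->pinlock);
}
```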
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index d488645f833d..1f3da5b8657b 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -310,7 +310,6 @@ xfs_qm_mount_quotas( | |||
310 | xfs_mount_t *mp, | 310 | xfs_mount_t *mp, |
311 | int mfsi_flags) | 311 | int mfsi_flags) |
312 | { | 312 | { |
313 | unsigned long s; | ||
314 | int error = 0; | 313 | int error = 0; |
315 | uint sbf; | 314 | uint sbf; |
316 | 315 | ||
@@ -367,13 +366,13 @@ xfs_qm_mount_quotas( | |||
367 | 366 | ||
368 | write_changes: | 367 | write_changes: |
369 | /* | 368 | /* |
370 | * We actually don't have to acquire the SB_LOCK at all. | 369 | * We actually don't have to acquire the m_sb_lock at all. |
371 | * This can only be called from mount, and that's single threaded. XXX | 370 | * This can only be called from mount, and that's single threaded. XXX |
372 | */ | 371 | */ |
373 | s = XFS_SB_LOCK(mp); | 372 | spin_lock(&mp->m_sb_lock); |
374 | sbf = mp->m_sb.sb_qflags; | 373 | sbf = mp->m_sb.sb_qflags; |
375 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; | 374 | mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; |
376 | XFS_SB_UNLOCK(mp, s); | 375 | spin_unlock(&mp->m_sb_lock); |
377 | 376 | ||
378 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { | 377 | if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { |
379 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { | 378 | if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { |
@@ -1139,7 +1138,7 @@ xfs_qm_init_quotainfo( | |||
1139 | return error; | 1138 | return error; |
1140 | } | 1139 | } |
1141 | 1140 | ||
1142 | spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin"); | 1141 | spin_lock_init(&qinf->qi_pinlock); |
1143 | xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); | 1142 | xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); |
1144 | qinf->qi_dqreclaims = 0; | 1143 | qinf->qi_dqreclaims = 0; |
1145 | 1144 | ||
@@ -1370,7 +1369,6 @@ xfs_qm_qino_alloc( | |||
1370 | { | 1369 | { |
1371 | xfs_trans_t *tp; | 1370 | xfs_trans_t *tp; |
1372 | int error; | 1371 | int error; |
1373 | unsigned long s; | ||
1374 | int committed; | 1372 | int committed; |
1375 | 1373 | ||
1376 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); | 1374 | tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); |
@@ -1402,7 +1400,7 @@ xfs_qm_qino_alloc( | |||
1402 | * sbfields arg may contain fields other than *QUOTINO; | 1400 | * sbfields arg may contain fields other than *QUOTINO; |
1403 | * VERSIONNUM for example. | 1401 | * VERSIONNUM for example. |
1404 | */ | 1402 | */ |
1405 | s = XFS_SB_LOCK(mp); | 1403 | spin_lock(&mp->m_sb_lock); |
1406 | if (flags & XFS_QMOPT_SBVERSION) { | 1404 | if (flags & XFS_QMOPT_SBVERSION) { |
1407 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) | 1405 | #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) |
1408 | unsigned oldv = mp->m_sb.sb_versionnum; | 1406 | unsigned oldv = mp->m_sb.sb_versionnum; |
@@ -1429,7 +1427,7 @@ xfs_qm_qino_alloc( | |||
1429 | mp->m_sb.sb_uquotino = (*ip)->i_ino; | 1427 | mp->m_sb.sb_uquotino = (*ip)->i_ino; |
1430 | else | 1428 | else |
1431 | mp->m_sb.sb_gquotino = (*ip)->i_ino; | 1429 | mp->m_sb.sb_gquotino = (*ip)->i_ino; |
1432 | XFS_SB_UNLOCK(mp, s); | 1430 | spin_unlock(&mp->m_sb_lock); |
1433 | xfs_mod_sb(tp, sbfields); | 1431 | xfs_mod_sb(tp, sbfields); |
1434 | 1432 | ||
1435 | if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { | 1433 | if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { |
@@ -1650,14 +1648,14 @@ xfs_qm_quotacheck_dqadjust( | |||
1650 | * Adjust the inode count and the block count to reflect this inode's | 1648 | * Adjust the inode count and the block count to reflect this inode's |
1651 | * resource usage. | 1649 | * resource usage. |
1652 | */ | 1650 | */ |
1653 | be64_add(&dqp->q_core.d_icount, 1); | 1651 | be64_add_cpu(&dqp->q_core.d_icount, 1); |
1654 | dqp->q_res_icount++; | 1652 | dqp->q_res_icount++; |
1655 | if (nblks) { | 1653 | if (nblks) { |
1656 | be64_add(&dqp->q_core.d_bcount, nblks); | 1654 | be64_add_cpu(&dqp->q_core.d_bcount, nblks); |
1657 | dqp->q_res_bcount += nblks; | 1655 | dqp->q_res_bcount += nblks; |
1658 | } | 1656 | } |
1659 | if (rtblks) { | 1657 | if (rtblks) { |
1660 | be64_add(&dqp->q_core.d_rtbcount, rtblks); | 1658 | be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); |
1661 | dqp->q_res_rtbcount += rtblks; | 1659 | dqp->q_res_rtbcount += rtblks; |
1662 | } | 1660 | } |
1663 | 1661 | ||
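be64_add() becomes be64_add_cpu(), which adds a CPU-order delta to a field that stays in big-endian on-disk order. Its effect is equivalent to the conversion round trip below (written as a separately named sketch rather than the kernel's own definition):

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch of what be64_add_cpu(&var, delta) amounts to for a big-endian
 * on-disk field: convert to CPU order, add, convert back. */
static inline void be64_add_cpu_sketch(__be64 *var, s64 delta)
{
	*var = cpu_to_be64(be64_to_cpu(*var) + delta);
}
```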
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index 23ccaa5fceaf..baf537c1c177 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h | |||
@@ -52,8 +52,8 @@ extern kmem_zone_t *qm_dqtrxzone; | |||
52 | /* | 52 | /* |
53 | * Dquot hashtable constants/threshold values. | 53 | * Dquot hashtable constants/threshold values. |
54 | */ | 54 | */ |
55 | #define XFS_QM_HASHSIZE_LOW (NBPP / sizeof(xfs_dqhash_t)) | 55 | #define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) |
56 | #define XFS_QM_HASHSIZE_HIGH ((NBPP * 4) / sizeof(xfs_dqhash_t)) | 56 | #define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t)) |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * This defines the unit of allocation of dquots. | 59 | * This defines the unit of allocation of dquots. |
@@ -106,7 +106,7 @@ typedef struct xfs_qm { | |||
106 | typedef struct xfs_quotainfo { | 106 | typedef struct xfs_quotainfo { |
107 | xfs_inode_t *qi_uquotaip; /* user quota inode */ | 107 | xfs_inode_t *qi_uquotaip; /* user quota inode */ |
108 | xfs_inode_t *qi_gquotaip; /* group quota inode */ | 108 | xfs_inode_t *qi_gquotaip; /* group quota inode */ |
109 | lock_t qi_pinlock; /* dquot pinning mutex */ | 109 | spinlock_t qi_pinlock; /* dquot pinning lock */ |
110 | xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ | 110 | xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ |
111 | int qi_dqreclaims; /* a change here indicates | 111 | int qi_dqreclaims; /* a change here indicates |
112 | a removal in the dqlist */ | 112 | a removal in the dqlist */ |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index ad5579d4eac4..2cc5886cfe85 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -200,7 +200,6 @@ xfs_qm_scall_quotaoff( | |||
200 | boolean_t force) | 200 | boolean_t force) |
201 | { | 201 | { |
202 | uint dqtype; | 202 | uint dqtype; |
203 | unsigned long s; | ||
204 | int error; | 203 | int error; |
205 | uint inactivate_flags; | 204 | uint inactivate_flags; |
206 | xfs_qoff_logitem_t *qoffstart; | 205 | xfs_qoff_logitem_t *qoffstart; |
@@ -237,9 +236,9 @@ xfs_qm_scall_quotaoff( | |||
237 | if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { | 236 | if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { |
238 | mp->m_qflags &= ~(flags); | 237 | mp->m_qflags &= ~(flags); |
239 | 238 | ||
240 | s = XFS_SB_LOCK(mp); | 239 | spin_lock(&mp->m_sb_lock); |
241 | mp->m_sb.sb_qflags = mp->m_qflags; | 240 | mp->m_sb.sb_qflags = mp->m_qflags; |
242 | XFS_SB_UNLOCK(mp, s); | 241 | spin_unlock(&mp->m_sb_lock); |
243 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); | 242 | mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); |
244 | 243 | ||
245 | /* XXX what to do if error ? Revert back to old vals incore ? */ | 244 | /* XXX what to do if error ? Revert back to old vals incore ? */ |
@@ -415,7 +414,6 @@ xfs_qm_scall_quotaon( | |||
415 | uint flags) | 414 | uint flags) |
416 | { | 415 | { |
417 | int error; | 416 | int error; |
418 | unsigned long s; | ||
419 | uint qf; | 417 | uint qf; |
420 | uint accflags; | 418 | uint accflags; |
421 | __int64_t sbflags; | 419 | __int64_t sbflags; |
@@ -468,10 +466,10 @@ xfs_qm_scall_quotaon( | |||
468 | * Change sb_qflags on disk but not incore mp->qflags | 466 | * Change sb_qflags on disk but not incore mp->qflags |
469 | * if this is the root filesystem. | 467 | * if this is the root filesystem. |
470 | */ | 468 | */ |
471 | s = XFS_SB_LOCK(mp); | 469 | spin_lock(&mp->m_sb_lock); |
472 | qf = mp->m_sb.sb_qflags; | 470 | qf = mp->m_sb.sb_qflags; |
473 | mp->m_sb.sb_qflags = qf | flags; | 471 | mp->m_sb.sb_qflags = qf | flags; |
474 | XFS_SB_UNLOCK(mp, s); | 472 | spin_unlock(&mp->m_sb_lock); |
475 | 473 | ||
476 | /* | 474 | /* |
477 | * There's nothing to change if it's the same. | 475 | * There's nothing to change if it's the same. |
@@ -815,7 +813,6 @@ xfs_qm_log_quotaoff( | |||
815 | { | 813 | { |
816 | xfs_trans_t *tp; | 814 | xfs_trans_t *tp; |
817 | int error; | 815 | int error; |
818 | unsigned long s; | ||
819 | xfs_qoff_logitem_t *qoffi=NULL; | 816 | xfs_qoff_logitem_t *qoffi=NULL; |
820 | uint oldsbqflag=0; | 817 | uint oldsbqflag=0; |
821 | 818 | ||
@@ -832,10 +829,10 @@ xfs_qm_log_quotaoff( | |||
832 | qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); | 829 | qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); |
833 | xfs_trans_log_quotaoff_item(tp, qoffi); | 830 | xfs_trans_log_quotaoff_item(tp, qoffi); |
834 | 831 | ||
835 | s = XFS_SB_LOCK(mp); | 832 | spin_lock(&mp->m_sb_lock); |
836 | oldsbqflag = mp->m_sb.sb_qflags; | 833 | oldsbqflag = mp->m_sb.sb_qflags; |
837 | mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; | 834 | mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; |
838 | XFS_SB_UNLOCK(mp, s); | 835 | spin_unlock(&mp->m_sb_lock); |
839 | 836 | ||
840 | xfs_mod_sb(tp, XFS_SB_QFLAGS); | 837 | xfs_mod_sb(tp, XFS_SB_QFLAGS); |
841 | 838 | ||
@@ -854,9 +851,9 @@ error0: | |||
854 | * No one else is modifying sb_qflags, so this is OK. | 851 | * No one else is modifying sb_qflags, so this is OK. |
855 | * We still hold the quotaofflock. | 852 | * We still hold the quotaofflock. |
856 | */ | 853 | */ |
857 | s = XFS_SB_LOCK(mp); | 854 | spin_lock(&mp->m_sb_lock); |
858 | mp->m_sb.sb_qflags = oldsbqflag; | 855 | mp->m_sb.sb_qflags = oldsbqflag; |
859 | XFS_SB_UNLOCK(mp, s); | 856 | spin_unlock(&mp->m_sb_lock); |
860 | } | 857 | } |
861 | *qoffstartp = qoffi; | 858 | *qoffstartp = qoffi; |
862 | return (error); | 859 | return (error); |
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 7de6874bf1b8..f441f836ca8b 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c | |||
@@ -421,13 +421,13 @@ xfs_trans_apply_dquot_deltas( | |||
421 | (xfs_qcnt_t) -qtrx->qt_icount_delta); | 421 | (xfs_qcnt_t) -qtrx->qt_icount_delta); |
422 | #endif | 422 | #endif |
423 | if (totalbdelta) | 423 | if (totalbdelta) |
424 | be64_add(&d->d_bcount, (xfs_qcnt_t)totalbdelta); | 424 | be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); |
425 | 425 | ||
426 | if (qtrx->qt_icount_delta) | 426 | if (qtrx->qt_icount_delta) |
427 | be64_add(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); | 427 | be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); |
428 | 428 | ||
429 | if (totalrtbdelta) | 429 | if (totalrtbdelta) |
430 | be64_add(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); | 430 | be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); |
431 | 431 | ||
432 | /* | 432 | /* |
433 | * Get any default limits in use. | 433 | * Get any default limits in use. |
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index f45a49ffd3a3..c27abef7b84f 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c | |||
@@ -17,7 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | #include <xfs.h> | 18 | #include <xfs.h> |
19 | #include "debug.h" | 19 | #include "debug.h" |
20 | #include "spin.h" | ||
21 | 20 | ||
22 | static char message[1024]; /* keep it off the stack */ | 21 | static char message[1024]; /* keep it off the stack */ |
23 | static DEFINE_SPINLOCK(xfs_err_lock); | 22 | static DEFINE_SPINLOCK(xfs_err_lock); |
@@ -81,3 +80,9 @@ assfail(char *expr, char *file, int line) | |||
81 | printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); | 80 | printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); |
82 | BUG(); | 81 | BUG(); |
83 | } | 82 | } |
83 | |||
84 | void | ||
85 | xfs_hex_dump(void *p, int length) | ||
86 | { | ||
87 | print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1); | ||
88 | } | ||
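The new xfs_hex_dump() is a thin wrapper around the generic print_hex_dump() helper, printing 16 bytes per line with offsets and an ASCII column. A usage sketch with an illustrative prefix, level, and length:

```c
#include <linux/kernel.h>

/* Sketch: dump 64 bytes of a buffer, 16 bytes per line, with offsets and
 * an ASCII column; same call shape as the new xfs_hex_dump() above. */
static void dump_buffer(const void *buf)
{
	print_hex_dump(KERN_DEBUG, "xfs: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, 64, 1);
}
```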
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c index 5cf2e86caa71..129067cfcb86 100644 --- a/fs/xfs/support/ktrace.c +++ b/fs/xfs/support/ktrace.c | |||
@@ -21,7 +21,7 @@ static kmem_zone_t *ktrace_hdr_zone; | |||
21 | static kmem_zone_t *ktrace_ent_zone; | 21 | static kmem_zone_t *ktrace_ent_zone; |
22 | static int ktrace_zentries; | 22 | static int ktrace_zentries; |
23 | 23 | ||
24 | void | 24 | void __init |
25 | ktrace_init(int zentries) | 25 | ktrace_init(int zentries) |
26 | { | 26 | { |
27 | ktrace_zentries = zentries; | 27 | ktrace_zentries = zentries; |
@@ -36,7 +36,7 @@ ktrace_init(int zentries) | |||
36 | ASSERT(ktrace_ent_zone); | 36 | ASSERT(ktrace_ent_zone); |
37 | } | 37 | } |
38 | 38 | ||
39 | void | 39 | void __exit |
40 | ktrace_uninit(void) | 40 | ktrace_uninit(void) |
41 | { | 41 | { |
42 | kmem_zone_destroy(ktrace_hdr_zone); | 42 | kmem_zone_destroy(ktrace_hdr_zone); |
@@ -90,8 +90,6 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep) | |||
90 | return NULL; | 90 | return NULL; |
91 | } | 91 | } |
92 | 92 | ||
93 | spinlock_init(&(ktp->kt_lock), "kt_lock"); | ||
94 | |||
95 | ktp->kt_entries = ktep; | 93 | ktp->kt_entries = ktep; |
96 | ktp->kt_nentries = nentries; | 94 | ktp->kt_nentries = nentries; |
97 | ktp->kt_index = 0; | 95 | ktp->kt_index = 0; |
@@ -114,8 +112,6 @@ ktrace_free(ktrace_t *ktp) | |||
114 | if (ktp == (ktrace_t *)NULL) | 112 | if (ktp == (ktrace_t *)NULL) |
115 | return; | 113 | return; |
116 | 114 | ||
117 | spinlock_destroy(&ktp->kt_lock); | ||
118 | |||
119 | /* | 115 | /* |
120 | * Special treatment for the Vnode trace buffer. | 116 | * Special treatment for the Vnode trace buffer. |
121 | */ | 117 | */ |
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h index 0d73216287c0..56e72b40a859 100644 --- a/fs/xfs/support/ktrace.h +++ b/fs/xfs/support/ktrace.h | |||
@@ -18,8 +18,6 @@ | |||
18 | #ifndef __XFS_SUPPORT_KTRACE_H__ | 18 | #ifndef __XFS_SUPPORT_KTRACE_H__ |
19 | #define __XFS_SUPPORT_KTRACE_H__ | 19 | #define __XFS_SUPPORT_KTRACE_H__ |
20 | 20 | ||
21 | #include <spin.h> | ||
22 | |||
23 | /* | 21 | /* |
24 | * Trace buffer entry structure. | 22 | * Trace buffer entry structure. |
25 | */ | 23 | */ |
@@ -31,7 +29,6 @@ typedef struct ktrace_entry { | |||
31 | * Trace buffer header structure. | 29 | * Trace buffer header structure. |
32 | */ | 30 | */ |
33 | typedef struct ktrace { | 31 | typedef struct ktrace { |
34 | lock_t kt_lock; /* mutex to guard counters */ | ||
35 | int kt_nentries; /* number of entries in trace buf */ | 32 | int kt_nentries; /* number of entries in trace buf */ |
36 | int kt_index; /* current index in entries */ | 33 | int kt_index; /* current index in entries */ |
37 | int kt_rollover; | 34 | int kt_rollover; |
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c index e157015c70ff..493a6ecf8590 100644 --- a/fs/xfs/support/uuid.c +++ b/fs/xfs/support/uuid.c | |||
@@ -133,7 +133,7 @@ uuid_table_remove(uuid_t *uuid) | |||
133 | mutex_unlock(&uuid_monitor); | 133 | mutex_unlock(&uuid_monitor); |
134 | } | 134 | } |
135 | 135 | ||
136 | void | 136 | void __init |
137 | uuid_init(void) | 137 | uuid_init(void) |
138 | { | 138 | { |
139 | mutex_init(&uuid_monitor); | 139 | mutex_init(&uuid_monitor); |
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index b5a7d92c6843..540e4c989825 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h | |||
@@ -37,7 +37,7 @@ | |||
37 | #define XFS_LOG_TRACE 1 | 37 | #define XFS_LOG_TRACE 1 |
38 | #define XFS_RW_TRACE 1 | 38 | #define XFS_RW_TRACE 1 |
39 | #define XFS_BUF_TRACE 1 | 39 | #define XFS_BUF_TRACE 1 |
40 | #define XFS_VNODE_TRACE 1 | 40 | #define XFS_INODE_TRACE 1 |
41 | #define XFS_FILESTREAMS_TRACE 1 | 41 | #define XFS_FILESTREAMS_TRACE 1 |
42 | #endif | 42 | #endif |
43 | 43 | ||
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 5bfb66f33caf..7272fe39a92d 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -392,32 +392,6 @@ xfs_acl_allow_set( | |||
392 | } | 392 | } |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * The access control process to determine the access permission: | ||
396 | * if uid == file owner id, use the file owner bits. | ||
397 | * if gid == file owner group id, use the file group bits. | ||
398 | * scan ACL for a matching user or group, and use matched entry | ||
399 | * permission. Use total permissions of all matching group entries, | ||
400 | * until all acl entries are exhausted. The final permission produced | ||
401 | * by matching acl entry or entries needs to be & with group permission. | ||
402 | * if not owner, owning group, or matching entry in ACL, use file | ||
403 | * other bits. | ||
404 | */ | ||
405 | STATIC int | ||
406 | xfs_acl_capability_check( | ||
407 | mode_t mode, | ||
408 | cred_t *cr) | ||
409 | { | ||
410 | if ((mode & ACL_READ) && !capable_cred(cr, CAP_DAC_READ_SEARCH)) | ||
411 | return EACCES; | ||
412 | if ((mode & ACL_WRITE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) | ||
413 | return EACCES; | ||
414 | if ((mode & ACL_EXECUTE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) | ||
415 | return EACCES; | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * Note: cr is only used here for the capability check if the ACL test fails. | 395 | * Note: cr is only used here for the capability check if the ACL test fails. |
422 | * It is not used to find out the credentials uid or groups etc, as was | 396 | * It is not used to find out the credentials uid or groups etc, as was |
423 | * done in IRIX. It is assumed that the uid and groups for the current | 397 | * done in IRIX. It is assumed that the uid and groups for the current |
@@ -438,7 +412,6 @@ xfs_acl_access( | |||
438 | 412 | ||
439 | matched.ae_tag = 0; /* Invalid type */ | 413 | matched.ae_tag = 0; /* Invalid type */ |
440 | matched.ae_perm = 0; | 414 | matched.ae_perm = 0; |
441 | md >>= 6; /* Normalize the bits for comparison */ | ||
442 | 415 | ||
443 | for (i = 0; i < fap->acl_cnt; i++) { | 416 | for (i = 0; i < fap->acl_cnt; i++) { |
444 | /* | 417 | /* |
@@ -520,7 +493,8 @@ xfs_acl_access( | |||
520 | break; | 493 | break; |
521 | } | 494 | } |
522 | 495 | ||
523 | return xfs_acl_capability_check(md, cr); | 496 | /* EACCES tells generic_permission to check for capability overrides */ |
497 | return EACCES; | ||
524 | } | 498 | } |
525 | 499 | ||
526 | /* | 500 | /* |
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 34b7d3391299..332a772461c4 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h | |||
@@ -75,7 +75,6 @@ extern int xfs_acl_vremove(bhv_vnode_t *, int); | |||
75 | #define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) | 75 | #define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) |
76 | #define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access | 76 | #define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access |
77 | #define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default | 77 | #define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default |
78 | #define _ACL_XFS_IACCESS(i,m,c) (XFS_IFORK_Q(i) ? xfs_acl_iaccess(i,m,c) : -1) | ||
79 | 78 | ||
80 | #define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) | 79 | #define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) |
81 | #define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0) | 80 | #define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0) |
@@ -95,7 +94,6 @@ extern int xfs_acl_vremove(bhv_vnode_t *, int); | |||
95 | #define _ACL_GET_DEFAULT(pv,pd) (0) | 94 | #define _ACL_GET_DEFAULT(pv,pd) (0) |
96 | #define _ACL_ACCESS_EXISTS (NULL) | 95 | #define _ACL_ACCESS_EXISTS (NULL) |
97 | #define _ACL_DEFAULT_EXISTS (NULL) | 96 | #define _ACL_DEFAULT_EXISTS (NULL) |
98 | #define _ACL_XFS_IACCESS(i,m,c) (-1) | ||
99 | #endif | 97 | #endif |
100 | 98 | ||
101 | #endif /* __XFS_ACL_H__ */ | 99 | #endif /* __XFS_ACL_H__ */ |
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 9381b0360c4b..61b292a9fb41 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
@@ -193,7 +193,7 @@ typedef struct xfs_perag | |||
193 | xfs_agino_t pagi_count; /* number of allocated inodes */ | 193 | xfs_agino_t pagi_count; /* number of allocated inodes */ |
194 | int pagb_count; /* pagb slots in use */ | 194 | int pagb_count; /* pagb slots in use */ |
195 | #ifdef __KERNEL__ | 195 | #ifdef __KERNEL__ |
196 | lock_t pagb_lock; /* lock for pagb_list */ | 196 | spinlock_t pagb_lock; /* lock for pagb_list */ |
197 | #endif | 197 | #endif |
198 | xfs_perag_busy_t *pagb_list; /* unstable blocks */ | 198 | xfs_perag_busy_t *pagb_list; /* unstable blocks */ |
199 | atomic_t pagf_fstrms; /* # of filestreams active in this AG */ | 199 | atomic_t pagf_fstrms; /* # of filestreams active in this AG */ |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 012a649a19c3..bdbfbbee4959 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -592,7 +592,7 @@ xfs_alloc_ag_vextent( | |||
592 | if (!(args->wasfromfl)) { | 592 | if (!(args->wasfromfl)) { |
593 | 593 | ||
594 | agf = XFS_BUF_TO_AGF(args->agbp); | 594 | agf = XFS_BUF_TO_AGF(args->agbp); |
595 | be32_add(&agf->agf_freeblks, -(args->len)); | 595 | be32_add_cpu(&agf->agf_freeblks, -(args->len)); |
596 | xfs_trans_agblocks_delta(args->tp, | 596 | xfs_trans_agblocks_delta(args->tp, |
597 | -((long)(args->len))); | 597 | -((long)(args->len))); |
598 | args->pag->pagf_freeblks -= args->len; | 598 | args->pag->pagf_freeblks -= args->len; |
@@ -1720,7 +1720,7 @@ xfs_free_ag_extent( | |||
1720 | 1720 | ||
1721 | agf = XFS_BUF_TO_AGF(agbp); | 1721 | agf = XFS_BUF_TO_AGF(agbp); |
1722 | pag = &mp->m_perag[agno]; | 1722 | pag = &mp->m_perag[agno]; |
1723 | be32_add(&agf->agf_freeblks, len); | 1723 | be32_add_cpu(&agf->agf_freeblks, len); |
1724 | xfs_trans_agblocks_delta(tp, len); | 1724 | xfs_trans_agblocks_delta(tp, len); |
1725 | pag->pagf_freeblks += len; | 1725 | pag->pagf_freeblks += len; |
1726 | XFS_WANT_CORRUPTED_GOTO( | 1726 | XFS_WANT_CORRUPTED_GOTO( |
@@ -2008,18 +2008,18 @@ xfs_alloc_get_freelist( | |||
2008 | * Get the block number and update the data structures. | 2008 | * Get the block number and update the data structures. |
2009 | */ | 2009 | */ |
2010 | bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]); | 2010 | bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]); |
2011 | be32_add(&agf->agf_flfirst, 1); | 2011 | be32_add_cpu(&agf->agf_flfirst, 1); |
2012 | xfs_trans_brelse(tp, agflbp); | 2012 | xfs_trans_brelse(tp, agflbp); |
2013 | if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) | 2013 | if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp)) |
2014 | agf->agf_flfirst = 0; | 2014 | agf->agf_flfirst = 0; |
2015 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; | 2015 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; |
2016 | be32_add(&agf->agf_flcount, -1); | 2016 | be32_add_cpu(&agf->agf_flcount, -1); |
2017 | xfs_trans_agflist_delta(tp, -1); | 2017 | xfs_trans_agflist_delta(tp, -1); |
2018 | pag->pagf_flcount--; | 2018 | pag->pagf_flcount--; |
2019 | 2019 | ||
2020 | logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; | 2020 | logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; |
2021 | if (btreeblk) { | 2021 | if (btreeblk) { |
2022 | be32_add(&agf->agf_btreeblks, 1); | 2022 | be32_add_cpu(&agf->agf_btreeblks, 1); |
2023 | pag->pagf_btreeblks++; | 2023 | pag->pagf_btreeblks++; |
2024 | logflags |= XFS_AGF_BTREEBLKS; | 2024 | logflags |= XFS_AGF_BTREEBLKS; |
2025 | } | 2025 | } |
@@ -2117,17 +2117,17 @@ xfs_alloc_put_freelist( | |||
2117 | be32_to_cpu(agf->agf_seqno), &agflbp))) | 2117 | be32_to_cpu(agf->agf_seqno), &agflbp))) |
2118 | return error; | 2118 | return error; |
2119 | agfl = XFS_BUF_TO_AGFL(agflbp); | 2119 | agfl = XFS_BUF_TO_AGFL(agflbp); |
2120 | be32_add(&agf->agf_fllast, 1); | 2120 | be32_add_cpu(&agf->agf_fllast, 1); |
2121 | if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) | 2121 | if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp)) |
2122 | agf->agf_fllast = 0; | 2122 | agf->agf_fllast = 0; |
2123 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; | 2123 | pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)]; |
2124 | be32_add(&agf->agf_flcount, 1); | 2124 | be32_add_cpu(&agf->agf_flcount, 1); |
2125 | xfs_trans_agflist_delta(tp, 1); | 2125 | xfs_trans_agflist_delta(tp, 1); |
2126 | pag->pagf_flcount++; | 2126 | pag->pagf_flcount++; |
2127 | 2127 | ||
2128 | logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; | 2128 | logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; |
2129 | if (btreeblk) { | 2129 | if (btreeblk) { |
2130 | be32_add(&agf->agf_btreeblks, -1); | 2130 | be32_add_cpu(&agf->agf_btreeblks, -1); |
2131 | pag->pagf_btreeblks--; | 2131 | pag->pagf_btreeblks--; |
2132 | logflags |= XFS_AGF_BTREEBLKS; | 2132 | logflags |= XFS_AGF_BTREEBLKS; |
2133 | } | 2133 | } |
@@ -2206,7 +2206,7 @@ xfs_alloc_read_agf( | |||
2206 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); | 2206 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); |
2207 | pag->pagf_levels[XFS_BTNUM_CNTi] = | 2207 | pag->pagf_levels[XFS_BTNUM_CNTi] = |
2208 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); | 2208 | be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); |
2209 | spinlock_init(&pag->pagb_lock, "xfspagb"); | 2209 | spin_lock_init(&pag->pagb_lock); |
2210 | pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * | 2210 | pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * |
2211 | sizeof(xfs_perag_busy_t), KM_SLEEP); | 2211 | sizeof(xfs_perag_busy_t), KM_SLEEP); |
2212 | pag->pagf_init = 1; | 2212 | pag->pagf_init = 1; |
@@ -2500,10 +2500,9 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, | |||
2500 | xfs_mount_t *mp; | 2500 | xfs_mount_t *mp; |
2501 | xfs_perag_busy_t *bsy; | 2501 | xfs_perag_busy_t *bsy; |
2502 | int n; | 2502 | int n; |
2503 | SPLDECL(s); | ||
2504 | 2503 | ||
2505 | mp = tp->t_mountp; | 2504 | mp = tp->t_mountp; |
2506 | s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); | 2505 | spin_lock(&mp->m_perag[agno].pagb_lock); |
2507 | 2506 | ||
2508 | /* search pagb_list for an open slot */ | 2507 | /* search pagb_list for an open slot */ |
2509 | for (bsy = mp->m_perag[agno].pagb_list, n = 0; | 2508 | for (bsy = mp->m_perag[agno].pagb_list, n = 0; |
@@ -2533,7 +2532,7 @@ xfs_alloc_mark_busy(xfs_trans_t *tp, | |||
2533 | xfs_trans_set_sync(tp); | 2532 | xfs_trans_set_sync(tp); |
2534 | } | 2533 | } |
2535 | 2534 | ||
2536 | mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); | 2535 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2537 | } | 2536 | } |
2538 | 2537 | ||
2539 | void | 2538 | void |
@@ -2543,11 +2542,10 @@ xfs_alloc_clear_busy(xfs_trans_t *tp, | |||
2543 | { | 2542 | { |
2544 | xfs_mount_t *mp; | 2543 | xfs_mount_t *mp; |
2545 | xfs_perag_busy_t *list; | 2544 | xfs_perag_busy_t *list; |
2546 | SPLDECL(s); | ||
2547 | 2545 | ||
2548 | mp = tp->t_mountp; | 2546 | mp = tp->t_mountp; |
2549 | 2547 | ||
2550 | s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); | 2548 | spin_lock(&mp->m_perag[agno].pagb_lock); |
2551 | list = mp->m_perag[agno].pagb_list; | 2549 | list = mp->m_perag[agno].pagb_list; |
2552 | 2550 | ||
2553 | ASSERT(idx < XFS_PAGB_NUM_SLOTS); | 2551 | ASSERT(idx < XFS_PAGB_NUM_SLOTS); |
@@ -2559,7 +2557,7 @@ xfs_alloc_clear_busy(xfs_trans_t *tp, | |||
2559 | TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); | 2557 | TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); |
2560 | } | 2558 | } |
2561 | 2559 | ||
2562 | mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); | 2560 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2563 | } | 2561 | } |
2564 | 2562 | ||
2565 | 2563 | ||
@@ -2578,11 +2576,10 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
2578 | xfs_agblock_t uend, bend; | 2576 | xfs_agblock_t uend, bend; |
2579 | xfs_lsn_t lsn; | 2577 | xfs_lsn_t lsn; |
2580 | int cnt; | 2578 | int cnt; |
2581 | SPLDECL(s); | ||
2582 | 2579 | ||
2583 | mp = tp->t_mountp; | 2580 | mp = tp->t_mountp; |
2584 | 2581 | ||
2585 | s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); | 2582 | spin_lock(&mp->m_perag[agno].pagb_lock); |
2586 | cnt = mp->m_perag[agno].pagb_count; | 2583 | cnt = mp->m_perag[agno].pagb_count; |
2587 | 2584 | ||
2588 | uend = bno + len - 1; | 2585 | uend = bno + len - 1; |
@@ -2615,12 +2612,12 @@ xfs_alloc_search_busy(xfs_trans_t *tp, | |||
2615 | if (cnt) { | 2612 | if (cnt) { |
2616 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); | 2613 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); |
2617 | lsn = bsy->busy_tp->t_commit_lsn; | 2614 | lsn = bsy->busy_tp->t_commit_lsn; |
2618 | mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); | 2615 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2619 | xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); | 2616 | xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); |
2620 | } else { | 2617 | } else { |
2621 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); | 2618 | TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); |
2622 | n = -1; | 2619 | n = -1; |
2623 | mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); | 2620 | spin_unlock(&mp->m_perag[agno].pagb_lock); |
2624 | } | 2621 | } |
2625 | 2622 | ||
2626 | return n; | 2623 | return n; |
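The xfs_alloc.c hunks above retire XFS's private SPLDECL()/mutex_spinlock()/mutex_spinunlock() wrappers in favour of the stock spin_lock()/spin_unlock() API on the per-AG pagb_lock (initialised with spin_lock_init() in the xfs_alloc_read_agf hunk). A minimal sketch of the converted pattern, with the per-AG structure cut down to the fields these hunks touch and the busy-list logic elided, might look like:

	#include <linux/spinlock.h>

	struct perag_stub {			/* illustrative stand-in for xfs_perag_t */
		spinlock_t	pagb_lock;	/* protects the busy-extent slots */
		int		pagb_count;
	};

	static void mark_busy_stub(struct perag_stub *pag)
	{
		/* was: SPLDECL(s); s = mutex_spinlock(&pag->pagb_lock); */
		spin_lock(&pag->pagb_lock);
		pag->pagb_count++;		/* ... the real code updates pagb_list here ... */
		/* was: mutex_spinunlock(&pag->pagb_lock, s); */
		spin_unlock(&pag->pagb_lock);
	}

The saved-flags variable s disappears because spin_lock() does not hand back interrupt state; a caller that genuinely needed IRQ protection would switch to spin_lock_irqsave() instead.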
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 1603ce595853..3ce2645508ae 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c | |||
@@ -221,7 +221,7 @@ xfs_alloc_delrec( | |||
221 | */ | 221 | */ |
222 | bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); | 222 | bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); |
223 | agf->agf_roots[cur->bc_btnum] = *lpp; | 223 | agf->agf_roots[cur->bc_btnum] = *lpp; |
224 | be32_add(&agf->agf_levels[cur->bc_btnum], -1); | 224 | be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1); |
225 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; | 225 | mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; |
226 | /* | 226 | /* |
227 | * Put this buffer/block on the ag's freelist. | 227 | * Put this buffer/block on the ag's freelist. |
@@ -1256,9 +1256,9 @@ xfs_alloc_lshift( | |||
1256 | /* | 1256 | /* |
1257 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1257 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1258 | */ | 1258 | */ |
1259 | be16_add(&left->bb_numrecs, 1); | 1259 | be16_add_cpu(&left->bb_numrecs, 1); |
1260 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1260 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1261 | be16_add(&right->bb_numrecs, -1); | 1261 | be16_add_cpu(&right->bb_numrecs, -1); |
1262 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1262 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1263 | /* | 1263 | /* |
1264 | * Slide the contents of right down one entry. | 1264 | * Slide the contents of right down one entry. |
@@ -1346,7 +1346,7 @@ xfs_alloc_newroot( | |||
1346 | 1346 | ||
1347 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); | 1347 | agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
1348 | agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); | 1348 | agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); |
1349 | be32_add(&agf->agf_levels[cur->bc_btnum], 1); | 1349 | be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1); |
1350 | seqno = be32_to_cpu(agf->agf_seqno); | 1350 | seqno = be32_to_cpu(agf->agf_seqno); |
1351 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; | 1351 | mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; |
1352 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, | 1352 | xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, |
@@ -1558,9 +1558,9 @@ xfs_alloc_rshift( | |||
1558 | /* | 1558 | /* |
1559 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1559 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1560 | */ | 1560 | */ |
1561 | be16_add(&left->bb_numrecs, -1); | 1561 | be16_add_cpu(&left->bb_numrecs, -1); |
1562 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1562 | xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1563 | be16_add(&right->bb_numrecs, 1); | 1563 | be16_add_cpu(&right->bb_numrecs, 1); |
1564 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1564 | xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1565 | /* | 1565 | /* |
1566 | * Using a temporary cursor, update the parent key values of the | 1566 | * Using a temporary cursor, update the parent key values of the |
@@ -1643,7 +1643,7 @@ xfs_alloc_split( | |||
1643 | */ | 1643 | */ |
1644 | if ((be16_to_cpu(left->bb_numrecs) & 1) && | 1644 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1645 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) | 1645 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1646 | be16_add(&right->bb_numrecs, 1); | 1646 | be16_add_cpu(&right->bb_numrecs, 1); |
1647 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; | 1647 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1648 | /* | 1648 | /* |
1649 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1649 | * For non-leaf blocks, copy keys and addresses over to the new block. |
@@ -1689,7 +1689,7 @@ xfs_alloc_split( | |||
1689 | * Adjust numrecs, sibling pointers. | 1689 | * Adjust numrecs, sibling pointers. |
1690 | */ | 1690 | */ |
1691 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); | 1691 | lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); |
1692 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); | 1692 | be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1693 | right->bb_rightsib = left->bb_rightsib; | 1693 | right->bb_rightsib = left->bb_rightsib; |
1694 | left->bb_rightsib = cpu_to_be32(rbno); | 1694 | left->bb_rightsib = cpu_to_be32(rbno); |
1695 | right->bb_leftsib = cpu_to_be32(lbno); | 1695 | right->bb_leftsib = cpu_to_be32(lbno); |
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h index c4836890b726..f9472a2076d4 100644 --- a/fs/xfs/xfs_arch.h +++ b/fs/xfs/xfs_arch.h | |||
@@ -170,21 +170,6 @@ | |||
170 | } \ | 170 | } \ |
171 | } | 171 | } |
172 | 172 | ||
173 | static inline void be16_add(__be16 *a, __s16 b) | ||
174 | { | ||
175 | *a = cpu_to_be16(be16_to_cpu(*a) + b); | ||
176 | } | ||
177 | |||
178 | static inline void be32_add(__be32 *a, __s32 b) | ||
179 | { | ||
180 | *a = cpu_to_be32(be32_to_cpu(*a) + b); | ||
181 | } | ||
182 | |||
183 | static inline void be64_add(__be64 *a, __s64 b) | ||
184 | { | ||
185 | *a = cpu_to_be64(be64_to_cpu(*a) + b); | ||
186 | } | ||
187 | |||
188 | /* | 173 | /* |
189 | * In directories inode numbers are stored as unaligned arrays of unsigned | 174 | * In directories inode numbers are stored as unaligned arrays of unsigned |
190 | * 8bit integers on disk. | 175 | * 8bit integers on disk. |
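The three helpers deleted from xfs_arch.h above are superseded by the kernel-wide be16_add_cpu()/be32_add_cpu()/be64_add_cpu() accessors that the rest of this series calls; the semantics are the same read-modify-write of a big-endian on-disk field. For reference, the removed 32-bit variant was equivalent to this sketch (illustrative name, signed increment as in the old local helper, assuming the usual byteorder headers):

	static inline void be32_add_equiv(__be32 *a, __s32 b)
	{
		*a = cpu_to_be32(be32_to_cpu(*a) + b);
	}

Each call-site conversion is therefore mechanical, e.g. be32_add(&agf->agf_flcount, 1) simply becomes be32_add_cpu(&agf->agf_flcount, 1).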
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 93fa64dd1be6..e58f321fdae9 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -929,7 +929,7 @@ xfs_attr_shortform_addname(xfs_da_args_t *args) | |||
929 | * This leaf block cannot have a "remote" value, we only call this routine | 929 | * This leaf block cannot have a "remote" value, we only call this routine |
930 | * if bmap_one_block() says there is only one block (ie: no remote blks). | 930 | * if bmap_one_block() says there is only one block (ie: no remote blks). |
931 | */ | 931 | */ |
932 | int | 932 | STATIC int |
933 | xfs_attr_leaf_addname(xfs_da_args_t *args) | 933 | xfs_attr_leaf_addname(xfs_da_args_t *args) |
934 | { | 934 | { |
935 | xfs_inode_t *dp; | 935 | xfs_inode_t *dp; |
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 81f45dae1c57..b08e2a2a8add 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -226,17 +226,15 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes) | |||
226 | STATIC void | 226 | STATIC void |
227 | xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp) | 227 | xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp) |
228 | { | 228 | { |
229 | unsigned long s; | ||
230 | |||
231 | if ((mp->m_flags & XFS_MOUNT_ATTR2) && | 229 | if ((mp->m_flags & XFS_MOUNT_ATTR2) && |
232 | !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) { | 230 | !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) { |
233 | s = XFS_SB_LOCK(mp); | 231 | spin_lock(&mp->m_sb_lock); |
234 | if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { | 232 | if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { |
235 | XFS_SB_VERSION_ADDATTR2(&mp->m_sb); | 233 | XFS_SB_VERSION_ADDATTR2(&mp->m_sb); |
236 | XFS_SB_UNLOCK(mp, s); | 234 | spin_unlock(&mp->m_sb_lock); |
237 | xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); | 235 | xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); |
238 | } else | 236 | } else |
239 | XFS_SB_UNLOCK(mp, s); | 237 | spin_unlock(&mp->m_sb_lock); |
240 | } | 238 | } |
241 | } | 239 | } |
242 | 240 | ||
@@ -319,7 +317,7 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) | |||
319 | memcpy(sfe->nameval, args->name, args->namelen); | 317 | memcpy(sfe->nameval, args->name, args->namelen); |
320 | memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); | 318 | memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); |
321 | sf->hdr.count++; | 319 | sf->hdr.count++; |
322 | be16_add(&sf->hdr.totsize, size); | 320 | be16_add_cpu(&sf->hdr.totsize, size); |
323 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); | 321 | xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); |
324 | 322 | ||
325 | xfs_sbversion_add_attr2(mp, args->trans); | 323 | xfs_sbversion_add_attr2(mp, args->trans); |
@@ -365,7 +363,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
365 | if (end != totsize) | 363 | if (end != totsize) |
366 | memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); | 364 | memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); |
367 | sf->hdr.count--; | 365 | sf->hdr.count--; |
368 | be16_add(&sf->hdr.totsize, -size); | 366 | be16_add_cpu(&sf->hdr.totsize, -size); |
369 | 367 | ||
370 | /* | 368 | /* |
371 | * Fix up the start offset of the attribute fork | 369 | * Fix up the start offset of the attribute fork |
@@ -1135,7 +1133,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) | |||
1135 | xfs_da_log_buf(args->trans, bp, | 1133 | xfs_da_log_buf(args->trans, bp, |
1136 | XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); | 1134 | XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); |
1137 | } | 1135 | } |
1138 | be16_add(&hdr->count, 1); | 1136 | be16_add_cpu(&hdr->count, 1); |
1139 | 1137 | ||
1140 | /* | 1138 | /* |
1141 | * Allocate space for the new string (at the end of the run). | 1139 | * Allocate space for the new string (at the end of the run). |
@@ -1149,7 +1147,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) | |||
1149 | mp->m_sb.sb_blocksize, NULL)); | 1147 | mp->m_sb.sb_blocksize, NULL)); |
1150 | ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); | 1148 | ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); |
1151 | ASSERT((be16_to_cpu(map->size) & 0x3) == 0); | 1149 | ASSERT((be16_to_cpu(map->size) & 0x3) == 0); |
1152 | be16_add(&map->size, | 1150 | be16_add_cpu(&map->size, |
1153 | -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, | 1151 | -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, |
1154 | mp->m_sb.sb_blocksize, &tmp)); | 1152 | mp->m_sb.sb_blocksize, &tmp)); |
1155 | entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + | 1153 | entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + |
@@ -1216,12 +1214,12 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) | |||
1216 | map = &hdr->freemap[0]; | 1214 | map = &hdr->freemap[0]; |
1217 | for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { | 1215 | for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { |
1218 | if (be16_to_cpu(map->base) == tmp) { | 1216 | if (be16_to_cpu(map->base) == tmp) { |
1219 | be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t)); | 1217 | be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t)); |
1220 | be16_add(&map->size, | 1218 | be16_add_cpu(&map->size, |
1221 | -((int)sizeof(xfs_attr_leaf_entry_t))); | 1219 | -((int)sizeof(xfs_attr_leaf_entry_t))); |
1222 | } | 1220 | } |
1223 | } | 1221 | } |
1224 | be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); | 1222 | be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); |
1225 | xfs_da_log_buf(args->trans, bp, | 1223 | xfs_da_log_buf(args->trans, bp, |
1226 | XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); | 1224 | XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); |
1227 | return(0); | 1225 | return(0); |
@@ -1729,9 +1727,9 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) | |||
1729 | ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); | 1727 | ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); |
1730 | ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); | 1728 | ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); |
1731 | if (be16_to_cpu(map->base) == tablesize) { | 1729 | if (be16_to_cpu(map->base) == tablesize) { |
1732 | be16_add(&map->base, | 1730 | be16_add_cpu(&map->base, |
1733 | -((int)sizeof(xfs_attr_leaf_entry_t))); | 1731 | -((int)sizeof(xfs_attr_leaf_entry_t))); |
1734 | be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t)); | 1732 | be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t)); |
1735 | } | 1733 | } |
1736 | 1734 | ||
1737 | if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) | 1735 | if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) |
@@ -1753,19 +1751,19 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) | |||
1753 | if ((before >= 0) || (after >= 0)) { | 1751 | if ((before >= 0) || (after >= 0)) { |
1754 | if ((before >= 0) && (after >= 0)) { | 1752 | if ((before >= 0) && (after >= 0)) { |
1755 | map = &hdr->freemap[before]; | 1753 | map = &hdr->freemap[before]; |
1756 | be16_add(&map->size, entsize); | 1754 | be16_add_cpu(&map->size, entsize); |
1757 | be16_add(&map->size, | 1755 | be16_add_cpu(&map->size, |
1758 | be16_to_cpu(hdr->freemap[after].size)); | 1756 | be16_to_cpu(hdr->freemap[after].size)); |
1759 | hdr->freemap[after].base = 0; | 1757 | hdr->freemap[after].base = 0; |
1760 | hdr->freemap[after].size = 0; | 1758 | hdr->freemap[after].size = 0; |
1761 | } else if (before >= 0) { | 1759 | } else if (before >= 0) { |
1762 | map = &hdr->freemap[before]; | 1760 | map = &hdr->freemap[before]; |
1763 | be16_add(&map->size, entsize); | 1761 | be16_add_cpu(&map->size, entsize); |
1764 | } else { | 1762 | } else { |
1765 | map = &hdr->freemap[after]; | 1763 | map = &hdr->freemap[after]; |
1766 | /* both on-disk, don't endian flip twice */ | 1764 | /* both on-disk, don't endian flip twice */ |
1767 | map->base = entry->nameidx; | 1765 | map->base = entry->nameidx; |
1768 | be16_add(&map->size, entsize); | 1766 | be16_add_cpu(&map->size, entsize); |
1769 | } | 1767 | } |
1770 | } else { | 1768 | } else { |
1771 | /* | 1769 | /* |
@@ -1790,7 +1788,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) | |||
1790 | * Compress the remaining entries and zero out the removed stuff. | 1788 | * Compress the remaining entries and zero out the removed stuff. |
1791 | */ | 1789 | */ |
1792 | memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); | 1790 | memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); |
1793 | be16_add(&hdr->usedbytes, -entsize); | 1791 | be16_add_cpu(&hdr->usedbytes, -entsize); |
1794 | xfs_da_log_buf(args->trans, bp, | 1792 | xfs_da_log_buf(args->trans, bp, |
1795 | XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), | 1793 | XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), |
1796 | entsize)); | 1794 | entsize)); |
@@ -1798,7 +1796,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) | |||
1798 | tmp = (be16_to_cpu(hdr->count) - args->index) | 1796 | tmp = (be16_to_cpu(hdr->count) - args->index) |
1799 | * sizeof(xfs_attr_leaf_entry_t); | 1797 | * sizeof(xfs_attr_leaf_entry_t); |
1800 | memmove((char *)entry, (char *)(entry+1), tmp); | 1798 | memmove((char *)entry, (char *)(entry+1), tmp); |
1801 | be16_add(&hdr->count, -1); | 1799 | be16_add_cpu(&hdr->count, -1); |
1802 | xfs_da_log_buf(args->trans, bp, | 1800 | xfs_da_log_buf(args->trans, bp, |
1803 | XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); | 1801 | XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); |
1804 | entry = &leaf->entries[be16_to_cpu(hdr->count)]; | 1802 | entry = &leaf->entries[be16_to_cpu(hdr->count)]; |
@@ -2184,15 +2182,15 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, | |||
2184 | */ | 2182 | */ |
2185 | if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */ | 2183 | if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */ |
2186 | memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); | 2184 | memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); |
2187 | be16_add(&hdr_s->usedbytes, -tmp); | 2185 | be16_add_cpu(&hdr_s->usedbytes, -tmp); |
2188 | be16_add(&hdr_s->count, -1); | 2186 | be16_add_cpu(&hdr_s->count, -1); |
2189 | entry_d--; /* to compensate for ++ in loop hdr */ | 2187 | entry_d--; /* to compensate for ++ in loop hdr */ |
2190 | desti--; | 2188 | desti--; |
2191 | if ((start_s + i) < offset) | 2189 | if ((start_s + i) < offset) |
2192 | result++; /* insertion index adjustment */ | 2190 | result++; /* insertion index adjustment */ |
2193 | } else { | 2191 | } else { |
2194 | #endif /* GROT */ | 2192 | #endif /* GROT */ |
2195 | be16_add(&hdr_d->firstused, -tmp); | 2193 | be16_add_cpu(&hdr_d->firstused, -tmp); |
2196 | /* both on-disk, don't endian flip twice */ | 2194 | /* both on-disk, don't endian flip twice */ |
2197 | entry_d->hashval = entry_s->hashval; | 2195 | entry_d->hashval = entry_s->hashval; |
2198 | /* both on-disk, don't endian flip twice */ | 2196 | /* both on-disk, don't endian flip twice */ |
@@ -2205,10 +2203,10 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, | |||
2205 | ASSERT(be16_to_cpu(entry_s->nameidx) + tmp | 2203 | ASSERT(be16_to_cpu(entry_s->nameidx) + tmp |
2206 | <= XFS_LBSIZE(mp)); | 2204 | <= XFS_LBSIZE(mp)); |
2207 | memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); | 2205 | memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); |
2208 | be16_add(&hdr_s->usedbytes, -tmp); | 2206 | be16_add_cpu(&hdr_s->usedbytes, -tmp); |
2209 | be16_add(&hdr_d->usedbytes, tmp); | 2207 | be16_add_cpu(&hdr_d->usedbytes, tmp); |
2210 | be16_add(&hdr_s->count, -1); | 2208 | be16_add_cpu(&hdr_s->count, -1); |
2211 | be16_add(&hdr_d->count, 1); | 2209 | be16_add_cpu(&hdr_d->count, 1); |
2212 | tmp = be16_to_cpu(hdr_d->count) | 2210 | tmp = be16_to_cpu(hdr_d->count) |
2213 | * sizeof(xfs_attr_leaf_entry_t) | 2211 | * sizeof(xfs_attr_leaf_entry_t) |
2214 | + sizeof(xfs_attr_leaf_hdr_t); | 2212 | + sizeof(xfs_attr_leaf_hdr_t); |
@@ -2249,7 +2247,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, | |||
2249 | * Fill in the freemap information | 2247 | * Fill in the freemap information |
2250 | */ | 2248 | */ |
2251 | hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); | 2249 | hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); |
2252 | be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * | 2250 | be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * |
2253 | sizeof(xfs_attr_leaf_entry_t)); | 2251 | sizeof(xfs_attr_leaf_entry_t)); |
2254 | hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) | 2252 | hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) |
2255 | - be16_to_cpu(hdr_d->freemap[0].base)); | 2253 | - be16_to_cpu(hdr_d->freemap[0].base)); |
diff --git a/fs/xfs/xfs_bit.c b/fs/xfs/xfs_bit.c index fab0b6d5a41b..48228848f5ae 100644 --- a/fs/xfs/xfs_bit.c +++ b/fs/xfs/xfs_bit.c | |||
@@ -25,109 +25,6 @@ | |||
25 | * XFS bit manipulation routines, used in non-realtime code. | 25 | * XFS bit manipulation routines, used in non-realtime code. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #ifndef HAVE_ARCH_HIGHBIT | ||
29 | /* | ||
30 | * Index of high bit number in byte, -1 for none set, 0..7 otherwise. | ||
31 | */ | ||
32 | static const char xfs_highbit[256] = { | ||
33 | -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ | ||
34 | 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ | ||
35 | 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ | ||
36 | 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ | ||
37 | 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ | ||
38 | 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ | ||
39 | 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ | ||
40 | 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ | ||
41 | 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */ | ||
42 | 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ | ||
43 | 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ | ||
44 | 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ | ||
45 | 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ | ||
46 | 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ | ||
47 | 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ | ||
48 | 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ | ||
49 | 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ | ||
50 | 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ | ||
51 | 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ | ||
52 | 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ | ||
53 | 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ | ||
54 | 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ | ||
55 | 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ | ||
56 | 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ | ||
57 | 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ | ||
58 | 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ | ||
59 | 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ | ||
60 | 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ | ||
61 | 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ | ||
62 | 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ | ||
63 | 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ | ||
64 | 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ | ||
65 | }; | ||
66 | #endif | ||
67 | |||
68 | /* | ||
69 | * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. | ||
70 | */ | ||
71 | inline int | ||
72 | xfs_highbit32( | ||
73 | __uint32_t v) | ||
74 | { | ||
75 | #ifdef HAVE_ARCH_HIGHBIT | ||
76 | return highbit32(v); | ||
77 | #else | ||
78 | int i; | ||
79 | |||
80 | if (v & 0xffff0000) | ||
81 | if (v & 0xff000000) | ||
82 | i = 24; | ||
83 | else | ||
84 | i = 16; | ||
85 | else if (v & 0x0000ffff) | ||
86 | if (v & 0x0000ff00) | ||
87 | i = 8; | ||
88 | else | ||
89 | i = 0; | ||
90 | else | ||
91 | return -1; | ||
92 | return i + xfs_highbit[(v >> i) & 0xff]; | ||
93 | #endif | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. | ||
98 | */ | ||
99 | int | ||
100 | xfs_lowbit64( | ||
101 | __uint64_t v) | ||
102 | { | ||
103 | __uint32_t w = (__uint32_t)v; | ||
104 | int n = 0; | ||
105 | |||
106 | if (w) { /* lower bits */ | ||
107 | n = ffs(w); | ||
108 | } else { /* upper bits */ | ||
109 | w = (__uint32_t)(v >> 32); | ||
110 | if (w && (n = ffs(w))) | ||
111 | n += 32; | ||
112 | } | ||
113 | return n - 1; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. | ||
118 | */ | ||
119 | int | ||
120 | xfs_highbit64( | ||
121 | __uint64_t v) | ||
122 | { | ||
123 | __uint32_t h = (__uint32_t)(v >> 32); | ||
124 | |||
125 | if (h) | ||
126 | return xfs_highbit32(h) + 32; | ||
127 | return xfs_highbit32((__uint32_t)v); | ||
128 | } | ||
129 | |||
130 | |||
131 | /* | 28 | /* |
132 | * Return whether bitmap is empty. | 29 | * Return whether bitmap is empty. |
133 | * Size is number of words in the bitmap, which is padded to word boundary | 30 | * Size is number of words in the bitmap, which is padded to word boundary |
diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h index 082641a9782c..325a007dec91 100644 --- a/fs/xfs/xfs_bit.h +++ b/fs/xfs/xfs_bit.h | |||
@@ -47,13 +47,30 @@ static inline __uint64_t xfs_mask64lo(int n) | |||
47 | } | 47 | } |
48 | 48 | ||
49 | /* Get high bit set out of 32-bit argument, -1 if none set */ | 49 | /* Get high bit set out of 32-bit argument, -1 if none set */ |
50 | extern int xfs_highbit32(__uint32_t v); | 50 | static inline int xfs_highbit32(__uint32_t v) |
51 | 51 | { | |
52 | /* Get low bit set out of 64-bit argument, -1 if none set */ | 52 | return fls(v) - 1; |
53 | extern int xfs_lowbit64(__uint64_t v); | 53 | } |
54 | 54 | ||
55 | /* Get high bit set out of 64-bit argument, -1 if none set */ | 55 | /* Get high bit set out of 64-bit argument, -1 if none set */ |
56 | extern int xfs_highbit64(__uint64_t); | 56 | static inline int xfs_highbit64(__uint64_t v) |
57 | { | ||
58 | return fls64(v) - 1; | ||
59 | } | ||
60 | |||
61 | /* Get low bit set out of 32-bit argument, -1 if none set */ | ||
62 | static inline int xfs_lowbit32(__uint32_t v) | ||
63 | { | ||
64 | __uint32_t t = v; | ||
65 | return (t) ? find_first_bit((unsigned long *)&t, 32) : -1; | ||
66 | } | ||
67 | |||
68 | /* Get low bit set out of 64-bit argument, -1 if none set */ | ||
69 | static inline int xfs_lowbit64(__uint64_t v) | ||
70 | { | ||
71 | __uint64_t t = v; | ||
72 | return (t) ? find_first_bit((unsigned long *)&t, 64) : -1; | ||
73 | } | ||
57 | 74 | ||
58 | /* Return whether bitmap is empty (1 == empty) */ | 75 | /* Return whether bitmap is empty (1 == empty) */ |
59 | extern int xfs_bitmap_empty(uint *map, uint size); | 76 | extern int xfs_bitmap_empty(uint *map, uint size); |
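With the table-driven routines removed from xfs_bit.c, xfs_highbit32()/xfs_highbit64() above become thin inline wrappers around fls()/fls64(), and the new xfs_lowbit32()/xfs_lowbit64() lean on find_first_bit(). A throwaway sanity check (not part of the patch) showing why fls(v) - 1 preserves the old "-1 if none set" convention, assuming <linux/bitops.h>:

	#include <linux/bitops.h>
	#include <linux/bug.h>

	static void xfs_bit_examples(void)
	{
		WARN_ON(fls(0x00000010U) - 1 != 4);	/* highest set bit is bit 4 */
		WARN_ON(fls(0U) - 1 != -1);		/* fls() is 1-based; 0 means no bit set */
		WARN_ON(fls64(1ULL << 40) - 1 != 40);	/* 64-bit variant used by xfs_highbit64() */
	}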
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 2e9b34b7344b..1c0a5a585a82 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -2830,11 +2830,11 @@ xfs_bmap_btalloc( | |||
2830 | args.prod = align; | 2830 | args.prod = align; |
2831 | if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) | 2831 | if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) |
2832 | args.mod = (xfs_extlen_t)(args.prod - args.mod); | 2832 | args.mod = (xfs_extlen_t)(args.prod - args.mod); |
2833 | } else if (mp->m_sb.sb_blocksize >= NBPP) { | 2833 | } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { |
2834 | args.prod = 1; | 2834 | args.prod = 1; |
2835 | args.mod = 0; | 2835 | args.mod = 0; |
2836 | } else { | 2836 | } else { |
2837 | args.prod = NBPP >> mp->m_sb.sb_blocklog; | 2837 | args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; |
2838 | if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) | 2838 | if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) |
2839 | args.mod = (xfs_extlen_t)(args.prod - args.mod); | 2839 | args.mod = (xfs_extlen_t)(args.prod - args.mod); |
2840 | } | 2840 | } |
@@ -2969,7 +2969,7 @@ STATIC int | |||
2969 | xfs_bmap_alloc( | 2969 | xfs_bmap_alloc( |
2970 | xfs_bmalloca_t *ap) /* bmap alloc argument struct */ | 2970 | xfs_bmalloca_t *ap) /* bmap alloc argument struct */ |
2971 | { | 2971 | { |
2972 | if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata) | 2972 | if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) |
2973 | return xfs_bmap_rtalloc(ap); | 2973 | return xfs_bmap_rtalloc(ap); |
2974 | return xfs_bmap_btalloc(ap); | 2974 | return xfs_bmap_btalloc(ap); |
2975 | } | 2975 | } |
@@ -3096,8 +3096,7 @@ xfs_bmap_del_extent( | |||
3096 | /* | 3096 | /* |
3097 | * Realtime allocation. Free it and record di_nblocks update. | 3097 | * Realtime allocation. Free it and record di_nblocks update. |
3098 | */ | 3098 | */ |
3099 | if (whichfork == XFS_DATA_FORK && | 3099 | if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { |
3100 | (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { | ||
3101 | xfs_fsblock_t bno; | 3100 | xfs_fsblock_t bno; |
3102 | xfs_filblks_t len; | 3101 | xfs_filblks_t len; |
3103 | 3102 | ||
@@ -3956,7 +3955,6 @@ xfs_bmap_add_attrfork( | |||
3956 | xfs_bmap_free_t flist; /* freed extent records */ | 3955 | xfs_bmap_free_t flist; /* freed extent records */ |
3957 | xfs_mount_t *mp; /* mount structure */ | 3956 | xfs_mount_t *mp; /* mount structure */ |
3958 | xfs_trans_t *tp; /* transaction pointer */ | 3957 | xfs_trans_t *tp; /* transaction pointer */ |
3959 | unsigned long s; /* spinlock spl value */ | ||
3960 | int blks; /* space reservation */ | 3958 | int blks; /* space reservation */ |
3961 | int version = 1; /* superblock attr version */ | 3959 | int version = 1; /* superblock attr version */ |
3962 | int committed; /* xaction was committed */ | 3960 | int committed; /* xaction was committed */ |
@@ -4053,7 +4051,7 @@ xfs_bmap_add_attrfork( | |||
4053 | (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) { | 4051 | (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) { |
4054 | __int64_t sbfields = 0; | 4052 | __int64_t sbfields = 0; |
4055 | 4053 | ||
4056 | s = XFS_SB_LOCK(mp); | 4054 | spin_lock(&mp->m_sb_lock); |
4057 | if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { | 4055 | if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { |
4058 | XFS_SB_VERSION_ADDATTR(&mp->m_sb); | 4056 | XFS_SB_VERSION_ADDATTR(&mp->m_sb); |
4059 | sbfields |= XFS_SB_VERSIONNUM; | 4057 | sbfields |= XFS_SB_VERSIONNUM; |
@@ -4063,10 +4061,10 @@ xfs_bmap_add_attrfork( | |||
4063 | sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); | 4061 | sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); |
4064 | } | 4062 | } |
4065 | if (sbfields) { | 4063 | if (sbfields) { |
4066 | XFS_SB_UNLOCK(mp, s); | 4064 | spin_unlock(&mp->m_sb_lock); |
4067 | xfs_mod_sb(tp, sbfields); | 4065 | xfs_mod_sb(tp, sbfields); |
4068 | } else | 4066 | } else |
4069 | XFS_SB_UNLOCK(mp, s); | 4067 | spin_unlock(&mp->m_sb_lock); |
4070 | } | 4068 | } |
4071 | if ((error = xfs_bmap_finish(&tp, &flist, &committed))) | 4069 | if ((error = xfs_bmap_finish(&tp, &flist, &committed))) |
4072 | goto error2; | 4070 | goto error2; |
@@ -6394,7 +6392,7 @@ xfs_bmap_count_blocks( | |||
6394 | * Recursively walks each level of a btree | 6392 | * Recursively walks each level of a btree |
6395 | * to count total fsblocks is use. | 6393 | * to count total fsblocks is use. |
6396 | */ | 6394 | */ |
6397 | int /* error */ | 6395 | STATIC int /* error */ |
6398 | xfs_bmap_count_tree( | 6396 | xfs_bmap_count_tree( |
6399 | xfs_mount_t *mp, /* file system mount point */ | 6397 | xfs_mount_t *mp, /* file system mount point */ |
6400 | xfs_trans_t *tp, /* transaction pointer */ | 6398 | xfs_trans_t *tp, /* transaction pointer */ |
@@ -6470,7 +6468,7 @@ xfs_bmap_count_tree( | |||
6470 | /* | 6468 | /* |
6471 | * Count leaf blocks given a range of extent records. | 6469 | * Count leaf blocks given a range of extent records. |
6472 | */ | 6470 | */ |
6473 | int | 6471 | STATIC int |
6474 | xfs_bmap_count_leaves( | 6472 | xfs_bmap_count_leaves( |
6475 | xfs_ifork_t *ifp, | 6473 | xfs_ifork_t *ifp, |
6476 | xfs_extnum_t idx, | 6474 | xfs_extnum_t idx, |
@@ -6490,7 +6488,7 @@ xfs_bmap_count_leaves( | |||
6490 | * Count leaf blocks given a range of extent records originally | 6488 | * Count leaf blocks given a range of extent records originally |
6491 | * in btree format. | 6489 | * in btree format. |
6492 | */ | 6490 | */ |
6493 | int | 6491 | STATIC int |
6494 | xfs_bmap_disk_count_leaves( | 6492 | xfs_bmap_disk_count_leaves( |
6495 | xfs_extnum_t idx, | 6493 | xfs_extnum_t idx, |
6496 | xfs_bmbt_block_t *block, | 6494 | xfs_bmbt_block_t *block, |
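Several xfs_bmap.c hunks above replace the open-coded (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) test with the XFS_IS_REALTIME_INODE(ip) predicate. An illustrative definition consistent with the call sites being converted (the real macro lives in the XFS headers and may carry extra qualifications) would be:

	#define XFS_IS_REALTIME_INODE_EXAMPLE(ip) \
		(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0)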
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 68267d75ff19..87224b7d7984 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -25,6 +25,8 @@ struct xfs_inode; | |||
25 | struct xfs_mount; | 25 | struct xfs_mount; |
26 | struct xfs_trans; | 26 | struct xfs_trans; |
27 | 27 | ||
28 | extern kmem_zone_t *xfs_bmap_free_item_zone; | ||
29 | |||
28 | /* | 30 | /* |
29 | * DELTA: describe a change to the in-core extent list. | 31 | * DELTA: describe a change to the in-core extent list. |
30 | * | 32 | * |
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 32b49ec00fb5..bd18987326a3 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
@@ -631,7 +631,7 @@ xfs_bmbt_delrec( | |||
631 | memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); | 631 | memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); |
632 | xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); | 632 | xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); |
633 | } | 633 | } |
634 | be16_add(&left->bb_numrecs, numrrecs); | 634 | be16_add_cpu(&left->bb_numrecs, numrrecs); |
635 | left->bb_rightsib = right->bb_rightsib; | 635 | left->bb_rightsib = right->bb_rightsib; |
636 | xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); | 636 | xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); |
637 | if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { | 637 | if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { |
@@ -924,7 +924,7 @@ xfs_bmbt_killroot( | |||
924 | xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); | 924 | xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); |
925 | block = ifp->if_broot; | 925 | block = ifp->if_broot; |
926 | } | 926 | } |
927 | be16_add(&block->bb_numrecs, i); | 927 | be16_add_cpu(&block->bb_numrecs, i); |
928 | ASSERT(block->bb_numrecs == cblock->bb_numrecs); | 928 | ASSERT(block->bb_numrecs == cblock->bb_numrecs); |
929 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); | 929 | kp = XFS_BMAP_KEY_IADDR(block, 1, cur); |
930 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); | 930 | ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); |
@@ -947,7 +947,7 @@ xfs_bmbt_killroot( | |||
947 | XFS_TRANS_DQ_BCOUNT, -1L); | 947 | XFS_TRANS_DQ_BCOUNT, -1L); |
948 | xfs_trans_binval(cur->bc_tp, cbp); | 948 | xfs_trans_binval(cur->bc_tp, cbp); |
949 | cur->bc_bufs[level - 1] = NULL; | 949 | cur->bc_bufs[level - 1] = NULL; |
950 | be16_add(&block->bb_level, -1); | 950 | be16_add_cpu(&block->bb_level, -1); |
951 | xfs_trans_log_inode(cur->bc_tp, ip, | 951 | xfs_trans_log_inode(cur->bc_tp, ip, |
952 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); | 952 | XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); |
953 | cur->bc_nlevels--; | 953 | cur->bc_nlevels--; |
@@ -1401,9 +1401,9 @@ xfs_bmbt_rshift( | |||
1401 | key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); | 1401 | key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); |
1402 | rkp = &key; | 1402 | rkp = &key; |
1403 | } | 1403 | } |
1404 | be16_add(&left->bb_numrecs, -1); | 1404 | be16_add_cpu(&left->bb_numrecs, -1); |
1405 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); | 1405 | xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); |
1406 | be16_add(&right->bb_numrecs, 1); | 1406 | be16_add_cpu(&right->bb_numrecs, 1); |
1407 | #ifdef DEBUG | 1407 | #ifdef DEBUG |
1408 | if (level > 0) | 1408 | if (level > 0) |
1409 | xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); | 1409 | xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); |
@@ -1535,7 +1535,7 @@ xfs_bmbt_split( | |||
1535 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); | 1535 | right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); |
1536 | if ((be16_to_cpu(left->bb_numrecs) & 1) && | 1536 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1537 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) | 1537 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1538 | be16_add(&right->bb_numrecs, 1); | 1538 | be16_add_cpu(&right->bb_numrecs, 1); |
1539 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; | 1539 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1540 | if (level > 0) { | 1540 | if (level > 0) { |
1541 | lkp = XFS_BMAP_KEY_IADDR(left, i, cur); | 1541 | lkp = XFS_BMAP_KEY_IADDR(left, i, cur); |
@@ -1562,7 +1562,7 @@ xfs_bmbt_split( | |||
1562 | xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); | 1562 | xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); |
1563 | *startoff = xfs_bmbt_disk_get_startoff(rrp); | 1563 | *startoff = xfs_bmbt_disk_get_startoff(rrp); |
1564 | } | 1564 | } |
1565 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); | 1565 | be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1566 | right->bb_rightsib = left->bb_rightsib; | 1566 | right->bb_rightsib = left->bb_rightsib; |
1567 | left->bb_rightsib = cpu_to_be64(args.fsbno); | 1567 | left->bb_rightsib = cpu_to_be64(args.fsbno); |
1568 | right->bb_leftsib = cpu_to_be64(lbno); | 1568 | right->bb_leftsib = cpu_to_be64(lbno); |
@@ -2062,8 +2062,7 @@ xfs_bmbt_insert( | |||
2062 | pcur->bc_private.b.allocated; | 2062 | pcur->bc_private.b.allocated; |
2063 | pcur->bc_private.b.allocated = 0; | 2063 | pcur->bc_private.b.allocated = 0; |
2064 | ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || | 2064 | ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || |
2065 | (cur->bc_private.b.ip->i_d.di_flags & | 2065 | XFS_IS_REALTIME_INODE(cur->bc_private.b.ip)); |
2066 | XFS_DIFLAG_REALTIME)); | ||
2067 | cur->bc_private.b.firstblock = | 2066 | cur->bc_private.b.firstblock = |
2068 | pcur->bc_private.b.firstblock; | 2067 | pcur->bc_private.b.firstblock; |
2069 | ASSERT(cur->bc_private.b.flist == | 2068 | ASSERT(cur->bc_private.b.flist == |
@@ -2241,7 +2240,7 @@ xfs_bmbt_newroot( | |||
2241 | bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); | 2240 | bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); |
2242 | cblock = XFS_BUF_TO_BMBT_BLOCK(bp); | 2241 | cblock = XFS_BUF_TO_BMBT_BLOCK(bp); |
2243 | *cblock = *block; | 2242 | *cblock = *block; |
2244 | be16_add(&block->bb_level, 1); | 2243 | be16_add_cpu(&block->bb_level, 1); |
2245 | block->bb_numrecs = cpu_to_be16(1); | 2244 | block->bb_numrecs = cpu_to_be16(1); |
2246 | cur->bc_nlevels++; | 2245 | cur->bc_nlevels++; |
2247 | cur->bc_ptrs[level + 1] = 1; | 2246 | cur->bc_ptrs[level + 1] = 1; |
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 6e40a0a198ff..7440b78f9cec 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h | |||
@@ -24,6 +24,8 @@ struct xfs_inode; | |||
24 | struct xfs_mount; | 24 | struct xfs_mount; |
25 | struct xfs_trans; | 25 | struct xfs_trans; |
26 | 26 | ||
27 | extern kmem_zone_t *xfs_btree_cur_zone; | ||
28 | |||
27 | /* | 29 | /* |
28 | * This nonsense is to make -wlint happy. | 30 | * This nonsense is to make -wlint happy. |
29 | */ | 31 | */ |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index c8f2c2886fe4..63debd147eb5 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -378,7 +378,6 @@ xfs_buf_item_unpin( | |||
378 | xfs_mount_t *mp; | 378 | xfs_mount_t *mp; |
379 | xfs_buf_t *bp; | 379 | xfs_buf_t *bp; |
380 | int freed; | 380 | int freed; |
381 | SPLDECL(s); | ||
382 | 381 | ||
383 | bp = bip->bli_buf; | 382 | bp = bip->bli_buf; |
384 | ASSERT(bp != NULL); | 383 | ASSERT(bp != NULL); |
@@ -409,8 +408,8 @@ xfs_buf_item_unpin( | |||
409 | XFS_BUF_SET_FSPRIVATE(bp, NULL); | 408 | XFS_BUF_SET_FSPRIVATE(bp, NULL); |
410 | XFS_BUF_CLR_IODONE_FUNC(bp); | 409 | XFS_BUF_CLR_IODONE_FUNC(bp); |
411 | } else { | 410 | } else { |
412 | AIL_LOCK(mp,s); | 411 | spin_lock(&mp->m_ail_lock); |
413 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); | 412 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); |
414 | xfs_buf_item_relse(bp); | 413 | xfs_buf_item_relse(bp); |
415 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); | 414 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); |
416 | } | 415 | } |
@@ -1113,7 +1112,6 @@ xfs_buf_iodone( | |||
1113 | xfs_buf_log_item_t *bip) | 1112 | xfs_buf_log_item_t *bip) |
1114 | { | 1113 | { |
1115 | struct xfs_mount *mp; | 1114 | struct xfs_mount *mp; |
1116 | SPLDECL(s); | ||
1117 | 1115 | ||
1118 | ASSERT(bip->bli_buf == bp); | 1116 | ASSERT(bip->bli_buf == bp); |
1119 | 1117 | ||
@@ -1128,11 +1126,11 @@ xfs_buf_iodone( | |||
1128 | * | 1126 | * |
1129 | * Either way, AIL is useless if we're forcing a shutdown. | 1127 | * Either way, AIL is useless if we're forcing a shutdown. |
1130 | */ | 1128 | */ |
1131 | AIL_LOCK(mp,s); | 1129 | spin_lock(&mp->m_ail_lock); |
1132 | /* | 1130 | /* |
1133 | * xfs_trans_delete_ail() drops the AIL lock. | 1131 | * xfs_trans_delete_ail() drops the AIL lock. |
1134 | */ | 1132 | */ |
1135 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); | 1133 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); |
1136 | 1134 | ||
1137 | #ifdef XFS_TRANS_DEBUG | 1135 | #ifdef XFS_TRANS_DEBUG |
1138 | kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); | 1136 | kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp)); |
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index d7e136143066..5a41c348bb1c 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h | |||
@@ -18,6 +18,8 @@ | |||
18 | #ifndef __XFS_BUF_ITEM_H__ | 18 | #ifndef __XFS_BUF_ITEM_H__ |
19 | #define __XFS_BUF_ITEM_H__ | 19 | #define __XFS_BUF_ITEM_H__ |
20 | 20 | ||
21 | extern kmem_zone_t *xfs_buf_item_zone; | ||
22 | |||
21 | /* | 23 | /* |
22 | * This is the structure used to lay out a buf log item in the | 24 | * This is the structure used to lay out a buf log item in the |
23 | * log. The data map describes which 128 byte chunks of the buffer | 25 | * log. The data map describes which 128 byte chunks of the buffer |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 26d09e2e1a7f..021a8f7e563f 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
@@ -511,12 +511,12 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
511 | * Move the req'd B-tree elements from high in node1 to | 511 | * Move the req'd B-tree elements from high in node1 to |
512 | * low in node2. | 512 | * low in node2. |
513 | */ | 513 | */ |
514 | be16_add(&node2->hdr.count, count); | 514 | be16_add_cpu(&node2->hdr.count, count); |
515 | tmp = count * (uint)sizeof(xfs_da_node_entry_t); | 515 | tmp = count * (uint)sizeof(xfs_da_node_entry_t); |
516 | btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; | 516 | btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; |
517 | btree_d = &node2->btree[0]; | 517 | btree_d = &node2->btree[0]; |
518 | memcpy(btree_d, btree_s, tmp); | 518 | memcpy(btree_d, btree_s, tmp); |
519 | be16_add(&node1->hdr.count, -count); | 519 | be16_add_cpu(&node1->hdr.count, -count); |
520 | } else { | 520 | } else { |
521 | /* | 521 | /* |
522 | * Move the req'd B-tree elements from low in node2 to | 522 | * Move the req'd B-tree elements from low in node2 to |
@@ -527,7 +527,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
527 | btree_s = &node2->btree[0]; | 527 | btree_s = &node2->btree[0]; |
528 | btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; | 528 | btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; |
529 | memcpy(btree_d, btree_s, tmp); | 529 | memcpy(btree_d, btree_s, tmp); |
530 | be16_add(&node1->hdr.count, count); | 530 | be16_add_cpu(&node1->hdr.count, count); |
531 | xfs_da_log_buf(tp, blk1->bp, | 531 | xfs_da_log_buf(tp, blk1->bp, |
532 | XFS_DA_LOGRANGE(node1, btree_d, tmp)); | 532 | XFS_DA_LOGRANGE(node1, btree_d, tmp)); |
533 | 533 | ||
@@ -539,7 +539,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, | |||
539 | btree_s = &node2->btree[count]; | 539 | btree_s = &node2->btree[count]; |
540 | btree_d = &node2->btree[0]; | 540 | btree_d = &node2->btree[0]; |
541 | memmove(btree_d, btree_s, tmp); | 541 | memmove(btree_d, btree_s, tmp); |
542 | be16_add(&node2->hdr.count, -count); | 542 | be16_add_cpu(&node2->hdr.count, -count); |
543 | } | 543 | } |
544 | 544 | ||
545 | /* | 545 | /* |
@@ -604,7 +604,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, | |||
604 | btree->before = cpu_to_be32(newblk->blkno); | 604 | btree->before = cpu_to_be32(newblk->blkno); |
605 | xfs_da_log_buf(state->args->trans, oldblk->bp, | 605 | xfs_da_log_buf(state->args->trans, oldblk->bp, |
606 | XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); | 606 | XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); |
607 | be16_add(&node->hdr.count, 1); | 607 | be16_add_cpu(&node->hdr.count, 1); |
608 | xfs_da_log_buf(state->args->trans, oldblk->bp, | 608 | xfs_da_log_buf(state->args->trans, oldblk->bp, |
609 | XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); | 609 | XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); |
610 | 610 | ||
@@ -959,7 +959,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) | |||
959 | memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); | 959 | memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); |
960 | xfs_da_log_buf(state->args->trans, drop_blk->bp, | 960 | xfs_da_log_buf(state->args->trans, drop_blk->bp, |
961 | XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); | 961 | XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); |
962 | be16_add(&node->hdr.count, -1); | 962 | be16_add_cpu(&node->hdr.count, -1); |
963 | xfs_da_log_buf(state->args->trans, drop_blk->bp, | 963 | xfs_da_log_buf(state->args->trans, drop_blk->bp, |
964 | XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); | 964 | XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); |
965 | 965 | ||
@@ -1018,7 +1018,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, | |||
1018 | */ | 1018 | */ |
1019 | tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); | 1019 | tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); |
1020 | memcpy(btree, &drop_node->btree[0], tmp); | 1020 | memcpy(btree, &drop_node->btree[0], tmp); |
1021 | be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); | 1021 | be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); |
1022 | 1022 | ||
1023 | xfs_da_log_buf(tp, save_blk->bp, | 1023 | xfs_da_log_buf(tp, save_blk->bp, |
1024 | XFS_DA_LOGRANGE(save_node, &save_node->hdr, | 1024 | XFS_DA_LOGRANGE(save_node, &save_node->hdr, |
@@ -2218,7 +2218,7 @@ xfs_da_state_free(xfs_da_state_t *state) | |||
2218 | 2218 | ||
2219 | #ifdef XFS_DABUF_DEBUG | 2219 | #ifdef XFS_DABUF_DEBUG |
2220 | xfs_dabuf_t *xfs_dabuf_global_list; | 2220 | xfs_dabuf_t *xfs_dabuf_global_list; |
2221 | lock_t xfs_dabuf_global_lock; | 2221 | spinlock_t xfs_dabuf_global_lock; |
2222 | #endif | 2222 | #endif |
2223 | 2223 | ||
2224 | /* | 2224 | /* |
@@ -2264,10 +2264,9 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) | |||
2264 | } | 2264 | } |
2265 | #ifdef XFS_DABUF_DEBUG | 2265 | #ifdef XFS_DABUF_DEBUG |
2266 | { | 2266 | { |
2267 | SPLDECL(s); | ||
2268 | xfs_dabuf_t *p; | 2267 | xfs_dabuf_t *p; |
2269 | 2268 | ||
2270 | s = mutex_spinlock(&xfs_dabuf_global_lock); | 2269 | spin_lock(&xfs_dabuf_global_lock); |
2271 | for (p = xfs_dabuf_global_list; p; p = p->next) { | 2270 | for (p = xfs_dabuf_global_list; p; p = p->next) { |
2272 | ASSERT(p->blkno != dabuf->blkno || | 2271 | ASSERT(p->blkno != dabuf->blkno || |
2273 | p->target != dabuf->target); | 2272 | p->target != dabuf->target); |
@@ -2277,7 +2276,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) | |||
2277 | xfs_dabuf_global_list->prev = dabuf; | 2276 | xfs_dabuf_global_list->prev = dabuf; |
2278 | dabuf->next = xfs_dabuf_global_list; | 2277 | dabuf->next = xfs_dabuf_global_list; |
2279 | xfs_dabuf_global_list = dabuf; | 2278 | xfs_dabuf_global_list = dabuf; |
2280 | mutex_spinunlock(&xfs_dabuf_global_lock, s); | 2279 | spin_unlock(&xfs_dabuf_global_lock); |
2281 | } | 2280 | } |
2282 | #endif | 2281 | #endif |
2283 | return dabuf; | 2282 | return dabuf; |
@@ -2319,16 +2318,14 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf) | |||
2319 | kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); | 2318 | kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); |
2320 | #ifdef XFS_DABUF_DEBUG | 2319 | #ifdef XFS_DABUF_DEBUG |
2321 | { | 2320 | { |
2322 | SPLDECL(s); | 2321 | spin_lock(&xfs_dabuf_global_lock); |
2323 | |||
2324 | s = mutex_spinlock(&xfs_dabuf_global_lock); | ||
2325 | if (dabuf->prev) | 2322 | if (dabuf->prev) |
2326 | dabuf->prev->next = dabuf->next; | 2323 | dabuf->prev->next = dabuf->next; |
2327 | else | 2324 | else |
2328 | xfs_dabuf_global_list = dabuf->next; | 2325 | xfs_dabuf_global_list = dabuf->next; |
2329 | if (dabuf->next) | 2326 | if (dabuf->next) |
2330 | dabuf->next->prev = dabuf->prev; | 2327 | dabuf->next->prev = dabuf->prev; |
2331 | mutex_spinunlock(&xfs_dabuf_global_lock, s); | 2328 | spin_unlock(&xfs_dabuf_global_lock); |
2332 | } | 2329 | } |
2333 | memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); | 2330 | memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); |
2334 | #endif | 2331 | #endif |
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 44dabf02f2a3..7facf86f74f9 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h | |||
@@ -260,6 +260,7 @@ void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf); | |||
260 | xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); | 260 | xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); |
261 | 261 | ||
262 | extern struct kmem_zone *xfs_da_state_zone; | 262 | extern struct kmem_zone *xfs_da_state_zone; |
263 | extern struct kmem_zone *xfs_dabuf_zone; | ||
263 | #endif /* __KERNEL__ */ | 264 | #endif /* __KERNEL__ */ |
264 | 265 | ||
265 | #endif /* __XFS_DA_BTREE_H__ */ | 266 | #endif /* __XFS_DA_BTREE_H__ */ |
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 584f1ae85cd9..3f53fad356a3 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -52,76 +52,72 @@ xfs_swapext( | |||
52 | xfs_swapext_t __user *sxu) | 52 | xfs_swapext_t __user *sxu) |
53 | { | 53 | { |
54 | xfs_swapext_t *sxp; | 54 | xfs_swapext_t *sxp; |
55 | xfs_inode_t *ip=NULL, *tip=NULL; | 55 | xfs_inode_t *ip, *tip; |
56 | xfs_mount_t *mp; | 56 | struct file *file, *target_file; |
57 | struct file *fp = NULL, *tfp = NULL; | ||
58 | bhv_vnode_t *vp, *tvp; | ||
59 | int error = 0; | 57 | int error = 0; |
60 | 58 | ||
61 | sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL); | 59 | sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL); |
62 | if (!sxp) { | 60 | if (!sxp) { |
63 | error = XFS_ERROR(ENOMEM); | 61 | error = XFS_ERROR(ENOMEM); |
64 | goto error0; | 62 | goto out; |
65 | } | 63 | } |
66 | 64 | ||
67 | if (copy_from_user(sxp, sxu, sizeof(xfs_swapext_t))) { | 65 | if (copy_from_user(sxp, sxu, sizeof(xfs_swapext_t))) { |
68 | error = XFS_ERROR(EFAULT); | 66 | error = XFS_ERROR(EFAULT); |
69 | goto error0; | 67 | goto out_free_sxp; |
70 | } | 68 | } |
71 | 69 | ||
72 | /* Pull information for the target fd */ | 70 | /* Pull information for the target fd */ |
73 | if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || | 71 | file = fget((int)sxp->sx_fdtarget); |
74 | ((vp = vn_from_inode(fp->f_path.dentry->d_inode)) == NULL)) { | 72 | if (!file) { |
75 | error = XFS_ERROR(EINVAL); | 73 | error = XFS_ERROR(EINVAL); |
76 | goto error0; | 74 | goto out_free_sxp; |
77 | } | 75 | } |
78 | 76 | ||
79 | ip = xfs_vtoi(vp); | 77 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { |
80 | if (ip == NULL) { | ||
81 | error = XFS_ERROR(EBADF); | 78 | error = XFS_ERROR(EBADF); |
82 | goto error0; | 79 | goto out_put_file; |
83 | } | 80 | } |
84 | 81 | ||
85 | if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || | 82 | target_file = fget((int)sxp->sx_fdtmp); |
86 | ((tvp = vn_from_inode(tfp->f_path.dentry->d_inode)) == NULL)) { | 83 | if (!target_file) { |
87 | error = XFS_ERROR(EINVAL); | 84 | error = XFS_ERROR(EINVAL); |
88 | goto error0; | 85 | goto out_put_file; |
89 | } | 86 | } |
90 | 87 | ||
91 | tip = xfs_vtoi(tvp); | 88 | if (!(target_file->f_mode & FMODE_WRITE) || |
92 | if (tip == NULL) { | 89 | (target_file->f_flags & O_APPEND)) { |
93 | error = XFS_ERROR(EBADF); | 90 | error = XFS_ERROR(EBADF); |
94 | goto error0; | 91 | goto out_put_target_file; |
95 | } | 92 | } |
96 | 93 | ||
94 | ip = XFS_I(file->f_path.dentry->d_inode); | ||
95 | tip = XFS_I(target_file->f_path.dentry->d_inode); | ||
96 | |||
97 | if (ip->i_mount != tip->i_mount) { | 97 | if (ip->i_mount != tip->i_mount) { |
98 | error = XFS_ERROR(EINVAL); | 98 | error = XFS_ERROR(EINVAL); |
99 | goto error0; | 99 | goto out_put_target_file; |
100 | } | 100 | } |
101 | 101 | ||
102 | if (ip->i_ino == tip->i_ino) { | 102 | if (ip->i_ino == tip->i_ino) { |
103 | error = XFS_ERROR(EINVAL); | 103 | error = XFS_ERROR(EINVAL); |
104 | goto error0; | 104 | goto out_put_target_file; |
105 | } | 105 | } |
106 | 106 | ||
107 | mp = ip->i_mount; | 107 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
108 | 108 | error = XFS_ERROR(EIO); | |
109 | if (XFS_FORCED_SHUTDOWN(mp)) { | 109 | goto out_put_target_file; |
110 | error = XFS_ERROR(EIO); | ||
111 | goto error0; | ||
112 | } | 110 | } |
113 | 111 | ||
114 | error = XFS_SWAP_EXTENTS(mp, &ip->i_iocore, &tip->i_iocore, sxp); | 112 | error = xfs_swap_extents(ip, tip, sxp); |
115 | |||
116 | error0: | ||
117 | if (fp != NULL) | ||
118 | fput(fp); | ||
119 | if (tfp != NULL) | ||
120 | fput(tfp); | ||
121 | |||
122 | if (sxp != NULL) | ||
123 | kmem_free(sxp, sizeof(xfs_swapext_t)); | ||
124 | 113 | ||
114 | out_put_target_file: | ||
115 | fput(target_file); | ||
116 | out_put_file: | ||
117 | fput(file); | ||
118 | out_free_sxp: | ||
119 | kmem_free(sxp, sizeof(xfs_swapext_t)); | ||
120 | out: | ||
125 | return error; | 121 | return error; |
126 | } | 122 | } |
127 | 123 | ||
@@ -169,15 +165,6 @@ xfs_swap_extents( | |||
169 | xfs_lock_inodes(ips, 2, 0, lock_flags); | 165 | xfs_lock_inodes(ips, 2, 0, lock_flags); |
170 | locked = 1; | 166 | locked = 1; |
171 | 167 | ||
172 | /* Check permissions */ | ||
173 | error = xfs_iaccess(ip, S_IWUSR, NULL); | ||
174 | if (error) | ||
175 | goto error0; | ||
176 | |||
177 | error = xfs_iaccess(tip, S_IWUSR, NULL); | ||
178 | if (error) | ||
179 | goto error0; | ||
180 | |||
181 | /* Verify that both files have the same format */ | 168 | /* Verify that both files have the same format */ |
182 | if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { | 169 | if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { |
183 | error = XFS_ERROR(EINVAL); | 170 | error = XFS_ERROR(EINVAL); |
@@ -185,8 +172,7 @@ xfs_swap_extents( | |||
185 | } | 172 | } |
186 | 173 | ||
187 | /* Verify both files are either real-time or non-realtime */ | 174 | /* Verify both files are either real-time or non-realtime */ |
188 | if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != | 175 | if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { |
189 | (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { | ||
190 | error = XFS_ERROR(EINVAL); | 176 | error = XFS_ERROR(EINVAL); |
191 | goto error0; | 177 | goto error0; |
192 | } | 178 | } |
@@ -199,7 +185,7 @@ xfs_swap_extents( | |||
199 | } | 185 | } |
200 | 186 | ||
201 | if (VN_CACHED(tvp) != 0) { | 187 | if (VN_CACHED(tvp) != 0) { |
202 | xfs_inval_cached_trace(&tip->i_iocore, 0, -1, 0, -1); | 188 | xfs_inval_cached_trace(tip, 0, -1, 0, -1); |
203 | error = xfs_flushinval_pages(tip, 0, -1, | 189 | error = xfs_flushinval_pages(tip, 0, -1, |
204 | FI_REMAPF_LOCKED); | 190 | FI_REMAPF_LOCKED); |
205 | if (error) | 191 | if (error) |
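The rewritten xfs_swapext() above switches from a single error0: exit that has to test every pointer to the standard goto-unwind style: each acquired resource gets its own label, and an error jumps to the label that releases exactly what has been taken so far (writability is now checked via f_mode/f_flags rather than xfs_iaccess()). A generic sketch of that unwind shape, with hypothetical names, illustrative error codes, and the real work elided, assuming <linux/file.h>:

	#include <linux/file.h>

	static int two_file_op_sketch(int fd1, int fd2)
	{
		struct file *file, *target;
		int error;

		file = fget(fd1);
		if (!file)
			return -EINVAL;

		target = fget(fd2);
		if (!target) {
			error = -EINVAL;
			goto out_put_file;	/* undo only what we already hold */
		}

		error = 0;			/* ... the real operation goes here ... */

		fput(target);
	out_put_file:
		fput(file);
		return error;
	}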
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index dedd713574e1..c9065eaf2a4d 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h | |||
@@ -171,69 +171,35 @@ typedef enum xfs_dinode_fmt | |||
171 | /* | 171 | /* |
172 | * Inode data & attribute fork sizes, per inode. | 172 | * Inode data & attribute fork sizes, per inode. |
173 | */ | 173 | */ |
174 | #define XFS_CFORK_Q(dcp) ((dcp)->di_forkoff != 0) | 174 | #define XFS_DFORK_Q(dip) ((dip)->di_core.di_forkoff != 0) |
175 | #define XFS_CFORK_Q_DISK(dcp) ((dcp)->di_forkoff != 0) | 175 | #define XFS_DFORK_BOFF(dip) ((int)((dip)->di_core.di_forkoff << 3)) |
176 | |||
177 | #define XFS_CFORK_BOFF(dcp) ((int)((dcp)->di_forkoff << 3)) | ||
178 | #define XFS_CFORK_BOFF_DISK(dcp) ((int)((dcp)->di_forkoff << 3)) | ||
179 | |||
180 | #define XFS_CFORK_DSIZE_DISK(dcp,mp) \ | ||
181 | (XFS_CFORK_Q_DISK(dcp) ? XFS_CFORK_BOFF_DISK(dcp) : XFS_LITINO(mp)) | ||
182 | #define XFS_CFORK_DSIZE(dcp,mp) \ | ||
183 | (XFS_CFORK_Q(dcp) ? XFS_CFORK_BOFF(dcp) : XFS_LITINO(mp)) | ||
184 | |||
185 | #define XFS_CFORK_ASIZE_DISK(dcp,mp) \ | ||
186 | (XFS_CFORK_Q_DISK(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF_DISK(dcp) : 0) | ||
187 | #define XFS_CFORK_ASIZE(dcp,mp) \ | ||
188 | (XFS_CFORK_Q(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF(dcp) : 0) | ||
189 | |||
190 | #define XFS_CFORK_SIZE_DISK(dcp,mp,w) \ | ||
191 | ((w) == XFS_DATA_FORK ? \ | ||
192 | XFS_CFORK_DSIZE_DISK(dcp, mp) : \ | ||
193 | XFS_CFORK_ASIZE_DISK(dcp, mp)) | ||
194 | #define XFS_CFORK_SIZE(dcp,mp,w) \ | ||
195 | ((w) == XFS_DATA_FORK ? \ | ||
196 | XFS_CFORK_DSIZE(dcp, mp) : XFS_CFORK_ASIZE(dcp, mp)) | ||
197 | 176 | ||
198 | #define XFS_DFORK_DSIZE(dip,mp) \ | 177 | #define XFS_DFORK_DSIZE(dip,mp) \ |
199 | XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp) | 178 | (XFS_DFORK_Q(dip) ? \ |
200 | #define XFS_DFORK_DSIZE_HOST(dip,mp) \ | 179 | XFS_DFORK_BOFF(dip) : \ |
201 | XFS_CFORK_DSIZE(&(dip)->di_core, mp) | 180 | XFS_LITINO(mp)) |
202 | #define XFS_DFORK_ASIZE(dip,mp) \ | 181 | #define XFS_DFORK_ASIZE(dip,mp) \ |
203 | XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp) | 182 | (XFS_DFORK_Q(dip) ? \ |
204 | #define XFS_DFORK_ASIZE_HOST(dip,mp) \ | 183 | XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : \ |
205 | XFS_CFORK_ASIZE(&(dip)->di_core, mp) | 184 | 0) |
206 | #define XFS_DFORK_SIZE(dip,mp,w) \ | 185 | #define XFS_DFORK_SIZE(dip,mp,w) \ |
207 | XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w) | 186 | ((w) == XFS_DATA_FORK ? \ |
208 | #define XFS_DFORK_SIZE_HOST(dip,mp,w) \ | 187 | XFS_DFORK_DSIZE(dip, mp) : \ |
209 | XFS_CFORK_SIZE(&(dip)->di_core, mp, w) | 188 | XFS_DFORK_ASIZE(dip, mp)) |
210 | 189 | ||
211 | #define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core) | 190 | #define XFS_DFORK_DPTR(dip) ((dip)->di_u.di_c) |
212 | #define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core) | 191 | #define XFS_DFORK_APTR(dip) \ |
213 | #define XFS_DFORK_DPTR(dip) ((dip)->di_u.di_c) | ||
214 | #define XFS_DFORK_APTR(dip) \ | ||
215 | ((dip)->di_u.di_c + XFS_DFORK_BOFF(dip)) | 192 | ((dip)->di_u.di_c + XFS_DFORK_BOFF(dip)) |
216 | #define XFS_DFORK_PTR(dip,w) \ | 193 | #define XFS_DFORK_PTR(dip,w) \ |
217 | ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip)) | 194 | ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip)) |
218 | #define XFS_CFORK_FORMAT(dcp,w) \ | 195 | #define XFS_DFORK_FORMAT(dip,w) \ |
219 | ((w) == XFS_DATA_FORK ? (dcp)->di_format : (dcp)->di_aformat) | ||
220 | #define XFS_CFORK_FMT_SET(dcp,w,n) \ | ||
221 | ((w) == XFS_DATA_FORK ? \ | 196 | ((w) == XFS_DATA_FORK ? \ |
222 | ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n))) | 197 | (dip)->di_core.di_format : \ |
223 | #define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w) | 198 | (dip)->di_core.di_aformat) |
224 | | 199 | #define XFS_DFORK_NEXTENTS(dip,w) \ |
225 | #define XFS_CFORK_NEXTENTS_DISK(dcp,w) \ | ||
226 | ((w) == XFS_DATA_FORK ? \ | 200 | ((w) == XFS_DATA_FORK ? \ |
227 | be32_to_cpu((dcp)->di_nextents) : \ | 201 | be32_to_cpu((dip)->di_core.di_nextents) : \ |
228 | be16_to_cpu((dcp)->di_anextents)) | 202 | be16_to_cpu((dip)->di_core.di_anextents)) |
229 | #define XFS_CFORK_NEXTENTS(dcp,w) \ | ||
230 | ((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents) | ||
231 | #define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w) | ||
232 | #define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w) | ||
233 | |||
234 | #define XFS_CFORK_NEXT_SET(dcp,w,n) \ | ||
235 | ((w) == XFS_DATA_FORK ? \ | ||
236 | ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n))) | ||
237 | 203 | ||
238 | #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) | 204 | #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) |
239 | 205 | ||
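The hunk above folds the old XFS_CFORK_* / *_HOST macro pairs into one set of XFS_DFORK_* macros that operate on the big-endian on-disk inode. The arithmetic they encode is unchanged: di_forkoff records, in 8-byte units, where the attribute fork begins, so the data fork gets the bytes below that offset and the attribute fork gets the remainder of the inode's literal area. A minimal userspace sketch of that split, assuming an illustrative literal-area size of 156 bytes in place of XFS_LITINO(mp):

#include <stdio.h>

/* model of XFS_DFORK_DSIZE(): the data fork ends where the attr fork starts */
static int dfork_dsize(int forkoff, int litino)
{
        return forkoff ? (forkoff << 3) : litino;
}

/* model of XFS_DFORK_ASIZE(): the attr fork takes whatever the data fork left */
static int dfork_asize(int forkoff, int litino)
{
        return forkoff ? litino - (forkoff << 3) : 0;
}

int main(void)
{
        int litino = 156;       /* illustrative XFS_LITINO(mp) value */

        printf("forkoff 0 : data=%d attr=%d\n",
               dfork_dsize(0, litino), dfork_asize(0, litino));
        printf("forkoff 12: data=%d attr=%d\n",
               dfork_dsize(12, litino), dfork_asize(12, litino));
        return 0;
}

A forkoff of zero means there is no attribute fork at all, which is exactly the test XFS_DFORK_Q() performs.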
@@ -273,6 +239,12 @@ typedef enum xfs_dinode_fmt | |||
273 | #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) | 239 | #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) |
274 | #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) | 240 | #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) |
275 | 241 | ||
242 | #ifdef CONFIG_XFS_RT | ||
243 | #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) | ||
244 | #else | ||
245 | #define XFS_IS_REALTIME_INODE(ip) (0) | ||
246 | #endif | ||
247 | |||
276 | #define XFS_DIFLAG_ANY \ | 248 | #define XFS_DIFLAG_ANY \ |
277 | (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ | 249 | (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ |
278 | XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ | 250 | XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ |
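XFS_IS_REALTIME_INODE() now lives here and collapses to a constant 0 when CONFIG_XFS_RT is disabled, letting the compiler discard realtime-only branches such as the one rewritten in xfs_swap_extents() above. A self-contained model of the same pattern, using a hypothetical MODEL_RT switch in place of CONFIG_XFS_RT:

#include <stdio.h>

#define DIFLAG_REALTIME (1 << 0)

#ifdef MODEL_RT
#define IS_REALTIME(flags)      ((flags) & DIFLAG_REALTIME)
#else
#define IS_REALTIME(flags)      (0)     /* branch folds away at compile time */
#endif

int main(void)
{
        unsigned int di_flags = DIFLAG_REALTIME;

        if (IS_REALTIME(di_flags))
                printf("realtime path taken\n");
        else
                printf("realtime support compiled out (try -DMODEL_RT)\n");
        return 0;
}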
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index b0f1ee8fcb90..be7c4251fa61 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "xfs_dir2_node.h" | 42 | #include "xfs_dir2_node.h" |
43 | #include "xfs_dir2_trace.h" | 43 | #include "xfs_dir2_trace.h" |
44 | #include "xfs_error.h" | 44 | #include "xfs_error.h" |
45 | #include "xfs_vnodeops.h" | ||
45 | 46 | ||
46 | 47 | ||
47 | void | 48 | void |
@@ -301,7 +302,7 @@ xfs_readdir( | |||
301 | int rval; /* return value */ | 302 | int rval; /* return value */ |
302 | int v; /* type-checking value */ | 303 | int v; /* type-checking value */ |
303 | 304 | ||
304 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 305 | xfs_itrace_entry(dp); |
305 | 306 | ||
306 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 307 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
307 | return XFS_ERROR(EIO); | 308 | return XFS_ERROR(EIO); |
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index a5f4f4fb8868..fb5a556725b3 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c | |||
@@ -271,7 +271,7 @@ xfs_dir2_block_addname( | |||
271 | } | 271 | } |
272 | lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); | 272 | lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); |
273 | lfloghigh -= be32_to_cpu(btp->stale) - 1; | 273 | lfloghigh -= be32_to_cpu(btp->stale) - 1; |
274 | be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1)); | 274 | be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1)); |
275 | xfs_dir2_data_make_free(tp, bp, | 275 | xfs_dir2_data_make_free(tp, bp, |
276 | (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), | 276 | (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), |
277 | (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), | 277 | (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), |
@@ -326,7 +326,7 @@ xfs_dir2_block_addname( | |||
326 | /* | 326 | /* |
327 | * Update the tail (entry count). | 327 | * Update the tail (entry count). |
328 | */ | 328 | */ |
329 | be32_add(&btp->count, 1); | 329 | be32_add_cpu(&btp->count, 1); |
330 | /* | 330 | /* |
331 | * If we now need to rebuild the bestfree map, do so. | 331 | * If we now need to rebuild the bestfree map, do so. |
332 | * This needs to happen before the next call to use_free. | 332 | * This needs to happen before the next call to use_free. |
@@ -387,7 +387,7 @@ xfs_dir2_block_addname( | |||
387 | lfloglow = MIN(mid, lfloglow); | 387 | lfloglow = MIN(mid, lfloglow); |
388 | lfloghigh = MAX(highstale, lfloghigh); | 388 | lfloghigh = MAX(highstale, lfloghigh); |
389 | } | 389 | } |
390 | be32_add(&btp->stale, -1); | 390 | be32_add_cpu(&btp->stale, -1); |
391 | } | 391 | } |
392 | /* | 392 | /* |
393 | * Point to the new data entry. | 393 | * Point to the new data entry. |
@@ -767,7 +767,7 @@ xfs_dir2_block_removename( | |||
767 | /* | 767 | /* |
768 | * Fix up the block tail. | 768 | * Fix up the block tail. |
769 | */ | 769 | */ |
770 | be32_add(&btp->stale, 1); | 770 | be32_add_cpu(&btp->stale, 1); |
771 | xfs_dir2_block_log_tail(tp, bp); | 771 | xfs_dir2_block_log_tail(tp, bp); |
772 | /* | 772 | /* |
773 | * Remove the leaf entry by marking it stale. | 773 | * Remove the leaf entry by marking it stale. |
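The be32_add()/be16_add() calls replaced throughout the directory code become be32_add_cpu()/be16_add_cpu(): the new names spell out that a CPU-endian delta is added to a big-endian on-disk field, and the helpers come from the kernel's generic byteorder support rather than from XFS itself. A hedged userspace model of the semantics, with htonl()/ntohl() standing in for cpu_to_be32()/be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* decode the big-endian field, apply the CPU-endian delta, re-encode */
static void be32_add_cpu_model(uint32_t *bep, int32_t delta)
{
        *bep = htonl(ntohl(*bep) + delta);
}

int main(void)
{
        uint32_t count = htonl(7);              /* e.g. btp->count on disk */

        be32_add_cpu_model(&count, -2);         /* drop two stale entries */
        printf("count is now %u\n", ntohl(count));
        return 0;
}

The 16- and 64-bit variants follow the same decode, add, re-encode shape.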
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index d2452699e9b1..fb8c9e08b23d 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c | |||
@@ -587,7 +587,7 @@ xfs_dir2_data_make_free( | |||
587 | /* | 587 | /* |
588 | * Fix up the new big freespace. | 588 | * Fix up the new big freespace. |
589 | */ | 589 | */ |
590 | be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); | 590 | be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length)); |
591 | *xfs_dir2_data_unused_tag_p(prevdup) = | 591 | *xfs_dir2_data_unused_tag_p(prevdup) = |
592 | cpu_to_be16((char *)prevdup - (char *)d); | 592 | cpu_to_be16((char *)prevdup - (char *)d); |
593 | xfs_dir2_data_log_unused(tp, bp, prevdup); | 593 | xfs_dir2_data_log_unused(tp, bp, prevdup); |
@@ -621,7 +621,7 @@ xfs_dir2_data_make_free( | |||
621 | */ | 621 | */ |
622 | else if (prevdup) { | 622 | else if (prevdup) { |
623 | dfp = xfs_dir2_data_freefind(d, prevdup); | 623 | dfp = xfs_dir2_data_freefind(d, prevdup); |
624 | be16_add(&prevdup->length, len); | 624 | be16_add_cpu(&prevdup->length, len); |
625 | *xfs_dir2_data_unused_tag_p(prevdup) = | 625 | *xfs_dir2_data_unused_tag_p(prevdup) = |
626 | cpu_to_be16((char *)prevdup - (char *)d); | 626 | cpu_to_be16((char *)prevdup - (char *)d); |
627 | xfs_dir2_data_log_unused(tp, bp, prevdup); | 627 | xfs_dir2_data_log_unused(tp, bp, prevdup); |
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 0ca0020ba09f..bc52b803d79b 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c | |||
@@ -359,7 +359,7 @@ xfs_dir2_leaf_addname( | |||
359 | bestsp--; | 359 | bestsp--; |
360 | memmove(&bestsp[0], &bestsp[1], | 360 | memmove(&bestsp[0], &bestsp[1], |
361 | be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); | 361 | be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); |
362 | be32_add(&ltp->bestcount, 1); | 362 | be32_add_cpu(&ltp->bestcount, 1); |
363 | xfs_dir2_leaf_log_tail(tp, lbp); | 363 | xfs_dir2_leaf_log_tail(tp, lbp); |
364 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); | 364 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); |
365 | } | 365 | } |
@@ -445,7 +445,7 @@ xfs_dir2_leaf_addname( | |||
445 | */ | 445 | */ |
446 | lfloglow = index; | 446 | lfloglow = index; |
447 | lfloghigh = be16_to_cpu(leaf->hdr.count); | 447 | lfloghigh = be16_to_cpu(leaf->hdr.count); |
448 | be16_add(&leaf->hdr.count, 1); | 448 | be16_add_cpu(&leaf->hdr.count, 1); |
449 | } | 449 | } |
450 | /* | 450 | /* |
451 | * There are stale entries. | 451 | * There are stale entries. |
@@ -523,7 +523,7 @@ xfs_dir2_leaf_addname( | |||
523 | lfloglow = MIN(index, lfloglow); | 523 | lfloglow = MIN(index, lfloglow); |
524 | lfloghigh = MAX(highstale, lfloghigh); | 524 | lfloghigh = MAX(highstale, lfloghigh); |
525 | } | 525 | } |
526 | be16_add(&leaf->hdr.stale, -1); | 526 | be16_add_cpu(&leaf->hdr.stale, -1); |
527 | } | 527 | } |
528 | /* | 528 | /* |
529 | * Fill in the new leaf entry. | 529 | * Fill in the new leaf entry. |
@@ -626,7 +626,7 @@ xfs_dir2_leaf_compact( | |||
626 | * Update and log the header, log the leaf entries. | 626 | * Update and log the header, log the leaf entries. |
627 | */ | 627 | */ |
628 | ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); | 628 | ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); |
629 | be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); | 629 | be16_add_cpu(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); |
630 | leaf->hdr.stale = 0; | 630 | leaf->hdr.stale = 0; |
631 | xfs_dir2_leaf_log_header(args->trans, bp); | 631 | xfs_dir2_leaf_log_header(args->trans, bp); |
632 | if (loglow != -1) | 632 | if (loglow != -1) |
@@ -728,7 +728,7 @@ xfs_dir2_leaf_compact_x1( | |||
728 | /* | 728 | /* |
729 | * Adjust the leaf header values. | 729 | * Adjust the leaf header values. |
730 | */ | 730 | */ |
731 | be16_add(&leaf->hdr.count, -(from - to)); | 731 | be16_add_cpu(&leaf->hdr.count, -(from - to)); |
732 | leaf->hdr.stale = cpu_to_be16(1); | 732 | leaf->hdr.stale = cpu_to_be16(1); |
733 | /* | 733 | /* |
734 | * Remember the low/high stale value only in the "right" | 734 | * Remember the low/high stale value only in the "right" |
@@ -1470,7 +1470,7 @@ xfs_dir2_leaf_removename( | |||
1470 | /* | 1470 | /* |
1471 | * We just mark the leaf entry stale by putting a null in it. | 1471 | * We just mark the leaf entry stale by putting a null in it. |
1472 | */ | 1472 | */ |
1473 | be16_add(&leaf->hdr.stale, 1); | 1473 | be16_add_cpu(&leaf->hdr.stale, 1); |
1474 | xfs_dir2_leaf_log_header(tp, lbp); | 1474 | xfs_dir2_leaf_log_header(tp, lbp); |
1475 | lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); | 1475 | lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); |
1476 | xfs_dir2_leaf_log_ents(tp, lbp, index, index); | 1476 | xfs_dir2_leaf_log_ents(tp, lbp, index, index); |
@@ -1531,7 +1531,7 @@ xfs_dir2_leaf_removename( | |||
1531 | */ | 1531 | */ |
1532 | memmove(&bestsp[db - i], bestsp, | 1532 | memmove(&bestsp[db - i], bestsp, |
1533 | (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); | 1533 | (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); |
1534 | be32_add(&ltp->bestcount, -(db - i)); | 1534 | be32_add_cpu(&ltp->bestcount, -(db - i)); |
1535 | xfs_dir2_leaf_log_tail(tp, lbp); | 1535 | xfs_dir2_leaf_log_tail(tp, lbp); |
1536 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); | 1536 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); |
1537 | } else | 1537 | } else |
@@ -1712,7 +1712,7 @@ xfs_dir2_leaf_trim_data( | |||
1712 | * Eliminate the last bests entry from the table. | 1712 | * Eliminate the last bests entry from the table. |
1713 | */ | 1713 | */ |
1714 | bestsp = xfs_dir2_leaf_bests_p(ltp); | 1714 | bestsp = xfs_dir2_leaf_bests_p(ltp); |
1715 | be32_add(&ltp->bestcount, -1); | 1715 | be32_add_cpu(&ltp->bestcount, -1); |
1716 | memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); | 1716 | memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); |
1717 | xfs_dir2_leaf_log_tail(tp, lbp); | 1717 | xfs_dir2_leaf_log_tail(tp, lbp); |
1718 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); | 1718 | xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); |
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index eb18e399e836..8dade711f099 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c | |||
@@ -254,7 +254,7 @@ xfs_dir2_leafn_add( | |||
254 | (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); | 254 | (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); |
255 | lfloglow = index; | 255 | lfloglow = index; |
256 | lfloghigh = be16_to_cpu(leaf->hdr.count); | 256 | lfloghigh = be16_to_cpu(leaf->hdr.count); |
257 | be16_add(&leaf->hdr.count, 1); | 257 | be16_add_cpu(&leaf->hdr.count, 1); |
258 | } | 258 | } |
259 | /* | 259 | /* |
260 | * There are stale entries. We'll use one for the new entry. | 260 | * There are stale entries. We'll use one for the new entry. |
@@ -322,7 +322,7 @@ xfs_dir2_leafn_add( | |||
322 | lfloglow = MIN(index, lfloglow); | 322 | lfloglow = MIN(index, lfloglow); |
323 | lfloghigh = MAX(highstale, lfloghigh); | 323 | lfloghigh = MAX(highstale, lfloghigh); |
324 | } | 324 | } |
325 | be16_add(&leaf->hdr.stale, -1); | 325 | be16_add_cpu(&leaf->hdr.stale, -1); |
326 | } | 326 | } |
327 | /* | 327 | /* |
328 | * Insert the new entry, log everything. | 328 | * Insert the new entry, log everything. |
@@ -697,10 +697,10 @@ xfs_dir2_leafn_moveents( | |||
697 | /* | 697 | /* |
698 | * Update the headers and log them. | 698 | * Update the headers and log them. |
699 | */ | 699 | */ |
700 | be16_add(&leaf_s->hdr.count, -(count)); | 700 | be16_add_cpu(&leaf_s->hdr.count, -(count)); |
701 | be16_add(&leaf_s->hdr.stale, -(stale)); | 701 | be16_add_cpu(&leaf_s->hdr.stale, -(stale)); |
702 | be16_add(&leaf_d->hdr.count, count); | 702 | be16_add_cpu(&leaf_d->hdr.count, count); |
703 | be16_add(&leaf_d->hdr.stale, stale); | 703 | be16_add_cpu(&leaf_d->hdr.stale, stale); |
704 | xfs_dir2_leaf_log_header(tp, bp_s); | 704 | xfs_dir2_leaf_log_header(tp, bp_s); |
705 | xfs_dir2_leaf_log_header(tp, bp_d); | 705 | xfs_dir2_leaf_log_header(tp, bp_d); |
706 | xfs_dir2_leafn_check(args->dp, bp_s); | 706 | xfs_dir2_leafn_check(args->dp, bp_s); |
@@ -885,7 +885,7 @@ xfs_dir2_leafn_remove( | |||
885 | * Kill the leaf entry by marking it stale. | 885 | * Kill the leaf entry by marking it stale. |
886 | * Log the leaf block changes. | 886 | * Log the leaf block changes. |
887 | */ | 887 | */ |
888 | be16_add(&leaf->hdr.stale, 1); | 888 | be16_add_cpu(&leaf->hdr.stale, 1); |
889 | xfs_dir2_leaf_log_header(tp, bp); | 889 | xfs_dir2_leaf_log_header(tp, bp); |
890 | lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); | 890 | lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); |
891 | xfs_dir2_leaf_log_ents(tp, bp, index, index); | 891 | xfs_dir2_leaf_log_ents(tp, bp, index, index); |
@@ -971,7 +971,7 @@ xfs_dir2_leafn_remove( | |||
971 | /* | 971 | /* |
972 | * One less used entry in the free table. | 972 | * One less used entry in the free table. |
973 | */ | 973 | */ |
974 | be32_add(&free->hdr.nused, -1); | 974 | be32_add_cpu(&free->hdr.nused, -1); |
975 | xfs_dir2_free_log_header(tp, fbp); | 975 | xfs_dir2_free_log_header(tp, fbp); |
976 | /* | 976 | /* |
977 | * If this was the last entry in the table, we can | 977 | * If this was the last entry in the table, we can |
@@ -1642,7 +1642,7 @@ xfs_dir2_node_addname_int( | |||
1642 | * (this should always be true) then update the header. | 1642 | * (this should always be true) then update the header. |
1643 | */ | 1643 | */ |
1644 | if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { | 1644 | if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { |
1645 | be32_add(&free->hdr.nused, 1); | 1645 | be32_add_cpu(&free->hdr.nused, 1); |
1646 | xfs_dir2_free_log_header(tp, fbp); | 1646 | xfs_dir2_free_log_header(tp, fbp); |
1647 | } | 1647 | } |
1648 | /* | 1648 | /* |
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index a4634d94e561..05e5365d3c31 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c | |||
@@ -230,37 +230,6 @@ xfs_error_report( | |||
230 | } | 230 | } |
231 | } | 231 | } |
232 | 232 | ||
233 | STATIC void | ||
234 | xfs_hex_dump(void *p, int length) | ||
235 | { | ||
236 | __uint8_t *uip = (__uint8_t*)p; | ||
237 | int i; | ||
238 | char sbuf[128], *s; | ||
239 | |||
240 | s = sbuf; | ||
241 | *s = '\0'; | ||
242 | for (i=0; i<length; i++, uip++) { | ||
243 | if ((i % 16) == 0) { | ||
244 | if (*s != '\0') | ||
245 | cmn_err(CE_ALERT, "%s\n", sbuf); | ||
246 | s = sbuf; | ||
247 | sprintf(s, "0x%x: ", i); | ||
248 | while( *s != '\0') | ||
249 | s++; | ||
250 | } | ||
251 | sprintf(s, "%02x ", *uip); | ||
252 | |||
253 | /* | ||
254 | * the kernel sprintf is a void; user sprintf returns | ||
255 | * the sprintf'ed string's length. Find the new end- | ||
256 | * of-string | ||
257 | */ | ||
258 | while( *s != '\0') | ||
259 | s++; | ||
260 | } | ||
261 | cmn_err(CE_ALERT, "%s\n", sbuf); | ||
262 | } | ||
263 | |||
264 | void | 233 | void |
265 | xfs_corruption_error( | 234 | xfs_corruption_error( |
266 | char *tag, | 235 | char *tag, |
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index 10e9d9619ae5..6490d2a9f8e1 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h | |||
@@ -174,6 +174,8 @@ extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, | |||
174 | /* PRINTFLIKE3 */ | 174 | /* PRINTFLIKE3 */ |
175 | extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); | 175 | extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); |
176 | 176 | ||
177 | extern void xfs_hex_dump(void *p, int length); | ||
178 | |||
177 | #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ | 179 | #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ |
178 | xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) | 180 | xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) |
179 | 181 | ||
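With the STATIC copy removed from xfs_error.c and a prototype added to xfs_error.h, xfs_hex_dump() becomes a shared helper. Its output is sixteen bytes per line, each line prefixed with the starting offset. A minimal userspace sketch of that format, printing with printf() where the kernel routine goes through cmn_err():

#include <stdio.h>

static void hex_dump_model(const void *p, int length)
{
        const unsigned char *uip = p;
        int i;

        for (i = 0; i < length; i++) {
                if ((i % 16) == 0)
                        printf("%s0x%x: ", i ? "\n" : "", i);
                printf("%02x ", uip[i]);
        }
        printf("\n");
}

int main(void)
{
        const char buf[] = "xfs hex dump example";

        hex_dump_model(buf, (int)sizeof(buf));
        return 0;
}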
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index f938a51be81b..132bd07b9bb8 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c | |||
@@ -110,19 +110,18 @@ STATIC void | |||
110 | xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) | 110 | xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) |
111 | { | 111 | { |
112 | xfs_mount_t *mp; | 112 | xfs_mount_t *mp; |
113 | SPLDECL(s); | ||
114 | 113 | ||
115 | mp = efip->efi_item.li_mountp; | 114 | mp = efip->efi_item.li_mountp; |
116 | AIL_LOCK(mp, s); | 115 | spin_lock(&mp->m_ail_lock); |
117 | if (efip->efi_flags & XFS_EFI_CANCELED) { | 116 | if (efip->efi_flags & XFS_EFI_CANCELED) { |
118 | /* | 117 | /* |
119 | * xfs_trans_delete_ail() drops the AIL lock. | 118 | * xfs_trans_delete_ail() drops the AIL lock. |
120 | */ | 119 | */ |
121 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); | 120 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); |
122 | xfs_efi_item_free(efip); | 121 | xfs_efi_item_free(efip); |
123 | } else { | 122 | } else { |
124 | efip->efi_flags |= XFS_EFI_COMMITTED; | 123 | efip->efi_flags |= XFS_EFI_COMMITTED; |
125 | AIL_UNLOCK(mp, s); | 124 | spin_unlock(&mp->m_ail_lock); |
126 | } | 125 | } |
127 | } | 126 | } |
128 | 127 | ||
@@ -138,10 +137,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) | |||
138 | { | 137 | { |
139 | xfs_mount_t *mp; | 138 | xfs_mount_t *mp; |
140 | xfs_log_item_desc_t *lidp; | 139 | xfs_log_item_desc_t *lidp; |
141 | SPLDECL(s); | ||
142 | 140 | ||
143 | mp = efip->efi_item.li_mountp; | 141 | mp = efip->efi_item.li_mountp; |
144 | AIL_LOCK(mp, s); | 142 | spin_lock(&mp->m_ail_lock); |
145 | if (efip->efi_flags & XFS_EFI_CANCELED) { | 143 | if (efip->efi_flags & XFS_EFI_CANCELED) { |
146 | /* | 144 | /* |
147 | * free the xaction descriptor pointing to this item | 145 | * free the xaction descriptor pointing to this item |
@@ -152,11 +150,11 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) | |||
152 | * pull the item off the AIL. | 150 | * pull the item off the AIL. |
153 | * xfs_trans_delete_ail() drops the AIL lock. | 151 | * xfs_trans_delete_ail() drops the AIL lock. |
154 | */ | 152 | */ |
155 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); | 153 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); |
156 | xfs_efi_item_free(efip); | 154 | xfs_efi_item_free(efip); |
157 | } else { | 155 | } else { |
158 | efip->efi_flags |= XFS_EFI_COMMITTED; | 156 | efip->efi_flags |= XFS_EFI_COMMITTED; |
159 | AIL_UNLOCK(mp, s); | 157 | spin_unlock(&mp->m_ail_lock); |
160 | } | 158 | } |
161 | } | 159 | } |
162 | 160 | ||
@@ -350,13 +348,12 @@ xfs_efi_release(xfs_efi_log_item_t *efip, | |||
350 | { | 348 | { |
351 | xfs_mount_t *mp; | 349 | xfs_mount_t *mp; |
352 | int extents_left; | 350 | int extents_left; |
353 | SPLDECL(s); | ||
354 | 351 | ||
355 | mp = efip->efi_item.li_mountp; | 352 | mp = efip->efi_item.li_mountp; |
356 | ASSERT(efip->efi_next_extent > 0); | 353 | ASSERT(efip->efi_next_extent > 0); |
357 | ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); | 354 | ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); |
358 | 355 | ||
359 | AIL_LOCK(mp, s); | 356 | spin_lock(&mp->m_ail_lock); |
360 | ASSERT(efip->efi_next_extent >= nextents); | 357 | ASSERT(efip->efi_next_extent >= nextents); |
361 | efip->efi_next_extent -= nextents; | 358 | efip->efi_next_extent -= nextents; |
362 | extents_left = efip->efi_next_extent; | 359 | extents_left = efip->efi_next_extent; |
@@ -364,10 +361,10 @@ xfs_efi_release(xfs_efi_log_item_t *efip, | |||
364 | /* | 361 | /* |
365 | * xfs_trans_delete_ail() drops the AIL lock. | 362 | * xfs_trans_delete_ail() drops the AIL lock. |
366 | */ | 363 | */ |
367 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); | 364 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); |
368 | xfs_efi_item_free(efip); | 365 | xfs_efi_item_free(efip); |
369 | } else { | 366 | } else { |
370 | AIL_UNLOCK(mp, s); | 367 | spin_unlock(&mp->m_ail_lock); |
371 | } | 368 | } |
372 | } | 369 | } |
373 | 370 | ||
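The EFI item paths drop the SPLDECL(s)/AIL_LOCK(mp, s)/AIL_UNLOCK(mp, s) macros, which threaded saved state through a local variable, and take mp->m_ail_lock with plain spin_lock()/spin_unlock(). Note that xfs_trans_delete_ail() still releases the lock itself, which is why only the else branches unlock explicitly. A rough userspace model of the new shape, with a pthread spinlock standing in for the kernel primitive and a bare counter standing in for the AIL:

#include <pthread.h>
#include <stdio.h>

struct mount_model {
        pthread_spinlock_t m_ail_lock;
        int ail_items;
};

static void ail_remove_one(struct mount_model *mp)
{
        pthread_spin_lock(&mp->m_ail_lock);
        mp->ail_items--;                /* mutate AIL state under the lock */
        pthread_spin_unlock(&mp->m_ail_lock);
}

int main(void)
{
        struct mount_model mp = { .ail_items = 3 };

        pthread_spin_init(&mp.m_ail_lock, PTHREAD_PROCESS_PRIVATE);
        ail_remove_one(&mp);
        printf("items left on the model AIL: %d\n", mp.ail_items);
        pthread_spin_destroy(&mp.m_ail_lock);
        return 0;
}

Build with -pthread.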
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 36d8f6aa11af..eb03eab5ca52 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -348,7 +348,7 @@ _xfs_filestream_update_ag( | |||
348 | } | 348 | } |
349 | 349 | ||
350 | /* xfs_fstrm_free_func(): callback for freeing cached stream items. */ | 350 | /* xfs_fstrm_free_func(): callback for freeing cached stream items. */ |
351 | void | 351 | STATIC void |
352 | xfs_fstrm_free_func( | 352 | xfs_fstrm_free_func( |
353 | unsigned long ino, | 353 | unsigned long ino, |
354 | void *data) | 354 | void *data) |
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index aab966276517..3bed6433d050 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h | |||
@@ -419,9 +419,13 @@ typedef struct xfs_handle { | |||
419 | /* | 419 | /* |
420 | * ioctl commands that are used by Linux filesystems | 420 | * ioctl commands that are used by Linux filesystems |
421 | */ | 421 | */ |
422 | #define XFS_IOC_GETXFLAGS _IOR('f', 1, long) | 422 | #define XFS_IOC_GETXFLAGS FS_IOC_GETFLAGS |
423 | #define XFS_IOC_SETXFLAGS _IOW('f', 2, long) | 423 | #define XFS_IOC_SETXFLAGS FS_IOC_SETFLAGS |
424 | #define XFS_IOC_GETVERSION _IOR('v', 1, long) | 424 | #define XFS_IOC_GETVERSION FS_IOC_GETVERSION |
425 | /* 32-bit compat counterparts */ | ||
426 | #define XFS_IOC32_GETXFLAGS FS_IOC32_GETFLAGS | ||
427 | #define XFS_IOC32_SETXFLAGS FS_IOC32_SETFLAGS | ||
428 | #define XFS_IOC32_GETVERSION FS_IOC32_GETVERSION | ||
425 | 429 | ||
426 | /* | 430 | /* |
427 | * ioctl commands that replace IRIX fcntl()'s | 431 | * ioctl commands that replace IRIX fcntl()'s |
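Defining XFS_IOC_GETXFLAGS/SETXFLAGS/GETVERSION in terms of the VFS-wide FS_IOC_* commands (plus the new 32-bit compat aliases) keeps the ioctl numbers identical while dropping the private _IOR/_IOW definitions. Userspace is unaffected; a caller still issues the generic flags ioctl, for example:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FS_IOC_GETFLAGS */

int main(int argc, char **argv)
{
        long flags = 0;
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
                perror("FS_IOC_GETFLAGS");
                return 1;
        }
        printf("inode flags: 0x%lx\n", flags);
        close(fd);
        return 0;
}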
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index c92d5b821029..eadc1591c795 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -318,7 +318,7 @@ xfs_growfs_data_private( | |||
318 | } | 318 | } |
319 | ASSERT(bp); | 319 | ASSERT(bp); |
320 | agi = XFS_BUF_TO_AGI(bp); | 320 | agi = XFS_BUF_TO_AGI(bp); |
321 | be32_add(&agi->agi_length, new); | 321 | be32_add_cpu(&agi->agi_length, new); |
322 | ASSERT(nagcount == oagcount || | 322 | ASSERT(nagcount == oagcount || |
323 | be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks); | 323 | be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks); |
324 | xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); | 324 | xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH); |
@@ -331,7 +331,7 @@ xfs_growfs_data_private( | |||
331 | } | 331 | } |
332 | ASSERT(bp); | 332 | ASSERT(bp); |
333 | agf = XFS_BUF_TO_AGF(bp); | 333 | agf = XFS_BUF_TO_AGF(bp); |
334 | be32_add(&agf->agf_length, new); | 334 | be32_add_cpu(&agf->agf_length, new); |
335 | ASSERT(be32_to_cpu(agf->agf_length) == | 335 | ASSERT(be32_to_cpu(agf->agf_length) == |
336 | be32_to_cpu(agi->agi_length)); | 336 | be32_to_cpu(agi->agi_length)); |
337 | xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); | 337 | xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); |
@@ -462,15 +462,13 @@ xfs_fs_counts( | |||
462 | xfs_mount_t *mp, | 462 | xfs_mount_t *mp, |
463 | xfs_fsop_counts_t *cnt) | 463 | xfs_fsop_counts_t *cnt) |
464 | { | 464 | { |
465 | unsigned long s; | ||
466 | |||
467 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); | 465 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); |
468 | s = XFS_SB_LOCK(mp); | 466 | spin_lock(&mp->m_sb_lock); |
469 | cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); | 467 | cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); |
470 | cnt->freertx = mp->m_sb.sb_frextents; | 468 | cnt->freertx = mp->m_sb.sb_frextents; |
471 | cnt->freeino = mp->m_sb.sb_ifree; | 469 | cnt->freeino = mp->m_sb.sb_ifree; |
472 | cnt->allocino = mp->m_sb.sb_icount; | 470 | cnt->allocino = mp->m_sb.sb_icount; |
473 | XFS_SB_UNLOCK(mp, s); | 471 | spin_unlock(&mp->m_sb_lock); |
474 | return 0; | 472 | return 0; |
475 | } | 473 | } |
476 | 474 | ||
@@ -497,7 +495,6 @@ xfs_reserve_blocks( | |||
497 | { | 495 | { |
498 | __int64_t lcounter, delta, fdblks_delta; | 496 | __int64_t lcounter, delta, fdblks_delta; |
499 | __uint64_t request; | 497 | __uint64_t request; |
500 | unsigned long s; | ||
501 | 498 | ||
502 | /* If inval is null, report current values and return */ | 499 | /* If inval is null, report current values and return */ |
503 | if (inval == (__uint64_t *)NULL) { | 500 | if (inval == (__uint64_t *)NULL) { |
@@ -515,7 +512,7 @@ xfs_reserve_blocks( | |||
515 | * problem. we need to work out if we are freeing or allocating | 512 | * problem. we need to work out if we are freeing or allocating |
516 | * blocks first, then we can do the modification as necessary. | 513 | * blocks first, then we can do the modification as necessary. |
517 | * | 514 | * |
518 | * We do this under the XFS_SB_LOCK so that if we are near | 515 | * We do this under the m_sb_lock so that if we are near |
519 | * ENOSPC, we will hold out any changes while we work out | 516 | * ENOSPC, we will hold out any changes while we work out |
520 | * what to do. This means that the amount of free space can | 517 | * what to do. This means that the amount of free space can |
521 | * change while we do this, so we need to retry if we end up | 518 | * change while we do this, so we need to retry if we end up |
@@ -526,7 +523,7 @@ xfs_reserve_blocks( | |||
526 | * enabled, disabled or even compiled in.... | 523 | * enabled, disabled or even compiled in.... |
527 | */ | 524 | */ |
528 | retry: | 525 | retry: |
529 | s = XFS_SB_LOCK(mp); | 526 | spin_lock(&mp->m_sb_lock); |
530 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED); | 527 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED); |
531 | 528 | ||
532 | /* | 529 | /* |
@@ -569,7 +566,7 @@ out: | |||
569 | outval->resblks = mp->m_resblks; | 566 | outval->resblks = mp->m_resblks; |
570 | outval->resblks_avail = mp->m_resblks_avail; | 567 | outval->resblks_avail = mp->m_resblks_avail; |
571 | } | 568 | } |
572 | XFS_SB_UNLOCK(mp, s); | 569 | spin_unlock(&mp->m_sb_lock); |
573 | 570 | ||
574 | if (fdblks_delta) { | 571 | if (fdblks_delta) { |
575 | /* | 572 | /* |
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 1409c2d61c11..c5836b951d0c 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -301,8 +301,8 @@ xfs_ialloc_ag_alloc( | |||
301 | } | 301 | } |
302 | xfs_trans_inode_alloc_buf(tp, fbuf); | 302 | xfs_trans_inode_alloc_buf(tp, fbuf); |
303 | } | 303 | } |
304 | be32_add(&agi->agi_count, newlen); | 304 | be32_add_cpu(&agi->agi_count, newlen); |
305 | be32_add(&agi->agi_freecount, newlen); | 305 | be32_add_cpu(&agi->agi_freecount, newlen); |
306 | agno = be32_to_cpu(agi->agi_seqno); | 306 | agno = be32_to_cpu(agi->agi_seqno); |
307 | down_read(&args.mp->m_peraglock); | 307 | down_read(&args.mp->m_peraglock); |
308 | args.mp->m_perag[agno].pagi_freecount += newlen; | 308 | args.mp->m_perag[agno].pagi_freecount += newlen; |
@@ -885,7 +885,7 @@ nextag: | |||
885 | if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, | 885 | if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, |
886 | rec.ir_free))) | 886 | rec.ir_free))) |
887 | goto error0; | 887 | goto error0; |
888 | be32_add(&agi->agi_freecount, -1); | 888 | be32_add_cpu(&agi->agi_freecount, -1); |
889 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | 889 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); |
890 | down_read(&mp->m_peraglock); | 890 | down_read(&mp->m_peraglock); |
891 | mp->m_perag[tagno].pagi_freecount--; | 891 | mp->m_perag[tagno].pagi_freecount--; |
@@ -1065,8 +1065,8 @@ xfs_difree( | |||
1065 | * to be freed when the transaction is committed. | 1065 | * to be freed when the transaction is committed. |
1066 | */ | 1066 | */ |
1067 | ilen = XFS_IALLOC_INODES(mp); | 1067 | ilen = XFS_IALLOC_INODES(mp); |
1068 | be32_add(&agi->agi_count, -ilen); | 1068 | be32_add_cpu(&agi->agi_count, -ilen); |
1069 | be32_add(&agi->agi_freecount, -(ilen - 1)); | 1069 | be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); |
1070 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); | 1070 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); |
1071 | down_read(&mp->m_peraglock); | 1071 | down_read(&mp->m_peraglock); |
1072 | mp->m_perag[agno].pagi_freecount -= ilen - 1; | 1072 | mp->m_perag[agno].pagi_freecount -= ilen - 1; |
@@ -1095,7 +1095,7 @@ xfs_difree( | |||
1095 | /* | 1095 | /* |
1096 | * Change the inode free counts and log the ag/sb changes. | 1096 | * Change the inode free counts and log the ag/sb changes. |
1097 | */ | 1097 | */ |
1098 | be32_add(&agi->agi_freecount, 1); | 1098 | be32_add_cpu(&agi->agi_freecount, 1); |
1099 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); | 1099 | xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); |
1100 | down_read(&mp->m_peraglock); | 1100 | down_read(&mp->m_peraglock); |
1101 | mp->m_perag[agno].pagi_freecount++; | 1101 | mp->m_perag[agno].pagi_freecount++; |
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 8cdeeaf8632b..e5310c90e50f 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c | |||
@@ -189,7 +189,7 @@ xfs_inobt_delrec( | |||
189 | */ | 189 | */ |
190 | bno = be32_to_cpu(agi->agi_root); | 190 | bno = be32_to_cpu(agi->agi_root); |
191 | agi->agi_root = *pp; | 191 | agi->agi_root = *pp; |
192 | be32_add(&agi->agi_level, -1); | 192 | be32_add_cpu(&agi->agi_level, -1); |
193 | /* | 193 | /* |
194 | * Free the block. | 194 | * Free the block. |
195 | */ | 195 | */ |
@@ -1132,7 +1132,7 @@ xfs_inobt_lshift( | |||
1132 | /* | 1132 | /* |
1133 | * Bump and log left's numrecs, decrement and log right's numrecs. | 1133 | * Bump and log left's numrecs, decrement and log right's numrecs. |
1134 | */ | 1134 | */ |
1135 | be16_add(&left->bb_numrecs, 1); | 1135 | be16_add_cpu(&left->bb_numrecs, 1); |
1136 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1136 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1137 | #ifdef DEBUG | 1137 | #ifdef DEBUG |
1138 | if (level > 0) | 1138 | if (level > 0) |
@@ -1140,7 +1140,7 @@ xfs_inobt_lshift( | |||
1140 | else | 1140 | else |
1141 | xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); | 1141 | xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); |
1142 | #endif | 1142 | #endif |
1143 | be16_add(&right->bb_numrecs, -1); | 1143 | be16_add_cpu(&right->bb_numrecs, -1); |
1144 | xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); | 1144 | xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); |
1145 | /* | 1145 | /* |
1146 | * Slide the contents of right down one entry. | 1146 | * Slide the contents of right down one entry. |
@@ -1232,7 +1232,7 @@ xfs_inobt_newroot( | |||
1232 | * Set the root data in the a.g. inode structure. | 1232 | * Set the root data in the a.g. inode structure. |
1233 | */ | 1233 | */ |
1234 | agi->agi_root = cpu_to_be32(args.agbno); | 1234 | agi->agi_root = cpu_to_be32(args.agbno); |
1235 | be32_add(&agi->agi_level, 1); | 1235 | be32_add_cpu(&agi->agi_level, 1); |
1236 | xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, | 1236 | xfs_ialloc_log_agi(args.tp, cur->bc_private.i.agbp, |
1237 | XFS_AGI_ROOT | XFS_AGI_LEVEL); | 1237 | XFS_AGI_ROOT | XFS_AGI_LEVEL); |
1238 | /* | 1238 | /* |
@@ -1426,9 +1426,9 @@ xfs_inobt_rshift( | |||
1426 | /* | 1426 | /* |
1427 | * Decrement and log left's numrecs, bump and log right's numrecs. | 1427 | * Decrement and log left's numrecs, bump and log right's numrecs. |
1428 | */ | 1428 | */ |
1429 | be16_add(&left->bb_numrecs, -1); | 1429 | be16_add_cpu(&left->bb_numrecs, -1); |
1430 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); | 1430 | xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); |
1431 | be16_add(&right->bb_numrecs, 1); | 1431 | be16_add_cpu(&right->bb_numrecs, 1); |
1432 | #ifdef DEBUG | 1432 | #ifdef DEBUG |
1433 | if (level > 0) | 1433 | if (level > 0) |
1434 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); | 1434 | xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); |
@@ -1529,7 +1529,7 @@ xfs_inobt_split( | |||
1529 | */ | 1529 | */ |
1530 | if ((be16_to_cpu(left->bb_numrecs) & 1) && | 1530 | if ((be16_to_cpu(left->bb_numrecs) & 1) && |
1531 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) | 1531 | cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) |
1532 | be16_add(&right->bb_numrecs, 1); | 1532 | be16_add_cpu(&right->bb_numrecs, 1); |
1533 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; | 1533 | i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; |
1534 | /* | 1534 | /* |
1535 | * For non-leaf blocks, copy keys and addresses over to the new block. | 1535 | * For non-leaf blocks, copy keys and addresses over to the new block. |
@@ -1565,7 +1565,7 @@ xfs_inobt_split( | |||
1565 | * Find the left block number by looking in the buffer. | 1565 | * Find the left block number by looking in the buffer. |
1566 | * Adjust numrecs, sibling pointers. | 1566 | * Adjust numrecs, sibling pointers. |
1567 | */ | 1567 | */ |
1568 | be16_add(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); | 1568 | be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); |
1569 | right->bb_rightsib = left->bb_rightsib; | 1569 | right->bb_rightsib = left->bb_rightsib; |
1570 | left->bb_rightsib = cpu_to_be32(args.agbno); | 1570 | left->bb_rightsib = cpu_to_be32(args.agbno); |
1571 | right->bb_leftsib = cpu_to_be32(lbno); | 1571 | right->bb_leftsib = cpu_to_be32(lbno); |
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index bf8e9aff272e..8efc4a5b8b92 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h | |||
@@ -81,8 +81,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; | |||
81 | #define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i)) | 81 | #define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i)) |
82 | #define XFS_INOBT_IS_FREE(rp,i) \ | 82 | #define XFS_INOBT_IS_FREE(rp,i) \ |
83 | (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0) | 83 | (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0) |
84 | #define XFS_INOBT_IS_FREE_DISK(rp,i) \ | ||
85 | ((be64_to_cpu((rp)->ir_free) & XFS_INOBT_MASK(i)) != 0) | ||
86 | #define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i)) | 84 | #define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i)) |
87 | #define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i)) | 85 | #define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i)) |
88 | 86 | ||
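Only the unused XFS_INOBT_IS_FREE_DISK() variant is removed; the remaining macros still treat ir_free as a 64-bit bitmap with one bit per inode in the chunk. A self-contained sketch of that bitmap usage, with uint64_t standing in for xfs_inofree_t:

#include <stdio.h>
#include <stdint.h>

#define INOBT_MASK(i)           ((uint64_t)1 << (i))
#define INOBT_IS_FREE(free, i)  (((free) & INOBT_MASK(i)) != 0)
#define INOBT_SET_FREE(free, i) ((free) |= INOBT_MASK(i))
#define INOBT_CLR_FREE(free, i) ((free) &= ~INOBT_MASK(i))

int main(void)
{
        uint64_t ir_free = 0;

        INOBT_SET_FREE(ir_free, 5);     /* inode 5 of the chunk is free */
        printf("inode 5 free? %d\n", INOBT_IS_FREE(ir_free, 5));
        INOBT_CLR_FREE(ir_free, 5);     /* allocate it again */
        printf("inode 5 free? %d\n", INOBT_IS_FREE(ir_free, 5));
        return 0;
}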
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index fb69ef180b27..f01b07687faf 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -65,7 +65,7 @@ | |||
65 | */ | 65 | */ |
66 | STATIC int | 66 | STATIC int |
67 | xfs_iget_core( | 67 | xfs_iget_core( |
68 | bhv_vnode_t *vp, | 68 | struct inode *inode, |
69 | xfs_mount_t *mp, | 69 | xfs_mount_t *mp, |
70 | xfs_trans_t *tp, | 70 | xfs_trans_t *tp, |
71 | xfs_ino_t ino, | 71 | xfs_ino_t ino, |
@@ -74,9 +74,9 @@ xfs_iget_core( | |||
74 | xfs_inode_t **ipp, | 74 | xfs_inode_t **ipp, |
75 | xfs_daddr_t bno) | 75 | xfs_daddr_t bno) |
76 | { | 76 | { |
77 | struct inode *old_inode; | ||
77 | xfs_inode_t *ip; | 78 | xfs_inode_t *ip; |
78 | xfs_inode_t *iq; | 79 | xfs_inode_t *iq; |
79 | bhv_vnode_t *inode_vp; | ||
80 | int error; | 80 | int error; |
81 | xfs_icluster_t *icl, *new_icl = NULL; | 81 | xfs_icluster_t *icl, *new_icl = NULL; |
82 | unsigned long first_index, mask; | 82 | unsigned long first_index, mask; |
@@ -111,8 +111,8 @@ again: | |||
111 | goto again; | 111 | goto again; |
112 | } | 112 | } |
113 | 113 | ||
114 | inode_vp = XFS_ITOV_NULL(ip); | 114 | old_inode = ip->i_vnode; |
115 | if (inode_vp == NULL) { | 115 | if (old_inode == NULL) { |
116 | /* | 116 | /* |
117 | * If IRECLAIM is set this inode is | 117 | * If IRECLAIM is set this inode is |
118 | * on its way out of the system, | 118 | * on its way out of the system, |
@@ -140,28 +140,9 @@ again: | |||
140 | return ENOENT; | 140 | return ENOENT; |
141 | } | 141 | } |
142 | 142 | ||
143 | /* | 143 | xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); |
144 | * There may be transactions sitting in the | ||
145 | * incore log buffers or being flushed to disk | ||
146 | * at this time. We can't clear the | ||
147 | * XFS_IRECLAIMABLE flag until these | ||
148 | * transactions have hit the disk, otherwise we | ||
149 | * will void the guarantee the flag provides | ||
150 | * xfs_iunpin() | ||
151 | */ | ||
152 | if (xfs_ipincount(ip)) { | ||
153 | read_unlock(&pag->pag_ici_lock); | ||
154 | xfs_log_force(mp, 0, | ||
155 | XFS_LOG_FORCE|XFS_LOG_SYNC); | ||
156 | XFS_STATS_INC(xs_ig_frecycle); | ||
157 | goto again; | ||
158 | } | ||
159 | |||
160 | vn_trace_exit(ip, "xfs_iget.alloc", | ||
161 | (inst_t *)__return_address); | ||
162 | 144 | ||
163 | XFS_STATS_INC(xs_ig_found); | 145 | XFS_STATS_INC(xs_ig_found); |
164 | |||
165 | xfs_iflags_clear(ip, XFS_IRECLAIMABLE); | 146 | xfs_iflags_clear(ip, XFS_IRECLAIMABLE); |
166 | read_unlock(&pag->pag_ici_lock); | 147 | read_unlock(&pag->pag_ici_lock); |
167 | 148 | ||
@@ -171,13 +152,11 @@ again: | |||
171 | 152 | ||
172 | goto finish_inode; | 153 | goto finish_inode; |
173 | 154 | ||
174 | } else if (vp != inode_vp) { | 155 | } else if (inode != old_inode) { |
175 | struct inode *inode = vn_to_inode(inode_vp); | ||
176 | |||
177 | /* The inode is being torn down, pause and | 156 | /* The inode is being torn down, pause and |
178 | * try again. | 157 | * try again. |
179 | */ | 158 | */ |
180 | if (inode->i_state & (I_FREEING | I_CLEAR)) { | 159 | if (old_inode->i_state & (I_FREEING | I_CLEAR)) { |
181 | read_unlock(&pag->pag_ici_lock); | 160 | read_unlock(&pag->pag_ici_lock); |
182 | delay(1); | 161 | delay(1); |
183 | XFS_STATS_INC(xs_ig_frecycle); | 162 | XFS_STATS_INC(xs_ig_frecycle); |
@@ -190,7 +169,7 @@ again: | |||
190 | */ | 169 | */ |
191 | cmn_err(CE_PANIC, | 170 | cmn_err(CE_PANIC, |
192 | "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", | 171 | "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", |
193 | inode_vp, vp); | 172 | old_inode, inode); |
194 | } | 173 | } |
195 | 174 | ||
196 | /* | 175 | /* |
@@ -200,20 +179,16 @@ again: | |||
200 | XFS_STATS_INC(xs_ig_found); | 179 | XFS_STATS_INC(xs_ig_found); |
201 | 180 | ||
202 | finish_inode: | 181 | finish_inode: |
203 | if (ip->i_d.di_mode == 0) { | 182 | if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { |
204 | if (!(flags & XFS_IGET_CREATE)) { | 183 | xfs_put_perag(mp, pag); |
205 | xfs_put_perag(mp, pag); | 184 | return ENOENT; |
206 | return ENOENT; | ||
207 | } | ||
208 | xfs_iocore_inode_reinit(ip); | ||
209 | } | 185 | } |
210 | 186 | ||
211 | if (lock_flags != 0) | 187 | if (lock_flags != 0) |
212 | xfs_ilock(ip, lock_flags); | 188 | xfs_ilock(ip, lock_flags); |
213 | 189 | ||
214 | xfs_iflags_clear(ip, XFS_ISTALE); | 190 | xfs_iflags_clear(ip, XFS_ISTALE); |
215 | vn_trace_exit(ip, "xfs_iget.found", | 191 | xfs_itrace_exit_tag(ip, "xfs_iget.found"); |
216 | (inst_t *)__return_address); | ||
217 | goto return_ip; | 192 | goto return_ip; |
218 | } | 193 | } |
219 | 194 | ||
@@ -234,10 +209,16 @@ finish_inode: | |||
234 | return error; | 209 | return error; |
235 | } | 210 | } |
236 | 211 | ||
237 | vn_trace_exit(ip, "xfs_iget.alloc", (inst_t *)__return_address); | 212 | xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); |
213 | |||
214 | |||
215 | mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, | ||
216 | "xfsino", ip->i_ino); | ||
217 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); | ||
218 | init_waitqueue_head(&ip->i_ipin_wait); | ||
219 | atomic_set(&ip->i_pincount, 0); | ||
220 | initnsema(&ip->i_flock, 1, "xfsfino"); | ||
238 | 221 | ||
239 | xfs_inode_lock_init(ip, vp); | ||
240 | xfs_iocore_inode_init(ip); | ||
241 | if (lock_flags) | 222 | if (lock_flags) |
242 | xfs_ilock(ip, lock_flags); | 223 | xfs_ilock(ip, lock_flags); |
243 | 224 | ||
@@ -333,9 +314,6 @@ finish_inode: | |||
333 | ASSERT(ip->i_df.if_ext_max == | 314 | ASSERT(ip->i_df.if_ext_max == |
334 | XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); | 315 | XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); |
335 | 316 | ||
336 | ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == | ||
337 | ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); | ||
338 | |||
339 | xfs_iflags_set(ip, XFS_IMODIFIED); | 317 | xfs_iflags_set(ip, XFS_IMODIFIED); |
340 | *ipp = ip; | 318 | *ipp = ip; |
341 | 319 | ||
@@ -343,7 +321,7 @@ finish_inode: | |||
343 | * If we have a real type for an on-disk inode, we can set ops(&unlock) | 321 | * If we have a real type for an on-disk inode, we can set ops(&unlock) |
344 | * now. If it's a new inode being created, xfs_ialloc will handle it. | 322 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
345 | */ | 323 | */ |
346 | xfs_initialize_vnode(mp, vp, ip); | 324 | xfs_initialize_vnode(mp, inode, ip); |
347 | return 0; | 325 | return 0; |
348 | } | 326 | } |
349 | 327 | ||
@@ -363,69 +341,58 @@ xfs_iget( | |||
363 | xfs_daddr_t bno) | 341 | xfs_daddr_t bno) |
364 | { | 342 | { |
365 | struct inode *inode; | 343 | struct inode *inode; |
366 | bhv_vnode_t *vp = NULL; | 344 | xfs_inode_t *ip; |
367 | int error; | 345 | int error; |
368 | 346 | ||
369 | XFS_STATS_INC(xs_ig_attempts); | 347 | XFS_STATS_INC(xs_ig_attempts); |
370 | 348 | ||
371 | retry: | 349 | retry: |
372 | inode = iget_locked(mp->m_super, ino); | 350 | inode = iget_locked(mp->m_super, ino); |
373 | if (inode) { | 351 | if (!inode) |
374 | xfs_inode_t *ip; | 352 | /* If we got no inode we are out of memory */ |
375 | | 353 | return ENOMEM; |
376 | vp = vn_from_inode(inode); | 354 | |
377 | if (inode->i_state & I_NEW) { | 355 | if (inode->i_state & I_NEW) { |
378 | vn_initialize(inode); | 356 | XFS_STATS_INC(vn_active); |
379 | error = xfs_iget_core(vp, mp, tp, ino, flags, | 357 | XFS_STATS_INC(vn_alloc); |
380 | lock_flags, ipp, bno); | 358 | |
381 | if (error) { | 359 | error = xfs_iget_core(inode, mp, tp, ino, flags, |
382 | vn_mark_bad(vp); | 360 | lock_flags, ipp, bno); |
383 | if (inode->i_state & I_NEW) | 361 | if (error) { |
384 | unlock_new_inode(inode); | 362 | make_bad_inode(inode); |
385 | iput(inode); | 363 | if (inode->i_state & I_NEW) |
386 | } | 364 | unlock_new_inode(inode); |
387 | } else { | 365 | iput(inode); |
388 | /* | ||
389 | * If the inode is not fully constructed due to | ||
390 | * filehandle mismatches wait for the inode to go | ||
391 | * away and try again. | ||
392 | * | ||
393 | * iget_locked will call __wait_on_freeing_inode | ||
394 | * to wait for the inode to go away. | ||
395 | */ | ||
396 | if (is_bad_inode(inode) || | ||
397 | ((ip = xfs_vtoi(vp)) == NULL)) { | ||
398 | iput(inode); | ||
399 | delay(1); | ||
400 | goto retry; | ||
401 | } | ||
402 | |||
403 | if (lock_flags != 0) | ||
404 | xfs_ilock(ip, lock_flags); | ||
405 | XFS_STATS_INC(xs_ig_found); | ||
406 | *ipp = ip; | ||
407 | error = 0; | ||
408 | } | 366 | } |
409 | } else | 367 | return error; |
410 | error = ENOMEM; /* If we got no inode we are out of memory */ | 368 | } |
411 | 369 | ||
412 | return error; | 370 | /* |
413 | } | 371 | * If the inode is not fully constructed due to |
372 | * filehandle mismatches wait for the inode to go | ||
373 | * away and try again. | ||
374 | * | ||
375 | * iget_locked will call __wait_on_freeing_inode | ||
376 | * to wait for the inode to go away. | ||
377 | */ | ||
378 | if (is_bad_inode(inode)) { | ||
379 | iput(inode); | ||
380 | delay(1); | ||
381 | goto retry; | ||
382 | } | ||
414 | 383 | ||
415 | /* | 384 | ip = XFS_I(inode); |
416 | * Do the setup for the various locks within the incore inode. | 385 | if (!ip) { |
417 | */ | 386 | iput(inode); |
418 | void | 387 | delay(1); |
419 | xfs_inode_lock_init( | 388 | goto retry; |
420 | xfs_inode_t *ip, | 389 | } |
421 | bhv_vnode_t *vp) | 390 | |
422 | { | 391 | if (lock_flags != 0) |
423 | mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, | 392 | xfs_ilock(ip, lock_flags); |
424 | "xfsino", ip->i_ino); | 393 | XFS_STATS_INC(xs_ig_found); |
425 | mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); | 394 | *ipp = ip; |
426 | init_waitqueue_head(&ip->i_ipin_wait); | 395 | return 0; |
427 | atomic_set(&ip->i_pincount, 0); | ||
428 | initnsema(&ip->i_flock, 1, "xfsfino"); | ||
429 | } | 396 | } |
430 | 397 | ||
431 | /* | 398 | /* |
@@ -465,11 +432,9 @@ void | |||
465 | xfs_iput(xfs_inode_t *ip, | 432 | xfs_iput(xfs_inode_t *ip, |
466 | uint lock_flags) | 433 | uint lock_flags) |
467 | { | 434 | { |
468 | bhv_vnode_t *vp = XFS_ITOV(ip); | 435 | xfs_itrace_entry(ip); |
469 | |||
470 | vn_trace_entry(ip, "xfs_iput", (inst_t *)__return_address); | ||
471 | xfs_iunlock(ip, lock_flags); | 436 | xfs_iunlock(ip, lock_flags); |
472 | VN_RELE(vp); | 437 | IRELE(ip); |
473 | } | 438 | } |
474 | 439 | ||
475 | /* | 440 | /* |
@@ -479,20 +444,19 @@ void | |||
479 | xfs_iput_new(xfs_inode_t *ip, | 444 | xfs_iput_new(xfs_inode_t *ip, |
480 | uint lock_flags) | 445 | uint lock_flags) |
481 | { | 446 | { |
482 | bhv_vnode_t *vp = XFS_ITOV(ip); | 447 | struct inode *inode = ip->i_vnode; |
483 | struct inode *inode = vn_to_inode(vp); | ||
484 | 448 | ||
485 | vn_trace_entry(ip, "xfs_iput_new", (inst_t *)__return_address); | 449 | xfs_itrace_entry(ip); |
486 | 450 | ||
487 | if ((ip->i_d.di_mode == 0)) { | 451 | if ((ip->i_d.di_mode == 0)) { |
488 | ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); | 452 | ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); |
489 | vn_mark_bad(vp); | 453 | make_bad_inode(inode); |
490 | } | 454 | } |
491 | if (inode->i_state & I_NEW) | 455 | if (inode->i_state & I_NEW) |
492 | unlock_new_inode(inode); | 456 | unlock_new_inode(inode); |
493 | if (lock_flags) | 457 | if (lock_flags) |
494 | xfs_iunlock(ip, lock_flags); | 458 | xfs_iunlock(ip, lock_flags); |
495 | VN_RELE(vp); | 459 | IRELE(ip); |
496 | } | 460 | } |
497 | 461 | ||
498 | 462 | ||
@@ -505,8 +469,6 @@ xfs_iput_new(xfs_inode_t *ip, | |||
505 | void | 469 | void |
506 | xfs_ireclaim(xfs_inode_t *ip) | 470 | xfs_ireclaim(xfs_inode_t *ip) |
507 | { | 471 | { |
508 | bhv_vnode_t *vp; | ||
509 | |||
510 | /* | 472 | /* |
511 | * Remove from old hash list and mount list. | 473 | * Remove from old hash list and mount list. |
512 | */ | 474 | */ |
@@ -535,9 +497,8 @@ xfs_ireclaim(xfs_inode_t *ip) | |||
535 | /* | 497 | /* |
536 | * Pull our behavior descriptor from the vnode chain. | 498 | * Pull our behavior descriptor from the vnode chain. |
537 | */ | 499 | */ |
538 | vp = XFS_ITOV_NULL(ip); | 500 | if (ip->i_vnode) { |
539 | if (vp) { | 501 | ip->i_vnode->i_private = NULL; |
540 | vn_to_inode(vp)->i_private = NULL; | ||
541 | ip->i_vnode = NULL; | 502 | ip->i_vnode = NULL; |
542 | } | 503 | } |
543 | 504 | ||
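The rewritten xfs_iget() now follows the stock VFS pattern directly: iget_locked() hands back either a cached inode or a fresh one flagged I_NEW, and the caller initialises the new inode before publishing it with unlock_new_inode(), or marks it bad and drops it on failure. A condensed sketch of that pattern, with example_fill_inode() as a hypothetical stand-in for the filesystem-specific read:

#include <linux/err.h>
#include <linux/fs.h>

/* hypothetical fs-specific step that would read the inode from disk */
static int example_fill_inode(struct inode *inode)
{
        inode->i_size = 0;
        return 0;
}

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;           /* cache hit, already initialised */

        if (example_fill_inode(inode)) {
                make_bad_inode(inode);  /* same failure path as the hunk above */
                unlock_new_inode(inode);
                iput(inode);
                return ERR_PTR(-EIO);
        }
        unlock_new_inode(inode);        /* publish the now-initialised inode */
        return inode;
}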
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 344948082819..a550546a7083 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * along with this program; if not, write the Free Software Foundation, | 15 | * along with this program; if not, write the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include <linux/log2.h> | ||
19 | |||
18 | #include "xfs.h" | 20 | #include "xfs.h" |
19 | #include "xfs_fs.h" | 21 | #include "xfs_fs.h" |
20 | #include "xfs_types.h" | 22 | #include "xfs_types.h" |
@@ -826,15 +828,17 @@ xfs_ip2xflags( | |||
826 | xfs_icdinode_t *dic = &ip->i_d; | 828 | xfs_icdinode_t *dic = &ip->i_d; |
827 | 829 | ||
828 | return _xfs_dic2xflags(dic->di_flags) | | 830 | return _xfs_dic2xflags(dic->di_flags) | |
829 | (XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0); | 831 | (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); |
830 | } | 832 | } |
831 | 833 | ||
832 | uint | 834 | uint |
833 | xfs_dic2xflags( | 835 | xfs_dic2xflags( |
834 | xfs_dinode_core_t *dic) | 836 | xfs_dinode_t *dip) |
835 | { | 837 | { |
838 | xfs_dinode_core_t *dic = &dip->di_core; | ||
839 | |||
836 | return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) | | 840 | return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) | |
837 | (XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0); | 841 | (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); |
838 | } | 842 | } |
839 | 843 | ||
840 | /* | 844 | /* |
@@ -884,8 +888,8 @@ xfs_iread( | |||
884 | * Initialize inode's trace buffers. | 888 | * Initialize inode's trace buffers. |
885 | * Do this before xfs_iformat in case it adds entries. | 889 | * Do this before xfs_iformat in case it adds entries. |
886 | */ | 890 | */ |
887 | #ifdef XFS_VNODE_TRACE | 891 | #ifdef XFS_INODE_TRACE |
888 | ip->i_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); | 892 | ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP); |
889 | #endif | 893 | #endif |
890 | #ifdef XFS_BMAP_TRACE | 894 | #ifdef XFS_BMAP_TRACE |
891 | ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); | 895 | ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); |
@@ -1220,10 +1224,8 @@ xfs_ialloc( | |||
1220 | ip->i_d.di_extsize = pip->i_d.di_extsize; | 1224 | ip->i_d.di_extsize = pip->i_d.di_extsize; |
1221 | } | 1225 | } |
1222 | } else if ((mode & S_IFMT) == S_IFREG) { | 1226 | } else if ((mode & S_IFMT) == S_IFREG) { |
1223 | if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) { | 1227 | if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) |
1224 | di_flags |= XFS_DIFLAG_REALTIME; | 1228 | di_flags |= XFS_DIFLAG_REALTIME; |
1225 | ip->i_iocore.io_flags |= XFS_IOCORE_RT; | ||
1226 | } | ||
1227 | if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { | 1229 | if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { |
1228 | di_flags |= XFS_DIFLAG_EXTSIZE; | 1230 | di_flags |= XFS_DIFLAG_EXTSIZE; |
1229 | ip->i_d.di_extsize = pip->i_d.di_extsize; | 1231 | ip->i_d.di_extsize = pip->i_d.di_extsize; |
@@ -1298,7 +1300,10 @@ xfs_isize_check( | |||
1298 | if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) | 1300 | if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) |
1299 | return; | 1301 | return; |
1300 | 1302 | ||
1301 | if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE)) | 1303 | if (XFS_IS_REALTIME_INODE(ip)) |
1304 | return; | ||
1305 | |||
1306 | if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) | ||
1302 | return; | 1307 | return; |
1303 | 1308 | ||
1304 | nimaps = 2; | 1309 | nimaps = 2; |
@@ -1711,7 +1716,7 @@ xfs_itruncate_finish( | |||
1711 | * runs. | 1716 | * runs. |
1712 | */ | 1717 | */ |
1713 | XFS_BMAP_INIT(&free_list, &first_block); | 1718 | XFS_BMAP_INIT(&free_list, &first_block); |
1714 | error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore, | 1719 | error = xfs_bunmapi(ntp, ip, |
1715 | first_unmap_block, unmap_len, | 1720 | first_unmap_block, unmap_len, |
1716 | XFS_BMAPI_AFLAG(fork) | | 1721 | XFS_BMAPI_AFLAG(fork) | |
1717 | (sync ? 0 : XFS_BMAPI_ASYNC), | 1722 | (sync ? 0 : XFS_BMAPI_ASYNC), |
@@ -1844,8 +1849,6 @@ xfs_igrow_start( | |||
1844 | xfs_fsize_t new_size, | 1849 | xfs_fsize_t new_size, |
1845 | cred_t *credp) | 1850 | cred_t *credp) |
1846 | { | 1851 | { |
1847 | int error; | ||
1848 | |||
1849 | ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); | 1852 | ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); |
1850 | ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); | 1853 | ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); |
1851 | ASSERT(new_size > ip->i_size); | 1854 | ASSERT(new_size > ip->i_size); |
@@ -1855,9 +1858,7 @@ xfs_igrow_start( | |||
1855 | * xfs_write_file() beyond the end of the file | 1858 | * xfs_write_file() beyond the end of the file |
1856 | * and any blocks between the old and new file sizes. | 1859 | * and any blocks between the old and new file sizes. |
1857 | */ | 1860 | */ |
1858 | error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, | 1861 | return xfs_zero_eof(ip, new_size, ip->i_size); |
1859 | ip->i_size); | ||
1860 | return error; | ||
1861 | } | 1862 | } |
1862 | 1863 | ||
1863 | /* | 1864 | /* |
@@ -1959,24 +1960,6 @@ xfs_iunlink( | |||
1959 | ASSERT(agi->agi_unlinked[bucket_index]); | 1960 | ASSERT(agi->agi_unlinked[bucket_index]); |
1960 | ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); | 1961 | ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); |
1961 | 1962 | ||
1962 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); | ||
1963 | if (error) | ||
1964 | return error; | ||
1965 | |||
1966 | /* | ||
1967 | * Clear the on-disk di_nlink. This is to prevent xfs_bulkstat | ||
1968 | * from picking up this inode when it is reclaimed (its incore state | ||
1969 | * initialzed but not flushed to disk yet). The in-core di_nlink is | ||
1970 | * already cleared in xfs_droplink() and a corresponding transaction | ||
1971 | * logged. The hack here just synchronizes the in-core to on-disk | ||
1972 | * di_nlink value in advance before the actual inode sync to disk. | ||
1973 | * This is OK because the inode is already unlinked and would never | ||
1974 | * change its di_nlink again for this inode generation. | ||
1975 | * This is a temporary hack that would require a proper fix | ||
1976 | * in the future. | ||
1977 | */ | ||
1978 | dip->di_core.di_nlink = 0; | ||
1979 | |||
1980 | if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { | 1963 | if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { |
1981 | /* | 1964 | /* |
1982 | * There is already another inode in the bucket we need | 1965 | * There is already another inode in the bucket we need |
@@ -1984,6 +1967,10 @@ xfs_iunlink( | |||
1984 | * Here we put the head pointer into our next pointer, | 1967 | * Here we put the head pointer into our next pointer, |
1985 | * and then we fall through to point the head at us. | 1968 | * and then we fall through to point the head at us. |
1986 | */ | 1969 | */ |
1970 | error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); | ||
1971 | if (error) | ||
1972 | return error; | ||
1973 | |||
1987 | ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); | 1974 | ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); |
1988 | /* both on-disk, don't endian flip twice */ | 1975 | /* both on-disk, don't endian flip twice */ |
1989 | dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; | 1976 | dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; |
@@ -2209,7 +2196,6 @@ xfs_ifree_cluster( | |||
2209 | xfs_inode_log_item_t *iip; | 2196 | xfs_inode_log_item_t *iip; |
2210 | xfs_log_item_t *lip; | 2197 | xfs_log_item_t *lip; |
2211 | xfs_perag_t *pag = xfs_get_perag(mp, inum); | 2198 | xfs_perag_t *pag = xfs_get_perag(mp, inum); |
2212 | SPLDECL(s); | ||
2213 | 2199 | ||
2214 | if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { | 2200 | if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { |
2215 | blks_per_cluster = 1; | 2201 | blks_per_cluster = 1; |
@@ -2311,9 +2297,9 @@ xfs_ifree_cluster( | |||
2311 | iip = (xfs_inode_log_item_t *)lip; | 2297 | iip = (xfs_inode_log_item_t *)lip; |
2312 | ASSERT(iip->ili_logged == 1); | 2298 | ASSERT(iip->ili_logged == 1); |
2313 | lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; | 2299 | lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; |
2314 | AIL_LOCK(mp,s); | 2300 | spin_lock(&mp->m_ail_lock); |
2315 | iip->ili_flush_lsn = iip->ili_item.li_lsn; | 2301 | iip->ili_flush_lsn = iip->ili_item.li_lsn; |
2316 | AIL_UNLOCK(mp, s); | 2302 | spin_unlock(&mp->m_ail_lock); |
2317 | xfs_iflags_set(iip->ili_inode, XFS_ISTALE); | 2303 | xfs_iflags_set(iip->ili_inode, XFS_ISTALE); |
2318 | pre_flushed++; | 2304 | pre_flushed++; |
2319 | } | 2305 | } |
@@ -2334,9 +2320,9 @@ xfs_ifree_cluster( | |||
2334 | iip->ili_last_fields = iip->ili_format.ilf_fields; | 2320 | iip->ili_last_fields = iip->ili_format.ilf_fields; |
2335 | iip->ili_format.ilf_fields = 0; | 2321 | iip->ili_format.ilf_fields = 0; |
2336 | iip->ili_logged = 1; | 2322 | iip->ili_logged = 1; |
2337 | AIL_LOCK(mp,s); | 2323 | spin_lock(&mp->m_ail_lock); |
2338 | iip->ili_flush_lsn = iip->ili_item.li_lsn; | 2324 | iip->ili_flush_lsn = iip->ili_item.li_lsn; |
2339 | AIL_UNLOCK(mp, s); | 2325 | spin_unlock(&mp->m_ail_lock); |
2340 | 2326 | ||
2341 | xfs_buf_attach_iodone(bp, | 2327 | xfs_buf_attach_iodone(bp, |
2342 | (void(*)(xfs_buf_t*,xfs_log_item_t*)) | 2328 | (void(*)(xfs_buf_t*,xfs_log_item_t*)) |
@@ -2374,6 +2360,8 @@ xfs_ifree( | |||
2374 | int error; | 2360 | int error; |
2375 | int delete; | 2361 | int delete; |
2376 | xfs_ino_t first_ino; | 2362 | xfs_ino_t first_ino; |
2363 | xfs_dinode_t *dip; | ||
2364 | xfs_buf_t *ibp; | ||
2377 | 2365 | ||
2378 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); | 2366 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); |
2379 | ASSERT(ip->i_transp == tp); | 2367 | ASSERT(ip->i_transp == tp); |
@@ -2409,8 +2397,27 @@ xfs_ifree( | |||
2409 | * by reincarnations of this inode. | 2397 | * by reincarnations of this inode. |
2410 | */ | 2398 | */ |
2411 | ip->i_d.di_gen++; | 2399 | ip->i_d.di_gen++; |
2400 | |||
2412 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 2401 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2413 | 2402 | ||
2403 | error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0); | ||
2404 | if (error) | ||
2405 | return error; | ||
2406 | |||
2407 | /* | ||
2408 | * Clear the on-disk di_mode. This is to prevent xfs_bulkstat | ||
2409 | * from picking up this inode when it is reclaimed (its incore state | ||
2410 | * initialized but not flushed to disk yet). The in-core di_mode is | ||
2411 | * already cleared and a corresponding transaction logged. | ||
2412 | * The hack here just synchronizes the in-core to on-disk | ||
2413 | * di_mode value in advance before the actual inode sync to disk. | ||
2414 | * This is OK because the inode is already unlinked and would never | ||
2415 | * change its di_mode again for this inode generation. | ||
2416 | * This is a temporary hack that would require a proper fix | ||
2417 | * in the future. | ||
2418 | */ | ||
2419 | dip->di_core.di_mode = 0; | ||
2420 | |||
2414 | if (delete) { | 2421 | if (delete) { |
2415 | xfs_ifree_cluster(ip, tp, first_ino); | 2422 | xfs_ifree_cluster(ip, tp, first_ino); |
2416 | } | 2423 | } |
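
The comment added above explains the trick; as a rough illustration of why zeroing the on-disk di_mode is sufficient, here is a minimal user-space sketch of a bulkstat-style filter. The struct and the mode value are invented stand-ins, not the real xfs_dinode_core layout; only the "mode of zero means free, skip it" test mirrors the kernel behaviour the comment relies on.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the on-disk inode core; illustrative only. */
struct disk_inode {
	uint16_t di_mode;	/* 0 means the inode slot is free */
	uint32_t di_gen;
};

/* A bulkstat-style scan only reports inodes whose mode is non-zero,
 * which is why xfs_ifree() above zeroes dip->di_core.di_mode before
 * the inode buffer reaches disk. */
static int report_inode(const struct disk_inode *dip)
{
	return dip->di_mode != 0;
}

int main(void)
{
	struct disk_inode freed = { .di_mode = 0,       .di_gen = 8 };
	struct disk_inode live  = { .di_mode = 0100644, .di_gen = 8 };

	printf("freed reported: %d, live reported: %d\n",
	       report_inode(&freed), report_inode(&live));
	return 0;
}
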
@@ -2735,7 +2742,6 @@ void | |||
2735 | xfs_idestroy( | 2742 | xfs_idestroy( |
2736 | xfs_inode_t *ip) | 2743 | xfs_inode_t *ip) |
2737 | { | 2744 | { |
2738 | |||
2739 | switch (ip->i_d.di_mode & S_IFMT) { | 2745 | switch (ip->i_d.di_mode & S_IFMT) { |
2740 | case S_IFREG: | 2746 | case S_IFREG: |
2741 | case S_IFDIR: | 2747 | case S_IFDIR: |
@@ -2749,7 +2755,7 @@ xfs_idestroy( | |||
2749 | mrfree(&ip->i_iolock); | 2755 | mrfree(&ip->i_iolock); |
2750 | freesema(&ip->i_flock); | 2756 | freesema(&ip->i_flock); |
2751 | 2757 | ||
2752 | #ifdef XFS_VNODE_TRACE | 2758 | #ifdef XFS_INODE_TRACE |
2753 | ktrace_free(ip->i_trace); | 2759 | ktrace_free(ip->i_trace); |
2754 | #endif | 2760 | #endif |
2755 | #ifdef XFS_BMAP_TRACE | 2761 | #ifdef XFS_BMAP_TRACE |
@@ -2775,16 +2781,15 @@ xfs_idestroy( | |||
2775 | */ | 2781 | */ |
2776 | xfs_mount_t *mp = ip->i_mount; | 2782 | xfs_mount_t *mp = ip->i_mount; |
2777 | xfs_log_item_t *lip = &ip->i_itemp->ili_item; | 2783 | xfs_log_item_t *lip = &ip->i_itemp->ili_item; |
2778 | int s; | ||
2779 | 2784 | ||
2780 | ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || | 2785 | ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || |
2781 | XFS_FORCED_SHUTDOWN(ip->i_mount)); | 2786 | XFS_FORCED_SHUTDOWN(ip->i_mount)); |
2782 | if (lip->li_flags & XFS_LI_IN_AIL) { | 2787 | if (lip->li_flags & XFS_LI_IN_AIL) { |
2783 | AIL_LOCK(mp, s); | 2788 | spin_lock(&mp->m_ail_lock); |
2784 | if (lip->li_flags & XFS_LI_IN_AIL) | 2789 | if (lip->li_flags & XFS_LI_IN_AIL) |
2785 | xfs_trans_delete_ail(mp, lip, s); | 2790 | xfs_trans_delete_ail(mp, lip); |
2786 | else | 2791 | else |
2787 | AIL_UNLOCK(mp, s); | 2792 | spin_unlock(&mp->m_ail_lock); |
2788 | } | 2793 | } |
2789 | xfs_inode_item_destroy(ip); | 2794 | xfs_inode_item_destroy(ip); |
2790 | } | 2795 | } |
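
The conversion above keeps the existing double-check shape while replacing the AIL_LOCK()/AIL_UNLOCK() macros (and their SPL cookie) with a plain spin_lock(&mp->m_ail_lock). A minimal user-space sketch of that check/lock/recheck idiom follows; the pthread mutex, flag and helper names are stand-ins for illustration only, and as with xfs_trans_delete_ail() the delete helper is assumed to drop the lock itself.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static bool item_in_ail = true;

/* Unlink the item; like xfs_trans_delete_ail(), drop the lock on exit. */
static void delete_from_ail_locked(void)
{
	item_in_ail = false;
	pthread_mutex_unlock(&ail_lock);
}

static void maybe_delete_from_ail(void)
{
	if (!item_in_ail)		/* cheap unlocked test first */
		return;
	pthread_mutex_lock(&ail_lock);
	if (item_in_ail)		/* re-check now that we hold the lock */
		delete_from_ail_locked();
	else
		pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	maybe_delete_from_ail();
	printf("still in AIL: %d\n", item_in_ail);
	return 0;
}
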
@@ -2816,40 +2821,8 @@ xfs_iunpin( | |||
2816 | { | 2821 | { |
2817 | ASSERT(atomic_read(&ip->i_pincount) > 0); | 2822 | ASSERT(atomic_read(&ip->i_pincount) > 0); |
2818 | 2823 | ||
2819 | if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) { | 2824 | if (atomic_dec_and_test(&ip->i_pincount)) |
2820 | |||
2821 | /* | ||
2822 | * If the inode is currently being reclaimed, the link between | ||
2823 | * the bhv_vnode and the xfs_inode will be broken after the | ||
2824 | * XFS_IRECLAIM* flag is set. Hence, if these flags are not | ||
2825 | * set, then we can move forward and mark the linux inode dirty | ||
2826 | * knowing that it is still valid as it won't freed until after | ||
2827 | * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The | ||
2828 | * i_flags_lock is used to synchronise the setting of the | ||
2829 | * XFS_IRECLAIM* flags and the breaking of the link, and so we | ||
2830 | * can execute atomically w.r.t to reclaim by holding this lock | ||
2831 | * here. | ||
2832 | * | ||
2833 | * However, we still need to issue the unpin wakeup call as the | ||
2834 | * inode reclaim may be blocked waiting for the inode to become | ||
2835 | * unpinned. | ||
2836 | */ | ||
2837 | |||
2838 | if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) { | ||
2839 | bhv_vnode_t *vp = XFS_ITOV_NULL(ip); | ||
2840 | struct inode *inode = NULL; | ||
2841 | |||
2842 | BUG_ON(vp == NULL); | ||
2843 | inode = vn_to_inode(vp); | ||
2844 | BUG_ON(inode->i_state & I_CLEAR); | ||
2845 | |||
2846 | /* make sync come back and flush this inode */ | ||
2847 | if (!(inode->i_state & (I_NEW|I_FREEING))) | ||
2848 | mark_inode_dirty_sync(inode); | ||
2849 | } | ||
2850 | spin_unlock(&ip->i_flags_lock); | ||
2851 | wake_up(&ip->i_ipin_wait); | 2825 | wake_up(&ip->i_ipin_wait); |
2852 | } | ||
2853 | } | 2826 | } |
2854 | 2827 | ||
2855 | /* | 2828 | /* |
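
With the reclaim-race handling gone, xfs_iunpin() above reduces to a decrement plus a wakeup on the final unpin; the job of dirtying the Linux inode moves into xfs_inode_item_format() later in this patch via xfs_mark_inode_dirty_sync(). A small stand-alone sketch of the dec-and-test behaviour, using C11 atomics as a stand-in for the kernel's atomic_dec_and_test():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pincount = 3;

/* Returns 1 only for the unpin that drops the count to zero, i.e. the
 * point at which the kernel code calls wake_up(&ip->i_ipin_wait). */
static int unpin_was_last(void)
{
	return atomic_fetch_sub(&pincount, 1) == 1;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("unpin %d -> wake waiters: %d\n", i, unpin_was_last());
	return 0;
}
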
@@ -3338,7 +3311,6 @@ xfs_iflush_int( | |||
3338 | #ifdef XFS_TRANS_DEBUG | 3311 | #ifdef XFS_TRANS_DEBUG |
3339 | int first; | 3312 | int first; |
3340 | #endif | 3313 | #endif |
3341 | SPLDECL(s); | ||
3342 | 3314 | ||
3343 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); | 3315 | ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); |
3344 | ASSERT(issemalocked(&(ip->i_flock))); | 3316 | ASSERT(issemalocked(&(ip->i_flock))); |
@@ -3533,9 +3505,9 @@ xfs_iflush_int( | |||
3533 | iip->ili_logged = 1; | 3505 | iip->ili_logged = 1; |
3534 | 3506 | ||
3535 | ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ | 3507 | ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ |
3536 | AIL_LOCK(mp,s); | 3508 | spin_lock(&mp->m_ail_lock); |
3537 | iip->ili_flush_lsn = iip->ili_item.li_lsn; | 3509 | iip->ili_flush_lsn = iip->ili_item.li_lsn; |
3538 | AIL_UNLOCK(mp, s); | 3510 | spin_unlock(&mp->m_ail_lock); |
3539 | 3511 | ||
3540 | /* | 3512 | /* |
3541 | * Attach the function xfs_iflush_done to the inode's | 3513 | * Attach the function xfs_iflush_done to the inode's |
@@ -3611,95 +3583,6 @@ xfs_iflush_all( | |||
3611 | XFS_MOUNT_IUNLOCK(mp); | 3583 | XFS_MOUNT_IUNLOCK(mp); |
3612 | } | 3584 | } |
3613 | 3585 | ||
3614 | /* | ||
3615 | * xfs_iaccess: check accessibility of inode for mode. | ||
3616 | */ | ||
3617 | int | ||
3618 | xfs_iaccess( | ||
3619 | xfs_inode_t *ip, | ||
3620 | mode_t mode, | ||
3621 | cred_t *cr) | ||
3622 | { | ||
3623 | int error; | ||
3624 | mode_t orgmode = mode; | ||
3625 | struct inode *inode = vn_to_inode(XFS_ITOV(ip)); | ||
3626 | |||
3627 | if (mode & S_IWUSR) { | ||
3628 | umode_t imode = inode->i_mode; | ||
3629 | |||
3630 | if (IS_RDONLY(inode) && | ||
3631 | (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) | ||
3632 | return XFS_ERROR(EROFS); | ||
3633 | |||
3634 | if (IS_IMMUTABLE(inode)) | ||
3635 | return XFS_ERROR(EACCES); | ||
3636 | } | ||
3637 | |||
3638 | /* | ||
3639 | * If there's an Access Control List it's used instead of | ||
3640 | * the mode bits. | ||
3641 | */ | ||
3642 | if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) | ||
3643 | return error ? XFS_ERROR(error) : 0; | ||
3644 | |||
3645 | if (current_fsuid(cr) != ip->i_d.di_uid) { | ||
3646 | mode >>= 3; | ||
3647 | if (!in_group_p((gid_t)ip->i_d.di_gid)) | ||
3648 | mode >>= 3; | ||
3649 | } | ||
3650 | |||
3651 | /* | ||
3652 | * If the DACs are ok we don't need any capability check. | ||
3653 | */ | ||
3654 | if ((ip->i_d.di_mode & mode) == mode) | ||
3655 | return 0; | ||
3656 | /* | ||
3657 | * Read/write DACs are always overridable. | ||
3658 | * Executable DACs are overridable if at least one exec bit is set. | ||
3659 | */ | ||
3660 | if (!(orgmode & S_IXUSR) || | ||
3661 | (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)) | ||
3662 | if (capable_cred(cr, CAP_DAC_OVERRIDE)) | ||
3663 | return 0; | ||
3664 | |||
3665 | if ((orgmode == S_IRUSR) || | ||
3666 | (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) { | ||
3667 | if (capable_cred(cr, CAP_DAC_READ_SEARCH)) | ||
3668 | return 0; | ||
3669 | #ifdef NOISE | ||
3670 | cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode); | ||
3671 | #endif /* NOISE */ | ||
3672 | return XFS_ERROR(EACCES); | ||
3673 | } | ||
3674 | return XFS_ERROR(EACCES); | ||
3675 | } | ||
3676 | |||
3677 | /* | ||
3678 | * xfs_iroundup: round up argument to next power of two | ||
3679 | */ | ||
3680 | uint | ||
3681 | xfs_iroundup( | ||
3682 | uint v) | ||
3683 | { | ||
3684 | int i; | ||
3685 | uint m; | ||
3686 | |||
3687 | if ((v & (v - 1)) == 0) | ||
3688 | return v; | ||
3689 | ASSERT((v & 0x80000000) == 0); | ||
3690 | if ((v & (v + 1)) == 0) | ||
3691 | return v + 1; | ||
3692 | for (i = 0, m = 1; i < 31; i++, m <<= 1) { | ||
3693 | if (v & m) | ||
3694 | continue; | ||
3695 | v |= m; | ||
3696 | if ((v & (v + 1)) == 0) | ||
3697 | return v + 1; | ||
3698 | } | ||
3699 | ASSERT(0); | ||
3700 | return( 0 ); | ||
3701 | } | ||
3702 | |||
3703 | #ifdef XFS_ILOCK_TRACE | 3586 | #ifdef XFS_ILOCK_TRACE |
3704 | ktrace_t *xfs_ilock_trace_buf; | 3587 | ktrace_t *xfs_ilock_trace_buf; |
3705 | 3588 | ||
@@ -4206,7 +4089,7 @@ xfs_iext_realloc_direct( | |||
4206 | return; | 4089 | return; |
4207 | } | 4090 | } |
4208 | if (!is_power_of_2(new_size)){ | 4091 | if (!is_power_of_2(new_size)){ |
4209 | rnew_size = xfs_iroundup(new_size); | 4092 | rnew_size = roundup_pow_of_two(new_size); |
4210 | } | 4093 | } |
4211 | if (rnew_size != ifp->if_real_bytes) { | 4094 | if (rnew_size != ifp->if_real_bytes) { |
4212 | ifp->if_u1.if_extents = | 4095 | ifp->if_u1.if_extents = |
@@ -4229,7 +4112,7 @@ xfs_iext_realloc_direct( | |||
4229 | else { | 4112 | else { |
4230 | new_size += ifp->if_bytes; | 4113 | new_size += ifp->if_bytes; |
4231 | if (!is_power_of_2(new_size)) { | 4114 | if (!is_power_of_2(new_size)) { |
4232 | rnew_size = xfs_iroundup(new_size); | 4115 | rnew_size = roundup_pow_of_two(new_size); |
4233 | } | 4116 | } |
4234 | xfs_iext_inline_to_direct(ifp, rnew_size); | 4117 | xfs_iext_inline_to_direct(ifp, rnew_size); |
4235 | } | 4118 | } |
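
The removed xfs_iroundup() and the generic roundup_pow_of_two() that replaces it in the two hunks above both round a size up to the next power of two, with values that are already powers of two returned unchanged. A quick stand-alone check of that behaviour; since this is user-space C the roundup is open-coded here rather than taken from the kernel's log2.h:

#include <stdio.h>

/* Smallest power of two that is >= v (v must be at least 1). */
static unsigned int roundup_p2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 1, 16, 17, 100, 128, 4000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u -> %u\n", sizes[i], roundup_p2(sizes[i]));
	return 0;
}
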
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index e5aff929cc65..bfcd72cbaeea 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -132,45 +132,6 @@ typedef struct dm_attrs_s { | |||
132 | __uint16_t da_pad; /* DMIG extra padding */ | 132 | __uint16_t da_pad; /* DMIG extra padding */ |
133 | } dm_attrs_t; | 133 | } dm_attrs_t; |
134 | 134 | ||
135 | typedef struct xfs_iocore { | ||
136 | void *io_obj; /* pointer to container | ||
137 | * inode or dcxvn structure */ | ||
138 | struct xfs_mount *io_mount; /* fs mount struct ptr */ | ||
139 | #ifdef DEBUG | ||
140 | mrlock_t *io_lock; /* inode IO lock */ | ||
141 | mrlock_t *io_iolock; /* inode IO lock */ | ||
142 | #endif | ||
143 | |||
144 | /* I/O state */ | ||
145 | xfs_fsize_t io_new_size; /* sz when write completes */ | ||
146 | |||
147 | /* Miscellaneous state. */ | ||
148 | unsigned int io_flags; /* IO related flags */ | ||
149 | |||
150 | /* DMAPI state */ | ||
151 | dm_attrs_t io_dmattrs; | ||
152 | |||
153 | } xfs_iocore_t; | ||
154 | |||
155 | #define io_dmevmask io_dmattrs.da_dmevmask | ||
156 | #define io_dmstate io_dmattrs.da_dmstate | ||
157 | |||
158 | #define XFS_IO_INODE(io) ((xfs_inode_t *) ((io)->io_obj)) | ||
159 | #define XFS_IO_DCXVN(io) ((dcxvn_t *) ((io)->io_obj)) | ||
160 | |||
161 | /* | ||
162 | * Flags in the flags field | ||
163 | */ | ||
164 | |||
165 | #define XFS_IOCORE_RT 0x1 | ||
166 | |||
167 | /* | ||
168 | * xfs_iocore prototypes | ||
169 | */ | ||
170 | |||
171 | extern void xfs_iocore_inode_init(struct xfs_inode *); | ||
172 | extern void xfs_iocore_inode_reinit(struct xfs_inode *); | ||
173 | |||
174 | /* | 135 | /* |
175 | * This is the xfs inode cluster structure. This structure is used by | 136 | * This is the xfs inode cluster structure. This structure is used by |
176 | * xfs_iflush to find inodes that share a cluster and can be flushed to disk at | 137 | * xfs_iflush to find inodes that share a cluster and can be flushed to disk at |
@@ -181,7 +142,7 @@ typedef struct xfs_icluster { | |||
181 | xfs_daddr_t icl_blkno; /* starting block number of | 142 | xfs_daddr_t icl_blkno; /* starting block number of |
182 | * the cluster */ | 143 | * the cluster */ |
183 | struct xfs_buf *icl_buf; /* the inode buffer */ | 144 | struct xfs_buf *icl_buf; /* the inode buffer */ |
184 | lock_t icl_lock; /* inode list lock */ | 145 | spinlock_t icl_lock; /* inode list lock */ |
185 | } xfs_icluster_t; | 146 | } xfs_icluster_t; |
186 | 147 | ||
187 | /* | 148 | /* |
@@ -283,9 +244,6 @@ typedef struct xfs_inode { | |||
283 | struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ | 244 | struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ |
284 | struct xfs_inode *i_release; /* inode to unref */ | 245 | struct xfs_inode *i_release; /* inode to unref */ |
285 | #endif | 246 | #endif |
286 | /* I/O state */ | ||
287 | xfs_iocore_t i_iocore; /* I/O core */ | ||
288 | |||
289 | /* Miscellaneous state. */ | 247 | /* Miscellaneous state. */ |
290 | unsigned short i_flags; /* see defined flags below */ | 248 | unsigned short i_flags; /* see defined flags below */ |
291 | unsigned char i_update_core; /* timestamps/size is dirty */ | 249 | unsigned char i_update_core; /* timestamps/size is dirty */ |
@@ -298,9 +256,10 @@ typedef struct xfs_inode { | |||
298 | struct hlist_node i_cnode; /* cluster link node */ | 256 | struct hlist_node i_cnode; /* cluster link node */ |
299 | 257 | ||
300 | xfs_fsize_t i_size; /* in-memory size */ | 258 | xfs_fsize_t i_size; /* in-memory size */ |
259 | xfs_fsize_t i_new_size; /* size when write completes */ | ||
301 | atomic_t i_iocount; /* outstanding I/O count */ | 260 | atomic_t i_iocount; /* outstanding I/O count */ |
302 | /* Trace buffers per inode. */ | 261 | /* Trace buffers per inode. */ |
303 | #ifdef XFS_VNODE_TRACE | 262 | #ifdef XFS_INODE_TRACE |
304 | struct ktrace *i_trace; /* general inode trace */ | 263 | struct ktrace *i_trace; /* general inode trace */ |
305 | #endif | 264 | #endif |
306 | #ifdef XFS_BMAP_TRACE | 265 | #ifdef XFS_BMAP_TRACE |
@@ -382,17 +341,42 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) | |||
382 | /* | 341 | /* |
383 | * Fork handling. | 342 | * Fork handling. |
384 | */ | 343 | */ |
385 | #define XFS_IFORK_PTR(ip,w) \ | ||
386 | ((w) == XFS_DATA_FORK ? &(ip)->i_df : (ip)->i_afp) | ||
387 | #define XFS_IFORK_Q(ip) XFS_CFORK_Q(&(ip)->i_d) | ||
388 | #define XFS_IFORK_DSIZE(ip) XFS_CFORK_DSIZE(&ip->i_d, ip->i_mount) | ||
389 | #define XFS_IFORK_ASIZE(ip) XFS_CFORK_ASIZE(&ip->i_d, ip->i_mount) | ||
390 | #define XFS_IFORK_SIZE(ip,w) XFS_CFORK_SIZE(&ip->i_d, ip->i_mount, w) | ||
391 | #define XFS_IFORK_FORMAT(ip,w) XFS_CFORK_FORMAT(&ip->i_d, w) | ||
392 | #define XFS_IFORK_FMT_SET(ip,w,n) XFS_CFORK_FMT_SET(&ip->i_d, w, n) | ||
393 | #define XFS_IFORK_NEXTENTS(ip,w) XFS_CFORK_NEXTENTS(&ip->i_d, w) | ||
394 | #define XFS_IFORK_NEXT_SET(ip,w,n) XFS_CFORK_NEXT_SET(&ip->i_d, w, n) | ||
395 | 344 | ||
345 | #define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0) | ||
346 | #define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3)) | ||
347 | |||
348 | #define XFS_IFORK_PTR(ip,w) \ | ||
349 | ((w) == XFS_DATA_FORK ? \ | ||
350 | &(ip)->i_df : \ | ||
351 | (ip)->i_afp) | ||
352 | #define XFS_IFORK_DSIZE(ip) \ | ||
353 | (XFS_IFORK_Q(ip) ? \ | ||
354 | XFS_IFORK_BOFF(ip) : \ | ||
355 | XFS_LITINO((ip)->i_mount)) | ||
356 | #define XFS_IFORK_ASIZE(ip) \ | ||
357 | (XFS_IFORK_Q(ip) ? \ | ||
358 | XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : \ | ||
359 | 0) | ||
360 | #define XFS_IFORK_SIZE(ip,w) \ | ||
361 | ((w) == XFS_DATA_FORK ? \ | ||
362 | XFS_IFORK_DSIZE(ip) : \ | ||
363 | XFS_IFORK_ASIZE(ip)) | ||
364 | #define XFS_IFORK_FORMAT(ip,w) \ | ||
365 | ((w) == XFS_DATA_FORK ? \ | ||
366 | (ip)->i_d.di_format : \ | ||
367 | (ip)->i_d.di_aformat) | ||
368 | #define XFS_IFORK_FMT_SET(ip,w,n) \ | ||
369 | ((w) == XFS_DATA_FORK ? \ | ||
370 | ((ip)->i_d.di_format = (n)) : \ | ||
371 | ((ip)->i_d.di_aformat = (n))) | ||
372 | #define XFS_IFORK_NEXTENTS(ip,w) \ | ||
373 | ((w) == XFS_DATA_FORK ? \ | ||
374 | (ip)->i_d.di_nextents : \ | ||
375 | (ip)->i_d.di_anextents) | ||
376 | #define XFS_IFORK_NEXT_SET(ip,w,n) \ | ||
377 | ((w) == XFS_DATA_FORK ? \ | ||
378 | ((ip)->i_d.di_nextents = (n)) : \ | ||
379 | ((ip)->i_d.di_anextents = (n))) | ||
396 | 380 | ||
397 | #ifdef __KERNEL__ | 381 | #ifdef __KERNEL__ |
398 | 382 | ||
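
The rewritten fork macros above derive everything from di_forkoff: XFS_IFORK_BOFF() is the byte offset of the attribute fork inside the inode's literal area, the data fork gets the bytes below that boundary, and the attribute fork gets whatever remains (or zero when no attribute fork exists). A small sketch of that arithmetic; the literal-area size and forkoff value are made-up numbers, only the shift and subtraction mirror the macros:

#include <stdio.h>

int main(void)
{
	int litino  = 256;	/* stand-in for XFS_LITINO(mp), in bytes */
	int forkoff = 15;	/* stand-in for ip->i_d.di_forkoff, 8-byte units */

	int boff  = forkoff << 3;			/* XFS_IFORK_BOFF */
	int dsize = forkoff ? boff : litino;		/* XFS_IFORK_DSIZE */
	int asize = forkoff ? litino - boff : 0;	/* XFS_IFORK_ASIZE */

	printf("data fork %d bytes, attr fork %d bytes of %d\n",
	       dsize, asize, litino);
	return 0;
}
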
@@ -509,7 +493,6 @@ void xfs_ihash_init(struct xfs_mount *); | |||
509 | void xfs_ihash_free(struct xfs_mount *); | 493 | void xfs_ihash_free(struct xfs_mount *); |
510 | xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, | 494 | xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, |
511 | struct xfs_trans *); | 495 | struct xfs_trans *); |
512 | void xfs_inode_lock_init(xfs_inode_t *, bhv_vnode_t *); | ||
513 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, | 496 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, |
514 | uint, uint, xfs_inode_t **, xfs_daddr_t); | 497 | uint, uint, xfs_inode_t **, xfs_daddr_t); |
515 | void xfs_iput(xfs_inode_t *, uint); | 498 | void xfs_iput(xfs_inode_t *, uint); |
@@ -545,7 +528,7 @@ void xfs_dinode_to_disk(struct xfs_dinode_core *, | |||
545 | struct xfs_icdinode *); | 528 | struct xfs_icdinode *); |
546 | 529 | ||
547 | uint xfs_ip2xflags(struct xfs_inode *); | 530 | uint xfs_ip2xflags(struct xfs_inode *); |
548 | uint xfs_dic2xflags(struct xfs_dinode_core *); | 531 | uint xfs_dic2xflags(struct xfs_dinode *); |
549 | int xfs_ifree(struct xfs_trans *, xfs_inode_t *, | 532 | int xfs_ifree(struct xfs_trans *, xfs_inode_t *, |
550 | struct xfs_bmap_free *); | 533 | struct xfs_bmap_free *); |
551 | int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); | 534 | int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); |
@@ -567,13 +550,12 @@ void xfs_iunpin(xfs_inode_t *); | |||
567 | int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); | 550 | int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); |
568 | int xfs_iflush(xfs_inode_t *, uint); | 551 | int xfs_iflush(xfs_inode_t *, uint); |
569 | void xfs_iflush_all(struct xfs_mount *); | 552 | void xfs_iflush_all(struct xfs_mount *); |
570 | int xfs_iaccess(xfs_inode_t *, mode_t, cred_t *); | ||
571 | uint xfs_iroundup(uint); | ||
572 | void xfs_ichgtime(xfs_inode_t *, int); | 553 | void xfs_ichgtime(xfs_inode_t *, int); |
573 | xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); | 554 | xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); |
574 | void xfs_lock_inodes(xfs_inode_t **, int, int, uint); | 555 | void xfs_lock_inodes(xfs_inode_t **, int, int, uint); |
575 | 556 | ||
576 | void xfs_synchronize_atime(xfs_inode_t *); | 557 | void xfs_synchronize_atime(xfs_inode_t *); |
558 | void xfs_mark_inode_dirty_sync(xfs_inode_t *); | ||
577 | 559 | ||
578 | xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); | 560 | xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); |
579 | void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, | 561 | void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 565d470a6b4a..034ca7202295 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -274,6 +274,11 @@ xfs_inode_item_format( | |||
274 | */ | 274 | */ |
275 | xfs_synchronize_atime(ip); | 275 | xfs_synchronize_atime(ip); |
276 | 276 | ||
277 | /* | ||
278 | * make sure the linux inode is dirty | ||
279 | */ | ||
280 | xfs_mark_inode_dirty_sync(ip); | ||
281 | |||
277 | vecp->i_addr = (xfs_caddr_t)&ip->i_d; | 282 | vecp->i_addr = (xfs_caddr_t)&ip->i_d; |
278 | vecp->i_len = sizeof(xfs_dinode_core_t); | 283 | vecp->i_len = sizeof(xfs_dinode_core_t); |
279 | XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); | 284 | XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); |
@@ -615,7 +620,7 @@ xfs_inode_item_trylock( | |||
615 | return XFS_ITEM_PUSHBUF; | 620 | return XFS_ITEM_PUSHBUF; |
616 | } else { | 621 | } else { |
617 | /* | 622 | /* |
618 | * We hold the AIL_LOCK, so we must specify the | 623 | * We hold the AIL lock, so we must specify the |
619 | * NONOTIFY flag so that we won't double trip. | 624 | * NONOTIFY flag so that we won't double trip. |
620 | */ | 625 | */ |
621 | xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); | 626 | xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); |
@@ -749,7 +754,7 @@ xfs_inode_item_committed( | |||
749 | * marked delayed write. If that's the case, we'll initiate a bawrite on that | 754 | * marked delayed write. If that's the case, we'll initiate a bawrite on that |
750 | * buffer to expedite the process. | 755 | * buffer to expedite the process. |
751 | * | 756 | * |
752 | * We aren't holding the AIL_LOCK (or the flush lock) when this gets called, | 757 | * We aren't holding the AIL lock (or the flush lock) when this gets called, |
753 | * so it is inherently race-y. | 758 | * so it is inherently race-y. |
754 | */ | 759 | */ |
755 | STATIC void | 760 | STATIC void |
@@ -792,7 +797,7 @@ xfs_inode_item_pushbuf( | |||
792 | if (XFS_BUF_ISDELAYWRITE(bp)) { | 797 | if (XFS_BUF_ISDELAYWRITE(bp)) { |
793 | /* | 798 | /* |
794 | * We were racing with iflush because we don't hold | 799 | * We were racing with iflush because we don't hold |
795 | * the AIL_LOCK or the flush lock. However, at this point, | 800 | * the AIL lock or the flush lock. However, at this point, |
796 | * we have the buffer, and we know that it's dirty. | 801 | * we have the buffer, and we know that it's dirty. |
797 | * So, it's possible that iflush raced with us, and | 802 | * So, it's possible that iflush raced with us, and |
798 | * this item is already taken off the AIL. | 803 | * this item is already taken off the AIL. |
@@ -968,7 +973,6 @@ xfs_iflush_done( | |||
968 | xfs_inode_log_item_t *iip) | 973 | xfs_inode_log_item_t *iip) |
969 | { | 974 | { |
970 | xfs_inode_t *ip; | 975 | xfs_inode_t *ip; |
971 | SPLDECL(s); | ||
972 | 976 | ||
973 | ip = iip->ili_inode; | 977 | ip = iip->ili_inode; |
974 | 978 | ||
@@ -983,15 +987,15 @@ xfs_iflush_done( | |||
983 | */ | 987 | */ |
984 | if (iip->ili_logged && | 988 | if (iip->ili_logged && |
985 | (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { | 989 | (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { |
986 | AIL_LOCK(ip->i_mount, s); | 990 | spin_lock(&ip->i_mount->m_ail_lock); |
987 | if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { | 991 | if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { |
988 | /* | 992 | /* |
989 | * xfs_trans_delete_ail() drops the AIL lock. | 993 | * xfs_trans_delete_ail() drops the AIL lock. |
990 | */ | 994 | */ |
991 | xfs_trans_delete_ail(ip->i_mount, | 995 | xfs_trans_delete_ail(ip->i_mount, |
992 | (xfs_log_item_t*)iip, s); | 996 | (xfs_log_item_t*)iip); |
993 | } else { | 997 | } else { |
994 | AIL_UNLOCK(ip->i_mount, s); | 998 | spin_unlock(&ip->i_mount->m_ail_lock); |
995 | } | 999 | } |
996 | } | 1000 | } |
997 | 1001 | ||
@@ -1025,21 +1029,19 @@ xfs_iflush_abort( | |||
1025 | { | 1029 | { |
1026 | xfs_inode_log_item_t *iip; | 1030 | xfs_inode_log_item_t *iip; |
1027 | xfs_mount_t *mp; | 1031 | xfs_mount_t *mp; |
1028 | SPLDECL(s); | ||
1029 | 1032 | ||
1030 | iip = ip->i_itemp; | 1033 | iip = ip->i_itemp; |
1031 | mp = ip->i_mount; | 1034 | mp = ip->i_mount; |
1032 | if (iip) { | 1035 | if (iip) { |
1033 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { | 1036 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { |
1034 | AIL_LOCK(mp, s); | 1037 | spin_lock(&mp->m_ail_lock); |
1035 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { | 1038 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { |
1036 | /* | 1039 | /* |
1037 | * xfs_trans_delete_ail() drops the AIL lock. | 1040 | * xfs_trans_delete_ail() drops the AIL lock. |
1038 | */ | 1041 | */ |
1039 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip, | 1042 | xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip); |
1040 | s); | ||
1041 | } else | 1043 | } else |
1042 | AIL_UNLOCK(mp, s); | 1044 | spin_unlock(&mp->m_ail_lock); |
1043 | } | 1045 | } |
1044 | iip->ili_logged = 0; | 1046 | iip->ili_logged = 0; |
1045 | /* | 1047 | /* |
diff --git a/fs/xfs/xfs_iocore.c b/fs/xfs/xfs_iocore.c deleted file mode 100644 index b27b5d5be841..000000000000 --- a/fs/xfs/xfs_iocore.c +++ /dev/null | |||
@@ -1,119 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_bit.h" | ||
22 | #include "xfs_log.h" | ||
23 | #include "xfs_inum.h" | ||
24 | #include "xfs_trans.h" | ||
25 | #include "xfs_sb.h" | ||
26 | #include "xfs_ag.h" | ||
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dfrag.h" | ||
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_mount.h" | ||
31 | #include "xfs_bmap_btree.h" | ||
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | ||
38 | #include "xfs_inode_item.h" | ||
39 | #include "xfs_itable.h" | ||
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_alloc.h" | ||
42 | #include "xfs_ialloc.h" | ||
43 | #include "xfs_bmap.h" | ||
44 | #include "xfs_error.h" | ||
45 | #include "xfs_rw.h" | ||
46 | #include "xfs_quota.h" | ||
47 | #include "xfs_trans_space.h" | ||
48 | #include "xfs_iomap.h" | ||
49 | |||
50 | |||
51 | STATIC xfs_fsize_t | ||
52 | xfs_size_fn( | ||
53 | xfs_inode_t *ip) | ||
54 | { | ||
55 | return XFS_ISIZE(ip); | ||
56 | } | ||
57 | |||
58 | STATIC int | ||
59 | xfs_ioinit( | ||
60 | struct xfs_mount *mp, | ||
61 | struct xfs_mount_args *mntargs, | ||
62 | int flags) | ||
63 | { | ||
64 | return xfs_mountfs(mp, flags); | ||
65 | } | ||
66 | |||
67 | xfs_ioops_t xfs_iocore_xfs = { | ||
68 | .xfs_ioinit = (xfs_ioinit_t) xfs_ioinit, | ||
69 | .xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi, | ||
70 | .xfs_bunmapi_func = (xfs_bunmapi_t) xfs_bunmapi, | ||
71 | .xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof, | ||
72 | .xfs_iomap_write_direct = | ||
73 | (xfs_iomap_write_direct_t) xfs_iomap_write_direct, | ||
74 | .xfs_iomap_write_delay = | ||
75 | (xfs_iomap_write_delay_t) xfs_iomap_write_delay, | ||
76 | .xfs_iomap_write_allocate = | ||
77 | (xfs_iomap_write_allocate_t) xfs_iomap_write_allocate, | ||
78 | .xfs_iomap_write_unwritten = | ||
79 | (xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten, | ||
80 | .xfs_ilock = (xfs_lock_t) xfs_ilock, | ||
81 | .xfs_lck_map_shared = (xfs_lck_map_shared_t) xfs_ilock_map_shared, | ||
82 | .xfs_ilock_demote = (xfs_lock_demote_t) xfs_ilock_demote, | ||
83 | .xfs_ilock_nowait = (xfs_lock_nowait_t) xfs_ilock_nowait, | ||
84 | .xfs_unlock = (xfs_unlk_t) xfs_iunlock, | ||
85 | .xfs_size_func = (xfs_size_t) xfs_size_fn, | ||
86 | .xfs_iodone = (xfs_iodone_t) fs_noerr, | ||
87 | .xfs_swap_extents_func = (xfs_swap_extents_t) xfs_swap_extents, | ||
88 | }; | ||
89 | |||
90 | void | ||
91 | xfs_iocore_inode_reinit( | ||
92 | xfs_inode_t *ip) | ||
93 | { | ||
94 | xfs_iocore_t *io = &ip->i_iocore; | ||
95 | |||
96 | io->io_flags = 0; | ||
97 | if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) | ||
98 | io->io_flags |= XFS_IOCORE_RT; | ||
99 | io->io_dmevmask = ip->i_d.di_dmevmask; | ||
100 | io->io_dmstate = ip->i_d.di_dmstate; | ||
101 | } | ||
102 | |||
103 | void | ||
104 | xfs_iocore_inode_init( | ||
105 | xfs_inode_t *ip) | ||
106 | { | ||
107 | xfs_iocore_t *io = &ip->i_iocore; | ||
108 | xfs_mount_t *mp = ip->i_mount; | ||
109 | |||
110 | io->io_mount = mp; | ||
111 | #ifdef DEBUG | ||
112 | io->io_lock = &ip->i_lock; | ||
113 | io->io_iolock = &ip->i_iolock; | ||
114 | #endif | ||
115 | |||
116 | io->io_obj = (void *)ip; | ||
117 | |||
118 | xfs_iocore_inode_reinit(ip); | ||
119 | } | ||
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 72786e356d56..fde37f87d52f 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -53,12 +53,10 @@ | |||
53 | void | 53 | void |
54 | xfs_iomap_enter_trace( | 54 | xfs_iomap_enter_trace( |
55 | int tag, | 55 | int tag, |
56 | xfs_iocore_t *io, | 56 | xfs_inode_t *ip, |
57 | xfs_off_t offset, | 57 | xfs_off_t offset, |
58 | ssize_t count) | 58 | ssize_t count) |
59 | { | 59 | { |
60 | xfs_inode_t *ip = XFS_IO_INODE(io); | ||
61 | |||
62 | if (!ip->i_rwtrace) | 60 | if (!ip->i_rwtrace) |
63 | return; | 61 | return; |
64 | 62 | ||
@@ -70,8 +68,8 @@ xfs_iomap_enter_trace( | |||
70 | (void *)((unsigned long)((offset >> 32) & 0xffffffff)), | 68 | (void *)((unsigned long)((offset >> 32) & 0xffffffff)), |
71 | (void *)((unsigned long)(offset & 0xffffffff)), | 69 | (void *)((unsigned long)(offset & 0xffffffff)), |
72 | (void *)((unsigned long)count), | 70 | (void *)((unsigned long)count), |
73 | (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), | 71 | (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), |
74 | (void *)((unsigned long)(io->io_new_size & 0xffffffff)), | 72 | (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), |
75 | (void *)((unsigned long)current_pid()), | 73 | (void *)((unsigned long)current_pid()), |
76 | (void *)NULL, | 74 | (void *)NULL, |
77 | (void *)NULL, | 75 | (void *)NULL, |
@@ -84,15 +82,13 @@ xfs_iomap_enter_trace( | |||
84 | void | 82 | void |
85 | xfs_iomap_map_trace( | 83 | xfs_iomap_map_trace( |
86 | int tag, | 84 | int tag, |
87 | xfs_iocore_t *io, | 85 | xfs_inode_t *ip, |
88 | xfs_off_t offset, | 86 | xfs_off_t offset, |
89 | ssize_t count, | 87 | ssize_t count, |
90 | xfs_iomap_t *iomapp, | 88 | xfs_iomap_t *iomapp, |
91 | xfs_bmbt_irec_t *imapp, | 89 | xfs_bmbt_irec_t *imapp, |
92 | int flags) | 90 | int flags) |
93 | { | 91 | { |
94 | xfs_inode_t *ip = XFS_IO_INODE(io); | ||
95 | |||
96 | if (!ip->i_rwtrace) | 92 | if (!ip->i_rwtrace) |
97 | return; | 93 | return; |
98 | 94 | ||
@@ -126,7 +122,7 @@ xfs_iomap_map_trace( | |||
126 | 122 | ||
127 | STATIC int | 123 | STATIC int |
128 | xfs_imap_to_bmap( | 124 | xfs_imap_to_bmap( |
129 | xfs_iocore_t *io, | 125 | xfs_inode_t *ip, |
130 | xfs_off_t offset, | 126 | xfs_off_t offset, |
131 | xfs_bmbt_irec_t *imap, | 127 | xfs_bmbt_irec_t *imap, |
132 | xfs_iomap_t *iomapp, | 128 | xfs_iomap_t *iomapp, |
@@ -134,11 +130,10 @@ xfs_imap_to_bmap( | |||
134 | int iomaps, /* Number of iomap entries */ | 130 | int iomaps, /* Number of iomap entries */ |
135 | int flags) | 131 | int flags) |
136 | { | 132 | { |
137 | xfs_mount_t *mp; | 133 | xfs_mount_t *mp = ip->i_mount; |
138 | int pbm; | 134 | int pbm; |
139 | xfs_fsblock_t start_block; | 135 | xfs_fsblock_t start_block; |
140 | 136 | ||
141 | mp = io->io_mount; | ||
142 | 137 | ||
143 | for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) { | 138 | for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) { |
144 | iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff); | 139 | iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff); |
@@ -146,7 +141,7 @@ xfs_imap_to_bmap( | |||
146 | iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount); | 141 | iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount); |
147 | iomapp->iomap_flags = flags; | 142 | iomapp->iomap_flags = flags; |
148 | 143 | ||
149 | if (io->io_flags & XFS_IOCORE_RT) { | 144 | if (XFS_IS_REALTIME_INODE(ip)) { |
150 | iomapp->iomap_flags |= IOMAP_REALTIME; | 145 | iomapp->iomap_flags |= IOMAP_REALTIME; |
151 | iomapp->iomap_target = mp->m_rtdev_targp; | 146 | iomapp->iomap_target = mp->m_rtdev_targp; |
152 | } else { | 147 | } else { |
@@ -160,7 +155,7 @@ xfs_imap_to_bmap( | |||
160 | iomapp->iomap_bn = IOMAP_DADDR_NULL; | 155 | iomapp->iomap_bn = IOMAP_DADDR_NULL; |
161 | iomapp->iomap_flags |= IOMAP_DELAY; | 156 | iomapp->iomap_flags |= IOMAP_DELAY; |
162 | } else { | 157 | } else { |
163 | iomapp->iomap_bn = XFS_FSB_TO_DB_IO(io, start_block); | 158 | iomapp->iomap_bn = XFS_FSB_TO_DB(ip, start_block); |
164 | if (ISUNWRITTEN(imap)) | 159 | if (ISUNWRITTEN(imap)) |
165 | iomapp->iomap_flags |= IOMAP_UNWRITTEN; | 160 | iomapp->iomap_flags |= IOMAP_UNWRITTEN; |
166 | } | 161 | } |
@@ -172,14 +167,14 @@ xfs_imap_to_bmap( | |||
172 | 167 | ||
173 | int | 168 | int |
174 | xfs_iomap( | 169 | xfs_iomap( |
175 | xfs_iocore_t *io, | 170 | xfs_inode_t *ip, |
176 | xfs_off_t offset, | 171 | xfs_off_t offset, |
177 | ssize_t count, | 172 | ssize_t count, |
178 | int flags, | 173 | int flags, |
179 | xfs_iomap_t *iomapp, | 174 | xfs_iomap_t *iomapp, |
180 | int *niomaps) | 175 | int *niomaps) |
181 | { | 176 | { |
182 | xfs_mount_t *mp = io->io_mount; | 177 | xfs_mount_t *mp = ip->i_mount; |
183 | xfs_fileoff_t offset_fsb, end_fsb; | 178 | xfs_fileoff_t offset_fsb, end_fsb; |
184 | int error = 0; | 179 | int error = 0; |
185 | int lockmode = 0; | 180 | int lockmode = 0; |
@@ -188,45 +183,37 @@ xfs_iomap( | |||
188 | int bmapi_flags = 0; | 183 | int bmapi_flags = 0; |
189 | int iomap_flags = 0; | 184 | int iomap_flags = 0; |
190 | 185 | ||
186 | ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); | ||
187 | |||
191 | if (XFS_FORCED_SHUTDOWN(mp)) | 188 | if (XFS_FORCED_SHUTDOWN(mp)) |
192 | return XFS_ERROR(EIO); | 189 | return XFS_ERROR(EIO); |
193 | 190 | ||
194 | switch (flags & | 191 | switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { |
195 | (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE | | ||
196 | BMAPI_UNWRITTEN | BMAPI_DEVICE)) { | ||
197 | case BMAPI_READ: | 192 | case BMAPI_READ: |
198 | xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count); | 193 | xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count); |
199 | lockmode = XFS_LCK_MAP_SHARED(mp, io); | 194 | lockmode = xfs_ilock_map_shared(ip); |
200 | bmapi_flags = XFS_BMAPI_ENTIRE; | 195 | bmapi_flags = XFS_BMAPI_ENTIRE; |
201 | break; | 196 | break; |
202 | case BMAPI_WRITE: | 197 | case BMAPI_WRITE: |
203 | xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count); | 198 | xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count); |
204 | lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; | 199 | lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; |
205 | if (flags & BMAPI_IGNSTATE) | 200 | if (flags & BMAPI_IGNSTATE) |
206 | bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; | 201 | bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; |
207 | XFS_ILOCK(mp, io, lockmode); | 202 | xfs_ilock(ip, lockmode); |
208 | break; | 203 | break; |
209 | case BMAPI_ALLOCATE: | 204 | case BMAPI_ALLOCATE: |
210 | xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, io, offset, count); | 205 | xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count); |
211 | lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; | 206 | lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; |
212 | bmapi_flags = XFS_BMAPI_ENTIRE; | 207 | bmapi_flags = XFS_BMAPI_ENTIRE; |
208 | |||
213 | /* Attempt non-blocking lock */ | 209 | /* Attempt non-blocking lock */ |
214 | if (flags & BMAPI_TRYLOCK) { | 210 | if (flags & BMAPI_TRYLOCK) { |
215 | if (!XFS_ILOCK_NOWAIT(mp, io, lockmode)) | 211 | if (!xfs_ilock_nowait(ip, lockmode)) |
216 | return XFS_ERROR(EAGAIN); | 212 | return XFS_ERROR(EAGAIN); |
217 | } else { | 213 | } else { |
218 | XFS_ILOCK(mp, io, lockmode); | 214 | xfs_ilock(ip, lockmode); |
219 | } | 215 | } |
220 | break; | 216 | break; |
221 | case BMAPI_UNWRITTEN: | ||
222 | goto phase2; | ||
223 | case BMAPI_DEVICE: | ||
224 | lockmode = XFS_LCK_MAP_SHARED(mp, io); | ||
225 | iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ? | ||
226 | mp->m_rtdev_targp : mp->m_ddev_targp; | ||
227 | error = 0; | ||
228 | *niomaps = 1; | ||
229 | goto out; | ||
230 | default: | 217 | default: |
231 | BUG(); | 218 | BUG(); |
232 | } | 219 | } |
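
After this change the switch only dispatches on the three primary operations, so modifier bits such as BMAPI_TRYLOCK or BMAPI_IGNSTATE are masked away before the case labels are consulted. A stand-alone sketch of that masking pattern; the flag values below are invented, only the mask-then-switch idiom is the point:

#include <stdio.h>

enum {
	OP_READ     = 1 << 0,
	OP_WRITE    = 1 << 1,
	OP_ALLOCATE = 1 << 2,
	MOD_TRYLOCK = 1 << 5,	/* modifier: must not change the dispatch */
};

static const char *dispatch(int flags)
{
	/* Mask down to the primary ops so modifiers fall out of the switch. */
	switch (flags & (OP_READ | OP_WRITE | OP_ALLOCATE)) {
	case OP_READ:		return "read";
	case OP_WRITE:		return "write";
	case OP_ALLOCATE:	return "allocate";
	default:		return "bug";
	}
}

int main(void)
{
	printf("%s\n", dispatch(OP_WRITE | MOD_TRYLOCK));	/* prints "write" */
	return 0;
}
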
@@ -237,7 +224,7 @@ xfs_iomap( | |||
237 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); | 224 | end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); |
238 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 225 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
239 | 226 | ||
240 | error = XFS_BMAPI(mp, NULL, io, offset_fsb, | 227 | error = xfs_bmapi(NULL, ip, offset_fsb, |
241 | (xfs_filblks_t)(end_fsb - offset_fsb), | 228 | (xfs_filblks_t)(end_fsb - offset_fsb), |
242 | bmapi_flags, NULL, 0, &imap, | 229 | bmapi_flags, NULL, 0, &imap, |
243 | &nimaps, NULL, NULL); | 230 | &nimaps, NULL, NULL); |
@@ -245,54 +232,48 @@ xfs_iomap( | |||
245 | if (error) | 232 | if (error) |
246 | goto out; | 233 | goto out; |
247 | 234 | ||
248 | phase2: | 235 | switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) { |
249 | switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE|BMAPI_UNWRITTEN)) { | ||
250 | case BMAPI_WRITE: | 236 | case BMAPI_WRITE: |
251 | /* If we found an extent, return it */ | 237 | /* If we found an extent, return it */ |
252 | if (nimaps && | 238 | if (nimaps && |
253 | (imap.br_startblock != HOLESTARTBLOCK) && | 239 | (imap.br_startblock != HOLESTARTBLOCK) && |
254 | (imap.br_startblock != DELAYSTARTBLOCK)) { | 240 | (imap.br_startblock != DELAYSTARTBLOCK)) { |
255 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, | 241 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, |
256 | offset, count, iomapp, &imap, flags); | 242 | offset, count, iomapp, &imap, flags); |
257 | break; | 243 | break; |
258 | } | 244 | } |
259 | 245 | ||
260 | if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) { | 246 | if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) { |
261 | error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset, | 247 | error = xfs_iomap_write_direct(ip, offset, count, flags, |
262 | count, flags, &imap, &nimaps, nimaps); | 248 | &imap, &nimaps, nimaps); |
263 | } else { | 249 | } else { |
264 | error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, | 250 | error = xfs_iomap_write_delay(ip, offset, count, flags, |
265 | flags, &imap, &nimaps); | 251 | &imap, &nimaps); |
266 | } | 252 | } |
267 | if (!error) { | 253 | if (!error) { |
268 | xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, io, | 254 | xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip, |
269 | offset, count, iomapp, &imap, flags); | 255 | offset, count, iomapp, &imap, flags); |
270 | } | 256 | } |
271 | iomap_flags = IOMAP_NEW; | 257 | iomap_flags = IOMAP_NEW; |
272 | break; | 258 | break; |
273 | case BMAPI_ALLOCATE: | 259 | case BMAPI_ALLOCATE: |
274 | /* If we found an extent, return it */ | 260 | /* If we found an extent, return it */ |
275 | XFS_IUNLOCK(mp, io, lockmode); | 261 | xfs_iunlock(ip, lockmode); |
276 | lockmode = 0; | 262 | lockmode = 0; |
277 | 263 | ||
278 | if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) { | 264 | if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) { |
279 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, | 265 | xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, |
280 | offset, count, iomapp, &imap, flags); | 266 | offset, count, iomapp, &imap, flags); |
281 | break; | 267 | break; |
282 | } | 268 | } |
283 | 269 | ||
284 | error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, | 270 | error = xfs_iomap_write_allocate(ip, offset, count, |
285 | &imap, &nimaps); | 271 | &imap, &nimaps); |
286 | break; | 272 | break; |
287 | case BMAPI_UNWRITTEN: | ||
288 | lockmode = 0; | ||
289 | error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count); | ||
290 | nimaps = 0; | ||
291 | break; | ||
292 | } | 273 | } |
293 | 274 | ||
294 | if (nimaps) { | 275 | if (nimaps) { |
295 | *niomaps = xfs_imap_to_bmap(io, offset, &imap, | 276 | *niomaps = xfs_imap_to_bmap(ip, offset, &imap, |
296 | iomapp, nimaps, *niomaps, iomap_flags); | 277 | iomapp, nimaps, *niomaps, iomap_flags); |
297 | } else if (niomaps) { | 278 | } else if (niomaps) { |
298 | *niomaps = 0; | 279 | *niomaps = 0; |
@@ -300,14 +281,15 @@ phase2: | |||
300 | 281 | ||
301 | out: | 282 | out: |
302 | if (lockmode) | 283 | if (lockmode) |
303 | XFS_IUNLOCK(mp, io, lockmode); | 284 | xfs_iunlock(ip, lockmode); |
304 | return XFS_ERROR(error); | 285 | return XFS_ERROR(error); |
305 | } | 286 | } |
306 | 287 | ||
288 | |||
307 | STATIC int | 289 | STATIC int |
308 | xfs_iomap_eof_align_last_fsb( | 290 | xfs_iomap_eof_align_last_fsb( |
309 | xfs_mount_t *mp, | 291 | xfs_mount_t *mp, |
310 | xfs_iocore_t *io, | 292 | xfs_inode_t *ip, |
311 | xfs_fsize_t isize, | 293 | xfs_fsize_t isize, |
312 | xfs_extlen_t extsize, | 294 | xfs_extlen_t extsize, |
313 | xfs_fileoff_t *last_fsb) | 295 | xfs_fileoff_t *last_fsb) |
@@ -316,7 +298,7 @@ xfs_iomap_eof_align_last_fsb( | |||
316 | xfs_extlen_t align; | 298 | xfs_extlen_t align; |
317 | int eof, error; | 299 | int eof, error; |
318 | 300 | ||
319 | if (io->io_flags & XFS_IOCORE_RT) | 301 | if (XFS_IS_REALTIME_INODE(ip)) |
320 | ; | 302 | ; |
321 | /* | 303 | /* |
322 | * If mounted with the "-o swalloc" option, roundup the allocation | 304 | * If mounted with the "-o swalloc" option, roundup the allocation |
@@ -347,7 +329,7 @@ xfs_iomap_eof_align_last_fsb( | |||
347 | } | 329 | } |
348 | 330 | ||
349 | if (new_last_fsb) { | 331 | if (new_last_fsb) { |
350 | error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof); | 332 | error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); |
351 | if (error) | 333 | if (error) |
352 | return error; | 334 | return error; |
353 | if (eof) | 335 | if (eof) |
@@ -416,7 +398,6 @@ xfs_iomap_write_direct( | |||
416 | int found) | 398 | int found) |
417 | { | 399 | { |
418 | xfs_mount_t *mp = ip->i_mount; | 400 | xfs_mount_t *mp = ip->i_mount; |
419 | xfs_iocore_t *io = &ip->i_iocore; | ||
420 | xfs_fileoff_t offset_fsb; | 401 | xfs_fileoff_t offset_fsb; |
421 | xfs_fileoff_t last_fsb; | 402 | xfs_fileoff_t last_fsb; |
422 | xfs_filblks_t count_fsb, resaligned; | 403 | xfs_filblks_t count_fsb, resaligned; |
@@ -446,13 +427,13 @@ xfs_iomap_write_direct( | |||
446 | extsz = xfs_get_extsz_hint(ip); | 427 | extsz = xfs_get_extsz_hint(ip); |
447 | 428 | ||
448 | isize = ip->i_size; | 429 | isize = ip->i_size; |
449 | if (io->io_new_size > isize) | 430 | if (ip->i_new_size > isize) |
450 | isize = io->io_new_size; | 431 | isize = ip->i_new_size; |
451 | 432 | ||
452 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 433 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
453 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); | 434 | last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); |
454 | if ((offset + count) > isize) { | 435 | if ((offset + count) > isize) { |
455 | error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, | 436 | error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, |
456 | &last_fsb); | 437 | &last_fsb); |
457 | if (error) | 438 | if (error) |
458 | goto error_out; | 439 | goto error_out; |
@@ -519,7 +500,7 @@ xfs_iomap_write_direct( | |||
519 | */ | 500 | */ |
520 | XFS_BMAP_INIT(&free_list, &firstfsb); | 501 | XFS_BMAP_INIT(&free_list, &firstfsb); |
521 | nimaps = 1; | 502 | nimaps = 1; |
522 | error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, bmapi_flag, | 503 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, |
523 | &firstfsb, 0, &imap, &nimaps, &free_list, NULL); | 504 | &firstfsb, 0, &imap, &nimaps, &free_list, NULL); |
524 | if (error) | 505 | if (error) |
525 | goto error0; | 506 | goto error0; |
@@ -542,7 +523,8 @@ xfs_iomap_write_direct( | |||
542 | goto error_out; | 523 | goto error_out; |
543 | } | 524 | } |
544 | 525 | ||
545 | if (unlikely(!imap.br_startblock && !(io->io_flags & XFS_IOCORE_RT))) { | 526 | if (unlikely(!imap.br_startblock && |
527 | !(XFS_IS_REALTIME_INODE(ip)))) { | ||
546 | error = xfs_cmn_err_fsblock_zero(ip, &imap); | 528 | error = xfs_cmn_err_fsblock_zero(ip, &imap); |
547 | goto error_out; | 529 | goto error_out; |
548 | } | 530 | } |
@@ -577,7 +559,7 @@ error_out: | |||
577 | STATIC int | 559 | STATIC int |
578 | xfs_iomap_eof_want_preallocate( | 560 | xfs_iomap_eof_want_preallocate( |
579 | xfs_mount_t *mp, | 561 | xfs_mount_t *mp, |
580 | xfs_iocore_t *io, | 562 | xfs_inode_t *ip, |
581 | xfs_fsize_t isize, | 563 | xfs_fsize_t isize, |
582 | xfs_off_t offset, | 564 | xfs_off_t offset, |
583 | size_t count, | 565 | size_t count, |
@@ -604,7 +586,7 @@ xfs_iomap_eof_want_preallocate( | |||
604 | while (count_fsb > 0) { | 586 | while (count_fsb > 0) { |
605 | imaps = nimaps; | 587 | imaps = nimaps; |
606 | firstblock = NULLFSBLOCK; | 588 | firstblock = NULLFSBLOCK; |
607 | error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 0, | 589 | error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, |
608 | &firstblock, 0, imap, &imaps, NULL, NULL); | 590 | &firstblock, 0, imap, &imaps, NULL, NULL); |
609 | if (error) | 591 | if (error) |
610 | return error; | 592 | return error; |
@@ -630,7 +612,6 @@ xfs_iomap_write_delay( | |||
630 | int *nmaps) | 612 | int *nmaps) |
631 | { | 613 | { |
632 | xfs_mount_t *mp = ip->i_mount; | 614 | xfs_mount_t *mp = ip->i_mount; |
633 | xfs_iocore_t *io = &ip->i_iocore; | ||
634 | xfs_fileoff_t offset_fsb; | 615 | xfs_fileoff_t offset_fsb; |
635 | xfs_fileoff_t last_fsb; | 616 | xfs_fileoff_t last_fsb; |
636 | xfs_off_t aligned_offset; | 617 | xfs_off_t aligned_offset; |
@@ -658,10 +639,10 @@ xfs_iomap_write_delay( | |||
658 | 639 | ||
659 | retry: | 640 | retry: |
660 | isize = ip->i_size; | 641 | isize = ip->i_size; |
661 | if (io->io_new_size > isize) | 642 | if (ip->i_new_size > isize) |
662 | isize = io->io_new_size; | 643 | isize = ip->i_new_size; |
663 | 644 | ||
664 | error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count, | 645 | error = xfs_iomap_eof_want_preallocate(mp, ip, isize, offset, count, |
665 | ioflag, imap, XFS_WRITE_IMAPS, &prealloc); | 646 | ioflag, imap, XFS_WRITE_IMAPS, &prealloc); |
666 | if (error) | 647 | if (error) |
667 | return error; | 648 | return error; |
@@ -675,7 +656,7 @@ retry: | |||
675 | } | 656 | } |
676 | 657 | ||
677 | if (prealloc || extsz) { | 658 | if (prealloc || extsz) { |
678 | error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, | 659 | error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, |
679 | &last_fsb); | 660 | &last_fsb); |
680 | if (error) | 661 | if (error) |
681 | return error; | 662 | return error; |
@@ -683,7 +664,7 @@ retry: | |||
683 | 664 | ||
684 | nimaps = XFS_WRITE_IMAPS; | 665 | nimaps = XFS_WRITE_IMAPS; |
685 | firstblock = NULLFSBLOCK; | 666 | firstblock = NULLFSBLOCK; |
686 | error = XFS_BMAPI(mp, NULL, io, offset_fsb, | 667 | error = xfs_bmapi(NULL, ip, offset_fsb, |
687 | (xfs_filblks_t)(last_fsb - offset_fsb), | 668 | (xfs_filblks_t)(last_fsb - offset_fsb), |
688 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | | 669 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | |
689 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, | 670 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, |
@@ -697,7 +678,7 @@ retry: | |||
697 | */ | 678 | */ |
698 | if (nimaps == 0) { | 679 | if (nimaps == 0) { |
699 | xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, | 680 | xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, |
700 | io, offset, count); | 681 | ip, offset, count); |
701 | if (xfs_flush_space(ip, &fsynced, &ioflag)) | 682 | if (xfs_flush_space(ip, &fsynced, &ioflag)) |
702 | return XFS_ERROR(ENOSPC); | 683 | return XFS_ERROR(ENOSPC); |
703 | 684 | ||
@@ -705,7 +686,8 @@ retry: | |||
705 | goto retry; | 686 | goto retry; |
706 | } | 687 | } |
707 | 688 | ||
708 | if (unlikely(!imap[0].br_startblock && !(io->io_flags & XFS_IOCORE_RT))) | 689 | if (unlikely(!imap[0].br_startblock && |
690 | !(XFS_IS_REALTIME_INODE(ip)))) | ||
709 | return xfs_cmn_err_fsblock_zero(ip, &imap[0]); | 691 | return xfs_cmn_err_fsblock_zero(ip, &imap[0]); |
710 | 692 | ||
711 | *ret_imap = imap[0]; | 693 | *ret_imap = imap[0]; |
@@ -720,6 +702,9 @@ retry: | |||
720 | * the originating callers request. | 702 | * the originating callers request. |
721 | * | 703 | * |
722 | * Called without a lock on the inode. | 704 | * Called without a lock on the inode. |
705 | * | ||
706 | * We no longer bother to look at the incoming map - all we have to | ||
707 | * guarantee is that whatever we allocate fills the required range. | ||
723 | */ | 708 | */ |
724 | int | 709 | int |
725 | xfs_iomap_write_allocate( | 710 | xfs_iomap_write_allocate( |
@@ -730,15 +715,14 @@ xfs_iomap_write_allocate( | |||
730 | int *retmap) | 715 | int *retmap) |
731 | { | 716 | { |
732 | xfs_mount_t *mp = ip->i_mount; | 717 | xfs_mount_t *mp = ip->i_mount; |
733 | xfs_iocore_t *io = &ip->i_iocore; | ||
734 | xfs_fileoff_t offset_fsb, last_block; | 718 | xfs_fileoff_t offset_fsb, last_block; |
735 | xfs_fileoff_t end_fsb, map_start_fsb; | 719 | xfs_fileoff_t end_fsb, map_start_fsb; |
736 | xfs_fsblock_t first_block; | 720 | xfs_fsblock_t first_block; |
737 | xfs_bmap_free_t free_list; | 721 | xfs_bmap_free_t free_list; |
738 | xfs_filblks_t count_fsb; | 722 | xfs_filblks_t count_fsb; |
739 | xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS]; | 723 | xfs_bmbt_irec_t imap; |
740 | xfs_trans_t *tp; | 724 | xfs_trans_t *tp; |
741 | int i, nimaps, committed; | 725 | int nimaps, committed; |
742 | int error = 0; | 726 | int error = 0; |
743 | int nres; | 727 | int nres; |
744 | 728 | ||
@@ -785,13 +769,38 @@ xfs_iomap_write_allocate( | |||
785 | 769 | ||
786 | XFS_BMAP_INIT(&free_list, &first_block); | 770 | XFS_BMAP_INIT(&free_list, &first_block); |
787 | 771 | ||
788 | nimaps = XFS_STRAT_WRITE_IMAPS; | ||
789 | /* | 772 | /* |
790 | * Ensure we don't go beyond eof - it is possible | 773 | * it is possible that the extents have changed since |
791 | * the extents changed since we did the read call, | 774 | * we did the read call as we dropped the ilock for a |
792 | * we dropped the ilock in the interim. | 775 | * while. We have to be careful about truncates or hole |
776 | * punchs here - we are not allowed to allocate | ||
777 | * non-delalloc blocks here. | ||
778 | * | ||
779 | * The only protection against truncation is the pages | ||
780 | * for the range we are being asked to convert are | ||
781 | * locked and hence a truncate will block on them | ||
782 | * first. | ||
783 | * | ||
784 | * As a result, if we go beyond the range we really | ||
785 | * need and hit a delalloc extent boundary followed by | ||
786 | * a hole while we have excess blocks in the map, we | ||
787 | * will fill the hole incorrectly and overrun the | ||
788 | * transaction reservation. | ||
789 | * | ||
790 | * Using a single map prevents this as we are forced to | ||
791 | * check each map we look at for overlap with the desired | ||
792 | * range and abort as soon as we find it. Also, given | ||
793 | * that we only return a single map, having one beyond | ||
794 | * what we can return is probably a bit silly. | ||
795 | * | ||
796 | * We also need to check that we don't go beyond EOF; | ||
797 | * this is a truncate optimisation as a truncate sets | ||
798 | * the new file size before blocking on the pages we | ||
799 | * currently have locked under writeback. Because they | ||
800 | * are about to be tossed, we don't need to write them | ||
801 | * back.... | ||
793 | */ | 802 | */ |
794 | 803 | nimaps = 1; | |
795 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); | 804 | end_fsb = XFS_B_TO_FSB(mp, ip->i_size); |
796 | xfs_bmap_last_offset(NULL, ip, &last_block, | 805 | xfs_bmap_last_offset(NULL, ip, &last_block, |
797 | XFS_DATA_FORK); | 806 | XFS_DATA_FORK); |
@@ -805,9 +814,9 @@ xfs_iomap_write_allocate( | |||
805 | } | 814 | } |
806 | 815 | ||
807 | /* Go get the actual blocks */ | 816 | /* Go get the actual blocks */ |
808 | error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb, | 817 | error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, |
809 | XFS_BMAPI_WRITE, &first_block, 1, | 818 | XFS_BMAPI_WRITE, &first_block, 1, |
810 | imap, &nimaps, &free_list, NULL); | 819 | &imap, &nimaps, &free_list, NULL); |
811 | if (error) | 820 | if (error) |
812 | goto trans_cancel; | 821 | goto trans_cancel; |
813 | 822 | ||
@@ -826,27 +835,24 @@ xfs_iomap_write_allocate( | |||
826 | * See if we were able to allocate an extent that | 835 | * See if we were able to allocate an extent that |
827 | * covers at least part of the callers request | 836 | * covers at least part of the callers request |
828 | */ | 837 | */ |
829 | for (i = 0; i < nimaps; i++) { | 838 | if (unlikely(!imap.br_startblock && |
830 | if (unlikely(!imap[i].br_startblock && | 839 | XFS_IS_REALTIME_INODE(ip))) |
831 | !(io->io_flags & XFS_IOCORE_RT))) | 840 | return xfs_cmn_err_fsblock_zero(ip, &imap); |
832 | return xfs_cmn_err_fsblock_zero(ip, &imap[i]); | 841 | if ((offset_fsb >= imap.br_startoff) && |
833 | if ((offset_fsb >= imap[i].br_startoff) && | 842 | (offset_fsb < (imap.br_startoff + |
834 | (offset_fsb < (imap[i].br_startoff + | 843 | imap.br_blockcount))) { |
835 | imap[i].br_blockcount))) { | 844 | *map = imap; |
836 | *map = imap[i]; | 845 | *retmap = 1; |
837 | *retmap = 1; | 846 | XFS_STATS_INC(xs_xstrat_quick); |
838 | XFS_STATS_INC(xs_xstrat_quick); | 847 | return 0; |
839 | return 0; | ||
840 | } | ||
841 | count_fsb -= imap[i].br_blockcount; | ||
842 | } | 848 | } |
843 | 849 | ||
844 | /* So far we have not mapped the requested part of the | 850 | /* |
851 | * So far we have not mapped the requested part of the | ||
845 | * file, just surrounding data, try again. | 852 | * file, just surrounding data, try again. |
846 | */ | 853 | */ |
847 | nimaps--; | 854 | count_fsb -= imap.br_blockcount; |
848 | map_start_fsb = imap[nimaps].br_startoff + | 855 | map_start_fsb = imap.br_startoff + imap.br_blockcount; |
849 | imap[nimaps].br_blockcount; | ||
850 | } | 856 | } |
851 | 857 | ||
852 | trans_cancel: | 858 | trans_cancel: |
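The comment in the hunk above explains why xfs_iomap_write_allocate now asks xfs_bmapi() for a single map (nimaps = 1) and checks that one extent for overlap with the offset being converted before retrying. A minimal sketch of that overlap test, in plain C with invented type names rather than the XFS source:

#include <stdint.h>

/* Illustrative only: the "does the returned extent cover the offset we
 * asked for?" check that the single-map retry loop relies on. */
typedef uint64_t fileoff_sketch_t;   /* offset in filesystem blocks */
typedef uint64_t filblks_sketch_t;   /* length in filesystem blocks */

static int extent_covers(fileoff_sketch_t offset_fsb,
                         fileoff_sketch_t br_startoff,
                         filblks_sketch_t br_blockcount)
{
        /* mirrors: offset_fsb >= imap.br_startoff &&
         *          offset_fsb <  imap.br_startoff + imap.br_blockcount */
        return offset_fsb >= br_startoff &&
               offset_fsb < br_startoff + br_blockcount;
}

If the extent does not cover the requested offset, the loop advances map_start_fsb past the returned extent and tries again, which is what the tail of the hunk above does.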
@@ -864,7 +870,6 @@ xfs_iomap_write_unwritten( | |||
864 | size_t count) | 870 | size_t count) |
865 | { | 871 | { |
866 | xfs_mount_t *mp = ip->i_mount; | 872 | xfs_mount_t *mp = ip->i_mount; |
867 | xfs_iocore_t *io = &ip->i_iocore; | ||
868 | xfs_fileoff_t offset_fsb; | 873 | xfs_fileoff_t offset_fsb; |
869 | xfs_filblks_t count_fsb; | 874 | xfs_filblks_t count_fsb; |
870 | xfs_filblks_t numblks_fsb; | 875 | xfs_filblks_t numblks_fsb; |
@@ -877,8 +882,7 @@ xfs_iomap_write_unwritten( | |||
877 | int committed; | 882 | int committed; |
878 | int error; | 883 | int error; |
879 | 884 | ||
880 | xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, | 885 | xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count); |
881 | &ip->i_iocore, offset, count); | ||
882 | 886 | ||
883 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 887 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
884 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); | 888 | count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); |
@@ -912,7 +916,7 @@ xfs_iomap_write_unwritten( | |||
912 | */ | 916 | */ |
913 | XFS_BMAP_INIT(&free_list, &firstfsb); | 917 | XFS_BMAP_INIT(&free_list, &firstfsb); |
914 | nimaps = 1; | 918 | nimaps = 1; |
915 | error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, | 919 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, |
916 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, | 920 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, |
917 | 1, &imap, &nimaps, &free_list, NULL); | 921 | 1, &imap, &nimaps, &free_list, NULL); |
918 | if (error) | 922 | if (error) |
@@ -928,7 +932,7 @@ xfs_iomap_write_unwritten( | |||
928 | return XFS_ERROR(error); | 932 | return XFS_ERROR(error); |
929 | 933 | ||
930 | if (unlikely(!imap.br_startblock && | 934 | if (unlikely(!imap.br_startblock && |
931 | !(io->io_flags & XFS_IOCORE_RT))) | 935 | !(XFS_IS_REALTIME_INODE(ip)))) |
932 | return xfs_cmn_err_fsblock_zero(ip, &imap); | 936 | return xfs_cmn_err_fsblock_zero(ip, &imap); |
933 | 937 | ||
934 | if ((numblks_fsb = imap.br_blockcount) == 0) { | 938 | if ((numblks_fsb = imap.br_blockcount) == 0) { |
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index f5c09887fe93..ee1a0c134cc2 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h | |||
@@ -36,14 +36,12 @@ typedef enum { | |||
36 | BMAPI_READ = (1 << 0), /* read extents */ | 36 | BMAPI_READ = (1 << 0), /* read extents */ |
37 | BMAPI_WRITE = (1 << 1), /* create extents */ | 37 | BMAPI_WRITE = (1 << 1), /* create extents */ |
38 | BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ | 38 | BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ |
39 | BMAPI_UNWRITTEN = (1 << 3), /* unwritten extents to real extents */ | ||
40 | /* modifiers */ | 39 | /* modifiers */ |
41 | BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ | 40 | BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ |
42 | BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ | 41 | BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ |
43 | BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ | 42 | BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ |
44 | BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */ | 43 | BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */ |
45 | BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */ | 44 | BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */ |
46 | BMAPI_DEVICE = (1 << 9), /* we only want to know the device */ | ||
47 | } bmapi_flags_t; | 45 | } bmapi_flags_t; |
48 | 46 | ||
49 | 47 | ||
@@ -73,11 +71,10 @@ typedef struct xfs_iomap { | |||
73 | iomap_flags_t iomap_flags; | 71 | iomap_flags_t iomap_flags; |
74 | } xfs_iomap_t; | 72 | } xfs_iomap_t; |
75 | 73 | ||
76 | struct xfs_iocore; | ||
77 | struct xfs_inode; | 74 | struct xfs_inode; |
78 | struct xfs_bmbt_irec; | 75 | struct xfs_bmbt_irec; |
79 | 76 | ||
80 | extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int, | 77 | extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int, |
81 | struct xfs_iomap *, int *); | 78 | struct xfs_iomap *, int *); |
82 | extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, | 79 | extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, |
83 | int, struct xfs_bmbt_irec *, int *, int); | 80 | int, struct xfs_bmbt_irec *, int *, int); |
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 9fc4c2886529..658aab6b1bbf 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -170,7 +170,7 @@ xfs_bulkstat_one_dinode( | |||
170 | buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); | 170 | buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); |
171 | buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); | 171 | buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); |
172 | buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); | 172 | buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); |
173 | buf->bs_xflags = xfs_dic2xflags(dic); | 173 | buf->bs_xflags = xfs_dic2xflags(dip); |
174 | buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; | 174 | buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; |
175 | buf->bs_extents = be32_to_cpu(dic->di_nextents); | 175 | buf->bs_extents = be32_to_cpu(dic->di_nextents); |
176 | buf->bs_gen = be32_to_cpu(dic->di_gen); | 176 | buf->bs_gen = be32_to_cpu(dic->di_gen); |
@@ -291,7 +291,7 @@ xfs_bulkstat_use_dinode( | |||
291 | dip = (xfs_dinode_t *) | 291 | dip = (xfs_dinode_t *) |
292 | xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); | 292 | xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); |
293 | /* | 293 | /* |
294 | * Check the buffer containing the on-disk inode for di_nlink == 0. | 294 | * Check the buffer containing the on-disk inode for di_mode == 0. |
295 | * This is to prevent xfs_bulkstat from picking up just reclaimed | 295 | * This is to prevent xfs_bulkstat from picking up just reclaimed |
296 | * inodes that have their in-core state initialized but not flushed | 296 | * inodes that have their in-core state initialized but not flushed |
297 | * to disk yet. This is a temporary hack that would require a proper | 297 | * to disk yet. This is a temporary hack that would require a proper |
@@ -299,7 +299,7 @@ xfs_bulkstat_use_dinode( | |||
299 | */ | 299 | */ |
300 | if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC || | 300 | if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC || |
301 | !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) || | 301 | !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) || |
302 | !dip->di_core.di_nlink) | 302 | !dip->di_core.di_mode) |
303 | return 0; | 303 | return 0; |
304 | if (flags & BULKSTAT_FG_QUICK) { | 304 | if (flags & BULKSTAT_FG_QUICK) { |
305 | *dipp = dip; | 305 | *dipp = dip; |
@@ -307,7 +307,7 @@ xfs_bulkstat_use_dinode( | |||
307 | } | 307 | } |
308 | /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ | 308 | /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ |
309 | aformat = dip->di_core.di_aformat; | 309 | aformat = dip->di_core.di_aformat; |
310 | if ((XFS_CFORK_Q(&dip->di_core) == 0) || | 310 | if ((XFS_DFORK_Q(dip) == 0) || |
311 | (aformat == XFS_DINODE_FMT_LOCAL) || | 311 | (aformat == XFS_DINODE_FMT_LOCAL) || |
312 | (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) { | 312 | (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) { |
313 | *dipp = dip; | 313 | *dipp = dip; |
@@ -399,7 +399,7 @@ xfs_bulkstat( | |||
399 | (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); | 399 | (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); |
400 | nimask = ~(nicluster - 1); | 400 | nimask = ~(nicluster - 1); |
401 | nbcluster = nicluster >> mp->m_sb.sb_inopblog; | 401 | nbcluster = nicluster >> mp->m_sb.sb_inopblog; |
402 | irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4, | 402 | irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4, |
403 | KM_SLEEP | KM_MAYFAIL | KM_LARGE); | 403 | KM_SLEEP | KM_MAYFAIL | KM_LARGE); |
404 | nirbuf = irbsize / sizeof(*irbuf); | 404 | nirbuf = irbsize / sizeof(*irbuf); |
405 | 405 | ||
@@ -830,7 +830,7 @@ xfs_inumbers( | |||
830 | agino = XFS_INO_TO_AGINO(mp, ino); | 830 | agino = XFS_INO_TO_AGINO(mp, ino); |
831 | left = *count; | 831 | left = *count; |
832 | *count = 0; | 832 | *count = 0; |
833 | bcount = MIN(left, (int)(NBPP / sizeof(*buffer))); | 833 | bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); |
834 | buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); | 834 | buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); |
835 | error = bufidx = 0; | 835 | error = bufidx = 0; |
836 | cur = NULL; | 836 | cur = NULL; |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 77c12715a7d0..a75edca1860f 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -399,10 +399,10 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ | |||
399 | { | 399 | { |
400 | xlog_t *log = mp->m_log; | 400 | xlog_t *log = mp->m_log; |
401 | xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; | 401 | xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; |
402 | int abortflg, spl; | 402 | int abortflg; |
403 | 403 | ||
404 | cb->cb_next = NULL; | 404 | cb->cb_next = NULL; |
405 | spl = LOG_LOCK(log); | 405 | spin_lock(&log->l_icloglock); |
406 | abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); | 406 | abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); |
407 | if (!abortflg) { | 407 | if (!abortflg) { |
408 | ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || | 408 | ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || |
@@ -411,7 +411,7 @@ xfs_log_notify(xfs_mount_t *mp, /* mount of partition */ | |||
411 | *(iclog->ic_callback_tail) = cb; | 411 | *(iclog->ic_callback_tail) = cb; |
412 | iclog->ic_callback_tail = &(cb->cb_next); | 412 | iclog->ic_callback_tail = &(cb->cb_next); |
413 | } | 413 | } |
414 | LOG_UNLOCK(log, spl); | 414 | spin_unlock(&log->l_icloglock); |
415 | return abortflg; | 415 | return abortflg; |
416 | } /* xfs_log_notify */ | 416 | } /* xfs_log_notify */ |
417 | 417 | ||
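This is the first of many hunks in xfs_log.c that drop the SPLDECL()/LOG_LOCK()/LOG_UNLOCK() wrappers in favour of calling spin_lock()/spin_unlock() directly on the named lock, so the saved-flags local ("spl"/"s") disappears. A userspace analogy of the resulting shape (pthread mutex standing in for the kernel spinlock, not XFS code):

#include <pthread.h>
#include <stdio.h>

/* Analogy only: the lock is named explicitly and no saved interrupt
 * level is carried around any more. */
static pthread_mutex_t icloglock = PTHREAD_MUTEX_INITIALIZER;
static int ic_state;

static void mark_state(int bits)
{
        pthread_mutex_lock(&icloglock);     /* was: spl = LOG_LOCK(log)  */
        ic_state |= bits;
        pthread_mutex_unlock(&icloglock);   /* was: LOG_UNLOCK(log, spl) */
}

int main(void)
{
        mark_state(1);
        printf("state=%d\n", ic_state);
        return 0;
}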
@@ -498,11 +498,14 @@ xfs_log_reserve(xfs_mount_t *mp, | |||
498 | * Return error or zero. | 498 | * Return error or zero. |
499 | */ | 499 | */ |
500 | int | 500 | int |
501 | xfs_log_mount(xfs_mount_t *mp, | 501 | xfs_log_mount( |
502 | xfs_buftarg_t *log_target, | 502 | xfs_mount_t *mp, |
503 | xfs_daddr_t blk_offset, | 503 | xfs_buftarg_t *log_target, |
504 | int num_bblks) | 504 | xfs_daddr_t blk_offset, |
505 | int num_bblks) | ||
505 | { | 506 | { |
507 | int error; | ||
508 | |||
506 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) | 509 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) |
507 | cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); | 510 | cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); |
508 | else { | 511 | else { |
@@ -515,11 +518,21 @@ xfs_log_mount(xfs_mount_t *mp, | |||
515 | mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); | 518 | mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); |
516 | 519 | ||
517 | /* | 520 | /* |
521 | * Initialize the AIL now we have a log. | ||
522 | */ | ||
523 | spin_lock_init(&mp->m_ail_lock); | ||
524 | error = xfs_trans_ail_init(mp); | ||
525 | if (error) { | ||
526 | cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); | ||
527 | goto error; | ||
528 | } | ||
529 | |||
530 | /* | ||
518 | * skip log recovery on a norecovery mount. pretend it all | 531 | * skip log recovery on a norecovery mount. pretend it all |
519 | * just worked. | 532 | * just worked. |
520 | */ | 533 | */ |
521 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { | 534 | if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { |
522 | int error, readonly = (mp->m_flags & XFS_MOUNT_RDONLY); | 535 | int readonly = (mp->m_flags & XFS_MOUNT_RDONLY); |
523 | 536 | ||
524 | if (readonly) | 537 | if (readonly) |
525 | mp->m_flags &= ~XFS_MOUNT_RDONLY; | 538 | mp->m_flags &= ~XFS_MOUNT_RDONLY; |
@@ -530,8 +543,7 @@ xfs_log_mount(xfs_mount_t *mp, | |||
530 | mp->m_flags |= XFS_MOUNT_RDONLY; | 543 | mp->m_flags |= XFS_MOUNT_RDONLY; |
531 | if (error) { | 544 | if (error) { |
532 | cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); | 545 | cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); |
533 | xlog_dealloc_log(mp->m_log); | 546 | goto error; |
534 | return error; | ||
535 | } | 547 | } |
536 | } | 548 | } |
537 | 549 | ||
@@ -540,6 +552,9 @@ xfs_log_mount(xfs_mount_t *mp, | |||
540 | 552 | ||
541 | /* End mounting message in xfs_log_mount_finish */ | 553 | /* End mounting message in xfs_log_mount_finish */ |
542 | return 0; | 554 | return 0; |
555 | error: | ||
556 | xfs_log_unmount_dealloc(mp); | ||
557 | return error; | ||
543 | } /* xfs_log_mount */ | 558 | } /* xfs_log_mount */ |
544 | 559 | ||
545 | /* | 560 | /* |
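The hunks above add AIL initialisation to xfs_log_mount and route both the AIL-init failure and the recovery failure through a single error label that ends in xfs_log_unmount_dealloc. A toy model of that unwinding shape (plain C, stub names invented for illustration):

#include <stdio.h>

static int ail_init_stub(void)  { return 0; }
static int recovery_stub(void)  { return -5; }        /* pretend recovery fails */
static void dealloc_stub(void)  { puts("dealloc"); }  /* ~ xfs_log_unmount_dealloc() */

static int log_mount_like(void)
{
        int error;

        error = ail_init_stub();
        if (error)
                goto out_error;

        error = recovery_stub();
        if (error)
                goto out_error;

        return 0;

out_error:
        dealloc_stub();         /* one cleanup path for every failure */
        return error;
}

int main(void) { return log_mount_like() ? 1 : 0; }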
@@ -606,7 +621,6 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
606 | xfs_log_ticket_t tic = NULL; | 621 | xfs_log_ticket_t tic = NULL; |
607 | xfs_lsn_t lsn; | 622 | xfs_lsn_t lsn; |
608 | int error; | 623 | int error; |
609 | SPLDECL(s); | ||
610 | 624 | ||
611 | /* the data section must be 32 bit size aligned */ | 625 | /* the data section must be 32 bit size aligned */ |
612 | struct { | 626 | struct { |
@@ -659,24 +673,24 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
659 | } | 673 | } |
660 | 674 | ||
661 | 675 | ||
662 | s = LOG_LOCK(log); | 676 | spin_lock(&log->l_icloglock); |
663 | iclog = log->l_iclog; | 677 | iclog = log->l_iclog; |
664 | iclog->ic_refcnt++; | 678 | iclog->ic_refcnt++; |
665 | LOG_UNLOCK(log, s); | 679 | spin_unlock(&log->l_icloglock); |
666 | xlog_state_want_sync(log, iclog); | 680 | xlog_state_want_sync(log, iclog); |
667 | (void) xlog_state_release_iclog(log, iclog); | 681 | (void) xlog_state_release_iclog(log, iclog); |
668 | 682 | ||
669 | s = LOG_LOCK(log); | 683 | spin_lock(&log->l_icloglock); |
670 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || | 684 | if (!(iclog->ic_state == XLOG_STATE_ACTIVE || |
671 | iclog->ic_state == XLOG_STATE_DIRTY)) { | 685 | iclog->ic_state == XLOG_STATE_DIRTY)) { |
672 | if (!XLOG_FORCED_SHUTDOWN(log)) { | 686 | if (!XLOG_FORCED_SHUTDOWN(log)) { |
673 | sv_wait(&iclog->ic_forcesema, PMEM, | 687 | sv_wait(&iclog->ic_forcesema, PMEM, |
674 | &log->l_icloglock, s); | 688 | &log->l_icloglock, s); |
675 | } else { | 689 | } else { |
676 | LOG_UNLOCK(log, s); | 690 | spin_unlock(&log->l_icloglock); |
677 | } | 691 | } |
678 | } else { | 692 | } else { |
679 | LOG_UNLOCK(log, s); | 693 | spin_unlock(&log->l_icloglock); |
680 | } | 694 | } |
681 | if (tic) { | 695 | if (tic) { |
682 | xlog_trace_loggrant(log, tic, "unmount rec"); | 696 | xlog_trace_loggrant(log, tic, "unmount rec"); |
@@ -697,15 +711,15 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
697 | * a file system that went into forced_shutdown as | 711 | * a file system that went into forced_shutdown as |
698 | * the result of an unmount.. | 712 | * the result of an unmount.. |
699 | */ | 713 | */ |
700 | s = LOG_LOCK(log); | 714 | spin_lock(&log->l_icloglock); |
701 | iclog = log->l_iclog; | 715 | iclog = log->l_iclog; |
702 | iclog->ic_refcnt++; | 716 | iclog->ic_refcnt++; |
703 | LOG_UNLOCK(log, s); | 717 | spin_unlock(&log->l_icloglock); |
704 | 718 | ||
705 | xlog_state_want_sync(log, iclog); | 719 | xlog_state_want_sync(log, iclog); |
706 | (void) xlog_state_release_iclog(log, iclog); | 720 | (void) xlog_state_release_iclog(log, iclog); |
707 | 721 | ||
708 | s = LOG_LOCK(log); | 722 | spin_lock(&log->l_icloglock); |
709 | 723 | ||
710 | if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE | 724 | if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE |
711 | || iclog->ic_state == XLOG_STATE_DIRTY | 725 | || iclog->ic_state == XLOG_STATE_DIRTY |
@@ -714,7 +728,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
714 | sv_wait(&iclog->ic_forcesema, PMEM, | 728 | sv_wait(&iclog->ic_forcesema, PMEM, |
715 | &log->l_icloglock, s); | 729 | &log->l_icloglock, s); |
716 | } else { | 730 | } else { |
717 | LOG_UNLOCK(log, s); | 731 | spin_unlock(&log->l_icloglock); |
718 | } | 732 | } |
719 | } | 733 | } |
720 | 734 | ||
@@ -723,10 +737,14 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
723 | 737 | ||
724 | /* | 738 | /* |
725 | * Deallocate log structures for unmount/relocation. | 739 | * Deallocate log structures for unmount/relocation. |
740 | * | ||
741 | * We need to stop the aild from running before we destroy | ||
742 | * and deallocate the log as the aild references the log. | ||
726 | */ | 743 | */ |
727 | void | 744 | void |
728 | xfs_log_unmount_dealloc(xfs_mount_t *mp) | 745 | xfs_log_unmount_dealloc(xfs_mount_t *mp) |
729 | { | 746 | { |
747 | xfs_trans_ail_destroy(mp); | ||
730 | xlog_dealloc_log(mp->m_log); | 748 | xlog_dealloc_log(mp->m_log); |
731 | } | 749 | } |
732 | 750 | ||
@@ -762,20 +780,18 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
762 | xlog_ticket_t *tic; | 780 | xlog_ticket_t *tic; |
763 | xlog_t *log = mp->m_log; | 781 | xlog_t *log = mp->m_log; |
764 | int need_bytes, free_bytes, cycle, bytes; | 782 | int need_bytes, free_bytes, cycle, bytes; |
765 | SPLDECL(s); | ||
766 | 783 | ||
767 | if (XLOG_FORCED_SHUTDOWN(log)) | 784 | if (XLOG_FORCED_SHUTDOWN(log)) |
768 | return; | 785 | return; |
769 | ASSERT(!XFS_FORCED_SHUTDOWN(mp)); | ||
770 | 786 | ||
771 | if (tail_lsn == 0) { | 787 | if (tail_lsn == 0) { |
772 | /* needed since sync_lsn is 64 bits */ | 788 | /* needed since sync_lsn is 64 bits */ |
773 | s = LOG_LOCK(log); | 789 | spin_lock(&log->l_icloglock); |
774 | tail_lsn = log->l_last_sync_lsn; | 790 | tail_lsn = log->l_last_sync_lsn; |
775 | LOG_UNLOCK(log, s); | 791 | spin_unlock(&log->l_icloglock); |
776 | } | 792 | } |
777 | 793 | ||
778 | s = GRANT_LOCK(log); | 794 | spin_lock(&log->l_grant_lock); |
779 | 795 | ||
780 | /* Also an invalid lsn. 1 implies that we aren't passing in a valid | 796 | /* Also an invalid lsn. 1 implies that we aren't passing in a valid |
781 | * tail_lsn. | 797 | * tail_lsn. |
@@ -824,7 +840,7 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
824 | tic = tic->t_next; | 840 | tic = tic->t_next; |
825 | } while (tic != log->l_reserve_headq); | 841 | } while (tic != log->l_reserve_headq); |
826 | } | 842 | } |
827 | GRANT_UNLOCK(log, s); | 843 | spin_unlock(&log->l_grant_lock); |
828 | } /* xfs_log_move_tail */ | 844 | } /* xfs_log_move_tail */ |
829 | 845 | ||
830 | /* | 846 | /* |
@@ -836,14 +852,13 @@ xfs_log_move_tail(xfs_mount_t *mp, | |||
836 | int | 852 | int |
837 | xfs_log_need_covered(xfs_mount_t *mp) | 853 | xfs_log_need_covered(xfs_mount_t *mp) |
838 | { | 854 | { |
839 | SPLDECL(s); | ||
840 | int needed = 0, gen; | 855 | int needed = 0, gen; |
841 | xlog_t *log = mp->m_log; | 856 | xlog_t *log = mp->m_log; |
842 | 857 | ||
843 | if (!xfs_fs_writable(mp)) | 858 | if (!xfs_fs_writable(mp)) |
844 | return 0; | 859 | return 0; |
845 | 860 | ||
846 | s = LOG_LOCK(log); | 861 | spin_lock(&log->l_icloglock); |
847 | if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || | 862 | if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || |
848 | (log->l_covered_state == XLOG_STATE_COVER_NEED2)) | 863 | (log->l_covered_state == XLOG_STATE_COVER_NEED2)) |
849 | && !xfs_trans_first_ail(mp, &gen) | 864 | && !xfs_trans_first_ail(mp, &gen) |
@@ -856,7 +871,7 @@ xfs_log_need_covered(xfs_mount_t *mp) | |||
856 | } | 871 | } |
857 | needed = 1; | 872 | needed = 1; |
858 | } | 873 | } |
859 | LOG_UNLOCK(log, s); | 874 | spin_unlock(&log->l_icloglock); |
860 | return needed; | 875 | return needed; |
861 | } | 876 | } |
862 | 877 | ||
@@ -881,17 +896,16 @@ xfs_lsn_t | |||
881 | xlog_assign_tail_lsn(xfs_mount_t *mp) | 896 | xlog_assign_tail_lsn(xfs_mount_t *mp) |
882 | { | 897 | { |
883 | xfs_lsn_t tail_lsn; | 898 | xfs_lsn_t tail_lsn; |
884 | SPLDECL(s); | ||
885 | xlog_t *log = mp->m_log; | 899 | xlog_t *log = mp->m_log; |
886 | 900 | ||
887 | tail_lsn = xfs_trans_tail_ail(mp); | 901 | tail_lsn = xfs_trans_tail_ail(mp); |
888 | s = GRANT_LOCK(log); | 902 | spin_lock(&log->l_grant_lock); |
889 | if (tail_lsn != 0) { | 903 | if (tail_lsn != 0) { |
890 | log->l_tail_lsn = tail_lsn; | 904 | log->l_tail_lsn = tail_lsn; |
891 | } else { | 905 | } else { |
892 | tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; | 906 | tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; |
893 | } | 907 | } |
894 | GRANT_UNLOCK(log, s); | 908 | spin_unlock(&log->l_grant_lock); |
895 | 909 | ||
896 | return tail_lsn; | 910 | return tail_lsn; |
897 | } /* xlog_assign_tail_lsn */ | 911 | } /* xlog_assign_tail_lsn */ |
@@ -911,7 +925,7 @@ xlog_assign_tail_lsn(xfs_mount_t *mp) | |||
911 | * the tail. The details of this case are described below, but the end | 925 | * the tail. The details of this case are described below, but the end |
912 | * result is that we return the size of the log as the amount of space left. | 926 | * result is that we return the size of the log as the amount of space left. |
913 | */ | 927 | */ |
914 | int | 928 | STATIC int |
915 | xlog_space_left(xlog_t *log, int cycle, int bytes) | 929 | xlog_space_left(xlog_t *log, int cycle, int bytes) |
916 | { | 930 | { |
917 | int free_bytes; | 931 | int free_bytes; |
@@ -1165,7 +1179,7 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1165 | log->l_flags |= XLOG_ACTIVE_RECOVERY; | 1179 | log->l_flags |= XLOG_ACTIVE_RECOVERY; |
1166 | 1180 | ||
1167 | log->l_prev_block = -1; | 1181 | log->l_prev_block = -1; |
1168 | ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, 1, 0); | 1182 | log->l_tail_lsn = xlog_assign_lsn(1, 0); |
1169 | /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ | 1183 | /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ |
1170 | log->l_last_sync_lsn = log->l_tail_lsn; | 1184 | log->l_last_sync_lsn = log->l_tail_lsn; |
1171 | log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ | 1185 | log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ |
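ASSIGN_ANY_LSN_HOST(lsn, cycle, block) is replaced here by the plain expression xlog_assign_lsn(cycle, block). Judging from how BLOCK_LSN() and the cycle counter are used around it, an LSN is a 64-bit value that packs the cycle above the block number; a small sketch of that packing (an inference from the surrounding code, not copied from the XFS headers):

#include <stdint.h>

typedef uint64_t lsn_sketch_t;

/* assumed layout: cycle in the upper 32 bits, block in the lower 32 bits */
static inline lsn_sketch_t lsn_assign(uint32_t cycle, uint32_t block)
{
        return ((lsn_sketch_t)cycle << 32) | block;
}

static inline uint32_t lsn_cycle(lsn_sketch_t lsn) { return (uint32_t)(lsn >> 32); }
static inline uint32_t lsn_block(lsn_sketch_t lsn) { return (uint32_t)lsn; }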
@@ -1193,8 +1207,8 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1193 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); | 1207 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); |
1194 | log->l_xbuf = bp; | 1208 | log->l_xbuf = bp; |
1195 | 1209 | ||
1196 | spinlock_init(&log->l_icloglock, "iclog"); | 1210 | spin_lock_init(&log->l_icloglock); |
1197 | spinlock_init(&log->l_grant_lock, "grhead_iclog"); | 1211 | spin_lock_init(&log->l_grant_lock); |
1198 | initnsema(&log->l_flushsema, 0, "ic-flush"); | 1212 | initnsema(&log->l_flushsema, 0, "ic-flush"); |
1199 | xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ | 1213 | xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ |
1200 | 1214 | ||
@@ -1231,12 +1245,12 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1231 | 1245 | ||
1232 | head = &iclog->ic_header; | 1246 | head = &iclog->ic_header; |
1233 | memset(head, 0, sizeof(xlog_rec_header_t)); | 1247 | memset(head, 0, sizeof(xlog_rec_header_t)); |
1234 | INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); | 1248 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
1235 | INT_SET(head->h_version, ARCH_CONVERT, | 1249 | head->h_version = cpu_to_be32( |
1236 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); | 1250 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); |
1237 | INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size); | 1251 | head->h_size = cpu_to_be32(log->l_iclog_size); |
1238 | /* new fields */ | 1252 | /* new fields */ |
1239 | INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); | 1253 | head->h_fmt = cpu_to_be32(XLOG_FMT); |
1240 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); | 1254 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); |
1241 | 1255 | ||
1242 | 1256 | ||
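Here and through the rest of the file the INT_SET/INT_GET/INT_MOD macros with ARCH_CONVERT give way to explicit cpu_to_be32()/be64_to_cpu()/be32_add_cpu() calls: the on-disk log record header is big-endian, and the conversion is now spelled out at each access. A userspace sketch of the same round trip (glibc htobe32/be32toh standing in for the kernel helpers):

#include <stdint.h>
#include <endian.h>             /* htobe32()/be32toh(); glibc assumption */

struct rec_header_sketch {
        uint32_t h_magicno;     /* stored big-endian on disk */
};

static void set_magic(struct rec_header_sketch *h, uint32_t magic)
{
        h->h_magicno = htobe32(magic);          /* kernel: cpu_to_be32() */
}

static uint32_t get_magic(const struct rec_header_sketch *h)
{
        return be32toh(h->h_magicno);           /* kernel: be32_to_cpu() */
}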
@@ -1293,7 +1307,7 @@ xlog_commit_record(xfs_mount_t *mp, | |||
1293 | * pushes on an lsn which is further along in the log once we reach the high | 1307 | * pushes on an lsn which is further along in the log once we reach the high |
1294 | * water mark. In this manner, we would be creating a low water mark. | 1308 | * water mark. In this manner, we would be creating a low water mark. |
1295 | */ | 1309 | */ |
1296 | void | 1310 | STATIC void |
1297 | xlog_grant_push_ail(xfs_mount_t *mp, | 1311 | xlog_grant_push_ail(xfs_mount_t *mp, |
1298 | int need_bytes) | 1312 | int need_bytes) |
1299 | { | 1313 | { |
@@ -1305,11 +1319,10 @@ xlog_grant_push_ail(xfs_mount_t *mp, | |||
1305 | int threshold_block; /* block in lsn we'd like to be at */ | 1319 | int threshold_block; /* block in lsn we'd like to be at */ |
1306 | int threshold_cycle; /* lsn cycle we'd like to be at */ | 1320 | int threshold_cycle; /* lsn cycle we'd like to be at */ |
1307 | int free_threshold; | 1321 | int free_threshold; |
1308 | SPLDECL(s); | ||
1309 | 1322 | ||
1310 | ASSERT(BTOBB(need_bytes) < log->l_logBBsize); | 1323 | ASSERT(BTOBB(need_bytes) < log->l_logBBsize); |
1311 | 1324 | ||
1312 | s = GRANT_LOCK(log); | 1325 | spin_lock(&log->l_grant_lock); |
1313 | free_bytes = xlog_space_left(log, | 1326 | free_bytes = xlog_space_left(log, |
1314 | log->l_grant_reserve_cycle, | 1327 | log->l_grant_reserve_cycle, |
1315 | log->l_grant_reserve_bytes); | 1328 | log->l_grant_reserve_bytes); |
@@ -1331,8 +1344,7 @@ xlog_grant_push_ail(xfs_mount_t *mp, | |||
1331 | threshold_block -= log->l_logBBsize; | 1344 | threshold_block -= log->l_logBBsize; |
1332 | threshold_cycle += 1; | 1345 | threshold_cycle += 1; |
1333 | } | 1346 | } |
1334 | ASSIGN_ANY_LSN_HOST(threshold_lsn, threshold_cycle, | 1347 | threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block); |
1335 | threshold_block); | ||
1336 | 1348 | ||
1337 | /* Don't pass in an lsn greater than the lsn of the last | 1349 | /* Don't pass in an lsn greater than the lsn of the last |
1338 | * log record known to be on disk. | 1350 | * log record known to be on disk. |
@@ -1340,7 +1352,7 @@ xlog_grant_push_ail(xfs_mount_t *mp, | |||
1340 | if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0) | 1352 | if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0) |
1341 | threshold_lsn = log->l_last_sync_lsn; | 1353 | threshold_lsn = log->l_last_sync_lsn; |
1342 | } | 1354 | } |
1343 | GRANT_UNLOCK(log, s); | 1355 | spin_unlock(&log->l_grant_lock); |
1344 | 1356 | ||
1345 | /* | 1357 | /* |
1346 | * Get the transaction layer to kick the dirty buffers out to | 1358 | * Get the transaction layer to kick the dirty buffers out to |
@@ -1378,19 +1390,18 @@ xlog_grant_push_ail(xfs_mount_t *mp, | |||
1378 | * is added immediately before calling bwrite(). | 1390 | * is added immediately before calling bwrite(). |
1379 | */ | 1391 | */ |
1380 | 1392 | ||
1381 | int | 1393 | STATIC int |
1382 | xlog_sync(xlog_t *log, | 1394 | xlog_sync(xlog_t *log, |
1383 | xlog_in_core_t *iclog) | 1395 | xlog_in_core_t *iclog) |
1384 | { | 1396 | { |
1385 | xfs_caddr_t dptr; /* pointer to byte sized element */ | 1397 | xfs_caddr_t dptr; /* pointer to byte sized element */ |
1386 | xfs_buf_t *bp; | 1398 | xfs_buf_t *bp; |
1387 | int i, ops; | 1399 | int i; |
1388 | uint count; /* byte count of bwrite */ | 1400 | uint count; /* byte count of bwrite */ |
1389 | uint count_init; /* initial count before roundup */ | 1401 | uint count_init; /* initial count before roundup */ |
1390 | int roundoff; /* roundoff to BB or stripe */ | 1402 | int roundoff; /* roundoff to BB or stripe */ |
1391 | int split = 0; /* split write into two regions */ | 1403 | int split = 0; /* split write into two regions */ |
1392 | int error; | 1404 | int error; |
1393 | SPLDECL(s); | ||
1394 | int v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb); | 1405 | int v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb); |
1395 | 1406 | ||
1396 | XFS_STATS_INC(xs_log_writes); | 1407 | XFS_STATS_INC(xs_log_writes); |
@@ -1415,30 +1426,26 @@ xlog_sync(xlog_t *log, | |||
1415 | roundoff < BBTOB(1))); | 1426 | roundoff < BBTOB(1))); |
1416 | 1427 | ||
1417 | /* move grant heads by roundoff in sync */ | 1428 | /* move grant heads by roundoff in sync */ |
1418 | s = GRANT_LOCK(log); | 1429 | spin_lock(&log->l_grant_lock); |
1419 | xlog_grant_add_space(log, roundoff); | 1430 | xlog_grant_add_space(log, roundoff); |
1420 | GRANT_UNLOCK(log, s); | 1431 | spin_unlock(&log->l_grant_lock); |
1421 | 1432 | ||
1422 | /* put cycle number in every block */ | 1433 | /* put cycle number in every block */ |
1423 | xlog_pack_data(log, iclog, roundoff); | 1434 | xlog_pack_data(log, iclog, roundoff); |
1424 | 1435 | ||
1425 | /* real byte length */ | 1436 | /* real byte length */ |
1426 | if (v2) { | 1437 | if (v2) { |
1427 | INT_SET(iclog->ic_header.h_len, | 1438 | iclog->ic_header.h_len = |
1428 | ARCH_CONVERT, | 1439 | cpu_to_be32(iclog->ic_offset + roundoff); |
1429 | iclog->ic_offset + roundoff); | ||
1430 | } else { | 1440 | } else { |
1431 | INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset); | 1441 | iclog->ic_header.h_len = |
1442 | cpu_to_be32(iclog->ic_offset); | ||
1432 | } | 1443 | } |
1433 | 1444 | ||
1434 | /* put ops count in correct order */ | ||
1435 | ops = iclog->ic_header.h_num_logops; | ||
1436 | INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); | ||
1437 | |||
1438 | bp = iclog->ic_bp; | 1445 | bp = iclog->ic_bp; |
1439 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); | 1446 | ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); |
1440 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); | 1447 | XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); |
1441 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); | 1448 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); |
1442 | 1449 | ||
1443 | XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); | 1450 | XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); |
1444 | 1451 | ||
@@ -1501,10 +1508,10 @@ xlog_sync(xlog_t *log, | |||
1501 | * a new cycle. Watch out for the header magic number | 1508 | * a new cycle. Watch out for the header magic number |
1502 | * case, though. | 1509 | * case, though. |
1503 | */ | 1510 | */ |
1504 | for (i=0; i<split; i += BBSIZE) { | 1511 | for (i = 0; i < split; i += BBSIZE) { |
1505 | INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); | 1512 | be32_add_cpu((__be32 *)dptr, 1); |
1506 | if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) | 1513 | if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM) |
1507 | INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); | 1514 | be32_add_cpu((__be32 *)dptr, 1); |
1508 | dptr += BBSIZE; | 1515 | dptr += BBSIZE; |
1509 | } | 1516 | } |
1510 | 1517 | ||
@@ -1527,14 +1534,13 @@ xlog_sync(xlog_t *log, | |||
1527 | /* | 1534 | /* |
1528 | * Deallocate a log structure | 1535 | * Deallocate a log structure |
1529 | */ | 1536 | */ |
1530 | void | 1537 | STATIC void |
1531 | xlog_dealloc_log(xlog_t *log) | 1538 | xlog_dealloc_log(xlog_t *log) |
1532 | { | 1539 | { |
1533 | xlog_in_core_t *iclog, *next_iclog; | 1540 | xlog_in_core_t *iclog, *next_iclog; |
1534 | xlog_ticket_t *tic, *next_tic; | 1541 | xlog_ticket_t *tic, *next_tic; |
1535 | int i; | 1542 | int i; |
1536 | 1543 | ||
1537 | |||
1538 | iclog = log->l_iclog; | 1544 | iclog = log->l_iclog; |
1539 | for (i=0; i<log->l_iclog_bufs; i++) { | 1545 | for (i=0; i<log->l_iclog_bufs; i++) { |
1540 | sv_destroy(&iclog->ic_forcesema); | 1546 | sv_destroy(&iclog->ic_forcesema); |
@@ -1565,7 +1571,7 @@ xlog_dealloc_log(xlog_t *log) | |||
1565 | tic = log->l_unmount_free; | 1571 | tic = log->l_unmount_free; |
1566 | while (tic) { | 1572 | while (tic) { |
1567 | next_tic = tic->t_next; | 1573 | next_tic = tic->t_next; |
1568 | kmem_free(tic, NBPP); | 1574 | kmem_free(tic, PAGE_SIZE); |
1569 | tic = next_tic; | 1575 | tic = next_tic; |
1570 | } | 1576 | } |
1571 | } | 1577 | } |
@@ -1592,14 +1598,12 @@ xlog_state_finish_copy(xlog_t *log, | |||
1592 | int record_cnt, | 1598 | int record_cnt, |
1593 | int copy_bytes) | 1599 | int copy_bytes) |
1594 | { | 1600 | { |
1595 | SPLDECL(s); | 1601 | spin_lock(&log->l_icloglock); |
1596 | 1602 | ||
1597 | s = LOG_LOCK(log); | 1603 | be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); |
1598 | |||
1599 | iclog->ic_header.h_num_logops += record_cnt; | ||
1600 | iclog->ic_offset += copy_bytes; | 1604 | iclog->ic_offset += copy_bytes; |
1601 | 1605 | ||
1602 | LOG_UNLOCK(log, s); | 1606 | spin_unlock(&log->l_icloglock); |
1603 | } /* xlog_state_finish_copy */ | 1607 | } /* xlog_state_finish_copy */ |
1604 | 1608 | ||
1605 | 1609 | ||
@@ -1752,7 +1756,7 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket) | |||
1752 | * we don't update ic_offset until the end when we know exactly how many | 1756 | * we don't update ic_offset until the end when we know exactly how many |
1753 | * bytes have been written out. | 1757 | * bytes have been written out. |
1754 | */ | 1758 | */ |
1755 | int | 1759 | STATIC int |
1756 | xlog_write(xfs_mount_t * mp, | 1760 | xlog_write(xfs_mount_t * mp, |
1757 | xfs_log_iovec_t reg[], | 1761 | xfs_log_iovec_t reg[], |
1758 | int nentries, | 1762 | int nentries, |
@@ -1823,7 +1827,7 @@ xlog_write(xfs_mount_t * mp, | |||
1823 | 1827 | ||
1824 | /* start_lsn is the first lsn written to. That's all we need. */ | 1828 | /* start_lsn is the first lsn written to. That's all we need. */ |
1825 | if (! *start_lsn) | 1829 | if (! *start_lsn) |
1826 | *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | 1830 | *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
1827 | 1831 | ||
1828 | /* This loop writes out as many regions as can fit in the amount | 1832 | /* This loop writes out as many regions as can fit in the amount |
1829 | * of space which was allocated by xlog_state_get_iclog_space(). | 1833 | * of space which was allocated by xlog_state_get_iclog_space(). |
@@ -1839,7 +1843,7 @@ xlog_write(xfs_mount_t * mp, | |||
1839 | */ | 1843 | */ |
1840 | if (ticket->t_flags & XLOG_TIC_INITED) { | 1844 | if (ticket->t_flags & XLOG_TIC_INITED) { |
1841 | logop_head = (xlog_op_header_t *)ptr; | 1845 | logop_head = (xlog_op_header_t *)ptr; |
1842 | INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); | 1846 | logop_head->oh_tid = cpu_to_be32(ticket->t_tid); |
1843 | logop_head->oh_clientid = ticket->t_clientid; | 1847 | logop_head->oh_clientid = ticket->t_clientid; |
1844 | logop_head->oh_len = 0; | 1848 | logop_head->oh_len = 0; |
1845 | logop_head->oh_flags = XLOG_START_TRANS; | 1849 | logop_head->oh_flags = XLOG_START_TRANS; |
@@ -1853,7 +1857,7 @@ xlog_write(xfs_mount_t * mp, | |||
1853 | 1857 | ||
1854 | /* Copy log operation header directly into data section */ | 1858 | /* Copy log operation header directly into data section */ |
1855 | logop_head = (xlog_op_header_t *)ptr; | 1859 | logop_head = (xlog_op_header_t *)ptr; |
1856 | INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); | 1860 | logop_head->oh_tid = cpu_to_be32(ticket->t_tid); |
1857 | logop_head->oh_clientid = ticket->t_clientid; | 1861 | logop_head->oh_clientid = ticket->t_clientid; |
1858 | logop_head->oh_res2 = 0; | 1862 | logop_head->oh_res2 = 0; |
1859 | 1863 | ||
@@ -1888,13 +1892,14 @@ xlog_write(xfs_mount_t * mp, | |||
1888 | 1892 | ||
1889 | copy_off = partial_copy_len; | 1893 | copy_off = partial_copy_len; |
1890 | if (need_copy <= iclog->ic_size - log_offset) { /*complete write */ | 1894 | if (need_copy <= iclog->ic_size - log_offset) { /*complete write */ |
1891 | INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy); | 1895 | copy_len = need_copy; |
1896 | logop_head->oh_len = cpu_to_be32(copy_len); | ||
1892 | if (partial_copy) | 1897 | if (partial_copy) |
1893 | logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); | 1898 | logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); |
1894 | partial_copy_len = partial_copy = 0; | 1899 | partial_copy_len = partial_copy = 0; |
1895 | } else { /* partial write */ | 1900 | } else { /* partial write */ |
1896 | copy_len = iclog->ic_size - log_offset; | 1901 | copy_len = iclog->ic_size - log_offset; |
1897 | INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len); | 1902 | logop_head->oh_len = cpu_to_be32(copy_len); |
1898 | logop_head->oh_flags |= XLOG_CONTINUE_TRANS; | 1903 | logop_head->oh_flags |= XLOG_CONTINUE_TRANS; |
1899 | if (partial_copy) | 1904 | if (partial_copy) |
1900 | logop_head->oh_flags |= XLOG_WAS_CONT_TRANS; | 1905 | logop_head->oh_flags |= XLOG_WAS_CONT_TRANS; |
@@ -1992,7 +1997,8 @@ xlog_state_clean_log(xlog_t *log) | |||
1992 | * We don't need to cover the dummy. | 1997 | * We don't need to cover the dummy. |
1993 | */ | 1998 | */ |
1994 | if (!changed && | 1999 | if (!changed && |
1995 | (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) { | 2000 | (be32_to_cpu(iclog->ic_header.h_num_logops) == |
2001 | XLOG_COVER_OPS)) { | ||
1996 | changed = 1; | 2002 | changed = 1; |
1997 | } else { | 2003 | } else { |
1998 | /* | 2004 | /* |
@@ -2060,7 +2066,7 @@ xlog_get_lowest_lsn( | |||
2060 | lowest_lsn = 0; | 2066 | lowest_lsn = 0; |
2061 | do { | 2067 | do { |
2062 | if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { | 2068 | if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { |
2063 | lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT); | 2069 | lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); |
2064 | if ((lsn && !lowest_lsn) || | 2070 | if ((lsn && !lowest_lsn) || |
2065 | (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { | 2071 | (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { |
2066 | lowest_lsn = lsn; | 2072 | lowest_lsn = lsn; |
@@ -2089,9 +2095,8 @@ xlog_state_do_callback( | |||
2089 | int funcdidcallbacks; /* flag: function did callbacks */ | 2095 | int funcdidcallbacks; /* flag: function did callbacks */ |
2090 | int repeats; /* for issuing console warnings if | 2096 | int repeats; /* for issuing console warnings if |
2091 | * looping too many times */ | 2097 | * looping too many times */ |
2092 | SPLDECL(s); | ||
2093 | 2098 | ||
2094 | s = LOG_LOCK(log); | 2099 | spin_lock(&log->l_icloglock); |
2095 | first_iclog = iclog = log->l_iclog; | 2100 | first_iclog = iclog = log->l_iclog; |
2096 | ioerrors = 0; | 2101 | ioerrors = 0; |
2097 | funcdidcallbacks = 0; | 2102 | funcdidcallbacks = 0; |
@@ -2136,7 +2141,7 @@ xlog_state_do_callback( | |||
2136 | * to DO_CALLBACK, we will not process it when | 2141 | * to DO_CALLBACK, we will not process it when |
2137 | * we retry since a previous iclog is in the | 2142 | * we retry since a previous iclog is in the |
2138 | * CALLBACK and the state cannot change since | 2143 | * CALLBACK and the state cannot change since |
2139 | * we are holding the LOG_LOCK. | 2144 | * we are holding the l_icloglock. |
2140 | */ | 2145 | */ |
2141 | if (!(iclog->ic_state & | 2146 | if (!(iclog->ic_state & |
2142 | (XLOG_STATE_DONE_SYNC | | 2147 | (XLOG_STATE_DONE_SYNC | |
@@ -2162,11 +2167,9 @@ xlog_state_do_callback( | |||
2162 | */ | 2167 | */ |
2163 | 2168 | ||
2164 | lowest_lsn = xlog_get_lowest_lsn(log); | 2169 | lowest_lsn = xlog_get_lowest_lsn(log); |
2165 | if (lowest_lsn && ( | 2170 | if (lowest_lsn && |
2166 | XFS_LSN_CMP( | 2171 | XFS_LSN_CMP(lowest_lsn, |
2167 | lowest_lsn, | 2172 | be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { |
2168 | INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) | ||
2169 | )<0)) { | ||
2170 | iclog = iclog->ic_next; | 2173 | iclog = iclog->ic_next; |
2171 | continue; /* Leave this iclog for | 2174 | continue; /* Leave this iclog for |
2172 | * another thread */ | 2175 | * another thread */ |
@@ -2174,19 +2177,18 @@ xlog_state_do_callback( | |||
2174 | 2177 | ||
2175 | iclog->ic_state = XLOG_STATE_CALLBACK; | 2178 | iclog->ic_state = XLOG_STATE_CALLBACK; |
2176 | 2179 | ||
2177 | LOG_UNLOCK(log, s); | 2180 | spin_unlock(&log->l_icloglock); |
2178 | 2181 | ||
2179 | /* l_last_sync_lsn field protected by | 2182 | /* l_last_sync_lsn field protected by |
2180 | * GRANT_LOCK. Don't worry about iclog's lsn. | 2183 | * l_grant_lock. Don't worry about iclog's lsn. |
2181 | * No one else can be here except us. | 2184 | * No one else can be here except us. |
2182 | */ | 2185 | */ |
2183 | s = GRANT_LOCK(log); | 2186 | spin_lock(&log->l_grant_lock); |
2184 | ASSERT(XFS_LSN_CMP( | 2187 | ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn, |
2185 | log->l_last_sync_lsn, | 2188 | be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); |
2186 | INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) | 2189 | log->l_last_sync_lsn = |
2187 | )<=0); | 2190 | be64_to_cpu(iclog->ic_header.h_lsn); |
2188 | log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | 2191 | spin_unlock(&log->l_grant_lock); |
2189 | GRANT_UNLOCK(log, s); | ||
2190 | 2192 | ||
2191 | /* | 2193 | /* |
2192 | * Keep processing entries in the callback list | 2194 | * Keep processing entries in the callback list |
@@ -2195,7 +2197,7 @@ xlog_state_do_callback( | |||
2195 | * empty and change the state to DIRTY so that | 2197 | * empty and change the state to DIRTY so that |
2196 | * we don't miss any more callbacks being added. | 2198 | * we don't miss any more callbacks being added. |
2197 | */ | 2199 | */ |
2198 | s = LOG_LOCK(log); | 2200 | spin_lock(&log->l_icloglock); |
2199 | } else { | 2201 | } else { |
2200 | ioerrors++; | 2202 | ioerrors++; |
2201 | } | 2203 | } |
@@ -2204,14 +2206,14 @@ xlog_state_do_callback( | |||
2204 | while (cb) { | 2206 | while (cb) { |
2205 | iclog->ic_callback_tail = &(iclog->ic_callback); | 2207 | iclog->ic_callback_tail = &(iclog->ic_callback); |
2206 | iclog->ic_callback = NULL; | 2208 | iclog->ic_callback = NULL; |
2207 | LOG_UNLOCK(log, s); | 2209 | spin_unlock(&log->l_icloglock); |
2208 | 2210 | ||
2209 | /* perform callbacks in the order given */ | 2211 | /* perform callbacks in the order given */ |
2210 | for (; cb; cb = cb_next) { | 2212 | for (; cb; cb = cb_next) { |
2211 | cb_next = cb->cb_next; | 2213 | cb_next = cb->cb_next; |
2212 | cb->cb_func(cb->cb_arg, aborted); | 2214 | cb->cb_func(cb->cb_arg, aborted); |
2213 | } | 2215 | } |
2214 | s = LOG_LOCK(log); | 2216 | spin_lock(&log->l_icloglock); |
2215 | cb = iclog->ic_callback; | 2217 | cb = iclog->ic_callback; |
2216 | } | 2218 | } |
2217 | 2219 | ||
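The hunk above keeps the existing drain pattern while switching to the spinlock API: the pending callback list is detached under l_icloglock, the lock is dropped, the callbacks run in the order given, and the lock is retaken to check for callbacks that arrived in the meantime. A toy userspace version of that pattern (pthread mutex instead of the spinlock, not XFS code):

#include <pthread.h>
#include <stddef.h>

struct cb_sketch {
        struct cb_sketch *next;
        void (*fn)(void *arg, int aborted);
        void *arg;
};

static void drain_callbacks(struct cb_sketch **listp,
                            pthread_mutex_t *lock, int aborted)
{
        struct cb_sketch *cb, *next;

        pthread_mutex_lock(lock);
        while ((cb = *listp) != NULL) {
                *listp = NULL;                  /* detach the whole chain */
                pthread_mutex_unlock(lock);

                for (; cb; cb = next) {         /* run in the order given */
                        next = cb->next;
                        cb->fn(cb->arg, aborted);
                }
                pthread_mutex_lock(lock);       /* re-check for new arrivals */
        }
        pthread_mutex_unlock(lock);
}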
@@ -2258,7 +2260,7 @@ xlog_state_do_callback( | |||
2258 | * | 2260 | * |
2259 | * SYNCING - i/o completion will go through logs | 2261 | * SYNCING - i/o completion will go through logs |
2260 | * DONE_SYNC - interrupt thread should be waiting for | 2262 | * DONE_SYNC - interrupt thread should be waiting for |
2261 | * LOG_LOCK | 2263 | * l_icloglock |
2262 | * IOERROR - give up hope all ye who enter here | 2264 | * IOERROR - give up hope all ye who enter here |
2263 | */ | 2265 | */ |
2264 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC || | 2266 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC || |
@@ -2276,7 +2278,7 @@ xlog_state_do_callback( | |||
2276 | flushcnt = log->l_flushcnt; | 2278 | flushcnt = log->l_flushcnt; |
2277 | log->l_flushcnt = 0; | 2279 | log->l_flushcnt = 0; |
2278 | } | 2280 | } |
2279 | LOG_UNLOCK(log, s); | 2281 | spin_unlock(&log->l_icloglock); |
2280 | while (flushcnt--) | 2282 | while (flushcnt--) |
2281 | vsema(&log->l_flushsema); | 2283 | vsema(&log->l_flushsema); |
2282 | } /* xlog_state_do_callback */ | 2284 | } /* xlog_state_do_callback */ |
@@ -2296,15 +2298,14 @@ xlog_state_do_callback( | |||
2296 | * global state machine log lock. Assume that the calls to cvsema won't | 2298 | * global state machine log lock. Assume that the calls to cvsema won't |
2297 | * take a long time. At least we know it won't sleep. | 2299 | * take a long time. At least we know it won't sleep. |
2298 | */ | 2300 | */ |
2299 | void | 2301 | STATIC void |
2300 | xlog_state_done_syncing( | 2302 | xlog_state_done_syncing( |
2301 | xlog_in_core_t *iclog, | 2303 | xlog_in_core_t *iclog, |
2302 | int aborted) | 2304 | int aborted) |
2303 | { | 2305 | { |
2304 | xlog_t *log = iclog->ic_log; | 2306 | xlog_t *log = iclog->ic_log; |
2305 | SPLDECL(s); | ||
2306 | 2307 | ||
2307 | s = LOG_LOCK(log); | 2308 | spin_lock(&log->l_icloglock); |
2308 | 2309 | ||
2309 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || | 2310 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || |
2310 | iclog->ic_state == XLOG_STATE_IOERROR); | 2311 | iclog->ic_state == XLOG_STATE_IOERROR); |
@@ -2320,7 +2321,7 @@ xlog_state_done_syncing( | |||
2320 | */ | 2321 | */ |
2321 | if (iclog->ic_state != XLOG_STATE_IOERROR) { | 2322 | if (iclog->ic_state != XLOG_STATE_IOERROR) { |
2322 | if (--iclog->ic_bwritecnt == 1) { | 2323 | if (--iclog->ic_bwritecnt == 1) { |
2323 | LOG_UNLOCK(log, s); | 2324 | spin_unlock(&log->l_icloglock); |
2324 | return; | 2325 | return; |
2325 | } | 2326 | } |
2326 | iclog->ic_state = XLOG_STATE_DONE_SYNC; | 2327 | iclog->ic_state = XLOG_STATE_DONE_SYNC; |
@@ -2332,7 +2333,7 @@ xlog_state_done_syncing( | |||
2332 | * I/O, the others get to wait for the result. | 2333 | * I/O, the others get to wait for the result. |
2333 | */ | 2334 | */ |
2334 | sv_broadcast(&iclog->ic_writesema); | 2335 | sv_broadcast(&iclog->ic_writesema); |
2335 | LOG_UNLOCK(log, s); | 2336 | spin_unlock(&log->l_icloglock); |
2336 | xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ | 2337 | xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ |
2337 | } /* xlog_state_done_syncing */ | 2338 | } /* xlog_state_done_syncing */ |
2338 | 2339 | ||
@@ -2357,7 +2358,7 @@ xlog_state_done_syncing( | |||
2357 | * needs to be incremented, depending on the amount of data which | 2358 | * needs to be incremented, depending on the amount of data which |
2358 | * is copied. | 2359 | * is copied. |
2359 | */ | 2360 | */ |
2360 | int | 2361 | STATIC int |
2361 | xlog_state_get_iclog_space(xlog_t *log, | 2362 | xlog_state_get_iclog_space(xlog_t *log, |
2362 | int len, | 2363 | int len, |
2363 | xlog_in_core_t **iclogp, | 2364 | xlog_in_core_t **iclogp, |
@@ -2365,23 +2366,22 @@ xlog_state_get_iclog_space(xlog_t *log, | |||
2365 | int *continued_write, | 2366 | int *continued_write, |
2366 | int *logoffsetp) | 2367 | int *logoffsetp) |
2367 | { | 2368 | { |
2368 | SPLDECL(s); | ||
2369 | int log_offset; | 2369 | int log_offset; |
2370 | xlog_rec_header_t *head; | 2370 | xlog_rec_header_t *head; |
2371 | xlog_in_core_t *iclog; | 2371 | xlog_in_core_t *iclog; |
2372 | int error; | 2372 | int error; |
2373 | 2373 | ||
2374 | restart: | 2374 | restart: |
2375 | s = LOG_LOCK(log); | 2375 | spin_lock(&log->l_icloglock); |
2376 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2376 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2377 | LOG_UNLOCK(log, s); | 2377 | spin_unlock(&log->l_icloglock); |
2378 | return XFS_ERROR(EIO); | 2378 | return XFS_ERROR(EIO); |
2379 | } | 2379 | } |
2380 | 2380 | ||
2381 | iclog = log->l_iclog; | 2381 | iclog = log->l_iclog; |
2382 | if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { | 2382 | if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { |
2383 | log->l_flushcnt++; | 2383 | log->l_flushcnt++; |
2384 | LOG_UNLOCK(log, s); | 2384 | spin_unlock(&log->l_icloglock); |
2385 | xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); | 2385 | xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); |
2386 | XFS_STATS_INC(xs_log_noiclogs); | 2386 | XFS_STATS_INC(xs_log_noiclogs); |
2387 | /* Ensure that log writes happen */ | 2387 | /* Ensure that log writes happen */ |
@@ -2404,8 +2404,9 @@ restart: | |||
2404 | xlog_tic_add_region(ticket, | 2404 | xlog_tic_add_region(ticket, |
2405 | log->l_iclog_hsize, | 2405 | log->l_iclog_hsize, |
2406 | XLOG_REG_TYPE_LRHEADER); | 2406 | XLOG_REG_TYPE_LRHEADER); |
2407 | INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); | 2407 | head->h_cycle = cpu_to_be32(log->l_curr_cycle); |
2408 | ASSIGN_LSN(head->h_lsn, log); | 2408 | head->h_lsn = cpu_to_be64( |
2409 | xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); | ||
2409 | ASSERT(log->l_curr_block >= 0); | 2410 | ASSERT(log->l_curr_block >= 0); |
2410 | } | 2411 | } |
2411 | 2412 | ||
@@ -2423,12 +2424,12 @@ restart: | |||
2423 | 2424 | ||
2424 | /* If I'm the only one writing to this iclog, sync it to disk */ | 2425 | /* If I'm the only one writing to this iclog, sync it to disk */ |
2425 | if (iclog->ic_refcnt == 1) { | 2426 | if (iclog->ic_refcnt == 1) { |
2426 | LOG_UNLOCK(log, s); | 2427 | spin_unlock(&log->l_icloglock); |
2427 | if ((error = xlog_state_release_iclog(log, iclog))) | 2428 | if ((error = xlog_state_release_iclog(log, iclog))) |
2428 | return error; | 2429 | return error; |
2429 | } else { | 2430 | } else { |
2430 | iclog->ic_refcnt--; | 2431 | iclog->ic_refcnt--; |
2431 | LOG_UNLOCK(log, s); | 2432 | spin_unlock(&log->l_icloglock); |
2432 | } | 2433 | } |
2433 | goto restart; | 2434 | goto restart; |
2434 | } | 2435 | } |
@@ -2449,7 +2450,7 @@ restart: | |||
2449 | *iclogp = iclog; | 2450 | *iclogp = iclog; |
2450 | 2451 | ||
2451 | ASSERT(iclog->ic_offset <= iclog->ic_size); | 2452 | ASSERT(iclog->ic_offset <= iclog->ic_size); |
2452 | LOG_UNLOCK(log, s); | 2453 | spin_unlock(&log->l_icloglock); |
2453 | 2454 | ||
2454 | *logoffsetp = log_offset; | 2455 | *logoffsetp = log_offset; |
2455 | return 0; | 2456 | return 0; |
@@ -2467,7 +2468,6 @@ xlog_grant_log_space(xlog_t *log, | |||
2467 | { | 2468 | { |
2468 | int free_bytes; | 2469 | int free_bytes; |
2469 | int need_bytes; | 2470 | int need_bytes; |
2470 | SPLDECL(s); | ||
2471 | #ifdef DEBUG | 2471 | #ifdef DEBUG |
2472 | xfs_lsn_t tail_lsn; | 2472 | xfs_lsn_t tail_lsn; |
2473 | #endif | 2473 | #endif |
@@ -2479,7 +2479,7 @@ xlog_grant_log_space(xlog_t *log, | |||
2479 | #endif | 2479 | #endif |
2480 | 2480 | ||
2481 | /* Is there space or do we need to sleep? */ | 2481 | /* Is there space or do we need to sleep? */ |
2482 | s = GRANT_LOCK(log); | 2482 | spin_lock(&log->l_grant_lock); |
2483 | xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); | 2483 | xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); |
2484 | 2484 | ||
2485 | /* something is already sleeping; insert new transaction at end */ | 2485 | /* something is already sleeping; insert new transaction at end */ |
@@ -2502,7 +2502,7 @@ xlog_grant_log_space(xlog_t *log, | |||
2502 | */ | 2502 | */ |
2503 | xlog_trace_loggrant(log, tic, | 2503 | xlog_trace_loggrant(log, tic, |
2504 | "xlog_grant_log_space: wake 1"); | 2504 | "xlog_grant_log_space: wake 1"); |
2505 | s = GRANT_LOCK(log); | 2505 | spin_lock(&log->l_grant_lock); |
2506 | } | 2506 | } |
2507 | if (tic->t_flags & XFS_LOG_PERM_RESERV) | 2507 | if (tic->t_flags & XFS_LOG_PERM_RESERV) |
2508 | need_bytes = tic->t_unit_res*tic->t_ocnt; | 2508 | need_bytes = tic->t_unit_res*tic->t_ocnt; |
@@ -2524,14 +2524,14 @@ redo: | |||
2524 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); | 2524 | sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); |
2525 | 2525 | ||
2526 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2526 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2527 | s = GRANT_LOCK(log); | 2527 | spin_lock(&log->l_grant_lock); |
2528 | goto error_return; | 2528 | goto error_return; |
2529 | } | 2529 | } |
2530 | 2530 | ||
2531 | xlog_trace_loggrant(log, tic, | 2531 | xlog_trace_loggrant(log, tic, |
2532 | "xlog_grant_log_space: wake 2"); | 2532 | "xlog_grant_log_space: wake 2"); |
2533 | xlog_grant_push_ail(log->l_mp, need_bytes); | 2533 | xlog_grant_push_ail(log->l_mp, need_bytes); |
2534 | s = GRANT_LOCK(log); | 2534 | spin_lock(&log->l_grant_lock); |
2535 | goto redo; | 2535 | goto redo; |
2536 | } else if (tic->t_flags & XLOG_TIC_IN_Q) | 2536 | } else if (tic->t_flags & XLOG_TIC_IN_Q) |
2537 | xlog_del_ticketq(&log->l_reserve_headq, tic); | 2537 | xlog_del_ticketq(&log->l_reserve_headq, tic); |
@@ -2553,7 +2553,7 @@ redo: | |||
2553 | #endif | 2553 | #endif |
2554 | xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); | 2554 | xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); |
2555 | xlog_verify_grant_head(log, 1); | 2555 | xlog_verify_grant_head(log, 1); |
2556 | GRANT_UNLOCK(log, s); | 2556 | spin_unlock(&log->l_grant_lock); |
2557 | return 0; | 2557 | return 0; |
2558 | 2558 | ||
2559 | error_return: | 2559 | error_return: |
@@ -2567,7 +2567,7 @@ redo: | |||
2567 | */ | 2567 | */ |
2568 | tic->t_curr_res = 0; | 2568 | tic->t_curr_res = 0; |
2569 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ | 2569 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ |
2570 | GRANT_UNLOCK(log, s); | 2570 | spin_unlock(&log->l_grant_lock); |
2571 | return XFS_ERROR(EIO); | 2571 | return XFS_ERROR(EIO); |
2572 | } /* xlog_grant_log_space */ | 2572 | } /* xlog_grant_log_space */ |
2573 | 2573 | ||
@@ -2581,7 +2581,6 @@ STATIC int | |||
2581 | xlog_regrant_write_log_space(xlog_t *log, | 2581 | xlog_regrant_write_log_space(xlog_t *log, |
2582 | xlog_ticket_t *tic) | 2582 | xlog_ticket_t *tic) |
2583 | { | 2583 | { |
2584 | SPLDECL(s); | ||
2585 | int free_bytes, need_bytes; | 2584 | int free_bytes, need_bytes; |
2586 | xlog_ticket_t *ntic; | 2585 | xlog_ticket_t *ntic; |
2587 | #ifdef DEBUG | 2586 | #ifdef DEBUG |
@@ -2599,7 +2598,7 @@ xlog_regrant_write_log_space(xlog_t *log, | |||
2599 | panic("regrant Recovery problem"); | 2598 | panic("regrant Recovery problem"); |
2600 | #endif | 2599 | #endif |
2601 | 2600 | ||
2602 | s = GRANT_LOCK(log); | 2601 | spin_lock(&log->l_grant_lock); |
2603 | xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); | 2602 | xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); |
2604 | 2603 | ||
2605 | if (XLOG_FORCED_SHUTDOWN(log)) | 2604 | if (XLOG_FORCED_SHUTDOWN(log)) |
@@ -2638,14 +2637,14 @@ xlog_regrant_write_log_space(xlog_t *log, | |||
2638 | /* If we're shutting down, this tic is already | 2637 | /* If we're shutting down, this tic is already |
2639 | * off the queue */ | 2638 | * off the queue */ |
2640 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2639 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2641 | s = GRANT_LOCK(log); | 2640 | spin_lock(&log->l_grant_lock); |
2642 | goto error_return; | 2641 | goto error_return; |
2643 | } | 2642 | } |
2644 | 2643 | ||
2645 | xlog_trace_loggrant(log, tic, | 2644 | xlog_trace_loggrant(log, tic, |
2646 | "xlog_regrant_write_log_space: wake 1"); | 2645 | "xlog_regrant_write_log_space: wake 1"); |
2647 | xlog_grant_push_ail(log->l_mp, tic->t_unit_res); | 2646 | xlog_grant_push_ail(log->l_mp, tic->t_unit_res); |
2648 | s = GRANT_LOCK(log); | 2647 | spin_lock(&log->l_grant_lock); |
2649 | } | 2648 | } |
2650 | } | 2649 | } |
2651 | 2650 | ||
@@ -2665,14 +2664,14 @@ redo: | |||
2665 | 2664 | ||
2666 | /* If we're shutting down, this tic is already off the queue */ | 2665 | /* If we're shutting down, this tic is already off the queue */ |
2667 | if (XLOG_FORCED_SHUTDOWN(log)) { | 2666 | if (XLOG_FORCED_SHUTDOWN(log)) { |
2668 | s = GRANT_LOCK(log); | 2667 | spin_lock(&log->l_grant_lock); |
2669 | goto error_return; | 2668 | goto error_return; |
2670 | } | 2669 | } |
2671 | 2670 | ||
2672 | xlog_trace_loggrant(log, tic, | 2671 | xlog_trace_loggrant(log, tic, |
2673 | "xlog_regrant_write_log_space: wake 2"); | 2672 | "xlog_regrant_write_log_space: wake 2"); |
2674 | xlog_grant_push_ail(log->l_mp, need_bytes); | 2673 | xlog_grant_push_ail(log->l_mp, need_bytes); |
2675 | s = GRANT_LOCK(log); | 2674 | spin_lock(&log->l_grant_lock); |
2676 | goto redo; | 2675 | goto redo; |
2677 | } else if (tic->t_flags & XLOG_TIC_IN_Q) | 2676 | } else if (tic->t_flags & XLOG_TIC_IN_Q) |
2678 | xlog_del_ticketq(&log->l_write_headq, tic); | 2677 | xlog_del_ticketq(&log->l_write_headq, tic); |
@@ -2689,7 +2688,7 @@ redo: | |||
2689 | 2688 | ||
2690 | xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); | 2689 | xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); |
2691 | xlog_verify_grant_head(log, 1); | 2690 | xlog_verify_grant_head(log, 1); |
2692 | GRANT_UNLOCK(log, s); | 2691 | spin_unlock(&log->l_grant_lock); |
2693 | return 0; | 2692 | return 0; |
2694 | 2693 | ||
2695 | 2694 | ||
@@ -2704,7 +2703,7 @@ redo: | |||
2704 | */ | 2703 | */ |
2705 | tic->t_curr_res = 0; | 2704 | tic->t_curr_res = 0; |
2706 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ | 2705 | tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ |
2707 | GRANT_UNLOCK(log, s); | 2706 | spin_unlock(&log->l_grant_lock); |
2708 | return XFS_ERROR(EIO); | 2707 | return XFS_ERROR(EIO); |
2709 | } /* xlog_regrant_write_log_space */ | 2708 | } /* xlog_regrant_write_log_space */ |
2710 | 2709 | ||
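The hunks above, and the rest of the xfs_log.c changes below, follow one pattern: the old SPLDECL(s) cookie plus the GRANT_LOCK()/GRANT_UNLOCK() and LOG_LOCK()/LOG_UNLOCK() wrappers are replaced by plain spin_lock()/spin_unlock() calls on l_grant_lock and l_icloglock, whose types change to spinlock_t in the xfs_log_priv.h hunks further down. A minimal compilable sketch of the converted shape, using a cut-down stand-in struct rather than the real xlog_t:

	#include <linux/spinlock.h>

	/* Hypothetical stand-in for xlog_t: only the two locks this patch
	 * converts are shown; the field names match the diff.
	 */
	struct sketch_log {
		spinlock_t	l_grant_lock;	/* was lock_t, taken via GRANT_LOCK()/GRANT_UNLOCK() */
		spinlock_t	l_icloglock;	/* was lock_t, taken via LOG_LOCK()/LOG_UNLOCK() */
	};

	/* The converted pattern: no SPLDECL(s) flags cookie to declare and
	 * thread through every lock/unlock pair, just an ordinary spinlock.
	 */
	static void sketch_grant_critical_section(struct sketch_log *log)
	{
		spin_lock(&log->l_grant_lock);
		/* ... grant head manipulation, as in the hunks above ... */
		spin_unlock(&log->l_grant_lock);
	}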
@@ -2720,14 +2719,12 @@ STATIC void | |||
2720 | xlog_regrant_reserve_log_space(xlog_t *log, | 2719 | xlog_regrant_reserve_log_space(xlog_t *log, |
2721 | xlog_ticket_t *ticket) | 2720 | xlog_ticket_t *ticket) |
2722 | { | 2721 | { |
2723 | SPLDECL(s); | ||
2724 | |||
2725 | xlog_trace_loggrant(log, ticket, | 2722 | xlog_trace_loggrant(log, ticket, |
2726 | "xlog_regrant_reserve_log_space: enter"); | 2723 | "xlog_regrant_reserve_log_space: enter"); |
2727 | if (ticket->t_cnt > 0) | 2724 | if (ticket->t_cnt > 0) |
2728 | ticket->t_cnt--; | 2725 | ticket->t_cnt--; |
2729 | 2726 | ||
2730 | s = GRANT_LOCK(log); | 2727 | spin_lock(&log->l_grant_lock); |
2731 | xlog_grant_sub_space(log, ticket->t_curr_res); | 2728 | xlog_grant_sub_space(log, ticket->t_curr_res); |
2732 | ticket->t_curr_res = ticket->t_unit_res; | 2729 | ticket->t_curr_res = ticket->t_unit_res; |
2733 | xlog_tic_reset_res(ticket); | 2730 | xlog_tic_reset_res(ticket); |
@@ -2737,7 +2734,7 @@ xlog_regrant_reserve_log_space(xlog_t *log, | |||
2737 | 2734 | ||
2738 | /* just return if we still have some of the pre-reserved space */ | 2735 | /* just return if we still have some of the pre-reserved space */ |
2739 | if (ticket->t_cnt > 0) { | 2736 | if (ticket->t_cnt > 0) { |
2740 | GRANT_UNLOCK(log, s); | 2737 | spin_unlock(&log->l_grant_lock); |
2741 | return; | 2738 | return; |
2742 | } | 2739 | } |
2743 | 2740 | ||
@@ -2745,7 +2742,7 @@ xlog_regrant_reserve_log_space(xlog_t *log, | |||
2745 | xlog_trace_loggrant(log, ticket, | 2742 | xlog_trace_loggrant(log, ticket, |
2746 | "xlog_regrant_reserve_log_space: exit"); | 2743 | "xlog_regrant_reserve_log_space: exit"); |
2747 | xlog_verify_grant_head(log, 0); | 2744 | xlog_verify_grant_head(log, 0); |
2748 | GRANT_UNLOCK(log, s); | 2745 | spin_unlock(&log->l_grant_lock); |
2749 | ticket->t_curr_res = ticket->t_unit_res; | 2746 | ticket->t_curr_res = ticket->t_unit_res; |
2750 | xlog_tic_reset_res(ticket); | 2747 | xlog_tic_reset_res(ticket); |
2751 | } /* xlog_regrant_reserve_log_space */ | 2748 | } /* xlog_regrant_reserve_log_space */ |
@@ -2769,12 +2766,10 @@ STATIC void | |||
2769 | xlog_ungrant_log_space(xlog_t *log, | 2766 | xlog_ungrant_log_space(xlog_t *log, |
2770 | xlog_ticket_t *ticket) | 2767 | xlog_ticket_t *ticket) |
2771 | { | 2768 | { |
2772 | SPLDECL(s); | ||
2773 | |||
2774 | if (ticket->t_cnt > 0) | 2769 | if (ticket->t_cnt > 0) |
2775 | ticket->t_cnt--; | 2770 | ticket->t_cnt--; |
2776 | 2771 | ||
2777 | s = GRANT_LOCK(log); | 2772 | spin_lock(&log->l_grant_lock); |
2778 | xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); | 2773 | xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); |
2779 | 2774 | ||
2780 | xlog_grant_sub_space(log, ticket->t_curr_res); | 2775 | xlog_grant_sub_space(log, ticket->t_curr_res); |
@@ -2791,7 +2786,7 @@ xlog_ungrant_log_space(xlog_t *log, | |||
2791 | 2786 | ||
2792 | xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); | 2787 | xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); |
2793 | xlog_verify_grant_head(log, 1); | 2788 | xlog_verify_grant_head(log, 1); |
2794 | GRANT_UNLOCK(log, s); | 2789 | spin_unlock(&log->l_grant_lock); |
2795 | xfs_log_move_tail(log->l_mp, 1); | 2790 | xfs_log_move_tail(log->l_mp, 1); |
2796 | } /* xlog_ungrant_log_space */ | 2791 | } /* xlog_ungrant_log_space */ |
2797 | 2792 | ||
@@ -2799,15 +2794,13 @@ xlog_ungrant_log_space(xlog_t *log, | |||
2799 | /* | 2794 | /* |
2800 | * Atomically put back used ticket. | 2795 | * Atomically put back used ticket. |
2801 | */ | 2796 | */ |
2802 | void | 2797 | STATIC void |
2803 | xlog_state_put_ticket(xlog_t *log, | 2798 | xlog_state_put_ticket(xlog_t *log, |
2804 | xlog_ticket_t *tic) | 2799 | xlog_ticket_t *tic) |
2805 | { | 2800 | { |
2806 | unsigned long s; | 2801 | spin_lock(&log->l_icloglock); |
2807 | |||
2808 | s = LOG_LOCK(log); | ||
2809 | xlog_ticket_put(log, tic); | 2802 | xlog_ticket_put(log, tic); |
2810 | LOG_UNLOCK(log, s); | 2803 | spin_unlock(&log->l_icloglock); |
2811 | } /* xlog_state_put_ticket */ | 2804 | } /* xlog_state_put_ticket */ |
2812 | 2805 | ||
2813 | /* | 2806 | /* |
@@ -2819,19 +2812,18 @@ xlog_state_put_ticket(xlog_t *log, | |||
2819 | * | 2812 | * |
2820 | * | 2813 | * |
2821 | */ | 2814 | */ |
2822 | int | 2815 | STATIC int |
2823 | xlog_state_release_iclog(xlog_t *log, | 2816 | xlog_state_release_iclog(xlog_t *log, |
2824 | xlog_in_core_t *iclog) | 2817 | xlog_in_core_t *iclog) |
2825 | { | 2818 | { |
2826 | SPLDECL(s); | ||
2827 | int sync = 0; /* do we sync? */ | 2819 | int sync = 0; /* do we sync? */ |
2828 | 2820 | ||
2829 | xlog_assign_tail_lsn(log->l_mp); | 2821 | xlog_assign_tail_lsn(log->l_mp); |
2830 | 2822 | ||
2831 | s = LOG_LOCK(log); | 2823 | spin_lock(&log->l_icloglock); |
2832 | 2824 | ||
2833 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 2825 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
2834 | LOG_UNLOCK(log, s); | 2826 | spin_unlock(&log->l_icloglock); |
2835 | return XFS_ERROR(EIO); | 2827 | return XFS_ERROR(EIO); |
2836 | } | 2828 | } |
2837 | 2829 | ||
@@ -2843,12 +2835,12 @@ xlog_state_release_iclog(xlog_t *log, | |||
2843 | iclog->ic_state == XLOG_STATE_WANT_SYNC) { | 2835 | iclog->ic_state == XLOG_STATE_WANT_SYNC) { |
2844 | sync++; | 2836 | sync++; |
2845 | iclog->ic_state = XLOG_STATE_SYNCING; | 2837 | iclog->ic_state = XLOG_STATE_SYNCING; |
2846 | INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn); | 2838 | iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); |
2847 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); | 2839 | xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); |
2848 | /* cycle incremented when incrementing curr_block */ | 2840 | /* cycle incremented when incrementing curr_block */ |
2849 | } | 2841 | } |
2850 | 2842 | ||
2851 | LOG_UNLOCK(log, s); | 2843 | spin_unlock(&log->l_icloglock); |
2852 | 2844 | ||
2853 | /* | 2845 | /* |
2854 | * We let the log lock go, so it's possible that we hit a log I/O | 2846 | * We let the log lock go, so it's possible that we hit a log I/O |
@@ -2881,7 +2873,7 @@ xlog_state_switch_iclogs(xlog_t *log, | |||
2881 | if (!eventual_size) | 2873 | if (!eventual_size) |
2882 | eventual_size = iclog->ic_offset; | 2874 | eventual_size = iclog->ic_offset; |
2883 | iclog->ic_state = XLOG_STATE_WANT_SYNC; | 2875 | iclog->ic_state = XLOG_STATE_WANT_SYNC; |
2884 | INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block); | 2876 | iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); |
2885 | log->l_prev_block = log->l_curr_block; | 2877 | log->l_prev_block = log->l_curr_block; |
2886 | log->l_prev_cycle = log->l_curr_cycle; | 2878 | log->l_prev_cycle = log->l_curr_cycle; |
2887 | 2879 | ||
@@ -2939,13 +2931,12 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2939 | { | 2931 | { |
2940 | xlog_in_core_t *iclog; | 2932 | xlog_in_core_t *iclog; |
2941 | xfs_lsn_t lsn; | 2933 | xfs_lsn_t lsn; |
2942 | SPLDECL(s); | ||
2943 | 2934 | ||
2944 | s = LOG_LOCK(log); | 2935 | spin_lock(&log->l_icloglock); |
2945 | 2936 | ||
2946 | iclog = log->l_iclog; | 2937 | iclog = log->l_iclog; |
2947 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 2938 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
2948 | LOG_UNLOCK(log, s); | 2939 | spin_unlock(&log->l_icloglock); |
2949 | return XFS_ERROR(EIO); | 2940 | return XFS_ERROR(EIO); |
2950 | } | 2941 | } |
2951 | 2942 | ||
@@ -2978,15 +2969,15 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed) | |||
2978 | * the previous sync. | 2969 | * the previous sync. |
2979 | */ | 2970 | */ |
2980 | iclog->ic_refcnt++; | 2971 | iclog->ic_refcnt++; |
2981 | lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); | 2972 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
2982 | xlog_state_switch_iclogs(log, iclog, 0); | 2973 | xlog_state_switch_iclogs(log, iclog, 0); |
2983 | LOG_UNLOCK(log, s); | 2974 | spin_unlock(&log->l_icloglock); |
2984 | 2975 | ||
2985 | if (xlog_state_release_iclog(log, iclog)) | 2976 | if (xlog_state_release_iclog(log, iclog)) |
2986 | return XFS_ERROR(EIO); | 2977 | return XFS_ERROR(EIO); |
2987 | *log_flushed = 1; | 2978 | *log_flushed = 1; |
2988 | s = LOG_LOCK(log); | 2979 | spin_lock(&log->l_icloglock); |
2989 | if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn && | 2980 | if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && |
2990 | iclog->ic_state != XLOG_STATE_DIRTY) | 2981 | iclog->ic_state != XLOG_STATE_DIRTY) |
2991 | goto maybe_sleep; | 2982 | goto maybe_sleep; |
2992 | else | 2983 | else |
@@ -3011,12 +3002,12 @@ maybe_sleep: | |||
3011 | if (flags & XFS_LOG_SYNC) { | 3002 | if (flags & XFS_LOG_SYNC) { |
3012 | /* | 3003 | /* |
3013 | * We must check if we're shutting down here, before | 3004 | * We must check if we're shutting down here, before |
3014 | * we wait, while we're holding the LOG_LOCK. | 3005 | * we wait, while we're holding the l_icloglock. |
3015 | * Then we check again after waking up, in case our | 3006 | * Then we check again after waking up, in case our |
3016 | * sleep was disturbed by a bad news. | 3007 | * sleep was disturbed by a bad news. |
3017 | */ | 3008 | */ |
3018 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3009 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3019 | LOG_UNLOCK(log, s); | 3010 | spin_unlock(&log->l_icloglock); |
3020 | return XFS_ERROR(EIO); | 3011 | return XFS_ERROR(EIO); |
3021 | } | 3012 | } |
3022 | XFS_STATS_INC(xs_log_force_sleep); | 3013 | XFS_STATS_INC(xs_log_force_sleep); |
@@ -3033,7 +3024,7 @@ maybe_sleep: | |||
3033 | } else { | 3024 | } else { |
3034 | 3025 | ||
3035 | no_sleep: | 3026 | no_sleep: |
3036 | LOG_UNLOCK(log, s); | 3027 | spin_unlock(&log->l_icloglock); |
3037 | } | 3028 | } |
3038 | return 0; | 3029 | return 0; |
3039 | } /* xlog_state_sync_all */ | 3030 | } /* xlog_state_sync_all */ |
@@ -3051,7 +3042,7 @@ no_sleep: | |||
3051 | * If filesystem activity goes to zero, the iclog will get flushed only by | 3042 | * If filesystem activity goes to zero, the iclog will get flushed only by |
3052 | * bdflush(). | 3043 | * bdflush(). |
3053 | */ | 3044 | */ |
3054 | int | 3045 | STATIC int |
3055 | xlog_state_sync(xlog_t *log, | 3046 | xlog_state_sync(xlog_t *log, |
3056 | xfs_lsn_t lsn, | 3047 | xfs_lsn_t lsn, |
3057 | uint flags, | 3048 | uint flags, |
@@ -3059,26 +3050,24 @@ xlog_state_sync(xlog_t *log, | |||
3059 | { | 3050 | { |
3060 | xlog_in_core_t *iclog; | 3051 | xlog_in_core_t *iclog; |
3061 | int already_slept = 0; | 3052 | int already_slept = 0; |
3062 | SPLDECL(s); | ||
3063 | |||
3064 | 3053 | ||
3065 | try_again: | 3054 | try_again: |
3066 | s = LOG_LOCK(log); | 3055 | spin_lock(&log->l_icloglock); |
3067 | iclog = log->l_iclog; | 3056 | iclog = log->l_iclog; |
3068 | 3057 | ||
3069 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3058 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3070 | LOG_UNLOCK(log, s); | 3059 | spin_unlock(&log->l_icloglock); |
3071 | return XFS_ERROR(EIO); | 3060 | return XFS_ERROR(EIO); |
3072 | } | 3061 | } |
3073 | 3062 | ||
3074 | do { | 3063 | do { |
3075 | if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) { | 3064 | if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { |
3076 | iclog = iclog->ic_next; | 3065 | iclog = iclog->ic_next; |
3077 | continue; | 3066 | continue; |
3078 | } | 3067 | } |
3079 | 3068 | ||
3080 | if (iclog->ic_state == XLOG_STATE_DIRTY) { | 3069 | if (iclog->ic_state == XLOG_STATE_DIRTY) { |
3081 | LOG_UNLOCK(log, s); | 3070 | spin_unlock(&log->l_icloglock); |
3082 | return 0; | 3071 | return 0; |
3083 | } | 3072 | } |
3084 | 3073 | ||
@@ -3113,11 +3102,11 @@ try_again: | |||
3113 | } else { | 3102 | } else { |
3114 | iclog->ic_refcnt++; | 3103 | iclog->ic_refcnt++; |
3115 | xlog_state_switch_iclogs(log, iclog, 0); | 3104 | xlog_state_switch_iclogs(log, iclog, 0); |
3116 | LOG_UNLOCK(log, s); | 3105 | spin_unlock(&log->l_icloglock); |
3117 | if (xlog_state_release_iclog(log, iclog)) | 3106 | if (xlog_state_release_iclog(log, iclog)) |
3118 | return XFS_ERROR(EIO); | 3107 | return XFS_ERROR(EIO); |
3119 | *log_flushed = 1; | 3108 | *log_flushed = 1; |
3120 | s = LOG_LOCK(log); | 3109 | spin_lock(&log->l_icloglock); |
3121 | } | 3110 | } |
3122 | } | 3111 | } |
3123 | 3112 | ||
@@ -3129,7 +3118,7 @@ try_again: | |||
3129 | * gotten a log write error. | 3118 | * gotten a log write error. |
3130 | */ | 3119 | */ |
3131 | if (iclog->ic_state & XLOG_STATE_IOERROR) { | 3120 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
3132 | LOG_UNLOCK(log, s); | 3121 | spin_unlock(&log->l_icloglock); |
3133 | return XFS_ERROR(EIO); | 3122 | return XFS_ERROR(EIO); |
3134 | } | 3123 | } |
3135 | XFS_STATS_INC(xs_log_force_sleep); | 3124 | XFS_STATS_INC(xs_log_force_sleep); |
@@ -3143,13 +3132,13 @@ try_again: | |||
3143 | return XFS_ERROR(EIO); | 3132 | return XFS_ERROR(EIO); |
3144 | *log_flushed = 1; | 3133 | *log_flushed = 1; |
3145 | } else { /* just return */ | 3134 | } else { /* just return */ |
3146 | LOG_UNLOCK(log, s); | 3135 | spin_unlock(&log->l_icloglock); |
3147 | } | 3136 | } |
3148 | return 0; | 3137 | return 0; |
3149 | 3138 | ||
3150 | } while (iclog != log->l_iclog); | 3139 | } while (iclog != log->l_iclog); |
3151 | 3140 | ||
3152 | LOG_UNLOCK(log, s); | 3141 | spin_unlock(&log->l_icloglock); |
3153 | return 0; | 3142 | return 0; |
3154 | } /* xlog_state_sync */ | 3143 | } /* xlog_state_sync */ |
3155 | 3144 | ||
@@ -3158,12 +3147,10 @@ try_again: | |||
3158 | * Called when we want to mark the current iclog as being ready to sync to | 3147 | * Called when we want to mark the current iclog as being ready to sync to |
3159 | * disk. | 3148 | * disk. |
3160 | */ | 3149 | */ |
3161 | void | 3150 | STATIC void |
3162 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) | 3151 | xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) |
3163 | { | 3152 | { |
3164 | SPLDECL(s); | 3153 | spin_lock(&log->l_icloglock); |
3165 | |||
3166 | s = LOG_LOCK(log); | ||
3167 | 3154 | ||
3168 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { | 3155 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
3169 | xlog_state_switch_iclogs(log, iclog, 0); | 3156 | xlog_state_switch_iclogs(log, iclog, 0); |
@@ -3172,7 +3159,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) | |||
3172 | (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); | 3159 | (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); |
3173 | } | 3160 | } |
3174 | 3161 | ||
3175 | LOG_UNLOCK(log, s); | 3162 | spin_unlock(&log->l_icloglock); |
3176 | } /* xlog_state_want_sync */ | 3163 | } /* xlog_state_want_sync */ |
3177 | 3164 | ||
3178 | 3165 | ||
@@ -3193,16 +3180,15 @@ xlog_state_ticket_alloc(xlog_t *log) | |||
3193 | xlog_ticket_t *t_list; | 3180 | xlog_ticket_t *t_list; |
3194 | xlog_ticket_t *next; | 3181 | xlog_ticket_t *next; |
3195 | xfs_caddr_t buf; | 3182 | xfs_caddr_t buf; |
3196 | uint i = (NBPP / sizeof(xlog_ticket_t)) - 2; | 3183 | uint i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2; |
3197 | SPLDECL(s); | ||
3198 | 3184 | ||
3199 | /* | 3185 | /* |
3200 | * The kmem_zalloc may sleep, so we shouldn't be holding the | 3186 | * The kmem_zalloc may sleep, so we shouldn't be holding the |
3201 | * global lock. XXXmiken: may want to use zone allocator. | 3187 | * global lock. XXXmiken: may want to use zone allocator. |
3202 | */ | 3188 | */ |
3203 | buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP); | 3189 | buf = (xfs_caddr_t) kmem_zalloc(PAGE_SIZE, KM_SLEEP); |
3204 | 3190 | ||
3205 | s = LOG_LOCK(log); | 3191 | spin_lock(&log->l_icloglock); |
3206 | 3192 | ||
3207 | /* Attach 1st ticket to Q, so we can keep track of allocated memory */ | 3193 | /* Attach 1st ticket to Q, so we can keep track of allocated memory */ |
3208 | t_list = (xlog_ticket_t *)buf; | 3194 | t_list = (xlog_ticket_t *)buf; |
@@ -3231,7 +3217,7 @@ xlog_state_ticket_alloc(xlog_t *log) | |||
3231 | } | 3217 | } |
3232 | t_list->t_next = NULL; | 3218 | t_list->t_next = NULL; |
3233 | log->l_tail = t_list; | 3219 | log->l_tail = t_list; |
3234 | LOG_UNLOCK(log, s); | 3220 | spin_unlock(&log->l_icloglock); |
3235 | } /* xlog_state_ticket_alloc */ | 3221 | } /* xlog_state_ticket_alloc */ |
3236 | 3222 | ||
3237 | 3223 | ||
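One hunk above is not about locking: xlog_state_ticket_alloc() stops using the IRIX-style NBPP page-size macro and carves its ticket batch out of a PAGE_SIZE allocation instead; the surrounding arithmetic is untouched. A hedged sketch of that per-page count, with a parameter standing in for PAGE_SIZE so it compiles outside the kernel tree:

	#include <stddef.h>

	/* Same form as "uint i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2;"
	 * in the hunk above; only the names here are hypothetical.
	 */
	static unsigned int sketch_tickets_per_page(size_t page_size,
						    size_t ticket_size)
	{
		return (unsigned int)(page_size / ticket_size) - 2;
	}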
@@ -3273,7 +3259,7 @@ xlog_ticket_put(xlog_t *log, | |||
3273 | /* | 3259 | /* |
3274 | * Grab ticket off freelist or allocation some more | 3260 | * Grab ticket off freelist or allocation some more |
3275 | */ | 3261 | */ |
3276 | xlog_ticket_t * | 3262 | STATIC xlog_ticket_t * |
3277 | xlog_ticket_get(xlog_t *log, | 3263 | xlog_ticket_get(xlog_t *log, |
3278 | int unit_bytes, | 3264 | int unit_bytes, |
3279 | int cnt, | 3265 | int cnt, |
@@ -3282,15 +3268,14 @@ xlog_ticket_get(xlog_t *log, | |||
3282 | { | 3268 | { |
3283 | xlog_ticket_t *tic; | 3269 | xlog_ticket_t *tic; |
3284 | uint num_headers; | 3270 | uint num_headers; |
3285 | SPLDECL(s); | ||
3286 | 3271 | ||
3287 | alloc: | 3272 | alloc: |
3288 | if (log->l_freelist == NULL) | 3273 | if (log->l_freelist == NULL) |
3289 | xlog_state_ticket_alloc(log); /* potentially sleep */ | 3274 | xlog_state_ticket_alloc(log); /* potentially sleep */ |
3290 | 3275 | ||
3291 | s = LOG_LOCK(log); | 3276 | spin_lock(&log->l_icloglock); |
3292 | if (log->l_freelist == NULL) { | 3277 | if (log->l_freelist == NULL) { |
3293 | LOG_UNLOCK(log, s); | 3278 | spin_unlock(&log->l_icloglock); |
3294 | goto alloc; | 3279 | goto alloc; |
3295 | } | 3280 | } |
3296 | tic = log->l_freelist; | 3281 | tic = log->l_freelist; |
@@ -3298,7 +3283,7 @@ xlog_ticket_get(xlog_t *log, | |||
3298 | if (log->l_freelist == NULL) | 3283 | if (log->l_freelist == NULL) |
3299 | log->l_tail = NULL; | 3284 | log->l_tail = NULL; |
3300 | log->l_ticket_cnt--; | 3285 | log->l_ticket_cnt--; |
3301 | LOG_UNLOCK(log, s); | 3286 | spin_unlock(&log->l_icloglock); |
3302 | 3287 | ||
3303 | /* | 3288 | /* |
3304 | * Permanent reservations have up to 'cnt'-1 active log operations | 3289 | * Permanent reservations have up to 'cnt'-1 active log operations |
@@ -3473,10 +3458,9 @@ xlog_verify_iclog(xlog_t *log, | |||
3473 | __uint8_t clientid; | 3458 | __uint8_t clientid; |
3474 | int len, i, j, k, op_len; | 3459 | int len, i, j, k, op_len; |
3475 | int idx; | 3460 | int idx; |
3476 | SPLDECL(s); | ||
3477 | 3461 | ||
3478 | /* check validity of iclog pointers */ | 3462 | /* check validity of iclog pointers */ |
3479 | s = LOG_LOCK(log); | 3463 | spin_lock(&log->l_icloglock); |
3480 | icptr = log->l_iclog; | 3464 | icptr = log->l_iclog; |
3481 | for (i=0; i < log->l_iclog_bufs; i++) { | 3465 | for (i=0; i < log->l_iclog_bufs; i++) { |
3482 | if (icptr == NULL) | 3466 | if (icptr == NULL) |
@@ -3485,21 +3469,21 @@ xlog_verify_iclog(xlog_t *log, | |||
3485 | } | 3469 | } |
3486 | if (icptr != log->l_iclog) | 3470 | if (icptr != log->l_iclog) |
3487 | xlog_panic("xlog_verify_iclog: corrupt iclog ring"); | 3471 | xlog_panic("xlog_verify_iclog: corrupt iclog ring"); |
3488 | LOG_UNLOCK(log, s); | 3472 | spin_unlock(&log->l_icloglock); |
3489 | 3473 | ||
3490 | /* check log magic numbers */ | 3474 | /* check log magic numbers */ |
3491 | ptr = (xfs_caddr_t) &(iclog->ic_header); | 3475 | if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) |
3492 | if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) | ||
3493 | xlog_panic("xlog_verify_iclog: invalid magic num"); | 3476 | xlog_panic("xlog_verify_iclog: invalid magic num"); |
3494 | 3477 | ||
3495 | for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; | 3478 | ptr = (xfs_caddr_t) &iclog->ic_header; |
3479 | for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; | ||
3496 | ptr += BBSIZE) { | 3480 | ptr += BBSIZE) { |
3497 | if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) | 3481 | if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) |
3498 | xlog_panic("xlog_verify_iclog: unexpected magic num"); | 3482 | xlog_panic("xlog_verify_iclog: unexpected magic num"); |
3499 | } | 3483 | } |
3500 | 3484 | ||
3501 | /* check fields */ | 3485 | /* check fields */ |
3502 | len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT); | 3486 | len = be32_to_cpu(iclog->ic_header.h_num_logops); |
3503 | ptr = iclog->ic_datap; | 3487 | ptr = iclog->ic_datap; |
3504 | base_ptr = ptr; | 3488 | base_ptr = ptr; |
3505 | ophead = (xlog_op_header_t *)ptr; | 3489 | ophead = (xlog_op_header_t *)ptr; |
@@ -3517,9 +3501,11 @@ xlog_verify_iclog(xlog_t *log, | |||
3517 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { | 3501 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
3518 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3502 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3519 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3503 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3520 | clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); | 3504 | clientid = xlog_get_client_id( |
3505 | xhdr[j].hic_xheader.xh_cycle_data[k]); | ||
3521 | } else { | 3506 | } else { |
3522 | clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); | 3507 | clientid = xlog_get_client_id( |
3508 | iclog->ic_header.h_cycle_data[idx]); | ||
3523 | } | 3509 | } |
3524 | } | 3510 | } |
3525 | if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) | 3511 | if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) |
@@ -3531,16 +3517,16 @@ xlog_verify_iclog(xlog_t *log, | |||
3531 | field_offset = (__psint_t) | 3517 | field_offset = (__psint_t) |
3532 | ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); | 3518 | ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); |
3533 | if (syncing == B_FALSE || (field_offset & 0x1ff)) { | 3519 | if (syncing == B_FALSE || (field_offset & 0x1ff)) { |
3534 | op_len = INT_GET(ophead->oh_len, ARCH_CONVERT); | 3520 | op_len = be32_to_cpu(ophead->oh_len); |
3535 | } else { | 3521 | } else { |
3536 | idx = BTOBBT((__psint_t)&ophead->oh_len - | 3522 | idx = BTOBBT((__psint_t)&ophead->oh_len - |
3537 | (__psint_t)iclog->ic_datap); | 3523 | (__psint_t)iclog->ic_datap); |
3538 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { | 3524 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
3539 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3525 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3540 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3526 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3541 | op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); | 3527 | op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); |
3542 | } else { | 3528 | } else { |
3543 | op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); | 3529 | op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); |
3544 | } | 3530 | } |
3545 | } | 3531 | } |
3546 | ptr += sizeof(xlog_op_header_t) + op_len; | 3532 | ptr += sizeof(xlog_op_header_t) + op_len; |
@@ -3549,7 +3535,7 @@ xlog_verify_iclog(xlog_t *log, | |||
3549 | #endif | 3535 | #endif |
3550 | 3536 | ||
3551 | /* | 3537 | /* |
3552 | * Mark all iclogs IOERROR. LOG_LOCK is held by the caller. | 3538 | * Mark all iclogs IOERROR. l_icloglock is held by the caller. |
3553 | */ | 3539 | */ |
3554 | STATIC int | 3540 | STATIC int |
3555 | xlog_state_ioerror( | 3541 | xlog_state_ioerror( |
@@ -3597,8 +3583,6 @@ xfs_log_force_umount( | |||
3597 | xlog_t *log; | 3583 | xlog_t *log; |
3598 | int retval; | 3584 | int retval; |
3599 | int dummy; | 3585 | int dummy; |
3600 | SPLDECL(s); | ||
3601 | SPLDECL(s2); | ||
3602 | 3586 | ||
3603 | log = mp->m_log; | 3587 | log = mp->m_log; |
3604 | 3588 | ||
@@ -3627,8 +3611,8 @@ xfs_log_force_umount( | |||
3627 | * before we mark the filesystem SHUTDOWN and wake | 3611 | * before we mark the filesystem SHUTDOWN and wake |
3628 | * everybody up to tell the bad news. | 3612 | * everybody up to tell the bad news. |
3629 | */ | 3613 | */ |
3630 | s = GRANT_LOCK(log); | 3614 | spin_lock(&log->l_grant_lock); |
3631 | s2 = LOG_LOCK(log); | 3615 | spin_lock(&log->l_icloglock); |
3632 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; | 3616 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; |
3633 | XFS_BUF_DONE(mp->m_sb_bp); | 3617 | XFS_BUF_DONE(mp->m_sb_bp); |
3634 | /* | 3618 | /* |
@@ -3644,7 +3628,7 @@ xfs_log_force_umount( | |||
3644 | */ | 3628 | */ |
3645 | if (logerror) | 3629 | if (logerror) |
3646 | retval = xlog_state_ioerror(log); | 3630 | retval = xlog_state_ioerror(log); |
3647 | LOG_UNLOCK(log, s2); | 3631 | spin_unlock(&log->l_icloglock); |
3648 | 3632 | ||
3649 | /* | 3633 | /* |
3650 | * We don't want anybody waiting for log reservations | 3634 | * We don't want anybody waiting for log reservations |
@@ -3667,7 +3651,7 @@ xfs_log_force_umount( | |||
3667 | tic = tic->t_next; | 3651 | tic = tic->t_next; |
3668 | } while (tic != log->l_write_headq); | 3652 | } while (tic != log->l_write_headq); |
3669 | } | 3653 | } |
3670 | GRANT_UNLOCK(log, s); | 3654 | spin_unlock(&log->l_grant_lock); |
3671 | 3655 | ||
3672 | if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { | 3656 | if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { |
3673 | ASSERT(!logerror); | 3657 | ASSERT(!logerror); |
@@ -3676,9 +3660,9 @@ xfs_log_force_umount( | |||
3676 | * log down completely. | 3660 | * log down completely. |
3677 | */ | 3661 | */ |
3678 | xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy); | 3662 | xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy); |
3679 | s2 = LOG_LOCK(log); | 3663 | spin_lock(&log->l_icloglock); |
3680 | retval = xlog_state_ioerror(log); | 3664 | retval = xlog_state_ioerror(log); |
3681 | LOG_UNLOCK(log, s2); | 3665 | spin_unlock(&log->l_icloglock); |
3682 | } | 3666 | } |
3683 | /* | 3667 | /* |
3684 | * Wake up everybody waiting on xfs_log_force. | 3668 | * Wake up everybody waiting on xfs_log_force. |
@@ -3691,13 +3675,13 @@ xfs_log_force_umount( | |||
3691 | { | 3675 | { |
3692 | xlog_in_core_t *iclog; | 3676 | xlog_in_core_t *iclog; |
3693 | 3677 | ||
3694 | s = LOG_LOCK(log); | 3678 | spin_lock(&log->l_icloglock); |
3695 | iclog = log->l_iclog; | 3679 | iclog = log->l_iclog; |
3696 | do { | 3680 | do { |
3697 | ASSERT(iclog->ic_callback == 0); | 3681 | ASSERT(iclog->ic_callback == 0); |
3698 | iclog = iclog->ic_next; | 3682 | iclog = iclog->ic_next; |
3699 | } while (iclog != log->l_iclog); | 3683 | } while (iclog != log->l_iclog); |
3700 | LOG_UNLOCK(log, s); | 3684 | spin_unlock(&log->l_icloglock); |
3701 | } | 3685 | } |
3702 | #endif | 3686 | #endif |
3703 | /* return non-zero if log IOERROR transition had already happened */ | 3687 | /* return non-zero if log IOERROR transition had already happened */ |
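Two other threads run through the xfs_log.c hunks besides the spinlock conversion. First, stores into the on-disk iclog header switch from INT_SET(..., ARCH_CONVERT, ...) to explicit cpu_to_be32()/cpu_to_be64() on fields that are retyped as __be32/__be64 in xfs_log_priv.h below, so a missed byte-swap shows up as a type mismatch (e.g. under sparse) rather than silent corruption. Second, helpers used only from this file (xlog_state_put_ticket, xlog_state_release_iclog, xlog_state_sync, xlog_state_want_sync, xlog_ticket_get) are demoted from global to STATIC linkage. A hedged sketch of the store pattern, with a cut-down header struct:

	#include <linux/types.h>
	#include <asm/byteorder.h>	/* cpu_to_be32()/cpu_to_be64() */

	/* Hypothetical two-field header; the real xlog_rec_header_t is in
	 * the xfs_log_priv.h hunks below.
	 */
	struct sketch_rec_header {
		__be64	h_tail_lsn;	/* stored in disk (big-endian) order */
		__be32	h_prev_block;
	};

	static void sketch_fill_header(struct sketch_rec_header *hdr,
				       u64 tail_lsn, u32 prev_block)
	{
		/* Disk byte order is fixed once, at the assignment; readers
		 * use be64_to_cpu()/be32_to_cpu() instead of INT_GET().
		 */
		hdr->h_tail_lsn   = cpu_to_be64(tail_lsn);
		hdr->h_prev_block = cpu_to_be32(prev_block);
	}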
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index ebbe93f4f97b..4cdac048df5e 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -22,8 +22,9 @@ | |||
22 | 22 | ||
23 | #define CYCLE_LSN(lsn) ((uint)((lsn)>>32)) | 23 | #define CYCLE_LSN(lsn) ((uint)((lsn)>>32)) |
24 | #define BLOCK_LSN(lsn) ((uint)(lsn)) | 24 | #define BLOCK_LSN(lsn) ((uint)(lsn)) |
25 | |||
25 | /* this is used in a spot where we might otherwise double-endian-flip */ | 26 | /* this is used in a spot where we might otherwise double-endian-flip */ |
26 | #define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0]) | 27 | #define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0]) |
27 | 28 | ||
28 | #ifdef __KERNEL__ | 29 | #ifdef __KERNEL__ |
29 | /* | 30 | /* |
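In the xfs_log.h hunk shown here, the substantive change is the cast inside CYCLE_LSN_DISK(): it now yields a __be32 instead of a bare uint, matching the on-disk typing introduced below. One reading of the two cycle accessors, as a hedged sketch (names are hypothetical; the macros above remain the authoritative definitions):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	typedef __s64 sketch_lsn_t;		/* stands in for xfs_lsn_t */

	/* CYCLE_LSN: input is a host-order LSN, cycle is the high 32 bits. */
	static inline u32 sketch_cycle_lsn(sketch_lsn_t lsn)
	{
		return (u32)(lsn >> 32);
	}

	/* CYCLE_LSN_DISK: input is already in disk (big-endian) order, so
	 * the first 32-bit word is the cycle and is deliberately left as
	 * __be32 -- converting it here is the "double-endian-flip" the
	 * comment warns about.
	 */
	static inline __be32 sketch_cycle_lsn_disk(__be64 disk_lsn)
	{
		return *(__be32 *)&disk_lsn;
	}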
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 752f964b3699..e008233ee249 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
@@ -55,32 +55,21 @@ struct xfs_mount; | |||
55 | BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \ | 55 | BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \ |
56 | XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) | 56 | XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) |
57 | 57 | ||
58 | /* | ||
59 | * set lsns | ||
60 | */ | ||
61 | 58 | ||
62 | #define ASSIGN_ANY_LSN_HOST(lsn,cycle,block) \ | 59 | static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block) |
63 | { \ | 60 | { |
64 | (lsn) = ((xfs_lsn_t)(cycle)<<32)|(block); \ | 61 | return ((xfs_lsn_t)cycle << 32) | block; |
65 | } | 62 | } |
66 | #define ASSIGN_ANY_LSN_DISK(lsn,cycle,block) \ | ||
67 | { \ | ||
68 | INT_SET(((uint *)&(lsn))[0], ARCH_CONVERT, (cycle)); \ | ||
69 | INT_SET(((uint *)&(lsn))[1], ARCH_CONVERT, (block)); \ | ||
70 | } | ||
71 | #define ASSIGN_LSN(lsn,log) \ | ||
72 | ASSIGN_ANY_LSN_DISK(lsn,(log)->l_curr_cycle,(log)->l_curr_block); | ||
73 | |||
74 | #define XLOG_SET(f,b) (((f) & (b)) == (b)) | ||
75 | |||
76 | #define GET_CYCLE(ptr, arch) \ | ||
77 | (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \ | ||
78 | INT_GET(*((uint *)(ptr)+1), arch) : \ | ||
79 | INT_GET(*(uint *)(ptr), arch) \ | ||
80 | ) | ||
81 | 63 | ||
82 | #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) | 64 | static inline uint xlog_get_cycle(char *ptr) |
65 | { | ||
66 | if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) | ||
67 | return be32_to_cpu(*((__be32 *)ptr + 1)); | ||
68 | else | ||
69 | return be32_to_cpu(*(__be32 *)ptr); | ||
70 | } | ||
83 | 71 | ||
72 | #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) | ||
84 | 73 | ||
85 | #ifdef __KERNEL__ | 74 | #ifdef __KERNEL__ |
86 | 75 | ||
@@ -96,19 +85,10 @@ struct xfs_mount; | |||
96 | * | 85 | * |
97 | * this has endian issues, of course. | 86 | * this has endian issues, of course. |
98 | */ | 87 | */ |
99 | 88 | static inline uint xlog_get_client_id(__be32 i) | |
100 | #ifndef XFS_NATIVE_HOST | 89 | { |
101 | #define GET_CLIENT_ID(i,arch) \ | 90 | return be32_to_cpu(i) >> 24; |
102 | ((i) & 0xff) | 91 | } |
103 | #else | ||
104 | #define GET_CLIENT_ID(i,arch) \ | ||
105 | ((i) >> 24) | ||
106 | #endif | ||
107 | |||
108 | #define GRANT_LOCK(log) mutex_spinlock(&(log)->l_grant_lock) | ||
109 | #define GRANT_UNLOCK(log, s) mutex_spinunlock(&(log)->l_grant_lock, s) | ||
110 | #define LOG_LOCK(log) mutex_spinlock(&(log)->l_icloglock) | ||
111 | #define LOG_UNLOCK(log, s) mutex_spinunlock(&(log)->l_icloglock, s) | ||
112 | 92 | ||
113 | #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) | 93 | #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) |
114 | #define xlog_exit(args...) cmn_err(CE_PANIC, ## args) | 94 | #define xlog_exit(args...) cmn_err(CE_PANIC, ## args) |
@@ -285,11 +265,11 @@ typedef struct xlog_ticket { | |||
285 | 265 | ||
286 | 266 | ||
287 | typedef struct xlog_op_header { | 267 | typedef struct xlog_op_header { |
288 | xlog_tid_t oh_tid; /* transaction id of operation : 4 b */ | 268 | __be32 oh_tid; /* transaction id of operation : 4 b */ |
289 | int oh_len; /* bytes in data region : 4 b */ | 269 | __be32 oh_len; /* bytes in data region : 4 b */ |
290 | __uint8_t oh_clientid; /* who sent me this : 1 b */ | 270 | __u8 oh_clientid; /* who sent me this : 1 b */ |
291 | __uint8_t oh_flags; /* : 1 b */ | 271 | __u8 oh_flags; /* : 1 b */ |
292 | ushort oh_res2; /* 32 bit align : 2 b */ | 272 | __u16 oh_res2; /* 32 bit align : 2 b */ |
293 | } xlog_op_header_t; | 273 | } xlog_op_header_t; |
294 | 274 | ||
295 | 275 | ||
@@ -307,25 +287,25 @@ typedef struct xlog_op_header { | |||
307 | #endif | 287 | #endif |
308 | 288 | ||
309 | typedef struct xlog_rec_header { | 289 | typedef struct xlog_rec_header { |
310 | uint h_magicno; /* log record (LR) identifier : 4 */ | 290 | __be32 h_magicno; /* log record (LR) identifier : 4 */ |
311 | uint h_cycle; /* write cycle of log : 4 */ | 291 | __be32 h_cycle; /* write cycle of log : 4 */ |
312 | int h_version; /* LR version : 4 */ | 292 | __be32 h_version; /* LR version : 4 */ |
313 | int h_len; /* len in bytes; should be 64-bit aligned: 4 */ | 293 | __be32 h_len; /* len in bytes; should be 64-bit aligned: 4 */ |
314 | xfs_lsn_t h_lsn; /* lsn of this LR : 8 */ | 294 | __be64 h_lsn; /* lsn of this LR : 8 */ |
315 | xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ | 295 | __be64 h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ |
316 | uint h_chksum; /* may not be used; non-zero if used : 4 */ | 296 | __be32 h_chksum; /* may not be used; non-zero if used : 4 */ |
317 | int h_prev_block; /* block number to previous LR : 4 */ | 297 | __be32 h_prev_block; /* block number to previous LR : 4 */ |
318 | int h_num_logops; /* number of log operations in this LR : 4 */ | 298 | __be32 h_num_logops; /* number of log operations in this LR : 4 */ |
319 | uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; | 299 | __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; |
320 | /* new fields */ | 300 | /* new fields */ |
321 | int h_fmt; /* format of log record : 4 */ | 301 | __be32 h_fmt; /* format of log record : 4 */ |
322 | uuid_t h_fs_uuid; /* uuid of FS : 16 */ | 302 | uuid_t h_fs_uuid; /* uuid of FS : 16 */ |
323 | int h_size; /* iclog size : 4 */ | 303 | __be32 h_size; /* iclog size : 4 */ |
324 | } xlog_rec_header_t; | 304 | } xlog_rec_header_t; |
325 | 305 | ||
326 | typedef struct xlog_rec_ext_header { | 306 | typedef struct xlog_rec_ext_header { |
327 | uint xh_cycle; /* write cycle of log : 4 */ | 307 | __be32 xh_cycle; /* write cycle of log : 4 */ |
328 | uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ | 308 | __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ |
329 | } xlog_rec_ext_header_t; | 309 | } xlog_rec_ext_header_t; |
330 | 310 | ||
331 | #ifdef __KERNEL__ | 311 | #ifdef __KERNEL__ |
@@ -415,7 +395,7 @@ typedef struct log { | |||
415 | xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */ | 395 | xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */ |
416 | xlog_ticket_t *l_tail; /* free list of tickets */ | 396 | xlog_ticket_t *l_tail; /* free list of tickets */ |
417 | xlog_in_core_t *l_iclog; /* head log queue */ | 397 | xlog_in_core_t *l_iclog; /* head log queue */ |
418 | lock_t l_icloglock; /* grab to change iclog state */ | 398 | spinlock_t l_icloglock; /* grab to change iclog state */ |
419 | xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed | 399 | xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed |
420 | * buffers */ | 400 | * buffers */ |
421 | xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ | 401 | xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ |
@@ -439,7 +419,7 @@ typedef struct log { | |||
439 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; | 419 | char *l_iclog_bak[XLOG_MAX_ICLOGS]; |
440 | 420 | ||
441 | /* The following block of fields are changed while holding grant_lock */ | 421 | /* The following block of fields are changed while holding grant_lock */ |
442 | lock_t l_grant_lock; | 422 | spinlock_t l_grant_lock; |
443 | xlog_ticket_t *l_reserve_headq; | 423 | xlog_ticket_t *l_reserve_headq; |
444 | xlog_ticket_t *l_write_headq; | 424 | xlog_ticket_t *l_write_headq; |
445 | int l_grant_reserve_cycle; | 425 | int l_grant_reserve_cycle; |
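xfs_log_priv.h is where the typing behind all the endian changes lives: the ASSIGN_ANY_LSN_* and GET_CYCLE/GET_CLIENT_ID macros become the static inline helpers xlog_assign_lsn(), xlog_get_cycle() and xlog_get_client_id(), and the on-disk xlog_op_header_t / xlog_rec_header_t / xlog_rec_ext_header_t fields are annotated as __be32/__be64/__u8/__u16. The usage shape, taken from the xlog_add_record() hunk further down and rewritten as a self-contained hedged sketch:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	typedef __s64 sketch_lsn_t;		/* stands in for xfs_lsn_t */

	/* Same body as the new xlog_assign_lsn(): pack (cycle, block) into
	 * a host-order 64-bit LSN.
	 */
	static inline sketch_lsn_t sketch_assign_lsn(u32 cycle, u32 block)
	{
		return ((sketch_lsn_t)cycle << 32) | block;
	}

	/* As in xlog_add_record(): build the LSN in host order, then convert
	 * to disk order exactly once, at the point of assignment.
	 */
	static void sketch_stamp_record(__be64 *h_lsn, u32 cycle, u32 block)
	{
		*h_lsn = cpu_to_be64(sketch_assign_lsn(cycle, block));
	}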
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 851eca8a7150..b82d5d4d2462 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -198,7 +198,7 @@ xlog_header_check_dump( | |||
198 | cmn_err(CE_DEBUG, " log : uuid = "); | 198 | cmn_err(CE_DEBUG, " log : uuid = "); |
199 | for (b = 0; b < 16; b++) | 199 | for (b = 0; b < 16; b++) |
200 | cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); | 200 | cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); |
201 | cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT)); | 201 | cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt)); |
202 | } | 202 | } |
203 | #else | 203 | #else |
204 | #define xlog_header_check_dump(mp, head) | 204 | #define xlog_header_check_dump(mp, head) |
@@ -212,14 +212,14 @@ xlog_header_check_recover( | |||
212 | xfs_mount_t *mp, | 212 | xfs_mount_t *mp, |
213 | xlog_rec_header_t *head) | 213 | xlog_rec_header_t *head) |
214 | { | 214 | { |
215 | ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); | 215 | ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * IRIX doesn't write the h_fmt field and leaves it zeroed | 218 | * IRIX doesn't write the h_fmt field and leaves it zeroed |
219 | * (XLOG_FMT_UNKNOWN). This stops us from trying to recover | 219 | * (XLOG_FMT_UNKNOWN). This stops us from trying to recover |
220 | * a dirty log created in IRIX. | 220 | * a dirty log created in IRIX. |
221 | */ | 221 | */ |
222 | if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) { | 222 | if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { |
223 | xlog_warn( | 223 | xlog_warn( |
224 | "XFS: dirty log written in incompatible format - can't recover"); | 224 | "XFS: dirty log written in incompatible format - can't recover"); |
225 | xlog_header_check_dump(mp, head); | 225 | xlog_header_check_dump(mp, head); |
@@ -245,7 +245,7 @@ xlog_header_check_mount( | |||
245 | xfs_mount_t *mp, | 245 | xfs_mount_t *mp, |
246 | xlog_rec_header_t *head) | 246 | xlog_rec_header_t *head) |
247 | { | 247 | { |
248 | ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); | 248 | ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); |
249 | 249 | ||
250 | if (uuid_is_nil(&head->h_fs_uuid)) { | 250 | if (uuid_is_nil(&head->h_fs_uuid)) { |
251 | /* | 251 | /* |
@@ -293,7 +293,7 @@ xlog_recover_iodone( | |||
293 | * Note that the algorithm can not be perfect because the disk will not | 293 | * Note that the algorithm can not be perfect because the disk will not |
294 | * necessarily be perfect. | 294 | * necessarily be perfect. |
295 | */ | 295 | */ |
296 | int | 296 | STATIC int |
297 | xlog_find_cycle_start( | 297 | xlog_find_cycle_start( |
298 | xlog_t *log, | 298 | xlog_t *log, |
299 | xfs_buf_t *bp, | 299 | xfs_buf_t *bp, |
@@ -311,7 +311,7 @@ xlog_find_cycle_start( | |||
311 | if ((error = xlog_bread(log, mid_blk, 1, bp))) | 311 | if ((error = xlog_bread(log, mid_blk, 1, bp))) |
312 | return error; | 312 | return error; |
313 | offset = xlog_align(log, mid_blk, 1, bp); | 313 | offset = xlog_align(log, mid_blk, 1, bp); |
314 | mid_cycle = GET_CYCLE(offset, ARCH_CONVERT); | 314 | mid_cycle = xlog_get_cycle(offset); |
315 | if (mid_cycle == cycle) { | 315 | if (mid_cycle == cycle) { |
316 | *last_blk = mid_blk; | 316 | *last_blk = mid_blk; |
317 | /* last_half_cycle == mid_cycle */ | 317 | /* last_half_cycle == mid_cycle */ |
@@ -371,7 +371,7 @@ xlog_find_verify_cycle( | |||
371 | 371 | ||
372 | buf = xlog_align(log, i, bcount, bp); | 372 | buf = xlog_align(log, i, bcount, bp); |
373 | for (j = 0; j < bcount; j++) { | 373 | for (j = 0; j < bcount; j++) { |
374 | cycle = GET_CYCLE(buf, ARCH_CONVERT); | 374 | cycle = xlog_get_cycle(buf); |
375 | if (cycle == stop_on_cycle_no) { | 375 | if (cycle == stop_on_cycle_no) { |
376 | *new_blk = i+j; | 376 | *new_blk = i+j; |
377 | goto out; | 377 | goto out; |
@@ -447,8 +447,7 @@ xlog_find_verify_log_record( | |||
447 | 447 | ||
448 | head = (xlog_rec_header_t *)offset; | 448 | head = (xlog_rec_header_t *)offset; |
449 | 449 | ||
450 | if (XLOG_HEADER_MAGIC_NUM == | 450 | if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno)) |
451 | INT_GET(head->h_magicno, ARCH_CONVERT)) | ||
452 | break; | 451 | break; |
453 | 452 | ||
454 | if (!smallmem) | 453 | if (!smallmem) |
@@ -480,7 +479,7 @@ xlog_find_verify_log_record( | |||
480 | * record do we update last_blk. | 479 | * record do we update last_blk. |
481 | */ | 480 | */ |
482 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 481 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
483 | uint h_size = INT_GET(head->h_size, ARCH_CONVERT); | 482 | uint h_size = be32_to_cpu(head->h_size); |
484 | 483 | ||
485 | xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; | 484 | xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; |
486 | if (h_size % XLOG_HEADER_CYCLE_SIZE) | 485 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
@@ -489,8 +488,8 @@ xlog_find_verify_log_record( | |||
489 | xhdrs = 1; | 488 | xhdrs = 1; |
490 | } | 489 | } |
491 | 490 | ||
492 | if (*last_blk - i + extra_bblks | 491 | if (*last_blk - i + extra_bblks != |
493 | != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs) | 492 | BTOBB(be32_to_cpu(head->h_len)) + xhdrs) |
494 | *last_blk = i; | 493 | *last_blk = i; |
495 | 494 | ||
496 | out: | 495 | out: |
@@ -550,13 +549,13 @@ xlog_find_head( | |||
550 | if ((error = xlog_bread(log, 0, 1, bp))) | 549 | if ((error = xlog_bread(log, 0, 1, bp))) |
551 | goto bp_err; | 550 | goto bp_err; |
552 | offset = xlog_align(log, 0, 1, bp); | 551 | offset = xlog_align(log, 0, 1, bp); |
553 | first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); | 552 | first_half_cycle = xlog_get_cycle(offset); |
554 | 553 | ||
555 | last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ | 554 | last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ |
556 | if ((error = xlog_bread(log, last_blk, 1, bp))) | 555 | if ((error = xlog_bread(log, last_blk, 1, bp))) |
557 | goto bp_err; | 556 | goto bp_err; |
558 | offset = xlog_align(log, last_blk, 1, bp); | 557 | offset = xlog_align(log, last_blk, 1, bp); |
559 | last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); | 558 | last_half_cycle = xlog_get_cycle(offset); |
560 | ASSERT(last_half_cycle != 0); | 559 | ASSERT(last_half_cycle != 0); |
561 | 560 | ||
562 | /* | 561 | /* |
@@ -808,7 +807,7 @@ xlog_find_tail( | |||
808 | if ((error = xlog_bread(log, 0, 1, bp))) | 807 | if ((error = xlog_bread(log, 0, 1, bp))) |
809 | goto bread_err; | 808 | goto bread_err; |
810 | offset = xlog_align(log, 0, 1, bp); | 809 | offset = xlog_align(log, 0, 1, bp); |
811 | if (GET_CYCLE(offset, ARCH_CONVERT) == 0) { | 810 | if (xlog_get_cycle(offset) == 0) { |
812 | *tail_blk = 0; | 811 | *tail_blk = 0; |
813 | /* leave all other log inited values alone */ | 812 | /* leave all other log inited values alone */ |
814 | goto exit; | 813 | goto exit; |
@@ -823,8 +822,7 @@ xlog_find_tail( | |||
823 | if ((error = xlog_bread(log, i, 1, bp))) | 822 | if ((error = xlog_bread(log, i, 1, bp))) |
824 | goto bread_err; | 823 | goto bread_err; |
825 | offset = xlog_align(log, i, 1, bp); | 824 | offset = xlog_align(log, i, 1, bp); |
826 | if (XLOG_HEADER_MAGIC_NUM == | 825 | if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { |
827 | INT_GET(*(uint *)offset, ARCH_CONVERT)) { | ||
828 | found = 1; | 826 | found = 1; |
829 | break; | 827 | break; |
830 | } | 828 | } |
@@ -841,7 +839,7 @@ xlog_find_tail( | |||
841 | goto bread_err; | 839 | goto bread_err; |
842 | offset = xlog_align(log, i, 1, bp); | 840 | offset = xlog_align(log, i, 1, bp); |
843 | if (XLOG_HEADER_MAGIC_NUM == | 841 | if (XLOG_HEADER_MAGIC_NUM == |
844 | INT_GET(*(uint*)offset, ARCH_CONVERT)) { | 842 | be32_to_cpu(*(__be32 *)offset)) { |
845 | found = 2; | 843 | found = 2; |
846 | break; | 844 | break; |
847 | } | 845 | } |
@@ -855,7 +853,7 @@ xlog_find_tail( | |||
855 | 853 | ||
856 | /* find blk_no of tail of log */ | 854 | /* find blk_no of tail of log */ |
857 | rhead = (xlog_rec_header_t *)offset; | 855 | rhead = (xlog_rec_header_t *)offset; |
858 | *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT)); | 856 | *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); |
859 | 857 | ||
860 | /* | 858 | /* |
861 | * Reset log values according to the state of the log when we | 859 | * Reset log values according to the state of the log when we |
@@ -869,11 +867,11 @@ xlog_find_tail( | |||
869 | */ | 867 | */ |
870 | log->l_prev_block = i; | 868 | log->l_prev_block = i; |
871 | log->l_curr_block = (int)*head_blk; | 869 | log->l_curr_block = (int)*head_blk; |
872 | log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT); | 870 | log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); |
873 | if (found == 2) | 871 | if (found == 2) |
874 | log->l_curr_cycle++; | 872 | log->l_curr_cycle++; |
875 | log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT); | 873 | log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); |
876 | log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT); | 874 | log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); |
877 | log->l_grant_reserve_cycle = log->l_curr_cycle; | 875 | log->l_grant_reserve_cycle = log->l_curr_cycle; |
878 | log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); | 876 | log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); |
879 | log->l_grant_write_cycle = log->l_curr_cycle; | 877 | log->l_grant_write_cycle = log->l_curr_cycle; |
@@ -891,8 +889,8 @@ xlog_find_tail( | |||
891 | * unmount record rather than the block after it. | 889 | * unmount record rather than the block after it. |
892 | */ | 890 | */ |
893 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 891 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
894 | int h_size = INT_GET(rhead->h_size, ARCH_CONVERT); | 892 | int h_size = be32_to_cpu(rhead->h_size); |
895 | int h_version = INT_GET(rhead->h_version, ARCH_CONVERT); | 893 | int h_version = be32_to_cpu(rhead->h_version); |
896 | 894 | ||
897 | if ((h_version & XLOG_VERSION_2) && | 895 | if ((h_version & XLOG_VERSION_2) && |
898 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { | 896 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
@@ -906,10 +904,10 @@ xlog_find_tail( | |||
906 | hblks = 1; | 904 | hblks = 1; |
907 | } | 905 | } |
908 | after_umount_blk = (i + hblks + (int) | 906 | after_umount_blk = (i + hblks + (int) |
909 | BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize; | 907 | BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; |
910 | tail_lsn = log->l_tail_lsn; | 908 | tail_lsn = log->l_tail_lsn; |
911 | if (*head_blk == after_umount_blk && | 909 | if (*head_blk == after_umount_blk && |
912 | INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) { | 910 | be32_to_cpu(rhead->h_num_logops) == 1) { |
913 | umount_data_blk = (i + hblks) % log->l_logBBsize; | 911 | umount_data_blk = (i + hblks) % log->l_logBBsize; |
914 | if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { | 912 | if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { |
915 | goto bread_err; | 913 | goto bread_err; |
@@ -922,10 +920,12 @@ xlog_find_tail( | |||
922 | * log records will point recovery to after the | 920 | * log records will point recovery to after the |
923 | * current unmount record. | 921 | * current unmount record. |
924 | */ | 922 | */ |
925 | ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle, | 923 | log->l_tail_lsn = |
926 | after_umount_blk); | 924 | xlog_assign_lsn(log->l_curr_cycle, |
927 | ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle, | 925 | after_umount_blk); |
928 | after_umount_blk); | 926 | log->l_last_sync_lsn = |
927 | xlog_assign_lsn(log->l_curr_cycle, | ||
928 | after_umount_blk); | ||
929 | *tail_blk = after_umount_blk; | 929 | *tail_blk = after_umount_blk; |
930 | 930 | ||
931 | /* | 931 | /* |
@@ -986,7 +986,7 @@ exit: | |||
986 | * -1 => use *blk_no as the first block of the log | 986 | * -1 => use *blk_no as the first block of the log |
987 | * >0 => error has occurred | 987 | * >0 => error has occurred |
988 | */ | 988 | */ |
989 | int | 989 | STATIC int |
990 | xlog_find_zeroed( | 990 | xlog_find_zeroed( |
991 | xlog_t *log, | 991 | xlog_t *log, |
992 | xfs_daddr_t *blk_no) | 992 | xfs_daddr_t *blk_no) |
@@ -1007,7 +1007,7 @@ xlog_find_zeroed( | |||
1007 | if ((error = xlog_bread(log, 0, 1, bp))) | 1007 | if ((error = xlog_bread(log, 0, 1, bp))) |
1008 | goto bp_err; | 1008 | goto bp_err; |
1009 | offset = xlog_align(log, 0, 1, bp); | 1009 | offset = xlog_align(log, 0, 1, bp); |
1010 | first_cycle = GET_CYCLE(offset, ARCH_CONVERT); | 1010 | first_cycle = xlog_get_cycle(offset); |
1011 | if (first_cycle == 0) { /* completely zeroed log */ | 1011 | if (first_cycle == 0) { /* completely zeroed log */ |
1012 | *blk_no = 0; | 1012 | *blk_no = 0; |
1013 | xlog_put_bp(bp); | 1013 | xlog_put_bp(bp); |
@@ -1018,7 +1018,7 @@ xlog_find_zeroed( | |||
1018 | if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) | 1018 | if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) |
1019 | goto bp_err; | 1019 | goto bp_err; |
1020 | offset = xlog_align(log, log_bbnum-1, 1, bp); | 1020 | offset = xlog_align(log, log_bbnum-1, 1, bp); |
1021 | last_cycle = GET_CYCLE(offset, ARCH_CONVERT); | 1021 | last_cycle = xlog_get_cycle(offset); |
1022 | if (last_cycle != 0) { /* log completely written to */ | 1022 | if (last_cycle != 0) { /* log completely written to */ |
1023 | xlog_put_bp(bp); | 1023 | xlog_put_bp(bp); |
1024 | return 0; | 1024 | return 0; |
@@ -1098,13 +1098,13 @@ xlog_add_record( | |||
1098 | xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; | 1098 | xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; |
1099 | 1099 | ||
1100 | memset(buf, 0, BBSIZE); | 1100 | memset(buf, 0, BBSIZE); |
1101 | INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); | 1101 | recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
1102 | INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); | 1102 | recp->h_cycle = cpu_to_be32(cycle); |
1103 | INT_SET(recp->h_version, ARCH_CONVERT, | 1103 | recp->h_version = cpu_to_be32( |
1104 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); | 1104 | XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); |
1105 | ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block); | 1105 | recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); |
1106 | ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block); | 1106 | recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); |
1107 | INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT); | 1107 | recp->h_fmt = cpu_to_be32(XLOG_FMT); |
1108 | memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); | 1108 | memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); |
1109 | } | 1109 | } |
1110 | 1110 | ||
@@ -2211,7 +2211,7 @@ xlog_recover_do_buffer_trans( | |||
2211 | * overlap with future reads of those inodes. | 2211 | * overlap with future reads of those inodes. |
2212 | */ | 2212 | */ |
2213 | if (XFS_DINODE_MAGIC == | 2213 | if (XFS_DINODE_MAGIC == |
2214 | INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) && | 2214 | be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && |
2215 | (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, | 2215 | (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, |
2216 | (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { | 2216 | (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { |
2217 | XFS_BUF_STALE(bp); | 2217 | XFS_BUF_STALE(bp); |
@@ -2581,8 +2581,7 @@ xlog_recover_do_dquot_trans( | |||
2581 | /* | 2581 | /* |
2582 | * This type of quotas was turned off, so ignore this record. | 2582 | * This type of quotas was turned off, so ignore this record. |
2583 | */ | 2583 | */ |
2584 | type = INT_GET(recddq->d_flags, ARCH_CONVERT) & | 2584 | type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); |
2585 | (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); | ||
2586 | ASSERT(type); | 2585 | ASSERT(type); |
2587 | if (log->l_quotaoffs_flag & type) | 2586 | if (log->l_quotaoffs_flag & type) |
2588 | return (0); | 2587 | return (0); |
@@ -2660,7 +2659,6 @@ xlog_recover_do_efi_trans( | |||
2660 | xfs_mount_t *mp; | 2659 | xfs_mount_t *mp; |
2661 | xfs_efi_log_item_t *efip; | 2660 | xfs_efi_log_item_t *efip; |
2662 | xfs_efi_log_format_t *efi_formatp; | 2661 | xfs_efi_log_format_t *efi_formatp; |
2663 | SPLDECL(s); | ||
2664 | 2662 | ||
2665 | if (pass == XLOG_RECOVER_PASS1) { | 2663 | if (pass == XLOG_RECOVER_PASS1) { |
2666 | return 0; | 2664 | return 0; |
@@ -2678,11 +2676,11 @@ xlog_recover_do_efi_trans( | |||
2678 | efip->efi_next_extent = efi_formatp->efi_nextents; | 2676 | efip->efi_next_extent = efi_formatp->efi_nextents; |
2679 | efip->efi_flags |= XFS_EFI_COMMITTED; | 2677 | efip->efi_flags |= XFS_EFI_COMMITTED; |
2680 | 2678 | ||
2681 | AIL_LOCK(mp,s); | 2679 | spin_lock(&mp->m_ail_lock); |
2682 | /* | 2680 | /* |
2683 | * xfs_trans_update_ail() drops the AIL lock. | 2681 | * xfs_trans_update_ail() drops the AIL lock. |
2684 | */ | 2682 | */ |
2685 | xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s); | 2683 | xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn); |
2686 | return 0; | 2684 | return 0; |
2687 | } | 2685 | } |
2688 | 2686 | ||
@@ -2707,7 +2705,6 @@ xlog_recover_do_efd_trans( | |||
2707 | xfs_log_item_t *lip; | 2705 | xfs_log_item_t *lip; |
2708 | int gen; | 2706 | int gen; |
2709 | __uint64_t efi_id; | 2707 | __uint64_t efi_id; |
2710 | SPLDECL(s); | ||
2711 | 2708 | ||
2712 | if (pass == XLOG_RECOVER_PASS1) { | 2709 | if (pass == XLOG_RECOVER_PASS1) { |
2713 | return; | 2710 | return; |
@@ -2725,7 +2722,7 @@ xlog_recover_do_efd_trans( | |||
2725 | * in the AIL. | 2722 | * in the AIL. |
2726 | */ | 2723 | */ |
2727 | mp = log->l_mp; | 2724 | mp = log->l_mp; |
2728 | AIL_LOCK(mp,s); | 2725 | spin_lock(&mp->m_ail_lock); |
2729 | lip = xfs_trans_first_ail(mp, &gen); | 2726 | lip = xfs_trans_first_ail(mp, &gen); |
2730 | while (lip != NULL) { | 2727 | while (lip != NULL) { |
2731 | if (lip->li_type == XFS_LI_EFI) { | 2728 | if (lip->li_type == XFS_LI_EFI) { |
@@ -2735,22 +2732,14 @@ xlog_recover_do_efd_trans( | |||
2735 | * xfs_trans_delete_ail() drops the | 2732 | * xfs_trans_delete_ail() drops the |
2736 | * AIL lock. | 2733 | * AIL lock. |
2737 | */ | 2734 | */ |
2738 | xfs_trans_delete_ail(mp, lip, s); | 2735 | xfs_trans_delete_ail(mp, lip); |
2739 | break; | 2736 | xfs_efi_item_free(efip); |
2737 | return; | ||
2740 | } | 2738 | } |
2741 | } | 2739 | } |
2742 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); | 2740 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); |
2743 | } | 2741 | } |
2744 | 2742 | spin_unlock(&mp->m_ail_lock); | |
2745 | /* | ||
2746 | * If we found it, then free it up. If it wasn't there, it | ||
2747 | * must have been overwritten in the log. Oh well. | ||
2748 | */ | ||
2749 | if (lip != NULL) { | ||
2750 | xfs_efi_item_free(efip); | ||
2751 | } else { | ||
2752 | AIL_UNLOCK(mp, s); | ||
2753 | } | ||
2754 | } | 2743 | } |
2755 | 2744 | ||
2756 | /* | 2745 | /* |
@@ -2897,8 +2886,8 @@ xlog_recover_process_data( | |||
2897 | unsigned long hash; | 2886 | unsigned long hash; |
2898 | uint flags; | 2887 | uint flags; |
2899 | 2888 | ||
2900 | lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT); | 2889 | lp = dp + be32_to_cpu(rhead->h_len); |
2901 | num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT); | 2890 | num_logops = be32_to_cpu(rhead->h_num_logops); |
2902 | 2891 | ||
2903 | /* check the log format matches our own - else we can't recover */ | 2892 | /* check the log format matches our own - else we can't recover */ |
2904 | if (xlog_header_check_recover(log->l_mp, rhead)) | 2893 | if (xlog_header_check_recover(log->l_mp, rhead)) |
@@ -2915,15 +2904,20 @@ xlog_recover_process_data( | |||
2915 | ASSERT(0); | 2904 | ASSERT(0); |
2916 | return (XFS_ERROR(EIO)); | 2905 | return (XFS_ERROR(EIO)); |
2917 | } | 2906 | } |
2918 | tid = INT_GET(ohead->oh_tid, ARCH_CONVERT); | 2907 | tid = be32_to_cpu(ohead->oh_tid); |
2919 | hash = XLOG_RHASH(tid); | 2908 | hash = XLOG_RHASH(tid); |
2920 | trans = xlog_recover_find_tid(rhash[hash], tid); | 2909 | trans = xlog_recover_find_tid(rhash[hash], tid); |
2921 | if (trans == NULL) { /* not found; add new tid */ | 2910 | if (trans == NULL) { /* not found; add new tid */ |
2922 | if (ohead->oh_flags & XLOG_START_TRANS) | 2911 | if (ohead->oh_flags & XLOG_START_TRANS) |
2923 | xlog_recover_new_tid(&rhash[hash], tid, | 2912 | xlog_recover_new_tid(&rhash[hash], tid, |
2924 | INT_GET(rhead->h_lsn, ARCH_CONVERT)); | 2913 | be64_to_cpu(rhead->h_lsn)); |
2925 | } else { | 2914 | } else { |
2926 | ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp); | 2915 | if (dp + be32_to_cpu(ohead->oh_len) > lp) { |
2916 | xlog_warn( | ||
2917 | "XFS: xlog_recover_process_data: bad length"); | ||
2918 | WARN_ON(1); | ||
2919 | return (XFS_ERROR(EIO)); | ||
2920 | } | ||
2927 | flags = ohead->oh_flags & ~XLOG_END_TRANS; | 2921 | flags = ohead->oh_flags & ~XLOG_END_TRANS; |
2928 | if (flags & XLOG_WAS_CONT_TRANS) | 2922 | if (flags & XLOG_WAS_CONT_TRANS) |
2929 | flags &= ~XLOG_CONTINUE_TRANS; | 2923 | flags &= ~XLOG_CONTINUE_TRANS; |
@@ -2937,8 +2931,7 @@ xlog_recover_process_data( | |||
2937 | break; | 2931 | break; |
2938 | case XLOG_WAS_CONT_TRANS: | 2932 | case XLOG_WAS_CONT_TRANS: |
2939 | error = xlog_recover_add_to_cont_trans(trans, | 2933 | error = xlog_recover_add_to_cont_trans(trans, |
2940 | dp, INT_GET(ohead->oh_len, | 2934 | dp, be32_to_cpu(ohead->oh_len)); |
2941 | ARCH_CONVERT)); | ||
2942 | break; | 2935 | break; |
2943 | case XLOG_START_TRANS: | 2936 | case XLOG_START_TRANS: |
2944 | xlog_warn( | 2937 | xlog_warn( |
@@ -2949,8 +2942,7 @@ xlog_recover_process_data( | |||
2949 | case 0: | 2942 | case 0: |
2950 | case XLOG_CONTINUE_TRANS: | 2943 | case XLOG_CONTINUE_TRANS: |
2951 | error = xlog_recover_add_to_trans(trans, | 2944 | error = xlog_recover_add_to_trans(trans, |
2952 | dp, INT_GET(ohead->oh_len, | 2945 | dp, be32_to_cpu(ohead->oh_len)); |
2953 | ARCH_CONVERT)); | ||
2954 | break; | 2946 | break; |
2955 | default: | 2947 | default: |
2956 | xlog_warn( | 2948 | xlog_warn( |
@@ -2962,7 +2954,7 @@ xlog_recover_process_data( | |||
2962 | if (error) | 2954 | if (error) |
2963 | return error; | 2955 | return error; |
2964 | } | 2956 | } |
2965 | dp += INT_GET(ohead->oh_len, ARCH_CONVERT); | 2957 | dp += be32_to_cpu(ohead->oh_len); |
2966 | num_logops--; | 2958 | num_logops--; |
2967 | } | 2959 | } |
2968 | return 0; | 2960 | return 0; |
@@ -3075,10 +3067,9 @@ xlog_recover_process_efis( | |||
3075 | xfs_efi_log_item_t *efip; | 3067 | xfs_efi_log_item_t *efip; |
3076 | int gen; | 3068 | int gen; |
3077 | xfs_mount_t *mp; | 3069 | xfs_mount_t *mp; |
3078 | SPLDECL(s); | ||
3079 | 3070 | ||
3080 | mp = log->l_mp; | 3071 | mp = log->l_mp; |
3081 | AIL_LOCK(mp,s); | 3072 | spin_lock(&mp->m_ail_lock); |
3082 | 3073 | ||
3083 | lip = xfs_trans_first_ail(mp, &gen); | 3074 | lip = xfs_trans_first_ail(mp, &gen); |
3084 | while (lip != NULL) { | 3075 | while (lip != NULL) { |
@@ -3099,12 +3090,12 @@ xlog_recover_process_efis( | |||
3099 | continue; | 3090 | continue; |
3100 | } | 3091 | } |
3101 | 3092 | ||
3102 | AIL_UNLOCK(mp, s); | 3093 | spin_unlock(&mp->m_ail_lock); |
3103 | xlog_recover_process_efi(mp, efip); | 3094 | xlog_recover_process_efi(mp, efip); |
3104 | AIL_LOCK(mp,s); | 3095 | spin_lock(&mp->m_ail_lock); |
3105 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); | 3096 | lip = xfs_trans_next_ail(mp, lip, &gen, NULL); |
3106 | } | 3097 | } |
3107 | AIL_UNLOCK(mp, s); | 3098 | spin_unlock(&mp->m_ail_lock); |
3108 | } | 3099 | } |
3109 | 3100 | ||
3110 | /* | 3101 | /* |
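In xlog_recover_process_efis() the AIL_LOCK/AIL_UNLOCK macros and their SPLDECL(s) saved state give way to plain spin_lock/spin_unlock on m_ail_lock, and the lock is still dropped around xlog_recover_process_efi() because processing an EFI can block; the gen cookie passed to xfs_trans_next_ail is what revalidates the traversal while the lock is down. A hedged pthread analogue of the drop-and-reacquire pattern (process_item, the item list and process_all are invented for illustration):

#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct item { struct item *next; };
static struct item *head;

static void process_item(struct item *ip)
{
	(void)ip;
	usleep(1000);	/* stands in for work that may sleep or block */
}

static void process_all(void)
{
	struct item *ip;

	pthread_mutex_lock(&list_lock);
	for (ip = head; ip != NULL; ip = ip->next) {
		/*
		 * Drop the lock around the blocking call, then retake it.
		 * The kernel code revalidates the walk with a generation
		 * count; this sketch assumes the list does not change.
		 */
		pthread_mutex_unlock(&list_lock);
		process_item(ip);
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	process_all();
	return 0;
}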
@@ -3315,16 +3306,16 @@ xlog_pack_data_checksum( | |||
3315 | int size) | 3306 | int size) |
3316 | { | 3307 | { |
3317 | int i; | 3308 | int i; |
3318 | uint *up; | 3309 | __be32 *up; |
3319 | uint chksum = 0; | 3310 | uint chksum = 0; |
3320 | 3311 | ||
3321 | up = (uint *)iclog->ic_datap; | 3312 | up = (__be32 *)iclog->ic_datap; |
3322 | /* divide length by 4 to get # words */ | 3313 | /* divide length by 4 to get # words */ |
3323 | for (i = 0; i < (size >> 2); i++) { | 3314 | for (i = 0; i < (size >> 2); i++) { |
3324 | chksum ^= INT_GET(*up, ARCH_CONVERT); | 3315 | chksum ^= be32_to_cpu(*up); |
3325 | up++; | 3316 | up++; |
3326 | } | 3317 | } |
3327 | INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum); | 3318 | iclog->ic_header.h_chksum = cpu_to_be32(chksum); |
3328 | } | 3319 | } |
3329 | #else | 3320 | #else |
3330 | #define xlog_pack_data_checksum(log, iclog, size) | 3321 | #define xlog_pack_data_checksum(log, iclog, size) |
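The checksum hunks above swap the old INT_GET(x, ARCH_CONVERT)/INT_SET macros for the standard kernel helpers be32_to_cpu()/cpu_to_be32() and retype the word pointer to __be32 so sparse can verify every conversion. A small user-space sketch of the same XOR checksum over big-endian 32-bit words, using be32toh()/htobe32() from <endian.h> as stand-ins for the kernel helpers; log_checksum and the sample buffer are illustrative only:

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* XOR together the 32-bit big-endian words of a log record body. */
static uint32_t log_checksum(const uint32_t *up, size_t len_bytes)
{
	uint32_t chksum = 0;
	size_t i;

	/* divide length by 4 to get # words, as xlog_pack_data_checksum() does */
	for (i = 0; i < len_bytes >> 2; i++)
		chksum ^= be32toh(up[i]);
	return chksum;
}

int main(void)
{
	uint32_t buf[4] = { htobe32(1), htobe32(2), htobe32(3), htobe32(4) };
	uint32_t disk = htobe32(log_checksum(buf, sizeof(buf)));

	printf("h_chksum on disk: 0x%08" PRIx32 "\n", be32toh(disk));
	return 0;
}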
@@ -3341,7 +3332,7 @@ xlog_pack_data( | |||
3341 | { | 3332 | { |
3342 | int i, j, k; | 3333 | int i, j, k; |
3343 | int size = iclog->ic_offset + roundoff; | 3334 | int size = iclog->ic_offset + roundoff; |
3344 | uint cycle_lsn; | 3335 | __be32 cycle_lsn; |
3345 | xfs_caddr_t dp; | 3336 | xfs_caddr_t dp; |
3346 | xlog_in_core_2_t *xhdr; | 3337 | xlog_in_core_2_t *xhdr; |
3347 | 3338 | ||
@@ -3352,8 +3343,8 @@ xlog_pack_data( | |||
3352 | dp = iclog->ic_datap; | 3343 | dp = iclog->ic_datap; |
3353 | for (i = 0; i < BTOBB(size) && | 3344 | for (i = 0; i < BTOBB(size) && |
3354 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { | 3345 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { |
3355 | iclog->ic_header.h_cycle_data[i] = *(uint *)dp; | 3346 | iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; |
3356 | *(uint *)dp = cycle_lsn; | 3347 | *(__be32 *)dp = cycle_lsn; |
3357 | dp += BBSIZE; | 3348 | dp += BBSIZE; |
3358 | } | 3349 | } |
3359 | 3350 | ||
@@ -3362,8 +3353,8 @@ xlog_pack_data( | |||
3362 | for ( ; i < BTOBB(size); i++) { | 3353 | for ( ; i < BTOBB(size); i++) { |
3363 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3354 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3364 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3355 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3365 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp; | 3356 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; |
3366 | *(uint *)dp = cycle_lsn; | 3357 | *(__be32 *)dp = cycle_lsn; |
3367 | dp += BBSIZE; | 3358 | dp += BBSIZE; |
3368 | } | 3359 | } |
3369 | 3360 | ||
@@ -3380,21 +3371,21 @@ xlog_unpack_data_checksum( | |||
3380 | xfs_caddr_t dp, | 3371 | xfs_caddr_t dp, |
3381 | xlog_t *log) | 3372 | xlog_t *log) |
3382 | { | 3373 | { |
3383 | uint *up = (uint *)dp; | 3374 | __be32 *up = (__be32 *)dp; |
3384 | uint chksum = 0; | 3375 | uint chksum = 0; |
3385 | int i; | 3376 | int i; |
3386 | 3377 | ||
3387 | /* divide length by 4 to get # words */ | 3378 | /* divide length by 4 to get # words */ |
3388 | for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) { | 3379 | for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) { |
3389 | chksum ^= INT_GET(*up, ARCH_CONVERT); | 3380 | chksum ^= be32_to_cpu(*up); |
3390 | up++; | 3381 | up++; |
3391 | } | 3382 | } |
3392 | if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) { | 3383 | if (chksum != be32_to_cpu(rhead->h_chksum)) { |
3393 | if (rhead->h_chksum || | 3384 | if (rhead->h_chksum || |
3394 | ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { | 3385 | ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { |
3395 | cmn_err(CE_DEBUG, | 3386 | cmn_err(CE_DEBUG, |
3396 | "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", | 3387 | "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", |
3397 | INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum); | 3388 | be32_to_cpu(rhead->h_chksum), chksum); |
3398 | cmn_err(CE_DEBUG, | 3389 | cmn_err(CE_DEBUG, |
3399 | "XFS: Disregard message if filesystem was created with non-DEBUG kernel"); | 3390 | "XFS: Disregard message if filesystem was created with non-DEBUG kernel"); |
3400 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 3391 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
@@ -3418,18 +3409,18 @@ xlog_unpack_data( | |||
3418 | int i, j, k; | 3409 | int i, j, k; |
3419 | xlog_in_core_2_t *xhdr; | 3410 | xlog_in_core_2_t *xhdr; |
3420 | 3411 | ||
3421 | for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) && | 3412 | for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && |
3422 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { | 3413 | i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { |
3423 | *(uint *)dp = *(uint *)&rhead->h_cycle_data[i]; | 3414 | *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; |
3424 | dp += BBSIZE; | 3415 | dp += BBSIZE; |
3425 | } | 3416 | } |
3426 | 3417 | ||
3427 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { | 3418 | if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { |
3428 | xhdr = (xlog_in_core_2_t *)rhead; | 3419 | xhdr = (xlog_in_core_2_t *)rhead; |
3429 | for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) { | 3420 | for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { |
3430 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3421 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3431 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); | 3422 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
3432 | *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; | 3423 | *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; |
3433 | dp += BBSIZE; | 3424 | dp += BBSIZE; |
3434 | } | 3425 | } |
3435 | } | 3426 | } |
@@ -3445,24 +3436,21 @@ xlog_valid_rec_header( | |||
3445 | { | 3436 | { |
3446 | int hlen; | 3437 | int hlen; |
3447 | 3438 | ||
3448 | if (unlikely( | 3439 | if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { |
3449 | (INT_GET(rhead->h_magicno, ARCH_CONVERT) != | ||
3450 | XLOG_HEADER_MAGIC_NUM))) { | ||
3451 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", | 3440 | XFS_ERROR_REPORT("xlog_valid_rec_header(1)", |
3452 | XFS_ERRLEVEL_LOW, log->l_mp); | 3441 | XFS_ERRLEVEL_LOW, log->l_mp); |
3453 | return XFS_ERROR(EFSCORRUPTED); | 3442 | return XFS_ERROR(EFSCORRUPTED); |
3454 | } | 3443 | } |
3455 | if (unlikely( | 3444 | if (unlikely( |
3456 | (!rhead->h_version || | 3445 | (!rhead->h_version || |
3457 | (INT_GET(rhead->h_version, ARCH_CONVERT) & | 3446 | (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { |
3458 | (~XLOG_VERSION_OKBITS)) != 0))) { | ||
3459 | xlog_warn("XFS: %s: unrecognised log version (%d).", | 3447 | xlog_warn("XFS: %s: unrecognised log version (%d).", |
3460 | __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT)); | 3448 | __FUNCTION__, be32_to_cpu(rhead->h_version)); |
3461 | return XFS_ERROR(EIO); | 3449 | return XFS_ERROR(EIO); |
3462 | } | 3450 | } |
3463 | 3451 | ||
3464 | /* LR body must have data or it wouldn't have been written */ | 3452 | /* LR body must have data or it wouldn't have been written */ |
3465 | hlen = INT_GET(rhead->h_len, ARCH_CONVERT); | 3453 | hlen = be32_to_cpu(rhead->h_len); |
3466 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { | 3454 | if (unlikely( hlen <= 0 || hlen > INT_MAX )) { |
3467 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", | 3455 | XFS_ERROR_REPORT("xlog_valid_rec_header(2)", |
3468 | XFS_ERRLEVEL_LOW, log->l_mp); | 3456 | XFS_ERRLEVEL_LOW, log->l_mp); |
@@ -3522,9 +3510,8 @@ xlog_do_recovery_pass( | |||
3522 | error = xlog_valid_rec_header(log, rhead, tail_blk); | 3510 | error = xlog_valid_rec_header(log, rhead, tail_blk); |
3523 | if (error) | 3511 | if (error) |
3524 | goto bread_err1; | 3512 | goto bread_err1; |
3525 | h_size = INT_GET(rhead->h_size, ARCH_CONVERT); | 3513 | h_size = be32_to_cpu(rhead->h_size); |
3526 | if ((INT_GET(rhead->h_version, ARCH_CONVERT) | 3514 | if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && |
3527 | & XLOG_VERSION_2) && | ||
3528 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { | 3515 | (h_size > XLOG_HEADER_CYCLE_SIZE)) { |
3529 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; | 3516 | hblks = h_size / XLOG_HEADER_CYCLE_SIZE; |
3530 | if (h_size % XLOG_HEADER_CYCLE_SIZE) | 3517 | if (h_size % XLOG_HEADER_CYCLE_SIZE) |
@@ -3561,7 +3548,7 @@ xlog_do_recovery_pass( | |||
3561 | goto bread_err2; | 3548 | goto bread_err2; |
3562 | 3549 | ||
3563 | /* blocks in data section */ | 3550 | /* blocks in data section */ |
3564 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3551 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3565 | error = xlog_bread(log, blk_no + hblks, bblks, dbp); | 3552 | error = xlog_bread(log, blk_no + hblks, bblks, dbp); |
3566 | if (error) | 3553 | if (error) |
3567 | goto bread_err2; | 3554 | goto bread_err2; |
@@ -3636,7 +3623,7 @@ xlog_do_recovery_pass( | |||
3636 | if (error) | 3623 | if (error) |
3637 | goto bread_err2; | 3624 | goto bread_err2; |
3638 | 3625 | ||
3639 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3626 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3640 | blk_no += hblks; | 3627 | blk_no += hblks; |
3641 | 3628 | ||
3642 | /* Read in data for log record */ | 3629 | /* Read in data for log record */ |
@@ -3707,7 +3694,7 @@ xlog_do_recovery_pass( | |||
3707 | error = xlog_valid_rec_header(log, rhead, blk_no); | 3694 | error = xlog_valid_rec_header(log, rhead, blk_no); |
3708 | if (error) | 3695 | if (error) |
3709 | goto bread_err2; | 3696 | goto bread_err2; |
3710 | bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); | 3697 | bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); |
3711 | if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) | 3698 | if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) |
3712 | goto bread_err2; | 3699 | goto bread_err2; |
3713 | offset = xlog_align(log, blk_no+hblks, bblks, dbp); | 3700 | offset = xlog_align(log, blk_no+hblks, bblks, dbp); |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ebdb76da527c..6409b3762995 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -136,15 +136,9 @@ xfs_mount_init(void) | |||
136 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; | 136 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; |
137 | } | 137 | } |
138 | 138 | ||
139 | AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail"); | 139 | spin_lock_init(&mp->m_sb_lock); |
140 | spinlock_init(&mp->m_sb_lock, "xfs_sb"); | ||
141 | mutex_init(&mp->m_ilock); | 140 | mutex_init(&mp->m_ilock); |
142 | mutex_init(&mp->m_growlock); | 141 | mutex_init(&mp->m_growlock); |
143 | /* | ||
144 | * Initialize the AIL. | ||
145 | */ | ||
146 | xfs_trans_ail_init(mp); | ||
147 | |||
148 | atomic_set(&mp->m_active_trans, 0); | 142 | atomic_set(&mp->m_active_trans, 0); |
149 | 143 | ||
150 | return mp; | 144 | return mp; |
@@ -171,7 +165,7 @@ xfs_mount_free( | |||
171 | sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); | 165 | sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); |
172 | } | 166 | } |
173 | 167 | ||
174 | AIL_LOCK_DESTROY(&mp->m_ail_lock); | 168 | spinlock_destroy(&mp->m_ail_lock); |
175 | spinlock_destroy(&mp->m_sb_lock); | 169 | spinlock_destroy(&mp->m_sb_lock); |
176 | mutex_destroy(&mp->m_ilock); | 170 | mutex_destroy(&mp->m_ilock); |
177 | mutex_destroy(&mp->m_growlock); | 171 | mutex_destroy(&mp->m_growlock); |
@@ -616,7 +610,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) | |||
616 | int i; | 610 | int i; |
617 | 611 | ||
618 | mp->m_agfrotor = mp->m_agirotor = 0; | 612 | mp->m_agfrotor = mp->m_agirotor = 0; |
619 | spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock"); | 613 | spin_lock_init(&mp->m_agirotor_lock); |
620 | mp->m_maxagi = mp->m_sb.sb_agcount; | 614 | mp->m_maxagi = mp->m_sb.sb_agcount; |
621 | mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; | 615 | mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; |
622 | mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; | 616 | mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; |
@@ -696,7 +690,6 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) | |||
696 | uint64_t bfreelst = 0; | 690 | uint64_t bfreelst = 0; |
697 | uint64_t btree = 0; | 691 | uint64_t btree = 0; |
698 | int error; | 692 | int error; |
699 | int s; | ||
700 | 693 | ||
701 | for (index = 0; index < agcount; index++) { | 694 | for (index = 0; index < agcount; index++) { |
702 | /* | 695 | /* |
@@ -721,11 +714,11 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) | |||
721 | /* | 714 | /* |
722 | * Overwrite incore superblock counters with just-read data | 715 | * Overwrite incore superblock counters with just-read data |
723 | */ | 716 | */ |
724 | s = XFS_SB_LOCK(mp); | 717 | spin_lock(&mp->m_sb_lock); |
725 | sbp->sb_ifree = ifree; | 718 | sbp->sb_ifree = ifree; |
726 | sbp->sb_icount = ialloc; | 719 | sbp->sb_icount = ialloc; |
727 | sbp->sb_fdblocks = bfree + bfreelst + btree; | 720 | sbp->sb_fdblocks = bfree + bfreelst + btree; |
728 | XFS_SB_UNLOCK(mp, s); | 721 | spin_unlock(&mp->m_sb_lock); |
729 | 722 | ||
730 | /* Fixup the per-cpu counters as well. */ | 723 | /* Fixup the per-cpu counters as well. */ |
731 | xfs_icsb_reinit_counters(mp); | 724 | xfs_icsb_reinit_counters(mp); |
@@ -734,49 +727,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) | |||
734 | } | 727 | } |
735 | 728 | ||
736 | /* | 729 | /* |
737 | * xfs_mountfs | 730 | * Update alignment values based on mount options and sb values |
738 | * | ||
739 | * This function does the following on an initial mount of a file system: | ||
740 | * - reads the superblock from disk and init the mount struct | ||
741 | * - if we're a 32-bit kernel, do a size check on the superblock | ||
742 | * so we don't mount terabyte filesystems | ||
743 | * - init mount struct realtime fields | ||
744 | * - allocate inode hash table for fs | ||
745 | * - init directory manager | ||
746 | * - perform recovery and init the log manager | ||
747 | */ | 731 | */ |
748 | int | 732 | STATIC int |
749 | xfs_mountfs( | 733 | xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags) |
750 | xfs_mount_t *mp, | ||
751 | int mfsi_flags) | ||
752 | { | 734 | { |
753 | xfs_buf_t *bp; | ||
754 | xfs_sb_t *sbp = &(mp->m_sb); | 735 | xfs_sb_t *sbp = &(mp->m_sb); |
755 | xfs_inode_t *rip; | ||
756 | bhv_vnode_t *rvp = NULL; | ||
757 | int readio_log, writeio_log; | ||
758 | xfs_daddr_t d; | ||
759 | __uint64_t resblks; | ||
760 | __int64_t update_flags; | ||
761 | uint quotamount, quotaflags; | ||
762 | int agno; | ||
763 | int uuid_mounted = 0; | ||
764 | int error = 0; | ||
765 | 736 | ||
766 | if (mp->m_sb_bp == NULL) { | ||
767 | if ((error = xfs_readsb(mp, mfsi_flags))) { | ||
768 | return error; | ||
769 | } | ||
770 | } | ||
771 | xfs_mount_common(mp, sbp); | ||
772 | |||
773 | /* | ||
774 | * Check if sb_agblocks is aligned at stripe boundary | ||
775 | * If sb_agblocks is NOT aligned turn off m_dalign since | ||
776 | * allocator alignment is within an ag, therefore ag has | ||
777 | * to be aligned at stripe boundary. | ||
778 | */ | ||
779 | update_flags = 0LL; | ||
780 | if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { | 737 | if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { |
781 | /* | 738 | /* |
782 | * If stripe unit and stripe width are not multiples | 739 | * If stripe unit and stripe width are not multiples |
@@ -787,8 +744,7 @@ xfs_mountfs( | |||
787 | if (mp->m_flags & XFS_MOUNT_RETERR) { | 744 | if (mp->m_flags & XFS_MOUNT_RETERR) { |
788 | cmn_err(CE_WARN, | 745 | cmn_err(CE_WARN, |
789 | "XFS: alignment check 1 failed"); | 746 | "XFS: alignment check 1 failed"); |
790 | error = XFS_ERROR(EINVAL); | 747 | return XFS_ERROR(EINVAL); |
791 | goto error1; | ||
792 | } | 748 | } |
793 | mp->m_dalign = mp->m_swidth = 0; | 749 | mp->m_dalign = mp->m_swidth = 0; |
794 | } else { | 750 | } else { |
@@ -798,8 +754,7 @@ xfs_mountfs( | |||
798 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); | 754 | mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); |
799 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { | 755 | if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { |
800 | if (mp->m_flags & XFS_MOUNT_RETERR) { | 756 | if (mp->m_flags & XFS_MOUNT_RETERR) { |
801 | error = XFS_ERROR(EINVAL); | 757 | return XFS_ERROR(EINVAL); |
802 | goto error1; | ||
803 | } | 758 | } |
804 | xfs_fs_cmn_err(CE_WARN, mp, | 759 | xfs_fs_cmn_err(CE_WARN, mp, |
805 | "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)", | 760 | "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)", |
@@ -816,8 +771,7 @@ xfs_mountfs( | |||
816 | "stripe alignment turned off: sunit(%d) less than bsize(%d)", | 771 | "stripe alignment turned off: sunit(%d) less than bsize(%d)", |
817 | mp->m_dalign, | 772 | mp->m_dalign, |
818 | mp->m_blockmask +1); | 773 | mp->m_blockmask +1); |
819 | error = XFS_ERROR(EINVAL); | 774 | return XFS_ERROR(EINVAL); |
820 | goto error1; | ||
821 | } | 775 | } |
822 | mp->m_swidth = 0; | 776 | mp->m_swidth = 0; |
823 | } | 777 | } |
@@ -830,11 +784,11 @@ xfs_mountfs( | |||
830 | if (XFS_SB_VERSION_HASDALIGN(sbp)) { | 784 | if (XFS_SB_VERSION_HASDALIGN(sbp)) { |
831 | if (sbp->sb_unit != mp->m_dalign) { | 785 | if (sbp->sb_unit != mp->m_dalign) { |
832 | sbp->sb_unit = mp->m_dalign; | 786 | sbp->sb_unit = mp->m_dalign; |
833 | update_flags |= XFS_SB_UNIT; | 787 | *update_flags |= XFS_SB_UNIT; |
834 | } | 788 | } |
835 | if (sbp->sb_width != mp->m_swidth) { | 789 | if (sbp->sb_width != mp->m_swidth) { |
836 | sbp->sb_width = mp->m_swidth; | 790 | sbp->sb_width = mp->m_swidth; |
837 | update_flags |= XFS_SB_WIDTH; | 791 | *update_flags |= XFS_SB_WIDTH; |
838 | } | 792 | } |
839 | } | 793 | } |
840 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && | 794 | } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && |
@@ -843,49 +797,45 @@ xfs_mountfs( | |||
843 | mp->m_swidth = sbp->sb_width; | 797 | mp->m_swidth = sbp->sb_width; |
844 | } | 798 | } |
845 | 799 | ||
846 | xfs_alloc_compute_maxlevels(mp); | 800 | return 0; |
847 | xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); | 801 | } |
848 | xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); | ||
849 | xfs_ialloc_compute_maxlevels(mp); | ||
850 | 802 | ||
851 | if (sbp->sb_imax_pct) { | 803 | /* |
852 | __uint64_t icount; | 804 | * Set the maximum inode count for this filesystem |
805 | */ | ||
806 | STATIC void | ||
807 | xfs_set_maxicount(xfs_mount_t *mp) | ||
808 | { | ||
809 | xfs_sb_t *sbp = &(mp->m_sb); | ||
810 | __uint64_t icount; | ||
853 | 811 | ||
854 | /* Make sure the maximum inode count is a multiple of the | 812 | if (sbp->sb_imax_pct) { |
855 | * units we allocate inodes in. | 813 | /* |
814 | * Make sure the maximum inode count is a multiple | ||
815 | * of the units we allocate inodes in. | ||
856 | */ | 816 | */ |
857 | |||
858 | icount = sbp->sb_dblocks * sbp->sb_imax_pct; | 817 | icount = sbp->sb_dblocks * sbp->sb_imax_pct; |
859 | do_div(icount, 100); | 818 | do_div(icount, 100); |
860 | do_div(icount, mp->m_ialloc_blks); | 819 | do_div(icount, mp->m_ialloc_blks); |
861 | mp->m_maxicount = (icount * mp->m_ialloc_blks) << | 820 | mp->m_maxicount = (icount * mp->m_ialloc_blks) << |
862 | sbp->sb_inopblog; | 821 | sbp->sb_inopblog; |
863 | } else | 822 | } else { |
864 | mp->m_maxicount = 0; | 823 | mp->m_maxicount = 0; |
865 | |||
866 | mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); | ||
867 | |||
868 | /* | ||
869 | * XFS uses the uuid from the superblock as the unique | ||
870 | * identifier for fsid. We can not use the uuid from the volume | ||
871 | * since a single partition filesystem is identical to a single | ||
872 | * partition volume/filesystem. | ||
873 | */ | ||
874 | if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && | ||
875 | (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { | ||
876 | if (xfs_uuid_mount(mp)) { | ||
877 | error = XFS_ERROR(EINVAL); | ||
878 | goto error1; | ||
879 | } | ||
880 | uuid_mounted=1; | ||
881 | } | 824 | } |
825 | } | ||
826 | |||
827 | /* | ||
828 | * Set the default minimum read and write sizes unless | ||
829 | * already specified in a mount option. | ||
830 | * We use smaller I/O sizes when the file system | ||
831 | * is being used for NFS service (wsync mount option). | ||
832 | */ | ||
833 | STATIC void | ||
834 | xfs_set_rw_sizes(xfs_mount_t *mp) | ||
835 | { | ||
836 | xfs_sb_t *sbp = &(mp->m_sb); | ||
837 | int readio_log, writeio_log; | ||
882 | 838 | ||
883 | /* | ||
884 | * Set the default minimum read and write sizes unless | ||
885 | * already specified in a mount option. | ||
886 | * We use smaller I/O sizes when the file system | ||
887 | * is being used for NFS service (wsync mount option). | ||
888 | */ | ||
889 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { | 839 | if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { |
890 | if (mp->m_flags & XFS_MOUNT_WSYNC) { | 840 | if (mp->m_flags & XFS_MOUNT_WSYNC) { |
891 | readio_log = XFS_WSYNC_READIO_LOG; | 841 | readio_log = XFS_WSYNC_READIO_LOG; |
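The new xfs_set_maxicount() helper above keeps the original arithmetic: take sb_imax_pct percent of sb_dblocks, round that down to a whole number of inode allocation chunks (m_ialloc_blks blocks each), then convert blocks back to inodes by shifting by sb_inopblog. A worked user-space example with invented superblock values (none of the numbers come from a real filesystem):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sb_dblocks  = 1000000;	/* data blocks in the filesystem */
	uint64_t sb_imax_pct = 25;	/* at most 25% of space for inodes */
	uint64_t ialloc_blks = 2;	/* blocks per inode allocation chunk */
	unsigned sb_inopblog = 6;	/* log2(inodes per block): 64 per block */
	uint64_t icount, maxicount;

	icount = sb_dblocks * sb_imax_pct;
	icount /= 100;			/* do_div(icount, 100): 250000 blocks */
	icount /= ialloc_blks;		/* whole chunks only: 125000 chunks */
	maxicount = (icount * ialloc_blks) << sb_inopblog;

	/* 125000 chunks * 2 blocks * 64 inodes/block = 16000000 inodes */
	printf("m_maxicount = %" PRIu64 " inodes\n", maxicount);
	return 0;
}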
@@ -911,17 +861,14 @@ xfs_mountfs( | |||
911 | mp->m_writeio_log = writeio_log; | 861 | mp->m_writeio_log = writeio_log; |
912 | } | 862 | } |
913 | mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); | 863 | mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); |
864 | } | ||
914 | 865 | ||
915 | /* | 866 | /* |
916 | * Set the inode cluster size. | 867 | * Set whether we're using inode alignment. |
917 | * This may still be overridden by the file system | 868 | */ |
918 | * block size if it is larger than the chosen cluster size. | 869 | STATIC void |
919 | */ | 870 | xfs_set_inoalignment(xfs_mount_t *mp) |
920 | mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; | 871 | { |
921 | |||
922 | /* | ||
923 | * Set whether we're using inode alignment. | ||
924 | */ | ||
925 | if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) && | 872 | if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) && |
926 | mp->m_sb.sb_inoalignmt >= | 873 | mp->m_sb.sb_inoalignmt >= |
927 | XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) | 874 | XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) |
@@ -937,14 +884,22 @@ xfs_mountfs( | |||
937 | mp->m_sinoalign = mp->m_dalign; | 884 | mp->m_sinoalign = mp->m_dalign; |
938 | else | 885 | else |
939 | mp->m_sinoalign = 0; | 886 | mp->m_sinoalign = 0; |
940 | /* | 887 | } |
941 | * Check that the data (and log if separate) are an ok size. | 888 | |
942 | */ | 889 | /* |
890 | * Check that the data (and log if separate) are an ok size. | ||
891 | */ | ||
892 | STATIC int | ||
893 | xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) | ||
894 | { | ||
895 | xfs_buf_t *bp; | ||
896 | xfs_daddr_t d; | ||
897 | int error; | ||
898 | |||
943 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); | 899 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); |
944 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { | 900 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { |
945 | cmn_err(CE_WARN, "XFS: size check 1 failed"); | 901 | cmn_err(CE_WARN, "XFS: size check 1 failed"); |
946 | error = XFS_ERROR(E2BIG); | 902 | return XFS_ERROR(E2BIG); |
947 | goto error1; | ||
948 | } | 903 | } |
949 | error = xfs_read_buf(mp, mp->m_ddev_targp, | 904 | error = xfs_read_buf(mp, mp->m_ddev_targp, |
950 | d - XFS_FSS_TO_BB(mp, 1), | 905 | d - XFS_FSS_TO_BB(mp, 1), |
@@ -953,10 +908,9 @@ xfs_mountfs( | |||
953 | xfs_buf_relse(bp); | 908 | xfs_buf_relse(bp); |
954 | } else { | 909 | } else { |
955 | cmn_err(CE_WARN, "XFS: size check 2 failed"); | 910 | cmn_err(CE_WARN, "XFS: size check 2 failed"); |
956 | if (error == ENOSPC) { | 911 | if (error == ENOSPC) |
957 | error = XFS_ERROR(E2BIG); | 912 | error = XFS_ERROR(E2BIG); |
958 | } | 913 | return error; |
959 | goto error1; | ||
960 | } | 914 | } |
961 | 915 | ||
962 | if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && | 916 | if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && |
@@ -964,8 +918,7 @@ xfs_mountfs( | |||
964 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); | 918 | d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); |
965 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { | 919 | if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { |
966 | cmn_err(CE_WARN, "XFS: size check 3 failed"); | 920 | cmn_err(CE_WARN, "XFS: size check 3 failed"); |
967 | error = XFS_ERROR(E2BIG); | 921 | return XFS_ERROR(E2BIG); |
968 | goto error1; | ||
969 | } | 922 | } |
970 | error = xfs_read_buf(mp, mp->m_logdev_targp, | 923 | error = xfs_read_buf(mp, mp->m_logdev_targp, |
971 | d - XFS_FSB_TO_BB(mp, 1), | 924 | d - XFS_FSB_TO_BB(mp, 1), |
@@ -974,17 +927,111 @@ xfs_mountfs( | |||
974 | xfs_buf_relse(bp); | 927 | xfs_buf_relse(bp); |
975 | } else { | 928 | } else { |
976 | cmn_err(CE_WARN, "XFS: size check 3 failed"); | 929 | cmn_err(CE_WARN, "XFS: size check 3 failed"); |
977 | if (error == ENOSPC) { | 930 | if (error == ENOSPC) |
978 | error = XFS_ERROR(E2BIG); | 931 | error = XFS_ERROR(E2BIG); |
979 | } | 932 | return error; |
933 | } | ||
934 | } | ||
935 | return 0; | ||
936 | } | ||
937 | |||
938 | /* | ||
939 | * xfs_mountfs | ||
940 | * | ||
941 | * This function does the following on an initial mount of a file system: | ||
942 | * - reads the superblock from disk and init the mount struct | ||
943 | * - if we're a 32-bit kernel, do a size check on the superblock | ||
944 | * so we don't mount terabyte filesystems | ||
945 | * - init mount struct realtime fields | ||
946 | * - allocate inode hash table for fs | ||
947 | * - init directory manager | ||
948 | * - perform recovery and init the log manager | ||
949 | */ | ||
950 | int | ||
951 | xfs_mountfs( | ||
952 | xfs_mount_t *mp, | ||
953 | int mfsi_flags) | ||
954 | { | ||
955 | xfs_sb_t *sbp = &(mp->m_sb); | ||
956 | xfs_inode_t *rip; | ||
957 | bhv_vnode_t *rvp = NULL; | ||
958 | __uint64_t resblks; | ||
959 | __int64_t update_flags = 0LL; | ||
960 | uint quotamount, quotaflags; | ||
961 | int agno; | ||
962 | int uuid_mounted = 0; | ||
963 | int error = 0; | ||
964 | |||
965 | if (mp->m_sb_bp == NULL) { | ||
966 | error = xfs_readsb(mp, mfsi_flags); | ||
967 | if (error) | ||
968 | return error; | ||
969 | } | ||
970 | xfs_mount_common(mp, sbp); | ||
971 | |||
972 | /* | ||
973 | * Check if sb_agblocks is aligned at stripe boundary | ||
974 | * If sb_agblocks is NOT aligned turn off m_dalign since | ||
975 | * allocator alignment is within an ag, therefore ag has | ||
976 | * to be aligned at stripe boundary. | ||
977 | */ | ||
978 | error = xfs_update_alignment(mp, mfsi_flags, &update_flags); | ||
979 | if (error) | ||
980 | goto error1; | ||
981 | |||
982 | xfs_alloc_compute_maxlevels(mp); | ||
983 | xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); | ||
984 | xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); | ||
985 | xfs_ialloc_compute_maxlevels(mp); | ||
986 | |||
987 | xfs_set_maxicount(mp); | ||
988 | |||
989 | mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); | ||
990 | |||
991 | /* | ||
992 | * XFS uses the uuid from the superblock as the unique | ||
993 | * identifier for fsid. We can not use the uuid from the volume | ||
994 | * since a single partition filesystem is identical to a single | ||
995 | * partition volume/filesystem. | ||
996 | */ | ||
997 | if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && | ||
998 | (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { | ||
999 | if (xfs_uuid_mount(mp)) { | ||
1000 | error = XFS_ERROR(EINVAL); | ||
980 | goto error1; | 1001 | goto error1; |
981 | } | 1002 | } |
1003 | uuid_mounted=1; | ||
982 | } | 1004 | } |
983 | 1005 | ||
984 | /* | 1006 | /* |
1007 | * Set the minimum read and write sizes | ||
1008 | */ | ||
1009 | xfs_set_rw_sizes(mp); | ||
1010 | |||
1011 | /* | ||
1012 | * Set the inode cluster size. | ||
1013 | * This may still be overridden by the file system | ||
1014 | * block size if it is larger than the chosen cluster size. | ||
1015 | */ | ||
1016 | mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; | ||
1017 | |||
1018 | /* | ||
1019 | * Set inode alignment fields | ||
1020 | */ | ||
1021 | xfs_set_inoalignment(mp); | ||
1022 | |||
1023 | /* | ||
1024 | * Check that the data (and log if separate) are an ok size. | ||
1025 | */ | ||
1026 | error = xfs_check_sizes(mp, mfsi_flags); | ||
1027 | if (error) | ||
1028 | goto error1; | ||
1029 | |||
1030 | /* | ||
985 | * Initialize realtime fields in the mount structure | 1031 | * Initialize realtime fields in the mount structure |
986 | */ | 1032 | */ |
987 | if ((error = xfs_rtmount_init(mp))) { | 1033 | error = xfs_rtmount_init(mp); |
1034 | if (error) { | ||
988 | cmn_err(CE_WARN, "XFS: RT mount failed"); | 1035 | cmn_err(CE_WARN, "XFS: RT mount failed"); |
989 | goto error1; | 1036 | goto error1; |
990 | } | 1037 | } |
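This hunk rebuilds xfs_mountfs() as a sequence of calls to the new STATIC helpers (xfs_update_alignment, xfs_set_maxicount, xfs_set_rw_sizes, xfs_set_inoalignment, xfs_check_sizes): each helper returns its error directly instead of jumping to error1, and only the top-level caller keeps the unwind labels. A hedged sketch of that refactoring shape; mount_step1, mount_step2 and undo_step1 are invented names, not XFS functions:

#include <errno.h>

/* Each helper validates one aspect of the mount and returns 0 or an errno. */
static int mount_step1(void) { return 0; }
static int mount_step2(void) { return 0; }
static void undo_step1(void) { }

/* The caller owns the single unwind path; helpers just return early. */
static int do_mount(void)
{
	int error;

	error = mount_step1();
	if (error)
		return error;

	error = mount_step2();
	if (error)
		goto out_undo_step1;

	return 0;

 out_undo_step1:
	undo_step1();
	return error;
}

int main(void)
{
	return do_mount();
}

Keeping the goto-based unwind only in the caller mirrors how the error1..error4 labels survive inside xfs_mountfs() while the extracted helpers stay straight-line.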
@@ -1102,7 +1149,8 @@ xfs_mountfs( | |||
1102 | /* | 1149 | /* |
1103 | * Initialize realtime inode pointers in the mount structure | 1150 | * Initialize realtime inode pointers in the mount structure |
1104 | */ | 1151 | */ |
1105 | if ((error = xfs_rtmount_inodes(mp))) { | 1152 | error = xfs_rtmount_inodes(mp); |
1153 | if (error) { | ||
1106 | /* | 1154 | /* |
1107 | * Free up the root inode. | 1155 | * Free up the root inode. |
1108 | */ | 1156 | */ |
@@ -1120,7 +1168,8 @@ xfs_mountfs( | |||
1120 | /* | 1168 | /* |
1121 | * Initialise the XFS quota management subsystem for this mount | 1169 | * Initialise the XFS quota management subsystem for this mount |
1122 | */ | 1170 | */ |
1123 | if ((error = XFS_QM_INIT(mp, "amount, "aflags))) | 1171 | error = XFS_QM_INIT(mp, "amount, "aflags); |
1172 | if (error) | ||
1124 | goto error4; | 1173 | goto error4; |
1125 | 1174 | ||
1126 | /* | 1175 | /* |
@@ -1137,7 +1186,8 @@ xfs_mountfs( | |||
1137 | /* | 1186 | /* |
1138 | * Complete the quota initialisation, post-log-replay component. | 1187 | * Complete the quota initialisation, post-log-replay component. |
1139 | */ | 1188 | */ |
1140 | if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags))) | 1189 | error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags); |
1190 | if (error) | ||
1141 | goto error4; | 1191 | goto error4; |
1142 | 1192 | ||
1143 | /* | 1193 | /* |
@@ -1255,7 +1305,6 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr) | |||
1255 | #if defined(DEBUG) || defined(INDUCE_IO_ERROR) | 1305 | #if defined(DEBUG) || defined(INDUCE_IO_ERROR) |
1256 | xfs_errortag_clearall(mp, 0); | 1306 | xfs_errortag_clearall(mp, 0); |
1257 | #endif | 1307 | #endif |
1258 | XFS_IODONE(mp); | ||
1259 | xfs_mount_free(mp); | 1308 | xfs_mount_free(mp); |
1260 | return 0; | 1309 | return 0; |
1261 | } | 1310 | } |
@@ -1441,7 +1490,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) | |||
1441 | * Fields are not allowed to dip below zero, so if the delta would | 1490 | * Fields are not allowed to dip below zero, so if the delta would |
1442 | * do this do not apply it and return EINVAL. | 1491 | * do this do not apply it and return EINVAL. |
1443 | * | 1492 | * |
1444 | * The SB_LOCK must be held when this routine is called. | 1493 | * The m_sb_lock must be held when this routine is called. |
1445 | */ | 1494 | */ |
1446 | int | 1495 | int |
1447 | xfs_mod_incore_sb_unlocked( | 1496 | xfs_mod_incore_sb_unlocked( |
@@ -1606,7 +1655,7 @@ xfs_mod_incore_sb_unlocked( | |||
1606 | /* | 1655 | /* |
1607 | * xfs_mod_incore_sb() is used to change a field in the in-core | 1656 | * xfs_mod_incore_sb() is used to change a field in the in-core |
1608 | * superblock structure by the specified delta. This modification | 1657 | * superblock structure by the specified delta. This modification |
1609 | * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked() | 1658 | * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() |
1610 | * routine to do the work. | 1659 | * routine to do the work. |
1611 | */ | 1660 | */ |
1612 | int | 1661 | int |
@@ -1616,7 +1665,6 @@ xfs_mod_incore_sb( | |||
1616 | int64_t delta, | 1665 | int64_t delta, |
1617 | int rsvd) | 1666 | int rsvd) |
1618 | { | 1667 | { |
1619 | unsigned long s; | ||
1620 | int status; | 1668 | int status; |
1621 | 1669 | ||
1622 | /* check for per-cpu counters */ | 1670 | /* check for per-cpu counters */ |
@@ -1633,9 +1681,9 @@ xfs_mod_incore_sb( | |||
1633 | /* FALLTHROUGH */ | 1681 | /* FALLTHROUGH */ |
1634 | #endif | 1682 | #endif |
1635 | default: | 1683 | default: |
1636 | s = XFS_SB_LOCK(mp); | 1684 | spin_lock(&mp->m_sb_lock); |
1637 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); | 1685 | status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
1638 | XFS_SB_UNLOCK(mp, s); | 1686 | spin_unlock(&mp->m_sb_lock); |
1639 | break; | 1687 | break; |
1640 | } | 1688 | } |
1641 | 1689 | ||
@@ -1656,7 +1704,6 @@ xfs_mod_incore_sb( | |||
1656 | int | 1704 | int |
1657 | xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | 1705 | xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) |
1658 | { | 1706 | { |
1659 | unsigned long s; | ||
1660 | int status=0; | 1707 | int status=0; |
1661 | xfs_mod_sb_t *msbp; | 1708 | xfs_mod_sb_t *msbp; |
1662 | 1709 | ||
@@ -1664,10 +1711,10 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | |||
1664 | * Loop through the array of mod structures and apply each | 1711 | * Loop through the array of mod structures and apply each |
1665 | * individually. If any fail, then back out all those | 1712 | * individually. If any fail, then back out all those |
1666 | * which have already been applied. Do all of this within | 1713 | * which have already been applied. Do all of this within |
1667 | * the scope of the SB_LOCK so that all of the changes will | 1714 | * the scope of the m_sb_lock so that all of the changes will |
1668 | * be atomic. | 1715 | * be atomic. |
1669 | */ | 1716 | */ |
1670 | s = XFS_SB_LOCK(mp); | 1717 | spin_lock(&mp->m_sb_lock); |
1671 | msbp = &msb[0]; | 1718 | msbp = &msb[0]; |
1672 | for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { | 1719 | for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { |
1673 | /* | 1720 | /* |
@@ -1681,11 +1728,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | |||
1681 | case XFS_SBS_IFREE: | 1728 | case XFS_SBS_IFREE: |
1682 | case XFS_SBS_FDBLOCKS: | 1729 | case XFS_SBS_FDBLOCKS: |
1683 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { | 1730 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { |
1684 | XFS_SB_UNLOCK(mp, s); | 1731 | spin_unlock(&mp->m_sb_lock); |
1685 | status = xfs_icsb_modify_counters(mp, | 1732 | status = xfs_icsb_modify_counters(mp, |
1686 | msbp->msb_field, | 1733 | msbp->msb_field, |
1687 | msbp->msb_delta, rsvd); | 1734 | msbp->msb_delta, rsvd); |
1688 | s = XFS_SB_LOCK(mp); | 1735 | spin_lock(&mp->m_sb_lock); |
1689 | break; | 1736 | break; |
1690 | } | 1737 | } |
1691 | /* FALLTHROUGH */ | 1738 | /* FALLTHROUGH */ |
@@ -1719,12 +1766,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | |||
1719 | case XFS_SBS_IFREE: | 1766 | case XFS_SBS_IFREE: |
1720 | case XFS_SBS_FDBLOCKS: | 1767 | case XFS_SBS_FDBLOCKS: |
1721 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { | 1768 | if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { |
1722 | XFS_SB_UNLOCK(mp, s); | 1769 | spin_unlock(&mp->m_sb_lock); |
1723 | status = xfs_icsb_modify_counters(mp, | 1770 | status = xfs_icsb_modify_counters(mp, |
1724 | msbp->msb_field, | 1771 | msbp->msb_field, |
1725 | -(msbp->msb_delta), | 1772 | -(msbp->msb_delta), |
1726 | rsvd); | 1773 | rsvd); |
1727 | s = XFS_SB_LOCK(mp); | 1774 | spin_lock(&mp->m_sb_lock); |
1728 | break; | 1775 | break; |
1729 | } | 1776 | } |
1730 | /* FALLTHROUGH */ | 1777 | /* FALLTHROUGH */ |
@@ -1740,7 +1787,7 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) | |||
1740 | msbp--; | 1787 | msbp--; |
1741 | } | 1788 | } |
1742 | } | 1789 | } |
1743 | XFS_SB_UNLOCK(mp, s); | 1790 | spin_unlock(&mp->m_sb_lock); |
1744 | return status; | 1791 | return status; |
1745 | } | 1792 | } |
1746 | 1793 | ||
@@ -1888,12 +1935,12 @@ xfs_mount_log_sbunit( | |||
1888 | * | 1935 | * |
1889 | * Locking rules: | 1936 | * Locking rules: |
1890 | * | 1937 | * |
1891 | * 1. XFS_SB_LOCK() before picking up per-cpu locks | 1938 | * 1. m_sb_lock before picking up per-cpu locks |
1892 | * 2. per-cpu locks always picked up via for_each_online_cpu() order | 1939 | * 2. per-cpu locks always picked up via for_each_online_cpu() order |
1893 | * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks | 1940 | * 3. accurate counter sync requires m_sb_lock + per cpu locks |
1894 | * 4. modifying per-cpu counters requires holding per-cpu lock | 1941 | * 4. modifying per-cpu counters requires holding per-cpu lock |
1895 | * 5. modifying global counters requires holding XFS_SB_LOCK | 1942 | * 5. modifying global counters requires holding m_sb_lock |
1896 | * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK | 1943 | * 6. enabling or disabling a counter requires holding the m_sb_lock |
1897 | * and _none_ of the per-cpu locks. | 1944 | * and _none_ of the per-cpu locks. |
1898 | * | 1945 | * |
1899 | * Disabled counters are only ever re-enabled by a balance operation | 1946 | * Disabled counters are only ever re-enabled by a balance operation |
@@ -1920,7 +1967,6 @@ xfs_icsb_cpu_notify( | |||
1920 | { | 1967 | { |
1921 | xfs_icsb_cnts_t *cntp; | 1968 | xfs_icsb_cnts_t *cntp; |
1922 | xfs_mount_t *mp; | 1969 | xfs_mount_t *mp; |
1923 | int s; | ||
1924 | 1970 | ||
1925 | mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); | 1971 | mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); |
1926 | cntp = (xfs_icsb_cnts_t *) | 1972 | cntp = (xfs_icsb_cnts_t *) |
@@ -1946,7 +1992,7 @@ xfs_icsb_cpu_notify( | |||
1946 | * count into the total on the global superblock and | 1992 | * count into the total on the global superblock and |
1947 | * re-enable the counters. */ | 1993 | * re-enable the counters. */ |
1948 | xfs_icsb_lock(mp); | 1994 | xfs_icsb_lock(mp); |
1949 | s = XFS_SB_LOCK(mp); | 1995 | spin_lock(&mp->m_sb_lock); |
1950 | xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); | 1996 | xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); |
1951 | xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); | 1997 | xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); |
1952 | xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); | 1998 | xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); |
@@ -1963,7 +2009,7 @@ xfs_icsb_cpu_notify( | |||
1963 | XFS_ICSB_SB_LOCKED, 0); | 2009 | XFS_ICSB_SB_LOCKED, 0); |
1964 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, | 2010 | xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, |
1965 | XFS_ICSB_SB_LOCKED, 0); | 2011 | XFS_ICSB_SB_LOCKED, 0); |
1966 | XFS_SB_UNLOCK(mp, s); | 2012 | spin_unlock(&mp->m_sb_lock); |
1967 | xfs_icsb_unlock(mp); | 2013 | xfs_icsb_unlock(mp); |
1968 | break; | 2014 | break; |
1969 | } | 2015 | } |
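The locking-rules comment above now names m_sb_lock directly instead of the removed XFS_SB_LOCK()/XFS_SB_UNLOCK() macros; the discipline itself is unchanged: take the global superblock lock before any per-cpu counter lock, always acquire per-cpu locks in for_each_online_cpu() order, and hold all of them for an accurate sync. A user-space pthread sketch of that ordering; NR_CPUS, the counter layout and sync_counters are made up for illustration:

#include <pthread.h>

#define NR_CPUS 4

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cpu_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static long cpu_count[NR_CPUS];
static long global_count;

/* Accurate sync: global lock first, then every per-cpu lock in cpu order. */
static void sync_counters(void)
{
	int cpu;

	pthread_mutex_lock(&sb_lock);			/* rule 1 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)		/* rule 2: fixed order */
		pthread_mutex_lock(&cpu_lock[cpu]);

	global_count = 0;				/* rule 3: all locks held */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		global_count += cpu_count[cpu];

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_mutex_unlock(&cpu_lock[cpu]);
	pthread_mutex_unlock(&sb_lock);
}

int main(void)
{
	sync_counters();
	return 0;
}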
@@ -2194,11 +2240,10 @@ xfs_icsb_sync_counters_flags( | |||
2194 | int flags) | 2240 | int flags) |
2195 | { | 2241 | { |
2196 | xfs_icsb_cnts_t cnt; | 2242 | xfs_icsb_cnts_t cnt; |
2197 | int s; | ||
2198 | 2243 | ||
2199 | /* Pass 1: lock all counters */ | 2244 | /* Pass 1: lock all counters */ |
2200 | if ((flags & XFS_ICSB_SB_LOCKED) == 0) | 2245 | if ((flags & XFS_ICSB_SB_LOCKED) == 0) |
2201 | s = XFS_SB_LOCK(mp); | 2246 | spin_lock(&mp->m_sb_lock); |
2202 | 2247 | ||
2203 | xfs_icsb_count(mp, &cnt, flags); | 2248 | xfs_icsb_count(mp, &cnt, flags); |
2204 | 2249 | ||
@@ -2211,7 +2256,7 @@ xfs_icsb_sync_counters_flags( | |||
2211 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; | 2256 | mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; |
2212 | 2257 | ||
2213 | if ((flags & XFS_ICSB_SB_LOCKED) == 0) | 2258 | if ((flags & XFS_ICSB_SB_LOCKED) == 0) |
2214 | XFS_SB_UNLOCK(mp, s); | 2259 | spin_unlock(&mp->m_sb_lock); |
2215 | } | 2260 | } |
2216 | 2261 | ||
2217 | /* | 2262 | /* |
@@ -2252,11 +2297,10 @@ xfs_icsb_balance_counter( | |||
2252 | { | 2297 | { |
2253 | uint64_t count, resid; | 2298 | uint64_t count, resid; |
2254 | int weight = num_online_cpus(); | 2299 | int weight = num_online_cpus(); |
2255 | int s; | ||
2256 | uint64_t min = (uint64_t)min_per_cpu; | 2300 | uint64_t min = (uint64_t)min_per_cpu; |
2257 | 2301 | ||
2258 | if (!(flags & XFS_ICSB_SB_LOCKED)) | 2302 | if (!(flags & XFS_ICSB_SB_LOCKED)) |
2259 | s = XFS_SB_LOCK(mp); | 2303 | spin_lock(&mp->m_sb_lock); |
2260 | 2304 | ||
2261 | /* disable counter and sync counter */ | 2305 | /* disable counter and sync counter */ |
2262 | xfs_icsb_disable_counter(mp, field); | 2306 | xfs_icsb_disable_counter(mp, field); |
@@ -2290,10 +2334,10 @@ xfs_icsb_balance_counter( | |||
2290 | xfs_icsb_enable_counter(mp, field, count, resid); | 2334 | xfs_icsb_enable_counter(mp, field, count, resid); |
2291 | out: | 2335 | out: |
2292 | if (!(flags & XFS_ICSB_SB_LOCKED)) | 2336 | if (!(flags & XFS_ICSB_SB_LOCKED)) |
2293 | XFS_SB_UNLOCK(mp, s); | 2337 | spin_unlock(&mp->m_sb_lock); |
2294 | } | 2338 | } |
2295 | 2339 | ||
2296 | int | 2340 | STATIC int |
2297 | xfs_icsb_modify_counters( | 2341 | xfs_icsb_modify_counters( |
2298 | xfs_mount_t *mp, | 2342 | xfs_mount_t *mp, |
2299 | xfs_sb_field_t field, | 2343 | xfs_sb_field_t field, |
@@ -2302,7 +2346,7 @@ xfs_icsb_modify_counters( | |||
2302 | { | 2346 | { |
2303 | xfs_icsb_cnts_t *icsbp; | 2347 | xfs_icsb_cnts_t *icsbp; |
2304 | long long lcounter; /* long counter for 64 bit fields */ | 2348 | long long lcounter; /* long counter for 64 bit fields */ |
2305 | int cpu, ret = 0, s; | 2349 | int cpu, ret = 0; |
2306 | 2350 | ||
2307 | might_sleep(); | 2351 | might_sleep(); |
2308 | again: | 2352 | again: |
@@ -2380,15 +2424,15 @@ slow_path: | |||
2380 | * running atomically here, we know a rebalance cannot | 2424 | * running atomically here, we know a rebalance cannot |
2381 | * be in progress. Hence we can go straight to operating | 2425 | * be in progress. Hence we can go straight to operating |
2382 | * on the global superblock. We do not call xfs_mod_incore_sb() | 2426 | * on the global superblock. We do not call xfs_mod_incore_sb() |
2383 | * here even though we need to get the SB_LOCK. Doing so | 2427 | * here even though we need to get the m_sb_lock. Doing so |
2384 | * will cause us to re-enter this function and deadlock. | 2428 | * will cause us to re-enter this function and deadlock. |
2385 | * Hence we get the SB_LOCK ourselves and then call | 2429 | * Hence we get the m_sb_lock ourselves and then call |
2386 | * xfs_mod_incore_sb_unlocked() as the unlocked path operates | 2430 | * xfs_mod_incore_sb_unlocked() as the unlocked path operates |
2387 | * directly on the global counters. | 2431 | * directly on the global counters. |
2388 | */ | 2432 | */ |
2389 | s = XFS_SB_LOCK(mp); | 2433 | spin_lock(&mp->m_sb_lock); |
2390 | ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); | 2434 | ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); |
2391 | XFS_SB_UNLOCK(mp, s); | 2435 | spin_unlock(&mp->m_sb_lock); |
2392 | 2436 | ||
2393 | /* | 2437 | /* |
2394 | * Now that we've modified the global superblock, we | 2438 | * Now that we've modified the global superblock, we |
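The slow-path comment above explains why xfs_icsb_modify_counters() takes m_sb_lock itself and calls xfs_mod_incore_sb_unlocked(): calling xfs_mod_incore_sb() from here would re-enter the per-cpu machinery and deadlock on the very lock it needs. A hedged pthread sketch of that locked/_unlocked split; mod_counter, mod_counter_unlocked and rebalance_and_modify are invented names for the pattern, not XFS functions:

#include <pthread.h>

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static long fdblocks = 100;

/* Caller must already hold sb_lock. */
static int mod_counter_unlocked(long delta)
{
	if (fdblocks + delta < 0)
		return -1;	/* counters may not dip below zero */
	fdblocks += delta;
	return 0;
}

/* Wrapper for callers that do not hold the lock. */
static int mod_counter(long delta)
{
	int ret;

	pthread_mutex_lock(&sb_lock);
	ret = mod_counter_unlocked(delta);
	pthread_mutex_unlock(&sb_lock);
	return ret;
}

static int rebalance_and_modify(long delta)
{
	int ret;

	pthread_mutex_lock(&sb_lock);
	/*
	 * Calling mod_counter() here would take sb_lock a second time and
	 * deadlock (the mutex is not recursive), so use the _unlocked
	 * variant while the lock is held, as the XFS slow path does.
	 */
	ret = mod_counter_unlocked(delta);
	pthread_mutex_unlock(&sb_lock);
	return ret;
}

int main(void)
{
	return mod_counter(-10) || rebalance_and_modify(-10);
}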
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index c618f7cb5f0e..f7c620ec6e69 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -56,20 +56,12 @@ struct cred; | |||
56 | struct log; | 56 | struct log; |
57 | struct xfs_mount_args; | 57 | struct xfs_mount_args; |
58 | struct xfs_inode; | 58 | struct xfs_inode; |
59 | struct xfs_iocore; | ||
60 | struct xfs_bmbt_irec; | 59 | struct xfs_bmbt_irec; |
61 | struct xfs_bmap_free; | 60 | struct xfs_bmap_free; |
62 | struct xfs_extdelta; | 61 | struct xfs_extdelta; |
63 | struct xfs_swapext; | 62 | struct xfs_swapext; |
64 | struct xfs_mru_cache; | 63 | struct xfs_mru_cache; |
65 | 64 | ||
66 | #define AIL_LOCK_T lock_t | ||
67 | #define AIL_LOCKINIT(x,y) spinlock_init(x,y) | ||
68 | #define AIL_LOCK_DESTROY(x) spinlock_destroy(x) | ||
69 | #define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock) | ||
70 | #define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s) | ||
71 | |||
72 | |||
73 | /* | 65 | /* |
74 | * Prototypes and functions for the Data Migration subsystem. | 66 | * Prototypes and functions for the Data Migration subsystem. |
75 | */ | 67 | */ |
@@ -196,105 +188,6 @@ typedef struct xfs_qmops { | |||
196 | #define XFS_QM_QUOTACTL(mp, cmd, id, addr) \ | 188 | #define XFS_QM_QUOTACTL(mp, cmd, id, addr) \ |
197 | (*(mp)->m_qm_ops->xfs_quotactl)(mp, cmd, id, addr) | 189 | (*(mp)->m_qm_ops->xfs_quotactl)(mp, cmd, id, addr) |
198 | 190 | ||
199 | |||
200 | /* | ||
201 | * Prototypes and functions for I/O core modularization. | ||
202 | */ | ||
203 | |||
204 | typedef int (*xfs_ioinit_t)(struct xfs_mount *, | ||
205 | struct xfs_mount_args *, int); | ||
206 | typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *, | ||
207 | xfs_fileoff_t, xfs_filblks_t, int, | ||
208 | xfs_fsblock_t *, xfs_extlen_t, | ||
209 | struct xfs_bmbt_irec *, int *, | ||
210 | struct xfs_bmap_free *, struct xfs_extdelta *); | ||
211 | typedef int (*xfs_bunmapi_t)(struct xfs_trans *, | ||
212 | void *, xfs_fileoff_t, | ||
213 | xfs_filblks_t, int, xfs_extnum_t, | ||
214 | xfs_fsblock_t *, struct xfs_bmap_free *, | ||
215 | struct xfs_extdelta *, int *); | ||
216 | typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *); | ||
217 | typedef int (*xfs_iomap_write_direct_t)( | ||
218 | void *, xfs_off_t, size_t, int, | ||
219 | struct xfs_bmbt_irec *, int *, int); | ||
220 | typedef int (*xfs_iomap_write_delay_t)( | ||
221 | void *, xfs_off_t, size_t, int, | ||
222 | struct xfs_bmbt_irec *, int *); | ||
223 | typedef int (*xfs_iomap_write_allocate_t)( | ||
224 | void *, xfs_off_t, size_t, | ||
225 | struct xfs_bmbt_irec *, int *); | ||
226 | typedef int (*xfs_iomap_write_unwritten_t)( | ||
227 | void *, xfs_off_t, size_t); | ||
228 | typedef uint (*xfs_lck_map_shared_t)(void *); | ||
229 | typedef void (*xfs_lock_t)(void *, uint); | ||
230 | typedef void (*xfs_lock_demote_t)(void *, uint); | ||
231 | typedef int (*xfs_lock_nowait_t)(void *, uint); | ||
232 | typedef void (*xfs_unlk_t)(void *, unsigned int); | ||
233 | typedef xfs_fsize_t (*xfs_size_t)(void *); | ||
234 | typedef xfs_fsize_t (*xfs_iodone_t)(struct xfs_mount *); | ||
235 | typedef int (*xfs_swap_extents_t)(void *, void *, | ||
236 | struct xfs_swapext*); | ||
237 | |||
238 | typedef struct xfs_ioops { | ||
239 | xfs_ioinit_t xfs_ioinit; | ||
240 | xfs_bmapi_t xfs_bmapi_func; | ||
241 | xfs_bunmapi_t xfs_bunmapi_func; | ||
242 | xfs_bmap_eof_t xfs_bmap_eof_func; | ||
243 | xfs_iomap_write_direct_t xfs_iomap_write_direct; | ||
244 | xfs_iomap_write_delay_t xfs_iomap_write_delay; | ||
245 | xfs_iomap_write_allocate_t xfs_iomap_write_allocate; | ||
246 | xfs_iomap_write_unwritten_t xfs_iomap_write_unwritten; | ||
247 | xfs_lock_t xfs_ilock; | ||
248 | xfs_lck_map_shared_t xfs_lck_map_shared; | ||
249 | xfs_lock_demote_t xfs_ilock_demote; | ||
250 | xfs_lock_nowait_t xfs_ilock_nowait; | ||
251 | xfs_unlk_t xfs_unlock; | ||
252 | xfs_size_t xfs_size_func; | ||
253 | xfs_iodone_t xfs_iodone; | ||
254 | xfs_swap_extents_t xfs_swap_extents_func; | ||
255 | } xfs_ioops_t; | ||
256 | |||
257 | #define XFS_IOINIT(mp, args, flags) \ | ||
258 | (*(mp)->m_io_ops.xfs_ioinit)(mp, args, flags) | ||
259 | #define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist,delta) \ | ||
260 | (*(mp)->m_io_ops.xfs_bmapi_func) \ | ||
261 | (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist,delta) | ||
262 | #define XFS_BUNMAPI(mp, trans,io,bno,len,f,nexts,first,flist,delta,done) \ | ||
263 | (*(mp)->m_io_ops.xfs_bunmapi_func) \ | ||
264 | (trans,(io)->io_obj,bno,len,f,nexts,first,flist,delta,done) | ||
265 | #define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \ | ||
266 | (*(mp)->m_io_ops.xfs_bmap_eof_func) \ | ||
267 | ((io)->io_obj, endoff, whichfork, eof) | ||
268 | #define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\ | ||
269 | (*(mp)->m_io_ops.xfs_iomap_write_direct) \ | ||
270 | ((io)->io_obj, offset, count, flags, mval, nmap, found) | ||
271 | #define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \ | ||
272 | (*(mp)->m_io_ops.xfs_iomap_write_delay) \ | ||
273 | ((io)->io_obj, offset, count, flags, mval, nmap) | ||
274 | #define XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, mval, nmap) \ | ||
275 | (*(mp)->m_io_ops.xfs_iomap_write_allocate) \ | ||
276 | ((io)->io_obj, offset, count, mval, nmap) | ||
277 | #define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \ | ||
278 | (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \ | ||
279 | ((io)->io_obj, offset, count) | ||
280 | #define XFS_LCK_MAP_SHARED(mp, io) \ | ||
281 | (*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj) | ||
282 | #define XFS_ILOCK(mp, io, mode) \ | ||
283 | (*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode) | ||
284 | #define XFS_ILOCK_NOWAIT(mp, io, mode) \ | ||
285 | (*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode) | ||
286 | #define XFS_IUNLOCK(mp, io, mode) \ | ||
287 | (*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode) | ||
288 | #define XFS_ILOCK_DEMOTE(mp, io, mode) \ | ||
289 | (*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode) | ||
290 | #define XFS_SIZE(mp, io) \ | ||
291 | (*(mp)->m_io_ops.xfs_size_func)((io)->io_obj) | ||
292 | #define XFS_IODONE(mp) \ | ||
293 | (*(mp)->m_io_ops.xfs_iodone)(mp) | ||
294 | #define XFS_SWAP_EXTENTS(mp, io, tio, sxp) \ | ||
295 | (*(mp)->m_io_ops.xfs_swap_extents_func) \ | ||
296 | ((io)->io_obj, (tio)->io_obj, sxp) | ||
297 | |||
298 | #ifdef HAVE_PERCPU_SB | 191 | #ifdef HAVE_PERCPU_SB |
299 | 192 | ||
300 | /* | 193 | /* |
@@ -326,14 +219,20 @@ extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int); | |||
326 | #define xfs_icsb_sync_counters_flags(mp, flags) do { } while (0) | 219 | #define xfs_icsb_sync_counters_flags(mp, flags) do { } while (0) |
327 | #endif | 220 | #endif |
328 | 221 | ||
222 | typedef struct xfs_ail { | ||
223 | xfs_ail_entry_t xa_ail; | ||
224 | uint xa_gen; | ||
225 | struct task_struct *xa_task; | ||
226 | xfs_lsn_t xa_target; | ||
227 | } xfs_ail_t; | ||
228 | |||
329 | typedef struct xfs_mount { | 229 | typedef struct xfs_mount { |
330 | struct super_block *m_super; | 230 | struct super_block *m_super; |
331 | xfs_tid_t m_tid; /* next unused tid for fs */ | 231 | xfs_tid_t m_tid; /* next unused tid for fs */ |
332 | AIL_LOCK_T m_ail_lock; /* fs AIL mutex */ | 232 | spinlock_t m_ail_lock; /* fs AIL mutex */ |
333 | xfs_ail_entry_t m_ail; /* fs active log item list */ | 233 | xfs_ail_t m_ail; /* fs active log item list */ |
334 | uint m_ail_gen; /* fs AIL generation count */ | ||
335 | xfs_sb_t m_sb; /* copy of fs superblock */ | 234 | xfs_sb_t m_sb; /* copy of fs superblock */ |
336 | lock_t m_sb_lock; /* sb counter mutex */ | 235 | spinlock_t m_sb_lock; /* sb counter lock */ |
337 | struct xfs_buf *m_sb_bp; /* buffer for superblock */ | 236 | struct xfs_buf *m_sb_bp; /* buffer for superblock */ |
338 | char *m_fsname; /* filesystem name */ | 237 | char *m_fsname; /* filesystem name */ |
339 | int m_fsname_len; /* strlen of fs name */ | 238 | int m_fsname_len; /* strlen of fs name */ |
@@ -342,7 +241,7 @@ typedef struct xfs_mount { | |||
342 | int m_bsize; /* fs logical block size */ | 241 | int m_bsize; /* fs logical block size */ |
343 | xfs_agnumber_t m_agfrotor; /* last ag where space found */ | 242 | xfs_agnumber_t m_agfrotor; /* last ag where space found */ |
344 | xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ | 243 | xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ |
345 | lock_t m_agirotor_lock;/* .. and lock protecting it */ | 244 | spinlock_t m_agirotor_lock;/* .. and lock protecting it */ |
346 | xfs_agnumber_t m_maxagi; /* highest inode alloc group */ | 245 | xfs_agnumber_t m_maxagi; /* highest inode alloc group */ |
347 | struct xfs_inode *m_inodes; /* active inode list */ | 246 | struct xfs_inode *m_inodes; /* active inode list */ |
348 | struct list_head m_del_inodes; /* inodes to reclaim */ | 247 | struct list_head m_del_inodes; /* inodes to reclaim */ |
@@ -423,7 +322,6 @@ typedef struct xfs_mount { | |||
423 | * hash table */ | 322 | * hash table */ |
424 | struct xfs_dmops *m_dm_ops; /* vector of DMI ops */ | 323 | struct xfs_dmops *m_dm_ops; /* vector of DMI ops */ |
425 | struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ | 324 | struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ |
426 | struct xfs_ioops m_io_ops; /* vector of I/O ops */ | ||
427 | atomic_t m_active_trans; /* number trans frozen */ | 325 | atomic_t m_active_trans; /* number trans frozen */ |
428 | #ifdef HAVE_PERCPU_SB | 326 | #ifdef HAVE_PERCPU_SB |
429 | xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ | 327 | xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ |
@@ -610,8 +508,6 @@ typedef struct xfs_mod_sb { | |||
610 | 508 | ||
611 | #define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock)) | 509 | #define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock)) |
612 | #define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) | 510 | #define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) |
613 | #define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock) | ||
614 | #define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s)) | ||
615 | 511 | ||
616 | extern xfs_mount_t *xfs_mount_init(void); | 512 | extern xfs_mount_t *xfs_mount_init(void); |
617 | extern void xfs_mod_sb(xfs_trans_t *, __int64_t); | 513 | extern void xfs_mod_sb(xfs_trans_t *, __int64_t); |
@@ -646,7 +542,6 @@ extern int xfs_qmops_get(struct xfs_mount *, struct xfs_mount_args *); | |||
646 | extern void xfs_qmops_put(struct xfs_mount *); | 542 | extern void xfs_qmops_put(struct xfs_mount *); |
647 | 543 | ||
648 | extern struct xfs_dmops xfs_dmcore_xfs; | 544 | extern struct xfs_dmops xfs_dmcore_xfs; |
649 | extern struct xfs_ioops xfs_iocore_xfs; | ||
650 | 545 | ||
651 | extern int xfs_init(void); | 546 | extern int xfs_init(void); |
652 | extern void xfs_cleanup(void); | 547 | extern void xfs_cleanup(void); |
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index e0b358c1c533..a0b2c0a2589a 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c | |||
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert( | |||
225 | * list need to be deleted. For each element this involves removing it from the | 225 | * list need to be deleted. For each element this involves removing it from the |
226 | * data store, removing it from the reap list, calling the client's free | 226 | * data store, removing it from the reap list, calling the client's free |
227 | * function and deleting the element from the element zone. | 227 | * function and deleting the element from the element zone. |
228 | * | ||
229 | * We get called holding the mru->lock, which we drop and then reacquire. | ||
230 | * Sparse need special help with this to tell it we know what we are doing. | ||
228 | */ | 231 | */ |
229 | STATIC void | 232 | STATIC void |
230 | _xfs_mru_cache_clear_reap_list( | 233 | _xfs_mru_cache_clear_reap_list( |
231 | xfs_mru_cache_t *mru) | 234 | xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock) |
235 | |||
232 | { | 236 | { |
233 | xfs_mru_cache_elem_t *elem, *next; | 237 | xfs_mru_cache_elem_t *elem, *next; |
234 | struct list_head tmp; | 238 | struct list_head tmp; |
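The comment and annotations added above tell sparse that _xfs_mru_cache_clear_reap_list() is called with mru->lock held, drops it while it frees the reaped elements, and takes it again before returning, so the apparent lock imbalance is deliberate. A stand-alone sketch of the annotation pattern; the empty macro definitions stand in for the kernel's __releases()/__acquires() so the example compiles outside a sparse run, and cache_lock/clear_reap_list are invented names:

#include <pthread.h>

/* Annotation-only macros; in the kernel, sparse gives them real meaning. */
#define __releases(x)
#define __acquires(x)

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Called with cache_lock held; drops it around the slow free work and
 * reacquires it before returning, like _xfs_mru_cache_clear_reap_list().
 */
static void clear_reap_list(void)
	__releases(cache_lock) __acquires(cache_lock)
{
	pthread_mutex_unlock(&cache_lock);
	/* ... free elements without holding the lock ... */
	pthread_mutex_lock(&cache_lock);
}

int main(void)
{
	pthread_mutex_lock(&cache_lock);
	clear_reap_list();
	pthread_mutex_unlock(&cache_lock);
	return 0;
}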
@@ -245,7 +249,7 @@ _xfs_mru_cache_clear_reap_list( | |||
245 | */ | 249 | */ |
246 | list_move(&elem->list_node, &tmp); | 250 | list_move(&elem->list_node, &tmp); |
247 | } | 251 | } |
248 | mutex_spinunlock(&mru->lock, 0); | 252 | spin_unlock(&mru->lock); |
249 | 253 | ||
250 | list_for_each_entry_safe(elem, next, &tmp, list_node) { | 254 | list_for_each_entry_safe(elem, next, &tmp, list_node) { |
251 | 255 | ||
@@ -259,7 +263,7 @@ _xfs_mru_cache_clear_reap_list( | |||
259 | kmem_zone_free(xfs_mru_elem_zone, elem); | 263 | kmem_zone_free(xfs_mru_elem_zone, elem); |
260 | } | 264 | } |
261 | 265 | ||
262 | mutex_spinlock(&mru->lock); | 266 | spin_lock(&mru->lock); |
263 | } | 267 | } |
264 | 268 | ||
265 | /* | 269 | /* |
@@ -280,7 +284,7 @@ _xfs_mru_cache_reap( | |||
280 | if (!mru || !mru->lists) | 284 | if (!mru || !mru->lists) |
281 | return; | 285 | return; |
282 | 286 | ||
283 | mutex_spinlock(&mru->lock); | 287 | spin_lock(&mru->lock); |
284 | next = _xfs_mru_cache_migrate(mru, jiffies); | 288 | next = _xfs_mru_cache_migrate(mru, jiffies); |
285 | _xfs_mru_cache_clear_reap_list(mru); | 289 | _xfs_mru_cache_clear_reap_list(mru); |
286 | 290 | ||
@@ -294,7 +298,7 @@ _xfs_mru_cache_reap( | |||
294 | queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); | 298 | queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); |
295 | } | 299 | } |
296 | 300 | ||
297 | mutex_spinunlock(&mru->lock, 0); | 301 | spin_unlock(&mru->lock); |
298 | } | 302 | } |
299 | 303 | ||
300 | int | 304 | int |
@@ -368,7 +372,7 @@ xfs_mru_cache_create( | |||
368 | */ | 372 | */ |
369 | INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); | 373 | INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); |
370 | INIT_LIST_HEAD(&mru->reap_list); | 374 | INIT_LIST_HEAD(&mru->reap_list); |
371 | spinlock_init(&mru->lock, "xfs_mru_cache"); | 375 | spin_lock_init(&mru->lock); |
372 | INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); | 376 | INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); |
373 | 377 | ||
374 | mru->grp_time = grp_time; | 378 | mru->grp_time = grp_time; |
@@ -398,17 +402,17 @@ xfs_mru_cache_flush( | |||
398 | if (!mru || !mru->lists) | 402 | if (!mru || !mru->lists) |
399 | return; | 403 | return; |
400 | 404 | ||
401 | mutex_spinlock(&mru->lock); | 405 | spin_lock(&mru->lock); |
402 | if (mru->queued) { | 406 | if (mru->queued) { |
403 | mutex_spinunlock(&mru->lock, 0); | 407 | spin_unlock(&mru->lock); |
404 | cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work); | 408 | cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work); |
405 | mutex_spinlock(&mru->lock); | 409 | spin_lock(&mru->lock); |
406 | } | 410 | } |
407 | 411 | ||
408 | _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); | 412 | _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); |
409 | _xfs_mru_cache_clear_reap_list(mru); | 413 | _xfs_mru_cache_clear_reap_list(mru); |
410 | 414 | ||
411 | mutex_spinunlock(&mru->lock, 0); | 415 | spin_unlock(&mru->lock); |
412 | } | 416 | } |
413 | 417 | ||
414 | void | 418 | void |
@@ -454,13 +458,13 @@ xfs_mru_cache_insert( | |||
454 | elem->key = key; | 458 | elem->key = key; |
455 | elem->value = value; | 459 | elem->value = value; |
456 | 460 | ||
457 | mutex_spinlock(&mru->lock); | 461 | spin_lock(&mru->lock); |
458 | 462 | ||
459 | radix_tree_insert(&mru->store, key, elem); | 463 | radix_tree_insert(&mru->store, key, elem); |
460 | radix_tree_preload_end(); | 464 | radix_tree_preload_end(); |
461 | _xfs_mru_cache_list_insert(mru, elem); | 465 | _xfs_mru_cache_list_insert(mru, elem); |
462 | 466 | ||
463 | mutex_spinunlock(&mru->lock, 0); | 467 | spin_unlock(&mru->lock); |
464 | 468 | ||
465 | return 0; | 469 | return 0; |
466 | } | 470 | } |
@@ -483,14 +487,14 @@ xfs_mru_cache_remove( | |||
483 | if (!mru || !mru->lists) | 487 | if (!mru || !mru->lists) |
484 | return NULL; | 488 | return NULL; |
485 | 489 | ||
486 | mutex_spinlock(&mru->lock); | 490 | spin_lock(&mru->lock); |
487 | elem = radix_tree_delete(&mru->store, key); | 491 | elem = radix_tree_delete(&mru->store, key); |
488 | if (elem) { | 492 | if (elem) { |
489 | value = elem->value; | 493 | value = elem->value; |
490 | list_del(&elem->list_node); | 494 | list_del(&elem->list_node); |
491 | } | 495 | } |
492 | 496 | ||
493 | mutex_spinunlock(&mru->lock, 0); | 497 | spin_unlock(&mru->lock); |
494 | 498 | ||
495 | if (elem) | 499 | if (elem) |
496 | kmem_zone_free(xfs_mru_elem_zone, elem); | 500 | kmem_zone_free(xfs_mru_elem_zone, elem); |
@@ -528,6 +532,10 @@ xfs_mru_cache_delete( | |||
528 | * | 532 | * |
529 | * If the element isn't found, this function returns NULL and the spinlock is | 533 | * If the element isn't found, this function returns NULL and the spinlock is |
530 | * released. xfs_mru_cache_done() should NOT be called when this occurs. | 534 | * released. xfs_mru_cache_done() should NOT be called when this occurs. |
535 | * | ||
536 | * Because sparse isn't smart enough to know about conditional lock return | ||
537 | * status, we need to help it get it right by annotating the path that does | ||
538 | * not release the lock. | ||
531 | */ | 539 | */ |
532 | void * | 540 | void * |
533 | xfs_mru_cache_lookup( | 541 | xfs_mru_cache_lookup( |
@@ -540,14 +548,14 @@ xfs_mru_cache_lookup( | |||
540 | if (!mru || !mru->lists) | 548 | if (!mru || !mru->lists) |
541 | return NULL; | 549 | return NULL; |
542 | 550 | ||
543 | mutex_spinlock(&mru->lock); | 551 | spin_lock(&mru->lock); |
544 | elem = radix_tree_lookup(&mru->store, key); | 552 | elem = radix_tree_lookup(&mru->store, key); |
545 | if (elem) { | 553 | if (elem) { |
546 | list_del(&elem->list_node); | 554 | list_del(&elem->list_node); |
547 | _xfs_mru_cache_list_insert(mru, elem); | 555 | _xfs_mru_cache_list_insert(mru, elem); |
548 | } | 556 | __release(mru_lock); /* help sparse not be stupid */ |
549 | else | 557 | } else |
550 | mutex_spinunlock(&mru->lock, 0); | 558 | spin_unlock(&mru->lock); |
551 | 559 | ||
552 | return elem ? elem->value : NULL; | 560 | return elem ? elem->value : NULL; |
553 | } | 561 | } |
@@ -571,10 +579,12 @@ xfs_mru_cache_peek( | |||
571 | if (!mru || !mru->lists) | 579 | if (!mru || !mru->lists) |
572 | return NULL; | 580 | return NULL; |
573 | 581 | ||
574 | mutex_spinlock(&mru->lock); | 582 | spin_lock(&mru->lock); |
575 | elem = radix_tree_lookup(&mru->store, key); | 583 | elem = radix_tree_lookup(&mru->store, key); |
576 | if (!elem) | 584 | if (!elem) |
577 | mutex_spinunlock(&mru->lock, 0); | 585 | spin_unlock(&mru->lock); |
586 | else | ||
587 | __release(mru_lock); /* help sparse not be stupid */ | ||
578 | 588 | ||
579 | return elem ? elem->value : NULL; | 589 | return elem ? elem->value : NULL; |
580 | } | 590 | } |
@@ -586,7 +596,7 @@ xfs_mru_cache_peek( | |||
586 | */ | 596 | */ |
587 | void | 597 | void |
588 | xfs_mru_cache_done( | 598 | xfs_mru_cache_done( |
589 | xfs_mru_cache_t *mru) | 599 | xfs_mru_cache_t *mru) __releases(mru->lock) |
590 | { | 600 | { |
591 | mutex_spinunlock(&mru->lock, 0); | 601 | spin_unlock(&mru->lock); |
592 | } | 602 | } |
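The xfs_mru_cache.c hunks above convert the old mutex_spinlock()/mutex_spinunlock() calls to plain spin_lock()/spin_unlock() and add sparse lock-context annotations to the functions that enter or exit with mru->lock held. Below is a minimal sketch of that annotation pattern; struct cache and the function names are made up for illustration and are not part of the XFS code.

#include <linux/spinlock.h>

struct cache {
        spinlock_t      lock;
};

/* Drops and retakes the lock internally, like _xfs_mru_cache_clear_reap_list(). */
static void cache_reap(struct cache *c) __releases(c->lock) __acquires(c->lock)
{
        spin_unlock(&c->lock);
        /* ... work that must run without the lock held ... */
        spin_lock(&c->lock);
}

/* Unlocks a lock taken by an earlier call, like xfs_mru_cache_done(). */
static void cache_done(struct cache *c) __releases(c->lock)
{
        spin_unlock(&c->lock);
}

The bare __release(mru_lock) statements added to the lookup paths serve the same purpose: they balance sparse's context tracking on the branch that deliberately returns with the lock still held.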
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c index 2ec1d8a27352..a294e58db8dd 100644 --- a/fs/xfs/xfs_qmops.c +++ b/fs/xfs/xfs_qmops.c | |||
@@ -49,18 +49,17 @@ xfs_mount_reset_sbqflags(xfs_mount_t *mp) | |||
49 | { | 49 | { |
50 | int error; | 50 | int error; |
51 | xfs_trans_t *tp; | 51 | xfs_trans_t *tp; |
52 | unsigned long s; | ||
53 | 52 | ||
54 | mp->m_qflags = 0; | 53 | mp->m_qflags = 0; |
55 | /* | 54 | /* |
56 | * It is OK to look at sb_qflags here in mount path, | 55 | * It is OK to look at sb_qflags here in mount path, |
57 | * without SB_LOCK. | 56 | * without m_sb_lock. |
58 | */ | 57 | */ |
59 | if (mp->m_sb.sb_qflags == 0) | 58 | if (mp->m_sb.sb_qflags == 0) |
60 | return 0; | 59 | return 0; |
61 | s = XFS_SB_LOCK(mp); | 60 | spin_lock(&mp->m_sb_lock); |
62 | mp->m_sb.sb_qflags = 0; | 61 | mp->m_sb.sb_qflags = 0; |
63 | XFS_SB_UNLOCK(mp, s); | 62 | spin_unlock(&mp->m_sb_lock); |
64 | 63 | ||
65 | /* | 64 | /* |
66 | * if the fs is readonly, let the incore superblock run | 65 | * if the fs is readonly, let the incore superblock run |
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 44ea0ba36476..7eb157a59f9e 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include "xfs_refcache.h" | 39 | #include "xfs_refcache.h" |
40 | #include "xfs_utils.h" | 40 | #include "xfs_utils.h" |
41 | #include "xfs_trans_space.h" | 41 | #include "xfs_trans_space.h" |
42 | #include "xfs_vnodeops.h" | ||
42 | 43 | ||
43 | 44 | ||
44 | /* | 45 | /* |
@@ -118,7 +119,7 @@ xfs_lock_for_rename( | |||
118 | inum1 = ip1->i_ino; | 119 | inum1 = ip1->i_ino; |
119 | 120 | ||
120 | ASSERT(ip1); | 121 | ASSERT(ip1); |
121 | ITRACE(ip1); | 122 | xfs_itrace_ref(ip1); |
122 | 123 | ||
123 | /* | 124 | /* |
124 | * Unlock dp1 and lock dp2 if they are different. | 125 | * Unlock dp1 and lock dp2 if they are different. |
@@ -141,7 +142,7 @@ xfs_lock_for_rename( | |||
141 | IRELE (ip1); | 142 | IRELE (ip1); |
142 | return error; | 143 | return error; |
143 | } else { | 144 | } else { |
144 | ITRACE(ip2); | 145 | xfs_itrace_ref(ip2); |
145 | } | 146 | } |
146 | 147 | ||
147 | /* | 148 | /* |
@@ -247,8 +248,8 @@ xfs_rename( | |||
247 | int src_namelen = VNAMELEN(src_vname); | 248 | int src_namelen = VNAMELEN(src_vname); |
248 | int target_namelen = VNAMELEN(target_vname); | 249 | int target_namelen = VNAMELEN(target_vname); |
249 | 250 | ||
250 | vn_trace_entry(src_dp, "xfs_rename", (inst_t *)__return_address); | 251 | xfs_itrace_entry(src_dp); |
251 | vn_trace_entry(xfs_vtoi(target_dir_vp), "xfs_rename", (inst_t *)__return_address); | 252 | xfs_itrace_entry(xfs_vtoi(target_dir_vp)); |
252 | 253 | ||
253 | /* | 254 | /* |
254 | * Find the XFS behavior descriptor for the target directory | 255 | * Find the XFS behavior descriptor for the target directory |
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 47082c01872d..ca83ddf72af4 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -73,18 +73,6 @@ STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int, | |||
73 | */ | 73 | */ |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. | ||
77 | */ | ||
78 | STATIC int | ||
79 | xfs_lowbit32( | ||
80 | __uint32_t v) | ||
81 | { | ||
82 | if (v) | ||
83 | return ffs(v) - 1; | ||
84 | return -1; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Allocate space to the bitmap or summary file, and zero it, for growfs. | 76 | * Allocate space to the bitmap or summary file, and zero it, for growfs. |
89 | */ | 77 | */ |
90 | STATIC int /* error */ | 78 | STATIC int /* error */ |
@@ -444,6 +432,7 @@ xfs_rtallocate_extent_near( | |||
444 | } | 432 | } |
445 | bbno = XFS_BITTOBLOCK(mp, bno); | 433 | bbno = XFS_BITTOBLOCK(mp, bno); |
446 | i = 0; | 434 | i = 0; |
435 | ASSERT(minlen != 0); | ||
447 | log2len = xfs_highbit32(minlen); | 436 | log2len = xfs_highbit32(minlen); |
448 | /* | 437 | /* |
449 | * Loop over all bitmap blocks (bbno + i is current block). | 438 | * Loop over all bitmap blocks (bbno + i is current block). |
@@ -612,6 +601,8 @@ xfs_rtallocate_extent_size( | |||
612 | xfs_suminfo_t sum; /* summary information for extents */ | 601 | xfs_suminfo_t sum; /* summary information for extents */ |
613 | 602 | ||
614 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); | 603 | ASSERT(minlen % prod == 0 && maxlen % prod == 0); |
604 | ASSERT(maxlen != 0); | ||
605 | |||
615 | /* | 606 | /* |
616 | * Loop over all the levels starting with maxlen. | 607 | * Loop over all the levels starting with maxlen. |
617 | * At each level, look at all the bitmap blocks, to see if there | 608 | * At each level, look at all the bitmap blocks, to see if there |
@@ -669,6 +660,9 @@ xfs_rtallocate_extent_size( | |||
669 | *rtblock = NULLRTBLOCK; | 660 | *rtblock = NULLRTBLOCK; |
670 | return 0; | 661 | return 0; |
671 | } | 662 | } |
663 | ASSERT(minlen != 0); | ||
664 | ASSERT(maxlen != 0); | ||
665 | |||
672 | /* | 666 | /* |
673 | * Loop over sizes, from maxlen down to minlen. | 667 | * Loop over sizes, from maxlen down to minlen. |
674 | * This time, when we do the allocations, allow smaller ones | 668 | * This time, when we do the allocations, allow smaller ones |
@@ -1954,6 +1948,7 @@ xfs_growfs_rt( | |||
1954 | nsbp->sb_blocksize * nsbp->sb_rextsize); | 1948 | nsbp->sb_blocksize * nsbp->sb_rextsize); |
1955 | nsbp->sb_rextents = nsbp->sb_rblocks; | 1949 | nsbp->sb_rextents = nsbp->sb_rblocks; |
1956 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); | 1950 | do_div(nsbp->sb_rextents, nsbp->sb_rextsize); |
1951 | ASSERT(nsbp->sb_rextents != 0); | ||
1957 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); | 1952 | nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); |
1958 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; | 1953 | nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; |
1959 | nrsumsize = | 1954 | nrsumsize = |
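The xfs_rtalloc.c hunks above drop the local xfs_lowbit32() helper and add ASSERT(minlen != 0)/ASSERT(maxlen != 0)/ASSERT(nsbp->sb_rextents != 0) guards in front of the xfs_highbit32() calls, since asking for the highest set bit of zero is meaningless. For reference only, the removed helper and its highest-bit counterpart reduce to the generic bitops shown here; these example_* functions are illustrative, not the real XFS definitions.

#include <linux/bitops.h>
#include <linux/types.h>

/* Index of the lowest set bit, or -1 if no bit is set (what xfs_lowbit32() did). */
static inline int example_lowbit32(u32 v)
{
        return v ? ffs(v) - 1 : -1;
}

/* Index of the highest set bit, or -1 if no bit is set. */
static inline int example_highbit32(u32 v)
{
        return v ? fls(v) - 1 : -1;
}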
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h index 799c1f871263..8d8dcd215716 100644 --- a/fs/xfs/xfs_rtalloc.h +++ b/fs/xfs/xfs_rtalloc.h | |||
@@ -21,8 +21,6 @@ | |||
21 | struct xfs_mount; | 21 | struct xfs_mount; |
22 | struct xfs_trans; | 22 | struct xfs_trans; |
23 | 23 | ||
24 | #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) | ||
25 | |||
26 | /* Min and max rt extent sizes, specified in bytes */ | 24 | /* Min and max rt extent sizes, specified in bytes */ |
27 | #define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ | 25 | #define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ |
28 | #define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */ | 26 | #define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */ |
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h index 49875e1d129f..f87db5344ce6 100644 --- a/fs/xfs/xfs_rw.h +++ b/fs/xfs/xfs_rw.h | |||
@@ -32,18 +32,10 @@ struct xfs_mount; | |||
32 | static inline xfs_daddr_t | 32 | static inline xfs_daddr_t |
33 | xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) | 33 | xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) |
34 | { | 34 | { |
35 | return (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) ? \ | 35 | return (XFS_IS_REALTIME_INODE(ip) ? \ |
36 | (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ | 36 | (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ |
37 | XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))); | 37 | XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))); |
38 | } | 38 | } |
39 | #define XFS_FSB_TO_DB_IO(io,fsb) xfs_fsb_to_db_io(io,fsb) | ||
40 | static inline xfs_daddr_t | ||
41 | xfs_fsb_to_db_io(struct xfs_iocore *io, xfs_fsblock_t fsb) | ||
42 | { | ||
43 | return (((io)->io_flags & XFS_IOCORE_RT) ? \ | ||
44 | XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ | ||
45 | XFS_FSB_TO_DADDR((io)->io_mount, (fsb))); | ||
46 | } | ||
47 | 39 | ||
48 | /* | 40 | /* |
49 | * Flags for xfs_free_eofblocks | 41 | * Flags for xfs_free_eofblocks |
@@ -61,7 +53,7 @@ xfs_get_extsz_hint( | |||
61 | { | 53 | { |
62 | xfs_extlen_t extsz; | 54 | xfs_extlen_t extsz; |
63 | 55 | ||
64 | if (unlikely(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { | 56 | if (unlikely(XFS_IS_REALTIME_INODE(ip))) { |
65 | extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) | 57 | extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) |
66 | ? ip->i_d.di_extsize | 58 | ? ip->i_d.di_extsize |
67 | : ip->i_mount->m_sb.sb_rextsize; | 59 | : ip->i_mount->m_sb.sb_rextsize; |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 8878322ee793..140386434aa3 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -567,26 +567,26 @@ xfs_trans_apply_sb_deltas( | |||
567 | */ | 567 | */ |
568 | if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) { | 568 | if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) { |
569 | if (tp->t_icount_delta) | 569 | if (tp->t_icount_delta) |
570 | be64_add(&sbp->sb_icount, tp->t_icount_delta); | 570 | be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta); |
571 | if (tp->t_ifree_delta) | 571 | if (tp->t_ifree_delta) |
572 | be64_add(&sbp->sb_ifree, tp->t_ifree_delta); | 572 | be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta); |
573 | if (tp->t_fdblocks_delta) | 573 | if (tp->t_fdblocks_delta) |
574 | be64_add(&sbp->sb_fdblocks, tp->t_fdblocks_delta); | 574 | be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta); |
575 | if (tp->t_res_fdblocks_delta) | 575 | if (tp->t_res_fdblocks_delta) |
576 | be64_add(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta); | 576 | be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta); |
577 | } | 577 | } |
578 | 578 | ||
579 | if (tp->t_frextents_delta) | 579 | if (tp->t_frextents_delta) |
580 | be64_add(&sbp->sb_frextents, tp->t_frextents_delta); | 580 | be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta); |
581 | if (tp->t_res_frextents_delta) | 581 | if (tp->t_res_frextents_delta) |
582 | be64_add(&sbp->sb_frextents, tp->t_res_frextents_delta); | 582 | be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta); |
583 | 583 | ||
584 | if (tp->t_dblocks_delta) { | 584 | if (tp->t_dblocks_delta) { |
585 | be64_add(&sbp->sb_dblocks, tp->t_dblocks_delta); | 585 | be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta); |
586 | whole = 1; | 586 | whole = 1; |
587 | } | 587 | } |
588 | if (tp->t_agcount_delta) { | 588 | if (tp->t_agcount_delta) { |
589 | be32_add(&sbp->sb_agcount, tp->t_agcount_delta); | 589 | be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta); |
590 | whole = 1; | 590 | whole = 1; |
591 | } | 591 | } |
592 | if (tp->t_imaxpct_delta) { | 592 | if (tp->t_imaxpct_delta) { |
@@ -594,19 +594,19 @@ xfs_trans_apply_sb_deltas( | |||
594 | whole = 1; | 594 | whole = 1; |
595 | } | 595 | } |
596 | if (tp->t_rextsize_delta) { | 596 | if (tp->t_rextsize_delta) { |
597 | be32_add(&sbp->sb_rextsize, tp->t_rextsize_delta); | 597 | be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta); |
598 | whole = 1; | 598 | whole = 1; |
599 | } | 599 | } |
600 | if (tp->t_rbmblocks_delta) { | 600 | if (tp->t_rbmblocks_delta) { |
601 | be32_add(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta); | 601 | be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta); |
602 | whole = 1; | 602 | whole = 1; |
603 | } | 603 | } |
604 | if (tp->t_rblocks_delta) { | 604 | if (tp->t_rblocks_delta) { |
605 | be64_add(&sbp->sb_rblocks, tp->t_rblocks_delta); | 605 | be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta); |
606 | whole = 1; | 606 | whole = 1; |
607 | } | 607 | } |
608 | if (tp->t_rextents_delta) { | 608 | if (tp->t_rextents_delta) { |
609 | be64_add(&sbp->sb_rextents, tp->t_rextents_delta); | 609 | be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta); |
610 | whole = 1; | 610 | whole = 1; |
611 | } | 611 | } |
612 | if (tp->t_rextslog_delta) { | 612 | if (tp->t_rextslog_delta) { |
@@ -1322,7 +1322,6 @@ xfs_trans_chunk_committed( | |||
1322 | xfs_lsn_t item_lsn; | 1322 | xfs_lsn_t item_lsn; |
1323 | struct xfs_mount *mp; | 1323 | struct xfs_mount *mp; |
1324 | int i; | 1324 | int i; |
1325 | SPLDECL(s); | ||
1326 | 1325 | ||
1327 | lidp = licp->lic_descs; | 1326 | lidp = licp->lic_descs; |
1328 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | 1327 | for (i = 0; i < licp->lic_unused; i++, lidp++) { |
@@ -1363,7 +1362,7 @@ xfs_trans_chunk_committed( | |||
1363 | * the test below. | 1362 | * the test below. |
1364 | */ | 1363 | */ |
1365 | mp = lip->li_mountp; | 1364 | mp = lip->li_mountp; |
1366 | AIL_LOCK(mp,s); | 1365 | spin_lock(&mp->m_ail_lock); |
1367 | if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { | 1366 | if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { |
1368 | /* | 1367 | /* |
1369 | * This will set the item's lsn to item_lsn | 1368 | * This will set the item's lsn to item_lsn |
@@ -1372,9 +1371,9 @@ xfs_trans_chunk_committed( | |||
1372 | * | 1371 | * |
1373 | * xfs_trans_update_ail() drops the AIL lock. | 1372 | * xfs_trans_update_ail() drops the AIL lock. |
1374 | */ | 1373 | */ |
1375 | xfs_trans_update_ail(mp, lip, item_lsn, s); | 1374 | xfs_trans_update_ail(mp, lip, item_lsn); |
1376 | } else { | 1375 | } else { |
1377 | AIL_UNLOCK(mp, s); | 1376 | spin_unlock(&mp->m_ail_lock); |
1378 | } | 1377 | } |
1379 | 1378 | ||
1380 | /* | 1379 | /* |
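The xfs_trans_apply_sb_deltas() hunks above switch from the XFS-private be64_add()/be32_add() helpers to the generic be64_add_cpu()/be32_add_cpu() ones. Their effect is an in-place addition of a CPU-endian delta to a big-endian on-disk field, roughly as sketched below; this local example_* copy is illustrative only, the real helpers come from the kernel byteorder headers.

#include <linux/types.h>
#include <asm/byteorder.h>

static inline void example_be64_add_cpu(__be64 *var, u64 val)
{
        *var = cpu_to_be64(be64_to_cpu(*var) + val);
}

static inline void example_be32_add_cpu(__be32 *var, u32 val)
{
        *var = cpu_to_be32(be32_to_cpu(*var) + val);
}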
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 0e26e729023e..7f40628d85c7 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -992,8 +992,9 @@ int _xfs_trans_commit(xfs_trans_t *, | |||
992 | int *); | 992 | int *); |
993 | #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) | 993 | #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) |
994 | void xfs_trans_cancel(xfs_trans_t *, int); | 994 | void xfs_trans_cancel(xfs_trans_t *, int); |
995 | void xfs_trans_ail_init(struct xfs_mount *); | 995 | int xfs_trans_ail_init(struct xfs_mount *); |
996 | xfs_lsn_t xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); | 996 | void xfs_trans_ail_destroy(struct xfs_mount *); |
997 | void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); | ||
997 | xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); | 998 | xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); |
998 | void xfs_trans_unlocked_item(struct xfs_mount *, | 999 | void xfs_trans_unlocked_item(struct xfs_mount *, |
999 | xfs_log_item_t *); | 1000 | xfs_log_item_t *); |
@@ -1001,6 +1002,8 @@ xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, | |||
1001 | xfs_agnumber_t ag, | 1002 | xfs_agnumber_t ag, |
1002 | xfs_extlen_t idx); | 1003 | xfs_extlen_t idx); |
1003 | 1004 | ||
1005 | extern kmem_zone_t *xfs_trans_zone; | ||
1006 | |||
1004 | #endif /* __KERNEL__ */ | 1007 | #endif /* __KERNEL__ */ |
1005 | 1008 | ||
1006 | #endif /* __XFS_TRANS_H__ */ | 1009 | #endif /* __XFS_TRANS_H__ */ |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 5b2ff59f19cf..4d6330eddc8d 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -34,9 +34,9 @@ STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_entry_t *); | |||
34 | STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *); | 34 | STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *); |
35 | 35 | ||
36 | #ifdef DEBUG | 36 | #ifdef DEBUG |
37 | STATIC void xfs_ail_check(xfs_ail_entry_t *); | 37 | STATIC void xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *); |
38 | #else | 38 | #else |
39 | #define xfs_ail_check(a) | 39 | #define xfs_ail_check(a,l) |
40 | #endif /* DEBUG */ | 40 | #endif /* DEBUG */ |
41 | 41 | ||
42 | 42 | ||
@@ -55,16 +55,15 @@ xfs_trans_tail_ail( | |||
55 | { | 55 | { |
56 | xfs_lsn_t lsn; | 56 | xfs_lsn_t lsn; |
57 | xfs_log_item_t *lip; | 57 | xfs_log_item_t *lip; |
58 | SPLDECL(s); | ||
59 | 58 | ||
60 | AIL_LOCK(mp,s); | 59 | spin_lock(&mp->m_ail_lock); |
61 | lip = xfs_ail_min(&(mp->m_ail)); | 60 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); |
62 | if (lip == NULL) { | 61 | if (lip == NULL) { |
63 | lsn = (xfs_lsn_t)0; | 62 | lsn = (xfs_lsn_t)0; |
64 | } else { | 63 | } else { |
65 | lsn = lip->li_lsn; | 64 | lsn = lip->li_lsn; |
66 | } | 65 | } |
67 | AIL_UNLOCK(mp, s); | 66 | spin_unlock(&mp->m_ail_lock); |
68 | 67 | ||
69 | return lsn; | 68 | return lsn; |
70 | } | 69 | } |
@@ -72,120 +71,185 @@ xfs_trans_tail_ail( | |||
72 | /* | 71 | /* |
73 | * xfs_trans_push_ail | 72 | * xfs_trans_push_ail |
74 | * | 73 | * |
75 | * This routine is called to move the tail of the AIL | 74 | * This routine is called to move the tail of the AIL forward. It does this by |
76 | * forward. It does this by trying to flush items in the AIL | 75 | * trying to flush items in the AIL whose lsns are below the given |
77 | * whose lsns are below the given threshold_lsn. | 76 | * threshold_lsn. |
78 | * | 77 | * |
79 | * The routine returns the lsn of the tail of the log. | 78 | * The push is run asynchronously in a separate thread, so we return the tail |
79 | * of the log right now instead of the tail after the push. This means we will | ||
80 | * either continue right away, or we will sleep waiting on the async thread to | ||
81 | * do its work. | ||
82 | * | ||
83 | * We do this unlocked - we only need to know whether there is anything in the | ||
84 | * AIL at the time we are called. We don't need to access the contents of | ||
85 | * any of the objects, so the lock is not needed. | ||
80 | */ | 86 | */ |
81 | xfs_lsn_t | 87 | void |
82 | xfs_trans_push_ail( | 88 | xfs_trans_push_ail( |
83 | xfs_mount_t *mp, | 89 | xfs_mount_t *mp, |
84 | xfs_lsn_t threshold_lsn) | 90 | xfs_lsn_t threshold_lsn) |
85 | { | 91 | { |
86 | xfs_lsn_t lsn; | ||
87 | xfs_log_item_t *lip; | 92 | xfs_log_item_t *lip; |
88 | int gen; | ||
89 | int restarts; | ||
90 | int lock_result; | ||
91 | int flush_log; | ||
92 | SPLDECL(s); | ||
93 | 93 | ||
94 | #define XFS_TRANS_PUSH_AIL_RESTARTS 1000 | 94 | lip = xfs_ail_min(&mp->m_ail.xa_ail); |
95 | if (lip && !XFS_FORCED_SHUTDOWN(mp)) { | ||
96 | if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0) | ||
97 | xfsaild_wakeup(mp, threshold_lsn); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Return the item in the AIL with the current lsn. | ||
103 | * Return the current tree generation number for use | ||
104 | * in calls to xfs_trans_next_ail(). | ||
105 | */ | ||
106 | STATIC xfs_log_item_t * | ||
107 | xfs_trans_first_push_ail( | ||
108 | xfs_mount_t *mp, | ||
109 | int *gen, | ||
110 | xfs_lsn_t lsn) | ||
111 | { | ||
112 | xfs_log_item_t *lip; | ||
113 | |||
114 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); | ||
115 | *gen = (int)mp->m_ail.xa_gen; | ||
116 | if (lsn == 0) | ||
117 | return lip; | ||
118 | |||
119 | while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0)) | ||
120 | lip = lip->li_ail.ail_forw; | ||
95 | 121 | ||
96 | AIL_LOCK(mp,s); | 122 | return lip; |
97 | lip = xfs_trans_first_ail(mp, &gen); | 123 | } |
98 | if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) { | 124 | |
125 | /* | ||
126 | * Function that does the work of pushing on the AIL | ||
127 | */ | ||
128 | long | ||
129 | xfsaild_push( | ||
130 | xfs_mount_t *mp, | ||
131 | xfs_lsn_t *last_lsn) | ||
132 | { | ||
133 | long tout = 1000; /* milliseconds */ | ||
134 | xfs_lsn_t last_pushed_lsn = *last_lsn; | ||
135 | xfs_lsn_t target = mp->m_ail.xa_target; | ||
136 | xfs_lsn_t lsn; | ||
137 | xfs_log_item_t *lip; | ||
138 | int gen; | ||
139 | int restarts; | ||
140 | int flush_log, count, stuck; | ||
141 | |||
142 | #define XFS_TRANS_PUSH_AIL_RESTARTS 10 | ||
143 | |||
144 | spin_lock(&mp->m_ail_lock); | ||
145 | lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn); | ||
146 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { | ||
99 | /* | 147 | /* |
100 | * Just return if the AIL is empty. | 148 | * AIL is empty or our push has reached the end. |
101 | */ | 149 | */ |
102 | AIL_UNLOCK(mp, s); | 150 | spin_unlock(&mp->m_ail_lock); |
103 | return (xfs_lsn_t)0; | 151 | last_pushed_lsn = 0; |
152 | goto out; | ||
104 | } | 153 | } |
105 | 154 | ||
106 | XFS_STATS_INC(xs_push_ail); | 155 | XFS_STATS_INC(xs_push_ail); |
107 | 156 | ||
108 | /* | 157 | /* |
109 | * While the item we are looking at is below the given threshold | 158 | * While the item we are looking at is below the given threshold |
110 | * try to flush it out. Make sure to limit the number of times | 159 | * try to flush it out. We'd like not to stop until we've at least |
111 | * we allow xfs_trans_next_ail() to restart scanning from the | ||
112 | * beginning of the list. We'd like not to stop until we've at least | ||
113 | * tried to push on everything in the AIL with an LSN less than | 160 | * tried to push on everything in the AIL with an LSN less than |
114 | * the given threshold. However, we may give up before that if | 161 | * the given threshold. |
115 | * we realize that we've been holding the AIL_LOCK for 'too long', | 162 | * |
116 | * blocking interrupts. Currently, too long is < 500us roughly. | 163 | * However, we will stop after a certain number of pushes and wait |
164 | * for a reduced timeout to fire before pushing further. This | ||
165 | * prevents use from spinning when we can't do anything or there is | ||
166 | * lots of contention on the AIL lists. | ||
117 | */ | 167 | */ |
118 | flush_log = 0; | 168 | tout = 10; |
119 | restarts = 0; | 169 | lsn = lip->li_lsn; |
120 | while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) && | 170 | flush_log = stuck = count = restarts = 0; |
121 | (XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) { | 171 | while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { |
172 | int lock_result; | ||
122 | /* | 173 | /* |
123 | * If we can lock the item without sleeping, unlock | 174 | * If we can lock the item without sleeping, unlock the AIL |
124 | * the AIL lock and flush the item. Then re-grab the | 175 | * lock and flush the item. Then re-grab the AIL lock so we |
125 | * AIL lock so we can look for the next item on the | 176 | * can look for the next item on the AIL. List changes are |
126 | * AIL. Since we unlock the AIL while we flush the | 177 | * handled by the AIL lookup functions internally |
127 | * item, the next routine may start over again at the | ||
128 | * the beginning of the list if anything has changed. | ||
129 | * That is what the generation count is for. | ||
130 | * | 178 | * |
131 | * If we can't lock the item, either its holder will flush | 179 | * If we can't lock the item, either its holder will flush it |
132 | * it or it is already being flushed or it is being relogged. | 180 | * or it is already being flushed or it is being relogged. In |
133 | * In any of these case it is being taken care of and we | 181 | * any of these case it is being taken care of and we can just |
134 | * can just skip to the next item in the list. | 182 | * skip to the next item in the list. |
135 | */ | 183 | */ |
136 | lock_result = IOP_TRYLOCK(lip); | 184 | lock_result = IOP_TRYLOCK(lip); |
185 | spin_unlock(&mp->m_ail_lock); | ||
137 | switch (lock_result) { | 186 | switch (lock_result) { |
138 | case XFS_ITEM_SUCCESS: | 187 | case XFS_ITEM_SUCCESS: |
139 | AIL_UNLOCK(mp, s); | ||
140 | XFS_STATS_INC(xs_push_ail_success); | 188 | XFS_STATS_INC(xs_push_ail_success); |
141 | IOP_PUSH(lip); | 189 | IOP_PUSH(lip); |
142 | AIL_LOCK(mp,s); | 190 | last_pushed_lsn = lsn; |
143 | break; | 191 | break; |
144 | 192 | ||
145 | case XFS_ITEM_PUSHBUF: | 193 | case XFS_ITEM_PUSHBUF: |
146 | AIL_UNLOCK(mp, s); | ||
147 | XFS_STATS_INC(xs_push_ail_pushbuf); | 194 | XFS_STATS_INC(xs_push_ail_pushbuf); |
148 | #ifdef XFSRACEDEBUG | ||
149 | delay_for_intr(); | ||
150 | delay(300); | ||
151 | #endif | ||
152 | ASSERT(lip->li_ops->iop_pushbuf); | ||
153 | ASSERT(lip); | ||
154 | IOP_PUSHBUF(lip); | 195 | IOP_PUSHBUF(lip); |
155 | AIL_LOCK(mp,s); | 196 | last_pushed_lsn = lsn; |
156 | break; | 197 | break; |
157 | 198 | ||
158 | case XFS_ITEM_PINNED: | 199 | case XFS_ITEM_PINNED: |
159 | XFS_STATS_INC(xs_push_ail_pinned); | 200 | XFS_STATS_INC(xs_push_ail_pinned); |
201 | stuck++; | ||
160 | flush_log = 1; | 202 | flush_log = 1; |
161 | break; | 203 | break; |
162 | 204 | ||
163 | case XFS_ITEM_LOCKED: | 205 | case XFS_ITEM_LOCKED: |
164 | XFS_STATS_INC(xs_push_ail_locked); | 206 | XFS_STATS_INC(xs_push_ail_locked); |
207 | last_pushed_lsn = lsn; | ||
208 | stuck++; | ||
165 | break; | 209 | break; |
166 | 210 | ||
167 | case XFS_ITEM_FLUSHING: | 211 | case XFS_ITEM_FLUSHING: |
168 | XFS_STATS_INC(xs_push_ail_flushing); | 212 | XFS_STATS_INC(xs_push_ail_flushing); |
213 | last_pushed_lsn = lsn; | ||
214 | stuck++; | ||
169 | break; | 215 | break; |
170 | 216 | ||
171 | default: | 217 | default: |
172 | ASSERT(0); | 218 | ASSERT(0); |
173 | break; | 219 | break; |
174 | } | 220 | } |
175 | 221 | ||
176 | lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); | 222 | spin_lock(&mp->m_ail_lock); |
177 | if (lip == NULL) { | 223 | /* should we bother continuing? */ |
224 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
225 | break; | ||
226 | ASSERT(mp->m_log); | ||
227 | |||
228 | count++; | ||
229 | |||
230 | /* | ||
231 | * Are there too many items we can't do anything with? | ||
232 | * If we are skipping too many items because we can't flush | ||
233 | * them or they are already being flushed, we back off and | ||
234 | * give them time to complete whatever operation is being | ||
235 | * done. i.e. remove pressure from the AIL while we can't make | ||
236 | * progress so traversals don't slow down further inserts and | ||
237 | * removals to/from the AIL. | ||
238 | * | ||
239 | * The value of 100 is an arbitrary magic number based on | ||
240 | * observation. | ||
241 | */ | ||
242 | if (stuck > 100) | ||
178 | break; | 243 | break; |
179 | } | ||
180 | if (XFS_FORCED_SHUTDOWN(mp)) { | ||
181 | /* | ||
182 | * Just return if we shut down during the last try. | ||
183 | */ | ||
184 | AIL_UNLOCK(mp, s); | ||
185 | return (xfs_lsn_t)0; | ||
186 | } | ||
187 | 244 | ||
245 | lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); | ||
246 | if (lip == NULL) | ||
247 | break; | ||
248 | if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS) | ||
249 | break; | ||
250 | lsn = lip->li_lsn; | ||
188 | } | 251 | } |
252 | spin_unlock(&mp->m_ail_lock); | ||
189 | 253 | ||
190 | if (flush_log) { | 254 | if (flush_log) { |
191 | /* | 255 | /* |
@@ -193,22 +257,35 @@ xfs_trans_push_ail( | |||
193 | * push out the log so it will become unpinned and | 257 | * push out the log so it will become unpinned and |
194 | * move forward in the AIL. | 258 | * move forward in the AIL. |
195 | */ | 259 | */ |
196 | AIL_UNLOCK(mp, s); | ||
197 | XFS_STATS_INC(xs_push_ail_flush); | 260 | XFS_STATS_INC(xs_push_ail_flush); |
198 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); | 261 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); |
199 | AIL_LOCK(mp, s); | ||
200 | } | 262 | } |
201 | 263 | ||
202 | lip = xfs_ail_min(&(mp->m_ail)); | 264 | /* |
203 | if (lip == NULL) { | 265 | * We reached the target so wait a bit longer for I/O to complete and |
204 | lsn = (xfs_lsn_t)0; | 266 | * remove pushed items from the AIL before we start the next scan from |
205 | } else { | 267 | * the start of the AIL. |
206 | lsn = lip->li_lsn; | 268 | */ |
269 | if ((XFS_LSN_CMP(lsn, target) >= 0)) { | ||
270 | tout += 20; | ||
271 | last_pushed_lsn = 0; | ||
272 | } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) || | ||
273 | (count && ((stuck * 100) / count > 90))) { | ||
274 | /* | ||
275 | * Either there is a lot of contention on the AIL or we | ||
276 | * are stuck due to operations in progress. "Stuck" in this | ||
277 | * case is defined as >90% of the items we tried to push | ||
278 | * were stuck. | ||
279 | * | ||
280 | * Backoff a bit more to allow some I/O to complete before | ||
281 | * continuing from where we were. | ||
282 | */ | ||
283 | tout += 10; | ||
207 | } | 284 | } |
208 | 285 | out: | |
209 | AIL_UNLOCK(mp, s); | 286 | *last_lsn = last_pushed_lsn; |
210 | return lsn; | 287 | return tout; |
211 | } /* xfs_trans_push_ail */ | 288 | } /* xfsaild_push */ |
212 | 289 | ||
213 | 290 | ||
214 | /* | 291 | /* |
@@ -249,7 +326,7 @@ xfs_trans_unlocked_item( | |||
249 | * the call to xfs_log_move_tail() doesn't do anything if there's | 326 | * the call to xfs_log_move_tail() doesn't do anything if there's |
250 | * not enough free space to wake people up so we're safe calling it. | 327 | * not enough free space to wake people up so we're safe calling it. |
251 | */ | 328 | */ |
252 | min_lip = xfs_ail_min(&mp->m_ail); | 329 | min_lip = xfs_ail_min(&mp->m_ail.xa_ail); |
253 | 330 | ||
254 | if (min_lip == lip) | 331 | if (min_lip == lip) |
255 | xfs_log_move_tail(mp, 1); | 332 | xfs_log_move_tail(mp, 1); |
@@ -269,21 +346,19 @@ xfs_trans_unlocked_item( | |||
269 | * has changed. | 346 | * has changed. |
270 | * | 347 | * |
271 | * This function must be called with the AIL lock held. The lock | 348 | * This function must be called with the AIL lock held. The lock |
272 | * is dropped before returning, so the caller must pass in the | 349 | * is dropped before returning. |
273 | * cookie returned by AIL_LOCK. | ||
274 | */ | 350 | */ |
275 | void | 351 | void |
276 | xfs_trans_update_ail( | 352 | xfs_trans_update_ail( |
277 | xfs_mount_t *mp, | 353 | xfs_mount_t *mp, |
278 | xfs_log_item_t *lip, | 354 | xfs_log_item_t *lip, |
279 | xfs_lsn_t lsn, | 355 | xfs_lsn_t lsn) __releases(mp->m_ail_lock) |
280 | unsigned long s) __releases(mp->m_ail_lock) | ||
281 | { | 356 | { |
282 | xfs_ail_entry_t *ailp; | 357 | xfs_ail_entry_t *ailp; |
283 | xfs_log_item_t *dlip=NULL; | 358 | xfs_log_item_t *dlip=NULL; |
284 | xfs_log_item_t *mlip; /* ptr to minimum lip */ | 359 | xfs_log_item_t *mlip; /* ptr to minimum lip */ |
285 | 360 | ||
286 | ailp = &(mp->m_ail); | 361 | ailp = &(mp->m_ail.xa_ail); |
287 | mlip = xfs_ail_min(ailp); | 362 | mlip = xfs_ail_min(ailp); |
288 | 363 | ||
289 | if (lip->li_flags & XFS_LI_IN_AIL) { | 364 | if (lip->li_flags & XFS_LI_IN_AIL) { |
@@ -296,14 +371,14 @@ xfs_trans_update_ail( | |||
296 | lip->li_lsn = lsn; | 371 | lip->li_lsn = lsn; |
297 | 372 | ||
298 | xfs_ail_insert(ailp, lip); | 373 | xfs_ail_insert(ailp, lip); |
299 | mp->m_ail_gen++; | 374 | mp->m_ail.xa_gen++; |
300 | 375 | ||
301 | if (mlip == dlip) { | 376 | if (mlip == dlip) { |
302 | mlip = xfs_ail_min(&(mp->m_ail)); | 377 | mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); |
303 | AIL_UNLOCK(mp, s); | 378 | spin_unlock(&mp->m_ail_lock); |
304 | xfs_log_move_tail(mp, mlip->li_lsn); | 379 | xfs_log_move_tail(mp, mlip->li_lsn); |
305 | } else { | 380 | } else { |
306 | AIL_UNLOCK(mp, s); | 381 | spin_unlock(&mp->m_ail_lock); |
307 | } | 382 | } |
308 | 383 | ||
309 | 384 | ||
@@ -322,21 +397,19 @@ xfs_trans_update_ail( | |||
322 | * has changed. | 397 | * has changed. |
323 | * | 398 | * |
324 | * This function must be called with the AIL lock held. The lock | 399 | * This function must be called with the AIL lock held. The lock |
325 | * is dropped before returning, so the caller must pass in the | 400 | * is dropped before returning. |
326 | * cookie returned by AIL_LOCK. | ||
327 | */ | 401 | */ |
328 | void | 402 | void |
329 | xfs_trans_delete_ail( | 403 | xfs_trans_delete_ail( |
330 | xfs_mount_t *mp, | 404 | xfs_mount_t *mp, |
331 | xfs_log_item_t *lip, | 405 | xfs_log_item_t *lip) __releases(mp->m_ail_lock) |
332 | unsigned long s) __releases(mp->m_ail_lock) | ||
333 | { | 406 | { |
334 | xfs_ail_entry_t *ailp; | 407 | xfs_ail_entry_t *ailp; |
335 | xfs_log_item_t *dlip; | 408 | xfs_log_item_t *dlip; |
336 | xfs_log_item_t *mlip; | 409 | xfs_log_item_t *mlip; |
337 | 410 | ||
338 | if (lip->li_flags & XFS_LI_IN_AIL) { | 411 | if (lip->li_flags & XFS_LI_IN_AIL) { |
339 | ailp = &(mp->m_ail); | 412 | ailp = &(mp->m_ail.xa_ail); |
340 | mlip = xfs_ail_min(ailp); | 413 | mlip = xfs_ail_min(ailp); |
341 | dlip = xfs_ail_delete(ailp, lip); | 414 | dlip = xfs_ail_delete(ailp, lip); |
342 | ASSERT(dlip == lip); | 415 | ASSERT(dlip == lip); |
@@ -344,14 +417,14 @@ xfs_trans_delete_ail( | |||
344 | 417 | ||
345 | lip->li_flags &= ~XFS_LI_IN_AIL; | 418 | lip->li_flags &= ~XFS_LI_IN_AIL; |
346 | lip->li_lsn = 0; | 419 | lip->li_lsn = 0; |
347 | mp->m_ail_gen++; | 420 | mp->m_ail.xa_gen++; |
348 | 421 | ||
349 | if (mlip == dlip) { | 422 | if (mlip == dlip) { |
350 | mlip = xfs_ail_min(&(mp->m_ail)); | 423 | mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); |
351 | AIL_UNLOCK(mp, s); | 424 | spin_unlock(&mp->m_ail_lock); |
352 | xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); | 425 | xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); |
353 | } else { | 426 | } else { |
354 | AIL_UNLOCK(mp, s); | 427 | spin_unlock(&mp->m_ail_lock); |
355 | } | 428 | } |
356 | } | 429 | } |
357 | else { | 430 | else { |
@@ -360,12 +433,12 @@ xfs_trans_delete_ail( | |||
360 | * serious trouble if we get to this stage. | 433 | * serious trouble if we get to this stage. |
361 | */ | 434 | */ |
362 | if (XFS_FORCED_SHUTDOWN(mp)) | 435 | if (XFS_FORCED_SHUTDOWN(mp)) |
363 | AIL_UNLOCK(mp, s); | 436 | spin_unlock(&mp->m_ail_lock); |
364 | else { | 437 | else { |
365 | xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, | 438 | xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, |
366 | "%s: attempting to delete a log item that is not in the AIL", | 439 | "%s: attempting to delete a log item that is not in the AIL", |
367 | __FUNCTION__); | 440 | __FUNCTION__); |
368 | AIL_UNLOCK(mp, s); | 441 | spin_unlock(&mp->m_ail_lock); |
369 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 442 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
370 | } | 443 | } |
371 | } | 444 | } |
@@ -385,10 +458,10 @@ xfs_trans_first_ail( | |||
385 | { | 458 | { |
386 | xfs_log_item_t *lip; | 459 | xfs_log_item_t *lip; |
387 | 460 | ||
388 | lip = xfs_ail_min(&(mp->m_ail)); | 461 | lip = xfs_ail_min(&(mp->m_ail.xa_ail)); |
389 | *gen = (int)mp->m_ail_gen; | 462 | *gen = (int)mp->m_ail.xa_gen; |
390 | 463 | ||
391 | return (lip); | 464 | return lip; |
392 | } | 465 | } |
393 | 466 | ||
394 | /* | 467 | /* |
@@ -408,11 +481,11 @@ xfs_trans_next_ail( | |||
408 | xfs_log_item_t *nlip; | 481 | xfs_log_item_t *nlip; |
409 | 482 | ||
410 | ASSERT(mp && lip && gen); | 483 | ASSERT(mp && lip && gen); |
411 | if (mp->m_ail_gen == *gen) { | 484 | if (mp->m_ail.xa_gen == *gen) { |
412 | nlip = xfs_ail_next(&(mp->m_ail), lip); | 485 | nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip); |
413 | } else { | 486 | } else { |
414 | nlip = xfs_ail_min(&(mp->m_ail)); | 487 | nlip = xfs_ail_min(&(mp->m_ail).xa_ail); |
415 | *gen = (int)mp->m_ail_gen; | 488 | *gen = (int)mp->m_ail.xa_gen; |
416 | if (restarts != NULL) { | 489 | if (restarts != NULL) { |
417 | XFS_STATS_INC(xs_push_ail_restarts); | 490 | XFS_STATS_INC(xs_push_ail_restarts); |
418 | (*restarts)++; | 491 | (*restarts)++; |
@@ -437,12 +510,20 @@ xfs_trans_next_ail( | |||
437 | /* | 510 | /* |
438 | * Initialize the doubly linked list to point only to itself. | 511 | * Initialize the doubly linked list to point only to itself. |
439 | */ | 512 | */ |
440 | void | 513 | int |
441 | xfs_trans_ail_init( | 514 | xfs_trans_ail_init( |
442 | xfs_mount_t *mp) | 515 | xfs_mount_t *mp) |
443 | { | 516 | { |
444 | mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail); | 517 | mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail; |
445 | mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail); | 518 | mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail; |
519 | return xfsaild_start(mp); | ||
520 | } | ||
521 | |||
522 | void | ||
523 | xfs_trans_ail_destroy( | ||
524 | xfs_mount_t *mp) | ||
525 | { | ||
526 | xfsaild_stop(mp); | ||
446 | } | 527 | } |
447 | 528 | ||
448 | /* | 529 | /* |
@@ -482,7 +563,7 @@ xfs_ail_insert( | |||
482 | next_lip->li_ail.ail_forw = lip; | 563 | next_lip->li_ail.ail_forw = lip; |
483 | lip->li_ail.ail_forw->li_ail.ail_back = lip; | 564 | lip->li_ail.ail_forw->li_ail.ail_back = lip; |
484 | 565 | ||
485 | xfs_ail_check(base); | 566 | xfs_ail_check(base, lip); |
486 | return; | 567 | return; |
487 | } | 568 | } |
488 | 569 | ||
@@ -496,12 +577,12 @@ xfs_ail_delete( | |||
496 | xfs_log_item_t *lip) | 577 | xfs_log_item_t *lip) |
497 | /* ARGSUSED */ | 578 | /* ARGSUSED */ |
498 | { | 579 | { |
580 | xfs_ail_check(base, lip); | ||
499 | lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; | 581 | lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; |
500 | lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; | 582 | lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; |
501 | lip->li_ail.ail_forw = NULL; | 583 | lip->li_ail.ail_forw = NULL; |
502 | lip->li_ail.ail_back = NULL; | 584 | lip->li_ail.ail_back = NULL; |
503 | 585 | ||
504 | xfs_ail_check(base); | ||
505 | return lip; | 586 | return lip; |
506 | } | 587 | } |
507 | 588 | ||
@@ -545,13 +626,13 @@ xfs_ail_next( | |||
545 | */ | 626 | */ |
546 | STATIC void | 627 | STATIC void |
547 | xfs_ail_check( | 628 | xfs_ail_check( |
548 | xfs_ail_entry_t *base) | 629 | xfs_ail_entry_t *base, |
630 | xfs_log_item_t *lip) | ||
549 | { | 631 | { |
550 | xfs_log_item_t *lip; | ||
551 | xfs_log_item_t *prev_lip; | 632 | xfs_log_item_t *prev_lip; |
552 | 633 | ||
553 | lip = base->ail_forw; | 634 | prev_lip = base->ail_forw; |
554 | if (lip == (xfs_log_item_t*)base) { | 635 | if (prev_lip == (xfs_log_item_t*)base) { |
555 | /* | 636 | /* |
556 | * Make sure the pointers are correct when the list | 637 | * Make sure the pointers are correct when the list |
557 | * is empty. | 638 | * is empty. |
@@ -561,9 +642,27 @@ xfs_ail_check( | |||
561 | } | 642 | } |
562 | 643 | ||
563 | /* | 644 | /* |
645 | * Check the next and previous entries are valid. | ||
646 | */ | ||
647 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | ||
648 | prev_lip = lip->li_ail.ail_back; | ||
649 | if (prev_lip != (xfs_log_item_t*)base) { | ||
650 | ASSERT(prev_lip->li_ail.ail_forw == lip); | ||
651 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | ||
652 | } | ||
653 | prev_lip = lip->li_ail.ail_forw; | ||
654 | if (prev_lip != (xfs_log_item_t*)base) { | ||
655 | ASSERT(prev_lip->li_ail.ail_back == lip); | ||
656 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); | ||
657 | } | ||
658 | |||
659 | |||
660 | #ifdef XFS_TRANS_DEBUG | ||
661 | /* | ||
564 | * Walk the list checking forward and backward pointers, | 662 | * Walk the list checking forward and backward pointers, |
565 | * lsn ordering, and that every entry has the XFS_LI_IN_AIL | 663 | * lsn ordering, and that every entry has the XFS_LI_IN_AIL |
566 | * flag set. | 664 | * flag set. This is really expensive, so only do it when |
665 | * specifically debugging the transaction subsystem. | ||
567 | */ | 666 | */ |
568 | prev_lip = (xfs_log_item_t*)base; | 667 | prev_lip = (xfs_log_item_t*)base; |
569 | while (lip != (xfs_log_item_t*)base) { | 668 | while (lip != (xfs_log_item_t*)base) { |
@@ -578,5 +677,6 @@ xfs_ail_check( | |||
578 | } | 677 | } |
579 | ASSERT(lip == (xfs_log_item_t*)base); | 678 | ASSERT(lip == (xfs_log_item_t*)base); |
580 | ASSERT(base->ail_back == prev_lip); | 679 | ASSERT(base->ail_back == prev_lip); |
680 | #endif /* XFS_TRANS_DEBUG */ | ||
581 | } | 681 | } |
582 | #endif /* DEBUG */ | 682 | #endif /* DEBUG */ |
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c index 2912aac07c7b..66a09f0d894b 100644 --- a/fs/xfs/xfs_trans_item.c +++ b/fs/xfs/xfs_trans_item.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "xfs_log.h" | 21 | #include "xfs_log.h" |
22 | #include "xfs_inum.h" | 22 | #include "xfs_inum.h" |
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_trans_priv.h" | ||
24 | 25 | ||
25 | STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, | 26 | STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, |
26 | int, int, xfs_lsn_t); | 27 | int, int, xfs_lsn_t); |
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 447ac4308c91..3c748c456ed4 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h | |||
@@ -47,15 +47,22 @@ xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, | |||
47 | * From xfs_trans_ail.c | 47 | * From xfs_trans_ail.c |
48 | */ | 48 | */ |
49 | void xfs_trans_update_ail(struct xfs_mount *mp, | 49 | void xfs_trans_update_ail(struct xfs_mount *mp, |
50 | struct xfs_log_item *lip, xfs_lsn_t lsn, | 50 | struct xfs_log_item *lip, xfs_lsn_t lsn) |
51 | unsigned long s) | ||
52 | __releases(mp->m_ail_lock); | 51 | __releases(mp->m_ail_lock); |
53 | void xfs_trans_delete_ail(struct xfs_mount *mp, | 52 | void xfs_trans_delete_ail(struct xfs_mount *mp, |
54 | struct xfs_log_item *lip, unsigned long s) | 53 | struct xfs_log_item *lip) |
55 | __releases(mp->m_ail_lock); | 54 | __releases(mp->m_ail_lock); |
56 | struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); | 55 | struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); |
57 | struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, | 56 | struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, |
58 | struct xfs_log_item *, int *, int *); | 57 | struct xfs_log_item *, int *, int *); |
59 | 58 | ||
60 | 59 | ||
60 | /* | ||
61 | * AIL push thread support | ||
62 | */ | ||
63 | long xfsaild_push(struct xfs_mount *, xfs_lsn_t *); | ||
64 | void xfsaild_wakeup(struct xfs_mount *, xfs_lsn_t); | ||
65 | int xfsaild_start(struct xfs_mount *); | ||
66 | void xfsaild_stop(struct xfs_mount *); | ||
67 | |||
61 | #endif /* __XFS_TRANS_PRIV_H__ */ | 68 | #endif /* __XFS_TRANS_PRIV_H__ */ |
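The new xfsaild_push()/xfsaild_wakeup()/xfsaild_start()/xfsaild_stop() prototypes above define the AIL push daemon interface: xfsaild_push() makes one bounded pass over the AIL, tracks its progress in *last_lsn, and returns a suggested sleep time in milliseconds. The thread that drives it is implemented outside this diff, so the loop below is only a sketch of how such a daemon might consume the interface; the kthread setup, the example_xfsaild name, and the sleep policy are assumptions, not the actual XFS implementation.

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int example_xfsaild(void *data)
{
        xfs_mount_t     *mp = data;             /* types from the XFS headers above */
        xfs_lsn_t       last_pushed_lsn = 0;
        long            tout = 0;               /* milliseconds */

        while (!kthread_should_stop()) {
                /* sleep for the interval suggested by the previous push */
                if (tout)
                        schedule_timeout_interruptible(msecs_to_jiffies(tout));
                /* push entries below the current target, get the next delay back */
                tout = xfsaild_push(mp, &last_pushed_lsn);
        }
        return 0;
}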
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 673b405eaa31..45d740df53b7 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -73,7 +73,7 @@ xfs_dir_lookup_int( | |||
73 | { | 73 | { |
74 | int error; | 74 | int error; |
75 | 75 | ||
76 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 76 | xfs_itrace_entry(dp); |
77 | 77 | ||
78 | error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); | 78 | error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); |
79 | if (!error) { | 79 | if (!error) { |
@@ -302,6 +302,7 @@ xfs_droplink( | |||
302 | 302 | ||
303 | ASSERT (ip->i_d.di_nlink > 0); | 303 | ASSERT (ip->i_d.di_nlink > 0); |
304 | ip->i_d.di_nlink--; | 304 | ip->i_d.di_nlink--; |
305 | drop_nlink(ip->i_vnode); | ||
305 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 306 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
306 | 307 | ||
307 | error = 0; | 308 | error = 0; |
@@ -330,7 +331,6 @@ xfs_bump_ino_vers2( | |||
330 | xfs_inode_t *ip) | 331 | xfs_inode_t *ip) |
331 | { | 332 | { |
332 | xfs_mount_t *mp; | 333 | xfs_mount_t *mp; |
333 | unsigned long s; | ||
334 | 334 | ||
335 | ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); | 335 | ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); |
336 | ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); | 336 | ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); |
@@ -340,13 +340,13 @@ xfs_bump_ino_vers2( | |||
340 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); | 340 | memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); |
341 | mp = tp->t_mountp; | 341 | mp = tp->t_mountp; |
342 | if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { | 342 | if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { |
343 | s = XFS_SB_LOCK(mp); | 343 | spin_lock(&mp->m_sb_lock); |
344 | if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { | 344 | if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { |
345 | XFS_SB_VERSION_ADDNLINK(&mp->m_sb); | 345 | XFS_SB_VERSION_ADDNLINK(&mp->m_sb); |
346 | XFS_SB_UNLOCK(mp, s); | 346 | spin_unlock(&mp->m_sb_lock); |
347 | xfs_mod_sb(tp, XFS_SB_VERSIONNUM); | 347 | xfs_mod_sb(tp, XFS_SB_VERSIONNUM); |
348 | } else { | 348 | } else { |
349 | XFS_SB_UNLOCK(mp, s); | 349 | spin_unlock(&mp->m_sb_lock); |
350 | } | 350 | } |
351 | } | 351 | } |
352 | /* Caller must log the inode */ | 352 | /* Caller must log the inode */ |
@@ -366,6 +366,7 @@ xfs_bumplink( | |||
366 | 366 | ||
367 | ASSERT(ip->i_d.di_nlink > 0); | 367 | ASSERT(ip->i_d.di_nlink > 0); |
368 | ip->i_d.di_nlink++; | 368 | ip->i_d.di_nlink++; |
369 | inc_nlink(ip->i_vnode); | ||
369 | if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && | 370 | if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && |
370 | (ip->i_d.di_nlink > XFS_MAXLINK_1)) { | 371 | (ip->i_d.di_nlink > XFS_MAXLINK_1)) { |
371 | /* | 372 | /* |
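xfs_droplink() and xfs_bumplink() above now mirror every change to the on-disk di_nlink into the generic VFS inode via drop_nlink()/inc_nlink(), keeping i_vnode's link count in step with the XFS inode core. Those helpers belong to the core VFS (fs/inode.c); in this era they amount to little more than the sketch below, shown with example_* names purely for illustration.

#include <linux/fs.h>

static inline void example_drop_nlink(struct inode *inode)
{
        inode->i_nlink--;
}

static inline void example_inc_nlink(struct inode *inode)
{
        inode->i_nlink++;
}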
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index a00b26d8840e..f857fcccb723 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h | |||
@@ -20,8 +20,6 @@ | |||
20 | 20 | ||
21 | #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) | 21 | #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) |
22 | #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) | 22 | #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) |
23 | #define ITRACE(ip) vn_trace_ref(ip, __FILE__, __LINE__, \ | ||
24 | (inst_t *)__return_address) | ||
25 | 23 | ||
26 | extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **); | 24 | extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **); |
27 | extern int xfs_dir_lookup_int (xfs_inode_t *, uint, bhv_vname_t *, xfs_ino_t *, | 25 | extern int xfs_dir_lookup_int (xfs_inode_t *, uint, bhv_vname_t *, xfs_ino_t *, |
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index a1544597bcd3..413587f02155 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c | |||
@@ -58,17 +58,12 @@ | |||
58 | #include "xfs_vfsops.h" | 58 | #include "xfs_vfsops.h" |
59 | 59 | ||
60 | 60 | ||
61 | int | 61 | int __init |
62 | xfs_init(void) | 62 | xfs_init(void) |
63 | { | 63 | { |
64 | extern kmem_zone_t *xfs_bmap_free_item_zone; | ||
65 | extern kmem_zone_t *xfs_btree_cur_zone; | ||
66 | extern kmem_zone_t *xfs_trans_zone; | ||
67 | extern kmem_zone_t *xfs_buf_item_zone; | ||
68 | extern kmem_zone_t *xfs_dabuf_zone; | ||
69 | #ifdef XFS_DABUF_DEBUG | 64 | #ifdef XFS_DABUF_DEBUG |
70 | extern lock_t xfs_dabuf_global_lock; | 65 | extern spinlock_t xfs_dabuf_global_lock; |
71 | spinlock_init(&xfs_dabuf_global_lock, "xfsda"); | 66 | spin_lock_init(&xfs_dabuf_global_lock); |
72 | #endif | 67 | #endif |
73 | 68 | ||
74 | /* | 69 | /* |
@@ -152,18 +147,12 @@ xfs_init(void) | |||
152 | return 0; | 147 | return 0; |
153 | } | 148 | } |
154 | 149 | ||
155 | void | 150 | void __exit |
156 | xfs_cleanup(void) | 151 | xfs_cleanup(void) |
157 | { | 152 | { |
158 | extern kmem_zone_t *xfs_bmap_free_item_zone; | ||
159 | extern kmem_zone_t *xfs_btree_cur_zone; | ||
160 | extern kmem_zone_t *xfs_inode_zone; | 153 | extern kmem_zone_t *xfs_inode_zone; |
161 | extern kmem_zone_t *xfs_trans_zone; | ||
162 | extern kmem_zone_t *xfs_da_state_zone; | ||
163 | extern kmem_zone_t *xfs_dabuf_zone; | ||
164 | extern kmem_zone_t *xfs_efd_zone; | 154 | extern kmem_zone_t *xfs_efd_zone; |
165 | extern kmem_zone_t *xfs_efi_zone; | 155 | extern kmem_zone_t *xfs_efi_zone; |
166 | extern kmem_zone_t *xfs_buf_item_zone; | ||
167 | extern kmem_zone_t *xfs_icluster_zone; | 156 | extern kmem_zone_t *xfs_icluster_zone; |
168 | 157 | ||
169 | xfs_cleanup_procfs(); | 158 | xfs_cleanup_procfs(); |
@@ -449,8 +438,6 @@ xfs_mount( | |||
449 | if (error) | 438 | if (error) |
450 | return error; | 439 | return error; |
451 | 440 | ||
452 | mp->m_io_ops = xfs_iocore_xfs; | ||
453 | |||
454 | if (args->flags & XFSMNT_QUIET) | 441 | if (args->flags & XFSMNT_QUIET) |
455 | flags |= XFS_MFSI_QUIET; | 442 | flags |= XFS_MFSI_QUIET; |
456 | 443 | ||
@@ -544,7 +531,7 @@ xfs_mount( | |||
544 | if ((error = xfs_filestream_mount(mp))) | 531 | if ((error = xfs_filestream_mount(mp))) |
545 | goto error2; | 532 | goto error2; |
546 | 533 | ||
547 | error = XFS_IOINIT(mp, args, flags); | 534 | error = xfs_mountfs(mp, flags); |
548 | if (error) | 535 | if (error) |
549 | goto error2; | 536 | goto error2; |
550 | 537 | ||
@@ -694,7 +681,7 @@ xfs_quiesce_fs( | |||
694 | * care of the metadata. New transactions are already blocked, so we need to | 681 | * care of the metadata. New transactions are already blocked, so we need to |
695 | * wait for any remaining transactions to drain out before proceeding. | 682 | * wait for any remaining transactions to drain out before proceeding. |
696 | */ | 683 | */ |
697 | STATIC void | 684 | void |
698 | xfs_attr_quiesce( | 685 | xfs_attr_quiesce( |
699 | xfs_mount_t *mp) | 686 | xfs_mount_t *mp) |
700 | { | 687 | { |
@@ -821,80 +808,6 @@ fscorrupt_out2: | |||
821 | } | 808 | } |
822 | 809 | ||
823 | /* | 810 | /* |
824 | * xfs_root extracts the root vnode from a vfs. | ||
825 | * | ||
826 | * vfsp -- the vfs struct for the desired file system | ||
827 | * vpp -- address of the caller's vnode pointer which should be | ||
828 | * set to the desired fs root vnode | ||
829 | */ | ||
830 | int | ||
831 | xfs_root( | ||
832 | xfs_mount_t *mp, | ||
833 | bhv_vnode_t **vpp) | ||
834 | { | ||
835 | bhv_vnode_t *vp; | ||
836 | |||
837 | vp = XFS_ITOV(mp->m_rootip); | ||
838 | VN_HOLD(vp); | ||
839 | *vpp = vp; | ||
840 | return 0; | ||
841 | } | ||
842 | |||
843 | /* | ||
844 | * xfs_statvfs | ||
845 | * | ||
846 | * Fill in the statvfs structure for the given file system. We use | ||
847 | * the superblock lock in the mount structure to ensure a consistent | ||
848 | * snapshot of the counters returned. | ||
849 | */ | ||
850 | int | ||
851 | xfs_statvfs( | ||
852 | xfs_mount_t *mp, | ||
853 | bhv_statvfs_t *statp, | ||
854 | bhv_vnode_t *vp) | ||
855 | { | ||
856 | __uint64_t fakeinos; | ||
857 | xfs_extlen_t lsize; | ||
858 | xfs_sb_t *sbp; | ||
859 | unsigned long s; | ||
860 | |||
861 | sbp = &(mp->m_sb); | ||
862 | |||
863 | statp->f_type = XFS_SB_MAGIC; | ||
864 | |||
865 | xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); | ||
866 | s = XFS_SB_LOCK(mp); | ||
867 | statp->f_bsize = sbp->sb_blocksize; | ||
868 | lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; | ||
869 | statp->f_blocks = sbp->sb_dblocks - lsize; | ||
870 | statp->f_bfree = statp->f_bavail = | ||
871 | sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); | ||
872 | fakeinos = statp->f_bfree << sbp->sb_inopblog; | ||
873 | #if XFS_BIG_INUMS | ||
874 | fakeinos += mp->m_inoadd; | ||
875 | #endif | ||
876 | statp->f_files = | ||
877 | MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); | ||
878 | if (mp->m_maxicount) | ||
879 | #if XFS_BIG_INUMS | ||
880 | if (!mp->m_inoadd) | ||
881 | #endif | ||
882 | statp->f_files = min_t(typeof(statp->f_files), | ||
883 | statp->f_files, | ||
884 | mp->m_maxicount); | ||
885 | statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); | ||
886 | XFS_SB_UNLOCK(mp, s); | ||
887 | |||
888 | xfs_statvfs_fsid(statp, mp); | ||
889 | statp->f_namelen = MAXNAMELEN - 1; | ||
890 | |||
891 | if (vp) | ||
892 | XFS_QM_DQSTATVFS(xfs_vtoi(vp), statp); | ||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | |||
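The removed xfs_statvfs() above reduces to a handful of superblock-derived computations: data blocks minus the internal log, free blocks minus the allocator's set-aside, and an inode limit inflated by how many inodes the remaining free blocks could still hold. A stand-alone sketch of that arithmetic, with illustrative names rather than the kernel structures (the MAXINUMBER clamp and the per-cpu counter sync are left out):

/*
 * Sketch of the accounting the removed xfs_statvfs() performed; the
 * kernel version reads the same quantities from the in-core superblock
 * while holding the superblock lock.
 */
struct statvfs_sketch {
	unsigned long long f_blocks;	/* data blocks, minus internal log */
	unsigned long long f_bfree;	/* free blocks, minus reserved blocks */
	unsigned long long f_files;	/* inode limit reported to user space */
	unsigned long long f_ffree;	/* inodes still available */
};

static void fill_statvfs_sketch(struct statvfs_sketch *st,
		unsigned long long dblocks,	/* sb_dblocks */
		unsigned long long logblocks,	/* sb_logblocks, 0 if external log */
		unsigned long long fdblocks,	/* sb_fdblocks */
		unsigned long long set_aside,	/* XFS_ALLOC_SET_ASIDE() */
		unsigned long long icount,	/* sb_icount */
		unsigned long long ifree,	/* sb_ifree */
		unsigned long long maxicount,	/* m_maxicount, 0 = unlimited */
		unsigned int inopblog)		/* log2 of inodes per block */
{
	unsigned long long fakeinos;

	st->f_blocks = dblocks - logblocks;
	st->f_bfree = fdblocks - set_aside;

	/* every free block could still be carved into 2^inopblog inodes */
	fakeinos = st->f_bfree << inopblog;
	st->f_files = icount + fakeinos;
	if (maxicount && st->f_files > maxicount)
		st->f_files = maxicount;

	st->f_ffree = st->f_files - (icount - ifree);
}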
897 | /* | ||
898 | * xfs_sync flushes any pending I/O to file system vfsp. | 811 | * xfs_sync flushes any pending I/O to file system vfsp. |
899 | * | 812 | * |
900 | * This routine is called by vfs_sync() to make sure that things make it | 813 | * This routine is called by vfs_sync() to make sure that things make it |
@@ -981,8 +894,6 @@ xfs_sync_inodes( | |||
981 | int *bypassed) | 894 | int *bypassed) |
982 | { | 895 | { |
983 | xfs_inode_t *ip = NULL; | 896 | xfs_inode_t *ip = NULL; |
984 | xfs_inode_t *ip_next; | ||
985 | xfs_buf_t *bp; | ||
986 | bhv_vnode_t *vp = NULL; | 897 | bhv_vnode_t *vp = NULL; |
987 | int error; | 898 | int error; |
988 | int last_error; | 899 | int last_error; |
@@ -992,7 +903,6 @@ xfs_sync_inodes( | |||
992 | boolean_t mount_locked; | 903 | boolean_t mount_locked; |
993 | boolean_t vnode_refed; | 904 | boolean_t vnode_refed; |
994 | int preempt; | 905 | int preempt; |
995 | xfs_dinode_t *dip; | ||
996 | xfs_iptr_t *ipointer; | 906 | xfs_iptr_t *ipointer; |
997 | #ifdef DEBUG | 907 | #ifdef DEBUG |
998 | boolean_t ipointer_in = B_FALSE; | 908 | boolean_t ipointer_in = B_FALSE; |
@@ -1045,6 +955,8 @@ xfs_sync_inodes( | |||
1045 | 955 | ||
1046 | #define XFS_PREEMPT_MASK 0x7f | 956 | #define XFS_PREEMPT_MASK 0x7f |
1047 | 957 | ||
958 | ASSERT(!(flags & SYNC_BDFLUSH)); | ||
959 | |||
1048 | if (bypassed) | 960 | if (bypassed) |
1049 | *bypassed = 0; | 961 | *bypassed = 0; |
1050 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 962 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
@@ -1057,7 +969,7 @@ xfs_sync_inodes( | |||
1057 | ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); | 969 | ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); |
1058 | 970 | ||
1059 | fflag = XFS_B_ASYNC; /* default is don't wait */ | 971 | fflag = XFS_B_ASYNC; /* default is don't wait */ |
1060 | if (flags & (SYNC_BDFLUSH | SYNC_DELWRI)) | 972 | if (flags & SYNC_DELWRI) |
1061 | fflag = XFS_B_DELWRI; | 973 | fflag = XFS_B_DELWRI; |
1062 | if (flags & SYNC_WAIT) | 974 | if (flags & SYNC_WAIT) |
1063 | fflag = 0; /* synchronous overrides all */ | 975 | fflag = 0; /* synchronous overrides all */ |
@@ -1147,24 +1059,6 @@ xfs_sync_inodes( | |||
1147 | } | 1059 | } |
1148 | 1060 | ||
1149 | /* | 1061 | /* |
1150 | * If this is just vfs_sync() or pflushd() calling | ||
1151 | * then we can skip inodes for which it looks like | ||
1152 | * there is nothing to do. Since we don't have the | ||
1153 | * inode locked this is racy, but these are periodic | ||
1154 | * calls so it doesn't matter. For the others we want | ||
1155 | * to know for sure, so we at least try to lock them. | ||
1156 | */ | ||
1157 | if (flags & SYNC_BDFLUSH) { | ||
1158 | if (((ip->i_itemp == NULL) || | ||
1159 | !(ip->i_itemp->ili_format.ilf_fields & | ||
1160 | XFS_ILOG_ALL)) && | ||
1161 | (ip->i_update_core == 0)) { | ||
1162 | ip = ip->i_mnext; | ||
1163 | continue; | ||
1164 | } | ||
1165 | } | ||
1166 | |||
1167 | /* | ||
1168 | * Try to lock without sleeping. We're out of order with | 1062 | * Try to lock without sleeping. We're out of order with |
1169 | * the inode list lock here, so if we fail we need to drop | 1063 | * the inode list lock here, so if we fail we need to drop |
1170 | * the mount lock and try again. If we're called from | 1064 | * the mount lock and try again. If we're called from |
@@ -1181,7 +1075,7 @@ xfs_sync_inodes( | |||
1181 | * it. | 1075 | * it. |
1182 | */ | 1076 | */ |
1183 | if (xfs_ilock_nowait(ip, lock_flags) == 0) { | 1077 | if (xfs_ilock_nowait(ip, lock_flags) == 0) { |
1184 | if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { | 1078 | if (vp == NULL) { |
1185 | ip = ip->i_mnext; | 1079 | ip = ip->i_mnext; |
1186 | continue; | 1080 | continue; |
1187 | } | 1081 | } |
@@ -1242,160 +1136,27 @@ xfs_sync_inodes( | |||
1242 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 1136 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
1243 | } | 1137 | } |
1244 | 1138 | ||
1245 | if (flags & SYNC_BDFLUSH) { | 1139 | if ((flags & SYNC_ATTR) && |
1246 | if ((flags & SYNC_ATTR) && | 1140 | (ip->i_update_core || |
1247 | ((ip->i_update_core) || | 1141 | (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) { |
1248 | ((ip->i_itemp != NULL) && | 1142 | if (mount_locked) |
1249 | (ip->i_itemp->ili_format.ilf_fields != 0)))) { | 1143 | IPOINTER_INSERT(ip, mp); |
1250 | |||
1251 | /* Insert marker and drop lock if not already | ||
1252 | * done. | ||
1253 | */ | ||
1254 | if (mount_locked) { | ||
1255 | IPOINTER_INSERT(ip, mp); | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * We don't want the periodic flushing of the | ||
1260 | * inodes by vfs_sync() to interfere with | ||
1261 | * I/O to the file, especially read I/O | ||
1262 | * where it is only the access time stamp | ||
1263 | * that is being flushed out. To prevent | ||
1264 | * long periods where we have both inode | ||
1265 | * locks held shared here while reading the | ||
1266 | * inode's buffer in from disk, we drop the | ||
1267 | * inode lock while reading in the inode | ||
1268 | * buffer. We have to release the buffer | ||
1269 | * and reacquire the inode lock so that they | ||
1270 | * are acquired in the proper order (inode | ||
1271 | * locks first). The buffer will go at the | ||
1272 | * end of the lru chain, though, so we can | ||
1273 | * expect it to still be there when we go | ||
1274 | * for it again in xfs_iflush(). | ||
1275 | */ | ||
1276 | if ((xfs_ipincount(ip) == 0) && | ||
1277 | xfs_iflock_nowait(ip)) { | ||
1278 | |||
1279 | xfs_ifunlock(ip); | ||
1280 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
1281 | |||
1282 | error = xfs_itobp(mp, NULL, ip, | ||
1283 | &dip, &bp, 0, 0); | ||
1284 | if (!error) { | ||
1285 | xfs_buf_relse(bp); | ||
1286 | } else { | ||
1287 | /* Bailing out, remove the | ||
1288 | * marker and free it. | ||
1289 | */ | ||
1290 | XFS_MOUNT_ILOCK(mp); | ||
1291 | IPOINTER_REMOVE(ip, mp); | ||
1292 | XFS_MOUNT_IUNLOCK(mp); | ||
1293 | |||
1294 | ASSERT(!(lock_flags & | ||
1295 | XFS_IOLOCK_SHARED)); | ||
1296 | |||
1297 | kmem_free(ipointer, | ||
1298 | sizeof(xfs_iptr_t)); | ||
1299 | return (0); | ||
1300 | } | ||
1301 | |||
1302 | /* | ||
1303 | * Since we dropped the inode lock, | ||
1304 | * the inode may have been reclaimed. | ||
1305 | * Therefore, we reacquire the mount | ||
1306 | * lock and check to see if our inode was | ||
1307 | * the one reclaimed. If this happened | ||
1308 | * then the ipointer marker will no | ||
1309 | * longer point back at us. In this | ||
1310 | * case, move ip along to the inode | ||
1311 | * after the marker, remove the marker | ||
1312 | * and continue. | ||
1313 | */ | ||
1314 | XFS_MOUNT_ILOCK(mp); | ||
1315 | mount_locked = B_TRUE; | ||
1316 | |||
1317 | if (ip != ipointer->ip_mprev) { | ||
1318 | IPOINTER_REMOVE(ip, mp); | ||
1319 | |||
1320 | ASSERT(!vnode_refed); | ||
1321 | ASSERT(!(lock_flags & | ||
1322 | XFS_IOLOCK_SHARED)); | ||
1323 | continue; | ||
1324 | } | ||
1325 | |||
1326 | ASSERT(ip->i_mount == mp); | ||
1327 | |||
1328 | if (xfs_ilock_nowait(ip, | ||
1329 | XFS_ILOCK_SHARED) == 0) { | ||
1330 | ASSERT(ip->i_mount == mp); | ||
1331 | /* | ||
1332 | * We failed to reacquire | ||
1333 | * the inode lock without | ||
1334 | * sleeping, so just skip | ||
1335 | * the inode for now. We | ||
1336 | * clear the ILOCK bit from | ||
1337 | * the lock_flags so that we | ||
1338 | * won't try to drop a lock | ||
1339 | * we don't hold below. | ||
1340 | */ | ||
1341 | lock_flags &= ~XFS_ILOCK_SHARED; | ||
1342 | IPOINTER_REMOVE(ip_next, mp); | ||
1343 | } else if ((xfs_ipincount(ip) == 0) && | ||
1344 | xfs_iflock_nowait(ip)) { | ||
1345 | ASSERT(ip->i_mount == mp); | ||
1346 | /* | ||
1347 | * Since this is vfs_sync() | ||
1348 | * calling we only flush the | ||
1349 | * inode out if we can lock | ||
1350 | * it without sleeping and | ||
1351 | * it is not pinned. Drop | ||
1352 | * the mount lock here so | ||
1353 | * that we don't hold it for | ||
1354 | * too long. We already have | ||
1355 | * a marker in the list here. | ||
1356 | */ | ||
1357 | XFS_MOUNT_IUNLOCK(mp); | ||
1358 | mount_locked = B_FALSE; | ||
1359 | error = xfs_iflush(ip, | ||
1360 | XFS_IFLUSH_DELWRI); | ||
1361 | } else { | ||
1362 | ASSERT(ip->i_mount == mp); | ||
1363 | IPOINTER_REMOVE(ip_next, mp); | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | } | ||
1368 | 1144 | ||
1369 | } else { | 1145 | if (flags & SYNC_WAIT) { |
1370 | if ((flags & SYNC_ATTR) && | 1146 | xfs_iflock(ip); |
1371 | ((ip->i_update_core) || | 1147 | error = xfs_iflush(ip, XFS_IFLUSH_SYNC); |
1372 | ((ip->i_itemp != NULL) && | ||
1373 | (ip->i_itemp->ili_format.ilf_fields != 0)))) { | ||
1374 | if (mount_locked) { | ||
1375 | IPOINTER_INSERT(ip, mp); | ||
1376 | } | ||
1377 | 1148 | ||
1378 | if (flags & SYNC_WAIT) { | 1149 | /* |
1379 | xfs_iflock(ip); | 1150 | * If we can't acquire the flush lock, then the inode |
1380 | error = xfs_iflush(ip, | 1151 | * is already being flushed so don't bother waiting. |
1381 | XFS_IFLUSH_SYNC); | 1152 | * |
1382 | } else { | 1153 | * If we can lock it then do a delwri flush so we can |
1383 | /* | 1154 | * combine multiple inode flushes in each disk write. |
1384 | * If we can't acquire the flush | 1155 | */ |
1385 | * lock, then the inode is already | 1156 | } else if (xfs_iflock_nowait(ip)) { |
1386 | * being flushed so don't bother | 1157 | error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); |
1387 | * waiting. If we can lock it then | 1158 | } else if (bypassed) { |
1388 | * do a delwri flush so we can | 1159 | (*bypassed)++; |
1389 | * combine multiple inode flushes | ||
1390 | * in each disk write. | ||
1391 | */ | ||
1392 | if (xfs_iflock_nowait(ip)) { | ||
1393 | error = xfs_iflush(ip, | ||
1394 | XFS_IFLUSH_DELWRI); | ||
1395 | } | ||
1396 | else if (bypassed) | ||
1397 | (*bypassed)++; | ||
1398 | } | ||
1399 | } | 1160 | } |
1400 | } | 1161 | } |
1401 | 1162 | ||
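Read straight down the right-hand column, the per-inode logic that survives this hunk is short: only SYNC_ATTR callers flush, only dirty inodes are considered, SYNC_WAIT forces a synchronous flush, and otherwise the inode is flushed as a delayed write if the flush lock can be taken without sleeping, or counted as bypassed if it cannot. Collected into one block (error handling, the ipointer marker and the mount-list locking elided):

	if ((flags & SYNC_ATTR) &&
	    (ip->i_update_core ||
	     (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) {
		if (flags & SYNC_WAIT) {
			/* sleep until we own the flush lock, then flush for real */
			xfs_iflock(ip);
			error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
		} else if (xfs_iflock_nowait(ip)) {
			/* delayed write, so several inodes share one disk write */
			error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
		} else if (bypassed) {
			/* someone else is already flushing it; just count it */
			(*bypassed)++;
		}
	}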
@@ -1627,499 +1388,3 @@ xfs_syncsub( | |||
1627 | 1388 | ||
1628 | return XFS_ERROR(last_error); | 1389 | return XFS_ERROR(last_error); |
1629 | } | 1390 | } |
1630 | |||
1631 | /* | ||
1632 | * xfs_vget - called by DMAPI and NFSD to get vnode from file handle | ||
1633 | */ | ||
1634 | int | ||
1635 | xfs_vget( | ||
1636 | xfs_mount_t *mp, | ||
1637 | bhv_vnode_t **vpp, | ||
1638 | xfs_fid_t *xfid) | ||
1639 | { | ||
1640 | xfs_inode_t *ip; | ||
1641 | int error; | ||
1642 | xfs_ino_t ino; | ||
1643 | unsigned int igen; | ||
1644 | |||
1645 | /* | ||
1646 | * Invalid. Since handles can be created in user space and passed in | ||
1647 | * via gethandle(), this is not cause for a panic. | ||
1648 | */ | ||
1649 | if (xfid->fid_len != sizeof(*xfid) - sizeof(xfid->fid_len)) | ||
1650 | return XFS_ERROR(EINVAL); | ||
1651 | |||
1652 | ino = xfid->fid_ino; | ||
1653 | igen = xfid->fid_gen; | ||
1654 | |||
1655 | /* | ||
1656 | * NFS can sometimes send requests for ino 0. Fail them gracefully. | ||
1657 | */ | ||
1658 | if (ino == 0) | ||
1659 | return XFS_ERROR(ESTALE); | ||
1660 | |||
1661 | error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); | ||
1662 | if (error) { | ||
1663 | *vpp = NULL; | ||
1664 | return error; | ||
1665 | } | ||
1666 | |||
1667 | if (ip == NULL) { | ||
1668 | *vpp = NULL; | ||
1669 | return XFS_ERROR(EIO); | ||
1670 | } | ||
1671 | |||
1672 | if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { | ||
1673 | xfs_iput_new(ip, XFS_ILOCK_SHARED); | ||
1674 | *vpp = NULL; | ||
1675 | return XFS_ERROR(ENOENT); | ||
1676 | } | ||
1677 | |||
1678 | *vpp = XFS_ITOV(ip); | ||
1679 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
1680 | return 0; | ||
1681 | } | ||
1682 | |||
1683 | |||
1684 | #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ | ||
1685 | #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ | ||
1686 | #define MNTOPT_LOGDEV "logdev" /* log device */ | ||
1687 | #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ | ||
1688 | #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ | ||
1689 | #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ | ||
1690 | #define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ | ||
1691 | #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ | ||
1692 | #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ | ||
1693 | #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ | ||
1694 | #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ | ||
1695 | #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ | ||
1696 | #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ | ||
1697 | #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ | ||
1698 | #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ | ||
1699 | #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ | ||
1700 | #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ | ||
1701 | #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ | ||
1702 | #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ | ||
1703 | #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and | ||
1704 | * unwritten extent conversion */ | ||
1705 | #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ | ||
1706 | #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ | ||
1707 | #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ | ||
1708 | #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ | ||
1709 | #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ | ||
1710 | #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ | ||
1711 | #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes | ||
1712 | * in stat(). */ | ||
1713 | #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ | ||
1714 | #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ | ||
1715 | #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ | ||
1716 | #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ | ||
1717 | #define MNTOPT_NOQUOTA "noquota" /* no quotas */ | ||
1718 | #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ | ||
1719 | #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ | ||
1720 | #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ | ||
1721 | #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ | ||
1722 | #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ | ||
1723 | #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ | ||
1724 | #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ | ||
1725 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ | ||
1726 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ | ||
1727 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ | ||
1728 | #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ | ||
1729 | #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ | ||
1730 | #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ | ||
1731 | |||
1732 | STATIC unsigned long | ||
1733 | suffix_strtoul(char *s, char **endp, unsigned int base) | ||
1734 | { | ||
1735 | int last, shift_left_factor = 0; | ||
1736 | char *value = s; | ||
1737 | |||
1738 | last = strlen(value) - 1; | ||
1739 | if (value[last] == 'K' || value[last] == 'k') { | ||
1740 | shift_left_factor = 10; | ||
1741 | value[last] = '\0'; | ||
1742 | } | ||
1743 | if (value[last] == 'M' || value[last] == 'm') { | ||
1744 | shift_left_factor = 20; | ||
1745 | value[last] = '\0'; | ||
1746 | } | ||
1747 | if (value[last] == 'G' || value[last] == 'g') { | ||
1748 | shift_left_factor = 30; | ||
1749 | value[last] = '\0'; | ||
1750 | } | ||
1751 | |||
1752 | return simple_strtoul((const char *)s, endp, base) << shift_left_factor; | ||
1753 | } | ||
1754 | |||
1755 | int | ||
1756 | xfs_parseargs( | ||
1757 | struct xfs_mount *mp, | ||
1758 | char *options, | ||
1759 | struct xfs_mount_args *args, | ||
1760 | int update) | ||
1761 | { | ||
1762 | char *this_char, *value, *eov; | ||
1763 | int dsunit, dswidth, vol_dsunit, vol_dswidth; | ||
1764 | int iosize; | ||
1765 | int ikeep = 0; | ||
1766 | |||
1767 | args->flags |= XFSMNT_BARRIER; | ||
1768 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; | ||
1769 | |||
1770 | if (!options) | ||
1771 | goto done; | ||
1772 | |||
1773 | iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; | ||
1774 | |||
1775 | while ((this_char = strsep(&options, ",")) != NULL) { | ||
1776 | if (!*this_char) | ||
1777 | continue; | ||
1778 | if ((value = strchr(this_char, '=')) != NULL) | ||
1779 | *value++ = 0; | ||
1780 | |||
1781 | if (!strcmp(this_char, MNTOPT_LOGBUFS)) { | ||
1782 | if (!value || !*value) { | ||
1783 | cmn_err(CE_WARN, | ||
1784 | "XFS: %s option requires an argument", | ||
1785 | this_char); | ||
1786 | return EINVAL; | ||
1787 | } | ||
1788 | args->logbufs = simple_strtoul(value, &eov, 10); | ||
1789 | } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { | ||
1790 | if (!value || !*value) { | ||
1791 | cmn_err(CE_WARN, | ||
1792 | "XFS: %s option requires an argument", | ||
1793 | this_char); | ||
1794 | return EINVAL; | ||
1795 | } | ||
1796 | args->logbufsize = suffix_strtoul(value, &eov, 10); | ||
1797 | } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { | ||
1798 | if (!value || !*value) { | ||
1799 | cmn_err(CE_WARN, | ||
1800 | "XFS: %s option requires an argument", | ||
1801 | this_char); | ||
1802 | return EINVAL; | ||
1803 | } | ||
1804 | strncpy(args->logname, value, MAXNAMELEN); | ||
1805 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { | ||
1806 | if (!value || !*value) { | ||
1807 | cmn_err(CE_WARN, | ||
1808 | "XFS: %s option requires an argument", | ||
1809 | this_char); | ||
1810 | return EINVAL; | ||
1811 | } | ||
1812 | strncpy(args->mtpt, value, MAXNAMELEN); | ||
1813 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { | ||
1814 | if (!value || !*value) { | ||
1815 | cmn_err(CE_WARN, | ||
1816 | "XFS: %s option requires an argument", | ||
1817 | this_char); | ||
1818 | return EINVAL; | ||
1819 | } | ||
1820 | strncpy(args->rtname, value, MAXNAMELEN); | ||
1821 | } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { | ||
1822 | if (!value || !*value) { | ||
1823 | cmn_err(CE_WARN, | ||
1824 | "XFS: %s option requires an argument", | ||
1825 | this_char); | ||
1826 | return EINVAL; | ||
1827 | } | ||
1828 | iosize = simple_strtoul(value, &eov, 10); | ||
1829 | args->flags |= XFSMNT_IOSIZE; | ||
1830 | args->iosizelog = (uint8_t) iosize; | ||
1831 | } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { | ||
1832 | if (!value || !*value) { | ||
1833 | cmn_err(CE_WARN, | ||
1834 | "XFS: %s option requires an argument", | ||
1835 | this_char); | ||
1836 | return EINVAL; | ||
1837 | } | ||
1838 | iosize = suffix_strtoul(value, &eov, 10); | ||
1839 | args->flags |= XFSMNT_IOSIZE; | ||
1840 | args->iosizelog = ffs(iosize) - 1; | ||
1841 | } else if (!strcmp(this_char, MNTOPT_GRPID) || | ||
1842 | !strcmp(this_char, MNTOPT_BSDGROUPS)) { | ||
1843 | mp->m_flags |= XFS_MOUNT_GRPID; | ||
1844 | } else if (!strcmp(this_char, MNTOPT_NOGRPID) || | ||
1845 | !strcmp(this_char, MNTOPT_SYSVGROUPS)) { | ||
1846 | mp->m_flags &= ~XFS_MOUNT_GRPID; | ||
1847 | } else if (!strcmp(this_char, MNTOPT_WSYNC)) { | ||
1848 | args->flags |= XFSMNT_WSYNC; | ||
1849 | } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { | ||
1850 | args->flags |= XFSMNT_OSYNCISOSYNC; | ||
1851 | } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { | ||
1852 | args->flags |= XFSMNT_NORECOVERY; | ||
1853 | } else if (!strcmp(this_char, MNTOPT_INO64)) { | ||
1854 | args->flags |= XFSMNT_INO64; | ||
1855 | #if !XFS_BIG_INUMS | ||
1856 | cmn_err(CE_WARN, | ||
1857 | "XFS: %s option not allowed on this system", | ||
1858 | this_char); | ||
1859 | return EINVAL; | ||
1860 | #endif | ||
1861 | } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { | ||
1862 | args->flags |= XFSMNT_NOALIGN; | ||
1863 | } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { | ||
1864 | args->flags |= XFSMNT_SWALLOC; | ||
1865 | } else if (!strcmp(this_char, MNTOPT_SUNIT)) { | ||
1866 | if (!value || !*value) { | ||
1867 | cmn_err(CE_WARN, | ||
1868 | "XFS: %s option requires an argument", | ||
1869 | this_char); | ||
1870 | return EINVAL; | ||
1871 | } | ||
1872 | dsunit = simple_strtoul(value, &eov, 10); | ||
1873 | } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { | ||
1874 | if (!value || !*value) { | ||
1875 | cmn_err(CE_WARN, | ||
1876 | "XFS: %s option requires an argument", | ||
1877 | this_char); | ||
1878 | return EINVAL; | ||
1879 | } | ||
1880 | dswidth = simple_strtoul(value, &eov, 10); | ||
1881 | } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { | ||
1882 | args->flags &= ~XFSMNT_32BITINODES; | ||
1883 | #if !XFS_BIG_INUMS | ||
1884 | cmn_err(CE_WARN, | ||
1885 | "XFS: %s option not allowed on this system", | ||
1886 | this_char); | ||
1887 | return EINVAL; | ||
1888 | #endif | ||
1889 | } else if (!strcmp(this_char, MNTOPT_NOUUID)) { | ||
1890 | args->flags |= XFSMNT_NOUUID; | ||
1891 | } else if (!strcmp(this_char, MNTOPT_BARRIER)) { | ||
1892 | args->flags |= XFSMNT_BARRIER; | ||
1893 | } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { | ||
1894 | args->flags &= ~XFSMNT_BARRIER; | ||
1895 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { | ||
1896 | ikeep = 1; | ||
1897 | args->flags &= ~XFSMNT_IDELETE; | ||
1898 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { | ||
1899 | args->flags |= XFSMNT_IDELETE; | ||
1900 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { | ||
1901 | args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; | ||
1902 | } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { | ||
1903 | args->flags2 |= XFSMNT2_COMPAT_IOSIZE; | ||
1904 | } else if (!strcmp(this_char, MNTOPT_ATTR2)) { | ||
1905 | args->flags |= XFSMNT_ATTR2; | ||
1906 | } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { | ||
1907 | args->flags &= ~XFSMNT_ATTR2; | ||
1908 | } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { | ||
1909 | args->flags2 |= XFSMNT2_FILESTREAMS; | ||
1910 | } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { | ||
1911 | args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); | ||
1912 | args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); | ||
1913 | } else if (!strcmp(this_char, MNTOPT_QUOTA) || | ||
1914 | !strcmp(this_char, MNTOPT_UQUOTA) || | ||
1915 | !strcmp(this_char, MNTOPT_USRQUOTA)) { | ||
1916 | args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; | ||
1917 | } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || | ||
1918 | !strcmp(this_char, MNTOPT_UQUOTANOENF)) { | ||
1919 | args->flags |= XFSMNT_UQUOTA; | ||
1920 | args->flags &= ~XFSMNT_UQUOTAENF; | ||
1921 | } else if (!strcmp(this_char, MNTOPT_PQUOTA) || | ||
1922 | !strcmp(this_char, MNTOPT_PRJQUOTA)) { | ||
1923 | args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF; | ||
1924 | } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { | ||
1925 | args->flags |= XFSMNT_PQUOTA; | ||
1926 | args->flags &= ~XFSMNT_PQUOTAENF; | ||
1927 | } else if (!strcmp(this_char, MNTOPT_GQUOTA) || | ||
1928 | !strcmp(this_char, MNTOPT_GRPQUOTA)) { | ||
1929 | args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; | ||
1930 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { | ||
1931 | args->flags |= XFSMNT_GQUOTA; | ||
1932 | args->flags &= ~XFSMNT_GQUOTAENF; | ||
1933 | } else if (!strcmp(this_char, MNTOPT_DMAPI)) { | ||
1934 | args->flags |= XFSMNT_DMAPI; | ||
1935 | } else if (!strcmp(this_char, MNTOPT_XDSM)) { | ||
1936 | args->flags |= XFSMNT_DMAPI; | ||
1937 | } else if (!strcmp(this_char, MNTOPT_DMI)) { | ||
1938 | args->flags |= XFSMNT_DMAPI; | ||
1939 | } else if (!strcmp(this_char, "ihashsize")) { | ||
1940 | cmn_err(CE_WARN, | ||
1941 | "XFS: ihashsize no longer used, option is deprecated."); | ||
1942 | } else if (!strcmp(this_char, "osyncisdsync")) { | ||
1943 | /* no-op, this is now the default */ | ||
1944 | cmn_err(CE_WARN, | ||
1945 | "XFS: osyncisdsync is now the default, option is deprecated."); | ||
1946 | } else if (!strcmp(this_char, "irixsgid")) { | ||
1947 | cmn_err(CE_WARN, | ||
1948 | "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); | ||
1949 | } else { | ||
1950 | cmn_err(CE_WARN, | ||
1951 | "XFS: unknown mount option [%s].", this_char); | ||
1952 | return EINVAL; | ||
1953 | } | ||
1954 | } | ||
1955 | |||
1956 | if (args->flags & XFSMNT_NORECOVERY) { | ||
1957 | if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) { | ||
1958 | cmn_err(CE_WARN, | ||
1959 | "XFS: no-recovery mounts must be read-only."); | ||
1960 | return EINVAL; | ||
1961 | } | ||
1962 | } | ||
1963 | |||
1964 | if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { | ||
1965 | cmn_err(CE_WARN, | ||
1966 | "XFS: sunit and swidth options incompatible with the noalign option"); | ||
1967 | return EINVAL; | ||
1968 | } | ||
1969 | |||
1970 | if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) { | ||
1971 | cmn_err(CE_WARN, | ||
1972 | "XFS: cannot mount with both project and group quota"); | ||
1973 | return EINVAL; | ||
1974 | } | ||
1975 | |||
1976 | if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') { | ||
1977 | printk("XFS: %s option needs the mount point option as well\n", | ||
1978 | MNTOPT_DMAPI); | ||
1979 | return EINVAL; | ||
1980 | } | ||
1981 | |||
1982 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { | ||
1983 | cmn_err(CE_WARN, | ||
1984 | "XFS: sunit and swidth must be specified together"); | ||
1985 | return EINVAL; | ||
1986 | } | ||
1987 | |||
1988 | if (dsunit && (dswidth % dsunit != 0)) { | ||
1989 | cmn_err(CE_WARN, | ||
1990 | "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", | ||
1991 | dswidth, dsunit); | ||
1992 | return EINVAL; | ||
1993 | } | ||
1994 | |||
1995 | /* | ||
1996 | * Applications using DMI filesystems often expect the | ||
1997 | * inode generation number to be monotonically increasing. | ||
1998 | * If we delete inode chunks we break this assumption, so | ||
1999 | * keep unused inode chunks on disk for DMI filesystems | ||
2000 | * until we come up with a better solution. | ||
2001 | * Note that if "ikeep" or "noikeep" mount options are | ||
2002 | * supplied, then they are honored. | ||
2003 | */ | ||
2004 | if (!(args->flags & XFSMNT_DMAPI) && !ikeep) | ||
2005 | args->flags |= XFSMNT_IDELETE; | ||
2006 | |||
2007 | if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { | ||
2008 | if (dsunit) { | ||
2009 | args->sunit = dsunit; | ||
2010 | args->flags |= XFSMNT_RETERR; | ||
2011 | } else { | ||
2012 | args->sunit = vol_dsunit; | ||
2013 | } | ||
2014 | dswidth ? (args->swidth = dswidth) : | ||
2015 | (args->swidth = vol_dswidth); | ||
2016 | } else { | ||
2017 | args->sunit = args->swidth = 0; | ||
2018 | } | ||
2019 | |||
2020 | done: | ||
2021 | if (args->flags & XFSMNT_32BITINODES) | ||
2022 | mp->m_flags |= XFS_MOUNT_SMALL_INUMS; | ||
2023 | if (args->flags2) | ||
2024 | args->flags |= XFSMNT_FLAGS2; | ||
2025 | return 0; | ||
2026 | } | ||
2027 | |||
2028 | int | ||
2029 | xfs_showargs( | ||
2030 | struct xfs_mount *mp, | ||
2031 | struct seq_file *m) | ||
2032 | { | ||
2033 | static struct proc_xfs_info { | ||
2034 | int flag; | ||
2035 | char *str; | ||
2036 | } xfs_info[] = { | ||
2037 | /* the few simple ones we can get from the mount struct */ | ||
2038 | { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, | ||
2039 | { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, | ||
2040 | { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, | ||
2041 | { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, | ||
2042 | { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, | ||
2043 | { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, | ||
2044 | { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, | ||
2045 | { 0, NULL } | ||
2046 | }; | ||
2047 | struct proc_xfs_info *xfs_infop; | ||
2048 | |||
2049 | for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { | ||
2050 | if (mp->m_flags & xfs_infop->flag) | ||
2051 | seq_puts(m, xfs_infop->str); | ||
2052 | } | ||
2053 | |||
2054 | if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) | ||
2055 | seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", | ||
2056 | (int)(1 << mp->m_writeio_log) >> 10); | ||
2057 | |||
2058 | if (mp->m_logbufs > 0) | ||
2059 | seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); | ||
2060 | if (mp->m_logbsize > 0) | ||
2061 | seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); | ||
2062 | |||
2063 | if (mp->m_logname) | ||
2064 | seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); | ||
2065 | if (mp->m_rtname) | ||
2066 | seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); | ||
2067 | |||
2068 | if (mp->m_dalign > 0) | ||
2069 | seq_printf(m, "," MNTOPT_SUNIT "=%d", | ||
2070 | (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); | ||
2071 | if (mp->m_swidth > 0) | ||
2072 | seq_printf(m, "," MNTOPT_SWIDTH "=%d", | ||
2073 | (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); | ||
2074 | |||
2075 | if (!(mp->m_flags & XFS_MOUNT_IDELETE)) | ||
2076 | seq_printf(m, "," MNTOPT_IKEEP); | ||
2077 | if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) | ||
2078 | seq_printf(m, "," MNTOPT_LARGEIO); | ||
2079 | |||
2080 | if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS)) | ||
2081 | seq_printf(m, "," MNTOPT_64BITINODE); | ||
2082 | if (mp->m_flags & XFS_MOUNT_GRPID) | ||
2083 | seq_printf(m, "," MNTOPT_GRPID); | ||
2084 | |||
2085 | if (mp->m_qflags & XFS_UQUOTA_ACCT) { | ||
2086 | if (mp->m_qflags & XFS_UQUOTA_ENFD) | ||
2087 | seq_puts(m, "," MNTOPT_USRQUOTA); | ||
2088 | else | ||
2089 | seq_puts(m, "," MNTOPT_UQUOTANOENF); | ||
2090 | } | ||
2091 | |||
2092 | if (mp->m_qflags & XFS_PQUOTA_ACCT) { | ||
2093 | if (mp->m_qflags & XFS_OQUOTA_ENFD) | ||
2094 | seq_puts(m, "," MNTOPT_PRJQUOTA); | ||
2095 | else | ||
2096 | seq_puts(m, "," MNTOPT_PQUOTANOENF); | ||
2097 | } | ||
2098 | |||
2099 | if (mp->m_qflags & XFS_GQUOTA_ACCT) { | ||
2100 | if (mp->m_qflags & XFS_OQUOTA_ENFD) | ||
2101 | seq_puts(m, "," MNTOPT_GRPQUOTA); | ||
2102 | else | ||
2103 | seq_puts(m, "," MNTOPT_GQUOTANOENF); | ||
2104 | } | ||
2105 | |||
2106 | if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) | ||
2107 | seq_puts(m, "," MNTOPT_NOQUOTA); | ||
2108 | |||
2109 | if (mp->m_flags & XFS_MOUNT_DMAPI) | ||
2110 | seq_puts(m, "," MNTOPT_DMAPI); | ||
2111 | return 0; | ||
2112 | } | ||
2113 | |||
2114 | /* | ||
2115 | * Second stage of a freeze. The data is already frozen so we only | ||
2116 | * need to take care of the metadata. Once that's done write a dummy | ||
2117 | * record to dirty the log in case of a crash while frozen. | ||
2118 | */ | ||
2119 | void | ||
2120 | xfs_freeze( | ||
2121 | xfs_mount_t *mp) | ||
2122 | { | ||
2123 | xfs_attr_quiesce(mp); | ||
2124 | xfs_fs_log_dummy(mp); | ||
2125 | } | ||
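Most of what is removed above is the old mount-option machinery; the one self-contained idea in it is suffix_strtoul(), which lets size-valued options such as logbsize and allocsize take a trailing k/m/g and turns the suffix into a power-of-two shift. A user-space rendering of the same helper, with strtoul standing in for the kernel's simple_strtoul:

#include <stdlib.h>
#include <string.h>

/*
 * Stand-alone version of the removed suffix_strtoul(): strip an optional
 * K/M/G suffix, remember the matching shift, then parse the digits.
 */
static unsigned long suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int last, shift_left_factor = 0;

	if (!*s)			/* callers already reject empty values */
		return 0;

	last = strlen(s) - 1;
	if (s[last] == 'K' || s[last] == 'k') {
		shift_left_factor = 10;
		s[last] = '\0';
	} else if (s[last] == 'M' || s[last] == 'm') {
		shift_left_factor = 20;
		s[last] = '\0';
	} else if (s[last] == 'G' || s[last] == 'g') {
		shift_left_factor = 30;
		s[last] = '\0';
	}

	return strtoul(s, endp, base) << shift_left_factor;
}

With this, logbsize=32k parses to 32768 and allocsize=1m to 1048576, which the removed xfs_parseargs() then folded down to a log2 via ffs(iosize) - 1.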
diff --git a/fs/xfs/xfs_vfsops.h b/fs/xfs/xfs_vfsops.h index a592fe02a339..1688817c55ed 100644 --- a/fs/xfs/xfs_vfsops.h +++ b/fs/xfs/xfs_vfsops.h | |||
@@ -13,16 +13,9 @@ int xfs_mount(struct xfs_mount *mp, struct xfs_mount_args *args, | |||
13 | int xfs_unmount(struct xfs_mount *mp, int flags, struct cred *credp); | 13 | int xfs_unmount(struct xfs_mount *mp, int flags, struct cred *credp); |
14 | int xfs_mntupdate(struct xfs_mount *mp, int *flags, | 14 | int xfs_mntupdate(struct xfs_mount *mp, int *flags, |
15 | struct xfs_mount_args *args); | 15 | struct xfs_mount_args *args); |
16 | int xfs_root(struct xfs_mount *mp, bhv_vnode_t **vpp); | ||
17 | int xfs_statvfs(struct xfs_mount *mp, struct kstatfs *statp, | ||
18 | bhv_vnode_t *vp); | ||
19 | int xfs_sync(struct xfs_mount *mp, int flags); | 16 | int xfs_sync(struct xfs_mount *mp, int flags); |
20 | int xfs_vget(struct xfs_mount *mp, bhv_vnode_t **vpp, struct xfs_fid *xfid); | ||
21 | int xfs_parseargs(struct xfs_mount *mp, char *options, | ||
22 | struct xfs_mount_args *args, int update); | ||
23 | int xfs_showargs(struct xfs_mount *mp, struct seq_file *m); | ||
24 | void xfs_freeze(struct xfs_mount *mp); | ||
25 | void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, | 17 | void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, |
26 | int lnnum); | 18 | int lnnum); |
19 | void xfs_attr_quiesce(struct xfs_mount *mp); | ||
27 | 20 | ||
28 | #endif /* _XFS_VFSOPS_H */ | 21 | #endif /* _XFS_VFSOPS_H */ |
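xfs_attr_quiesce() loses its STATIC in the .c file and picks up a prototype here, while xfs_freeze() disappears from the interface; presumably the freeze path now performs itself the pairing the removed xfs_freeze() spelled out, namely quiesce the metadata and then dirty the log with a dummy record so that a crash while frozen still forces recovery:

/* Shape of the removed second freeze stage; a sketch, not the new caller. */
void xfs_freeze_sketch(xfs_mount_t *mp)
{
	xfs_attr_quiesce(mp);	/* drain transactions, flush metadata */
	xfs_fs_log_dummy(mp);	/* dummy log record covers a crash while frozen */
}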
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index efd5aff9eaf6..51305242ff8c 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -88,7 +88,7 @@ xfs_getattr( | |||
88 | bhv_vnode_t *vp = XFS_ITOV(ip); | 88 | bhv_vnode_t *vp = XFS_ITOV(ip); |
89 | xfs_mount_t *mp = ip->i_mount; | 89 | xfs_mount_t *mp = ip->i_mount; |
90 | 90 | ||
91 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 91 | xfs_itrace_entry(ip); |
92 | 92 | ||
93 | if (XFS_FORCED_SHUTDOWN(mp)) | 93 | if (XFS_FORCED_SHUTDOWN(mp)) |
94 | return XFS_ERROR(EIO); | 94 | return XFS_ERROR(EIO); |
@@ -136,7 +136,7 @@ xfs_getattr( | |||
136 | default: | 136 | default: |
137 | vap->va_rdev = 0; | 137 | vap->va_rdev = 0; |
138 | 138 | ||
139 | if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { | 139 | if (!(XFS_IS_REALTIME_INODE(ip))) { |
140 | vap->va_blocksize = xfs_preferred_iosize(mp); | 140 | vap->va_blocksize = xfs_preferred_iosize(mp); |
141 | } else { | 141 | } else { |
142 | 142 | ||
@@ -228,7 +228,7 @@ xfs_setattr( | |||
228 | int file_owner; | 228 | int file_owner; |
229 | int need_iolock = 1; | 229 | int need_iolock = 1; |
230 | 230 | ||
231 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 231 | xfs_itrace_entry(ip); |
232 | 232 | ||
233 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 233 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
234 | return XFS_ERROR(EROFS); | 234 | return XFS_ERROR(EROFS); |
@@ -508,7 +508,7 @@ xfs_setattr( | |||
508 | */ | 508 | */ |
509 | if ((ip->i_d.di_nextents || ip->i_delayed_blks) && | 509 | if ((ip->i_d.di_nextents || ip->i_delayed_blks) && |
510 | (mask & XFS_AT_XFLAGS) && | 510 | (mask & XFS_AT_XFLAGS) && |
511 | (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != | 511 | (XFS_IS_REALTIME_INODE(ip)) != |
512 | (vap->va_xflags & XFS_XFLAG_REALTIME)) { | 512 | (vap->va_xflags & XFS_XFLAG_REALTIME)) { |
513 | code = XFS_ERROR(EINVAL); /* EFBIG? */ | 513 | code = XFS_ERROR(EINVAL); /* EFBIG? */ |
514 | goto error_return; | 514 | goto error_return; |
@@ -520,7 +520,7 @@ xfs_setattr( | |||
520 | if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { | 520 | if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { |
521 | xfs_extlen_t size; | 521 | xfs_extlen_t size; |
522 | 522 | ||
523 | if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || | 523 | if (XFS_IS_REALTIME_INODE(ip) || |
524 | ((mask & XFS_AT_XFLAGS) && | 524 | ((mask & XFS_AT_XFLAGS) && |
525 | (vap->va_xflags & XFS_XFLAG_REALTIME))) { | 525 | (vap->va_xflags & XFS_XFLAG_REALTIME))) { |
526 | size = mp->m_sb.sb_rextsize << | 526 | size = mp->m_sb.sb_rextsize << |
@@ -804,12 +804,8 @@ xfs_setattr( | |||
804 | if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT) | 804 | if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT) |
805 | di_flags |= XFS_DIFLAG_EXTSZINHERIT; | 805 | di_flags |= XFS_DIFLAG_EXTSZINHERIT; |
806 | } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { | 806 | } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { |
807 | if (vap->va_xflags & XFS_XFLAG_REALTIME) { | 807 | if (vap->va_xflags & XFS_XFLAG_REALTIME) |
808 | di_flags |= XFS_DIFLAG_REALTIME; | 808 | di_flags |= XFS_DIFLAG_REALTIME; |
809 | ip->i_iocore.io_flags |= XFS_IOCORE_RT; | ||
810 | } else { | ||
811 | ip->i_iocore.io_flags &= ~XFS_IOCORE_RT; | ||
812 | } | ||
813 | if (vap->va_xflags & XFS_XFLAG_EXTSIZE) | 809 | if (vap->va_xflags & XFS_XFLAG_EXTSIZE) |
814 | di_flags |= XFS_DIFLAG_EXTSIZE; | 810 | di_flags |= XFS_DIFLAG_EXTSIZE; |
815 | } | 811 | } |
@@ -902,28 +898,6 @@ xfs_setattr( | |||
902 | return code; | 898 | return code; |
903 | } | 899 | } |
904 | 900 | ||
905 | |||
906 | /* | ||
907 | * xfs_access | ||
908 | * Null conversion from vnode mode bits to inode mode bits, as in efs. | ||
909 | */ | ||
910 | int | ||
911 | xfs_access( | ||
912 | xfs_inode_t *ip, | ||
913 | int mode, | ||
914 | cred_t *credp) | ||
915 | { | ||
916 | int error; | ||
917 | |||
918 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | ||
919 | |||
920 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
921 | error = xfs_iaccess(ip, mode, credp); | ||
922 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
923 | return error; | ||
924 | } | ||
925 | |||
926 | |||
927 | /* | 901 | /* |
928 | * The maximum pathlen is 1024 bytes. Since the minimum file system | 902 | * The maximum pathlen is 1024 bytes. Since the minimum file system |
929 | * blocksize is 512 bytes, we can get a max of 2 extents back from | 903 | * blocksize is 512 bytes, we can get a max of 2 extents back from |
@@ -987,7 +961,7 @@ xfs_readlink( | |||
987 | int pathlen; | 961 | int pathlen; |
988 | int error = 0; | 962 | int error = 0; |
989 | 963 | ||
990 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 964 | xfs_itrace_entry(ip); |
991 | 965 | ||
992 | if (XFS_FORCED_SHUTDOWN(mp)) | 966 | if (XFS_FORCED_SHUTDOWN(mp)) |
993 | return XFS_ERROR(EIO); | 967 | return XFS_ERROR(EIO); |
@@ -1033,7 +1007,7 @@ xfs_fsync( | |||
1033 | int error; | 1007 | int error; |
1034 | int log_flushed = 0, changed = 1; | 1008 | int log_flushed = 0, changed = 1; |
1035 | 1009 | ||
1036 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 1010 | xfs_itrace_entry(ip); |
1037 | 1011 | ||
1038 | ASSERT(start >= 0 && stop >= -1); | 1012 | ASSERT(start >= 0 && stop >= -1); |
1039 | 1013 | ||
@@ -1149,7 +1123,7 @@ xfs_fsync( | |||
1149 | * If this inode is on the RT dev we need to flush that | 1123 | * If this inode is on the RT dev we need to flush that |
1150 | * cache as well. | 1124 | * cache as well. |
1151 | */ | 1125 | */ |
1152 | if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) | 1126 | if (XFS_IS_REALTIME_INODE(ip)) |
1153 | xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); | 1127 | xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); |
1154 | } | 1128 | } |
1155 | 1129 | ||
@@ -1188,7 +1162,7 @@ xfs_free_eofblocks( | |||
1188 | 1162 | ||
1189 | nimaps = 1; | 1163 | nimaps = 1; |
1190 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 1164 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
1191 | error = XFS_BMAPI(mp, NULL, &ip->i_iocore, end_fsb, map_len, 0, | 1165 | error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, |
1192 | NULL, 0, &imap, &nimaps, NULL, NULL); | 1166 | NULL, 0, &imap, &nimaps, NULL, NULL); |
1193 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 1167 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
1194 | 1168 | ||
@@ -1562,9 +1536,6 @@ xfs_release( | |||
1562 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); | 1536 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); |
1563 | if (error) | 1537 | if (error) |
1564 | return error; | 1538 | return error; |
1565 | /* Update linux inode block count after free above */ | ||
1566 | vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, | ||
1567 | ip->i_d.di_nblocks + ip->i_delayed_blks); | ||
1568 | } | 1539 | } |
1569 | } | 1540 | } |
1570 | 1541 | ||
@@ -1592,7 +1563,7 @@ xfs_inactive( | |||
1592 | int error; | 1563 | int error; |
1593 | int truncate; | 1564 | int truncate; |
1594 | 1565 | ||
1595 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 1566 | xfs_itrace_entry(ip); |
1596 | 1567 | ||
1597 | /* | 1568 | /* |
1598 | * If the inode is already free, then there can be nothing | 1569 | * If the inode is already free, then there can be nothing |
@@ -1638,9 +1609,6 @@ xfs_inactive( | |||
1638 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); | 1609 | error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); |
1639 | if (error) | 1610 | if (error) |
1640 | return VN_INACTIVE_CACHE; | 1611 | return VN_INACTIVE_CACHE; |
1641 | /* Update linux inode block count after free above */ | ||
1642 | vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, | ||
1643 | ip->i_d.di_nblocks + ip->i_delayed_blks); | ||
1644 | } | 1612 | } |
1645 | goto out; | 1613 | goto out; |
1646 | } | 1614 | } |
@@ -1805,7 +1773,7 @@ xfs_lookup( | |||
1805 | int error; | 1773 | int error; |
1806 | uint lock_mode; | 1774 | uint lock_mode; |
1807 | 1775 | ||
1808 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 1776 | xfs_itrace_entry(dp); |
1809 | 1777 | ||
1810 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 1778 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
1811 | return XFS_ERROR(EIO); | 1779 | return XFS_ERROR(EIO); |
@@ -1814,7 +1782,7 @@ xfs_lookup( | |||
1814 | error = xfs_dir_lookup_int(dp, lock_mode, dentry, &e_inum, &ip); | 1782 | error = xfs_dir_lookup_int(dp, lock_mode, dentry, &e_inum, &ip); |
1815 | if (!error) { | 1783 | if (!error) { |
1816 | *vpp = XFS_ITOV(ip); | 1784 | *vpp = XFS_ITOV(ip); |
1817 | ITRACE(ip); | 1785 | xfs_itrace_ref(ip); |
1818 | } | 1786 | } |
1819 | xfs_iunlock_map_shared(dp, lock_mode); | 1787 | xfs_iunlock_map_shared(dp, lock_mode); |
1820 | return error; | 1788 | return error; |
@@ -1848,7 +1816,7 @@ xfs_create( | |||
1848 | int namelen; | 1816 | int namelen; |
1849 | 1817 | ||
1850 | ASSERT(!*vpp); | 1818 | ASSERT(!*vpp); |
1851 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 1819 | xfs_itrace_entry(dp); |
1852 | 1820 | ||
1853 | namelen = VNAMELEN(dentry); | 1821 | namelen = VNAMELEN(dentry); |
1854 | 1822 | ||
@@ -1930,7 +1898,7 @@ xfs_create( | |||
1930 | goto error_return; | 1898 | goto error_return; |
1931 | goto abort_return; | 1899 | goto abort_return; |
1932 | } | 1900 | } |
1933 | ITRACE(ip); | 1901 | xfs_itrace_ref(ip); |
1934 | 1902 | ||
1935 | /* | 1903 | /* |
1936 | * At this point, we've gotten a newly allocated inode. | 1904 | * At this point, we've gotten a newly allocated inode. |
@@ -2098,7 +2066,7 @@ again: | |||
2098 | 2066 | ||
2099 | e_inum = ip->i_ino; | 2067 | e_inum = ip->i_ino; |
2100 | 2068 | ||
2101 | ITRACE(ip); | 2069 | xfs_itrace_ref(ip); |
2102 | 2070 | ||
2103 | /* | 2071 | /* |
2104 | * We want to lock in increasing inum. Since we've already | 2072 | * We want to lock in increasing inum. Since we've already |
@@ -2321,7 +2289,7 @@ xfs_remove( | |||
2321 | uint resblks; | 2289 | uint resblks; |
2322 | int namelen; | 2290 | int namelen; |
2323 | 2291 | ||
2324 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 2292 | xfs_itrace_entry(dp); |
2325 | 2293 | ||
2326 | if (XFS_FORCED_SHUTDOWN(mp)) | 2294 | if (XFS_FORCED_SHUTDOWN(mp)) |
2327 | return XFS_ERROR(EIO); | 2295 | return XFS_ERROR(EIO); |
@@ -2364,9 +2332,8 @@ xfs_remove( | |||
2364 | 2332 | ||
2365 | dm_di_mode = ip->i_d.di_mode; | 2333 | dm_di_mode = ip->i_d.di_mode; |
2366 | 2334 | ||
2367 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 2335 | xfs_itrace_entry(ip); |
2368 | 2336 | xfs_itrace_ref(ip); | |
2369 | ITRACE(ip); | ||
2370 | 2337 | ||
2371 | error = XFS_QM_DQATTACH(mp, dp, 0); | 2338 | error = XFS_QM_DQATTACH(mp, dp, 0); |
2372 | if (!error && dp != ip) | 2339 | if (!error && dp != ip) |
@@ -2498,8 +2465,7 @@ xfs_remove( | |||
2498 | if (link_zero && xfs_inode_is_filestream(ip)) | 2465 | if (link_zero && xfs_inode_is_filestream(ip)) |
2499 | xfs_filestream_deassociate(ip); | 2466 | xfs_filestream_deassociate(ip); |
2500 | 2467 | ||
2501 | vn_trace_exit(ip, __FUNCTION__, (inst_t *)__return_address); | 2468 | xfs_itrace_exit(ip); |
2502 | |||
2503 | IRELE(ip); | 2469 | IRELE(ip); |
2504 | 2470 | ||
2505 | /* Fall through to std_return with error = 0 */ | 2471 | /* Fall through to std_return with error = 0 */ |
@@ -2562,8 +2528,8 @@ xfs_link( | |||
2562 | char *target_name = VNAME(dentry); | 2528 | char *target_name = VNAME(dentry); |
2563 | int target_namelen; | 2529 | int target_namelen; |
2564 | 2530 | ||
2565 | vn_trace_entry(tdp, __FUNCTION__, (inst_t *)__return_address); | 2531 | xfs_itrace_entry(tdp); |
2566 | vn_trace_entry(xfs_vtoi(src_vp), __FUNCTION__, (inst_t *)__return_address); | 2532 | xfs_itrace_entry(xfs_vtoi(src_vp)); |
2567 | 2533 | ||
2568 | target_namelen = VNAMELEN(dentry); | 2534 | target_namelen = VNAMELEN(dentry); |
2569 | ASSERT(!VN_ISDIR(src_vp)); | 2535 | ASSERT(!VN_ISDIR(src_vp)); |
@@ -2744,7 +2710,7 @@ xfs_mkdir( | |||
2744 | 2710 | ||
2745 | /* Return through std_return after this point. */ | 2711 | /* Return through std_return after this point. */ |
2746 | 2712 | ||
2747 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 2713 | xfs_itrace_entry(dp); |
2748 | 2714 | ||
2749 | mp = dp->i_mount; | 2715 | mp = dp->i_mount; |
2750 | udqp = gdqp = NULL; | 2716 | udqp = gdqp = NULL; |
@@ -2810,7 +2776,7 @@ xfs_mkdir( | |||
2810 | goto error_return; | 2776 | goto error_return; |
2811 | goto abort_return; | 2777 | goto abort_return; |
2812 | } | 2778 | } |
2813 | ITRACE(cdp); | 2779 | xfs_itrace_ref(cdp); |
2814 | 2780 | ||
2815 | /* | 2781 | /* |
2816 | * Now we add the directory inode to the transaction. | 2782 | * Now we add the directory inode to the transaction. |
@@ -2936,7 +2902,7 @@ xfs_rmdir( | |||
2936 | int last_cdp_link; | 2902 | int last_cdp_link; |
2937 | uint resblks; | 2903 | uint resblks; |
2938 | 2904 | ||
2939 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 2905 | xfs_itrace_entry(dp); |
2940 | 2906 | ||
2941 | if (XFS_FORCED_SHUTDOWN(mp)) | 2907 | if (XFS_FORCED_SHUTDOWN(mp)) |
2942 | return XFS_ERROR(EIO); | 2908 | return XFS_ERROR(EIO); |
@@ -3041,7 +3007,7 @@ xfs_rmdir( | |||
3041 | VN_HOLD(dir_vp); | 3007 | VN_HOLD(dir_vp); |
3042 | } | 3008 | } |
3043 | 3009 | ||
3044 | ITRACE(cdp); | 3010 | xfs_itrace_ref(cdp); |
3045 | xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); | 3011 | xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); |
3046 | 3012 | ||
3047 | ASSERT(cdp->i_d.di_nlink >= 2); | 3013 | ASSERT(cdp->i_d.di_nlink >= 2); |
@@ -3189,8 +3155,7 @@ xfs_symlink( | |||
3189 | ip = NULL; | 3155 | ip = NULL; |
3190 | tp = NULL; | 3156 | tp = NULL; |
3191 | 3157 | ||
3192 | vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); | 3158 | xfs_itrace_entry(dp); |
3193 | |||
3194 | 3159 | ||
3195 | if (XFS_FORCED_SHUTDOWN(mp)) | 3160 | if (XFS_FORCED_SHUTDOWN(mp)) |
3196 | return XFS_ERROR(EIO); | 3161 | return XFS_ERROR(EIO); |
@@ -3317,7 +3282,7 @@ xfs_symlink( | |||
3317 | goto error_return; | 3282 | goto error_return; |
3318 | goto error1; | 3283 | goto error1; |
3319 | } | 3284 | } |
3320 | ITRACE(ip); | 3285 | xfs_itrace_ref(ip); |
3321 | 3286 | ||
3322 | /* | 3287 | /* |
3323 | * An error after we've joined dp to the transaction will result in the | 3288 | * An error after we've joined dp to the transaction will result in the |
@@ -3465,27 +3430,6 @@ std_return: | |||
3465 | goto std_return; | 3430 | goto std_return; |
3466 | } | 3431 | } |
3467 | 3432 | ||
3468 | |||
3469 | int | ||
3470 | xfs_fid2( | ||
3471 | xfs_inode_t *ip, | ||
3472 | xfs_fid_t *xfid) | ||
3473 | { | ||
3474 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | ||
3475 | |||
3476 | xfid->fid_len = sizeof(xfs_fid_t) - sizeof(xfid->fid_len); | ||
3477 | xfid->fid_pad = 0; | ||
3478 | /* | ||
3479 | * use memcpy because the inode is a long long and there's no | ||
3480 | * assurance that xfid->fid_ino is properly aligned. | ||
3481 | */ | ||
3482 | memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino)); | ||
3483 | xfid->fid_gen = ip->i_d.di_gen; | ||
3484 | |||
3485 | return 0; | ||
3486 | } | ||
3487 | |||
3488 | |||
3489 | int | 3433 | int |
3490 | xfs_rwlock( | 3434 | xfs_rwlock( |
3491 | xfs_inode_t *ip, | 3435 | xfs_inode_t *ip, |
@@ -3558,11 +3502,11 @@ xfs_inode_flush( | |||
3558 | if (iip && iip->ili_last_lsn) { | 3502 | if (iip && iip->ili_last_lsn) { |
3559 | xlog_t *log = mp->m_log; | 3503 | xlog_t *log = mp->m_log; |
3560 | xfs_lsn_t sync_lsn; | 3504 | xfs_lsn_t sync_lsn; |
3561 | int s, log_flags = XFS_LOG_FORCE; | 3505 | int log_flags = XFS_LOG_FORCE; |
3562 | 3506 | ||
3563 | s = GRANT_LOCK(log); | 3507 | spin_lock(&log->l_grant_lock); |
3564 | sync_lsn = log->l_last_sync_lsn; | 3508 | sync_lsn = log->l_last_sync_lsn; |
3565 | GRANT_UNLOCK(log, s); | 3509 | spin_unlock(&log->l_grant_lock); |
3566 | 3510 | ||
3567 | if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) { | 3511 | if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) { |
3568 | if (flags & FLUSH_SYNC) | 3512 | if (flags & FLUSH_SYNC) |
@@ -3637,8 +3581,8 @@ xfs_set_dmattrs( | |||
3637 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 3581 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
3638 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 3582 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
3639 | 3583 | ||
3640 | ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask; | 3584 | ip->i_d.di_dmevmask = evmask; |
3641 | ip->i_iocore.io_dmstate = ip->i_d.di_dmstate = state; | 3585 | ip->i_d.di_dmstate = state; |
3642 | 3586 | ||
3643 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 3587 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
3644 | IHOLD(ip); | 3588 | IHOLD(ip); |
@@ -3653,7 +3597,7 @@ xfs_reclaim( | |||
3653 | { | 3597 | { |
3654 | bhv_vnode_t *vp = XFS_ITOV(ip); | 3598 | bhv_vnode_t *vp = XFS_ITOV(ip); |
3655 | 3599 | ||
3656 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 3600 | xfs_itrace_entry(ip); |
3657 | 3601 | ||
3658 | ASSERT(!VN_MAPPED(vp)); | 3602 | ASSERT(!VN_MAPPED(vp)); |
3659 | 3603 | ||
@@ -3871,7 +3815,7 @@ xfs_alloc_file_space( | |||
3871 | int committed; | 3815 | int committed; |
3872 | int error; | 3816 | int error; |
3873 | 3817 | ||
3874 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 3818 | xfs_itrace_entry(ip); |
3875 | 3819 | ||
3876 | if (XFS_FORCED_SHUTDOWN(mp)) | 3820 | if (XFS_FORCED_SHUTDOWN(mp)) |
3877 | return XFS_ERROR(EIO); | 3821 | return XFS_ERROR(EIO); |
@@ -3976,7 +3920,7 @@ retry: | |||
3976 | * Issue the xfs_bmapi() call to allocate the blocks | 3920 | * Issue the xfs_bmapi() call to allocate the blocks |
3977 | */ | 3921 | */ |
3978 | XFS_BMAP_INIT(&free_list, &firstfsb); | 3922 | XFS_BMAP_INIT(&free_list, &firstfsb); |
3979 | error = XFS_BMAPI(mp, tp, &ip->i_iocore, startoffset_fsb, | 3923 | error = xfs_bmapi(tp, ip, startoffset_fsb, |
3980 | allocatesize_fsb, bmapi_flag, | 3924 | allocatesize_fsb, bmapi_flag, |
3981 | &firstfsb, 0, imapp, &nimaps, | 3925 | &firstfsb, 0, imapp, &nimaps, |
3982 | &free_list, NULL); | 3926 | &free_list, NULL); |
@@ -4052,13 +3996,13 @@ xfs_zero_remaining_bytes( | |||
4052 | int error = 0; | 3996 | int error = 0; |
4053 | 3997 | ||
4054 | bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, | 3998 | bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, |
4055 | ip->i_d.di_flags & XFS_DIFLAG_REALTIME ? | 3999 | XFS_IS_REALTIME_INODE(ip) ? |
4056 | mp->m_rtdev_targp : mp->m_ddev_targp); | 4000 | mp->m_rtdev_targp : mp->m_ddev_targp); |
4057 | 4001 | ||
4058 | for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { | 4002 | for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { |
4059 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 4003 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
4060 | nimap = 1; | 4004 | nimap = 1; |
4061 | error = XFS_BMAPI(mp, NULL, &ip->i_iocore, offset_fsb, 1, 0, | 4005 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, |
4062 | NULL, 0, &imap, &nimap, NULL, NULL); | 4006 | NULL, 0, &imap, &nimap, NULL, NULL); |
4063 | if (error || nimap < 1) | 4007 | if (error || nimap < 1) |
4064 | break; | 4008 | break; |
@@ -4141,7 +4085,7 @@ xfs_free_file_space( | |||
4141 | vp = XFS_ITOV(ip); | 4085 | vp = XFS_ITOV(ip); |
4142 | mp = ip->i_mount; | 4086 | mp = ip->i_mount; |
4143 | 4087 | ||
4144 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 4088 | xfs_itrace_entry(ip); |
4145 | 4089 | ||
4146 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) | 4090 | if ((error = XFS_QM_DQATTACH(mp, ip, 0))) |
4147 | return error; | 4091 | return error; |
@@ -4149,7 +4093,7 @@ xfs_free_file_space( | |||
4149 | error = 0; | 4093 | error = 0; |
4150 | if (len <= 0) /* if nothing being freed */ | 4094 | if (len <= 0) /* if nothing being freed */ |
4151 | return error; | 4095 | return error; |
4152 | rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); | 4096 | rt = XFS_IS_REALTIME_INODE(ip); |
4153 | startoffset_fsb = XFS_B_TO_FSB(mp, offset); | 4097 | startoffset_fsb = XFS_B_TO_FSB(mp, offset); |
4154 | end_dmi_offset = offset + len; | 4098 | end_dmi_offset = offset + len; |
4155 | endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); | 4099 | endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); |
@@ -4172,15 +4116,12 @@ xfs_free_file_space( | |||
4172 | vn_iowait(ip); /* wait for the completion of any pending DIOs */ | 4116 | vn_iowait(ip); /* wait for the completion of any pending DIOs */ |
4173 | } | 4117 | } |
4174 | 4118 | ||
4175 | rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, NBPP); | 4119 | rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); |
4176 | ioffset = offset & ~(rounding - 1); | 4120 | ioffset = offset & ~(rounding - 1); |
4177 | 4121 | ||
4178 | if (VN_CACHED(vp) != 0) { | 4122 | if (VN_CACHED(vp) != 0) { |
4179 | xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1, | 4123 | xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); |
4180 | ctooff(offtoct(ioffset)), -1); | 4124 | error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); |
4181 | error = xfs_flushinval_pages(ip, | ||
4182 | ctooff(offtoct(ioffset)), | ||
4183 | -1, FI_REMAPF_LOCKED); | ||
4184 | if (error) | 4125 | if (error) |
4185 | goto out_unlock_iolock; | 4126 | goto out_unlock_iolock; |
4186 | } | 4127 | } |
@@ -4193,7 +4134,7 @@ xfs_free_file_space( | |||
4193 | */ | 4134 | */ |
4194 | if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { | 4135 | if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { |
4195 | nimap = 1; | 4136 | nimap = 1; |
4196 | error = XFS_BMAPI(mp, NULL, &ip->i_iocore, startoffset_fsb, | 4137 | error = xfs_bmapi(NULL, ip, startoffset_fsb, |
4197 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); | 4138 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); |
4198 | if (error) | 4139 | if (error) |
4199 | goto out_unlock_iolock; | 4140 | goto out_unlock_iolock; |
@@ -4208,7 +4149,7 @@ xfs_free_file_space( | |||
4208 | startoffset_fsb += mp->m_sb.sb_rextsize - mod; | 4149 | startoffset_fsb += mp->m_sb.sb_rextsize - mod; |
4209 | } | 4150 | } |
4210 | nimap = 1; | 4151 | nimap = 1; |
4211 | error = XFS_BMAPI(mp, NULL, &ip->i_iocore, endoffset_fsb - 1, | 4152 | error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, |
4212 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); | 4153 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); |
4213 | if (error) | 4154 | if (error) |
4214 | goto out_unlock_iolock; | 4155 | goto out_unlock_iolock; |
@@ -4284,7 +4225,7 @@ xfs_free_file_space( | |||
4284 | * issue the bunmapi() call to free the blocks | 4225 | * issue the bunmapi() call to free the blocks |
4285 | */ | 4226 | */ |
4286 | XFS_BMAP_INIT(&free_list, &firstfsb); | 4227 | XFS_BMAP_INIT(&free_list, &firstfsb); |
4287 | error = XFS_BUNMAPI(mp, tp, &ip->i_iocore, startoffset_fsb, | 4228 | error = xfs_bunmapi(tp, ip, startoffset_fsb, |
4288 | endoffset_fsb - startoffset_fsb, | 4229 | endoffset_fsb - startoffset_fsb, |
4289 | 0, 2, &firstfsb, &free_list, NULL, &done); | 4230 | 0, 2, &firstfsb, &free_list, NULL, &done); |
4290 | if (error) { | 4231 | if (error) { |
@@ -4347,23 +4288,11 @@ xfs_change_file_space( | |||
4347 | xfs_trans_t *tp; | 4288 | xfs_trans_t *tp; |
4348 | bhv_vattr_t va; | 4289 | bhv_vattr_t va; |
4349 | 4290 | ||
4350 | vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); | 4291 | xfs_itrace_entry(ip); |
4351 | 4292 | ||
4352 | /* | ||
4353 | * must be a regular file and have write permission | ||
4354 | */ | ||
4355 | if (!S_ISREG(ip->i_d.di_mode)) | 4293 | if (!S_ISREG(ip->i_d.di_mode)) |
4356 | return XFS_ERROR(EINVAL); | 4294 | return XFS_ERROR(EINVAL); |
4357 | 4295 | ||
4358 | xfs_ilock(ip, XFS_ILOCK_SHARED); | ||
4359 | |||
4360 | if ((error = xfs_iaccess(ip, S_IWUSR, credp))) { | ||
4361 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
4362 | return error; | ||
4363 | } | ||
4364 | |||
4365 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
4366 | |||
4367 | switch (bf->l_whence) { | 4296 | switch (bf->l_whence) { |
4368 | case 0: /*SEEK_SET*/ | 4297 | case 0: /*SEEK_SET*/ |
4369 | break; | 4298 | break; |
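The change that runs all through this file is mechanical: every open-coded ip->i_d.di_flags & XFS_DIFLAG_REALTIME test becomes XFS_IS_REALTIME_INODE(ip). The macro is defined in the XFS inode headers rather than in this patch; functionally it amounts to something like the following, with the usual compile-time fallback when realtime support is not built in:

/* Assumed shape of the predicate used above; the real definition lives
 * in the XFS inode headers. */
#ifdef CONFIG_XFS_RT
#define XFS_IS_REALTIME_INODE(ip)	((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
#else
#define XFS_IS_REALTIME_INODE(ip)	(0)
#endif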
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index b7e461c40cfb..4e3970f0e5e3 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h | |||
@@ -18,7 +18,6 @@ int xfs_open(struct xfs_inode *ip); | |||
18 | int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags); | 18 | int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags); |
19 | int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, | 19 | int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, |
20 | struct cred *credp); | 20 | struct cred *credp); |
21 | int xfs_access(struct xfs_inode *ip, int mode, struct cred *credp); | ||
22 | int xfs_readlink(struct xfs_inode *ip, char *link); | 21 | int xfs_readlink(struct xfs_inode *ip, char *link); |
23 | int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start, | 22 | int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start, |
24 | xfs_off_t stop); | 23 | xfs_off_t stop); |
@@ -39,7 +38,6 @@ int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, | |||
39 | int xfs_symlink(struct xfs_inode *dp, bhv_vname_t *dentry, | 38 | int xfs_symlink(struct xfs_inode *dp, bhv_vname_t *dentry, |
40 | char *target_path, mode_t mode, bhv_vnode_t **vpp, | 39 | char *target_path, mode_t mode, bhv_vnode_t **vpp, |
41 | struct cred *credp); | 40 | struct cred *credp); |
42 | int xfs_fid2(struct xfs_inode *ip, struct xfs_fid *xfid); | ||
43 | int xfs_rwlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); | 41 | int xfs_rwlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); |
44 | void xfs_rwunlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); | 42 | void xfs_rwunlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); |
45 | int xfs_inode_flush(struct xfs_inode *ip, int flags); | 43 | int xfs_inode_flush(struct xfs_inode *ip, int flags); |
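Together with its implementation earlier in this patch, xfs_fid2() also drops out of the exported interface. What it did was pack the inode number and generation into an NFS-style handle, copying the 64-bit inode number with memcpy() because the handle buffer gives no alignment guarantee. A stand-alone sketch, with the struct layout inferred from the removed code rather than taken from the kernel headers:

#include <stdint.h>
#include <string.h>

/* Layout inferred from the removed xfs_fid2(); the real xfs_fid_t may
 * differ in field order or padding. */
struct fid_sketch {
	uint16_t fid_len;	/* handle length, not counting this field */
	uint16_t fid_pad;
	uint32_t fid_gen;	/* inode generation number */
	uint64_t fid_ino;	/* inode number, possibly misaligned in the buffer */
};

static void pack_fid_sketch(struct fid_sketch *fid, uint64_t ino, uint32_t gen)
{
	fid->fid_len = sizeof(*fid) - sizeof(fid->fid_len);
	fid->fid_pad = 0;
	fid->fid_gen = gen;
	/* bytewise copy: no assurance that fid_ino is properly aligned */
	memcpy(&fid->fid_ino, &ino, sizeof(fid->fid_ino));
}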