author | Jiri Kosina <jkosina@suse.cz> | 2010-08-04 09:14:38 -0400
committer | Jiri Kosina <jkosina@suse.cz> | 2010-08-04 09:14:38 -0400
commit | d790d4d583aeaed9fc6f8a9f4d9f8ce6b1c15c7f (patch)
tree | 854ab394486288d40fa8179cbfaf66e8bdc44b0f /fs
parent | 73b2c7165b76b20eb1290e7efebc33cfd21db1ca (diff)
parent | 3a09b1be53d23df780a0cd0e4087a05e2ca4a00c (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'fs')
206 files changed, 5706 insertions, 5706 deletions
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index 1a940ec7af61..91fba025fcbe 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_9P_FS) := 9p.o | |||
8 | vfs_dir.o \ | 8 | vfs_dir.o \ |
9 | vfs_dentry.o \ | 9 | vfs_dentry.o \ |
10 | v9fs.o \ | 10 | v9fs.o \ |
11 | fid.o | 11 | fid.o \ |
12 | xattr.o \ | ||
13 | xattr_user.o | ||
12 | 14 | ||
13 | 9p-$(CONFIG_9P_FSCACHE) += cache.o | 15 | 9p-$(CONFIG_9P_FSCACHE) += cache.o |
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 7317b39b2815..358563689064 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -97,6 +97,34 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any) | |||
97 | return ret; | 97 | return ret; |
98 | } | 98 | } |
99 | 99 | ||
100 | /* | ||
101 | * We need to hold v9ses->rename_sem as long as we hold references | ||
102 | * to returned path array. Array element contain pointers to | ||
103 | * dentry names. | ||
104 | */ | ||
105 | static int build_path_from_dentry(struct v9fs_session_info *v9ses, | ||
106 | struct dentry *dentry, char ***names) | ||
107 | { | ||
108 | int n = 0, i; | ||
109 | char **wnames; | ||
110 | struct dentry *ds; | ||
111 | |||
112 | for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) | ||
113 | n++; | ||
114 | |||
115 | wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); | ||
116 | if (!wnames) | ||
117 | goto err_out; | ||
118 | |||
119 | for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) | ||
120 | wnames[i] = (char *)ds->d_name.name; | ||
121 | |||
122 | *names = wnames; | ||
123 | return n; | ||
124 | err_out: | ||
125 | return -ENOMEM; | ||
126 | } | ||
127 | |||
100 | /** | 128 | /** |
101 | * v9fs_fid_lookup - lookup for a fid, try to walk if not found | 129 | * v9fs_fid_lookup - lookup for a fid, try to walk if not found |
102 | * @dentry: dentry to look for fid in | 130 | * @dentry: dentry to look for fid in |
@@ -112,7 +140,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) | |||
112 | int i, n, l, clone, any, access; | 140 | int i, n, l, clone, any, access; |
113 | u32 uid; | 141 | u32 uid; |
114 | struct p9_fid *fid, *old_fid = NULL; | 142 | struct p9_fid *fid, *old_fid = NULL; |
115 | struct dentry *d, *ds; | 143 | struct dentry *ds; |
116 | struct v9fs_session_info *v9ses; | 144 | struct v9fs_session_info *v9ses; |
117 | char **wnames, *uname; | 145 | char **wnames, *uname; |
118 | 146 | ||
@@ -139,49 +167,62 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) | |||
139 | fid = v9fs_fid_find(dentry, uid, any); | 167 | fid = v9fs_fid_find(dentry, uid, any); |
140 | if (fid) | 168 | if (fid) |
141 | return fid; | 169 | return fid; |
142 | 170 | /* | |
171 | * we don't have a matching fid. To do a TWALK we need | ||
172 | * parent fid. We need to prevent rename when we want to | ||
173 | * look at the parent. | ||
174 | */ | ||
175 | down_read(&v9ses->rename_sem); | ||
143 | ds = dentry->d_parent; | 176 | ds = dentry->d_parent; |
144 | fid = v9fs_fid_find(ds, uid, any); | 177 | fid = v9fs_fid_find(ds, uid, any); |
145 | if (!fid) { /* walk from the root */ | 178 | if (fid) { |
146 | n = 0; | 179 | /* Found the parent fid do a lookup with that */ |
147 | for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) | 180 | fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); |
148 | n++; | 181 | goto fid_out; |
182 | } | ||
183 | up_read(&v9ses->rename_sem); | ||
149 | 184 | ||
150 | fid = v9fs_fid_find(ds, uid, any); | 185 | /* start from the root and try to do a lookup */ |
151 | if (!fid) { /* the user is not attached to the fs yet */ | 186 | fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); |
152 | if (access == V9FS_ACCESS_SINGLE) | 187 | if (!fid) { |
153 | return ERR_PTR(-EPERM); | 188 | /* the user is not attached to the fs yet */ |
189 | if (access == V9FS_ACCESS_SINGLE) | ||
190 | return ERR_PTR(-EPERM); | ||
154 | 191 | ||
155 | if (v9fs_proto_dotu(v9ses)) | 192 | if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) |
156 | uname = NULL; | 193 | uname = NULL; |
157 | else | 194 | else |
158 | uname = v9ses->uname; | 195 | uname = v9ses->uname; |
159 | 196 | ||
160 | fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, | 197 | fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, |
161 | v9ses->aname); | 198 | v9ses->aname); |
162 | 199 | if (IS_ERR(fid)) | |
163 | if (IS_ERR(fid)) | 200 | return fid; |
164 | return fid; | ||
165 | |||
166 | v9fs_fid_add(ds, fid); | ||
167 | } | ||
168 | } else /* walk from the parent */ | ||
169 | n = 1; | ||
170 | 201 | ||
171 | if (ds == dentry) | 202 | v9fs_fid_add(dentry->d_sb->s_root, fid); |
203 | } | ||
204 | /* If we are root ourself just return that */ | ||
205 | if (dentry->d_sb->s_root == dentry) | ||
172 | return fid; | 206 | return fid; |
173 | 207 | /* | |
174 | wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); | 208 | * Do a multipath walk with attached root. |
175 | if (!wnames) | 209 | * When walking parent we need to make sure we |
176 | return ERR_PTR(-ENOMEM); | 210 | * don't have a parallel rename happening |
177 | 211 | */ | |
178 | for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent) | 212 | down_read(&v9ses->rename_sem); |
179 | wnames[i] = (char *) d->d_name.name; | 213 | n = build_path_from_dentry(v9ses, dentry, &wnames); |
180 | 214 | if (n < 0) { | |
215 | fid = ERR_PTR(n); | ||
216 | goto err_out; | ||
217 | } | ||
181 | clone = 1; | 218 | clone = 1; |
182 | i = 0; | 219 | i = 0; |
183 | while (i < n) { | 220 | while (i < n) { |
184 | l = min(n - i, P9_MAXWELEM); | 221 | l = min(n - i, P9_MAXWELEM); |
222 | /* | ||
223 | * We need to hold rename lock when doing a multipath | ||
224 | * walk to ensure none of the patch component change | ||
225 | */ | ||
185 | fid = p9_client_walk(fid, l, &wnames[i], clone); | 226 | fid = p9_client_walk(fid, l, &wnames[i], clone); |
186 | if (IS_ERR(fid)) { | 227 | if (IS_ERR(fid)) { |
187 | if (old_fid) { | 228 | if (old_fid) { |
@@ -193,15 +234,17 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) | |||
193 | p9_client_clunk(old_fid); | 234 | p9_client_clunk(old_fid); |
194 | } | 235 | } |
195 | kfree(wnames); | 236 | kfree(wnames); |
196 | return fid; | 237 | goto err_out; |
197 | } | 238 | } |
198 | old_fid = fid; | 239 | old_fid = fid; |
199 | i += l; | 240 | i += l; |
200 | clone = 0; | 241 | clone = 0; |
201 | } | 242 | } |
202 | |||
203 | kfree(wnames); | 243 | kfree(wnames); |
244 | fid_out: | ||
204 | v9fs_fid_add(dentry, fid); | 245 | v9fs_fid_add(dentry, fid); |
246 | err_out: | ||
247 | up_read(&v9ses->rename_sem); | ||
205 | return fid; | 248 | return fid; |
206 | } | 249 | } |
207 | 250 | ||
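The fid.c changes above all revolve around one locking rule: any code that follows d_parent pointers or collects dentry names must hold v9ses->rename_sem for read, and rename must hold it for write, so no path component can change under a walker. A minimal sketch of the pattern follows; the rwsem calls and d_move() are the real kernel APIs, while do_multipath_walk() and issue_rename_on_wire() are hypothetical stand-ins for the code in this diff.

/* Locking sketch only; do_multipath_walk() and issue_rename_on_wire() are
 * hypothetical placeholders for the fid.c / vfs_inode.c code in this diff. */

init_rwsem(&v9ses->rename_sem);			/* once, in v9fs_session_init() */

/* Reader side: v9fs_fid_lookup() building a path from d_parent pointers. */
down_read(&v9ses->rename_sem);
n = build_path_from_dentry(v9ses, dentry, &wnames);
if (n >= 0)
	fid = do_multipath_walk(root_fid, n, wnames);
up_read(&v9ses->rename_sem);

/* Writer side: v9fs_vfs_rename() excludes all walkers while names move. */
down_write(&v9ses->rename_sem);
retval = issue_rename_on_wire(oldfid, newdirfid, new_dentry);
if (!retval)
	d_move(old_dentry, new_dentry);
up_write(&v9ses->rename_sem);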
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index f8b86e92cd66..38dc0e067599 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -237,6 +237,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, | |||
237 | __putname(v9ses->uname); | 237 | __putname(v9ses->uname); |
238 | return ERR_PTR(-ENOMEM); | 238 | return ERR_PTR(-ENOMEM); |
239 | } | 239 | } |
240 | init_rwsem(&v9ses->rename_sem); | ||
240 | 241 | ||
241 | rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); | 242 | rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); |
242 | if (rc) { | 243 | if (rc) { |
@@ -278,7 +279,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, | |||
278 | v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ; | 279 | v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ; |
279 | 280 | ||
280 | /* for legacy mode, fall back to V9FS_ACCESS_ANY */ | 281 | /* for legacy mode, fall back to V9FS_ACCESS_ANY */ |
281 | if (!v9fs_proto_dotu(v9ses) && | 282 | if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) && |
282 | ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) { | 283 | ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) { |
283 | 284 | ||
284 | v9ses->flags &= ~V9FS_ACCESS_MASK; | 285 | v9ses->flags &= ~V9FS_ACCESS_MASK; |
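The legacy-mode fallback above now treats 9P2000.L the same as 9P2000.u, and the same two-dialect test recurs in the open, O_APPEND and readlink hunks further down. If it helps while reading those hunks, the repeated condition is equivalent to a tiny helper like this (hypothetical, not part of the diff):

/* Hypothetical wrapper: both extended dialects satisfy these checks. */
static inline int v9fs_proto_extended(struct v9fs_session_info *v9ses)
{
	return v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses);
}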
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index bec4d0bcb458..4c963c9fc41f 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -104,6 +104,7 @@ struct v9fs_session_info { | |||
104 | struct p9_client *clnt; /* 9p client */ | 104 | struct p9_client *clnt; /* 9p client */ |
105 | struct list_head slist; /* list of sessions registered with v9fs */ | 105 | struct list_head slist; /* list of sessions registered with v9fs */ |
106 | struct backing_dev_info bdi; | 106 | struct backing_dev_info bdi; |
107 | struct rw_semaphore rename_sem; | ||
107 | }; | 108 | }; |
108 | 109 | ||
109 | struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, | 110 | struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, |
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 32ef4009d030..f47c6bbb01b3 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -55,6 +55,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode); | |||
55 | void v9fs_clear_inode(struct inode *inode); | 55 | void v9fs_clear_inode(struct inode *inode); |
56 | ino_t v9fs_qid2ino(struct p9_qid *qid); | 56 | ino_t v9fs_qid2ino(struct p9_qid *qid); |
57 | void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); | 57 | void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); |
58 | void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); | ||
58 | int v9fs_dir_release(struct inode *inode, struct file *filp); | 59 | int v9fs_dir_release(struct inode *inode, struct file *filp); |
59 | int v9fs_file_open(struct inode *inode, struct file *file); | 60 | int v9fs_file_open(struct inode *inode, struct file *file); |
60 | void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); | 61 | void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); |
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index d61e3b28ce37..16c8a2a98c1b 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -87,29 +87,19 @@ static void p9stat_init(struct p9_wstat *stbuf) | |||
87 | } | 87 | } |
88 | 88 | ||
89 | /** | 89 | /** |
90 | * v9fs_dir_readdir - read a directory | 90 | * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir |
91 | * @filp: opened file structure | 91 | * @filp: opened file structure |
92 | * @dirent: directory structure ??? | 92 | * @buflen: Length in bytes of buffer to allocate |
93 | * @filldir: function to populate directory structure ??? | ||
94 | * | 93 | * |
95 | */ | 94 | */ |
96 | 95 | ||
97 | static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) | 96 | static int v9fs_alloc_rdir_buf(struct file *filp, int buflen) |
98 | { | 97 | { |
99 | int over; | ||
100 | struct p9_wstat st; | ||
101 | int err = 0; | ||
102 | struct p9_fid *fid; | ||
103 | int buflen; | ||
104 | int reclen = 0; | ||
105 | struct p9_rdir *rdir; | 98 | struct p9_rdir *rdir; |
99 | struct p9_fid *fid; | ||
100 | int err = 0; | ||
106 | 101 | ||
107 | P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); | ||
108 | fid = filp->private_data; | 102 | fid = filp->private_data; |
109 | |||
110 | buflen = fid->clnt->msize - P9_IOHDRSZ; | ||
111 | |||
112 | /* allocate rdir on demand */ | ||
113 | if (!fid->rdir) { | 103 | if (!fid->rdir) { |
114 | rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); | 104 | rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); |
115 | 105 | ||
@@ -128,6 +118,36 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
128 | spin_unlock(&filp->f_dentry->d_lock); | 118 | spin_unlock(&filp->f_dentry->d_lock); |
129 | kfree(rdir); | 119 | kfree(rdir); |
130 | } | 120 | } |
121 | exit: | ||
122 | return err; | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * v9fs_dir_readdir - read a directory | ||
127 | * @filp: opened file structure | ||
128 | * @dirent: directory structure ??? | ||
129 | * @filldir: function to populate directory structure ??? | ||
130 | * | ||
131 | */ | ||
132 | |||
133 | static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
134 | { | ||
135 | int over; | ||
136 | struct p9_wstat st; | ||
137 | int err = 0; | ||
138 | struct p9_fid *fid; | ||
139 | int buflen; | ||
140 | int reclen = 0; | ||
141 | struct p9_rdir *rdir; | ||
142 | |||
143 | P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); | ||
144 | fid = filp->private_data; | ||
145 | |||
146 | buflen = fid->clnt->msize - P9_IOHDRSZ; | ||
147 | |||
148 | err = v9fs_alloc_rdir_buf(filp, buflen); | ||
149 | if (err) | ||
150 | goto exit; | ||
131 | rdir = (struct p9_rdir *) fid->rdir; | 151 | rdir = (struct p9_rdir *) fid->rdir; |
132 | 152 | ||
133 | err = mutex_lock_interruptible(&rdir->mutex); | 153 | err = mutex_lock_interruptible(&rdir->mutex); |
@@ -146,7 +166,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
146 | while (rdir->head < rdir->tail) { | 166 | while (rdir->head < rdir->tail) { |
147 | p9stat_init(&st); | 167 | p9stat_init(&st); |
148 | err = p9stat_read(rdir->buf + rdir->head, | 168 | err = p9stat_read(rdir->buf + rdir->head, |
149 | buflen - rdir->head, &st, | 169 | rdir->tail - rdir->head, &st, |
150 | fid->clnt->proto_version); | 170 | fid->clnt->proto_version); |
151 | if (err) { | 171 | if (err) { |
152 | P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); | 172 | P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); |
@@ -176,6 +196,88 @@ exit: | |||
176 | return err; | 196 | return err; |
177 | } | 197 | } |
178 | 198 | ||
199 | /** | ||
200 | * v9fs_dir_readdir_dotl - read a directory | ||
201 | * @filp: opened file structure | ||
202 | * @dirent: buffer to fill dirent structures | ||
203 | * @filldir: function to populate dirent structures | ||
204 | * | ||
205 | */ | ||
206 | static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, | ||
207 | filldir_t filldir) | ||
208 | { | ||
209 | int over; | ||
210 | int err = 0; | ||
211 | struct p9_fid *fid; | ||
212 | int buflen; | ||
213 | struct p9_rdir *rdir; | ||
214 | struct p9_dirent curdirent; | ||
215 | u64 oldoffset = 0; | ||
216 | |||
217 | P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); | ||
218 | fid = filp->private_data; | ||
219 | |||
220 | buflen = fid->clnt->msize - P9_READDIRHDRSZ; | ||
221 | |||
222 | err = v9fs_alloc_rdir_buf(filp, buflen); | ||
223 | if (err) | ||
224 | goto exit; | ||
225 | rdir = (struct p9_rdir *) fid->rdir; | ||
226 | |||
227 | err = mutex_lock_interruptible(&rdir->mutex); | ||
228 | if (err) | ||
229 | return err; | ||
230 | |||
231 | while (err == 0) { | ||
232 | if (rdir->tail == rdir->head) { | ||
233 | err = p9_client_readdir(fid, rdir->buf, buflen, | ||
234 | filp->f_pos); | ||
235 | if (err <= 0) | ||
236 | goto unlock_and_exit; | ||
237 | |||
238 | rdir->head = 0; | ||
239 | rdir->tail = err; | ||
240 | } | ||
241 | |||
242 | while (rdir->head < rdir->tail) { | ||
243 | |||
244 | err = p9dirent_read(rdir->buf + rdir->head, | ||
245 | buflen - rdir->head, &curdirent, | ||
246 | fid->clnt->proto_version); | ||
247 | if (err < 0) { | ||
248 | P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err); | ||
249 | err = -EIO; | ||
250 | goto unlock_and_exit; | ||
251 | } | ||
252 | |||
253 | /* d_off in dirent structure tracks the offset into | ||
254 | * the next dirent in the dir. However, filldir() | ||
255 | * expects offset into the current dirent. Hence | ||
256 | * while calling filldir send the offset from the | ||
257 | * previous dirent structure. | ||
258 | */ | ||
259 | over = filldir(dirent, curdirent.d_name, | ||
260 | strlen(curdirent.d_name), | ||
261 | oldoffset, v9fs_qid2ino(&curdirent.qid), | ||
262 | curdirent.d_type); | ||
263 | oldoffset = curdirent.d_off; | ||
264 | |||
265 | if (over) { | ||
266 | err = 0; | ||
267 | goto unlock_and_exit; | ||
268 | } | ||
269 | |||
270 | filp->f_pos = curdirent.d_off; | ||
271 | rdir->head += err; | ||
272 | } | ||
273 | } | ||
274 | |||
275 | unlock_and_exit: | ||
276 | mutex_unlock(&rdir->mutex); | ||
277 | exit: | ||
278 | return err; | ||
279 | } | ||
280 | |||
179 | 281 | ||
180 | /** | 282 | /** |
181 | * v9fs_dir_release - close a directory | 283 | * v9fs_dir_release - close a directory |
@@ -207,7 +309,7 @@ const struct file_operations v9fs_dir_operations = { | |||
207 | const struct file_operations v9fs_dir_operations_dotl = { | 309 | const struct file_operations v9fs_dir_operations_dotl = { |
208 | .read = generic_read_dir, | 310 | .read = generic_read_dir, |
209 | .llseek = generic_file_llseek, | 311 | .llseek = generic_file_llseek, |
210 | .readdir = v9fs_dir_readdir, | 312 | .readdir = v9fs_dir_readdir_dotl, |
211 | .open = v9fs_file_open, | 313 | .open = v9fs_file_open, |
212 | .release = v9fs_dir_release, | 314 | .release = v9fs_dir_release, |
213 | }; | 315 | }; |
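v9fs_dir_readdir_dotl() above juggles two offset conventions: the server's d_off field points at the next directory entry, while filldir() wants the offset of the entry currently being emitted. A reduced sketch of that bookkeeping; next_dirent() and inode_of() are hypothetical helpers standing in for the p9_client_readdir()/p9dirent_read() plumbing in the diff.

/* Offset bookkeeping sketch; next_dirent() and inode_of() are hypothetical. */
u64 prev_off = 0;			/* offset of the entry being emitted */
struct p9_dirent de;

while (next_dirent(rdir, &de) == 0) {
	/* the previous entry's d_off is the offset of *this* entry */
	if (filldir(dirent, de.d_name, strlen(de.d_name),
		    prev_off, inode_of(&de), de.d_type))
		break;			/* caller's buffer is full; stop here */
	prev_off = de.d_off;		/* remember for the next iteration */
	filp->f_pos = de.d_off;		/* next readdir resumes past this entry */
}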
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 2bedc6c94fc2..e97c92bd6f16 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -59,9 +59,13 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
59 | struct p9_fid *fid; | 59 | struct p9_fid *fid; |
60 | int omode; | 60 | int omode; |
61 | 61 | ||
62 | P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p \n", inode, file); | 62 | P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file); |
63 | v9ses = v9fs_inode2v9ses(inode); | 63 | v9ses = v9fs_inode2v9ses(inode); |
64 | omode = v9fs_uflags2omode(file->f_flags, v9fs_proto_dotu(v9ses)); | 64 | if (v9fs_proto_dotl(v9ses)) |
65 | omode = file->f_flags; | ||
66 | else | ||
67 | omode = v9fs_uflags2omode(file->f_flags, | ||
68 | v9fs_proto_dotu(v9ses)); | ||
65 | fid = file->private_data; | 69 | fid = file->private_data; |
66 | if (!fid) { | 70 | if (!fid) { |
67 | fid = v9fs_fid_clone(file->f_path.dentry); | 71 | fid = v9fs_fid_clone(file->f_path.dentry); |
@@ -73,11 +77,12 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
73 | p9_client_clunk(fid); | 77 | p9_client_clunk(fid); |
74 | return err; | 78 | return err; |
75 | } | 79 | } |
76 | if (omode & P9_OTRUNC) { | 80 | if (file->f_flags & O_TRUNC) { |
77 | i_size_write(inode, 0); | 81 | i_size_write(inode, 0); |
78 | inode->i_blocks = 0; | 82 | inode->i_blocks = 0; |
79 | } | 83 | } |
80 | if ((file->f_flags & O_APPEND) && (!v9fs_proto_dotu(v9ses))) | 84 | if ((file->f_flags & O_APPEND) && |
85 | (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))) | ||
81 | generic_file_llseek(file, 0, SEEK_END); | 86 | generic_file_llseek(file, 0, SEEK_END); |
82 | } | 87 | } |
83 | 88 | ||
@@ -139,7 +144,7 @@ ssize_t | |||
139 | v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, | 144 | v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, |
140 | u64 offset) | 145 | u64 offset) |
141 | { | 146 | { |
142 | int n, total; | 147 | int n, total, size; |
143 | struct p9_fid *fid = filp->private_data; | 148 | struct p9_fid *fid = filp->private_data; |
144 | 149 | ||
145 | P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid, | 150 | P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid, |
@@ -147,6 +152,7 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, | |||
147 | 152 | ||
148 | n = 0; | 153 | n = 0; |
149 | total = 0; | 154 | total = 0; |
155 | size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; | ||
150 | do { | 156 | do { |
151 | n = p9_client_read(fid, data, udata, offset, count); | 157 | n = p9_client_read(fid, data, udata, offset, count); |
152 | if (n <= 0) | 158 | if (n <= 0) |
@@ -160,7 +166,7 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, | |||
160 | offset += n; | 166 | offset += n; |
161 | count -= n; | 167 | count -= n; |
162 | total += n; | 168 | total += n; |
163 | } while (count > 0 && n == (fid->clnt->msize - P9_IOHDRSZ)); | 169 | } while (count > 0 && n == size); |
164 | 170 | ||
165 | if (n < 0) | 171 | if (n < 0) |
166 | total = n; | 172 | total = n; |
@@ -183,11 +189,13 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count, | |||
183 | { | 189 | { |
184 | int ret; | 190 | int ret; |
185 | struct p9_fid *fid; | 191 | struct p9_fid *fid; |
192 | size_t size; | ||
186 | 193 | ||
187 | P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset); | 194 | P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset); |
188 | fid = filp->private_data; | 195 | fid = filp->private_data; |
189 | 196 | ||
190 | if (count > (fid->clnt->msize - P9_IOHDRSZ)) | 197 | size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; |
198 | if (count > size) | ||
191 | ret = v9fs_file_readn(filp, NULL, udata, count, *offset); | 199 | ret = v9fs_file_readn(filp, NULL, udata, count, *offset); |
192 | else | 200 | else |
193 | ret = p9_client_read(fid, NULL, udata, *offset, count); | 201 | ret = p9_client_read(fid, NULL, udata, *offset, count); |
@@ -224,9 +232,7 @@ v9fs_file_write(struct file *filp, const char __user * data, | |||
224 | fid = filp->private_data; | 232 | fid = filp->private_data; |
225 | clnt = fid->clnt; | 233 | clnt = fid->clnt; |
226 | 234 | ||
227 | rsize = fid->iounit; | 235 | rsize = fid->iounit ? fid->iounit : clnt->msize - P9_IOHDRSZ; |
228 | if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) | ||
229 | rsize = clnt->msize - P9_IOHDRSZ; | ||
230 | 236 | ||
231 | do { | 237 | do { |
232 | if (count < rsize) | 238 | if (count < rsize) |
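The vfs_file.c hunks above replace the fixed msize-based transfer size with the iounit the server advertised when the fid was opened, falling back to msize - P9_IOHDRSZ when iounit is zero. Condensing the pattern spread across the read/readn/write hunks into one sketch (real p9_client_read() call; error handling and the udata-advance case trimmed):

/* Preferred chunk size: the server's iounit if it gave one, otherwise the
 * largest payload that fits in a single 9P message. */
size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;

total = 0;
do {
	n = p9_client_read(fid, data, udata, offset, count);
	if (n <= 0)
		break;
	if (data)
		data += n;
	offset += n;
	count -= n;
	total += n;
} while (count > 0 && n == size);	/* a short read marks the end */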
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 4331b3b5ee1c..6e94f3247cec 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/idr.h> | 35 | #include <linux/idr.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/xattr.h> | ||
38 | #include <net/9p/9p.h> | 39 | #include <net/9p/9p.h> |
39 | #include <net/9p/client.h> | 40 | #include <net/9p/client.h> |
40 | 41 | ||
@@ -42,6 +43,7 @@ | |||
42 | #include "v9fs_vfs.h" | 43 | #include "v9fs_vfs.h" |
43 | #include "fid.h" | 44 | #include "fid.h" |
44 | #include "cache.h" | 45 | #include "cache.h" |
46 | #include "xattr.h" | ||
45 | 47 | ||
46 | static const struct inode_operations v9fs_dir_inode_operations; | 48 | static const struct inode_operations v9fs_dir_inode_operations; |
47 | static const struct inode_operations v9fs_dir_inode_operations_dotu; | 49 | static const struct inode_operations v9fs_dir_inode_operations_dotu; |
@@ -236,6 +238,41 @@ void v9fs_destroy_inode(struct inode *inode) | |||
236 | #endif | 238 | #endif |
237 | 239 | ||
238 | /** | 240 | /** |
241 | * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a | ||
242 | * new file system object. This checks the S_ISGID to determine the owning | ||
243 | * group of the new file system object. | ||
244 | */ | ||
245 | |||
246 | static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) | ||
247 | { | ||
248 | BUG_ON(dir_inode == NULL); | ||
249 | |||
250 | if (dir_inode->i_mode & S_ISGID) { | ||
251 | /* set_gid bit is set.*/ | ||
252 | return dir_inode->i_gid; | ||
253 | } | ||
254 | return current_fsgid(); | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * v9fs_dentry_from_dir_inode - helper function to get the dentry from | ||
259 | * dir inode. | ||
260 | * | ||
261 | */ | ||
262 | |||
263 | static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) | ||
264 | { | ||
265 | struct dentry *dentry; | ||
266 | |||
267 | spin_lock(&dcache_lock); | ||
268 | /* Directory should have only one entry. */ | ||
269 | BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); | ||
270 | dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); | ||
271 | spin_unlock(&dcache_lock); | ||
272 | return dentry; | ||
273 | } | ||
274 | |||
275 | /** | ||
239 | * v9fs_get_inode - helper function to setup an inode | 276 | * v9fs_get_inode - helper function to setup an inode |
240 | * @sb: superblock | 277 | * @sb: superblock |
241 | * @mode: mode to setup inode with | 278 | * @mode: mode to setup inode with |
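The two helpers just added are used together by the 9P2000.L create paths further down in this diff: the directory inode is mapped back to its single dentry so the parent fid can be looked up, and the owning group honours the directory's set-gid bit. Roughly (error handling omitted):

/* Usage sketch, matching the mkdir_dotl/mknod_dotl paths below. */
struct dentry *dir_dentry = v9fs_dentry_from_dir_inode(dir);
struct p9_fid *dfid = v9fs_fid_lookup(dir_dentry);
gid_t gid = v9fs_get_fsgid_for_create(dir);	/* dir->i_gid if S_ISGID,
						 * else current_fsgid() */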
@@ -267,7 +304,13 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode) | |||
267 | case S_IFBLK: | 304 | case S_IFBLK: |
268 | case S_IFCHR: | 305 | case S_IFCHR: |
269 | case S_IFSOCK: | 306 | case S_IFSOCK: |
270 | if (!v9fs_proto_dotu(v9ses)) { | 307 | if (v9fs_proto_dotl(v9ses)) { |
308 | inode->i_op = &v9fs_file_inode_operations_dotl; | ||
309 | inode->i_fop = &v9fs_file_operations_dotl; | ||
310 | } else if (v9fs_proto_dotu(v9ses)) { | ||
311 | inode->i_op = &v9fs_file_inode_operations; | ||
312 | inode->i_fop = &v9fs_file_operations; | ||
313 | } else { | ||
271 | P9_DPRINTK(P9_DEBUG_ERROR, | 314 | P9_DPRINTK(P9_DEBUG_ERROR, |
272 | "special files without extended mode\n"); | 315 | "special files without extended mode\n"); |
273 | err = -EINVAL; | 316 | err = -EINVAL; |
@@ -396,23 +439,14 @@ void v9fs_clear_inode(struct inode *inode) | |||
396 | #endif | 439 | #endif |
397 | } | 440 | } |
398 | 441 | ||
399 | /** | ||
400 | * v9fs_inode_from_fid - populate an inode by issuing a attribute request | ||
401 | * @v9ses: session information | ||
402 | * @fid: fid to issue attribute request for | ||
403 | * @sb: superblock on which to create inode | ||
404 | * | ||
405 | */ | ||
406 | |||
407 | static struct inode * | 442 | static struct inode * |
408 | v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, | 443 | v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid, |
409 | struct super_block *sb) | 444 | struct super_block *sb) |
410 | { | 445 | { |
411 | int err, umode; | 446 | int err, umode; |
412 | struct inode *ret; | 447 | struct inode *ret = NULL; |
413 | struct p9_wstat *st; | 448 | struct p9_wstat *st; |
414 | 449 | ||
415 | ret = NULL; | ||
416 | st = p9_client_stat(fid); | 450 | st = p9_client_stat(fid); |
417 | if (IS_ERR(st)) | 451 | if (IS_ERR(st)) |
418 | return ERR_CAST(st); | 452 | return ERR_CAST(st); |
@@ -433,15 +467,62 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, | |||
433 | #endif | 467 | #endif |
434 | p9stat_free(st); | 468 | p9stat_free(st); |
435 | kfree(st); | 469 | kfree(st); |
436 | |||
437 | return ret; | 470 | return ret; |
438 | |||
439 | error: | 471 | error: |
440 | p9stat_free(st); | 472 | p9stat_free(st); |
441 | kfree(st); | 473 | kfree(st); |
442 | return ERR_PTR(err); | 474 | return ERR_PTR(err); |
443 | } | 475 | } |
444 | 476 | ||
477 | static struct inode * | ||
478 | v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, | ||
479 | struct super_block *sb) | ||
480 | { | ||
481 | struct inode *ret = NULL; | ||
482 | int err; | ||
483 | struct p9_stat_dotl *st; | ||
484 | |||
485 | st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); | ||
486 | if (IS_ERR(st)) | ||
487 | return ERR_CAST(st); | ||
488 | |||
489 | ret = v9fs_get_inode(sb, st->st_mode); | ||
490 | if (IS_ERR(ret)) { | ||
491 | err = PTR_ERR(ret); | ||
492 | goto error; | ||
493 | } | ||
494 | |||
495 | v9fs_stat2inode_dotl(st, ret); | ||
496 | ret->i_ino = v9fs_qid2ino(&st->qid); | ||
497 | #ifdef CONFIG_9P_FSCACHE | ||
498 | v9fs_vcookie_set_qid(ret, &st->qid); | ||
499 | v9fs_cache_inode_get_cookie(ret); | ||
500 | #endif | ||
501 | kfree(st); | ||
502 | return ret; | ||
503 | error: | ||
504 | kfree(st); | ||
505 | return ERR_PTR(err); | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * v9fs_inode_from_fid - Helper routine to populate an inode by | ||
510 | * issuing a attribute request | ||
511 | * @v9ses: session information | ||
512 | * @fid: fid to issue attribute request for | ||
513 | * @sb: superblock on which to create inode | ||
514 | * | ||
515 | */ | ||
516 | static inline struct inode * | ||
517 | v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, | ||
518 | struct super_block *sb) | ||
519 | { | ||
520 | if (v9fs_proto_dotl(v9ses)) | ||
521 | return v9fs_inode_dotl(v9ses, fid, sb); | ||
522 | else | ||
523 | return v9fs_inode(v9ses, fid, sb); | ||
524 | } | ||
525 | |||
445 | /** | 526 | /** |
446 | * v9fs_remove - helper function to remove files and directories | 527 | * v9fs_remove - helper function to remove files and directories |
447 | * @dir: directory inode that is being deleted | 528 | * @dir: directory inode that is being deleted |
@@ -563,6 +644,118 @@ error: | |||
563 | } | 644 | } |
564 | 645 | ||
565 | /** | 646 | /** |
647 | * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. | ||
648 | * @dir: directory inode that is being created | ||
649 | * @dentry: dentry that is being deleted | ||
650 | * @mode: create permissions | ||
651 | * @nd: path information | ||
652 | * | ||
653 | */ | ||
654 | |||
655 | static int | ||
656 | v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode, | ||
657 | struct nameidata *nd) | ||
658 | { | ||
659 | int err = 0; | ||
660 | char *name = NULL; | ||
661 | gid_t gid; | ||
662 | int flags; | ||
663 | struct v9fs_session_info *v9ses; | ||
664 | struct p9_fid *fid = NULL; | ||
665 | struct p9_fid *dfid, *ofid; | ||
666 | struct file *filp; | ||
667 | struct p9_qid qid; | ||
668 | struct inode *inode; | ||
669 | |||
670 | v9ses = v9fs_inode2v9ses(dir); | ||
671 | if (nd && nd->flags & LOOKUP_OPEN) | ||
672 | flags = nd->intent.open.flags - 1; | ||
673 | else | ||
674 | flags = O_RDWR; | ||
675 | |||
676 | name = (char *) dentry->d_name.name; | ||
677 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x " | ||
678 | "mode:0x%x\n", name, flags, mode); | ||
679 | |||
680 | dfid = v9fs_fid_lookup(dentry->d_parent); | ||
681 | if (IS_ERR(dfid)) { | ||
682 | err = PTR_ERR(dfid); | ||
683 | P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); | ||
684 | return err; | ||
685 | } | ||
686 | |||
687 | /* clone a fid to use for creation */ | ||
688 | ofid = p9_client_walk(dfid, 0, NULL, 1); | ||
689 | if (IS_ERR(ofid)) { | ||
690 | err = PTR_ERR(ofid); | ||
691 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); | ||
692 | return err; | ||
693 | } | ||
694 | |||
695 | gid = v9fs_get_fsgid_for_create(dir); | ||
696 | err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); | ||
697 | if (err < 0) { | ||
698 | P9_DPRINTK(P9_DEBUG_VFS, | ||
699 | "p9_client_open_dotl failed in creat %d\n", | ||
700 | err); | ||
701 | goto error; | ||
702 | } | ||
703 | |||
704 | /* No need to populate the inode if we are not opening the file AND | ||
705 | * not in cached mode. | ||
706 | */ | ||
707 | if (!v9ses->cache && !(nd && nd->flags & LOOKUP_OPEN)) { | ||
708 | /* Not in cached mode. No need to populate inode with stat */ | ||
709 | dentry->d_op = &v9fs_dentry_operations; | ||
710 | p9_client_clunk(ofid); | ||
711 | d_instantiate(dentry, NULL); | ||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | /* Now walk from the parent so we can get an unopened fid. */ | ||
716 | fid = p9_client_walk(dfid, 1, &name, 1); | ||
717 | if (IS_ERR(fid)) { | ||
718 | err = PTR_ERR(fid); | ||
719 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); | ||
720 | fid = NULL; | ||
721 | goto error; | ||
722 | } | ||
723 | |||
724 | /* instantiate inode and assign the unopened fid to dentry */ | ||
725 | inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); | ||
726 | if (IS_ERR(inode)) { | ||
727 | err = PTR_ERR(inode); | ||
728 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); | ||
729 | goto error; | ||
730 | } | ||
731 | dentry->d_op = &v9fs_cached_dentry_operations; | ||
732 | d_instantiate(dentry, inode); | ||
733 | err = v9fs_fid_add(dentry, fid); | ||
734 | if (err < 0) | ||
735 | goto error; | ||
736 | |||
737 | /* if we are opening a file, assign the open fid to the file */ | ||
738 | if (nd && nd->flags & LOOKUP_OPEN) { | ||
739 | filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created); | ||
740 | if (IS_ERR(filp)) { | ||
741 | p9_client_clunk(ofid); | ||
742 | return PTR_ERR(filp); | ||
743 | } | ||
744 | filp->private_data = ofid; | ||
745 | } else | ||
746 | p9_client_clunk(ofid); | ||
747 | |||
748 | return 0; | ||
749 | |||
750 | error: | ||
751 | if (ofid) | ||
752 | p9_client_clunk(ofid); | ||
753 | if (fid) | ||
754 | p9_client_clunk(fid); | ||
755 | return err; | ||
756 | } | ||
757 | |||
758 | /** | ||
566 | * v9fs_vfs_create - VFS hook to create files | 759 | * v9fs_vfs_create - VFS hook to create files |
567 | * @dir: directory inode that is being created | 760 | * @dir: directory inode that is being created |
568 | * @dentry: dentry that is being deleted | 761 | * @dentry: dentry that is being deleted |
@@ -652,6 +845,83 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
652 | return err; | 845 | return err; |
653 | } | 846 | } |
654 | 847 | ||
848 | |||
849 | /** | ||
850 | * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory | ||
851 | * @dir: inode that is being unlinked | ||
852 | * @dentry: dentry that is being unlinked | ||
853 | * @mode: mode for new directory | ||
854 | * | ||
855 | */ | ||
856 | |||
857 | static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry, | ||
858 | int mode) | ||
859 | { | ||
860 | int err; | ||
861 | struct v9fs_session_info *v9ses; | ||
862 | struct p9_fid *fid = NULL, *dfid = NULL; | ||
863 | gid_t gid; | ||
864 | char *name; | ||
865 | struct inode *inode; | ||
866 | struct p9_qid qid; | ||
867 | struct dentry *dir_dentry; | ||
868 | |||
869 | P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); | ||
870 | err = 0; | ||
871 | v9ses = v9fs_inode2v9ses(dir); | ||
872 | |||
873 | mode |= S_IFDIR; | ||
874 | dir_dentry = v9fs_dentry_from_dir_inode(dir); | ||
875 | dfid = v9fs_fid_lookup(dir_dentry); | ||
876 | if (IS_ERR(dfid)) { | ||
877 | err = PTR_ERR(dfid); | ||
878 | P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); | ||
879 | dfid = NULL; | ||
880 | goto error; | ||
881 | } | ||
882 | |||
883 | gid = v9fs_get_fsgid_for_create(dir); | ||
884 | if (gid < 0) { | ||
885 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n"); | ||
886 | goto error; | ||
887 | } | ||
888 | |||
889 | name = (char *) dentry->d_name.name; | ||
890 | err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); | ||
891 | if (err < 0) | ||
892 | goto error; | ||
893 | |||
894 | /* instantiate inode and assign the unopened fid to the dentry */ | ||
895 | if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { | ||
896 | fid = p9_client_walk(dfid, 1, &name, 1); | ||
897 | if (IS_ERR(fid)) { | ||
898 | err = PTR_ERR(fid); | ||
899 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", | ||
900 | err); | ||
901 | fid = NULL; | ||
902 | goto error; | ||
903 | } | ||
904 | |||
905 | inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); | ||
906 | if (IS_ERR(inode)) { | ||
907 | err = PTR_ERR(inode); | ||
908 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", | ||
909 | err); | ||
910 | goto error; | ||
911 | } | ||
912 | dentry->d_op = &v9fs_cached_dentry_operations; | ||
913 | d_instantiate(dentry, inode); | ||
914 | err = v9fs_fid_add(dentry, fid); | ||
915 | if (err < 0) | ||
916 | goto error; | ||
917 | fid = NULL; | ||
918 | } | ||
919 | error: | ||
920 | if (fid) | ||
921 | p9_client_clunk(fid); | ||
922 | return err; | ||
923 | } | ||
924 | |||
655 | /** | 925 | /** |
656 | * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode | 926 | * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode |
657 | * @dir: inode that is being walked from | 927 | * @dir: inode that is being walked from |
@@ -678,6 +948,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
678 | 948 | ||
679 | sb = dir->i_sb; | 949 | sb = dir->i_sb; |
680 | v9ses = v9fs_inode2v9ses(dir); | 950 | v9ses = v9fs_inode2v9ses(dir); |
951 | /* We can walk d_parent because we hold the dir->i_mutex */ | ||
681 | dfid = v9fs_fid_lookup(dentry->d_parent); | 952 | dfid = v9fs_fid_lookup(dentry->d_parent); |
682 | if (IS_ERR(dfid)) | 953 | if (IS_ERR(dfid)) |
683 | return ERR_CAST(dfid); | 954 | return ERR_CAST(dfid); |
@@ -785,27 +1056,33 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
785 | goto clunk_olddir; | 1056 | goto clunk_olddir; |
786 | } | 1057 | } |
787 | 1058 | ||
1059 | down_write(&v9ses->rename_sem); | ||
788 | if (v9fs_proto_dotl(v9ses)) { | 1060 | if (v9fs_proto_dotl(v9ses)) { |
789 | retval = p9_client_rename(oldfid, newdirfid, | 1061 | retval = p9_client_rename(oldfid, newdirfid, |
790 | (char *) new_dentry->d_name.name); | 1062 | (char *) new_dentry->d_name.name); |
791 | if (retval != -ENOSYS) | 1063 | if (retval != -ENOSYS) |
792 | goto clunk_newdir; | 1064 | goto clunk_newdir; |
793 | } | 1065 | } |
1066 | if (old_dentry->d_parent != new_dentry->d_parent) { | ||
1067 | /* | ||
1068 | * 9P .u can only handle file rename in the same directory | ||
1069 | */ | ||
794 | 1070 | ||
795 | /* 9P can only handle file rename in the same directory */ | ||
796 | if (memcmp(&olddirfid->qid, &newdirfid->qid, sizeof(newdirfid->qid))) { | ||
797 | P9_DPRINTK(P9_DEBUG_ERROR, | 1071 | P9_DPRINTK(P9_DEBUG_ERROR, |
798 | "old dir and new dir are different\n"); | 1072 | "old dir and new dir are different\n"); |
799 | retval = -EXDEV; | 1073 | retval = -EXDEV; |
800 | goto clunk_newdir; | 1074 | goto clunk_newdir; |
801 | } | 1075 | } |
802 | |||
803 | v9fs_blank_wstat(&wstat); | 1076 | v9fs_blank_wstat(&wstat); |
804 | wstat.muid = v9ses->uname; | 1077 | wstat.muid = v9ses->uname; |
805 | wstat.name = (char *) new_dentry->d_name.name; | 1078 | wstat.name = (char *) new_dentry->d_name.name; |
806 | retval = p9_client_wstat(oldfid, &wstat); | 1079 | retval = p9_client_wstat(oldfid, &wstat); |
807 | 1080 | ||
808 | clunk_newdir: | 1081 | clunk_newdir: |
1082 | if (!retval) | ||
1083 | /* successful rename */ | ||
1084 | d_move(old_dentry, new_dentry); | ||
1085 | up_write(&v9ses->rename_sem); | ||
809 | p9_client_clunk(newdirfid); | 1086 | p9_client_clunk(newdirfid); |
810 | 1087 | ||
811 | clunk_olddir: | 1088 | clunk_olddir: |
@@ -853,6 +1130,42 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
853 | return 0; | 1130 | return 0; |
854 | } | 1131 | } |
855 | 1132 | ||
1133 | static int | ||
1134 | v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, | ||
1135 | struct kstat *stat) | ||
1136 | { | ||
1137 | int err; | ||
1138 | struct v9fs_session_info *v9ses; | ||
1139 | struct p9_fid *fid; | ||
1140 | struct p9_stat_dotl *st; | ||
1141 | |||
1142 | P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); | ||
1143 | err = -EPERM; | ||
1144 | v9ses = v9fs_inode2v9ses(dentry->d_inode); | ||
1145 | if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) | ||
1146 | return simple_getattr(mnt, dentry, stat); | ||
1147 | |||
1148 | fid = v9fs_fid_lookup(dentry); | ||
1149 | if (IS_ERR(fid)) | ||
1150 | return PTR_ERR(fid); | ||
1151 | |||
1152 | /* Ask for all the fields in stat structure. Server will return | ||
1153 | * whatever it supports | ||
1154 | */ | ||
1155 | |||
1156 | st = p9_client_getattr_dotl(fid, P9_STATS_ALL); | ||
1157 | if (IS_ERR(st)) | ||
1158 | return PTR_ERR(st); | ||
1159 | |||
1160 | v9fs_stat2inode_dotl(st, dentry->d_inode); | ||
1161 | generic_fillattr(dentry->d_inode, stat); | ||
1162 | /* Change block size to what the server returned */ | ||
1163 | stat->blksize = st->st_blksize; | ||
1164 | |||
1165 | kfree(st); | ||
1166 | return 0; | ||
1167 | } | ||
1168 | |||
856 | /** | 1169 | /** |
857 | * v9fs_vfs_setattr - set file metadata | 1170 | * v9fs_vfs_setattr - set file metadata |
858 | * @dentry: file whose metadata to set | 1171 | * @dentry: file whose metadata to set |
@@ -903,6 +1216,49 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) | |||
903 | } | 1216 | } |
904 | 1217 | ||
905 | /** | 1218 | /** |
1219 | * v9fs_vfs_setattr_dotl - set file metadata | ||
1220 | * @dentry: file whose metadata to set | ||
1221 | * @iattr: metadata assignment structure | ||
1222 | * | ||
1223 | */ | ||
1224 | |||
1225 | static int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) | ||
1226 | { | ||
1227 | int retval; | ||
1228 | struct v9fs_session_info *v9ses; | ||
1229 | struct p9_fid *fid; | ||
1230 | struct p9_iattr_dotl p9attr; | ||
1231 | |||
1232 | P9_DPRINTK(P9_DEBUG_VFS, "\n"); | ||
1233 | |||
1234 | retval = inode_change_ok(dentry->d_inode, iattr); | ||
1235 | if (retval) | ||
1236 | return retval; | ||
1237 | |||
1238 | p9attr.valid = iattr->ia_valid; | ||
1239 | p9attr.mode = iattr->ia_mode; | ||
1240 | p9attr.uid = iattr->ia_uid; | ||
1241 | p9attr.gid = iattr->ia_gid; | ||
1242 | p9attr.size = iattr->ia_size; | ||
1243 | p9attr.atime_sec = iattr->ia_atime.tv_sec; | ||
1244 | p9attr.atime_nsec = iattr->ia_atime.tv_nsec; | ||
1245 | p9attr.mtime_sec = iattr->ia_mtime.tv_sec; | ||
1246 | p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; | ||
1247 | |||
1248 | retval = -EPERM; | ||
1249 | v9ses = v9fs_inode2v9ses(dentry->d_inode); | ||
1250 | fid = v9fs_fid_lookup(dentry); | ||
1251 | if (IS_ERR(fid)) | ||
1252 | return PTR_ERR(fid); | ||
1253 | |||
1254 | retval = p9_client_setattr(fid, &p9attr); | ||
1255 | if (retval >= 0) | ||
1256 | retval = inode_setattr(dentry->d_inode, iattr); | ||
1257 | |||
1258 | return retval; | ||
1259 | } | ||
1260 | |||
1261 | /** | ||
906 | * v9fs_stat2inode - populate an inode structure with mistat info | 1262 | * v9fs_stat2inode - populate an inode structure with mistat info |
907 | * @stat: Plan 9 metadata (mistat) structure | 1263 | * @stat: Plan 9 metadata (mistat) structure |
908 | * @inode: inode to populate | 1264 | * @inode: inode to populate |
@@ -980,6 +1336,77 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, | |||
980 | } | 1336 | } |
981 | 1337 | ||
982 | /** | 1338 | /** |
1339 | * v9fs_stat2inode_dotl - populate an inode structure with stat info | ||
1340 | * @stat: stat structure | ||
1341 | * @inode: inode to populate | ||
1342 | * @sb: superblock of filesystem | ||
1343 | * | ||
1344 | */ | ||
1345 | |||
1346 | void | ||
1347 | v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) | ||
1348 | { | ||
1349 | |||
1350 | if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { | ||
1351 | inode->i_atime.tv_sec = stat->st_atime_sec; | ||
1352 | inode->i_atime.tv_nsec = stat->st_atime_nsec; | ||
1353 | inode->i_mtime.tv_sec = stat->st_mtime_sec; | ||
1354 | inode->i_mtime.tv_nsec = stat->st_mtime_nsec; | ||
1355 | inode->i_ctime.tv_sec = stat->st_ctime_sec; | ||
1356 | inode->i_ctime.tv_nsec = stat->st_ctime_nsec; | ||
1357 | inode->i_uid = stat->st_uid; | ||
1358 | inode->i_gid = stat->st_gid; | ||
1359 | inode->i_nlink = stat->st_nlink; | ||
1360 | inode->i_mode = stat->st_mode; | ||
1361 | inode->i_rdev = new_decode_dev(stat->st_rdev); | ||
1362 | |||
1363 | if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) | ||
1364 | init_special_inode(inode, inode->i_mode, inode->i_rdev); | ||
1365 | |||
1366 | i_size_write(inode, stat->st_size); | ||
1367 | inode->i_blocks = stat->st_blocks; | ||
1368 | } else { | ||
1369 | if (stat->st_result_mask & P9_STATS_ATIME) { | ||
1370 | inode->i_atime.tv_sec = stat->st_atime_sec; | ||
1371 | inode->i_atime.tv_nsec = stat->st_atime_nsec; | ||
1372 | } | ||
1373 | if (stat->st_result_mask & P9_STATS_MTIME) { | ||
1374 | inode->i_mtime.tv_sec = stat->st_mtime_sec; | ||
1375 | inode->i_mtime.tv_nsec = stat->st_mtime_nsec; | ||
1376 | } | ||
1377 | if (stat->st_result_mask & P9_STATS_CTIME) { | ||
1378 | inode->i_ctime.tv_sec = stat->st_ctime_sec; | ||
1379 | inode->i_ctime.tv_nsec = stat->st_ctime_nsec; | ||
1380 | } | ||
1381 | if (stat->st_result_mask & P9_STATS_UID) | ||
1382 | inode->i_uid = stat->st_uid; | ||
1383 | if (stat->st_result_mask & P9_STATS_GID) | ||
1384 | inode->i_gid = stat->st_gid; | ||
1385 | if (stat->st_result_mask & P9_STATS_NLINK) | ||
1386 | inode->i_nlink = stat->st_nlink; | ||
1387 | if (stat->st_result_mask & P9_STATS_MODE) { | ||
1388 | inode->i_mode = stat->st_mode; | ||
1389 | if ((S_ISBLK(inode->i_mode)) || | ||
1390 | (S_ISCHR(inode->i_mode))) | ||
1391 | init_special_inode(inode, inode->i_mode, | ||
1392 | inode->i_rdev); | ||
1393 | } | ||
1394 | if (stat->st_result_mask & P9_STATS_RDEV) | ||
1395 | inode->i_rdev = new_decode_dev(stat->st_rdev); | ||
1396 | if (stat->st_result_mask & P9_STATS_SIZE) | ||
1397 | i_size_write(inode, stat->st_size); | ||
1398 | if (stat->st_result_mask & P9_STATS_BLOCKS) | ||
1399 | inode->i_blocks = stat->st_blocks; | ||
1400 | } | ||
1401 | if (stat->st_result_mask & P9_STATS_GEN) | ||
1402 | inode->i_generation = stat->st_gen; | ||
1403 | |||
1404 | /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION | ||
1405 | * because the inode structure does not have fields for them. | ||
1406 | */ | ||
1407 | } | ||
1408 | |||
1409 | /** | ||
983 | * v9fs_qid2ino - convert qid into inode number | 1410 | * v9fs_qid2ino - convert qid into inode number |
984 | * @qid: qid to hash | 1411 | * @qid: qid to hash |
985 | * | 1412 | * |
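v9fs_stat2inode_dotl() above only trusts the fields the server says it filled in, as reported in st_result_mask; a caller may request P9_STATS_ALL and still receive only the basic set. A condensed sketch of that contract, reusing the calls and mask bits from this diff (error handling trimmed):

/* Ask for everything, consume only what st_result_mask marks valid. */
struct p9_stat_dotl *st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
if (!IS_ERR(st)) {
	if (st->st_result_mask & P9_STATS_SIZE)
		i_size_write(inode, st->st_size);
	if (st->st_result_mask & P9_STATS_GEN)
		inode->i_generation = st->st_gen;
	kfree(st);
}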
@@ -1022,7 +1449,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) | |||
1022 | if (IS_ERR(fid)) | 1449 | if (IS_ERR(fid)) |
1023 | return PTR_ERR(fid); | 1450 | return PTR_ERR(fid); |
1024 | 1451 | ||
1025 | if (!v9fs_proto_dotu(v9ses)) | 1452 | if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) |
1026 | return -EBADF; | 1453 | return -EBADF; |
1027 | 1454 | ||
1028 | st = p9_client_stat(fid); | 1455 | st = p9_client_stat(fid); |
@@ -1128,6 +1555,99 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, | |||
1128 | } | 1555 | } |
1129 | 1556 | ||
1130 | /** | 1557 | /** |
1558 | * v9fs_vfs_symlink_dotl - helper function to create symlinks | ||
1559 | * @dir: directory inode containing symlink | ||
1560 | * @dentry: dentry for symlink | ||
1561 | * @symname: symlink data | ||
1562 | * | ||
1563 | * See Also: 9P2000.L RFC for more information | ||
1564 | * | ||
1565 | */ | ||
1566 | |||
1567 | static int | ||
1568 | v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, | ||
1569 | const char *symname) | ||
1570 | { | ||
1571 | struct v9fs_session_info *v9ses; | ||
1572 | struct p9_fid *dfid; | ||
1573 | struct p9_fid *fid = NULL; | ||
1574 | struct inode *inode; | ||
1575 | struct p9_qid qid; | ||
1576 | char *name; | ||
1577 | int err; | ||
1578 | gid_t gid; | ||
1579 | |||
1580 | name = (char *) dentry->d_name.name; | ||
1581 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n", | ||
1582 | dir->i_ino, name, symname); | ||
1583 | v9ses = v9fs_inode2v9ses(dir); | ||
1584 | |||
1585 | dfid = v9fs_fid_lookup(dentry->d_parent); | ||
1586 | if (IS_ERR(dfid)) { | ||
1587 | err = PTR_ERR(dfid); | ||
1588 | P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); | ||
1589 | return err; | ||
1590 | } | ||
1591 | |||
1592 | gid = v9fs_get_fsgid_for_create(dir); | ||
1593 | |||
1594 | if (gid < 0) { | ||
1595 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_egid failed %d\n", gid); | ||
1596 | goto error; | ||
1597 | } | ||
1598 | |||
1599 | /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */ | ||
1600 | err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); | ||
1601 | |||
1602 | if (err < 0) { | ||
1603 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); | ||
1604 | goto error; | ||
1605 | } | ||
1606 | |||
1607 | if (v9ses->cache) { | ||
1608 | /* Now walk from the parent so we can get an unopened fid. */ | ||
1609 | fid = p9_client_walk(dfid, 1, &name, 1); | ||
1610 | if (IS_ERR(fid)) { | ||
1611 | err = PTR_ERR(fid); | ||
1612 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", | ||
1613 | err); | ||
1614 | fid = NULL; | ||
1615 | goto error; | ||
1616 | } | ||
1617 | |||
1618 | /* instantiate inode and assign the unopened fid to dentry */ | ||
1619 | inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); | ||
1620 | if (IS_ERR(inode)) { | ||
1621 | err = PTR_ERR(inode); | ||
1622 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", | ||
1623 | err); | ||
1624 | goto error; | ||
1625 | } | ||
1626 | dentry->d_op = &v9fs_cached_dentry_operations; | ||
1627 | d_instantiate(dentry, inode); | ||
1628 | err = v9fs_fid_add(dentry, fid); | ||
1629 | if (err < 0) | ||
1630 | goto error; | ||
1631 | fid = NULL; | ||
1632 | } else { | ||
1633 | /* Not in cached mode. No need to populate inode with stat */ | ||
1634 | inode = v9fs_get_inode(dir->i_sb, S_IFLNK); | ||
1635 | if (IS_ERR(inode)) { | ||
1636 | err = PTR_ERR(inode); | ||
1637 | goto error; | ||
1638 | } | ||
1639 | dentry->d_op = &v9fs_dentry_operations; | ||
1640 | d_instantiate(dentry, inode); | ||
1641 | } | ||
1642 | |||
1643 | error: | ||
1644 | if (fid) | ||
1645 | p9_client_clunk(fid); | ||
1646 | |||
1647 | return err; | ||
1648 | } | ||
1649 | |||
1650 | /** | ||
1131 | * v9fs_vfs_symlink - helper function to create symlinks | 1651 | * v9fs_vfs_symlink - helper function to create symlinks |
1132 | * @dir: directory inode containing symlink | 1652 | * @dir: directory inode containing symlink |
1133 | * @dentry: dentry for symlink | 1653 | * @dentry: dentry for symlink |
@@ -1186,6 +1706,76 @@ clunk_fid: | |||
1186 | } | 1706 | } |
1187 | 1707 | ||
1188 | /** | 1708 | /** |
1709 | * v9fs_vfs_link_dotl - create a hardlink for dotl | ||
1710 | * @old_dentry: dentry for file to link to | ||
1711 | * @dir: inode destination for new link | ||
1712 | * @dentry: dentry for link | ||
1713 | * | ||
1714 | */ | ||
1715 | |||
1716 | static int | ||
1717 | v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, | ||
1718 | struct dentry *dentry) | ||
1719 | { | ||
1720 | int err; | ||
1721 | struct p9_fid *dfid, *oldfid; | ||
1722 | char *name; | ||
1723 | struct v9fs_session_info *v9ses; | ||
1724 | struct dentry *dir_dentry; | ||
1725 | |||
1726 | P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n", | ||
1727 | dir->i_ino, old_dentry->d_name.name, | ||
1728 | dentry->d_name.name); | ||
1729 | |||
1730 | v9ses = v9fs_inode2v9ses(dir); | ||
1731 | dir_dentry = v9fs_dentry_from_dir_inode(dir); | ||
1732 | dfid = v9fs_fid_lookup(dir_dentry); | ||
1733 | if (IS_ERR(dfid)) | ||
1734 | return PTR_ERR(dfid); | ||
1735 | |||
1736 | oldfid = v9fs_fid_lookup(old_dentry); | ||
1737 | if (IS_ERR(oldfid)) | ||
1738 | return PTR_ERR(oldfid); | ||
1739 | |||
1740 | name = (char *) dentry->d_name.name; | ||
1741 | |||
1742 | err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); | ||
1743 | |||
1744 | if (err < 0) { | ||
1745 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); | ||
1746 | return err; | ||
1747 | } | ||
1748 | |||
1749 | if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { | ||
1750 | /* Get the latest stat info from server. */ | ||
1751 | struct p9_fid *fid; | ||
1752 | struct p9_stat_dotl *st; | ||
1753 | |||
1754 | fid = v9fs_fid_lookup(old_dentry); | ||
1755 | if (IS_ERR(fid)) | ||
1756 | return PTR_ERR(fid); | ||
1757 | |||
1758 | st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); | ||
1759 | if (IS_ERR(st)) | ||
1760 | return PTR_ERR(st); | ||
1761 | |||
1762 | v9fs_stat2inode_dotl(st, old_dentry->d_inode); | ||
1763 | |||
1764 | kfree(st); | ||
1765 | } else { | ||
1766 | /* Caching disabled. No need to get upto date stat info. | ||
1767 | * This dentry will be released immediately. So, just i_count++ | ||
1768 | */ | ||
1769 | atomic_inc(&old_dentry->d_inode->i_count); | ||
1770 | } | ||
1771 | |||
1772 | dentry->d_op = old_dentry->d_op; | ||
1773 | d_instantiate(dentry, old_dentry->d_inode); | ||
1774 | |||
1775 | return err; | ||
1776 | } | ||
1777 | |||
1778 | /** | ||
1189 | * v9fs_vfs_mknod - create a special file | 1779 | * v9fs_vfs_mknod - create a special file |
1190 | * @dir: inode destination for new link | 1780 | * @dir: inode destination for new link |
1191 | * @dentry: dentry for file | 1781 | * @dentry: dentry for file |
@@ -1230,6 +1820,100 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) | |||
1230 | return retval; | 1820 | return retval; |
1231 | } | 1821 | } |
1232 | 1822 | ||
1823 | /** | ||
1824 | * v9fs_vfs_mknod_dotl - create a special file | ||
1825 | * @dir: inode destination for new link | ||
1826 | * @dentry: dentry for file | ||
1827 | * @mode: mode for creation | ||
1828 | * @rdev: device associated with special file | ||
1829 | * | ||
1830 | */ | ||
1831 | static int | ||
1832 | v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode, | ||
1833 | dev_t rdev) | ||
1834 | { | ||
1835 | int err; | ||
1836 | char *name; | ||
1837 | struct v9fs_session_info *v9ses; | ||
1838 | struct p9_fid *fid = NULL, *dfid = NULL; | ||
1839 | struct inode *inode; | ||
1840 | gid_t gid; | ||
1841 | struct p9_qid qid; | ||
1842 | struct dentry *dir_dentry; | ||
1843 | |||
1844 | P9_DPRINTK(P9_DEBUG_VFS, | ||
1845 | " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, | ||
1846 | dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev)); | ||
1847 | |||
1848 | if (!new_valid_dev(rdev)) | ||
1849 | return -EINVAL; | ||
1850 | |||
1851 | v9ses = v9fs_inode2v9ses(dir); | ||
1852 | dir_dentry = v9fs_dentry_from_dir_inode(dir); | ||
1853 | dfid = v9fs_fid_lookup(dir_dentry); | ||
1854 | if (IS_ERR(dfid)) { | ||
1855 | err = PTR_ERR(dfid); | ||
1856 | P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); | ||
1857 | dfid = NULL; | ||
1858 | goto error; | ||
1859 | } | ||
1860 | |||
1861 | gid = v9fs_get_fsgid_for_create(dir); | ||
1862 | if (gid < 0) { | ||
1863 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n"); | ||
1864 | goto error; | ||
1865 | } | ||
1866 | |||
1867 | name = (char *) dentry->d_name.name; | ||
1868 | |||
1869 | err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); | ||
1870 | if (err < 0) | ||
1871 | goto error; | ||
1872 | |||
1873 | /* instantiate inode and assign the unopened fid to the dentry */ | ||
1874 | if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { | ||
1875 | fid = p9_client_walk(dfid, 1, &name, 1); | ||
1876 | if (IS_ERR(fid)) { | ||
1877 | err = PTR_ERR(fid); | ||
1878 | P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", | ||
1879 | err); | ||
1880 | fid = NULL; | ||
1881 | goto error; | ||
1882 | } | ||
1883 | |||
1884 | inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); | ||
1885 | if (IS_ERR(inode)) { | ||
1886 | err = PTR_ERR(inode); | ||
1887 | P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", | ||
1888 | err); | ||
1889 | goto error; | ||
1890 | } | ||
1891 | dentry->d_op = &v9fs_cached_dentry_operations; | ||
1892 | d_instantiate(dentry, inode); | ||
1893 | err = v9fs_fid_add(dentry, fid); | ||
1894 | if (err < 0) | ||
1895 | goto error; | ||
1896 | fid = NULL; | ||
1897 | } else { | ||
1898 | /* | ||
1899 | * Not in cached mode. No need to populate inode with stat. | ||
1900 | * socket syscall returns a fd, so we need instantiate | ||
1901 | */ | ||
1902 | inode = v9fs_get_inode(dir->i_sb, mode); | ||
1903 | if (IS_ERR(inode)) { | ||
1904 | err = PTR_ERR(inode); | ||
1905 | goto error; | ||
1906 | } | ||
1907 | dentry->d_op = &v9fs_dentry_operations; | ||
1908 | d_instantiate(dentry, inode); | ||
1909 | } | ||
1910 | |||
1911 | error: | ||
1912 | if (fid) | ||
1913 | p9_client_clunk(fid); | ||
1914 | return err; | ||
1915 | } | ||
1916 | |||
1233 | static const struct inode_operations v9fs_dir_inode_operations_dotu = { | 1917 | static const struct inode_operations v9fs_dir_inode_operations_dotu = { |
1234 | .create = v9fs_vfs_create, | 1918 | .create = v9fs_vfs_create, |
1235 | .lookup = v9fs_vfs_lookup, | 1919 | .lookup = v9fs_vfs_lookup, |
@@ -1238,24 +1922,29 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { | |||
1238 | .unlink = v9fs_vfs_unlink, | 1922 | .unlink = v9fs_vfs_unlink, |
1239 | .mkdir = v9fs_vfs_mkdir, | 1923 | .mkdir = v9fs_vfs_mkdir, |
1240 | .rmdir = v9fs_vfs_rmdir, | 1924 | .rmdir = v9fs_vfs_rmdir, |
1241 | .mknod = v9fs_vfs_mknod, | 1925 | .mknod = v9fs_vfs_mknod_dotl, |
1242 | .rename = v9fs_vfs_rename, | 1926 | .rename = v9fs_vfs_rename, |
1243 | .getattr = v9fs_vfs_getattr, | 1927 | .getattr = v9fs_vfs_getattr, |
1244 | .setattr = v9fs_vfs_setattr, | 1928 | .setattr = v9fs_vfs_setattr, |
1245 | }; | 1929 | }; |
1246 | 1930 | ||
1247 | static const struct inode_operations v9fs_dir_inode_operations_dotl = { | 1931 | static const struct inode_operations v9fs_dir_inode_operations_dotl = { |
1248 | .create = v9fs_vfs_create, | 1932 | .create = v9fs_vfs_create_dotl, |
1249 | .lookup = v9fs_vfs_lookup, | 1933 | .lookup = v9fs_vfs_lookup, |
1250 | .symlink = v9fs_vfs_symlink, | 1934 | .link = v9fs_vfs_link_dotl, |
1251 | .link = v9fs_vfs_link, | 1935 | .symlink = v9fs_vfs_symlink_dotl, |
1252 | .unlink = v9fs_vfs_unlink, | 1936 | .unlink = v9fs_vfs_unlink, |
1253 | .mkdir = v9fs_vfs_mkdir, | 1937 | .mkdir = v9fs_vfs_mkdir_dotl, |
1254 | .rmdir = v9fs_vfs_rmdir, | 1938 | .rmdir = v9fs_vfs_rmdir, |
1255 | .mknod = v9fs_vfs_mknod, | 1939 | .mknod = v9fs_vfs_mknod_dotl, |
1256 | .rename = v9fs_vfs_rename, | 1940 | .rename = v9fs_vfs_rename, |
1257 | .getattr = v9fs_vfs_getattr, | 1941 | .getattr = v9fs_vfs_getattr_dotl, |
1258 | .setattr = v9fs_vfs_setattr, | 1942 | .setattr = v9fs_vfs_setattr_dotl, |
1943 | .setxattr = generic_setxattr, | ||
1944 | .getxattr = generic_getxattr, | ||
1945 | .removexattr = generic_removexattr, | ||
1946 | .listxattr = v9fs_listxattr, | ||
1947 | |||
1259 | }; | 1948 | }; |
1260 | 1949 | ||
1261 | static const struct inode_operations v9fs_dir_inode_operations = { | 1950 | static const struct inode_operations v9fs_dir_inode_operations = { |
@@ -1276,8 +1965,12 @@ static const struct inode_operations v9fs_file_inode_operations = { | |||
1276 | }; | 1965 | }; |
1277 | 1966 | ||
1278 | static const struct inode_operations v9fs_file_inode_operations_dotl = { | 1967 | static const struct inode_operations v9fs_file_inode_operations_dotl = { |
1279 | .getattr = v9fs_vfs_getattr, | 1968 | .getattr = v9fs_vfs_getattr_dotl, |
1280 | .setattr = v9fs_vfs_setattr, | 1969 | .setattr = v9fs_vfs_setattr_dotl, |
1970 | .setxattr = generic_setxattr, | ||
1971 | .getxattr = generic_getxattr, | ||
1972 | .removexattr = generic_removexattr, | ||
1973 | .listxattr = v9fs_listxattr, | ||
1281 | }; | 1974 | }; |
1282 | 1975 | ||
1283 | static const struct inode_operations v9fs_symlink_inode_operations = { | 1976 | static const struct inode_operations v9fs_symlink_inode_operations = { |
@@ -1292,6 +1985,10 @@ static const struct inode_operations v9fs_symlink_inode_operations_dotl = { | |||
1292 | .readlink = generic_readlink, | 1985 | .readlink = generic_readlink, |
1293 | .follow_link = v9fs_vfs_follow_link, | 1986 | .follow_link = v9fs_vfs_follow_link, |
1294 | .put_link = v9fs_vfs_put_link, | 1987 | .put_link = v9fs_vfs_put_link, |
1295 | .getattr = v9fs_vfs_getattr, | 1988 | .getattr = v9fs_vfs_getattr_dotl, |
1296 | .setattr = v9fs_vfs_setattr, | 1989 | .setattr = v9fs_vfs_setattr_dotl, |
1990 | .setxattr = generic_setxattr, | ||
1991 | .getxattr = generic_getxattr, | ||
1992 | .removexattr = generic_removexattr, | ||
1993 | .listxattr = v9fs_listxattr, | ||
1297 | }; | 1994 | }; |
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index be74d020436e..4b9ede0b41b7 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "v9fs.h" | 45 | #include "v9fs.h" |
46 | #include "v9fs_vfs.h" | 46 | #include "v9fs_vfs.h" |
47 | #include "fid.h" | 47 | #include "fid.h" |
48 | #include "xattr.h" | ||
48 | 49 | ||
49 | static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; | 50 | static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; |
50 | 51 | ||
@@ -77,9 +78,10 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, | |||
77 | sb->s_blocksize_bits = fls(v9ses->maxdata - 1); | 78 | sb->s_blocksize_bits = fls(v9ses->maxdata - 1); |
78 | sb->s_blocksize = 1 << sb->s_blocksize_bits; | 79 | sb->s_blocksize = 1 << sb->s_blocksize_bits; |
79 | sb->s_magic = V9FS_MAGIC; | 80 | sb->s_magic = V9FS_MAGIC; |
80 | if (v9fs_proto_dotl(v9ses)) | 81 | if (v9fs_proto_dotl(v9ses)) { |
81 | sb->s_op = &v9fs_super_ops_dotl; | 82 | sb->s_op = &v9fs_super_ops_dotl; |
82 | else | 83 | sb->s_xattr = v9fs_xattr_handlers; |
84 | } else | ||
83 | sb->s_op = &v9fs_super_ops; | 85 | sb->s_op = &v9fs_super_ops; |
84 | sb->s_bdi = &v9ses->bdi; | 86 | sb->s_bdi = &v9ses->bdi; |
85 | 87 | ||
@@ -107,7 +109,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
107 | struct inode *inode = NULL; | 109 | struct inode *inode = NULL; |
108 | struct dentry *root = NULL; | 110 | struct dentry *root = NULL; |
109 | struct v9fs_session_info *v9ses = NULL; | 111 | struct v9fs_session_info *v9ses = NULL; |
110 | struct p9_wstat *st = NULL; | ||
111 | int mode = S_IRWXUGO | S_ISVTX; | 112 | int mode = S_IRWXUGO | S_ISVTX; |
112 | struct p9_fid *fid; | 113 | struct p9_fid *fid; |
113 | int retval = 0; | 114 | int retval = 0; |
@@ -124,16 +125,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
124 | goto close_session; | 125 | goto close_session; |
125 | } | 126 | } |
126 | 127 | ||
127 | st = p9_client_stat(fid); | ||
128 | if (IS_ERR(st)) { | ||
129 | retval = PTR_ERR(st); | ||
130 | goto clunk_fid; | ||
131 | } | ||
132 | |||
133 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); | 128 | sb = sget(fs_type, NULL, v9fs_set_super, v9ses); |
134 | if (IS_ERR(sb)) { | 129 | if (IS_ERR(sb)) { |
135 | retval = PTR_ERR(sb); | 130 | retval = PTR_ERR(sb); |
136 | goto free_stat; | 131 | goto clunk_fid; |
137 | } | 132 | } |
138 | v9fs_fill_super(sb, v9ses, flags, data); | 133 | v9fs_fill_super(sb, v9ses, flags, data); |
139 | 134 | ||
@@ -151,22 +146,38 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, | |||
151 | } | 146 | } |
152 | 147 | ||
153 | sb->s_root = root; | 148 | sb->s_root = root; |
154 | root->d_inode->i_ino = v9fs_qid2ino(&st->qid); | ||
155 | 149 | ||
156 | v9fs_stat2inode(st, root->d_inode, sb); | 150 | if (v9fs_proto_dotl(v9ses)) { |
151 | struct p9_stat_dotl *st = NULL; | ||
152 | st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); | ||
153 | if (IS_ERR(st)) { | ||
154 | retval = PTR_ERR(st); | ||
155 | goto clunk_fid; | ||
156 | } | ||
157 | |||
158 | v9fs_stat2inode_dotl(st, root->d_inode); | ||
159 | kfree(st); | ||
160 | } else { | ||
161 | struct p9_wstat *st = NULL; | ||
162 | st = p9_client_stat(fid); | ||
163 | if (IS_ERR(st)) { | ||
164 | retval = PTR_ERR(st); | ||
165 | goto clunk_fid; | ||
166 | } | ||
167 | |||
168 | root->d_inode->i_ino = v9fs_qid2ino(&st->qid); | ||
169 | v9fs_stat2inode(st, root->d_inode, sb); | ||
170 | |||
171 | p9stat_free(st); | ||
172 | kfree(st); | ||
173 | } | ||
157 | 174 | ||
158 | v9fs_fid_add(root, fid); | 175 | v9fs_fid_add(root, fid); |
159 | p9stat_free(st); | ||
160 | kfree(st); | ||
161 | 176 | ||
162 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); | 177 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); |
163 | simple_set_mnt(mnt, sb); | 178 | simple_set_mnt(mnt, sb); |
164 | return 0; | 179 | return 0; |
165 | 180 | ||
166 | free_stat: | ||
167 | p9stat_free(st); | ||
168 | kfree(st); | ||
169 | |||
170 | clunk_fid: | 181 | clunk_fid: |
171 | p9_client_clunk(fid); | 182 | p9_client_clunk(fid); |
172 | 183 | ||
@@ -176,8 +187,6 @@ close_session: | |||
176 | return retval; | 187 | return retval; |
177 | 188 | ||
178 | release_sb: | 189 | release_sb: |
179 | p9stat_free(st); | ||
180 | kfree(st); | ||
181 | deactivate_locked_super(sb); | 190 | deactivate_locked_super(sb); |
182 | return retval; | 191 | return retval; |
183 | } | 192 | } |
@@ -278,4 +287,5 @@ struct file_system_type v9fs_fs_type = { | |||
278 | .get_sb = v9fs_get_sb, | 287 | .get_sb = v9fs_get_sb, |
279 | .kill_sb = v9fs_kill_super, | 288 | .kill_sb = v9fs_kill_super, |
280 | .owner = THIS_MODULE, | 289 | .owner = THIS_MODULE, |
290 | .fs_flags = FS_RENAME_DOES_D_MOVE, | ||
281 | }; | 291 | }; |
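Assigning v9fs_xattr_handlers to sb->s_xattr in v9fs_fill_super() is what lets the generic_{set,get,remove}xattr entries in the .dotl inode operations reach the 9p-specific code: the VFS matches the attribute name against each handler's prefix and calls that handler with the prefix stripped. Below is a minimal user-space model of that dispatch; the handler table and names are illustrative, not the kernel structures.

    #include <stdio.h>
    #include <string.h>

    struct xattr_handler {
            const char *prefix;
            int (*get)(const char *name);   /* receives the name with prefix stripped */
    };

    static int user_get(const char *name)
    {
            printf("user handler asked for '%s'\n", name);
            return 0;
    }

    static const struct xattr_handler user_handler = { "user.", user_get };
    static const struct xattr_handler *handlers[] = { &user_handler, NULL };

    /* Rough analogue of the prefix matching done by the generic xattr code. */
    static int dispatch(const char *full_name)
    {
            const struct xattr_handler **h;

            for (h = handlers; *h; h++) {
                    size_t len = strlen((*h)->prefix);

                    if (!strncmp(full_name, (*h)->prefix, len))
                            return (*h)->get(full_name + len);
            }
            return -1;      /* no handler: -EOPNOTSUPP in the kernel */
    }

    int main(void)
    {
            return dispatch("user.mime_type");
    }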
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c new file mode 100644 index 000000000000..f88e5c2dc873 --- /dev/null +++ b/fs/9p/xattr.c | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2010 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2.1 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <net/9p/9p.h> | ||
19 | #include <net/9p/client.h> | ||
20 | |||
21 | #include "fid.h" | ||
22 | #include "xattr.h" | ||
23 | |||
24 | /* | ||
25 | * v9fs_xattr_get() | ||
26 | * | ||
27 | * Copy an extended attribute into the buffer | ||
28 | * provided, or compute the buffer size required. | ||
29 | * Buffer is NULL to compute the size of the buffer required. | ||
30 | * | ||
31 | * Returns a negative error number on failure, or the number of bytes | ||
32 | * used / required on success. | ||
33 | */ | ||
34 | ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name, | ||
35 | void *buffer, size_t buffer_size) | ||
36 | { | ||
37 | ssize_t retval; | ||
38 | int msize, read_count; | ||
39 | u64 offset = 0, attr_size; | ||
40 | struct p9_fid *fid, *attr_fid; | ||
41 | |||
42 | P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n", | ||
43 | __func__, name, buffer_size); | ||
44 | |||
45 | fid = v9fs_fid_lookup(dentry); | ||
46 | if (IS_ERR(fid)) | ||
47 | return PTR_ERR(fid); | ||
48 | |||
49 | attr_fid = p9_client_xattrwalk(fid, name, &attr_size); | ||
50 | if (IS_ERR(attr_fid)) { | ||
51 | retval = PTR_ERR(attr_fid); | ||
52 | P9_DPRINTK(P9_DEBUG_VFS, | ||
53 | "p9_client_attrwalk failed %zd\n", retval); | ||
54 | attr_fid = NULL; | ||
55 | goto error; | ||
56 | } | ||
57 | if (!buffer_size) { | ||
58 | /* request to get the attr_size */ | ||
59 | retval = attr_size; | ||
60 | goto error; | ||
61 | } | ||
62 | if (attr_size > buffer_size) { | ||
63 | retval = -ERANGE; | ||
64 | goto error; | ||
65 | } | ||
66 | msize = attr_fid->clnt->msize; | ||
67 | while (attr_size) { | ||
68 | if (attr_size > (msize - P9_IOHDRSZ)) | ||
69 | read_count = msize - P9_IOHDRSZ; | ||
70 | else | ||
71 | read_count = attr_size; | ||
72 | read_count = p9_client_read(attr_fid, ((char *)buffer)+offset, | ||
73 | NULL, offset, read_count); | ||
74 | if (read_count < 0) { | ||
75 | /* error in xattr read */ | ||
76 | retval = read_count; | ||
77 | goto error; | ||
78 | } | ||
79 | offset += read_count; | ||
80 | attr_size -= read_count; | ||
81 | } | ||
82 | /* Total read xattr bytes */ | ||
83 | retval = offset; | ||
84 | error: | ||
85 | if (attr_fid) | ||
86 | p9_client_clunk(attr_fid); | ||
87 | return retval; | ||
88 | |||
89 | } | ||
90 | |||
91 | /* | ||
92 | * v9fs_xattr_set() | ||
93 | * | ||
94 | * Create, replace or remove an extended attribute for this inode. Buffer | ||
95 | * is NULL to remove an existing extended attribute, and non-NULL to | ||
96 | * either replace an existing extended attribute, or create a new extended | ||
97 | * attribute. The flags XATTR_REPLACE and XATTR_CREATE | ||
98 | * specify that an extended attribute must exist and must not exist | ||
99 | * previous to the call, respectively. | ||
100 | * | ||
101 | * Returns 0, or a negative error number on failure. | ||
102 | */ | ||
103 | int v9fs_xattr_set(struct dentry *dentry, const char *name, | ||
104 | const void *value, size_t value_len, int flags) | ||
105 | { | ||
106 | u64 offset = 0; | ||
107 | int retval, msize, write_count; | ||
108 | struct p9_fid *fid = NULL; | ||
109 | |||
110 | P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu flags = %d\n", | ||
111 | __func__, name, value_len, flags); | ||
112 | |||
113 | fid = v9fs_fid_clone(dentry); | ||
114 | if (IS_ERR(fid)) { | ||
115 | retval = PTR_ERR(fid); | ||
116 | fid = NULL; | ||
117 | goto error; | ||
118 | } | ||
119 | /* | ||
120 | * On success fid points to xattr | ||
121 | */ | ||
122 | retval = p9_client_xattrcreate(fid, name, value_len, flags); | ||
123 | if (retval < 0) { | ||
124 | P9_DPRINTK(P9_DEBUG_VFS, | ||
125 | "p9_client_xattrcreate failed %d\n", retval); | ||
126 | goto error; | ||
127 | } | ||
128 | msize = fid->clnt->msize; | ||
129 | while (value_len) { | ||
130 | if (value_len > (msize - P9_IOHDRSZ)) | ||
131 | write_count = msize - P9_IOHDRSZ; | ||
132 | else | ||
133 | write_count = value_len; | ||
134 | write_count = p9_client_write(fid, ((char *)value)+offset, | ||
135 | NULL, offset, write_count); | ||
136 | if (write_count < 0) { | ||
137 | /* error in xattr write */ | ||
138 | retval = write_count; | ||
139 | goto error; | ||
140 | } | ||
141 | offset += write_count; | ||
142 | value_len -= write_count; | ||
143 | } | ||
144 | /* Total written xattr bytes */ | ||
145 | retval = offset; | ||
146 | error: | ||
147 | if (fid) | ||
148 | retval = p9_client_clunk(fid); | ||
149 | return retval; | ||
150 | } | ||
151 | |||
152 | ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) | ||
153 | { | ||
154 | return v9fs_xattr_get(dentry, NULL, buffer, buffer_size); | ||
155 | } | ||
156 | |||
157 | const struct xattr_handler *v9fs_xattr_handlers[] = { | ||
158 | &v9fs_xattr_user_handler, | ||
159 | NULL | ||
160 | }; | ||
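For readers following the loops in v9fs_xattr_get() and v9fs_xattr_set() above: each 9p read or write payload has to fit inside the negotiated message size minus the protocol I/O header, so the attribute value is transferred in chunks of at most msize - P9_IOHDRSZ bytes. A minimal user-space sketch of that chunking, with made-up msize/iohdrsz values standing in for the client fields:

    #include <stdio.h>
    #include <string.h>

    /* Copy 'len' bytes in chunks no larger than (msize - iohdrsz), the same
     * capping rule applied by the xattr read/write loops above. */
    static long chunked_copy(char *dst, const char *src, size_t len,
                             size_t msize, size_t iohdrsz)
    {
            size_t offset = 0, max_chunk = msize - iohdrsz;

            while (len) {
                    size_t count = len > max_chunk ? max_chunk : len;

                    /* stands in for p9_client_read()/p9_client_write() */
                    memcpy(dst + offset, src + offset, count);
                    offset += count;
                    len -= count;
            }
            return (long)offset;    /* total bytes transferred */
    }

    int main(void)
    {
            char src[10000], dst[10000];

            memset(src, 'x', sizeof(src));
            printf("copied %ld bytes\n",
                   chunked_copy(dst, src, sizeof(src), 8192, 24));
            return 0;
    }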
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h new file mode 100644 index 000000000000..9ddf672ae5c4 --- /dev/null +++ b/fs/9p/xattr.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2010 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2.1 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef FS_9P_XATTR_H | ||
15 | #define FS_9P_XATTR_H | ||
16 | |||
17 | #include <linux/xattr.h> | ||
18 | |||
19 | extern const struct xattr_handler *v9fs_xattr_handlers[]; | ||
20 | extern struct xattr_handler v9fs_xattr_user_handler; | ||
21 | |||
22 | extern ssize_t v9fs_xattr_get(struct dentry *, const char *, | ||
23 | void *, size_t); | ||
24 | extern int v9fs_xattr_set(struct dentry *, const char *, | ||
25 | const void *, size_t, int); | ||
26 | extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t); | ||
27 | #endif /* FS_9P_XATTR_H */ | ||
diff --git a/fs/9p/xattr_user.c b/fs/9p/xattr_user.c new file mode 100644 index 000000000000..d0b701b72080 --- /dev/null +++ b/fs/9p/xattr_user.c | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corporation, 2010 | ||
3 | * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of version 2.1 of the GNU Lesser General Public License | ||
7 | * as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include "xattr.h" | ||
21 | |||
22 | static int v9fs_xattr_user_get(struct dentry *dentry, const char *name, | ||
23 | void *buffer, size_t size, int type) | ||
24 | { | ||
25 | int retval; | ||
26 | char *full_name; | ||
27 | size_t name_len; | ||
28 | size_t prefix_len = XATTR_USER_PREFIX_LEN; | ||
29 | |||
30 | if (name == NULL) | ||
31 | return -EINVAL; | ||
32 | |||
33 | if (strcmp(name, "") == 0) | ||
34 | return -EINVAL; | ||
35 | |||
36 | name_len = strlen(name); | ||
37 | full_name = kmalloc(prefix_len + name_len + 1, GFP_KERNEL); | ||
38 | if (!full_name) | ||
39 | return -ENOMEM; | ||
40 | memcpy(full_name, XATTR_USER_PREFIX, prefix_len); | ||
41 | memcpy(full_name+prefix_len, name, name_len); | ||
42 | full_name[prefix_len + name_len] = '\0'; | ||
43 | |||
44 | retval = v9fs_xattr_get(dentry, full_name, buffer, size); | ||
45 | kfree(full_name); | ||
46 | return retval; | ||
47 | } | ||
48 | |||
49 | static int v9fs_xattr_user_set(struct dentry *dentry, const char *name, | ||
50 | const void *value, size_t size, int flags, int type) | ||
51 | { | ||
52 | int retval; | ||
53 | char *full_name; | ||
54 | size_t name_len; | ||
55 | size_t prefix_len = XATTR_USER_PREFIX_LEN; | ||
56 | |||
57 | if (name == NULL) | ||
58 | return -EINVAL; | ||
59 | |||
60 | if (strcmp(name, "") == 0) | ||
61 | return -EINVAL; | ||
62 | |||
63 | name_len = strlen(name); | ||
64 | full_name = kmalloc(prefix_len + name_len + 1, GFP_KERNEL); | ||
65 | if (!full_name) | ||
66 | return -ENOMEM; | ||
67 | memcpy(full_name, XATTR_USER_PREFIX, prefix_len); | ||
68 | memcpy(full_name + prefix_len, name, name_len); | ||
69 | full_name[prefix_len + name_len] = '\0'; | ||
70 | |||
71 | retval = v9fs_xattr_set(dentry, full_name, value, size, flags); | ||
72 | kfree(full_name); | ||
73 | return retval; | ||
74 | } | ||
75 | |||
76 | struct xattr_handler v9fs_xattr_user_handler = { | ||
77 | .prefix = XATTR_USER_PREFIX, | ||
78 | .get = v9fs_xattr_user_get, | ||
79 | .set = v9fs_xattr_user_set, | ||
80 | }; | ||
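The user.* handler above rebuilds the full attribute name by prepending XATTR_USER_PREFIX before calling into the common v9fs xattr code, since the VFS strips that prefix when it dispatches to a handler. A small user-space sketch of the composition, assuming the usual "user." prefix value:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define XATTR_USER_PREFIX "user."
    #define XATTR_USER_PREFIX_LEN (sizeof(XATTR_USER_PREFIX) - 1)

    /* Rebuild the full attribute name the server expects, e.g. "mime_type"
     * becomes "user.mime_type".  Caller frees the result. */
    static char *build_full_name(const char *name)
    {
            size_t name_len = strlen(name);
            char *full_name = malloc(XATTR_USER_PREFIX_LEN + name_len + 1);

            if (!full_name)
                    return NULL;
            memcpy(full_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
            memcpy(full_name + XATTR_USER_PREFIX_LEN, name, name_len + 1);
            return full_name;
    }

    int main(void)
    {
            char *n = build_full_name("mime_type");

            if (n) {
                    printf("%s\n", n);      /* prints user.mime_type */
                    free(n);
            }
            return 0;
    }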
diff --git a/fs/afs/write.c b/fs/afs/write.c index 3dab9e9948d0..722743b152d8 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -680,7 +680,6 @@ int afs_writeback_all(struct afs_vnode *vnode) | |||
680 | { | 680 | { |
681 | struct address_space *mapping = vnode->vfs_inode.i_mapping; | 681 | struct address_space *mapping = vnode->vfs_inode.i_mapping; |
682 | struct writeback_control wbc = { | 682 | struct writeback_control wbc = { |
683 | .bdi = mapping->backing_dev_info, | ||
684 | .sync_mode = WB_SYNC_ALL, | 683 | .sync_mode = WB_SYNC_ALL, |
685 | .nr_to_write = LONG_MAX, | 684 | .nr_to_write = LONG_MAX, |
686 | .range_cyclic = 1, | 685 | .range_cyclic = 1, |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index b6ab27ccf214..811384bec8de 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -68,11 +68,7 @@ | |||
68 | * Here we can be a bit looser than the data sections since this | 68 | * Here we can be a bit looser than the data sections since this |
69 | * needs to only meet arch ABI requirements. | 69 | * needs to only meet arch ABI requirements. |
70 | */ | 70 | */ |
71 | #ifdef ARCH_SLAB_MINALIGN | 71 | #define FLAT_STACK_ALIGN max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN) |
72 | #define FLAT_STACK_ALIGN (ARCH_SLAB_MINALIGN) | ||
73 | #else | ||
74 | #define FLAT_STACK_ALIGN (sizeof(void *)) | ||
75 | #endif | ||
76 | 72 | ||
77 | #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */ | 73 | #define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */ |
78 | #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */ | 74 | #define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */ |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 0d1d966b0fe4..c3df14ce2cc2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -2304,12 +2304,17 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root, | |||
2304 | return ret; | 2304 | return ret; |
2305 | } | 2305 | } |
2306 | 2306 | ||
2307 | /* | ||
2308 | * min slot controls the lowest index we're willing to push to the | ||
2309 | * right. We'll push up to and including min_slot, but no lower | ||
2310 | */ | ||
2307 | static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, | 2311 | static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, |
2308 | struct btrfs_root *root, | 2312 | struct btrfs_root *root, |
2309 | struct btrfs_path *path, | 2313 | struct btrfs_path *path, |
2310 | int data_size, int empty, | 2314 | int data_size, int empty, |
2311 | struct extent_buffer *right, | 2315 | struct extent_buffer *right, |
2312 | int free_space, u32 left_nritems) | 2316 | int free_space, u32 left_nritems, |
2317 | u32 min_slot) | ||
2313 | { | 2318 | { |
2314 | struct extent_buffer *left = path->nodes[0]; | 2319 | struct extent_buffer *left = path->nodes[0]; |
2315 | struct extent_buffer *upper = path->nodes[1]; | 2320 | struct extent_buffer *upper = path->nodes[1]; |
@@ -2327,7 +2332,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, | |||
2327 | if (empty) | 2332 | if (empty) |
2328 | nr = 0; | 2333 | nr = 0; |
2329 | else | 2334 | else |
2330 | nr = 1; | 2335 | nr = max_t(u32, 1, min_slot); |
2331 | 2336 | ||
2332 | if (path->slots[0] >= left_nritems) | 2337 | if (path->slots[0] >= left_nritems) |
2333 | push_space += data_size; | 2338 | push_space += data_size; |
@@ -2469,10 +2474,14 @@ out_unlock: | |||
2469 | * | 2474 | * |
2470 | * returns 1 if the push failed because the other node didn't have enough | 2475 | * returns 1 if the push failed because the other node didn't have enough |
2471 | * room, 0 if everything worked out and < 0 if there were major errors. | 2476 | * room, 0 if everything worked out and < 0 if there were major errors. |
2477 | * | ||
2478 | * this will push starting from min_slot to the end of the leaf. It won't | ||
2479 | * push any slot lower than min_slot | ||
2472 | */ | 2480 | */ |
2473 | static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root | 2481 | static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root |
2474 | *root, struct btrfs_path *path, int data_size, | 2482 | *root, struct btrfs_path *path, |
2475 | int empty) | 2483 | int min_data_size, int data_size, |
2484 | int empty, u32 min_slot) | ||
2476 | { | 2485 | { |
2477 | struct extent_buffer *left = path->nodes[0]; | 2486 | struct extent_buffer *left = path->nodes[0]; |
2478 | struct extent_buffer *right; | 2487 | struct extent_buffer *right; |
@@ -2514,8 +2523,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root | |||
2514 | if (left_nritems == 0) | 2523 | if (left_nritems == 0) |
2515 | goto out_unlock; | 2524 | goto out_unlock; |
2516 | 2525 | ||
2517 | return __push_leaf_right(trans, root, path, data_size, empty, | 2526 | return __push_leaf_right(trans, root, path, min_data_size, empty, |
2518 | right, free_space, left_nritems); | 2527 | right, free_space, left_nritems, min_slot); |
2519 | out_unlock: | 2528 | out_unlock: |
2520 | btrfs_tree_unlock(right); | 2529 | btrfs_tree_unlock(right); |
2521 | free_extent_buffer(right); | 2530 | free_extent_buffer(right); |
@@ -2525,12 +2534,17 @@ out_unlock: | |||
2525 | /* | 2534 | /* |
2526 | * push some data in the path leaf to the left, trying to free up at | 2535 | * push some data in the path leaf to the left, trying to free up at |
2527 | * least data_size bytes. returns zero if the push worked, nonzero otherwise | 2536 | * least data_size bytes. returns zero if the push worked, nonzero otherwise |
2537 | * | ||
2538 | * max_slot can put a limit on how far into the leaf we'll push items. The | ||
2539 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the | ||
2540 | * items | ||
2528 | */ | 2541 | */ |
2529 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, | 2542 | static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, |
2530 | struct btrfs_root *root, | 2543 | struct btrfs_root *root, |
2531 | struct btrfs_path *path, int data_size, | 2544 | struct btrfs_path *path, int data_size, |
2532 | int empty, struct extent_buffer *left, | 2545 | int empty, struct extent_buffer *left, |
2533 | int free_space, int right_nritems) | 2546 | int free_space, u32 right_nritems, |
2547 | u32 max_slot) | ||
2534 | { | 2548 | { |
2535 | struct btrfs_disk_key disk_key; | 2549 | struct btrfs_disk_key disk_key; |
2536 | struct extent_buffer *right = path->nodes[0]; | 2550 | struct extent_buffer *right = path->nodes[0]; |
@@ -2549,9 +2563,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, | |||
2549 | slot = path->slots[1]; | 2563 | slot = path->slots[1]; |
2550 | 2564 | ||
2551 | if (empty) | 2565 | if (empty) |
2552 | nr = right_nritems; | 2566 | nr = min(right_nritems, max_slot); |
2553 | else | 2567 | else |
2554 | nr = right_nritems - 1; | 2568 | nr = min(right_nritems - 1, max_slot); |
2555 | 2569 | ||
2556 | for (i = 0; i < nr; i++) { | 2570 | for (i = 0; i < nr; i++) { |
2557 | item = btrfs_item_nr(right, i); | 2571 | item = btrfs_item_nr(right, i); |
@@ -2712,10 +2726,14 @@ out: | |||
2712 | /* | 2726 | /* |
2713 | * push some data in the path leaf to the left, trying to free up at | 2727 | * push some data in the path leaf to the left, trying to free up at |
2714 | * least data_size bytes. returns zero if the push worked, nonzero otherwise | 2728 | * least data_size bytes. returns zero if the push worked, nonzero otherwise |
2729 | * | ||
2730 | * max_slot can put a limit on how far into the leaf we'll push items. The | ||
2731 | * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the | ||
2732 | * items | ||
2715 | */ | 2733 | */ |
2716 | static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root | 2734 | static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root |
2717 | *root, struct btrfs_path *path, int data_size, | 2735 | *root, struct btrfs_path *path, int min_data_size, |
2718 | int empty) | 2736 | int data_size, int empty, u32 max_slot) |
2719 | { | 2737 | { |
2720 | struct extent_buffer *right = path->nodes[0]; | 2738 | struct extent_buffer *right = path->nodes[0]; |
2721 | struct extent_buffer *left; | 2739 | struct extent_buffer *left; |
@@ -2761,8 +2779,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root | |||
2761 | goto out; | 2779 | goto out; |
2762 | } | 2780 | } |
2763 | 2781 | ||
2764 | return __push_leaf_left(trans, root, path, data_size, | 2782 | return __push_leaf_left(trans, root, path, min_data_size, |
2765 | empty, left, free_space, right_nritems); | 2783 | empty, left, free_space, right_nritems, |
2784 | max_slot); | ||
2766 | out: | 2785 | out: |
2767 | btrfs_tree_unlock(left); | 2786 | btrfs_tree_unlock(left); |
2768 | free_extent_buffer(left); | 2787 | free_extent_buffer(left); |
@@ -2855,6 +2874,64 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans, | |||
2855 | } | 2874 | } |
2856 | 2875 | ||
2857 | /* | 2876 | /* |
2877 | * double splits happen when we need to insert a big item in the middle | ||
2878 | * of a leaf. A double split can leave us with 3 mostly empty leaves: | ||
2879 | * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ] | ||
2880 | * A B C | ||
2881 | * | ||
2882 | * We avoid this by trying to push the items on either side of our target | ||
2883 | * into the adjacent leaves. If all goes well we can avoid the double split | ||
2884 | * completely. | ||
2885 | */ | ||
2886 | static noinline int push_for_double_split(struct btrfs_trans_handle *trans, | ||
2887 | struct btrfs_root *root, | ||
2888 | struct btrfs_path *path, | ||
2889 | int data_size) | ||
2890 | { | ||
2891 | int ret; | ||
2892 | int progress = 0; | ||
2893 | int slot; | ||
2894 | u32 nritems; | ||
2895 | |||
2896 | slot = path->slots[0]; | ||
2897 | |||
2898 | /* | ||
2899 | * try to push all the items after our slot into the | ||
2900 | * right leaf | ||
2901 | */ | ||
2902 | ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot); | ||
2903 | if (ret < 0) | ||
2904 | return ret; | ||
2905 | |||
2906 | if (ret == 0) | ||
2907 | progress++; | ||
2908 | |||
2909 | nritems = btrfs_header_nritems(path->nodes[0]); | ||
2910 | /* | ||
2911 | * our goal is to get our slot at the start or end of a leaf. If | ||
2912 | * we've done so, we're done | ||
2913 | */ | ||
2914 | if (path->slots[0] == 0 || path->slots[0] == nritems) | ||
2915 | return 0; | ||
2916 | |||
2917 | if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) | ||
2918 | return 0; | ||
2919 | |||
2921 | /* try to push all the items before our slot into the left leaf */ | ||
2921 | slot = path->slots[0]; | ||
2922 | ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot); | ||
2923 | if (ret < 0) | ||
2924 | return ret; | ||
2925 | |||
2926 | if (ret == 0) | ||
2927 | progress++; | ||
2928 | |||
2929 | if (progress) | ||
2930 | return 0; | ||
2931 | return 1; | ||
2932 | } | ||
2933 | |||
2934 | /* | ||
2858 | * split the path's leaf in two, making sure there is at least data_size | 2935 | * split the path's leaf in two, making sure there is at least data_size |
2859 | * available for the resulting leaf level of the path. | 2936 | * available for the resulting leaf level of the path. |
2860 | * | 2937 | * |
@@ -2876,6 +2953,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, | |||
2876 | int wret; | 2953 | int wret; |
2877 | int split; | 2954 | int split; |
2878 | int num_doubles = 0; | 2955 | int num_doubles = 0; |
2956 | int tried_avoid_double = 0; | ||
2879 | 2957 | ||
2880 | l = path->nodes[0]; | 2958 | l = path->nodes[0]; |
2881 | slot = path->slots[0]; | 2959 | slot = path->slots[0]; |
@@ -2884,12 +2962,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, | |||
2884 | return -EOVERFLOW; | 2962 | return -EOVERFLOW; |
2885 | 2963 | ||
2886 | /* first try to make some room by pushing left and right */ | 2964 | /* first try to make some room by pushing left and right */ |
2887 | if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) { | 2965 | if (data_size) { |
2888 | wret = push_leaf_right(trans, root, path, data_size, 0); | 2966 | wret = push_leaf_right(trans, root, path, data_size, |
2967 | data_size, 0, 0); | ||
2889 | if (wret < 0) | 2968 | if (wret < 0) |
2890 | return wret; | 2969 | return wret; |
2891 | if (wret) { | 2970 | if (wret) { |
2892 | wret = push_leaf_left(trans, root, path, data_size, 0); | 2971 | wret = push_leaf_left(trans, root, path, data_size, |
2972 | data_size, 0, (u32)-1); | ||
2893 | if (wret < 0) | 2973 | if (wret < 0) |
2894 | return wret; | 2974 | return wret; |
2895 | } | 2975 | } |
@@ -2923,6 +3003,8 @@ again: | |||
2923 | if (mid != nritems && | 3003 | if (mid != nritems && |
2924 | leaf_space_used(l, mid, nritems - mid) + | 3004 | leaf_space_used(l, mid, nritems - mid) + |
2925 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { | 3005 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { |
3006 | if (data_size && !tried_avoid_double) | ||
3007 | goto push_for_double; | ||
2926 | split = 2; | 3008 | split = 2; |
2927 | } | 3009 | } |
2928 | } | 3010 | } |
@@ -2939,6 +3021,8 @@ again: | |||
2939 | if (mid != nritems && | 3021 | if (mid != nritems && |
2940 | leaf_space_used(l, mid, nritems - mid) + | 3022 | leaf_space_used(l, mid, nritems - mid) + |
2941 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { | 3023 | data_size > BTRFS_LEAF_DATA_SIZE(root)) { |
3024 | if (data_size && !tried_avoid_double) | ||
3025 | goto push_for_double; | ||
2942 | split = 2 ; | 3026 | split = 2 ; |
2943 | } | 3027 | } |
2944 | } | 3028 | } |
@@ -3019,6 +3103,13 @@ again: | |||
3019 | } | 3103 | } |
3020 | 3104 | ||
3021 | return ret; | 3105 | return ret; |
3106 | |||
3107 | push_for_double: | ||
3108 | push_for_double_split(trans, root, path, data_size); | ||
3109 | tried_avoid_double = 1; | ||
3110 | if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size) | ||
3111 | return 0; | ||
3112 | goto again; | ||
3022 | } | 3113 | } |
3023 | 3114 | ||
3024 | static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, | 3115 | static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, |
@@ -3915,13 +4006,15 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
3915 | extent_buffer_get(leaf); | 4006 | extent_buffer_get(leaf); |
3916 | 4007 | ||
3917 | btrfs_set_path_blocking(path); | 4008 | btrfs_set_path_blocking(path); |
3918 | wret = push_leaf_left(trans, root, path, 1, 1); | 4009 | wret = push_leaf_left(trans, root, path, 1, 1, |
4010 | 1, (u32)-1); | ||
3919 | if (wret < 0 && wret != -ENOSPC) | 4011 | if (wret < 0 && wret != -ENOSPC) |
3920 | ret = wret; | 4012 | ret = wret; |
3921 | 4013 | ||
3922 | if (path->nodes[0] == leaf && | 4014 | if (path->nodes[0] == leaf && |
3923 | btrfs_header_nritems(leaf)) { | 4015 | btrfs_header_nritems(leaf)) { |
3924 | wret = push_leaf_right(trans, root, path, 1, 1); | 4016 | wret = push_leaf_right(trans, root, path, 1, |
4017 | 1, 1, 0); | ||
3925 | if (wret < 0 && wret != -ENOSPC) | 4018 | if (wret < 0 && wret != -ENOSPC) |
3926 | ret = wret; | 4019 | ret = wret; |
3927 | } | 4020 | } |
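The push_for_double_split() helper added above tries to empty one side of the target slot before resorting to a second split. The decision reduces to: push everything after the slot to the right, stop if the slot now sits at a leaf edge or enough space was freed, otherwise push everything before the slot to the left, and report a double split only if neither push made progress. A toy model of that flow is sketched below; the leaf state and the fake push helpers are made up purely for illustration.

    #include <stdio.h>

    struct leaf { int nritems; int free_space; int slot; };

    /* Fake pushes: pretend every movable item fits in the neighbour leaf. */
    static int push_right(struct leaf *l) { l->nritems = l->slot; l->free_space += 400; return 0; }
    static int push_left(struct leaf *l)  { l->nritems -= l->slot; l->slot = 0; l->free_space += 400; return 0; }

    /* Returns 0 if a single split (or none) now suffices, 1 if the expensive
     * double split is still needed, mirroring push_for_double_split() above. */
    static int push_for_double_split(struct leaf *l, int data_size)
    {
            int progress = 0;

            if (push_right(l) == 0)
                    progress++;
            if (l->slot == 0 || l->slot == l->nritems)
                    return 0;               /* slot is at a leaf edge: easy split */
            if (l->free_space >= data_size)
                    return 0;               /* enough room, no further split needed */
            if (push_left(l) == 0)
                    progress++;
            return progress ? 0 : 1;
    }

    int main(void)
    {
            struct leaf l = { .nritems = 50, .free_space = 100, .slot = 20 };

            printf("double split still needed: %d\n",
                   push_for_double_split(&l, 900));
            return 0;
    }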
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a4080c21ec55..d74e6af9b53a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -2594,7 +2594,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, | |||
2594 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, | 2594 | .sync_io = wbc->sync_mode == WB_SYNC_ALL, |
2595 | }; | 2595 | }; |
2596 | struct writeback_control wbc_writepages = { | 2596 | struct writeback_control wbc_writepages = { |
2597 | .bdi = wbc->bdi, | ||
2598 | .sync_mode = wbc->sync_mode, | 2597 | .sync_mode = wbc->sync_mode, |
2599 | .older_than_this = NULL, | 2598 | .older_than_this = NULL, |
2600 | .nr_to_write = 64, | 2599 | .nr_to_write = 64, |
@@ -2628,7 +2627,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, | |||
2628 | .sync_io = mode == WB_SYNC_ALL, | 2627 | .sync_io = mode == WB_SYNC_ALL, |
2629 | }; | 2628 | }; |
2630 | struct writeback_control wbc_writepages = { | 2629 | struct writeback_control wbc_writepages = { |
2631 | .bdi = inode->i_mapping->backing_dev_info, | ||
2632 | .sync_mode = mode, | 2630 | .sync_mode = mode, |
2633 | .older_than_this = NULL, | 2631 | .older_than_this = NULL, |
2634 | .nr_to_write = nr_pages * 2, | 2632 | .nr_to_write = nr_pages * 2, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4dbaf89b1337..9254b3d58dbe 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1458 | */ | 1458 | */ |
1459 | 1459 | ||
1460 | /* the destination must be opened for writing */ | 1460 | /* the destination must be opened for writing */ |
1461 | if (!(file->f_mode & FMODE_WRITE)) | 1461 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) |
1462 | return -EINVAL; | 1462 | return -EINVAL; |
1463 | 1463 | ||
1464 | ret = mnt_want_write(file->f_path.mnt); | 1464 | ret = mnt_want_write(file->f_path.mnt); |
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1511 | 1511 | ||
1512 | /* determine range to clone */ | 1512 | /* determine range to clone */ |
1513 | ret = -EINVAL; | 1513 | ret = -EINVAL; |
1514 | if (off >= src->i_size || off + len > src->i_size) | 1514 | if (off + len > src->i_size || off + len < off) |
1515 | goto out_unlock; | 1515 | goto out_unlock; |
1516 | if (len == 0) | 1516 | if (len == 0) |
1517 | olen = len = src->i_size - off; | 1517 | olen = len = src->i_size - off; |
@@ -1578,6 +1578,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1578 | u64 disko = 0, diskl = 0; | 1578 | u64 disko = 0, diskl = 0; |
1579 | u64 datao = 0, datal = 0; | 1579 | u64 datao = 0, datal = 0; |
1580 | u8 comp; | 1580 | u8 comp; |
1581 | u64 endoff; | ||
1581 | 1582 | ||
1582 | size = btrfs_item_size_nr(leaf, slot); | 1583 | size = btrfs_item_size_nr(leaf, slot); |
1583 | read_extent_buffer(leaf, buf, | 1584 | read_extent_buffer(leaf, buf, |
@@ -1712,9 +1713,18 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1712 | btrfs_release_path(root, path); | 1713 | btrfs_release_path(root, path); |
1713 | 1714 | ||
1714 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 1715 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
1715 | if (new_key.offset + datal > inode->i_size) | 1716 | |
1716 | btrfs_i_size_write(inode, | 1717 | /* |
1717 | new_key.offset + datal); | 1718 | * we round up to the block size at eof when |
1719 | * determining which extents to clone above, | ||
1720 | * but shouldn't round up the file size | ||
1721 | */ | ||
1722 | endoff = new_key.offset + datal; | ||
1723 | if (endoff > off+olen) | ||
1724 | endoff = off+olen; | ||
1725 | if (endoff > inode->i_size) | ||
1726 | btrfs_i_size_write(inode, endoff); | ||
1727 | |||
1718 | BTRFS_I(inode)->flags = BTRFS_I(src)->flags; | 1728 | BTRFS_I(inode)->flags = BTRFS_I(src)->flags; |
1719 | ret = btrfs_update_inode(trans, root, inode); | 1729 | ret = btrfs_update_inode(trans, root, inode); |
1720 | BUG_ON(ret); | 1730 | BUG_ON(ret); |
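The clone-ioctl change above replaces the unconditional i_size bump with a value clamped to the end of the requested range, because extent lengths are rounded up to the block size at EOF when choosing what to clone. A minimal sketch of the arithmetic, with illustrative values:

    #include <stdio.h>

    typedef unsigned long long u64;

    /* New file size after cloning one extent, clamped to the caller-requested
     * range [off, off + olen) as the patch above does. */
    static u64 clone_new_size(u64 key_offset, u64 datal, u64 off, u64 olen,
                              u64 i_size)
    {
            u64 endoff = key_offset + datal;

            if (endoff > off + olen)
                    endoff = off + olen;    /* don't round the size past the range */
            return endoff > i_size ? endoff : i_size;
    }

    int main(void)
    {
            /* extent rounded up to a 4K block, but only 10000 bytes requested */
            printf("%llu\n", clone_new_size(0, 12288, 0, 10000, 5000));
            return 0;
    }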
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index 04b8280582a9..bc87b9c1d27e 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig | |||
@@ -2,7 +2,7 @@ config CEPH_FS | |||
2 | tristate "Ceph distributed file system (EXPERIMENTAL)" | 2 | tristate "Ceph distributed file system (EXPERIMENTAL)" |
3 | depends on INET && EXPERIMENTAL | 3 | depends on INET && EXPERIMENTAL |
4 | select LIBCRC32C | 4 | select LIBCRC32C |
5 | select CONFIG_CRYPTO_AES | 5 | select CRYPTO_AES |
6 | help | 6 | help |
7 | Choose Y or M here to include support for mounting the | 7 | Choose Y or M here to include support for mounting the |
8 | experimental Ceph distributed file system. Ceph is an extremely | 8 | experimental Ceph distributed file system. Ceph is an extremely |
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c index 83d4d2785ffe..6d44053ecff1 100644 --- a/fs/ceph/auth_x.c +++ b/fs/ceph/auth_x.c | |||
@@ -493,7 +493,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, | |||
493 | return -EAGAIN; | 493 | return -EAGAIN; |
494 | } | 494 | } |
495 | 495 | ||
496 | op = le32_to_cpu(head->op); | 496 | op = le16_to_cpu(head->op); |
497 | result = le32_to_cpu(head->result); | 497 | result = le32_to_cpu(head->result); |
498 | dout("handle_reply op %d result %d\n", op, result); | 498 | dout("handle_reply op %d result %d\n", op, result); |
499 | switch (op) { | 499 | switch (op) { |
@@ -613,6 +613,9 @@ static void ceph_x_destroy(struct ceph_auth_client *ac) | |||
613 | remove_ticket_handler(ac, th); | 613 | remove_ticket_handler(ac, th); |
614 | } | 614 | } |
615 | 615 | ||
616 | if (xi->auth_authorizer.buf) | ||
617 | ceph_buffer_put(xi->auth_authorizer.buf); | ||
618 | |||
616 | kfree(ac->private); | 619 | kfree(ac->private); |
617 | ac->private = NULL; | 620 | ac->private = NULL; |
618 | } | 621 | } |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 619b61655ee5..b81be9a56487 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -244,8 +244,14 @@ static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx) | |||
244 | struct ceph_cap *cap = NULL; | 244 | struct ceph_cap *cap = NULL; |
245 | 245 | ||
246 | /* temporary, until we do something about cap import/export */ | 246 | /* temporary, until we do something about cap import/export */ |
247 | if (!ctx) | 247 | if (!ctx) { |
248 | return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); | 248 | cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); |
249 | if (cap) { | ||
250 | caps_use_count++; | ||
251 | caps_total_count++; | ||
252 | } | ||
253 | return cap; | ||
254 | } | ||
249 | 255 | ||
250 | spin_lock(&caps_list_lock); | 256 | spin_lock(&caps_list_lock); |
251 | dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", | 257 | dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", |
@@ -621,7 +627,7 @@ retry: | |||
621 | if (fmode >= 0) | 627 | if (fmode >= 0) |
622 | __ceph_get_fmode(ci, fmode); | 628 | __ceph_get_fmode(ci, fmode); |
623 | spin_unlock(&inode->i_lock); | 629 | spin_unlock(&inode->i_lock); |
624 | wake_up(&ci->i_cap_wq); | 630 | wake_up_all(&ci->i_cap_wq); |
625 | return 0; | 631 | return 0; |
626 | } | 632 | } |
627 | 633 | ||
@@ -1175,7 +1181,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, | |||
1175 | } | 1181 | } |
1176 | 1182 | ||
1177 | if (wake) | 1183 | if (wake) |
1178 | wake_up(&ci->i_cap_wq); | 1184 | wake_up_all(&ci->i_cap_wq); |
1179 | 1185 | ||
1180 | return delayed; | 1186 | return delayed; |
1181 | } | 1187 | } |
@@ -2147,7 +2153,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) | |||
2147 | else if (flushsnaps) | 2153 | else if (flushsnaps) |
2148 | ceph_flush_snaps(ci); | 2154 | ceph_flush_snaps(ci); |
2149 | if (wake) | 2155 | if (wake) |
2150 | wake_up(&ci->i_cap_wq); | 2156 | wake_up_all(&ci->i_cap_wq); |
2151 | if (put) | 2157 | if (put) |
2152 | iput(inode); | 2158 | iput(inode); |
2153 | } | 2159 | } |
@@ -2223,7 +2229,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, | |||
2223 | iput(inode); | 2229 | iput(inode); |
2224 | } else if (complete_capsnap) { | 2230 | } else if (complete_capsnap) { |
2225 | ceph_flush_snaps(ci); | 2231 | ceph_flush_snaps(ci); |
2226 | wake_up(&ci->i_cap_wq); | 2232 | wake_up_all(&ci->i_cap_wq); |
2227 | } | 2233 | } |
2228 | if (drop_capsnap) | 2234 | if (drop_capsnap) |
2229 | iput(inode); | 2235 | iput(inode); |
@@ -2399,7 +2405,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, | |||
2399 | if (queue_invalidate) | 2405 | if (queue_invalidate) |
2400 | ceph_queue_invalidate(inode); | 2406 | ceph_queue_invalidate(inode); |
2401 | if (wake) | 2407 | if (wake) |
2402 | wake_up(&ci->i_cap_wq); | 2408 | wake_up_all(&ci->i_cap_wq); |
2403 | 2409 | ||
2404 | if (check_caps == 1) | 2410 | if (check_caps == 1) |
2405 | ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, | 2411 | ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, |
@@ -2454,7 +2460,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, | |||
2454 | struct ceph_inode_info, | 2460 | struct ceph_inode_info, |
2455 | i_flushing_item)->vfs_inode); | 2461 | i_flushing_item)->vfs_inode); |
2456 | mdsc->num_cap_flushing--; | 2462 | mdsc->num_cap_flushing--; |
2457 | wake_up(&mdsc->cap_flushing_wq); | 2463 | wake_up_all(&mdsc->cap_flushing_wq); |
2458 | dout(" inode %p now !flushing\n", inode); | 2464 | dout(" inode %p now !flushing\n", inode); |
2459 | 2465 | ||
2460 | if (ci->i_dirty_caps == 0) { | 2466 | if (ci->i_dirty_caps == 0) { |
@@ -2466,7 +2472,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, | |||
2466 | } | 2472 | } |
2467 | } | 2473 | } |
2468 | spin_unlock(&mdsc->cap_dirty_lock); | 2474 | spin_unlock(&mdsc->cap_dirty_lock); |
2469 | wake_up(&ci->i_cap_wq); | 2475 | wake_up_all(&ci->i_cap_wq); |
2470 | 2476 | ||
2471 | out: | 2477 | out: |
2472 | spin_unlock(&inode->i_lock); | 2478 | spin_unlock(&inode->i_lock); |
@@ -2886,18 +2892,19 @@ int ceph_encode_inode_release(void **p, struct inode *inode, | |||
2886 | struct ceph_inode_info *ci = ceph_inode(inode); | 2892 | struct ceph_inode_info *ci = ceph_inode(inode); |
2887 | struct ceph_cap *cap; | 2893 | struct ceph_cap *cap; |
2888 | struct ceph_mds_request_release *rel = *p; | 2894 | struct ceph_mds_request_release *rel = *p; |
2895 | int used, dirty; | ||
2889 | int ret = 0; | 2896 | int ret = 0; |
2890 | int used = 0; | ||
2891 | 2897 | ||
2892 | spin_lock(&inode->i_lock); | 2898 | spin_lock(&inode->i_lock); |
2893 | used = __ceph_caps_used(ci); | 2899 | used = __ceph_caps_used(ci); |
2900 | dirty = __ceph_caps_dirty(ci); | ||
2894 | 2901 | ||
2895 | dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode, | 2902 | dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n", |
2896 | mds, ceph_cap_string(used), ceph_cap_string(drop), | 2903 | inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop), |
2897 | ceph_cap_string(unless)); | 2904 | ceph_cap_string(unless)); |
2898 | 2905 | ||
2899 | /* only drop unused caps */ | 2906 | /* only drop unused, clean caps */ |
2900 | drop &= ~used; | 2907 | drop &= ~(used | dirty); |
2901 | 2908 | ||
2902 | cap = __get_cap_for_mds(ci, mds); | 2909 | cap = __get_cap_for_mds(ci, mds); |
2903 | if (cap && __cap_is_valid(cap)) { | 2910 | if (cap && __cap_is_valid(cap)) { |
@@ -2977,6 +2984,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry, | |||
2977 | memcpy(*p, dentry->d_name.name, dentry->d_name.len); | 2984 | memcpy(*p, dentry->d_name.name, dentry->d_name.len); |
2978 | *p += dentry->d_name.len; | 2985 | *p += dentry->d_name.len; |
2979 | rel->dname_seq = cpu_to_le32(di->lease_seq); | 2986 | rel->dname_seq = cpu_to_le32(di->lease_seq); |
2987 | __ceph_mdsc_drop_dentry_lease(dentry); | ||
2980 | } | 2988 | } |
2981 | spin_unlock(&dentry->d_lock); | 2989 | spin_unlock(&dentry->d_lock); |
2982 | return ret; | 2990 | return ret; |
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c index 9ba54efb6543..a4eec133258e 100644 --- a/fs/ceph/crush/mapper.c +++ b/fs/ceph/crush/mapper.c | |||
@@ -238,7 +238,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket, | |||
238 | 238 | ||
239 | static int crush_bucket_choose(struct crush_bucket *in, int x, int r) | 239 | static int crush_bucket_choose(struct crush_bucket *in, int x, int r) |
240 | { | 240 | { |
241 | dprintk("choose %d x=%d r=%d\n", in->id, x, r); | 241 | dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r); |
242 | switch (in->alg) { | 242 | switch (in->alg) { |
243 | case CRUSH_BUCKET_UNIFORM: | 243 | case CRUSH_BUCKET_UNIFORM: |
244 | return bucket_uniform_choose((struct crush_bucket_uniform *)in, | 244 | return bucket_uniform_choose((struct crush_bucket_uniform *)in, |
@@ -264,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) | |||
264 | */ | 264 | */ |
265 | static int is_out(struct crush_map *map, __u32 *weight, int item, int x) | 265 | static int is_out(struct crush_map *map, __u32 *weight, int item, int x) |
266 | { | 266 | { |
267 | if (weight[item] >= 0x1000) | 267 | if (weight[item] >= 0x10000) |
268 | return 0; | 268 | return 0; |
269 | if (weight[item] == 0) | 269 | if (weight[item] == 0) |
270 | return 1; | 270 | return 1; |
@@ -305,7 +305,9 @@ static int crush_choose(struct crush_map *map, | |||
305 | int itemtype; | 305 | int itemtype; |
306 | int collide, reject; | 306 | int collide, reject; |
307 | const int orig_tries = 5; /* attempts before we fall back to search */ | 307 | const int orig_tries = 5; /* attempts before we fall back to search */ |
308 | dprintk("choose bucket %d x %d outpos %d\n", bucket->id, x, outpos); | 308 | |
309 | dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "", | ||
310 | bucket->id, x, outpos, numrep); | ||
309 | 311 | ||
310 | for (rep = outpos; rep < numrep; rep++) { | 312 | for (rep = outpos; rep < numrep; rep++) { |
311 | /* keep trying until we get a non-out, non-colliding item */ | 313 | /* keep trying until we get a non-out, non-colliding item */ |
@@ -366,6 +368,7 @@ static int crush_choose(struct crush_map *map, | |||
366 | BUG_ON(item >= 0 || | 368 | BUG_ON(item >= 0 || |
367 | (-1-item) >= map->max_buckets); | 369 | (-1-item) >= map->max_buckets); |
368 | in = map->buckets[-1-item]; | 370 | in = map->buckets[-1-item]; |
371 | retry_bucket = 1; | ||
369 | continue; | 372 | continue; |
370 | } | 373 | } |
371 | 374 | ||
@@ -377,15 +380,25 @@ static int crush_choose(struct crush_map *map, | |||
377 | } | 380 | } |
378 | } | 381 | } |
379 | 382 | ||
380 | if (recurse_to_leaf && | 383 | reject = 0; |
381 | item < 0 && | 384 | if (recurse_to_leaf) { |
382 | crush_choose(map, map->buckets[-1-item], | 385 | if (item < 0) { |
383 | weight, | 386 | if (crush_choose(map, |
384 | x, outpos+1, 0, | 387 | map->buckets[-1-item], |
385 | out2, outpos, | 388 | weight, |
386 | firstn, 0, NULL) <= outpos) { | 389 | x, outpos+1, 0, |
387 | reject = 1; | 390 | out2, outpos, |
388 | } else { | 391 | firstn, 0, |
392 | NULL) <= outpos) | ||
393 | /* didn't get leaf */ | ||
394 | reject = 1; | ||
395 | } else { | ||
396 | /* we already have a leaf! */ | ||
397 | out2[outpos] = item; | ||
398 | } | ||
399 | } | ||
400 | |||
401 | if (!reject) { | ||
389 | /* out? */ | 402 | /* out? */ |
390 | if (itemtype == 0) | 403 | if (itemtype == 0) |
391 | reject = is_out(map, weight, | 404 | reject = is_out(map, weight, |
@@ -424,12 +437,12 @@ reject: | |||
424 | continue; | 437 | continue; |
425 | } | 438 | } |
426 | 439 | ||
427 | dprintk("choose got %d\n", item); | 440 | dprintk("CHOOSE got %d\n", item); |
428 | out[outpos] = item; | 441 | out[outpos] = item; |
429 | outpos++; | 442 | outpos++; |
430 | } | 443 | } |
431 | 444 | ||
432 | dprintk("choose returns %d\n", outpos); | 445 | dprintk("CHOOSE returns %d\n", outpos); |
433 | return outpos; | 446 | return outpos; |
434 | } | 447 | } |
435 | 448 | ||
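The is_out() fix above (0x1000 becomes 0x10000) matters because CRUSH item weights here are 16.16 fixed-point values, so 0x10000 represents a weight of 1.0; with the old constant any weight above roughly 1/16 was treated as full weight and never rejected. A small sketch of the corrected check, with the hash-based probabilistic reject for fractional weights stubbed out:

    #include <stdio.h>

    typedef unsigned int u32;

    /* Weight is 16.16 fixed point: 0x10000 == 1.0 (always in),
     * 0 == 0.0 (always out), anything in between is rejected with a
     * probability proportional to the missing weight. */
    static int is_out(u32 weight)
    {
            if (weight >= 0x10000)
                    return 0;
            if (weight == 0)
                    return 1;
            /* fractional weight: the real code hashes (x, item) and compares
             * the low 16 bits against 'weight'; omitted in this sketch */
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n", is_out(0x10000), is_out(0), is_out(0x8000));
            return 0;
    }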
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 3be33fb066cc..f2f5332ddbba 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c | |||
@@ -261,7 +261,7 @@ static int osdc_show(struct seq_file *s, void *pp) | |||
261 | 261 | ||
262 | static int caps_show(struct seq_file *s, void *p) | 262 | static int caps_show(struct seq_file *s, void *p) |
263 | { | 263 | { |
264 | struct ceph_client *client = p; | 264 | struct ceph_client *client = s->private; |
265 | int total, avail, used, reserved, min; | 265 | int total, avail, used, reserved, min; |
266 | 266 | ||
267 | ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); | 267 | ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f85719310db2..f94ed3c7f6a5 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -266,6 +266,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
266 | spin_lock(&inode->i_lock); | 266 | spin_lock(&inode->i_lock); |
267 | if ((filp->f_pos == 2 || fi->dentry) && | 267 | if ((filp->f_pos == 2 || fi->dentry) && |
268 | !ceph_test_opt(client, NOASYNCREADDIR) && | 268 | !ceph_test_opt(client, NOASYNCREADDIR) && |
269 | ceph_snap(inode) != CEPH_SNAPDIR && | ||
269 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && | 270 | (ci->i_ceph_flags & CEPH_I_COMPLETE) && |
270 | __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { | 271 | __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { |
271 | err = __dcache_readdir(filp, dirent, filldir); | 272 | err = __dcache_readdir(filp, dirent, filldir); |
@@ -1013,18 +1014,22 @@ out_touch: | |||
1013 | 1014 | ||
1014 | /* | 1015 | /* |
1015 | * When a dentry is released, clear the dir I_COMPLETE if it was part | 1016 | * When a dentry is released, clear the dir I_COMPLETE if it was part |
1016 | * of the current dir gen. | 1017 | * of the current dir gen or if this is in the snapshot namespace. |
1017 | */ | 1018 | */ |
1018 | static void ceph_dentry_release(struct dentry *dentry) | 1019 | static void ceph_dentry_release(struct dentry *dentry) |
1019 | { | 1020 | { |
1020 | struct ceph_dentry_info *di = ceph_dentry(dentry); | 1021 | struct ceph_dentry_info *di = ceph_dentry(dentry); |
1021 | struct inode *parent_inode = dentry->d_parent->d_inode; | 1022 | struct inode *parent_inode = dentry->d_parent->d_inode; |
1023 | u64 snapid = ceph_snap(parent_inode); | ||
1022 | 1024 | ||
1023 | if (parent_inode) { | 1025 | dout("dentry_release %p parent %p\n", dentry, parent_inode); |
1026 | |||
1027 | if (parent_inode && snapid != CEPH_SNAPDIR) { | ||
1024 | struct ceph_inode_info *ci = ceph_inode(parent_inode); | 1028 | struct ceph_inode_info *ci = ceph_inode(parent_inode); |
1025 | 1029 | ||
1026 | spin_lock(&parent_inode->i_lock); | 1030 | spin_lock(&parent_inode->i_lock); |
1027 | if (ci->i_shared_gen == di->lease_shared_gen) { | 1031 | if (ci->i_shared_gen == di->lease_shared_gen || |
1032 | snapid <= CEPH_MAXSNAP) { | ||
1028 | dout(" clearing %p complete (d_release)\n", | 1033 | dout(" clearing %p complete (d_release)\n", |
1029 | parent_inode); | 1034 | parent_inode); |
1030 | ci->i_ceph_flags &= ~CEPH_I_COMPLETE; | 1035 | ci->i_ceph_flags &= ~CEPH_I_COMPLETE; |
@@ -1241,7 +1246,9 @@ struct dentry_operations ceph_dentry_ops = { | |||
1241 | 1246 | ||
1242 | struct dentry_operations ceph_snapdir_dentry_ops = { | 1247 | struct dentry_operations ceph_snapdir_dentry_ops = { |
1243 | .d_revalidate = ceph_snapdir_d_revalidate, | 1248 | .d_revalidate = ceph_snapdir_d_revalidate, |
1249 | .d_release = ceph_dentry_release, | ||
1244 | }; | 1250 | }; |
1245 | 1251 | ||
1246 | struct dentry_operations ceph_snap_dentry_ops = { | 1252 | struct dentry_operations ceph_snap_dentry_ops = { |
1253 | .d_release = ceph_dentry_release, | ||
1247 | }; | 1254 | }; |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 6251a1574b94..7c08698fad3e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file) | |||
265 | kmem_cache_free(ceph_file_cachep, cf); | 265 | kmem_cache_free(ceph_file_cachep, cf); |
266 | 266 | ||
267 | /* wake up anyone waiting for caps on this inode */ | 267 | /* wake up anyone waiting for caps on this inode */ |
268 | wake_up(&ci->i_cap_wq); | 268 | wake_up_all(&ci->i_cap_wq); |
269 | return 0; | 269 | return 0; |
270 | } | 270 | } |
271 | 271 | ||
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index ab47f46ca282..389f9dbd9949 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -854,8 +854,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, | |||
854 | d_drop(dn); | 854 | d_drop(dn); |
855 | realdn = d_materialise_unique(dn, in); | 855 | realdn = d_materialise_unique(dn, in); |
856 | if (IS_ERR(realdn)) { | 856 | if (IS_ERR(realdn)) { |
857 | pr_err("splice_dentry error %p inode %p ino %llx.%llx\n", | 857 | pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", |
858 | dn, in, ceph_vinop(in)); | 858 | PTR_ERR(realdn), dn, in, ceph_vinop(in)); |
859 | if (prehash) | 859 | if (prehash) |
860 | *prehash = false; /* don't rehash on error */ | 860 | *prehash = false; /* don't rehash on error */ |
861 | dn = realdn; /* note realdn contains the error */ | 861 | dn = realdn; /* note realdn contains the error */ |
@@ -1199,8 +1199,10 @@ retry_lookup: | |||
1199 | goto out; | 1199 | goto out; |
1200 | } | 1200 | } |
1201 | err = ceph_init_dentry(dn); | 1201 | err = ceph_init_dentry(dn); |
1202 | if (err < 0) | 1202 | if (err < 0) { |
1203 | dput(dn); | ||
1203 | goto out; | 1204 | goto out; |
1205 | } | ||
1204 | } else if (dn->d_inode && | 1206 | } else if (dn->d_inode && |
1205 | (ceph_ino(dn->d_inode) != vino.ino || | 1207 | (ceph_ino(dn->d_inode) != vino.ino || |
1206 | ceph_snap(dn->d_inode) != vino.snap)) { | 1208 | ceph_snap(dn->d_inode) != vino.snap)) { |
@@ -1234,18 +1236,23 @@ retry_lookup: | |||
1234 | goto out; | 1236 | goto out; |
1235 | } | 1237 | } |
1236 | dn = splice_dentry(dn, in, NULL); | 1238 | dn = splice_dentry(dn, in, NULL); |
1239 | if (IS_ERR(dn)) | ||
1240 | dn = NULL; | ||
1237 | } | 1241 | } |
1238 | 1242 | ||
1239 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, | 1243 | if (fill_inode(in, &rinfo->dir_in[i], NULL, session, |
1240 | req->r_request_started, -1, | 1244 | req->r_request_started, -1, |
1241 | &req->r_caps_reservation) < 0) { | 1245 | &req->r_caps_reservation) < 0) { |
1242 | pr_err("fill_inode badness on %p\n", in); | 1246 | pr_err("fill_inode badness on %p\n", in); |
1243 | dput(dn); | 1247 | goto next_item; |
1244 | continue; | ||
1245 | } | 1248 | } |
1246 | update_dentry_lease(dn, rinfo->dir_dlease[i], | 1249 | if (dn) |
1247 | req->r_session, req->r_request_started); | 1250 | update_dentry_lease(dn, rinfo->dir_dlease[i], |
1248 | dput(dn); | 1251 | req->r_session, |
1252 | req->r_request_started); | ||
1253 | next_item: | ||
1254 | if (dn) | ||
1255 | dput(dn); | ||
1249 | } | 1256 | } |
1250 | req->r_did_prepopulate = true; | 1257 | req->r_did_prepopulate = true; |
1251 | 1258 | ||
@@ -1494,7 +1501,7 @@ retry: | |||
1494 | if (wrbuffer_refs == 0) | 1501 | if (wrbuffer_refs == 0) |
1495 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); | 1502 | ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); |
1496 | if (wake) | 1503 | if (wake) |
1497 | wake_up(&ci->i_cap_wq); | 1504 | wake_up_all(&ci->i_cap_wq); |
1498 | } | 1505 | } |
1499 | 1506 | ||
1500 | 1507 | ||
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 1766947fc07a..dd440bd438a9 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -868,7 +868,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, | |||
868 | { | 868 | { |
869 | struct ceph_inode_info *ci = ceph_inode(inode); | 869 | struct ceph_inode_info *ci = ceph_inode(inode); |
870 | 870 | ||
871 | wake_up(&ci->i_cap_wq); | 871 | wake_up_all(&ci->i_cap_wq); |
872 | if (arg) { | 872 | if (arg) { |
873 | spin_lock(&inode->i_lock); | 873 | spin_lock(&inode->i_lock); |
874 | ci->i_wanted_max_size = 0; | 874 | ci->i_wanted_max_size = 0; |
@@ -1514,6 +1514,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |||
1514 | ceph_encode_filepath(&p, end, ino1, path1); | 1514 | ceph_encode_filepath(&p, end, ino1, path1); |
1515 | ceph_encode_filepath(&p, end, ino2, path2); | 1515 | ceph_encode_filepath(&p, end, ino2, path2); |
1516 | 1516 | ||
1517 | /* make note of release offset, in case we need to replay */ | ||
1518 | req->r_request_release_offset = p - msg->front.iov_base; | ||
1519 | |||
1517 | /* cap releases */ | 1520 | /* cap releases */ |
1518 | releases = 0; | 1521 | releases = 0; |
1519 | if (req->r_inode_drop) | 1522 | if (req->r_inode_drop) |
@@ -1561,7 +1564,7 @@ static void complete_request(struct ceph_mds_client *mdsc, | |||
1561 | if (req->r_callback) | 1564 | if (req->r_callback) |
1562 | req->r_callback(mdsc, req); | 1565 | req->r_callback(mdsc, req); |
1563 | else | 1566 | else |
1564 | complete(&req->r_completion); | 1567 | complete_all(&req->r_completion); |
1565 | } | 1568 | } |
1566 | 1569 | ||
1567 | /* | 1570 | /* |
@@ -1580,6 +1583,32 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1580 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, | 1583 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
1581 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | 1584 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); |
1582 | 1585 | ||
1586 | if (req->r_got_unsafe) { | ||
1587 | /* | ||
1588 | * Replay. Do not regenerate message (and rebuild | ||
1589 | * paths, etc.); just use the original message. | ||
1590 | * Rebuilding paths will break for renames because | ||
1591 | * d_move mangles the src name. | ||
1592 | */ | ||
1593 | msg = req->r_request; | ||
1594 | rhead = msg->front.iov_base; | ||
1595 | |||
1596 | flags = le32_to_cpu(rhead->flags); | ||
1597 | flags |= CEPH_MDS_FLAG_REPLAY; | ||
1598 | rhead->flags = cpu_to_le32(flags); | ||
1599 | |||
1600 | if (req->r_target_inode) | ||
1601 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | ||
1602 | |||
1603 | rhead->num_retry = req->r_attempts - 1; | ||
1604 | |||
1605 | /* remove cap/dentry releases from message */ | ||
1606 | rhead->num_releases = 0; | ||
1607 | msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); | ||
1608 | msg->front.iov_len = req->r_request_release_offset; | ||
1609 | return 0; | ||
1610 | } | ||
1611 | |||
1583 | if (req->r_request) { | 1612 | if (req->r_request) { |
1584 | ceph_msg_put(req->r_request); | 1613 | ceph_msg_put(req->r_request); |
1585 | req->r_request = NULL; | 1614 | req->r_request = NULL; |
@@ -1601,13 +1630,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, | |||
1601 | rhead->flags = cpu_to_le32(flags); | 1630 | rhead->flags = cpu_to_le32(flags); |
1602 | rhead->num_fwd = req->r_num_fwd; | 1631 | rhead->num_fwd = req->r_num_fwd; |
1603 | rhead->num_retry = req->r_attempts - 1; | 1632 | rhead->num_retry = req->r_attempts - 1; |
1633 | rhead->ino = 0; | ||
1604 | 1634 | ||
1605 | dout(" r_locked_dir = %p\n", req->r_locked_dir); | 1635 | dout(" r_locked_dir = %p\n", req->r_locked_dir); |
1606 | |||
1607 | if (req->r_target_inode && req->r_got_unsafe) | ||
1608 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | ||
1609 | else | ||
1610 | rhead->ino = 0; | ||
1611 | return 0; | 1636 | return 0; |
1612 | } | 1637 | } |
1613 | 1638 | ||
@@ -1907,7 +1932,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
1907 | if (head->safe) { | 1932 | if (head->safe) { |
1908 | req->r_got_safe = true; | 1933 | req->r_got_safe = true; |
1909 | __unregister_request(mdsc, req); | 1934 | __unregister_request(mdsc, req); |
1910 | complete(&req->r_safe_completion); | 1935 | complete_all(&req->r_safe_completion); |
1911 | 1936 | ||
1912 | if (req->r_got_unsafe) { | 1937 | if (req->r_got_unsafe) { |
1913 | /* | 1938 | /* |
@@ -1922,7 +1947,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |||
1922 | 1947 | ||
1923 | /* last unsafe request during umount? */ | 1948 | /* last unsafe request during umount? */ |
1924 | if (mdsc->stopping && !__get_oldest_req(mdsc)) | 1949 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
1925 | complete(&mdsc->safe_umount_waiters); | 1950 | complete_all(&mdsc->safe_umount_waiters); |
1926 | mutex_unlock(&mdsc->mutex); | 1951 | mutex_unlock(&mdsc->mutex); |
1927 | goto out; | 1952 | goto out; |
1928 | } | 1953 | } |
@@ -2101,7 +2126,7 @@ static void handle_session(struct ceph_mds_session *session, | |||
2101 | pr_info("mds%d reconnect denied\n", session->s_mds); | 2126 | pr_info("mds%d reconnect denied\n", session->s_mds); |
2102 | remove_session_caps(session); | 2127 | remove_session_caps(session); |
2103 | wake = 1; /* for good measure */ | 2128 | wake = 1; /* for good measure */ |
2104 | complete(&mdsc->session_close_waiters); | 2129 | complete_all(&mdsc->session_close_waiters); |
2105 | kick_requests(mdsc, mds); | 2130 | kick_requests(mdsc, mds); |
2106 | break; | 2131 | break; |
2107 | 2132 | ||
@@ -2783,6 +2808,12 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |||
2783 | drop_leases(mdsc); | 2808 | drop_leases(mdsc); |
2784 | ceph_flush_dirty_caps(mdsc); | 2809 | ceph_flush_dirty_caps(mdsc); |
2785 | wait_requests(mdsc); | 2810 | wait_requests(mdsc); |
2811 | |||
2812 | /* | ||
2813 | * wait for reply handlers to drop their request refs and | ||
2814 | * their inode/dcache refs | ||
2815 | */ | ||
2816 | ceph_msgr_flush(); | ||
2786 | } | 2817 | } |
2787 | 2818 | ||
2788 | /* | 2819 | /* |
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index b292fa42a66d..952410c60d09 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
@@ -188,6 +188,7 @@ struct ceph_mds_request { | |||
188 | int r_old_inode_drop, r_old_inode_unless; | 188 | int r_old_inode_drop, r_old_inode_unless; |
189 | 189 | ||
190 | struct ceph_msg *r_request; /* original request */ | 190 | struct ceph_msg *r_request; /* original request */ |
191 | int r_request_release_offset; | ||
191 | struct ceph_msg *r_reply; | 192 | struct ceph_msg *r_reply; |
192 | struct ceph_mds_reply_info_parsed r_reply_info; | 193 | struct ceph_mds_reply_info_parsed r_reply_info; |
193 | int r_err; | 194 | int r_err; |
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 64b8b1f7863d..15167b2daa55 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c | |||
@@ -43,7 +43,8 @@ static void ceph_fault(struct ceph_connection *con); | |||
43 | * nicely render a sockaddr as a string. | 43 | * nicely render a sockaddr as a string. |
44 | */ | 44 | */ |
45 | #define MAX_ADDR_STR 20 | 45 | #define MAX_ADDR_STR 20 |
46 | static char addr_str[MAX_ADDR_STR][40]; | 46 | #define MAX_ADDR_STR_LEN 60 |
47 | static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN]; | ||
47 | static DEFINE_SPINLOCK(addr_str_lock); | 48 | static DEFINE_SPINLOCK(addr_str_lock); |
48 | static int last_addr_str; | 49 | static int last_addr_str; |
49 | 50 | ||
@@ -52,7 +53,6 @@ const char *pr_addr(const struct sockaddr_storage *ss) | |||
52 | int i; | 53 | int i; |
53 | char *s; | 54 | char *s; |
54 | struct sockaddr_in *in4 = (void *)ss; | 55 | struct sockaddr_in *in4 = (void *)ss; |
55 | unsigned char *quad = (void *)&in4->sin_addr.s_addr; | ||
56 | struct sockaddr_in6 *in6 = (void *)ss; | 56 | struct sockaddr_in6 *in6 = (void *)ss; |
57 | 57 | ||
58 | spin_lock(&addr_str_lock); | 58 | spin_lock(&addr_str_lock); |
@@ -64,25 +64,13 @@ const char *pr_addr(const struct sockaddr_storage *ss) | |||
64 | 64 | ||
65 | switch (ss->ss_family) { | 65 | switch (ss->ss_family) { |
66 | case AF_INET: | 66 | case AF_INET: |
67 | sprintf(s, "%u.%u.%u.%u:%u", | 67 | snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr, |
68 | (unsigned int)quad[0], | 68 | (unsigned int)ntohs(in4->sin_port)); |
69 | (unsigned int)quad[1], | ||
70 | (unsigned int)quad[2], | ||
71 | (unsigned int)quad[3], | ||
72 | (unsigned int)ntohs(in4->sin_port)); | ||
73 | break; | 69 | break; |
74 | 70 | ||
75 | case AF_INET6: | 71 | case AF_INET6: |
76 | sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u", | 72 | snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr, |
77 | in6->sin6_addr.s6_addr16[0], | 73 | (unsigned int)ntohs(in6->sin6_port)); |
78 | in6->sin6_addr.s6_addr16[1], | ||
79 | in6->sin6_addr.s6_addr16[2], | ||
80 | in6->sin6_addr.s6_addr16[3], | ||
81 | in6->sin6_addr.s6_addr16[4], | ||
82 | in6->sin6_addr.s6_addr16[5], | ||
83 | in6->sin6_addr.s6_addr16[6], | ||
84 | in6->sin6_addr.s6_addr16[7], | ||
85 | (unsigned int)ntohs(in6->sin6_port)); | ||
86 | break; | 74 | break; |
87 | 75 | ||
88 | default: | 76 | default: |
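The rewritten pr_addr() above relies on the kernel's printf extensions instead of open-coding the formatting: %pI4 prints a struct in_addr in dotted-quad form, %pI6c prints a struct in6_addr in compressed RFC 5952 form (plain %pI6 would print all eight 16-bit groups), and snprintf() bounds the output to MAX_ADDR_STR_LEN. A short sketch of the specifiers; these work only with the kernel's vsnprintf, not userspace printf:

/* Sketch of the address format specifiers used above (kernel-only). */
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>

static void show_addr_formats(const struct in_addr *a4, const struct in6_addr *a6)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "%pI4", a4);   /* e.g. "192.168.0.1" */
	snprintf(buf, sizeof(buf), "%pI6", a6);   /* full eight groups */
	snprintf(buf, sizeof(buf), "%pI6c", a6);  /* compressed, e.g. "::1" */
}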
@@ -215,12 +203,13 @@ static void set_sock_callbacks(struct socket *sock, | |||
215 | */ | 203 | */ |
216 | static struct socket *ceph_tcp_connect(struct ceph_connection *con) | 204 | static struct socket *ceph_tcp_connect(struct ceph_connection *con) |
217 | { | 205 | { |
218 | struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr; | 206 | struct sockaddr_storage *paddr = &con->peer_addr.in_addr; |
219 | struct socket *sock; | 207 | struct socket *sock; |
220 | int ret; | 208 | int ret; |
221 | 209 | ||
222 | BUG_ON(con->sock); | 210 | BUG_ON(con->sock); |
223 | ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); | 211 | ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, |
212 | IPPROTO_TCP, &sock); | ||
224 | if (ret) | 213 | if (ret) |
225 | return ERR_PTR(ret); | 214 | return ERR_PTR(ret); |
226 | con->sock = sock; | 215 | con->sock = sock; |
@@ -234,7 +223,8 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con) | |||
234 | 223 | ||
235 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); | 224 | dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); |
236 | 225 | ||
237 | ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK); | 226 | ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), |
227 | O_NONBLOCK); | ||
238 | if (ret == -EINPROGRESS) { | 228 | if (ret == -EINPROGRESS) { |
239 | dout("connect %s EINPROGRESS sk_state = %u\n", | 229 | dout("connect %s EINPROGRESS sk_state = %u\n", |
240 | pr_addr(&con->peer_addr.in_addr), | 230 | pr_addr(&con->peer_addr.in_addr), |
@@ -657,7 +647,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr, | |||
657 | dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, | 647 | dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, |
658 | con->connect_seq, global_seq, proto); | 648 | con->connect_seq, global_seq, proto); |
659 | 649 | ||
660 | con->out_connect.features = CEPH_FEATURE_SUPPORTED_CLIENT; | 650 | con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED_CLIENT); |
661 | con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); | 651 | con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); |
662 | con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); | 652 | con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); |
663 | con->out_connect.global_seq = cpu_to_le32(global_seq); | 653 | con->out_connect.global_seq = cpu_to_le32(global_seq); |
@@ -1009,19 +999,32 @@ int ceph_parse_ips(const char *c, const char *end, | |||
1009 | struct sockaddr_in *in4 = (void *)ss; | 999 | struct sockaddr_in *in4 = (void *)ss; |
1010 | struct sockaddr_in6 *in6 = (void *)ss; | 1000 | struct sockaddr_in6 *in6 = (void *)ss; |
1011 | int port; | 1001 | int port; |
1002 | char delim = ','; | ||
1003 | |||
1004 | if (*p == '[') { | ||
1005 | delim = ']'; | ||
1006 | p++; | ||
1007 | } | ||
1012 | 1008 | ||
1013 | memset(ss, 0, sizeof(*ss)); | 1009 | memset(ss, 0, sizeof(*ss)); |
1014 | if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, | 1010 | if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, |
1015 | ',', &ipend)) { | 1011 | delim, &ipend)) |
1016 | ss->ss_family = AF_INET; | 1012 | ss->ss_family = AF_INET; |
1017 | } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr, | 1013 | else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr, |
1018 | ',', &ipend)) { | 1014 | delim, &ipend)) |
1019 | ss->ss_family = AF_INET6; | 1015 | ss->ss_family = AF_INET6; |
1020 | } else { | 1016 | else |
1021 | goto bad; | 1017 | goto bad; |
1022 | } | ||
1023 | p = ipend; | 1018 | p = ipend; |
1024 | 1019 | ||
1020 | if (delim == ']') { | ||
1021 | if (*p != ']') { | ||
1022 | dout("missing matching ']'\n"); | ||
1023 | goto bad; | ||
1024 | } | ||
1025 | p++; | ||
1026 | } | ||
1027 | |||
1025 | /* port? */ | 1028 | /* port? */ |
1026 | if (p < end && *p == ':') { | 1029 | if (p < end && *p == ':') { |
1027 | port = 0; | 1030 | port = 0; |
@@ -1055,7 +1058,7 @@ int ceph_parse_ips(const char *c, const char *end, | |||
1055 | return 0; | 1058 | return 0; |
1056 | 1059 | ||
1057 | bad: | 1060 | bad: |
1058 | pr_err("parse_ips bad ip '%s'\n", c); | 1061 | pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); |
1059 | return -EINVAL; | 1062 | return -EINVAL; |
1060 | } | 1063 | } |
1061 | 1064 | ||
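With the hunk above, ceph_parse_ips() accepts the usual bracketed form for IPv6 literals: a leading '[' switches the address delimiter to ']', the closing bracket must be present, and an optional ':port' may follow. A standalone userspace sketch of the same bracket handling, using inet_pton() rather than the kernel's in4_pton()/in6_pton() helpers; the function name and structure here are invented for illustration, not kernel API:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>

static int parse_one_addr(const char *s, struct sockaddr_storage *ss, int *port)
{
	char buf[INET6_ADDRSTRLEN];
	const char *p = s, *end;
	char delim = ':';                 /* IPv4: address ends at the port colon */
	int af = AF_INET;
	void *dst = &((struct sockaddr_in *)ss)->sin_addr;

	memset(ss, 0, sizeof(*ss));
	if (*p == '[') {                  /* IPv6 literal: "[addr]:port" */
		p++;
		delim = ']';
		af = AF_INET6;
		dst = &((struct sockaddr_in6 *)ss)->sin6_addr;
	}
	end = strchr(p, delim);
	if (!end) {
		if (af == AF_INET6)
			return -1;        /* missing matching ']' */
		end = p + strlen(p);      /* IPv4 with no port */
	}
	if ((size_t)(end - p) >= sizeof(buf))
		return -1;
	memcpy(buf, p, end - p);
	buf[end - p] = '\0';
	if (inet_pton(af, buf, dst) != 1)
		return -1;
	ss->ss_family = af;
	p = (*end == ']') ? end + 1 : end;
	*port = (*p == ':') ? atoi(p + 1) : 0;
	return 0;
}

/* Example: both parse_one_addr("[::1]:6789", &ss, &port) and
 * parse_one_addr("10.0.0.1:6789", &ss, &port) succeed. */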
@@ -1396,10 +1399,12 @@ static int read_partial_message(struct ceph_connection *con) | |||
1396 | if (!con->in_msg) { | 1399 | if (!con->in_msg) { |
1397 | dout("got hdr type %d front %d data %d\n", con->in_hdr.type, | 1400 | dout("got hdr type %d front %d data %d\n", con->in_hdr.type, |
1398 | con->in_hdr.front_len, con->in_hdr.data_len); | 1401 | con->in_hdr.front_len, con->in_hdr.data_len); |
1402 | skip = 0; | ||
1399 | con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); | 1403 | con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); |
1400 | if (skip) { | 1404 | if (skip) { |
1401 | /* skip this message */ | 1405 | /* skip this message */ |
1402 | dout("alloc_msg said skip message\n"); | 1406 | dout("alloc_msg said skip message\n"); |
1407 | BUG_ON(con->in_msg); | ||
1403 | con->in_base_pos = -front_len - middle_len - data_len - | 1408 | con->in_base_pos = -front_len - middle_len - data_len - |
1404 | sizeof(m->footer); | 1409 | sizeof(m->footer); |
1405 | con->in_tag = CEPH_MSGR_TAG_READY; | 1410 | con->in_tag = CEPH_MSGR_TAG_READY; |
@@ -2013,20 +2018,20 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) | |||
2013 | { | 2018 | { |
2014 | mutex_lock(&con->mutex); | 2019 | mutex_lock(&con->mutex); |
2015 | if (!list_empty(&msg->list_head)) { | 2020 | if (!list_empty(&msg->list_head)) { |
2016 | dout("con_revoke %p msg %p\n", con, msg); | 2021 | dout("con_revoke %p msg %p - was on queue\n", con, msg); |
2017 | list_del_init(&msg->list_head); | 2022 | list_del_init(&msg->list_head); |
2018 | ceph_msg_put(msg); | 2023 | ceph_msg_put(msg); |
2019 | msg->hdr.seq = 0; | 2024 | msg->hdr.seq = 0; |
2020 | if (con->out_msg == msg) { | 2025 | } |
2021 | ceph_msg_put(con->out_msg); | 2026 | if (con->out_msg == msg) { |
2022 | con->out_msg = NULL; | 2027 | dout("con_revoke %p msg %p - was sending\n", con, msg); |
2023 | } | 2028 | con->out_msg = NULL; |
2024 | if (con->out_kvec_is_msg) { | 2029 | if (con->out_kvec_is_msg) { |
2025 | con->out_skip = con->out_kvec_bytes; | 2030 | con->out_skip = con->out_kvec_bytes; |
2026 | con->out_kvec_is_msg = false; | 2031 | con->out_kvec_is_msg = false; |
2027 | } | 2032 | } |
2028 | } else { | 2033 | ceph_msg_put(msg); |
2029 | dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg); | 2034 | msg->hdr.seq = 0; |
2030 | } | 2035 | } |
2031 | mutex_unlock(&con->mutex); | 2036 | mutex_unlock(&con->mutex); |
2032 | } | 2037 | } |
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 07a539906e67..54fe01c50706 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c | |||
@@ -345,7 +345,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, | |||
345 | 345 | ||
346 | out: | 346 | out: |
347 | mutex_unlock(&monc->mutex); | 347 | mutex_unlock(&monc->mutex); |
348 | wake_up(&client->auth_wq); | 348 | wake_up_all(&client->auth_wq); |
349 | } | 349 | } |
350 | 350 | ||
351 | /* | 351 | /* |
@@ -462,7 +462,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, | |||
462 | } | 462 | } |
463 | mutex_unlock(&monc->mutex); | 463 | mutex_unlock(&monc->mutex); |
464 | if (req) { | 464 | if (req) { |
465 | complete(&req->completion); | 465 | complete_all(&req->completion); |
466 | put_generic_request(req); | 466 | put_generic_request(req); |
467 | } | 467 | } |
468 | return; | 468 | return; |
@@ -718,14 +718,15 @@ static void handle_auth_reply(struct ceph_mon_client *monc, | |||
718 | monc->m_auth->front_max); | 718 | monc->m_auth->front_max); |
719 | if (ret < 0) { | 719 | if (ret < 0) { |
720 | monc->client->auth_err = ret; | 720 | monc->client->auth_err = ret; |
721 | wake_up(&monc->client->auth_wq); | 721 | wake_up_all(&monc->client->auth_wq); |
722 | } else if (ret > 0) { | 722 | } else if (ret > 0) { |
723 | __send_prepared_auth_request(monc, ret); | 723 | __send_prepared_auth_request(monc, ret); |
724 | } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { | 724 | } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { |
725 | dout("authenticated, starting session\n"); | 725 | dout("authenticated, starting session\n"); |
726 | 726 | ||
727 | monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; | 727 | monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; |
728 | monc->client->msgr->inst.name.num = monc->auth->global_id; | 728 | monc->client->msgr->inst.name.num = |
729 | cpu_to_le64(monc->auth->global_id); | ||
729 | 730 | ||
730 | __send_subscribe(monc); | 731 | __send_subscribe(monc); |
731 | __resend_generic_request(monc); | 732 | __resend_generic_request(monc); |
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index d25b4add85b4..e38522347898 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c | |||
@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, | |||
862 | if (req->r_callback) | 862 | if (req->r_callback) |
863 | req->r_callback(req, msg); | 863 | req->r_callback(req, msg); |
864 | else | 864 | else |
865 | complete(&req->r_completion); | 865 | complete_all(&req->r_completion); |
866 | 866 | ||
867 | if (flags & CEPH_OSD_FLAG_ONDISK) { | 867 | if (flags & CEPH_OSD_FLAG_ONDISK) { |
868 | if (req->r_safe_callback) | 868 | if (req->r_safe_callback) |
869 | req->r_safe_callback(req, msg); | 869 | req->r_safe_callback(req, msg); |
870 | complete(&req->r_safe_completion); /* fsync waiter */ | 870 | complete_all(&req->r_safe_completion); /* fsync waiter */ |
871 | } | 871 | } |
872 | 872 | ||
873 | done: | 873 | done: |
@@ -1083,7 +1083,7 @@ done: | |||
1083 | if (newmap) | 1083 | if (newmap) |
1084 | kick_requests(osdc, NULL); | 1084 | kick_requests(osdc, NULL); |
1085 | up_read(&osdc->map_sem); | 1085 | up_read(&osdc->map_sem); |
1086 | wake_up(&osdc->client->auth_wq); | 1086 | wake_up_all(&osdc->client->auth_wq); |
1087 | return; | 1087 | return; |
1088 | 1088 | ||
1089 | bad: | 1089 | bad: |
@@ -1344,7 +1344,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |||
1344 | int type = le16_to_cpu(msg->hdr.type); | 1344 | int type = le16_to_cpu(msg->hdr.type); |
1345 | 1345 | ||
1346 | if (!osd) | 1346 | if (!osd) |
1347 | return; | 1347 | goto out; |
1348 | osdc = osd->o_osdc; | 1348 | osdc = osd->o_osdc; |
1349 | 1349 | ||
1350 | switch (type) { | 1350 | switch (type) { |
@@ -1359,6 +1359,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |||
1359 | pr_err("received unknown message type %d %s\n", type, | 1359 | pr_err("received unknown message type %d %s\n", type, |
1360 | ceph_msg_type_name(type)); | 1360 | ceph_msg_type_name(type)); |
1361 | } | 1361 | } |
1362 | out: | ||
1362 | ceph_msg_put(msg); | 1363 | ceph_msg_put(msg); |
1363 | } | 1364 | } |
1364 | 1365 | ||
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index ddc656fb5c05..416d46adbf87 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c | |||
@@ -568,6 +568,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) | |||
568 | if (ev > CEPH_PG_POOL_VERSION) { | 568 | if (ev > CEPH_PG_POOL_VERSION) { |
569 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", | 569 | pr_warning("got unknown v %d > %d of ceph_pg_pool\n", |
570 | ev, CEPH_PG_POOL_VERSION); | 570 | ev, CEPH_PG_POOL_VERSION); |
571 | kfree(pi); | ||
571 | goto bad; | 572 | goto bad; |
572 | } | 573 | } |
573 | __decode_pool(p, pi); | 574 | __decode_pool(p, pi); |
@@ -707,6 +708,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
707 | newcrush = crush_decode(*p, min(*p+len, end)); | 708 | newcrush = crush_decode(*p, min(*p+len, end)); |
708 | if (IS_ERR(newcrush)) | 709 | if (IS_ERR(newcrush)) |
709 | return ERR_CAST(newcrush); | 710 | return ERR_CAST(newcrush); |
711 | *p += len; | ||
710 | } | 712 | } |
711 | 713 | ||
712 | /* new flags? */ | 714 | /* new flags? */ |
@@ -829,12 +831,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
829 | /* remove any? */ | 831 | /* remove any? */ |
830 | while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, | 832 | while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, |
831 | node)->pgid, pgid) <= 0) { | 833 | node)->pgid, pgid) <= 0) { |
832 | struct rb_node *cur = rbp; | 834 | struct ceph_pg_mapping *cur = |
835 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
836 | |||
833 | rbp = rb_next(rbp); | 837 | rbp = rb_next(rbp); |
834 | dout(" removed pg_temp %llx\n", | 838 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); |
835 | *(u64 *)&rb_entry(cur, struct ceph_pg_mapping, | 839 | rb_erase(&cur->node, &map->pg_temp); |
836 | node)->pgid); | 840 | kfree(cur); |
837 | rb_erase(cur, &map->pg_temp); | ||
838 | } | 841 | } |
839 | 842 | ||
840 | if (pglen) { | 843 | if (pglen) { |
@@ -850,19 +853,22 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
850 | for (j = 0; j < pglen; j++) | 853 | for (j = 0; j < pglen; j++) |
851 | pg->osds[j] = ceph_decode_32(p); | 854 | pg->osds[j] = ceph_decode_32(p); |
852 | err = __insert_pg_mapping(pg, &map->pg_temp); | 855 | err = __insert_pg_mapping(pg, &map->pg_temp); |
853 | if (err) | 856 | if (err) { |
857 | kfree(pg); | ||
854 | goto bad; | 858 | goto bad; |
859 | } | ||
855 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, | 860 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, |
856 | pglen); | 861 | pglen); |
857 | } | 862 | } |
858 | } | 863 | } |
859 | while (rbp) { | 864 | while (rbp) { |
860 | struct rb_node *cur = rbp; | 865 | struct ceph_pg_mapping *cur = |
866 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
867 | |||
861 | rbp = rb_next(rbp); | 868 | rbp = rb_next(rbp); |
862 | dout(" removed pg_temp %llx\n", | 869 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); |
863 | *(u64 *)&rb_entry(cur, struct ceph_pg_mapping, | 870 | rb_erase(&cur->node, &map->pg_temp); |
864 | node)->pgid); | 871 | kfree(cur); |
865 | rb_erase(cur, &map->pg_temp); | ||
866 | } | 872 | } |
867 | 873 | ||
868 | /* ignore the rest */ | 874 | /* ignore the rest */ |
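Both pg_temp loops in osdmap_apply_incremental() above now look up the containing ceph_pg_mapping with rb_entry() and kfree() it after rb_erase(); the old code erased the rb_node but never freed the mapping, leaking it. A generic kernel-C sketch of that erase-and-free pattern, with illustrative types rather than the ceph structures:

/* Illustrative sketch of safely erasing and freeing rbtree entries. */
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	struct rb_node node;
	u64 key;
};

static void prune_upto(struct rb_root *root, u64 limit)
{
	struct rb_node *rbp = rb_first(root);

	while (rbp) {
		struct item *cur = rb_entry(rbp, struct item, node);

		if (cur->key > limit)
			break;
		rbp = rb_next(rbp);         /* advance before erasing */
		rb_erase(&cur->node, root);
		kfree(cur);                 /* free the container, not just the node */
	}
}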
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 80f352596807..5739fd7f88b4 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -131,6 +131,15 @@ config CIFS_DFS_UPCALL | |||
131 | IP addresses) which is needed for implicit mounts of DFS junction | 131 | IP addresses) which is needed for implicit mounts of DFS junction |
132 | points. If unsure, say N. | 132 | points. If unsure, say N. |
133 | 133 | ||
134 | config CIFS_FSCACHE | ||
135 | bool "Provide CIFS client caching support (EXPERIMENTAL)" | ||
136 | depends on EXPERIMENTAL | ||
137 | depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y | ||
138 | help | ||
139 | Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data | ||
140 | to be cached locally on disk through the general filesystem cache | ||
141 | manager. If unsure, say N. | ||
142 | |||
134 | config CIFS_EXPERIMENTAL | 143 | config CIFS_EXPERIMENTAL |
135 | bool "CIFS Experimental Features (EXPERIMENTAL)" | 144 | bool "CIFS Experimental Features (EXPERIMENTAL)" |
136 | depends on CIFS && EXPERIMENTAL | 145 | depends on CIFS && EXPERIMENTAL |
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 9948c0030e86..adefa60a9bdc 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile | |||
@@ -11,3 +11,5 @@ cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ | |||
11 | cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o | 11 | cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o |
12 | 12 | ||
13 | cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o | 13 | cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o |
14 | |||
15 | cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o | ||
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c new file mode 100644 index 000000000000..224d7bbd1fcc --- /dev/null +++ b/fs/cifs/cache.c | |||
@@ -0,0 +1,331 @@ | |||
1 | /* | ||
2 | * fs/cifs/cache.c - CIFS filesystem cache index structure definitions | ||
3 | * | ||
4 | * Copyright (c) 2010 Novell, Inc. | ||
5 | * Author(s): Suresh Jayaraman <sjayaraman@suse.de> | ||
6 | * | ||
7 | * This library is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU Lesser General Public License as published | ||
9 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This library is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
15 | * the GNU Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public License | ||
18 | * along with this library; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | #include "fscache.h" | ||
22 | #include "cifs_debug.h" | ||
23 | |||
24 | /* | ||
25 | * CIFS filesystem definition for FS-Cache | ||
26 | */ | ||
27 | struct fscache_netfs cifs_fscache_netfs = { | ||
28 | .name = "cifs", | ||
29 | .version = 0, | ||
30 | }; | ||
31 | |||
32 | /* | ||
33 | * Register CIFS for caching with FS-Cache | ||
34 | */ | ||
35 | int cifs_fscache_register(void) | ||
36 | { | ||
37 | return fscache_register_netfs(&cifs_fscache_netfs); | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * Unregister CIFS for caching | ||
42 | */ | ||
43 | void cifs_fscache_unregister(void) | ||
44 | { | ||
45 | fscache_unregister_netfs(&cifs_fscache_netfs); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Key layout of CIFS server cache index object | ||
50 | */ | ||
51 | struct cifs_server_key { | ||
52 | uint16_t family; /* address family */ | ||
53 | uint16_t port; /* IP port */ | ||
54 | union { | ||
55 | struct in_addr ipv4_addr; | ||
56 | struct in6_addr ipv6_addr; | ||
57 | } addr[0]; | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * Server object keyed by {IPaddress,port,family} tuple | ||
62 | */ | ||
63 | static uint16_t cifs_server_get_key(const void *cookie_netfs_data, | ||
64 | void *buffer, uint16_t maxbuf) | ||
65 | { | ||
66 | const struct TCP_Server_Info *server = cookie_netfs_data; | ||
67 | const struct sockaddr *sa = (struct sockaddr *) &server->addr.sockAddr; | ||
68 | struct cifs_server_key *key = buffer; | ||
69 | uint16_t key_len = sizeof(struct cifs_server_key); | ||
70 | |||
71 | memset(key, 0, key_len); | ||
72 | |||
73 | /* | ||
74 | * Should not be a problem as sin_family/sin6_family overlays | ||
75 | * sa_family field | ||
76 | */ | ||
77 | switch (sa->sa_family) { | ||
78 | case AF_INET: | ||
79 | key->family = server->addr.sockAddr.sin_family; | ||
80 | key->port = server->addr.sockAddr.sin_port; | ||
81 | key->addr[0].ipv4_addr = server->addr.sockAddr.sin_addr; | ||
82 | key_len += sizeof(key->addr[0].ipv4_addr); | ||
83 | break; | ||
84 | |||
85 | case AF_INET6: | ||
86 | key->family = server->addr.sockAddr6.sin6_family; | ||
87 | key->port = server->addr.sockAddr6.sin6_port; | ||
88 | key->addr[0].ipv6_addr = server->addr.sockAddr6.sin6_addr; | ||
89 | key_len += sizeof(key->addr[0].ipv6_addr); | ||
90 | break; | ||
91 | |||
92 | default: | ||
93 | cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family); | ||
94 | key_len = 0; | ||
95 | break; | ||
96 | } | ||
97 | |||
98 | return key_len; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Server object for FS-Cache | ||
103 | */ | ||
104 | const struct fscache_cookie_def cifs_fscache_server_index_def = { | ||
105 | .name = "CIFS.server", | ||
106 | .type = FSCACHE_COOKIE_TYPE_INDEX, | ||
107 | .get_key = cifs_server_get_key, | ||
108 | }; | ||
109 | |||
110 | /* | ||
111 | * Auxiliary data attached to CIFS superblock within the cache | ||
112 | */ | ||
113 | struct cifs_fscache_super_auxdata { | ||
114 | u64 resource_id; /* unique server resource id */ | ||
115 | }; | ||
116 | |||
117 | static char *extract_sharename(const char *treename) | ||
118 | { | ||
119 | const char *src; | ||
120 | char *delim, *dst; | ||
121 | int len; | ||
122 | |||
123 | /* skip double chars at the beginning */ | ||
124 | src = treename + 2; | ||
125 | |||
126 | /* share name is always preceded by '\\' now */ | ||
127 | delim = strchr(src, '\\'); | ||
128 | if (!delim) | ||
129 | return ERR_PTR(-EINVAL); | ||
130 | delim++; | ||
131 | len = strlen(delim); | ||
132 | |||
133 | /* caller has to free the memory */ | ||
134 | dst = kstrndup(delim, len, GFP_KERNEL); | ||
135 | if (!dst) | ||
136 | return ERR_PTR(-ENOMEM); | ||
137 | |||
138 | return dst; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * Superblock object currently keyed by share name | ||
143 | */ | ||
144 | static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer, | ||
145 | uint16_t maxbuf) | ||
146 | { | ||
147 | const struct cifsTconInfo *tcon = cookie_netfs_data; | ||
148 | char *sharename; | ||
149 | uint16_t len; | ||
150 | |||
151 | sharename = extract_sharename(tcon->treeName); | ||
152 | if (IS_ERR(sharename)) { | ||
153 | cFYI(1, "CIFS: couldn't extract sharename\n"); | ||
154 | sharename = NULL; | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | len = strlen(sharename); | ||
159 | if (len > maxbuf) | ||
160 | return 0; | ||
161 | |||
162 | memcpy(buffer, sharename, len); | ||
163 | |||
164 | kfree(sharename); | ||
165 | |||
166 | return len; | ||
167 | } | ||
168 | |||
169 | static uint16_t | ||
170 | cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer, | ||
171 | uint16_t maxbuf) | ||
172 | { | ||
173 | struct cifs_fscache_super_auxdata auxdata; | ||
174 | const struct cifsTconInfo *tcon = cookie_netfs_data; | ||
175 | |||
176 | memset(&auxdata, 0, sizeof(auxdata)); | ||
177 | auxdata.resource_id = tcon->resource_id; | ||
178 | |||
179 | if (maxbuf > sizeof(auxdata)) | ||
180 | maxbuf = sizeof(auxdata); | ||
181 | |||
182 | memcpy(buffer, &auxdata, maxbuf); | ||
183 | |||
184 | return maxbuf; | ||
185 | } | ||
186 | |||
187 | static enum | ||
188 | fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data, | ||
189 | const void *data, | ||
190 | uint16_t datalen) | ||
191 | { | ||
192 | struct cifs_fscache_super_auxdata auxdata; | ||
193 | const struct cifsTconInfo *tcon = cookie_netfs_data; | ||
194 | |||
195 | if (datalen != sizeof(auxdata)) | ||
196 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
197 | |||
198 | memset(&auxdata, 0, sizeof(auxdata)); | ||
199 | auxdata.resource_id = tcon->resource_id; | ||
200 | |||
201 | if (memcmp(data, &auxdata, datalen) != 0) | ||
202 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
203 | |||
204 | return FSCACHE_CHECKAUX_OKAY; | ||
205 | } | ||
206 | |||
207 | /* | ||
208 | * Superblock object for FS-Cache | ||
209 | */ | ||
210 | const struct fscache_cookie_def cifs_fscache_super_index_def = { | ||
211 | .name = "CIFS.super", | ||
212 | .type = FSCACHE_COOKIE_TYPE_INDEX, | ||
213 | .get_key = cifs_super_get_key, | ||
214 | .get_aux = cifs_fscache_super_get_aux, | ||
215 | .check_aux = cifs_fscache_super_check_aux, | ||
216 | }; | ||
217 | |||
218 | /* | ||
219 | * Auxiliary data attached to CIFS inode within the cache | ||
220 | */ | ||
221 | struct cifs_fscache_inode_auxdata { | ||
222 | struct timespec last_write_time; | ||
223 | struct timespec last_change_time; | ||
224 | u64 eof; | ||
225 | }; | ||
226 | |||
227 | static uint16_t cifs_fscache_inode_get_key(const void *cookie_netfs_data, | ||
228 | void *buffer, uint16_t maxbuf) | ||
229 | { | ||
230 | const struct cifsInodeInfo *cifsi = cookie_netfs_data; | ||
231 | uint16_t keylen; | ||
232 | |||
233 | /* use the UniqueId as the key */ | ||
234 | keylen = sizeof(cifsi->uniqueid); | ||
235 | if (keylen > maxbuf) | ||
236 | keylen = 0; | ||
237 | else | ||
238 | memcpy(buffer, &cifsi->uniqueid, keylen); | ||
239 | |||
240 | return keylen; | ||
241 | } | ||
242 | |||
243 | static void | ||
244 | cifs_fscache_inode_get_attr(const void *cookie_netfs_data, uint64_t *size) | ||
245 | { | ||
246 | const struct cifsInodeInfo *cifsi = cookie_netfs_data; | ||
247 | |||
248 | *size = cifsi->vfs_inode.i_size; | ||
249 | } | ||
250 | |||
251 | static uint16_t | ||
252 | cifs_fscache_inode_get_aux(const void *cookie_netfs_data, void *buffer, | ||
253 | uint16_t maxbuf) | ||
254 | { | ||
255 | struct cifs_fscache_inode_auxdata auxdata; | ||
256 | const struct cifsInodeInfo *cifsi = cookie_netfs_data; | ||
257 | |||
258 | memset(&auxdata, 0, sizeof(auxdata)); | ||
259 | auxdata.eof = cifsi->server_eof; | ||
260 | auxdata.last_write_time = cifsi->vfs_inode.i_mtime; | ||
261 | auxdata.last_change_time = cifsi->vfs_inode.i_ctime; | ||
262 | |||
263 | if (maxbuf > sizeof(auxdata)) | ||
264 | maxbuf = sizeof(auxdata); | ||
265 | |||
266 | memcpy(buffer, &auxdata, maxbuf); | ||
267 | |||
268 | return maxbuf; | ||
269 | } | ||
270 | |||
271 | static enum | ||
272 | fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data, | ||
273 | const void *data, | ||
274 | uint16_t datalen) | ||
275 | { | ||
276 | struct cifs_fscache_inode_auxdata auxdata; | ||
277 | struct cifsInodeInfo *cifsi = cookie_netfs_data; | ||
278 | |||
279 | if (datalen != sizeof(auxdata)) | ||
280 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
281 | |||
282 | memset(&auxdata, 0, sizeof(auxdata)); | ||
283 | auxdata.eof = cifsi->server_eof; | ||
284 | auxdata.last_write_time = cifsi->vfs_inode.i_mtime; | ||
285 | auxdata.last_change_time = cifsi->vfs_inode.i_ctime; | ||
286 | |||
287 | if (memcmp(data, &auxdata, datalen) != 0) | ||
288 | return FSCACHE_CHECKAUX_OBSOLETE; | ||
289 | |||
290 | return FSCACHE_CHECKAUX_OKAY; | ||
291 | } | ||
292 | |||
293 | static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data) | ||
294 | { | ||
295 | struct cifsInodeInfo *cifsi = cookie_netfs_data; | ||
296 | struct pagevec pvec; | ||
297 | pgoff_t first; | ||
298 | int loop, nr_pages; | ||
299 | |||
300 | pagevec_init(&pvec, 0); | ||
301 | first = 0; | ||
302 | |||
303 | cFYI(1, "cifs inode 0x%p now uncached", cifsi); | ||
304 | |||
305 | for (;;) { | ||
306 | nr_pages = pagevec_lookup(&pvec, | ||
307 | cifsi->vfs_inode.i_mapping, first, | ||
308 | PAGEVEC_SIZE - pagevec_count(&pvec)); | ||
309 | if (!nr_pages) | ||
310 | break; | ||
311 | |||
312 | for (loop = 0; loop < nr_pages; loop++) | ||
313 | ClearPageFsCache(pvec.pages[loop]); | ||
314 | |||
315 | first = pvec.pages[nr_pages - 1]->index + 1; | ||
316 | |||
317 | pvec.nr = nr_pages; | ||
318 | pagevec_release(&pvec); | ||
319 | cond_resched(); | ||
320 | } | ||
321 | } | ||
322 | |||
323 | const struct fscache_cookie_def cifs_fscache_inode_object_def = { | ||
324 | .name = "CIFS.uniqueid", | ||
325 | .type = FSCACHE_COOKIE_TYPE_DATAFILE, | ||
326 | .get_key = cifs_fscache_inode_get_key, | ||
327 | .get_attr = cifs_fscache_inode_get_attr, | ||
328 | .get_aux = cifs_fscache_inode_get_aux, | ||
329 | .check_aux = cifs_fscache_inode_check_aux, | ||
330 | .now_uncached = cifs_fscache_inode_now_uncached, | ||
331 | }; | ||
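The three cookie definitions in the new fs/cifs/cache.c (server index, superblock index, inode data object) are consumed by the companion fs/cifs/fscache.c added elsewhere in this merge. As a rough sketch of how a netfs of this kernel generation wires them up -- written against the old three-argument fscache_acquire_cookie() API, with the helper names here invented for illustration rather than copied from the patch:

/* Sketch only: how the index definitions above are typically used. */
#include <linux/fscache.h>
#include "cifsglob.h"
#include "fscache.h"

static struct fscache_cookie *example_server_cookie(struct TCP_Server_Info *server)
{
	/*
	 * Parent is the "cifs" netfs primary index registered by
	 * cifs_fscache_register(); the child key is produced by
	 * cifs_fscache_server_index_def.get_key() shown above.
	 */
	return fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
				      &cifs_fscache_server_index_def,
				      server);
}

static void example_drop_cookie(struct fscache_cookie *cookie)
{
	fscache_relinquish_cookie(cookie, 0);   /* 0 = keep the cached data */
}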
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index ac19a6f3dae0..dc1ed50ea06e 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -230,28 +230,22 @@ compose_mount_options_err: | |||
230 | goto compose_mount_options_out; | 230 | goto compose_mount_options_out; |
231 | } | 231 | } |
232 | 232 | ||
233 | 233 | /** | |
234 | static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent, | 234 | * cifs_dfs_do_refmount - mounts specified path using provided referral |
235 | struct dentry *dentry, const struct dfs_info3_param *ref) | 235 | * @cifs_sb: parent/root superblock |
236 | * @fullpath: full path in UNC format | ||
237 | * @ref: server's referral | ||
238 | */ | ||
239 | static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb, | ||
240 | const char *fullpath, const struct dfs_info3_param *ref) | ||
236 | { | 241 | { |
237 | struct cifs_sb_info *cifs_sb; | ||
238 | struct vfsmount *mnt; | 242 | struct vfsmount *mnt; |
239 | char *mountdata; | 243 | char *mountdata; |
240 | char *devname = NULL; | 244 | char *devname = NULL; |
241 | char *fullpath; | ||
242 | |||
243 | cifs_sb = CIFS_SB(dentry->d_inode->i_sb); | ||
244 | /* | ||
245 | * this function gives us a path with a double backslash prefix. We | ||
246 | * require a single backslash for DFS. | ||
247 | */ | ||
248 | fullpath = build_path_from_dentry(dentry); | ||
249 | if (!fullpath) | ||
250 | return ERR_PTR(-ENOMEM); | ||
251 | 245 | ||
246 | /* strip first '\' from fullpath */ | ||
252 | mountdata = cifs_compose_mount_options(cifs_sb->mountdata, | 247 | mountdata = cifs_compose_mount_options(cifs_sb->mountdata, |
253 | fullpath + 1, ref, &devname); | 248 | fullpath + 1, ref, &devname); |
254 | kfree(fullpath); | ||
255 | 249 | ||
256 | if (IS_ERR(mountdata)) | 250 | if (IS_ERR(mountdata)) |
257 | return (struct vfsmount *)mountdata; | 251 | return (struct vfsmount *)mountdata; |
@@ -357,8 +351,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) | |||
357 | rc = -EINVAL; | 351 | rc = -EINVAL; |
358 | goto out_err; | 352 | goto out_err; |
359 | } | 353 | } |
360 | mnt = cifs_dfs_do_refmount(nd->path.mnt, | 354 | mnt = cifs_dfs_do_refmount(cifs_sb, |
361 | nd->path.dentry, referrals + i); | 355 | full_path, referrals + i); |
362 | cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__, | 356 | cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__, |
363 | referrals[i].node_name, mnt); | 357 | referrals[i].node_name, mnt); |
364 | 358 | ||
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 246a167cb913..9e771450c3b8 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */ | 35 | #define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */ |
36 | #define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */ | 36 | #define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */ |
37 | #define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/ | 37 | #define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/ |
38 | #define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */ | ||
38 | 39 | ||
39 | struct cifs_sb_info { | 40 | struct cifs_sb_info { |
40 | struct cifsTconInfo *tcon; /* primary mount */ | 41 | struct cifsTconInfo *tcon; /* primary mount */ |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 379bd7d9c05f..6effccff85a5 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -144,6 +144,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
144 | sprintf(dp, ";uid=0x%x", sesInfo->linux_uid); | 144 | sprintf(dp, ";uid=0x%x", sesInfo->linux_uid); |
145 | 145 | ||
146 | dp = description + strlen(description); | 146 | dp = description + strlen(description); |
147 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); | ||
148 | |||
149 | dp = description + strlen(description); | ||
147 | sprintf(dp, ";user=%s", sesInfo->userName); | 150 | sprintf(dp, ";user=%s", sesInfo->userName); |
148 | 151 | ||
149 | dp = description + strlen(description); | 152 | dp = description + strlen(description); |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 78c02eb4cb1f..8a2cf129e535 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/key-type.h> | 47 | #include <linux/key-type.h> |
48 | #include "dns_resolve.h" | 48 | #include "dns_resolve.h" |
49 | #include "cifs_spnego.h" | 49 | #include "cifs_spnego.h" |
50 | #include "fscache.h" | ||
50 | #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ | 51 | #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ |
51 | 52 | ||
52 | int cifsFYI = 0; | 53 | int cifsFYI = 0; |
@@ -329,6 +330,12 @@ cifs_destroy_inode(struct inode *inode) | |||
329 | } | 330 | } |
330 | 331 | ||
331 | static void | 332 | static void |
333 | cifs_clear_inode(struct inode *inode) | ||
334 | { | ||
335 | cifs_fscache_release_inode_cookie(inode); | ||
336 | } | ||
337 | |||
338 | static void | ||
332 | cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) | 339 | cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) |
333 | { | 340 | { |
334 | seq_printf(s, ",addr="); | 341 | seq_printf(s, ",addr="); |
@@ -473,14 +480,25 @@ static int cifs_remount(struct super_block *sb, int *flags, char *data) | |||
473 | return 0; | 480 | return 0; |
474 | } | 481 | } |
475 | 482 | ||
483 | void cifs_drop_inode(struct inode *inode) | ||
484 | { | ||
485 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | ||
486 | |||
487 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) | ||
488 | return generic_drop_inode(inode); | ||
489 | |||
490 | return generic_delete_inode(inode); | ||
491 | } | ||
492 | |||
476 | static const struct super_operations cifs_super_ops = { | 493 | static const struct super_operations cifs_super_ops = { |
477 | .put_super = cifs_put_super, | 494 | .put_super = cifs_put_super, |
478 | .statfs = cifs_statfs, | 495 | .statfs = cifs_statfs, |
479 | .alloc_inode = cifs_alloc_inode, | 496 | .alloc_inode = cifs_alloc_inode, |
480 | .destroy_inode = cifs_destroy_inode, | 497 | .destroy_inode = cifs_destroy_inode, |
481 | /* .drop_inode = generic_delete_inode, | 498 | .drop_inode = cifs_drop_inode, |
482 | .delete_inode = cifs_delete_inode, */ /* Do not need above two | 499 | .clear_inode = cifs_clear_inode, |
483 | functions unless later we add lazy close of inodes or unless the | 500 | /* .delete_inode = cifs_delete_inode, */ /* Do not need above |
501 | function unless later we add lazy close of inodes or unless the | ||
484 | kernel forgets to call us with the same number of releases (closes) | 502 | kernel forgets to call us with the same number of releases (closes) |
485 | as opens */ | 503 | as opens */ |
486 | .show_options = cifs_show_options, | 504 | .show_options = cifs_show_options, |
@@ -892,6 +910,10 @@ init_cifs(void) | |||
892 | cFYI(1, "cifs_max_pending set to max of 256"); | 910 | cFYI(1, "cifs_max_pending set to max of 256"); |
893 | } | 911 | } |
894 | 912 | ||
913 | rc = cifs_fscache_register(); | ||
914 | if (rc) | ||
915 | goto out; | ||
916 | |||
895 | rc = cifs_init_inodecache(); | 917 | rc = cifs_init_inodecache(); |
896 | if (rc) | 918 | if (rc) |
897 | goto out_clean_proc; | 919 | goto out_clean_proc; |
@@ -913,7 +935,7 @@ init_cifs(void) | |||
913 | goto out_unregister_filesystem; | 935 | goto out_unregister_filesystem; |
914 | #endif | 936 | #endif |
915 | #ifdef CONFIG_CIFS_DFS_UPCALL | 937 | #ifdef CONFIG_CIFS_DFS_UPCALL |
916 | rc = register_key_type(&key_type_dns_resolver); | 938 | rc = cifs_init_dns_resolver(); |
917 | if (rc) | 939 | if (rc) |
918 | goto out_unregister_key_type; | 940 | goto out_unregister_key_type; |
919 | #endif | 941 | #endif |
@@ -925,7 +947,7 @@ init_cifs(void) | |||
925 | 947 | ||
926 | out_unregister_resolver_key: | 948 | out_unregister_resolver_key: |
927 | #ifdef CONFIG_CIFS_DFS_UPCALL | 949 | #ifdef CONFIG_CIFS_DFS_UPCALL |
928 | unregister_key_type(&key_type_dns_resolver); | 950 | cifs_exit_dns_resolver(); |
929 | out_unregister_key_type: | 951 | out_unregister_key_type: |
930 | #endif | 952 | #endif |
931 | #ifdef CONFIG_CIFS_UPCALL | 953 | #ifdef CONFIG_CIFS_UPCALL |
@@ -941,6 +963,8 @@ init_cifs(void) | |||
941 | cifs_destroy_inodecache(); | 963 | cifs_destroy_inodecache(); |
942 | out_clean_proc: | 964 | out_clean_proc: |
943 | cifs_proc_clean(); | 965 | cifs_proc_clean(); |
966 | cifs_fscache_unregister(); | ||
967 | out: | ||
944 | return rc; | 968 | return rc; |
945 | } | 969 | } |
946 | 970 | ||
@@ -949,9 +973,10 @@ exit_cifs(void) | |||
949 | { | 973 | { |
950 | cFYI(DBG2, "exit_cifs"); | 974 | cFYI(DBG2, "exit_cifs"); |
951 | cifs_proc_clean(); | 975 | cifs_proc_clean(); |
976 | cifs_fscache_unregister(); | ||
952 | #ifdef CONFIG_CIFS_DFS_UPCALL | 977 | #ifdef CONFIG_CIFS_DFS_UPCALL |
953 | cifs_dfs_release_automount_timer(); | 978 | cifs_dfs_release_automount_timer(); |
954 | unregister_key_type(&key_type_dns_resolver); | 979 | cifs_exit_dns_resolver(); |
955 | #endif | 980 | #endif |
956 | #ifdef CONFIG_CIFS_UPCALL | 981 | #ifdef CONFIG_CIFS_UPCALL |
957 | unregister_key_type(&cifs_spnego_key_type); | 982 | unregister_key_type(&cifs_spnego_key_type); |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index a7eb65c84b1c..d82f5fb4761e 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -114,5 +114,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
114 | extern const struct export_operations cifs_export_ops; | 114 | extern const struct export_operations cifs_export_ops; |
115 | #endif /* EXPERIMENTAL */ | 115 | #endif /* EXPERIMENTAL */ |
116 | 116 | ||
117 | #define CIFS_VERSION "1.64" | 117 | #define CIFS_VERSION "1.65" |
118 | #endif /* _CIFSFS_H */ | 118 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a88479ceaad5..59906146ad36 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -16,6 +16,9 @@ | |||
16 | * the GNU Lesser General Public License for more details. | 16 | * the GNU Lesser General Public License for more details. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | #ifndef _CIFS_GLOB_H | ||
20 | #define _CIFS_GLOB_H | ||
21 | |||
19 | #include <linux/in.h> | 22 | #include <linux/in.h> |
20 | #include <linux/in6.h> | 23 | #include <linux/in6.h> |
21 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
@@ -34,7 +37,7 @@ | |||
34 | #define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */ | 37 | #define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */ |
35 | #define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null | 38 | #define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null |
36 | termination then *2 for unicode versions */ | 39 | termination then *2 for unicode versions */ |
37 | #define MAX_PASSWORD_SIZE 16 | 40 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ |
38 | 41 | ||
39 | #define CIFS_MIN_RCV_POOL 4 | 42 | #define CIFS_MIN_RCV_POOL 4 |
40 | 43 | ||
@@ -80,8 +83,7 @@ enum statusEnum { | |||
80 | }; | 83 | }; |
81 | 84 | ||
82 | enum securityEnum { | 85 | enum securityEnum { |
83 | PLAINTXT = 0, /* Legacy with Plaintext passwords */ | 86 | LANMAN = 0, /* Legacy LANMAN auth */ |
84 | LANMAN, /* Legacy LANMAN auth */ | ||
85 | NTLM, /* Legacy NTLM012 auth with NTLM hash */ | 87 | NTLM, /* Legacy NTLM012 auth with NTLM hash */ |
86 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ | 88 | NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ |
87 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ | 89 | RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ |
@@ -142,7 +144,6 @@ struct TCP_Server_Info { | |||
142 | struct list_head pending_mid_q; | 144 | struct list_head pending_mid_q; |
143 | void *Server_NlsInfo; /* BB - placeholder for future NLS info */ | 145 | void *Server_NlsInfo; /* BB - placeholder for future NLS info */ |
144 | unsigned short server_codepage; /* codepage for the server */ | 146 | unsigned short server_codepage; /* codepage for the server */ |
145 | unsigned long ip_address; /* IP addr for the server if known */ | ||
146 | enum protocolEnum protocolType; | 147 | enum protocolEnum protocolType; |
147 | char versionMajor; | 148 | char versionMajor; |
148 | char versionMinor; | 149 | char versionMinor; |
@@ -190,19 +191,9 @@ struct TCP_Server_Info { | |||
190 | bool sec_mskerberos; /* supports legacy MS Kerberos */ | 191 | bool sec_mskerberos; /* supports legacy MS Kerberos */ |
191 | bool sec_kerberosu2u; /* supports U2U Kerberos */ | 192 | bool sec_kerberosu2u; /* supports U2U Kerberos */ |
192 | bool sec_ntlmssp; /* supports NTLMSSP */ | 193 | bool sec_ntlmssp; /* supports NTLMSSP */ |
193 | }; | 194 | #ifdef CONFIG_CIFS_FSCACHE |
194 | 195 | struct fscache_cookie *fscache; /* client index cache cookie */ | |
195 | /* | 196 | #endif |
196 | * The following is our shortcut to user information. We surface the uid, | ||
197 | * and name. We always get the password on the fly in case it | ||
198 | * has changed. We also hang a list of sessions owned by this user off here. | ||
199 | */ | ||
200 | struct cifsUidInfo { | ||
201 | struct list_head userList; | ||
202 | struct list_head sessionList; /* SMB sessions for this user */ | ||
203 | uid_t linux_uid; | ||
204 | char user[MAX_USERNAME_SIZE + 1]; /* ascii name of user */ | ||
205 | /* BB may need ptr or callback for PAM or WinBind info */ | ||
206 | }; | 197 | }; |
207 | 198 | ||
208 | /* | 199 | /* |
@@ -212,9 +203,6 @@ struct cifsSesInfo { | |||
212 | struct list_head smb_ses_list; | 203 | struct list_head smb_ses_list; |
213 | struct list_head tcon_list; | 204 | struct list_head tcon_list; |
214 | struct mutex session_mutex; | 205 | struct mutex session_mutex; |
215 | #if 0 | ||
216 | struct cifsUidInfo *uidInfo; /* pointer to user info */ | ||
217 | #endif | ||
218 | struct TCP_Server_Info *server; /* pointer to server info */ | 206 | struct TCP_Server_Info *server; /* pointer to server info */ |
219 | int ses_count; /* reference counter */ | 207 | int ses_count; /* reference counter */ |
220 | enum statusEnum status; | 208 | enum statusEnum status; |
@@ -226,7 +214,8 @@ struct cifsSesInfo { | |||
226 | char *serverNOS; /* name of network operating system of server */ | 214 | char *serverNOS; /* name of network operating system of server */ |
227 | char *serverDomain; /* security realm of server */ | 215 | char *serverDomain; /* security realm of server */ |
228 | int Suid; /* remote smb uid */ | 216 | int Suid; /* remote smb uid */ |
229 | uid_t linux_uid; /* local Linux uid */ | 217 | uid_t linux_uid; /* overriding owner of files on the mount */ |
218 | uid_t cred_uid; /* owner of credentials */ | ||
230 | int capabilities; | 219 | int capabilities; |
231 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for | 220 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for |
232 | TCP names - will ipv6 and sctp addresses fit? */ | 221 | TCP names - will ipv6 and sctp addresses fit? */ |
@@ -311,6 +300,10 @@ struct cifsTconInfo { | |||
311 | bool local_lease:1; /* check leases (only) on local system not remote */ | 300 | bool local_lease:1; /* check leases (only) on local system not remote */ |
312 | bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */ | 301 | bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */ |
313 | bool need_reconnect:1; /* connection reset, tid now invalid */ | 302 | bool need_reconnect:1; /* connection reset, tid now invalid */ |
303 | #ifdef CONFIG_CIFS_FSCACHE | ||
304 | u64 resource_id; /* server resource id */ | ||
305 | struct fscache_cookie *fscache; /* cookie for share */ | ||
306 | #endif | ||
314 | /* BB add field for back pointer to sb struct(s)? */ | 307 | /* BB add field for back pointer to sb struct(s)? */ |
315 | }; | 308 | }; |
316 | 309 | ||
@@ -398,6 +391,9 @@ struct cifsInodeInfo { | |||
398 | bool invalid_mapping:1; /* pagecache is invalid */ | 391 | bool invalid_mapping:1; /* pagecache is invalid */ |
399 | u64 server_eof; /* current file size on server */ | 392 | u64 server_eof; /* current file size on server */ |
400 | u64 uniqueid; /* server inode number */ | 393 | u64 uniqueid; /* server inode number */ |
394 | #ifdef CONFIG_CIFS_FSCACHE | ||
395 | struct fscache_cookie *fscache; | ||
396 | #endif | ||
401 | struct inode vfs_inode; | 397 | struct inode vfs_inode; |
402 | }; | 398 | }; |
403 | 399 | ||
@@ -733,3 +729,5 @@ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ | |||
733 | GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ | 729 | GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ |
734 | 730 | ||
735 | extern const struct slow_work_ops cifs_oplock_break_ops; | 731 | extern const struct slow_work_ops cifs_oplock_break_ops; |
732 | |||
733 | #endif /* _CIFS_GLOB_H */ | ||
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index fb1657e0fdb8..2eaebbd31132 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -86,7 +86,9 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr); | |||
86 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); | 86 | extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); |
87 | extern int decode_negTokenInit(unsigned char *security_blob, int length, | 87 | extern int decode_negTokenInit(unsigned char *security_blob, int length, |
88 | struct TCP_Server_Info *server); | 88 | struct TCP_Server_Info *server); |
89 | extern int cifs_convert_address(char *src, void *dst); | 89 | extern int cifs_convert_address(struct sockaddr *dst, char *src); |
90 | extern int cifs_fill_sockaddr(struct sockaddr *dst, char *src, | ||
91 | unsigned short int port); | ||
90 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); | 92 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); |
91 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 93 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
92 | const struct cifsTconInfo *, int /* length of | 94 | const struct cifsTconInfo *, int /* length of |
@@ -106,7 +108,6 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, | |||
106 | __u16 fileHandle, struct file *file, | 108 | __u16 fileHandle, struct file *file, |
107 | struct vfsmount *mnt, unsigned int oflags); | 109 | struct vfsmount *mnt, unsigned int oflags); |
108 | extern int cifs_posix_open(char *full_path, struct inode **pinode, | 110 | extern int cifs_posix_open(char *full_path, struct inode **pinode, |
109 | struct vfsmount *mnt, | ||
110 | struct super_block *sb, | 111 | struct super_block *sb, |
111 | int mode, int oflags, | 112 | int mode, int oflags, |
112 | __u32 *poplock, __u16 *pnetfid, int xid); | 113 | __u32 *poplock, __u16 *pnetfid, int xid); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2208f06e4c45..2a43a0aca965 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include "nterr.h" | 48 | #include "nterr.h" |
49 | #include "rfc1002pdu.h" | 49 | #include "rfc1002pdu.h" |
50 | #include "cn_cifs.h" | 50 | #include "cn_cifs.h" |
51 | #include "fscache.h" | ||
51 | 52 | ||
52 | #define CIFS_PORT 445 | 53 | #define CIFS_PORT 445 |
53 | #define RFC1001_PORT 139 | 54 | #define RFC1001_PORT 139 |
@@ -66,6 +67,7 @@ struct smb_vol { | |||
66 | char *iocharset; /* local code page for mapping to and from Unicode */ | 67 | char *iocharset; /* local code page for mapping to and from Unicode */ |
67 | char source_rfc1001_name[16]; /* netbios name of client */ | 68 | char source_rfc1001_name[16]; /* netbios name of client */ |
68 | char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */ | 69 | char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */ |
70 | uid_t cred_uid; | ||
69 | uid_t linux_uid; | 71 | uid_t linux_uid; |
70 | gid_t linux_gid; | 72 | gid_t linux_gid; |
71 | mode_t file_mode; | 73 | mode_t file_mode; |
@@ -97,6 +99,7 @@ struct smb_vol { | |||
97 | bool noblocksnd:1; | 99 | bool noblocksnd:1; |
98 | bool noautotune:1; | 100 | bool noautotune:1; |
99 | bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ | 101 | bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ |
102 | bool fsc:1; /* enable fscache */ | ||
100 | unsigned int rsize; | 103 | unsigned int rsize; |
101 | unsigned int wsize; | 104 | unsigned int wsize; |
102 | bool sockopt_tcp_nodelay:1; | 105 | bool sockopt_tcp_nodelay:1; |
@@ -830,7 +833,8 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
830 | /* null target name indicates to use *SMBSERVR default called name | 833 | /* null target name indicates to use *SMBSERVR default called name |
831 | if we end up sending RFC1001 session initialize */ | 834 | if we end up sending RFC1001 session initialize */ |
832 | vol->target_rfc1001_name[0] = 0; | 835 | vol->target_rfc1001_name[0] = 0; |
833 | vol->linux_uid = current_uid(); /* use current_euid() instead? */ | 836 | vol->cred_uid = current_uid(); |
837 | vol->linux_uid = current_uid(); | ||
834 | vol->linux_gid = current_gid(); | 838 | vol->linux_gid = current_gid(); |
835 | 839 | ||
836 | /* default to only allowing write access to owner of the mount */ | 840 | /* default to only allowing write access to owner of the mount */ |
@@ -1257,6 +1261,12 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1257 | } else if ((strnicmp(data, "nocase", 6) == 0) || | 1261 | } else if ((strnicmp(data, "nocase", 6) == 0) || |
1258 | (strnicmp(data, "ignorecase", 10) == 0)) { | 1262 | (strnicmp(data, "ignorecase", 10) == 0)) { |
1259 | vol->nocase = 1; | 1263 | vol->nocase = 1; |
1264 | } else if (strnicmp(data, "mand", 4) == 0) { | ||
1265 | /* ignore */ | ||
1266 | } else if (strnicmp(data, "nomand", 6) == 0) { | ||
1267 | /* ignore */ | ||
1268 | } else if (strnicmp(data, "_netdev", 7) == 0) { | ||
1269 | /* ignore */ | ||
1260 | } else if (strnicmp(data, "brl", 3) == 0) { | 1270 | } else if (strnicmp(data, "brl", 3) == 0) { |
1261 | vol->nobrl = 0; | 1271 | vol->nobrl = 0; |
1262 | } else if ((strnicmp(data, "nobrl", 5) == 0) || | 1272 | } else if ((strnicmp(data, "nobrl", 5) == 0) || |
@@ -1331,6 +1341,8 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1331 | printk(KERN_WARNING "CIFS: Mount option noac not " | 1341 | printk(KERN_WARNING "CIFS: Mount option noac not " |
1332 | "supported. Instead set " | 1342 | "supported. Instead set " |
1333 | "/proc/fs/cifs/LookupCacheEnabled to 0\n"); | 1343 | "/proc/fs/cifs/LookupCacheEnabled to 0\n"); |
1344 | } else if (strnicmp(data, "fsc", 3) == 0) { | ||
1345 | vol->fsc = true; | ||
1334 | } else | 1346 | } else |
1335 | printk(KERN_WARNING "CIFS: Unknown mount option %s\n", | 1347 | printk(KERN_WARNING "CIFS: Unknown mount option %s\n", |
1336 | data); | 1348 | data); |
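The option-parsing hunks above follow the file's existing pattern: compare each token case-insensitively, set a flag in smb_vol (the new fsc bit), or deliberately swallow tokens such as mand, nomand and _netdev that other layers of the mount path consume. A hedged userspace sketch of that dispatch, with a made-up demo_vol structure and strncasecmp standing in for the kernel's strnicmp:

/* Userspace sketch of case-insensitive option dispatch; struct demo_vol
 * and the token list are illustrative, not the kernel's smb_vol. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

struct demo_vol {
	bool fsc;
	bool nocase;
};

static void parse_token(struct demo_vol *vol, const char *data)
{
	if (strncasecmp(data, "nocase", 6) == 0 ||
	    strncasecmp(data, "ignorecase", 10) == 0)
		vol->nocase = true;
	else if (strncasecmp(data, "mand", 4) == 0 ||
		 strncasecmp(data, "nomand", 6) == 0 ||
		 strncasecmp(data, "_netdev", 7) == 0)
		; /* consumed elsewhere in the mount path: ignore */
	else if (strncasecmp(data, "fsc", 3) == 0)
		vol->fsc = true;
	else
		fprintf(stderr, "unknown mount option %s\n", data);
}

int main(void)
{
	struct demo_vol vol = { 0 };
	const char *opts[] = { "fsc", "_netdev", "ignorecase", "bogus" };

	for (unsigned i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
		parse_token(&vol, opts[i]);
	printf("fsc=%d nocase=%d\n", vol.fsc, vol.nocase);
	return 0;
}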
@@ -1380,18 +1392,92 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
1380 | return 0; | 1392 | return 0; |
1381 | } | 1393 | } |
1382 | 1394 | ||
1395 | static bool | ||
1396 | match_address(struct TCP_Server_Info *server, struct sockaddr *addr) | ||
1397 | { | ||
1398 | struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; | ||
1399 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; | ||
1400 | |||
1401 | switch (addr->sa_family) { | ||
1402 | case AF_INET: | ||
1403 | if (addr4->sin_addr.s_addr != | ||
1404 | server->addr.sockAddr.sin_addr.s_addr) | ||
1405 | return false; | ||
1406 | if (addr4->sin_port && | ||
1407 | addr4->sin_port != server->addr.sockAddr.sin_port) | ||
1408 | return false; | ||
1409 | break; | ||
1410 | case AF_INET6: | ||
1411 | if (!ipv6_addr_equal(&addr6->sin6_addr, | ||
1412 | &server->addr.sockAddr6.sin6_addr)) | ||
1413 | return false; | ||
1414 | if (addr6->sin6_scope_id != | ||
1415 | server->addr.sockAddr6.sin6_scope_id) | ||
1416 | return false; | ||
1417 | if (addr6->sin6_port && | ||
1418 | addr6->sin6_port != server->addr.sockAddr6.sin6_port) | ||
1419 | return false; | ||
1420 | break; | ||
1421 | } | ||
1422 | |||
1423 | return true; | ||
1424 | } | ||
1425 | |||
1426 | static bool | ||
1427 | match_security(struct TCP_Server_Info *server, struct smb_vol *vol) | ||
1428 | { | ||
1429 | unsigned int secFlags; | ||
1430 | |||
1431 | if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) | ||
1432 | secFlags = vol->secFlg; | ||
1433 | else | ||
1434 | secFlags = global_secflags | vol->secFlg; | ||
1435 | |||
1436 | switch (server->secType) { | ||
1437 | case LANMAN: | ||
1438 | if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT))) | ||
1439 | return false; | ||
1440 | break; | ||
1441 | case NTLMv2: | ||
1442 | if (!(secFlags & CIFSSEC_MAY_NTLMV2)) | ||
1443 | return false; | ||
1444 | break; | ||
1445 | case NTLM: | ||
1446 | if (!(secFlags & CIFSSEC_MAY_NTLM)) | ||
1447 | return false; | ||
1448 | break; | ||
1449 | case Kerberos: | ||
1450 | if (!(secFlags & CIFSSEC_MAY_KRB5)) | ||
1451 | return false; | ||
1452 | break; | ||
1453 | case RawNTLMSSP: | ||
1454 | if (!(secFlags & CIFSSEC_MAY_NTLMSSP)) | ||
1455 | return false; | ||
1456 | break; | ||
1457 | default: | ||
1458 | /* shouldn't happen */ | ||
1459 | return false; | ||
1460 | } | ||
1461 | |||
1462 | /* now check if signing mode is acceptable */ | ||
1463 | if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && | ||
1464 | (server->secMode & SECMODE_SIGN_REQUIRED)) | ||
1465 | return false; | ||
1466 | else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && | ||
1467 | (server->secMode & | ||
1468 | (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) | ||
1469 | return false; | ||
1470 | |||
1471 | return true; | ||
1472 | } | ||
1473 | |||
1383 | static struct TCP_Server_Info * | 1474 | static struct TCP_Server_Info * |
1384 | cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port) | 1475 | cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) |
1385 | { | 1476 | { |
1386 | struct list_head *tmp; | ||
1387 | struct TCP_Server_Info *server; | 1477 | struct TCP_Server_Info *server; |
1388 | struct sockaddr_in *addr4 = (struct sockaddr_in *) addr; | ||
1389 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) addr; | ||
1390 | 1478 | ||
1391 | write_lock(&cifs_tcp_ses_lock); | 1479 | write_lock(&cifs_tcp_ses_lock); |
1392 | list_for_each(tmp, &cifs_tcp_ses_list) { | 1480 | list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { |
1393 | server = list_entry(tmp, struct TCP_Server_Info, | ||
1394 | tcp_ses_list); | ||
1395 | /* | 1481 | /* |
1396 | * the demux thread can exit on its own while still in CifsNew | 1482 | * the demux thread can exit on its own while still in CifsNew |
1397 | * so don't accept any sockets in that state. Since the | 1483 | * so don't accept any sockets in that state. Since the |
@@ -1401,37 +1487,11 @@ cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port) | |||
1401 | if (server->tcpStatus == CifsNew) | 1487 | if (server->tcpStatus == CifsNew) |
1402 | continue; | 1488 | continue; |
1403 | 1489 | ||
1404 | switch (addr->ss_family) { | 1490 | if (!match_address(server, addr)) |
1405 | case AF_INET: | 1491 | continue; |
1406 | if (addr4->sin_addr.s_addr == | ||
1407 | server->addr.sockAddr.sin_addr.s_addr) { | ||
1408 | addr4->sin_port = htons(port); | ||
1409 | /* user overrode default port? */ | ||
1410 | if (addr4->sin_port) { | ||
1411 | if (addr4->sin_port != | ||
1412 | server->addr.sockAddr.sin_port) | ||
1413 | continue; | ||
1414 | } | ||
1415 | break; | ||
1416 | } else | ||
1417 | continue; | ||
1418 | 1492 | ||
1419 | case AF_INET6: | 1493 | if (!match_security(server, vol)) |
1420 | if (ipv6_addr_equal(&addr6->sin6_addr, | 1494 | continue; |
1421 | &server->addr.sockAddr6.sin6_addr) && | ||
1422 | (addr6->sin6_scope_id == | ||
1423 | server->addr.sockAddr6.sin6_scope_id)) { | ||
1424 | addr6->sin6_port = htons(port); | ||
1425 | /* user overrode default port? */ | ||
1426 | if (addr6->sin6_port) { | ||
1427 | if (addr6->sin6_port != | ||
1428 | server->addr.sockAddr6.sin6_port) | ||
1429 | continue; | ||
1430 | } | ||
1431 | break; | ||
1432 | } else | ||
1433 | continue; | ||
1434 | } | ||
1435 | 1495 | ||
1436 | ++server->srv_count; | 1496 | ++server->srv_count; |
1437 | write_unlock(&cifs_tcp_ses_lock); | 1497 | write_unlock(&cifs_tcp_ses_lock); |
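cifs_find_tcp_session() now walks the server list with list_for_each_entry() and delegates the per-family comparison to match_address(): the candidate's address must equal the server's, and a zero port in the request means the caller did not override the default. The userspace sketch below illustrates that comparison under the assumption that memcmp() is an adequate stand-in for ipv6_addr_equal(); demo_match_address() is illustrative, not the kernel function:

/* Userspace sketch of the match_address() rules: same family, same
 * address, and a zero port in the request means "any port matches". */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool demo_match_address(const struct sockaddr *known,
			       const struct sockaddr *want)
{
	if (known->sa_family != want->sa_family)
		return false;

	if (want->sa_family == AF_INET) {
		const struct sockaddr_in *k = (const void *)known;
		const struct sockaddr_in *w = (const void *)want;

		if (w->sin_addr.s_addr != k->sin_addr.s_addr)
			return false;
		if (w->sin_port && w->sin_port != k->sin_port)
			return false;
	} else if (want->sa_family == AF_INET6) {
		const struct sockaddr_in6 *k = (const void *)known;
		const struct sockaddr_in6 *w = (const void *)want;

		if (memcmp(&w->sin6_addr, &k->sin6_addr, sizeof(w->sin6_addr)))
			return false;
		if (w->sin6_scope_id != k->sin6_scope_id)
			return false;
		if (w->sin6_port && w->sin6_port != k->sin6_port)
			return false;
	}
	return true;
}

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(445) };
	struct sockaddr_in b = { .sin_family = AF_INET, .sin_port = 0 };

	inet_pton(AF_INET, "10.0.0.1", &a.sin_addr);
	b.sin_addr = a.sin_addr;
	printf("match: %d\n", demo_match_address((struct sockaddr *)&a,
						 (struct sockaddr *)&b));
	return 0;
}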
@@ -1460,6 +1520,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) | |||
1460 | server->tcpStatus = CifsExiting; | 1520 | server->tcpStatus = CifsExiting; |
1461 | spin_unlock(&GlobalMid_Lock); | 1521 | spin_unlock(&GlobalMid_Lock); |
1462 | 1522 | ||
1523 | cifs_fscache_release_client_cookie(server); | ||
1524 | |||
1463 | task = xchg(&server->tsk, NULL); | 1525 | task = xchg(&server->tsk, NULL); |
1464 | if (task) | 1526 | if (task) |
1465 | force_sig(SIGKILL, task); | 1527 | force_sig(SIGKILL, task); |
@@ -1479,7 +1541,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1479 | cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip); | 1541 | cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip); |
1480 | 1542 | ||
1481 | if (volume_info->UNCip && volume_info->UNC) { | 1543 | if (volume_info->UNCip && volume_info->UNC) { |
1482 | rc = cifs_convert_address(volume_info->UNCip, &addr); | 1544 | rc = cifs_fill_sockaddr((struct sockaddr *)&addr, |
1545 | volume_info->UNCip, | ||
1546 | volume_info->port); | ||
1483 | if (!rc) { | 1547 | if (!rc) { |
1484 | /* we failed translating address */ | 1548 | /* we failed translating address */ |
1485 | rc = -EINVAL; | 1549 | rc = -EINVAL; |
@@ -1499,7 +1563,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1499 | } | 1563 | } |
1500 | 1564 | ||
1501 | /* see if we already have a matching tcp_ses */ | 1565 | /* see if we already have a matching tcp_ses */ |
1502 | tcp_ses = cifs_find_tcp_session(&addr, volume_info->port); | 1566 | tcp_ses = cifs_find_tcp_session((struct sockaddr *)&addr, volume_info); |
1503 | if (tcp_ses) | 1567 | if (tcp_ses) |
1504 | return tcp_ses; | 1568 | return tcp_ses; |
1505 | 1569 | ||
@@ -1543,12 +1607,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1543 | cFYI(1, "attempting ipv6 connect"); | 1607 | cFYI(1, "attempting ipv6 connect"); |
1544 | /* BB should we allow ipv6 on port 139? */ | 1608 | /* BB should we allow ipv6 on port 139? */ |
1545 | /* other OS never observed in Wild doing 139 with v6 */ | 1609 | /* other OS never observed in Wild doing 139 with v6 */ |
1546 | sin_server6->sin6_port = htons(volume_info->port); | ||
1547 | memcpy(&tcp_ses->addr.sockAddr6, sin_server6, | 1610 | memcpy(&tcp_ses->addr.sockAddr6, sin_server6, |
1548 | sizeof(struct sockaddr_in6)); | 1611 | sizeof(struct sockaddr_in6)); |
1549 | rc = ipv6_connect(tcp_ses); | 1612 | rc = ipv6_connect(tcp_ses); |
1550 | } else { | 1613 | } else { |
1551 | sin_server->sin_port = htons(volume_info->port); | ||
1552 | memcpy(&tcp_ses->addr.sockAddr, sin_server, | 1614 | memcpy(&tcp_ses->addr.sockAddr, sin_server, |
1553 | sizeof(struct sockaddr_in)); | 1615 | sizeof(struct sockaddr_in)); |
1554 | rc = ipv4_connect(tcp_ses); | 1616 | rc = ipv4_connect(tcp_ses); |
@@ -1577,6 +1639,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
1577 | list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); | 1639 | list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); |
1578 | write_unlock(&cifs_tcp_ses_lock); | 1640 | write_unlock(&cifs_tcp_ses_lock); |
1579 | 1641 | ||
1642 | cifs_fscache_get_client_cookie(tcp_ses); | ||
1643 | |||
1580 | return tcp_ses; | 1644 | return tcp_ses; |
1581 | 1645 | ||
1582 | out_err: | 1646 | out_err: |
@@ -1591,17 +1655,27 @@ out_err: | |||
1591 | } | 1655 | } |
1592 | 1656 | ||
1593 | static struct cifsSesInfo * | 1657 | static struct cifsSesInfo * |
1594 | cifs_find_smb_ses(struct TCP_Server_Info *server, char *username) | 1658 | cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) |
1595 | { | 1659 | { |
1596 | struct list_head *tmp; | ||
1597 | struct cifsSesInfo *ses; | 1660 | struct cifsSesInfo *ses; |
1598 | 1661 | ||
1599 | write_lock(&cifs_tcp_ses_lock); | 1662 | write_lock(&cifs_tcp_ses_lock); |
1600 | list_for_each(tmp, &server->smb_ses_list) { | 1663 | list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { |
1601 | ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); | 1664 | switch (server->secType) { |
1602 | if (strncmp(ses->userName, username, MAX_USERNAME_SIZE)) | 1665 | case Kerberos: |
1603 | continue; | 1666 | if (vol->cred_uid != ses->cred_uid) |
1604 | 1667 | continue; | |
1668 | break; | ||
1669 | default: | ||
1670 | /* anything else takes username/password */ | ||
1671 | if (strncmp(ses->userName, vol->username, | ||
1672 | MAX_USERNAME_SIZE)) | ||
1673 | continue; | ||
1674 | if (strlen(vol->username) != 0 && | ||
1675 | strncmp(ses->password, vol->password, | ||
1676 | MAX_PASSWORD_SIZE)) | ||
1677 | continue; | ||
1678 | } | ||
1605 | ++ses->ses_count; | 1679 | ++ses->ses_count; |
1606 | write_unlock(&cifs_tcp_ses_lock); | 1680 | write_unlock(&cifs_tcp_ses_lock); |
1607 | return ses; | 1681 | return ses; |
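cifs_find_smb_ses() now matches on the whole smb_vol rather than a bare username: Kerberos sessions are shared per requesting uid (cred_uid), while other security types still compare username and, when one was given, password. A minimal sketch of that switch, with hypothetical demo_* types and fixed-size fields standing in for MAX_USERNAME_SIZE and MAX_PASSWORD_SIZE:

/* Userspace sketch of the session-matching switch: Kerberos sessions are
 * shared per requesting uid, everything else per username/password. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum demo_sectype { DEMO_KERBEROS, DEMO_NTLMSSP };

struct demo_ses {
	enum demo_sectype sectype;
	unsigned int cred_uid;
	char username[64];
	char password[64];
};

static bool demo_ses_matches(const struct demo_ses *ses,
			     enum demo_sectype sectype,
			     unsigned int cred_uid,
			     const char *user, const char *pass)
{
	if (ses->sectype != sectype)
		return false;

	switch (sectype) {
	case DEMO_KERBEROS:
		/* credentials come from the user's ticket cache, so the
		 * requesting uid is the only thing worth comparing */
		return ses->cred_uid == cred_uid;
	default:
		if (strncmp(ses->username, user, sizeof(ses->username)))
			return false;
		if (strlen(user) != 0 &&
		    strncmp(ses->password, pass, sizeof(ses->password)))
			return false;
		return true;
	}
}

int main(void)
{
	struct demo_ses ses = { DEMO_KERBEROS, 1000, "", "" };

	printf("%d\n", demo_ses_matches(&ses, DEMO_KERBEROS, 1000, "", ""));
	printf("%d\n", demo_ses_matches(&ses, DEMO_KERBEROS, 1001, "", ""));
	return 0;
}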
@@ -1643,7 +1717,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
1643 | 1717 | ||
1644 | xid = GetXid(); | 1718 | xid = GetXid(); |
1645 | 1719 | ||
1646 | ses = cifs_find_smb_ses(server, volume_info->username); | 1720 | ses = cifs_find_smb_ses(server, volume_info); |
1647 | if (ses) { | 1721 | if (ses) { |
1648 | cFYI(1, "Existing smb sess found (status=%d)", ses->status); | 1722 | cFYI(1, "Existing smb sess found (status=%d)", ses->status); |
1649 | 1723 | ||
@@ -1706,6 +1780,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
1706 | if (ses->domainName) | 1780 | if (ses->domainName) |
1707 | strcpy(ses->domainName, volume_info->domainname); | 1781 | strcpy(ses->domainName, volume_info->domainname); |
1708 | } | 1782 | } |
1783 | ses->cred_uid = volume_info->cred_uid; | ||
1709 | ses->linux_uid = volume_info->linux_uid; | 1784 | ses->linux_uid = volume_info->linux_uid; |
1710 | ses->overrideSecFlg = volume_info->secFlg; | 1785 | ses->overrideSecFlg = volume_info->secFlg; |
1711 | 1786 | ||
@@ -1773,6 +1848,7 @@ cifs_put_tcon(struct cifsTconInfo *tcon) | |||
1773 | CIFSSMBTDis(xid, tcon); | 1848 | CIFSSMBTDis(xid, tcon); |
1774 | _FreeXid(xid); | 1849 | _FreeXid(xid); |
1775 | 1850 | ||
1851 | cifs_fscache_release_super_cookie(tcon); | ||
1776 | tconInfoFree(tcon); | 1852 | tconInfoFree(tcon); |
1777 | cifs_put_smb_ses(ses); | 1853 | cifs_put_smb_ses(ses); |
1778 | } | 1854 | } |
@@ -1843,6 +1919,8 @@ cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) | |||
1843 | list_add(&tcon->tcon_list, &ses->tcon_list); | 1919 | list_add(&tcon->tcon_list, &ses->tcon_list); |
1844 | write_unlock(&cifs_tcp_ses_lock); | 1920 | write_unlock(&cifs_tcp_ses_lock); |
1845 | 1921 | ||
1922 | cifs_fscache_get_super_cookie(tcon); | ||
1923 | |||
1846 | return tcon; | 1924 | return tcon; |
1847 | 1925 | ||
1848 | out_fail: | 1926 | out_fail: |
@@ -2397,6 +2475,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2397 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; | 2475 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; |
2398 | if (pvolume_info->dynperm) | 2476 | if (pvolume_info->dynperm) |
2399 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; | 2477 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; |
2478 | if (pvolume_info->fsc) | ||
2479 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; | ||
2400 | if (pvolume_info->direct_io) { | 2480 | if (pvolume_info->direct_io) { |
2401 | cFYI(1, "mounting share using direct i/o"); | 2481 | cFYI(1, "mounting share using direct i/o"); |
2402 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; | 2482 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 391816b461ca..a7de5e9fff11 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/namei.h> | 26 | #include <linux/namei.h> |
27 | #include <linux/mount.h> | 27 | #include <linux/mount.h> |
28 | #include <linux/file.h> | ||
28 | #include "cifsfs.h" | 29 | #include "cifsfs.h" |
29 | #include "cifspdu.h" | 30 | #include "cifspdu.h" |
30 | #include "cifsglob.h" | 31 | #include "cifsglob.h" |
@@ -129,12 +130,6 @@ cifs_bp_rename_retry: | |||
129 | return full_path; | 130 | return full_path; |
130 | } | 131 | } |
131 | 132 | ||
132 | /* | ||
133 | * When called with struct file pointer set to NULL, there is no way we could | ||
134 | * update file->private_data, but getting it stuck on openFileList provides a | ||
135 | * way to access it from cifs_fill_filedata and thereby set file->private_data | ||
136 | * from cifs_open. | ||
137 | */ | ||
138 | struct cifsFileInfo * | 133 | struct cifsFileInfo * |
139 | cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, | 134 | cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, |
140 | struct file *file, struct vfsmount *mnt, unsigned int oflags) | 135 | struct file *file, struct vfsmount *mnt, unsigned int oflags) |
@@ -184,12 +179,13 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, | |||
184 | } | 179 | } |
185 | write_unlock(&GlobalSMBSeslock); | 180 | write_unlock(&GlobalSMBSeslock); |
186 | 181 | ||
182 | file->private_data = pCifsFile; | ||
183 | |||
187 | return pCifsFile; | 184 | return pCifsFile; |
188 | } | 185 | } |
189 | 186 | ||
190 | int cifs_posix_open(char *full_path, struct inode **pinode, | 187 | int cifs_posix_open(char *full_path, struct inode **pinode, |
191 | struct vfsmount *mnt, struct super_block *sb, | 188 | struct super_block *sb, int mode, int oflags, |
192 | int mode, int oflags, | ||
193 | __u32 *poplock, __u16 *pnetfid, int xid) | 189 | __u32 *poplock, __u16 *pnetfid, int xid) |
194 | { | 190 | { |
195 | int rc; | 191 | int rc; |
@@ -258,19 +254,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode, | |||
258 | cifs_fattr_to_inode(*pinode, &fattr); | 254 | cifs_fattr_to_inode(*pinode, &fattr); |
259 | } | 255 | } |
260 | 256 | ||
261 | /* | ||
262 | * cifs_fill_filedata() takes care of setting cifsFileInfo pointer to | ||
263 | * file->private_data. | ||
264 | */ | ||
265 | if (mnt) { | ||
266 | struct cifsFileInfo *pfile_info; | ||
267 | |||
268 | pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, | ||
269 | oflags); | ||
270 | if (pfile_info == NULL) | ||
271 | rc = -ENOMEM; | ||
272 | } | ||
273 | |||
274 | posix_open_ret: | 257 | posix_open_ret: |
275 | kfree(presp_data); | 258 | kfree(presp_data); |
276 | return rc; | 259 | return rc; |
@@ -298,7 +281,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
298 | int create_options = CREATE_NOT_DIR; | 281 | int create_options = CREATE_NOT_DIR; |
299 | __u32 oplock = 0; | 282 | __u32 oplock = 0; |
300 | int oflags; | 283 | int oflags; |
301 | bool posix_create = false; | ||
302 | /* | 284 | /* |
303 | * BB below access is probably too much for mknod to request | 285 | * BB below access is probably too much for mknod to request |
304 | * but we have to do query and setpathinfo so requesting | 286 | * but we have to do query and setpathinfo so requesting |
@@ -339,7 +321,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
339 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & | 321 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & |
340 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { | 322 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
341 | rc = cifs_posix_open(full_path, &newinode, | 323 | rc = cifs_posix_open(full_path, &newinode, |
342 | nd ? nd->path.mnt : NULL, | ||
343 | inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); | 324 | inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); |
344 | /* EIO could indicate that (posix open) operation is not | 325 | /* EIO could indicate that (posix open) operation is not |
345 | supported, despite what server claimed in capability | 326 | supported, despite what server claimed in capability |
@@ -347,7 +328,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
347 | handled in posix open */ | 328 | handled in posix open */ |
348 | 329 | ||
349 | if (rc == 0) { | 330 | if (rc == 0) { |
350 | posix_create = true; | ||
351 | if (newinode == NULL) /* query inode info */ | 331 | if (newinode == NULL) /* query inode info */ |
352 | goto cifs_create_get_file_info; | 332 | goto cifs_create_get_file_info; |
353 | else /* success, no need to query */ | 333 | else /* success, no need to query */ |
@@ -478,21 +458,28 @@ cifs_create_set_dentry: | |||
478 | else | 458 | else |
479 | cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); | 459 | cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); |
480 | 460 | ||
481 | /* nfsd case - nfs srv does not set nd */ | 461 | if (newinode && nd && (nd->flags & LOOKUP_OPEN)) { |
482 | if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) { | ||
483 | /* mknod case - do not leave file open */ | ||
484 | CIFSSMBClose(xid, tcon, fileHandle); | ||
485 | } else if (!(posix_create) && (newinode)) { | ||
486 | struct cifsFileInfo *pfile_info; | 462 | struct cifsFileInfo *pfile_info; |
487 | /* | 463 | struct file *filp; |
488 | * cifs_fill_filedata() takes care of setting cifsFileInfo | 464 | |
489 | * pointer to file->private_data. | 465 | filp = lookup_instantiate_filp(nd, direntry, generic_file_open); |
490 | */ | 466 | if (IS_ERR(filp)) { |
491 | pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL, | 467 | rc = PTR_ERR(filp); |
468 | CIFSSMBClose(xid, tcon, fileHandle); | ||
469 | goto cifs_create_out; | ||
470 | } | ||
471 | |||
472 | pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp, | ||
492 | nd->path.mnt, oflags); | 473 | nd->path.mnt, oflags); |
493 | if (pfile_info == NULL) | 474 | if (pfile_info == NULL) { |
475 | fput(filp); | ||
476 | CIFSSMBClose(xid, tcon, fileHandle); | ||
494 | rc = -ENOMEM; | 477 | rc = -ENOMEM; |
478 | } | ||
479 | } else { | ||
480 | CIFSSMBClose(xid, tcon, fileHandle); | ||
495 | } | 481 | } |
482 | |||
496 | cifs_create_out: | 483 | cifs_create_out: |
497 | kfree(buf); | 484 | kfree(buf); |
498 | kfree(full_path); | 485 | kfree(full_path); |
@@ -636,6 +623,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
636 | bool posix_open = false; | 623 | bool posix_open = false; |
637 | struct cifs_sb_info *cifs_sb; | 624 | struct cifs_sb_info *cifs_sb; |
638 | struct cifsTconInfo *pTcon; | 625 | struct cifsTconInfo *pTcon; |
626 | struct cifsFileInfo *cfile; | ||
639 | struct inode *newInode = NULL; | 627 | struct inode *newInode = NULL; |
640 | char *full_path = NULL; | 628 | char *full_path = NULL; |
641 | struct file *filp; | 629 | struct file *filp; |
@@ -703,7 +691,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
703 | if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && | 691 | if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && |
704 | (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && | 692 | (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && |
705 | (nd->intent.open.flags & O_CREAT)) { | 693 | (nd->intent.open.flags & O_CREAT)) { |
706 | rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, | 694 | rc = cifs_posix_open(full_path, &newInode, |
707 | parent_dir_inode->i_sb, | 695 | parent_dir_inode->i_sb, |
708 | nd->intent.open.create_mode, | 696 | nd->intent.open.create_mode, |
709 | nd->intent.open.flags, &oplock, | 697 | nd->intent.open.flags, &oplock, |
@@ -733,8 +721,25 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
733 | else | 721 | else |
734 | direntry->d_op = &cifs_dentry_ops; | 722 | direntry->d_op = &cifs_dentry_ops; |
735 | d_add(direntry, newInode); | 723 | d_add(direntry, newInode); |
736 | if (posix_open) | 724 | if (posix_open) { |
737 | filp = lookup_instantiate_filp(nd, direntry, NULL); | 725 | filp = lookup_instantiate_filp(nd, direntry, |
726 | generic_file_open); | ||
727 | if (IS_ERR(filp)) { | ||
728 | rc = PTR_ERR(filp); | ||
729 | CIFSSMBClose(xid, pTcon, fileHandle); | ||
730 | goto lookup_out; | ||
731 | } | ||
732 | |||
733 | cfile = cifs_new_fileinfo(newInode, fileHandle, filp, | ||
734 | nd->path.mnt, | ||
735 | nd->intent.open.flags); | ||
736 | if (cfile == NULL) { | ||
737 | fput(filp); | ||
738 | CIFSSMBClose(xid, pTcon, fileHandle); | ||
739 | rc = -ENOMEM; | ||
740 | goto lookup_out; | ||
741 | } | ||
742 | } | ||
738 | /* since paths are not looked up by component - the parent | 743 | /* since paths are not looked up by component - the parent |
739 | directories are presumed to be good here */ | 744 | directories are presumed to be good here */ |
740 | renew_parental_timestamps(direntry); | 745 | renew_parental_timestamps(direntry); |
@@ -755,6 +760,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
755 | is a common return code */ | 760 | is a common return code */ |
756 | } | 761 | } |
757 | 762 | ||
763 | lookup_out: | ||
758 | kfree(full_path); | 764 | kfree(full_path); |
759 | FreeXid(xid); | 765 | FreeXid(xid); |
760 | return ERR_PTR(rc); | 766 | return ERR_PTR(rc); |
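The dir.c changes above drop the deferred cifs_fill_filedata() dance: the file is instantiated with lookup_instantiate_filp() right away, and every failure after the server handle is open must undo what was already acquired (fput() the file, CIFSSMBClose() the handle) before bailing out. A toy userspace sketch of that unwind ordering, with demo_* stand-ins rather than VFS calls:

/* Toy sketch of the unwind pattern: once the "network" handle is open,
 * each later failure releases what was already acquired, in reverse. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_handle { int id; };
struct demo_filp { struct demo_handle *h; };

static struct demo_filp *demo_instantiate(struct demo_handle *h, int fail)
{
	struct demo_filp *f = fail ? NULL : malloc(sizeof(*f));

	if (f)
		f->h = h;
	return f;
}

static int demo_open_path(int fail_instantiate, int fail_fileinfo)
{
	struct demo_handle h = { .id = 42 };   /* server handle is open */
	struct demo_filp *filp;
	void *fileinfo;

	filp = demo_instantiate(&h, fail_instantiate);
	if (!filp) {
		printf("close handle %d\n", h.id);   /* only the handle to undo */
		return -EIO;
	}

	fileinfo = fail_fileinfo ? NULL : malloc(16);
	if (!fileinfo) {
		free(filp);                          /* undo in reverse order */
		printf("close handle %d\n", h.id);
		return -ENOMEM;
	}

	free(fileinfo);
	free(filp);
	return 0;
}

int main(void)
{
	printf("ok: %d\n", demo_open_path(0, 0));
	printf("instantiate fails: %d\n", demo_open_path(1, 0));
	printf("fileinfo fails: %d\n", demo_open_path(0, 1));
	return 0;
}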
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c index 4db2c5e7283f..3ad7f4300c45 100644 --- a/fs/cifs/dns_resolve.c +++ b/fs/cifs/dns_resolve.c | |||
@@ -24,12 +24,16 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/keyctl.h> | ||
28 | #include <linux/key-type.h> | ||
27 | #include <keys/user-type.h> | 29 | #include <keys/user-type.h> |
28 | #include "dns_resolve.h" | 30 | #include "dns_resolve.h" |
29 | #include "cifsglob.h" | 31 | #include "cifsglob.h" |
30 | #include "cifsproto.h" | 32 | #include "cifsproto.h" |
31 | #include "cifs_debug.h" | 33 | #include "cifs_debug.h" |
32 | 34 | ||
35 | static const struct cred *dns_resolver_cache; | ||
36 | |||
33 | /* Checks if supplied name is IP address | 37 | /* Checks if supplied name is IP address |
34 | * returns: | 38 | * returns: |
35 | * 1 - name is IP | 39 | * 1 - name is IP |
@@ -40,7 +44,7 @@ is_ip(char *name) | |||
40 | { | 44 | { |
41 | struct sockaddr_storage ss; | 45 | struct sockaddr_storage ss; |
42 | 46 | ||
43 | return cifs_convert_address(name, &ss); | 47 | return cifs_convert_address((struct sockaddr *)&ss, name); |
44 | } | 48 | } |
45 | 49 | ||
46 | static int | 50 | static int |
@@ -94,6 +98,7 @@ struct key_type key_type_dns_resolver = { | |||
94 | int | 98 | int |
95 | dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | 99 | dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) |
96 | { | 100 | { |
101 | const struct cred *saved_cred; | ||
97 | int rc = -EAGAIN; | 102 | int rc = -EAGAIN; |
98 | struct key *rkey = ERR_PTR(-EAGAIN); | 103 | struct key *rkey = ERR_PTR(-EAGAIN); |
99 | char *name; | 104 | char *name; |
@@ -133,8 +138,15 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) | |||
133 | goto skip_upcall; | 138 | goto skip_upcall; |
134 | } | 139 | } |
135 | 140 | ||
141 | saved_cred = override_creds(dns_resolver_cache); | ||
136 | rkey = request_key(&key_type_dns_resolver, name, ""); | 142 | rkey = request_key(&key_type_dns_resolver, name, ""); |
143 | revert_creds(saved_cred); | ||
137 | if (!IS_ERR(rkey)) { | 144 | if (!IS_ERR(rkey)) { |
145 | if (!(rkey->perm & KEY_USR_VIEW)) { | ||
146 | down_read(&rkey->sem); | ||
147 | rkey->perm |= KEY_USR_VIEW; | ||
148 | up_read(&rkey->sem); | ||
149 | } | ||
138 | len = rkey->type_data.x[0]; | 150 | len = rkey->type_data.x[0]; |
139 | data = rkey->payload.data; | 151 | data = rkey->payload.data; |
140 | } else { | 152 | } else { |
@@ -165,4 +177,61 @@ out: | |||
165 | return rc; | 177 | return rc; |
166 | } | 178 | } |
167 | 179 | ||
180 | int __init cifs_init_dns_resolver(void) | ||
181 | { | ||
182 | struct cred *cred; | ||
183 | struct key *keyring; | ||
184 | int ret; | ||
185 | |||
186 | printk(KERN_NOTICE "Registering the %s key type\n", | ||
187 | key_type_dns_resolver.name); | ||
188 | |||
189 | /* create an override credential set with a special thread keyring in | ||
190 | * which DNS requests are cached | ||
191 | * | ||
192 | * this is used to prevent malicious redirections from being installed | ||
193 | * with add_key(). | ||
194 | */ | ||
195 | cred = prepare_kernel_cred(NULL); | ||
196 | if (!cred) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, | ||
200 | (KEY_POS_ALL & ~KEY_POS_SETATTR) | | ||
201 | KEY_USR_VIEW | KEY_USR_READ, | ||
202 | KEY_ALLOC_NOT_IN_QUOTA); | ||
203 | if (IS_ERR(keyring)) { | ||
204 | ret = PTR_ERR(keyring); | ||
205 | goto failed_put_cred; | ||
206 | } | ||
207 | |||
208 | ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); | ||
209 | if (ret < 0) | ||
210 | goto failed_put_key; | ||
211 | |||
212 | ret = register_key_type(&key_type_dns_resolver); | ||
213 | if (ret < 0) | ||
214 | goto failed_put_key; | ||
215 | |||
216 | /* instruct request_key() to use this special keyring as a cache for | ||
217 | * the results it looks up */ | ||
218 | cred->thread_keyring = keyring; | ||
219 | cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; | ||
220 | dns_resolver_cache = cred; | ||
221 | return 0; | ||
222 | |||
223 | failed_put_key: | ||
224 | key_put(keyring); | ||
225 | failed_put_cred: | ||
226 | put_cred(cred); | ||
227 | return ret; | ||
228 | } | ||
168 | 229 | ||
230 | void cifs_exit_dns_resolver(void) | ||
231 | { | ||
232 | key_revoke(dns_resolver_cache->thread_keyring); | ||
233 | unregister_key_type(&key_type_dns_resolver); | ||
234 | put_cred(dns_resolver_cache); | ||
235 | printk(KERN_NOTICE "Unregistered %s key type\n", | ||
236 | key_type_dns_resolver.name); | ||
237 | } | ||
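cifs_init_dns_resolver() above builds its pieces in order (credential, keyring, key type registration) and, on each failure, unwinds only what already exists via the goto labels. The sketch below mirrors that init/teardown ordering in userspace; the demo_* helpers are stand-ins, not kernel or keyutils APIs:

/* Userspace sketch of goto-based init unwinding: allocate cred, then
 * keyring, then register; on failure free only what was set up. */
#include <stdio.h>
#include <stdlib.h>

struct demo_keyring { int id; };
struct demo_cred { struct demo_keyring *thread_keyring; };

static struct demo_cred *resolver_cache;

static int demo_register_type(int fail) { return fail ? -1 : 0; }

static int demo_init_resolver(int fail_register)
{
	struct demo_cred *cred;
	struct demo_keyring *keyring;
	int ret;

	cred = calloc(1, sizeof(*cred));
	if (!cred)
		return -1;

	keyring = calloc(1, sizeof(*keyring));
	if (!keyring) {
		ret = -1;
		goto failed_put_cred;
	}

	ret = demo_register_type(fail_register);
	if (ret < 0)
		goto failed_put_key;

	/* success: the keyring becomes the cred's cache, the cred is kept */
	cred->thread_keyring = keyring;
	resolver_cache = cred;
	return 0;

failed_put_key:
	free(keyring);
failed_put_cred:
	free(cred);
	return ret;
}

static void demo_exit_resolver(void)
{
	free(resolver_cache->thread_keyring);
	free(resolver_cache);
	resolver_cache = NULL;
}

int main(void)
{
	printf("init ok: %d\n", demo_init_resolver(0));
	demo_exit_resolver();
	printf("init fail: %d\n", demo_init_resolver(1));
	return 0;
}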
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h index 966e9288930b..5d7f291df162 100644 --- a/fs/cifs/dns_resolve.h +++ b/fs/cifs/dns_resolve.h | |||
@@ -24,8 +24,8 @@ | |||
24 | #define _DNS_RESOLVE_H | 24 | #define _DNS_RESOLVE_H |
25 | 25 | ||
26 | #ifdef __KERNEL__ | 26 | #ifdef __KERNEL__ |
27 | #include <linux/key-type.h> | 27 | extern int __init cifs_init_dns_resolver(void); |
28 | extern struct key_type key_type_dns_resolver; | 28 | extern void cifs_exit_dns_resolver(void); |
29 | extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); | 29 | extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr); |
30 | #endif /* KERNEL */ | 30 | #endif /* KERNEL */ |
31 | 31 | ||
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 75541af4b3db..fa04a00d126d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include "cifs_unicode.h" | 40 | #include "cifs_unicode.h" |
41 | #include "cifs_debug.h" | 41 | #include "cifs_debug.h" |
42 | #include "cifs_fs_sb.h" | 42 | #include "cifs_fs_sb.h" |
43 | #include "fscache.h" | ||
43 | 44 | ||
44 | static inline int cifs_convert_flags(unsigned int flags) | 45 | static inline int cifs_convert_flags(unsigned int flags) |
45 | { | 46 | { |
@@ -162,44 +163,12 @@ psx_client_can_cache: | |||
162 | return 0; | 163 | return 0; |
163 | } | 164 | } |
164 | 165 | ||
165 | static struct cifsFileInfo * | ||
166 | cifs_fill_filedata(struct file *file) | ||
167 | { | ||
168 | struct list_head *tmp; | ||
169 | struct cifsFileInfo *pCifsFile = NULL; | ||
170 | struct cifsInodeInfo *pCifsInode = NULL; | ||
171 | |||
172 | /* search inode for this file and fill in file->private_data */ | ||
173 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); | ||
174 | read_lock(&GlobalSMBSeslock); | ||
175 | list_for_each(tmp, &pCifsInode->openFileList) { | ||
176 | pCifsFile = list_entry(tmp, struct cifsFileInfo, flist); | ||
177 | if ((pCifsFile->pfile == NULL) && | ||
178 | (pCifsFile->pid == current->tgid)) { | ||
179 | /* mode set in cifs_create */ | ||
180 | |||
181 | /* needed for writepage */ | ||
182 | pCifsFile->pfile = file; | ||
183 | file->private_data = pCifsFile; | ||
184 | break; | ||
185 | } | ||
186 | } | ||
187 | read_unlock(&GlobalSMBSeslock); | ||
188 | |||
189 | if (file->private_data != NULL) { | ||
190 | return pCifsFile; | ||
191 | } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL)) | ||
192 | cERROR(1, "could not find file instance for " | ||
193 | "new file %p", file); | ||
194 | return NULL; | ||
195 | } | ||
196 | |||
197 | /* all arguments to this function must be checked for validity in caller */ | 166 | /* all arguments to this function must be checked for validity in caller */ |
198 | static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, | 167 | static inline int cifs_open_inode_helper(struct inode *inode, |
199 | struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile, | ||
200 | struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf, | 168 | struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf, |
201 | char *full_path, int xid) | 169 | char *full_path, int xid) |
202 | { | 170 | { |
171 | struct cifsInodeInfo *pCifsInode = CIFS_I(inode); | ||
203 | struct timespec temp; | 172 | struct timespec temp; |
204 | int rc; | 173 | int rc; |
205 | 174 | ||
@@ -213,36 +182,35 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, | |||
213 | /* if not oplocked, invalidate inode pages if mtime or file | 182 | /* if not oplocked, invalidate inode pages if mtime or file |
214 | size changed */ | 183 | size changed */ |
215 | temp = cifs_NTtimeToUnix(buf->LastWriteTime); | 184 | temp = cifs_NTtimeToUnix(buf->LastWriteTime); |
216 | if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) && | 185 | if (timespec_equal(&inode->i_mtime, &temp) && |
217 | (file->f_path.dentry->d_inode->i_size == | 186 | (inode->i_size == |
218 | (loff_t)le64_to_cpu(buf->EndOfFile))) { | 187 | (loff_t)le64_to_cpu(buf->EndOfFile))) { |
219 | cFYI(1, "inode unchanged on server"); | 188 | cFYI(1, "inode unchanged on server"); |
220 | } else { | 189 | } else { |
221 | if (file->f_path.dentry->d_inode->i_mapping) { | 190 | if (inode->i_mapping) { |
222 | /* BB no need to lock inode until after invalidate | 191 | /* BB no need to lock inode until after invalidate |
223 | since namei code should already have it locked? */ | 192 | since namei code should already have it locked? */ |
224 | rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); | 193 | rc = filemap_write_and_wait(inode->i_mapping); |
225 | if (rc != 0) | 194 | if (rc != 0) |
226 | CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; | 195 | pCifsInode->write_behind_rc = rc; |
227 | } | 196 | } |
228 | cFYI(1, "invalidating remote inode since open detected it " | 197 | cFYI(1, "invalidating remote inode since open detected it " |
229 | "changed"); | 198 | "changed"); |
230 | invalidate_remote_inode(file->f_path.dentry->d_inode); | 199 | invalidate_remote_inode(inode); |
231 | } | 200 | } |
232 | 201 | ||
233 | client_can_cache: | 202 | client_can_cache: |
234 | if (pTcon->unix_ext) | 203 | if (pTcon->unix_ext) |
235 | rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode, | 204 | rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, |
236 | full_path, inode->i_sb, xid); | 205 | xid); |
237 | else | 206 | else |
238 | rc = cifs_get_inode_info(&file->f_path.dentry->d_inode, | 207 | rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, |
239 | full_path, buf, inode->i_sb, xid, NULL); | 208 | xid, NULL); |
240 | 209 | ||
241 | if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { | 210 | if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) { |
242 | pCifsInode->clientCanCacheAll = true; | 211 | pCifsInode->clientCanCacheAll = true; |
243 | pCifsInode->clientCanCacheRead = true; | 212 | pCifsInode->clientCanCacheRead = true; |
244 | cFYI(1, "Exclusive Oplock granted on inode %p", | 213 | cFYI(1, "Exclusive Oplock granted on inode %p", inode); |
245 | file->f_path.dentry->d_inode); | ||
246 | } else if ((*oplock & 0xF) == OPLOCK_READ) | 214 | } else if ((*oplock & 0xF) == OPLOCK_READ) |
247 | pCifsInode->clientCanCacheRead = true; | 215 | pCifsInode->clientCanCacheRead = true; |
248 | 216 | ||
@@ -256,7 +224,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
256 | __u32 oplock; | 224 | __u32 oplock; |
257 | struct cifs_sb_info *cifs_sb; | 225 | struct cifs_sb_info *cifs_sb; |
258 | struct cifsTconInfo *tcon; | 226 | struct cifsTconInfo *tcon; |
259 | struct cifsFileInfo *pCifsFile; | 227 | struct cifsFileInfo *pCifsFile = NULL; |
260 | struct cifsInodeInfo *pCifsInode; | 228 | struct cifsInodeInfo *pCifsInode; |
261 | char *full_path = NULL; | 229 | char *full_path = NULL; |
262 | int desiredAccess; | 230 | int desiredAccess; |
@@ -270,12 +238,6 @@ int cifs_open(struct inode *inode, struct file *file) | |||
270 | tcon = cifs_sb->tcon; | 238 | tcon = cifs_sb->tcon; |
271 | 239 | ||
272 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); | 240 | pCifsInode = CIFS_I(file->f_path.dentry->d_inode); |
273 | pCifsFile = cifs_fill_filedata(file); | ||
274 | if (pCifsFile) { | ||
275 | rc = 0; | ||
276 | FreeXid(xid); | ||
277 | return rc; | ||
278 | } | ||
279 | 241 | ||
280 | full_path = build_path_from_dentry(file->f_path.dentry); | 242 | full_path = build_path_from_dentry(file->f_path.dentry); |
281 | if (full_path == NULL) { | 243 | if (full_path == NULL) { |
@@ -299,8 +261,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
299 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); | 261 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
300 | oflags |= SMB_O_CREAT; | 262 | oflags |= SMB_O_CREAT; |
301 | /* can not refresh inode info since size could be stale */ | 263 | /* can not refresh inode info since size could be stale */ |
302 | rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, | 264 | rc = cifs_posix_open(full_path, &inode, inode->i_sb, |
303 | inode->i_sb, | ||
304 | cifs_sb->mnt_file_mode /* ignored */, | 265 | cifs_sb->mnt_file_mode /* ignored */, |
305 | oflags, &oplock, &netfid, xid); | 266 | oflags, &oplock, &netfid, xid); |
306 | if (rc == 0) { | 267 | if (rc == 0) { |
@@ -308,9 +269,23 @@ int cifs_open(struct inode *inode, struct file *file) | |||
308 | /* no need for special case handling of setting mode | 269 | /* no need for special case handling of setting mode |
309 | on read only files needed here */ | 270 | on read only files needed here */ |
310 | 271 | ||
311 | pCifsFile = cifs_fill_filedata(file); | 272 | rc = cifs_posix_open_inode_helper(inode, file, |
312 | cifs_posix_open_inode_helper(inode, file, pCifsInode, | 273 | pCifsInode, oplock, netfid); |
313 | oplock, netfid); | 274 | if (rc != 0) { |
275 | CIFSSMBClose(xid, tcon, netfid); | ||
276 | goto out; | ||
277 | } | ||
278 | |||
279 | pCifsFile = cifs_new_fileinfo(inode, netfid, file, | ||
280 | file->f_path.mnt, | ||
281 | oflags); | ||
282 | if (pCifsFile == NULL) { | ||
283 | CIFSSMBClose(xid, tcon, netfid); | ||
284 | rc = -ENOMEM; | ||
285 | } | ||
286 | |||
287 | cifs_fscache_set_inode_cookie(inode, file); | ||
288 | |||
314 | goto out; | 289 | goto out; |
315 | } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | 290 | } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { |
316 | if (tcon->ses->serverNOS) | 291 | if (tcon->ses->serverNOS) |
@@ -391,16 +366,18 @@ int cifs_open(struct inode *inode, struct file *file) | |||
391 | goto out; | 366 | goto out; |
392 | } | 367 | } |
393 | 368 | ||
369 | rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid); | ||
370 | if (rc != 0) | ||
371 | goto out; | ||
372 | |||
394 | pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, | 373 | pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, |
395 | file->f_flags); | 374 | file->f_flags); |
396 | file->private_data = pCifsFile; | 375 | if (pCifsFile == NULL) { |
397 | if (file->private_data == NULL) { | ||
398 | rc = -ENOMEM; | 376 | rc = -ENOMEM; |
399 | goto out; | 377 | goto out; |
400 | } | 378 | } |
401 | 379 | ||
402 | rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon, | 380 | cifs_fscache_set_inode_cookie(inode, file); |
403 | &oplock, buf, full_path, xid); | ||
404 | 381 | ||
405 | if (oplock & CIFS_CREATE_ACTION) { | 382 | if (oplock & CIFS_CREATE_ACTION) { |
406 | /* time to set mode which we can not set earlier due to | 383 | /* time to set mode which we can not set earlier due to |
@@ -456,7 +433,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush) | |||
456 | __u16 netfid; | 433 | __u16 netfid; |
457 | 434 | ||
458 | if (file->private_data) | 435 | if (file->private_data) |
459 | pCifsFile = (struct cifsFileInfo *)file->private_data; | 436 | pCifsFile = file->private_data; |
460 | else | 437 | else |
461 | return -EBADF; | 438 | return -EBADF; |
462 | 439 | ||
@@ -513,8 +490,7 @@ reopen_error_exit: | |||
513 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { | 490 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
514 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); | 491 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
515 | /* can not refresh inode info since size could be stale */ | 492 | /* can not refresh inode info since size could be stale */ |
516 | rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, | 493 | rc = cifs_posix_open(full_path, NULL, inode->i_sb, |
517 | inode->i_sb, | ||
518 | cifs_sb->mnt_file_mode /* ignored */, | 494 | cifs_sb->mnt_file_mode /* ignored */, |
519 | oflags, &oplock, &netfid, xid); | 495 | oflags, &oplock, &netfid, xid); |
520 | if (rc == 0) { | 496 | if (rc == 0) { |
@@ -595,8 +571,7 @@ int cifs_close(struct inode *inode, struct file *file) | |||
595 | int xid, timeout; | 571 | int xid, timeout; |
596 | struct cifs_sb_info *cifs_sb; | 572 | struct cifs_sb_info *cifs_sb; |
597 | struct cifsTconInfo *pTcon; | 573 | struct cifsTconInfo *pTcon; |
598 | struct cifsFileInfo *pSMBFile = | 574 | struct cifsFileInfo *pSMBFile = file->private_data; |
599 | (struct cifsFileInfo *)file->private_data; | ||
600 | 575 | ||
601 | xid = GetXid(); | 576 | xid = GetXid(); |
602 | 577 | ||
@@ -671,8 +646,7 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
671 | { | 646 | { |
672 | int rc = 0; | 647 | int rc = 0; |
673 | int xid; | 648 | int xid; |
674 | struct cifsFileInfo *pCFileStruct = | 649 | struct cifsFileInfo *pCFileStruct = file->private_data; |
675 | (struct cifsFileInfo *)file->private_data; | ||
676 | char *ptmp; | 650 | char *ptmp; |
677 | 651 | ||
678 | cFYI(1, "Closedir inode = 0x%p", inode); | 652 | cFYI(1, "Closedir inode = 0x%p", inode); |
@@ -893,8 +867,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
893 | length, pfLock, | 867 | length, pfLock, |
894 | posix_lock_type, wait_flag); | 868 | posix_lock_type, wait_flag); |
895 | } else { | 869 | } else { |
896 | struct cifsFileInfo *fid = | 870 | struct cifsFileInfo *fid = file->private_data; |
897 | (struct cifsFileInfo *)file->private_data; | ||
898 | 871 | ||
899 | if (numLock) { | 872 | if (numLock) { |
900 | rc = CIFSSMBLock(xid, tcon, netfid, length, | 873 | rc = CIFSSMBLock(xid, tcon, netfid, length, |
@@ -995,7 +968,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, | |||
995 | 968 | ||
996 | if (file->private_data == NULL) | 969 | if (file->private_data == NULL) |
997 | return -EBADF; | 970 | return -EBADF; |
998 | open_file = (struct cifsFileInfo *) file->private_data; | 971 | open_file = file->private_data; |
999 | 972 | ||
1000 | rc = generic_write_checks(file, poffset, &write_size, 0); | 973 | rc = generic_write_checks(file, poffset, &write_size, 0); |
1001 | if (rc) | 974 | if (rc) |
@@ -1097,7 +1070,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data, | |||
1097 | 1070 | ||
1098 | if (file->private_data == NULL) | 1071 | if (file->private_data == NULL) |
1099 | return -EBADF; | 1072 | return -EBADF; |
1100 | open_file = (struct cifsFileInfo *)file->private_data; | 1073 | open_file = file->private_data; |
1101 | 1074 | ||
1102 | xid = GetXid(); | 1075 | xid = GetXid(); |
1103 | 1076 | ||
@@ -1681,8 +1654,7 @@ int cifs_fsync(struct file *file, int datasync) | |||
1681 | int xid; | 1654 | int xid; |
1682 | int rc = 0; | 1655 | int rc = 0; |
1683 | struct cifsTconInfo *tcon; | 1656 | struct cifsTconInfo *tcon; |
1684 | struct cifsFileInfo *smbfile = | 1657 | struct cifsFileInfo *smbfile = file->private_data; |
1685 | (struct cifsFileInfo *)file->private_data; | ||
1686 | struct inode *inode = file->f_path.dentry->d_inode; | 1658 | struct inode *inode = file->f_path.dentry->d_inode; |
1687 | 1659 | ||
1688 | xid = GetXid(); | 1660 | xid = GetXid(); |
@@ -1786,7 +1758,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, | |||
1786 | FreeXid(xid); | 1758 | FreeXid(xid); |
1787 | return rc; | 1759 | return rc; |
1788 | } | 1760 | } |
1789 | open_file = (struct cifsFileInfo *)file->private_data; | 1761 | open_file = file->private_data; |
1790 | 1762 | ||
1791 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1763 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1792 | cFYI(1, "attempting read on write only file instance"); | 1764 | cFYI(1, "attempting read on write only file instance"); |
@@ -1867,7 +1839,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1867 | FreeXid(xid); | 1839 | FreeXid(xid); |
1868 | return rc; | 1840 | return rc; |
1869 | } | 1841 | } |
1870 | open_file = (struct cifsFileInfo *)file->private_data; | 1842 | open_file = file->private_data; |
1871 | 1843 | ||
1872 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1844 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1873 | cFYI(1, "attempting read on write only file instance"); | 1845 | cFYI(1, "attempting read on write only file instance"); |
@@ -1972,6 +1944,9 @@ static void cifs_copy_cache_pages(struct address_space *mapping, | |||
1972 | SetPageUptodate(page); | 1944 | SetPageUptodate(page); |
1973 | unlock_page(page); | 1945 | unlock_page(page); |
1974 | data += PAGE_CACHE_SIZE; | 1946 | data += PAGE_CACHE_SIZE; |
1947 | |||
1948 | /* add page to FS-Cache */ | ||
1949 | cifs_readpage_to_fscache(mapping->host, page); | ||
1975 | } | 1950 | } |
1976 | return; | 1951 | return; |
1977 | } | 1952 | } |
@@ -1998,10 +1973,19 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
1998 | FreeXid(xid); | 1973 | FreeXid(xid); |
1999 | return rc; | 1974 | return rc; |
2000 | } | 1975 | } |
2001 | open_file = (struct cifsFileInfo *)file->private_data; | 1976 | open_file = file->private_data; |
2002 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 1977 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
2003 | pTcon = cifs_sb->tcon; | 1978 | pTcon = cifs_sb->tcon; |
2004 | 1979 | ||
1980 | /* | ||
1981 | * Reads as many pages as possible from fscache. Returns -ENOBUFS | ||
1982 | * immediately if the cookie is negative | ||
1983 | */ | ||
1984 | rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, | ||
1985 | &num_pages); | ||
1986 | if (rc == 0) | ||
1987 | goto read_complete; | ||
1988 | |||
2005 | cFYI(DBG2, "rpages: num pages %d", num_pages); | 1989 | cFYI(DBG2, "rpages: num pages %d", num_pages); |
2006 | for (i = 0; i < num_pages; ) { | 1990 | for (i = 0; i < num_pages; ) { |
2007 | unsigned contig_pages; | 1991 | unsigned contig_pages; |
@@ -2112,6 +2096,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2112 | smb_read_data = NULL; | 2096 | smb_read_data = NULL; |
2113 | } | 2097 | } |
2114 | 2098 | ||
2099 | read_complete: | ||
2115 | FreeXid(xid); | 2100 | FreeXid(xid); |
2116 | return rc; | 2101 | return rc; |
2117 | } | 2102 | } |
@@ -2122,6 +2107,11 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
2122 | char *read_data; | 2107 | char *read_data; |
2123 | int rc; | 2108 | int rc; |
2124 | 2109 | ||
2110 | /* Is the page cached? */ | ||
2111 | rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page); | ||
2112 | if (rc == 0) | ||
2113 | goto read_complete; | ||
2114 | |||
2125 | page_cache_get(page); | 2115 | page_cache_get(page); |
2126 | read_data = kmap(page); | 2116 | read_data = kmap(page); |
2127 | /* for reads over a certain size could initiate async read ahead */ | 2117 | /* for reads over a certain size could initiate async read ahead */ |
@@ -2141,11 +2131,17 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
2141 | 2131 | ||
2142 | flush_dcache_page(page); | 2132 | flush_dcache_page(page); |
2143 | SetPageUptodate(page); | 2133 | SetPageUptodate(page); |
2134 | |||
2135 | /* send this page to the cache */ | ||
2136 | cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page); | ||
2137 | |||
2144 | rc = 0; | 2138 | rc = 0; |
2145 | 2139 | ||
2146 | io_error: | 2140 | io_error: |
2147 | kunmap(page); | 2141 | kunmap(page); |
2148 | page_cache_release(page); | 2142 | page_cache_release(page); |
2143 | |||
2144 | read_complete: | ||
2149 | return rc; | 2145 | return rc; |
2150 | } | 2146 | } |
2151 | 2147 | ||
@@ -2295,6 +2291,22 @@ out: | |||
2295 | return rc; | 2291 | return rc; |
2296 | } | 2292 | } |
2297 | 2293 | ||
2294 | static int cifs_release_page(struct page *page, gfp_t gfp) | ||
2295 | { | ||
2296 | if (PagePrivate(page)) | ||
2297 | return 0; | ||
2298 | |||
2299 | return cifs_fscache_release_page(page, gfp); | ||
2300 | } | ||
2301 | |||
2302 | static void cifs_invalidate_page(struct page *page, unsigned long offset) | ||
2303 | { | ||
2304 | struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); | ||
2305 | |||
2306 | if (offset == 0) | ||
2307 | cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); | ||
2308 | } | ||
2309 | |||
2298 | static void | 2310 | static void |
2299 | cifs_oplock_break(struct slow_work *work) | 2311 | cifs_oplock_break(struct slow_work *work) |
2300 | { | 2312 | { |
@@ -2368,6 +2380,8 @@ const struct address_space_operations cifs_addr_ops = { | |||
2368 | .write_begin = cifs_write_begin, | 2380 | .write_begin = cifs_write_begin, |
2369 | .write_end = cifs_write_end, | 2381 | .write_end = cifs_write_end, |
2370 | .set_page_dirty = __set_page_dirty_nobuffers, | 2382 | .set_page_dirty = __set_page_dirty_nobuffers, |
2383 | .releasepage = cifs_release_page, | ||
2384 | .invalidatepage = cifs_invalidate_page, | ||
2371 | /* .sync_page = cifs_sync_page, */ | 2385 | /* .sync_page = cifs_sync_page, */ |
2372 | /* .direct_IO = */ | 2386 | /* .direct_IO = */ |
2373 | }; | 2387 | }; |
@@ -2384,6 +2398,8 @@ const struct address_space_operations cifs_addr_ops_smallbuf = { | |||
2384 | .write_begin = cifs_write_begin, | 2398 | .write_begin = cifs_write_begin, |
2385 | .write_end = cifs_write_end, | 2399 | .write_end = cifs_write_end, |
2386 | .set_page_dirty = __set_page_dirty_nobuffers, | 2400 | .set_page_dirty = __set_page_dirty_nobuffers, |
2401 | .releasepage = cifs_release_page, | ||
2402 | .invalidatepage = cifs_invalidate_page, | ||
2387 | /* .sync_page = cifs_sync_page, */ | 2403 | /* .sync_page = cifs_sync_page, */ |
2388 | /* .direct_IO = */ | 2404 | /* .direct_IO = */ |
2389 | }; | 2405 | }; |
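The file.c read paths above become read-through with respect to FS-Cache: consult the cache first, fall back to the server on a miss, then push what was read back into the cache. A small self-contained sketch of that flow, with an in-memory block cache standing in for fscache:

/* Userspace sketch of the read-through pattern cifs_readpage_worker()
 * now follows: cache first, "network" on a miss, then cache the result. */
#include <stdio.h>
#include <string.h>

#define NBLOCKS 4
#define BLKSZ   16

static char cache[NBLOCKS][BLKSZ];
static int  cached[NBLOCKS];

static int cache_read(int idx, char *buf)
{
	if (!cached[idx])
		return -1;              /* like -ENODATA: not in cache */
	memcpy(buf, cache[idx], BLKSZ);
	return 0;
}

static void cache_store(int idx, const char *buf)
{
	memcpy(cache[idx], buf, BLKSZ);
	cached[idx] = 1;
}

static void network_read(int idx, char *buf)
{
	snprintf(buf, BLKSZ, "block-%d", idx);   /* pretend server I/O */
}

static void read_block(int idx, char *buf)
{
	if (cache_read(idx, buf) == 0) {
		printf("block %d: cache hit\n", idx);
		return;
	}
	network_read(idx, buf);
	cache_store(idx, buf);                   /* readpage_to_fscache step */
	printf("block %d: miss, fetched and cached\n", idx);
}

int main(void)
{
	char buf[BLKSZ];

	read_block(1, buf);   /* miss */
	read_block(1, buf);   /* hit */
	return 0;
}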
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c new file mode 100644 index 000000000000..9f3f5c4be161 --- /dev/null +++ b/fs/cifs/fscache.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * fs/cifs/fscache.c - CIFS filesystem cache interface | ||
3 | * | ||
4 | * Copyright (c) 2010 Novell, Inc. | ||
5 | * Author(s): Suresh Jayaraman <sjayaraman@suse.de> | ||
6 | * | ||
7 | * This library is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU Lesser General Public License as published | ||
9 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This library is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
15 | * the GNU Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public License | ||
18 | * along with this library; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | #include "fscache.h" | ||
22 | #include "cifsglob.h" | ||
23 | #include "cifs_debug.h" | ||
24 | #include "cifs_fs_sb.h" | ||
25 | |||
26 | void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) | ||
27 | { | ||
28 | server->fscache = | ||
29 | fscache_acquire_cookie(cifs_fscache_netfs.primary_index, | ||
30 | &cifs_fscache_server_index_def, server); | ||
31 | cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server, | ||
32 | server->fscache); | ||
33 | } | ||
34 | |||
35 | void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) | ||
36 | { | ||
37 | cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server, | ||
38 | server->fscache); | ||
39 | fscache_relinquish_cookie(server->fscache, 0); | ||
40 | server->fscache = NULL; | ||
41 | } | ||
42 | |||
43 | void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) | ||
44 | { | ||
45 | struct TCP_Server_Info *server = tcon->ses->server; | ||
46 | |||
47 | tcon->fscache = | ||
48 | fscache_acquire_cookie(server->fscache, | ||
49 | &cifs_fscache_super_index_def, tcon); | ||
50 | cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)", | ||
51 | server->fscache, tcon->fscache); | ||
52 | } | ||
53 | |||
54 | void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) | ||
55 | { | ||
56 | cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); | ||
57 | fscache_relinquish_cookie(tcon->fscache, 0); | ||
58 | tcon->fscache = NULL; | ||
59 | } | ||
60 | |||
61 | static void cifs_fscache_enable_inode_cookie(struct inode *inode) | ||
62 | { | ||
63 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
64 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | ||
65 | |||
66 | if (cifsi->fscache) | ||
67 | return; | ||
68 | |||
69 | cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache, | ||
70 | &cifs_fscache_inode_object_def, | ||
71 | cifsi); | ||
72 | cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", | ||
73 | cifs_sb->tcon->fscache, cifsi->fscache); | ||
74 | } | ||
75 | |||
76 | void cifs_fscache_release_inode_cookie(struct inode *inode) | ||
77 | { | ||
78 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
79 | |||
80 | if (cifsi->fscache) { | ||
81 | cFYI(1, "CIFS releasing inode cookie (0x%p)", | ||
82 | cifsi->fscache); | ||
83 | fscache_relinquish_cookie(cifsi->fscache, 0); | ||
84 | cifsi->fscache = NULL; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | static void cifs_fscache_disable_inode_cookie(struct inode *inode) | ||
89 | { | ||
90 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
91 | |||
92 | if (cifsi->fscache) { | ||
93 | cFYI(1, "CIFS disabling inode cookie (0x%p)", | ||
94 | cifsi->fscache); | ||
95 | fscache_relinquish_cookie(cifsi->fscache, 1); | ||
96 | cifsi->fscache = NULL; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp) | ||
101 | { | ||
102 | if ((filp->f_flags & O_ACCMODE) != O_RDONLY) | ||
103 | cifs_fscache_disable_inode_cookie(inode); | ||
104 | else { | ||
105 | cifs_fscache_enable_inode_cookie(inode); | ||
106 | cFYI(1, "CIFS: fscache inode cookie set"); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | void cifs_fscache_reset_inode_cookie(struct inode *inode) | ||
111 | { | ||
112 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
113 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | ||
114 | struct fscache_cookie *old = cifsi->fscache; | ||
115 | |||
116 | if (cifsi->fscache) { | ||
117 | /* retire the current fscache cache and get a new one */ | ||
118 | fscache_relinquish_cookie(cifsi->fscache, 1); | ||
119 | |||
120 | cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache, | ||
121 | &cifs_fscache_inode_object_def, | ||
122 | cifsi); | ||
123 | cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", | ||
124 | cifsi->fscache, old); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | int cifs_fscache_release_page(struct page *page, gfp_t gfp) | ||
129 | { | ||
130 | if (PageFsCache(page)) { | ||
131 | struct inode *inode = page->mapping->host; | ||
132 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
133 | |||
134 | cFYI(1, "CIFS: fscache release page (0x%p/0x%p)", | ||
135 | page, cifsi->fscache); | ||
136 | if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | return 1; | ||
141 | } | ||
142 | |||
143 | static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, | ||
144 | int error) | ||
145 | { | ||
146 | cFYI(1, "CIFS: readpage_from_fscache_complete (0x%p/%d)", | ||
147 | page, error); | ||
148 | if (!error) | ||
149 | SetPageUptodate(page); | ||
150 | unlock_page(page); | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Retrieve a page from FS-Cache | ||
155 | */ | ||
156 | int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) | ||
157 | { | ||
158 | int ret; | ||
159 | |||
160 | cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p)", | ||
161 | CIFS_I(inode)->fscache, page, inode); | ||
162 | ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, | ||
163 | cifs_readpage_from_fscache_complete, | ||
164 | NULL, | ||
165 | GFP_KERNEL); | ||
166 | switch (ret) { | ||
167 | |||
168 | case 0: /* page found in fscache, read submitted */ | ||
169 | cFYI(1, "CIFS: readpage_from_fscache: submitted"); | ||
170 | return ret; | ||
171 | case -ENOBUFS: /* page won't be cached */ | ||
172 | case -ENODATA: /* page not in cache */ | ||
173 | cFYI(1, "CIFS: readpage_from_fscache %d", ret); | ||
174 | return 1; | ||
175 | |||
176 | default: | ||
177 | cERROR(1, "unknown error ret = %d", ret); | ||
178 | } | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * Retrieve a set of pages from FS-Cache | ||
184 | */ | ||
185 | int __cifs_readpages_from_fscache(struct inode *inode, | ||
186 | struct address_space *mapping, | ||
187 | struct list_head *pages, | ||
188 | unsigned *nr_pages) | ||
189 | { | ||
190 | int ret; | ||
191 | |||
192 | cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)", | ||
193 | CIFS_I(inode)->fscache, *nr_pages, inode); | ||
194 | ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, | ||
195 | pages, nr_pages, | ||
196 | cifs_readpage_from_fscache_complete, | ||
197 | NULL, | ||
198 | mapping_gfp_mask(mapping)); | ||
199 | switch (ret) { | ||
200 | case 0: /* read submitted to the cache for all pages */ | ||
201 | cFYI(1, "CIFS: readpages_from_fscache: submitted"); | ||
202 | return ret; | ||
203 | |||
204 | case -ENOBUFS: /* some pages are not cached and can't be */ | ||
205 | case -ENODATA: /* some pages are not cached */ | ||
206 | cFYI(1, "CIFS: readpages_from_fscache: no page"); | ||
207 | return 1; | ||
208 | |||
209 | default: | ||
210 | cFYI(1, "unknown error ret = %d", ret); | ||
211 | } | ||
212 | |||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | void __cifs_readpage_to_fscache(struct inode *inode, struct page *page) | ||
217 | { | ||
218 | int ret; | ||
219 | |||
220 | cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p)", | ||
221 | CIFS_I(inode)->fscache, page, inode); | ||
222 | ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL); | ||
223 | if (ret != 0) | ||
224 | fscache_uncache_page(CIFS_I(inode)->fscache, page); | ||
225 | } | ||
226 | |||
227 | void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) | ||
228 | { | ||
229 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | ||
230 | struct fscache_cookie *cookie = cifsi->fscache; | ||
231 | |||
232 | cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie); | ||
233 | fscache_wait_on_page_write(cookie, page); | ||
234 | fscache_uncache_page(cookie, page); | ||
235 | } | ||
236 | |||
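
The readpage helpers above collapse FS-Cache's -ENOBUFS/-ENODATA results into a simple convention for their callers: 0 means the read was submitted from the cache, 1 means the caller still has to fetch the data itself. A minimal user-space model of that convention follows; cache_read(), read_page() and the sample data are invented for the sketch and are not CIFS or FS-Cache code.

/*
 * Toy model only: 0 = cache satisfied the read, 1 = go to the "server",
 * negative = hard error.  None of these names exist in the kernel.
 */
#include <errno.h>
#include <stdio.h>

#define PAGE_SZ 16

static const char *cached[4] = { "cached page 0", NULL, "cached page 2", NULL };
static const char *remote[4] = { "server page 0", "server page 1",
				 "server page 2", "server page 3" };

static int cache_read(unsigned idx, char *buf)
{
	if (idx >= 4)
		return -EINVAL;
	if (!cached[idx])
		return 1;          /* not in cache: caller reads from server */
	snprintf(buf, PAGE_SZ, "%s", cached[idx]);
	return 0;                  /* cache satisfied the read */
}

static int read_page(unsigned idx, char *buf)
{
	int ret = cache_read(idx, buf);

	if (ret <= 0)
		return ret;        /* cache hit, or hard error */
	snprintf(buf, PAGE_SZ, "%s", remote[idx]);
	return 0;                  /* fell back to the server copy */
}

int main(void)
{
	char buf[PAGE_SZ];

	for (unsigned i = 0; i < 4; i++)
		if (read_page(i, buf) == 0)
			printf("page %u: %s\n", i, buf);
	return 0;
}
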
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h new file mode 100644 index 000000000000..31b88ec2341e --- /dev/null +++ b/fs/cifs/fscache.h | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * fs/cifs/fscache.h - CIFS filesystem cache interface definitions | ||
3 | * | ||
4 | * Copyright (c) 2010 Novell, Inc. | ||
5 | * Author(s): Suresh Jayaraman <sjayaraman@suse.de> | ||
6 | * | ||
7 | * This library is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU Lesser General Public License as published | ||
9 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This library is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
15 | * the GNU Lesser General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU Lesser General Public License | ||
18 | * along with this library; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | */ | ||
21 | #ifndef _CIFS_FSCACHE_H | ||
22 | #define _CIFS_FSCACHE_H | ||
23 | |||
24 | #include <linux/fscache.h> | ||
25 | |||
26 | #include "cifsglob.h" | ||
27 | |||
28 | #ifdef CONFIG_CIFS_FSCACHE | ||
29 | |||
30 | extern struct fscache_netfs cifs_fscache_netfs; | ||
31 | extern const struct fscache_cookie_def cifs_fscache_server_index_def; | ||
32 | extern const struct fscache_cookie_def cifs_fscache_super_index_def; | ||
33 | extern const struct fscache_cookie_def cifs_fscache_inode_object_def; | ||
34 | |||
35 | extern int cifs_fscache_register(void); | ||
36 | extern void cifs_fscache_unregister(void); | ||
37 | |||
38 | /* | ||
39 | * fscache.c | ||
40 | */ | ||
41 | extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *); | ||
42 | extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *); | ||
43 | extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *); | ||
44 | extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *); | ||
45 | |||
46 | extern void cifs_fscache_release_inode_cookie(struct inode *); | ||
47 | extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *); | ||
48 | extern void cifs_fscache_reset_inode_cookie(struct inode *); | ||
49 | |||
50 | extern void __cifs_fscache_invalidate_page(struct page *, struct inode *); | ||
51 | extern int cifs_fscache_release_page(struct page *page, gfp_t gfp); | ||
52 | extern int __cifs_readpage_from_fscache(struct inode *, struct page *); | ||
53 | extern int __cifs_readpages_from_fscache(struct inode *, | ||
54 | struct address_space *, | ||
55 | struct list_head *, | ||
56 | unsigned *); | ||
57 | |||
58 | extern void __cifs_readpage_to_fscache(struct inode *, struct page *); | ||
59 | |||
60 | static inline void cifs_fscache_invalidate_page(struct page *page, | ||
61 | struct inode *inode) | ||
62 | { | ||
63 | if (PageFsCache(page)) | ||
64 | __cifs_fscache_invalidate_page(page, inode); | ||
65 | } | ||
66 | |||
67 | static inline int cifs_readpage_from_fscache(struct inode *inode, | ||
68 | struct page *page) | ||
69 | { | ||
70 | if (CIFS_I(inode)->fscache) | ||
71 | return __cifs_readpage_from_fscache(inode, page); | ||
72 | |||
73 | return -ENOBUFS; | ||
74 | } | ||
75 | |||
76 | static inline int cifs_readpages_from_fscache(struct inode *inode, | ||
77 | struct address_space *mapping, | ||
78 | struct list_head *pages, | ||
79 | unsigned *nr_pages) | ||
80 | { | ||
81 | if (CIFS_I(inode)->fscache) | ||
82 | return __cifs_readpages_from_fscache(inode, mapping, pages, | ||
83 | nr_pages); | ||
84 | return -ENOBUFS; | ||
85 | } | ||
86 | |||
87 | static inline void cifs_readpage_to_fscache(struct inode *inode, | ||
88 | struct page *page) | ||
89 | { | ||
90 | if (PageFsCache(page)) | ||
91 | __cifs_readpage_to_fscache(inode, page); | ||
92 | } | ||
93 | |||
94 | #else /* CONFIG_CIFS_FSCACHE */ | ||
95 | static inline int cifs_fscache_register(void) { return 0; } | ||
96 | static inline void cifs_fscache_unregister(void) {} | ||
97 | |||
98 | static inline void | ||
99 | cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {} | ||
100 | static inline void | ||
101 | cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {} | ||
102 | static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {} | ||
103 | static inline void | ||
104 | cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {} | ||
105 | |||
106 | static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {} | ||
107 | static inline void cifs_fscache_set_inode_cookie(struct inode *inode, | ||
108 | struct file *filp) {} | ||
109 | static inline void cifs_fscache_reset_inode_cookie(struct inode *inode) {} | ||
110 | static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) | ||
111 | { | ||
112 | return 1; /* May release page */ | ||
113 | } | ||
114 | |||
115 | static inline void cifs_fscache_invalidate_page(struct page *page, | ||
116 | struct inode *inode) {} | ||
117 | static inline int | ||
118 | cifs_readpage_from_fscache(struct inode *inode, struct page *page) | ||
119 | { | ||
120 | return -ENOBUFS; | ||
121 | } | ||
122 | |||
123 | static inline int cifs_readpages_from_fscache(struct inode *inode, | ||
124 | struct address_space *mapping, | ||
125 | struct list_head *pages, | ||
126 | unsigned *nr_pages) | ||
127 | { | ||
128 | return -ENOBUFS; | ||
129 | } | ||
130 | |||
131 | static inline void cifs_readpage_to_fscache(struct inode *inode, | ||
132 | struct page *page) {} | ||
133 | |||
134 | #endif /* CONFIG_CIFS_FSCACHE */ | ||
135 | |||
136 | #endif /* _CIFS_FSCACHE_H */ | ||
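
The new header keeps call sites free of #ifdef CONFIG_CIFS_FSCACHE by providing no-op inline stubs when the option is off. A stand-alone sketch of the same pattern, using a made-up FEATURE_CACHE macro and cache_*() names rather than the real Kconfig symbol or CIFS functions; build with or without -DFEATURE_CACHE to get either variant.

/* FEATURE_CACHE and cache_*() are illustrative, not kernel interfaces. */
#include <stdio.h>

#ifdef FEATURE_CACHE
static int cache_ready;

static inline void cache_enable(void)  { cache_ready = 1; }
static inline void cache_disable(void) { cache_ready = 0; }
static inline int  cache_lookup(int key)
{
	return cache_ready ? key * key : -1;
}
#else
/* feature compiled out: stubs keep the callers free of #ifdefs */
static inline void cache_enable(void)  { }
static inline void cache_disable(void) { }
static inline int  cache_lookup(int key) { (void)key; return -1; }
#endif

int main(void)
{
	cache_enable();
	printf("lookup(7) = %d\n", cache_lookup(7));
	cache_disable();
	return 0;
}
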
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 62b324f26a56..a15b3a9bbff4 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "cifsproto.h" | 29 | #include "cifsproto.h" |
30 | #include "cifs_debug.h" | 30 | #include "cifs_debug.h" |
31 | #include "cifs_fs_sb.h" | 31 | #include "cifs_fs_sb.h" |
32 | #include "fscache.h" | ||
32 | 33 | ||
33 | 34 | ||
34 | static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) | 35 | static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) |
@@ -288,7 +289,7 @@ int cifs_get_file_info_unix(struct file *filp) | |||
288 | struct inode *inode = filp->f_path.dentry->d_inode; | 289 | struct inode *inode = filp->f_path.dentry->d_inode; |
289 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 290 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
290 | struct cifsTconInfo *tcon = cifs_sb->tcon; | 291 | struct cifsTconInfo *tcon = cifs_sb->tcon; |
291 | struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; | 292 | struct cifsFileInfo *cfile = filp->private_data; |
292 | 293 | ||
293 | xid = GetXid(); | 294 | xid = GetXid(); |
294 | rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); | 295 | rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); |
@@ -515,7 +516,7 @@ int cifs_get_file_info(struct file *filp) | |||
515 | struct inode *inode = filp->f_path.dentry->d_inode; | 516 | struct inode *inode = filp->f_path.dentry->d_inode; |
516 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 517 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
517 | struct cifsTconInfo *tcon = cifs_sb->tcon; | 518 | struct cifsTconInfo *tcon = cifs_sb->tcon; |
518 | struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; | 519 | struct cifsFileInfo *cfile = filp->private_data; |
519 | 520 | ||
520 | xid = GetXid(); | 521 | xid = GetXid(); |
521 | rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); | 522 | rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); |
@@ -723,9 +724,14 @@ cifs_find_inode(struct inode *inode, void *opaque) | |||
723 | { | 724 | { |
724 | struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; | 725 | struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; |
725 | 726 | ||
727 | /* don't match inode with different uniqueid */ | ||
726 | if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) | 728 | if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) |
727 | return 0; | 729 | return 0; |
728 | 730 | ||
731 | /* don't match inode of different type */ | ||
732 | if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT)) | ||
733 | return 0; | ||
734 | |||
729 | /* | 735 | /* |
730 | * uh oh -- it's a directory. We can't use it since hardlinked dirs are | 736 | * uh oh -- it's a directory. We can't use it since hardlinked dirs are |
731 | * verboten. Disable serverino and return it as if it were found, the | 737 | * verboten. Disable serverino and return it as if it were found, the |
@@ -776,6 +782,10 @@ retry_iget5_locked: | |||
776 | inode->i_flags |= S_NOATIME | S_NOCMTIME; | 782 | inode->i_flags |= S_NOATIME | S_NOCMTIME; |
777 | if (inode->i_state & I_NEW) { | 783 | if (inode->i_state & I_NEW) { |
778 | inode->i_ino = hash; | 784 | inode->i_ino = hash; |
785 | #ifdef CONFIG_CIFS_FSCACHE | ||
786 | /* initialize per-inode cache cookie pointer */ | ||
787 | CIFS_I(inode)->fscache = NULL; | ||
788 | #endif | ||
779 | unlock_new_inode(inode); | 789 | unlock_new_inode(inode); |
780 | } | 790 | } |
781 | } | 791 | } |
@@ -807,6 +817,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) | |||
807 | if (!inode) | 817 | if (!inode) |
808 | return ERR_PTR(-ENOMEM); | 818 | return ERR_PTR(-ENOMEM); |
809 | 819 | ||
820 | #ifdef CONFIG_CIFS_FSCACHE | ||
821 | /* populate tcon->resource_id */ | ||
822 | cifs_sb->tcon->resource_id = CIFS_I(inode)->uniqueid; | ||
823 | #endif | ||
824 | |||
810 | if (rc && cifs_sb->tcon->ipc) { | 825 | if (rc && cifs_sb->tcon->ipc) { |
811 | cFYI(1, "ipc connection - fake read inode"); | 826 | cFYI(1, "ipc connection - fake read inode"); |
812 | inode->i_mode |= S_IFDIR; | 827 | inode->i_mode |= S_IFDIR; |
@@ -1401,6 +1416,10 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath, | |||
1401 | if (rc == 0 || rc != -ETXTBSY) | 1416 | if (rc == 0 || rc != -ETXTBSY) |
1402 | return rc; | 1417 | return rc; |
1403 | 1418 | ||
1419 | /* open-file renames don't work across directories */ | ||
1420 | if (to_dentry->d_parent != from_dentry->d_parent) | ||
1421 | return rc; | ||
1422 | |||
1404 | /* open the file to be renamed -- we need DELETE perms */ | 1423 | /* open the file to be renamed -- we need DELETE perms */ |
1405 | rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE, | 1424 | rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE, |
1406 | CREATE_NOT_DIR, &srcfid, &oplock, NULL, | 1425 | CREATE_NOT_DIR, &srcfid, &oplock, NULL, |
@@ -1564,6 +1583,7 @@ cifs_invalidate_mapping(struct inode *inode) | |||
1564 | cifs_i->write_behind_rc = rc; | 1583 | cifs_i->write_behind_rc = rc; |
1565 | } | 1584 | } |
1566 | invalidate_remote_inode(inode); | 1585 | invalidate_remote_inode(inode); |
1586 | cifs_fscache_reset_inode_cookie(inode); | ||
1567 | } | 1587 | } |
1568 | 1588 | ||
1569 | int cifs_revalidate_file(struct file *filp) | 1589 | int cifs_revalidate_file(struct file *filp) |
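
One of the inode.c changes above teaches cifs_find_inode() to reject a candidate whose file-type bits differ from the looked-up attributes. The same S_IFMT comparison expressed in ordinary user-space terms, using stat(2) on two caller-supplied paths; same_file_type() is an invented helper, no CIFS involved.

/* Illustrative only: compares the S_IFMT bits of two local paths. */
#include <stdio.h>
#include <sys/stat.h>

static int same_file_type(const char *a, const char *b)
{
	struct stat sa, sb;

	if (stat(a, &sa) || stat(b, &sb))
		return -1;                          /* stat failure */
	return (sa.st_mode & S_IFMT) == (sb.st_mode & S_IFMT);
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <path1> <path2>\n", argv[0]);
		return 1;
	}
	printf("same type: %d\n", same_file_type(argv[1], argv[2]));
	return 0;
}
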
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 505926f1ee6b..9d38a71c8e14 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c | |||
@@ -41,8 +41,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) | |||
41 | __u64 ExtAttrMask = 0; | 41 | __u64 ExtAttrMask = 0; |
42 | __u64 caps; | 42 | __u64 caps; |
43 | struct cifsTconInfo *tcon; | 43 | struct cifsTconInfo *tcon; |
44 | struct cifsFileInfo *pSMBFile = | 44 | struct cifsFileInfo *pSMBFile = filep->private_data; |
45 | (struct cifsFileInfo *)filep->private_data; | ||
46 | #endif /* CONFIG_CIFS_POSIX */ | 45 | #endif /* CONFIG_CIFS_POSIX */ |
47 | 46 | ||
48 | xid = GetXid(); | 47 | xid = GetXid(); |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index d35d52889cb5..c6721ee26dbc 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -61,6 +61,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { | |||
61 | {ERRremcd, -EACCES}, | 61 | {ERRremcd, -EACCES}, |
62 | {ERRdiffdevice, -EXDEV}, | 62 | {ERRdiffdevice, -EXDEV}, |
63 | {ERRnofiles, -ENOENT}, | 63 | {ERRnofiles, -ENOENT}, |
64 | {ERRwriteprot, -EROFS}, | ||
64 | {ERRbadshare, -ETXTBSY}, | 65 | {ERRbadshare, -ETXTBSY}, |
65 | {ERRlock, -EACCES}, | 66 | {ERRlock, -EACCES}, |
66 | {ERRunsup, -EINVAL}, | 67 | {ERRunsup, -EINVAL}, |
@@ -164,7 +165,7 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst) | |||
164 | * Returns 0 on failure. | 165 | * Returns 0 on failure. |
165 | */ | 166 | */ |
166 | int | 167 | int |
167 | cifs_convert_address(char *src, void *dst) | 168 | cifs_convert_address(struct sockaddr *dst, char *src) |
168 | { | 169 | { |
169 | int rc; | 170 | int rc; |
170 | char *pct, *endp; | 171 | char *pct, *endp; |
@@ -201,6 +202,27 @@ cifs_convert_address(char *src, void *dst) | |||
201 | return rc; | 202 | return rc; |
202 | } | 203 | } |
203 | 204 | ||
205 | int | ||
206 | cifs_fill_sockaddr(struct sockaddr *dst, char *src, | ||
207 | const unsigned short int port) | ||
208 | { | ||
209 | if (!cifs_convert_address(dst, src)) | ||
210 | return 0; | ||
211 | |||
212 | switch (dst->sa_family) { | ||
213 | case AF_INET: | ||
214 | ((struct sockaddr_in *)dst)->sin_port = htons(port); | ||
215 | break; | ||
216 | case AF_INET6: | ||
217 | ((struct sockaddr_in6 *)dst)->sin6_port = htons(port); | ||
218 | break; | ||
219 | default: | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | return 1; | ||
224 | } | ||
225 | |||
204 | /***************************************************************************** | 226 | /***************************************************************************** |
205 | convert a NT status code to a dos class/code | 227 | convert a NT status code to a dos class/code |
206 | *****************************************************************************/ | 228 | *****************************************************************************/ |
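
cifs_fill_sockaddr() above converts the string form first and then stores the port in whichever field matches the resulting address family. A portable user-space sketch of the same two-step fill, built on inet_pton(); fill_sockaddr() is an invented name, not the CIFS function.

/* Sketch only: fill_sockaddr() mimics the fill pattern with POSIX APIs. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

static int fill_sockaddr(struct sockaddr_storage *dst, const char *src,
			 unsigned short port)
{
	struct sockaddr_in  *s4 = (struct sockaddr_in *)dst;
	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)dst;

	memset(dst, 0, sizeof(*dst));
	if (inet_pton(AF_INET, src, &s4->sin_addr) == 1) {
		s4->sin_family = AF_INET;
		s4->sin_port   = htons(port);
		return 1;
	}
	if (inet_pton(AF_INET6, src, &s6->sin6_addr) == 1) {
		s6->sin6_family = AF_INET6;
		s6->sin6_port   = htons(port);
		return 1;
	}
	return 0;                                   /* not a valid address */
}

int main(void)
{
	struct sockaddr_storage ss;

	printf("v4:  %d\n", fill_sockaddr(&ss, "192.168.1.10", 445));
	printf("v6:  %d\n", fill_sockaddr(&ss, "2001:db8::1", 445));
	printf("bad: %d\n", fill_sockaddr(&ss, "not-an-ip", 445));
	return 0;
}
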
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index daf1753af674..d5e591fab475 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -847,6 +847,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
847 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; | 847 | end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; |
848 | 848 | ||
849 | tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); | 849 | tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); |
850 | if (tmp_buf == NULL) { | ||
851 | rc = -ENOMEM; | ||
852 | break; | ||
853 | } | ||
854 | |||
850 | for (i = 0; (i < num_to_fill) && (rc == 0); i++) { | 855 | for (i = 0; (i < num_to_fill) && (rc == 0); i++) { |
851 | if (current_entry == NULL) { | 856 | if (current_entry == NULL) { |
852 | /* evaluate whether this case is an error */ | 857 | /* evaluate whether this case is an error */ |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7707389bdf2c..0a57cb7db5dd 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -730,15 +730,7 @@ ssetup_ntlmssp_authenticate: | |||
730 | 730 | ||
731 | /* calculate session key */ | 731 | /* calculate session key */ |
732 | setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); | 732 | setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp); |
733 | if (first_time) /* should this be moved into common code | 733 | /* FIXME: calculate MAC key */ |
734 | with similar ntlmv2 path? */ | ||
735 | /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key, | ||
736 | response BB FIXME, v2_sess_key); */ | ||
737 | |||
738 | /* copy session key */ | ||
739 | |||
740 | /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE); | ||
741 | bcc_ptr += LM2_SESS_KEY_SIZE; */ | ||
742 | memcpy(bcc_ptr, (char *)v2_sess_key, | 734 | memcpy(bcc_ptr, (char *)v2_sess_key, |
743 | sizeof(struct ntlmv2_resp)); | 735 | sizeof(struct ntlmv2_resp)); |
744 | bcc_ptr += sizeof(struct ntlmv2_resp); | 736 | bcc_ptr += sizeof(struct ntlmv2_resp); |
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h index c5084d27db7c..7f16cb825fe5 100644 --- a/fs/cifs/smberr.h +++ b/fs/cifs/smberr.h | |||
@@ -76,6 +76,7 @@ | |||
76 | #define ERRnofiles 18 /* A File Search command can find no | 76 | #define ERRnofiles 18 /* A File Search command can find no |
77 | more files matching the specified | 77 | more files matching the specified |
78 | criteria. */ | 78 | criteria. */ |
79 | #define ERRwriteprot 19 /* media is write protected */ | ||
79 | #define ERRgeneral 31 | 80 | #define ERRgeneral 31 |
80 | #define ERRbadshare 32 /* The sharing mode specified for an | 81 | #define ERRbadshare 32 /* The sharing mode specified for an |
81 | Open conflicts with existing FIDs on | 82 | Open conflicts with existing FIDs on |
diff --git a/fs/dcache.c b/fs/dcache.c index d96047b4a633..86d4db15473e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -590,6 +590,8 @@ static void prune_dcache(int count) | |||
590 | up_read(&sb->s_umount); | 590 | up_read(&sb->s_umount); |
591 | } | 591 | } |
592 | spin_lock(&sb_lock); | 592 | spin_lock(&sb_lock); |
593 | /* lock was dropped, must reset next */ | ||
594 | list_safe_reset_next(sb, n, s_list); | ||
593 | count -= pruned; | 595 | count -= pruned; |
594 | __put_super(sb); | 596 | __put_super(sb); |
595 | /* more work left to do? */ | 597 | /* more work left to do? */ |
@@ -894,7 +896,7 @@ EXPORT_SYMBOL(shrink_dcache_parent); | |||
894 | * | 896 | * |
895 | * In this case we return -1 to tell the caller that we baled. | 897 | * In this case we return -1 to tell the caller that we baled.
896 | */ | 898 | */ |
897 | static int shrink_dcache_memory(int nr, gfp_t gfp_mask) | 899 | static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
898 | { | 900 | { |
899 | if (nr) { | 901 | if (nr) { |
900 | if (!(gfp_mask & __GFP_FS)) | 902 | if (!(gfp_mask & __GFP_FS)) |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 7600aacf531d..a10cb91cadea 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio) | |||
218 | * filesystems can use it to hold additional state between get_block calls and | 218 | * filesystems can use it to hold additional state between get_block calls and |
219 | * dio_complete. | 219 | * dio_complete. |
220 | */ | 220 | */ |
221 | static int dio_complete(struct dio *dio, loff_t offset, int ret) | 221 | static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async) |
222 | { | 222 | { |
223 | ssize_t transferred = 0; | 223 | ssize_t transferred = 0; |
224 | 224 | ||
@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) | |||
239 | transferred = dio->i_size - offset; | 239 | transferred = dio->i_size - offset; |
240 | } | 240 | } |
241 | 241 | ||
242 | if (dio->end_io && dio->result) | ||
243 | dio->end_io(dio->iocb, offset, transferred, | ||
244 | dio->map_bh.b_private); | ||
245 | |||
246 | if (dio->flags & DIO_LOCKING) | ||
247 | /* lockdep: non-owner release */ | ||
248 | up_read_non_owner(&dio->inode->i_alloc_sem); | ||
249 | |||
250 | if (ret == 0) | 242 | if (ret == 0) |
251 | ret = dio->page_errors; | 243 | ret = dio->page_errors; |
252 | if (ret == 0) | 244 | if (ret == 0) |
@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) | |||
254 | if (ret == 0) | 246 | if (ret == 0) |
255 | ret = transferred; | 247 | ret = transferred; |
256 | 248 | ||
249 | if (dio->end_io && dio->result) { | ||
250 | dio->end_io(dio->iocb, offset, transferred, | ||
251 | dio->map_bh.b_private, ret, is_async); | ||
252 | } else if (is_async) { | ||
253 | aio_complete(dio->iocb, ret, 0); | ||
254 | } | ||
255 | |||
256 | if (dio->flags & DIO_LOCKING) | ||
257 | /* lockdep: non-owner release */ | ||
258 | up_read_non_owner(&dio->inode->i_alloc_sem); | ||
259 | |||
257 | return ret; | 260 | return ret; |
258 | } | 261 | } |
259 | 262 | ||
@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error) | |||
277 | spin_unlock_irqrestore(&dio->bio_lock, flags); | 280 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
278 | 281 | ||
279 | if (remaining == 0) { | 282 | if (remaining == 0) { |
280 | int ret = dio_complete(dio, dio->iocb->ki_pos, 0); | 283 | dio_complete(dio, dio->iocb->ki_pos, 0, true); |
281 | aio_complete(dio->iocb, ret, 0); | ||
282 | kfree(dio); | 284 | kfree(dio); |
283 | } | 285 | } |
284 | } | 286 | } |
@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | |||
1126 | spin_unlock_irqrestore(&dio->bio_lock, flags); | 1128 | spin_unlock_irqrestore(&dio->bio_lock, flags); |
1127 | 1129 | ||
1128 | if (ret2 == 0) { | 1130 | if (ret2 == 0) { |
1129 | ret = dio_complete(dio, offset, ret); | 1131 | ret = dio_complete(dio, offset, ret, false); |
1130 | kfree(dio); | 1132 | kfree(dio); |
1131 | } else | 1133 | } else |
1132 | BUG_ON(ret != -EIOCBQUEUED); | 1134 | BUG_ON(ret != -EIOCBQUEUED); |
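
The direct-io rework above folds errors into the final return value before invoking ->end_io(), hands that value plus an is_async flag to the callback, and completes the async request itself only when no callback is present. A toy, single-threaded model of that ordering; every name in it (toy_dio, finish_async, log_end_io) is made up and no real AIO is performed.

/* Toy model of the completion ordering; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_dio {
	long  result;           /* bytes transferred */
	int   page_errors;      /* first page error seen, if any */
	void (*end_io)(long long offset, long size, int ret, bool is_async);
};

static void finish_async(int ret)
{
	printf("async request completed, ret=%d\n", ret);
}

static void log_end_io(long long offset, long size, int ret, bool is_async)
{
	printf("end_io: off=%lld size=%ld ret=%d async=%d\n",
	       offset, size, ret, (int)is_async);
	if (is_async)
		finish_async(ret);      /* callback owns async completion */
}

static int toy_dio_complete(struct toy_dio *dio, long long offset,
			    int ret, bool is_async)
{
	/* fold errors into the final return value first */
	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = (int)dio->result;

	if (dio->end_io && dio->result)
		dio->end_io(offset, dio->result, ret, is_async);
	else if (is_async)
		finish_async(ret);      /* no callback claimed the request */

	return ret;
}

int main(void)
{
	struct toy_dio sync_io  = { .result = 4096, .end_io = NULL };
	struct toy_dio async_io = { .result = 8192, .end_io = log_end_io };

	printf("sync ret = %d\n", toy_dio_complete(&sync_io, 0, 0, false));
	toy_dio_complete(&async_io, 4096, 0, true);
	return 0;
}
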
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 2d8dbce9d485..46c4dd8dfcc3 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c | |||
@@ -31,9 +31,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux; | |||
31 | 31 | ||
32 | static struct hlist_head *ecryptfs_daemon_hash; | 32 | static struct hlist_head *ecryptfs_daemon_hash; |
33 | struct mutex ecryptfs_daemon_hash_mux; | 33 | struct mutex ecryptfs_daemon_hash_mux; |
34 | static int ecryptfs_hash_buckets; | 34 | static int ecryptfs_hash_bits; |
35 | #define ecryptfs_uid_hash(uid) \ | 35 | #define ecryptfs_uid_hash(uid) \ |
36 | hash_long((unsigned long)uid, ecryptfs_hash_buckets) | 36 | hash_long((unsigned long)uid, ecryptfs_hash_bits) |
37 | 37 | ||
38 | static u32 ecryptfs_msg_counter; | 38 | static u32 ecryptfs_msg_counter; |
39 | static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; | 39 | static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; |
@@ -486,18 +486,19 @@ int ecryptfs_init_messaging(void) | |||
486 | } | 486 | } |
487 | mutex_init(&ecryptfs_daemon_hash_mux); | 487 | mutex_init(&ecryptfs_daemon_hash_mux); |
488 | mutex_lock(&ecryptfs_daemon_hash_mux); | 488 | mutex_lock(&ecryptfs_daemon_hash_mux); |
489 | ecryptfs_hash_buckets = 1; | 489 | ecryptfs_hash_bits = 1; |
490 | while (ecryptfs_number_of_users >> ecryptfs_hash_buckets) | 490 | while (ecryptfs_number_of_users >> ecryptfs_hash_bits) |
491 | ecryptfs_hash_buckets++; | 491 | ecryptfs_hash_bits++; |
492 | ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) | 492 | ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) |
493 | * ecryptfs_hash_buckets), GFP_KERNEL); | 493 | * (1 << ecryptfs_hash_bits)), |
494 | GFP_KERNEL); | ||
494 | if (!ecryptfs_daemon_hash) { | 495 | if (!ecryptfs_daemon_hash) { |
495 | rc = -ENOMEM; | 496 | rc = -ENOMEM; |
496 | printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); | 497 | printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); |
497 | mutex_unlock(&ecryptfs_daemon_hash_mux); | 498 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
498 | goto out; | 499 | goto out; |
499 | } | 500 | } |
500 | for (i = 0; i < ecryptfs_hash_buckets; i++) | 501 | for (i = 0; i < (1 << ecryptfs_hash_bits); i++) |
501 | INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); | 502 | INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); |
502 | mutex_unlock(&ecryptfs_daemon_hash_mux); | 503 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
503 | ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) | 504 | ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) |
@@ -554,7 +555,7 @@ void ecryptfs_release_messaging(void) | |||
554 | int i; | 555 | int i; |
555 | 556 | ||
556 | mutex_lock(&ecryptfs_daemon_hash_mux); | 557 | mutex_lock(&ecryptfs_daemon_hash_mux); |
557 | for (i = 0; i < ecryptfs_hash_buckets; i++) { | 558 | for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { |
558 | int rc; | 559 | int rc; |
559 | 560 | ||
560 | hlist_for_each_entry(daemon, elem, | 561 | hlist_for_each_entry(daemon, elem, |
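
The eCryptfs fix above renames the counter to ecryptfs_hash_bits because hash_long() takes a width in bits, so the table must hold 1 << bits buckets rather than "bits" of them. A user-space rendering of the same sizing rule; hash32() is a simple stand-in, not the kernel's hash_long().

/* Sketch of bits-based bucket sizing; hash32() is invented for the example. */
#include <stdio.h>
#include <stdlib.h>

static unsigned hash32(unsigned long v, unsigned bits)
{
	/* multiplicative hash, truncated to the low 'bits' bits */
	return (unsigned)((v * 2654435761UL) & ((1UL << bits) - 1));
}

int main(void)
{
	unsigned long nr_users = 100;     /* e.g. ecryptfs_number_of_users */
	unsigned bits = 1;

	/* smallest bits such that (1 << bits) > nr_users */
	while (nr_users >> bits)
		bits++;

	unsigned nr_buckets = 1U << bits;
	unsigned *buckets = calloc(nr_buckets, sizeof(*buckets));
	if (!buckets)
		return 1;

	for (unsigned long uid = 0; uid < 1000; uid++)
		buckets[hash32(uid, bits)]++;

	printf("%u bits -> %u buckets\n", bits, nr_buckets);
	free(buckets);
	return 0;
}
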
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index ca7e2a0ed98a..2bcc0431bada 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c | |||
@@ -200,6 +200,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
200 | return error; | 200 | return error; |
201 | else { | 201 | else { |
202 | inode->i_mode = mode; | 202 | inode->i_mode = mode; |
203 | inode->i_ctime = CURRENT_TIME_SEC; | ||
203 | mark_inode_dirty(inode); | 204 | mark_inode_dirty(inode); |
204 | if (error == 0) | 205 | if (error == 0) |
205 | acl = NULL; | 206 | acl = NULL; |
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index 01552abbca3c..8a11fe212183 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c | |||
@@ -205,6 +205,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, | |||
205 | return error; | 205 | return error; |
206 | else { | 206 | else { |
207 | inode->i_mode = mode; | 207 | inode->i_mode = mode; |
208 | inode->i_ctime = CURRENT_TIME_SEC; | ||
208 | ext3_mark_inode_dirty(handle, inode); | 209 | ext3_mark_inode_dirty(handle, inode); |
209 | if (error == 0) | 210 | if (error == 0) |
210 | acl = NULL; | 211 | acl = NULL; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 42272d67955a..0afc8c1d8cf3 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) | |||
3775 | } | 3775 | } |
3776 | 3776 | ||
3777 | static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | 3777 | static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, |
3778 | ssize_t size, void *private) | 3778 | ssize_t size, void *private, int ret, |
3779 | bool is_async) | ||
3779 | { | 3780 | { |
3780 | ext4_io_end_t *io_end = iocb->private; | 3781 | ext4_io_end_t *io_end = iocb->private; |
3781 | struct workqueue_struct *wq; | 3782 | struct workqueue_struct *wq; |
@@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3784 | 3785 | ||
3785 | /* if not async direct IO or dio with 0 bytes write, just return */ | 3786 | /* if not async direct IO or dio with 0 bytes write, just return */ |
3786 | if (!io_end || !size) | 3787 | if (!io_end || !size) |
3787 | return; | 3788 | goto out; |
3788 | 3789 | ||
3789 | ext_debug("ext4_end_io_dio(): io_end 0x%p" | 3790 | ext_debug("ext4_end_io_dio(): io_end 0x%p" |
3790 | "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", | 3791 | "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", |
@@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3795 | if (io_end->flag != EXT4_IO_UNWRITTEN){ | 3796 | if (io_end->flag != EXT4_IO_UNWRITTEN){ |
3796 | ext4_free_io_end(io_end); | 3797 | ext4_free_io_end(io_end); |
3797 | iocb->private = NULL; | 3798 | iocb->private = NULL; |
3798 | return; | 3799 | goto out; |
3799 | } | 3800 | } |
3800 | 3801 | ||
3801 | io_end->offset = offset; | 3802 | io_end->offset = offset; |
@@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, | |||
3812 | list_add_tail(&io_end->list, &ei->i_completed_io_list); | 3813 | list_add_tail(&io_end->list, &ei->i_completed_io_list); |
3813 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); | 3814 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
3814 | iocb->private = NULL; | 3815 | iocb->private = NULL; |
3816 | out: | ||
3817 | if (is_async) | ||
3818 | aio_complete(iocb, ret, 0); | ||
3815 | } | 3819 | } |
3816 | 3820 | ||
3817 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) | 3821 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 51e11bf5708f..9d175d623aab 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -733,12 +733,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) | |||
733 | { | 733 | { |
734 | while (fa) { | 734 | while (fa) { |
735 | struct fown_struct *fown; | 735 | struct fown_struct *fown; |
736 | unsigned long flags; | ||
737 | |||
736 | if (fa->magic != FASYNC_MAGIC) { | 738 | if (fa->magic != FASYNC_MAGIC) { |
737 | printk(KERN_ERR "kill_fasync: bad magic number in " | 739 | printk(KERN_ERR "kill_fasync: bad magic number in " |
738 | "fasync_struct!\n"); | 740 | "fasync_struct!\n"); |
739 | return; | 741 | return; |
740 | } | 742 | } |
741 | spin_lock(&fa->fa_lock); | 743 | spin_lock_irqsave(&fa->fa_lock, flags); |
742 | if (fa->fa_file) { | 744 | if (fa->fa_file) { |
743 | fown = &fa->fa_file->f_owner; | 745 | fown = &fa->fa_file->f_owner; |
744 | /* Don't send SIGURG to processes which have not set a | 746 | /* Don't send SIGURG to processes which have not set a |
@@ -747,7 +749,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) | |||
747 | if (!(sig == SIGURG && fown->signum == 0)) | 749 | if (!(sig == SIGURG && fown->signum == 0)) |
748 | send_sigio(fown, fa->fa_fd, band); | 750 | send_sigio(fown, fa->fa_fd, band); |
749 | } | 751 | } |
750 | spin_unlock(&fa->fa_lock); | 752 | spin_unlock_irqrestore(&fa->fa_lock, flags); |
751 | fa = rcu_dereference(fa->fa_next); | 753 | fa = rcu_dereference(fa->fa_next); |
752 | } | 754 | } |
753 | } | 755 | } |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1d1088f48bc2..d5be1693ac93 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -38,51 +38,18 @@ int nr_pdflush_threads; | |||
38 | /* | 38 | /* |
39 | * Passed into wb_writeback(), essentially a subset of writeback_control | 39 | * Passed into wb_writeback(), essentially a subset of writeback_control |
40 | */ | 40 | */ |
41 | struct wb_writeback_args { | 41 | struct wb_writeback_work { |
42 | long nr_pages; | 42 | long nr_pages; |
43 | struct super_block *sb; | 43 | struct super_block *sb; |
44 | enum writeback_sync_modes sync_mode; | 44 | enum writeback_sync_modes sync_mode; |
45 | unsigned int for_kupdate:1; | 45 | unsigned int for_kupdate:1; |
46 | unsigned int range_cyclic:1; | 46 | unsigned int range_cyclic:1; |
47 | unsigned int for_background:1; | 47 | unsigned int for_background:1; |
48 | }; | ||
49 | 48 | ||
50 | /* | ||
51 | * Work items for the bdi_writeback threads | ||
52 | */ | ||
53 | struct bdi_work { | ||
54 | struct list_head list; /* pending work list */ | 49 | struct list_head list; /* pending work list */ |
55 | struct rcu_head rcu_head; /* for RCU free/clear of work */ | 50 | struct completion *done; /* set if the caller waits */ |
56 | |||
57 | unsigned long seen; /* threads that have seen this work */ | ||
58 | atomic_t pending; /* number of threads still to do work */ | ||
59 | |||
60 | struct wb_writeback_args args; /* writeback arguments */ | ||
61 | |||
62 | unsigned long state; /* flag bits, see WS_* */ | ||
63 | }; | 51 | }; |
64 | 52 | ||
65 | enum { | ||
66 | WS_USED_B = 0, | ||
67 | WS_ONSTACK_B, | ||
68 | }; | ||
69 | |||
70 | #define WS_USED (1 << WS_USED_B) | ||
71 | #define WS_ONSTACK (1 << WS_ONSTACK_B) | ||
72 | |||
73 | static inline bool bdi_work_on_stack(struct bdi_work *work) | ||
74 | { | ||
75 | return test_bit(WS_ONSTACK_B, &work->state); | ||
76 | } | ||
77 | |||
78 | static inline void bdi_work_init(struct bdi_work *work, | ||
79 | struct wb_writeback_args *args) | ||
80 | { | ||
81 | INIT_RCU_HEAD(&work->rcu_head); | ||
82 | work->args = *args; | ||
83 | work->state = WS_USED; | ||
84 | } | ||
85 | |||
86 | /** | 53 | /** |
87 | * writeback_in_progress - determine whether there is writeback in progress | 54 | * writeback_in_progress - determine whether there is writeback in progress |
88 | * @bdi: the device's backing_dev_info structure. | 55 | * @bdi: the device's backing_dev_info structure. |
@@ -95,76 +62,11 @@ int writeback_in_progress(struct backing_dev_info *bdi) | |||
95 | return !list_empty(&bdi->work_list); | 62 | return !list_empty(&bdi->work_list); |
96 | } | 63 | } |
97 | 64 | ||
98 | static void bdi_work_clear(struct bdi_work *work) | 65 | static void bdi_queue_work(struct backing_dev_info *bdi, |
99 | { | 66 | struct wb_writeback_work *work) |
100 | clear_bit(WS_USED_B, &work->state); | ||
101 | smp_mb__after_clear_bit(); | ||
102 | /* | ||
103 | * work can have disappeared at this point. bit waitq functions | ||
104 | * should be able to tolerate this, provided bdi_sched_wait does | ||
105 | * not dereference it's pointer argument. | ||
106 | */ | ||
107 | wake_up_bit(&work->state, WS_USED_B); | ||
108 | } | ||
109 | |||
110 | static void bdi_work_free(struct rcu_head *head) | ||
111 | { | ||
112 | struct bdi_work *work = container_of(head, struct bdi_work, rcu_head); | ||
113 | |||
114 | if (!bdi_work_on_stack(work)) | ||
115 | kfree(work); | ||
116 | else | ||
117 | bdi_work_clear(work); | ||
118 | } | ||
119 | |||
120 | static void wb_work_complete(struct bdi_work *work) | ||
121 | { | ||
122 | const enum writeback_sync_modes sync_mode = work->args.sync_mode; | ||
123 | int onstack = bdi_work_on_stack(work); | ||
124 | |||
125 | /* | ||
126 | * For allocated work, we can clear the done/seen bit right here. | ||
127 | * For on-stack work, we need to postpone both the clear and free | ||
128 | * to after the RCU grace period, since the stack could be invalidated | ||
129 | * as soon as bdi_work_clear() has done the wakeup. | ||
130 | */ | ||
131 | if (!onstack) | ||
132 | bdi_work_clear(work); | ||
133 | if (sync_mode == WB_SYNC_NONE || onstack) | ||
134 | call_rcu(&work->rcu_head, bdi_work_free); | ||
135 | } | ||
136 | |||
137 | static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) | ||
138 | { | ||
139 | /* | ||
140 | * The caller has retrieved the work arguments from this work, | ||
141 | * drop our reference. If this is the last ref, delete and free it | ||
142 | */ | ||
143 | if (atomic_dec_and_test(&work->pending)) { | ||
144 | struct backing_dev_info *bdi = wb->bdi; | ||
145 | |||
146 | spin_lock(&bdi->wb_lock); | ||
147 | list_del_rcu(&work->list); | ||
148 | spin_unlock(&bdi->wb_lock); | ||
149 | |||
150 | wb_work_complete(work); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) | ||
155 | { | 67 | { |
156 | work->seen = bdi->wb_mask; | ||
157 | BUG_ON(!work->seen); | ||
158 | atomic_set(&work->pending, bdi->wb_cnt); | ||
159 | BUG_ON(!bdi->wb_cnt); | ||
160 | |||
161 | /* | ||
162 | * list_add_tail_rcu() contains the necessary barriers to | ||
163 | * make sure the above stores are seen before the item is | ||
164 | * noticed on the list | ||
165 | */ | ||
166 | spin_lock(&bdi->wb_lock); | 68 | spin_lock(&bdi->wb_lock); |
167 | list_add_tail_rcu(&work->list, &bdi->work_list); | 69 | list_add_tail(&work->list, &bdi->work_list); |
168 | spin_unlock(&bdi->wb_lock); | 70 | spin_unlock(&bdi->wb_lock); |
169 | 71 | ||
170 | /* | 72 | /* |
@@ -181,97 +83,59 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) | |||
181 | } | 83 | } |
182 | } | 84 | } |
183 | 85 | ||
184 | /* | 86 | static void |
185 | * Used for on-stack allocated work items. The caller needs to wait until | 87 | __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, |
186 | * the wb threads have acked the work before it's safe to continue. | 88 | bool range_cyclic, bool for_background) |
187 | */ | ||
188 | static void bdi_wait_on_work_clear(struct bdi_work *work) | ||
189 | { | ||
190 | wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait, | ||
191 | TASK_UNINTERRUPTIBLE); | ||
192 | } | ||
193 | |||
194 | static void bdi_alloc_queue_work(struct backing_dev_info *bdi, | ||
195 | struct wb_writeback_args *args) | ||
196 | { | 89 | { |
197 | struct bdi_work *work; | 90 | struct wb_writeback_work *work; |
198 | 91 | ||
199 | /* | 92 | /* |
200 | * This is WB_SYNC_NONE writeback, so if allocation fails just | 93 | * This is WB_SYNC_NONE writeback, so if allocation fails just |
201 | * wakeup the thread for old dirty data writeback | 94 | * wakeup the thread for old dirty data writeback |
202 | */ | 95 | */ |
203 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | 96 | work = kzalloc(sizeof(*work), GFP_ATOMIC); |
204 | if (work) { | 97 | if (!work) { |
205 | bdi_work_init(work, args); | 98 | if (bdi->wb.task) |
206 | bdi_queue_work(bdi, work); | 99 | wake_up_process(bdi->wb.task); |
207 | } else { | 100 | return; |
208 | struct bdi_writeback *wb = &bdi->wb; | ||
209 | |||
210 | if (wb->task) | ||
211 | wake_up_process(wb->task); | ||
212 | } | 101 | } |
102 | |||
103 | work->sync_mode = WB_SYNC_NONE; | ||
104 | work->nr_pages = nr_pages; | ||
105 | work->range_cyclic = range_cyclic; | ||
106 | work->for_background = for_background; | ||
107 | |||
108 | bdi_queue_work(bdi, work); | ||
213 | } | 109 | } |
214 | 110 | ||
215 | /** | 111 | /** |
216 | * bdi_sync_writeback - start and wait for writeback | 112 | * bdi_start_writeback - start writeback |
217 | * @bdi: the backing device to write from | 113 | * @bdi: the backing device to write from |
218 | * @sb: write inodes from this super_block | 114 | * @nr_pages: the number of pages to write |
219 | * | 115 | * |
220 | * Description: | 116 | * Description: |
221 | * This does WB_SYNC_ALL data integrity writeback and waits for the | 117 | * This does WB_SYNC_NONE opportunistic writeback. The IO is only |
222 | * IO to complete. Callers must hold the sb s_umount semaphore for | 118 | * started when this function returns, we make no guarantees on
223 | * reading, to avoid having the super disappear before we are done. | 119 | * completion. Caller need not hold sb s_umount semaphore. |
120 | * | ||
224 | */ | 121 | */ |
225 | static void bdi_sync_writeback(struct backing_dev_info *bdi, | 122 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) |
226 | struct super_block *sb) | ||
227 | { | 123 | { |
228 | struct wb_writeback_args args = { | 124 | __bdi_start_writeback(bdi, nr_pages, true, false); |
229 | .sb = sb, | ||
230 | .sync_mode = WB_SYNC_ALL, | ||
231 | .nr_pages = LONG_MAX, | ||
232 | .range_cyclic = 0, | ||
233 | }; | ||
234 | struct bdi_work work; | ||
235 | |||
236 | bdi_work_init(&work, &args); | ||
237 | work.state |= WS_ONSTACK; | ||
238 | |||
239 | bdi_queue_work(bdi, &work); | ||
240 | bdi_wait_on_work_clear(&work); | ||
241 | } | 125 | } |
242 | 126 | ||
243 | /** | 127 | /** |
244 | * bdi_start_writeback - start writeback | 128 | * bdi_start_background_writeback - start background writeback |
245 | * @bdi: the backing device to write from | 129 | * @bdi: the backing device to write from |
246 | * @sb: write inodes from this super_block | ||
247 | * @nr_pages: the number of pages to write | ||
248 | * | 130 | * |
249 | * Description: | 131 | * Description: |
250 | * This does WB_SYNC_NONE opportunistic writeback. The IO is only | 132 | * This does WB_SYNC_NONE background writeback. The IO is only |
251 | * started when this function returns, we make no guarantees on | 133 | * started when this function returns, we make no guarantees on
252 | * completion. Caller need not hold sb s_umount semaphore. | 134 | * completion. Caller need not hold sb s_umount semaphore. |
253 | * | ||
254 | */ | 135 | */ |
255 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, | 136 | void bdi_start_background_writeback(struct backing_dev_info *bdi) |
256 | long nr_pages) | ||
257 | { | 137 | { |
258 | struct wb_writeback_args args = { | 138 | __bdi_start_writeback(bdi, LONG_MAX, true, true); |
259 | .sb = sb, | ||
260 | .sync_mode = WB_SYNC_NONE, | ||
261 | .nr_pages = nr_pages, | ||
262 | .range_cyclic = 1, | ||
263 | }; | ||
264 | |||
265 | /* | ||
266 | * We treat @nr_pages=0 as the special case to do background writeback, | ||
267 | * ie. to sync pages until the background dirty threshold is reached. | ||
268 | */ | ||
269 | if (!nr_pages) { | ||
270 | args.nr_pages = LONG_MAX; | ||
271 | args.for_background = 1; | ||
272 | } | ||
273 | |||
274 | bdi_alloc_queue_work(bdi, &args); | ||
275 | } | 139 | } |
276 | 140 | ||
277 | /* | 141 | /* |
@@ -561,75 +425,69 @@ select_queue: | |||
561 | return ret; | 425 | return ret; |
562 | } | 426 | } |
563 | 427 | ||
564 | static void unpin_sb_for_writeback(struct super_block *sb) | ||
565 | { | ||
566 | up_read(&sb->s_umount); | ||
567 | put_super(sb); | ||
568 | } | ||
569 | |||
570 | enum sb_pin_state { | ||
571 | SB_PINNED, | ||
572 | SB_NOT_PINNED, | ||
573 | SB_PIN_FAILED | ||
574 | }; | ||
575 | |||
576 | /* | 428 | /* |
577 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned | 429 | * For background writeback the caller does not have the sb pinned |
578 | * before calling writeback. So make sure that we do pin it, so it doesn't | 430 | * before calling writeback. So make sure that we do pin it, so it doesn't |
579 | * go away while we are writing inodes from it. | 431 | * go away while we are writing inodes from it. |
580 | */ | 432 | */ |
581 | static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc, | 433 | static bool pin_sb_for_writeback(struct super_block *sb) |
582 | struct super_block *sb) | ||
583 | { | 434 | { |
584 | /* | ||
585 | * Caller must already hold the ref for this | ||
586 | */ | ||
587 | if (wbc->sync_mode == WB_SYNC_ALL) { | ||
588 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
589 | return SB_NOT_PINNED; | ||
590 | } | ||
591 | spin_lock(&sb_lock); | 435 | spin_lock(&sb_lock); |
436 | if (list_empty(&sb->s_instances)) { | ||
437 | spin_unlock(&sb_lock); | ||
438 | return false; | ||
439 | } | ||
440 | |||
592 | sb->s_count++; | 441 | sb->s_count++; |
442 | spin_unlock(&sb_lock); | ||
443 | |||
593 | if (down_read_trylock(&sb->s_umount)) { | 444 | if (down_read_trylock(&sb->s_umount)) { |
594 | if (sb->s_root) { | 445 | if (sb->s_root) |
595 | spin_unlock(&sb_lock); | 446 | return true; |
596 | return SB_PINNED; | ||
597 | } | ||
598 | /* | ||
599 | * umounted, drop rwsem again and fall through to failure | ||
600 | */ | ||
601 | up_read(&sb->s_umount); | 447 | up_read(&sb->s_umount); |
602 | } | 448 | } |
603 | sb->s_count--; | 449 | |
604 | spin_unlock(&sb_lock); | 450 | put_super(sb); |
605 | return SB_PIN_FAILED; | 451 | return false; |
606 | } | 452 | } |
607 | 453 | ||
608 | /* | 454 | /* |
609 | * Write a portion of b_io inodes which belong to @sb. | 455 | * Write a portion of b_io inodes which belong to @sb. |
610 | * If @wbc->sb != NULL, then find and write all such | 456 | * |
457 | * If @only_this_sb is true, then find and write all such | ||
611 | * inodes. Otherwise write only ones which go sequentially | 458 | * inodes. Otherwise write only ones which go sequentially |
612 | * in reverse order. | 459 | * in reverse order. |
460 | * | ||
613 | * Return 1, if the caller writeback routine should be | 461 | * Return 1, if the caller writeback routine should be |
614 | * interrupted. Otherwise return 0. | 462 | * interrupted. Otherwise return 0. |
615 | */ | 463 | */ |
616 | static int writeback_sb_inodes(struct super_block *sb, | 464 | static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, |
617 | struct bdi_writeback *wb, | 465 | struct writeback_control *wbc, bool only_this_sb) |
618 | struct writeback_control *wbc) | ||
619 | { | 466 | { |
620 | while (!list_empty(&wb->b_io)) { | 467 | while (!list_empty(&wb->b_io)) { |
621 | long pages_skipped; | 468 | long pages_skipped; |
622 | struct inode *inode = list_entry(wb->b_io.prev, | 469 | struct inode *inode = list_entry(wb->b_io.prev, |
623 | struct inode, i_list); | 470 | struct inode, i_list); |
624 | if (wbc->sb && sb != inode->i_sb) { | 471 | |
625 | /* super block given and doesn't | 472 | if (inode->i_sb != sb) { |
626 | match, skip this inode */ | 473 | if (only_this_sb) { |
627 | redirty_tail(inode); | 474 | /* |
628 | continue; | 475 | * We only want to write back data for this |
629 | } | 476 | * superblock, move all inodes not belonging |
630 | if (sb != inode->i_sb) | 477 | * to it back onto the dirty list. |
631 | /* finish with this superblock */ | 478 | */ |
479 | redirty_tail(inode); | ||
480 | continue; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * The inode belongs to a different superblock. | ||
485 | * Bounce back to the caller to unpin this and | ||
486 | * pin the next superblock. | ||
487 | */ | ||
632 | return 0; | 488 | return 0; |
489 | } | ||
490 | |||
633 | if (inode->i_state & (I_NEW | I_WILL_FREE)) { | 491 | if (inode->i_state & (I_NEW | I_WILL_FREE)) { |
634 | requeue_io(inode); | 492 | requeue_io(inode); |
635 | continue; | 493 | continue; |
@@ -667,8 +525,8 @@ static int writeback_sb_inodes(struct super_block *sb, | |||
667 | return 1; | 525 | return 1; |
668 | } | 526 | } |
669 | 527 | ||
670 | static void writeback_inodes_wb(struct bdi_writeback *wb, | 528 | void writeback_inodes_wb(struct bdi_writeback *wb, |
671 | struct writeback_control *wbc) | 529 | struct writeback_control *wbc) |
672 | { | 530 | { |
673 | int ret = 0; | 531 | int ret = 0; |
674 | 532 | ||
@@ -681,24 +539,14 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
681 | struct inode *inode = list_entry(wb->b_io.prev, | 539 | struct inode *inode = list_entry(wb->b_io.prev, |
682 | struct inode, i_list); | 540 | struct inode, i_list); |
683 | struct super_block *sb = inode->i_sb; | 541 | struct super_block *sb = inode->i_sb; |
684 | enum sb_pin_state state; | ||
685 | 542 | ||
686 | if (wbc->sb && sb != wbc->sb) { | 543 | if (!pin_sb_for_writeback(sb)) { |
687 | /* super block given and doesn't | ||
688 | match, skip this inode */ | ||
689 | redirty_tail(inode); | ||
690 | continue; | ||
691 | } | ||
692 | state = pin_sb_for_writeback(wbc, sb); | ||
693 | |||
694 | if (state == SB_PIN_FAILED) { | ||
695 | requeue_io(inode); | 544 | requeue_io(inode); |
696 | continue; | 545 | continue; |
697 | } | 546 | } |
698 | ret = writeback_sb_inodes(sb, wb, wbc); | 547 | ret = writeback_sb_inodes(sb, wb, wbc, false); |
548 | drop_super(sb); | ||
699 | 549 | ||
700 | if (state == SB_PINNED) | ||
701 | unpin_sb_for_writeback(sb); | ||
702 | if (ret) | 550 | if (ret) |
703 | break; | 551 | break; |
704 | } | 552 | } |
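
writeback_sb_inodes() now distinguishes two cases when it meets an inode from another superblock: with only_this_sb it simply requeues the foreign inode, otherwise it returns so the caller can pin the next superblock. A compact user-space model of that control flow, with "owner" standing in for the superblock; none of the names are VFS code.

/* Toy model: 'owner' plays the role of the superblock. */
#include <stdbool.h>
#include <stdio.h>

struct item { int owner; int id; };

/* Returns the index where processing stopped. */
static int walk(struct item *v, int n, int start, int owner, bool only_this_owner)
{
	int i;

	for (i = start; i < n; i++) {
		if (v[i].owner != owner) {
			if (only_this_owner) {
				printf("  skip %d (owner %d)\n", v[i].id, v[i].owner);
				continue;       /* requeue and keep going */
			}
			return i;               /* let the caller re-pin */
		}
		printf("  write %d (owner %d)\n", v[i].id, v[i].owner);
	}
	return i;
}

int main(void)
{
	struct item list[] = {
		{ 1, 10 }, { 1, 11 }, { 2, 20 }, { 2, 21 }, { 1, 12 },
	};
	int n = 5, pos = 0;

	/* unrestricted pass: switch owners as we hit them */
	while (pos < n) {
		int owner = list[pos].owner;
		printf("pinning owner %d\n", owner);
		pos = walk(list, n, pos, owner, false);
	}

	/* restricted pass: only owner 1, foreign items are skipped */
	printf("restricted to owner 1\n");
	walk(list, n, 0, 1, true);
	return 0;
}
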
@@ -706,11 +554,17 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
706 | /* Leave any unwritten inodes on b_io */ | 554 | /* Leave any unwritten inodes on b_io */ |
707 | } | 555 | } |
708 | 556 | ||
709 | void writeback_inodes_wbc(struct writeback_control *wbc) | 557 | static void __writeback_inodes_sb(struct super_block *sb, |
558 | struct bdi_writeback *wb, struct writeback_control *wbc) | ||
710 | { | 559 | { |
711 | struct backing_dev_info *bdi = wbc->bdi; | 560 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
712 | 561 | ||
713 | writeback_inodes_wb(&bdi->wb, wbc); | 562 | wbc->wb_start = jiffies; /* livelock avoidance */ |
563 | spin_lock(&inode_lock); | ||
564 | if (!wbc->for_kupdate || list_empty(&wb->b_io)) | ||
565 | queue_io(wb, wbc->older_than_this); | ||
566 | writeback_sb_inodes(sb, wb, wbc, true); | ||
567 | spin_unlock(&inode_lock); | ||
714 | } | 568 | } |
715 | 569 | ||
716 | /* | 570 | /* |
@@ -748,16 +602,14 @@ static inline bool over_bground_thresh(void) | |||
748 | * all dirty pages if they are all attached to "old" mappings. | 602 | * all dirty pages if they are all attached to "old" mappings. |
749 | */ | 603 | */ |
750 | static long wb_writeback(struct bdi_writeback *wb, | 604 | static long wb_writeback(struct bdi_writeback *wb, |
751 | struct wb_writeback_args *args) | 605 | struct wb_writeback_work *work) |
752 | { | 606 | { |
753 | struct writeback_control wbc = { | 607 | struct writeback_control wbc = { |
754 | .bdi = wb->bdi, | 608 | .sync_mode = work->sync_mode, |
755 | .sb = args->sb, | ||
756 | .sync_mode = args->sync_mode, | ||
757 | .older_than_this = NULL, | 609 | .older_than_this = NULL, |
758 | .for_kupdate = args->for_kupdate, | 610 | .for_kupdate = work->for_kupdate, |
759 | .for_background = args->for_background, | 611 | .for_background = work->for_background, |
760 | .range_cyclic = args->range_cyclic, | 612 | .range_cyclic = work->range_cyclic, |
761 | }; | 613 | }; |
762 | unsigned long oldest_jif; | 614 | unsigned long oldest_jif; |
763 | long wrote = 0; | 615 | long wrote = 0; |
@@ -777,21 +629,24 @@ static long wb_writeback(struct bdi_writeback *wb, | |||
777 | /* | 629 | /* |
778 | * Stop writeback when nr_pages has been consumed | 630 | * Stop writeback when nr_pages has been consumed |
779 | */ | 631 | */ |
780 | if (args->nr_pages <= 0) | 632 | if (work->nr_pages <= 0) |
781 | break; | 633 | break; |
782 | 634 | ||
783 | /* | 635 | /* |
784 | * For background writeout, stop when we are below the | 636 | * For background writeout, stop when we are below the |
785 | * background dirty threshold | 637 | * background dirty threshold |
786 | */ | 638 | */ |
787 | if (args->for_background && !over_bground_thresh()) | 639 | if (work->for_background && !over_bground_thresh()) |
788 | break; | 640 | break; |
789 | 641 | ||
790 | wbc.more_io = 0; | 642 | wbc.more_io = 0; |
791 | wbc.nr_to_write = MAX_WRITEBACK_PAGES; | 643 | wbc.nr_to_write = MAX_WRITEBACK_PAGES; |
792 | wbc.pages_skipped = 0; | 644 | wbc.pages_skipped = 0; |
793 | writeback_inodes_wb(wb, &wbc); | 645 | if (work->sb) |
794 | args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; | 646 | __writeback_inodes_sb(work->sb, wb, &wbc); |
647 | else | ||
648 | writeback_inodes_wb(wb, &wbc); | ||
649 | work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; | ||
795 | wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; | 650 | wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; |
796 | 651 | ||
797 | /* | 652 | /* |
@@ -827,31 +682,21 @@ static long wb_writeback(struct bdi_writeback *wb, | |||
827 | } | 682 | } |
828 | 683 | ||
829 | /* | 684 | /* |
830 | * Return the next bdi_work struct that hasn't been processed by this | 685 | * Return the next wb_writeback_work struct that hasn't been processed yet. |
831 | * wb thread yet. ->seen is initially set for each thread that exists | ||
832 | * for this device, when a thread first notices a piece of work it | ||
833 | * clears its bit. Depending on writeback type, the thread will notify | ||
834 | * completion on either receiving the work (WB_SYNC_NONE) or after | ||
835 | * it is done (WB_SYNC_ALL). | ||
836 | */ | 686 | */ |
837 | static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi, | 687 | static struct wb_writeback_work * |
838 | struct bdi_writeback *wb) | 688 | get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb) |
839 | { | 689 | { |
840 | struct bdi_work *work, *ret = NULL; | 690 | struct wb_writeback_work *work = NULL; |
841 | 691 | ||
842 | rcu_read_lock(); | 692 | spin_lock(&bdi->wb_lock); |
843 | 693 | if (!list_empty(&bdi->work_list)) { | |
844 | list_for_each_entry_rcu(work, &bdi->work_list, list) { | 694 | work = list_entry(bdi->work_list.next, |
845 | if (!test_bit(wb->nr, &work->seen)) | 695 | struct wb_writeback_work, list); |
846 | continue; | 696 | list_del_init(&work->list); |
847 | clear_bit(wb->nr, &work->seen); | ||
848 | |||
849 | ret = work; | ||
850 | break; | ||
851 | } | 697 | } |
852 | 698 | spin_unlock(&bdi->wb_lock); | |
853 | rcu_read_unlock(); | 699 | return work; |
854 | return ret; | ||
855 | } | 700 | } |
856 | 701 | ||
857 | static long wb_check_old_data_flush(struct bdi_writeback *wb) | 702 | static long wb_check_old_data_flush(struct bdi_writeback *wb) |
@@ -876,14 +721,14 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) | |||
876 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); | 721 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); |
877 | 722 | ||
878 | if (nr_pages) { | 723 | if (nr_pages) { |
879 | struct wb_writeback_args args = { | 724 | struct wb_writeback_work work = { |
880 | .nr_pages = nr_pages, | 725 | .nr_pages = nr_pages, |
881 | .sync_mode = WB_SYNC_NONE, | 726 | .sync_mode = WB_SYNC_NONE, |
882 | .for_kupdate = 1, | 727 | .for_kupdate = 1, |
883 | .range_cyclic = 1, | 728 | .range_cyclic = 1, |
884 | }; | 729 | }; |
885 | 730 | ||
886 | return wb_writeback(wb, &args); | 731 | return wb_writeback(wb, &work); |
887 | } | 732 | } |
888 | 733 | ||
889 | return 0; | 734 | return 0; |
@@ -895,33 +740,27 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) | |||
895 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait) | 740 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait) |
896 | { | 741 | { |
897 | struct backing_dev_info *bdi = wb->bdi; | 742 | struct backing_dev_info *bdi = wb->bdi; |
898 | struct bdi_work *work; | 743 | struct wb_writeback_work *work; |
899 | long wrote = 0; | 744 | long wrote = 0; |
900 | 745 | ||
901 | while ((work = get_next_work_item(bdi, wb)) != NULL) { | 746 | while ((work = get_next_work_item(bdi, wb)) != NULL) { |
902 | struct wb_writeback_args args = work->args; | ||
903 | |||
904 | /* | 747 | /* |
905 | * Override sync mode, in case we must wait for completion | 748 | * Override sync mode, in case we must wait for completion |
749 | * because this thread is exiting now. | ||
906 | */ | 750 | */ |
907 | if (force_wait) | 751 | if (force_wait) |
908 | work->args.sync_mode = args.sync_mode = WB_SYNC_ALL; | 752 | work->sync_mode = WB_SYNC_ALL; |
909 | |||
910 | /* | ||
911 | * If this isn't a data integrity operation, just notify | ||
912 | * that we have seen this work and we are now starting it. | ||
913 | */ | ||
914 | if (args.sync_mode == WB_SYNC_NONE) | ||
915 | wb_clear_pending(wb, work); | ||
916 | 753 | ||
917 | wrote += wb_writeback(wb, &args); | 754 | wrote += wb_writeback(wb, work); |
918 | 755 | ||
919 | /* | 756 | /* |
920 | * This is a data integrity writeback, so only do the | 757 | * Notify the caller of completion if this is a synchronous |
921 | * notification when we have completed the work. | 758 | * work item, otherwise just free it. |
922 | */ | 759 | */ |
923 | if (args.sync_mode == WB_SYNC_ALL) | 760 | if (work->done) |
924 | wb_clear_pending(wb, work); | 761 | complete(work->done); |
762 | else | ||
763 | kfree(work); | ||
925 | } | 764 | } |
926 | 765 | ||
927 | /* | 766 | /* |
@@ -978,42 +817,27 @@ int bdi_writeback_task(struct bdi_writeback *wb) | |||
978 | } | 817 | } |
979 | 818 | ||
980 | /* | 819 | /* |
981 | * Schedule writeback for all backing devices. This does WB_SYNC_NONE | 820 | * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back |
982 | * writeback, for integrity writeback see bdi_sync_writeback(). | 821 | * the whole world. |
983 | */ | 822 | */ |
984 | static void bdi_writeback_all(struct super_block *sb, long nr_pages) | 823 | void wakeup_flusher_threads(long nr_pages) |
985 | { | 824 | { |
986 | struct wb_writeback_args args = { | ||
987 | .sb = sb, | ||
988 | .nr_pages = nr_pages, | ||
989 | .sync_mode = WB_SYNC_NONE, | ||
990 | }; | ||
991 | struct backing_dev_info *bdi; | 825 | struct backing_dev_info *bdi; |
992 | 826 | ||
993 | rcu_read_lock(); | 827 | if (!nr_pages) { |
828 | nr_pages = global_page_state(NR_FILE_DIRTY) + | ||
829 | global_page_state(NR_UNSTABLE_NFS); | ||
830 | } | ||
994 | 831 | ||
832 | rcu_read_lock(); | ||
995 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { | 833 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { |
996 | if (!bdi_has_dirty_io(bdi)) | 834 | if (!bdi_has_dirty_io(bdi)) |
997 | continue; | 835 | continue; |
998 | 836 | __bdi_start_writeback(bdi, nr_pages, false, false); | |
999 | bdi_alloc_queue_work(bdi, &args); | ||
1000 | } | 837 | } |
1001 | |||
1002 | rcu_read_unlock(); | 838 | rcu_read_unlock(); |
1003 | } | 839 | } |
1004 | 840 | ||
1005 | /* | ||
1006 | * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back | ||
1007 | * the whole world. | ||
1008 | */ | ||
1009 | void wakeup_flusher_threads(long nr_pages) | ||
1010 | { | ||
1011 | if (nr_pages == 0) | ||
1012 | nr_pages = global_page_state(NR_FILE_DIRTY) + | ||
1013 | global_page_state(NR_UNSTABLE_NFS); | ||
1014 | bdi_writeback_all(NULL, nr_pages); | ||
1015 | } | ||
1016 | |||
1017 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) | 841 | static noinline void block_dump___mark_inode_dirty(struct inode *inode) |
1018 | { | 842 | { |
1019 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { | 843 | if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { |
@@ -1218,12 +1042,20 @@ void writeback_inodes_sb(struct super_block *sb) | |||
1218 | { | 1042 | { |
1219 | unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); | 1043 | unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); |
1220 | unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); | 1044 | unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); |
1221 | long nr_to_write; | 1045 | DECLARE_COMPLETION_ONSTACK(done); |
1046 | struct wb_writeback_work work = { | ||
1047 | .sb = sb, | ||
1048 | .sync_mode = WB_SYNC_NONE, | ||
1049 | .done = &done, | ||
1050 | }; | ||
1051 | |||
1052 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
1222 | 1053 | ||
1223 | nr_to_write = nr_dirty + nr_unstable + | 1054 | work.nr_pages = nr_dirty + nr_unstable + |
1224 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); | 1055 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); |
1225 | 1056 | ||
1226 | bdi_start_writeback(sb->s_bdi, sb, nr_to_write); | 1057 | bdi_queue_work(sb->s_bdi, &work); |
1058 | wait_for_completion(&done); | ||
1227 | } | 1059 | } |
1228 | EXPORT_SYMBOL(writeback_inodes_sb); | 1060 | EXPORT_SYMBOL(writeback_inodes_sb); |
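writeback_inodes_sb() above is the caller-side half of the new scheme: it builds a wb_writeback_work on the stack with DECLARE_COMPLETION_ONSTACK, queues it, and blocks until the flusher signals the completion; fire-and-forget callers pass no completion and the flusher kfree()s the item instead, as wb_do_writeback() shows in an earlier hunk of this file. A rough userspace model of that ownership convention, with a POSIX semaphore standing in for struct completion; the function and field names are invented for the sketch.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

struct wb_work {
	struct wb_work *next;
	long nr_pages;
	sem_t *done;			/* NULL for fire-and-forget work */
};

static pthread_mutex_t wb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct wb_work *wb_head;
static struct wb_work **wb_tail = &wb_head;

static void wb_queue_work(struct wb_work *w)
{
	pthread_mutex_lock(&wb_lock);
	w->next = NULL;
	*wb_tail = w;
	wb_tail = &w->next;
	pthread_mutex_unlock(&wb_lock);
}

static struct wb_work *wb_next_work(void)
{
	struct wb_work *w;

	pthread_mutex_lock(&wb_lock);
	w = wb_head;
	if (w) {
		wb_head = w->next;
		if (!wb_head)
			wb_tail = &wb_head;
	}
	pthread_mutex_unlock(&wb_lock);
	return w;
}

/* Flusher side: do the work, then notify or free, never both. */
static void *flusher(void *arg)
{
	struct wb_work *w;

	while ((w = wb_next_work()) != NULL) {
		printf("writing back %ld pages\n", w->nr_pages);
		if (w->done)
			sem_post(w->done);	/* caller owns the item */
		else
			free(w);		/* nobody is waiting */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	sem_t done;
	struct wb_work sync_work = { .nr_pages = 1024, .done = &done };
	struct wb_work *async_work = malloc(sizeof(*async_work));

	if (!async_work)
		return 1;
	sem_init(&done, 0, 0);
	async_work->nr_pages = 256;
	async_work->done = NULL;

	wb_queue_work(async_work);	/* fire-and-forget, like __bdi_start_writeback() */
	wb_queue_work(&sync_work);	/* on-stack, waited for, like writeback_inodes_sb() */

	pthread_create(&tid, NULL, flusher, NULL);
	sem_wait(&done);		/* wait_for_completion() analogue */
	pthread_join(tid, NULL);
	sem_destroy(&done);
	return 0;
}

Because the synchronous item lives on the caller's stack, the worker must never free it; signalling the completion is how ownership is handed back, which is exactly the split the wb_do_writeback() hunk encodes with "if (work->done) complete(work->done); else kfree(work);".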
1229 | 1061 | ||
@@ -1237,7 +1069,9 @@ EXPORT_SYMBOL(writeback_inodes_sb); | |||
1237 | int writeback_inodes_sb_if_idle(struct super_block *sb) | 1069 | int writeback_inodes_sb_if_idle(struct super_block *sb) |
1238 | { | 1070 | { |
1239 | if (!writeback_in_progress(sb->s_bdi)) { | 1071 | if (!writeback_in_progress(sb->s_bdi)) { |
1072 | down_read(&sb->s_umount); | ||
1240 | writeback_inodes_sb(sb); | 1073 | writeback_inodes_sb(sb); |
1074 | up_read(&sb->s_umount); | ||
1241 | return 1; | 1075 | return 1; |
1242 | } else | 1076 | } else |
1243 | return 0; | 1077 | return 0; |
@@ -1253,7 +1087,20 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle); | |||
1253 | */ | 1087 | */ |
1254 | void sync_inodes_sb(struct super_block *sb) | 1088 | void sync_inodes_sb(struct super_block *sb) |
1255 | { | 1089 | { |
1256 | bdi_sync_writeback(sb->s_bdi, sb); | 1090 | DECLARE_COMPLETION_ONSTACK(done); |
1091 | struct wb_writeback_work work = { | ||
1092 | .sb = sb, | ||
1093 | .sync_mode = WB_SYNC_ALL, | ||
1094 | .nr_pages = LONG_MAX, | ||
1095 | .range_cyclic = 0, | ||
1096 | .done = &done, | ||
1097 | }; | ||
1098 | |||
1099 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | ||
1100 | |||
1101 | bdi_queue_work(sb->s_bdi, &work); | ||
1102 | wait_for_completion(&done); | ||
1103 | |||
1257 | wait_sb_inodes(sb); | 1104 | wait_sb_inodes(sb); |
1258 | } | 1105 | } |
1259 | EXPORT_SYMBOL(sync_inodes_sb); | 1106 | EXPORT_SYMBOL(sync_inodes_sb); |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 9f8b52500d63..5e96cbd8a454 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c | |||
@@ -136,10 +136,7 @@ static int gfs2_writeback_writepage(struct page *page, | |||
136 | if (ret <= 0) | 136 | if (ret <= 0) |
137 | return ret; | 137 | return ret; |
138 | 138 | ||
139 | ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc); | 139 | return nobh_writepage(page, gfs2_get_block_noalloc, wbc); |
140 | if (ret == -EAGAIN) | ||
141 | ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc); | ||
142 | return ret; | ||
143 | } | 140 | } |
144 | 141 | ||
145 | /** | 142 | /** |
@@ -637,9 +634,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, | |||
637 | } | 634 | } |
638 | } | 635 | } |
639 | 636 | ||
640 | error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); | 637 | alloc_required = gfs2_write_alloc_required(ip, pos, len); |
641 | if (error) | ||
642 | goto out_unlock; | ||
643 | 638 | ||
644 | if (alloc_required || gfs2_is_jdata(ip)) | 639 | if (alloc_required || gfs2_is_jdata(ip)) |
645 | gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); | 640 | gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 4a48c0f4b402..6f482809d1a3 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -1040,7 +1040,8 @@ static int trunc_start(struct gfs2_inode *ip, u64 size) | |||
1040 | goto out; | 1040 | goto out; |
1041 | 1041 | ||
1042 | if (gfs2_is_stuffed(ip)) { | 1042 | if (gfs2_is_stuffed(ip)) { |
1043 | u64 dsize = size + sizeof(struct gfs2_inode); | 1043 | u64 dsize = size + sizeof(struct gfs2_dinode); |
1044 | ip->i_disksize = size; | ||
1044 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; | 1045 | ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME; |
1045 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 1046 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
1046 | gfs2_dinode_out(ip, dibh->b_data); | 1047 | gfs2_dinode_out(ip, dibh->b_data); |
@@ -1243,13 +1244,12 @@ int gfs2_file_dealloc(struct gfs2_inode *ip) | |||
1243 | * @ip: the file being written to | 1244 | * @ip: the file being written to |
1244 | * @offset: the offset to write to | 1245 | * @offset: the offset to write to |
1245 | * @len: the number of bytes being written | 1246 | * @len: the number of bytes being written |
1246 | * @alloc_required: set to 1 if an alloc is required, 0 otherwise | ||
1247 | * | 1247 | * |
1248 | * Returns: errno | 1248 | * Returns: 1 if an alloc is required, 0 otherwise |
1249 | */ | 1249 | */ |
1250 | 1250 | ||
1251 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | 1251 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, |
1252 | unsigned int len, int *alloc_required) | 1252 | unsigned int len) |
1253 | { | 1253 | { |
1254 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1254 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1255 | struct buffer_head bh; | 1255 | struct buffer_head bh; |
@@ -1257,26 +1257,23 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | |||
1257 | u64 lblock, lblock_stop, size; | 1257 | u64 lblock, lblock_stop, size; |
1258 | u64 end_of_file; | 1258 | u64 end_of_file; |
1259 | 1259 | ||
1260 | *alloc_required = 0; | ||
1261 | |||
1262 | if (!len) | 1260 | if (!len) |
1263 | return 0; | 1261 | return 0; |
1264 | 1262 | ||
1265 | if (gfs2_is_stuffed(ip)) { | 1263 | if (gfs2_is_stuffed(ip)) { |
1266 | if (offset + len > | 1264 | if (offset + len > |
1267 | sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) | 1265 | sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) |
1268 | *alloc_required = 1; | 1266 | return 1; |
1269 | return 0; | 1267 | return 0; |
1270 | } | 1268 | } |
1271 | 1269 | ||
1272 | *alloc_required = 1; | ||
1273 | shift = sdp->sd_sb.sb_bsize_shift; | 1270 | shift = sdp->sd_sb.sb_bsize_shift; |
1274 | BUG_ON(gfs2_is_dir(ip)); | 1271 | BUG_ON(gfs2_is_dir(ip)); |
1275 | end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; | 1272 | end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift; |
1276 | lblock = offset >> shift; | 1273 | lblock = offset >> shift; |
1277 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; | 1274 | lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; |
1278 | if (lblock_stop > end_of_file) | 1275 | if (lblock_stop > end_of_file) |
1279 | return 0; | 1276 | return 1; |
1280 | 1277 | ||
1281 | size = (lblock_stop - lblock) << shift; | 1278 | size = (lblock_stop - lblock) << shift; |
1282 | do { | 1279 | do { |
@@ -1284,12 +1281,11 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | |||
1284 | bh.b_size = size; | 1281 | bh.b_size = size; |
1285 | gfs2_block_map(&ip->i_inode, lblock, &bh, 0); | 1282 | gfs2_block_map(&ip->i_inode, lblock, &bh, 0); |
1286 | if (!buffer_mapped(&bh)) | 1283 | if (!buffer_mapped(&bh)) |
1287 | return 0; | 1284 | return 1; |
1288 | size -= bh.b_size; | 1285 | size -= bh.b_size; |
1289 | lblock += (bh.b_size >> ip->i_inode.i_blkbits); | 1286 | lblock += (bh.b_size >> ip->i_inode.i_blkbits); |
1290 | } while(size > 0); | 1287 | } while(size > 0); |
1291 | 1288 | ||
1292 | *alloc_required = 0; | ||
1293 | return 0; | 1289 | return 0; |
1294 | } | 1290 | } |
1295 | 1291 | ||
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h index c983177e05ac..a20a5213135a 100644 --- a/fs/gfs2/bmap.h +++ b/fs/gfs2/bmap.h | |||
@@ -52,6 +52,6 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size); | |||
52 | int gfs2_truncatei_resume(struct gfs2_inode *ip); | 52 | int gfs2_truncatei_resume(struct gfs2_inode *ip); |
53 | int gfs2_file_dealloc(struct gfs2_inode *ip); | 53 | int gfs2_file_dealloc(struct gfs2_inode *ip); |
54 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, | 54 | int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, |
55 | unsigned int len, int *alloc_required); | 55 | unsigned int len); |
56 | 56 | ||
57 | #endif /* __BMAP_DOT_H__ */ | 57 | #endif /* __BMAP_DOT_H__ */ |
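The gfs2_write_alloc_required() change above turns an errno-plus-out-parameter interface into a plain boolean predicate, which is what lets every caller in this series collapse two checks into one. A generic before/after sketch of that refactor; nothing here is GFS2 code, only the calling convention is modelled.

#include <stdio.h>
#include <stdbool.h>

/* Old style: the answer comes back through an out-parameter and the
 * return value is reserved for errors the function can no longer hit. */
static int range_needs_alloc_old(unsigned long start, unsigned long len,
				 unsigned long mapped_end, int *alloc_required)
{
	*alloc_required = (start + len > mapped_end);
	return 0;
}

/* New style: the function simply answers the question it is asked. */
static bool range_needs_alloc(unsigned long start, unsigned long len,
			      unsigned long mapped_end)
{
	return start + len > mapped_end;
}

int main(void)
{
	int alloc_required;

	/* Old calling convention: two checks for one question. */
	if (range_needs_alloc_old(100, 50, 120, &alloc_required) == 0 &&
	    alloc_required)
		printf("old API: allocation required\n");

	/* New calling convention. */
	if (range_needs_alloc(100, 50, 120))
		printf("new API: allocation required\n");
	return 0;
}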
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 8295c5b5d4a9..b9dd88a78dd4 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c | |||
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent, | |||
392 | unsigned totlen = be16_to_cpu(dent->de_rec_len); | 392 | unsigned totlen = be16_to_cpu(dent->de_rec_len); |
393 | 393 | ||
394 | if (gfs2_dirent_sentinel(dent)) | 394 | if (gfs2_dirent_sentinel(dent)) |
395 | actual = GFS2_DIRENT_SIZE(0); | 395 | actual = 0; |
396 | if (totlen - actual >= required) | 396 | if (totlen - actual >= required) |
397 | return 1; | 397 | return 1; |
398 | return 0; | 398 | return 0; |
@@ -955,7 +955,12 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) | |||
955 | /* Change the pointers. | 955 | /* Change the pointers. |
956 | Don't bother distinguishing stuffed from non-stuffed. | 956 | Don't bother distinguishing stuffed from non-stuffed. |
957 | This code is complicated enough already. */ | 957 | This code is complicated enough already. */ |
958 | lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL); | 958 | lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS); |
959 | if (!lp) { | ||
960 | error = -ENOMEM; | ||
961 | goto fail_brelse; | ||
962 | } | ||
963 | |||
959 | /* Change the pointers */ | 964 | /* Change the pointers */ |
960 | for (x = 0; x < half_len; x++) | 965 | for (x = 0; x < half_len; x++) |
961 | lp[x] = cpu_to_be64(bn); | 966 | lp[x] = cpu_to_be64(bn); |
@@ -1063,7 +1068,9 @@ static int dir_double_exhash(struct gfs2_inode *dip) | |||
1063 | 1068 | ||
1064 | /* Allocate both the "from" and "to" buffers in one big chunk */ | 1069 | /* Allocate both the "from" and "to" buffers in one big chunk */ |
1065 | 1070 | ||
1066 | buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL); | 1071 | buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS); |
1072 | if (!buf) | ||
1073 | return -ENOMEM; | ||
1067 | 1074 | ||
1068 | for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { | 1075 | for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) { |
1069 | error = gfs2_dir_read_data(dip, (char *)buf, | 1076 | error = gfs2_dir_read_data(dip, (char *)buf, |
@@ -1231,6 +1238,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset, | |||
1231 | return 0; | 1238 | return 0; |
1232 | } | 1239 | } |
1233 | 1240 | ||
1241 | static void *gfs2_alloc_sort_buffer(unsigned size) | ||
1242 | { | ||
1243 | void *ptr = NULL; | ||
1244 | |||
1245 | if (size < KMALLOC_MAX_SIZE) | ||
1246 | ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN); | ||
1247 | if (!ptr) | ||
1248 | ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL); | ||
1249 | return ptr; | ||
1250 | } | ||
1251 | |||
1252 | static void gfs2_free_sort_buffer(void *ptr) | ||
1253 | { | ||
1254 | if (is_vmalloc_addr(ptr)) | ||
1255 | vfree(ptr); | ||
1256 | else | ||
1257 | kfree(ptr); | ||
1258 | } | ||
1259 | |||
1234 | static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | 1260 | static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, |
1235 | filldir_t filldir, int *copied, unsigned *depth, | 1261 | filldir_t filldir, int *copied, unsigned *depth, |
1236 | u64 leaf_no) | 1262 | u64 leaf_no) |
@@ -1271,7 +1297,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | |||
1271 | * 99 is the maximum number of entries that can fit in a single | 1297 | * 99 is the maximum number of entries that can fit in a single |
1272 | * leaf block. | 1298 | * leaf block. |
1273 | */ | 1299 | */ |
1274 | larr = vmalloc((leaves + entries + 99) * sizeof(void *)); | 1300 | larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *)); |
1275 | if (!larr) | 1301 | if (!larr) |
1276 | goto out; | 1302 | goto out; |
1277 | darr = (const struct gfs2_dirent **)(larr + leaves); | 1303 | darr = (const struct gfs2_dirent **)(larr + leaves); |
@@ -1282,7 +1308,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | |||
1282 | do { | 1308 | do { |
1283 | error = get_leaf(ip, lfn, &bh); | 1309 | error = get_leaf(ip, lfn, &bh); |
1284 | if (error) | 1310 | if (error) |
1285 | goto out_kfree; | 1311 | goto out_free; |
1286 | lf = (struct gfs2_leaf *)bh->b_data; | 1312 | lf = (struct gfs2_leaf *)bh->b_data; |
1287 | lfn = be64_to_cpu(lf->lf_next); | 1313 | lfn = be64_to_cpu(lf->lf_next); |
1288 | if (lf->lf_entries) { | 1314 | if (lf->lf_entries) { |
@@ -1291,7 +1317,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | |||
1291 | gfs2_dirent_gather, NULL, &g); | 1317 | gfs2_dirent_gather, NULL, &g); |
1292 | error = PTR_ERR(dent); | 1318 | error = PTR_ERR(dent); |
1293 | if (IS_ERR(dent)) | 1319 | if (IS_ERR(dent)) |
1294 | goto out_kfree; | 1320 | goto out_free; |
1295 | if (entries2 != g.offset) { | 1321 | if (entries2 != g.offset) { |
1296 | fs_warn(sdp, "Number of entries corrupt in dir " | 1322 | fs_warn(sdp, "Number of entries corrupt in dir " |
1297 | "leaf %llu, entries2 (%u) != " | 1323 | "leaf %llu, entries2 (%u) != " |
@@ -1300,7 +1326,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | |||
1300 | entries2, g.offset); | 1326 | entries2, g.offset); |
1301 | 1327 | ||
1302 | error = -EIO; | 1328 | error = -EIO; |
1303 | goto out_kfree; | 1329 | goto out_free; |
1304 | } | 1330 | } |
1305 | error = 0; | 1331 | error = 0; |
1306 | larr[leaf++] = bh; | 1332 | larr[leaf++] = bh; |
@@ -1312,10 +1338,10 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque, | |||
1312 | BUG_ON(entries2 != entries); | 1338 | BUG_ON(entries2 != entries); |
1313 | error = do_filldir_main(ip, offset, opaque, filldir, darr, | 1339 | error = do_filldir_main(ip, offset, opaque, filldir, darr, |
1314 | entries, copied); | 1340 | entries, copied); |
1315 | out_kfree: | 1341 | out_free: |
1316 | for(i = 0; i < leaf; i++) | 1342 | for(i = 0; i < leaf; i++) |
1317 | brelse(larr[i]); | 1343 | brelse(larr[i]); |
1318 | vfree(larr); | 1344 | gfs2_free_sort_buffer(larr); |
1319 | out: | 1345 | out: |
1320 | return error; | 1346 | return error; |
1321 | } | 1347 | } |
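gfs2_alloc_sort_buffer()/gfs2_free_sort_buffer() above replace a potentially huge vmalloc() (and the earlier __GFP_NOFAIL kmalloc) with a "try the cheap allocator first, fall back to the big-mapping allocator" pair, where the free side works out which allocator was used. A userspace analogue using malloc() with an mmap() fallback; the kernel's is_vmalloc_addr() test is replaced here by an explicit header word, which is a choice of this sketch, not how GFS2 does it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define SMALL_LIMIT	(128 * 1024)	/* pretend the fast allocator tops out here */

/* Each buffer is prefixed with one size_t so the free side knows how it
 * was allocated: 0 means malloc(), anything else is the mmap() length. */
static void *alloc_sort_buffer(size_t size)
{
	size_t total = size + sizeof(size_t);
	size_t *p;

	if (size < SMALL_LIMIT) {
		p = malloc(total);
		if (p) {
			*p = 0;
			return p + 1;
		}
	}
	p = mmap(NULL, total, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	*p = total;
	return p + 1;
}

static void free_sort_buffer(void *ptr)
{
	size_t *p = (size_t *)ptr - 1;

	if (*p)
		munmap(p, *p);
	else
		free(p);
}

int main(void)
{
	void *small = alloc_sort_buffer(4096);
	void *large = alloc_sort_buffer(4 * 1024 * 1024);

	if (!small || !large)
		return 1;
	memset(small, 0, 4096);
	memset(large, 0, 4 * 1024 * 1024);
	free_sort_buffer(small);
	free_sort_buffer(large);
	printf("both buffers allocated and released\n");
	return 0;
}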
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index ed9a94f0ef15..4edd662c8232 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
@@ -351,7 +351,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
351 | unsigned long last_index; | 351 | unsigned long last_index; |
352 | u64 pos = page->index << PAGE_CACHE_SHIFT; | 352 | u64 pos = page->index << PAGE_CACHE_SHIFT; |
353 | unsigned int data_blocks, ind_blocks, rblocks; | 353 | unsigned int data_blocks, ind_blocks, rblocks; |
354 | int alloc_required = 0; | ||
355 | struct gfs2_holder gh; | 354 | struct gfs2_holder gh; |
356 | struct gfs2_alloc *al; | 355 | struct gfs2_alloc *al; |
357 | int ret; | 356 | int ret; |
@@ -364,8 +363,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
364 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); | 363 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); |
365 | set_bit(GIF_SW_PAGED, &ip->i_flags); | 364 | set_bit(GIF_SW_PAGED, &ip->i_flags); |
366 | 365 | ||
367 | ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); | 366 | if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) |
368 | if (ret || !alloc_required) | ||
369 | goto out_unlock; | 367 | goto out_unlock; |
370 | ret = -ENOMEM; | 368 | ret = -ENOMEM; |
371 | al = gfs2_alloc_get(ip); | 369 | al = gfs2_alloc_get(ip); |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index ddcdbf493536..9adf8f924e08 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -328,6 +328,30 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) | |||
328 | } | 328 | } |
329 | 329 | ||
330 | /** | 330 | /** |
331 | * do_error - Something unexpected has happened during a lock request | ||
332 | * | ||
333 | */ | ||
334 | |||
335 | static inline void do_error(struct gfs2_glock *gl, const int ret) | ||
336 | { | ||
337 | struct gfs2_holder *gh, *tmp; | ||
338 | |||
339 | list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { | ||
340 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
341 | continue; | ||
342 | if (ret & LM_OUT_ERROR) | ||
343 | gh->gh_error = -EIO; | ||
344 | else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) | ||
345 | gh->gh_error = GLR_TRYFAILED; | ||
346 | else | ||
347 | continue; | ||
348 | list_del_init(&gh->gh_list); | ||
349 | trace_gfs2_glock_queue(gh, 0); | ||
350 | gfs2_holder_wake(gh); | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /** | ||
331 | * do_promote - promote as many requests as possible on the current queue | 355 | * do_promote - promote as many requests as possible on the current queue |
332 | * @gl: The glock | 356 | * @gl: The glock |
333 | * | 357 | * |
@@ -375,36 +399,13 @@ restart: | |||
375 | } | 399 | } |
376 | if (gh->gh_list.prev == &gl->gl_holders) | 400 | if (gh->gh_list.prev == &gl->gl_holders) |
377 | return 1; | 401 | return 1; |
402 | do_error(gl, 0); | ||
378 | break; | 403 | break; |
379 | } | 404 | } |
380 | return 0; | 405 | return 0; |
381 | } | 406 | } |
382 | 407 | ||
383 | /** | 408 | /** |
384 | * do_error - Something unexpected has happened during a lock request | ||
385 | * | ||
386 | */ | ||
387 | |||
388 | static inline void do_error(struct gfs2_glock *gl, const int ret) | ||
389 | { | ||
390 | struct gfs2_holder *gh, *tmp; | ||
391 | |||
392 | list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { | ||
393 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
394 | continue; | ||
395 | if (ret & LM_OUT_ERROR) | ||
396 | gh->gh_error = -EIO; | ||
397 | else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) | ||
398 | gh->gh_error = GLR_TRYFAILED; | ||
399 | else | ||
400 | continue; | ||
401 | list_del_init(&gh->gh_list); | ||
402 | trace_gfs2_glock_queue(gh, 0); | ||
403 | gfs2_holder_wake(gh); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * find_first_waiter - find the first gh that's waiting for the glock | 409 | * find_first_waiter - find the first gh that's waiting for the glock |
409 | * @gl: the glock | 410 | * @gl: the glock |
410 | */ | 411 | */ |
@@ -1062,6 +1063,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh) | |||
1062 | 1063 | ||
1063 | spin_lock(&gl->gl_spin); | 1064 | spin_lock(&gl->gl_spin); |
1064 | add_to_queue(gh); | 1065 | add_to_queue(gh); |
1066 | if ((LM_FLAG_NOEXP & gh->gh_flags) && | ||
1067 | test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) | ||
1068 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | ||
1065 | run_queue(gl, 1); | 1069 | run_queue(gl, 1); |
1066 | spin_unlock(&gl->gl_spin); | 1070 | spin_unlock(&gl->gl_spin); |
1067 | 1071 | ||
@@ -1319,6 +1323,36 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) | |||
1319 | } | 1323 | } |
1320 | 1324 | ||
1321 | /** | 1325 | /** |
1326 | * gfs2_should_freeze - Figure out if glock should be frozen | ||
1327 | * @gl: The glock in question | ||
1328 | * | ||
1329 | * Glocks are not frozen if (a) the result of the dlm operation is | ||
1330 | * an error, (b) the locking operation was an unlock operation or | ||
1331 | * (c) if there is a "noexp" flagged request anywhere in the queue | ||
1332 | * | ||
1333 | * Returns: 1 if freezing should occur, 0 otherwise | ||
1334 | */ | ||
1335 | |||
1336 | static int gfs2_should_freeze(const struct gfs2_glock *gl) | ||
1337 | { | ||
1338 | const struct gfs2_holder *gh; | ||
1339 | |||
1340 | if (gl->gl_reply & ~LM_OUT_ST_MASK) | ||
1341 | return 0; | ||
1342 | if (gl->gl_target == LM_ST_UNLOCKED) | ||
1343 | return 0; | ||
1344 | |||
1345 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | ||
1346 | if (test_bit(HIF_HOLDER, &gh->gh_iflags)) | ||
1347 | continue; | ||
1348 | if (LM_FLAG_NOEXP & gh->gh_flags) | ||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | return 1; | ||
1353 | } | ||
1354 | |||
1355 | /** | ||
1322 | * gfs2_glock_complete - Callback used by locking | 1356 | * gfs2_glock_complete - Callback used by locking |
1323 | * @gl: Pointer to the glock | 1357 | * @gl: Pointer to the glock |
1324 | * @ret: The return value from the dlm | 1358 | * @ret: The return value from the dlm |
@@ -1328,18 +1362,17 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) | |||
1328 | void gfs2_glock_complete(struct gfs2_glock *gl, int ret) | 1362 | void gfs2_glock_complete(struct gfs2_glock *gl, int ret) |
1329 | { | 1363 | { |
1330 | struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; | 1364 | struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; |
1365 | |||
1331 | gl->gl_reply = ret; | 1366 | gl->gl_reply = ret; |
1367 | |||
1332 | if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { | 1368 | if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { |
1333 | struct gfs2_holder *gh; | ||
1334 | spin_lock(&gl->gl_spin); | 1369 | spin_lock(&gl->gl_spin); |
1335 | gh = find_first_waiter(gl); | 1370 | if (gfs2_should_freeze(gl)) { |
1336 | if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) && | ||
1337 | (gl->gl_target != LM_ST_UNLOCKED)) || | ||
1338 | ((ret & ~LM_OUT_ST_MASK) != 0)) | ||
1339 | set_bit(GLF_FROZEN, &gl->gl_flags); | 1371 | set_bit(GLF_FROZEN, &gl->gl_flags); |
1340 | spin_unlock(&gl->gl_spin); | 1372 | spin_unlock(&gl->gl_spin); |
1341 | if (test_bit(GLF_FROZEN, &gl->gl_flags)) | ||
1342 | return; | 1373 | return; |
1374 | } | ||
1375 | spin_unlock(&gl->gl_spin); | ||
1343 | } | 1376 | } |
1344 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); | 1377 | set_bit(GLF_REPLY_PENDING, &gl->gl_flags); |
1345 | gfs2_glock_hold(gl); | 1378 | gfs2_glock_hold(gl); |
@@ -1348,7 +1381,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) | |||
1348 | } | 1381 | } |
1349 | 1382 | ||
1350 | 1383 | ||
1351 | static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) | 1384 | static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
1352 | { | 1385 | { |
1353 | struct gfs2_glock *gl; | 1386 | struct gfs2_glock *gl; |
1354 | int may_demote; | 1387 | int may_demote; |
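The glock hunks above fold the freeze decision in gfs2_glock_complete() into a named predicate, gfs2_should_freeze(), evaluated while gl_spin is still held. A small sketch of that kind of predicate over a waiter queue; the flag values and the single flags word are simplifications of this sketch (the kernel keeps gh_iflags and gh_flags separate), so treat the details as illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define HIF_HOLDER	0x1	/* entry already holds the lock */
#define LM_FLAG_NOEXP	0x2	/* waiter asked not to be frozen */

struct holder {
	unsigned int flags;
};

/* Freeze a reply only if it is a successful non-unlock reply and no
 * queued waiter has asked to bypass freezing. */
static bool should_freeze(bool reply_is_error, bool target_is_unlock,
			  const struct holder *waiters, int n)
{
	int i;

	if (reply_is_error || target_is_unlock)
		return false;
	for (i = 0; i < n; i++) {
		if (waiters[i].flags & HIF_HOLDER)
			continue;		/* not a waiter */
		if (waiters[i].flags & LM_FLAG_NOEXP)
			return false;
	}
	return true;
}

int main(void)
{
	struct holder quiet[] = { { HIF_HOLDER }, { 0 } };
	struct holder noexp[] = { { HIF_HOLDER }, { LM_FLAG_NOEXP } };

	printf("ordinary queue: freeze=%d\n",
	       should_freeze(false, false, quiet, 2));
	printf("NOEXP waiter:   freeze=%d\n",
	       should_freeze(false, false, noexp, 2));
	return 0;
}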
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index b5d7363b22da..8fcbce48a128 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -460,6 +460,7 @@ enum { | |||
460 | SDF_NOBARRIERS = 3, | 460 | SDF_NOBARRIERS = 3, |
461 | SDF_NORECOVERY = 4, | 461 | SDF_NORECOVERY = 4, |
462 | SDF_DEMOTE = 5, | 462 | SDF_DEMOTE = 5, |
463 | SDF_NOJOURNALID = 6, | ||
463 | }; | 464 | }; |
464 | 465 | ||
465 | #define GFS2_FSNAME_LEN 256 | 466 | #define GFS2_FSNAME_LEN 256 |
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index b5612cbb62a5..f03afd9c44bc 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, | |||
169 | { | 169 | { |
170 | struct inode *inode; | 170 | struct inode *inode; |
171 | struct gfs2_inode *ip; | 171 | struct gfs2_inode *ip; |
172 | struct gfs2_glock *io_gl; | 172 | struct gfs2_glock *io_gl = NULL; |
173 | int error; | 173 | int error; |
174 | 174 | ||
175 | inode = gfs2_iget(sb, no_addr); | 175 | inode = gfs2_iget(sb, no_addr); |
@@ -198,6 +198,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, | |||
198 | ip->i_iopen_gh.gh_gl->gl_object = ip; | 198 | ip->i_iopen_gh.gh_gl->gl_object = ip; |
199 | 199 | ||
200 | gfs2_glock_put(io_gl); | 200 | gfs2_glock_put(io_gl); |
201 | io_gl = NULL; | ||
201 | 202 | ||
202 | if ((type == DT_UNKNOWN) && (no_formal_ino == 0)) | 203 | if ((type == DT_UNKNOWN) && (no_formal_ino == 0)) |
203 | goto gfs2_nfsbypass; | 204 | goto gfs2_nfsbypass; |
@@ -228,7 +229,8 @@ gfs2_nfsbypass: | |||
228 | fail_glock: | 229 | fail_glock: |
229 | gfs2_glock_dq(&ip->i_iopen_gh); | 230 | gfs2_glock_dq(&ip->i_iopen_gh); |
230 | fail_iopen: | 231 | fail_iopen: |
231 | gfs2_glock_put(io_gl); | 232 | if (io_gl) |
233 | gfs2_glock_put(io_gl); | ||
232 | fail_put: | 234 | fail_put: |
233 | if (inode->i_state & I_NEW) | 235 | if (inode->i_state & I_NEW) |
234 | ip->i_gl->gl_object = NULL; | 236 | ip->i_gl->gl_object = NULL; |
@@ -256,7 +258,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr) | |||
256 | { | 258 | { |
257 | struct gfs2_sbd *sdp; | 259 | struct gfs2_sbd *sdp; |
258 | struct gfs2_inode *ip; | 260 | struct gfs2_inode *ip; |
259 | struct gfs2_glock *io_gl; | 261 | struct gfs2_glock *io_gl = NULL; |
260 | int error; | 262 | int error; |
261 | struct gfs2_holder gh; | 263 | struct gfs2_holder gh; |
262 | struct inode *inode; | 264 | struct inode *inode; |
@@ -293,6 +295,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr) | |||
293 | 295 | ||
294 | ip->i_iopen_gh.gh_gl->gl_object = ip; | 296 | ip->i_iopen_gh.gh_gl->gl_object = ip; |
295 | gfs2_glock_put(io_gl); | 297 | gfs2_glock_put(io_gl); |
298 | io_gl = NULL; | ||
296 | 299 | ||
297 | inode->i_mode = DT2IF(DT_UNKNOWN); | 300 | inode->i_mode = DT2IF(DT_UNKNOWN); |
298 | 301 | ||
@@ -319,7 +322,8 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr) | |||
319 | fail_glock: | 322 | fail_glock: |
320 | gfs2_glock_dq(&ip->i_iopen_gh); | 323 | gfs2_glock_dq(&ip->i_iopen_gh); |
321 | fail_iopen: | 324 | fail_iopen: |
322 | gfs2_glock_put(io_gl); | 325 | if (io_gl) |
326 | gfs2_glock_put(io_gl); | ||
323 | fail_put: | 327 | fail_put: |
324 | ip->i_gl->gl_object = NULL; | 328 | ip->i_gl->gl_object = NULL; |
325 | gfs2_glock_put(ip->i_gl); | 329 | gfs2_glock_put(ip->i_gl); |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3593b3a7290e..45a4a36195d8 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -76,7 +76,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
76 | 76 | ||
77 | sb->s_fs_info = sdp; | 77 | sb->s_fs_info = sdp; |
78 | sdp->sd_vfs = sb; | 78 | sdp->sd_vfs = sb; |
79 | 79 | set_bit(SDF_NOJOURNALID, &sdp->sd_flags); | |
80 | gfs2_tune_init(&sdp->sd_tune); | 80 | gfs2_tune_init(&sdp->sd_tune); |
81 | 81 | ||
82 | init_waitqueue_head(&sdp->sd_glock_wait); | 82 | init_waitqueue_head(&sdp->sd_glock_wait); |
@@ -1050,7 +1050,8 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) | |||
1050 | ret = match_int(&tmp[0], &option); | 1050 | ret = match_int(&tmp[0], &option); |
1051 | if (ret || option < 0) | 1051 | if (ret || option < 0) |
1052 | goto hostdata_error; | 1052 | goto hostdata_error; |
1053 | ls->ls_jid = option; | 1053 | if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags)) |
1054 | ls->ls_jid = option; | ||
1054 | break; | 1055 | break; |
1055 | case Opt_id: | 1056 | case Opt_id: |
1056 | /* Obsolete, but left for backward compat purposes */ | 1057 | /* Obsolete, but left for backward compat purposes */ |
@@ -1102,6 +1103,24 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp) | |||
1102 | lm->lm_unmount(sdp); | 1103 | lm->lm_unmount(sdp); |
1103 | } | 1104 | } |
1104 | 1105 | ||
1106 | static int gfs2_journalid_wait(void *word) | ||
1107 | { | ||
1108 | if (signal_pending(current)) | ||
1109 | return -EINTR; | ||
1110 | schedule(); | ||
1111 | return 0; | ||
1112 | } | ||
1113 | |||
1114 | static int wait_on_journal(struct gfs2_sbd *sdp) | ||
1115 | { | ||
1116 | if (sdp->sd_args.ar_spectator) | ||
1117 | return 0; | ||
1118 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
1119 | return 0; | ||
1120 | |||
1121 | return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE); | ||
1122 | } | ||
1123 | |||
1105 | void gfs2_online_uevent(struct gfs2_sbd *sdp) | 1124 | void gfs2_online_uevent(struct gfs2_sbd *sdp) |
1106 | { | 1125 | { |
1107 | struct super_block *sb = sdp->sd_vfs; | 1126 | struct super_block *sb = sdp->sd_vfs; |
@@ -1194,6 +1213,10 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent | |||
1194 | if (error) | 1213 | if (error) |
1195 | goto fail_locking; | 1214 | goto fail_locking; |
1196 | 1215 | ||
1216 | error = wait_on_journal(sdp); | ||
1217 | if (error) | ||
1218 | goto fail_sb; | ||
1219 | |||
1197 | error = init_inodes(sdp, DO); | 1220 | error = init_inodes(sdp, DO); |
1198 | if (error) | 1221 | if (error) |
1199 | goto fail_sb; | 1222 | goto fail_sb; |
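wait_on_journal() above makes mount block until something, in this series a write to the new sysfs jid file shown further down in fs/gfs2/sys.c, clears SDF_NOJOURNALID and wakes the waiter. A userspace model of that handshake with a condition variable in place of wait_on_bit()/wake_up_bit(); the names and the missing signal handling are simplifications of this sketch.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t jid_assigned = PTHREAD_COND_INITIALIZER;
static int have_jid;		/* inverse of the SDF_NOJOURNALID bit */
static unsigned int jid;

/* Mount-time side: park until a journal id has been supplied. */
static unsigned int wait_on_journal(void)
{
	pthread_mutex_lock(&lock);
	while (!have_jid)
		pthread_cond_wait(&jid_assigned, &lock);
	pthread_mutex_unlock(&lock);
	return jid;
}

/* Management side: supply the id exactly once and wake the waiter. */
static int set_journal_id(unsigned int id)
{
	int ret = -1;

	pthread_mutex_lock(&lock);
	if (!have_jid) {
		jid = id;
		have_jid = 1;
		pthread_cond_broadcast(&jid_assigned);
		ret = 0;
	}
	pthread_mutex_unlock(&lock);
	return ret;		/* non-zero: id was already set */
}

static void *mounter(void *arg)
{
	printf("mount continues with journal id %u\n", wait_on_journal());
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, mounter, NULL);
	sleep(1);		/* pretend userspace takes a moment to decide */
	set_journal_id(3);
	pthread_join(tid, NULL);
	return 0;
}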
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 49667d68769e..8bb643cb2658 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list); | |||
77 | static atomic_t qd_lru_count = ATOMIC_INIT(0); | 77 | static atomic_t qd_lru_count = ATOMIC_INIT(0); |
78 | static DEFINE_SPINLOCK(qd_lru_lock); | 78 | static DEFINE_SPINLOCK(qd_lru_lock); |
79 | 79 | ||
80 | int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) | 80 | int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
81 | { | 81 | { |
82 | struct gfs2_quota_data *qd; | 82 | struct gfs2_quota_data *qd; |
83 | struct gfs2_sbd *sdp; | 83 | struct gfs2_sbd *sdp; |
@@ -694,10 +694,8 @@ get_a_page: | |||
694 | if (!buffer_mapped(bh)) | 694 | if (!buffer_mapped(bh)) |
695 | goto unlock_out; | 695 | goto unlock_out; |
696 | /* If it's a newly allocated disk block for quota, zero it */ | 696 | /* If it's a newly allocated disk block for quota, zero it */ |
697 | if (buffer_new(bh)) { | 697 | if (buffer_new(bh)) |
698 | memset(bh->b_data, 0, bh->b_size); | 698 | zero_user(page, pos - blocksize, bh->b_size); |
699 | set_buffer_uptodate(bh); | ||
700 | } | ||
701 | } | 699 | } |
702 | 700 | ||
703 | if (PageUptodate(page)) | 701 | if (PageUptodate(page)) |
@@ -723,7 +721,7 @@ get_a_page: | |||
723 | 721 | ||
724 | /* If quota straddles page boundary, we need to update the rest of the | 722 | /* If quota straddles page boundary, we need to update the rest of the |
725 | * quota at the beginning of the next page */ | 723 | * quota at the beginning of the next page */ |
726 | if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */ | 724 | if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) { |
727 | ptr = ptr + nbytes; | 725 | ptr = ptr + nbytes; |
728 | nbytes = sizeof(struct gfs2_quota) - nbytes; | 726 | nbytes = sizeof(struct gfs2_quota) - nbytes; |
729 | offset = 0; | 727 | offset = 0; |
@@ -789,15 +787,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
789 | goto out; | 787 | goto out; |
790 | 788 | ||
791 | for (x = 0; x < num_qd; x++) { | 789 | for (x = 0; x < num_qd; x++) { |
792 | int alloc_required; | ||
793 | |||
794 | offset = qd2offset(qda[x]); | 790 | offset = qd2offset(qda[x]); |
795 | error = gfs2_write_alloc_required(ip, offset, | 791 | if (gfs2_write_alloc_required(ip, offset, |
796 | sizeof(struct gfs2_quota), | 792 | sizeof(struct gfs2_quota))) |
797 | &alloc_required); | ||
798 | if (error) | ||
799 | goto out_gunlock; | ||
800 | if (alloc_required) | ||
801 | nalloc++; | 793 | nalloc++; |
802 | } | 794 | } |
803 | 795 | ||
@@ -1586,10 +1578,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, | |||
1586 | goto out_i; | 1578 | goto out_i; |
1587 | 1579 | ||
1588 | offset = qd2offset(qd); | 1580 | offset = qd2offset(qd); |
1589 | error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), | 1581 | alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); |
1590 | &alloc_required); | ||
1591 | if (error) | ||
1592 | goto out_i; | ||
1593 | if (alloc_required) { | 1582 | if (alloc_required) { |
1594 | al = gfs2_alloc_get(ip); | 1583 | al = gfs2_alloc_get(ip); |
1595 | if (al == NULL) | 1584 | if (al == NULL) |
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index 195f60c8bd14..e7d236ca48bd 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h | |||
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) | |||
51 | return ret; | 51 | return ret; |
52 | } | 52 | } |
53 | 53 | ||
54 | extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); | 54 | extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask); |
55 | extern const struct quotactl_ops gfs2_quotactl_ops; | 55 | extern const struct quotactl_ops gfs2_quotactl_ops; |
56 | 56 | ||
57 | #endif /* __QUOTA_DOT_H__ */ | 57 | #endif /* __QUOTA_DOT_H__ */ |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 4d1aad38f1b1..4140811a921c 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -342,8 +342,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) | |||
342 | { | 342 | { |
343 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); | 343 | struct gfs2_inode *ip = GFS2_I(jd->jd_inode); |
344 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | 344 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); |
345 | int ar; | ||
346 | int error; | ||
347 | 345 | ||
348 | if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || | 346 | if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || |
349 | (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { | 347 | (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { |
@@ -352,13 +350,12 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) | |||
352 | } | 350 | } |
353 | jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; | 351 | jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; |
354 | 352 | ||
355 | error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); | 353 | if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) { |
356 | if (!error && ar) { | ||
357 | gfs2_consist_inode(ip); | 354 | gfs2_consist_inode(ip); |
358 | error = -EIO; | 355 | return -EIO; |
359 | } | 356 | } |
360 | 357 | ||
361 | return error; | 358 | return 0; |
362 | } | 359 | } |
363 | 360 | ||
364 | /** | 361 | /** |
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 37f5393e68e6..d019d0d55e00 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c | |||
@@ -325,6 +325,30 @@ static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) | |||
325 | return sprintf(buf, "%d\n", ls->ls_first); | 325 | return sprintf(buf, "%d\n", ls->ls_first); |
326 | } | 326 | } |
327 | 327 | ||
328 | static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | ||
329 | { | ||
330 | unsigned first; | ||
331 | int rv; | ||
332 | |||
333 | rv = sscanf(buf, "%u", &first); | ||
334 | if (rv != 1 || first > 1) | ||
335 | return -EINVAL; | ||
336 | spin_lock(&sdp->sd_jindex_spin); | ||
337 | rv = -EBUSY; | ||
338 | if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | ||
339 | goto out; | ||
340 | rv = -EINVAL; | ||
341 | if (sdp->sd_args.ar_spectator) | ||
342 | goto out; | ||
343 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
344 | goto out; | ||
345 | sdp->sd_lockstruct.ls_first = first; | ||
346 | rv = 0; | ||
347 | out: | ||
348 | spin_unlock(&sdp->sd_jindex_spin); | ||
349 | return rv ? rv : len; | ||
350 | } | ||
351 | |||
328 | static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) | 352 | static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) |
329 | { | 353 | { |
330 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 354 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
@@ -377,14 +401,41 @@ static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf) | |||
377 | return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); | 401 | return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid); |
378 | } | 402 | } |
379 | 403 | ||
404 | static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) | ||
405 | { | ||
406 | unsigned jid; | ||
407 | int rv; | ||
408 | |||
409 | rv = sscanf(buf, "%u", &jid); | ||
410 | if (rv != 1) | ||
411 | return -EINVAL; | ||
412 | |||
413 | spin_lock(&sdp->sd_jindex_spin); | ||
414 | rv = -EINVAL; | ||
415 | if (sdp->sd_args.ar_spectator) | ||
416 | goto out; | ||
417 | if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) | ||
418 | goto out; | ||
419 | rv = -EBUSY; | ||
420 | if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) | ||
421 | goto out; | ||
422 | sdp->sd_lockstruct.ls_jid = jid; | ||
423 | smp_mb__after_clear_bit(); | ||
424 | wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); | ||
425 | rv = 0; | ||
426 | out: | ||
427 | spin_unlock(&sdp->sd_jindex_spin); | ||
428 | return rv ? rv : len; | ||
429 | } | ||
430 | |||
380 | #define GDLM_ATTR(_name,_mode,_show,_store) \ | 431 | #define GDLM_ATTR(_name,_mode,_show,_store) \ |
381 | static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) | 432 | static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) |
382 | 433 | ||
383 | GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); | 434 | GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); |
384 | GDLM_ATTR(block, 0644, block_show, block_store); | 435 | GDLM_ATTR(block, 0644, block_show, block_store); |
385 | GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); | 436 | GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); |
386 | GDLM_ATTR(jid, 0444, jid_show, NULL); | 437 | GDLM_ATTR(jid, 0644, jid_show, jid_store); |
387 | GDLM_ATTR(first, 0444, lkfirst_show, NULL); | 438 | GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store); |
388 | GDLM_ATTR(first_done, 0444, first_done_show, NULL); | 439 | GDLM_ATTR(first_done, 0444, first_done_show, NULL); |
389 | GDLM_ATTR(recover, 0600, NULL, recover_store); | 440 | GDLM_ATTR(recover, 0600, NULL, recover_store); |
390 | GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); | 441 | GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); |
@@ -564,7 +615,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj, | |||
564 | 615 | ||
565 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); | 616 | add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name); |
566 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); | 617 | add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name); |
567 | if (!sdp->sd_args.ar_spectator) | 618 | if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) |
568 | add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); | 619 | add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid); |
569 | if (gfs2_uuid_valid(uuid)) | 620 | if (gfs2_uuid_valid(uuid)) |
570 | add_uevent_var(env, "UUID=%pUB", uuid); | 621 | add_uevent_var(env, "UUID=%pUB", uuid); |
diff --git a/fs/inode.c b/fs/inode.c index 2bee20ae3d65..722860b323a9 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -512,7 +512,7 @@ static void prune_icache(int nr_to_scan) | |||
512 | * This function is passed the number of inodes to scan, and it returns the | 512 | * This function is passed the number of inodes to scan, and it returns the |
513 | * total number of remaining possibly-reclaimable inodes. | 513 | * total number of remaining possibly-reclaimable inodes. |
514 | */ | 514 | */ |
515 | static int shrink_icache_memory(int nr, gfp_t gfp_mask) | 515 | static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
516 | { | 516 | { |
517 | if (nr) { | 517 | if (nr) { |
518 | /* | 518 | /* |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index bc2ff5932769..036880895bfc 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -297,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, | |||
297 | struct page *new_page; | 297 | struct page *new_page; |
298 | unsigned int new_offset; | 298 | unsigned int new_offset; |
299 | struct buffer_head *bh_in = jh2bh(jh_in); | 299 | struct buffer_head *bh_in = jh2bh(jh_in); |
300 | struct jbd2_buffer_trigger_type *triggers; | ||
301 | journal_t *journal = transaction->t_journal; | 300 | journal_t *journal = transaction->t_journal; |
302 | 301 | ||
303 | /* | 302 | /* |
@@ -328,21 +327,21 @@ repeat: | |||
328 | done_copy_out = 1; | 327 | done_copy_out = 1; |
329 | new_page = virt_to_page(jh_in->b_frozen_data); | 328 | new_page = virt_to_page(jh_in->b_frozen_data); |
330 | new_offset = offset_in_page(jh_in->b_frozen_data); | 329 | new_offset = offset_in_page(jh_in->b_frozen_data); |
331 | triggers = jh_in->b_frozen_triggers; | ||
332 | } else { | 330 | } else { |
333 | new_page = jh2bh(jh_in)->b_page; | 331 | new_page = jh2bh(jh_in)->b_page; |
334 | new_offset = offset_in_page(jh2bh(jh_in)->b_data); | 332 | new_offset = offset_in_page(jh2bh(jh_in)->b_data); |
335 | triggers = jh_in->b_triggers; | ||
336 | } | 333 | } |
337 | 334 | ||
338 | mapped_data = kmap_atomic(new_page, KM_USER0); | 335 | mapped_data = kmap_atomic(new_page, KM_USER0); |
339 | /* | 336 | /* |
340 | * Fire any commit trigger. Do this before checking for escaping, | 337 | * Fire data frozen trigger if data already wasn't frozen. Do this |
341 | * as the trigger may modify the magic offset. If a copy-out | 338 | * before checking for escaping, as the trigger may modify the magic |
342 | * happens afterwards, it will have the correct data in the buffer. | 339 | * offset. If a copy-out happens afterwards, it will have the correct |
340 | * data in the buffer. | ||
343 | */ | 341 | */ |
344 | jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset, | 342 | if (!done_copy_out) |
345 | triggers); | 343 | jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset, |
344 | jh_in->b_triggers); | ||
346 | 345 | ||
347 | /* | 346 | /* |
348 | * Check for escaping | 347 | * Check for escaping |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e214d68620ac..b8e0806681bb 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -725,6 +725,9 @@ done: | |||
725 | page = jh2bh(jh)->b_page; | 725 | page = jh2bh(jh)->b_page; |
726 | offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK; | 726 | offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK; |
727 | source = kmap_atomic(page, KM_USER0); | 727 | source = kmap_atomic(page, KM_USER0); |
728 | /* Fire data frozen trigger just before we copy the data */ | ||
729 | jbd2_buffer_frozen_trigger(jh, source + offset, | ||
730 | jh->b_triggers); | ||
728 | memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); | 731 | memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); |
729 | kunmap_atomic(source, KM_USER0); | 732 | kunmap_atomic(source, KM_USER0); |
730 | 733 | ||
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh, | |||
963 | jh->b_triggers = type; | 966 | jh->b_triggers = type; |
964 | } | 967 | } |
965 | 968 | ||
966 | void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data, | 969 | void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, |
967 | struct jbd2_buffer_trigger_type *triggers) | 970 | struct jbd2_buffer_trigger_type *triggers) |
968 | { | 971 | { |
969 | struct buffer_head *bh = jh2bh(jh); | 972 | struct buffer_head *bh = jh2bh(jh); |
970 | 973 | ||
971 | if (!triggers || !triggers->t_commit) | 974 | if (!triggers || !triggers->t_frozen) |
972 | return; | 975 | return; |
973 | 976 | ||
974 | triggers->t_commit(triggers, bh, mapped_data, bh->b_size); | 977 | triggers->t_frozen(triggers, bh, mapped_data, bh->b_size); |
975 | } | 978 | } |
976 | 979 | ||
977 | void jbd2_buffer_abort_trigger(struct journal_head *jh, | 980 | void jbd2_buffer_abort_trigger(struct journal_head *jh, |
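The jbd2 hunks above rename the commit-time trigger to a "frozen" trigger and fire it just before the buffer contents are copied into b_frozen_data, so whatever the trigger writes ends up in the journalled copy. A small userspace model of that ordering, with a plain function pointer standing in for struct jbd2_buffer_trigger_type; the names are purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*frozen_trigger_t)(char *data, size_t size);

/* The trigger gets one last look at the live data; anything it changes
 * is captured by the snapshot taken immediately afterwards. */
static char *freeze_buffer(char *live, size_t size, frozen_trigger_t trigger)
{
	char *frozen = malloc(size);

	if (!frozen)
		return NULL;
	if (trigger)
		trigger(live, size);	/* fire before the copy */
	memcpy(frozen, live, size);
	return frozen;
}

/* Example trigger: stamp a checksum-ish byte into the block header. */
static void stamp_trigger(char *data, size_t size)
{
	unsigned char sum = 0;
	size_t i;

	for (i = 1; i < size; i++)
		sum += (unsigned char)data[i];
	data[0] = (char)sum;
}

int main(void)
{
	char live[8] = "payload";
	char *frozen = freeze_buffer(live, sizeof(live), stamp_trigger);

	if (!frozen)
		return 1;
	printf("frozen copy carries the stamp: %d\n", frozen[0]);
	free(frozen);
	return 0;
}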
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index a2d58c96f1b4..d258e261bdc7 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c | |||
@@ -626,7 +626,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i | |||
626 | 626 | ||
627 | static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) | 627 | static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) |
628 | { | 628 | { |
629 | /* success of check_xattr_ref_inode() means taht inode (ic) dose not have | 629 | /* success of check_xattr_ref_inode() means that inode (ic) dose not have |
630 | * duplicate name/value pairs. If duplicate name/value pair would be found, | 630 | * duplicate name/value pairs. If duplicate name/value pair would be found, |
631 | * one will be removed. | 631 | * one will be removed. |
632 | */ | 632 | */ |
diff --git a/fs/mbcache.c b/fs/mbcache.c index ec88ff3d04a9..e28f21b95344 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c | |||
@@ -115,7 +115,7 @@ mb_cache_indexes(struct mb_cache *cache) | |||
115 | * What the mbcache registers as to get shrunk dynamically. | 115 | * What the mbcache registers as to get shrunk dynamically. |
116 | */ | 116 | */ |
117 | 117 | ||
118 | static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); | 118 | static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); |
119 | 119 | ||
120 | static struct shrinker mb_cache_shrinker = { | 120 | static struct shrinker mb_cache_shrinker = { |
121 | .shrink = mb_cache_shrink_fn, | 121 | .shrink = mb_cache_shrink_fn, |
@@ -191,13 +191,14 @@ forget: | |||
191 | * This function is called by the kernel memory management when memory | 191 | * This function is called by the kernel memory management when memory |
192 | * gets low. | 192 | * gets low. |
193 | * | 193 | * |
194 | * @shrink: (ignored) | ||
194 | * @nr_to_scan: Number of objects to scan | 195 | * @nr_to_scan: Number of objects to scan |
195 | * @gfp_mask: (ignored) | 196 | * @gfp_mask: (ignored) |
196 | * | 197 | * |
197 | * Returns the number of objects which are present in the cache. | 198 | * Returns the number of objects which are present in the cache. |
198 | */ | 199 | */ |
199 | static int | 200 | static int |
200 | mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask) | 201 | mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
201 | { | 202 | { |
202 | LIST_HEAD(free_list); | 203 | LIST_HEAD(free_list); |
203 | struct list_head *l, *ltmp; | 204 | struct list_head *l, *ltmp; |
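Several hunks in this merge (the gfs2 glock and quota shrinkers, shrink_icache_memory(), mb_cache_shrink_fn(), nfs_access_cache_shrinker()) are the same mechanical change: the shrinker callback now also receives the struct shrinker it was registered with. One reason such a self-pointer is useful, sketched in userspace, is that the callback can recover per-cache state with container_of instead of a global; the container_of macro below is the usual offsetof construction, and none of this is kernel code.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct shrinker {
	int (*shrink)(struct shrinker *s, int nr_to_scan);
};

/* A cache embeds its shrinker, so the callback needs no globals. */
struct my_cache {
	int nr_objects;
	struct shrinker shrinker;
};

static int my_cache_shrink(struct shrinker *s, int nr_to_scan)
{
	struct my_cache *cache = container_of(s, struct my_cache, shrinker);

	if (nr_to_scan > cache->nr_objects)
		nr_to_scan = cache->nr_objects;
	cache->nr_objects -= nr_to_scan;	/* pretend we freed them */
	return cache->nr_objects;		/* objects left to reclaim */
}

int main(void)
{
	struct my_cache cache = { .nr_objects = 100 };

	cache.shrinker.shrink = my_cache_shrink;
	printf("left after scan: %d\n",
	       cache.shrinker.shrink(&cache.shrinker, 30));
	return 0;
}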
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7ec9b34a59f8..d25b5257b7a1 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1286,6 +1286,55 @@ static void nfs4_session_set_rwsize(struct nfs_server *server) | |||
1286 | #endif /* CONFIG_NFS_V4_1 */ | 1286 | #endif /* CONFIG_NFS_V4_1 */ |
1287 | } | 1287 | } |
1288 | 1288 | ||
1289 | static int nfs4_server_common_setup(struct nfs_server *server, | ||
1290 | struct nfs_fh *mntfh) | ||
1291 | { | ||
1292 | struct nfs_fattr *fattr; | ||
1293 | int error; | ||
1294 | |||
1295 | BUG_ON(!server->nfs_client); | ||
1296 | BUG_ON(!server->nfs_client->rpc_ops); | ||
1297 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); | ||
1298 | |||
1299 | fattr = nfs_alloc_fattr(); | ||
1300 | if (fattr == NULL) | ||
1301 | return -ENOMEM; | ||
1302 | |||
1303 | /* We must ensure the session is initialised first */ | ||
1304 | error = nfs4_init_session(server); | ||
1305 | if (error < 0) | ||
1306 | goto out; | ||
1307 | |||
1308 | /* Probe the root fh to retrieve its FSID and filehandle */ | ||
1309 | error = nfs4_get_rootfh(server, mntfh); | ||
1310 | if (error < 0) | ||
1311 | goto out; | ||
1312 | |||
1313 | dprintk("Server FSID: %llx:%llx\n", | ||
1314 | (unsigned long long) server->fsid.major, | ||
1315 | (unsigned long long) server->fsid.minor); | ||
1316 | dprintk("Mount FH: %d\n", mntfh->size); | ||
1317 | |||
1318 | nfs4_session_set_rwsize(server); | ||
1319 | |||
1320 | error = nfs_probe_fsinfo(server, mntfh, fattr); | ||
1321 | if (error < 0) | ||
1322 | goto out; | ||
1323 | |||
1324 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
1325 | server->namelen = NFS4_MAXNAMLEN; | ||
1326 | |||
1327 | spin_lock(&nfs_client_lock); | ||
1328 | list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); | ||
1329 | list_add_tail(&server->master_link, &nfs_volume_list); | ||
1330 | spin_unlock(&nfs_client_lock); | ||
1331 | |||
1332 | server->mount_time = jiffies; | ||
1333 | out: | ||
1334 | nfs_free_fattr(fattr); | ||
1335 | return error; | ||
1336 | } | ||
1337 | |||
1289 | /* | 1338 | /* |
1290 | * Create a version 4 volume record | 1339 | * Create a version 4 volume record |
1291 | */ | 1340 | */ |
@@ -1346,7 +1395,6 @@ error: | |||
1346 | struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data, | 1395 | struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data, |
1347 | struct nfs_fh *mntfh) | 1396 | struct nfs_fh *mntfh) |
1348 | { | 1397 | { |
1349 | struct nfs_fattr *fattr; | ||
1350 | struct nfs_server *server; | 1398 | struct nfs_server *server; |
1351 | int error; | 1399 | int error; |
1352 | 1400 | ||
@@ -1356,55 +1404,19 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data, | |||
1356 | if (!server) | 1404 | if (!server) |
1357 | return ERR_PTR(-ENOMEM); | 1405 | return ERR_PTR(-ENOMEM); |
1358 | 1406 | ||
1359 | error = -ENOMEM; | ||
1360 | fattr = nfs_alloc_fattr(); | ||
1361 | if (fattr == NULL) | ||
1362 | goto error; | ||
1363 | |||
1364 | /* set up the general RPC client */ | 1407 | /* set up the general RPC client */ |
1365 | error = nfs4_init_server(server, data); | 1408 | error = nfs4_init_server(server, data); |
1366 | if (error < 0) | 1409 | if (error < 0) |
1367 | goto error; | 1410 | goto error; |
1368 | 1411 | ||
1369 | BUG_ON(!server->nfs_client); | 1412 | error = nfs4_server_common_setup(server, mntfh); |
1370 | BUG_ON(!server->nfs_client->rpc_ops); | ||
1371 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); | ||
1372 | |||
1373 | error = nfs4_init_session(server); | ||
1374 | if (error < 0) | ||
1375 | goto error; | ||
1376 | |||
1377 | /* Probe the root fh to retrieve its FSID */ | ||
1378 | error = nfs4_get_rootfh(server, mntfh); | ||
1379 | if (error < 0) | 1413 | if (error < 0) |
1380 | goto error; | 1414 | goto error; |
1381 | 1415 | ||
1382 | dprintk("Server FSID: %llx:%llx\n", | ||
1383 | (unsigned long long) server->fsid.major, | ||
1384 | (unsigned long long) server->fsid.minor); | ||
1385 | dprintk("Mount FH: %d\n", mntfh->size); | ||
1386 | |||
1387 | nfs4_session_set_rwsize(server); | ||
1388 | |||
1389 | error = nfs_probe_fsinfo(server, mntfh, fattr); | ||
1390 | if (error < 0) | ||
1391 | goto error; | ||
1392 | |||
1393 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
1394 | server->namelen = NFS4_MAXNAMLEN; | ||
1395 | |||
1396 | spin_lock(&nfs_client_lock); | ||
1397 | list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); | ||
1398 | list_add_tail(&server->master_link, &nfs_volume_list); | ||
1399 | spin_unlock(&nfs_client_lock); | ||
1400 | |||
1401 | server->mount_time = jiffies; | ||
1402 | dprintk("<-- nfs4_create_server() = %p\n", server); | 1416 | dprintk("<-- nfs4_create_server() = %p\n", server); |
1403 | nfs_free_fattr(fattr); | ||
1404 | return server; | 1417 | return server; |
1405 | 1418 | ||
1406 | error: | 1419 | error: |
1407 | nfs_free_fattr(fattr); | ||
1408 | nfs_free_server(server); | 1420 | nfs_free_server(server); |
1409 | dprintk("<-- nfs4_create_server() = error %d\n", error); | 1421 | dprintk("<-- nfs4_create_server() = error %d\n", error); |
1410 | return ERR_PTR(error); | 1422 | return ERR_PTR(error); |
@@ -1418,7 +1430,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, | |||
1418 | { | 1430 | { |
1419 | struct nfs_client *parent_client; | 1431 | struct nfs_client *parent_client; |
1420 | struct nfs_server *server, *parent_server; | 1432 | struct nfs_server *server, *parent_server; |
1421 | struct nfs_fattr *fattr; | ||
1422 | int error; | 1433 | int error; |
1423 | 1434 | ||
1424 | dprintk("--> nfs4_create_referral_server()\n"); | 1435 | dprintk("--> nfs4_create_referral_server()\n"); |
@@ -1427,11 +1438,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, | |||
1427 | if (!server) | 1438 | if (!server) |
1428 | return ERR_PTR(-ENOMEM); | 1439 | return ERR_PTR(-ENOMEM); |
1429 | 1440 | ||
1430 | error = -ENOMEM; | ||
1431 | fattr = nfs_alloc_fattr(); | ||
1432 | if (fattr == NULL) | ||
1433 | goto error; | ||
1434 | |||
1435 | parent_server = NFS_SB(data->sb); | 1441 | parent_server = NFS_SB(data->sb); |
1436 | parent_client = parent_server->nfs_client; | 1442 | parent_client = parent_server->nfs_client; |
1437 | 1443 | ||
@@ -1456,40 +1462,14 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, | |||
1456 | if (error < 0) | 1462 | if (error < 0) |
1457 | goto error; | 1463 | goto error; |
1458 | 1464 | ||
1459 | BUG_ON(!server->nfs_client); | 1465 | error = nfs4_server_common_setup(server, mntfh); |
1460 | BUG_ON(!server->nfs_client->rpc_ops); | ||
1461 | BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); | ||
1462 | |||
1463 | /* Probe the root fh to retrieve its FSID and filehandle */ | ||
1464 | error = nfs4_get_rootfh(server, mntfh); | ||
1465 | if (error < 0) | ||
1466 | goto error; | ||
1467 | |||
1468 | /* probe the filesystem info for this server filesystem */ | ||
1469 | error = nfs_probe_fsinfo(server, mntfh, fattr); | ||
1470 | if (error < 0) | 1466 | if (error < 0) |
1471 | goto error; | 1467 | goto error; |
1472 | 1468 | ||
1473 | if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) | ||
1474 | server->namelen = NFS4_MAXNAMLEN; | ||
1475 | |||
1476 | dprintk("Referral FSID: %llx:%llx\n", | ||
1477 | (unsigned long long) server->fsid.major, | ||
1478 | (unsigned long long) server->fsid.minor); | ||
1479 | |||
1480 | spin_lock(&nfs_client_lock); | ||
1481 | list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); | ||
1482 | list_add_tail(&server->master_link, &nfs_volume_list); | ||
1483 | spin_unlock(&nfs_client_lock); | ||
1484 | |||
1485 | server->mount_time = jiffies; | ||
1486 | |||
1487 | nfs_free_fattr(fattr); | ||
1488 | dprintk("<-- nfs_create_referral_server() = %p\n", server); | 1469 | dprintk("<-- nfs_create_referral_server() = %p\n", server); |
1489 | return server; | 1470 | return server; |
1490 | 1471 | ||
1491 | error: | 1472 | error: |
1492 | nfs_free_fattr(fattr); | ||
1493 | nfs_free_server(server); | 1473 | nfs_free_server(server); |
1494 | dprintk("<-- nfs4_create_referral_server() = error %d\n", error); | 1474 | dprintk("<-- nfs4_create_referral_server() = error %d\n", error); |
1495 | return ERR_PTR(error); | 1475 | return ERR_PTR(error); |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 782b431ef91c..e60416d3f818 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1710,7 +1710,7 @@ static void nfs_access_free_list(struct list_head *head) | |||
1710 | } | 1710 | } |
1711 | } | 1711 | } |
1712 | 1712 | ||
1713 | int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) | 1713 | int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) |
1714 | { | 1714 | { |
1715 | LIST_HEAD(head); | 1715 | LIST_HEAD(head); |
1716 | struct nfs_inode *nfsi; | 1716 | struct nfs_inode *nfsi; |
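The hunk above, together with the matching prototype update in fs/nfs/internal.h further down, tracks a VFS API change: the shrinker callback now also receives the struct shrinker that fired, so an implementation can recover per-instance state instead of relying on globals. A minimal standalone sketch of that callback-plus-container_of pattern follows; the type and field names are illustrative stand-ins, not the kernel's.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct shrinker_cb {                 /* stand-in for struct shrinker */
        int (*shrink)(struct shrinker_cb *self, int nr_to_scan);
    };

    struct cache {                       /* per-instance state */
        int nr_objects;
        struct shrinker_cb cb;           /* embedded callback descriptor */
    };

    static int cache_shrink(struct shrinker_cb *self, int nr_to_scan)
    {
        /* recover the enclosing cache from the callback argument */
        struct cache *c = container_of(self, struct cache, cb);

        if (nr_to_scan > c->nr_objects)
            nr_to_scan = c->nr_objects;
        c->nr_objects -= nr_to_scan;
        return c->nr_objects;            /* objects left, shrinker-style */
    }

    int main(void)
    {
        struct cache c = { .nr_objects = 100, .cb = { .shrink = cache_shrink } };
        printf("%d left\n", c.cb.shrink(&c.cb, 30));
        return 0;
    }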
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 36a5e74f51b4..f036153d9f50 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pagemap.h> | 27 | #include <linux/pagemap.h> |
28 | #include <linux/aio.h> | 28 | #include <linux/aio.h> |
29 | #include <linux/gfp.h> | 29 | #include <linux/gfp.h> |
30 | #include <linux/swap.h> | ||
30 | 31 | ||
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
@@ -493,11 +494,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset) | |||
493 | */ | 494 | */ |
494 | static int nfs_release_page(struct page *page, gfp_t gfp) | 495 | static int nfs_release_page(struct page *page, gfp_t gfp) |
495 | { | 496 | { |
497 | struct address_space *mapping = page->mapping; | ||
498 | |||
496 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); | 499 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); |
497 | 500 | ||
498 | /* Only do I/O if gfp is a superset of GFP_KERNEL */ | 501 | /* Only do I/O if gfp is a superset of GFP_KERNEL */ |
499 | if ((gfp & GFP_KERNEL) == GFP_KERNEL) | 502 | if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) { |
500 | nfs_wb_page(page->mapping->host, page); | 503 | int how = FLUSH_SYNC; |
504 | |||
505 | /* Don't let kswapd deadlock waiting for OOM RPC calls */ | ||
506 | if (current_is_kswapd()) | ||
507 | how = 0; | ||
508 | nfs_commit_inode(mapping->host, how); | ||
509 | } | ||
501 | /* If PagePrivate() is set, then the page is not freeable */ | 510 | /* If PagePrivate() is set, then the page is not freeable */ |
502 | if (PagePrivate(page)) | 511 | if (PagePrivate(page)) |
503 | return 0; | 512 | return 0; |
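The nfs_release_page() hunk above stops releasepage from waiting synchronously when called from kswapd, since blocking on an RPC that may itself need memory can deadlock reclaim, and it still refuses to start I/O unless the allocation context is a superset of GFP_KERNEL. A compact sketch of that decision; the flag values here are illustrative, not the kernel's.

    #include <stdbool.h>
    #include <stdio.h>

    #define GFP_KERNEL 0x3       /* "may sleep and do FS I/O" (illustrative) */
    #define FLUSH_SYNC 1         /* wait for the commit to finish */

    /* Returns the commit mode to use, or -1 if no I/O may be started. */
    static int release_commit_mode(unsigned gfp, bool is_kswapd)
    {
        /* only start I/O when the caller is allowed to block */
        if ((gfp & GFP_KERNEL) != GFP_KERNEL)
            return -1;
        /* kswapd must never wait synchronously, or OOM reclaim can
         * deadlock behind its own writeback */
        return is_kswapd ? 0 : FLUSH_SYNC;
    }

    int main(void)
    {
        printf("direct reclaim: %d, kswapd: %d\n",
               release_commit_mode(GFP_KERNEL, false),
               release_commit_mode(GFP_KERNEL, true));
        return 0;
    }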
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 7428f7d6273b..a70e446e1605 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c | |||
@@ -146,7 +146,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh) | |||
146 | goto out; | 146 | goto out; |
147 | } | 147 | } |
148 | 148 | ||
149 | if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_MODE) | 149 | if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE) |
150 | || !S_ISDIR(fsinfo.fattr->mode)) { | 150 | || !S_ISDIR(fsinfo.fattr->mode)) { |
151 | printk(KERN_ERR "nfs4_get_rootfh:" | 151 | printk(KERN_ERR "nfs4_get_rootfh:" |
152 | " getroot encountered non-directory\n"); | 152 | " getroot encountered non-directory\n"); |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d8bd619e386c..e70f44b9b3f4 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[]; | |||
205 | void nfs_close_context(struct nfs_open_context *ctx, int is_sync); | 205 | void nfs_close_context(struct nfs_open_context *ctx, int is_sync); |
206 | 206 | ||
207 | /* dir.c */ | 207 | /* dir.c */ |
208 | extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask); | 208 | extern int nfs_access_cache_shrinker(struct shrinker *shrink, |
209 | int nr_to_scan, gfp_t gfp_mask); | ||
209 | 210 | ||
210 | /* inode.c */ | 211 | /* inode.c */ |
211 | extern struct workqueue_struct *nfsiod_workqueue; | 212 | extern struct workqueue_struct *nfsiod_workqueue; |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6bdef28efa33..65c8dae4b267 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -862,8 +862,8 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const | |||
862 | bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; | 862 | bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; |
863 | *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME); | 863 | *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME); |
864 | *p++ = cpu_to_be32(0); | 864 | *p++ = cpu_to_be32(0); |
865 | *p++ = cpu_to_be32(iap->ia_mtime.tv_sec); | 865 | *p++ = cpu_to_be32(iap->ia_atime.tv_sec); |
866 | *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec); | 866 | *p++ = cpu_to_be32(iap->ia_atime.tv_nsec); |
867 | } | 867 | } |
868 | else if (iap->ia_valid & ATTR_ATIME) { | 868 | else if (iap->ia_valid & ATTR_ATIME) { |
869 | bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; | 869 | bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; |
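The two-line substitution above fixes a copy-paste slip: the "set atime to client time" branch was encoding iap->ia_mtime. One way to keep such field pairs from drifting is a shared helper that encodes a single timestamp; the sketch below is only an illustration of that idea, not the kernel's XDR encoder.

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Encode seconds and nanoseconds of one timestamp; using the same
     * helper for atime and mtime means each field name appears once. */
    static uint32_t *encode_client_time(uint32_t *p, const struct timespec *ts)
    {
        *p++ = (uint32_t)ts->tv_sec;    /* stands in for cpu_to_be32() */
        *p++ = (uint32_t)ts->tv_nsec;
        return p;
    }

    int main(void)
    {
        uint32_t buf[4], *p = buf;
        struct timespec atime = { .tv_sec = 10, .tv_nsec = 1 };
        struct timespec mtime = { .tv_sec = 20, .tv_nsec = 2 };

        p = encode_client_time(p, &atime);
        p = encode_client_time(p, &mtime);
        printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }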
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 6bd19d843af7..df101d9f546a 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c | |||
@@ -105,7 +105,7 @@ static char nfs_root_name[256] __initdata = ""; | |||
105 | static __be32 servaddr __initdata = 0; | 105 | static __be32 servaddr __initdata = 0; |
106 | 106 | ||
107 | /* Name of directory to mount */ | 107 | /* Name of directory to mount */ |
108 | static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, }; | 108 | static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, }; |
109 | 109 | ||
110 | /* NFS-related data */ | 110 | /* NFS-related data */ |
111 | static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */ | 111 | static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */ |
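The array-size bump above is the classic NUL-terminator off-by-one: a path of NFS_MAXPATHLEN characters needs NFS_MAXPATHLEN + 1 bytes of storage. A minimal sketch, with an illustrative length value:

    #include <stdio.h>
    #include <string.h>

    #define MAXPATHLEN 1024                     /* illustrative limit */

    static char export_path[MAXPATHLEN + 1];    /* +1 for the trailing NUL */

    static void set_export_path(const char *src)
    {
        /* bounded copy that always leaves the buffer terminated */
        strncpy(export_path, src, MAXPATHLEN);
        export_path[MAXPATHLEN] = '\0';
    }

    int main(void)
    {
        set_export_path("/srv/nfs/root");
        puts(export_path);
        return 0;
    }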
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 04214fc5c304..f9df16de4a56 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -570,6 +570,22 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, | |||
570 | nfs_show_mountd_netid(m, nfss, showdefaults); | 570 | nfs_show_mountd_netid(m, nfss, showdefaults); |
571 | } | 571 | } |
572 | 572 | ||
573 | #ifdef CONFIG_NFS_V4 | ||
574 | static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss, | ||
575 | int showdefaults) | ||
576 | { | ||
577 | struct nfs_client *clp = nfss->nfs_client; | ||
578 | |||
579 | seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr); | ||
580 | seq_printf(m, ",minorversion=%u", clp->cl_minorversion); | ||
581 | } | ||
582 | #else | ||
583 | static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss, | ||
584 | int showdefaults) | ||
585 | { | ||
586 | } | ||
587 | #endif | ||
588 | |||
573 | /* | 589 | /* |
574 | * Describe the mount options in force on this server representation | 590 | * Describe the mount options in force on this server representation |
575 | */ | 591 | */ |
@@ -631,11 +647,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, | |||
631 | 647 | ||
632 | if (version != 4) | 648 | if (version != 4) |
633 | nfs_show_mountd_options(m, nfss, showdefaults); | 649 | nfs_show_mountd_options(m, nfss, showdefaults); |
650 | else | ||
651 | nfs_show_nfsv4_options(m, nfss, showdefaults); | ||
634 | 652 | ||
635 | #ifdef CONFIG_NFS_V4 | ||
636 | if (clp->rpc_ops->version == 4) | ||
637 | seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr); | ||
638 | #endif | ||
639 | if (nfss->options & NFS_OPTION_FSCACHE) | 653 | if (nfss->options & NFS_OPTION_FSCACHE) |
640 | seq_printf(m, ",fsc"); | 654 | seq_printf(m, ",fsc"); |
641 | } | 655 | } |
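The super.c hunks above replace an inline #ifdef CONFIG_NFS_V4 in the option printer with a helper that has an empty stub when v4 is compiled out, so the caller stays free of preprocessor conditionals and the v4-only fields (clientaddr, minorversion) are emitted from one place. A minimal, compilable sketch of the stub-under-#ifdef pattern, with an invented config symbol:

    #include <stdio.h>

    /* #define CONFIG_FEATURE_X 1   : flip to compile the real variant */

    #ifdef CONFIG_FEATURE_X
    static void show_feature_options(void)
    {
        printf(",featurex=on");
    }
    #else
    static void show_feature_options(void)   /* compiled-out stub */
    {
    }
    #endif

    int main(void)
    {
        printf("rw,vers=4");
        show_feature_options();               /* no #ifdef at the call site */
        printf("\n");
        return 0;
    }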
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 91679e2631ee..9f81bdd91c55 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page) | |||
222 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); | 222 | clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); |
223 | } | 223 | } |
224 | 224 | ||
225 | static struct nfs_page *nfs_find_and_lock_request(struct page *page) | 225 | static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) |
226 | { | 226 | { |
227 | struct inode *inode = page->mapping->host; | 227 | struct inode *inode = page->mapping->host; |
228 | struct nfs_page *req; | 228 | struct nfs_page *req; |
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page) | |||
241 | * request as dirty (in which case we don't care). | 241 | * request as dirty (in which case we don't care). |
242 | */ | 242 | */ |
243 | spin_unlock(&inode->i_lock); | 243 | spin_unlock(&inode->i_lock); |
244 | ret = nfs_wait_on_request(req); | 244 | if (!nonblock) |
245 | ret = nfs_wait_on_request(req); | ||
246 | else | ||
247 | ret = -EAGAIN; | ||
245 | nfs_release_request(req); | 248 | nfs_release_request(req); |
246 | if (ret != 0) | 249 | if (ret != 0) |
247 | return ERR_PTR(ret); | 250 | return ERR_PTR(ret); |
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page) | |||
256 | * May return an error if the user signalled nfs_wait_on_request(). | 259 | * May return an error if the user signalled nfs_wait_on_request(). |
257 | */ | 260 | */ |
258 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | 261 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, |
259 | struct page *page) | 262 | struct page *page, bool nonblock) |
260 | { | 263 | { |
261 | struct nfs_page *req; | 264 | struct nfs_page *req; |
262 | int ret = 0; | 265 | int ret = 0; |
263 | 266 | ||
264 | req = nfs_find_and_lock_request(page); | 267 | req = nfs_find_and_lock_request(page, nonblock); |
265 | if (!req) | 268 | if (!req) |
266 | goto out; | 269 | goto out; |
267 | ret = PTR_ERR(req); | 270 | ret = PTR_ERR(req); |
@@ -283,12 +286,20 @@ out: | |||
283 | static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) | 286 | static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) |
284 | { | 287 | { |
285 | struct inode *inode = page->mapping->host; | 288 | struct inode *inode = page->mapping->host; |
289 | int ret; | ||
286 | 290 | ||
287 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); | 291 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
288 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | 292 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
289 | 293 | ||
290 | nfs_pageio_cond_complete(pgio, page->index); | 294 | nfs_pageio_cond_complete(pgio, page->index); |
291 | return nfs_page_async_flush(pgio, page); | 295 | ret = nfs_page_async_flush(pgio, page, |
296 | wbc->sync_mode == WB_SYNC_NONE || | ||
297 | wbc->nonblocking != 0); | ||
298 | if (ret == -EAGAIN) { | ||
299 | redirty_page_for_writepage(wbc, page); | ||
300 | ret = 0; | ||
301 | } | ||
302 | return ret; | ||
292 | } | 303 | } |
293 | 304 | ||
294 | /* | 305 | /* |
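The write.c changes above thread a nonblock flag down from nfs_do_writepage(): for WB_SYNC_NONE writeback, or when the wbc is marked nonblocking, the flusher must not sleep on a page whose request is busy, so the lookup returns -EAGAIN and the page is simply redirtied for a later pass. A standalone sketch of that control flow; the types and helpers below are stand-ins, not the NFS ones.

    #include <stdbool.h>
    #include <stdio.h>

    #define EAGAIN 11

    struct fake_page { bool busy; bool dirty; };

    /* Pretend lookup: -EAGAIN if the page is busy and we may not wait. */
    static int find_and_lock(struct fake_page *pg, bool nonblock)
    {
        if (pg->busy)
            return nonblock ? -EAGAIN : 0;   /* blocking path would wait */
        return 0;
    }

    static int do_writepage(struct fake_page *pg, bool sync_none)
    {
        int ret = find_and_lock(pg, /*nonblock=*/sync_none);

        if (ret == -EAGAIN) {
            pg->dirty = true;                /* redirty, retry on a later pass */
            return 0;                        /* not an error for the caller */
        }
        return ret;
    }

    int main(void)
    {
        struct fake_page pg = { .busy = true, .dirty = false };
        printf("ret=%d redirtied=%d\n", do_writepage(&pg, true), pg.dirty);
        return 0;
    }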
@@ -1379,7 +1390,7 @@ static const struct rpc_call_ops nfs_commit_ops = { | |||
1379 | .rpc_release = nfs_commit_release, | 1390 | .rpc_release = nfs_commit_release, |
1380 | }; | 1391 | }; |
1381 | 1392 | ||
1382 | static int nfs_commit_inode(struct inode *inode, int how) | 1393 | int nfs_commit_inode(struct inode *inode, int how) |
1383 | { | 1394 | { |
1384 | LIST_HEAD(head); | 1395 | LIST_HEAD(head); |
1385 | int may_wait = how & FLUSH_SYNC; | 1396 | int may_wait = how & FLUSH_SYNC; |
@@ -1443,11 +1454,6 @@ out_mark_dirty: | |||
1443 | return ret; | 1454 | return ret; |
1444 | } | 1455 | } |
1445 | #else | 1456 | #else |
1446 | static int nfs_commit_inode(struct inode *inode, int how) | ||
1447 | { | ||
1448 | return 0; | ||
1449 | } | ||
1450 | |||
1451 | static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) | 1457 | static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) |
1452 | { | 1458 | { |
1453 | return 0; | 1459 | return 0; |
@@ -1546,7 +1552,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, | |||
1546 | 1552 | ||
1547 | nfs_fscache_release_page(page, GFP_KERNEL); | 1553 | nfs_fscache_release_page(page, GFP_KERNEL); |
1548 | 1554 | ||
1549 | req = nfs_find_and_lock_request(page); | 1555 | req = nfs_find_and_lock_request(page, false); |
1550 | ret = PTR_ERR(req); | 1556 | ret = PTR_ERR(req); |
1551 | if (IS_ERR(req)) | 1557 | if (IS_ERR(req)) |
1552 | goto out; | 1558 | goto out; |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 3623ca20cc18..96337a4fbbdf 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -196,15 +196,14 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock, | |||
196 | dump_stack(); | 196 | dump_stack(); |
197 | goto bail; | 197 | goto bail; |
198 | } | 198 | } |
199 | |||
200 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); | ||
201 | mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, | ||
202 | (unsigned long long)past_eof); | ||
203 | |||
204 | if (create && (iblock >= past_eof)) | ||
205 | set_buffer_new(bh_result); | ||
206 | } | 199 | } |
207 | 200 | ||
201 | past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); | ||
202 | mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, | ||
203 | (unsigned long long)past_eof); | ||
204 | if (create && (iblock >= past_eof)) | ||
205 | set_buffer_new(bh_result); | ||
206 | |||
208 | bail: | 207 | bail: |
209 | if (err < 0) | 208 | if (err < 0) |
210 | err = -EIO; | 209 | err = -EIO; |
@@ -459,36 +458,6 @@ int walk_page_buffers( handle_t *handle, | |||
459 | return ret; | 458 | return ret; |
460 | } | 459 | } |
461 | 460 | ||
462 | handle_t *ocfs2_start_walk_page_trans(struct inode *inode, | ||
463 | struct page *page, | ||
464 | unsigned from, | ||
465 | unsigned to) | ||
466 | { | ||
467 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
468 | handle_t *handle; | ||
469 | int ret = 0; | ||
470 | |||
471 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | ||
472 | if (IS_ERR(handle)) { | ||
473 | ret = -ENOMEM; | ||
474 | mlog_errno(ret); | ||
475 | goto out; | ||
476 | } | ||
477 | |||
478 | if (ocfs2_should_order_data(inode)) { | ||
479 | ret = ocfs2_jbd2_file_inode(handle, inode); | ||
480 | if (ret < 0) | ||
481 | mlog_errno(ret); | ||
482 | } | ||
483 | out: | ||
484 | if (ret) { | ||
485 | if (!IS_ERR(handle)) | ||
486 | ocfs2_commit_trans(osb, handle); | ||
487 | handle = ERR_PTR(ret); | ||
488 | } | ||
489 | return handle; | ||
490 | } | ||
491 | |||
492 | static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) | 461 | static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) |
493 | { | 462 | { |
494 | sector_t status; | 463 | sector_t status; |
@@ -609,7 +578,9 @@ bail: | |||
609 | static void ocfs2_dio_end_io(struct kiocb *iocb, | 578 | static void ocfs2_dio_end_io(struct kiocb *iocb, |
610 | loff_t offset, | 579 | loff_t offset, |
611 | ssize_t bytes, | 580 | ssize_t bytes, |
612 | void *private) | 581 | void *private, |
582 | int ret, | ||
583 | bool is_async) | ||
613 | { | 584 | { |
614 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; | 585 | struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; |
615 | int level; | 586 | int level; |
@@ -623,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, | |||
623 | if (!level) | 594 | if (!level) |
624 | up_read(&inode->i_alloc_sem); | 595 | up_read(&inode->i_alloc_sem); |
625 | ocfs2_rw_unlock(inode, level); | 596 | ocfs2_rw_unlock(inode, level); |
597 | |||
598 | if (is_async) | ||
599 | aio_complete(iocb, ret, 0); | ||
626 | } | 600 | } |
627 | 601 | ||
628 | /* | 602 | /* |
@@ -1131,23 +1105,37 @@ out: | |||
1131 | */ | 1105 | */ |
1132 | static int ocfs2_grab_pages_for_write(struct address_space *mapping, | 1106 | static int ocfs2_grab_pages_for_write(struct address_space *mapping, |
1133 | struct ocfs2_write_ctxt *wc, | 1107 | struct ocfs2_write_ctxt *wc, |
1134 | u32 cpos, loff_t user_pos, int new, | 1108 | u32 cpos, loff_t user_pos, |
1109 | unsigned user_len, int new, | ||
1135 | struct page *mmap_page) | 1110 | struct page *mmap_page) |
1136 | { | 1111 | { |
1137 | int ret = 0, i; | 1112 | int ret = 0, i; |
1138 | unsigned long start, target_index, index; | 1113 | unsigned long start, target_index, end_index, index; |
1139 | struct inode *inode = mapping->host; | 1114 | struct inode *inode = mapping->host; |
1115 | loff_t last_byte; | ||
1140 | 1116 | ||
1141 | target_index = user_pos >> PAGE_CACHE_SHIFT; | 1117 | target_index = user_pos >> PAGE_CACHE_SHIFT; |
1142 | 1118 | ||
1143 | /* | 1119 | /* |
1144 | * Figure out how many pages we'll be manipulating here. For | 1120 | * Figure out how many pages we'll be manipulating here. For |
1145 | * non allocating write, we just change the one | 1121 | * non allocating write, we just change the one |
1146 | * page. Otherwise, we'll need a whole clusters worth. | 1122 | * page. Otherwise, we'll need a whole clusters worth. If we're |
1123 | * writing past i_size, we only need enough pages to cover the | ||
1124 | * last page of the write. | ||
1147 | */ | 1125 | */ |
1148 | if (new) { | 1126 | if (new) { |
1149 | wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); | 1127 | wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); |
1150 | start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); | 1128 | start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); |
1129 | /* | ||
1130 | * We need the index *past* the last page we could possibly | ||
1131 | * touch. This is the page past the end of the write or | ||
1132 | * i_size, whichever is greater. | ||
1133 | */ | ||
1134 | last_byte = max(user_pos + user_len, i_size_read(inode)); | ||
1135 | BUG_ON(last_byte < 1); | ||
1136 | end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; | ||
1137 | if ((start + wc->w_num_pages) > end_index) | ||
1138 | wc->w_num_pages = end_index - start; | ||
1151 | } else { | 1139 | } else { |
1152 | wc->w_num_pages = 1; | 1140 | wc->w_num_pages = 1; |
1153 | start = target_index; | 1141 | start = target_index; |
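ocfs2_grab_pages_for_write() above now also receives the write length, so an allocating write past i_size only grabs pages up to the greater of the write's end and i_size instead of a full cluster's worth. The arithmetic, extracted into a standalone helper with an illustrative page size:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned pages_to_grab(uint64_t user_pos, unsigned user_len,
                                  uint64_t i_size, uint64_t start_index,
                                  unsigned pages_per_cluster)
    {
        /* last byte we could possibly touch: end of the write or i_size,
         * whichever is greater */
        uint64_t last_byte = user_pos + user_len;
        uint64_t end_index;

        if (i_size > last_byte)
            last_byte = i_size;
        /* index one past the last page containing last_byte - 1 */
        end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;

        if (start_index + pages_per_cluster > end_index)
            return (unsigned)(end_index - start_index);
        return pages_per_cluster;
    }

    int main(void)
    {
        /* 4K write at 1MiB on a small file: one page instead of a cluster */
        printf("%u\n", pages_to_grab(1 << 20, 4096, 8192,
                                     (1 << 20) >> PAGE_SHIFT, 8));
        return 0;
    }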
@@ -1620,21 +1608,20 @@ out: | |||
1620 | * write path can treat it as an non-allocating write, which has no | 1608 | * write path can treat it as an non-allocating write, which has no |
1621 | * special case code for sparse/nonsparse files. | 1609 | * special case code for sparse/nonsparse files. |
1622 | */ | 1610 | */ |
1623 | static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos, | 1611 | static int ocfs2_expand_nonsparse_inode(struct inode *inode, |
1624 | unsigned len, | 1612 | struct buffer_head *di_bh, |
1613 | loff_t pos, unsigned len, | ||
1625 | struct ocfs2_write_ctxt *wc) | 1614 | struct ocfs2_write_ctxt *wc) |
1626 | { | 1615 | { |
1627 | int ret; | 1616 | int ret; |
1628 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
1629 | loff_t newsize = pos + len; | 1617 | loff_t newsize = pos + len; |
1630 | 1618 | ||
1631 | if (ocfs2_sparse_alloc(osb)) | 1619 | BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); |
1632 | return 0; | ||
1633 | 1620 | ||
1634 | if (newsize <= i_size_read(inode)) | 1621 | if (newsize <= i_size_read(inode)) |
1635 | return 0; | 1622 | return 0; |
1636 | 1623 | ||
1637 | ret = ocfs2_extend_no_holes(inode, newsize, pos); | 1624 | ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos); |
1638 | if (ret) | 1625 | if (ret) |
1639 | mlog_errno(ret); | 1626 | mlog_errno(ret); |
1640 | 1627 | ||
@@ -1644,6 +1631,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos, | |||
1644 | return ret; | 1631 | return ret; |
1645 | } | 1632 | } |
1646 | 1633 | ||
1634 | static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, | ||
1635 | loff_t pos) | ||
1636 | { | ||
1637 | int ret = 0; | ||
1638 | |||
1639 | BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); | ||
1640 | if (pos > i_size_read(inode)) | ||
1641 | ret = ocfs2_zero_extend(inode, di_bh, pos); | ||
1642 | |||
1643 | return ret; | ||
1644 | } | ||
1645 | |||
1647 | int ocfs2_write_begin_nolock(struct address_space *mapping, | 1646 | int ocfs2_write_begin_nolock(struct address_space *mapping, |
1648 | loff_t pos, unsigned len, unsigned flags, | 1647 | loff_t pos, unsigned len, unsigned flags, |
1649 | struct page **pagep, void **fsdata, | 1648 | struct page **pagep, void **fsdata, |
@@ -1679,7 +1678,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1679 | } | 1678 | } |
1680 | } | 1679 | } |
1681 | 1680 | ||
1682 | ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc); | 1681 | if (ocfs2_sparse_alloc(osb)) |
1682 | ret = ocfs2_zero_tail(inode, di_bh, pos); | ||
1683 | else | ||
1684 | ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len, | ||
1685 | wc); | ||
1683 | if (ret) { | 1686 | if (ret) { |
1684 | mlog_errno(ret); | 1687 | mlog_errno(ret); |
1685 | goto out; | 1688 | goto out; |
@@ -1789,7 +1792,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1789 | * that we can zero and flush if we error after adding the | 1792 | * that we can zero and flush if we error after adding the |
1790 | * extent. | 1793 | * extent. |
1791 | */ | 1794 | */ |
1792 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, | 1795 | ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, |
1793 | cluster_of_pages, mmap_page); | 1796 | cluster_of_pages, mmap_page); |
1794 | if (ret) { | 1797 | if (ret) { |
1795 | mlog_errno(ret); | 1798 | mlog_errno(ret); |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 6b5a492e1749..153abb5abef0 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain, | |||
1671 | struct dlm_ctxt *dlm = NULL; | 1671 | struct dlm_ctxt *dlm = NULL; |
1672 | struct dlm_ctxt *new_ctxt = NULL; | 1672 | struct dlm_ctxt *new_ctxt = NULL; |
1673 | 1673 | ||
1674 | if (strlen(domain) > O2NM_MAX_NAME_LEN) { | 1674 | if (strlen(domain) >= O2NM_MAX_NAME_LEN) { |
1675 | ret = -ENAMETOOLONG; | 1675 | ret = -ENAMETOOLONG; |
1676 | mlog(ML_ERROR, "domain name length too long\n"); | 1676 | mlog(ML_ERROR, "domain name length too long\n"); |
1677 | goto leave; | 1677 | goto leave; |
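The comparison change above is an off-by-one guard, presumably because the domain name is later stored in a fixed buffer of O2NM_MAX_NAME_LEN bytes: a name of exactly that length would leave no room for the terminating NUL and must be rejected too. An illustration of the rule, with an assumed limit and buffer layout:

    #include <stdio.h>
    #include <string.h>

    #define MAX_NAME_LEN 64                 /* illustrative limit */

    struct domain { char name[MAX_NAME_LEN]; };

    static int set_domain_name(struct domain *d, const char *name)
    {
        if (strlen(name) >= MAX_NAME_LEN)   /* '>' would allow len == 64 */
            return -1;                      /* ...and drop the NUL */
        strcpy(d->name, name);              /* safe: at most 63 chars + NUL */
        return 0;
    }

    int main(void)
    {
        struct domain d;
        printf("%d\n", set_domain_name(&d, "ocfs2-test"));
        return 0;
    }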
@@ -1709,6 +1709,7 @@ retry: | |||
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) { | 1711 | if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) { |
1712 | spin_unlock(&dlm_domain_lock); | ||
1712 | mlog(ML_ERROR, | 1713 | mlog(ML_ERROR, |
1713 | "Requested locking protocol version is not " | 1714 | "Requested locking protocol version is not " |
1714 | "compatible with already registered domain " | 1715 | "compatible with already registered domain " |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 4a7506a4e314..94b97fc6a88e 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2808,14 +2808,8 @@ again: | |||
2808 | mlog(0, "trying again...\n"); | 2808 | mlog(0, "trying again...\n"); |
2809 | goto again; | 2809 | goto again; |
2810 | } | 2810 | } |
2811 | /* now that we are sure the MIGRATING state is there, drop | ||
2812 | * the unneded state which blocked threads trying to DIRTY */ | ||
2813 | spin_lock(&res->spinlock); | ||
2814 | BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); | ||
2815 | BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); | ||
2816 | res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; | ||
2817 | spin_unlock(&res->spinlock); | ||
2818 | 2811 | ||
2812 | ret = 0; | ||
2819 | /* did the target go down or die? */ | 2813 | /* did the target go down or die? */ |
2820 | spin_lock(&dlm->spinlock); | 2814 | spin_lock(&dlm->spinlock); |
2821 | if (!test_bit(target, dlm->domain_map)) { | 2815 | if (!test_bit(target, dlm->domain_map)) { |
@@ -2826,9 +2820,21 @@ again: | |||
2826 | spin_unlock(&dlm->spinlock); | 2820 | spin_unlock(&dlm->spinlock); |
2827 | 2821 | ||
2828 | /* | 2822 | /* |
2823 | * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for | ||
2824 | * another try; otherwise, we are sure the MIGRATING state is there, | ||
2825 | * drop the unneeded state which blocked threads trying to DIRTY | ||
2826 | */ | ||
2827 | spin_lock(&res->spinlock); | ||
2828 | BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); | ||
2829 | res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; | ||
2830 | if (!ret) | ||
2831 | BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); | ||
2832 | spin_unlock(&res->spinlock); | ||
2833 | |||
2834 | /* | ||
2829 | * at this point: | 2835 | * at this point: |
2830 | * | 2836 | * |
2831 | * o the DLM_LOCK_RES_MIGRATING flag is set | 2837 | * o the DLM_LOCK_RES_MIGRATING flag is set if target not down |
2832 | * o there are no pending asts on this lockres | 2838 | * o there are no pending asts on this lockres |
2833 | * o all processes trying to reserve an ast on this | 2839 | * o all processes trying to reserve an ast on this |
2834 | * lockres must wait for the MIGRATING flag to clear | 2840 | * lockres must wait for the MIGRATING flag to clear |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index f8b75ce4be70..9dfaac73b36d 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) | |||
463 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | 463 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { |
464 | int bit; | 464 | int bit; |
465 | 465 | ||
466 | bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0); | 466 | bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0); |
467 | if (bit >= O2NM_MAX_NODES || bit < 0) | 467 | if (bit >= O2NM_MAX_NODES || bit < 0) |
468 | dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); | 468 | dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); |
469 | else | 469 | else |
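The recovery-map fix above shrinks the search bound to O2NM_MAX_NODES; if the bitmap is declared with exactly that many bits, scanning O2NM_MAX_NODES + 1 of them reads one bit past the map. A self-contained version of the bounded scan, with illustrative sizes:

    #include <limits.h>
    #include <stdio.h>

    #define MAX_NODES 255
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long recovery_map[(MAX_NODES + BITS_PER_LONG - 1) / BITS_PER_LONG];

    /* Returns the first set bit, or MAX_NODES when none is set; the loop
     * bound must match the number of bits the map was declared with. */
    static unsigned first_dead_node(void)
    {
        for (unsigned bit = 0; bit < MAX_NODES; bit++)
            if (recovery_map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)))
                return bit;
        return MAX_NODES;
    }

    int main(void)
    {
        recovery_map[1] |= 1UL << 3;   /* marks node 67 dead with 64-bit longs */
        printf("%u\n", first_dead_node());
        return 0;
    }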
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 6a13ea64c447..2b10b36d1577 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -724,28 +724,55 @@ leave: | |||
724 | return status; | 724 | return status; |
725 | } | 725 | } |
726 | 726 | ||
727 | /* | ||
728 | * While a write will already be ordering the data, a truncate will not. | ||
729 | * Thus, we need to explicitly order the zeroed pages. | ||
730 | */ | ||
731 | static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode) | ||
732 | { | ||
733 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
734 | handle_t *handle = NULL; | ||
735 | int ret = 0; | ||
736 | |||
737 | if (!ocfs2_should_order_data(inode)) | ||
738 | goto out; | ||
739 | |||
740 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | ||
741 | if (IS_ERR(handle)) { | ||
742 | ret = -ENOMEM; | ||
743 | mlog_errno(ret); | ||
744 | goto out; | ||
745 | } | ||
746 | |||
747 | ret = ocfs2_jbd2_file_inode(handle, inode); | ||
748 | if (ret < 0) | ||
749 | mlog_errno(ret); | ||
750 | |||
751 | out: | ||
752 | if (ret) { | ||
753 | if (!IS_ERR(handle)) | ||
754 | ocfs2_commit_trans(osb, handle); | ||
755 | handle = ERR_PTR(ret); | ||
756 | } | ||
757 | return handle; | ||
758 | } | ||
759 | |||
727 | /* Some parts of this taken from generic_cont_expand, which turned out | 760 | /* Some parts of this taken from generic_cont_expand, which turned out |
728 | * to be too fragile to do exactly what we need without us having to | 761 | * to be too fragile to do exactly what we need without us having to |
729 | * worry about recursive locking in ->write_begin() and ->write_end(). */ | 762 | * worry about recursive locking in ->write_begin() and ->write_end(). */ |
730 | static int ocfs2_write_zero_page(struct inode *inode, | 763 | static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, |
731 | u64 size) | 764 | u64 abs_to) |
732 | { | 765 | { |
733 | struct address_space *mapping = inode->i_mapping; | 766 | struct address_space *mapping = inode->i_mapping; |
734 | struct page *page; | 767 | struct page *page; |
735 | unsigned long index; | 768 | unsigned long index = abs_from >> PAGE_CACHE_SHIFT; |
736 | unsigned int offset; | ||
737 | handle_t *handle = NULL; | 769 | handle_t *handle = NULL; |
738 | int ret; | 770 | int ret = 0; |
771 | unsigned zero_from, zero_to, block_start, block_end; | ||
739 | 772 | ||
740 | offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ | 773 | BUG_ON(abs_from >= abs_to); |
741 | /* ugh. in prepare/commit_write, if from==to==start of block, we | 774 | BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); |
742 | ** skip the prepare. make sure we never send an offset for the start | 775 | BUG_ON(abs_from & (inode->i_blkbits - 1)); |
743 | ** of a block | ||
744 | */ | ||
745 | if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) { | ||
746 | offset++; | ||
747 | } | ||
748 | index = size >> PAGE_CACHE_SHIFT; | ||
749 | 776 | ||
750 | page = grab_cache_page(mapping, index); | 777 | page = grab_cache_page(mapping, index); |
751 | if (!page) { | 778 | if (!page) { |
@@ -754,31 +781,56 @@ static int ocfs2_write_zero_page(struct inode *inode, | |||
754 | goto out; | 781 | goto out; |
755 | } | 782 | } |
756 | 783 | ||
757 | ret = ocfs2_prepare_write_nolock(inode, page, offset, offset); | 784 | /* Get the offsets within the page that we want to zero */ |
758 | if (ret < 0) { | 785 | zero_from = abs_from & (PAGE_CACHE_SIZE - 1); |
759 | mlog_errno(ret); | 786 | zero_to = abs_to & (PAGE_CACHE_SIZE - 1); |
760 | goto out_unlock; | 787 | if (!zero_to) |
761 | } | 788 | zero_to = PAGE_CACHE_SIZE; |
762 | 789 | ||
763 | if (ocfs2_should_order_data(inode)) { | 790 | mlog(0, |
764 | handle = ocfs2_start_walk_page_trans(inode, page, offset, | 791 | "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n", |
765 | offset); | 792 | (unsigned long long)abs_from, (unsigned long long)abs_to, |
766 | if (IS_ERR(handle)) { | 793 | index, zero_from, zero_to); |
767 | ret = PTR_ERR(handle); | 794 | |
768 | handle = NULL; | 795 | /* We know that zero_from is block aligned */ |
796 | for (block_start = zero_from; block_start < zero_to; | ||
797 | block_start = block_end) { | ||
798 | block_end = block_start + (1 << inode->i_blkbits); | ||
799 | |||
800 | /* | ||
801 | * block_start is block-aligned. Bump it by one to | ||
802 | * force ocfs2_{prepare,commit}_write() to zero the | ||
803 | * whole block. | ||
804 | */ | ||
805 | ret = ocfs2_prepare_write_nolock(inode, page, | ||
806 | block_start + 1, | ||
807 | block_start + 1); | ||
808 | if (ret < 0) { | ||
809 | mlog_errno(ret); | ||
769 | goto out_unlock; | 810 | goto out_unlock; |
770 | } | 811 | } |
771 | } | ||
772 | 812 | ||
773 | /* must not update i_size! */ | 813 | if (!handle) { |
774 | ret = block_commit_write(page, offset, offset); | 814 | handle = ocfs2_zero_start_ordered_transaction(inode); |
775 | if (ret < 0) | 815 | if (IS_ERR(handle)) { |
776 | mlog_errno(ret); | 816 | ret = PTR_ERR(handle); |
777 | else | 817 | handle = NULL; |
778 | ret = 0; | 818 | break; |
819 | } | ||
820 | } | ||
821 | |||
822 | /* must not update i_size! */ | ||
823 | ret = block_commit_write(page, block_start + 1, | ||
824 | block_start + 1); | ||
825 | if (ret < 0) | ||
826 | mlog_errno(ret); | ||
827 | else | ||
828 | ret = 0; | ||
829 | } | ||
779 | 830 | ||
780 | if (handle) | 831 | if (handle) |
781 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); | 832 | ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); |
833 | |||
782 | out_unlock: | 834 | out_unlock: |
783 | unlock_page(page); | 835 | unlock_page(page); |
784 | page_cache_release(page); | 836 | page_cache_release(page); |
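ocfs2_write_zero_page() above now takes an absolute byte range and zeroes it block by block within one page; the offsets inside the page are derived as below, where a range that ends exactly on a page boundary maps to PAGE_SIZE rather than 0. A standalone sketch of that offset math, using an illustrative 4K page:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static void page_zero_offsets(uint64_t abs_from, uint64_t abs_to,
                                  unsigned *zero_from, unsigned *zero_to)
    {
        assert(abs_from < abs_to);
        /* both offsets are taken within the page holding abs_from */
        *zero_from = (unsigned)(abs_from & (PAGE_SIZE - 1));
        *zero_to = (unsigned)(abs_to & (PAGE_SIZE - 1));
        if (*zero_to == 0)              /* range ends on a page boundary */
            *zero_to = PAGE_SIZE;
    }

    int main(void)
    {
        unsigned from, to;
        page_zero_offsets(8192 + 512, 8192 + 4096, &from, &to);
        printf("%u..%u\n", from, to);   /* 512..4096 */
        return 0;
    }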
@@ -786,22 +838,114 @@ out: | |||
786 | return ret; | 838 | return ret; |
787 | } | 839 | } |
788 | 840 | ||
789 | static int ocfs2_zero_extend(struct inode *inode, | 841 | /* |
790 | u64 zero_to_size) | 842 | * Find the next range to zero. We do this in terms of bytes because |
843 | * that's what ocfs2_zero_extend() wants, and it is dealing with the | ||
844 | * pagecache. We may return multiple extents. | ||
845 | * | ||
846 | * zero_start and zero_end are ocfs2_zero_extend()s current idea of what | ||
847 | * needs to be zeroed. range_start and range_end return the next zeroing | ||
848 | * range. A subsequent call should pass the previous range_end as its | ||
849 | * zero_start. If range_end is 0, there's nothing to do. | ||
850 | * | ||
851 | * Unwritten extents are skipped over. Refcounted extents are CoWd. | ||
852 | */ | ||
853 | static int ocfs2_zero_extend_get_range(struct inode *inode, | ||
854 | struct buffer_head *di_bh, | ||
855 | u64 zero_start, u64 zero_end, | ||
856 | u64 *range_start, u64 *range_end) | ||
791 | { | 857 | { |
792 | int ret = 0; | 858 | int rc = 0, needs_cow = 0; |
793 | u64 start_off; | 859 | u32 p_cpos, zero_clusters = 0; |
794 | struct super_block *sb = inode->i_sb; | 860 | u32 zero_cpos = |
861 | zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; | ||
862 | u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end); | ||
863 | unsigned int num_clusters = 0; | ||
864 | unsigned int ext_flags = 0; | ||
795 | 865 | ||
796 | start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); | 866 | while (zero_cpos < last_cpos) { |
797 | while (start_off < zero_to_size) { | 867 | rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos, |
798 | ret = ocfs2_write_zero_page(inode, start_off); | 868 | &num_clusters, &ext_flags); |
799 | if (ret < 0) { | 869 | if (rc) { |
800 | mlog_errno(ret); | 870 | mlog_errno(rc); |
871 | goto out; | ||
872 | } | ||
873 | |||
874 | if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { | ||
875 | zero_clusters = num_clusters; | ||
876 | if (ext_flags & OCFS2_EXT_REFCOUNTED) | ||
877 | needs_cow = 1; | ||
878 | break; | ||
879 | } | ||
880 | |||
881 | zero_cpos += num_clusters; | ||
882 | } | ||
883 | if (!zero_clusters) { | ||
884 | *range_end = 0; | ||
885 | goto out; | ||
886 | } | ||
887 | |||
888 | while ((zero_cpos + zero_clusters) < last_cpos) { | ||
889 | rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters, | ||
890 | &p_cpos, &num_clusters, | ||
891 | &ext_flags); | ||
892 | if (rc) { | ||
893 | mlog_errno(rc); | ||
801 | goto out; | 894 | goto out; |
802 | } | 895 | } |
803 | 896 | ||
804 | start_off += sb->s_blocksize; | 897 | if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)) |
898 | break; | ||
899 | if (ext_flags & OCFS2_EXT_REFCOUNTED) | ||
900 | needs_cow = 1; | ||
901 | zero_clusters += num_clusters; | ||
902 | } | ||
903 | if ((zero_cpos + zero_clusters) > last_cpos) | ||
904 | zero_clusters = last_cpos - zero_cpos; | ||
905 | |||
906 | if (needs_cow) { | ||
907 | rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters, | ||
908 | UINT_MAX); | ||
909 | if (rc) { | ||
910 | mlog_errno(rc); | ||
911 | goto out; | ||
912 | } | ||
913 | } | ||
914 | |||
915 | *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos); | ||
916 | *range_end = ocfs2_clusters_to_bytes(inode->i_sb, | ||
917 | zero_cpos + zero_clusters); | ||
918 | |||
919 | out: | ||
920 | return rc; | ||
921 | } | ||
922 | |||
923 | /* | ||
924 | * Zero one range returned from ocfs2_zero_extend_get_range(). The caller | ||
925 | * has made sure that the entire range needs zeroing. | ||
926 | */ | ||
927 | static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start, | ||
928 | u64 range_end) | ||
929 | { | ||
930 | int rc = 0; | ||
931 | u64 next_pos; | ||
932 | u64 zero_pos = range_start; | ||
933 | |||
934 | mlog(0, "range_start = %llu, range_end = %llu\n", | ||
935 | (unsigned long long)range_start, | ||
936 | (unsigned long long)range_end); | ||
937 | BUG_ON(range_start >= range_end); | ||
938 | |||
939 | while (zero_pos < range_end) { | ||
940 | next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; | ||
941 | if (next_pos > range_end) | ||
942 | next_pos = range_end; | ||
943 | rc = ocfs2_write_zero_page(inode, zero_pos, next_pos); | ||
944 | if (rc < 0) { | ||
945 | mlog_errno(rc); | ||
946 | break; | ||
947 | } | ||
948 | zero_pos = next_pos; | ||
805 | 949 | ||
806 | /* | 950 | /* |
807 | * Very large extends have the potential to lock up | 951 | * Very large extends have the potential to lock up |
@@ -810,16 +954,63 @@ static int ocfs2_zero_extend(struct inode *inode, | |||
810 | cond_resched(); | 954 | cond_resched(); |
811 | } | 955 | } |
812 | 956 | ||
813 | out: | 957 | return rc; |
958 | } | ||
959 | |||
960 | int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, | ||
961 | loff_t zero_to_size) | ||
962 | { | ||
963 | int ret = 0; | ||
964 | u64 zero_start, range_start = 0, range_end = 0; | ||
965 | struct super_block *sb = inode->i_sb; | ||
966 | |||
967 | zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); | ||
968 | mlog(0, "zero_start %llu for i_size %llu\n", | ||
969 | (unsigned long long)zero_start, | ||
970 | (unsigned long long)i_size_read(inode)); | ||
971 | while (zero_start < zero_to_size) { | ||
972 | ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, | ||
973 | zero_to_size, | ||
974 | &range_start, | ||
975 | &range_end); | ||
976 | if (ret) { | ||
977 | mlog_errno(ret); | ||
978 | break; | ||
979 | } | ||
980 | if (!range_end) | ||
981 | break; | ||
982 | /* Trim the ends */ | ||
983 | if (range_start < zero_start) | ||
984 | range_start = zero_start; | ||
985 | if (range_end > zero_to_size) | ||
986 | range_end = zero_to_size; | ||
987 | |||
988 | ret = ocfs2_zero_extend_range(inode, range_start, | ||
989 | range_end); | ||
990 | if (ret) { | ||
991 | mlog_errno(ret); | ||
992 | break; | ||
993 | } | ||
994 | zero_start = range_end; | ||
995 | } | ||
996 | |||
814 | return ret; | 997 | return ret; |
815 | } | 998 | } |
816 | 999 | ||
817 | int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to) | 1000 | int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh, |
1001 | u64 new_i_size, u64 zero_to) | ||
818 | { | 1002 | { |
819 | int ret; | 1003 | int ret; |
820 | u32 clusters_to_add; | 1004 | u32 clusters_to_add; |
821 | struct ocfs2_inode_info *oi = OCFS2_I(inode); | 1005 | struct ocfs2_inode_info *oi = OCFS2_I(inode); |
822 | 1006 | ||
1007 | /* | ||
1008 | * Only quota files call this without a bh, and they can't be | ||
1009 | * refcounted. | ||
1010 | */ | ||
1011 | BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); | ||
1012 | BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE)); | ||
1013 | |||
823 | clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size); | 1014 | clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size); |
824 | if (clusters_to_add < oi->ip_clusters) | 1015 | if (clusters_to_add < oi->ip_clusters) |
825 | clusters_to_add = 0; | 1016 | clusters_to_add = 0; |
@@ -840,7 +1031,7 @@ int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to) | |||
840 | * still need to zero the area between the old i_size and the | 1031 | * still need to zero the area between the old i_size and the |
841 | * new i_size. | 1032 | * new i_size. |
842 | */ | 1033 | */ |
843 | ret = ocfs2_zero_extend(inode, zero_to); | 1034 | ret = ocfs2_zero_extend(inode, di_bh, zero_to); |
844 | if (ret < 0) | 1035 | if (ret < 0) |
845 | mlog_errno(ret); | 1036 | mlog_errno(ret); |
846 | 1037 | ||
@@ -862,27 +1053,15 @@ static int ocfs2_extend_file(struct inode *inode, | |||
862 | goto out; | 1053 | goto out; |
863 | 1054 | ||
864 | if (i_size_read(inode) == new_i_size) | 1055 | if (i_size_read(inode) == new_i_size) |
865 | goto out; | 1056 | goto out; |
866 | BUG_ON(new_i_size < i_size_read(inode)); | 1057 | BUG_ON(new_i_size < i_size_read(inode)); |
867 | 1058 | ||
868 | /* | 1059 | /* |
869 | * Fall through for converting inline data, even if the fs | ||
870 | * supports sparse files. | ||
871 | * | ||
872 | * The check for inline data here is legal - nobody can add | ||
873 | * the feature since we have i_mutex. We must check it again | ||
874 | * after acquiring ip_alloc_sem though, as paths like mmap | ||
875 | * might have raced us to converting the inode to extents. | ||
876 | */ | ||
877 | if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) | ||
878 | && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) | ||
879 | goto out_update_size; | ||
880 | |||
881 | /* | ||
882 | * The alloc sem blocks people in read/write from reading our | 1060 | * The alloc sem blocks people in read/write from reading our |
883 | * allocation until we're done changing it. We depend on | 1061 | * allocation until we're done changing it. We depend on |
884 | * i_mutex to block other extend/truncate calls while we're | 1062 | * i_mutex to block other extend/truncate calls while we're |
885 | * here. | 1063 | * here. We even have to hold it for sparse files because there |
1064 | * might be some tail zeroing. | ||
886 | */ | 1065 | */ |
887 | down_write(&oi->ip_alloc_sem); | 1066 | down_write(&oi->ip_alloc_sem); |
888 | 1067 | ||
@@ -899,14 +1078,16 @@ static int ocfs2_extend_file(struct inode *inode, | |||
899 | ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); | 1078 | ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); |
900 | if (ret) { | 1079 | if (ret) { |
901 | up_write(&oi->ip_alloc_sem); | 1080 | up_write(&oi->ip_alloc_sem); |
902 | |||
903 | mlog_errno(ret); | 1081 | mlog_errno(ret); |
904 | goto out; | 1082 | goto out; |
905 | } | 1083 | } |
906 | } | 1084 | } |
907 | 1085 | ||
908 | if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) | 1086 | if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) |
909 | ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size); | 1087 | ret = ocfs2_zero_extend(inode, di_bh, new_i_size); |
1088 | else | ||
1089 | ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size, | ||
1090 | new_i_size); | ||
910 | 1091 | ||
911 | up_write(&oi->ip_alloc_sem); | 1092 | up_write(&oi->ip_alloc_sem); |
912 | 1093 | ||
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index d66cf4f7c70e..97bf761c9e7c 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h | |||
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb, | |||
54 | int ocfs2_simple_size_update(struct inode *inode, | 54 | int ocfs2_simple_size_update(struct inode *inode, |
55 | struct buffer_head *di_bh, | 55 | struct buffer_head *di_bh, |
56 | u64 new_i_size); | 56 | u64 new_i_size); |
57 | int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, | 57 | int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh, |
58 | u64 zero_to); | 58 | u64 new_i_size, u64 zero_to); |
59 | int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, | ||
60 | loff_t zero_to); | ||
59 | int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); | 61 | int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); |
60 | int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, | 62 | int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, |
61 | struct kstat *stat); | 63 | struct kstat *stat); |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 47878cf16418..625de9d7088c 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger | |||
472 | return container_of(triggers, struct ocfs2_triggers, ot_triggers); | 472 | return container_of(triggers, struct ocfs2_triggers, ot_triggers); |
473 | } | 473 | } |
474 | 474 | ||
475 | static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, | 475 | static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, |
476 | struct buffer_head *bh, | 476 | struct buffer_head *bh, |
477 | void *data, size_t size) | 477 | void *data, size_t size) |
478 | { | 478 | { |
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers, | |||
491 | * Quota blocks have their own trigger because the struct ocfs2_block_check | 491 | * Quota blocks have their own trigger because the struct ocfs2_block_check |
492 | * offset depends on the blocksize. | 492 | * offset depends on the blocksize. |
493 | */ | 493 | */ |
494 | static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, | 494 | static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, |
495 | struct buffer_head *bh, | 495 | struct buffer_head *bh, |
496 | void *data, size_t size) | 496 | void *data, size_t size) |
497 | { | 497 | { |
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers, | |||
511 | * Directory blocks also have their own trigger because the | 511 | * Directory blocks also have their own trigger because the |
512 | * struct ocfs2_block_check offset depends on the blocksize. | 512 | * struct ocfs2_block_check offset depends on the blocksize. |
513 | */ | 513 | */ |
514 | static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers, | 514 | static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers, |
515 | struct buffer_head *bh, | 515 | struct buffer_head *bh, |
516 | void *data, size_t size) | 516 | void *data, size_t size) |
517 | { | 517 | { |
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers, | |||
544 | 544 | ||
545 | static struct ocfs2_triggers di_triggers = { | 545 | static struct ocfs2_triggers di_triggers = { |
546 | .ot_triggers = { | 546 | .ot_triggers = { |
547 | .t_commit = ocfs2_commit_trigger, | 547 | .t_frozen = ocfs2_frozen_trigger, |
548 | .t_abort = ocfs2_abort_trigger, | 548 | .t_abort = ocfs2_abort_trigger, |
549 | }, | 549 | }, |
550 | .ot_offset = offsetof(struct ocfs2_dinode, i_check), | 550 | .ot_offset = offsetof(struct ocfs2_dinode, i_check), |
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = { | |||
552 | 552 | ||
553 | static struct ocfs2_triggers eb_triggers = { | 553 | static struct ocfs2_triggers eb_triggers = { |
554 | .ot_triggers = { | 554 | .ot_triggers = { |
555 | .t_commit = ocfs2_commit_trigger, | 555 | .t_frozen = ocfs2_frozen_trigger, |
556 | .t_abort = ocfs2_abort_trigger, | 556 | .t_abort = ocfs2_abort_trigger, |
557 | }, | 557 | }, |
558 | .ot_offset = offsetof(struct ocfs2_extent_block, h_check), | 558 | .ot_offset = offsetof(struct ocfs2_extent_block, h_check), |
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = { | |||
560 | 560 | ||
561 | static struct ocfs2_triggers rb_triggers = { | 561 | static struct ocfs2_triggers rb_triggers = { |
562 | .ot_triggers = { | 562 | .ot_triggers = { |
563 | .t_commit = ocfs2_commit_trigger, | 563 | .t_frozen = ocfs2_frozen_trigger, |
564 | .t_abort = ocfs2_abort_trigger, | 564 | .t_abort = ocfs2_abort_trigger, |
565 | }, | 565 | }, |
566 | .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), | 566 | .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), |
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = { | |||
568 | 568 | ||
569 | static struct ocfs2_triggers gd_triggers = { | 569 | static struct ocfs2_triggers gd_triggers = { |
570 | .ot_triggers = { | 570 | .ot_triggers = { |
571 | .t_commit = ocfs2_commit_trigger, | 571 | .t_frozen = ocfs2_frozen_trigger, |
572 | .t_abort = ocfs2_abort_trigger, | 572 | .t_abort = ocfs2_abort_trigger, |
573 | }, | 573 | }, |
574 | .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), | 574 | .ot_offset = offsetof(struct ocfs2_group_desc, bg_check), |
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = { | |||
576 | 576 | ||
577 | static struct ocfs2_triggers db_triggers = { | 577 | static struct ocfs2_triggers db_triggers = { |
578 | .ot_triggers = { | 578 | .ot_triggers = { |
579 | .t_commit = ocfs2_db_commit_trigger, | 579 | .t_frozen = ocfs2_db_frozen_trigger, |
580 | .t_abort = ocfs2_abort_trigger, | 580 | .t_abort = ocfs2_abort_trigger, |
581 | }, | 581 | }, |
582 | }; | 582 | }; |
583 | 583 | ||
584 | static struct ocfs2_triggers xb_triggers = { | 584 | static struct ocfs2_triggers xb_triggers = { |
585 | .ot_triggers = { | 585 | .ot_triggers = { |
586 | .t_commit = ocfs2_commit_trigger, | 586 | .t_frozen = ocfs2_frozen_trigger, |
587 | .t_abort = ocfs2_abort_trigger, | 587 | .t_abort = ocfs2_abort_trigger, |
588 | }, | 588 | }, |
589 | .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), | 589 | .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check), |
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = { | |||
591 | 591 | ||
592 | static struct ocfs2_triggers dq_triggers = { | 592 | static struct ocfs2_triggers dq_triggers = { |
593 | .ot_triggers = { | 593 | .ot_triggers = { |
594 | .t_commit = ocfs2_dq_commit_trigger, | 594 | .t_frozen = ocfs2_dq_frozen_trigger, |
595 | .t_abort = ocfs2_abort_trigger, | 595 | .t_abort = ocfs2_abort_trigger, |
596 | }, | 596 | }, |
597 | }; | 597 | }; |
598 | 598 | ||
599 | static struct ocfs2_triggers dr_triggers = { | 599 | static struct ocfs2_triggers dr_triggers = { |
600 | .ot_triggers = { | 600 | .ot_triggers = { |
601 | .t_commit = ocfs2_commit_trigger, | 601 | .t_frozen = ocfs2_frozen_trigger, |
602 | .t_abort = ocfs2_abort_trigger, | 602 | .t_abort = ocfs2_abort_trigger, |
603 | }, | 603 | }, |
604 | .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), | 604 | .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check), |
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = { | |||
606 | 606 | ||
607 | static struct ocfs2_triggers dl_triggers = { | 607 | static struct ocfs2_triggers dl_triggers = { |
608 | .ot_triggers = { | 608 | .ot_triggers = { |
609 | .t_commit = ocfs2_commit_trigger, | 609 | .t_frozen = ocfs2_frozen_trigger, |
610 | .t_abort = ocfs2_abort_trigger, | 610 | .t_abort = ocfs2_abort_trigger, |
611 | }, | 611 | }, |
612 | .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), | 612 | .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check), |
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work) | |||
1936 | mutex_lock(&os->os_lock); | 1936 | mutex_lock(&os->os_lock); |
1937 | ocfs2_queue_orphan_scan(osb); | 1937 | ocfs2_queue_orphan_scan(osb); |
1938 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) | 1938 | if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) |
1939 | schedule_delayed_work(&os->os_orphan_scan_work, | 1939 | queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, |
1940 | ocfs2_orphan_scan_timeout()); | 1940 | ocfs2_orphan_scan_timeout()); |
1941 | mutex_unlock(&os->os_lock); | 1941 | mutex_unlock(&os->os_lock); |
1942 | } | 1942 | } |
@@ -1976,8 +1976,8 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb) | |||
1976 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); | 1976 | atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE); |
1977 | else { | 1977 | else { |
1978 | atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); | 1978 | atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE); |
1979 | schedule_delayed_work(&os->os_orphan_scan_work, | 1979 | queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work, |
1980 | ocfs2_orphan_scan_timeout()); | 1980 | ocfs2_orphan_scan_timeout()); |
1981 | } | 1981 | } |
1982 | } | 1982 | } |
1983 | 1983 | ||
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index 3d7419682dc0..ec6adbf8f551 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c | |||
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb) | |||
118 | { | 118 | { |
119 | unsigned int la_mb; | 119 | unsigned int la_mb; |
120 | unsigned int gd_mb; | 120 | unsigned int gd_mb; |
121 | unsigned int la_max_mb; | ||
121 | unsigned int megs_per_slot; | 122 | unsigned int megs_per_slot; |
122 | struct super_block *sb = osb->sb; | 123 | struct super_block *sb = osb->sb; |
123 | 124 | ||
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb) | |||
182 | if (megs_per_slot < la_mb) | 183 | if (megs_per_slot < la_mb) |
183 | la_mb = megs_per_slot; | 184 | la_mb = megs_per_slot; |
184 | 185 | ||
186 | /* We can't store more bits than we can in a block. */ | ||
187 | la_max_mb = ocfs2_clusters_to_megabytes(osb->sb, | ||
188 | ocfs2_local_alloc_size(sb) * 8); | ||
189 | if (la_mb > la_max_mb) | ||
190 | la_mb = la_max_mb; | ||
191 | |||
185 | return la_mb; | 192 | return la_mb; |
186 | } | 193 | } |
187 | 194 | ||
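The new clamp above caps the local alloc window at what a single bitmap block can describe: one bit per cluster, eight bits per byte of bitmap. The same computation in isolation, with illustrative byte counts:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned clamp_la_mb(unsigned requested_mb,
                                unsigned bitmap_bytes, unsigned cluster_bytes)
    {
        /* largest window one bitmap block can track */
        uint64_t max_bytes = (uint64_t)bitmap_bytes * 8 * cluster_bytes;
        unsigned max_mb = (unsigned)(max_bytes >> 20);

        return requested_mb > max_mb ? max_mb : requested_mb;
    }

    int main(void)
    {
        /* ~3800 bytes of bitmap and 4K clusters: at most ~118 MB */
        printf("%u\n", clamp_la_mb(256, 3800, 4096));
        return 0;
    }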
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 2bb35fe00511..4607923eb24c 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) | |||
775 | * locking allocators ranks above a transaction start | 775 | * locking allocators ranks above a transaction start |
776 | */ | 776 | */ |
777 | WARN_ON(journal_current_handle()); | 777 | WARN_ON(journal_current_handle()); |
778 | status = ocfs2_extend_no_holes(gqinode, | 778 | status = ocfs2_extend_no_holes(gqinode, NULL, |
779 | gqinode->i_size + (need_alloc << sb->s_blocksize_bits), | 779 | gqinode->i_size + (need_alloc << sb->s_blocksize_bits), |
780 | gqinode->i_size); | 780 | gqinode->i_size); |
781 | if (status < 0) | 781 | if (status < 0) |
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 8bd70d4d184d..dc78764ccc4c 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c | |||
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( | |||
971 | u64 p_blkno; | 971 | u64 p_blkno; |
972 | 972 | ||
973 | /* We are protected by dqio_sem so no locking needed */ | 973 | /* We are protected by dqio_sem so no locking needed */ |
974 | status = ocfs2_extend_no_holes(lqinode, | 974 | status = ocfs2_extend_no_holes(lqinode, NULL, |
975 | lqinode->i_size + 2 * sb->s_blocksize, | 975 | lqinode->i_size + 2 * sb->s_blocksize, |
976 | lqinode->i_size); | 976 | lqinode->i_size); |
977 | if (status < 0) { | 977 | if (status < 0) { |
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( | |||
1114 | return ocfs2_local_quota_add_chunk(sb, type, offset); | 1114 | return ocfs2_local_quota_add_chunk(sb, type, offset); |
1115 | 1115 | ||
1116 | /* We are protected by dqio_sem so no locking needed */ | 1116 | /* We are protected by dqio_sem so no locking needed */ |
1117 | status = ocfs2_extend_no_holes(lqinode, | 1117 | status = ocfs2_extend_no_holes(lqinode, NULL, |
1118 | lqinode->i_size + sb->s_blocksize, | 1118 | lqinode->i_size + sb->s_blocksize, |
1119 | lqinode->i_size); | 1119 | lqinode->i_size); |
1120 | if (status < 0) { | 1120 | if (status < 0) { |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 4793f36f6518..3ac5aa733e9c 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -2931,6 +2931,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2931 | 2931 | ||
2932 | offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; | 2932 | offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; |
2933 | end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); | 2933 | end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); |
2934 | /* | ||
2935 | * We only duplicate pages until we reach the page that contains i_size - 1. | ||
2936 | * So trim 'end' to i_size. | ||
2937 | */ | ||
2938 | if (end > i_size_read(context->inode)) | ||
2939 | end = i_size_read(context->inode); | ||
2934 | 2940 | ||
2935 | while (offset < end) { | 2941 | while (offset < end) { |
2936 | page_index = offset >> PAGE_CACHE_SHIFT; | 2942 | page_index = offset >> PAGE_CACHE_SHIFT; |
@@ -4166,6 +4172,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry, | |||
4166 | struct inode *inode = old_dentry->d_inode; | 4172 | struct inode *inode = old_dentry->d_inode; |
4167 | struct buffer_head *new_bh = NULL; | 4173 | struct buffer_head *new_bh = NULL; |
4168 | 4174 | ||
4175 | if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) { | ||
4176 | ret = -EINVAL; | ||
4177 | mlog_errno(ret); | ||
4178 | goto out; | ||
4179 | } | ||
4180 | |||
4169 | ret = filemap_fdatawrite(inode->i_mapping); | 4181 | ret = filemap_fdatawrite(inode->i_mapping); |
4170 | if (ret) { | 4182 | if (ret) { |
4171 | mlog_errno(ret); | 4183 | mlog_errno(ret); |
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c index 40650021fc24..d8b6e4259b80 100644 --- a/fs/ocfs2/reservations.c +++ b/fs/ocfs2/reservations.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/slab.h> | ||
30 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
31 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
32 | #include <linux/list.h> | 31 | #include <linux/list.h> |
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index f4c2a9eb8c4d..a8e6a95a353f 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c | |||
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, | |||
741 | le16_to_cpu(bg->bg_free_bits_count)); | 741 | le16_to_cpu(bg->bg_free_bits_count)); |
742 | le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, | 742 | le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, |
743 | le16_to_cpu(bg->bg_bits)); | 743 | le16_to_cpu(bg->bg_bits)); |
744 | cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg->bg_blkno); | 744 | cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno; |
745 | if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count)) | 745 | if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count)) |
746 | le16_add_cpu(&cl->cl_next_free_rec, 1); | 746 | le16_add_cpu(&cl->cl_next_free_rec, 1); |
747 | 747 | ||
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index e97b34842cfe..d03469f61801 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c | |||
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, | |||
709 | struct ocfs2_xattr_value_buf *vb, | 709 | struct ocfs2_xattr_value_buf *vb, |
710 | struct ocfs2_xattr_set_ctxt *ctxt) | 710 | struct ocfs2_xattr_set_ctxt *ctxt) |
711 | { | 711 | { |
712 | int status = 0; | 712 | int status = 0, credits; |
713 | handle_t *handle = ctxt->handle; | 713 | handle_t *handle = ctxt->handle; |
714 | enum ocfs2_alloc_restarted why; | 714 | enum ocfs2_alloc_restarted why; |
715 | u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); | 715 | u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); |
@@ -719,38 +719,54 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, | |||
719 | 719 | ||
720 | ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); | 720 | ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); |
721 | 721 | ||
722 | status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, | 722 | while (clusters_to_add) { |
723 | OCFS2_JOURNAL_ACCESS_WRITE); | 723 | status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, |
724 | if (status < 0) { | 724 | OCFS2_JOURNAL_ACCESS_WRITE); |
725 | mlog_errno(status); | 725 | if (status < 0) { |
726 | goto leave; | 726 | mlog_errno(status); |
727 | } | 727 | break; |
728 | } | ||
728 | 729 | ||
729 | prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); | 730 | prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); |
730 | status = ocfs2_add_clusters_in_btree(handle, | 731 | status = ocfs2_add_clusters_in_btree(handle, |
731 | &et, | 732 | &et, |
732 | &logical_start, | 733 | &logical_start, |
733 | clusters_to_add, | 734 | clusters_to_add, |
734 | 0, | 735 | 0, |
735 | ctxt->data_ac, | 736 | ctxt->data_ac, |
736 | ctxt->meta_ac, | 737 | ctxt->meta_ac, |
737 | &why); | 738 | &why); |
738 | if (status < 0) { | 739 | if ((status < 0) && (status != -EAGAIN)) { |
739 | mlog_errno(status); | 740 | if (status != -ENOSPC) |
740 | goto leave; | 741 | mlog_errno(status); |
741 | } | 742 | break; |
743 | } | ||
742 | 744 | ||
743 | ocfs2_journal_dirty(handle, vb->vb_bh); | 745 | ocfs2_journal_dirty(handle, vb->vb_bh); |
744 | 746 | ||
745 | clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters; | 747 | clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - |
748 | prev_clusters; | ||
746 | 749 | ||
747 | /* | 750 | if (why != RESTART_NONE && clusters_to_add) { |
748 | * We should have already allocated enough space before the transaction, | 751 | /* |
749 | * so no need to restart. | 752 | * We can only fail in case the alloc file doesn't give |
750 | */ | 753 | * up enough clusters. |
751 | BUG_ON(why != RESTART_NONE || clusters_to_add); | 754 | */ |
752 | 755 | BUG_ON(why == RESTART_META); | |
753 | leave: | 756 | |
757 | mlog(0, "restarting xattr value extension for %u" | ||
758 | " clusters,.\n", clusters_to_add); | ||
759 | credits = ocfs2_calc_extend_credits(inode->i_sb, | ||
760 | &vb->vb_xv->xr_list, | ||
761 | clusters_to_add); | ||
762 | status = ocfs2_extend_trans(handle, credits); | ||
763 | if (status < 0) { | ||
764 | status = -ENOMEM; | ||
765 | mlog_errno(status); | ||
766 | break; | ||
767 | } | ||
768 | } | ||
769 | } | ||
754 | 770 | ||
755 | return status; | 771 | return status; |
756 | } | 772 | } |
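The rewritten loop above follows a common ocfs2 shape: try to add all remaining clusters, and when ocfs2_add_clusters_in_btree() asks for a restart rather than failing hard, grow the running handle with ocfs2_extend_trans() and go around again. A condensed, hedged sketch of that control flow (helper names as used in the hunk, journal access and dirtying trimmed):

        while (clusters_to_add) {
                prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
                status = ocfs2_add_clusters_in_btree(handle, &et, &logical_start,
                                                     clusters_to_add, 0,
                                                     ctxt->data_ac, ctxt->meta_ac, &why);
                if (status < 0 && status != -EAGAIN)
                        break;                          /* hard failure */

                clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;

                if (why != RESTART_NONE && clusters_to_add) {
                        /* out of journal credits, not out of space: extend the handle */
                        credits = ocfs2_calc_extend_credits(inode->i_sb,
                                                            &vb->vb_xv->xr_list,
                                                            clusters_to_add);
                        status = ocfs2_extend_trans(handle, credits);
                        if (status < 0)
                                break;
                }
        }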
@@ -6788,16 +6804,15 @@ out: | |||
6788 | return ret; | 6804 | return ret; |
6789 | } | 6805 | } |
6790 | 6806 | ||
6791 | static int ocfs2_reflink_xattr_buckets(handle_t *handle, | 6807 | static int ocfs2_reflink_xattr_bucket(handle_t *handle, |
6792 | u64 blkno, u64 new_blkno, u32 clusters, | 6808 | u64 blkno, u64 new_blkno, u32 clusters, |
6809 | u32 *cpos, int num_buckets, | ||
6793 | struct ocfs2_alloc_context *meta_ac, | 6810 | struct ocfs2_alloc_context *meta_ac, |
6794 | struct ocfs2_alloc_context *data_ac, | 6811 | struct ocfs2_alloc_context *data_ac, |
6795 | struct ocfs2_reflink_xattr_tree_args *args) | 6812 | struct ocfs2_reflink_xattr_tree_args *args) |
6796 | { | 6813 | { |
6797 | int i, j, ret = 0; | 6814 | int i, j, ret = 0; |
6798 | struct super_block *sb = args->reflink->old_inode->i_sb; | 6815 | struct super_block *sb = args->reflink->old_inode->i_sb; |
6799 | u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb)); | ||
6800 | u32 num_buckets = clusters * bpc; | ||
6801 | int bpb = args->old_bucket->bu_blocks; | 6816 | int bpb = args->old_bucket->bu_blocks; |
6802 | struct ocfs2_xattr_value_buf vb = { | 6817 | struct ocfs2_xattr_value_buf vb = { |
6803 | .vb_access = ocfs2_journal_access, | 6818 | .vb_access = ocfs2_journal_access, |
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle, | |||
6816 | break; | 6831 | break; |
6817 | } | 6832 | } |
6818 | 6833 | ||
6819 | /* | ||
6820 | * The real bucket num in this series of blocks is stored | ||
6821 | * in the 1st bucket. | ||
6822 | */ | ||
6823 | if (i == 0) | ||
6824 | num_buckets = le16_to_cpu( | ||
6825 | bucket_xh(args->old_bucket)->xh_num_buckets); | ||
6826 | |||
6827 | ret = ocfs2_xattr_bucket_journal_access(handle, | 6834 | ret = ocfs2_xattr_bucket_journal_access(handle, |
6828 | args->new_bucket, | 6835 | args->new_bucket, |
6829 | OCFS2_JOURNAL_ACCESS_CREATE); | 6836 | OCFS2_JOURNAL_ACCESS_CREATE); |
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle, | |||
6837 | bucket_block(args->old_bucket, j), | 6844 | bucket_block(args->old_bucket, j), |
6838 | sb->s_blocksize); | 6845 | sb->s_blocksize); |
6839 | 6846 | ||
6847 | /* | ||
6848 | * Record the start cpos so that we can use it to initialize | ||
6849 | * our xattr tree. We also set xh_num_buckets for the new | ||
6850 | * bucket. | ||
6851 | */ | ||
6852 | if (i == 0) { | ||
6853 | *cpos = le32_to_cpu(bucket_xh(args->new_bucket)-> | ||
6854 | xh_entries[0].xe_name_hash); | ||
6855 | bucket_xh(args->new_bucket)->xh_num_buckets = | ||
6856 | cpu_to_le16(num_buckets); | ||
6857 | } | ||
6858 | |||
6840 | ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); | 6859 | ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); |
6841 | 6860 | ||
6842 | ret = ocfs2_reflink_xattr_header(handle, args->reflink, | 6861 | ret = ocfs2_reflink_xattr_header(handle, args->reflink, |
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle, | |||
6866 | } | 6885 | } |
6867 | 6886 | ||
6868 | ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); | 6887 | ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket); |
6888 | |||
6869 | ocfs2_xattr_bucket_relse(args->old_bucket); | 6889 | ocfs2_xattr_bucket_relse(args->old_bucket); |
6870 | ocfs2_xattr_bucket_relse(args->new_bucket); | 6890 | ocfs2_xattr_bucket_relse(args->new_bucket); |
6871 | } | 6891 | } |
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle, | |||
6874 | ocfs2_xattr_bucket_relse(args->new_bucket); | 6894 | ocfs2_xattr_bucket_relse(args->new_bucket); |
6875 | return ret; | 6895 | return ret; |
6876 | } | 6896 | } |
6897 | |||
6898 | static int ocfs2_reflink_xattr_buckets(handle_t *handle, | ||
6899 | struct inode *inode, | ||
6900 | struct ocfs2_reflink_xattr_tree_args *args, | ||
6901 | struct ocfs2_extent_tree *et, | ||
6902 | struct ocfs2_alloc_context *meta_ac, | ||
6903 | struct ocfs2_alloc_context *data_ac, | ||
6904 | u64 blkno, u32 cpos, u32 len) | ||
6905 | { | ||
6906 | int ret, first_inserted = 0; | ||
6907 | u32 p_cluster, num_clusters, reflink_cpos = 0; | ||
6908 | u64 new_blkno; | ||
6909 | unsigned int num_buckets, reflink_buckets; | ||
6910 | unsigned int bpc = | ||
6911 | ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb)); | ||
6912 | |||
6913 | ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno); | ||
6914 | if (ret) { | ||
6915 | mlog_errno(ret); | ||
6916 | goto out; | ||
6917 | } | ||
6918 | num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets); | ||
6919 | ocfs2_xattr_bucket_relse(args->old_bucket); | ||
6920 | |||
6921 | while (len && num_buckets) { | ||
6922 | ret = ocfs2_claim_clusters(handle, data_ac, | ||
6923 | 1, &p_cluster, &num_clusters); | ||
6924 | if (ret) { | ||
6925 | mlog_errno(ret); | ||
6926 | goto out; | ||
6927 | } | ||
6928 | |||
6929 | new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); | ||
6930 | reflink_buckets = min(num_buckets, bpc * num_clusters); | ||
6931 | |||
6932 | ret = ocfs2_reflink_xattr_bucket(handle, blkno, | ||
6933 | new_blkno, num_clusters, | ||
6934 | &reflink_cpos, reflink_buckets, | ||
6935 | meta_ac, data_ac, args); | ||
6936 | if (ret) { | ||
6937 | mlog_errno(ret); | ||
6938 | goto out; | ||
6939 | } | ||
6940 | |||
6941 | /* | ||
6942 | * For the 1st allocated cluster, we make it use the same cpos | ||
6943 | * so that the xattr tree looks the same as the original one | ||
6944 | * in most cases. | ||
6945 | */ | ||
6946 | if (!first_inserted) { | ||
6947 | reflink_cpos = cpos; | ||
6948 | first_inserted = 1; | ||
6949 | } | ||
6950 | ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno, | ||
6951 | num_clusters, 0, meta_ac); | ||
6952 | if (ret) | ||
6953 | mlog_errno(ret); | ||
6954 | |||
6955 | mlog(0, "insert new xattr extent rec start %llu len %u to %u\n", | ||
6956 | (unsigned long long)new_blkno, num_clusters, reflink_cpos); | ||
6957 | |||
6958 | len -= num_clusters; | ||
6959 | blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); | ||
6960 | num_buckets -= reflink_buckets; | ||
6961 | } | ||
6962 | out: | ||
6963 | return ret; | ||
6964 | } | ||
6965 | |||
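To make the chunking in ocfs2_reflink_xattr_buckets() concrete (the numbers below are invented for illustration): with 4 buckets per cluster and an old extent of 3 clusters holding 10 buckets, each pass claims a cluster chunk and copies min(remaining buckets, bpc * claimed clusters) buckets, so the passes copy 4, 4 and then 2 buckets:

        unsigned int bpc = 4;           /* assumed buckets per cluster */
        unsigned int num_buckets = 10;  /* from xh_num_buckets of the first old bucket */
        u32 len = 3;                    /* clusters in the old extent record */

        while (len && num_buckets) {
                unsigned int claimed = 1;       /* ocfs2_claim_clusters() may return more */
                unsigned int chunk = min(num_buckets, bpc * claimed);

                /* copy 'chunk' buckets, then insert a 'claimed'-cluster extent record */
                len -= claimed;
                num_buckets -= chunk;
        }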
6877 | /* | 6966 | /* |
6878 | * Create the same xattr extent record in the new inode's xattr tree. | 6967 | * Create the same xattr extent record in the new inode's xattr tree. |
6879 | */ | 6968 | */ |
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode, | |||
6885 | void *para) | 6974 | void *para) |
6886 | { | 6975 | { |
6887 | int ret, credits = 0; | 6976 | int ret, credits = 0; |
6888 | u32 p_cluster, num_clusters; | ||
6889 | u64 new_blkno; | ||
6890 | handle_t *handle; | 6977 | handle_t *handle; |
6891 | struct ocfs2_reflink_xattr_tree_args *args = | 6978 | struct ocfs2_reflink_xattr_tree_args *args = |
6892 | (struct ocfs2_reflink_xattr_tree_args *)para; | 6979 | (struct ocfs2_reflink_xattr_tree_args *)para; |
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode, | |||
6895 | struct ocfs2_alloc_context *data_ac = NULL; | 6982 | struct ocfs2_alloc_context *data_ac = NULL; |
6896 | struct ocfs2_extent_tree et; | 6983 | struct ocfs2_extent_tree et; |
6897 | 6984 | ||
6985 | mlog(0, "reflink xattr buckets %llu len %u\n", | ||
6986 | (unsigned long long)blkno, len); | ||
6987 | |||
6898 | ocfs2_init_xattr_tree_extent_tree(&et, | 6988 | ocfs2_init_xattr_tree_extent_tree(&et, |
6899 | INODE_CACHE(args->reflink->new_inode), | 6989 | INODE_CACHE(args->reflink->new_inode), |
6900 | args->new_blk_bh); | 6990 | args->new_blk_bh); |
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode, | |||
6914 | goto out; | 7004 | goto out; |
6915 | } | 7005 | } |
6916 | 7006 | ||
6917 | ret = ocfs2_claim_clusters(handle, data_ac, | 7007 | ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et, |
6918 | len, &p_cluster, &num_clusters); | 7008 | meta_ac, data_ac, |
6919 | if (ret) { | 7009 | blkno, cpos, len); |
6920 | mlog_errno(ret); | ||
6921 | goto out_commit; | ||
6922 | } | ||
6923 | |||
6924 | new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster); | ||
6925 | |||
6926 | mlog(0, "reflink xattr buckets %llu to %llu, len %u\n", | ||
6927 | (unsigned long long)blkno, (unsigned long long)new_blkno, len); | ||
6928 | ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len, | ||
6929 | meta_ac, data_ac, args); | ||
6930 | if (ret) { | ||
6931 | mlog_errno(ret); | ||
6932 | goto out_commit; | ||
6933 | } | ||
6934 | |||
6935 | mlog(0, "insert new xattr extent rec start %llu len %u to %u\n", | ||
6936 | (unsigned long long)new_blkno, len, cpos); | ||
6937 | ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno, | ||
6938 | len, 0, meta_ac); | ||
6939 | if (ret) | 7010 | if (ret) |
6940 | mlog_errno(ret); | 7011 | mlog_errno(ret); |
6941 | 7012 | ||
6942 | out_commit: | ||
6943 | ocfs2_commit_trans(osb, handle); | 7013 | ocfs2_commit_trans(osb, handle); |
6944 | 7014 | ||
6945 | out: | 7015 | out: |
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c index 3e73de5967ff..fc8497643fd0 100644 --- a/fs/partitions/ibm.c +++ b/fs/partitions/ibm.c | |||
@@ -74,6 +74,7 @@ int ibm_partition(struct parsed_partitions *state) | |||
74 | } *label; | 74 | } *label; |
75 | unsigned char *data; | 75 | unsigned char *data; |
76 | Sector sect; | 76 | Sector sect; |
77 | sector_t labelsect; | ||
77 | 78 | ||
78 | res = 0; | 79 | res = 0; |
79 | blocksize = bdev_logical_block_size(bdev); | 80 | blocksize = bdev_logical_block_size(bdev); |
@@ -98,10 +99,19 @@ int ibm_partition(struct parsed_partitions *state) | |||
98 | goto out_freeall; | 99 | goto out_freeall; |
99 | 100 | ||
100 | /* | 101 | /* |
102 | * Special case for FBA disks: label sector does not depend on | ||
103 | * blocksize. | ||
104 | */ | ||
105 | if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) || | ||
106 | (info->cu_type == 0x3880 && info->dev_type == 0x3370)) | ||
107 | labelsect = info->label_block; | ||
108 | else | ||
109 | labelsect = info->label_block * (blocksize >> 9); | ||
110 | |||
111 | /* | ||
101 | * Get volume label, extract name and type. | 112 | * Get volume label, extract name and type. |
102 | */ | 113 | */ |
103 | data = read_part_sector(state, info->label_block*(blocksize/512), | 114 | data = read_part_sector(state, labelsect, &sect); |
104 | &sect); | ||
105 | if (data == NULL) | 115 | if (data == NULL) |
106 | goto out_readerr; | 116 | goto out_readerr; |
107 | 117 | ||
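A quick worked example of the new label-sector computation (values invented, fba_disk stands in for the cu_type/dev_type test above): read_part_sector() addresses 512-byte sectors, so on non-FBA devices the label block index must be scaled by blocksize/512, while the listed FBA models already record it in sector units.

        unsigned int blocksize = 4096;  /* example logical block size */
        sector_t label_block = 2;       /* example label block index */
        sector_t labelsect;

        if (fba_disk)
                labelsect = label_block;                        /* already a sector number */
        else
                labelsect = label_block * (blocksize >> 9);     /* 2 * 8 = 16 */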
diff --git a/fs/proc/array.c b/fs/proc/array.c index 9b58d38bc911..fff6572676ae 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, | |||
176 | if (tracer) | 176 | if (tracer) |
177 | tpid = task_pid_nr_ns(tracer, ns); | 177 | tpid = task_pid_nr_ns(tracer, ns); |
178 | } | 178 | } |
179 | cred = get_cred((struct cred *) __task_cred(p)); | 179 | cred = get_task_cred(p); |
180 | seq_printf(m, | 180 | seq_printf(m, |
181 | "State:\t%s\n" | 181 | "State:\t%s\n" |
182 | "Tgid:\t%d\n" | 182 | "Tgid:\t%d\n" |
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index ce94801f48ca..d9396a4fc7ff 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c | |||
@@ -209,6 +209,9 @@ void proc_device_tree_add_node(struct device_node *np, | |||
209 | for (pp = np->properties; pp != NULL; pp = pp->next) { | 209 | for (pp = np->properties; pp != NULL; pp = pp->next) { |
210 | p = pp->name; | 210 | p = pp->name; |
211 | 211 | ||
212 | if (strchr(p, '/')) | ||
213 | continue; | ||
214 | |||
212 | if (duplicate_name(de, p)) | 215 | if (duplicate_name(de, p)) |
213 | p = fixup_name(np, de, p); | 216 | p = fixup_name(np, de, p); |
214 | 217 | ||
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 46d4b5d72bd3..cb6306e63843 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -122,11 +122,20 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
122 | return size; | 122 | return size; |
123 | } | 123 | } |
124 | 124 | ||
125 | static void pad_len_spaces(struct seq_file *m, int len) | ||
126 | { | ||
127 | len = 25 + sizeof(void*) * 6 - len; | ||
128 | if (len < 1) | ||
129 | len = 1; | ||
130 | seq_printf(m, "%*c", len, ' '); | ||
131 | } | ||
132 | |||
125 | /* | 133 | /* |
126 | * display a single VMA to a sequenced file | 134 | * display a single VMA to a sequenced file |
127 | */ | 135 | */ |
128 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | 136 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) |
129 | { | 137 | { |
138 | struct mm_struct *mm = vma->vm_mm; | ||
130 | unsigned long ino = 0; | 139 | unsigned long ino = 0; |
131 | struct file *file; | 140 | struct file *file; |
132 | dev_t dev = 0; | 141 | dev_t dev = 0; |
@@ -155,11 +164,14 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
155 | MAJOR(dev), MINOR(dev), ino, &len); | 164 | MAJOR(dev), MINOR(dev), ino, &len); |
156 | 165 | ||
157 | if (file) { | 166 | if (file) { |
158 | len = 25 + sizeof(void *) * 6 - len; | 167 | pad_len_spaces(m, len); |
159 | if (len < 1) | ||
160 | len = 1; | ||
161 | seq_printf(m, "%*c", len, ' '); | ||
162 | seq_path(m, &file->f_path, ""); | 168 | seq_path(m, &file->f_path, ""); |
169 | } else if (mm) { | ||
170 | if (vma->vm_start <= mm->start_stack && | ||
171 | vma->vm_end >= mm->start_stack) { | ||
172 | pad_len_spaces(m, len); | ||
173 | seq_puts(m, "[stack]"); | ||
174 | } | ||
163 | } | 175 | } |
164 | 176 | ||
165 | seq_putc(m, '\n'); | 177 | seq_putc(m, '\n'); |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 12c233da1b6b..437d2ca2de97 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -676,7 +676,7 @@ static void prune_dqcache(int count) | |||
676 | * This is called from kswapd when we think we need some | 676 | * This is called from kswapd when we think we need some |
677 | * more memory | 677 | * more memory |
678 | */ | 678 | */ |
679 | static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) | 679 | static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
680 | { | 680 | { |
681 | if (nr) { | 681 | if (nr) { |
682 | spin_lock(&dq_list_lock); | 682 | spin_lock(&dq_list_lock); |
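This hunk is part of the tree-wide shrinker API change in this merge: the callback now receives the struct shrinker it was registered with, so one function can serve several shrinker instances. A hedged registration sketch against the API as it stands here (the my_* helpers are hypothetical):

        static int my_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
        {
                if (nr_to_scan)
                        my_prune_objects(nr_to_scan);   /* free up to nr_to_scan objects */
                return my_object_count();               /* report what is left to the VM */
        }

        static struct shrinker my_shrinker = {
                .shrink = my_shrink,
                .seeks  = DEFAULT_SEEKS,
        };

        /* register_shrinker(&my_shrinker) at init, unregister_shrinker(&my_shrinker) on teardown */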
diff --git a/fs/splice.c b/fs/splice.c index 740e6b9faf7a..efdbfece9932 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1282,7 +1282,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, | |||
1282 | { | 1282 | { |
1283 | struct file *file = sd->u.file; | 1283 | struct file *file = sd->u.file; |
1284 | 1284 | ||
1285 | return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags); | 1285 | return do_splice_from(pipe, file, &file->f_pos, sd->total_len, |
1286 | sd->flags); | ||
1286 | } | 1287 | } |
1287 | 1288 | ||
1288 | /** | 1289 | /** |
@@ -1371,8 +1372,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
1371 | if (off_in) | 1372 | if (off_in) |
1372 | return -ESPIPE; | 1373 | return -ESPIPE; |
1373 | if (off_out) { | 1374 | if (off_out) { |
1374 | if (!out->f_op || !out->f_op->llseek || | 1375 | if (!(out->f_mode & FMODE_PWRITE)) |
1375 | out->f_op->llseek == no_llseek) | ||
1376 | return -EINVAL; | 1376 | return -EINVAL; |
1377 | if (copy_from_user(&offset, off_out, sizeof(loff_t))) | 1377 | if (copy_from_user(&offset, off_out, sizeof(loff_t))) |
1378 | return -EFAULT; | 1378 | return -EFAULT; |
@@ -1392,8 +1392,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
1392 | if (off_out) | 1392 | if (off_out) |
1393 | return -ESPIPE; | 1393 | return -ESPIPE; |
1394 | if (off_in) { | 1394 | if (off_in) { |
1395 | if (!in->f_op || !in->f_op->llseek || | 1395 | if (!(in->f_mode & FMODE_PREAD)) |
1396 | in->f_op->llseek == no_llseek) | ||
1397 | return -EINVAL; | 1396 | return -EINVAL; |
1398 | if (copy_from_user(&offset, off_in, sizeof(loff_t))) | 1397 | if (copy_from_user(&offset, off_in, sizeof(loff_t))) |
1399 | return -EFAULT; | 1398 | return -EFAULT; |
diff --git a/fs/super.c b/fs/super.c index 5c35bc7a499e..938119ab8dcb 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -374,6 +374,8 @@ void sync_supers(void) | |||
374 | up_read(&sb->s_umount); | 374 | up_read(&sb->s_umount); |
375 | 375 | ||
376 | spin_lock(&sb_lock); | 376 | spin_lock(&sb_lock); |
377 | /* lock was dropped, must reset next */ | ||
378 | list_safe_reset_next(sb, n, s_list); | ||
377 | __put_super(sb); | 379 | __put_super(sb); |
378 | } | 380 | } |
379 | } | 381 | } |
@@ -405,6 +407,8 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg) | |||
405 | up_read(&sb->s_umount); | 407 | up_read(&sb->s_umount); |
406 | 408 | ||
407 | spin_lock(&sb_lock); | 409 | spin_lock(&sb_lock); |
410 | /* lock was dropped, must reset next */ | ||
411 | list_safe_reset_next(sb, n, s_list); | ||
408 | __put_super(sb); | 412 | __put_super(sb); |
409 | } | 413 | } |
410 | spin_unlock(&sb_lock); | 414 | spin_unlock(&sb_lock); |
@@ -585,6 +589,8 @@ static void do_emergency_remount(struct work_struct *work) | |||
585 | } | 589 | } |
586 | up_write(&sb->s_umount); | 590 | up_write(&sb->s_umount); |
587 | spin_lock(&sb_lock); | 591 | spin_lock(&sb_lock); |
592 | /* lock was dropped, must reset next */ | ||
593 | list_safe_reset_next(sb, n, s_list); | ||
588 | __put_super(sb); | 594 | __put_super(sb); |
589 | } | 595 | } |
590 | spin_unlock(&sb_lock); | 596 | spin_unlock(&sb_lock); |
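All three hunks above fix the same latent bug: these loops use list_for_each_entry_safe(), drop sb_lock in the body, and the cached next pointer can be unlinked while the lock is down, so it must be recomputed before the current superblock is released. A condensed sketch of the corrected pattern, taken from the surrounding code:

        struct super_block *sb, *n;

        spin_lock(&sb_lock);
        list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                /* ... work on sb without sb_lock held; 'n' may be unlinked meanwhile ... */

                spin_lock(&sb_lock);
                list_safe_reset_next(sb, n, s_list);    /* re-read sb's successor under the lock */
                __put_super(sb);
        }
        spin_unlock(&sb_lock);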
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c index f71246bebfe4..a7ac78f8e67a 100644 --- a/fs/sysfs/symlink.c +++ b/fs/sysfs/symlink.c | |||
@@ -28,6 +28,7 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target, | |||
28 | struct sysfs_dirent *target_sd = NULL; | 28 | struct sysfs_dirent *target_sd = NULL; |
29 | struct sysfs_dirent *sd = NULL; | 29 | struct sysfs_dirent *sd = NULL; |
30 | struct sysfs_addrm_cxt acxt; | 30 | struct sysfs_addrm_cxt acxt; |
31 | enum kobj_ns_type ns_type; | ||
31 | int error; | 32 | int error; |
32 | 33 | ||
33 | BUG_ON(!name); | 34 | BUG_ON(!name); |
@@ -58,16 +59,29 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target, | |||
58 | if (!sd) | 59 | if (!sd) |
59 | goto out_put; | 60 | goto out_put; |
60 | 61 | ||
61 | if (sysfs_ns_type(parent_sd)) | 62 | ns_type = sysfs_ns_type(parent_sd); |
63 | if (ns_type) | ||
62 | sd->s_ns = target->ktype->namespace(target); | 64 | sd->s_ns = target->ktype->namespace(target); |
63 | sd->s_symlink.target_sd = target_sd; | 65 | sd->s_symlink.target_sd = target_sd; |
64 | target_sd = NULL; /* reference is now owned by the symlink */ | 66 | target_sd = NULL; /* reference is now owned by the symlink */ |
65 | 67 | ||
66 | sysfs_addrm_start(&acxt, parent_sd); | 68 | sysfs_addrm_start(&acxt, parent_sd); |
67 | if (warn) | 69 | /* Symlinks must be between directories with the same ns_type */ |
68 | error = sysfs_add_one(&acxt, sd); | 70 | if (!ns_type || |
69 | else | 71 | (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) { |
70 | error = __sysfs_add_one(&acxt, sd); | 72 | if (warn) |
73 | error = sysfs_add_one(&acxt, sd); | ||
74 | else | ||
75 | error = __sysfs_add_one(&acxt, sd); | ||
76 | } else { | ||
77 | error = -EINVAL; | ||
78 | WARN(1, KERN_WARNING | ||
79 | "sysfs: symlink across ns_types %s/%s -> %s/%s\n", | ||
80 | parent_sd->s_name, | ||
81 | sd->s_name, | ||
82 | sd->s_symlink.target_sd->s_parent->s_name, | ||
83 | sd->s_symlink.target_sd->s_name); | ||
84 | } | ||
71 | sysfs_addrm_finish(&acxt); | 85 | sysfs_addrm_finish(&acxt); |
72 | 86 | ||
73 | if (error) | 87 | if (error) |
@@ -122,7 +136,7 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ, | |||
122 | { | 136 | { |
123 | const void *ns = NULL; | 137 | const void *ns = NULL; |
124 | spin_lock(&sysfs_assoc_lock); | 138 | spin_lock(&sysfs_assoc_lock); |
125 | if (targ->sd) | 139 | if (targ->sd && sysfs_ns_type(kobj->sd)) |
126 | ns = targ->sd->s_ns; | 140 | ns = targ->sd->s_ns; |
127 | spin_unlock(&sysfs_assoc_lock); | 141 | spin_unlock(&sysfs_assoc_lock); |
128 | sysfs_hash_and_remove(kobj->sd, ns, name); | 142 | sysfs_hash_and_remove(kobj->sd, ns, name); |
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c index bbd69bdb0fa8..fcc498ec9b33 100644 --- a/fs/sysv/ialloc.c +++ b/fs/sysv/ialloc.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/stat.h> | 25 | #include <linux/stat.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/buffer_head.h> | 27 | #include <linux/buffer_head.h> |
28 | #include <linux/writeback.h> | ||
28 | #include "sysv.h" | 29 | #include "sysv.h" |
29 | 30 | ||
30 | /* We don't trust the value of | 31 | /* We don't trust the value of |
@@ -139,6 +140,9 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) | |||
139 | struct inode *inode; | 140 | struct inode *inode; |
140 | sysv_ino_t ino; | 141 | sysv_ino_t ino; |
141 | unsigned count; | 142 | unsigned count; |
143 | struct writeback_control wbc = { | ||
144 | .sync_mode = WB_SYNC_NONE | ||
145 | }; | ||
142 | 146 | ||
143 | inode = new_inode(sb); | 147 | inode = new_inode(sb); |
144 | if (!inode) | 148 | if (!inode) |
@@ -168,7 +172,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) | |||
168 | insert_inode_hash(inode); | 172 | insert_inode_hash(inode); |
169 | mark_inode_dirty(inode); | 173 | mark_inode_dirty(inode); |
170 | 174 | ||
171 | sysv_write_inode(inode, 0); /* ensure inode not allocated again */ | 175 | sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */ |
172 | mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ | 176 | mark_inode_dirty(inode); /* cleared by sysv_write_inode() */ |
173 | /* That's it. */ | 177 | /* That's it. */ |
174 | unlock_super(sb); | 178 | unlock_super(sb); |
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 076ca50e9933..c8ff0d1ae5d3 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c | |||
@@ -62,7 +62,9 @@ | |||
62 | */ | 62 | */ |
63 | static void shrink_liability(struct ubifs_info *c, int nr_to_write) | 63 | static void shrink_liability(struct ubifs_info *c, int nr_to_write) |
64 | { | 64 | { |
65 | down_read(&c->vfs_sb->s_umount); | ||
65 | writeback_inodes_sb(c->vfs_sb); | 66 | writeback_inodes_sb(c->vfs_sb); |
67 | up_read(&c->vfs_sb->s_umount); | ||
66 | } | 68 | } |
67 | 69 | ||
68 | /** | 70 | /** |
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index ad7f67b827ea..0084a33c4c69 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c | |||
@@ -1457,13 +1457,13 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum) | |||
1457 | shft -= UBIFS_LPT_FANOUT_SHIFT; | 1457 | shft -= UBIFS_LPT_FANOUT_SHIFT; |
1458 | nnode = ubifs_get_nnode(c, nnode, iip); | 1458 | nnode = ubifs_get_nnode(c, nnode, iip); |
1459 | if (IS_ERR(nnode)) | 1459 | if (IS_ERR(nnode)) |
1460 | return ERR_PTR(PTR_ERR(nnode)); | 1460 | return ERR_CAST(nnode); |
1461 | } | 1461 | } |
1462 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 1462 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
1463 | shft -= UBIFS_LPT_FANOUT_SHIFT; | 1463 | shft -= UBIFS_LPT_FANOUT_SHIFT; |
1464 | pnode = ubifs_get_pnode(c, nnode, iip); | 1464 | pnode = ubifs_get_pnode(c, nnode, iip); |
1465 | if (IS_ERR(pnode)) | 1465 | if (IS_ERR(pnode)) |
1466 | return ERR_PTR(PTR_ERR(pnode)); | 1466 | return ERR_CAST(pnode); |
1467 | iip = (i & (UBIFS_LPT_FANOUT - 1)); | 1467 | iip = (i & (UBIFS_LPT_FANOUT - 1)); |
1468 | dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, | 1468 | dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, |
1469 | pnode->lprops[iip].free, pnode->lprops[iip].dirty, | 1469 | pnode->lprops[iip].free, pnode->lprops[iip].dirty, |
@@ -1586,7 +1586,7 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) | |||
1586 | nnode = c->nroot; | 1586 | nnode = c->nroot; |
1587 | nnode = dirty_cow_nnode(c, nnode); | 1587 | nnode = dirty_cow_nnode(c, nnode); |
1588 | if (IS_ERR(nnode)) | 1588 | if (IS_ERR(nnode)) |
1589 | return ERR_PTR(PTR_ERR(nnode)); | 1589 | return ERR_CAST(nnode); |
1590 | i = lnum - c->main_first; | 1590 | i = lnum - c->main_first; |
1591 | shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; | 1591 | shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT; |
1592 | for (h = 1; h < c->lpt_hght; h++) { | 1592 | for (h = 1; h < c->lpt_hght; h++) { |
@@ -1594,19 +1594,19 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum) | |||
1594 | shft -= UBIFS_LPT_FANOUT_SHIFT; | 1594 | shft -= UBIFS_LPT_FANOUT_SHIFT; |
1595 | nnode = ubifs_get_nnode(c, nnode, iip); | 1595 | nnode = ubifs_get_nnode(c, nnode, iip); |
1596 | if (IS_ERR(nnode)) | 1596 | if (IS_ERR(nnode)) |
1597 | return ERR_PTR(PTR_ERR(nnode)); | 1597 | return ERR_CAST(nnode); |
1598 | nnode = dirty_cow_nnode(c, nnode); | 1598 | nnode = dirty_cow_nnode(c, nnode); |
1599 | if (IS_ERR(nnode)) | 1599 | if (IS_ERR(nnode)) |
1600 | return ERR_PTR(PTR_ERR(nnode)); | 1600 | return ERR_CAST(nnode); |
1601 | } | 1601 | } |
1602 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 1602 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
1603 | shft -= UBIFS_LPT_FANOUT_SHIFT; | 1603 | shft -= UBIFS_LPT_FANOUT_SHIFT; |
1604 | pnode = ubifs_get_pnode(c, nnode, iip); | 1604 | pnode = ubifs_get_pnode(c, nnode, iip); |
1605 | if (IS_ERR(pnode)) | 1605 | if (IS_ERR(pnode)) |
1606 | return ERR_PTR(PTR_ERR(pnode)); | 1606 | return ERR_CAST(pnode); |
1607 | pnode = dirty_cow_pnode(c, pnode); | 1607 | pnode = dirty_cow_pnode(c, pnode); |
1608 | if (IS_ERR(pnode)) | 1608 | if (IS_ERR(pnode)) |
1609 | return ERR_PTR(PTR_ERR(pnode)); | 1609 | return ERR_CAST(pnode); |
1610 | iip = (i & (UBIFS_LPT_FANOUT - 1)); | 1610 | iip = (i & (UBIFS_LPT_FANOUT - 1)); |
1611 | dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, | 1611 | dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, |
1612 | pnode->lprops[iip].free, pnode->lprops[iip].dirty, | 1612 | pnode->lprops[iip].free, pnode->lprops[iip].dirty, |
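The ERR_CAST() conversions in this file (and in lpt_commit.c below) are a readability cleanup: ERR_CAST(p) returns the same encoded errno as ERR_PTR(PTR_ERR(p)) but makes it obvious that an error pointer is simply being passed through a pointer-type change. A minimal hedged example with made-up types:

        #include <linux/err.h>

        struct nnode;                                   /* hypothetical inner type */
        struct pnode;                                   /* hypothetical outer type */
        struct nnode *get_nnode(void);                  /* may return ERR_PTR(-Exxx) */
        struct pnode *nnode_to_pnode(struct nnode *nn);

        static struct pnode *lookup_pnode(void)
        {
                struct nnode *nn = get_nnode();

                if (IS_ERR(nn))
                        return ERR_CAST(nn);    /* same errno, different pointer type */
                return nnode_to_pnode(nn);
        }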
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 13cb7a4237bf..d12535b7fc78 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c | |||
@@ -646,7 +646,7 @@ static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i) | |||
646 | shft -= UBIFS_LPT_FANOUT_SHIFT; | 646 | shft -= UBIFS_LPT_FANOUT_SHIFT; |
647 | nnode = ubifs_get_nnode(c, nnode, iip); | 647 | nnode = ubifs_get_nnode(c, nnode, iip); |
648 | if (IS_ERR(nnode)) | 648 | if (IS_ERR(nnode)) |
649 | return ERR_PTR(PTR_ERR(nnode)); | 649 | return ERR_CAST(nnode); |
650 | } | 650 | } |
651 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); | 651 | iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); |
652 | return ubifs_get_pnode(c, nnode, iip); | 652 | return ubifs_get_pnode(c, nnode, iip); |
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 109c6ea03bb5..daae9e1f5382 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * This file implements functions needed to recover from unclean un-mounts. | 24 | * This file implements functions needed to recover from unclean un-mounts. |
25 | * When UBIFS is mounted, it checks a flag on the master node to determine if | 25 | * When UBIFS is mounted, it checks a flag on the master node to determine if |
26 | * an un-mount was completed successfully. If not, the process of mounting | 26 | * an un-mount was completed successfully. If not, the process of mounting |
27 | * incorparates additional checking and fixing of on-flash data structures. | 27 | * incorporates additional checking and fixing of on-flash data structures. |
28 | * UBIFS always cleans away all remnants of an unclean un-mount, so that | 28 | * UBIFS always cleans away all remnants of an unclean un-mount, so that |
29 | * errors do not accumulate. However UBIFS defers recovery if it is mounted | 29 | * errors do not accumulate. However UBIFS defers recovery if it is mounted |
30 | * read-only, and the flash is not modified in that case. | 30 | * read-only, and the flash is not modified in that case. |
@@ -1063,8 +1063,21 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c) | |||
1063 | } | 1063 | } |
1064 | err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2); | 1064 | err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2); |
1065 | if (err) { | 1065 | if (err) { |
1066 | if (err == -ENOSPC) | 1066 | /* |
1067 | dbg_err("could not find a dirty LEB"); | 1067 | * There are no dirty or empty LEBs subject to here being |
1068 | * enough for the index. Try to use | ||
1069 | * 'ubifs_find_free_leb_for_idx()', which will return any empty | ||
1070 | * LEBs (ignoring index requirements). If the index then | ||
1071 | * doesn't have enough LEBs the recovery commit will fail - | ||
1072 | * which is the same result anyway i.e. recovery fails. So | ||
1073 | * there is no problem ignoring index requirements and just | ||
1074 | * grabbing a free LEB since we have already established there | ||
1075 | * is not a dirty LEB we could have used instead. | ||
1076 | */ | ||
1077 | if (err == -ENOSPC) { | ||
1078 | dbg_rcvry("could not find a dirty LEB"); | ||
1079 | goto find_free; | ||
1080 | } | ||
1068 | return err; | 1081 | return err; |
1069 | } | 1082 | } |
1070 | ubifs_assert(!(lp.flags & LPROPS_INDEX)); | 1083 | ubifs_assert(!(lp.flags & LPROPS_INDEX)); |
@@ -1139,8 +1152,8 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c) | |||
1139 | find_free: | 1152 | find_free: |
1140 | /* | 1153 | /* |
1141 | * There is no GC head LEB or the free space in the GC head LEB is too | 1154 | * There is no GC head LEB or the free space in the GC head LEB is too |
1142 | * small. Allocate gc_lnum by calling 'ubifs_find_free_leb_for_idx()' so | 1155 | * small, or there are no dirty LEBs. Allocate gc_lnum by calling |
1143 | * GC is not run. | 1156 | * 'ubifs_find_free_leb_for_idx()' so GC is not run. |
1144 | */ | 1157 | */ |
1145 | lnum = ubifs_find_free_leb_for_idx(c); | 1158 | lnum = ubifs_find_free_leb_for_idx(c); |
1146 | if (lnum < 0) { | 1159 | if (lnum < 0) { |
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index 02feb59cefca..0b201114a5ad 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c | |||
@@ -277,7 +277,7 @@ static int kick_a_thread(void) | |||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | int ubifs_shrinker(int nr, gfp_t gfp_mask) | 280 | int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask) |
281 | { | 281 | { |
282 | int freed, contention = 0; | 282 | int freed, contention = 0; |
283 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); | 283 | long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 4d2f2157dd3f..5fc5a0988970 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1307,6 +1307,8 @@ static int mount_ubifs(struct ubifs_info *c) | |||
1307 | if (err) | 1307 | if (err) |
1308 | goto out_orphans; | 1308 | goto out_orphans; |
1309 | err = ubifs_rcvry_gc_commit(c); | 1309 | err = ubifs_rcvry_gc_commit(c); |
1310 | if (err) | ||
1311 | goto out_orphans; | ||
1310 | } else { | 1312 | } else { |
1311 | err = take_gc_lnum(c); | 1313 | err = take_gc_lnum(c); |
1312 | if (err) | 1314 | if (err) |
@@ -1318,7 +1320,7 @@ static int mount_ubifs(struct ubifs_info *c) | |||
1318 | */ | 1320 | */ |
1319 | err = ubifs_leb_unmap(c, c->gc_lnum); | 1321 | err = ubifs_leb_unmap(c, c->gc_lnum); |
1320 | if (err) | 1322 | if (err) |
1321 | return err; | 1323 | goto out_orphans; |
1322 | } | 1324 | } |
1323 | 1325 | ||
1324 | err = dbg_check_lprops(c); | 1326 | err = dbg_check_lprops(c); |
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 2eef553d50c8..04310878f449 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h | |||
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot); | |||
1575 | int ubifs_tnc_end_commit(struct ubifs_info *c); | 1575 | int ubifs_tnc_end_commit(struct ubifs_info *c); |
1576 | 1576 | ||
1577 | /* shrinker.c */ | 1577 | /* shrinker.c */ |
1578 | int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask); | 1578 | int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); |
1579 | 1579 | ||
1580 | /* commit.c */ | 1580 | /* commit.c */ |
1581 | int ubifs_bg_thread(void *info); | 1581 | int ubifs_bg_thread(void *info); |
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index c8fb13f83b3f..0dce969d6cad 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile | |||
@@ -87,11 +87,9 @@ xfs-y += xfs_alloc.o \ | |||
87 | xfs_trans_buf.o \ | 87 | xfs_trans_buf.o \ |
88 | xfs_trans_extfree.o \ | 88 | xfs_trans_extfree.o \ |
89 | xfs_trans_inode.o \ | 89 | xfs_trans_inode.o \ |
90 | xfs_trans_item.o \ | ||
91 | xfs_utils.o \ | 90 | xfs_utils.o \ |
92 | xfs_vnodeops.o \ | 91 | xfs_vnodeops.o \ |
93 | xfs_rw.o \ | 92 | xfs_rw.o |
94 | xfs_dmops.o | ||
95 | 93 | ||
96 | xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o | 94 | xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o |
97 | 95 | ||
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 9f769b5b38fc..b2771862fd3d 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c | |||
@@ -225,7 +225,7 @@ xfs_check_acl(struct inode *inode, int mask) | |||
225 | struct posix_acl *acl; | 225 | struct posix_acl *acl; |
226 | int error = -EAGAIN; | 226 | int error = -EAGAIN; |
227 | 227 | ||
228 | xfs_itrace_entry(ip); | 228 | trace_xfs_check_acl(ip); |
229 | 229 | ||
230 | /* | 230 | /* |
231 | * If there is no attribute fork no ACL exists on this inode and | 231 | * If there is no attribute fork no ACL exists on this inode and |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 34640d6dbdcb..d24e78f32f3e 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -21,19 +21,12 @@ | |||
21 | #include "xfs_inum.h" | 21 | #include "xfs_inum.h" |
22 | #include "xfs_sb.h" | 22 | #include "xfs_sb.h" |
23 | #include "xfs_ag.h" | 23 | #include "xfs_ag.h" |
24 | #include "xfs_dir2.h" | ||
25 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
26 | #include "xfs_dmapi.h" | ||
27 | #include "xfs_mount.h" | 25 | #include "xfs_mount.h" |
28 | #include "xfs_bmap_btree.h" | 26 | #include "xfs_bmap_btree.h" |
29 | #include "xfs_alloc_btree.h" | ||
30 | #include "xfs_ialloc_btree.h" | ||
31 | #include "xfs_dir2_sf.h" | ||
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 27 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 28 | #include "xfs_inode.h" |
35 | #include "xfs_alloc.h" | 29 | #include "xfs_alloc.h" |
36 | #include "xfs_btree.h" | ||
37 | #include "xfs_error.h" | 30 | #include "xfs_error.h" |
38 | #include "xfs_rw.h" | 31 | #include "xfs_rw.h" |
39 | #include "xfs_iomap.h" | 32 | #include "xfs_iomap.h" |
@@ -92,18 +85,15 @@ void | |||
92 | xfs_count_page_state( | 85 | xfs_count_page_state( |
93 | struct page *page, | 86 | struct page *page, |
94 | int *delalloc, | 87 | int *delalloc, |
95 | int *unmapped, | ||
96 | int *unwritten) | 88 | int *unwritten) |
97 | { | 89 | { |
98 | struct buffer_head *bh, *head; | 90 | struct buffer_head *bh, *head; |
99 | 91 | ||
100 | *delalloc = *unmapped = *unwritten = 0; | 92 | *delalloc = *unwritten = 0; |
101 | 93 | ||
102 | bh = head = page_buffers(page); | 94 | bh = head = page_buffers(page); |
103 | do { | 95 | do { |
104 | if (buffer_uptodate(bh) && !buffer_mapped(bh)) | 96 | if (buffer_unwritten(bh)) |
105 | (*unmapped) = 1; | ||
106 | else if (buffer_unwritten(bh)) | ||
107 | (*unwritten) = 1; | 97 | (*unwritten) = 1; |
108 | else if (buffer_delay(bh)) | 98 | else if (buffer_delay(bh)) |
109 | (*delalloc) = 1; | 99 | (*delalloc) = 1; |
@@ -212,23 +202,17 @@ xfs_setfilesize( | |||
212 | } | 202 | } |
213 | 203 | ||
214 | /* | 204 | /* |
215 | * Schedule IO completion handling on a xfsdatad if this was | 205 | * Schedule IO completion handling on the final put of an ioend. |
216 | * the final hold on this ioend. If we are asked to wait, | ||
217 | * flush the workqueue. | ||
218 | */ | 206 | */ |
219 | STATIC void | 207 | STATIC void |
220 | xfs_finish_ioend( | 208 | xfs_finish_ioend( |
221 | xfs_ioend_t *ioend, | 209 | struct xfs_ioend *ioend) |
222 | int wait) | ||
223 | { | 210 | { |
224 | if (atomic_dec_and_test(&ioend->io_remaining)) { | 211 | if (atomic_dec_and_test(&ioend->io_remaining)) { |
225 | struct workqueue_struct *wq; | 212 | if (ioend->io_type == IO_UNWRITTEN) |
226 | 213 | queue_work(xfsconvertd_workqueue, &ioend->io_work); | |
227 | wq = (ioend->io_type == IO_UNWRITTEN) ? | 214 | else |
228 | xfsconvertd_workqueue : xfsdatad_workqueue; | 215 | queue_work(xfsdatad_workqueue, &ioend->io_work); |
229 | queue_work(wq, &ioend->io_work); | ||
230 | if (wait) | ||
231 | flush_workqueue(wq); | ||
232 | } | 216 | } |
233 | } | 217 | } |
234 | 218 | ||
@@ -272,11 +256,25 @@ xfs_end_io( | |||
272 | */ | 256 | */ |
273 | if (error == EAGAIN) { | 257 | if (error == EAGAIN) { |
274 | atomic_inc(&ioend->io_remaining); | 258 | atomic_inc(&ioend->io_remaining); |
275 | xfs_finish_ioend(ioend, 0); | 259 | xfs_finish_ioend(ioend); |
276 | /* ensure we don't spin on blocked ioends */ | 260 | /* ensure we don't spin on blocked ioends */ |
277 | delay(1); | 261 | delay(1); |
278 | } else | 262 | } else { |
263 | if (ioend->io_iocb) | ||
264 | aio_complete(ioend->io_iocb, ioend->io_result, 0); | ||
279 | xfs_destroy_ioend(ioend); | 265 | xfs_destroy_ioend(ioend); |
266 | } | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * Call IO completion handling in caller context on the final put of an ioend. | ||
271 | */ | ||
272 | STATIC void | ||
273 | xfs_finish_ioend_sync( | ||
274 | struct xfs_ioend *ioend) | ||
275 | { | ||
276 | if (atomic_dec_and_test(&ioend->io_remaining)) | ||
277 | xfs_end_io(&ioend->io_work); | ||
280 | } | 278 | } |
281 | 279 | ||
282 | /* | 280 | /* |
@@ -309,6 +307,8 @@ xfs_alloc_ioend( | |||
309 | atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); | 307 | atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); |
310 | ioend->io_offset = 0; | 308 | ioend->io_offset = 0; |
311 | ioend->io_size = 0; | 309 | ioend->io_size = 0; |
310 | ioend->io_iocb = NULL; | ||
311 | ioend->io_result = 0; | ||
312 | 312 | ||
313 | INIT_WORK(&ioend->io_work, xfs_end_io); | 313 | INIT_WORK(&ioend->io_work, xfs_end_io); |
314 | return ioend; | 314 | return ioend; |
@@ -358,7 +358,7 @@ xfs_end_bio( | |||
358 | bio->bi_end_io = NULL; | 358 | bio->bi_end_io = NULL; |
359 | bio_put(bio); | 359 | bio_put(bio); |
360 | 360 | ||
361 | xfs_finish_ioend(ioend, 0); | 361 | xfs_finish_ioend(ioend); |
362 | } | 362 | } |
363 | 363 | ||
364 | STATIC void | 364 | STATIC void |
@@ -500,7 +500,7 @@ xfs_submit_ioend( | |||
500 | } | 500 | } |
501 | if (bio) | 501 | if (bio) |
502 | xfs_submit_ioend_bio(wbc, ioend, bio); | 502 | xfs_submit_ioend_bio(wbc, ioend, bio); |
503 | xfs_finish_ioend(ioend, 0); | 503 | xfs_finish_ioend(ioend); |
504 | } while ((ioend = next) != NULL); | 504 | } while ((ioend = next) != NULL); |
505 | } | 505 | } |
506 | 506 | ||
@@ -614,31 +614,30 @@ xfs_map_at_offset( | |||
614 | STATIC unsigned int | 614 | STATIC unsigned int |
615 | xfs_probe_page( | 615 | xfs_probe_page( |
616 | struct page *page, | 616 | struct page *page, |
617 | unsigned int pg_offset, | 617 | unsigned int pg_offset) |
618 | int mapped) | ||
619 | { | 618 | { |
619 | struct buffer_head *bh, *head; | ||
620 | int ret = 0; | 620 | int ret = 0; |
621 | 621 | ||
622 | if (PageWriteback(page)) | 622 | if (PageWriteback(page)) |
623 | return 0; | 623 | return 0; |
624 | if (!PageDirty(page)) | ||
625 | return 0; | ||
626 | if (!page->mapping) | ||
627 | return 0; | ||
628 | if (!page_has_buffers(page)) | ||
629 | return 0; | ||
624 | 630 | ||
625 | if (page->mapping && PageDirty(page)) { | 631 | bh = head = page_buffers(page); |
626 | if (page_has_buffers(page)) { | 632 | do { |
627 | struct buffer_head *bh, *head; | 633 | if (!buffer_uptodate(bh)) |
628 | 634 | break; | |
629 | bh = head = page_buffers(page); | 635 | if (!buffer_mapped(bh)) |
630 | do { | 636 | break; |
631 | if (!buffer_uptodate(bh)) | 637 | ret += bh->b_size; |
632 | break; | 638 | if (ret >= pg_offset) |
633 | if (mapped != buffer_mapped(bh)) | 639 | break; |
634 | break; | 640 | } while ((bh = bh->b_this_page) != head); |
635 | ret += bh->b_size; | ||
636 | if (ret >= pg_offset) | ||
637 | break; | ||
638 | } while ((bh = bh->b_this_page) != head); | ||
639 | } else | ||
640 | ret = mapped ? 0 : PAGE_CACHE_SIZE; | ||
641 | } | ||
642 | 641 | ||
643 | return ret; | 642 | return ret; |
644 | } | 643 | } |
@@ -648,8 +647,7 @@ xfs_probe_cluster( | |||
648 | struct inode *inode, | 647 | struct inode *inode, |
649 | struct page *startpage, | 648 | struct page *startpage, |
650 | struct buffer_head *bh, | 649 | struct buffer_head *bh, |
651 | struct buffer_head *head, | 650 | struct buffer_head *head) |
652 | int mapped) | ||
653 | { | 651 | { |
654 | struct pagevec pvec; | 652 | struct pagevec pvec; |
655 | pgoff_t tindex, tlast, tloff; | 653 | pgoff_t tindex, tlast, tloff; |
@@ -658,7 +656,7 @@ xfs_probe_cluster( | |||
658 | 656 | ||
659 | /* First sum forwards in this page */ | 657 | /* First sum forwards in this page */ |
660 | do { | 658 | do { |
661 | if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh))) | 659 | if (!buffer_uptodate(bh) || !buffer_mapped(bh)) |
662 | return total; | 660 | return total; |
663 | total += bh->b_size; | 661 | total += bh->b_size; |
664 | } while ((bh = bh->b_this_page) != head); | 662 | } while ((bh = bh->b_this_page) != head); |
@@ -692,7 +690,7 @@ xfs_probe_cluster( | |||
692 | pg_offset = PAGE_CACHE_SIZE; | 690 | pg_offset = PAGE_CACHE_SIZE; |
693 | 691 | ||
694 | if (page->index == tindex && trylock_page(page)) { | 692 | if (page->index == tindex && trylock_page(page)) { |
695 | pg_len = xfs_probe_page(page, pg_offset, mapped); | 693 | pg_len = xfs_probe_page(page, pg_offset); |
696 | unlock_page(page); | 694 | unlock_page(page); |
697 | } | 695 | } |
698 | 696 | ||
@@ -761,7 +759,6 @@ xfs_convert_page( | |||
761 | struct xfs_bmbt_irec *imap, | 759 | struct xfs_bmbt_irec *imap, |
762 | xfs_ioend_t **ioendp, | 760 | xfs_ioend_t **ioendp, |
763 | struct writeback_control *wbc, | 761 | struct writeback_control *wbc, |
764 | int startio, | ||
765 | int all_bh) | 762 | int all_bh) |
766 | { | 763 | { |
767 | struct buffer_head *bh, *head; | 764 | struct buffer_head *bh, *head; |
@@ -832,19 +829,14 @@ xfs_convert_page( | |||
832 | ASSERT(imap->br_startblock != DELAYSTARTBLOCK); | 829 | ASSERT(imap->br_startblock != DELAYSTARTBLOCK); |
833 | 830 | ||
834 | xfs_map_at_offset(inode, bh, imap, offset); | 831 | xfs_map_at_offset(inode, bh, imap, offset); |
835 | if (startio) { | 832 | xfs_add_to_ioend(inode, bh, offset, type, |
836 | xfs_add_to_ioend(inode, bh, offset, | 833 | ioendp, done); |
837 | type, ioendp, done); | 834 | |
838 | } else { | ||
839 | set_buffer_dirty(bh); | ||
840 | unlock_buffer(bh); | ||
841 | mark_buffer_dirty(bh); | ||
842 | } | ||
843 | page_dirty--; | 835 | page_dirty--; |
844 | count++; | 836 | count++; |
845 | } else { | 837 | } else { |
846 | type = IO_NEW; | 838 | type = IO_NEW; |
847 | if (buffer_mapped(bh) && all_bh && startio) { | 839 | if (buffer_mapped(bh) && all_bh) { |
848 | lock_buffer(bh); | 840 | lock_buffer(bh); |
849 | xfs_add_to_ioend(inode, bh, offset, | 841 | xfs_add_to_ioend(inode, bh, offset, |
850 | type, ioendp, done); | 842 | type, ioendp, done); |
@@ -859,14 +851,12 @@ xfs_convert_page( | |||
859 | if (uptodate && bh == head) | 851 | if (uptodate && bh == head) |
860 | SetPageUptodate(page); | 852 | SetPageUptodate(page); |
861 | 853 | ||
862 | if (startio) { | 854 | if (count) { |
863 | if (count) { | 855 | wbc->nr_to_write--; |
864 | wbc->nr_to_write--; | 856 | if (wbc->nr_to_write <= 0) |
865 | if (wbc->nr_to_write <= 0) | 857 | done = 1; |
866 | done = 1; | ||
867 | } | ||
868 | xfs_start_page_writeback(page, !page_dirty, count); | ||
869 | } | 858 | } |
859 | xfs_start_page_writeback(page, !page_dirty, count); | ||
870 | 860 | ||
871 | return done; | 861 | return done; |
872 | fail_unlock_page: | 862 | fail_unlock_page: |
@@ -886,7 +876,6 @@ xfs_cluster_write( | |||
886 | struct xfs_bmbt_irec *imap, | 876 | struct xfs_bmbt_irec *imap, |
887 | xfs_ioend_t **ioendp, | 877 | xfs_ioend_t **ioendp, |
888 | struct writeback_control *wbc, | 878 | struct writeback_control *wbc, |
889 | int startio, | ||
890 | int all_bh, | 879 | int all_bh, |
891 | pgoff_t tlast) | 880 | pgoff_t tlast) |
892 | { | 881 | { |
@@ -902,7 +891,7 @@ xfs_cluster_write( | |||
902 | 891 | ||
903 | for (i = 0; i < pagevec_count(&pvec); i++) { | 892 | for (i = 0; i < pagevec_count(&pvec); i++) { |
904 | done = xfs_convert_page(inode, pvec.pages[i], tindex++, | 893 | done = xfs_convert_page(inode, pvec.pages[i], tindex++, |
905 | imap, ioendp, wbc, startio, all_bh); | 894 | imap, ioendp, wbc, all_bh); |
906 | if (done) | 895 | if (done) |
907 | break; | 896 | break; |
908 | } | 897 | } |
@@ -981,7 +970,7 @@ xfs_aops_discard_page( | |||
981 | */ | 970 | */ |
982 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, | 971 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, |
983 | XFS_BMAPI_ENTIRE, NULL, 0, &imap, | 972 | XFS_BMAPI_ENTIRE, NULL, 0, &imap, |
984 | &nimaps, NULL, NULL); | 973 | &nimaps, NULL); |
985 | 974 | ||
986 | if (error) { | 975 | if (error) { |
987 | /* something screwed, just bail */ | 976 | /* something screwed, just bail */ |
@@ -1009,7 +998,7 @@ xfs_aops_discard_page( | |||
1009 | */ | 998 | */ |
1010 | xfs_bmap_init(&flist, &firstblock); | 999 | xfs_bmap_init(&flist, &firstblock); |
1011 | error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, | 1000 | error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, |
1012 | &flist, NULL, &done); | 1001 | &flist, &done); |
1013 | 1002 | ||
1014 | ASSERT(!flist.xbf_count && !flist.xbf_first); | 1003 | ASSERT(!flist.xbf_count && !flist.xbf_first); |
1015 | if (error) { | 1004 | if (error) { |
@@ -1032,50 +1021,66 @@ out_invalidate: | |||
1032 | } | 1021 | } |
1033 | 1022 | ||
1034 | /* | 1023 | /* |
1035 | * Calling this without startio set means we are being asked to make a dirty | 1024 | * Write out a dirty page. |
1036 | * page ready for freeing it's buffers. When called with startio set then | 1025 | * |
1037 | * we are coming from writepage. | 1026 | * For delalloc space on the page we need to allocate space and flush it. |
1027 | * For unwritten space on the page we need to start the conversion to | ||
1028 | * regular allocated space. | ||
1029 | * For any other dirty buffer heads on the page we should flush them. | ||
1038 | * | 1030 | * |
1039 | * When called with startio set it is important that we write the WHOLE | 1031 | * If we detect that a transaction would be required to flush the page, we |
1040 | * page if possible. | 1032 | * have to check the process flags first, if we are already in a transaction |
1041 | * The bh->b_state's cannot know if any of the blocks or which block for | 1033 | * or disk I/O during allocations is off, we need to fail the writepage and |
1042 | * that matter are dirty due to mmap writes, and therefore bh uptodate is | 1034 | * redirty the page. |
1043 | * only valid if the page itself isn't completely uptodate. Some layers | ||
1044 | * may clear the page dirty flag prior to calling write page, under the | ||
1045 | * assumption the entire page will be written out; by not writing out the | ||
1046 | * whole page the page can be reused before all valid dirty data is | ||
1047 | * written out. Note: in the case of a page that has been dirty'd by | ||
1048 | * mapwrite and but partially setup by block_prepare_write the | ||
1049 | * bh->b_states's will not agree and only ones setup by BPW/BCW will have | ||
1050 | * valid state, thus the whole page must be written out thing. | ||
1051 | */ | 1035 | */ |
1052 | |||
1053 | STATIC int | 1036 | STATIC int |
1054 | xfs_page_state_convert( | 1037 | xfs_vm_writepage( |
1055 | struct inode *inode, | 1038 | struct page *page, |
1056 | struct page *page, | 1039 | struct writeback_control *wbc) |
1057 | struct writeback_control *wbc, | ||
1058 | int startio, | ||
1059 | int unmapped) /* also implies page uptodate */ | ||
1060 | { | 1040 | { |
1041 | struct inode *inode = page->mapping->host; | ||
1042 | int delalloc, unwritten; | ||
1061 | struct buffer_head *bh, *head; | 1043 | struct buffer_head *bh, *head; |
1062 | struct xfs_bmbt_irec imap; | 1044 | struct xfs_bmbt_irec imap; |
1063 | xfs_ioend_t *ioend = NULL, *iohead = NULL; | 1045 | xfs_ioend_t *ioend = NULL, *iohead = NULL; |
1064 | loff_t offset; | 1046 | loff_t offset; |
1065 | unsigned long p_offset = 0; | ||
1066 | unsigned int type; | 1047 | unsigned int type; |
1067 | __uint64_t end_offset; | 1048 | __uint64_t end_offset; |
1068 | pgoff_t end_index, last_index; | 1049 | pgoff_t end_index, last_index; |
1069 | ssize_t size, len; | 1050 | ssize_t size, len; |
1070 | int flags, err, imap_valid = 0, uptodate = 1; | 1051 | int flags, err, imap_valid = 0, uptodate = 1; |
1071 | int page_dirty, count = 0; | 1052 | int count = 0; |
1072 | int trylock = 0; | 1053 | int all_bh = 0; |
1073 | int all_bh = unmapped; | ||
1074 | 1054 | ||
1075 | if (startio) { | 1055 | trace_xfs_writepage(inode, page, 0); |
1076 | if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) | 1056 | |
1077 | trylock |= BMAPI_TRYLOCK; | 1057 | ASSERT(page_has_buffers(page)); |
1078 | } | 1058 | |
1059 | /* | ||
1060 | * Refuse to write the page out if we are called from reclaim context. | ||
1061 | * | ||
1062 | * This avoids stack overflows when called from deeply used stacks in | ||
1063 | * random callers for direct reclaim or memcg reclaim. We explicitly | ||
1064 | * allow reclaim from kswapd as the stack usage there is relatively low. | ||
1065 | * | ||
1066 | * This should really be done by the core VM, but until that happens | ||
1067 | * filesystems like XFS, btrfs and ext4 have to take care of this | ||
1068 | * by themselves. | ||
1069 | */ | ||
1070 | if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) | ||
1071 | goto out_fail; | ||
1072 | |||
1073 | /* | ||
1074 | * We need a transaction if there are delalloc or unwritten buffers | ||
1075 | * on the page. | ||
1076 | * | ||
1077 | * If we need a transaction and the process flags say we are already | ||
1078 | * in a transaction, or no IO is allowed then mark the page dirty | ||
1079 | * again and leave the page as is. | ||
1080 | */ | ||
1081 | xfs_count_page_state(page, &delalloc, &unwritten); | ||
1082 | if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) | ||
1083 | goto out_fail; | ||
1079 | 1084 | ||
1080 | /* Is this page beyond the end of the file? */ | 1085 | /* Is this page beyond the end of the file? */ |
1081 | offset = i_size_read(inode); | 1086 | offset = i_size_read(inode); |
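One detail of the new reclaim check above is worth spelling out: kswapd runs with both PF_MEMALLOC and PF_KSWAPD set, while direct and memcg reclaim set only PF_MEMALLOC, so masking with both bits and comparing against PF_MEMALLOC matches exactly the deep-stack callers the comment describes while still letting kswapd write pages back. A hedged sketch of the equivalent test (the redirty path is assumed, mirroring what out_fail is expected to do):

        /* true only for direct/memcg reclaim; false for kswapd and ordinary writeback */
        if ((current->flags & (PF_MEMALLOC | PF_KSWAPD)) == PF_MEMALLOC) {
                redirty_page_for_writepage(wbc, page); /* keep the page dirty */
                unlock_page(page);
                return 0;
        }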
@@ -1084,50 +1089,33 @@ xfs_page_state_convert( | |||
1084 | if (page->index >= end_index) { | 1089 | if (page->index >= end_index) { |
1085 | if ((page->index >= end_index + 1) || | 1090 | if ((page->index >= end_index + 1) || |
1086 | !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { | 1091 | !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { |
1087 | if (startio) | 1092 | unlock_page(page); |
1088 | unlock_page(page); | ||
1089 | return 0; | 1093 | return 0; |
1090 | } | 1094 | } |
1091 | } | 1095 | } |
1092 | 1096 | ||
1093 | /* | ||
1094 | * page_dirty is initially a count of buffers on the page before | ||
1095 | * EOF and is decremented as we move each into a cleanable state. | ||
1096 | * | ||
1097 | * Derivation: | ||
1098 | * | ||
1099 | * End offset is the highest offset that this page should represent. | ||
1100 | * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) | ||
1101 | * will evaluate non-zero and be less than PAGE_CACHE_SIZE and | ||
1102 | * hence give us the correct page_dirty count. On any other page, | ||
1103 | * it will be zero and in that case we need page_dirty to be the | ||
1104 | * count of buffers on the page. | ||
1105 | */ | ||
1106 | end_offset = min_t(unsigned long long, | 1097 | end_offset = min_t(unsigned long long, |
1107 | (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); | 1098 | (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, |
1099 | offset); | ||
1108 | len = 1 << inode->i_blkbits; | 1100 | len = 1 << inode->i_blkbits; |
1109 | p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), | ||
1110 | PAGE_CACHE_SIZE); | ||
1111 | p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; | ||
1112 | page_dirty = p_offset / len; | ||
1113 | 1101 | ||
1114 | bh = head = page_buffers(page); | 1102 | bh = head = page_buffers(page); |
1115 | offset = page_offset(page); | 1103 | offset = page_offset(page); |
1116 | flags = BMAPI_READ; | 1104 | flags = BMAPI_READ; |
1117 | type = IO_NEW; | 1105 | type = IO_NEW; |
1118 | 1106 | ||
1119 | /* TODO: cleanup count and page_dirty */ | ||
1120 | |||
1121 | do { | 1107 | do { |
1122 | if (offset >= end_offset) | 1108 | if (offset >= end_offset) |
1123 | break; | 1109 | break; |
1124 | if (!buffer_uptodate(bh)) | 1110 | if (!buffer_uptodate(bh)) |
1125 | uptodate = 0; | 1111 | uptodate = 0; |
1126 | if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { | 1112 | |
1127 | /* | 1113 | /* |
1128 | * the iomap is actually still valid, but the ioend | 1114 | * A hole may still be marked uptodate because discard_buffer |
1129 | * isn't. shouldn't happen too often. | 1115 | * leaves the flag set. |
1130 | */ | 1116 | */ |
1117 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { | ||
1118 | ASSERT(!buffer_dirty(bh)); | ||
1131 | imap_valid = 0; | 1119 | imap_valid = 0; |
1132 | continue; | 1120 | continue; |
1133 | } | 1121 | } |
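The hunk above clamps the last byte the page may cover to i_size and walks the page in block-sized steps. A stand-alone sketch of that arithmetic, with the page and block sizes chosen only for illustration:

/* sketch: end_offset = min((index + 1) << PAGE_SHIFT, i_size) */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* 4k pages, for the sketch */

static uint64_t page_end_offset(uint64_t index, uint64_t isize)
{
	uint64_t end = (index + 1) << PAGE_SHIFT;

	return end < isize ? end : isize;
}

int main(void)
{
	uint64_t isize = 10000;		/* file size in bytes */
	unsigned int blkbits = 9;	/* 512-byte blocks */
	uint64_t end = page_end_offset(2, isize);
	uint64_t off;

	/* walk the buffers on page index 2, stopping at EOF */
	for (off = 2ULL << PAGE_SHIFT; off < end; off += 1U << blkbits)
		printf("buffer at offset %llu\n", (unsigned long long)off);
	return 0;
}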
@@ -1135,19 +1123,7 @@ xfs_page_state_convert( | |||
1135 | if (imap_valid) | 1123 | if (imap_valid) |
1136 | imap_valid = xfs_imap_valid(inode, &imap, offset); | 1124 | imap_valid = xfs_imap_valid(inode, &imap, offset); |
1137 | 1125 | ||
1138 | /* | 1126 | if (buffer_unwritten(bh) || buffer_delay(bh)) { |
1139 | * First case, map an unwritten extent and prepare for | ||
1140 | * extent state conversion transaction on completion. | ||
1141 | * | ||
1142 | * Second case, allocate space for a delalloc buffer. | ||
1143 | * We can return EAGAIN here in the release page case. | ||
1144 | * | ||
1145 | * Third case, an unmapped buffer was found, and we are | ||
1146 | * in a path where we need to write the whole page out. | ||
1147 | */ | ||
1148 | if (buffer_unwritten(bh) || buffer_delay(bh) || | ||
1149 | ((buffer_uptodate(bh) || PageUptodate(page)) && | ||
1150 | !buffer_mapped(bh) && (unmapped || startio))) { | ||
1151 | int new_ioend = 0; | 1127 | int new_ioend = 0; |
1152 | 1128 | ||
1153 | /* | 1129 | /* |
@@ -1161,15 +1137,16 @@ xfs_page_state_convert( | |||
1161 | flags = BMAPI_WRITE | BMAPI_IGNSTATE; | 1137 | flags = BMAPI_WRITE | BMAPI_IGNSTATE; |
1162 | } else if (buffer_delay(bh)) { | 1138 | } else if (buffer_delay(bh)) { |
1163 | type = IO_DELAY; | 1139 | type = IO_DELAY; |
1164 | flags = BMAPI_ALLOCATE | trylock; | 1140 | flags = BMAPI_ALLOCATE; |
1165 | } else { | 1141 | |
1166 | type = IO_NEW; | 1142 | if (wbc->sync_mode == WB_SYNC_NONE && |
1167 | flags = BMAPI_WRITE | BMAPI_MMAP; | 1143 | wbc->nonblocking) |
1144 | flags |= BMAPI_TRYLOCK; | ||
1168 | } | 1145 | } |
1169 | 1146 | ||
1170 | if (!imap_valid) { | 1147 | if (!imap_valid) { |
1171 | /* | 1148 | /* |
1172 | * if we didn't have a valid mapping then we | 1149 | * If we didn't have a valid mapping then we |
1173 | * need to ensure that we put the new mapping | 1150 | * need to ensure that we put the new mapping |
1174 | * in a new ioend structure. This needs to be | 1151 | * in a new ioend structure. This needs to be |
1175 | * done to ensure that the ioends correctly | 1152 | * done to ensure that the ioends correctly |
@@ -1177,14 +1154,7 @@ xfs_page_state_convert( | |||
1177 | * for unwritten extent conversion. | 1154 | * for unwritten extent conversion. |
1178 | */ | 1155 | */ |
1179 | new_ioend = 1; | 1156 | new_ioend = 1; |
1180 | if (type == IO_NEW) { | 1157 | err = xfs_map_blocks(inode, offset, len, |
1181 | size = xfs_probe_cluster(inode, | ||
1182 | page, bh, head, 0); | ||
1183 | } else { | ||
1184 | size = len; | ||
1185 | } | ||
1186 | |||
1187 | err = xfs_map_blocks(inode, offset, size, | ||
1188 | &imap, flags); | 1158 | &imap, flags); |
1189 | if (err) | 1159 | if (err) |
1190 | goto error; | 1160 | goto error; |
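With the old trylock variable folded away, the mapping flags for a dirty buffer are now assembled inline just above: unwritten extents get a plain write mapping, delayed-allocation buffers request allocation and, for non-blocking background writeback, add a trylock hint. A hedged sketch of that selection; the flag names mirror the diff but the values are local to the sketch.

/* sketch: choose BMAPI-style flags for an unwritten or delalloc buffer */
#include <stdio.h>

#define BMAPI_WRITE	0x01
#define BMAPI_ALLOCATE	0x02
#define BMAPI_IGNSTATE	0x04
#define BMAPI_TRYLOCK	0x08

static unsigned int map_flags(int unwritten, int delay, int nonblocking)
{
	if (unwritten)
		return BMAPI_WRITE | BMAPI_IGNSTATE;
	if (delay) {
		unsigned int flags = BMAPI_ALLOCATE;

		if (nonblocking)	/* WB_SYNC_NONE, nonblocking writeback */
			flags |= BMAPI_TRYLOCK;
		return flags;
	}
	return 0;
}

int main(void)
{
	printf("unwritten:             %#x\n", map_flags(1, 0, 0));
	printf("delalloc, blocking:    %#x\n", map_flags(0, 1, 0));
	printf("delalloc, nonblocking: %#x\n", map_flags(0, 1, 1));
	return 0;
}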
@@ -1193,19 +1163,11 @@ xfs_page_state_convert( | |||
1193 | } | 1163 | } |
1194 | if (imap_valid) { | 1164 | if (imap_valid) { |
1195 | xfs_map_at_offset(inode, bh, &imap, offset); | 1165 | xfs_map_at_offset(inode, bh, &imap, offset); |
1196 | if (startio) { | 1166 | xfs_add_to_ioend(inode, bh, offset, type, |
1197 | xfs_add_to_ioend(inode, bh, offset, | 1167 | &ioend, new_ioend); |
1198 | type, &ioend, | ||
1199 | new_ioend); | ||
1200 | } else { | ||
1201 | set_buffer_dirty(bh); | ||
1202 | unlock_buffer(bh); | ||
1203 | mark_buffer_dirty(bh); | ||
1204 | } | ||
1205 | page_dirty--; | ||
1206 | count++; | 1168 | count++; |
1207 | } | 1169 | } |
1208 | } else if (buffer_uptodate(bh) && startio) { | 1170 | } else if (buffer_uptodate(bh)) { |
1209 | /* | 1171 | /* |
1210 | * we got here because the buffer is already mapped. | 1172 | * we got here because the buffer is already mapped. |
1211 | * That means it must already have extents allocated | 1173 | * That means it must already have extents allocated |
@@ -1213,8 +1175,7 @@ xfs_page_state_convert( | |||
1213 | */ | 1175 | */ |
1214 | if (!imap_valid || flags != BMAPI_READ) { | 1176 | if (!imap_valid || flags != BMAPI_READ) { |
1215 | flags = BMAPI_READ; | 1177 | flags = BMAPI_READ; |
1216 | size = xfs_probe_cluster(inode, page, bh, | 1178 | size = xfs_probe_cluster(inode, page, bh, head); |
1217 | head, 1); | ||
1218 | err = xfs_map_blocks(inode, offset, size, | 1179 | err = xfs_map_blocks(inode, offset, size, |
1219 | &imap, flags); | 1180 | &imap, flags); |
1220 | if (err) | 1181 | if (err) |
@@ -1233,18 +1194,16 @@ xfs_page_state_convert( | |||
1233 | */ | 1194 | */ |
1234 | type = IO_NEW; | 1195 | type = IO_NEW; |
1235 | if (trylock_buffer(bh)) { | 1196 | if (trylock_buffer(bh)) { |
1236 | ASSERT(buffer_mapped(bh)); | ||
1237 | if (imap_valid) | 1197 | if (imap_valid) |
1238 | all_bh = 1; | 1198 | all_bh = 1; |
1239 | xfs_add_to_ioend(inode, bh, offset, type, | 1199 | xfs_add_to_ioend(inode, bh, offset, type, |
1240 | &ioend, !imap_valid); | 1200 | &ioend, !imap_valid); |
1241 | page_dirty--; | ||
1242 | count++; | 1201 | count++; |
1243 | } else { | 1202 | } else { |
1244 | imap_valid = 0; | 1203 | imap_valid = 0; |
1245 | } | 1204 | } |
1246 | } else if ((buffer_uptodate(bh) || PageUptodate(page)) && | 1205 | } else if (PageUptodate(page)) { |
1247 | (unmapped || startio)) { | 1206 | ASSERT(buffer_mapped(bh)); |
1248 | imap_valid = 0; | 1207 | imap_valid = 0; |
1249 | } | 1208 | } |
1250 | 1209 | ||
@@ -1256,8 +1215,7 @@ xfs_page_state_convert( | |||
1256 | if (uptodate && bh == head) | 1215 | if (uptodate && bh == head) |
1257 | SetPageUptodate(page); | 1216 | SetPageUptodate(page); |
1258 | 1217 | ||
1259 | if (startio) | 1218 | xfs_start_page_writeback(page, 1, count); |
1260 | xfs_start_page_writeback(page, 1, count); | ||
1261 | 1219 | ||
1262 | if (ioend && imap_valid) { | 1220 | if (ioend && imap_valid) { |
1263 | xfs_off_t end_index; | 1221 | xfs_off_t end_index; |
@@ -1275,131 +1233,27 @@ xfs_page_state_convert( | |||
1275 | end_index = last_index; | 1233 | end_index = last_index; |
1276 | 1234 | ||
1277 | xfs_cluster_write(inode, page->index + 1, &imap, &ioend, | 1235 | xfs_cluster_write(inode, page->index + 1, &imap, &ioend, |
1278 | wbc, startio, all_bh, end_index); | 1236 | wbc, all_bh, end_index); |
1279 | } | 1237 | } |
1280 | 1238 | ||
1281 | if (iohead) | 1239 | if (iohead) |
1282 | xfs_submit_ioend(wbc, iohead); | 1240 | xfs_submit_ioend(wbc, iohead); |
1283 | 1241 | ||
1284 | return page_dirty; | 1242 | return 0; |
1285 | 1243 | ||
1286 | error: | 1244 | error: |
1287 | if (iohead) | 1245 | if (iohead) |
1288 | xfs_cancel_ioend(iohead); | 1246 | xfs_cancel_ioend(iohead); |
1289 | 1247 | ||
1290 | /* | 1248 | xfs_aops_discard_page(page); |
1291 | * If it's delalloc and we have nowhere to put it, | 1249 | ClearPageUptodate(page); |
1292 | * throw it away, unless the lower layers told | 1250 | unlock_page(page); |
1293 | * us to try again. | ||
1294 | */ | ||
1295 | if (err != -EAGAIN) { | ||
1296 | if (!unmapped) | ||
1297 | xfs_aops_discard_page(page); | ||
1298 | ClearPageUptodate(page); | ||
1299 | } | ||
1300 | return err; | 1251 | return err; |
1301 | } | ||
1302 | |||
1303 | /* | ||
1304 | * writepage: Called from one of two places: | ||
1305 | * | ||
1306 | * 1. we are flushing a delalloc buffer head. | ||
1307 | * | ||
1308 | * 2. we are writing out a dirty page. Typically the page dirty | ||
1309 | * state is cleared before we get here. In this case is it | ||
1310 | * conceivable we have no buffer heads. | ||
1311 | * | ||
1312 | * For delalloc space on the page we need to allocate space and | ||
1313 | * flush it. For unmapped buffer heads on the page we should | ||
1314 | * allocate space if the page is uptodate. For any other dirty | ||
1315 | * buffer heads on the page we should flush them. | ||
1316 | * | ||
1317 | * If we detect that a transaction would be required to flush | ||
1318 | * the page, we have to check the process flags first, if we | ||
1319 | * are already in a transaction or disk I/O during allocations | ||
1320 | * is off, we need to fail the writepage and redirty the page. | ||
1321 | */ | ||
1322 | |||
1323 | STATIC int | ||
1324 | xfs_vm_writepage( | ||
1325 | struct page *page, | ||
1326 | struct writeback_control *wbc) | ||
1327 | { | ||
1328 | int error; | ||
1329 | int need_trans; | ||
1330 | int delalloc, unmapped, unwritten; | ||
1331 | struct inode *inode = page->mapping->host; | ||
1332 | |||
1333 | trace_xfs_writepage(inode, page, 0); | ||
1334 | |||
1335 | /* | ||
1336 | * Refuse to write the page out if we are called from reclaim context. | ||
1337 | * | ||
1338 | * This is primarily to avoid stack overflows when called from deeply | ||
1339 | * used stacks in random callers for direct reclaim, but disabling | ||
1340 | * reclaim for kswapd is a nice side-effect as kswapd causes rather | ||
1341 | * suboptimal I/O patterns, too. | ||
1342 | * | ||
1343 | * This should really be done by the core VM, but until that happens | ||
1344 | * filesystems like XFS, btrfs and ext4 have to take care of this | ||
1345 | * by themselves. | ||
1346 | */ | ||
1347 | if (current->flags & PF_MEMALLOC) | ||
1348 | goto out_fail; | ||
1349 | |||
1350 | /* | ||
1351 | * We need a transaction if: | ||
1352 | * 1. There are delalloc buffers on the page | ||
1353 | * 2. The page is uptodate and we have unmapped buffers | ||
1354 | * 3. The page is uptodate and we have no buffers | ||
1355 | * 4. There are unwritten buffers on the page | ||
1356 | */ | ||
1357 | |||
1358 | if (!page_has_buffers(page)) { | ||
1359 | unmapped = 1; | ||
1360 | need_trans = 1; | ||
1361 | } else { | ||
1362 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); | ||
1363 | if (!PageUptodate(page)) | ||
1364 | unmapped = 0; | ||
1365 | need_trans = delalloc + unmapped + unwritten; | ||
1366 | } | ||
1367 | |||
1368 | /* | ||
1369 | * If we need a transaction and the process flags say | ||
1370 | * we are already in a transaction, or no IO is allowed | ||
1371 | * then mark the page dirty again and leave the page | ||
1372 | * as is. | ||
1373 | */ | ||
1374 | if (current_test_flags(PF_FSTRANS) && need_trans) | ||
1375 | goto out_fail; | ||
1376 | |||
1377 | /* | ||
1378 | * Delay hooking up buffer heads until we have | ||
1379 | * made our go/no-go decision. | ||
1380 | */ | ||
1381 | if (!page_has_buffers(page)) | ||
1382 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); | ||
1383 | |||
1384 | /* | ||
1385 | * Convert delayed allocate, unwritten or unmapped space | ||
1386 | * to real space and flush out to disk. | ||
1387 | */ | ||
1388 | error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); | ||
1389 | if (error == -EAGAIN) | ||
1390 | goto out_fail; | ||
1391 | if (unlikely(error < 0)) | ||
1392 | goto out_unlock; | ||
1393 | |||
1394 | return 0; | ||
1395 | 1252 | ||
1396 | out_fail: | 1253 | out_fail: |
1397 | redirty_page_for_writepage(wbc, page); | 1254 | redirty_page_for_writepage(wbc, page); |
1398 | unlock_page(page); | 1255 | unlock_page(page); |
1399 | return 0; | 1256 | return 0; |
1400 | out_unlock: | ||
1401 | unlock_page(page); | ||
1402 | return error; | ||
1403 | } | 1257 | } |
1404 | 1258 | ||
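After this hunk the rewritten writepage has three exit paths: success, a "try again later" path that redirties and unlocks the page, and a hard failure that cancels the ioend, discards the page and returns the error. A condensed user-space sketch of that control flow; every helper below is a stub for illustration only, not the kernel API.

/* sketch: the three exit paths of the simplified writepage */
#include <stdio.h>

struct page { int dirty, uptodate, locked; };

static void redirty(struct page *p)      { p->dirty = 1; }
static void unlock_page(struct page *p)  { p->locked = 0; }
static void discard_page(struct page *p) { p->uptodate = 0; }

static int writepage(struct page *p, int in_transaction, int io_error)
{
	if (in_transaction) {		/* cannot start a nested transaction */
		redirty(p);
		unlock_page(p);
		return 0;
	}
	if (io_error) {			/* mapping or allocation failed */
		discard_page(p);
		unlock_page(p);
		return io_error;
	}
	unlock_page(p);			/* I/O submitted */
	return 0;
}

int main(void)
{
	struct page p = { 1, 1, 1 };

	printf("busy:  %d\n", writepage(&p, 1, 0));
	printf("error: %d\n", writepage(&p, 0, -5));
	printf("ok:    %d\n", writepage(&p, 0, 0));
	return 0;
}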
1405 | STATIC int | 1259 | STATIC int |
@@ -1413,65 +1267,27 @@ xfs_vm_writepages( | |||
1413 | 1267 | ||
1414 | /* | 1268 | /* |
1415 | * Called to move a page into cleanable state - and from there | 1269 | * Called to move a page into cleanable state - and from there |
1416 | * to be released. Possibly the page is already clean. We always | 1270 | * to be released. The page should already be clean. We always |
1417 | * have buffer heads in this call. | 1271 | * have buffer heads in this call. |
1418 | * | 1272 | * |
1419 | * Returns 0 if the page is ok to release, 1 otherwise. | 1273 | * Returns 1 if the page is ok to release, 0 otherwise. |
1420 | * | ||
1421 | * Possible scenarios are: | ||
1422 | * | ||
1423 | * 1. We are being called to release a page which has been written | ||
1424 | * to via regular I/O. buffer heads will be dirty and possibly | ||
1425 | * delalloc. If no delalloc buffer heads in this case then we | ||
1426 | * can just return zero. | ||
1427 | * | ||
1428 | * 2. We are called to release a page which has been written via | ||
1429 | * mmap, all we need to do is ensure there is no delalloc | ||
1430 | * state in the buffer heads, if not we can let the caller | ||
1431 | * free them and we should come back later via writepage. | ||
1432 | */ | 1274 | */ |
1433 | STATIC int | 1275 | STATIC int |
1434 | xfs_vm_releasepage( | 1276 | xfs_vm_releasepage( |
1435 | struct page *page, | 1277 | struct page *page, |
1436 | gfp_t gfp_mask) | 1278 | gfp_t gfp_mask) |
1437 | { | 1279 | { |
1438 | struct inode *inode = page->mapping->host; | 1280 | int delalloc, unwritten; |
1439 | int dirty, delalloc, unmapped, unwritten; | ||
1440 | struct writeback_control wbc = { | ||
1441 | .sync_mode = WB_SYNC_ALL, | ||
1442 | .nr_to_write = 1, | ||
1443 | }; | ||
1444 | 1281 | ||
1445 | trace_xfs_releasepage(inode, page, 0); | 1282 | trace_xfs_releasepage(page->mapping->host, page, 0); |
1446 | |||
1447 | if (!page_has_buffers(page)) | ||
1448 | return 0; | ||
1449 | 1283 | ||
1450 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); | 1284 | xfs_count_page_state(page, &delalloc, &unwritten); |
1451 | if (!delalloc && !unwritten) | ||
1452 | goto free_buffers; | ||
1453 | 1285 | ||
1454 | if (!(gfp_mask & __GFP_FS)) | 1286 | if (WARN_ON(delalloc)) |
1455 | return 0; | 1287 | return 0; |
1456 | 1288 | if (WARN_ON(unwritten)) | |
1457 | /* If we are already inside a transaction or the thread cannot | ||
1458 | * do I/O, we cannot release this page. | ||
1459 | */ | ||
1460 | if (current_test_flags(PF_FSTRANS)) | ||
1461 | return 0; | 1289 | return 0; |
1462 | 1290 | ||
1463 | /* | ||
1464 | * Convert delalloc space to real space, do not flush the | ||
1465 | * data out to disk, that will be done by the caller. | ||
1466 | * Never need to allocate space here - we will always | ||
1467 | * come back to writepage in that case. | ||
1468 | */ | ||
1469 | dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); | ||
1470 | if (dirty == 0 && !unwritten) | ||
1471 | goto free_buffers; | ||
1472 | return 0; | ||
1473 | |||
1474 | free_buffers: | ||
1475 | return try_to_free_buffers(page); | 1291 | return try_to_free_buffers(page); |
1476 | } | 1292 | } |
1477 | 1293 | ||
@@ -1481,9 +1297,9 @@ __xfs_get_blocks( | |||
1481 | sector_t iblock, | 1297 | sector_t iblock, |
1482 | struct buffer_head *bh_result, | 1298 | struct buffer_head *bh_result, |
1483 | int create, | 1299 | int create, |
1484 | int direct, | 1300 | int direct) |
1485 | bmapi_flags_t flags) | ||
1486 | { | 1301 | { |
1302 | int flags = create ? BMAPI_WRITE : BMAPI_READ; | ||
1487 | struct xfs_bmbt_irec imap; | 1303 | struct xfs_bmbt_irec imap; |
1488 | xfs_off_t offset; | 1304 | xfs_off_t offset; |
1489 | ssize_t size; | 1305 | ssize_t size; |
@@ -1498,8 +1314,11 @@ __xfs_get_blocks( | |||
1498 | if (!create && direct && offset >= i_size_read(inode)) | 1314 | if (!create && direct && offset >= i_size_read(inode)) |
1499 | return 0; | 1315 | return 0; |
1500 | 1316 | ||
1501 | error = xfs_iomap(XFS_I(inode), offset, size, | 1317 | if (direct && create) |
1502 | create ? flags : BMAPI_READ, &imap, &nimap, &new); | 1318 | flags |= BMAPI_DIRECT; |
1319 | |||
1320 | error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap, | ||
1321 | &new); | ||
1503 | if (error) | 1322 | if (error) |
1504 | return -error; | 1323 | return -error; |
1505 | if (nimap == 0) | 1324 | if (nimap == 0) |
@@ -1579,8 +1398,7 @@ xfs_get_blocks( | |||
1579 | struct buffer_head *bh_result, | 1398 | struct buffer_head *bh_result, |
1580 | int create) | 1399 | int create) |
1581 | { | 1400 | { |
1582 | return __xfs_get_blocks(inode, iblock, | 1401 | return __xfs_get_blocks(inode, iblock, bh_result, create, 0); |
1583 | bh_result, create, 0, BMAPI_WRITE); | ||
1584 | } | 1402 | } |
1585 | 1403 | ||
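The buffered and direct get_blocks wrappers around this point now differ only in the trailing "direct" argument, and __xfs_get_blocks derives the mapping flags itself. A tiny sketch of that derivation; the flag values are illustrative.

/* sketch: derive mapping flags from the create/direct arguments */
#include <stdio.h>

#define BMAPI_READ	0x1
#define BMAPI_WRITE	0x2
#define BMAPI_DIRECT	0x4

static int get_blocks_flags(int create, int direct)
{
	int flags = create ? BMAPI_WRITE : BMAPI_READ;

	if (direct && create)
		flags |= BMAPI_DIRECT;
	return flags;
}

int main(void)
{
	printf("buffered read:  %#x\n", get_blocks_flags(0, 0));
	printf("buffered write: %#x\n", get_blocks_flags(1, 0));
	printf("direct write:   %#x\n", get_blocks_flags(1, 1));
	return 0;
}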
1586 | STATIC int | 1404 | STATIC int |
@@ -1590,61 +1408,59 @@ xfs_get_blocks_direct( | |||
1590 | struct buffer_head *bh_result, | 1408 | struct buffer_head *bh_result, |
1591 | int create) | 1409 | int create) |
1592 | { | 1410 | { |
1593 | return __xfs_get_blocks(inode, iblock, | 1411 | return __xfs_get_blocks(inode, iblock, bh_result, create, 1); |
1594 | bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); | ||
1595 | } | 1412 | } |
1596 | 1413 | ||
1414 | /* | ||
1415 | * Complete a direct I/O write request. | ||
1416 | * | ||
1417 | * If the private argument is non-NULL __xfs_get_blocks signals us that we | ||
1418 | * need to issue a transaction to convert the range from unwritten to written | ||
1419 | * extents. In case this is regular synchronous I/O we just call xfs_end_io | ||
1420 | * to do this and we are done. But in case this was a successful AIO | ||
1421 | * request this handler is called from interrupt context, from which we | ||
1422 | * can't start transactions. In that case offload the I/O completion to | ||
1423 | * the workqueues we also use for buffered I/O completion. | ||
1424 | */ | ||
1597 | STATIC void | 1425 | STATIC void |
1598 | xfs_end_io_direct( | 1426 | xfs_end_io_direct_write( |
1599 | struct kiocb *iocb, | 1427 | struct kiocb *iocb, |
1600 | loff_t offset, | 1428 | loff_t offset, |
1601 | ssize_t size, | 1429 | ssize_t size, |
1602 | void *private) | 1430 | void *private, |
1431 | int ret, | ||
1432 | bool is_async) | ||
1603 | { | 1433 | { |
1604 | xfs_ioend_t *ioend = iocb->private; | 1434 | struct xfs_ioend *ioend = iocb->private; |
1605 | 1435 | ||
1606 | /* | 1436 | /* |
1607 | * Non-NULL private data means we need to issue a transaction to | 1437 | * blockdev_direct_IO can return an error even after the I/O |
1608 | * convert a range from unwritten to written extents. This needs | 1438 | * completion handler was called. Thus we need to protect |
1609 | * to happen from process context but aio+dio I/O completion | 1439 | * against double-freeing. |
1610 | * happens from irq context so we need to defer it to a workqueue. | ||
1611 | * This is not necessary for synchronous direct I/O, but we do | ||
1612 | * it anyway to keep the code uniform and simpler. | ||
1613 | * | ||
1614 | * Well, if only it were that simple. Because synchronous direct I/O | ||
1615 | * requires extent conversion to occur *before* we return to userspace, | ||
1616 | * we have to wait for extent conversion to complete. Look at the | ||
1617 | * iocb that has been passed to us to determine if this is AIO or | ||
1618 | * not. If it is synchronous, tell xfs_finish_ioend() to kick the | ||
1619 | * workqueue and wait for it to complete. | ||
1620 | * | ||
1621 | * The core direct I/O code might be changed to always call the | ||
1622 | * completion handler in the future, in which case all this can | ||
1623 | * go away. | ||
1624 | */ | 1440 | */ |
1441 | iocb->private = NULL; | ||
1442 | |||
1625 | ioend->io_offset = offset; | 1443 | ioend->io_offset = offset; |
1626 | ioend->io_size = size; | 1444 | ioend->io_size = size; |
1627 | if (ioend->io_type == IO_READ) { | 1445 | if (private && size > 0) |
1628 | xfs_finish_ioend(ioend, 0); | 1446 | ioend->io_type = IO_UNWRITTEN; |
1629 | } else if (private && size > 0) { | 1447 | |
1630 | xfs_finish_ioend(ioend, is_sync_kiocb(iocb)); | 1448 | if (is_async) { |
1631 | } else { | ||
1632 | /* | 1449 | /* |
1633 | * A direct I/O write ioend starts it's life in unwritten | 1450 | * If we are converting an unwritten extent we need to delay |
1634 | * state in case they map an unwritten extent. This write | 1451 | * the AIO completion until after the unwritten extent |
1635 | * didn't map an unwritten extent so switch it's completion | 1452 | * conversion has completed, otherwise do it ASAP. |
1636 | * handler. | ||
1637 | */ | 1453 | */ |
1638 | ioend->io_type = IO_NEW; | 1454 | if (ioend->io_type == IO_UNWRITTEN) { |
1639 | xfs_finish_ioend(ioend, 0); | 1455 | ioend->io_iocb = iocb; |
1456 | ioend->io_result = ret; | ||
1457 | } else { | ||
1458 | aio_complete(iocb, ret, 0); | ||
1459 | } | ||
1460 | xfs_finish_ioend(ioend); | ||
1461 | } else { | ||
1462 | xfs_finish_ioend_sync(ioend); | ||
1640 | } | 1463 | } |
1641 | |||
1642 | /* | ||
1643 | * blockdev_direct_IO can return an error even after the I/O | ||
1644 | * completion handler was called. Thus we need to protect | ||
1645 | * against double-freeing. | ||
1646 | */ | ||
1647 | iocb->private = NULL; | ||
1648 | } | 1464 | } |
1649 | 1465 | ||
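The completion handler above defers aio_complete() when an async request still needs unwritten-to-written conversion: the iocb and result are parked on the ioend and completion happens from the conversion work item, otherwise it happens immediately. A sketch of that deferral; the types and helpers below are stand-ins, not the kernel's.

/* sketch: defer AIO completion until unwritten extent conversion is done */
#include <stdio.h>
#include <stddef.h>

struct iocb { int id; };

struct ioend {
	int		unwritten;	/* needs conversion? */
	struct iocb	*io_iocb;	/* deferred completion target */
	long		io_result;
};

static void aio_complete(struct iocb *iocb, long res)
{
	printf("aio_complete(iocb %d) = %ld\n", iocb->id, res);
}

/* runs later, from the conversion work item */
static void conversion_done(struct ioend *ioend)
{
	if (ioend->io_iocb)
		aio_complete(ioend->io_iocb, ioend->io_result);
}

static void end_io_direct_write(struct iocb *iocb, long ret, int is_async,
				struct ioend *ioend)
{
	if (!is_async)
		return;			/* synchronous caller waits itself */

	if (ioend->unwritten) {		/* defer until conversion finishes */
		ioend->io_iocb = iocb;
		ioend->io_result = ret;
	} else {
		aio_complete(iocb, ret);
	}
}

int main(void)
{
	struct iocb a = { 1 }, b = { 2 };
	struct ioend plain = { 0, NULL, 0 }, unwritten = { 1, NULL, 0 };

	end_io_direct_write(&a, 4096, 1, &plain);	/* completes now */
	end_io_direct_write(&b, 4096, 1, &unwritten);	/* deferred ... */
	conversion_done(&unwritten);			/* ... until here */
	return 0;
}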
1650 | STATIC ssize_t | 1466 | STATIC ssize_t |
@@ -1655,23 +1471,26 @@ xfs_vm_direct_IO( | |||
1655 | loff_t offset, | 1471 | loff_t offset, |
1656 | unsigned long nr_segs) | 1472 | unsigned long nr_segs) |
1657 | { | 1473 | { |
1658 | struct file *file = iocb->ki_filp; | 1474 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
1659 | struct inode *inode = file->f_mapping->host; | 1475 | struct block_device *bdev = xfs_find_bdev_for_inode(inode); |
1660 | struct block_device *bdev; | 1476 | ssize_t ret; |
1661 | ssize_t ret; | 1477 | |
1662 | 1478 | if (rw & WRITE) { | |
1663 | bdev = xfs_find_bdev_for_inode(inode); | 1479 | iocb->private = xfs_alloc_ioend(inode, IO_NEW); |
1664 | 1480 | ||
1665 | iocb->private = xfs_alloc_ioend(inode, rw == WRITE ? | 1481 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov, |
1666 | IO_UNWRITTEN : IO_READ); | 1482 | offset, nr_segs, |
1667 | 1483 | xfs_get_blocks_direct, | |
1668 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov, | 1484 | xfs_end_io_direct_write); |
1669 | offset, nr_segs, | 1485 | if (ret != -EIOCBQUEUED && iocb->private) |
1670 | xfs_get_blocks_direct, | 1486 | xfs_destroy_ioend(iocb->private); |
1671 | xfs_end_io_direct); | 1487 | } else { |
1488 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov, | ||
1489 | offset, nr_segs, | ||
1490 | xfs_get_blocks_direct, | ||
1491 | NULL); | ||
1492 | } | ||
1672 | 1493 | ||
1673 | if (unlikely(ret != -EIOCBQUEUED && iocb->private)) | ||
1674 | xfs_destroy_ioend(iocb->private); | ||
1675 | return ret; | 1494 | return ret; |
1676 | } | 1495 | } |
1677 | 1496 | ||
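In the rewritten direct_IO above only the write path allocates an ioend, and it is torn down again when the request neither completed in line nor was queued as AIO. A sketch of that ownership rule; the -EIOCBQUEUED value and helpers are illustrative only.

/* sketch: free the ioend unless ownership moved to the completion handler */
#include <stdio.h>

#define EIOCBQUEUED	529	/* sketch value, not the kernel's */

static int live_ioends;

static int  alloc_ioend(void)   { return ++live_ioends; }
static void destroy_ioend(void) { --live_ioends; }

static long direct_io_write(long submit_ret)
{
	int have_ioend = alloc_ioend();

	/*
	 * If the request was queued as AIO, ownership of the ioend moves
	 * to the completion handler; otherwise tear it down here.
	 */
	if (submit_ret != -EIOCBQUEUED && have_ioend)
		destroy_ioend();
	return submit_ret;
}

int main(void)
{
	direct_io_write(4096);		/* completed synchronously */
	printf("after sync write:  %d live ioends\n", live_ioends);

	direct_io_write(-EIOCBQUEUED);	/* queued: handler will free it */
	printf("after async write: %d live ioends\n", live_ioends);

	destroy_ioend();		/* the completion handler's job */
	printf("after completion:  %d live ioends\n", live_ioends);
	return 0;
}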
@@ -1686,8 +1505,8 @@ xfs_vm_write_begin( | |||
1686 | void **fsdata) | 1505 | void **fsdata) |
1687 | { | 1506 | { |
1688 | *pagep = NULL; | 1507 | *pagep = NULL; |
1689 | return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 1508 | return block_write_begin(file, mapping, pos, len, flags | AOP_FLAG_NOFS, |
1690 | xfs_get_blocks); | 1509 | pagep, fsdata, xfs_get_blocks); |
1691 | } | 1510 | } |
1692 | 1511 | ||
1693 | STATIC sector_t | 1512 | STATIC sector_t |
@@ -1698,7 +1517,7 @@ xfs_vm_bmap( | |||
1698 | struct inode *inode = (struct inode *)mapping->host; | 1517 | struct inode *inode = (struct inode *)mapping->host; |
1699 | struct xfs_inode *ip = XFS_I(inode); | 1518 | struct xfs_inode *ip = XFS_I(inode); |
1700 | 1519 | ||
1701 | xfs_itrace_entry(XFS_I(inode)); | 1520 | trace_xfs_vm_bmap(XFS_I(inode)); |
1702 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 1521 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
1703 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); | 1522 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); |
1704 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 1523 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 4cfc6ea87df8..c5057fb6237a 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h | |||
@@ -37,6 +37,8 @@ typedef struct xfs_ioend { | |||
37 | size_t io_size; /* size of the extent */ | 37 | size_t io_size; /* size of the extent */ |
38 | xfs_off_t io_offset; /* offset in the file */ | 38 | xfs_off_t io_offset; /* offset in the file */ |
39 | struct work_struct io_work; /* xfsdatad work queue */ | 39 | struct work_struct io_work; /* xfsdatad work queue */ |
40 | struct kiocb *io_iocb; | ||
41 | int io_result; | ||
40 | } xfs_ioend_t; | 42 | } xfs_ioend_t; |
41 | 43 | ||
42 | extern const struct address_space_operations xfs_address_space_operations; | 44 | extern const struct address_space_operations xfs_address_space_operations; |
@@ -45,6 +47,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int); | |||
45 | extern void xfs_ioend_init(void); | 47 | extern void xfs_ioend_init(void); |
46 | extern void xfs_ioend_wait(struct xfs_inode *); | 48 | extern void xfs_ioend_wait(struct xfs_inode *); |
47 | 49 | ||
48 | extern void xfs_count_page_state(struct page *, int *, int *, int *); | 50 | extern void xfs_count_page_state(struct page *, int *, int *); |
49 | 51 | ||
50 | #endif /* __XFS_AOPS_H__ */ | 52 | #endif /* __XFS_AOPS_H__ */ |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 649ade8ef598..ea79072f5210 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -39,13 +39,12 @@ | |||
39 | #include "xfs_inum.h" | 39 | #include "xfs_inum.h" |
40 | #include "xfs_log.h" | 40 | #include "xfs_log.h" |
41 | #include "xfs_ag.h" | 41 | #include "xfs_ag.h" |
42 | #include "xfs_dmapi.h" | ||
43 | #include "xfs_mount.h" | 42 | #include "xfs_mount.h" |
44 | #include "xfs_trace.h" | 43 | #include "xfs_trace.h" |
45 | 44 | ||
46 | static kmem_zone_t *xfs_buf_zone; | 45 | static kmem_zone_t *xfs_buf_zone; |
47 | STATIC int xfsbufd(void *); | 46 | STATIC int xfsbufd(void *); |
48 | STATIC int xfsbufd_wakeup(int, gfp_t); | 47 | STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t); |
49 | STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); | 48 | STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); |
50 | static struct shrinker xfs_buf_shake = { | 49 | static struct shrinker xfs_buf_shake = { |
51 | .shrink = xfsbufd_wakeup, | 50 | .shrink = xfsbufd_wakeup, |
@@ -340,7 +339,7 @@ _xfs_buf_lookup_pages( | |||
340 | __func__, gfp_mask); | 339 | __func__, gfp_mask); |
341 | 340 | ||
342 | XFS_STATS_INC(xb_page_retries); | 341 | XFS_STATS_INC(xb_page_retries); |
343 | xfsbufd_wakeup(0, gfp_mask); | 342 | xfsbufd_wakeup(NULL, 0, gfp_mask); |
344 | congestion_wait(BLK_RW_ASYNC, HZ/50); | 343 | congestion_wait(BLK_RW_ASYNC, HZ/50); |
345 | goto retry; | 344 | goto retry; |
346 | } | 345 | } |
@@ -579,9 +578,9 @@ _xfs_buf_read( | |||
579 | XBF_READ_AHEAD | _XBF_RUN_QUEUES); | 578 | XBF_READ_AHEAD | _XBF_RUN_QUEUES); |
580 | 579 | ||
581 | status = xfs_buf_iorequest(bp); | 580 | status = xfs_buf_iorequest(bp); |
582 | if (!status && !(flags & XBF_ASYNC)) | 581 | if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC)) |
583 | status = xfs_buf_iowait(bp); | 582 | return status; |
584 | return status; | 583 | return xfs_buf_iowait(bp); |
585 | } | 584 | } |
586 | 585 | ||
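The read path above now bails out early when the request failed, the buffer carries an error, or the caller is asynchronous, and only the synchronous success path waits for I/O. A small sketch of that new control flow with stubbed helpers:

/* sketch: early returns instead of a nested wait condition */
#include <stdio.h>

#define ASYNC 0x1

static int iowait_called;
static int iowait(void) { iowait_called++; return 0; }

static int read_buf(int status, int has_error, int flags)
{
	if (status || has_error || (flags & ASYNC))
		return status;	/* don't wait: failed, errored, or async */
	return iowait();	/* synchronous success path waits here */
}

int main(void)
{
	printf("sync ok:  %d (waits so far: %d)\n", read_buf(0, 0, 0), iowait_called);
	printf("async ok: %d (waits so far: %d)\n", read_buf(0, 0, ASYNC), iowait_called);
	printf("error:    %d (waits so far: %d)\n", read_buf(0, 1, 0), iowait_called);
	return 0;
}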
587 | xfs_buf_t * | 586 | xfs_buf_t * |
@@ -897,36 +896,6 @@ xfs_buf_unlock( | |||
897 | trace_xfs_buf_unlock(bp, _RET_IP_); | 896 | trace_xfs_buf_unlock(bp, _RET_IP_); |
898 | } | 897 | } |
899 | 898 | ||
900 | |||
901 | /* | ||
902 | * Pinning Buffer Storage in Memory | ||
903 | * Ensure that no attempt to force a buffer to disk will succeed. | ||
904 | */ | ||
905 | void | ||
906 | xfs_buf_pin( | ||
907 | xfs_buf_t *bp) | ||
908 | { | ||
909 | trace_xfs_buf_pin(bp, _RET_IP_); | ||
910 | atomic_inc(&bp->b_pin_count); | ||
911 | } | ||
912 | |||
913 | void | ||
914 | xfs_buf_unpin( | ||
915 | xfs_buf_t *bp) | ||
916 | { | ||
917 | trace_xfs_buf_unpin(bp, _RET_IP_); | ||
918 | |||
919 | if (atomic_dec_and_test(&bp->b_pin_count)) | ||
920 | wake_up_all(&bp->b_waiters); | ||
921 | } | ||
922 | |||
923 | int | ||
924 | xfs_buf_ispin( | ||
925 | xfs_buf_t *bp) | ||
926 | { | ||
927 | return atomic_read(&bp->b_pin_count); | ||
928 | } | ||
929 | |||
930 | STATIC void | 899 | STATIC void |
931 | xfs_buf_wait_unpin( | 900 | xfs_buf_wait_unpin( |
932 | xfs_buf_t *bp) | 901 | xfs_buf_t *bp) |
@@ -1018,13 +987,12 @@ xfs_bwrite( | |||
1018 | { | 987 | { |
1019 | int error; | 988 | int error; |
1020 | 989 | ||
1021 | bp->b_strat = xfs_bdstrat_cb; | ||
1022 | bp->b_mount = mp; | 990 | bp->b_mount = mp; |
1023 | bp->b_flags |= XBF_WRITE; | 991 | bp->b_flags |= XBF_WRITE; |
1024 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ); | 992 | bp->b_flags &= ~(XBF_ASYNC | XBF_READ); |
1025 | 993 | ||
1026 | xfs_buf_delwri_dequeue(bp); | 994 | xfs_buf_delwri_dequeue(bp); |
1027 | xfs_buf_iostrategy(bp); | 995 | xfs_bdstrat_cb(bp); |
1028 | 996 | ||
1029 | error = xfs_buf_iowait(bp); | 997 | error = xfs_buf_iowait(bp); |
1030 | if (error) | 998 | if (error) |
@@ -1040,7 +1008,6 @@ xfs_bdwrite( | |||
1040 | { | 1008 | { |
1041 | trace_xfs_buf_bdwrite(bp, _RET_IP_); | 1009 | trace_xfs_buf_bdwrite(bp, _RET_IP_); |
1042 | 1010 | ||
1043 | bp->b_strat = xfs_bdstrat_cb; | ||
1044 | bp->b_mount = mp; | 1011 | bp->b_mount = mp; |
1045 | 1012 | ||
1046 | bp->b_flags &= ~XBF_READ; | 1013 | bp->b_flags &= ~XBF_READ; |
@@ -1075,7 +1042,6 @@ xfs_bioerror( | |||
1075 | XFS_BUF_UNDONE(bp); | 1042 | XFS_BUF_UNDONE(bp); |
1076 | XFS_BUF_STALE(bp); | 1043 | XFS_BUF_STALE(bp); |
1077 | 1044 | ||
1078 | XFS_BUF_CLR_BDSTRAT_FUNC(bp); | ||
1079 | xfs_biodone(bp); | 1045 | xfs_biodone(bp); |
1080 | 1046 | ||
1081 | return EIO; | 1047 | return EIO; |
@@ -1105,7 +1071,6 @@ xfs_bioerror_relse( | |||
1105 | XFS_BUF_DONE(bp); | 1071 | XFS_BUF_DONE(bp); |
1106 | XFS_BUF_STALE(bp); | 1072 | XFS_BUF_STALE(bp); |
1107 | XFS_BUF_CLR_IODONE_FUNC(bp); | 1073 | XFS_BUF_CLR_IODONE_FUNC(bp); |
1108 | XFS_BUF_CLR_BDSTRAT_FUNC(bp); | ||
1109 | if (!(fl & XBF_ASYNC)) { | 1074 | if (!(fl & XBF_ASYNC)) { |
1110 | /* | 1075 | /* |
1111 | * Mark b_error and B_ERROR _both_. | 1076 | * Mark b_error and B_ERROR _both_. |
@@ -1311,8 +1276,19 @@ submit_io: | |||
1311 | if (size) | 1276 | if (size) |
1312 | goto next_chunk; | 1277 | goto next_chunk; |
1313 | } else { | 1278 | } else { |
1314 | bio_put(bio); | 1279 | /* |
1280 | * if we get here, no pages were added to the bio. However, | ||
1281 | * we can't just error out here - if the pages are locked then | ||
1282 | * we have to unlock them otherwise we can hang on a later | ||
1283 | * access to the page. | ||
1284 | */ | ||
1315 | xfs_buf_ioerror(bp, EIO); | 1285 | xfs_buf_ioerror(bp, EIO); |
1286 | if (bp->b_flags & _XBF_PAGE_LOCKED) { | ||
1287 | int i; | ||
1288 | for (i = 0; i < bp->b_page_count; i++) | ||
1289 | unlock_page(bp->b_pages[i]); | ||
1290 | } | ||
1291 | bio_put(bio); | ||
1316 | } | 1292 | } |
1317 | } | 1293 | } |
1318 | 1294 | ||
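The fallback added above handles the case where no pages could be added to the bio: any pages the buffer had locked must be unlocked before bailing out, or a later access to them would hang. A stand-alone sketch of that cleanup loop; the types are stand-ins only.

/* sketch: unlock held pages before failing the submission */
#include <stdio.h>

#define NPAGES 3

struct page { int locked; };

static void unlock_page(struct page *p) { p->locked = 0; }

static int submit_failed(struct page pages[], int page_count, int pages_locked)
{
	int i;

	if (pages_locked)
		for (i = 0; i < page_count; i++)
			unlock_page(&pages[i]);
	return -5;	/* EIO stand-in */
}

int main(void)
{
	struct page pages[NPAGES] = { {1}, {1}, {1} };
	int err = submit_failed(pages, NPAGES, 1);

	printf("err=%d, first page locked=%d\n", err, pages[0].locked);
	return 0;
}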
@@ -1762,6 +1738,7 @@ xfs_buf_runall_queues( | |||
1762 | 1738 | ||
1763 | STATIC int | 1739 | STATIC int |
1764 | xfsbufd_wakeup( | 1740 | xfsbufd_wakeup( |
1741 | struct shrinker *shrink, | ||
1765 | int priority, | 1742 | int priority, |
1766 | gfp_t mask) | 1743 | gfp_t mask) |
1767 | { | 1744 | { |
@@ -1803,7 +1780,7 @@ xfs_buf_delwri_split( | |||
1803 | trace_xfs_buf_delwri_split(bp, _RET_IP_); | 1780 | trace_xfs_buf_delwri_split(bp, _RET_IP_); |
1804 | ASSERT(bp->b_flags & XBF_DELWRI); | 1781 | ASSERT(bp->b_flags & XBF_DELWRI); |
1805 | 1782 | ||
1806 | if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) { | 1783 | if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { |
1807 | if (!force && | 1784 | if (!force && |
1808 | time_before(jiffies, bp->b_queuetime + age)) { | 1785 | time_before(jiffies, bp->b_queuetime + age)) { |
1809 | xfs_buf_unlock(bp); | 1786 | xfs_buf_unlock(bp); |
@@ -1888,7 +1865,7 @@ xfsbufd( | |||
1888 | struct xfs_buf *bp; | 1865 | struct xfs_buf *bp; |
1889 | bp = list_first_entry(&tmp, struct xfs_buf, b_list); | 1866 | bp = list_first_entry(&tmp, struct xfs_buf, b_list); |
1890 | list_del_init(&bp->b_list); | 1867 | list_del_init(&bp->b_list); |
1891 | xfs_buf_iostrategy(bp); | 1868 | xfs_bdstrat_cb(bp); |
1892 | count++; | 1869 | count++; |
1893 | } | 1870 | } |
1894 | if (count) | 1871 | if (count) |
@@ -1935,7 +1912,7 @@ xfs_flush_buftarg( | |||
1935 | bp->b_flags &= ~XBF_ASYNC; | 1912 | bp->b_flags &= ~XBF_ASYNC; |
1936 | list_add(&bp->b_list, &wait_list); | 1913 | list_add(&bp->b_list, &wait_list); |
1937 | } | 1914 | } |
1938 | xfs_buf_iostrategy(bp); | 1915 | xfs_bdstrat_cb(bp); |
1939 | } | 1916 | } |
1940 | 1917 | ||
1941 | if (wait) { | 1918 | if (wait) { |
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 5fbecefa5dfd..d072e5ff923b 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h | |||
@@ -44,57 +44,57 @@ typedef enum { | |||
44 | XBRW_ZERO = 3, /* Zero target memory */ | 44 | XBRW_ZERO = 3, /* Zero target memory */ |
45 | } xfs_buf_rw_t; | 45 | } xfs_buf_rw_t; |
46 | 46 | ||
47 | typedef enum { | 47 | #define XBF_READ (1 << 0) /* buffer intended for reading from device */ |
48 | XBF_READ = (1 << 0), /* buffer intended for reading from device */ | 48 | #define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ |
49 | XBF_WRITE = (1 << 1), /* buffer intended for writing to device */ | 49 | #define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */ |
50 | XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */ | 50 | #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ |
51 | XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ | 51 | #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ |
52 | XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ | 52 | #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ |
53 | XBF_DELWRI = (1 << 6), /* buffer has dirty pages */ | 53 | #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ |
54 | XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ | 54 | #define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */ |
55 | XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ | 55 | #define XBF_ORDERED (1 << 11)/* use ordered writes */ |
56 | XBF_ORDERED = (1 << 11), /* use ordered writes */ | 56 | #define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ |
57 | XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */ | 57 | #define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ |
58 | XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */ | 58 | |
59 | 59 | /* flags used only as arguments to access routines */ | |
60 | /* flags used only as arguments to access routines */ | 60 | #define XBF_LOCK (1 << 14)/* lock requested */ |
61 | XBF_LOCK = (1 << 14), /* lock requested */ | 61 | #define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */ |
62 | XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ | 62 | #define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */ |
63 | XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ | 63 | |
64 | 64 | /* flags used only internally */ | |
65 | /* flags used only internally */ | 65 | #define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */ |
66 | _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ | 66 | #define _XBF_PAGES (1 << 18)/* backed by refcounted pages */ |
67 | _XBF_PAGES = (1 << 18), /* backed by refcounted pages */ | 67 | #define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */ |
68 | _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ | 68 | #define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */ |
69 | _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ | 69 | |
70 | 70 | /* | |
71 | /* | 71 | * Special flag for supporting metadata blocks smaller than a FSB. |
72 | * Special flag for supporting metadata blocks smaller than a FSB. | 72 | * |
73 | * | 73 | * In this case we can have multiple xfs_buf_t on a single page and |
74 | * In this case we can have multiple xfs_buf_t on a single page and | 74 | * need to lock out concurrent xfs_buf_t readers as they only |
75 | * need to lock out concurrent xfs_buf_t readers as they only | 75 | * serialise access to the buffer. |
76 | * serialise access to the buffer. | 76 | * |
77 | * | 77 | * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation |
78 | * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation | 78 | * between reads of the page. Hence we can have one thread read the |
79 | * between reads of the page. Hence we can have one thread read the | 79 | * page and modify it, but then race with another thread that thinks |
80 | * page and modify it, but then race with another thread that thinks | 80 | * the page is not up-to-date and hence reads it again. |
81 | * the page is not up-to-date and hence reads it again. | 81 | * |
82 | * | 82 | * The result is that the first modification to the page is lost. |
83 | * The result is that the first modification to the page is lost. | 83 | * This sort of AGF/AGI reading race can happen when unlinking inodes |
84 | * This sort of AGF/AGI reading race can happen when unlinking inodes | 84 | * that require truncation and results in the AGI unlinked list |
85 | * that require truncation and results in the AGI unlinked list | 85 | * modifications being lost. |
86 | * modifications being lost. | 86 | */ |
87 | */ | 87 | #define _XBF_PAGE_LOCKED (1 << 22) |
88 | _XBF_PAGE_LOCKED = (1 << 22), | 88 | |
89 | 89 | /* | |
90 | /* | 90 | * If we try a barrier write, but it fails we have to communicate |
91 | * If we try a barrier write, but it fails we have to communicate | 91 | * this to the upper layers. Unfortunately b_error gets overwritten |
92 | * this to the upper layers. Unfortunately b_error gets overwritten | 92 | * when the buffer is re-issued so we have to add another flag to |
93 | * when the buffer is re-issued so we have to add another flag to | 93 | * keep this information. |
94 | * keep this information. | 94 | */ |
95 | */ | 95 | #define _XFS_BARRIER_FAILED (1 << 23) |
96 | _XFS_BARRIER_FAILED = (1 << 23), | 96 | |
97 | } xfs_buf_flags_t; | 97 | typedef unsigned int xfs_buf_flags_t; |
98 | 98 | ||
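The hunk above replaces the flag enum with plain macros over an unsigned typedef; one common reason for this kind of change is that OR-ed combinations of enumerators are not themselves valid enum values, which upsets type checkers such as sparse. A compact illustration of the resulting style, with names and values local to this sketch:

/* sketch: bit flags as macros over a plain integer typedef */
#include <stdio.h>

#define BUF_READ	(1 << 0)
#define BUF_WRITE	(1 << 1)
#define BUF_ASYNC	(1 << 4)

typedef unsigned int buf_flags_t;

static void start_io(buf_flags_t flags)
{
	printf("flags=%#x read=%d write=%d async=%d\n", flags,
	       !!(flags & BUF_READ), !!(flags & BUF_WRITE),
	       !!(flags & BUF_ASYNC));
}

int main(void)
{
	/* combinations are just unsigned values; no enum range issues */
	start_io(BUF_READ | BUF_ASYNC);
	start_io(BUF_WRITE);
	return 0;
}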
99 | #define XFS_BUF_FLAGS \ | 99 | #define XFS_BUF_FLAGS \ |
100 | { XBF_READ, "READ" }, \ | 100 | { XBF_READ, "READ" }, \ |
@@ -187,7 +187,6 @@ typedef struct xfs_buf { | |||
187 | atomic_t b_io_remaining; /* #outstanding I/O requests */ | 187 | atomic_t b_io_remaining; /* #outstanding I/O requests */ |
188 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ | 188 | xfs_buf_iodone_t b_iodone; /* I/O completion function */ |
189 | xfs_buf_relse_t b_relse; /* releasing function */ | 189 | xfs_buf_relse_t b_relse; /* releasing function */ |
190 | xfs_buf_bdstrat_t b_strat; /* pre-write function */ | ||
191 | struct completion b_iowait; /* queue for I/O waiters */ | 190 | struct completion b_iowait; /* queue for I/O waiters */ |
192 | void *b_fspriv; | 191 | void *b_fspriv; |
193 | void *b_fspriv2; | 192 | void *b_fspriv2; |
@@ -245,11 +244,6 @@ extern int xfs_buf_iowait(xfs_buf_t *); | |||
245 | extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, | 244 | extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, |
246 | xfs_buf_rw_t); | 245 | xfs_buf_rw_t); |
247 | 246 | ||
248 | static inline int xfs_buf_iostrategy(xfs_buf_t *bp) | ||
249 | { | ||
250 | return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp); | ||
251 | } | ||
252 | |||
253 | static inline int xfs_buf_geterror(xfs_buf_t *bp) | 247 | static inline int xfs_buf_geterror(xfs_buf_t *bp) |
254 | { | 248 | { |
255 | return bp ? bp->b_error : ENOMEM; | 249 | return bp ? bp->b_error : ENOMEM; |
@@ -258,11 +252,6 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp) | |||
258 | /* Buffer Utility Routines */ | 252 | /* Buffer Utility Routines */ |
259 | extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); | 253 | extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); |
260 | 254 | ||
261 | /* Pinning Buffer Storage in Memory */ | ||
262 | extern void xfs_buf_pin(xfs_buf_t *); | ||
263 | extern void xfs_buf_unpin(xfs_buf_t *); | ||
264 | extern int xfs_buf_ispin(xfs_buf_t *); | ||
265 | |||
266 | /* Delayed Write Buffer Routines */ | 255 | /* Delayed Write Buffer Routines */ |
267 | extern void xfs_buf_delwri_dequeue(xfs_buf_t *); | 256 | extern void xfs_buf_delwri_dequeue(xfs_buf_t *); |
268 | extern void xfs_buf_delwri_promote(xfs_buf_t *); | 257 | extern void xfs_buf_delwri_promote(xfs_buf_t *); |
@@ -326,8 +315,6 @@ extern void xfs_buf_terminate(void); | |||
326 | #define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) | 315 | #define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) |
327 | #define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) | 316 | #define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) |
328 | #define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) | 317 | #define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) |
329 | #define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func)) | ||
330 | #define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL) | ||
331 | 318 | ||
332 | #define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) | 319 | #define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) |
333 | #define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) | 320 | #define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) |
@@ -351,7 +338,7 @@ extern void xfs_buf_terminate(void); | |||
351 | #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) | 338 | #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) |
352 | #define XFS_BUF_SET_REF(bp, ref) do { } while (0) | 339 | #define XFS_BUF_SET_REF(bp, ref) do { } while (0) |
353 | 340 | ||
354 | #define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp) | 341 | #define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) |
355 | 342 | ||
356 | #define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp) | 343 | #define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp) |
357 | #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) | 344 | #define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) |
@@ -370,8 +357,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp) | |||
370 | xfs_buf_rele(bp); | 357 | xfs_buf_rele(bp); |
371 | } | 358 | } |
372 | 359 | ||
373 | #define xfs_bpin(bp) xfs_buf_pin(bp) | ||
374 | #define xfs_bunpin(bp) xfs_buf_unpin(bp) | ||
375 | #define xfs_biodone(bp) xfs_buf_ioend(bp, 0) | 360 | #define xfs_biodone(bp) xfs_buf_ioend(bp, 0) |
376 | 361 | ||
377 | #define xfs_biomove(bp, off, len, data, rw) \ | 362 | #define xfs_biomove(bp, off, len, data, rw) \ |
diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h deleted file mode 100644 index a8b0b1685eed..000000000000 --- a/fs/xfs/linux-2.6/xfs_dmapi_priv.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2006 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_DMAPI_PRIV_H__ | ||
19 | #define __XFS_DMAPI_PRIV_H__ | ||
20 | |||
21 | /* | ||
22 | * Based on IO_ISDIRECT, decide which i_ flag is set. | ||
23 | */ | ||
24 | #define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ | ||
25 | DM_FLAGS_IMUX : 0) | ||
26 | #define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) | ||
27 | |||
28 | #endif /*__XFS_DMAPI_PRIV_H__*/ | ||
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 846b75aeb2ab..3764d74790ec 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c | |||
@@ -23,13 +23,13 @@ | |||
23 | #include "xfs_sb.h" | 23 | #include "xfs_sb.h" |
24 | #include "xfs_ag.h" | 24 | #include "xfs_ag.h" |
25 | #include "xfs_dir2.h" | 25 | #include "xfs_dir2.h" |
26 | #include "xfs_dmapi.h" | ||
27 | #include "xfs_mount.h" | 26 | #include "xfs_mount.h" |
28 | #include "xfs_export.h" | 27 | #include "xfs_export.h" |
29 | #include "xfs_vnodeops.h" | 28 | #include "xfs_vnodeops.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
32 | #include "xfs_inode_item.h" | 31 | #include "xfs_inode_item.h" |
32 | #include "xfs_trace.h" | ||
33 | 33 | ||
34 | /* | 34 | /* |
35 | * Note that we only accept fileids which are long enough rather than allow | 35 | * Note that we only accept fileids which are long enough rather than allow |
@@ -128,13 +128,11 @@ xfs_nfs_get_inode( | |||
128 | return ERR_PTR(-ESTALE); | 128 | return ERR_PTR(-ESTALE); |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * The XFS_IGET_BULKSTAT means that an invalid inode number is just | 131 | * The XFS_IGET_UNTRUSTED means that an invalid inode number is just |
132 | * fine and not an indication of a corrupted filesystem. Because | 132 | * fine and not an indication of a corrupted filesystem as clients can |
133 | * clients can send any kind of invalid file handle, e.g. after | 133 | * send invalid file handles and we have to handle it gracefully. |
134 | * a restore on the server we have to deal with this case gracefully. | ||
135 | */ | 134 | */ |
136 | error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT, | 135 | error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip); |
137 | XFS_ILOCK_SHARED, &ip, 0); | ||
138 | if (error) { | 136 | if (error) { |
139 | /* | 137 | /* |
140 | * EINVAL means the inode cluster doesn't exist anymore. | 138 | * EINVAL means the inode cluster doesn't exist anymore. |
@@ -149,11 +147,10 @@ xfs_nfs_get_inode( | |||
149 | } | 147 | } |
150 | 148 | ||
151 | if (ip->i_d.di_gen != generation) { | 149 | if (ip->i_d.di_gen != generation) { |
152 | xfs_iput_new(ip, XFS_ILOCK_SHARED); | 150 | IRELE(ip); |
153 | return ERR_PTR(-ENOENT); | 151 | return ERR_PTR(-ENOENT); |
154 | } | 152 | } |
155 | 153 | ||
156 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
157 | return VFS_I(ip); | 154 | return VFS_I(ip); |
158 | } | 155 | } |
159 | 156 | ||
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 257a56b127cf..ba8ad422a165 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c | |||
@@ -22,23 +22,15 @@ | |||
22 | #include "xfs_inum.h" | 22 | #include "xfs_inum.h" |
23 | #include "xfs_sb.h" | 23 | #include "xfs_sb.h" |
24 | #include "xfs_ag.h" | 24 | #include "xfs_ag.h" |
25 | #include "xfs_dir2.h" | ||
26 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 26 | #include "xfs_mount.h" |
29 | #include "xfs_bmap_btree.h" | 27 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_alloc_btree.h" | ||
31 | #include "xfs_ialloc_btree.h" | ||
32 | #include "xfs_alloc.h" | 28 | #include "xfs_alloc.h" |
33 | #include "xfs_btree.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_dinode.h" | 29 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_inode_item.h" | 31 | #include "xfs_inode_item.h" |
39 | #include "xfs_bmap.h" | 32 | #include "xfs_bmap.h" |
40 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
41 | #include "xfs_rw.h" | ||
42 | #include "xfs_vnodeops.h" | 34 | #include "xfs_vnodeops.h" |
43 | #include "xfs_da_btree.h" | 35 | #include "xfs_da_btree.h" |
44 | #include "xfs_ioctl.h" | 36 | #include "xfs_ioctl.h" |
@@ -108,7 +100,7 @@ xfs_file_fsync( | |||
108 | int error = 0; | 100 | int error = 0; |
109 | int log_flushed = 0; | 101 | int log_flushed = 0; |
110 | 102 | ||
111 | xfs_itrace_entry(ip); | 103 | trace_xfs_file_fsync(ip); |
112 | 104 | ||
113 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 105 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
114 | return -XFS_ERROR(EIO); | 106 | return -XFS_ERROR(EIO); |
@@ -166,8 +158,7 @@ xfs_file_fsync( | |||
166 | * transaction. So we play it safe and fire off the | 158 | * transaction. So we play it safe and fire off the |
167 | * transaction anyway. | 159 | * transaction anyway. |
168 | */ | 160 | */ |
169 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 161 | xfs_trans_ijoin(tp, ip); |
170 | xfs_trans_ihold(tp, ip); | ||
171 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 162 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
172 | xfs_trans_set_sync(tp); | 163 | xfs_trans_set_sync(tp); |
173 | error = _xfs_trans_commit(tp, 0, &log_flushed); | 164 | error = _xfs_trans_commit(tp, 0, &log_flushed); |
@@ -275,20 +266,6 @@ xfs_file_aio_read( | |||
275 | mutex_lock(&inode->i_mutex); | 266 | mutex_lock(&inode->i_mutex); |
276 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 267 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
277 | 268 | ||
278 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { | ||
279 | int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags); | ||
280 | int iolock = XFS_IOLOCK_SHARED; | ||
281 | |||
282 | ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size, | ||
283 | dmflags, &iolock); | ||
284 | if (ret) { | ||
285 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
286 | if (unlikely(ioflags & IO_ISDIRECT)) | ||
287 | mutex_unlock(&inode->i_mutex); | ||
288 | return ret; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | if (unlikely(ioflags & IO_ISDIRECT)) { | 269 | if (unlikely(ioflags & IO_ISDIRECT)) { |
293 | if (inode->i_mapping->nrpages) { | 270 | if (inode->i_mapping->nrpages) { |
294 | ret = -xfs_flushinval_pages(ip, | 271 | ret = -xfs_flushinval_pages(ip, |
@@ -321,7 +298,6 @@ xfs_file_splice_read( | |||
321 | unsigned int flags) | 298 | unsigned int flags) |
322 | { | 299 | { |
323 | struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); | 300 | struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); |
324 | struct xfs_mount *mp = ip->i_mount; | ||
325 | int ioflags = 0; | 301 | int ioflags = 0; |
326 | ssize_t ret; | 302 | ssize_t ret; |
327 | 303 | ||
@@ -335,18 +311,6 @@ xfs_file_splice_read( | |||
335 | 311 | ||
336 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 312 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
337 | 313 | ||
338 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) { | ||
339 | int iolock = XFS_IOLOCK_SHARED; | ||
340 | int error; | ||
341 | |||
342 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count, | ||
343 | FILP_DELAY_FLAG(infilp), &iolock); | ||
344 | if (error) { | ||
345 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | ||
346 | return -error; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | trace_xfs_file_splice_read(ip, count, *ppos, ioflags); | 314 | trace_xfs_file_splice_read(ip, count, *ppos, ioflags); |
351 | 315 | ||
352 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); | 316 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); |
@@ -367,7 +331,6 @@ xfs_file_splice_write( | |||
367 | { | 331 | { |
368 | struct inode *inode = outfilp->f_mapping->host; | 332 | struct inode *inode = outfilp->f_mapping->host; |
369 | struct xfs_inode *ip = XFS_I(inode); | 333 | struct xfs_inode *ip = XFS_I(inode); |
370 | struct xfs_mount *mp = ip->i_mount; | ||
371 | xfs_fsize_t isize, new_size; | 334 | xfs_fsize_t isize, new_size; |
372 | int ioflags = 0; | 335 | int ioflags = 0; |
373 | ssize_t ret; | 336 | ssize_t ret; |
@@ -382,18 +345,6 @@ xfs_file_splice_write( | |||
382 | 345 | ||
383 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | 346 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
384 | 347 | ||
385 | if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) { | ||
386 | int iolock = XFS_IOLOCK_EXCL; | ||
387 | int error; | ||
388 | |||
389 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count, | ||
390 | FILP_DELAY_FLAG(outfilp), &iolock); | ||
391 | if (error) { | ||
392 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
393 | return -error; | ||
394 | } | ||
395 | } | ||
396 | |||
397 | new_size = *ppos + count; | 348 | new_size = *ppos + count; |
398 | 349 | ||
399 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 350 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
@@ -463,7 +414,7 @@ xfs_zero_last_block( | |||
463 | last_fsb = XFS_B_TO_FSBT(mp, isize); | 414 | last_fsb = XFS_B_TO_FSBT(mp, isize); |
464 | nimaps = 1; | 415 | nimaps = 1; |
465 | error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, | 416 | error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, |
466 | &nimaps, NULL, NULL); | 417 | &nimaps, NULL); |
467 | if (error) { | 418 | if (error) { |
468 | return error; | 419 | return error; |
469 | } | 420 | } |
@@ -558,7 +509,7 @@ xfs_zero_eof( | |||
558 | nimaps = 1; | 509 | nimaps = 1; |
559 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; | 510 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; |
560 | error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, | 511 | error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, |
561 | 0, NULL, 0, &imap, &nimaps, NULL, NULL); | 512 | 0, NULL, 0, &imap, &nimaps, NULL); |
562 | if (error) { | 513 | if (error) { |
563 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | 514 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); |
564 | return error; | 515 | return error; |
@@ -627,7 +578,6 @@ xfs_file_aio_write( | |||
627 | int ioflags = 0; | 578 | int ioflags = 0; |
628 | xfs_fsize_t isize, new_size; | 579 | xfs_fsize_t isize, new_size; |
629 | int iolock; | 580 | int iolock; |
630 | int eventsent = 0; | ||
631 | size_t ocount = 0, count; | 581 | size_t ocount = 0, count; |
632 | int need_i_mutex; | 582 | int need_i_mutex; |
633 | 583 | ||
@@ -673,33 +623,6 @@ start: | |||
673 | goto out_unlock_mutex; | 623 | goto out_unlock_mutex; |
674 | } | 624 | } |
675 | 625 | ||
676 | if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && | ||
677 | !(ioflags & IO_INVIS) && !eventsent)) { | ||
678 | int dmflags = FILP_DELAY_FLAG(file); | ||
679 | |||
680 | if (need_i_mutex) | ||
681 | dmflags |= DM_FLAGS_IMUX; | ||
682 | |||
683 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
684 | error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip, | ||
685 | pos, count, dmflags, &iolock); | ||
686 | if (error) { | ||
687 | goto out_unlock_internal; | ||
688 | } | ||
689 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
690 | eventsent = 1; | ||
691 | |||
692 | /* | ||
693 | * The iolock was dropped and reacquired in XFS_SEND_DATA | ||
694 | * so we have to recheck the size when appending. | ||
695 | * We will only "goto start;" once, since having sent the | ||
696 | * event prevents another call to XFS_SEND_DATA, which is | ||
697 | * what allows the size to change in the first place. | ||
698 | */ | ||
699 | if ((file->f_flags & O_APPEND) && pos != ip->i_size) | ||
700 | goto start; | ||
701 | } | ||
702 | |||
703 | if (ioflags & IO_ISDIRECT) { | 626 | if (ioflags & IO_ISDIRECT) { |
704 | xfs_buftarg_t *target = | 627 | xfs_buftarg_t *target = |
705 | XFS_IS_REALTIME_INODE(ip) ? | 628 | XFS_IS_REALTIME_INODE(ip) ? |
@@ -830,22 +753,6 @@ write_retry: | |||
830 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 753 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
831 | } | 754 | } |
832 | 755 | ||
833 | if (ret == -ENOSPC && | ||
834 | DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) { | ||
835 | xfs_iunlock(ip, iolock); | ||
836 | if (need_i_mutex) | ||
837 | mutex_unlock(&inode->i_mutex); | ||
838 | error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip, | ||
839 | DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL, | ||
840 | 0, 0, 0); /* Delay flag intentionally unused */ | ||
841 | if (need_i_mutex) | ||
842 | mutex_lock(&inode->i_mutex); | ||
843 | xfs_ilock(ip, iolock); | ||
844 | if (error) | ||
845 | goto out_unlock_internal; | ||
846 | goto start; | ||
847 | } | ||
848 | |||
849 | error = -ret; | 756 | error = -ret; |
850 | if (ret <= 0) | 757 | if (ret <= 0) |
851 | goto out_unlock_internal; | 758 | goto out_unlock_internal; |
@@ -1014,9 +921,6 @@ const struct file_operations xfs_file_operations = { | |||
1014 | .open = xfs_file_open, | 921 | .open = xfs_file_open, |
1015 | .release = xfs_file_release, | 922 | .release = xfs_file_release, |
1016 | .fsync = xfs_file_fsync, | 923 | .fsync = xfs_file_fsync, |
1017 | #ifdef HAVE_FOP_OPEN_EXEC | ||
1018 | .open_exec = xfs_file_open_exec, | ||
1019 | #endif | ||
1020 | }; | 924 | }; |
1021 | 925 | ||
1022 | const struct file_operations xfs_dir_file_operations = { | 926 | const struct file_operations xfs_dir_file_operations = { |
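Note on the xfs_file.c hunks above: the xfs_bmapi() calls in the EOF-zeroing paths drop their trailing NULL argument, and xfs_file_aio_write() loses the DMAPI XFS_SEND_DATA/XFS_SEND_NOSPACE retry machinery. A minimal sketch of the resulting read-mapping call, using only names visible in the hunk (the local declarations are added here for context and are assumptions):

    xfs_bmbt_irec_t  imap;        /* mapping record returned by xfs_bmapi() */
    int              nimaps = 1;  /* ask for a single extent */
    int              error;

    /* map one block at the old EOF; no transaction, no trailing argument */
    error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0,
                      &imap, &nimaps, NULL);
    if (error)
            return error;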
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index b6918d76bc7b..1f279b012f94 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c | |||
@@ -21,10 +21,6 @@ | |||
21 | #include "xfs_inode.h" | 21 | #include "xfs_inode.h" |
22 | #include "xfs_trace.h" | 22 | #include "xfs_trace.h" |
23 | 23 | ||
24 | int fs_noerr(void) { return 0; } | ||
25 | int fs_nosys(void) { return ENOSYS; } | ||
26 | void fs_noval(void) { return; } | ||
27 | |||
28 | /* | 24 | /* |
29 | * note: all filemap functions return negative error codes. These | 25 | * note: all filemap functions return negative error codes. These |
30 | * need to be inverted before returning to the xfs core functions. | 26 | * need to be inverted before returning to the xfs core functions. |
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h deleted file mode 100644 index 82bb19b2599e..000000000000 --- a/fs/xfs/linux-2.6/xfs_fs_subr.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_FS_SUBR_H__ | ||
19 | #define __XFS_FS_SUBR_H__ | ||
20 | |||
21 | extern int fs_noerr(void); | ||
22 | extern int fs_nosys(void); | ||
23 | extern void fs_noval(void); | ||
24 | |||
25 | #endif /* __XFS_FS_SUBR_H__ */ | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 699b60cbab9c..237f5ffb2ee8 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -23,24 +23,15 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_attr_sf.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_dinode.h" | 29 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
37 | #include "xfs_ioctl.h" | 31 | #include "xfs_ioctl.h" |
38 | #include "xfs_btree.h" | ||
39 | #include "xfs_ialloc.h" | ||
40 | #include "xfs_rtalloc.h" | 32 | #include "xfs_rtalloc.h" |
41 | #include "xfs_itable.h" | 33 | #include "xfs_itable.h" |
42 | #include "xfs_error.h" | 34 | #include "xfs_error.h" |
43 | #include "xfs_rw.h" | ||
44 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
45 | #include "xfs_bmap.h" | 36 | #include "xfs_bmap.h" |
46 | #include "xfs_buf_item.h" | 37 | #include "xfs_buf_item.h" |
@@ -679,10 +670,9 @@ xfs_ioc_bulkstat( | |||
679 | error = xfs_bulkstat_single(mp, &inlast, | 670 | error = xfs_bulkstat_single(mp, &inlast, |
680 | bulkreq.ubuffer, &done); | 671 | bulkreq.ubuffer, &done); |
681 | else /* XFS_IOC_FSBULKSTAT */ | 672 | else /* XFS_IOC_FSBULKSTAT */ |
682 | error = xfs_bulkstat(mp, &inlast, &count, | 673 | error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, |
683 | (bulkstat_one_pf)xfs_bulkstat_one, NULL, | 674 | sizeof(xfs_bstat_t), bulkreq.ubuffer, |
684 | sizeof(xfs_bstat_t), bulkreq.ubuffer, | 675 | &done); |
685 | BULKSTAT_FG_QUICK, &done); | ||
686 | 676 | ||
687 | if (error) | 677 | if (error) |
688 | return -error; | 678 | return -error; |
@@ -909,7 +899,7 @@ xfs_ioctl_setattr( | |||
909 | struct xfs_dquot *olddquot = NULL; | 899 | struct xfs_dquot *olddquot = NULL; |
910 | int code; | 900 | int code; |
911 | 901 | ||
912 | xfs_itrace_entry(ip); | 902 | trace_xfs_ioctl_setattr(ip); |
913 | 903 | ||
914 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 904 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
915 | return XFS_ERROR(EROFS); | 905 | return XFS_ERROR(EROFS); |
@@ -1044,8 +1034,7 @@ xfs_ioctl_setattr( | |||
1044 | } | 1034 | } |
1045 | } | 1035 | } |
1046 | 1036 | ||
1047 | xfs_trans_ijoin(tp, ip, lock_flags); | 1037 | xfs_trans_ijoin(tp, ip); |
1048 | xfs_trans_ihold(tp, ip); | ||
1049 | 1038 | ||
1050 | /* | 1039 | /* |
1051 | * Change file ownership. Must be the owner or privileged. | 1040 | * Change file ownership. Must be the owner or privileged. |
@@ -1117,16 +1106,7 @@ xfs_ioctl_setattr( | |||
1117 | xfs_qm_dqrele(udqp); | 1106 | xfs_qm_dqrele(udqp); |
1118 | xfs_qm_dqrele(gdqp); | 1107 | xfs_qm_dqrele(gdqp); |
1119 | 1108 | ||
1120 | if (code) | 1109 | return code; |
1121 | return code; | ||
1122 | |||
1123 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) { | ||
1124 | XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, | ||
1125 | NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0, | ||
1126 | (mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0); | ||
1127 | } | ||
1128 | |||
1129 | return 0; | ||
1130 | 1110 | ||
1131 | error_return: | 1111 | error_return: |
1132 | xfs_qm_dqrele(udqp); | 1112 | xfs_qm_dqrele(udqp); |
@@ -1302,7 +1282,7 @@ xfs_file_ioctl( | |||
1302 | if (filp->f_mode & FMODE_NOCMTIME) | 1282 | if (filp->f_mode & FMODE_NOCMTIME) |
1303 | ioflags |= IO_INVIS; | 1283 | ioflags |= IO_INVIS; |
1304 | 1284 | ||
1305 | xfs_itrace_entry(ip); | 1285 | trace_xfs_file_ioctl(ip); |
1306 | 1286 | ||
1307 | switch (cmd) { | 1287 | switch (cmd) { |
1308 | case XFS_IOC_ALLOCSP: | 1288 | case XFS_IOC_ALLOCSP: |
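With BULKSTAT_FG_QUICK and the private_data/bno/dibuff plumbing gone, xfs_bulkstat() now takes the formatter and the per-record size directly. A sketch of the simplified call and of the callback shape implied by xfs_bulkstat_one_compat() further down; parameter names are illustrative, and whether the bulkstat_one_pf typedef keeps this exact name is an assumption:

    /* per-inode formatter: only the user-buffer bookkeeping remains */
    typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
                                   xfs_ino_t        ino,     /* inode to report */
                                   void __user     *buffer,  /* user output buffer */
                                   int              ubsize,  /* size of that buffer */
                                   int             *ubused,  /* bytes consumed */
                                   int             *stat);   /* BULKSTAT_RV_... */

    /* the ioctl path now passes the formatter and record size directly */
    error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
                         sizeof(xfs_bstat_t), bulkreq.ubuffer, &done);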
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 9287135e9bfc..6c83f7f62dc9 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c | |||
@@ -28,12 +28,8 @@ | |||
28 | #include "xfs_trans.h" | 28 | #include "xfs_trans.h" |
29 | #include "xfs_sb.h" | 29 | #include "xfs_sb.h" |
30 | #include "xfs_ag.h" | 30 | #include "xfs_ag.h" |
31 | #include "xfs_dir2.h" | ||
32 | #include "xfs_dmapi.h" | ||
33 | #include "xfs_mount.h" | 31 | #include "xfs_mount.h" |
34 | #include "xfs_bmap_btree.h" | 32 | #include "xfs_bmap_btree.h" |
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dir2_sf.h" | ||
37 | #include "xfs_vnode.h" | 33 | #include "xfs_vnode.h" |
38 | #include "xfs_dinode.h" | 34 | #include "xfs_dinode.h" |
39 | #include "xfs_inode.h" | 35 | #include "xfs_inode.h" |
@@ -237,15 +233,12 @@ xfs_bulkstat_one_compat( | |||
237 | xfs_ino_t ino, /* inode number to get data for */ | 233 | xfs_ino_t ino, /* inode number to get data for */ |
238 | void __user *buffer, /* buffer to place output in */ | 234 | void __user *buffer, /* buffer to place output in */ |
239 | int ubsize, /* size of buffer */ | 235 | int ubsize, /* size of buffer */ |
240 | void *private_data, /* my private data */ | ||
241 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
242 | int *ubused, /* bytes used by me */ | 236 | int *ubused, /* bytes used by me */ |
243 | void *dibuff, /* on-disk inode buffer */ | ||
244 | int *stat) /* BULKSTAT_RV_... */ | 237 | int *stat) /* BULKSTAT_RV_... */ |
245 | { | 238 | { |
246 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, | 239 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, |
247 | xfs_bulkstat_one_fmt_compat, bno, | 240 | xfs_bulkstat_one_fmt_compat, |
248 | ubused, dibuff, stat); | 241 | ubused, stat); |
249 | } | 242 | } |
250 | 243 | ||
251 | /* copied from xfs_ioctl.c */ | 244 | /* copied from xfs_ioctl.c */ |
@@ -298,13 +291,11 @@ xfs_compat_ioc_bulkstat( | |||
298 | int res; | 291 | int res; |
299 | 292 | ||
300 | error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, | 293 | error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, |
301 | sizeof(compat_xfs_bstat_t), | 294 | sizeof(compat_xfs_bstat_t), 0, &res); |
302 | NULL, 0, NULL, NULL, &res); | ||
303 | } else if (cmd == XFS_IOC_FSBULKSTAT_32) { | 295 | } else if (cmd == XFS_IOC_FSBULKSTAT_32) { |
304 | error = xfs_bulkstat(mp, &inlast, &count, | 296 | error = xfs_bulkstat(mp, &inlast, &count, |
305 | xfs_bulkstat_one_compat, NULL, | 297 | xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), |
306 | sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, | 298 | bulkreq.ubuffer, &done); |
307 | BULKSTAT_FG_QUICK, &done); | ||
308 | } else | 299 | } else |
309 | error = XFS_ERROR(EINVAL); | 300 | error = XFS_ERROR(EINVAL); |
310 | if (error) | 301 | if (error) |
@@ -549,7 +540,7 @@ xfs_file_compat_ioctl( | |||
549 | if (filp->f_mode & FMODE_NOCMTIME) | 540 | if (filp->f_mode & FMODE_NOCMTIME) |
550 | ioflags |= IO_INVIS; | 541 | ioflags |= IO_INVIS; |
551 | 542 | ||
552 | xfs_itrace_entry(ip); | 543 | trace_xfs_file_compat_ioctl(ip); |
553 | 544 | ||
554 | switch (cmd) { | 545 | switch (cmd) { |
555 | /* No size or alignment issues on any arch */ | 546 | /* No size or alignment issues on any arch */ |
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 44f0b2de153e..536b81e63a3d 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c | |||
@@ -24,21 +24,13 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_alloc.h" | 27 | #include "xfs_alloc.h" |
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_quota.h" | 28 | #include "xfs_quota.h" |
31 | #include "xfs_mount.h" | 29 | #include "xfs_mount.h" |
32 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
33 | #include "xfs_alloc_btree.h" | ||
34 | #include "xfs_ialloc_btree.h" | ||
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | ||
37 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
38 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
39 | #include "xfs_bmap.h" | 33 | #include "xfs_bmap.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_ialloc.h" | ||
42 | #include "xfs_rtalloc.h" | 34 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 35 | #include "xfs_error.h" |
44 | #include "xfs_itable.h" | 36 | #include "xfs_itable.h" |
@@ -496,7 +488,7 @@ xfs_vn_getattr( | |||
496 | struct xfs_inode *ip = XFS_I(inode); | 488 | struct xfs_inode *ip = XFS_I(inode); |
497 | struct xfs_mount *mp = ip->i_mount; | 489 | struct xfs_mount *mp = ip->i_mount; |
498 | 490 | ||
499 | xfs_itrace_entry(ip); | 491 | trace_xfs_getattr(ip); |
500 | 492 | ||
501 | if (XFS_FORCED_SHUTDOWN(mp)) | 493 | if (XFS_FORCED_SHUTDOWN(mp)) |
502 | return XFS_ERROR(EIO); | 494 | return XFS_ERROR(EIO); |
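The xfs_itrace_entry() catch-all is being replaced by per-call-site tracepoints built on the xfs_inode_class event class (see the xfs_trace.h hunks near the end of this diff). A sketch of the pattern; the xfs_vn_getattr() prototype is reproduced from the kernel of this era rather than from the hunk, and the body is trimmed to the lines shown above:

    /* xfs_trace.h: one line per trace point */
    DEFINE_INODE_EVENT(xfs_getattr);

    /* xfs_iops.c: each function fires its own event */
    STATIC int
    xfs_vn_getattr(struct vfsmount *mnt, struct dentry *dentry,
                   struct kstat *stat)
    {
            struct xfs_inode  *ip = XFS_I(dentry->d_inode);
            struct xfs_mount  *mp = ip->i_mount;

            trace_xfs_getattr(ip);          /* was: xfs_itrace_entry(ip) */

            if (XFS_FORCED_SHUTDOWN(mp))
                    return XFS_ERROR(EIO);
            /* remainder of the function is unchanged by this diff */
            return 0;
    }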
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index facfb323a706..998a9d7fb9c8 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h | |||
@@ -87,7 +87,6 @@ | |||
87 | #include <xfs_aops.h> | 87 | #include <xfs_aops.h> |
88 | #include <xfs_super.h> | 88 | #include <xfs_super.h> |
89 | #include <xfs_globals.h> | 89 | #include <xfs_globals.h> |
90 | #include <xfs_fs_subr.h> | ||
91 | #include <xfs_buf.h> | 90 | #include <xfs_buf.h> |
92 | 91 | ||
93 | /* | 92 | /* |
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index 067cafbfc635..bfd5ac9d1f6f 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c | |||
@@ -16,7 +16,6 @@ | |||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ | 17 | */ |
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_dmapi.h" | ||
20 | #include "xfs_sb.h" | 19 | #include "xfs_sb.h" |
21 | #include "xfs_inum.h" | 20 | #include "xfs_inum.h" |
22 | #include "xfs_log.h" | 21 | #include "xfs_log.h" |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index f2d1718c9165..758df94690ed 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -25,14 +25,11 @@ | |||
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_alloc.h" | 27 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 28 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 29 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | 31 | #include "xfs_alloc_btree.h" |
33 | #include "xfs_ialloc_btree.h" | 32 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 33 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 34 | #include "xfs_inode.h" |
38 | #include "xfs_btree.h" | 35 | #include "xfs_btree.h" |
@@ -43,7 +40,6 @@ | |||
43 | #include "xfs_error.h" | 40 | #include "xfs_error.h" |
44 | #include "xfs_itable.h" | 41 | #include "xfs_itable.h" |
45 | #include "xfs_fsops.h" | 42 | #include "xfs_fsops.h" |
46 | #include "xfs_rw.h" | ||
47 | #include "xfs_attr.h" | 43 | #include "xfs_attr.h" |
48 | #include "xfs_buf_item.h" | 44 | #include "xfs_buf_item.h" |
49 | #include "xfs_utils.h" | 45 | #include "xfs_utils.h" |
@@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool; | |||
94 | #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and | 90 | #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and |
95 | * unwritten extent conversion */ | 91 | * unwritten extent conversion */ |
96 | #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ | 92 | #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ |
97 | #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ | ||
98 | #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ | 93 | #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ |
99 | #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ | 94 | #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ |
100 | #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ | 95 | #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ |
@@ -116,9 +111,6 @@ mempool_t *xfs_ioend_pool; | |||
116 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ | 111 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ |
117 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ | 112 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ |
118 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ | 113 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ |
119 | #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ | ||
120 | #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ | ||
121 | #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ | ||
122 | #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ | 114 | #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ |
123 | #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ | 115 | #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ |
124 | 116 | ||
@@ -172,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base) | |||
172 | STATIC int | 164 | STATIC int |
173 | xfs_parseargs( | 165 | xfs_parseargs( |
174 | struct xfs_mount *mp, | 166 | struct xfs_mount *mp, |
175 | char *options, | 167 | char *options) |
176 | char **mtpt) | ||
177 | { | 168 | { |
178 | struct super_block *sb = mp->m_super; | 169 | struct super_block *sb = mp->m_super; |
179 | char *this_char, *value, *eov; | 170 | char *this_char, *value, *eov; |
180 | int dsunit = 0; | 171 | int dsunit = 0; |
181 | int dswidth = 0; | 172 | int dswidth = 0; |
182 | int iosize = 0; | 173 | int iosize = 0; |
183 | int dmapi_implies_ikeep = 1; | ||
184 | __uint8_t iosizelog = 0; | 174 | __uint8_t iosizelog = 0; |
185 | 175 | ||
186 | /* | 176 | /* |
@@ -243,15 +233,10 @@ xfs_parseargs( | |||
243 | if (!mp->m_logname) | 233 | if (!mp->m_logname) |
244 | return ENOMEM; | 234 | return ENOMEM; |
245 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { | 235 | } else if (!strcmp(this_char, MNTOPT_MTPT)) { |
246 | if (!value || !*value) { | 236 | cmn_err(CE_WARN, |
247 | cmn_err(CE_WARN, | 237 | "XFS: %s option not allowed on this system", |
248 | "XFS: %s option requires an argument", | 238 | this_char); |
249 | this_char); | 239 | return EINVAL; |
250 | return EINVAL; | ||
251 | } | ||
252 | *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL); | ||
253 | if (!*mtpt) | ||
254 | return ENOMEM; | ||
255 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { | 240 | } else if (!strcmp(this_char, MNTOPT_RTDEV)) { |
256 | if (!value || !*value) { | 241 | if (!value || !*value) { |
257 | cmn_err(CE_WARN, | 242 | cmn_err(CE_WARN, |
@@ -288,8 +273,6 @@ xfs_parseargs( | |||
288 | mp->m_flags &= ~XFS_MOUNT_GRPID; | 273 | mp->m_flags &= ~XFS_MOUNT_GRPID; |
289 | } else if (!strcmp(this_char, MNTOPT_WSYNC)) { | 274 | } else if (!strcmp(this_char, MNTOPT_WSYNC)) { |
290 | mp->m_flags |= XFS_MOUNT_WSYNC; | 275 | mp->m_flags |= XFS_MOUNT_WSYNC; |
291 | } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { | ||
292 | mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; | ||
293 | } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { | 276 | } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { |
294 | mp->m_flags |= XFS_MOUNT_NORECOVERY; | 277 | mp->m_flags |= XFS_MOUNT_NORECOVERY; |
295 | } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { | 278 | } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { |
@@ -329,7 +312,6 @@ xfs_parseargs( | |||
329 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { | 312 | } else if (!strcmp(this_char, MNTOPT_IKEEP)) { |
330 | mp->m_flags |= XFS_MOUNT_IKEEP; | 313 | mp->m_flags |= XFS_MOUNT_IKEEP; |
331 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { | 314 | } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { |
332 | dmapi_implies_ikeep = 0; | ||
333 | mp->m_flags &= ~XFS_MOUNT_IKEEP; | 315 | mp->m_flags &= ~XFS_MOUNT_IKEEP; |
334 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { | 316 | } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { |
335 | mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; | 317 | mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; |
@@ -370,12 +352,6 @@ xfs_parseargs( | |||
370 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { | 352 | } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { |
371 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); | 353 | mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); |
372 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; | 354 | mp->m_qflags &= ~XFS_OQUOTA_ENFD; |
373 | } else if (!strcmp(this_char, MNTOPT_DMAPI)) { | ||
374 | mp->m_flags |= XFS_MOUNT_DMAPI; | ||
375 | } else if (!strcmp(this_char, MNTOPT_XDSM)) { | ||
376 | mp->m_flags |= XFS_MOUNT_DMAPI; | ||
377 | } else if (!strcmp(this_char, MNTOPT_DMI)) { | ||
378 | mp->m_flags |= XFS_MOUNT_DMAPI; | ||
379 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { | 355 | } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { |
380 | mp->m_flags |= XFS_MOUNT_DELAYLOG; | 356 | mp->m_flags |= XFS_MOUNT_DELAYLOG; |
381 | cmn_err(CE_WARN, | 357 | cmn_err(CE_WARN, |
@@ -387,9 +363,11 @@ xfs_parseargs( | |||
387 | cmn_err(CE_WARN, | 363 | cmn_err(CE_WARN, |
388 | "XFS: ihashsize no longer used, option is deprecated."); | 364 | "XFS: ihashsize no longer used, option is deprecated."); |
389 | } else if (!strcmp(this_char, "osyncisdsync")) { | 365 | } else if (!strcmp(this_char, "osyncisdsync")) { |
390 | /* no-op, this is now the default */ | ||
391 | cmn_err(CE_WARN, | 366 | cmn_err(CE_WARN, |
392 | "XFS: osyncisdsync is now the default, option is deprecated."); | 367 | "XFS: osyncisdsync has no effect, option is deprecated."); |
368 | } else if (!strcmp(this_char, "osyncisosync")) { | ||
369 | cmn_err(CE_WARN, | ||
370 | "XFS: osyncisosync has no effect, option is deprecated."); | ||
393 | } else if (!strcmp(this_char, "irixsgid")) { | 371 | } else if (!strcmp(this_char, "irixsgid")) { |
394 | cmn_err(CE_WARN, | 372 | cmn_err(CE_WARN, |
395 | "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); | 373 | "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); |
@@ -430,12 +408,6 @@ xfs_parseargs( | |||
430 | return EINVAL; | 408 | return EINVAL; |
431 | } | 409 | } |
432 | 410 | ||
433 | if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) { | ||
434 | printk("XFS: %s option needs the mount point option as well\n", | ||
435 | MNTOPT_DMAPI); | ||
436 | return EINVAL; | ||
437 | } | ||
438 | |||
439 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { | 411 | if ((dsunit && !dswidth) || (!dsunit && dswidth)) { |
440 | cmn_err(CE_WARN, | 412 | cmn_err(CE_WARN, |
441 | "XFS: sunit and swidth must be specified together"); | 413 | "XFS: sunit and swidth must be specified together"); |
@@ -449,18 +421,6 @@ xfs_parseargs( | |||
449 | return EINVAL; | 421 | return EINVAL; |
450 | } | 422 | } |
451 | 423 | ||
452 | /* | ||
453 | * Applications using DMI filesystems often expect the | ||
454 | * inode generation number to be monotonically increasing. | ||
455 | * If we delete inode chunks we break this assumption, so | ||
456 | * keep unused inode chunks on disk for DMI filesystems | ||
457 | * until we come up with a better solution. | ||
458 | * Note that if "ikeep" or "noikeep" mount options are | ||
459 | * supplied, then they are honored. | ||
460 | */ | ||
461 | if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep) | ||
462 | mp->m_flags |= XFS_MOUNT_IKEEP; | ||
463 | |||
464 | done: | 424 | done: |
465 | if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { | 425 | if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { |
466 | /* | 426 | /* |
@@ -539,10 +499,8 @@ xfs_showargs( | |||
539 | { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, | 499 | { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, |
540 | { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, | 500 | { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, |
541 | { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, | 501 | { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, |
542 | { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, | ||
543 | { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, | 502 | { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, |
544 | { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, | 503 | { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, |
545 | { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI }, | ||
546 | { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, | 504 | { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, |
547 | { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, | 505 | { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, |
548 | { 0, NULL } | 506 | { 0, NULL } |
@@ -947,7 +905,7 @@ xfs_fs_destroy_inode( | |||
947 | { | 905 | { |
948 | struct xfs_inode *ip = XFS_I(inode); | 906 | struct xfs_inode *ip = XFS_I(inode); |
949 | 907 | ||
950 | xfs_itrace_entry(ip); | 908 | trace_xfs_destroy_inode(ip); |
951 | 909 | ||
952 | XFS_STATS_INC(vn_reclaim); | 910 | XFS_STATS_INC(vn_reclaim); |
953 | 911 | ||
@@ -1063,10 +1021,8 @@ xfs_log_inode( | |||
1063 | * an inode in another recent transaction. So we play it safe and | 1021 | * an inode in another recent transaction. So we play it safe and |
1064 | * fire off the transaction anyway. | 1022 | * fire off the transaction anyway. |
1065 | */ | 1023 | */ |
1066 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 1024 | xfs_trans_ijoin(tp, ip); |
1067 | xfs_trans_ihold(tp, ip); | ||
1068 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 1025 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
1069 | xfs_trans_set_sync(tp); | ||
1070 | error = xfs_trans_commit(tp, 0); | 1026 | error = xfs_trans_commit(tp, 0); |
1071 | xfs_ilock_demote(ip, XFS_ILOCK_EXCL); | 1027 | xfs_ilock_demote(ip, XFS_ILOCK_EXCL); |
1072 | 1028 | ||
@@ -1082,27 +1038,18 @@ xfs_fs_write_inode( | |||
1082 | struct xfs_mount *mp = ip->i_mount; | 1038 | struct xfs_mount *mp = ip->i_mount; |
1083 | int error = EAGAIN; | 1039 | int error = EAGAIN; |
1084 | 1040 | ||
1085 | xfs_itrace_entry(ip); | 1041 | trace_xfs_write_inode(ip); |
1086 | 1042 | ||
1087 | if (XFS_FORCED_SHUTDOWN(mp)) | 1043 | if (XFS_FORCED_SHUTDOWN(mp)) |
1088 | return XFS_ERROR(EIO); | 1044 | return XFS_ERROR(EIO); |
1089 | 1045 | ||
1090 | if (wbc->sync_mode == WB_SYNC_ALL) { | 1046 | if (wbc->sync_mode == WB_SYNC_ALL) { |
1091 | /* | 1047 | /* |
1092 | * Make sure the inode has hit stable storage. By using the | 1048 | * Make sure the inode has made it into the log. Instead |
1093 | * log and the fsync transactions we reduce the IOs we have | 1049 | * of forcing it all the way to stable storage using a |
1094 | * to do here from two (log and inode) to just the log. | 1050 | * synchronous transaction we let the log force inside the |
1095 | * | 1051 | * ->sync_fs call do that for us, which reduces the number |
1096 | * Note: We still need to do a delwri write of the inode after | 1052 | * of synchronous log forces dramatically. |
1097 | * this to flush it to the backing buffer so that bulkstat | ||
1098 | * works properly if this is the first time the inode has been | ||
1099 | * written. Because we hold the ilock atomically over the | ||
1100 | * transaction commit and the inode flush we are guaranteed | ||
1101 | * that the inode is not pinned when it returns. If the flush | ||
1102 | * lock is already held, then the inode has already been | ||
1103 | * flushed once and we don't need to flush it again. Hence | ||
1104 | * the code will only flush the inode if it isn't already | ||
1105 | * being flushed. | ||
1106 | */ | 1053 | */ |
1107 | xfs_ioend_wait(ip); | 1054 | xfs_ioend_wait(ip); |
1108 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 1055 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
@@ -1116,27 +1063,29 @@ xfs_fs_write_inode( | |||
1116 | * We make this non-blocking if the inode is contended, return | 1063 | * We make this non-blocking if the inode is contended, return |
1117 | * EAGAIN to indicate to the caller that they did not succeed. | 1064 | * EAGAIN to indicate to the caller that they did not succeed. |
1118 | * This prevents the flush path from blocking on inodes inside | 1065 | * This prevents the flush path from blocking on inodes inside |
1119 | * another operation right now, they get caught later by xfs_sync. | 1066 | * another operation right now, they get caught later by |
1067 | * xfs_sync. | ||
1120 | */ | 1068 | */ |
1121 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) | 1069 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) |
1122 | goto out; | 1070 | goto out; |
1123 | } | ||
1124 | 1071 | ||
1125 | if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) | 1072 | if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) |
1126 | goto out_unlock; | 1073 | goto out_unlock; |
1127 | 1074 | ||
1128 | /* | 1075 | /* |
1129 | * Now we have the flush lock and the inode is not pinned, we can check | 1076 | * Now we have the flush lock and the inode is not pinned, we |
1130 | * if the inode is really clean as we know that there are no pending | 1077 | * can check if the inode is really clean as we know that |
1131 | * transaction completions, it is not waiting on the delayed write | 1078 | * there are no pending transaction completions, it is not |
1132 | * queue and there is no IO in progress. | 1079 | * waiting on the delayed write queue and there is no IO in |
1133 | */ | 1080 | * progress. |
1134 | if (xfs_inode_clean(ip)) { | 1081 | */ |
1135 | xfs_ifunlock(ip); | 1082 | if (xfs_inode_clean(ip)) { |
1136 | error = 0; | 1083 | xfs_ifunlock(ip); |
1137 | goto out_unlock; | 1084 | error = 0; |
1085 | goto out_unlock; | ||
1086 | } | ||
1087 | error = xfs_iflush(ip, 0); | ||
1138 | } | 1088 | } |
1139 | error = xfs_iflush(ip, 0); | ||
1140 | 1089 | ||
1141 | out_unlock: | 1090 | out_unlock: |
1142 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 1091 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
@@ -1156,7 +1105,8 @@ xfs_fs_clear_inode( | |||
1156 | { | 1105 | { |
1157 | xfs_inode_t *ip = XFS_I(inode); | 1106 | xfs_inode_t *ip = XFS_I(inode); |
1158 | 1107 | ||
1159 | xfs_itrace_entry(ip); | 1108 | trace_xfs_clear_inode(ip); |
1109 | |||
1160 | XFS_STATS_INC(vn_rele); | 1110 | XFS_STATS_INC(vn_rele); |
1161 | XFS_STATS_INC(vn_remove); | 1111 | XFS_STATS_INC(vn_remove); |
1162 | XFS_STATS_DEC(vn_active); | 1112 | XFS_STATS_DEC(vn_active); |
@@ -1193,22 +1143,13 @@ xfs_fs_put_super( | |||
1193 | { | 1143 | { |
1194 | struct xfs_mount *mp = XFS_M(sb); | 1144 | struct xfs_mount *mp = XFS_M(sb); |
1195 | 1145 | ||
1146 | /* | ||
1147 | * Unregister the memory shrinker before we tear down the mount | ||
1148 | * structure so we don't have memory reclaim racing with us here. | ||
1149 | */ | ||
1150 | xfs_inode_shrinker_unregister(mp); | ||
1196 | xfs_syncd_stop(mp); | 1151 | xfs_syncd_stop(mp); |
1197 | 1152 | ||
1198 | if (!(sb->s_flags & MS_RDONLY)) { | ||
1199 | /* | ||
1200 | * XXX(hch): this should be SYNC_WAIT. | ||
1201 | * | ||
1202 | * Or more likely not needed at all because the VFS is already | ||
1203 | * calling ->sync_fs after shutting down all filestem | ||
1204 | * operations and just before calling ->put_super. | ||
1205 | */ | ||
1206 | xfs_sync_data(mp, 0); | ||
1207 | xfs_sync_attr(mp, 0); | ||
1208 | } | ||
1209 | |||
1210 | XFS_SEND_PREUNMOUNT(mp); | ||
1211 | |||
1212 | /* | 1153 | /* |
1213 | * Blow away any referenced inode in the filestreams cache. | 1154 | * Blow away any referenced inode in the filestreams cache. |
1214 | * This can and will cause log traffic as inodes go inactive | 1155 | * This can and will cause log traffic as inodes go inactive |
@@ -1218,14 +1159,10 @@ xfs_fs_put_super( | |||
1218 | 1159 | ||
1219 | XFS_bflush(mp->m_ddev_targp); | 1160 | XFS_bflush(mp->m_ddev_targp); |
1220 | 1161 | ||
1221 | XFS_SEND_UNMOUNT(mp); | ||
1222 | |||
1223 | xfs_unmountfs(mp); | 1162 | xfs_unmountfs(mp); |
1224 | xfs_freesb(mp); | 1163 | xfs_freesb(mp); |
1225 | xfs_inode_shrinker_unregister(mp); | ||
1226 | xfs_icsb_destroy_counters(mp); | 1164 | xfs_icsb_destroy_counters(mp); |
1227 | xfs_close_devices(mp); | 1165 | xfs_close_devices(mp); |
1228 | xfs_dmops_put(mp); | ||
1229 | xfs_free_fsname(mp); | 1166 | xfs_free_fsname(mp); |
1230 | kfree(mp); | 1167 | kfree(mp); |
1231 | } | 1168 | } |
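The put_super hunks move the shrinker unregistration to the very start of teardown (per the new comment, so memory reclaim cannot race with the dismantled mount) and drop the DMAPI pre-unmount/unmount events along with the redundant data/attr sync. A condensed sketch of the resulting teardown order; the xfs_filestream_unmount() call sits in context lines not shown here and is an assumption:

    STATIC void
    xfs_fs_put_super(
            struct super_block      *sb)
    {
            struct xfs_mount        *mp = XFS_M(sb);

            xfs_inode_shrinker_unregister(mp);  /* first: stop reclaim racing us */
            xfs_syncd_stop(mp);

            xfs_filestream_unmount(mp);         /* flush the filestreams cache */
            XFS_bflush(mp->m_ddev_targp);

            xfs_unmountfs(mp);
            xfs_freesb(mp);
            xfs_icsb_destroy_counters(mp);
            xfs_close_devices(mp);
            xfs_free_fsname(mp);
            kfree(mp);
    }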
@@ -1543,7 +1480,6 @@ xfs_fs_fill_super( | |||
1543 | struct inode *root; | 1480 | struct inode *root; |
1544 | struct xfs_mount *mp = NULL; | 1481 | struct xfs_mount *mp = NULL; |
1545 | int flags = 0, error = ENOMEM; | 1482 | int flags = 0, error = ENOMEM; |
1546 | char *mtpt = NULL; | ||
1547 | 1483 | ||
1548 | mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); | 1484 | mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); |
1549 | if (!mp) | 1485 | if (!mp) |
@@ -1559,7 +1495,7 @@ xfs_fs_fill_super( | |||
1559 | mp->m_super = sb; | 1495 | mp->m_super = sb; |
1560 | sb->s_fs_info = mp; | 1496 | sb->s_fs_info = mp; |
1561 | 1497 | ||
1562 | error = xfs_parseargs(mp, (char *)data, &mtpt); | 1498 | error = xfs_parseargs(mp, (char *)data); |
1563 | if (error) | 1499 | if (error) |
1564 | goto out_free_fsname; | 1500 | goto out_free_fsname; |
1565 | 1501 | ||
@@ -1571,16 +1507,12 @@ xfs_fs_fill_super( | |||
1571 | #endif | 1507 | #endif |
1572 | sb->s_op = &xfs_super_operations; | 1508 | sb->s_op = &xfs_super_operations; |
1573 | 1509 | ||
1574 | error = xfs_dmops_get(mp); | ||
1575 | if (error) | ||
1576 | goto out_free_fsname; | ||
1577 | |||
1578 | if (silent) | 1510 | if (silent) |
1579 | flags |= XFS_MFSI_QUIET; | 1511 | flags |= XFS_MFSI_QUIET; |
1580 | 1512 | ||
1581 | error = xfs_open_devices(mp); | 1513 | error = xfs_open_devices(mp); |
1582 | if (error) | 1514 | if (error) |
1583 | goto out_put_dmops; | 1515 | goto out_free_fsname; |
1584 | 1516 | ||
1585 | if (xfs_icsb_init_counters(mp)) | 1517 | if (xfs_icsb_init_counters(mp)) |
1586 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; | 1518 | mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; |
@@ -1608,8 +1540,6 @@ xfs_fs_fill_super( | |||
1608 | if (error) | 1540 | if (error) |
1609 | goto out_filestream_unmount; | 1541 | goto out_filestream_unmount; |
1610 | 1542 | ||
1611 | XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname); | ||
1612 | |||
1613 | sb->s_magic = XFS_SB_MAGIC; | 1543 | sb->s_magic = XFS_SB_MAGIC; |
1614 | sb->s_blocksize = mp->m_sb.sb_blocksize; | 1544 | sb->s_blocksize = mp->m_sb.sb_blocksize; |
1615 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; | 1545 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; |
@@ -1638,7 +1568,6 @@ xfs_fs_fill_super( | |||
1638 | 1568 | ||
1639 | xfs_inode_shrinker_register(mp); | 1569 | xfs_inode_shrinker_register(mp); |
1640 | 1570 | ||
1641 | kfree(mtpt); | ||
1642 | return 0; | 1571 | return 0; |
1643 | 1572 | ||
1644 | out_filestream_unmount: | 1573 | out_filestream_unmount: |
@@ -1648,11 +1577,8 @@ xfs_fs_fill_super( | |||
1648 | out_destroy_counters: | 1577 | out_destroy_counters: |
1649 | xfs_icsb_destroy_counters(mp); | 1578 | xfs_icsb_destroy_counters(mp); |
1650 | xfs_close_devices(mp); | 1579 | xfs_close_devices(mp); |
1651 | out_put_dmops: | ||
1652 | xfs_dmops_put(mp); | ||
1653 | out_free_fsname: | 1580 | out_free_fsname: |
1654 | xfs_free_fsname(mp); | 1581 | xfs_free_fsname(mp); |
1655 | kfree(mtpt); | ||
1656 | kfree(mp); | 1582 | kfree(mp); |
1657 | out: | 1583 | out: |
1658 | return -error; | 1584 | return -error; |
@@ -1759,6 +1685,12 @@ xfs_init_zones(void) | |||
1759 | if (!xfs_trans_zone) | 1685 | if (!xfs_trans_zone) |
1760 | goto out_destroy_ifork_zone; | 1686 | goto out_destroy_ifork_zone; |
1761 | 1687 | ||
1688 | xfs_log_item_desc_zone = | ||
1689 | kmem_zone_init(sizeof(struct xfs_log_item_desc), | ||
1690 | "xfs_log_item_desc"); | ||
1691 | if (!xfs_log_item_desc_zone) | ||
1692 | goto out_destroy_trans_zone; | ||
1693 | |||
1762 | /* | 1694 | /* |
1763 | * The size of the zone allocated buf log item is the maximum | 1695 | * The size of the zone allocated buf log item is the maximum |
1764 | * size possible under XFS. This wastes a little bit of memory, | 1696 | * size possible under XFS. This wastes a little bit of memory, |
@@ -1768,7 +1700,7 @@ xfs_init_zones(void) | |||
1768 | (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / | 1700 | (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / |
1769 | NBWORD) * sizeof(int))), "xfs_buf_item"); | 1701 | NBWORD) * sizeof(int))), "xfs_buf_item"); |
1770 | if (!xfs_buf_item_zone) | 1702 | if (!xfs_buf_item_zone) |
1771 | goto out_destroy_trans_zone; | 1703 | goto out_destroy_log_item_desc_zone; |
1772 | 1704 | ||
1773 | xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + | 1705 | xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + |
1774 | ((XFS_EFD_MAX_FAST_EXTENTS - 1) * | 1706 | ((XFS_EFD_MAX_FAST_EXTENTS - 1) * |
@@ -1805,6 +1737,8 @@ xfs_init_zones(void) | |||
1805 | kmem_zone_destroy(xfs_efd_zone); | 1737 | kmem_zone_destroy(xfs_efd_zone); |
1806 | out_destroy_buf_item_zone: | 1738 | out_destroy_buf_item_zone: |
1807 | kmem_zone_destroy(xfs_buf_item_zone); | 1739 | kmem_zone_destroy(xfs_buf_item_zone); |
1740 | out_destroy_log_item_desc_zone: | ||
1741 | kmem_zone_destroy(xfs_log_item_desc_zone); | ||
1808 | out_destroy_trans_zone: | 1742 | out_destroy_trans_zone: |
1809 | kmem_zone_destroy(xfs_trans_zone); | 1743 | kmem_zone_destroy(xfs_trans_zone); |
1810 | out_destroy_ifork_zone: | 1744 | out_destroy_ifork_zone: |
@@ -1835,6 +1769,7 @@ xfs_destroy_zones(void) | |||
1835 | kmem_zone_destroy(xfs_efi_zone); | 1769 | kmem_zone_destroy(xfs_efi_zone); |
1836 | kmem_zone_destroy(xfs_efd_zone); | 1770 | kmem_zone_destroy(xfs_efd_zone); |
1837 | kmem_zone_destroy(xfs_buf_item_zone); | 1771 | kmem_zone_destroy(xfs_buf_item_zone); |
1772 | kmem_zone_destroy(xfs_log_item_desc_zone); | ||
1838 | kmem_zone_destroy(xfs_trans_zone); | 1773 | kmem_zone_destroy(xfs_trans_zone); |
1839 | kmem_zone_destroy(xfs_ifork_zone); | 1774 | kmem_zone_destroy(xfs_ifork_zone); |
1840 | kmem_zone_destroy(xfs_dabuf_zone); | 1775 | kmem_zone_destroy(xfs_dabuf_zone); |
@@ -1883,7 +1818,6 @@ init_xfs_fs(void) | |||
1883 | goto out_cleanup_procfs; | 1818 | goto out_cleanup_procfs; |
1884 | 1819 | ||
1885 | vfs_initquota(); | 1820 | vfs_initquota(); |
1886 | xfs_inode_shrinker_init(); | ||
1887 | 1821 | ||
1888 | error = register_filesystem(&xfs_fs_type); | 1822 | error = register_filesystem(&xfs_fs_type); |
1889 | if (error) | 1823 | if (error) |
@@ -1911,7 +1845,6 @@ exit_xfs_fs(void) | |||
1911 | { | 1845 | { |
1912 | vfs_exitquota(); | 1846 | vfs_exitquota(); |
1913 | unregister_filesystem(&xfs_fs_type); | 1847 | unregister_filesystem(&xfs_fs_type); |
1914 | xfs_inode_shrinker_destroy(); | ||
1915 | xfs_sysctl_unregister(); | 1848 | xfs_sysctl_unregister(); |
1916 | xfs_cleanup_procfs(); | 1849 | xfs_cleanup_procfs(); |
1917 | xfs_buf_terminate(); | 1850 | xfs_buf_terminate(); |
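The new xfs_log_item_desc_zone slots into the usual goto-chain error unwinding of xfs_init_zones(): each allocation bails to a label that tears down only what was set up before it, and xfs_destroy_zones() releases the zones in reverse order. A trimmed sketch of the idiom, limited to the lines visible in the hunks above:

    xfs_log_item_desc_zone =
            kmem_zone_init(sizeof(struct xfs_log_item_desc),
                           "xfs_log_item_desc");
    if (!xfs_log_item_desc_zone)
            goto out_destroy_trans_zone;    /* undo earlier allocations only */

    /* later allocations unwind through the new label ... */

    out_destroy_log_item_desc_zone:
            kmem_zone_destroy(xfs_log_item_desc_zone);
    out_destroy_trans_zone:
            kmem_zone_destroy(xfs_trans_zone);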
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 519618e9279e..1ef4a4d2d997 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h | |||
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void); | |||
56 | # define XFS_BIGFS_STRING | 56 | # define XFS_BIGFS_STRING |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #ifdef CONFIG_XFS_DMAPI | ||
60 | # define XFS_DMAPI_STRING "dmapi support, " | ||
61 | #else | ||
62 | # define XFS_DMAPI_STRING | ||
63 | #endif | ||
64 | |||
65 | #ifdef DEBUG | 59 | #ifdef DEBUG |
66 | # define XFS_DBG_STRING "debug" | 60 | # define XFS_DBG_STRING "debug" |
67 | #else | 61 | #else |
@@ -72,7 +66,6 @@ extern void xfs_qm_exit(void); | |||
72 | XFS_SECURITY_STRING \ | 66 | XFS_SECURITY_STRING \ |
73 | XFS_REALTIME_STRING \ | 67 | XFS_REALTIME_STRING \ |
74 | XFS_BIGFS_STRING \ | 68 | XFS_BIGFS_STRING \ |
75 | XFS_DMAPI_STRING \ | ||
76 | XFS_DBG_STRING /* DBG must be last */ | 69 | XFS_DBG_STRING /* DBG must be last */ |
77 | 70 | ||
78 | struct xfs_inode; | 71 | struct xfs_inode; |
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index ef7f0218bccb..dfcbd98d1599 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
@@ -24,25 +24,14 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_inode.h" | 29 | #include "xfs_inode.h" |
37 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
38 | #include "xfs_error.h" | 31 | #include "xfs_error.h" |
39 | #include "xfs_mru_cache.h" | ||
40 | #include "xfs_filestream.h" | 32 | #include "xfs_filestream.h" |
41 | #include "xfs_vnodeops.h" | 33 | #include "xfs_vnodeops.h" |
42 | #include "xfs_utils.h" | ||
43 | #include "xfs_buf_item.h" | ||
44 | #include "xfs_inode_item.h" | 34 | #include "xfs_inode_item.h" |
45 | #include "xfs_rw.h" | ||
46 | #include "xfs_quota.h" | 35 | #include "xfs_quota.h" |
47 | #include "xfs_trace.h" | 36 | #include "xfs_trace.h" |
48 | 37 | ||
@@ -144,6 +133,41 @@ restart: | |||
144 | return last_error; | 133 | return last_error; |
145 | } | 134 | } |
146 | 135 | ||
136 | /* | ||
137 | * Select the next per-ag structure to iterate during the walk. The reclaim | ||
138 | * walk is optimised only to walk AGs with reclaimable inodes in them. | ||
139 | */ | ||
140 | static struct xfs_perag * | ||
141 | xfs_inode_ag_iter_next_pag( | ||
142 | struct xfs_mount *mp, | ||
143 | xfs_agnumber_t *first, | ||
144 | int tag) | ||
145 | { | ||
146 | struct xfs_perag *pag = NULL; | ||
147 | |||
148 | if (tag == XFS_ICI_RECLAIM_TAG) { | ||
149 | int found; | ||
150 | int ref; | ||
151 | |||
152 | spin_lock(&mp->m_perag_lock); | ||
153 | found = radix_tree_gang_lookup_tag(&mp->m_perag_tree, | ||
154 | (void **)&pag, *first, 1, tag); | ||
155 | if (found <= 0) { | ||
156 | spin_unlock(&mp->m_perag_lock); | ||
157 | return NULL; | ||
158 | } | ||
159 | *first = pag->pag_agno + 1; | ||
160 | /* open coded pag reference increment */ | ||
161 | ref = atomic_inc_return(&pag->pag_ref); | ||
162 | spin_unlock(&mp->m_perag_lock); | ||
163 | trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_); | ||
164 | } else { | ||
165 | pag = xfs_perag_get(mp, *first); | ||
166 | (*first)++; | ||
167 | } | ||
168 | return pag; | ||
169 | } | ||
170 | |||
147 | int | 171 | int |
148 | xfs_inode_ag_iterator( | 172 | xfs_inode_ag_iterator( |
149 | struct xfs_mount *mp, | 173 | struct xfs_mount *mp, |
@@ -154,16 +178,15 @@ xfs_inode_ag_iterator( | |||
154 | int exclusive, | 178 | int exclusive, |
155 | int *nr_to_scan) | 179 | int *nr_to_scan) |
156 | { | 180 | { |
181 | struct xfs_perag *pag; | ||
157 | int error = 0; | 182 | int error = 0; |
158 | int last_error = 0; | 183 | int last_error = 0; |
159 | xfs_agnumber_t ag; | 184 | xfs_agnumber_t ag; |
160 | int nr; | 185 | int nr; |
161 | 186 | ||
162 | nr = nr_to_scan ? *nr_to_scan : INT_MAX; | 187 | nr = nr_to_scan ? *nr_to_scan : INT_MAX; |
163 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { | 188 | ag = 0; |
164 | struct xfs_perag *pag; | 189 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) { |
165 | |||
166 | pag = xfs_perag_get(mp, ag); | ||
167 | error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, | 190 | error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, |
168 | exclusive, &nr); | 191 | exclusive, &nr); |
169 | xfs_perag_put(pag); | 192 | xfs_perag_put(pag); |
@@ -285,7 +308,7 @@ xfs_sync_inode_attr( | |||
285 | /* | 308 | /* |
286 | * Write out pagecache data for the whole filesystem. | 309 | * Write out pagecache data for the whole filesystem. |
287 | */ | 310 | */ |
288 | int | 311 | STATIC int |
289 | xfs_sync_data( | 312 | xfs_sync_data( |
290 | struct xfs_mount *mp, | 313 | struct xfs_mount *mp, |
291 | int flags) | 314 | int flags) |
@@ -306,7 +329,7 @@ xfs_sync_data( | |||
306 | /* | 329 | /* |
307 | * Write out inode metadata (attributes) for the whole filesystem. | 330 | * Write out inode metadata (attributes) for the whole filesystem. |
308 | */ | 331 | */ |
309 | int | 332 | STATIC int |
310 | xfs_sync_attr( | 333 | xfs_sync_attr( |
311 | struct xfs_mount *mp, | 334 | struct xfs_mount *mp, |
312 | int flags) | 335 | int flags) |
@@ -339,8 +362,7 @@ xfs_commit_dummy_trans( | |||
339 | 362 | ||
340 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 363 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
341 | 364 | ||
342 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 365 | xfs_trans_ijoin(tp, ip); |
343 | xfs_trans_ihold(tp, ip); | ||
344 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 366 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
345 | error = xfs_trans_commit(tp, 0); | 367 | error = xfs_trans_commit(tp, 0); |
346 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 368 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
@@ -640,6 +662,17 @@ __xfs_inode_set_reclaim_tag( | |||
640 | radix_tree_tag_set(&pag->pag_ici_root, | 662 | radix_tree_tag_set(&pag->pag_ici_root, |
641 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), | 663 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), |
642 | XFS_ICI_RECLAIM_TAG); | 664 | XFS_ICI_RECLAIM_TAG); |
665 | |||
666 | if (!pag->pag_ici_reclaimable) { | ||
667 | /* propagate the reclaim tag up into the perag radix tree */ | ||
668 | spin_lock(&ip->i_mount->m_perag_lock); | ||
669 | radix_tree_tag_set(&ip->i_mount->m_perag_tree, | ||
670 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | ||
671 | XFS_ICI_RECLAIM_TAG); | ||
672 | spin_unlock(&ip->i_mount->m_perag_lock); | ||
673 | trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, | ||
674 | -1, _RET_IP_); | ||
675 | } | ||
643 | pag->pag_ici_reclaimable++; | 676 | pag->pag_ici_reclaimable++; |
644 | } | 677 | } |
645 | 678 | ||
@@ -674,6 +707,16 @@ __xfs_inode_clear_reclaim_tag( | |||
674 | radix_tree_tag_clear(&pag->pag_ici_root, | 707 | radix_tree_tag_clear(&pag->pag_ici_root, |
675 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); | 708 | XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); |
676 | pag->pag_ici_reclaimable--; | 709 | pag->pag_ici_reclaimable--; |
710 | if (!pag->pag_ici_reclaimable) { | ||
711 | /* clear the reclaim tag from the perag radix tree */ | ||
712 | spin_lock(&ip->i_mount->m_perag_lock); | ||
713 | radix_tree_tag_clear(&ip->i_mount->m_perag_tree, | ||
714 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | ||
715 | XFS_ICI_RECLAIM_TAG); | ||
716 | spin_unlock(&ip->i_mount->m_perag_lock); | ||
717 | trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno, | ||
718 | -1, _RET_IP_); | ||
719 | } | ||
677 | } | 720 | } |
678 | 721 | ||
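Both reclaim-tag helpers now mirror the per-inode tag into the per-mount m_perag_tree: the AG is tagged when its first reclaimable inode appears and untagged when the last one goes away, so the reclaim walk and the shrinker only visit AGs that actually have work. A sketch of the set side, reduced to the locking and tagging shown above (trace call omitted):

    /* first reclaimable inode in this AG: tag the AG itself */
    if (!pag->pag_ici_reclaimable) {
            spin_lock(&ip->i_mount->m_perag_lock);
            radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                               XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                               XFS_ICI_RECLAIM_TAG);
            spin_unlock(&ip->i_mount->m_perag_lock);
    }
    pag->pag_ici_reclaimable++;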
679 | /* | 722 | /* |
@@ -812,7 +855,36 @@ out: | |||
812 | reclaim: | 855 | reclaim: |
813 | xfs_ifunlock(ip); | 856 | xfs_ifunlock(ip); |
814 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 857 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
815 | xfs_ireclaim(ip); | 858 | |
859 | XFS_STATS_INC(xs_ig_reclaims); | ||
860 | /* | ||
861 | * Remove the inode from the per-AG radix tree. | ||
862 | * | ||
863 | * Because radix_tree_delete won't complain even if the item was never | ||
864 | * added to the tree, assert that it's been there before to catch | ||
865 | * problems with the inode lifetime early on. | ||
866 | */ | ||
867 | write_lock(&pag->pag_ici_lock); | ||
868 | if (!radix_tree_delete(&pag->pag_ici_root, | ||
869 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) | ||
870 | ASSERT(0); | ||
871 | write_unlock(&pag->pag_ici_lock); | ||
872 | |||
873 | /* | ||
874 | * Here we do an (almost) spurious inode lock in order to coordinate | ||
875 | * with inode cache radix tree lookups. This is because the lookup | ||
876 | * can reference the inodes in the cache without taking references. | ||
877 | * | ||
878 | * We make that OK here by ensuring that we wait until the inode is | ||
879 | * unlocked after the lookup before we go ahead and free it. We get | ||
880 | * both the ilock and the iolock because the code may need to drop the | ||
881 | * ilock one but will still hold the iolock. | ||
882 | */ | ||
883 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
884 | xfs_qm_dqdetach(ip); | ||
885 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
886 | |||
887 | xfs_inode_free(ip); | ||
816 | return error; | 888 | return error; |
817 | 889 | ||
818 | } | 890 | } |
@@ -828,83 +900,52 @@ xfs_reclaim_inodes( | |||
828 | 900 | ||
829 | /* | 901 | /* |
830 | * Shrinker infrastructure. | 902 | * Shrinker infrastructure. |
831 | * | ||
832 | * This is all far more complex than it needs to be. It adds a global list of | ||
833 | * mounts because the shrinkers can only call a global context. We need to make | ||
834 | * the shrinkers pass a context to avoid the need for global state. | ||
835 | */ | 903 | */ |
836 | static LIST_HEAD(xfs_mount_list); | ||
837 | static struct rw_semaphore xfs_mount_list_lock; | ||
838 | |||
839 | static int | 904 | static int |
840 | xfs_reclaim_inode_shrink( | 905 | xfs_reclaim_inode_shrink( |
906 | struct shrinker *shrink, | ||
841 | int nr_to_scan, | 907 | int nr_to_scan, |
842 | gfp_t gfp_mask) | 908 | gfp_t gfp_mask) |
843 | { | 909 | { |
844 | struct xfs_mount *mp; | 910 | struct xfs_mount *mp; |
845 | struct xfs_perag *pag; | 911 | struct xfs_perag *pag; |
846 | xfs_agnumber_t ag; | 912 | xfs_agnumber_t ag; |
847 | int reclaimable = 0; | 913 | int reclaimable; |
848 | 914 | ||
915 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); | ||
849 | if (nr_to_scan) { | 916 | if (nr_to_scan) { |
850 | if (!(gfp_mask & __GFP_FS)) | 917 | if (!(gfp_mask & __GFP_FS)) |
851 | return -1; | 918 | return -1; |
852 | 919 | ||
853 | down_read(&xfs_mount_list_lock); | 920 | xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, |
854 | list_for_each_entry(mp, &xfs_mount_list, m_mplist) { | ||
855 | xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0, | ||
856 | XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); | 921 | XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan); |
857 | if (nr_to_scan <= 0) | 922 | /* if we don't exhaust the scan, don't bother coming back */ |
858 | break; | 923 | if (nr_to_scan > 0) |
859 | } | 924 | return -1; |
860 | up_read(&xfs_mount_list_lock); | 925 | } |
861 | } | ||
862 | 926 | ||
863 | down_read(&xfs_mount_list_lock); | 927 | reclaimable = 0; |
864 | list_for_each_entry(mp, &xfs_mount_list, m_mplist) { | 928 | ag = 0; |
865 | for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { | 929 | while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, |
866 | pag = xfs_perag_get(mp, ag); | 930 | XFS_ICI_RECLAIM_TAG))) { |
867 | reclaimable += pag->pag_ici_reclaimable; | 931 | reclaimable += pag->pag_ici_reclaimable; |
868 | xfs_perag_put(pag); | 932 | xfs_perag_put(pag); |
869 | } | ||
870 | } | 933 | } |
871 | up_read(&xfs_mount_list_lock); | ||
872 | return reclaimable; | 934 | return reclaimable; |
873 | } | 935 | } |
874 | 936 | ||
875 | static struct shrinker xfs_inode_shrinker = { | ||
876 | .shrink = xfs_reclaim_inode_shrink, | ||
877 | .seeks = DEFAULT_SEEKS, | ||
878 | }; | ||
879 | |||
880 | void __init | ||
881 | xfs_inode_shrinker_init(void) | ||
882 | { | ||
883 | init_rwsem(&xfs_mount_list_lock); | ||
884 | register_shrinker(&xfs_inode_shrinker); | ||
885 | } | ||
886 | |||
887 | void | ||
888 | xfs_inode_shrinker_destroy(void) | ||
889 | { | ||
890 | ASSERT(list_empty(&xfs_mount_list)); | ||
891 | unregister_shrinker(&xfs_inode_shrinker); | ||
892 | } | ||
893 | |||
894 | void | 937 | void |
895 | xfs_inode_shrinker_register( | 938 | xfs_inode_shrinker_register( |
896 | struct xfs_mount *mp) | 939 | struct xfs_mount *mp) |
897 | { | 940 | { |
898 | down_write(&xfs_mount_list_lock); | 941 | mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink; |
899 | list_add_tail(&mp->m_mplist, &xfs_mount_list); | 942 | mp->m_inode_shrink.seeks = DEFAULT_SEEKS; |
900 | up_write(&xfs_mount_list_lock); | 943 | register_shrinker(&mp->m_inode_shrink); |
901 | } | 944 | } |
902 | 945 | ||
903 | void | 946 | void |
904 | xfs_inode_shrinker_unregister( | 947 | xfs_inode_shrinker_unregister( |
905 | struct xfs_mount *mp) | 948 | struct xfs_mount *mp) |
906 | { | 949 | { |
907 | down_write(&xfs_mount_list_lock); | 950 | unregister_shrinker(&mp->m_inode_shrink); |
908 | list_del(&mp->m_mplist); | ||
909 | up_write(&xfs_mount_list_lock); | ||
910 | } | 951 | } |
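The shrinker stops being global state guarded by a mount list and becomes a struct shrinker embedded in each xfs_mount (the m_inode_shrink member is declared in xfs_mount.h, outside this diff). The callback recovers its mount with container_of() and only walks that mount's reclaim-tagged AGs. Condensed from the hunks above, with the scan/count body elided:

    static int
    xfs_reclaim_inode_shrink(
            struct shrinker *shrink,
            int             nr_to_scan,
            gfp_t           gfp_mask)
    {
            /* recover the owning mount from the embedded shrinker */
            struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
                                                m_inode_shrink);

            if (nr_to_scan && !(gfp_mask & __GFP_FS))
                    return -1;
            /* scan and/or count reclaimable inodes for this mount only */
            return 0;
    }

    void
    xfs_inode_shrinker_register(
            struct xfs_mount        *mp)
    {
            mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
            mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
            register_shrinker(&mp->m_inode_shrink);
    }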
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index cdcbaaca9880..fe78726196f8 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h | |||
@@ -35,9 +35,6 @@ typedef struct xfs_sync_work { | |||
35 | int xfs_syncd_init(struct xfs_mount *mp); | 35 | int xfs_syncd_init(struct xfs_mount *mp); |
36 | void xfs_syncd_stop(struct xfs_mount *mp); | 36 | void xfs_syncd_stop(struct xfs_mount *mp); |
37 | 37 | ||
38 | int xfs_sync_attr(struct xfs_mount *mp, int flags); | ||
39 | int xfs_sync_data(struct xfs_mount *mp, int flags); | ||
40 | |||
41 | int xfs_quiesce_data(struct xfs_mount *mp); | 38 | int xfs_quiesce_data(struct xfs_mount *mp); |
42 | void xfs_quiesce_attr(struct xfs_mount *mp); | 39 | void xfs_quiesce_attr(struct xfs_mount *mp); |
43 | 40 | ||
@@ -55,8 +52,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp, | |||
55 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), | 52 | int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), |
56 | int flags, int tag, int write_lock, int *nr_to_scan); | 53 | int flags, int tag, int write_lock, int *nr_to_scan); |
57 | 54 | ||
58 | void xfs_inode_shrinker_init(void); | ||
59 | void xfs_inode_shrinker_destroy(void); | ||
60 | void xfs_inode_shrinker_register(struct xfs_mount *mp); | 55 | void xfs_inode_shrinker_register(struct xfs_mount *mp); |
61 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); | 56 | void xfs_inode_shrinker_unregister(struct xfs_mount *mp); |
62 | 57 | ||
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c index d12be8470cba..88d25d4aa56e 100644 --- a/fs/xfs/linux-2.6/xfs_trace.c +++ b/fs/xfs/linux-2.6/xfs_trace.c | |||
@@ -24,17 +24,13 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_da_btree.h" | 27 | #include "xfs_da_btree.h" |
29 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
31 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
32 | #include "xfs_dir2_sf.h" | ||
33 | #include "xfs_attr_sf.h" | ||
34 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
35 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
36 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
37 | #include "xfs_dmapi.h" | ||
38 | #include "xfs_mount.h" | 34 | #include "xfs_mount.h" |
39 | #include "xfs_ialloc.h" | 35 | #include "xfs_ialloc.h" |
40 | #include "xfs_itable.h" | 36 | #include "xfs_itable.h" |
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 73d5aa117384..c657cdca2cd2 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h | |||
@@ -124,7 +124,10 @@ DEFINE_EVENT(xfs_perag_class, name, \ | |||
124 | unsigned long caller_ip), \ | 124 | unsigned long caller_ip), \ |
125 | TP_ARGS(mp, agno, refcount, caller_ip)) | 125 | TP_ARGS(mp, agno, refcount, caller_ip)) |
126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); | 126 | DEFINE_PERAG_REF_EVENT(xfs_perag_get); |
127 | DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim); | ||
127 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); | 128 | DEFINE_PERAG_REF_EVENT(xfs_perag_put); |
129 | DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); | ||
130 | DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); | ||
128 | 131 | ||
129 | TRACE_EVENT(xfs_attr_list_node_descend, | 132 | TRACE_EVENT(xfs_attr_list_node_descend, |
130 | TP_PROTO(struct xfs_attr_list_context *ctx, | 133 | TP_PROTO(struct xfs_attr_list_context *ctx, |
@@ -314,8 +317,6 @@ DEFINE_BUF_EVENT(xfs_buf_init); | |||
314 | DEFINE_BUF_EVENT(xfs_buf_free); | 317 | DEFINE_BUF_EVENT(xfs_buf_free); |
315 | DEFINE_BUF_EVENT(xfs_buf_hold); | 318 | DEFINE_BUF_EVENT(xfs_buf_hold); |
316 | DEFINE_BUF_EVENT(xfs_buf_rele); | 319 | DEFINE_BUF_EVENT(xfs_buf_rele); |
317 | DEFINE_BUF_EVENT(xfs_buf_pin); | ||
318 | DEFINE_BUF_EVENT(xfs_buf_unpin); | ||
319 | DEFINE_BUF_EVENT(xfs_buf_iodone); | 320 | DEFINE_BUF_EVENT(xfs_buf_iodone); |
320 | DEFINE_BUF_EVENT(xfs_buf_iorequest); | 321 | DEFINE_BUF_EVENT(xfs_buf_iorequest); |
321 | DEFINE_BUF_EVENT(xfs_buf_bawrite); | 322 | DEFINE_BUF_EVENT(xfs_buf_bawrite); |
@@ -538,7 +539,7 @@ DEFINE_LOCK_EVENT(xfs_ilock_nowait); | |||
538 | DEFINE_LOCK_EVENT(xfs_ilock_demote); | 539 | DEFINE_LOCK_EVENT(xfs_ilock_demote); |
539 | DEFINE_LOCK_EVENT(xfs_iunlock); | 540 | DEFINE_LOCK_EVENT(xfs_iunlock); |
540 | 541 | ||
541 | DECLARE_EVENT_CLASS(xfs_iget_class, | 542 | DECLARE_EVENT_CLASS(xfs_inode_class, |
542 | TP_PROTO(struct xfs_inode *ip), | 543 | TP_PROTO(struct xfs_inode *ip), |
543 | TP_ARGS(ip), | 544 | TP_ARGS(ip), |
544 | TP_STRUCT__entry( | 545 | TP_STRUCT__entry( |
@@ -554,16 +555,38 @@ DECLARE_EVENT_CLASS(xfs_iget_class, | |||
554 | __entry->ino) | 555 | __entry->ino) |
555 | ) | 556 | ) |
556 | 557 | ||
557 | #define DEFINE_IGET_EVENT(name) \ | 558 | #define DEFINE_INODE_EVENT(name) \ |
558 | DEFINE_EVENT(xfs_iget_class, name, \ | 559 | DEFINE_EVENT(xfs_inode_class, name, \ |
559 | TP_PROTO(struct xfs_inode *ip), \ | 560 | TP_PROTO(struct xfs_inode *ip), \ |
560 | TP_ARGS(ip)) | 561 | TP_ARGS(ip)) |
561 | DEFINE_IGET_EVENT(xfs_iget_skip); | 562 | DEFINE_INODE_EVENT(xfs_iget_skip); |
562 | DEFINE_IGET_EVENT(xfs_iget_reclaim); | 563 | DEFINE_INODE_EVENT(xfs_iget_reclaim); |
563 | DEFINE_IGET_EVENT(xfs_iget_found); | 564 | DEFINE_INODE_EVENT(xfs_iget_reclaim_fail); |
564 | DEFINE_IGET_EVENT(xfs_iget_alloc); | 565 | DEFINE_INODE_EVENT(xfs_iget_hit); |
565 | 566 | DEFINE_INODE_EVENT(xfs_iget_miss); | |
566 | DECLARE_EVENT_CLASS(xfs_inode_class, | 567 | |
568 | DEFINE_INODE_EVENT(xfs_getattr); | ||
569 | DEFINE_INODE_EVENT(xfs_setattr); | ||
570 | DEFINE_INODE_EVENT(xfs_readlink); | ||
571 | DEFINE_INODE_EVENT(xfs_alloc_file_space); | ||
572 | DEFINE_INODE_EVENT(xfs_free_file_space); | ||
573 | DEFINE_INODE_EVENT(xfs_readdir); | ||
574 | #ifdef CONFIG_XFS_POSIX_ACL | ||
575 | DEFINE_INODE_EVENT(xfs_check_acl); | ||
576 | #endif | ||
577 | DEFINE_INODE_EVENT(xfs_vm_bmap); | ||
578 | DEFINE_INODE_EVENT(xfs_file_ioctl); | ||
579 | DEFINE_INODE_EVENT(xfs_file_compat_ioctl); | ||
580 | DEFINE_INODE_EVENT(xfs_ioctl_setattr); | ||
581 | DEFINE_INODE_EVENT(xfs_file_fsync); | ||
582 | DEFINE_INODE_EVENT(xfs_destroy_inode); | ||
583 | DEFINE_INODE_EVENT(xfs_write_inode); | ||
584 | DEFINE_INODE_EVENT(xfs_clear_inode); | ||
585 | |||
586 | DEFINE_INODE_EVENT(xfs_dquot_dqalloc); | ||
587 | DEFINE_INODE_EVENT(xfs_dquot_dqdetach); | ||
588 | |||
589 | DECLARE_EVENT_CLASS(xfs_iref_class, | ||
567 | TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), | 590 | TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), |
568 | TP_ARGS(ip, caller_ip), | 591 | TP_ARGS(ip, caller_ip), |
569 | TP_STRUCT__entry( | 592 | TP_STRUCT__entry( |
@@ -588,20 +611,71 @@ DECLARE_EVENT_CLASS(xfs_inode_class, | |||
588 | (char *)__entry->caller_ip) | 611 | (char *)__entry->caller_ip) |
589 | ) | 612 | ) |
590 | 613 | ||
591 | #define DEFINE_INODE_EVENT(name) \ | 614 | #define DEFINE_IREF_EVENT(name) \ |
592 | DEFINE_EVENT(xfs_inode_class, name, \ | 615 | DEFINE_EVENT(xfs_iref_class, name, \ |
593 | TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ | 616 | TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ |
594 | TP_ARGS(ip, caller_ip)) | 617 | TP_ARGS(ip, caller_ip)) |
595 | DEFINE_INODE_EVENT(xfs_ihold); | 618 | DEFINE_IREF_EVENT(xfs_ihold); |
596 | DEFINE_INODE_EVENT(xfs_irele); | 619 | DEFINE_IREF_EVENT(xfs_irele); |
597 | DEFINE_INODE_EVENT(xfs_inode_pin); | 620 | DEFINE_IREF_EVENT(xfs_inode_pin); |
598 | DEFINE_INODE_EVENT(xfs_inode_unpin); | 621 | DEFINE_IREF_EVENT(xfs_inode_unpin); |
599 | DEFINE_INODE_EVENT(xfs_inode_unpin_nowait); | 622 | DEFINE_IREF_EVENT(xfs_inode_unpin_nowait); |
623 | |||
624 | DECLARE_EVENT_CLASS(xfs_namespace_class, | ||
625 | TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), | ||
626 | TP_ARGS(dp, name), | ||
627 | TP_STRUCT__entry( | ||
628 | __field(dev_t, dev) | ||
629 | __field(xfs_ino_t, dp_ino) | ||
630 | __dynamic_array(char, name, name->len) | ||
631 | ), | ||
632 | TP_fast_assign( | ||
633 | __entry->dev = VFS_I(dp)->i_sb->s_dev; | ||
634 | __entry->dp_ino = dp->i_ino; | ||
635 | memcpy(__get_str(name), name->name, name->len); | ||
636 | ), | ||
637 | TP_printk("dev %d:%d dp ino 0x%llx name %s", | ||
638 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
639 | __entry->dp_ino, | ||
640 | __get_str(name)) | ||
641 | ) | ||
600 | 642 | ||
601 | /* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */ | 643 | #define DEFINE_NAMESPACE_EVENT(name) \ |
602 | DEFINE_INODE_EVENT(xfs_inode); | 644 | DEFINE_EVENT(xfs_namespace_class, name, \ |
603 | #define xfs_itrace_entry(ip) \ | 645 | TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \ |
604 | trace_xfs_inode(ip, _THIS_IP_) | 646 | TP_ARGS(dp, name)) |
647 | DEFINE_NAMESPACE_EVENT(xfs_remove); | ||
648 | DEFINE_NAMESPACE_EVENT(xfs_link); | ||
649 | DEFINE_NAMESPACE_EVENT(xfs_lookup); | ||
650 | DEFINE_NAMESPACE_EVENT(xfs_create); | ||
651 | DEFINE_NAMESPACE_EVENT(xfs_symlink); | ||
652 | |||
653 | TRACE_EVENT(xfs_rename, | ||
654 | TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp, | ||
655 | struct xfs_name *src_name, struct xfs_name *target_name), | ||
656 | TP_ARGS(src_dp, target_dp, src_name, target_name), | ||
657 | TP_STRUCT__entry( | ||
658 | __field(dev_t, dev) | ||
659 | __field(xfs_ino_t, src_dp_ino) | ||
660 | __field(xfs_ino_t, target_dp_ino) | ||
661 | __dynamic_array(char, src_name, src_name->len) | ||
662 | __dynamic_array(char, target_name, target_name->len) | ||
663 | ), | ||
664 | TP_fast_assign( | ||
665 | __entry->dev = VFS_I(src_dp)->i_sb->s_dev; | ||
666 | __entry->src_dp_ino = src_dp->i_ino; | ||
667 | __entry->target_dp_ino = target_dp->i_ino; | ||
668 | memcpy(__get_str(src_name), src_name->name, src_name->len); | ||
669 | memcpy(__get_str(target_name), target_name->name, target_name->len); | ||
670 | ), | ||
671 | TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx" | ||
672 | " src name %s target name %s", | ||
673 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
674 | __entry->src_dp_ino, | ||
675 | __entry->target_dp_ino, | ||
676 | __get_str(src_name), | ||
677 | __get_str(target_name)) | ||
678 | ) | ||
605 | 679 | ||
606 | DECLARE_EVENT_CLASS(xfs_dquot_class, | 680 | DECLARE_EVENT_CLASS(xfs_dquot_class, |
607 | TP_PROTO(struct xfs_dquot *dqp), | 681 | TP_PROTO(struct xfs_dquot *dqp), |
@@ -681,9 +755,6 @@ DEFINE_DQUOT_EVENT(xfs_dqrele); | |||
681 | DEFINE_DQUOT_EVENT(xfs_dqflush); | 755 | DEFINE_DQUOT_EVENT(xfs_dqflush); |
682 | DEFINE_DQUOT_EVENT(xfs_dqflush_force); | 756 | DEFINE_DQUOT_EVENT(xfs_dqflush_force); |
683 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); | 757 | DEFINE_DQUOT_EVENT(xfs_dqflush_done); |
684 | /* not really iget events, but we re-use the format */ | ||
685 | DEFINE_IGET_EVENT(xfs_dquot_dqalloc); | ||
686 | DEFINE_IGET_EVENT(xfs_dquot_dqdetach); | ||
687 | 758 | ||
688 | DECLARE_EVENT_CLASS(xfs_loggrant_class, | 759 | DECLARE_EVENT_CLASS(xfs_loggrant_class, |
689 | TP_PROTO(struct log *log, struct xlog_ticket *tic), | 760 | TP_PROTO(struct log *log, struct xlog_ticket *tic), |
@@ -831,33 +902,29 @@ DECLARE_EVENT_CLASS(xfs_page_class, | |||
831 | __field(loff_t, size) | 902 | __field(loff_t, size) |
832 | __field(unsigned long, offset) | 903 | __field(unsigned long, offset) |
833 | __field(int, delalloc) | 904 | __field(int, delalloc) |
834 | __field(int, unmapped) | ||
835 | __field(int, unwritten) | 905 | __field(int, unwritten) |
836 | ), | 906 | ), |
837 | TP_fast_assign( | 907 | TP_fast_assign( |
838 | int delalloc = -1, unmapped = -1, unwritten = -1; | 908 | int delalloc = -1, unwritten = -1; |
839 | 909 | ||
840 | if (page_has_buffers(page)) | 910 | if (page_has_buffers(page)) |
841 | xfs_count_page_state(page, &delalloc, | 911 | xfs_count_page_state(page, &delalloc, &unwritten); |
842 | &unmapped, &unwritten); | ||
843 | __entry->dev = inode->i_sb->s_dev; | 912 | __entry->dev = inode->i_sb->s_dev; |
844 | __entry->ino = XFS_I(inode)->i_ino; | 913 | __entry->ino = XFS_I(inode)->i_ino; |
845 | __entry->pgoff = page_offset(page); | 914 | __entry->pgoff = page_offset(page); |
846 | __entry->size = i_size_read(inode); | 915 | __entry->size = i_size_read(inode); |
847 | __entry->offset = off; | 916 | __entry->offset = off; |
848 | __entry->delalloc = delalloc; | 917 | __entry->delalloc = delalloc; |
849 | __entry->unmapped = unmapped; | ||
850 | __entry->unwritten = unwritten; | 918 | __entry->unwritten = unwritten; |
851 | ), | 919 | ), |
852 | TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " | 920 | TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " |
853 | "delalloc %d unmapped %d unwritten %d", | 921 | "delalloc %d unwritten %d", |
854 | MAJOR(__entry->dev), MINOR(__entry->dev), | 922 | MAJOR(__entry->dev), MINOR(__entry->dev), |
855 | __entry->ino, | 923 | __entry->ino, |
856 | __entry->pgoff, | 924 | __entry->pgoff, |
857 | __entry->size, | 925 | __entry->size, |
858 | __entry->offset, | 926 | __entry->offset, |
859 | __entry->delalloc, | 927 | __entry->delalloc, |
860 | __entry->unmapped, | ||
861 | __entry->unwritten) | 928 | __entry->unwritten) |
862 | ) | 929 | ) |
863 | 930 | ||
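
The xfs_trace.h hunks above fold the narrow xfs_iget_class into a generic xfs_inode_class (and add the new xfs_iref_class and xfs_namespace_class), so each tracepoint now costs a single DEFINE_*_EVENT line instead of a full TRACE_EVENT body. As a rough sketch of the pattern being reused here -- it only expands correctly inside a trace header that defines TRACE_SYSTEM and is pulled in through <trace/define_trace.h>, and the field list is abbreviated -- the class/event pair looks like this:

DECLARE_EVENT_CLASS(xfs_inode_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino)
)

#define DEFINE_INODE_EVENT(name) \
DEFINE_EVENT(xfs_inode_class, name, \
	TP_PROTO(struct xfs_inode *ip), \
	TP_ARGS(ip))

/* one line per tracepoint; callers then invoke e.g. trace_xfs_readlink(ip) */
DEFINE_INODE_EVENT(xfs_readlink);
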
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 585e7633dfc7..e1a2f6800e01 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c | |||
@@ -23,25 +23,15 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_btree.h" | ||
39 | #include "xfs_ialloc.h" | ||
40 | #include "xfs_bmap.h" | 31 | #include "xfs_bmap.h" |
41 | #include "xfs_rtalloc.h" | 32 | #include "xfs_rtalloc.h" |
42 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
47 | #include "xfs_trans_space.h" | 37 | #include "xfs_trans_space.h" |
@@ -64,8 +54,6 @@ | |||
64 | flush lock - ditto. | 54 | flush lock - ditto. |
65 | */ | 55 | */ |
66 | 56 | ||
67 | STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *); | ||
68 | |||
69 | #ifdef DEBUG | 57 | #ifdef DEBUG |
70 | xfs_buftarg_t *xfs_dqerror_target; | 58 | xfs_buftarg_t *xfs_dqerror_target; |
71 | int xfs_do_dqerror; | 59 | int xfs_do_dqerror; |
@@ -390,21 +378,14 @@ xfs_qm_dqalloc( | |||
390 | return (ESRCH); | 378 | return (ESRCH); |
391 | } | 379 | } |
392 | 380 | ||
393 | /* | 381 | xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL); |
394 | * xfs_trans_commit normally decrements the vnode ref count | ||
395 | * when it unlocks the inode. Since we want to keep the quota | ||
396 | * inode around, we bump the vnode ref count now. | ||
397 | */ | ||
398 | IHOLD(quotip); | ||
399 | |||
400 | xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); | ||
401 | nmaps = 1; | 382 | nmaps = 1; |
402 | if ((error = xfs_bmapi(tp, quotip, | 383 | if ((error = xfs_bmapi(tp, quotip, |
403 | offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, | 384 | offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, |
404 | XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, | 385 | XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, |
405 | &firstblock, | 386 | &firstblock, |
406 | XFS_QM_DQALLOC_SPACE_RES(mp), | 387 | XFS_QM_DQALLOC_SPACE_RES(mp), |
407 | &map, &nmaps, &flist, NULL))) { | 388 | &map, &nmaps, &flist))) { |
408 | goto error0; | 389 | goto error0; |
409 | } | 390 | } |
410 | ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); | 391 | ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); |
@@ -520,7 +501,7 @@ xfs_qm_dqtobp( | |||
520 | error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, | 501 | error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, |
521 | XFS_DQUOT_CLUSTER_SIZE_FSB, | 502 | XFS_DQUOT_CLUSTER_SIZE_FSB, |
522 | XFS_BMAPI_METADATA, | 503 | XFS_BMAPI_METADATA, |
523 | NULL, 0, &map, &nmaps, NULL, NULL); | 504 | NULL, 0, &map, &nmaps, NULL); |
524 | 505 | ||
525 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); | 506 | xfs_iunlock(quotip, XFS_ILOCK_SHARED); |
526 | if (error) | 507 | if (error) |
@@ -1141,6 +1122,46 @@ xfs_qm_dqrele( | |||
1141 | xfs_qm_dqput(dqp); | 1122 | xfs_qm_dqput(dqp); |
1142 | } | 1123 | } |
1143 | 1124 | ||
1125 | /* | ||
1126 | * This is the dquot flushing I/O completion routine. It is called | ||
1127 | * from interrupt level when the buffer containing the dquot is | ||
1128 | * flushed to disk. It is responsible for removing the dquot logitem | ||
1129 | * from the AIL if it has not been re-logged, and unlocking the dquot's | ||
1130 | * flush lock. This behavior is very similar to that of inodes.. | ||
1131 | */ | ||
1132 | STATIC void | ||
1133 | xfs_qm_dqflush_done( | ||
1134 | struct xfs_buf *bp, | ||
1135 | struct xfs_log_item *lip) | ||
1136 | { | ||
1137 | xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip; | ||
1138 | xfs_dquot_t *dqp = qip->qli_dquot; | ||
1139 | struct xfs_ail *ailp = lip->li_ailp; | ||
1140 | |||
1141 | /* | ||
1142 | * We only want to pull the item from the AIL if its | ||
1143 | * location in the log has not changed since we started the flush. | ||
1144 | * Thus, we only bother if the dquot's lsn has | ||
1145 | * not changed. First we check the lsn outside the lock | ||
1146 | * since it's cheaper, and then we recheck while | ||
1147 | * holding the lock before removing the dquot from the AIL. | ||
1148 | */ | ||
1149 | if ((lip->li_flags & XFS_LI_IN_AIL) && | ||
1150 | lip->li_lsn == qip->qli_flush_lsn) { | ||
1151 | |||
1152 | /* xfs_trans_ail_delete() drops the AIL lock. */ | ||
1153 | spin_lock(&ailp->xa_lock); | ||
1154 | if (lip->li_lsn == qip->qli_flush_lsn) | ||
1155 | xfs_trans_ail_delete(ailp, lip); | ||
1156 | else | ||
1157 | spin_unlock(&ailp->xa_lock); | ||
1158 | } | ||
1159 | |||
1160 | /* | ||
1161 | * Release the dq's flush lock since we're done with it. | ||
1162 | */ | ||
1163 | xfs_dqfunlock(dqp); | ||
1164 | } | ||
1144 | 1165 | ||
1145 | /* | 1166 | /* |
1146 | * Write a modified dquot to disk. | 1167 | * Write a modified dquot to disk. |
@@ -1222,8 +1243,9 @@ xfs_qm_dqflush( | |||
1222 | * Attach an iodone routine so that we can remove this dquot from the | 1243 | * Attach an iodone routine so that we can remove this dquot from the |
1223 | * AIL and release the flush lock once the dquot is synced to disk. | 1244 | * AIL and release the flush lock once the dquot is synced to disk. |
1224 | */ | 1245 | */ |
1225 | xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *)) | 1246 | xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, |
1226 | xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item)); | 1247 | &dqp->q_logitem.qli_item); |
1248 | |||
1227 | /* | 1249 | /* |
1228 | * If the buffer is pinned then push on the log so we won't | 1250 | * If the buffer is pinned then push on the log so we won't |
1229 | * get stuck waiting in the write for too long. | 1251 | * get stuck waiting in the write for too long. |
@@ -1247,50 +1269,6 @@ xfs_qm_dqflush( | |||
1247 | 1269 | ||
1248 | } | 1270 | } |
1249 | 1271 | ||
1250 | /* | ||
1251 | * This is the dquot flushing I/O completion routine. It is called | ||
1252 | * from interrupt level when the buffer containing the dquot is | ||
1253 | * flushed to disk. It is responsible for removing the dquot logitem | ||
1254 | * from the AIL if it has not been re-logged, and unlocking the dquot's | ||
1255 | * flush lock. This behavior is very similar to that of inodes.. | ||
1256 | */ | ||
1257 | /*ARGSUSED*/ | ||
1258 | STATIC void | ||
1259 | xfs_qm_dqflush_done( | ||
1260 | xfs_buf_t *bp, | ||
1261 | xfs_dq_logitem_t *qip) | ||
1262 | { | ||
1263 | xfs_dquot_t *dqp; | ||
1264 | struct xfs_ail *ailp; | ||
1265 | |||
1266 | dqp = qip->qli_dquot; | ||
1267 | ailp = qip->qli_item.li_ailp; | ||
1268 | |||
1269 | /* | ||
1270 | * We only want to pull the item from the AIL if its | ||
1271 | * location in the log has not changed since we started the flush. | ||
1272 | * Thus, we only bother if the dquot's lsn has | ||
1273 | * not changed. First we check the lsn outside the lock | ||
1274 | * since it's cheaper, and then we recheck while | ||
1275 | * holding the lock before removing the dquot from the AIL. | ||
1276 | */ | ||
1277 | if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && | ||
1278 | qip->qli_item.li_lsn == qip->qli_flush_lsn) { | ||
1279 | |||
1280 | /* xfs_trans_ail_delete() drops the AIL lock. */ | ||
1281 | spin_lock(&ailp->xa_lock); | ||
1282 | if (qip->qli_item.li_lsn == qip->qli_flush_lsn) | ||
1283 | xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip); | ||
1284 | else | ||
1285 | spin_unlock(&ailp->xa_lock); | ||
1286 | } | ||
1287 | |||
1288 | /* | ||
1289 | * Release the dq's flush lock since we're done with it. | ||
1290 | */ | ||
1291 | xfs_dqfunlock(dqp); | ||
1292 | } | ||
1293 | |||
1294 | int | 1272 | int |
1295 | xfs_qm_dqlock_nowait( | 1273 | xfs_qm_dqlock_nowait( |
1296 | xfs_dquot_t *dqp) | 1274 | xfs_dquot_t *dqp) |
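
In the xfs_dquot.c hunk above, xfs_qm_dqflush_done() is moved ahead of its caller and converted to the generic struct xfs_log_item prototype, but its logic is unchanged: test the flush lsn cheaply without the AIL lock, then retest under the lock before deleting the item. A stand-alone userspace sketch of that double-checked removal idea, with purely illustrative names (item, ail_lock, flush_done are not XFS APIs) and ignoring the memory-ordering subtleties a real implementation cares about, might look like:

/* Sketch of the "check outside the lock, recheck under it" pattern that
 * xfs_qm_dqflush_done() uses before pulling an item off the AIL. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct item {
	long long lsn;		/* where the item currently sits in the "log" */
	bool	  in_list;	/* analogue of XFS_LI_IN_AIL */
};

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_done(struct item *it, long long flush_lsn)
{
	/* Cheap unlocked check first: only bother if the item has not been
	 * re-logged (lsn unchanged) since the flush was started. */
	if (it->in_list && it->lsn == flush_lsn) {
		pthread_mutex_lock(&ail_lock);
		/* Recheck under the lock before actually removing it. */
		if (it->lsn == flush_lsn)
			it->in_list = false;	/* "xfs_trans_ail_delete()" */
		pthread_mutex_unlock(&ail_lock);
	}
}

int main(void)
{
	struct item it = { .lsn = 42, .in_list = true };

	flush_done(&it, 42);
	printf("still in list: %s\n", it.in_list ? "yes" : "no");
	return 0;
}

The unlocked check is only an optimisation; correctness comes from repeating the comparison while the lock is held.
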
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 8d89a24ae324..2a1f3dc10a02 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c | |||
@@ -23,42 +23,36 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_bmap.h" | 31 | #include "xfs_bmap.h" |
39 | #include "xfs_btree.h" | ||
40 | #include "xfs_ialloc.h" | ||
41 | #include "xfs_rtalloc.h" | 32 | #include "xfs_rtalloc.h" |
42 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
43 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
47 | #include "xfs_trans_priv.h" | 37 | #include "xfs_trans_priv.h" |
48 | #include "xfs_qm.h" | 38 | #include "xfs_qm.h" |
49 | 39 | ||
40 | static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip) | ||
41 | { | ||
42 | return container_of(lip, struct xfs_dq_logitem, qli_item); | ||
43 | } | ||
44 | |||
50 | /* | 45 | /* |
51 | * returns the number of iovecs needed to log the given dquot item. | 46 | * returns the number of iovecs needed to log the given dquot item. |
52 | */ | 47 | */ |
53 | /* ARGSUSED */ | ||
54 | STATIC uint | 48 | STATIC uint |
55 | xfs_qm_dquot_logitem_size( | 49 | xfs_qm_dquot_logitem_size( |
56 | xfs_dq_logitem_t *logitem) | 50 | struct xfs_log_item *lip) |
57 | { | 51 | { |
58 | /* | 52 | /* |
59 | * we need only two iovecs, one for the format, one for the real thing | 53 | * we need only two iovecs, one for the format, one for the real thing |
60 | */ | 54 | */ |
61 | return (2); | 55 | return 2; |
62 | } | 56 | } |
63 | 57 | ||
64 | /* | 58 | /* |
@@ -66,22 +60,21 @@ xfs_qm_dquot_logitem_size( | |||
66 | */ | 60 | */ |
67 | STATIC void | 61 | STATIC void |
68 | xfs_qm_dquot_logitem_format( | 62 | xfs_qm_dquot_logitem_format( |
69 | xfs_dq_logitem_t *logitem, | 63 | struct xfs_log_item *lip, |
70 | xfs_log_iovec_t *logvec) | 64 | struct xfs_log_iovec *logvec) |
71 | { | 65 | { |
72 | ASSERT(logitem); | 66 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); |
73 | ASSERT(logitem->qli_dquot); | ||
74 | 67 | ||
75 | logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; | 68 | logvec->i_addr = &qlip->qli_format; |
76 | logvec->i_len = sizeof(xfs_dq_logformat_t); | 69 | logvec->i_len = sizeof(xfs_dq_logformat_t); |
77 | logvec->i_type = XLOG_REG_TYPE_QFORMAT; | 70 | logvec->i_type = XLOG_REG_TYPE_QFORMAT; |
78 | logvec++; | 71 | logvec++; |
79 | logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; | 72 | logvec->i_addr = &qlip->qli_dquot->q_core; |
80 | logvec->i_len = sizeof(xfs_disk_dquot_t); | 73 | logvec->i_len = sizeof(xfs_disk_dquot_t); |
81 | logvec->i_type = XLOG_REG_TYPE_DQUOT; | 74 | logvec->i_type = XLOG_REG_TYPE_DQUOT; |
82 | 75 | ||
83 | ASSERT(2 == logitem->qli_item.li_desc->lid_size); | 76 | ASSERT(2 == lip->li_desc->lid_size); |
84 | logitem->qli_format.qlf_size = 2; | 77 | qlip->qli_format.qlf_size = 2; |
85 | 78 | ||
86 | } | 79 | } |
87 | 80 | ||
@@ -90,9 +83,9 @@ xfs_qm_dquot_logitem_format( | |||
90 | */ | 83 | */ |
91 | STATIC void | 84 | STATIC void |
92 | xfs_qm_dquot_logitem_pin( | 85 | xfs_qm_dquot_logitem_pin( |
93 | xfs_dq_logitem_t *logitem) | 86 | struct xfs_log_item *lip) |
94 | { | 87 | { |
95 | xfs_dquot_t *dqp = logitem->qli_dquot; | 88 | struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
96 | 89 | ||
97 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 90 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
98 | atomic_inc(&dqp->q_pincount); | 91 | atomic_inc(&dqp->q_pincount); |
@@ -104,27 +97,18 @@ xfs_qm_dquot_logitem_pin( | |||
104 | * dquot must have been previously pinned with a call to | 97 | * dquot must have been previously pinned with a call to |
105 | * xfs_qm_dquot_logitem_pin(). | 98 | * xfs_qm_dquot_logitem_pin(). |
106 | */ | 99 | */ |
107 | /* ARGSUSED */ | ||
108 | STATIC void | 100 | STATIC void |
109 | xfs_qm_dquot_logitem_unpin( | 101 | xfs_qm_dquot_logitem_unpin( |
110 | xfs_dq_logitem_t *logitem) | 102 | struct xfs_log_item *lip, |
103 | int remove) | ||
111 | { | 104 | { |
112 | xfs_dquot_t *dqp = logitem->qli_dquot; | 105 | struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
113 | 106 | ||
114 | ASSERT(atomic_read(&dqp->q_pincount) > 0); | 107 | ASSERT(atomic_read(&dqp->q_pincount) > 0); |
115 | if (atomic_dec_and_test(&dqp->q_pincount)) | 108 | if (atomic_dec_and_test(&dqp->q_pincount)) |
116 | wake_up(&dqp->q_pinwait); | 109 | wake_up(&dqp->q_pinwait); |
117 | } | 110 | } |
118 | 111 | ||
119 | /* ARGSUSED */ | ||
120 | STATIC void | ||
121 | xfs_qm_dquot_logitem_unpin_remove( | ||
122 | xfs_dq_logitem_t *logitem, | ||
123 | xfs_trans_t *tp) | ||
124 | { | ||
125 | xfs_qm_dquot_logitem_unpin(logitem); | ||
126 | } | ||
127 | |||
128 | /* | 112 | /* |
129 | * Given the logitem, this writes the corresponding dquot entry to disk | 113 | * Given the logitem, this writes the corresponding dquot entry to disk |
130 | * asynchronously. This is called with the dquot entry securely locked; | 114 | * asynchronously. This is called with the dquot entry securely locked; |
@@ -133,12 +117,10 @@ xfs_qm_dquot_logitem_unpin_remove( | |||
133 | */ | 117 | */ |
134 | STATIC void | 118 | STATIC void |
135 | xfs_qm_dquot_logitem_push( | 119 | xfs_qm_dquot_logitem_push( |
136 | xfs_dq_logitem_t *logitem) | 120 | struct xfs_log_item *lip) |
137 | { | 121 | { |
138 | xfs_dquot_t *dqp; | 122 | struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
139 | int error; | 123 | int error; |
140 | |||
141 | dqp = logitem->qli_dquot; | ||
142 | 124 | ||
143 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 125 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
144 | ASSERT(!completion_done(&dqp->q_flush)); | 126 | ASSERT(!completion_done(&dqp->q_flush)); |
@@ -160,27 +142,25 @@ xfs_qm_dquot_logitem_push( | |||
160 | xfs_dqunlock(dqp); | 142 | xfs_dqunlock(dqp); |
161 | } | 143 | } |
162 | 144 | ||
163 | /*ARGSUSED*/ | ||
164 | STATIC xfs_lsn_t | 145 | STATIC xfs_lsn_t |
165 | xfs_qm_dquot_logitem_committed( | 146 | xfs_qm_dquot_logitem_committed( |
166 | xfs_dq_logitem_t *l, | 147 | struct xfs_log_item *lip, |
167 | xfs_lsn_t lsn) | 148 | xfs_lsn_t lsn) |
168 | { | 149 | { |
169 | /* | 150 | /* |
170 | * We always re-log the entire dquot when it becomes dirty, | 151 | * We always re-log the entire dquot when it becomes dirty, |
171 | * so, the latest copy _is_ the only one that matters. | 152 | * so, the latest copy _is_ the only one that matters. |
172 | */ | 153 | */ |
173 | return (lsn); | 154 | return lsn; |
174 | } | 155 | } |
175 | 156 | ||
176 | |||
177 | /* | 157 | /* |
178 | * This is called to wait for the given dquot to be unpinned. | 158 | * This is called to wait for the given dquot to be unpinned. |
179 | * Most of these pin/unpin routines are plagiarized from inode code. | 159 | * Most of these pin/unpin routines are plagiarized from inode code. |
180 | */ | 160 | */ |
181 | void | 161 | void |
182 | xfs_qm_dqunpin_wait( | 162 | xfs_qm_dqunpin_wait( |
183 | xfs_dquot_t *dqp) | 163 | struct xfs_dquot *dqp) |
184 | { | 164 | { |
185 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 165 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
186 | if (atomic_read(&dqp->q_pincount) == 0) | 166 | if (atomic_read(&dqp->q_pincount) == 0) |
@@ -206,13 +186,12 @@ xfs_qm_dqunpin_wait( | |||
206 | */ | 186 | */ |
207 | STATIC void | 187 | STATIC void |
208 | xfs_qm_dquot_logitem_pushbuf( | 188 | xfs_qm_dquot_logitem_pushbuf( |
209 | xfs_dq_logitem_t *qip) | 189 | struct xfs_log_item *lip) |
210 | { | 190 | { |
211 | xfs_dquot_t *dqp; | 191 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); |
212 | xfs_mount_t *mp; | 192 | struct xfs_dquot *dqp = qlip->qli_dquot; |
213 | xfs_buf_t *bp; | 193 | struct xfs_buf *bp; |
214 | 194 | ||
215 | dqp = qip->qli_dquot; | ||
216 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 195 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
217 | 196 | ||
218 | /* | 197 | /* |
@@ -220,22 +199,20 @@ xfs_qm_dquot_logitem_pushbuf( | |||
220 | * inode flush completed and the inode was taken off the AIL. | 199 | * inode flush completed and the inode was taken off the AIL. |
221 | * So, just get out. | 200 | * So, just get out. |
222 | */ | 201 | */ |
223 | if (completion_done(&dqp->q_flush) || | 202 | if (completion_done(&dqp->q_flush) || |
224 | ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) { | 203 | !(lip->li_flags & XFS_LI_IN_AIL)) { |
225 | xfs_dqunlock(dqp); | 204 | xfs_dqunlock(dqp); |
226 | return; | 205 | return; |
227 | } | 206 | } |
228 | mp = dqp->q_mount; | 207 | |
229 | bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno, | 208 | bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, |
230 | mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); | 209 | dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); |
231 | xfs_dqunlock(dqp); | 210 | xfs_dqunlock(dqp); |
232 | if (!bp) | 211 | if (!bp) |
233 | return; | 212 | return; |
234 | if (XFS_BUF_ISDELAYWRITE(bp)) | 213 | if (XFS_BUF_ISDELAYWRITE(bp)) |
235 | xfs_buf_delwri_promote(bp); | 214 | xfs_buf_delwri_promote(bp); |
236 | xfs_buf_relse(bp); | 215 | xfs_buf_relse(bp); |
237 | return; | ||
238 | |||
239 | } | 216 | } |
240 | 217 | ||
241 | /* | 218 | /* |
@@ -250,15 +227,14 @@ xfs_qm_dquot_logitem_pushbuf( | |||
250 | */ | 227 | */ |
251 | STATIC uint | 228 | STATIC uint |
252 | xfs_qm_dquot_logitem_trylock( | 229 | xfs_qm_dquot_logitem_trylock( |
253 | xfs_dq_logitem_t *qip) | 230 | struct xfs_log_item *lip) |
254 | { | 231 | { |
255 | xfs_dquot_t *dqp; | 232 | struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
256 | 233 | ||
257 | dqp = qip->qli_dquot; | ||
258 | if (atomic_read(&dqp->q_pincount) > 0) | 234 | if (atomic_read(&dqp->q_pincount) > 0) |
259 | return XFS_ITEM_PINNED; | 235 | return XFS_ITEM_PINNED; |
260 | 236 | ||
261 | if (! xfs_qm_dqlock_nowait(dqp)) | 237 | if (!xfs_qm_dqlock_nowait(dqp)) |
262 | return XFS_ITEM_LOCKED; | 238 | return XFS_ITEM_LOCKED; |
263 | 239 | ||
264 | if (!xfs_dqflock_nowait(dqp)) { | 240 | if (!xfs_dqflock_nowait(dqp)) { |
@@ -269,11 +245,10 @@ xfs_qm_dquot_logitem_trylock( | |||
269 | return XFS_ITEM_PUSHBUF; | 245 | return XFS_ITEM_PUSHBUF; |
270 | } | 246 | } |
271 | 247 | ||
272 | ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL); | 248 | ASSERT(lip->li_flags & XFS_LI_IN_AIL); |
273 | return XFS_ITEM_SUCCESS; | 249 | return XFS_ITEM_SUCCESS; |
274 | } | 250 | } |
275 | 251 | ||
276 | |||
277 | /* | 252 | /* |
278 | * Unlock the dquot associated with the log item. | 253 | * Unlock the dquot associated with the log item. |
279 | * Clear the fields of the dquot and dquot log item that | 254 | * Clear the fields of the dquot and dquot log item that |
@@ -282,12 +257,10 @@ xfs_qm_dquot_logitem_trylock( | |||
282 | */ | 257 | */ |
283 | STATIC void | 258 | STATIC void |
284 | xfs_qm_dquot_logitem_unlock( | 259 | xfs_qm_dquot_logitem_unlock( |
285 | xfs_dq_logitem_t *ql) | 260 | struct xfs_log_item *lip) |
286 | { | 261 | { |
287 | xfs_dquot_t *dqp; | 262 | struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; |
288 | 263 | ||
289 | ASSERT(ql != NULL); | ||
290 | dqp = ql->qli_dquot; | ||
291 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 264 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
292 | 265 | ||
293 | /* | 266 | /* |
@@ -304,43 +277,32 @@ xfs_qm_dquot_logitem_unlock( | |||
304 | xfs_dqunlock(dqp); | 277 | xfs_dqunlock(dqp); |
305 | } | 278 | } |
306 | 279 | ||
307 | |||
308 | /* | 280 | /* |
309 | * this needs to stamp an lsn into the dquot, I think. | 281 | * this needs to stamp an lsn into the dquot, I think. |
310 | * rpc's that look at user dquot's would then have to | 282 | * rpc's that look at user dquot's would then have to |
311 | * push on the dependency recorded in the dquot | 283 | * push on the dependency recorded in the dquot |
312 | */ | 284 | */ |
313 | /* ARGSUSED */ | ||
314 | STATIC void | 285 | STATIC void |
315 | xfs_qm_dquot_logitem_committing( | 286 | xfs_qm_dquot_logitem_committing( |
316 | xfs_dq_logitem_t *l, | 287 | struct xfs_log_item *lip, |
317 | xfs_lsn_t lsn) | 288 | xfs_lsn_t lsn) |
318 | { | 289 | { |
319 | return; | ||
320 | } | 290 | } |
321 | 291 | ||
322 | |||
323 | /* | 292 | /* |
324 | * This is the ops vector for dquots | 293 | * This is the ops vector for dquots |
325 | */ | 294 | */ |
326 | static struct xfs_item_ops xfs_dquot_item_ops = { | 295 | static struct xfs_item_ops xfs_dquot_item_ops = { |
327 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size, | 296 | .iop_size = xfs_qm_dquot_logitem_size, |
328 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 297 | .iop_format = xfs_qm_dquot_logitem_format, |
329 | xfs_qm_dquot_logitem_format, | 298 | .iop_pin = xfs_qm_dquot_logitem_pin, |
330 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin, | 299 | .iop_unpin = xfs_qm_dquot_logitem_unpin, |
331 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin, | 300 | .iop_trylock = xfs_qm_dquot_logitem_trylock, |
332 | .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) | 301 | .iop_unlock = xfs_qm_dquot_logitem_unlock, |
333 | xfs_qm_dquot_logitem_unpin_remove, | 302 | .iop_committed = xfs_qm_dquot_logitem_committed, |
334 | .iop_trylock = (uint(*)(xfs_log_item_t*)) | 303 | .iop_push = xfs_qm_dquot_logitem_push, |
335 | xfs_qm_dquot_logitem_trylock, | 304 | .iop_pushbuf = xfs_qm_dquot_logitem_pushbuf, |
336 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock, | 305 | .iop_committing = xfs_qm_dquot_logitem_committing |
337 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
338 | xfs_qm_dquot_logitem_committed, | ||
339 | .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push, | ||
340 | .iop_pushbuf = (void(*)(xfs_log_item_t*)) | ||
341 | xfs_qm_dquot_logitem_pushbuf, | ||
342 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
343 | xfs_qm_dquot_logitem_committing | ||
344 | }; | 306 | }; |
345 | 307 | ||
346 | /* | 308 | /* |
@@ -350,10 +312,9 @@ static struct xfs_item_ops xfs_dquot_item_ops = { | |||
350 | */ | 312 | */ |
351 | void | 313 | void |
352 | xfs_qm_dquot_logitem_init( | 314 | xfs_qm_dquot_logitem_init( |
353 | struct xfs_dquot *dqp) | 315 | struct xfs_dquot *dqp) |
354 | { | 316 | { |
355 | xfs_dq_logitem_t *lp; | 317 | struct xfs_dq_logitem *lp = &dqp->q_logitem; |
356 | lp = &dqp->q_logitem; | ||
357 | 318 | ||
358 | xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, | 319 | xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, |
359 | &xfs_dquot_item_ops); | 320 | &xfs_dquot_item_ops); |
@@ -374,16 +335,22 @@ xfs_qm_dquot_logitem_init( | |||
374 | 335 | ||
375 | /*------------------ QUOTAOFF LOG ITEMS -------------------*/ | 336 | /*------------------ QUOTAOFF LOG ITEMS -------------------*/ |
376 | 337 | ||
338 | static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip) | ||
339 | { | ||
340 | return container_of(lip, struct xfs_qoff_logitem, qql_item); | ||
341 | } | ||
342 | |||
343 | |||
377 | /* | 344 | /* |
378 | * This returns the number of iovecs needed to log the given quotaoff item. | 345 | * This returns the number of iovecs needed to log the given quotaoff item. |
379 | * We only need 1 iovec for an quotaoff item. It just logs the | 346 | * We only need 1 iovec for an quotaoff item. It just logs the |
380 | * quotaoff_log_format structure. | 347 | * quotaoff_log_format structure. |
381 | */ | 348 | */ |
382 | /*ARGSUSED*/ | ||
383 | STATIC uint | 349 | STATIC uint |
384 | xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf) | 350 | xfs_qm_qoff_logitem_size( |
351 | struct xfs_log_item *lip) | ||
385 | { | 352 | { |
386 | return (1); | 353 | return 1; |
387 | } | 354 | } |
388 | 355 | ||
389 | /* | 356 | /* |
@@ -394,53 +361,46 @@ xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf) | |||
394 | * slots in the quotaoff item have been filled. | 361 | * slots in the quotaoff item have been filled. |
395 | */ | 362 | */ |
396 | STATIC void | 363 | STATIC void |
397 | xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf, | 364 | xfs_qm_qoff_logitem_format( |
398 | xfs_log_iovec_t *log_vector) | 365 | struct xfs_log_item *lip, |
366 | struct xfs_log_iovec *log_vector) | ||
399 | { | 367 | { |
400 | ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF); | 368 | struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip); |
369 | |||
370 | ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF); | ||
401 | 371 | ||
402 | log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); | 372 | log_vector->i_addr = &qflip->qql_format; |
403 | log_vector->i_len = sizeof(xfs_qoff_logitem_t); | 373 | log_vector->i_len = sizeof(xfs_qoff_logitem_t); |
404 | log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; | 374 | log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; |
405 | qf->qql_format.qf_size = 1; | 375 | qflip->qql_format.qf_size = 1; |
406 | } | 376 | } |
407 | 377 | ||
408 | |||
409 | /* | 378 | /* |
410 | * Pinning has no meaning for an quotaoff item, so just return. | 379 | * Pinning has no meaning for an quotaoff item, so just return. |
411 | */ | 380 | */ |
412 | /*ARGSUSED*/ | ||
413 | STATIC void | 381 | STATIC void |
414 | xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf) | 382 | xfs_qm_qoff_logitem_pin( |
383 | struct xfs_log_item *lip) | ||
415 | { | 384 | { |
416 | return; | ||
417 | } | 385 | } |
418 | 386 | ||
419 | |||
420 | /* | 387 | /* |
421 | * Since pinning has no meaning for an quotaoff item, unpinning does | 388 | * Since pinning has no meaning for an quotaoff item, unpinning does |
422 | * not either. | 389 | * not either. |
423 | */ | 390 | */ |
424 | /*ARGSUSED*/ | ||
425 | STATIC void | 391 | STATIC void |
426 | xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf) | 392 | xfs_qm_qoff_logitem_unpin( |
393 | struct xfs_log_item *lip, | ||
394 | int remove) | ||
427 | { | 395 | { |
428 | return; | ||
429 | } | ||
430 | |||
431 | /*ARGSUSED*/ | ||
432 | STATIC void | ||
433 | xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp) | ||
434 | { | ||
435 | return; | ||
436 | } | 396 | } |
437 | 397 | ||
438 | /* | 398 | /* |
439 | * Quotaoff items have no locking, so just return success. | 399 | * Quotaoff items have no locking, so just return success. |
440 | */ | 400 | */ |
441 | /*ARGSUSED*/ | ||
442 | STATIC uint | 401 | STATIC uint |
443 | xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf) | 402 | xfs_qm_qoff_logitem_trylock( |
403 | struct xfs_log_item *lip) | ||
444 | { | 404 | { |
445 | return XFS_ITEM_LOCKED; | 405 | return XFS_ITEM_LOCKED; |
446 | } | 406 | } |
@@ -449,53 +409,51 @@ xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf) | |||
449 | * Quotaoff items have no locking or pushing, so return failure | 409 | * Quotaoff items have no locking or pushing, so return failure |
450 | * so that the caller doesn't bother with us. | 410 | * so that the caller doesn't bother with us. |
451 | */ | 411 | */ |
452 | /*ARGSUSED*/ | ||
453 | STATIC void | 412 | STATIC void |
454 | xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf) | 413 | xfs_qm_qoff_logitem_unlock( |
414 | struct xfs_log_item *lip) | ||
455 | { | 415 | { |
456 | return; | ||
457 | } | 416 | } |
458 | 417 | ||
459 | /* | 418 | /* |
460 | * The quotaoff-start-item is logged only once and cannot be moved in the log, | 419 | * The quotaoff-start-item is logged only once and cannot be moved in the log, |
461 | * so simply return the lsn at which it's been logged. | 420 | * so simply return the lsn at which it's been logged. |
462 | */ | 421 | */ |
463 | /*ARGSUSED*/ | ||
464 | STATIC xfs_lsn_t | 422 | STATIC xfs_lsn_t |
465 | xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn) | 423 | xfs_qm_qoff_logitem_committed( |
424 | struct xfs_log_item *lip, | ||
425 | xfs_lsn_t lsn) | ||
466 | { | 426 | { |
467 | return (lsn); | 427 | return lsn; |
468 | } | 428 | } |
469 | 429 | ||
470 | /* | 430 | /* |
471 | * There isn't much you can do to push on an quotaoff item. It is simply | 431 | * There isn't much you can do to push on an quotaoff item. It is simply |
472 | * stuck waiting for the log to be flushed to disk. | 432 | * stuck waiting for the log to be flushed to disk. |
473 | */ | 433 | */ |
474 | /*ARGSUSED*/ | ||
475 | STATIC void | 434 | STATIC void |
476 | xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf) | 435 | xfs_qm_qoff_logitem_push( |
436 | struct xfs_log_item *lip) | ||
477 | { | 437 | { |
478 | return; | ||
479 | } | 438 | } |
480 | 439 | ||
481 | 440 | ||
482 | /*ARGSUSED*/ | ||
483 | STATIC xfs_lsn_t | 441 | STATIC xfs_lsn_t |
484 | xfs_qm_qoffend_logitem_committed( | 442 | xfs_qm_qoffend_logitem_committed( |
485 | xfs_qoff_logitem_t *qfe, | 443 | struct xfs_log_item *lip, |
486 | xfs_lsn_t lsn) | 444 | xfs_lsn_t lsn) |
487 | { | 445 | { |
488 | xfs_qoff_logitem_t *qfs; | 446 | struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip); |
489 | struct xfs_ail *ailp; | 447 | struct xfs_qoff_logitem *qfs = qfe->qql_start_lip; |
448 | struct xfs_ail *ailp = qfs->qql_item.li_ailp; | ||
490 | 449 | ||
491 | qfs = qfe->qql_start_lip; | ||
492 | ailp = qfs->qql_item.li_ailp; | ||
493 | spin_lock(&ailp->xa_lock); | ||
494 | /* | 450 | /* |
495 | * Delete the qoff-start logitem from the AIL. | 451 | * Delete the qoff-start logitem from the AIL. |
496 | * xfs_trans_ail_delete() drops the AIL lock. | 452 | * xfs_trans_ail_delete() drops the AIL lock. |
497 | */ | 453 | */ |
454 | spin_lock(&ailp->xa_lock); | ||
498 | xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); | 455 | xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); |
456 | |||
499 | kmem_free(qfs); | 457 | kmem_free(qfs); |
500 | kmem_free(qfe); | 458 | kmem_free(qfe); |
501 | return (xfs_lsn_t)-1; | 459 | return (xfs_lsn_t)-1; |
@@ -515,71 +473,52 @@ xfs_qm_qoffend_logitem_committed( | |||
515 | * (truly makes the quotaoff irrevocable). If we do something else, | 473 | * (truly makes the quotaoff irrevocable). If we do something else, |
516 | * then maybe we don't need two. | 474 | * then maybe we don't need two. |
517 | */ | 475 | */ |
518 | /* ARGSUSED */ | ||
519 | STATIC void | ||
520 | xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) | ||
521 | { | ||
522 | return; | ||
523 | } | ||
524 | |||
525 | /* ARGSUSED */ | ||
526 | STATIC void | 476 | STATIC void |
527 | xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn) | 477 | xfs_qm_qoff_logitem_committing( |
478 | struct xfs_log_item *lip, | ||
479 | xfs_lsn_t commit_lsn) | ||
528 | { | 480 | { |
529 | return; | ||
530 | } | 481 | } |
531 | 482 | ||
532 | static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { | 483 | static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { |
533 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, | 484 | .iop_size = xfs_qm_qoff_logitem_size, |
534 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 485 | .iop_format = xfs_qm_qoff_logitem_format, |
535 | xfs_qm_qoff_logitem_format, | 486 | .iop_pin = xfs_qm_qoff_logitem_pin, |
536 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, | 487 | .iop_unpin = xfs_qm_qoff_logitem_unpin, |
537 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, | 488 | .iop_trylock = xfs_qm_qoff_logitem_trylock, |
538 | .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) | 489 | .iop_unlock = xfs_qm_qoff_logitem_unlock, |
539 | xfs_qm_qoff_logitem_unpin_remove, | 490 | .iop_committed = xfs_qm_qoffend_logitem_committed, |
540 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, | 491 | .iop_push = xfs_qm_qoff_logitem_push, |
541 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, | 492 | .iop_committing = xfs_qm_qoff_logitem_committing |
542 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
543 | xfs_qm_qoffend_logitem_committed, | ||
544 | .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, | ||
545 | .iop_pushbuf = NULL, | ||
546 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
547 | xfs_qm_qoffend_logitem_committing | ||
548 | }; | 493 | }; |
549 | 494 | ||
550 | /* | 495 | /* |
551 | * This is the ops vector shared by all quotaoff-start log items. | 496 | * This is the ops vector shared by all quotaoff-start log items. |
552 | */ | 497 | */ |
553 | static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { | 498 | static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { |
554 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size, | 499 | .iop_size = xfs_qm_qoff_logitem_size, |
555 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 500 | .iop_format = xfs_qm_qoff_logitem_format, |
556 | xfs_qm_qoff_logitem_format, | 501 | .iop_pin = xfs_qm_qoff_logitem_pin, |
557 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin, | 502 | .iop_unpin = xfs_qm_qoff_logitem_unpin, |
558 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin, | 503 | .iop_trylock = xfs_qm_qoff_logitem_trylock, |
559 | .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*)) | 504 | .iop_unlock = xfs_qm_qoff_logitem_unlock, |
560 | xfs_qm_qoff_logitem_unpin_remove, | 505 | .iop_committed = xfs_qm_qoff_logitem_committed, |
561 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock, | 506 | .iop_push = xfs_qm_qoff_logitem_push, |
562 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock, | 507 | .iop_committing = xfs_qm_qoff_logitem_committing |
563 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
564 | xfs_qm_qoff_logitem_committed, | ||
565 | .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push, | ||
566 | .iop_pushbuf = NULL, | ||
567 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
568 | xfs_qm_qoff_logitem_committing | ||
569 | }; | 508 | }; |
570 | 509 | ||
571 | /* | 510 | /* |
572 | * Allocate and initialize an quotaoff item of the correct quota type(s). | 511 | * Allocate and initialize an quotaoff item of the correct quota type(s). |
573 | */ | 512 | */ |
574 | xfs_qoff_logitem_t * | 513 | struct xfs_qoff_logitem * |
575 | xfs_qm_qoff_logitem_init( | 514 | xfs_qm_qoff_logitem_init( |
576 | struct xfs_mount *mp, | 515 | struct xfs_mount *mp, |
577 | xfs_qoff_logitem_t *start, | 516 | struct xfs_qoff_logitem *start, |
578 | uint flags) | 517 | uint flags) |
579 | { | 518 | { |
580 | xfs_qoff_logitem_t *qf; | 519 | struct xfs_qoff_logitem *qf; |
581 | 520 | ||
582 | qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP); | 521 | qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP); |
583 | 522 | ||
584 | xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? | 523 | xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? |
585 | &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); | 524 | &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); |
@@ -587,5 +526,5 @@ xfs_qm_qoff_logitem_init( | |||
587 | qf->qql_format.qf_type = XFS_LI_QUOTAOFF; | 526 | qf->qql_format.qf_type = XFS_LI_QUOTAOFF; |
588 | qf->qql_format.qf_flags = flags; | 527 | qf->qql_format.qf_flags = flags; |
589 | qf->qql_start_lip = start; | 528 | qf->qql_start_lip = start; |
590 | return (qf); | 529 | return qf; |
591 | } | 530 | } |
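
The common thread in the xfs_dquot_item.c changes above is that every iop_* handler now takes the embedded struct xfs_log_item pointer and recovers its containing item through the DQUOT_ITEM()/QOFF_ITEM() container_of helpers, which is what lets the function-pointer casts disappear from the ops vectors. A minimal, self-contained userspace analogue of that embed-and-downcast pattern (log_item, dq_item and item_ops are made-up names, not kernel types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item_ops;

struct log_item {
	const struct item_ops *ops;
};

struct item_ops {
	unsigned int (*iop_size)(struct log_item *lip);
};

struct dq_item {
	struct log_item	qli_item;	/* generic part embedded in the specific item */
	int		qli_id;
};

static inline struct dq_item *DQ_ITEM(struct log_item *lip)
{
	return container_of(lip, struct dq_item, qli_item);
}

static unsigned int dq_item_size(struct log_item *lip)
{
	/* the prototype already matches the ops table, so no cast is needed;
	 * the specific type is recovered here instead */
	return DQ_ITEM(lip)->qli_id ? 2 : 1;
}

static const struct item_ops dq_item_ops = {
	.iop_size = dq_item_size,
};

int main(void)
{
	struct dq_item dqi = {
		.qli_item = { .ops = &dq_item_ops },
		.qli_id   = 7,
	};
	struct log_item *lip = &dqi.qli_item;

	printf("iovecs needed: %u\n", lip->ops->iop_size(lip));
	return 0;
}

Because the prototypes in the ops table now match exactly, the compiler can type-check each assignment, which the old casts defeated.
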
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 2d8b7bc792c9..9a92407109a1 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
@@ -23,25 +23,18 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
38 | #include "xfs_btree.h" | ||
39 | #include "xfs_ialloc.h" | 33 | #include "xfs_ialloc.h" |
40 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
41 | #include "xfs_rtalloc.h" | 35 | #include "xfs_rtalloc.h" |
42 | #include "xfs_error.h" | 36 | #include "xfs_error.h" |
43 | #include "xfs_bmap.h" | 37 | #include "xfs_bmap.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 38 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 39 | #include "xfs_buf_item.h" |
47 | #include "xfs_trans_space.h" | 40 | #include "xfs_trans_space.h" |
@@ -69,7 +62,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); | |||
69 | 62 | ||
70 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); | 63 | STATIC int xfs_qm_init_quotainos(xfs_mount_t *); |
71 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); | 64 | STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); |
72 | STATIC int xfs_qm_shake(int, gfp_t); | 65 | STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t); |
73 | 66 | ||
74 | static struct shrinker xfs_qm_shaker = { | 67 | static struct shrinker xfs_qm_shaker = { |
75 | .shrink = xfs_qm_shake, | 68 | .shrink = xfs_qm_shake, |
@@ -1497,7 +1490,7 @@ xfs_qm_dqiterate( | |||
1497 | maxlblkcnt - lblkno, | 1490 | maxlblkcnt - lblkno, |
1498 | XFS_BMAPI_METADATA, | 1491 | XFS_BMAPI_METADATA, |
1499 | NULL, | 1492 | NULL, |
1500 | 0, map, &nmaps, NULL, NULL); | 1493 | 0, map, &nmaps, NULL); |
1501 | xfs_iunlock(qip, XFS_ILOCK_SHARED); | 1494 | xfs_iunlock(qip, XFS_ILOCK_SHARED); |
1502 | if (error) | 1495 | if (error) |
1503 | break; | 1496 | break; |
@@ -1632,10 +1625,7 @@ xfs_qm_dqusage_adjust( | |||
1632 | xfs_ino_t ino, /* inode number to get data for */ | 1625 | xfs_ino_t ino, /* inode number to get data for */ |
1633 | void __user *buffer, /* not used */ | 1626 | void __user *buffer, /* not used */ |
1634 | int ubsize, /* not used */ | 1627 | int ubsize, /* not used */ |
1635 | void *private_data, /* not used */ | ||
1636 | xfs_daddr_t bno, /* starting block of inode cluster */ | ||
1637 | int *ubused, /* not used */ | 1628 | int *ubused, /* not used */ |
1638 | void *dip, /* on-disk inode pointer (not used) */ | ||
1639 | int *res) /* result code value */ | 1629 | int *res) /* result code value */ |
1640 | { | 1630 | { |
1641 | xfs_inode_t *ip; | 1631 | xfs_inode_t *ip; |
@@ -1660,7 +1650,7 @@ xfs_qm_dqusage_adjust( | |||
1660 | * the case in all other instances. It's OK that we do this because | 1650 | * the case in all other instances. It's OK that we do this because |
1661 | * quotacheck is done only at mount time. | 1651 | * quotacheck is done only at mount time. |
1662 | */ | 1652 | */ |
1663 | if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) { | 1653 | if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) { |
1664 | *res = BULKSTAT_RV_NOTHING; | 1654 | *res = BULKSTAT_RV_NOTHING; |
1665 | return error; | 1655 | return error; |
1666 | } | 1656 | } |
@@ -1672,7 +1662,8 @@ xfs_qm_dqusage_adjust( | |||
1672 | * making us disable quotas for the file system. | 1662 | * making us disable quotas for the file system. |
1673 | */ | 1663 | */ |
1674 | if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { | 1664 | if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) { |
1675 | xfs_iput(ip, XFS_ILOCK_EXCL); | 1665 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1666 | IRELE(ip); | ||
1676 | *res = BULKSTAT_RV_GIVEUP; | 1667 | *res = BULKSTAT_RV_GIVEUP; |
1677 | return error; | 1668 | return error; |
1678 | } | 1669 | } |
@@ -1685,7 +1676,8 @@ xfs_qm_dqusage_adjust( | |||
1685 | * Walk thru the extent list and count the realtime blocks. | 1676 | * Walk thru the extent list and count the realtime blocks. |
1686 | */ | 1677 | */ |
1687 | if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { | 1678 | if ((error = xfs_qm_get_rtblks(ip, &rtblks))) { |
1688 | xfs_iput(ip, XFS_ILOCK_EXCL); | 1679 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1680 | IRELE(ip); | ||
1689 | if (udqp) | 1681 | if (udqp) |
1690 | xfs_qm_dqput(udqp); | 1682 | xfs_qm_dqput(udqp); |
1691 | if (gdqp) | 1683 | if (gdqp) |
@@ -1796,12 +1788,13 @@ xfs_qm_quotacheck( | |||
1796 | * Iterate thru all the inodes in the file system, | 1788 | * Iterate thru all the inodes in the file system, |
1797 | * adjusting the corresponding dquot counters in core. | 1789 | * adjusting the corresponding dquot counters in core. |
1798 | */ | 1790 | */ |
1799 | if ((error = xfs_bulkstat(mp, &lastino, &count, | 1791 | error = xfs_bulkstat(mp, &lastino, &count, |
1800 | xfs_qm_dqusage_adjust, NULL, | 1792 | xfs_qm_dqusage_adjust, |
1801 | structsz, NULL, BULKSTAT_FG_IGET, &done))) | 1793 | structsz, NULL, &done); |
1794 | if (error) | ||
1802 | break; | 1795 | break; |
1803 | 1796 | ||
1804 | } while (! done); | 1797 | } while (!done); |
1805 | 1798 | ||
1806 | /* | 1799 | /* |
1807 | * We've made all the changes that we need to make incore. | 1800 | * We've made all the changes that we need to make incore. |
@@ -1889,14 +1882,14 @@ xfs_qm_init_quotainos( | |||
1889 | mp->m_sb.sb_uquotino != NULLFSINO) { | 1882 | mp->m_sb.sb_uquotino != NULLFSINO) { |
1890 | ASSERT(mp->m_sb.sb_uquotino > 0); | 1883 | ASSERT(mp->m_sb.sb_uquotino > 0); |
1891 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 1884 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
1892 | 0, 0, &uip, 0))) | 1885 | 0, 0, &uip))) |
1893 | return XFS_ERROR(error); | 1886 | return XFS_ERROR(error); |
1894 | } | 1887 | } |
1895 | if (XFS_IS_OQUOTA_ON(mp) && | 1888 | if (XFS_IS_OQUOTA_ON(mp) && |
1896 | mp->m_sb.sb_gquotino != NULLFSINO) { | 1889 | mp->m_sb.sb_gquotino != NULLFSINO) { |
1897 | ASSERT(mp->m_sb.sb_gquotino > 0); | 1890 | ASSERT(mp->m_sb.sb_gquotino > 0); |
1898 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 1891 | if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
1899 | 0, 0, &gip, 0))) { | 1892 | 0, 0, &gip))) { |
1900 | if (uip) | 1893 | if (uip) |
1901 | IRELE(uip); | 1894 | IRELE(uip); |
1902 | return XFS_ERROR(error); | 1895 | return XFS_ERROR(error); |
@@ -2119,7 +2112,10 @@ xfs_qm_shake_freelist( | |||
2119 | */ | 2112 | */ |
2120 | /* ARGSUSED */ | 2113 | /* ARGSUSED */ |
2121 | STATIC int | 2114 | STATIC int |
2122 | xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask) | 2115 | xfs_qm_shake( |
2116 | struct shrinker *shrink, | ||
2117 | int nr_to_scan, | ||
2118 | gfp_t gfp_mask) | ||
2123 | { | 2119 | { |
2124 | int ndqused, nfree, n; | 2120 | int ndqused, nfree, n; |
2125 | 2121 | ||
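
Among the xfs_qm.c changes, xfs_qm_shake() gains a leading struct shrinker * argument to match the updated shrinker callback prototype of this kernel generation, presumably so a callback can recover per-instance state via container_of. A hedged sketch of the registration side of such a shrinker (the reclaim logic is elided and the body comments are illustrative; this is kernel-internal code, not buildable on its own):

/* sketch only: shrinker API as of this kernel series, details elided */
static int xfs_qm_shake(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return 0;	/* would report the number of reclaimable dquots */
	if (!(gfp_mask & __GFP_WAIT))
		return 0;	/* cannot block in this allocation context */
	/* ... walk the dquot freelist and free up to nr_to_scan entries ... */
	return 0;
}

static struct shrinker xfs_qm_shaker = {
	.shrink	= xfs_qm_shake,
	.seeks	= DEFAULT_SEEKS,
};

/* paired at quota-manager init/teardown:
 *	register_shrinker(&xfs_qm_shaker);
 *	unregister_shrinker(&xfs_qm_shaker);
 */
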
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index 97b410c12794..bea02d786c5d 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c | |||
@@ -23,25 +23,15 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_itable.h" | 31 | #include "xfs_itable.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_bmap.h" | 32 | #include "xfs_bmap.h" |
42 | #include "xfs_rtalloc.h" | 33 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 34 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
47 | #include "xfs_qm.h" | 37 | #include "xfs_qm.h" |
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c index 3d1fc79532e2..8671a0b32644 100644 --- a/fs/xfs/quota/xfs_qm_stats.c +++ b/fs/xfs/quota/xfs_qm_stats.c | |||
@@ -23,25 +23,15 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_itable.h" | 31 | #include "xfs_itable.h" |
40 | #include "xfs_bmap.h" | 32 | #include "xfs_bmap.h" |
41 | #include "xfs_btree.h" | ||
42 | #include "xfs_rtalloc.h" | 33 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 34 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
47 | #include "xfs_qm.h" | 37 | #include "xfs_qm.h" |
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 92b002f1805f..d257eb8557c4 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
@@ -26,25 +26,15 @@ | |||
26 | #include "xfs_trans.h" | 26 | #include "xfs_trans.h" |
27 | #include "xfs_sb.h" | 27 | #include "xfs_sb.h" |
28 | #include "xfs_ag.h" | 28 | #include "xfs_ag.h" |
29 | #include "xfs_dir2.h" | ||
30 | #include "xfs_alloc.h" | 29 | #include "xfs_alloc.h" |
31 | #include "xfs_dmapi.h" | ||
32 | #include "xfs_quota.h" | 30 | #include "xfs_quota.h" |
33 | #include "xfs_mount.h" | 31 | #include "xfs_mount.h" |
34 | #include "xfs_bmap_btree.h" | 32 | #include "xfs_bmap_btree.h" |
35 | #include "xfs_alloc_btree.h" | ||
36 | #include "xfs_ialloc_btree.h" | ||
37 | #include "xfs_dir2_sf.h" | ||
38 | #include "xfs_attr_sf.h" | ||
39 | #include "xfs_dinode.h" | ||
40 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
41 | #include "xfs_ialloc.h" | ||
42 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
43 | #include "xfs_bmap.h" | 35 | #include "xfs_bmap.h" |
44 | #include "xfs_btree.h" | ||
45 | #include "xfs_rtalloc.h" | 36 | #include "xfs_rtalloc.h" |
46 | #include "xfs_error.h" | 37 | #include "xfs_error.h" |
47 | #include "xfs_rw.h" | ||
48 | #include "xfs_attr.h" | 38 | #include "xfs_attr.h" |
49 | #include "xfs_buf_item.h" | 39 | #include "xfs_buf_item.h" |
50 | #include "xfs_utils.h" | 40 | #include "xfs_utils.h" |
@@ -248,40 +238,74 @@ out_unlock: | |||
248 | return error; | 238 | return error; |
249 | } | 239 | } |
250 | 240 | ||
241 | STATIC int | ||
242 | xfs_qm_scall_trunc_qfile( | ||
243 | struct xfs_mount *mp, | ||
244 | xfs_ino_t ino) | ||
245 | { | ||
246 | struct xfs_inode *ip; | ||
247 | struct xfs_trans *tp; | ||
248 | int error; | ||
249 | |||
250 | if (ino == NULLFSINO) | ||
251 | return 0; | ||
252 | |||
253 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip); | ||
254 | if (error) | ||
255 | return error; | ||
256 | |||
257 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | ||
258 | |||
259 | tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); | ||
260 | error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, | ||
261 | XFS_TRANS_PERM_LOG_RES, | ||
262 | XFS_ITRUNCATE_LOG_COUNT); | ||
263 | if (error) { | ||
264 | xfs_trans_cancel(tp, 0); | ||
265 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
266 | goto out_put; | ||
267 | } | ||
268 | |||
269 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
270 | xfs_trans_ijoin(tp, ip); | ||
271 | |||
272 | error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1); | ||
273 | if (error) { | ||
274 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | | ||
275 | XFS_TRANS_ABORT); | ||
276 | goto out_unlock; | ||
277 | } | ||
278 | |||
279 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | ||
280 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | ||
281 | |||
282 | out_unlock: | ||
283 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
284 | out_put: | ||
285 | IRELE(ip); | ||
286 | return error; | ||
287 | } | ||
288 | |||
251 | int | 289 | int |
252 | xfs_qm_scall_trunc_qfiles( | 290 | xfs_qm_scall_trunc_qfiles( |
253 | xfs_mount_t *mp, | 291 | xfs_mount_t *mp, |
254 | uint flags) | 292 | uint flags) |
255 | { | 293 | { |
256 | int error = 0, error2 = 0; | 294 | int error = 0, error2 = 0; |
257 | xfs_inode_t *qip; | ||
258 | 295 | ||
259 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { | 296 | if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { |
260 | qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); | 297 | qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); |
261 | return XFS_ERROR(EINVAL); | 298 | return XFS_ERROR(EINVAL); |
262 | } | 299 | } |
263 | 300 | ||
264 | if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { | 301 | if (flags & XFS_DQ_USER) |
265 | error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); | 302 | error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); |
266 | if (!error) { | 303 | if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) |
267 | error = xfs_truncate_file(mp, qip); | 304 | error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); |
268 | IRELE(qip); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && | ||
273 | mp->m_sb.sb_gquotino != NULLFSINO) { | ||
274 | error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); | ||
275 | if (!error2) { | ||
276 | error2 = xfs_truncate_file(mp, qip); | ||
277 | IRELE(qip); | ||
278 | } | ||
279 | } | ||
280 | 305 | ||
281 | return error ? error : error2; | 306 | return error ? error : error2; |
282 | } | 307 | } |
283 | 308 | ||
284 | |||
285 | /* | 309 | /* |
286 | * Switch on (a given) quota enforcement for a filesystem. This takes | 310 | * Switch on (a given) quota enforcement for a filesystem. This takes |
287 | * effect immediately. | 311 | * effect immediately. |
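The hunk above factors the per-quota-inode truncation into xfs_qm_scall_trunc_qfile(): look up the inode, reserve a permanent-log-reservation truncate transaction, do the truncate, and unwind through goto labels in reverse order on failure. Below is a minimal userspace sketch of that acquire/reserve/unwind shape; every demo_* name is an illustrative stand-in rather than an XFS function, and the single lock stands in for the separate IOLOCK/ILOCK the real helper takes.

/*
 * Sketch of the error-unwind pattern used by the new helper: acquire a
 * resource, reserve a transaction, and back out through goto labels in
 * reverse order on failure.
 */
#include <stdio.h>

struct demo_inode { int locked; };
struct demo_trans { int reserved; };

static int demo_iget(struct demo_inode *ip)           { ip->locked = 0; return 0; }
static void demo_irele(struct demo_inode *ip)         { (void)ip; }
static void demo_ilock(struct demo_inode *ip)         { ip->locked = 1; }
static void demo_iunlock(struct demo_inode *ip)       { ip->locked = 0; }
static int demo_trans_reserve(struct demo_trans *tp)  { tp->reserved = 1; return 0; }
static void demo_trans_cancel(struct demo_trans *tp)  { tp->reserved = 0; }
static int demo_trans_commit(struct demo_trans *tp)   { tp->reserved = 0; return 0; }
static int demo_truncate(struct demo_trans *tp, struct demo_inode *ip)
{
	(void)tp; (void)ip;
	return 0;
}

static int demo_trunc_qfile(void)
{
	struct demo_inode inode, *ip = &inode;
	struct demo_trans trans, *tp = &trans;
	int error;

	error = demo_iget(ip);
	if (error)
		return error;

	error = demo_trans_reserve(tp);
	if (error)
		goto out_put;			/* nothing locked or joined yet */

	demo_ilock(ip);
	error = demo_truncate(tp, ip);
	if (error) {
		demo_trans_cancel(tp);		/* abort the reservation */
		goto out_unlock;
	}
	error = demo_trans_commit(tp);

out_unlock:
	demo_iunlock(ip);
out_put:
	demo_irele(ip);
	return error;
}

int main(void)
{
	printf("trunc sketch returned %d\n", demo_trunc_qfile());
	return 0;
}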
@@ -417,12 +441,12 @@ xfs_qm_scall_getqstat( | |||
417 | } | 441 | } |
418 | if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { | 442 | if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { |
419 | if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, | 443 | if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, |
420 | 0, 0, &uip, 0) == 0) | 444 | 0, 0, &uip) == 0) |
421 | tempuqip = B_TRUE; | 445 | tempuqip = B_TRUE; |
422 | } | 446 | } |
423 | if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { | 447 | if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { |
424 | if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, | 448 | if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, |
425 | 0, 0, &gip, 0) == 0) | 449 | 0, 0, &gip) == 0) |
426 | tempgqip = B_TRUE; | 450 | tempgqip = B_TRUE; |
427 | } | 451 | } |
428 | if (uip) { | 452 | if (uip) { |
@@ -875,8 +899,9 @@ xfs_dqrele_inode( | |||
875 | xfs_qm_dqrele(ip->i_gdquot); | 899 | xfs_qm_dqrele(ip->i_gdquot); |
876 | ip->i_gdquot = NULL; | 900 | ip->i_gdquot = NULL; |
877 | } | 901 | } |
878 | xfs_iput(ip, XFS_ILOCK_EXCL); | 902 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
879 | 903 | ||
904 | IRELE(ip); | ||
880 | return 0; | 905 | return 0; |
881 | } | 906 | } |
882 | 907 | ||
@@ -1109,10 +1134,7 @@ xfs_qm_internalqcheck_adjust( | |||
1109 | xfs_ino_t ino, /* inode number to get data for */ | 1134 | xfs_ino_t ino, /* inode number to get data for */ |
1110 | void __user *buffer, /* not used */ | 1135 | void __user *buffer, /* not used */ |
1111 | int ubsize, /* not used */ | 1136 | int ubsize, /* not used */ |
1112 | void *private_data, /* not used */ | ||
1113 | xfs_daddr_t bno, /* starting block of inode cluster */ | ||
1114 | int *ubused, /* not used */ | 1137 | int *ubused, /* not used */ |
1115 | void *dip, /* not used */ | ||
1116 | int *res) /* bulkstat result code */ | 1138 | int *res) /* bulkstat result code */ |
1117 | { | 1139 | { |
1118 | xfs_inode_t *ip; | 1140 | xfs_inode_t *ip; |
@@ -1134,7 +1156,7 @@ xfs_qm_internalqcheck_adjust( | |||
1134 | ipreleased = B_FALSE; | 1156 | ipreleased = B_FALSE; |
1135 | again: | 1157 | again: |
1136 | lock_flags = XFS_ILOCK_SHARED; | 1158 | lock_flags = XFS_ILOCK_SHARED; |
1137 | if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { | 1159 | if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) { |
1138 | *res = BULKSTAT_RV_NOTHING; | 1160 | *res = BULKSTAT_RV_NOTHING; |
1139 | return (error); | 1161 | return (error); |
1140 | } | 1162 | } |
@@ -1146,7 +1168,8 @@ xfs_qm_internalqcheck_adjust( | |||
1146 | * of those now. | 1168 | * of those now. |
1147 | */ | 1169 | */ |
1148 | if (! ipreleased) { | 1170 | if (! ipreleased) { |
1149 | xfs_iput(ip, lock_flags); | 1171 | xfs_iunlock(ip, lock_flags); |
1172 | IRELE(ip); | ||
1150 | ipreleased = B_TRUE; | 1173 | ipreleased = B_TRUE; |
1151 | goto again; | 1174 | goto again; |
1152 | } | 1175 | } |
@@ -1163,7 +1186,8 @@ xfs_qm_internalqcheck_adjust( | |||
1163 | ASSERT(gd); | 1186 | ASSERT(gd); |
1164 | xfs_qm_internalqcheck_dqadjust(ip, gd); | 1187 | xfs_qm_internalqcheck_dqadjust(ip, gd); |
1165 | } | 1188 | } |
1166 | xfs_iput(ip, lock_flags); | 1189 | xfs_iunlock(ip, lock_flags); |
1190 | IRELE(ip); | ||
1167 | *res = BULKSTAT_RV_DIDONE; | 1191 | *res = BULKSTAT_RV_DIDONE; |
1168 | return (0); | 1192 | return (0); |
1169 | } | 1193 | } |
@@ -1205,15 +1229,15 @@ xfs_qm_internalqcheck( | |||
1205 | * Iterate thru all the inodes in the file system, | 1229 | * Iterate thru all the inodes in the file system, |
1206 | * adjusting the corresponding dquot counters | 1230 | * adjusting the corresponding dquot counters |
1207 | */ | 1231 | */ |
1208 | if ((error = xfs_bulkstat(mp, &lastino, &count, | 1232 | error = xfs_bulkstat(mp, &lastino, &count, |
1209 | xfs_qm_internalqcheck_adjust, NULL, | 1233 | xfs_qm_internalqcheck_adjust, |
1210 | 0, NULL, BULKSTAT_FG_IGET, &done))) { | 1234 | 0, NULL, &done); |
1235 | if (error) { | ||
1236 | cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); | ||
1211 | break; | 1237 | break; |
1212 | } | 1238 | } |
1213 | } while (! done); | 1239 | } while (!done); |
1214 | if (error) { | 1240 | |
1215 | cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); | ||
1216 | } | ||
1217 | cmn_err(CE_DEBUG, "Checking results against system dquots"); | 1241 | cmn_err(CE_DEBUG, "Checking results against system dquots"); |
1218 | for (i = 0; i < qmtest_hashmask; i++) { | 1242 | for (i = 0; i < qmtest_hashmask; i++) { |
1219 | xfs_dqtest_t *d, *n; | 1243 | xfs_dqtest_t *d, *n; |
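This file's hunks also narrow the xfs_bulkstat() callback: the unused private_data, cluster-block, and dinode parameters are gone, the BULKSTAT_FG_IGET flag argument disappears from the call site, and the combined xfs_iput() is split into xfs_iunlock() plus IRELE(). A small sketch of the trimmed-callback idea follows, assuming a generic iterator; the demo_* names are not the bulkstat API.

/*
 * Sketch of the callback simplification: the iterator passes only what
 * every caller needs, instead of threading unused parameters through.
 */
#include <stdio.h>

typedef int (*demo_bulk_fn)(unsigned long ino, int *res);

static int demo_bulk_iter(unsigned long first, unsigned long count,
			  demo_bulk_fn fn)
{
	unsigned long ino;
	int res, error;

	for (ino = first; ino < first + count; ino++) {
		error = fn(ino, &res);
		if (error)
			return error;
	}
	return 0;
}

static int demo_check_inode(unsigned long ino, int *res)
{
	*res = 1;			/* "did one" */
	printf("checked inode %lu\n", ino);
	return 0;
}

int main(void)
{
	return demo_bulk_iter(128, 4, demo_check_inode);
}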
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 061d827da33c..7de91d1b75c0 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c | |||
@@ -23,25 +23,15 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_dinode.h" | ||
37 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_itable.h" | 31 | #include "xfs_itable.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_bmap.h" | 32 | #include "xfs_bmap.h" |
42 | #include "xfs_rtalloc.h" | 33 | #include "xfs_rtalloc.h" |
43 | #include "xfs_error.h" | 34 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_attr.h" | 35 | #include "xfs_attr.h" |
46 | #include "xfs_buf_item.h" | 36 | #include "xfs_buf_item.h" |
47 | #include "xfs_trans_priv.h" | 37 | #include "xfs_trans_priv.h" |
@@ -59,16 +49,14 @@ xfs_trans_dqjoin( | |||
59 | xfs_trans_t *tp, | 49 | xfs_trans_t *tp, |
60 | xfs_dquot_t *dqp) | 50 | xfs_dquot_t *dqp) |
61 | { | 51 | { |
62 | xfs_dq_logitem_t *lp = &dqp->q_logitem; | ||
63 | |||
64 | ASSERT(dqp->q_transp != tp); | 52 | ASSERT(dqp->q_transp != tp); |
65 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 53 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
66 | ASSERT(lp->qli_dquot == dqp); | 54 | ASSERT(dqp->q_logitem.qli_dquot == dqp); |
67 | 55 | ||
68 | /* | 56 | /* |
69 | * Get a log_item_desc to point at the new item. | 57 | * Get a log_item_desc to point at the new item. |
70 | */ | 58 | */ |
71 | (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp)); | 59 | xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); |
72 | 60 | ||
73 | /* | 61 | /* |
74 | * Initialize i_transp so we can later determine if this dquot is | 62 | * Initialize i_transp so we can later determine if this dquot is |
@@ -93,16 +81,11 @@ xfs_trans_log_dquot( | |||
93 | xfs_trans_t *tp, | 81 | xfs_trans_t *tp, |
94 | xfs_dquot_t *dqp) | 82 | xfs_dquot_t *dqp) |
95 | { | 83 | { |
96 | xfs_log_item_desc_t *lidp; | ||
97 | |||
98 | ASSERT(dqp->q_transp == tp); | 84 | ASSERT(dqp->q_transp == tp); |
99 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 85 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
100 | 86 | ||
101 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem)); | ||
102 | ASSERT(lidp != NULL); | ||
103 | |||
104 | tp->t_flags |= XFS_TRANS_DIRTY; | 87 | tp->t_flags |= XFS_TRANS_DIRTY; |
105 | lidp->lid_flags |= XFS_LID_DIRTY; | 88 | dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
106 | } | 89 | } |
107 | 90 | ||
108 | /* | 91 | /* |
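With the descriptor now reachable through the log item's li_desc pointer, logging a dquot no longer walks the transaction's item list via xfs_trans_find_item(); marking it dirty becomes a direct field update. A standalone sketch of that back-pointer arrangement, with purely illustrative demo_* names:

/*
 * Sketch of the li_desc idea: the item keeps a back-pointer to its
 * descriptor, installed when it is added to the transaction, so
 * logging it later needs no list search.
 */
#include <stdio.h>

#define DEMO_LID_DIRTY	0x1
#define DEMO_TRANS_DIRTY	0x1

struct demo_item_desc { unsigned int lid_flags; };
struct demo_log_item  { struct demo_item_desc *li_desc; };
struct demo_trans {
	struct demo_item_desc	descs[8];
	int			ndescs;
	unsigned int		t_flags;
};

/* add_item installs the back-pointer once ... */
static void demo_trans_add_item(struct demo_trans *tp, struct demo_log_item *lip)
{
	struct demo_item_desc *lidp = &tp->descs[tp->ndescs++];

	lidp->lid_flags = 0;
	lip->li_desc = lidp;
}

/* ... so logging the item is a direct field update, no list walk. */
static void demo_trans_log_item(struct demo_trans *tp, struct demo_log_item *lip)
{
	tp->t_flags |= DEMO_TRANS_DIRTY;
	lip->li_desc->lid_flags |= DEMO_LID_DIRTY;
}

int main(void)
{
	struct demo_trans tp = { .ndescs = 0, .t_flags = 0 };
	struct demo_log_item item = { 0 };

	demo_trans_add_item(&tp, &item);
	demo_trans_log_item(&tp, &item);
	printf("lid_flags=%#x t_flags=%#x\n",
	       item.li_desc->lid_flags, tp.t_flags);
	return 0;
}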
@@ -874,9 +857,8 @@ xfs_trans_get_qoff_item( | |||
874 | /* | 857 | /* |
875 | * Get a log_item_desc to point at the new item. | 858 | * Get a log_item_desc to point at the new item. |
876 | */ | 859 | */ |
877 | (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q); | 860 | xfs_trans_add_item(tp, &q->qql_item); |
878 | 861 | return q; | |
879 | return (q); | ||
880 | } | 862 | } |
881 | 863 | ||
882 | 864 | ||
@@ -890,13 +872,8 @@ xfs_trans_log_quotaoff_item( | |||
890 | xfs_trans_t *tp, | 872 | xfs_trans_t *tp, |
891 | xfs_qoff_logitem_t *qlp) | 873 | xfs_qoff_logitem_t *qlp) |
892 | { | 874 | { |
893 | xfs_log_item_desc_t *lidp; | ||
894 | |||
895 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp); | ||
896 | ASSERT(lidp != NULL); | ||
897 | |||
898 | tp->t_flags |= XFS_TRANS_DIRTY; | 875 | tp->t_flags |= XFS_TRANS_DIRTY; |
899 | lidp->lid_flags |= XFS_LID_DIRTY; | 876 | qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
900 | } | 877 | } |
901 | 878 | ||
902 | STATIC void | 879 | STATIC void |
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index 3f3610a7ee05..975aa10e1a47 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include "xfs_sb.h" | 22 | #include "xfs_sb.h" |
23 | #include "xfs_inum.h" | 23 | #include "xfs_inum.h" |
24 | #include "xfs_ag.h" | 24 | #include "xfs_ag.h" |
25 | #include "xfs_dmapi.h" | ||
26 | #include "xfs_mount.h" | 25 | #include "xfs_mount.h" |
27 | #include "xfs_error.h" | 26 | #include "xfs_error.h" |
28 | 27 | ||
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index a7fbe8a99b12..af168faccc7a 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -24,18 +24,13 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_alloc.h" | 34 | #include "xfs_alloc.h" |
40 | #include "xfs_error.h" | 35 | #include "xfs_error.h" |
41 | #include "xfs_trace.h" | 36 | #include "xfs_trace.h" |
@@ -688,8 +683,6 @@ xfs_alloc_ag_vextent_near( | |||
688 | xfs_agblock_t ltbno; /* start bno of left side entry */ | 683 | xfs_agblock_t ltbno; /* start bno of left side entry */ |
689 | xfs_agblock_t ltbnoa; /* aligned ... */ | 684 | xfs_agblock_t ltbnoa; /* aligned ... */ |
690 | xfs_extlen_t ltdiff; /* difference to left side entry */ | 685 | xfs_extlen_t ltdiff; /* difference to left side entry */ |
691 | /*REFERENCED*/ | ||
692 | xfs_agblock_t ltend; /* end bno of left side entry */ | ||
693 | xfs_extlen_t ltlen; /* length of left side entry */ | 686 | xfs_extlen_t ltlen; /* length of left side entry */ |
694 | xfs_extlen_t ltlena; /* aligned ... */ | 687 | xfs_extlen_t ltlena; /* aligned ... */ |
695 | xfs_agblock_t ltnew; /* useful start bno of left side */ | 688 | xfs_agblock_t ltnew; /* useful start bno of left side */ |
@@ -814,8 +807,7 @@ xfs_alloc_ag_vextent_near( | |||
814 | if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) | 807 | if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) |
815 | goto error0; | 808 | goto error0; |
816 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); | 809 | XFS_WANT_CORRUPTED_GOTO(i == 1, error0); |
817 | ltend = ltbno + ltlen; | 810 | ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
818 | ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); | ||
819 | args->len = blen; | 811 | args->len = blen; |
820 | if (!xfs_alloc_fix_minleft(args)) { | 812 | if (!xfs_alloc_fix_minleft(args)) { |
821 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); | 813 | xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); |
@@ -828,7 +820,7 @@ xfs_alloc_ag_vextent_near( | |||
828 | */ | 820 | */ |
829 | args->agbno = bnew; | 821 | args->agbno = bnew; |
830 | ASSERT(bnew >= ltbno); | 822 | ASSERT(bnew >= ltbno); |
831 | ASSERT(bnew + blen <= ltend); | 823 | ASSERT(bnew + blen <= ltbno + ltlen); |
832 | /* | 824 | /* |
833 | * Set up a cursor for the by-bno tree. | 825 | * Set up a cursor for the by-bno tree. |
834 | */ | 826 | */ |
@@ -1157,7 +1149,6 @@ xfs_alloc_ag_vextent_near( | |||
1157 | /* | 1149 | /* |
1158 | * Fix up the length and compute the useful address. | 1150 | * Fix up the length and compute the useful address. |
1159 | */ | 1151 | */ |
1160 | ltend = ltbno + ltlen; | ||
1161 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); | 1152 | args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); |
1162 | xfs_alloc_fix_len(args); | 1153 | xfs_alloc_fix_len(args); |
1163 | if (!xfs_alloc_fix_minleft(args)) { | 1154 | if (!xfs_alloc_fix_minleft(args)) { |
@@ -1170,7 +1161,7 @@ xfs_alloc_ag_vextent_near( | |||
1170 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno, | 1161 | (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno, |
1171 | ltlen, &ltnew); | 1162 | ltlen, &ltnew); |
1172 | ASSERT(ltnew >= ltbno); | 1163 | ASSERT(ltnew >= ltbno); |
1173 | ASSERT(ltnew + rlen <= ltend); | 1164 | ASSERT(ltnew + rlen <= ltbno + ltlen); |
1174 | ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); | 1165 | ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); |
1175 | args->agbno = ltnew; | 1166 | args->agbno = ltnew; |
1176 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, | 1167 | if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen, |
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 6d05199b667c..895009a97271 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h | |||
@@ -27,16 +27,16 @@ struct xfs_busy_extent; | |||
27 | /* | 27 | /* |
28 | * Freespace allocation types. Argument to xfs_alloc_[v]extent. | 28 | * Freespace allocation types. Argument to xfs_alloc_[v]extent. |
29 | */ | 29 | */ |
30 | typedef enum xfs_alloctype | 30 | #define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */ |
31 | { | 31 | #define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */ |
32 | XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */ | 32 | #define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */ |
33 | XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */ | 33 | #define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */ |
34 | XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */ | 34 | #define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */ |
35 | XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */ | 35 | #define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */ |
36 | XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */ | 36 | #define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */ |
37 | XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */ | 37 | |
38 | XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */ | 38 | /* this should become an enum again when the tracing code is fixed */ |
39 | } xfs_alloctype_t; | 39 | typedef unsigned int xfs_alloctype_t; |
40 | 40 | ||
41 | #define XFS_ALLOC_TYPES \ | 41 | #define XFS_ALLOC_TYPES \ |
42 | { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \ | 42 | { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \ |
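Here the allocation-type enum becomes a set of distinct bit values plus a typedef'd unsigned int so the existing XFS_ALLOC_TYPES value/name table keeps working with the tracing macros (the added comment notes it should return to an enum once tracing is fixed). A compilable sketch of the same define-plus-name-table pattern, with demo_* names standing in for the real symbols; the constant values mirror the patch, the lookup helper is illustrative.

/*
 * Sketch of the #define-plus-name-table pattern: distinct values with
 * a table mapping each value to a printable name.
 */
#include <stdio.h>

#define DEMO_ALLOCTYPE_ANY_AG		0x01
#define DEMO_ALLOCTYPE_FIRST_AG		0x02
#define DEMO_ALLOCTYPE_START_AG		0x04
#define DEMO_ALLOCTYPE_THIS_AG		0x08
#define DEMO_ALLOCTYPE_START_BNO	0x10
#define DEMO_ALLOCTYPE_NEAR_BNO		0x20
#define DEMO_ALLOCTYPE_THIS_BNO		0x40

typedef unsigned int demo_alloctype_t;

static const struct {
	demo_alloctype_t	type;
	const char		*name;
} demo_alloc_types[] = {
	{ DEMO_ALLOCTYPE_ANY_AG,	"ANY_AG" },
	{ DEMO_ALLOCTYPE_FIRST_AG,	"FIRST_AG" },
	{ DEMO_ALLOCTYPE_START_AG,	"START_AG" },
	{ DEMO_ALLOCTYPE_THIS_AG,	"THIS_AG" },
	{ DEMO_ALLOCTYPE_START_BNO,	"START_BNO" },
	{ DEMO_ALLOCTYPE_NEAR_BNO,	"NEAR_BNO" },
	{ DEMO_ALLOCTYPE_THIS_BNO,	"THIS_BNO" },
};

static const char *demo_alloctype_name(demo_alloctype_t type)
{
	size_t i;

	for (i = 0; i < sizeof(demo_alloc_types) / sizeof(demo_alloc_types[0]); i++)
		if (demo_alloc_types[i].type == type)
			return demo_alloc_types[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", demo_alloctype_name(DEMO_ALLOCTYPE_NEAR_BNO));
	return 0;
}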
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 83f494218759..97f7328967fd 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c | |||
@@ -24,19 +24,14 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
38 | #include "xfs_btree_trace.h" | 34 | #include "xfs_btree_trace.h" |
39 | #include "xfs_ialloc.h" | ||
40 | #include "xfs_alloc.h" | 35 | #include "xfs_alloc.h" |
41 | #include "xfs_error.h" | 36 | #include "xfs_error.h" |
42 | #include "xfs_trace.h" | 37 | #include "xfs_trace.h" |
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index b9c196a53c42..c2568242a901 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c | |||
@@ -25,19 +25,13 @@ | |||
25 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
26 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
27 | #include "xfs_ag.h" | 27 | #include "xfs_ag.h" |
28 | #include "xfs_dir2.h" | ||
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_da_btree.h" | 29 | #include "xfs_da_btree.h" |
32 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
33 | #include "xfs_alloc_btree.h" | ||
34 | #include "xfs_ialloc_btree.h" | ||
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | 31 | #include "xfs_attr_sf.h" |
37 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
38 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
39 | #include "xfs_alloc.h" | 34 | #include "xfs_alloc.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_inode_item.h" | 35 | #include "xfs_inode_item.h" |
42 | #include "xfs_bmap.h" | 36 | #include "xfs_bmap.h" |
43 | #include "xfs_attr.h" | 37 | #include "xfs_attr.h" |
@@ -325,8 +319,7 @@ xfs_attr_set_int( | |||
325 | return (error); | 319 | return (error); |
326 | } | 320 | } |
327 | 321 | ||
328 | xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); | 322 | xfs_trans_ijoin(args.trans, dp); |

329 | xfs_trans_ihold(args.trans, dp); | ||
330 | 323 | ||
331 | /* | 324 | /* |
332 | * If the attribute list is non-existent or a shortform list, | 325 | * If the attribute list is non-existent or a shortform list, |
@@ -396,10 +389,8 @@ xfs_attr_set_int( | |||
396 | * bmap_finish() may have committed the last trans and started | 389 | * bmap_finish() may have committed the last trans and started |
397 | * a new one. We need the inode to be in all transactions. | 390 | * a new one. We need the inode to be in all transactions. |
398 | */ | 391 | */ |
399 | if (committed) { | 392 | if (committed) |
400 | xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); | 393 | xfs_trans_ijoin(args.trans, dp); |
401 | xfs_trans_ihold(args.trans, dp); | ||
402 | } | ||
403 | 394 | ||
404 | /* | 395 | /* |
405 | * Commit the leaf transformation. We'll need another (linked) | 396 | * Commit the leaf transformation. We'll need another (linked) |
@@ -544,8 +535,7 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) | |||
544 | * No need to make quota reservations here. We expect to release some | 535 | * No need to make quota reservations here. We expect to release some |
545 | * blocks not allocate in the common case. | 536 | * blocks not allocate in the common case. |
546 | */ | 537 | */ |
547 | xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL); | 538 | xfs_trans_ijoin(args.trans, dp); |
548 | xfs_trans_ihold(args.trans, dp); | ||
549 | 539 | ||
550 | /* | 540 | /* |
551 | * Decide on what work routines to call based on the inode size. | 541 | * Decide on what work routines to call based on the inode size. |
@@ -821,8 +811,7 @@ xfs_attr_inactive(xfs_inode_t *dp) | |||
821 | * No need to make quota reservations here. We expect to release some | 811 | * No need to make quota reservations here. We expect to release some |
822 | * blocks, not allocate, in the common case. | 812 | * blocks, not allocate, in the common case. |
823 | */ | 813 | */ |
824 | xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); | 814 | xfs_trans_ijoin(trans, dp); |
825 | xfs_trans_ihold(trans, dp); | ||
826 | 815 | ||
827 | /* | 816 | /* |
828 | * Decide on what work routines to call based on the inode size. | 817 | * Decide on what work routines to call based on the inode size. |
@@ -981,10 +970,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
981 | * bmap_finish() may have committed the last trans and started | 970 | * bmap_finish() may have committed the last trans and started |
982 | * a new one. We need the inode to be in all transactions. | 971 | * a new one. We need the inode to be in all transactions. |
983 | */ | 972 | */ |
984 | if (committed) { | 973 | if (committed) |
985 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 974 | xfs_trans_ijoin(args->trans, dp); |
986 | xfs_trans_ihold(args->trans, dp); | ||
987 | } | ||
988 | 975 | ||
989 | /* | 976 | /* |
990 | * Commit the current trans (including the inode) and start | 977 | * Commit the current trans (including the inode) and start |
@@ -1085,10 +1072,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args) | |||
1085 | * and started a new one. We need the inode to be | 1072 | * and started a new one. We need the inode to be |
1086 | * in all transactions. | 1073 | * in all transactions. |
1087 | */ | 1074 | */ |
1088 | if (committed) { | 1075 | if (committed) |
1089 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1076 | xfs_trans_ijoin(args->trans, dp); |
1090 | xfs_trans_ihold(args->trans, dp); | ||
1091 | } | ||
1092 | } else | 1077 | } else |
1093 | xfs_da_buf_done(bp); | 1078 | xfs_da_buf_done(bp); |
1094 | 1079 | ||
@@ -1161,10 +1146,8 @@ xfs_attr_leaf_removename(xfs_da_args_t *args) | |||
1161 | * bmap_finish() may have committed the last trans and started | 1146 | * bmap_finish() may have committed the last trans and started |
1162 | * a new one. We need the inode to be in all transactions. | 1147 | * a new one. We need the inode to be in all transactions. |
1163 | */ | 1148 | */ |
1164 | if (committed) { | 1149 | if (committed) |
1165 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1150 | xfs_trans_ijoin(args->trans, dp); |
1166 | xfs_trans_ihold(args->trans, dp); | ||
1167 | } | ||
1168 | } else | 1151 | } else |
1169 | xfs_da_buf_done(bp); | 1152 | xfs_da_buf_done(bp); |
1170 | return(0); | 1153 | return(0); |
@@ -1317,10 +1300,8 @@ restart: | |||
1317 | * and started a new one. We need the inode to be | 1300 | * and started a new one. We need the inode to be |
1318 | * in all transactions. | 1301 | * in all transactions. |
1319 | */ | 1302 | */ |
1320 | if (committed) { | 1303 | if (committed) |
1321 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1304 | xfs_trans_ijoin(args->trans, dp); |
1322 | xfs_trans_ihold(args->trans, dp); | ||
1323 | } | ||
1324 | 1305 | ||
1325 | /* | 1306 | /* |
1326 | * Commit the node conversion and start the next | 1307 | * Commit the node conversion and start the next |
@@ -1356,10 +1337,8 @@ restart: | |||
1356 | * bmap_finish() may have committed the last trans and started | 1337 | * bmap_finish() may have committed the last trans and started |
1357 | * a new one. We need the inode to be in all transactions. | 1338 | * a new one. We need the inode to be in all transactions. |
1358 | */ | 1339 | */ |
1359 | if (committed) { | 1340 | if (committed) |
1360 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1341 | xfs_trans_ijoin(args->trans, dp); |
1361 | xfs_trans_ihold(args->trans, dp); | ||
1362 | } | ||
1363 | } else { | 1342 | } else { |
1364 | /* | 1343 | /* |
1365 | * Addition succeeded, update Btree hashvals. | 1344 | * Addition succeeded, update Btree hashvals. |
@@ -1470,10 +1449,8 @@ restart: | |||
1470 | * and started a new one. We need the inode to be | 1449 | * and started a new one. We need the inode to be |
1471 | * in all transactions. | 1450 | * in all transactions. |
1472 | */ | 1451 | */ |
1473 | if (committed) { | 1452 | if (committed) |
1474 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1453 | xfs_trans_ijoin(args->trans, dp); |
1475 | xfs_trans_ihold(args->trans, dp); | ||
1476 | } | ||
1477 | } | 1454 | } |
1478 | 1455 | ||
1479 | /* | 1456 | /* |
@@ -1604,10 +1581,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
1604 | * bmap_finish() may have committed the last trans and started | 1581 | * bmap_finish() may have committed the last trans and started |
1605 | * a new one. We need the inode to be in all transactions. | 1582 | * a new one. We need the inode to be in all transactions. |
1606 | */ | 1583 | */ |
1607 | if (committed) { | 1584 | if (committed) |
1608 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1585 | xfs_trans_ijoin(args->trans, dp); |
1609 | xfs_trans_ihold(args->trans, dp); | ||
1610 | } | ||
1611 | 1586 | ||
1612 | /* | 1587 | /* |
1613 | * Commit the Btree join operation and start a new trans. | 1588 | * Commit the Btree join operation and start a new trans. |
@@ -1658,10 +1633,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) | |||
1658 | * and started a new one. We need the inode to be | 1633 | * and started a new one. We need the inode to be |
1659 | * in all transactions. | 1634 | * in all transactions. |
1660 | */ | 1635 | */ |
1661 | if (committed) { | 1636 | if (committed) |
1662 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 1637 | xfs_trans_ijoin(args->trans, dp); |
1663 | xfs_trans_ihold(args->trans, dp); | ||
1664 | } | ||
1665 | } else | 1638 | } else |
1666 | xfs_da_brelse(args->trans, bp); | 1639 | xfs_da_brelse(args->trans, bp); |
1667 | } | 1640 | } |
@@ -2004,7 +1977,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args) | |||
2004 | error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno, | 1977 | error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno, |
2005 | args->rmtblkcnt, | 1978 | args->rmtblkcnt, |
2006 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 1979 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
2007 | NULL, 0, map, &nmap, NULL, NULL); | 1980 | NULL, 0, map, &nmap, NULL); |
2008 | if (error) | 1981 | if (error) |
2009 | return(error); | 1982 | return(error); |
2010 | ASSERT(nmap >= 1); | 1983 | ASSERT(nmap >= 1); |
@@ -2083,7 +2056,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2083 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA | | 2056 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA | |
2084 | XFS_BMAPI_WRITE, | 2057 | XFS_BMAPI_WRITE, |
2085 | args->firstblock, args->total, &map, &nmap, | 2058 | args->firstblock, args->total, &map, &nmap, |
2086 | args->flist, NULL); | 2059 | args->flist); |
2087 | if (!error) { | 2060 | if (!error) { |
2088 | error = xfs_bmap_finish(&args->trans, args->flist, | 2061 | error = xfs_bmap_finish(&args->trans, args->flist, |
2089 | &committed); | 2062 | &committed); |
@@ -2099,10 +2072,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2099 | * bmap_finish() may have committed the last trans and started | 2072 | * bmap_finish() may have committed the last trans and started |
2100 | * a new one. We need the inode to be in all transactions. | 2073 | * a new one. We need the inode to be in all transactions. |
2101 | */ | 2074 | */ |
2102 | if (committed) { | 2075 | if (committed) |
2103 | xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL); | 2076 | xfs_trans_ijoin(args->trans, dp); |
2104 | xfs_trans_ihold(args->trans, dp); | ||
2105 | } | ||
2106 | 2077 | ||
2107 | ASSERT(nmap == 1); | 2078 | ASSERT(nmap == 1); |
2108 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && | 2079 | ASSERT((map.br_startblock != DELAYSTARTBLOCK) && |
@@ -2136,7 +2107,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) | |||
2136 | args->rmtblkcnt, | 2107 | args->rmtblkcnt, |
2137 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 2108 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
2138 | args->firstblock, 0, &map, &nmap, | 2109 | args->firstblock, 0, &map, &nmap, |
2139 | NULL, NULL); | 2110 | NULL); |
2140 | if (error) { | 2111 | if (error) { |
2141 | return(error); | 2112 | return(error); |
2142 | } | 2113 | } |
@@ -2201,7 +2172,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
2201 | args->rmtblkcnt, | 2172 | args->rmtblkcnt, |
2202 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 2173 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
2203 | args->firstblock, 0, &map, &nmap, | 2174 | args->firstblock, 0, &map, &nmap, |
2204 | args->flist, NULL); | 2175 | args->flist); |
2205 | if (error) { | 2176 | if (error) { |
2206 | return(error); | 2177 | return(error); |
2207 | } | 2178 | } |
@@ -2239,7 +2210,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
2239 | error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, | 2210 | error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, |
2240 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 2211 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
2241 | 1, args->firstblock, args->flist, | 2212 | 1, args->firstblock, args->flist, |
2242 | NULL, &done); | 2213 | &done); |
2243 | if (!error) { | 2214 | if (!error) { |
2244 | error = xfs_bmap_finish(&args->trans, args->flist, | 2215 | error = xfs_bmap_finish(&args->trans, args->flist, |
2245 | &committed); | 2216 | &committed); |
@@ -2255,10 +2226,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args) | |||
2255 | * bmap_finish() may have committed the last trans and started | 2226 | * bmap_finish() may have committed the last trans and started |
2256 | * a new one. We need the inode to be in all transactions. | 2227 | * a new one. We need the inode to be in all transactions. |
2257 | */ | 2228 | */ |
2258 | if (committed) { | 2229 | if (committed) |
2259 | xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL); | 2230 | xfs_trans_ijoin(args->trans, args->dp); |
2260 | xfs_trans_ihold(args->trans, args->dp); | ||
2261 | } | ||
2262 | 2231 | ||
2263 | /* | 2232 | /* |
2264 | * Close out trans and start the next one in the chain. | 2233 | * Close out trans and start the next one in the chain. |
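Throughout xfs_attr.c the patch collapses the repeated "if (committed) { xfs_trans_ijoin(...); xfs_trans_ihold(...); }" pairs into a single xfs_trans_ijoin() call that no longer takes lock flags: after bmap_finish() rolls the transaction chain, one call re-joins and holds the inode. A rough userspace sketch of that join-after-roll pattern, with demo_* stand-ins rather than the xfs_trans_* API:

/*
 * Sketch of join-after-roll: committing the old transaction drops the
 * inode's association with it, so the inode is re-joined to the new
 * transaction with a single call.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_inode { const char *name; };
struct demo_trans { const struct demo_inode *joined; };

/* one call now both joins the inode and keeps it held across commit */
static void demo_trans_ijoin(struct demo_trans *tp, const struct demo_inode *ip)
{
	tp->joined = ip;
}

static bool demo_roll_transaction(struct demo_trans *tp)
{
	tp->joined = NULL;		/* commit drops the old association */
	return true;			/* "committed" */
}

int main(void)
{
	struct demo_inode dp = { "attr-inode" };
	struct demo_trans tp = { NULL };
	bool committed;

	demo_trans_ijoin(&tp, &dp);
	committed = demo_roll_transaction(&tp);
	if (committed)
		demo_trans_ijoin(&tp, &dp);	/* single call replaces join+hold */

	printf("joined: %s\n", tp.joined ? tp.joined->name : "(none)");
	return 0;
}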
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index a90ce74fc256..a6cff8edcdb6 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
@@ -24,8 +24,6 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
@@ -33,7 +31,6 @@ | |||
33 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_alloc.h" | 32 | #include "xfs_alloc.h" |
35 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
36 | #include "xfs_dir2_sf.h" | ||
37 | #include "xfs_attr_sf.h" | 34 | #include "xfs_attr_sf.h" |
38 | #include "xfs_dinode.h" | 35 | #include "xfs_dinode.h" |
39 | #include "xfs_inode.h" | 36 | #include "xfs_inode.h" |
@@ -2931,7 +2928,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp, | |||
2931 | nmap = 1; | 2928 | nmap = 1; |
2932 | error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt, | 2929 | error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt, |
2933 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, | 2930 | XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, |
2934 | NULL, 0, &map, &nmap, NULL, NULL); | 2931 | NULL, 0, &map, &nmap, NULL); |
2935 | if (error) { | 2932 | if (error) { |
2936 | return(error); | 2933 | return(error); |
2937 | } | 2934 | } |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 99587ded043f..23f14e595c18 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -30,13 +30,10 @@ | |||
30 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
31 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
32 | #include "xfs_dir2_sf.h" | 32 | #include "xfs_dir2_sf.h" |
33 | #include "xfs_attr_sf.h" | ||
34 | #include "xfs_dinode.h" | 33 | #include "xfs_dinode.h" |
35 | #include "xfs_inode.h" | 34 | #include "xfs_inode.h" |
36 | #include "xfs_btree.h" | 35 | #include "xfs_btree.h" |
37 | #include "xfs_dmapi.h" | ||
38 | #include "xfs_mount.h" | 36 | #include "xfs_mount.h" |
39 | #include "xfs_ialloc.h" | ||
40 | #include "xfs_itable.h" | 37 | #include "xfs_itable.h" |
41 | #include "xfs_dir2_data.h" | 38 | #include "xfs_dir2_data.h" |
42 | #include "xfs_dir2_leaf.h" | 39 | #include "xfs_dir2_leaf.h" |
@@ -104,7 +101,6 @@ xfs_bmap_add_extent( | |||
104 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 101 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
105 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 102 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
106 | int *logflagsp, /* inode logging flags */ | 103 | int *logflagsp, /* inode logging flags */ |
107 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
108 | int whichfork, /* data or attr fork */ | 104 | int whichfork, /* data or attr fork */ |
109 | int rsvd); /* OK to allocate reserved blocks */ | 105 | int rsvd); /* OK to allocate reserved blocks */ |
110 | 106 | ||
@@ -122,7 +118,6 @@ xfs_bmap_add_extent_delay_real( | |||
122 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 118 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
123 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 119 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
124 | int *logflagsp, /* inode logging flags */ | 120 | int *logflagsp, /* inode logging flags */ |
125 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
126 | int rsvd); /* OK to allocate reserved blocks */ | 121 | int rsvd); /* OK to allocate reserved blocks */ |
127 | 122 | ||
128 | /* | 123 | /* |
@@ -135,7 +130,6 @@ xfs_bmap_add_extent_hole_delay( | |||
135 | xfs_extnum_t idx, /* extent number to update/insert */ | 130 | xfs_extnum_t idx, /* extent number to update/insert */ |
136 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 131 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
137 | int *logflagsp,/* inode logging flags */ | 132 | int *logflagsp,/* inode logging flags */ |
138 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
139 | int rsvd); /* OK to allocate reserved blocks */ | 133 | int rsvd); /* OK to allocate reserved blocks */ |
140 | 134 | ||
141 | /* | 135 | /* |
@@ -149,7 +143,6 @@ xfs_bmap_add_extent_hole_real( | |||
149 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 143 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
150 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 144 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
151 | int *logflagsp, /* inode logging flags */ | 145 | int *logflagsp, /* inode logging flags */ |
152 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
153 | int whichfork); /* data or attr fork */ | 146 | int whichfork); /* data or attr fork */ |
154 | 147 | ||
155 | /* | 148 | /* |
@@ -162,8 +155,7 @@ xfs_bmap_add_extent_unwritten_real( | |||
162 | xfs_extnum_t idx, /* extent number to update/insert */ | 155 | xfs_extnum_t idx, /* extent number to update/insert */ |
163 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 156 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
164 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 157 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
165 | int *logflagsp, /* inode logging flags */ | 158 | int *logflagsp); /* inode logging flags */ |
166 | xfs_extdelta_t *delta); /* Change made to incore extents */ | ||
167 | 159 | ||
168 | /* | 160 | /* |
169 | * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. | 161 | * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. |
@@ -200,7 +192,6 @@ xfs_bmap_del_extent( | |||
200 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 192 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
201 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 193 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
202 | int *logflagsp,/* inode logging flags */ | 194 | int *logflagsp,/* inode logging flags */ |
203 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
204 | int whichfork, /* data or attr fork */ | 195 | int whichfork, /* data or attr fork */ |
205 | int rsvd); /* OK to allocate reserved blocks */ | 196 | int rsvd); /* OK to allocate reserved blocks */ |
206 | 197 | ||
@@ -489,7 +480,6 @@ xfs_bmap_add_extent( | |||
489 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 480 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
490 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 481 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
491 | int *logflagsp, /* inode logging flags */ | 482 | int *logflagsp, /* inode logging flags */ |
492 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
493 | int whichfork, /* data or attr fork */ | 483 | int whichfork, /* data or attr fork */ |
494 | int rsvd) /* OK to use reserved data blocks */ | 484 | int rsvd) /* OK to use reserved data blocks */ |
495 | { | 485 | { |
@@ -524,15 +514,6 @@ xfs_bmap_add_extent( | |||
524 | logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); | 514 | logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
525 | } else | 515 | } else |
526 | logflags = 0; | 516 | logflags = 0; |
527 | /* DELTA: single new extent */ | ||
528 | if (delta) { | ||
529 | if (delta->xed_startoff > new->br_startoff) | ||
530 | delta->xed_startoff = new->br_startoff; | ||
531 | if (delta->xed_blockcount < | ||
532 | new->br_startoff + new->br_blockcount) | ||
533 | delta->xed_blockcount = new->br_startoff + | ||
534 | new->br_blockcount; | ||
535 | } | ||
536 | } | 517 | } |
537 | /* | 518 | /* |
538 | * Any kind of new delayed allocation goes here. | 519 | * Any kind of new delayed allocation goes here. |
@@ -542,7 +523,7 @@ xfs_bmap_add_extent( | |||
542 | ASSERT((cur->bc_private.b.flags & | 523 | ASSERT((cur->bc_private.b.flags & |
543 | XFS_BTCUR_BPRV_WASDEL) == 0); | 524 | XFS_BTCUR_BPRV_WASDEL) == 0); |
544 | if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, | 525 | if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, |
545 | &logflags, delta, rsvd))) | 526 | &logflags, rsvd))) |
546 | goto done; | 527 | goto done; |
547 | } | 528 | } |
548 | /* | 529 | /* |
@@ -553,7 +534,7 @@ xfs_bmap_add_extent( | |||
553 | ASSERT((cur->bc_private.b.flags & | 534 | ASSERT((cur->bc_private.b.flags & |
554 | XFS_BTCUR_BPRV_WASDEL) == 0); | 535 | XFS_BTCUR_BPRV_WASDEL) == 0); |
555 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, | 536 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, |
556 | &logflags, delta, whichfork))) | 537 | &logflags, whichfork))) |
557 | goto done; | 538 | goto done; |
558 | } else { | 539 | } else { |
559 | xfs_bmbt_irec_t prev; /* old extent at offset idx */ | 540 | xfs_bmbt_irec_t prev; /* old extent at offset idx */ |
@@ -578,17 +559,17 @@ xfs_bmap_add_extent( | |||
578 | XFS_BTCUR_BPRV_WASDEL); | 559 | XFS_BTCUR_BPRV_WASDEL); |
579 | if ((error = xfs_bmap_add_extent_delay_real(ip, | 560 | if ((error = xfs_bmap_add_extent_delay_real(ip, |
580 | idx, &cur, new, &da_new, first, flist, | 561 | idx, &cur, new, &da_new, first, flist, |
581 | &logflags, delta, rsvd))) | 562 | &logflags, rsvd))) |
582 | goto done; | 563 | goto done; |
583 | } else if (new->br_state == XFS_EXT_NORM) { | 564 | } else if (new->br_state == XFS_EXT_NORM) { |
584 | ASSERT(new->br_state == XFS_EXT_NORM); | 565 | ASSERT(new->br_state == XFS_EXT_NORM); |
585 | if ((error = xfs_bmap_add_extent_unwritten_real( | 566 | if ((error = xfs_bmap_add_extent_unwritten_real( |
586 | ip, idx, &cur, new, &logflags, delta))) | 567 | ip, idx, &cur, new, &logflags))) |
587 | goto done; | 568 | goto done; |
588 | } else { | 569 | } else { |
589 | ASSERT(new->br_state == XFS_EXT_UNWRITTEN); | 570 | ASSERT(new->br_state == XFS_EXT_UNWRITTEN); |
590 | if ((error = xfs_bmap_add_extent_unwritten_real( | 571 | if ((error = xfs_bmap_add_extent_unwritten_real( |
591 | ip, idx, &cur, new, &logflags, delta))) | 572 | ip, idx, &cur, new, &logflags))) |
592 | goto done; | 573 | goto done; |
593 | } | 574 | } |
594 | ASSERT(*curp == cur || *curp == NULL); | 575 | ASSERT(*curp == cur || *curp == NULL); |
@@ -601,7 +582,7 @@ xfs_bmap_add_extent( | |||
601 | ASSERT((cur->bc_private.b.flags & | 582 | ASSERT((cur->bc_private.b.flags & |
602 | XFS_BTCUR_BPRV_WASDEL) == 0); | 583 | XFS_BTCUR_BPRV_WASDEL) == 0); |
603 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, | 584 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, |
604 | new, &logflags, delta, whichfork))) | 585 | new, &logflags, whichfork))) |
605 | goto done; | 586 | goto done; |
606 | } | 587 | } |
607 | } | 588 | } |
@@ -666,7 +647,6 @@ xfs_bmap_add_extent_delay_real( | |||
666 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 647 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
667 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 648 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
668 | int *logflagsp, /* inode logging flags */ | 649 | int *logflagsp, /* inode logging flags */ |
669 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
670 | int rsvd) /* OK to use reserved data block allocation */ | 650 | int rsvd) /* OK to use reserved data block allocation */ |
671 | { | 651 | { |
672 | xfs_btree_cur_t *cur; /* btree cursor */ | 652 | xfs_btree_cur_t *cur; /* btree cursor */ |
@@ -797,11 +777,6 @@ xfs_bmap_add_extent_delay_real( | |||
797 | goto done; | 777 | goto done; |
798 | } | 778 | } |
799 | *dnew = 0; | 779 | *dnew = 0; |
800 | /* DELTA: Three in-core extents are replaced by one. */ | ||
801 | temp = LEFT.br_startoff; | ||
802 | temp2 = LEFT.br_blockcount + | ||
803 | PREV.br_blockcount + | ||
804 | RIGHT.br_blockcount; | ||
805 | break; | 780 | break; |
806 | 781 | ||
807 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | 782 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: |
@@ -832,10 +807,6 @@ xfs_bmap_add_extent_delay_real( | |||
832 | goto done; | 807 | goto done; |
833 | } | 808 | } |
834 | *dnew = 0; | 809 | *dnew = 0; |
835 | /* DELTA: Two in-core extents are replaced by one. */ | ||
836 | temp = LEFT.br_startoff; | ||
837 | temp2 = LEFT.br_blockcount + | ||
838 | PREV.br_blockcount; | ||
839 | break; | 810 | break; |
840 | 811 | ||
841 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | 812 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: |
@@ -867,10 +838,6 @@ xfs_bmap_add_extent_delay_real( | |||
867 | goto done; | 838 | goto done; |
868 | } | 839 | } |
869 | *dnew = 0; | 840 | *dnew = 0; |
870 | /* DELTA: Two in-core extents are replaced by one. */ | ||
871 | temp = PREV.br_startoff; | ||
872 | temp2 = PREV.br_blockcount + | ||
873 | RIGHT.br_blockcount; | ||
874 | break; | 841 | break; |
875 | 842 | ||
876 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: | 843 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: |
@@ -900,9 +867,6 @@ xfs_bmap_add_extent_delay_real( | |||
900 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 867 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
901 | } | 868 | } |
902 | *dnew = 0; | 869 | *dnew = 0; |
903 | /* DELTA: The in-core extent described by new changed type. */ | ||
904 | temp = new->br_startoff; | ||
905 | temp2 = new->br_blockcount; | ||
906 | break; | 870 | break; |
907 | 871 | ||
908 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: | 872 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: |
@@ -942,10 +906,6 @@ xfs_bmap_add_extent_delay_real( | |||
942 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 906 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
943 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 907 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); |
944 | *dnew = temp; | 908 | *dnew = temp; |
945 | /* DELTA: The boundary between two in-core extents moved. */ | ||
946 | temp = LEFT.br_startoff; | ||
947 | temp2 = LEFT.br_blockcount + | ||
948 | PREV.br_blockcount; | ||
949 | break; | 909 | break; |
950 | 910 | ||
951 | case BMAP_LEFT_FILLING: | 911 | case BMAP_LEFT_FILLING: |
@@ -990,9 +950,6 @@ xfs_bmap_add_extent_delay_real( | |||
990 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 950 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
991 | trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); | 951 | trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); |
992 | *dnew = temp; | 952 | *dnew = temp; |
993 | /* DELTA: One in-core extent is split in two. */ | ||
994 | temp = PREV.br_startoff; | ||
995 | temp2 = PREV.br_blockcount; | ||
996 | break; | 953 | break; |
997 | 954 | ||
998 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | 955 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: |
@@ -1031,10 +988,6 @@ xfs_bmap_add_extent_delay_real( | |||
1031 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 988 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
1032 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 989 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); |
1033 | *dnew = temp; | 990 | *dnew = temp; |
1034 | /* DELTA: The boundary between two in-core extents moved. */ | ||
1035 | temp = PREV.br_startoff; | ||
1036 | temp2 = PREV.br_blockcount + | ||
1037 | RIGHT.br_blockcount; | ||
1038 | break; | 991 | break; |
1039 | 992 | ||
1040 | case BMAP_RIGHT_FILLING: | 993 | case BMAP_RIGHT_FILLING: |
@@ -1078,9 +1031,6 @@ xfs_bmap_add_extent_delay_real( | |||
1078 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 1031 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
1079 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1032 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); |
1080 | *dnew = temp; | 1033 | *dnew = temp; |
1081 | /* DELTA: One in-core extent is split in two. */ | ||
1082 | temp = PREV.br_startoff; | ||
1083 | temp2 = PREV.br_blockcount; | ||
1084 | break; | 1034 | break; |
1085 | 1035 | ||
1086 | case 0: | 1036 | case 0: |
@@ -1161,9 +1111,6 @@ xfs_bmap_add_extent_delay_real( | |||
1161 | nullstartblock((int)temp2)); | 1111 | nullstartblock((int)temp2)); |
1162 | trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); | 1112 | trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); |
1163 | *dnew = temp + temp2; | 1113 | *dnew = temp + temp2; |
1164 | /* DELTA: One in-core extent is split in three. */ | ||
1165 | temp = PREV.br_startoff; | ||
1166 | temp2 = PREV.br_blockcount; | ||
1167 | break; | 1114 | break; |
1168 | 1115 | ||
1169 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | 1116 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: |
@@ -1179,13 +1126,6 @@ xfs_bmap_add_extent_delay_real( | |||
1179 | ASSERT(0); | 1126 | ASSERT(0); |
1180 | } | 1127 | } |
1181 | *curp = cur; | 1128 | *curp = cur; |
1182 | if (delta) { | ||
1183 | temp2 += temp; | ||
1184 | if (delta->xed_startoff > temp) | ||
1185 | delta->xed_startoff = temp; | ||
1186 | if (delta->xed_blockcount < temp2) | ||
1187 | delta->xed_blockcount = temp2; | ||
1188 | } | ||
1189 | done: | 1129 | done: |
1190 | *logflagsp = rval; | 1130 | *logflagsp = rval; |
1191 | return error; | 1131 | return error; |
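The running theme of the xfs_bmap.c hunks is removal of the xfs_extdelta_t out-parameter: each add-extent path used to accumulate the lowest start offset and highest end offset it touched into an optional delta, and with no remaining callers passing one, the parameter and the per-case temp/temp2 bookkeeping are dropped. For reference, a self-contained sketch of what that deleted accumulation computed; the demo_* names are illustrative, not the removed API.

/*
 * What the deleted "delta" bookkeeping computed: the union of the file
 * ranges touched, kept as a minimum start offset and a maximum end
 * offset.
 */
#include <stdio.h>

struct demo_delta {
	unsigned long long start;	/* lowest startoff seen */
	unsigned long long end;		/* highest startoff + blockcount seen */
};

static void demo_delta_accumulate(struct demo_delta *delta,
				  unsigned long long startoff,
				  unsigned long long blockcount)
{
	if (delta->start > startoff)
		delta->start = startoff;
	if (delta->end < startoff + blockcount)
		delta->end = startoff + blockcount;
}

int main(void)
{
	struct demo_delta d = { .start = ~0ULL, .end = 0 };

	demo_delta_accumulate(&d, 100, 8);
	demo_delta_accumulate(&d, 64, 16);
	printf("touched range [%llu, %llu)\n", d.start, d.end);
	return 0;
}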
@@ -1204,8 +1144,7 @@ xfs_bmap_add_extent_unwritten_real( | |||
1204 | xfs_extnum_t idx, /* extent number to update/insert */ | 1144 | xfs_extnum_t idx, /* extent number to update/insert */ |
1205 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 1145 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
1206 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1146 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1207 | int *logflagsp, /* inode logging flags */ | 1147 | int *logflagsp) /* inode logging flags */ |
1208 | xfs_extdelta_t *delta) /* Change made to incore extents */ | ||
1209 | { | 1148 | { |
1210 | xfs_btree_cur_t *cur; /* btree cursor */ | 1149 | xfs_btree_cur_t *cur; /* btree cursor */ |
1211 | xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ | 1150 | xfs_bmbt_rec_host_t *ep; /* extent entry for idx */ |
@@ -1219,8 +1158,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1219 | /* left is 0, right is 1, prev is 2 */ | 1158 | /* left is 0, right is 1, prev is 2 */ |
1220 | int rval=0; /* return value (logging flags) */ | 1159 | int rval=0; /* return value (logging flags) */ |
1221 | int state = 0;/* state bits, accessed thru macros */ | 1160 | int state = 0;/* state bits, accessed thru macros */ |
1222 | xfs_filblks_t temp=0; | ||
1223 | xfs_filblks_t temp2=0; | ||
1224 | 1161 | ||
1225 | #define LEFT r[0] | 1162 | #define LEFT r[0] |
1226 | #define RIGHT r[1] | 1163 | #define RIGHT r[1] |
@@ -1341,11 +1278,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1341 | RIGHT.br_blockcount, LEFT.br_state))) | 1278 | RIGHT.br_blockcount, LEFT.br_state))) |
1342 | goto done; | 1279 | goto done; |
1343 | } | 1280 | } |
1344 | /* DELTA: Three in-core extents are replaced by one. */ | ||
1345 | temp = LEFT.br_startoff; | ||
1346 | temp2 = LEFT.br_blockcount + | ||
1347 | PREV.br_blockcount + | ||
1348 | RIGHT.br_blockcount; | ||
1349 | break; | 1281 | break; |
1350 | 1282 | ||
1351 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: | 1283 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG: |
@@ -1382,10 +1314,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1382 | LEFT.br_state))) | 1314 | LEFT.br_state))) |
1383 | goto done; | 1315 | goto done; |
1384 | } | 1316 | } |
1385 | /* DELTA: Two in-core extents are replaced by one. */ | ||
1386 | temp = LEFT.br_startoff; | ||
1387 | temp2 = LEFT.br_blockcount + | ||
1388 | PREV.br_blockcount; | ||
1389 | break; | 1317 | break; |
1390 | 1318 | ||
1391 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | 1319 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: |
@@ -1422,10 +1350,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1422 | newext))) | 1350 | newext))) |
1423 | goto done; | 1351 | goto done; |
1424 | } | 1352 | } |
1425 | /* DELTA: Two in-core extents are replaced by one. */ | ||
1426 | temp = PREV.br_startoff; | ||
1427 | temp2 = PREV.br_blockcount + | ||
1428 | RIGHT.br_blockcount; | ||
1429 | break; | 1353 | break; |
1430 | 1354 | ||
1431 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: | 1355 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING: |
@@ -1453,9 +1377,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1453 | newext))) | 1377 | newext))) |
1454 | goto done; | 1378 | goto done; |
1455 | } | 1379 | } |
1456 | /* DELTA: The in-core extent described by new changed type. */ | ||
1457 | temp = new->br_startoff; | ||
1458 | temp2 = new->br_blockcount; | ||
1459 | break; | 1380 | break; |
1460 | 1381 | ||
1461 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: | 1382 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG: |
@@ -1501,10 +1422,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1501 | LEFT.br_state)) | 1422 | LEFT.br_state)) |
1502 | goto done; | 1423 | goto done; |
1503 | } | 1424 | } |
1504 | /* DELTA: The boundary between two in-core extents moved. */ | ||
1505 | temp = LEFT.br_startoff; | ||
1506 | temp2 = LEFT.br_blockcount + | ||
1507 | PREV.br_blockcount; | ||
1508 | break; | 1425 | break; |
1509 | 1426 | ||
1510 | case BMAP_LEFT_FILLING: | 1427 | case BMAP_LEFT_FILLING: |
@@ -1544,9 +1461,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1544 | goto done; | 1461 | goto done; |
1545 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 1462 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
1546 | } | 1463 | } |
1547 | /* DELTA: One in-core extent is split in two. */ | ||
1548 | temp = PREV.br_startoff; | ||
1549 | temp2 = PREV.br_blockcount; | ||
1550 | break; | 1464 | break; |
1551 | 1465 | ||
1552 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | 1466 | case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: |
@@ -1587,10 +1501,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1587 | newext))) | 1501 | newext))) |
1588 | goto done; | 1502 | goto done; |
1589 | } | 1503 | } |
1590 | /* DELTA: The boundary between two in-core extents moved. */ | ||
1591 | temp = PREV.br_startoff; | ||
1592 | temp2 = PREV.br_blockcount + | ||
1593 | RIGHT.br_blockcount; | ||
1594 | break; | 1504 | break; |
1595 | 1505 | ||
1596 | case BMAP_RIGHT_FILLING: | 1506 | case BMAP_RIGHT_FILLING: |
@@ -1630,9 +1540,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1630 | goto done; | 1540 | goto done; |
1631 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 1541 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
1632 | } | 1542 | } |
1633 | /* DELTA: One in-core extent is split in two. */ | ||
1634 | temp = PREV.br_startoff; | ||
1635 | temp2 = PREV.br_blockcount; | ||
1636 | break; | 1543 | break; |
1637 | 1544 | ||
1638 | case 0: | 1545 | case 0: |
@@ -1692,9 +1599,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1692 | goto done; | 1599 | goto done; |
1693 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 1600 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
1694 | } | 1601 | } |
1695 | /* DELTA: One in-core extent is split in three. */ | ||
1696 | temp = PREV.br_startoff; | ||
1697 | temp2 = PREV.br_blockcount; | ||
1698 | break; | 1602 | break; |
1699 | 1603 | ||
1700 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: | 1604 | case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG: |
@@ -1710,13 +1614,6 @@ xfs_bmap_add_extent_unwritten_real( | |||
1710 | ASSERT(0); | 1614 | ASSERT(0); |
1711 | } | 1615 | } |
1712 | *curp = cur; | 1616 | *curp = cur; |
1713 | if (delta) { | ||
1714 | temp2 += temp; | ||
1715 | if (delta->xed_startoff > temp) | ||
1716 | delta->xed_startoff = temp; | ||
1717 | if (delta->xed_blockcount < temp2) | ||
1718 | delta->xed_blockcount = temp2; | ||
1719 | } | ||
1720 | done: | 1617 | done: |
1721 | *logflagsp = rval; | 1618 | *logflagsp = rval; |
1722 | return error; | 1619 | return error; |
@@ -1736,7 +1633,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1736 | xfs_extnum_t idx, /* extent number to update/insert */ | 1633 | xfs_extnum_t idx, /* extent number to update/insert */ |
1737 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1634 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1738 | int *logflagsp, /* inode logging flags */ | 1635 | int *logflagsp, /* inode logging flags */ |
1739 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
1740 | int rsvd) /* OK to allocate reserved blocks */ | 1636 | int rsvd) /* OK to allocate reserved blocks */ |
1741 | { | 1637 | { |
1742 | xfs_bmbt_rec_host_t *ep; /* extent record for idx */ | 1638 | xfs_bmbt_rec_host_t *ep; /* extent record for idx */ |
@@ -1747,7 +1643,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1747 | xfs_bmbt_irec_t right; /* right neighbor extent entry */ | 1643 | xfs_bmbt_irec_t right; /* right neighbor extent entry */ |
1748 | int state; /* state bits, accessed thru macros */ | 1644 | int state; /* state bits, accessed thru macros */ |
1749 | xfs_filblks_t temp=0; /* temp for indirect calculations */ | 1645 | xfs_filblks_t temp=0; /* temp for indirect calculations */ |
1750 | xfs_filblks_t temp2=0; | ||
1751 | 1646 | ||
1752 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 1647 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
1753 | ep = xfs_iext_get_ext(ifp, idx); | 1648 | ep = xfs_iext_get_ext(ifp, idx); |
@@ -1819,9 +1714,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1819 | 1714 | ||
1820 | xfs_iext_remove(ip, idx, 1, state); | 1715 | xfs_iext_remove(ip, idx, 1, state); |
1821 | ip->i_df.if_lastex = idx - 1; | 1716 | ip->i_df.if_lastex = idx - 1; |
1822 | /* DELTA: Two in-core extents were replaced by one. */ | ||
1823 | temp2 = temp; | ||
1824 | temp = left.br_startoff; | ||
1825 | break; | 1717 | break; |
1826 | 1718 | ||
1827 | case BMAP_LEFT_CONTIG: | 1719 | case BMAP_LEFT_CONTIG: |
@@ -1841,9 +1733,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1841 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1733 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); |
1842 | 1734 | ||
1843 | ip->i_df.if_lastex = idx - 1; | 1735 | ip->i_df.if_lastex = idx - 1; |
1844 | /* DELTA: One in-core extent grew into a hole. */ | ||
1845 | temp2 = temp; | ||
1846 | temp = left.br_startoff; | ||
1847 | break; | 1736 | break; |
1848 | 1737 | ||
1849 | case BMAP_RIGHT_CONTIG: | 1738 | case BMAP_RIGHT_CONTIG: |
@@ -1862,9 +1751,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1862 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1751 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); |
1863 | 1752 | ||
1864 | ip->i_df.if_lastex = idx; | 1753 | ip->i_df.if_lastex = idx; |
1865 | /* DELTA: One in-core extent grew into a hole. */ | ||
1866 | temp2 = temp; | ||
1867 | temp = new->br_startoff; | ||
1868 | break; | 1754 | break; |
1869 | 1755 | ||
1870 | case 0: | 1756 | case 0: |
@@ -1876,9 +1762,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1876 | oldlen = newlen = 0; | 1762 | oldlen = newlen = 0; |
1877 | xfs_iext_insert(ip, idx, 1, new, state); | 1763 | xfs_iext_insert(ip, idx, 1, new, state); |
1878 | ip->i_df.if_lastex = idx; | 1764 | ip->i_df.if_lastex = idx; |
1879 | /* DELTA: A new in-core extent was added in a hole. */ | ||
1880 | temp2 = new->br_blockcount; | ||
1881 | temp = new->br_startoff; | ||
1882 | break; | 1765 | break; |
1883 | } | 1766 | } |
1884 | if (oldlen != newlen) { | 1767 | if (oldlen != newlen) { |
@@ -1889,13 +1772,6 @@ xfs_bmap_add_extent_hole_delay( | |||
1889 | * Nothing to do for disk quota accounting here. | 1772 | * Nothing to do for disk quota accounting here. |
1890 | */ | 1773 | */ |
1891 | } | 1774 | } |
1892 | if (delta) { | ||
1893 | temp2 += temp; | ||
1894 | if (delta->xed_startoff > temp) | ||
1895 | delta->xed_startoff = temp; | ||
1896 | if (delta->xed_blockcount < temp2) | ||
1897 | delta->xed_blockcount = temp2; | ||
1898 | } | ||
1899 | *logflagsp = 0; | 1775 | *logflagsp = 0; |
1900 | return 0; | 1776 | return 0; |
1901 | } | 1777 | } |
@@ -1911,7 +1787,6 @@ xfs_bmap_add_extent_hole_real( | |||
1911 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 1787 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
1912 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1788 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1913 | int *logflagsp, /* inode logging flags */ | 1789 | int *logflagsp, /* inode logging flags */ |
1914 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
1915 | int whichfork) /* data or attr fork */ | 1790 | int whichfork) /* data or attr fork */ |
1916 | { | 1791 | { |
1917 | xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */ | 1792 | xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */ |
@@ -1922,8 +1797,6 @@ xfs_bmap_add_extent_hole_real( | |||
1922 | xfs_bmbt_irec_t right; /* right neighbor extent entry */ | 1797 | xfs_bmbt_irec_t right; /* right neighbor extent entry */ |
1923 | int rval=0; /* return value (logging flags) */ | 1798 | int rval=0; /* return value (logging flags) */ |
1924 | int state; /* state bits, accessed thru macros */ | 1799 | int state; /* state bits, accessed thru macros */ |
1925 | xfs_filblks_t temp=0; | ||
1926 | xfs_filblks_t temp2=0; | ||
1927 | 1800 | ||
1928 | ifp = XFS_IFORK_PTR(ip, whichfork); | 1801 | ifp = XFS_IFORK_PTR(ip, whichfork); |
1929 | ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); | 1802 | ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); |
@@ -2020,11 +1893,6 @@ xfs_bmap_add_extent_hole_real( | |||
2020 | left.br_state))) | 1893 | left.br_state))) |
2021 | goto done; | 1894 | goto done; |
2022 | } | 1895 | } |
2023 | /* DELTA: Two in-core extents were replaced by one. */ | ||
2024 | temp = left.br_startoff; | ||
2025 | temp2 = left.br_blockcount + | ||
2026 | new->br_blockcount + | ||
2027 | right.br_blockcount; | ||
2028 | break; | 1896 | break; |
2029 | 1897 | ||
2030 | case BMAP_LEFT_CONTIG: | 1898 | case BMAP_LEFT_CONTIG: |
@@ -2056,10 +1924,6 @@ xfs_bmap_add_extent_hole_real( | |||
2056 | left.br_state))) | 1924 | left.br_state))) |
2057 | goto done; | 1925 | goto done; |
2058 | } | 1926 | } |
2059 | /* DELTA: One in-core extent grew. */ | ||
2060 | temp = left.br_startoff; | ||
2061 | temp2 = left.br_blockcount + | ||
2062 | new->br_blockcount; | ||
2063 | break; | 1927 | break; |
2064 | 1928 | ||
2065 | case BMAP_RIGHT_CONTIG: | 1929 | case BMAP_RIGHT_CONTIG: |
@@ -2092,10 +1956,6 @@ xfs_bmap_add_extent_hole_real( | |||
2092 | right.br_state))) | 1956 | right.br_state))) |
2093 | goto done; | 1957 | goto done; |
2094 | } | 1958 | } |
2095 | /* DELTA: One in-core extent grew. */ | ||
2096 | temp = new->br_startoff; | ||
2097 | temp2 = new->br_blockcount + | ||
2098 | right.br_blockcount; | ||
2099 | break; | 1959 | break; |
2100 | 1960 | ||
2101 | case 0: | 1961 | case 0: |
@@ -2123,18 +1983,8 @@ xfs_bmap_add_extent_hole_real( | |||
2123 | goto done; | 1983 | goto done; |
2124 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 1984 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
2125 | } | 1985 | } |
2126 | /* DELTA: A new extent was added in a hole. */ | ||
2127 | temp = new->br_startoff; | ||
2128 | temp2 = new->br_blockcount; | ||
2129 | break; | 1986 | break; |
2130 | } | 1987 | } |
2131 | if (delta) { | ||
2132 | temp2 += temp; | ||
2133 | if (delta->xed_startoff > temp) | ||
2134 | delta->xed_startoff = temp; | ||
2135 | if (delta->xed_blockcount < temp2) | ||
2136 | delta->xed_blockcount = temp2; | ||
2137 | } | ||
2138 | done: | 1988 | done: |
2139 | *logflagsp = rval; | 1989 | *logflagsp = rval; |
2140 | return error; | 1990 | return error; |
@@ -2959,7 +2809,6 @@ xfs_bmap_del_extent( | |||
2959 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 2809 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
2960 | xfs_bmbt_irec_t *del, /* data to remove from extents */ | 2810 | xfs_bmbt_irec_t *del, /* data to remove from extents */ |
2961 | int *logflagsp, /* inode logging flags */ | 2811 | int *logflagsp, /* inode logging flags */ |
2962 | xfs_extdelta_t *delta, /* Change made to incore extents */ | ||
2963 | int whichfork, /* data or attr fork */ | 2812 | int whichfork, /* data or attr fork */ |
2964 | int rsvd) /* OK to allocate reserved blocks */ | 2813 | int rsvd) /* OK to allocate reserved blocks */ |
2965 | { | 2814 | { |
@@ -3265,14 +3114,6 @@ xfs_bmap_del_extent( | |||
3265 | if (da_old > da_new) | 3114 | if (da_old > da_new) |
3266 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), | 3115 | xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new), |
3267 | rsvd); | 3116 | rsvd); |
3268 | if (delta) { | ||
3269 | /* DELTA: report the original extent. */ | ||
3270 | if (delta->xed_startoff > got.br_startoff) | ||
3271 | delta->xed_startoff = got.br_startoff; | ||
3272 | if (delta->xed_blockcount < got.br_startoff+got.br_blockcount) | ||
3273 | delta->xed_blockcount = got.br_startoff + | ||
3274 | got.br_blockcount; | ||
3275 | } | ||
3276 | done: | 3117 | done: |
3277 | *logflagsp = flags; | 3118 | *logflagsp = flags; |
3278 | return error; | 3119 | return error; |
@@ -3754,9 +3595,10 @@ xfs_bmap_add_attrfork( | |||
3754 | ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; | 3595 | ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS; |
3755 | } | 3596 | } |
3756 | ASSERT(ip->i_d.di_anextents == 0); | 3597 | ASSERT(ip->i_d.di_anextents == 0); |
3757 | IHOLD(ip); | 3598 | |
3758 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 3599 | xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); |
3759 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 3600 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
3601 | |||
3760 | switch (ip->i_d.di_format) { | 3602 | switch (ip->i_d.di_format) { |
3761 | case XFS_DINODE_FMT_DEV: | 3603 | case XFS_DINODE_FMT_DEV: |
3762 | ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; | 3604 | ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3; |
@@ -4483,8 +4325,7 @@ xfs_bmapi( | |||
4483 | xfs_extlen_t total, /* total blocks needed */ | 4325 | xfs_extlen_t total, /* total blocks needed */ |
4484 | xfs_bmbt_irec_t *mval, /* output: map values */ | 4326 | xfs_bmbt_irec_t *mval, /* output: map values */ |
4485 | int *nmap, /* i/o: mval size/count */ | 4327 | int *nmap, /* i/o: mval size/count */ |
4486 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ | 4328 | xfs_bmap_free_t *flist) /* i/o: list extents to free */ |
4487 | xfs_extdelta_t *delta) /* o: change made to incore extents */ | ||
4488 | { | 4329 | { |
4489 | xfs_fsblock_t abno; /* allocated block number */ | 4330 | xfs_fsblock_t abno; /* allocated block number */ |
4490 | xfs_extlen_t alen; /* allocated extent length */ | 4331 | xfs_extlen_t alen; /* allocated extent length */ |
@@ -4596,10 +4437,7 @@ xfs_bmapi( | |||
4596 | end = bno + len; | 4437 | end = bno + len; |
4597 | obno = bno; | 4438 | obno = bno; |
4598 | bma.ip = NULL; | 4439 | bma.ip = NULL; |
4599 | if (delta) { | 4440 | |
4600 | delta->xed_startoff = NULLFILEOFF; | ||
4601 | delta->xed_blockcount = 0; | ||
4602 | } | ||
4603 | while (bno < end && n < *nmap) { | 4441 | while (bno < end && n < *nmap) { |
4604 | /* | 4442 | /* |
4605 | * Reading past eof, act as though there's a hole | 4443 | * Reading past eof, act as though there's a hole |
@@ -4620,19 +4458,13 @@ xfs_bmapi( | |||
4620 | * allocate the stuff asked for in this bmap call | 4458 | * allocate the stuff asked for in this bmap call |
4621 | * but that wouldn't be as good. | 4459 | * but that wouldn't be as good. |
4622 | */ | 4460 | */ |
4623 | if (wasdelay && !(flags & XFS_BMAPI_EXACT)) { | 4461 | if (wasdelay) { |
4624 | alen = (xfs_extlen_t)got.br_blockcount; | 4462 | alen = (xfs_extlen_t)got.br_blockcount; |
4625 | aoff = got.br_startoff; | 4463 | aoff = got.br_startoff; |
4626 | if (lastx != NULLEXTNUM && lastx) { | 4464 | if (lastx != NULLEXTNUM && lastx) { |
4627 | ep = xfs_iext_get_ext(ifp, lastx - 1); | 4465 | ep = xfs_iext_get_ext(ifp, lastx - 1); |
4628 | xfs_bmbt_get_all(ep, &prev); | 4466 | xfs_bmbt_get_all(ep, &prev); |
4629 | } | 4467 | } |
4630 | } else if (wasdelay) { | ||
4631 | alen = (xfs_extlen_t) | ||
4632 | XFS_FILBLKS_MIN(len, | ||
4633 | (got.br_startoff + | ||
4634 | got.br_blockcount) - bno); | ||
4635 | aoff = bno; | ||
4636 | } else { | 4468 | } else { |
4637 | alen = (xfs_extlen_t) | 4469 | alen = (xfs_extlen_t) |
4638 | XFS_FILBLKS_MIN(len, MAXEXTLEN); | 4470 | XFS_FILBLKS_MIN(len, MAXEXTLEN); |
@@ -4831,7 +4663,7 @@ xfs_bmapi( | |||
4831 | got.br_state = XFS_EXT_UNWRITTEN; | 4663 | got.br_state = XFS_EXT_UNWRITTEN; |
4832 | } | 4664 | } |
4833 | error = xfs_bmap_add_extent(ip, lastx, &cur, &got, | 4665 | error = xfs_bmap_add_extent(ip, lastx, &cur, &got, |
4834 | firstblock, flist, &tmp_logflags, delta, | 4666 | firstblock, flist, &tmp_logflags, |
4835 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); | 4667 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); |
4836 | logflags |= tmp_logflags; | 4668 | logflags |= tmp_logflags; |
4837 | if (error) | 4669 | if (error) |
@@ -4927,7 +4759,7 @@ xfs_bmapi( | |||
4927 | } | 4759 | } |
4928 | mval->br_state = XFS_EXT_NORM; | 4760 | mval->br_state = XFS_EXT_NORM; |
4929 | error = xfs_bmap_add_extent(ip, lastx, &cur, mval, | 4761 | error = xfs_bmap_add_extent(ip, lastx, &cur, mval, |
4930 | firstblock, flist, &tmp_logflags, delta, | 4762 | firstblock, flist, &tmp_logflags, |
4931 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); | 4763 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); |
4932 | logflags |= tmp_logflags; | 4764 | logflags |= tmp_logflags; |
4933 | if (error) | 4765 | if (error) |
@@ -5017,14 +4849,6 @@ xfs_bmapi( | |||
5017 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || | 4849 | ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE || |
5018 | XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max); | 4850 | XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max); |
5019 | error = 0; | 4851 | error = 0; |
5020 | if (delta && delta->xed_startoff != NULLFILEOFF) { | ||
5021 | /* A change was actually made. | ||
5022 | * Note that delta->xed_blockount is an offset at this | ||
5023 | * point and needs to be converted to a block count. | ||
5024 | */ | ||
5025 | ASSERT(delta->xed_blockcount > delta->xed_startoff); | ||
5026 | delta->xed_blockcount -= delta->xed_startoff; | ||
5027 | } | ||
5028 | error0: | 4852 | error0: |
5029 | /* | 4853 | /* |
5030 | * Log everything. Do this after conversion, there's no point in | 4854 | * Log everything. Do this after conversion, there's no point in |
@@ -5136,8 +4960,6 @@ xfs_bunmapi( | |||
5136 | xfs_fsblock_t *firstblock, /* first allocated block | 4960 | xfs_fsblock_t *firstblock, /* first allocated block |
5137 | controls a.g. for allocs */ | 4961 | controls a.g. for allocs */ |
5138 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ | 4962 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ |
5139 | xfs_extdelta_t *delta, /* o: change made to incore | ||
5140 | extents */ | ||
5141 | int *done) /* set if not done yet */ | 4963 | int *done) /* set if not done yet */ |
5142 | { | 4964 | { |
5143 | xfs_btree_cur_t *cur; /* bmap btree cursor */ | 4965 | xfs_btree_cur_t *cur; /* bmap btree cursor */ |
@@ -5196,10 +5018,7 @@ xfs_bunmapi( | |||
5196 | bno = start + len - 1; | 5018 | bno = start + len - 1; |
5197 | ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, | 5019 | ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, |
5198 | &prev); | 5020 | &prev); |
5199 | if (delta) { | 5021 | |
5200 | delta->xed_startoff = NULLFILEOFF; | ||
5201 | delta->xed_blockcount = 0; | ||
5202 | } | ||
5203 | /* | 5022 | /* |
5204 | * Check to see if the given block number is past the end of the | 5023 | * Check to see if the given block number is past the end of the |
5205 | * file, back up to the last block if so... | 5024 | * file, back up to the last block if so... |
@@ -5297,7 +5116,7 @@ xfs_bunmapi( | |||
5297 | } | 5116 | } |
5298 | del.br_state = XFS_EXT_UNWRITTEN; | 5117 | del.br_state = XFS_EXT_UNWRITTEN; |
5299 | error = xfs_bmap_add_extent(ip, lastx, &cur, &del, | 5118 | error = xfs_bmap_add_extent(ip, lastx, &cur, &del, |
5300 | firstblock, flist, &logflags, delta, | 5119 | firstblock, flist, &logflags, |
5301 | XFS_DATA_FORK, 0); | 5120 | XFS_DATA_FORK, 0); |
5302 | if (error) | 5121 | if (error) |
5303 | goto error0; | 5122 | goto error0; |
@@ -5352,7 +5171,7 @@ xfs_bunmapi( | |||
5352 | prev.br_state = XFS_EXT_UNWRITTEN; | 5171 | prev.br_state = XFS_EXT_UNWRITTEN; |
5353 | error = xfs_bmap_add_extent(ip, lastx - 1, &cur, | 5172 | error = xfs_bmap_add_extent(ip, lastx - 1, &cur, |
5354 | &prev, firstblock, flist, &logflags, | 5173 | &prev, firstblock, flist, &logflags, |
5355 | delta, XFS_DATA_FORK, 0); | 5174 | XFS_DATA_FORK, 0); |
5356 | if (error) | 5175 | if (error) |
5357 | goto error0; | 5176 | goto error0; |
5358 | goto nodelete; | 5177 | goto nodelete; |
@@ -5361,7 +5180,7 @@ xfs_bunmapi( | |||
5361 | del.br_state = XFS_EXT_UNWRITTEN; | 5180 | del.br_state = XFS_EXT_UNWRITTEN; |
5362 | error = xfs_bmap_add_extent(ip, lastx, &cur, | 5181 | error = xfs_bmap_add_extent(ip, lastx, &cur, |
5363 | &del, firstblock, flist, &logflags, | 5182 | &del, firstblock, flist, &logflags, |
5364 | delta, XFS_DATA_FORK, 0); | 5183 | XFS_DATA_FORK, 0); |
5365 | if (error) | 5184 | if (error) |
5366 | goto error0; | 5185 | goto error0; |
5367 | goto nodelete; | 5186 | goto nodelete; |
@@ -5414,7 +5233,7 @@ xfs_bunmapi( | |||
5414 | goto error0; | 5233 | goto error0; |
5415 | } | 5234 | } |
5416 | error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, | 5235 | error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, |
5417 | &tmp_logflags, delta, whichfork, rsvd); | 5236 | &tmp_logflags, whichfork, rsvd); |
5418 | logflags |= tmp_logflags; | 5237 | logflags |= tmp_logflags; |
5419 | if (error) | 5238 | if (error) |
5420 | goto error0; | 5239 | goto error0; |
@@ -5471,14 +5290,6 @@ nodelete: | |||
5471 | ASSERT(ifp->if_ext_max == | 5290 | ASSERT(ifp->if_ext_max == |
5472 | XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); | 5291 | XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); |
5473 | error = 0; | 5292 | error = 0; |
5474 | if (delta && delta->xed_startoff != NULLFILEOFF) { | ||
5475 | /* A change was actually made. | ||
5476 | * Note that delta->xed_blockount is an offset at this | ||
5477 | * point and needs to be converted to a block count. | ||
5478 | */ | ||
5479 | ASSERT(delta->xed_blockcount > delta->xed_startoff); | ||
5480 | delta->xed_blockcount -= delta->xed_startoff; | ||
5481 | } | ||
5482 | error0: | 5293 | error0: |
5483 | /* | 5294 | /* |
5484 | * Log everything. Do this after conversion, there's no point in | 5295 | * Log everything. Do this after conversion, there's no point in |
@@ -5605,28 +5416,6 @@ xfs_getbmap( | |||
5605 | prealloced = 0; | 5416 | prealloced = 0; |
5606 | fixlen = 1LL << 32; | 5417 | fixlen = 1LL << 32; |
5607 | } else { | 5418 | } else { |
5608 | /* | ||
5609 | * If the BMV_IF_NO_DMAPI_READ interface bit specified, do | ||
5610 | * not generate a DMAPI read event. Otherwise, if the | ||
5611 | * DM_EVENT_READ bit is set for the file, generate a read | ||
5612 | * event in order that the DMAPI application may do its thing | ||
5613 | * before we return the extents. Usually this means restoring | ||
5614 | * user file data to regions of the file that look like holes. | ||
5615 | * | ||
5616 | * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify | ||
5617 | * BMV_IF_NO_DMAPI_READ so that read events are generated. | ||
5618 | * If this were not true, callers of ioctl(XFS_IOC_GETBMAP) | ||
5619 | * could misinterpret holes in a DMAPI file as true holes, | ||
5620 | * when in fact they may represent offline user data. | ||
5621 | */ | ||
5622 | if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && | ||
5623 | !(iflags & BMV_IF_NO_DMAPI_READ)) { | ||
5624 | error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, | ||
5625 | 0, 0, 0, NULL); | ||
5626 | if (error) | ||
5627 | return XFS_ERROR(error); | ||
5628 | } | ||
5629 | |||
5630 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && | 5419 | if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && |
5631 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE && | 5420 | ip->i_d.di_format != XFS_DINODE_FMT_BTREE && |
5632 | ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) | 5421 | ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) |
@@ -5713,7 +5502,7 @@ xfs_getbmap( | |||
5713 | error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), | 5502 | error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), |
5714 | XFS_BB_TO_FSB(mp, bmv->bmv_length), | 5503 | XFS_BB_TO_FSB(mp, bmv->bmv_length), |
5715 | bmapi_flags, NULL, 0, map, &nmap, | 5504 | bmapi_flags, NULL, 0, map, &nmap, |
5716 | NULL, NULL); | 5505 | NULL); |
5717 | if (error) | 5506 | if (error) |
5718 | goto out_free_map; | 5507 | goto out_free_map; |
5719 | ASSERT(nmap <= subnex); | 5508 | ASSERT(nmap <= subnex); |
@@ -5859,66 +5648,34 @@ xfs_bmap_eof( | |||
5859 | } | 5648 | } |
5860 | 5649 | ||
5861 | #ifdef DEBUG | 5650 | #ifdef DEBUG |
5862 | STATIC | 5651 | STATIC struct xfs_buf * |
5863 | xfs_buf_t * | ||
5864 | xfs_bmap_get_bp( | 5652 | xfs_bmap_get_bp( |
5865 | xfs_btree_cur_t *cur, | 5653 | struct xfs_btree_cur *cur, |
5866 | xfs_fsblock_t bno) | 5654 | xfs_fsblock_t bno) |
5867 | { | 5655 | { |
5868 | int i; | 5656 | struct xfs_log_item_desc *lidp; |
5869 | xfs_buf_t *bp; | 5657 | int i; |
5870 | 5658 | ||
5871 | if (!cur) | 5659 | if (!cur) |
5872 | return(NULL); | 5660 | return NULL; |
5873 | |||
5874 | bp = NULL; | ||
5875 | for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) { | ||
5876 | bp = cur->bc_bufs[i]; | ||
5877 | if (!bp) break; | ||
5878 | if (XFS_BUF_ADDR(bp) == bno) | ||
5879 | break; /* Found it */ | ||
5880 | } | ||
5881 | if (i == XFS_BTREE_MAXLEVELS) | ||
5882 | bp = NULL; | ||
5883 | |||
5884 | if (!bp) { /* Chase down all the log items to see if the bp is there */ | ||
5885 | xfs_log_item_chunk_t *licp; | ||
5886 | xfs_trans_t *tp; | ||
5887 | |||
5888 | tp = cur->bc_tp; | ||
5889 | licp = &tp->t_items; | ||
5890 | while (!bp && licp != NULL) { | ||
5891 | if (xfs_lic_are_all_free(licp)) { | ||
5892 | licp = licp->lic_next; | ||
5893 | continue; | ||
5894 | } | ||
5895 | for (i = 0; i < licp->lic_unused; i++) { | ||
5896 | xfs_log_item_desc_t *lidp; | ||
5897 | xfs_log_item_t *lip; | ||
5898 | xfs_buf_log_item_t *bip; | ||
5899 | xfs_buf_t *lbp; | ||
5900 | |||
5901 | if (xfs_lic_isfree(licp, i)) { | ||
5902 | continue; | ||
5903 | } | ||
5904 | |||
5905 | lidp = xfs_lic_slot(licp, i); | ||
5906 | lip = lidp->lid_item; | ||
5907 | if (lip->li_type != XFS_LI_BUF) | ||
5908 | continue; | ||
5909 | 5661 | ||
5910 | bip = (xfs_buf_log_item_t *)lip; | 5662 | for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) { |
5911 | lbp = bip->bli_buf; | 5663 | if (!cur->bc_bufs[i]) |
5664 | break; | ||
5665 | if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno) | ||
5666 | return cur->bc_bufs[i]; | ||
5667 | } | ||
5912 | 5668 | ||
5913 | if (XFS_BUF_ADDR(lbp) == bno) { | 5669 | /* Chase down all the log items to see if the bp is there */ |
5914 | bp = lbp; | 5670 | list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) { |
5915 | break; /* Found it */ | 5671 | struct xfs_buf_log_item *bip; |
5916 | } | 5672 | bip = (struct xfs_buf_log_item *)lidp->lid_item; |
5917 | } | 5673 | if (bip->bli_item.li_type == XFS_LI_BUF && |
5918 | licp = licp->lic_next; | 5674 | XFS_BUF_ADDR(bip->bli_buf) == bno) |
5919 | } | 5675 | return bip->bli_buf; |
5920 | } | 5676 | } |
5921 | return(bp); | 5677 | |
5678 | return NULL; | ||
5922 | } | 5679 | } |
5923 | 5680 | ||
5924 | STATIC void | 5681 | STATIC void |
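Note on the xfs_bmap.c hunk above: the rewritten xfs_bmap_get_bp() drops the open-coded walk over log item chunks and instead iterates the transaction's item list with list_for_each_entry(). The stand-alone sketch below models only that intrusive-list lookup pattern; the list_head, container_of and list_for_each_entry definitions are simplified user-space stand-ins for their <linux/list.h> counterparts, and the struct item type and its fields are invented for illustration.

	/*
	 * Simplified model of the intrusive-list walk used by the new
	 * xfs_bmap_get_bp(): the list node lives inside each item, so the
	 * search needs no per-chunk bookkeeping or free-slot checks.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define list_for_each_entry(pos, head, member)				\
		for (pos = container_of((head)->next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = container_of(pos->member.next, typeof(*pos), member))

	struct item {
		int		blkno;		/* stand-in for XFS_BUF_ADDR() */
		struct list_head lid_trans;	/* links the item into the list */
	};

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	int main(void)
	{
		struct list_head items = { &items, &items };
		struct item a = { .blkno = 10 }, b = { .blkno = 42 };
		struct item *pos;

		list_add_tail(&a.lid_trans, &items);
		list_add_tail(&b.lid_trans, &items);

		/* Walk every item and stop on the block number we want. */
		list_for_each_entry(pos, &items, lid_trans)
			if (pos->blkno == 42)
				printf("found item at blkno %d\n", pos->blkno);
		return 0;
	}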
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 419dafb9d87d..b13569a6179b 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -28,20 +28,6 @@ struct xfs_trans; | |||
28 | extern kmem_zone_t *xfs_bmap_free_item_zone; | 28 | extern kmem_zone_t *xfs_bmap_free_item_zone; |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * DELTA: describe a change to the in-core extent list. | ||
32 | * | ||
33 | * Internally the use of xed_blockount is somewhat funky. | ||
34 | * xed_blockcount contains an offset much of the time because this | ||
35 | * makes merging changes easier. (xfs_fileoff_t and xfs_filblks_t are | ||
36 | * the same underlying type). | ||
37 | */ | ||
38 | typedef struct xfs_extdelta | ||
39 | { | ||
40 | xfs_fileoff_t xed_startoff; /* offset of range */ | ||
41 | xfs_filblks_t xed_blockcount; /* blocks in range */ | ||
42 | } xfs_extdelta_t; | ||
43 | |||
44 | /* | ||
45 | * List of extents to be free "later". | 31 | * List of extents to be free "later". |
46 | * The list is kept sorted on xbf_startblock. | 32 | * The list is kept sorted on xbf_startblock. |
47 | */ | 33 | */ |
@@ -82,16 +68,13 @@ typedef struct xfs_bmap_free | |||
82 | #define XFS_BMAPI_DELAY 0x002 /* delayed write operation */ | 68 | #define XFS_BMAPI_DELAY 0x002 /* delayed write operation */ |
83 | #define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ | 69 | #define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ |
84 | #define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ | 70 | #define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ |
85 | #define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */ | 71 | #define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */ |
86 | #define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */ | 72 | #define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */ |
87 | #define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */ | 73 | #define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */ |
88 | #define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */ | 74 | #define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ |
89 | #define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */ | ||
90 | #define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */ | ||
91 | /* combine contig. space */ | 75 | /* combine contig. space */ |
92 | #define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */ | 76 | #define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */ |
93 | /* XFS_BMAPI_DIRECT_IO 0x800 */ | 77 | #define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */ |
94 | #define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */ | ||
95 | /* need write cache flushing and no */ | 78 | /* need write cache flushing and no */ |
96 | /* additional allocation alignments */ | 79 | /* additional allocation alignments */ |
97 | 80 | ||
@@ -100,9 +83,7 @@ typedef struct xfs_bmap_free | |||
100 | { XFS_BMAPI_DELAY, "DELAY" }, \ | 83 | { XFS_BMAPI_DELAY, "DELAY" }, \ |
101 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ | 84 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ |
102 | { XFS_BMAPI_METADATA, "METADATA" }, \ | 85 | { XFS_BMAPI_METADATA, "METADATA" }, \ |
103 | { XFS_BMAPI_EXACT, "EXACT" }, \ | ||
104 | { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ | 86 | { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ |
105 | { XFS_BMAPI_ASYNC, "ASYNC" }, \ | ||
106 | { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \ | 87 | { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \ |
107 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ | 88 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ |
108 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ | 89 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ |
@@ -310,9 +291,7 @@ xfs_bmapi( | |||
310 | xfs_extlen_t total, /* total blocks needed */ | 291 | xfs_extlen_t total, /* total blocks needed */ |
311 | struct xfs_bmbt_irec *mval, /* output: map values */ | 292 | struct xfs_bmbt_irec *mval, /* output: map values */ |
312 | int *nmap, /* i/o: mval size/count */ | 293 | int *nmap, /* i/o: mval size/count */ |
313 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ | 294 | xfs_bmap_free_t *flist); /* i/o: list extents to free */ |
314 | xfs_extdelta_t *delta); /* o: change made to incore | ||
315 | extents */ | ||
316 | 295 | ||
317 | /* | 296 | /* |
318 | * Map file blocks to filesystem blocks, simple version. | 297 | * Map file blocks to filesystem blocks, simple version. |
@@ -346,8 +325,6 @@ xfs_bunmapi( | |||
346 | xfs_fsblock_t *firstblock, /* first allocated block | 325 | xfs_fsblock_t *firstblock, /* first allocated block |
347 | controls a.g. for allocs */ | 326 | controls a.g. for allocs */ |
348 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ | 327 | xfs_bmap_free_t *flist, /* i/o: list extents to free */ |
349 | xfs_extdelta_t *delta, /* o: change made to incore | ||
350 | extents */ | ||
351 | int *done); /* set if not done yet */ | 328 | int *done); /* set if not done yet */ |
352 | 329 | ||
353 | /* | 330 | /* |
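Note on the xfs_bmap.h hunk above: two XFS_BMAPI_* flags are deleted and the remaining bits renumbered, with the flag-name table used for tracing updated in the same hunk. The fragment below is a small, self-contained model of keeping such a bit list and its name table in step; the names and values are made up for the example and only mirror the shape of the real definitions.

	/*
	 * Model of a flag word plus a name table kept in lockstep: when a
	 * flag is removed, both its #define and its table entry go, and the
	 * surviving bits may be renumbered as long as the two stay consistent.
	 */
	#include <stdio.h>

	#define BMAPI_WRITE	0x001
	#define BMAPI_DELAY	0x002
	#define BMAPI_ENTIRE	0x004
	#define BMAPI_METADATA	0x008
	#define BMAPI_ATTRFORK	0x010	/* renumbered after a flag was dropped */

	static const struct { unsigned int bit; const char *name; } flag_names[] = {
		{ BMAPI_WRITE,    "WRITE"    },
		{ BMAPI_DELAY,    "DELAY"    },
		{ BMAPI_ENTIRE,   "ENTIRE"   },
		{ BMAPI_METADATA, "METADATA" },
		{ BMAPI_ATTRFORK, "ATTRFORK" },
	};

	/* Decode a flags word, roughly the way a tracepoint flag printer would. */
	static void print_flags(unsigned int flags)
	{
		size_t i;

		for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
			if (flags & flag_names[i].bit)
				printf("%s ", flag_names[i].name);
		printf("\n");
	}

	int main(void)
	{
		print_flags(BMAPI_WRITE | BMAPI_ATTRFORK);
		return 0;
	}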
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 416e47e54b83..87d3c10b6954 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c | |||
@@ -24,21 +24,16 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | 33 | #include "xfs_inode_item.h" |
38 | #include "xfs_alloc.h" | 34 | #include "xfs_alloc.h" |
39 | #include "xfs_btree.h" | 35 | #include "xfs_btree.h" |
40 | #include "xfs_btree_trace.h" | 36 | #include "xfs_btree_trace.h" |
41 | #include "xfs_ialloc.h" | ||
42 | #include "xfs_itable.h" | 37 | #include "xfs_itable.h" |
43 | #include "xfs_bmap.h" | 38 | #include "xfs_bmap.h" |
44 | #include "xfs_error.h" | 39 | #include "xfs_error.h" |
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 96be4b0f2496..829af92f0fba 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c | |||
@@ -24,20 +24,15 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | 33 | #include "xfs_inode_item.h" |
38 | #include "xfs_btree.h" | 34 | #include "xfs_btree.h" |
39 | #include "xfs_btree_trace.h" | 35 | #include "xfs_btree_trace.h" |
40 | #include "xfs_ialloc.h" | ||
41 | #include "xfs_error.h" | 36 | #include "xfs_error.h" |
42 | #include "xfs_trace.h" | 37 | #include "xfs_trace.h" |
43 | 38 | ||
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 02a80984aa05..1b09d7a280df 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_buf_item.h" | 28 | #include "xfs_buf_item.h" |
30 | #include "xfs_trans_priv.h" | 29 | #include "xfs_trans_priv.h" |
@@ -34,6 +33,12 @@ | |||
34 | 33 | ||
35 | kmem_zone_t *xfs_buf_item_zone; | 34 | kmem_zone_t *xfs_buf_item_zone; |
36 | 35 | ||
36 | static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip) | ||
37 | { | ||
38 | return container_of(lip, struct xfs_buf_log_item, bli_item); | ||
39 | } | ||
40 | |||
41 | |||
37 | #ifdef XFS_TRANS_DEBUG | 42 | #ifdef XFS_TRANS_DEBUG |
38 | /* | 43 | /* |
39 | * This function uses an alternate strategy for tracking the bytes | 44 | * This function uses an alternate strategy for tracking the bytes |
@@ -151,12 +156,13 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip); | |||
151 | */ | 156 | */ |
152 | STATIC uint | 157 | STATIC uint |
153 | xfs_buf_item_size( | 158 | xfs_buf_item_size( |
154 | xfs_buf_log_item_t *bip) | 159 | struct xfs_log_item *lip) |
155 | { | 160 | { |
156 | uint nvecs; | 161 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
157 | int next_bit; | 162 | struct xfs_buf *bp = bip->bli_buf; |
158 | int last_bit; | 163 | uint nvecs; |
159 | xfs_buf_t *bp; | 164 | int next_bit; |
165 | int last_bit; | ||
160 | 166 | ||
161 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 167 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
162 | if (bip->bli_flags & XFS_BLI_STALE) { | 168 | if (bip->bli_flags & XFS_BLI_STALE) { |
@@ -170,7 +176,6 @@ xfs_buf_item_size( | |||
170 | return 1; | 176 | return 1; |
171 | } | 177 | } |
172 | 178 | ||
173 | bp = bip->bli_buf; | ||
174 | ASSERT(bip->bli_flags & XFS_BLI_LOGGED); | 179 | ASSERT(bip->bli_flags & XFS_BLI_LOGGED); |
175 | nvecs = 1; | 180 | nvecs = 1; |
176 | last_bit = xfs_next_bit(bip->bli_format.blf_data_map, | 181 | last_bit = xfs_next_bit(bip->bli_format.blf_data_map, |
@@ -219,13 +224,13 @@ xfs_buf_item_size( | |||
219 | */ | 224 | */ |
220 | STATIC void | 225 | STATIC void |
221 | xfs_buf_item_format( | 226 | xfs_buf_item_format( |
222 | xfs_buf_log_item_t *bip, | 227 | struct xfs_log_item *lip, |
223 | xfs_log_iovec_t *log_vector) | 228 | struct xfs_log_iovec *vecp) |
224 | { | 229 | { |
230 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | ||
231 | struct xfs_buf *bp = bip->bli_buf; | ||
225 | uint base_size; | 232 | uint base_size; |
226 | uint nvecs; | 233 | uint nvecs; |
227 | xfs_log_iovec_t *vecp; | ||
228 | xfs_buf_t *bp; | ||
229 | int first_bit; | 234 | int first_bit; |
230 | int last_bit; | 235 | int last_bit; |
231 | int next_bit; | 236 | int next_bit; |
@@ -235,8 +240,6 @@ xfs_buf_item_format( | |||
235 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 240 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
236 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || | 241 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || |
237 | (bip->bli_flags & XFS_BLI_STALE)); | 242 | (bip->bli_flags & XFS_BLI_STALE)); |
238 | bp = bip->bli_buf; | ||
239 | vecp = log_vector; | ||
240 | 243 | ||
241 | /* | 244 | /* |
242 | * The size of the base structure is the size of the | 245 | * The size of the base structure is the size of the |
@@ -248,7 +251,7 @@ xfs_buf_item_format( | |||
248 | base_size = | 251 | base_size = |
249 | (uint)(sizeof(xfs_buf_log_format_t) + | 252 | (uint)(sizeof(xfs_buf_log_format_t) + |
250 | ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); | 253 | ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); |
251 | vecp->i_addr = (xfs_caddr_t)&bip->bli_format; | 254 | vecp->i_addr = &bip->bli_format; |
252 | vecp->i_len = base_size; | 255 | vecp->i_len = base_size; |
253 | vecp->i_type = XLOG_REG_TYPE_BFORMAT; | 256 | vecp->i_type = XLOG_REG_TYPE_BFORMAT; |
254 | vecp++; | 257 | vecp++; |
@@ -263,7 +266,7 @@ xfs_buf_item_format( | |||
263 | */ | 266 | */ |
264 | if (bip->bli_flags & XFS_BLI_INODE_BUF) { | 267 | if (bip->bli_flags & XFS_BLI_INODE_BUF) { |
265 | if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && | 268 | if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && |
266 | xfs_log_item_in_current_chkpt(&bip->bli_item))) | 269 | xfs_log_item_in_current_chkpt(lip))) |
267 | bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF; | 270 | bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF; |
268 | bip->bli_flags &= ~XFS_BLI_INODE_BUF; | 271 | bip->bli_flags &= ~XFS_BLI_INODE_BUF; |
269 | } | 272 | } |
@@ -356,66 +359,90 @@ xfs_buf_item_format( | |||
356 | 359 | ||
357 | /* | 360 | /* |
358 | * This is called to pin the buffer associated with the buf log item in memory | 361 | * This is called to pin the buffer associated with the buf log item in memory |
359 | * so it cannot be written out. Simply call bpin() on the buffer to do this. | 362 | * so it cannot be written out. |
360 | * | 363 | * |
361 | * We also always take a reference to the buffer log item here so that the bli | 364 | * We also always take a reference to the buffer log item here so that the bli |
362 | * is held while the item is pinned in memory. This means that we can | 365 | * is held while the item is pinned in memory. This means that we can |
363 | * unconditionally drop the reference count a transaction holds when the | 366 | * unconditionally drop the reference count a transaction holds when the |
364 | * transaction is completed. | 367 | * transaction is completed. |
365 | */ | 368 | */ |
366 | |||
367 | STATIC void | 369 | STATIC void |
368 | xfs_buf_item_pin( | 370 | xfs_buf_item_pin( |
369 | xfs_buf_log_item_t *bip) | 371 | struct xfs_log_item *lip) |
370 | { | 372 | { |
371 | xfs_buf_t *bp; | 373 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
372 | 374 | ||
373 | bp = bip->bli_buf; | 375 | ASSERT(XFS_BUF_ISBUSY(bip->bli_buf)); |
374 | ASSERT(XFS_BUF_ISBUSY(bp)); | ||
375 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 376 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
376 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || | 377 | ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || |
377 | (bip->bli_flags & XFS_BLI_STALE)); | 378 | (bip->bli_flags & XFS_BLI_STALE)); |
378 | atomic_inc(&bip->bli_refcount); | 379 | |
379 | trace_xfs_buf_item_pin(bip); | 380 | trace_xfs_buf_item_pin(bip); |
380 | xfs_bpin(bp); | ||
381 | } | ||
382 | 381 | ||
382 | atomic_inc(&bip->bli_refcount); | ||
383 | atomic_inc(&bip->bli_buf->b_pin_count); | ||
384 | } | ||
383 | 385 | ||
384 | /* | 386 | /* |
385 | * This is called to unpin the buffer associated with the buf log | 387 | * This is called to unpin the buffer associated with the buf log |
386 | * item which was previously pinned with a call to xfs_buf_item_pin(). | 388 | * item which was previously pinned with a call to xfs_buf_item_pin(). |
387 | * Just call bunpin() on the buffer to do this. | ||
388 | * | 389 | * |
389 | * Also drop the reference to the buf item for the current transaction. | 390 | * Also drop the reference to the buf item for the current transaction. |
390 | * If the XFS_BLI_STALE flag is set and we are the last reference, | 391 | * If the XFS_BLI_STALE flag is set and we are the last reference, |
391 | * then free up the buf log item and unlock the buffer. | 392 | * then free up the buf log item and unlock the buffer. |
393 | * | ||
394 | * If the remove flag is set we are called from uncommit in the | ||
395 | * forced-shutdown path. If that is true and the reference count on | ||
396 | * the log item is going to drop to zero we need to free the item's | ||
397 | * descriptor in the transaction. | ||
392 | */ | 398 | */ |
393 | STATIC void | 399 | STATIC void |
394 | xfs_buf_item_unpin( | 400 | xfs_buf_item_unpin( |
395 | xfs_buf_log_item_t *bip) | 401 | struct xfs_log_item *lip, |
402 | int remove) | ||
396 | { | 403 | { |
397 | struct xfs_ail *ailp; | 404 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
398 | xfs_buf_t *bp; | 405 | xfs_buf_t *bp = bip->bli_buf; |
399 | int freed; | 406 | struct xfs_ail *ailp = lip->li_ailp; |
400 | int stale = bip->bli_flags & XFS_BLI_STALE; | 407 | int stale = bip->bli_flags & XFS_BLI_STALE; |
408 | int freed; | ||
401 | 409 | ||
402 | bp = bip->bli_buf; | ||
403 | ASSERT(bp != NULL); | ||
404 | ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); | 410 | ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); |
405 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 411 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
412 | |||
406 | trace_xfs_buf_item_unpin(bip); | 413 | trace_xfs_buf_item_unpin(bip); |
407 | 414 | ||
408 | freed = atomic_dec_and_test(&bip->bli_refcount); | 415 | freed = atomic_dec_and_test(&bip->bli_refcount); |
409 | ailp = bip->bli_item.li_ailp; | 416 | |
410 | xfs_bunpin(bp); | 417 | if (atomic_dec_and_test(&bp->b_pin_count)) |
418 | wake_up_all(&bp->b_waiters); | ||
419 | |||
411 | if (freed && stale) { | 420 | if (freed && stale) { |
412 | ASSERT(bip->bli_flags & XFS_BLI_STALE); | 421 | ASSERT(bip->bli_flags & XFS_BLI_STALE); |
413 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); | 422 | ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); |
414 | ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); | 423 | ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); |
415 | ASSERT(XFS_BUF_ISSTALE(bp)); | 424 | ASSERT(XFS_BUF_ISSTALE(bp)); |
416 | ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); | 425 | ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); |
426 | |||
417 | trace_xfs_buf_item_unpin_stale(bip); | 427 | trace_xfs_buf_item_unpin_stale(bip); |
418 | 428 | ||
429 | if (remove) { | ||
430 | /* | ||
431 | * We have to remove the log item from the transaction | ||
432 | * as we are about to release our reference to the | ||
433 | * buffer. If we don't, the unlock that occurs later | ||
434 | * in xfs_trans_uncommit() will try to reference the | ||
435 | * buffer which we no longer have a hold on. | ||
436 | */ | ||
437 | xfs_trans_del_item(lip); | ||
438 | |||
439 | /* | ||
440 | * Since the transaction no longer refers to the buffer, | ||
441 | * the buffer should no longer refer to the transaction. | ||
442 | */ | ||
443 | XFS_BUF_SET_FSPRIVATE2(bp, NULL); | ||
444 | } | ||
445 | |||
419 | /* | 446 | /* |
420 | * If we get called here because of an IO error, we may | 447 | * If we get called here because of an IO error, we may |
421 | * or may not have the item on the AIL. xfs_trans_ail_delete() | 448 | * or may not have the item on the AIL. xfs_trans_ail_delete() |
@@ -437,48 +464,6 @@ xfs_buf_item_unpin( | |||
437 | } | 464 | } |
438 | 465 | ||
439 | /* | 466 | /* |
440 | * this is called from uncommit in the forced-shutdown path. | ||
441 | * we need to check to see if the reference count on the log item | ||
442 | * is going to drop to zero. If so, unpin will free the log item | ||
443 | * so we need to free the item's descriptor (that points to the item) | ||
444 | * in the transaction. | ||
445 | */ | ||
446 | STATIC void | ||
447 | xfs_buf_item_unpin_remove( | ||
448 | xfs_buf_log_item_t *bip, | ||
449 | xfs_trans_t *tp) | ||
450 | { | ||
451 | /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */ | ||
452 | if ((atomic_read(&bip->bli_refcount) == 1) && | ||
453 | (bip->bli_flags & XFS_BLI_STALE)) { | ||
454 | /* | ||
455 | * yes -- We can safely do some work here and then call | ||
456 | * buf_item_unpin to do the rest because we are | ||
457 | * are holding the buffer locked so no one else will be | ||
458 | * able to bump up the refcount. We have to remove the | ||
459 | * log item from the transaction as we are about to release | ||
460 | * our reference to the buffer. If we don't, the unlock that | ||
461 | * occurs later in the xfs_trans_uncommit() will try to | ||
462 | * reference the buffer which we no longer have a hold on. | ||
463 | */ | ||
464 | struct xfs_log_item_desc *lidp; | ||
465 | |||
466 | ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0); | ||
467 | trace_xfs_buf_item_unpin_stale(bip); | ||
468 | |||
469 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip); | ||
470 | xfs_trans_free_item(tp, lidp); | ||
471 | |||
472 | /* | ||
473 | * Since the transaction no longer refers to the buffer, the | ||
474 | * buffer should no longer refer to the transaction. | ||
475 | */ | ||
476 | XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL); | ||
477 | } | ||
478 | xfs_buf_item_unpin(bip); | ||
479 | } | ||
480 | |||
481 | /* | ||
482 | * This is called to attempt to lock the buffer associated with this | 467 | * This is called to attempt to lock the buffer associated with this |
483 | * buf log item. Don't sleep on the buffer lock. If we can't get | 468 | * buf log item. Don't sleep on the buffer lock. If we can't get |
484 | * the lock right away, return 0. If we can get the lock, take a | 469 | * the lock right away, return 0. If we can get the lock, take a |
@@ -488,11 +473,11 @@ xfs_buf_item_unpin_remove( | |||
488 | */ | 473 | */ |
489 | STATIC uint | 474 | STATIC uint |
490 | xfs_buf_item_trylock( | 475 | xfs_buf_item_trylock( |
491 | xfs_buf_log_item_t *bip) | 476 | struct xfs_log_item *lip) |
492 | { | 477 | { |
493 | xfs_buf_t *bp; | 478 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
479 | struct xfs_buf *bp = bip->bli_buf; | ||
494 | 480 | ||
495 | bp = bip->bli_buf; | ||
496 | if (XFS_BUF_ISPINNED(bp)) | 481 | if (XFS_BUF_ISPINNED(bp)) |
497 | return XFS_ITEM_PINNED; | 482 | return XFS_ITEM_PINNED; |
498 | if (!XFS_BUF_CPSEMA(bp)) | 483 | if (!XFS_BUF_CPSEMA(bp)) |
@@ -529,13 +514,12 @@ xfs_buf_item_trylock( | |||
529 | */ | 514 | */ |
530 | STATIC void | 515 | STATIC void |
531 | xfs_buf_item_unlock( | 516 | xfs_buf_item_unlock( |
532 | xfs_buf_log_item_t *bip) | 517 | struct xfs_log_item *lip) |
533 | { | 518 | { |
534 | int aborted; | 519 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
535 | xfs_buf_t *bp; | 520 | struct xfs_buf *bp = bip->bli_buf; |
536 | uint hold; | 521 | int aborted; |
537 | 522 | uint hold; | |
538 | bp = bip->bli_buf; | ||
539 | 523 | ||
540 | /* Clear the buffer's association with this transaction. */ | 524 | /* Clear the buffer's association with this transaction. */ |
541 | XFS_BUF_SET_FSPRIVATE2(bp, NULL); | 525 | XFS_BUF_SET_FSPRIVATE2(bp, NULL); |
@@ -546,7 +530,7 @@ xfs_buf_item_unlock( | |||
546 | * (cancelled) buffers at unpin time, but we'll never go through the | 530 | * (cancelled) buffers at unpin time, but we'll never go through the |
547 | * pin/unpin cycle if we abort inside commit. | 531 | * pin/unpin cycle if we abort inside commit. |
548 | */ | 532 | */ |
549 | aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0; | 533 | aborted = (lip->li_flags & XFS_LI_ABORTED) != 0; |
550 | 534 | ||
551 | /* | 535 | /* |
552 | * Before possibly freeing the buf item, determine if we should | 536 | * Before possibly freeing the buf item, determine if we should |
@@ -607,16 +591,16 @@ xfs_buf_item_unlock( | |||
607 | */ | 591 | */ |
608 | STATIC xfs_lsn_t | 592 | STATIC xfs_lsn_t |
609 | xfs_buf_item_committed( | 593 | xfs_buf_item_committed( |
610 | xfs_buf_log_item_t *bip, | 594 | struct xfs_log_item *lip, |
611 | xfs_lsn_t lsn) | 595 | xfs_lsn_t lsn) |
612 | { | 596 | { |
597 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); | ||
598 | |||
613 | trace_xfs_buf_item_committed(bip); | 599 | trace_xfs_buf_item_committed(bip); |
614 | 600 | ||
615 | if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && | 601 | if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0) |
616 | (bip->bli_item.li_lsn != 0)) { | 602 | return lip->li_lsn; |
617 | return bip->bli_item.li_lsn; | 603 | return lsn; |
618 | } | ||
619 | return (lsn); | ||
620 | } | 604 | } |
621 | 605 | ||
622 | /* | 606 | /* |
@@ -626,15 +610,16 @@ xfs_buf_item_committed( | |||
626 | */ | 610 | */ |
627 | STATIC void | 611 | STATIC void |
628 | xfs_buf_item_push( | 612 | xfs_buf_item_push( |
629 | xfs_buf_log_item_t *bip) | 613 | struct xfs_log_item *lip) |
630 | { | 614 | { |
631 | xfs_buf_t *bp; | 615 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
616 | struct xfs_buf *bp = bip->bli_buf; | ||
632 | 617 | ||
633 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); | 618 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); |
619 | ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); | ||
620 | |||
634 | trace_xfs_buf_item_push(bip); | 621 | trace_xfs_buf_item_push(bip); |
635 | 622 | ||
636 | bp = bip->bli_buf; | ||
637 | ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); | ||
638 | xfs_buf_relse(bp); | 623 | xfs_buf_relse(bp); |
639 | } | 624 | } |
640 | 625 | ||
@@ -646,22 +631,24 @@ xfs_buf_item_push( | |||
646 | */ | 631 | */ |
647 | STATIC void | 632 | STATIC void |
648 | xfs_buf_item_pushbuf( | 633 | xfs_buf_item_pushbuf( |
649 | xfs_buf_log_item_t *bip) | 634 | struct xfs_log_item *lip) |
650 | { | 635 | { |
651 | xfs_buf_t *bp; | 636 | struct xfs_buf_log_item *bip = BUF_ITEM(lip); |
637 | struct xfs_buf *bp = bip->bli_buf; | ||
652 | 638 | ||
653 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); | 639 | ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); |
640 | ASSERT(XFS_BUF_ISDELAYWRITE(bp)); | ||
641 | |||
654 | trace_xfs_buf_item_pushbuf(bip); | 642 | trace_xfs_buf_item_pushbuf(bip); |
655 | 643 | ||
656 | bp = bip->bli_buf; | ||
657 | ASSERT(XFS_BUF_ISDELAYWRITE(bp)); | ||
658 | xfs_buf_delwri_promote(bp); | 644 | xfs_buf_delwri_promote(bp); |
659 | xfs_buf_relse(bp); | 645 | xfs_buf_relse(bp); |
660 | } | 646 | } |
661 | 647 | ||
662 | /* ARGSUSED */ | ||
663 | STATIC void | 648 | STATIC void |
664 | xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn) | 649 | xfs_buf_item_committing( |
650 | struct xfs_log_item *lip, | ||
651 | xfs_lsn_t commit_lsn) | ||
665 | { | 652 | { |
666 | } | 653 | } |
667 | 654 | ||
@@ -669,21 +656,16 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn) | |||
669 | * This is the ops vector shared by all buf log items. | 656 | * This is the ops vector shared by all buf log items. |
670 | */ | 657 | */ |
671 | static struct xfs_item_ops xfs_buf_item_ops = { | 658 | static struct xfs_item_ops xfs_buf_item_ops = { |
672 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size, | 659 | .iop_size = xfs_buf_item_size, |
673 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 660 | .iop_format = xfs_buf_item_format, |
674 | xfs_buf_item_format, | 661 | .iop_pin = xfs_buf_item_pin, |
675 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin, | 662 | .iop_unpin = xfs_buf_item_unpin, |
676 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin, | 663 | .iop_trylock = xfs_buf_item_trylock, |
677 | .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) | 664 | .iop_unlock = xfs_buf_item_unlock, |
678 | xfs_buf_item_unpin_remove, | 665 | .iop_committed = xfs_buf_item_committed, |
679 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock, | 666 | .iop_push = xfs_buf_item_push, |
680 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock, | 667 | .iop_pushbuf = xfs_buf_item_pushbuf, |
681 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | 668 | .iop_committing = xfs_buf_item_committing |
682 | xfs_buf_item_committed, | ||
683 | .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push, | ||
684 | .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf, | ||
685 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
686 | xfs_buf_item_committing | ||
687 | }; | 669 | }; |
688 | 670 | ||
689 | 671 | ||
@@ -712,7 +694,6 @@ xfs_buf_item_init( | |||
712 | */ | 694 | */ |
713 | if (bp->b_mount != mp) | 695 | if (bp->b_mount != mp) |
714 | bp->b_mount = mp; | 696 | bp->b_mount = mp; |
715 | XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); | ||
716 | if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { | 697 | if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { |
717 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); | 698 | lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); |
718 | if (lip->li_type == XFS_LI_BUF) { | 699 | if (lip->li_type == XFS_LI_BUF) { |
@@ -1098,15 +1079,14 @@ xfs_buf_error_relse( | |||
1098 | * It is called by xfs_buf_iodone_callbacks() above which will take | 1079 | * It is called by xfs_buf_iodone_callbacks() above which will take |
1099 | * care of cleaning up the buffer itself. | 1080 | * care of cleaning up the buffer itself. |
1100 | */ | 1081 | */ |
1101 | /* ARGSUSED */ | ||
1102 | void | 1082 | void |
1103 | xfs_buf_iodone( | 1083 | xfs_buf_iodone( |
1104 | xfs_buf_t *bp, | 1084 | struct xfs_buf *bp, |
1105 | xfs_buf_log_item_t *bip) | 1085 | struct xfs_log_item *lip) |
1106 | { | 1086 | { |
1107 | struct xfs_ail *ailp = bip->bli_item.li_ailp; | 1087 | struct xfs_ail *ailp = lip->li_ailp; |
1108 | 1088 | ||
1109 | ASSERT(bip->bli_buf == bp); | 1089 | ASSERT(BUF_ITEM(lip)->bli_buf == bp); |
1110 | 1090 | ||
1111 | xfs_buf_rele(bp); | 1091 | xfs_buf_rele(bp); |
1112 | 1092 | ||
@@ -1120,6 +1100,6 @@ xfs_buf_iodone( | |||
1120 | * Either way, AIL is useless if we're forcing a shutdown. | 1100 | * Either way, AIL is useless if we're forcing a shutdown. |
1121 | */ | 1101 | */ |
1122 | spin_lock(&ailp->xa_lock); | 1102 | spin_lock(&ailp->xa_lock); |
1123 | xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); | 1103 | xfs_trans_ail_delete(ailp, lip); |
1124 | xfs_buf_item_free(bip); | 1104 | xfs_buf_item_free(BUF_ITEM(lip)); |
1125 | } | 1105 | } |
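Note on the xfs_buf_item.c hunk above: every callback in xfs_buf_item_ops now takes the generic struct xfs_log_item and recovers the buf log item through the container_of()-based BUF_ITEM() helper, which lets the ops table lose its function-pointer casts. The sketch below is a minimal user-space model of that pattern, not the kernel structures; every type and field name in it is illustrative only.

	/*
	 * Ops-vector sketch: callbacks accept the embedded generic item and
	 * downcast with container_of(), so the table entries are plain,
	 * correctly typed function pointers.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct log_item;

	struct item_ops {
		unsigned int (*iop_size)(struct log_item *lip);
		void (*iop_pin)(struct log_item *lip);
	};

	struct log_item {
		const struct item_ops *li_ops;
	};

	struct buf_log_item {
		struct log_item	bli_item;	/* generic part, embedded */
		int		bli_nvecs;
	};

	static struct buf_log_item *BUF_ITEM(struct log_item *lip)
	{
		return container_of(lip, struct buf_log_item, bli_item);
	}

	static unsigned int buf_item_size(struct log_item *lip)
	{
		return BUF_ITEM(lip)->bli_nvecs;	/* typed access, no casts */
	}

	static void buf_item_pin(struct log_item *lip)
	{
		printf("pinning item with %d vectors\n", BUF_ITEM(lip)->bli_nvecs);
	}

	static const struct item_ops buf_item_ops = {
		.iop_size = buf_item_size,	/* no (uint (*)(...)) casts needed */
		.iop_pin  = buf_item_pin,
	};

	int main(void)
	{
		struct buf_log_item bip = {
			.bli_item  = { .li_ops = &buf_item_ops },
			.bli_nvecs = 3,
		};
		struct log_item *lip = &bip.bli_item;

		lip->li_ops->iop_pin(lip);
		return (int)lip->li_ops->iop_size(lip);
	}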
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index f20bb472d582..0e2ed43f16c7 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h | |||
@@ -124,7 +124,7 @@ void xfs_buf_attach_iodone(struct xfs_buf *, | |||
124 | void(*)(struct xfs_buf *, xfs_log_item_t *), | 124 | void(*)(struct xfs_buf *, xfs_log_item_t *), |
125 | xfs_log_item_t *); | 125 | xfs_log_item_t *); |
126 | void xfs_buf_iodone_callbacks(struct xfs_buf *); | 126 | void xfs_buf_iodone_callbacks(struct xfs_buf *); |
127 | void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *); | 127 | void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *); |
128 | 128 | ||
129 | #ifdef XFS_TRANS_DEBUG | 129 | #ifdef XFS_TRANS_DEBUG |
130 | void | 130 | void |
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 0ca556b4bf31..30fa0e206fba 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c | |||
@@ -25,19 +25,14 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_da_btree.h" | 29 | #include "xfs_da_btree.h" |
31 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | ||
33 | #include "xfs_ialloc_btree.h" | ||
34 | #include "xfs_dir2_sf.h" | 31 | #include "xfs_dir2_sf.h" |
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
38 | #include "xfs_inode_item.h" | 34 | #include "xfs_inode_item.h" |
39 | #include "xfs_alloc.h" | 35 | #include "xfs_alloc.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_bmap.h" | 36 | #include "xfs_bmap.h" |
42 | #include "xfs_attr.h" | 37 | #include "xfs_attr.h" |
43 | #include "xfs_attr_leaf.h" | 38 | #include "xfs_attr_leaf.h" |
@@ -581,16 +576,14 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, | |||
581 | xfs_da_intnode_t *node; | 576 | xfs_da_intnode_t *node; |
582 | xfs_da_node_entry_t *btree; | 577 | xfs_da_node_entry_t *btree; |
583 | int tmp; | 578 | int tmp; |
584 | xfs_mount_t *mp; | ||
585 | 579 | ||
586 | node = oldblk->bp->data; | 580 | node = oldblk->bp->data; |
587 | mp = state->mp; | ||
588 | ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); | 581 | ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); |
589 | ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); | 582 | ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); |
590 | ASSERT(newblk->blkno != 0); | 583 | ASSERT(newblk->blkno != 0); |
591 | if (state->args->whichfork == XFS_DATA_FORK) | 584 | if (state->args->whichfork == XFS_DATA_FORK) |
592 | ASSERT(newblk->blkno >= mp->m_dirleafblk && | 585 | ASSERT(newblk->blkno >= state->mp->m_dirleafblk && |
593 | newblk->blkno < mp->m_dirfreeblk); | 586 | newblk->blkno < state->mp->m_dirfreeblk); |
594 | 587 | ||
595 | /* | 588 | /* |
596 | * We may need to make some room before we insert the new node. | 589 | * We may need to make some room before we insert the new node. |
@@ -1601,7 +1594,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) | |||
1601 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| | 1594 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| |
1602 | XFS_BMAPI_CONTIG, | 1595 | XFS_BMAPI_CONTIG, |
1603 | args->firstblock, args->total, &map, &nmap, | 1596 | args->firstblock, args->total, &map, &nmap, |
1604 | args->flist, NULL))) { | 1597 | args->flist))) { |
1605 | return error; | 1598 | return error; |
1606 | } | 1599 | } |
1607 | ASSERT(nmap <= 1); | 1600 | ASSERT(nmap <= 1); |
@@ -1622,8 +1615,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) | |||
1622 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| | 1615 | xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| |
1623 | XFS_BMAPI_METADATA, | 1616 | XFS_BMAPI_METADATA, |
1624 | args->firstblock, args->total, | 1617 | args->firstblock, args->total, |
1625 | &mapp[mapi], &nmap, args->flist, | 1618 | &mapp[mapi], &nmap, args->flist))) { |
1626 | NULL))) { | ||
1627 | kmem_free(mapp); | 1619 | kmem_free(mapp); |
1628 | return error; | 1620 | return error; |
1629 | } | 1621 | } |
@@ -1884,7 +1876,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno, | |||
1884 | */ | 1876 | */ |
1885 | if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, | 1877 | if ((error = xfs_bunmapi(tp, dp, dead_blkno, count, |
1886 | xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, | 1878 | xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, |
1887 | 0, args->firstblock, args->flist, NULL, | 1879 | 0, args->firstblock, args->flist, |
1888 | &done)) == ENOSPC) { | 1880 | &done)) == ENOSPC) { |
1889 | if (w != XFS_DATA_FORK) | 1881 | if (w != XFS_DATA_FORK) |
1890 | break; | 1882 | break; |
@@ -1989,7 +1981,7 @@ xfs_da_do_buf( | |||
1989 | nfsb, | 1981 | nfsb, |
1990 | XFS_BMAPI_METADATA | | 1982 | XFS_BMAPI_METADATA | |
1991 | xfs_bmapi_aflag(whichfork), | 1983 | xfs_bmapi_aflag(whichfork), |
1992 | NULL, 0, mapp, &nmap, NULL, NULL))) | 1984 | NULL, 0, mapp, &nmap, NULL))) |
1993 | goto exit0; | 1985 | goto exit0; |
1994 | } | 1986 | } |
1995 | } else { | 1987 | } else { |
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 5bba29a07812..3b9582c60a22 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c | |||
@@ -24,24 +24,15 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 29 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | 31 | #include "xfs_inode_item.h" |
38 | #include "xfs_bmap.h" | 32 | #include "xfs_bmap.h" |
39 | #include "xfs_btree.h" | ||
40 | #include "xfs_ialloc.h" | ||
41 | #include "xfs_itable.h" | 33 | #include "xfs_itable.h" |
42 | #include "xfs_dfrag.h" | 34 | #include "xfs_dfrag.h" |
43 | #include "xfs_error.h" | 35 | #include "xfs_error.h" |
44 | #include "xfs_rw.h" | ||
45 | #include "xfs_vnodeops.h" | 36 | #include "xfs_vnodeops.h" |
46 | #include "xfs_trace.h" | 37 | #include "xfs_trace.h" |
47 | 38 | ||
@@ -69,7 +60,9 @@ xfs_swapext( | |||
69 | goto out; | 60 | goto out; |
70 | } | 61 | } |
71 | 62 | ||
72 | if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { | 63 | if (!(file->f_mode & FMODE_WRITE) || |
64 | !(file->f_mode & FMODE_READ) || | ||
65 | (file->f_flags & O_APPEND)) { | ||
73 | error = XFS_ERROR(EBADF); | 66 | error = XFS_ERROR(EBADF); |
74 | goto out_put_file; | 67 | goto out_put_file; |
75 | } | 68 | } |
@@ -81,6 +74,7 @@ xfs_swapext( | |||
81 | } | 74 | } |
82 | 75 | ||
83 | if (!(tmp_file->f_mode & FMODE_WRITE) || | 76 | if (!(tmp_file->f_mode & FMODE_WRITE) || |
77 | !(tmp_file->f_mode & FMODE_READ) || | ||
84 | (tmp_file->f_flags & O_APPEND)) { | 78 | (tmp_file->f_flags & O_APPEND)) { |
85 | error = XFS_ERROR(EBADF); | 79 | error = XFS_ERROR(EBADF); |
86 | goto out_put_tmp_file; | 80 | goto out_put_tmp_file; |
@@ -422,11 +416,8 @@ xfs_swap_extents( | |||
422 | } | 416 | } |
423 | 417 | ||
424 | 418 | ||
425 | IHOLD(ip); | 419 | xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
426 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 420 | xfs_trans_ijoin_ref(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
427 | |||
428 | IHOLD(tip); | ||
429 | xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
430 | 421 | ||
431 | xfs_trans_log_inode(tp, ip, ilf_fields); | 422 | xfs_trans_log_inode(tp, ip, ilf_fields); |
432 | xfs_trans_log_inode(tp, tip, tilf_fields); | 423 | xfs_trans_log_inode(tp, tip, tilf_fields); |
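The xfs_swapext() hunks above tighten the file descriptor checks: both the target file and the tmp file must now be opened for reading as well as writing, and neither may be append-only, before the extent swap is allowed. A minimal sketch of that predicate in plain C; the helper name and the FMODE_*/O_APPEND values are illustrative stand-ins, not the kernel definitions.

	#include <stdbool.h>

	#define FMODE_READ	0x1	/* illustrative values only */
	#define FMODE_WRITE	0x2
	#define O_APPEND	0x400

	/* Both descriptors handed to the swap-extents ioctl must pass this test. */
	static bool swapext_file_ok(unsigned int f_mode, unsigned int f_flags)
	{
		return (f_mode & FMODE_WRITE) &&
		       (f_mode & FMODE_READ) &&
		       !(f_flags & O_APPEND);
	}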
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 42520f041265..a1321bc7f192 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c | |||
@@ -25,13 +25,11 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_da_btree.h" | 29 | #include "xfs_da_btree.h" |
31 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | 31 | #include "xfs_alloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | 32 | #include "xfs_dir2_sf.h" |
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 33 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 34 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | 35 | #include "xfs_inode_item.h" |
@@ -382,7 +380,7 @@ xfs_readdir( | |||
382 | int rval; /* return value */ | 380 | int rval; /* return value */ |
383 | int v; /* type-checking value */ | 381 | int v; /* type-checking value */ |
384 | 382 | ||
385 | xfs_itrace_entry(dp); | 383 | trace_xfs_readdir(dp); |
386 | 384 | ||
387 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 385 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
388 | return XFS_ERROR(EIO); | 386 | return XFS_ERROR(EIO); |
@@ -549,7 +547,7 @@ xfs_dir2_grow_inode( | |||
549 | if ((error = xfs_bmapi(tp, dp, bno, count, | 547 | if ((error = xfs_bmapi(tp, dp, bno, count, |
550 | XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, | 548 | XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, |
551 | args->firstblock, args->total, &map, &nmap, | 549 | args->firstblock, args->total, &map, &nmap, |
552 | args->flist, NULL))) | 550 | args->flist))) |
553 | return error; | 551 | return error; |
554 | ASSERT(nmap <= 1); | 552 | ASSERT(nmap <= 1); |
555 | if (nmap == 1) { | 553 | if (nmap == 1) { |
@@ -581,8 +579,7 @@ xfs_dir2_grow_inode( | |||
581 | if ((error = xfs_bmapi(tp, dp, b, c, | 579 | if ((error = xfs_bmapi(tp, dp, b, c, |
582 | XFS_BMAPI_WRITE|XFS_BMAPI_METADATA, | 580 | XFS_BMAPI_WRITE|XFS_BMAPI_METADATA, |
583 | args->firstblock, args->total, | 581 | args->firstblock, args->total, |
584 | &mapp[mapi], &nmap, args->flist, | 582 | &mapp[mapi], &nmap, args->flist))) { |
585 | NULL))) { | ||
586 | kmem_free(mapp); | 583 | kmem_free(mapp); |
587 | return error; | 584 | return error; |
588 | } | 585 | } |
@@ -715,7 +712,7 @@ xfs_dir2_shrink_inode( | |||
715 | */ | 712 | */ |
716 | if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs, | 713 | if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs, |
717 | XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, | 714 | XFS_BMAPI_METADATA, 0, args->firstblock, args->flist, |
718 | NULL, &done))) { | 715 | &done))) { |
719 | /* | 716 | /* |
720 | * ENOSPC actually can happen if we're in a removename with | 717 | * ENOSPC actually can happen if we're in a removename with |
721 | * no space reservation, and the resulting block removal | 718 | * no space reservation, and the resulting block removal |
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 779a267b0a84..580d99cef9e7 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c | |||
@@ -24,12 +24,10 @@ | |||
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | 30 | #include "xfs_dir2_sf.h" |
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
35 | #include "xfs_inode_item.h" | 33 | #include "xfs_inode_item.h" |
@@ -1073,10 +1071,10 @@ xfs_dir2_sf_to_block( | |||
1073 | */ | 1071 | */ |
1074 | 1072 | ||
1075 | buf_len = dp->i_df.if_bytes; | 1073 | buf_len = dp->i_df.if_bytes; |
1076 | buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP); | 1074 | buf = kmem_alloc(buf_len, KM_SLEEP); |
1077 | 1075 | ||
1078 | memcpy(buf, sfp, dp->i_df.if_bytes); | 1076 | memcpy(buf, sfp, buf_len); |
1079 | xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK); | 1077 | xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK); |
1080 | dp->i_d.di_size = 0; | 1078 | dp->i_d.di_size = 0; |
1081 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | 1079 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); |
1082 | /* | 1080 | /* |
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 498f8d694330..921595b84f5b 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c | |||
@@ -24,12 +24,10 @@ | |||
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | 30 | #include "xfs_dir2_sf.h" |
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
35 | #include "xfs_dir2_data.h" | 33 | #include "xfs_dir2_data.h" |
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index e2d89854ec9e..504be8640e91 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c | |||
@@ -25,11 +25,9 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_da_btree.h" | 29 | #include "xfs_da_btree.h" |
31 | #include "xfs_bmap_btree.h" | 30 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dir2_sf.h" | 31 | #include "xfs_dir2_sf.h" |
34 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
35 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
@@ -875,7 +873,7 @@ xfs_dir2_leaf_getdents( | |||
875 | xfs_dir2_byte_to_da(mp, | 873 | xfs_dir2_byte_to_da(mp, |
876 | XFS_DIR2_LEAF_OFFSET) - map_off, | 874 | XFS_DIR2_LEAF_OFFSET) - map_off, |
877 | XFS_BMAPI_METADATA, NULL, 0, | 875 | XFS_BMAPI_METADATA, NULL, 0, |
878 | &map[map_valid], &nmap, NULL, NULL); | 876 | &map[map_valid], &nmap, NULL); |
879 | /* | 877 | /* |
880 | * Don't know if we should ignore this or | 878 | * Don't know if we should ignore this or |
881 | * try to return an error. | 879 | * try to return an error. |
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 78fc4d9ae756..f9a0864b696a 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c | |||
@@ -24,12 +24,10 @@ | |||
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | 30 | #include "xfs_dir2_sf.h" |
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
35 | #include "xfs_bmap.h" | 33 | #include "xfs_bmap.h" |
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index c1a5945d463a..b1bae6b1eed9 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c | |||
@@ -24,12 +24,10 @@ | |||
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | 30 | #include "xfs_dir2_sf.h" |
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
35 | #include "xfs_inode_item.h" | 33 | #include "xfs_inode_item.h" |
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h deleted file mode 100644 index 2813cdd72375..000000000000 --- a/fs/xfs/xfs_dmapi.h +++ /dev/null | |||
@@ -1,170 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #ifndef __XFS_DMAPI_H__ | ||
19 | #define __XFS_DMAPI_H__ | ||
20 | |||
21 | /* Values used to define the on-disk version of dm_attrname_t. All | ||
22 | * on-disk attribute names start with the 8-byte string "SGI_DMI_". | ||
23 | * | ||
24 | * In the on-disk inode, DMAPI attribute names consist of the user-provided | ||
25 | * name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be | ||
26 | * changed. | ||
27 | */ | ||
28 | |||
29 | #define DMATTR_PREFIXLEN 8 | ||
30 | #define DMATTR_PREFIXSTRING "SGI_DMI_" | ||
31 | |||
32 | typedef enum { | ||
33 | DM_EVENT_INVALID = -1, | ||
34 | DM_EVENT_CANCEL = 0, /* not supported */ | ||
35 | DM_EVENT_MOUNT = 1, | ||
36 | DM_EVENT_PREUNMOUNT = 2, | ||
37 | DM_EVENT_UNMOUNT = 3, | ||
38 | DM_EVENT_DEBUT = 4, /* not supported */ | ||
39 | DM_EVENT_CREATE = 5, | ||
40 | DM_EVENT_CLOSE = 6, /* not supported */ | ||
41 | DM_EVENT_POSTCREATE = 7, | ||
42 | DM_EVENT_REMOVE = 8, | ||
43 | DM_EVENT_POSTREMOVE = 9, | ||
44 | DM_EVENT_RENAME = 10, | ||
45 | DM_EVENT_POSTRENAME = 11, | ||
46 | DM_EVENT_LINK = 12, | ||
47 | DM_EVENT_POSTLINK = 13, | ||
48 | DM_EVENT_SYMLINK = 14, | ||
49 | DM_EVENT_POSTSYMLINK = 15, | ||
50 | DM_EVENT_READ = 16, | ||
51 | DM_EVENT_WRITE = 17, | ||
52 | DM_EVENT_TRUNCATE = 18, | ||
53 | DM_EVENT_ATTRIBUTE = 19, | ||
54 | DM_EVENT_DESTROY = 20, | ||
55 | DM_EVENT_NOSPACE = 21, | ||
56 | DM_EVENT_USER = 22, | ||
57 | DM_EVENT_MAX = 23 | ||
58 | } dm_eventtype_t; | ||
59 | #define HAVE_DM_EVENTTYPE_T | ||
60 | |||
61 | typedef enum { | ||
62 | DM_RIGHT_NULL, | ||
63 | DM_RIGHT_SHARED, | ||
64 | DM_RIGHT_EXCL | ||
65 | } dm_right_t; | ||
66 | #define HAVE_DM_RIGHT_T | ||
67 | |||
68 | /* Defines for determining if an event message should be sent. */ | ||
69 | #ifdef HAVE_DMAPI | ||
70 | #define DM_EVENT_ENABLED(ip, event) ( \ | ||
71 | unlikely ((ip)->i_mount->m_flags & XFS_MOUNT_DMAPI) && \ | ||
72 | ( ((ip)->i_d.di_dmevmask & (1 << event)) || \ | ||
73 | ((ip)->i_mount->m_dmevmask & (1 << event)) ) \ | ||
74 | ) | ||
75 | #else | ||
76 | #define DM_EVENT_ENABLED(ip, event) (0) | ||
77 | #endif | ||
78 | |||
79 | #define DM_XFS_VALID_FS_EVENTS ( \ | ||
80 | (1 << DM_EVENT_PREUNMOUNT) | \ | ||
81 | (1 << DM_EVENT_UNMOUNT) | \ | ||
82 | (1 << DM_EVENT_NOSPACE) | \ | ||
83 | (1 << DM_EVENT_DEBUT) | \ | ||
84 | (1 << DM_EVENT_CREATE) | \ | ||
85 | (1 << DM_EVENT_POSTCREATE) | \ | ||
86 | (1 << DM_EVENT_REMOVE) | \ | ||
87 | (1 << DM_EVENT_POSTREMOVE) | \ | ||
88 | (1 << DM_EVENT_RENAME) | \ | ||
89 | (1 << DM_EVENT_POSTRENAME) | \ | ||
90 | (1 << DM_EVENT_LINK) | \ | ||
91 | (1 << DM_EVENT_POSTLINK) | \ | ||
92 | (1 << DM_EVENT_SYMLINK) | \ | ||
93 | (1 << DM_EVENT_POSTSYMLINK) | \ | ||
94 | (1 << DM_EVENT_ATTRIBUTE) | \ | ||
95 | (1 << DM_EVENT_DESTROY) ) | ||
96 | |||
97 | /* Events valid in dm_set_eventlist() when called with a file handle for | ||
98 | a regular file or a symlink. These events are persistent. | ||
99 | */ | ||
100 | |||
101 | #define DM_XFS_VALID_FILE_EVENTS ( \ | ||
102 | (1 << DM_EVENT_ATTRIBUTE) | \ | ||
103 | (1 << DM_EVENT_DESTROY) ) | ||
104 | |||
105 | /* Events valid in dm_set_eventlist() when called with a file handle for | ||
106 | a directory. These events are persistent. | ||
107 | */ | ||
108 | |||
109 | #define DM_XFS_VALID_DIRECTORY_EVENTS ( \ | ||
110 | (1 << DM_EVENT_CREATE) | \ | ||
111 | (1 << DM_EVENT_POSTCREATE) | \ | ||
112 | (1 << DM_EVENT_REMOVE) | \ | ||
113 | (1 << DM_EVENT_POSTREMOVE) | \ | ||
114 | (1 << DM_EVENT_RENAME) | \ | ||
115 | (1 << DM_EVENT_POSTRENAME) | \ | ||
116 | (1 << DM_EVENT_LINK) | \ | ||
117 | (1 << DM_EVENT_POSTLINK) | \ | ||
118 | (1 << DM_EVENT_SYMLINK) | \ | ||
119 | (1 << DM_EVENT_POSTSYMLINK) | \ | ||
120 | (1 << DM_EVENT_ATTRIBUTE) | \ | ||
121 | (1 << DM_EVENT_DESTROY) ) | ||
122 | |||
123 | /* Events supported by the XFS filesystem. */ | ||
124 | #define DM_XFS_SUPPORTED_EVENTS ( \ | ||
125 | (1 << DM_EVENT_MOUNT) | \ | ||
126 | (1 << DM_EVENT_PREUNMOUNT) | \ | ||
127 | (1 << DM_EVENT_UNMOUNT) | \ | ||
128 | (1 << DM_EVENT_NOSPACE) | \ | ||
129 | (1 << DM_EVENT_CREATE) | \ | ||
130 | (1 << DM_EVENT_POSTCREATE) | \ | ||
131 | (1 << DM_EVENT_REMOVE) | \ | ||
132 | (1 << DM_EVENT_POSTREMOVE) | \ | ||
133 | (1 << DM_EVENT_RENAME) | \ | ||
134 | (1 << DM_EVENT_POSTRENAME) | \ | ||
135 | (1 << DM_EVENT_LINK) | \ | ||
136 | (1 << DM_EVENT_POSTLINK) | \ | ||
137 | (1 << DM_EVENT_SYMLINK) | \ | ||
138 | (1 << DM_EVENT_POSTSYMLINK) | \ | ||
139 | (1 << DM_EVENT_READ) | \ | ||
140 | (1 << DM_EVENT_WRITE) | \ | ||
141 | (1 << DM_EVENT_TRUNCATE) | \ | ||
142 | (1 << DM_EVENT_ATTRIBUTE) | \ | ||
143 | (1 << DM_EVENT_DESTROY) ) | ||
144 | |||
145 | |||
146 | /* | ||
147 | * Definitions used for the flags field on dm_send_*_event(). | ||
148 | */ | ||
149 | |||
150 | #define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */ | ||
151 | #define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */ | ||
152 | #define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */ | ||
153 | #define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */ | ||
154 | #define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */ | ||
155 | |||
156 | /* | ||
157 | * Pull in platform specific event flags defines | ||
158 | */ | ||
159 | #include "xfs_dmapi_priv.h" | ||
160 | |||
161 | /* | ||
162 | * Macros to turn caller specified delay/block flags into | ||
163 | * dm_send_xxxx_event flag DM_FLAGS_NDELAY. | ||
164 | */ | ||
165 | |||
166 | #define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \ | ||
167 | DM_FLAGS_NDELAY : 0) | ||
168 | #define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0) | ||
169 | |||
170 | #endif /* __XFS_DMAPI_H__ */ | ||
diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c deleted file mode 100644 index e71e2581c0c3..000000000000 --- a/fs/xfs/xfs_dmops.c +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_log.h" | ||
22 | #include "xfs_trans.h" | ||
23 | #include "xfs_sb.h" | ||
24 | #include "xfs_dmapi.h" | ||
25 | #include "xfs_inum.h" | ||
26 | #include "xfs_ag.h" | ||
27 | #include "xfs_mount.h" | ||
28 | |||
29 | |||
30 | static struct xfs_dmops xfs_dmcore_stub = { | ||
31 | .xfs_send_data = (xfs_send_data_t)fs_nosys, | ||
32 | .xfs_send_mmap = (xfs_send_mmap_t)fs_noerr, | ||
33 | .xfs_send_destroy = (xfs_send_destroy_t)fs_nosys, | ||
34 | .xfs_send_namesp = (xfs_send_namesp_t)fs_nosys, | ||
35 | .xfs_send_mount = (xfs_send_mount_t)fs_nosys, | ||
36 | .xfs_send_unmount = (xfs_send_unmount_t)fs_noerr, | ||
37 | }; | ||
38 | |||
39 | int | ||
40 | xfs_dmops_get(struct xfs_mount *mp) | ||
41 | { | ||
42 | if (mp->m_flags & XFS_MOUNT_DMAPI) { | ||
43 | cmn_err(CE_WARN, | ||
44 | "XFS: dmapi support not available in this kernel."); | ||
45 | return EINVAL; | ||
46 | } | ||
47 | |||
48 | mp->m_dm_ops = &xfs_dmcore_stub; | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | void | ||
53 | xfs_dmops_put(struct xfs_mount *mp) | ||
54 | { | ||
55 | } | ||
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 047b8a8e5c29..ed9990267661 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c | |||
@@ -23,12 +23,8 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 26 | #include "xfs_mount.h" |
29 | #include "xfs_bmap_btree.h" | 27 | #include "xfs_bmap_btree.h" |
30 | #include "xfs_dir2_sf.h" | ||
31 | #include "xfs_attr_sf.h" | ||
32 | #include "xfs_dinode.h" | 28 | #include "xfs_dinode.h" |
33 | #include "xfs_inode.h" | 29 | #include "xfs_inode.h" |
34 | #include "xfs_utils.h" | 30 | #include "xfs_utils.h" |
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 409fe81585fd..a55e687bf562 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include "xfs_buf_item.h" | 24 | #include "xfs_buf_item.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
30 | #include "xfs_extfree_item.h" | 29 | #include "xfs_extfree_item.h" |
@@ -33,18 +32,19 @@ | |||
33 | kmem_zone_t *xfs_efi_zone; | 32 | kmem_zone_t *xfs_efi_zone; |
34 | kmem_zone_t *xfs_efd_zone; | 33 | kmem_zone_t *xfs_efd_zone; |
35 | 34 | ||
36 | STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *); | 35 | static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip) |
36 | { | ||
37 | return container_of(lip, struct xfs_efi_log_item, efi_item); | ||
38 | } | ||
37 | 39 | ||
38 | void | 40 | void |
39 | xfs_efi_item_free(xfs_efi_log_item_t *efip) | 41 | xfs_efi_item_free( |
42 | struct xfs_efi_log_item *efip) | ||
40 | { | 43 | { |
41 | int nexts = efip->efi_format.efi_nextents; | 44 | if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS) |
42 | |||
43 | if (nexts > XFS_EFI_MAX_FAST_EXTENTS) { | ||
44 | kmem_free(efip); | 45 | kmem_free(efip); |
45 | } else { | 46 | else |
46 | kmem_zone_free(xfs_efi_zone, efip); | 47 | kmem_zone_free(xfs_efi_zone, efip); |
47 | } | ||
48 | } | 48 | } |
49 | 49 | ||
50 | /* | 50 | /* |
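EFI_ITEM() above is the usual container_of() accessor: the generic struct xfs_log_item is embedded inside the type-specific log item, the ops callbacks now receive the generic pointer, and the accessor recovers the enclosing structure from the embedded member. A self-contained sketch of that pattern with simplified stand-in types (the macro below omits the kernel version's type checking):

	#include <stddef.h>

	/* simplified stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct log_item { unsigned int li_flags; };

	struct efi_log_item {
		struct log_item	efi_item;	/* embedded generic log item */
		unsigned int	efi_flags;
	};

	static inline struct efi_log_item *EFI_ITEM(struct log_item *lip)
	{
		return container_of(lip, struct efi_log_item, efi_item);
	}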
@@ -52,9 +52,9 @@ xfs_efi_item_free(xfs_efi_log_item_t *efip) | |||
52 | * We only need 1 iovec for an efi item. It just logs the efi_log_format | 52 | * We only need 1 iovec for an efi item. It just logs the efi_log_format |
53 | * structure. | 53 | * structure. |
54 | */ | 54 | */ |
55 | /*ARGSUSED*/ | ||
56 | STATIC uint | 55 | STATIC uint |
57 | xfs_efi_item_size(xfs_efi_log_item_t *efip) | 56 | xfs_efi_item_size( |
57 | struct xfs_log_item *lip) | ||
58 | { | 58 | { |
59 | return 1; | 59 | return 1; |
60 | } | 60 | } |
@@ -67,10 +67,12 @@ xfs_efi_item_size(xfs_efi_log_item_t *efip) | |||
67 | * slots in the efi item have been filled. | 67 | * slots in the efi item have been filled. |
68 | */ | 68 | */ |
69 | STATIC void | 69 | STATIC void |
70 | xfs_efi_item_format(xfs_efi_log_item_t *efip, | 70 | xfs_efi_item_format( |
71 | xfs_log_iovec_t *log_vector) | 71 | struct xfs_log_item *lip, |
72 | struct xfs_log_iovec *log_vector) | ||
72 | { | 73 | { |
73 | uint size; | 74 | struct xfs_efi_log_item *efip = EFI_ITEM(lip); |
75 | uint size; | ||
74 | 76 | ||
75 | ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); | 77 | ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); |
76 | 78 | ||
@@ -80,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip, | |||
80 | size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); | 82 | size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); |
81 | efip->efi_format.efi_size = 1; | 83 | efip->efi_format.efi_size = 1; |
82 | 84 | ||
83 | log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); | 85 | log_vector->i_addr = &efip->efi_format; |
84 | log_vector->i_len = size; | 86 | log_vector->i_len = size; |
85 | log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; | 87 | log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT; |
86 | ASSERT(size >= sizeof(xfs_efi_log_format_t)); | 88 | ASSERT(size >= sizeof(xfs_efi_log_format_t)); |
@@ -90,60 +92,33 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip, | |||
90 | /* | 92 | /* |
91 | * Pinning has no meaning for an efi item, so just return. | 93 | * Pinning has no meaning for an efi item, so just return. |
92 | */ | 94 | */ |
93 | /*ARGSUSED*/ | ||
94 | STATIC void | 95 | STATIC void |
95 | xfs_efi_item_pin(xfs_efi_log_item_t *efip) | 96 | xfs_efi_item_pin( |
97 | struct xfs_log_item *lip) | ||
96 | { | 98 | { |
97 | return; | ||
98 | } | 99 | } |
99 | 100 | ||
100 | |||
101 | /* | 101 | /* |
102 | * While EFIs cannot really be pinned, the unpin operation is the | 102 | * While EFIs cannot really be pinned, the unpin operation is the |
103 | * last place at which the EFI is manipulated during a transaction. | 103 | * last place at which the EFI is manipulated during a transaction. |
104 | * Here we coordinate with xfs_efi_cancel() to determine who gets to | 104 | * Here we coordinate with xfs_efi_cancel() to determine who gets to |
105 | * free the EFI. | 105 | * free the EFI. |
106 | */ | 106 | */ |
107 | /*ARGSUSED*/ | ||
108 | STATIC void | ||
109 | xfs_efi_item_unpin(xfs_efi_log_item_t *efip) | ||
110 | { | ||
111 | struct xfs_ail *ailp = efip->efi_item.li_ailp; | ||
112 | |||
113 | spin_lock(&ailp->xa_lock); | ||
114 | if (efip->efi_flags & XFS_EFI_CANCELED) { | ||
115 | /* xfs_trans_ail_delete() drops the AIL lock. */ | ||
116 | xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); | ||
117 | xfs_efi_item_free(efip); | ||
118 | } else { | ||
119 | efip->efi_flags |= XFS_EFI_COMMITTED; | ||
120 | spin_unlock(&ailp->xa_lock); | ||
121 | } | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * like unpin only we have to also clear the xaction descriptor | ||
126 | * pointing the log item if we free the item. This routine duplicates | ||
127 | * unpin because efi_flags is protected by the AIL lock. Freeing | ||
128 | * the descriptor and then calling unpin would force us to drop the AIL | ||
129 | * lock which would open up a race condition. | ||
130 | */ | ||
131 | STATIC void | 107 | STATIC void |
132 | xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) | 108 | xfs_efi_item_unpin( |
109 | struct xfs_log_item *lip, | ||
110 | int remove) | ||
133 | { | 111 | { |
134 | struct xfs_ail *ailp = efip->efi_item.li_ailp; | 112 | struct xfs_efi_log_item *efip = EFI_ITEM(lip); |
135 | xfs_log_item_desc_t *lidp; | 113 | struct xfs_ail *ailp = lip->li_ailp; |
136 | 114 | ||
137 | spin_lock(&ailp->xa_lock); | 115 | spin_lock(&ailp->xa_lock); |
138 | if (efip->efi_flags & XFS_EFI_CANCELED) { | 116 | if (efip->efi_flags & XFS_EFI_CANCELED) { |
139 | /* | 117 | if (remove) |
140 | * free the xaction descriptor pointing to this item | 118 | xfs_trans_del_item(lip); |
141 | */ | ||
142 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip); | ||
143 | xfs_trans_free_item(tp, lidp); | ||
144 | 119 | ||
145 | /* xfs_trans_ail_delete() drops the AIL lock. */ | 120 | /* xfs_trans_ail_delete() drops the AIL lock. */ |
146 | xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); | 121 | xfs_trans_ail_delete(ailp, lip); |
147 | xfs_efi_item_free(efip); | 122 | xfs_efi_item_free(efip); |
148 | } else { | 123 | } else { |
149 | efip->efi_flags |= XFS_EFI_COMMITTED; | 124 | efip->efi_flags |= XFS_EFI_COMMITTED; |
@@ -158,9 +133,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) | |||
158 | * XFS_ITEM_PINNED so that the caller will eventually flush the log. | 133 | * XFS_ITEM_PINNED so that the caller will eventually flush the log. |
159 | * This should help in getting the EFI out of the AIL. | 134 | * This should help in getting the EFI out of the AIL. |
160 | */ | 135 | */ |
161 | /*ARGSUSED*/ | ||
162 | STATIC uint | 136 | STATIC uint |
163 | xfs_efi_item_trylock(xfs_efi_log_item_t *efip) | 137 | xfs_efi_item_trylock( |
138 | struct xfs_log_item *lip) | ||
164 | { | 139 | { |
165 | return XFS_ITEM_PINNED; | 140 | return XFS_ITEM_PINNED; |
166 | } | 141 | } |
@@ -168,13 +143,12 @@ xfs_efi_item_trylock(xfs_efi_log_item_t *efip) | |||
168 | /* | 143 | /* |
169 | * Efi items have no locking, so just return. | 144 | * Efi items have no locking, so just return. |
170 | */ | 145 | */ |
171 | /*ARGSUSED*/ | ||
172 | STATIC void | 146 | STATIC void |
173 | xfs_efi_item_unlock(xfs_efi_log_item_t *efip) | 147 | xfs_efi_item_unlock( |
148 | struct xfs_log_item *lip) | ||
174 | { | 149 | { |
175 | if (efip->efi_item.li_flags & XFS_LI_ABORTED) | 150 | if (lip->li_flags & XFS_LI_ABORTED) |
176 | xfs_efi_item_free(efip); | 151 | xfs_efi_item_free(EFI_ITEM(lip)); |
177 | return; | ||
178 | } | 152 | } |
179 | 153 | ||
180 | /* | 154 | /* |
@@ -183,9 +157,10 @@ xfs_efi_item_unlock(xfs_efi_log_item_t *efip) | |||
183 | * flag is not paid any attention here. Checking for that is delayed | 157 | * flag is not paid any attention here. Checking for that is delayed |
184 | * until the EFI is unpinned. | 158 | * until the EFI is unpinned. |
185 | */ | 159 | */ |
186 | /*ARGSUSED*/ | ||
187 | STATIC xfs_lsn_t | 160 | STATIC xfs_lsn_t |
188 | xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) | 161 | xfs_efi_item_committed( |
162 | struct xfs_log_item *lip, | ||
163 | xfs_lsn_t lsn) | ||
189 | { | 164 | { |
190 | return lsn; | 165 | return lsn; |
191 | } | 166 | } |
@@ -195,11 +170,10 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) | |||
195 | * stuck waiting for all of its corresponding efd items to be | 170 | * stuck waiting for all of its corresponding efd items to be |
196 | * committed to disk. | 171 | * committed to disk. |
197 | */ | 172 | */ |
198 | /*ARGSUSED*/ | ||
199 | STATIC void | 173 | STATIC void |
200 | xfs_efi_item_push(xfs_efi_log_item_t *efip) | 174 | xfs_efi_item_push( |
175 | struct xfs_log_item *lip) | ||
201 | { | 176 | { |
202 | return; | ||
203 | } | 177 | } |
204 | 178 | ||
205 | /* | 179 | /* |
@@ -209,61 +183,55 @@ xfs_efi_item_push(xfs_efi_log_item_t *efip) | |||
209 | * example, for inodes, the inode is locked throughout the extent freeing | 183 | * example, for inodes, the inode is locked throughout the extent freeing |
210 | * so the dependency should be recorded there. | 184 | * so the dependency should be recorded there. |
211 | */ | 185 | */ |
212 | /*ARGSUSED*/ | ||
213 | STATIC void | 186 | STATIC void |
214 | xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn) | 187 | xfs_efi_item_committing( |
188 | struct xfs_log_item *lip, | ||
189 | xfs_lsn_t lsn) | ||
215 | { | 190 | { |
216 | return; | ||
217 | } | 191 | } |
218 | 192 | ||
219 | /* | 193 | /* |
220 | * This is the ops vector shared by all efi log items. | 194 | * This is the ops vector shared by all efi log items. |
221 | */ | 195 | */ |
222 | static struct xfs_item_ops xfs_efi_item_ops = { | 196 | static struct xfs_item_ops xfs_efi_item_ops = { |
223 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size, | 197 | .iop_size = xfs_efi_item_size, |
224 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 198 | .iop_format = xfs_efi_item_format, |
225 | xfs_efi_item_format, | 199 | .iop_pin = xfs_efi_item_pin, |
226 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin, | 200 | .iop_unpin = xfs_efi_item_unpin, |
227 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin, | 201 | .iop_trylock = xfs_efi_item_trylock, |
228 | .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *)) | 202 | .iop_unlock = xfs_efi_item_unlock, |
229 | xfs_efi_item_unpin_remove, | 203 | .iop_committed = xfs_efi_item_committed, |
230 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock, | 204 | .iop_push = xfs_efi_item_push, |
231 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock, | 205 | .iop_committing = xfs_efi_item_committing |
232 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
233 | xfs_efi_item_committed, | ||
234 | .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push, | ||
235 | .iop_pushbuf = NULL, | ||
236 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
237 | xfs_efi_item_committing | ||
238 | }; | 206 | }; |
239 | 207 | ||
240 | 208 | ||
241 | /* | 209 | /* |
242 | * Allocate and initialize an efi item with the given number of extents. | 210 | * Allocate and initialize an efi item with the given number of extents. |
243 | */ | 211 | */ |
244 | xfs_efi_log_item_t * | 212 | struct xfs_efi_log_item * |
245 | xfs_efi_init(xfs_mount_t *mp, | 213 | xfs_efi_init( |
246 | uint nextents) | 214 | struct xfs_mount *mp, |
215 | uint nextents) | ||
247 | 216 | ||
248 | { | 217 | { |
249 | xfs_efi_log_item_t *efip; | 218 | struct xfs_efi_log_item *efip; |
250 | uint size; | 219 | uint size; |
251 | 220 | ||
252 | ASSERT(nextents > 0); | 221 | ASSERT(nextents > 0); |
253 | if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { | 222 | if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { |
254 | size = (uint)(sizeof(xfs_efi_log_item_t) + | 223 | size = (uint)(sizeof(xfs_efi_log_item_t) + |
255 | ((nextents - 1) * sizeof(xfs_extent_t))); | 224 | ((nextents - 1) * sizeof(xfs_extent_t))); |
256 | efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP); | 225 | efip = kmem_zalloc(size, KM_SLEEP); |
257 | } else { | 226 | } else { |
258 | efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone, | 227 | efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP); |
259 | KM_SLEEP); | ||
260 | } | 228 | } |
261 | 229 | ||
262 | xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); | 230 | xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); |
263 | efip->efi_format.efi_nextents = nextents; | 231 | efip->efi_format.efi_nextents = nextents; |
264 | efip->efi_format.efi_id = (__psint_t)(void*)efip; | 232 | efip->efi_format.efi_id = (__psint_t)(void*)efip; |
265 | 233 | ||
266 | return (efip); | 234 | return efip; |
267 | } | 235 | } |
268 | 236 | ||
269 | /* | 237 | /* |
@@ -276,7 +244,7 @@ xfs_efi_init(xfs_mount_t *mp, | |||
276 | int | 244 | int |
277 | xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) | 245 | xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) |
278 | { | 246 | { |
279 | xfs_efi_log_format_t *src_efi_fmt = (xfs_efi_log_format_t *)buf->i_addr; | 247 | xfs_efi_log_format_t *src_efi_fmt = buf->i_addr; |
280 | uint i; | 248 | uint i; |
281 | uint len = sizeof(xfs_efi_log_format_t) + | 249 | uint len = sizeof(xfs_efi_log_format_t) + |
282 | (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t); | 250 | (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t); |
@@ -289,8 +257,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) | |||
289 | memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len); | 257 | memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len); |
290 | return 0; | 258 | return 0; |
291 | } else if (buf->i_len == len32) { | 259 | } else if (buf->i_len == len32) { |
292 | xfs_efi_log_format_32_t *src_efi_fmt_32 = | 260 | xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr; |
293 | (xfs_efi_log_format_32_t *)buf->i_addr; | ||
294 | 261 | ||
295 | dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type; | 262 | dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type; |
296 | dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size; | 263 | dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size; |
@@ -304,8 +271,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) | |||
304 | } | 271 | } |
305 | return 0; | 272 | return 0; |
306 | } else if (buf->i_len == len64) { | 273 | } else if (buf->i_len == len64) { |
307 | xfs_efi_log_format_64_t *src_efi_fmt_64 = | 274 | xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->i_addr; |
308 | (xfs_efi_log_format_64_t *)buf->i_addr; | ||
309 | 275 | ||
310 | dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type; | 276 | dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type; |
311 | dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size; | 277 | dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size; |
@@ -356,16 +322,18 @@ xfs_efi_release(xfs_efi_log_item_t *efip, | |||
356 | } | 322 | } |
357 | } | 323 | } |
358 | 324 | ||
359 | STATIC void | 325 | static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip) |
360 | xfs_efd_item_free(xfs_efd_log_item_t *efdp) | ||
361 | { | 326 | { |
362 | int nexts = efdp->efd_format.efd_nextents; | 327 | return container_of(lip, struct xfs_efd_log_item, efd_item); |
328 | } | ||
363 | 329 | ||
364 | if (nexts > XFS_EFD_MAX_FAST_EXTENTS) { | 330 | STATIC void |
331 | xfs_efd_item_free(struct xfs_efd_log_item *efdp) | ||
332 | { | ||
333 | if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS) | ||
365 | kmem_free(efdp); | 334 | kmem_free(efdp); |
366 | } else { | 335 | else |
367 | kmem_zone_free(xfs_efd_zone, efdp); | 336 | kmem_zone_free(xfs_efd_zone, efdp); |
368 | } | ||
369 | } | 337 | } |
370 | 338 | ||
371 | /* | 339 | /* |
@@ -373,9 +341,9 @@ xfs_efd_item_free(xfs_efd_log_item_t *efdp) | |||
373 | * We only need 1 iovec for an efd item. It just logs the efd_log_format | 341 | * We only need 1 iovec for an efd item. It just logs the efd_log_format |
374 | * structure. | 342 | * structure. |
375 | */ | 343 | */ |
376 | /*ARGSUSED*/ | ||
377 | STATIC uint | 344 | STATIC uint |
378 | xfs_efd_item_size(xfs_efd_log_item_t *efdp) | 345 | xfs_efd_item_size( |
346 | struct xfs_log_item *lip) | ||
379 | { | 347 | { |
380 | return 1; | 348 | return 1; |
381 | } | 349 | } |
@@ -388,10 +356,12 @@ xfs_efd_item_size(xfs_efd_log_item_t *efdp) | |||
388 | * slots in the efd item have been filled. | 356 | * slots in the efd item have been filled. |
389 | */ | 357 | */ |
390 | STATIC void | 358 | STATIC void |
391 | xfs_efd_item_format(xfs_efd_log_item_t *efdp, | 359 | xfs_efd_item_format( |
392 | xfs_log_iovec_t *log_vector) | 360 | struct xfs_log_item *lip, |
361 | struct xfs_log_iovec *log_vector) | ||
393 | { | 362 | { |
394 | uint size; | 363 | struct xfs_efd_log_item *efdp = EFD_ITEM(lip); |
364 | uint size; | ||
395 | 365 | ||
396 | ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); | 366 | ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents); |
397 | 367 | ||
@@ -401,48 +371,38 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp, | |||
401 | size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); | 371 | size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); |
402 | efdp->efd_format.efd_size = 1; | 372 | efdp->efd_format.efd_size = 1; |
403 | 373 | ||
404 | log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); | 374 | log_vector->i_addr = &efdp->efd_format; |
405 | log_vector->i_len = size; | 375 | log_vector->i_len = size; |
406 | log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; | 376 | log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT; |
407 | ASSERT(size >= sizeof(xfs_efd_log_format_t)); | 377 | ASSERT(size >= sizeof(xfs_efd_log_format_t)); |
408 | } | 378 | } |
409 | 379 | ||
410 | |||
411 | /* | 380 | /* |
412 | * Pinning has no meaning for an efd item, so just return. | 381 | * Pinning has no meaning for an efd item, so just return. |
413 | */ | 382 | */ |
414 | /*ARGSUSED*/ | ||
415 | STATIC void | 383 | STATIC void |
416 | xfs_efd_item_pin(xfs_efd_log_item_t *efdp) | 384 | xfs_efd_item_pin( |
385 | struct xfs_log_item *lip) | ||
417 | { | 386 | { |
418 | return; | ||
419 | } | 387 | } |
420 | 388 | ||
421 | |||
422 | /* | 389 | /* |
423 | * Since pinning has no meaning for an efd item, unpinning does | 390 | * Since pinning has no meaning for an efd item, unpinning does |
424 | * not either. | 391 | * not either. |
425 | */ | 392 | */ |
426 | /*ARGSUSED*/ | ||
427 | STATIC void | ||
428 | xfs_efd_item_unpin(xfs_efd_log_item_t *efdp) | ||
429 | { | ||
430 | return; | ||
431 | } | ||
432 | |||
433 | /*ARGSUSED*/ | ||
434 | STATIC void | 393 | STATIC void |
435 | xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp) | 394 | xfs_efd_item_unpin( |
395 | struct xfs_log_item *lip, | ||
396 | int remove) | ||
436 | { | 397 | { |
437 | return; | ||
438 | } | 398 | } |
439 | 399 | ||
440 | /* | 400 | /* |
441 | * Efd items have no locking, so just return success. | 401 | * Efd items have no locking, so just return success. |
442 | */ | 402 | */ |
443 | /*ARGSUSED*/ | ||
444 | STATIC uint | 403 | STATIC uint |
445 | xfs_efd_item_trylock(xfs_efd_log_item_t *efdp) | 404 | xfs_efd_item_trylock( |
405 | struct xfs_log_item *lip) | ||
446 | { | 406 | { |
447 | return XFS_ITEM_LOCKED; | 407 | return XFS_ITEM_LOCKED; |
448 | } | 408 | } |
@@ -451,13 +411,12 @@ xfs_efd_item_trylock(xfs_efd_log_item_t *efdp) | |||
451 | * Efd items have no locking or pushing, so return failure | 411 | * Efd items have no locking or pushing, so return failure |
452 | * so that the caller doesn't bother with us. | 412 | * so that the caller doesn't bother with us. |
453 | */ | 413 | */ |
454 | /*ARGSUSED*/ | ||
455 | STATIC void | 414 | STATIC void |
456 | xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) | 415 | xfs_efd_item_unlock( |
416 | struct xfs_log_item *lip) | ||
457 | { | 417 | { |
458 | if (efdp->efd_item.li_flags & XFS_LI_ABORTED) | 418 | if (lip->li_flags & XFS_LI_ABORTED) |
459 | xfs_efd_item_free(efdp); | 419 | xfs_efd_item_free(EFD_ITEM(lip)); |
460 | return; | ||
461 | } | 420 | } |
462 | 421 | ||
463 | /* | 422 | /* |
@@ -467,15 +426,18 @@ xfs_efd_item_unlock(xfs_efd_log_item_t *efdp) | |||
467 | * return -1 to keep the transaction code from further referencing | 426 | * return -1 to keep the transaction code from further referencing |
468 | * this item. | 427 | * this item. |
469 | */ | 428 | */ |
470 | /*ARGSUSED*/ | ||
471 | STATIC xfs_lsn_t | 429 | STATIC xfs_lsn_t |
472 | xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn) | 430 | xfs_efd_item_committed( |
431 | struct xfs_log_item *lip, | ||
432 | xfs_lsn_t lsn) | ||
473 | { | 433 | { |
434 | struct xfs_efd_log_item *efdp = EFD_ITEM(lip); | ||
435 | |||
474 | /* | 436 | /* |
475 | * If we got a log I/O error, it's always the case that the LR with the | 437 | * If we got a log I/O error, it's always the case that the LR with the |
476 | * EFI got unpinned and freed before the EFD got aborted. | 438 | * EFI got unpinned and freed before the EFD got aborted. |
477 | */ | 439 | */ |
478 | if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0) | 440 | if (!(lip->li_flags & XFS_LI_ABORTED)) |
479 | xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents); | 441 | xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents); |
480 | 442 | ||
481 | xfs_efd_item_free(efdp); | 443 | xfs_efd_item_free(efdp); |
@@ -486,11 +448,10 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn) | |||
486 | * There isn't much you can do to push on an efd item. It is simply | 448 | * There isn't much you can do to push on an efd item. It is simply |
487 | * stuck waiting for the log to be flushed to disk. | 449 | * stuck waiting for the log to be flushed to disk. |
488 | */ | 450 | */ |
489 | /*ARGSUSED*/ | ||
490 | STATIC void | 451 | STATIC void |
491 | xfs_efd_item_push(xfs_efd_log_item_t *efdp) | 452 | xfs_efd_item_push( |
453 | struct xfs_log_item *lip) | ||
492 | { | 454 | { |
493 | return; | ||
494 | } | 455 | } |
495 | 456 | ||
496 | /* | 457 | /* |
@@ -500,55 +461,48 @@ xfs_efd_item_push(xfs_efd_log_item_t *efdp) | |||
500 | * example, for inodes, the inode is locked throughout the extent freeing | 461 | * example, for inodes, the inode is locked throughout the extent freeing |
501 | * so the dependency should be recorded there. | 462 | * so the dependency should be recorded there. |
502 | */ | 463 | */ |
503 | /*ARGSUSED*/ | ||
504 | STATIC void | 464 | STATIC void |
505 | xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn) | 465 | xfs_efd_item_committing( |
466 | struct xfs_log_item *lip, | ||
467 | xfs_lsn_t lsn) | ||
506 | { | 468 | { |
507 | return; | ||
508 | } | 469 | } |
509 | 470 | ||
510 | /* | 471 | /* |
511 | * This is the ops vector shared by all efd log items. | 472 | * This is the ops vector shared by all efd log items. |
512 | */ | 473 | */ |
513 | static struct xfs_item_ops xfs_efd_item_ops = { | 474 | static struct xfs_item_ops xfs_efd_item_ops = { |
514 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size, | 475 | .iop_size = xfs_efd_item_size, |
515 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 476 | .iop_format = xfs_efd_item_format, |
516 | xfs_efd_item_format, | 477 | .iop_pin = xfs_efd_item_pin, |
517 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin, | 478 | .iop_unpin = xfs_efd_item_unpin, |
518 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin, | 479 | .iop_trylock = xfs_efd_item_trylock, |
519 | .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) | 480 | .iop_unlock = xfs_efd_item_unlock, |
520 | xfs_efd_item_unpin_remove, | 481 | .iop_committed = xfs_efd_item_committed, |
521 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock, | 482 | .iop_push = xfs_efd_item_push, |
522 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock, | 483 | .iop_committing = xfs_efd_item_committing |
523 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
524 | xfs_efd_item_committed, | ||
525 | .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push, | ||
526 | .iop_pushbuf = NULL, | ||
527 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
528 | xfs_efd_item_committing | ||
529 | }; | 484 | }; |
530 | 485 | ||
531 | |||
532 | /* | 486 | /* |
533 | * Allocate and initialize an efd item with the given number of extents. | 487 | * Allocate and initialize an efd item with the given number of extents. |
534 | */ | 488 | */ |
535 | xfs_efd_log_item_t * | 489 | struct xfs_efd_log_item * |
536 | xfs_efd_init(xfs_mount_t *mp, | 490 | xfs_efd_init( |
537 | xfs_efi_log_item_t *efip, | 491 | struct xfs_mount *mp, |
538 | uint nextents) | 492 | struct xfs_efi_log_item *efip, |
493 | uint nextents) | ||
539 | 494 | ||
540 | { | 495 | { |
541 | xfs_efd_log_item_t *efdp; | 496 | struct xfs_efd_log_item *efdp; |
542 | uint size; | 497 | uint size; |
543 | 498 | ||
544 | ASSERT(nextents > 0); | 499 | ASSERT(nextents > 0); |
545 | if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { | 500 | if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { |
546 | size = (uint)(sizeof(xfs_efd_log_item_t) + | 501 | size = (uint)(sizeof(xfs_efd_log_item_t) + |
547 | ((nextents - 1) * sizeof(xfs_extent_t))); | 502 | ((nextents - 1) * sizeof(xfs_extent_t))); |
548 | efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP); | 503 | efdp = kmem_zalloc(size, KM_SLEEP); |
549 | } else { | 504 | } else { |
550 | efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone, | 505 | efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP); |
551 | KM_SLEEP); | ||
552 | } | 506 | } |
553 | 507 | ||
554 | xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops); | 508 | xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops); |
@@ -556,5 +510,5 @@ xfs_efd_init(xfs_mount_t *mp, | |||
556 | efdp->efd_format.efd_nextents = nextents; | 510 | efdp->efd_format.efd_nextents = nextents; |
557 | efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; | 511 | efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; |
558 | 512 | ||
559 | return (efdp); | 513 | return efdp; |
560 | } | 514 | } |
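Both xfs_efi_init() and xfs_efd_init() keep the same sizing scheme after this cleanup: items with up to XFS_EFI_MAX_FAST_EXTENTS extents come from a preallocated zone, larger ones get a one-off allocation with the extra extents appended to the structure. A rough sketch of that sizing in plain C; calloc() stands in for the kernel zone/heap allocators, the threshold value is illustrative, and the names are not the kernel's.

	#include <stdlib.h>

	#define MAX_FAST_EXTENTS	16	/* illustrative threshold */

	struct extent { unsigned long long start, len; };

	struct efi_item {
		unsigned int	nextents;
		struct extent	extents[1];	/* really nextents entries */
	};

	static struct efi_item *efi_alloc(unsigned int nextents)
	{
		size_t size;
		struct efi_item *efip;

		if (nextents > MAX_FAST_EXTENTS)
			size = sizeof(struct efi_item) +
			       (nextents - 1) * sizeof(struct extent);
		else	/* fast-path items are sized for the maximum fast count */
			size = sizeof(struct efi_item) +
			       (MAX_FAST_EXTENTS - 1) * sizeof(struct extent);

		efip = calloc(1, size);		/* zone allocator elided */
		if (efip)
			efip->nextents = nextents;
		return efip;
	}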
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c index 390850ee6603..9b715dce5699 100644 --- a/fs/xfs/xfs_filestream.c +++ b/fs/xfs/xfs_filestream.c | |||
@@ -18,13 +18,9 @@ | |||
18 | #include "xfs.h" | 18 | #include "xfs.h" |
19 | #include "xfs_bmap_btree.h" | 19 | #include "xfs_bmap_btree.h" |
20 | #include "xfs_inum.h" | 20 | #include "xfs_inum.h" |
21 | #include "xfs_dir2.h" | ||
22 | #include "xfs_dir2_sf.h" | ||
23 | #include "xfs_attr_sf.h" | ||
24 | #include "xfs_dinode.h" | 21 | #include "xfs_dinode.h" |
25 | #include "xfs_inode.h" | 22 | #include "xfs_inode.h" |
26 | #include "xfs_ag.h" | 23 | #include "xfs_ag.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_log.h" | 24 | #include "xfs_log.h" |
29 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
30 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
@@ -127,6 +123,82 @@ typedef struct fstrm_item | |||
127 | xfs_inode_t *pip; /* Parent directory inode pointer. */ | 123 | xfs_inode_t *pip; /* Parent directory inode pointer. */ |
128 | } fstrm_item_t; | 124 | } fstrm_item_t; |
129 | 125 | ||
126 | /* | ||
127 | * Allocation group filestream associations are tracked with per-ag atomic | ||
128 | * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a | ||
129 | * particular AG already has active filestreams associated with it. The mount | ||
130 | * point's m_peraglock is used to protect these counters from per-ag array | ||
131 | * re-allocation during a growfs operation. When xfs_growfs_data_private() is | ||
132 | * about to reallocate the array, it calls xfs_filestream_flush() with the | ||
133 | * m_peraglock held in write mode. | ||
134 | * | ||
135 | * Since xfs_mru_cache_flush() guarantees that all the free functions for all | ||
136 | * the cache elements have finished executing before it returns, it's safe for | ||
137 | * the free functions to use the atomic counters without m_peraglock protection. | ||
138 | * This allows the implementation of xfs_fstrm_free_func() to be agnostic about | ||
139 | * whether it was called with the m_peraglock held in read mode, write mode or | ||
140 | * not held at all. The race condition this addresses is the following: | ||
141 | * | ||
142 | * - The work queue scheduler fires and pulls a filestream directory cache | ||
143 | * element off the LRU end of the cache for deletion, then gets pre-empted. | ||
144 | * - A growfs operation grabs the m_peraglock in write mode, flushes all the | ||
145 | * remaining items from the cache and reallocates the mount point's per-ag | ||
146 | * array, resetting all the counters to zero. | ||
147 | * - The work queue thread resumes and calls the free function for the element | ||
148 | * it started cleaning up earlier. In the process it decrements the | ||
149 | * filestreams counter for an AG that now has no references. | ||
150 | * | ||
151 | * With a shrinkfs feature, the above scenario could panic the system. | ||
152 | * | ||
153 | * All other uses of the following macros should be protected by either the | ||
154 | * m_peraglock held in read mode, or the cache's internal locking exposed by the | ||
155 | * interval between a call to xfs_mru_cache_lookup() and a call to | ||
156 | * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode | ||
157 | * when new elements are added to the cache. | ||
158 | * | ||
159 | * Combined, these locking rules ensure that no associations will ever exist in | ||
160 | * the cache that reference per-ag array elements that have since been | ||
161 | * reallocated. | ||
162 | */ | ||
163 | static int | ||
164 | xfs_filestream_peek_ag( | ||
165 | xfs_mount_t *mp, | ||
166 | xfs_agnumber_t agno) | ||
167 | { | ||
168 | struct xfs_perag *pag; | ||
169 | int ret; | ||
170 | |||
171 | pag = xfs_perag_get(mp, agno); | ||
172 | ret = atomic_read(&pag->pagf_fstrms); | ||
173 | xfs_perag_put(pag); | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static int | ||
178 | xfs_filestream_get_ag( | ||
179 | xfs_mount_t *mp, | ||
180 | xfs_agnumber_t agno) | ||
181 | { | ||
182 | struct xfs_perag *pag; | ||
183 | int ret; | ||
184 | |||
185 | pag = xfs_perag_get(mp, agno); | ||
186 | ret = atomic_inc_return(&pag->pagf_fstrms); | ||
187 | xfs_perag_put(pag); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static void | ||
192 | xfs_filestream_put_ag( | ||
193 | xfs_mount_t *mp, | ||
194 | xfs_agnumber_t agno) | ||
195 | { | ||
196 | struct xfs_perag *pag; | ||
197 | |||
198 | pag = xfs_perag_get(mp, agno); | ||
199 | atomic_dec(&pag->pagf_fstrms); | ||
200 | xfs_perag_put(pag); | ||
201 | } | ||
130 | 202 | ||
131 | /* | 203 | /* |
132 | * Scan the AGs starting at startag looking for an AG that isn't in use and has | 204 | * Scan the AGs starting at startag looking for an AG that isn't in use and has |
@@ -355,16 +427,14 @@ xfs_fstrm_free_func( | |||
355 | { | 427 | { |
356 | fstrm_item_t *item = (fstrm_item_t *)data; | 428 | fstrm_item_t *item = (fstrm_item_t *)data; |
357 | xfs_inode_t *ip = item->ip; | 429 | xfs_inode_t *ip = item->ip; |
358 | int ref; | ||
359 | 430 | ||
360 | ASSERT(ip->i_ino == ino); | 431 | ASSERT(ip->i_ino == ino); |
361 | 432 | ||
362 | xfs_iflags_clear(ip, XFS_IFILESTREAM); | 433 | xfs_iflags_clear(ip, XFS_IFILESTREAM); |
363 | 434 | ||
364 | /* Drop the reference taken on the AG when the item was added. */ | 435 | /* Drop the reference taken on the AG when the item was added. */ |
365 | ref = xfs_filestream_put_ag(ip->i_mount, item->ag); | 436 | xfs_filestream_put_ag(ip->i_mount, item->ag); |
366 | 437 | ||
367 | ASSERT(ref >= 0); | ||
368 | TRACE_FREE(ip->i_mount, ip, item->pip, item->ag, | 438 | TRACE_FREE(ip->i_mount, ip, item->pip, item->ag, |
369 | xfs_filestream_peek_ag(ip->i_mount, item->ag)); | 439 | xfs_filestream_peek_ag(ip->i_mount, item->ag)); |
370 | 440 | ||
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h index 260f757bbc5d..09dd9af45434 100644 --- a/fs/xfs/xfs_filestream.h +++ b/fs/xfs/xfs_filestream.h | |||
@@ -42,88 +42,6 @@ extern ktrace_t *xfs_filestreams_trace_buf; | |||
42 | 42 | ||
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | /* | ||
46 | * Allocation group filestream associations are tracked with per-ag atomic | ||
47 | * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a | ||
48 | * particular AG already has active filestreams associated with it. The mount | ||
49 | * point's m_peraglock is used to protect these counters from per-ag array | ||
50 | * re-allocation during a growfs operation. When xfs_growfs_data_private() is | ||
51 | * about to reallocate the array, it calls xfs_filestream_flush() with the | ||
52 | * m_peraglock held in write mode. | ||
53 | * | ||
54 | * Since xfs_mru_cache_flush() guarantees that all the free functions for all | ||
55 | * the cache elements have finished executing before it returns, it's safe for | ||
56 | * the free functions to use the atomic counters without m_peraglock protection. | ||
57 | * This allows the implementation of xfs_fstrm_free_func() to be agnostic about | ||
58 | * whether it was called with the m_peraglock held in read mode, write mode or | ||
59 | * not held at all. The race condition this addresses is the following: | ||
60 | * | ||
61 | * - The work queue scheduler fires and pulls a filestream directory cache | ||
62 | * element off the LRU end of the cache for deletion, then gets pre-empted. | ||
63 | * - A growfs operation grabs the m_peraglock in write mode, flushes all the | ||
64 | * remaining items from the cache and reallocates the mount point's per-ag | ||
65 | * array, resetting all the counters to zero. | ||
66 | * - The work queue thread resumes and calls the free function for the element | ||
67 | * it started cleaning up earlier. In the process it decrements the | ||
68 | * filestreams counter for an AG that now has no references. | ||
69 | * | ||
70 | * With a shrinkfs feature, the above scenario could panic the system. | ||
71 | * | ||
72 | * All other uses of the following macros should be protected by either the | ||
73 | * m_peraglock held in read mode, or the cache's internal locking exposed by the | ||
74 | * interval between a call to xfs_mru_cache_lookup() and a call to | ||
75 | * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode | ||
76 | * when new elements are added to the cache. | ||
77 | * | ||
78 | * Combined, these locking rules ensure that no associations will ever exist in | ||
79 | * the cache that reference per-ag array elements that have since been | ||
80 | * reallocated. | ||
81 | */ | ||
82 | /* | ||
83 | * xfs_filestream_peek_ag is only used in tracing code | ||
84 | */ | ||
85 | static inline int | ||
86 | xfs_filestream_peek_ag( | ||
87 | xfs_mount_t *mp, | ||
88 | xfs_agnumber_t agno) | ||
89 | { | ||
90 | struct xfs_perag *pag; | ||
91 | int ret; | ||
92 | |||
93 | pag = xfs_perag_get(mp, agno); | ||
94 | ret = atomic_read(&pag->pagf_fstrms); | ||
95 | xfs_perag_put(pag); | ||
96 | return ret; | ||
97 | } | ||
98 | |||
99 | static inline int | ||
100 | xfs_filestream_get_ag( | ||
101 | xfs_mount_t *mp, | ||
102 | xfs_agnumber_t agno) | ||
103 | { | ||
104 | struct xfs_perag *pag; | ||
105 | int ret; | ||
106 | |||
107 | pag = xfs_perag_get(mp, agno); | ||
108 | ret = atomic_inc_return(&pag->pagf_fstrms); | ||
109 | xfs_perag_put(pag); | ||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static inline int | ||
114 | xfs_filestream_put_ag( | ||
115 | xfs_mount_t *mp, | ||
116 | xfs_agnumber_t agno) | ||
117 | { | ||
118 | struct xfs_perag *pag; | ||
119 | int ret; | ||
120 | |||
121 | pag = xfs_perag_get(mp, agno); | ||
122 | ret = atomic_dec_return(&pag->pagf_fstrms); | ||
123 | xfs_perag_put(pag); | ||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | /* allocation selection flags */ | 45 | /* allocation selection flags */ |
128 | typedef enum xfs_fstrm_alloc { | 46 | typedef enum xfs_fstrm_alloc { |
129 | XFS_PICK_USERDATA = 1, | 47 | XFS_PICK_USERDATA = 1, |
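The comment block removed from xfs_filestream.h above documented the m_peraglock rules that protected the old open-coded per-AG counter helpers. The replacement helpers at the top of this section reach the counter through xfs_perag_get()/xfs_perag_put(), which take and drop a reference on the per-AG structure around each update, so the old locking commentary presumably no longer applies. A minimal sketch of that access pattern, using only calls that appear in this diff (example_peek_fstrms is an invented name; this only builds inside the XFS tree):

	/*
	 * Sketch: reference-counted read of a per-AG counter.
	 * xfs_perag_get() looks up and pins the per-AG structure for agno;
	 * xfs_perag_put() drops that reference once the read is done.
	 */
	static int example_peek_fstrms(struct xfs_mount *mp, xfs_agnumber_t agno)
	{
		struct xfs_perag *pag = xfs_perag_get(mp, agno);
		int count = atomic_read(&pag->pagf_fstrms);

		xfs_perag_put(pag);
		return count;
	}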
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 37a6f62c57b6..dbca5f5c37ba 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -24,14 +24,10 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | 33 | #include "xfs_inode_item.h" |
@@ -626,8 +622,7 @@ xfs_fs_log_dummy( | |||
626 | ip = mp->m_rootip; | 622 | ip = mp->m_rootip; |
627 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 623 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
628 | 624 | ||
629 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 625 | xfs_trans_ijoin(tp, ip); |
630 | xfs_trans_ihold(tp, ip); | ||
631 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 626 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
632 | xfs_trans_set_sync(tp); | 627 | xfs_trans_set_sync(tp); |
633 | error = xfs_trans_commit(tp, 0); | 628 | error = xfs_trans_commit(tp, 0); |
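The xfs_fs_log_dummy() hunk above shows the inode/transaction join change that repeats throughout this diff: the xfs_trans_ijoin(tp, ip, lock_flags) + xfs_trans_ihold(tp, ip) pair becomes a single xfs_trans_ijoin(tp, ip) call, and when the transaction is meant to unlock the inode itself the flags are recorded in the log item's ili_lock_flags and released in xfs_inode_item_unlock() (see that hunk later in this diff). A hedged before/after sketch of the calling convention; example_join_old()/example_join_new() are invented names, and the two halves clearly cannot coexist in one tree:

	/* Before this series: join with explicit lock flags, then hold. */
	static void example_join_old(struct xfs_trans *tp, struct xfs_inode *ip)
	{
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
		xfs_trans_ihold(tp, ip);	/* keep the inode attached across commit */
	}

	/* After this series: a single join call, as in the hunk above. */
	static void example_join_new(struct xfs_trans *tp, struct xfs_inode *ip)
	{
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip);
	}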
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 9d884c127bb9..abf80ae1e95b 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c | |||
@@ -24,14 +24,10 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
@@ -1203,6 +1199,63 @@ error0: | |||
1203 | return error; | 1199 | return error; |
1204 | } | 1200 | } |
1205 | 1201 | ||
1202 | STATIC int | ||
1203 | xfs_imap_lookup( | ||
1204 | struct xfs_mount *mp, | ||
1205 | struct xfs_trans *tp, | ||
1206 | xfs_agnumber_t agno, | ||
1207 | xfs_agino_t agino, | ||
1208 | xfs_agblock_t agbno, | ||
1209 | xfs_agblock_t *chunk_agbno, | ||
1210 | xfs_agblock_t *offset_agbno, | ||
1211 | int flags) | ||
1212 | { | ||
1213 | struct xfs_inobt_rec_incore rec; | ||
1214 | struct xfs_btree_cur *cur; | ||
1215 | struct xfs_buf *agbp; | ||
1216 | xfs_agino_t startino; | ||
1217 | int error; | ||
1218 | int i; | ||
1219 | |||
1220 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1221 | if (error) { | ||
1222 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1223 | "xfs_ialloc_read_agi() returned " | ||
1224 | "error %d, agno %d", | ||
1225 | error, agno); | ||
1226 | return error; | ||
1227 | } | ||
1228 | |||
1229 | /* | ||
1230 | * derive and lookup the exact inode record for the given agino. If the | ||
1231 | * record cannot be found, then it's an invalid inode number and we | ||
1232 | * should abort. | ||
1233 | */ | ||
1234 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); | ||
1235 | startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); | ||
1236 | error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i); | ||
1237 | if (!error) { | ||
1238 | if (i) | ||
1239 | error = xfs_inobt_get_rec(cur, &rec, &i); | ||
1240 | if (!error && i == 0) | ||
1241 | error = EINVAL; | ||
1242 | } | ||
1243 | |||
1244 | xfs_trans_brelse(tp, agbp); | ||
1245 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1246 | if (error) | ||
1247 | return error; | ||
1248 | |||
1249 | /* for untrusted inodes check it is allocated first */ | ||
1250 | if ((flags & XFS_IGET_UNTRUSTED) && | ||
1251 | (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) | ||
1252 | return EINVAL; | ||
1253 | |||
1254 | *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); | ||
1255 | *offset_agbno = agbno - *chunk_agbno; | ||
1256 | return 0; | ||
1257 | } | ||
1258 | |||
1206 | /* | 1259 | /* |
1207 | * Return the location of the inode in imap, for mapping it into a buffer. | 1260 | * Return the location of the inode in imap, for mapping it into a buffer. |
1208 | */ | 1261 | */ |
@@ -1235,8 +1288,11 @@ xfs_imap( | |||
1235 | if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || | 1288 | if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks || |
1236 | ino != XFS_AGINO_TO_INO(mp, agno, agino)) { | 1289 | ino != XFS_AGINO_TO_INO(mp, agno, agino)) { |
1237 | #ifdef DEBUG | 1290 | #ifdef DEBUG |
1238 | /* no diagnostics for bulkstat, ino comes from userspace */ | 1291 | /* |
1239 | if (flags & XFS_IGET_BULKSTAT) | 1292 | * Don't output diagnostic information for untrusted inodes |
1293 | * as they can be invalid without implying corruption. | ||
1294 | */ | ||
1295 | if (flags & XFS_IGET_UNTRUSTED) | ||
1240 | return XFS_ERROR(EINVAL); | 1296 | return XFS_ERROR(EINVAL); |
1241 | if (agno >= mp->m_sb.sb_agcount) { | 1297 | if (agno >= mp->m_sb.sb_agcount) { |
1242 | xfs_fs_cmn_err(CE_ALERT, mp, | 1298 | xfs_fs_cmn_err(CE_ALERT, mp, |
@@ -1263,6 +1319,23 @@ xfs_imap( | |||
1263 | return XFS_ERROR(EINVAL); | 1319 | return XFS_ERROR(EINVAL); |
1264 | } | 1320 | } |
1265 | 1321 | ||
1322 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | ||
1323 | |||
1324 | /* | ||
1325 | * For bulkstat and handle lookups, we have an untrusted inode number | ||
1326 | * that we have to verify is valid. We cannot do this just by reading | ||
1327 | * the inode buffer as it may have been unlinked and removed leaving | ||
1328 | * inodes in stale state on disk. Hence we have to do a btree lookup | ||
1329 | * in all cases where an untrusted inode number is passed. | ||
1330 | */ | ||
1331 | if (flags & XFS_IGET_UNTRUSTED) { | ||
1332 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, | ||
1333 | &chunk_agbno, &offset_agbno, flags); | ||
1334 | if (error) | ||
1335 | return error; | ||
1336 | goto out_map; | ||
1337 | } | ||
1338 | |||
1266 | /* | 1339 | /* |
1267 | * If the inode cluster size is the same as the blocksize or | 1340 | * If the inode cluster size is the same as the blocksize or |
1268 | * smaller we get to the buffer by simple arithmetics. | 1341 | * smaller we get to the buffer by simple arithmetics. |
@@ -1277,24 +1350,6 @@ xfs_imap( | |||
1277 | return 0; | 1350 | return 0; |
1278 | } | 1351 | } |
1279 | 1352 | ||
1280 | blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; | ||
1281 | |||
1282 | /* | ||
1283 | * If we get a block number passed from bulkstat we can use it to | ||
1284 | * find the buffer easily. | ||
1285 | */ | ||
1286 | if (imap->im_blkno) { | ||
1287 | offset = XFS_INO_TO_OFFSET(mp, ino); | ||
1288 | ASSERT(offset < mp->m_sb.sb_inopblock); | ||
1289 | |||
1290 | cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno); | ||
1291 | offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; | ||
1292 | |||
1293 | imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); | ||
1294 | imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); | ||
1295 | return 0; | ||
1296 | } | ||
1297 | |||
1298 | /* | 1353 | /* |
1299 | * If the inode chunks are aligned then use simple maths to | 1354 | * If the inode chunks are aligned then use simple maths to |
1300 | * find the location. Otherwise we have to do a btree | 1355 | * find the location. Otherwise we have to do a btree |
@@ -1304,50 +1359,13 @@ xfs_imap( | |||
1304 | offset_agbno = agbno & mp->m_inoalign_mask; | 1359 | offset_agbno = agbno & mp->m_inoalign_mask; |
1305 | chunk_agbno = agbno - offset_agbno; | 1360 | chunk_agbno = agbno - offset_agbno; |
1306 | } else { | 1361 | } else { |
1307 | xfs_btree_cur_t *cur; /* inode btree cursor */ | 1362 | error = xfs_imap_lookup(mp, tp, agno, agino, agbno, |
1308 | xfs_inobt_rec_incore_t chunk_rec; | 1363 | &chunk_agbno, &offset_agbno, flags); |
1309 | xfs_buf_t *agbp; /* agi buffer */ | ||
1310 | int i; /* temp state */ | ||
1311 | |||
1312 | error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); | ||
1313 | if (error) { | ||
1314 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1315 | "xfs_ialloc_read_agi() returned " | ||
1316 | "error %d, agno %d", | ||
1317 | error, agno); | ||
1318 | return error; | ||
1319 | } | ||
1320 | |||
1321 | cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); | ||
1322 | error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); | ||
1323 | if (error) { | ||
1324 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1325 | "xfs_inobt_lookup() failed"); | ||
1326 | goto error0; | ||
1327 | } | ||
1328 | |||
1329 | error = xfs_inobt_get_rec(cur, &chunk_rec, &i); | ||
1330 | if (error) { | ||
1331 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1332 | "xfs_inobt_get_rec() failed"); | ||
1333 | goto error0; | ||
1334 | } | ||
1335 | if (i == 0) { | ||
1336 | #ifdef DEBUG | ||
1337 | xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " | ||
1338 | "xfs_inobt_get_rec() failed"); | ||
1339 | #endif /* DEBUG */ | ||
1340 | error = XFS_ERROR(EINVAL); | ||
1341 | } | ||
1342 | error0: | ||
1343 | xfs_trans_brelse(tp, agbp); | ||
1344 | xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); | ||
1345 | if (error) | 1364 | if (error) |
1346 | return error; | 1365 | return error; |
1347 | chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino); | ||
1348 | offset_agbno = agbno - chunk_agbno; | ||
1349 | } | 1366 | } |
1350 | 1367 | ||
1368 | out_map: | ||
1351 | ASSERT(agbno >= chunk_agbno); | 1369 | ASSERT(agbno >= chunk_agbno); |
1352 | cluster_agbno = chunk_agbno + | 1370 | cluster_agbno = chunk_agbno + |
1353 | ((offset_agbno / blks_per_cluster) * blks_per_cluster); | 1371 | ((offset_agbno / blks_per_cluster) * blks_per_cluster); |
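The comment added to xfs_imap() above explains why an untrusted inode number must be validated with an inode-btree lookup rather than by reading the inode buffer. The new xfs_imap_lookup() does this by first rounding the AG inode number down to the first inode of its allocation chunk, agino & ~(XFS_IALLOC_INODES(mp) - 1), and looking that record up with XFS_LOOKUP_EQ. A small self-contained illustration of the rounding step (chunk_startino and the value 64 are examples, not taken from this diff; the mask trick assumes a power-of-two chunk size):

	#include <stdio.h>

	/* Round an AG inode number down to the first inode of its chunk,
	 * mirroring "agino & ~(XFS_IALLOC_INODES(mp) - 1)" above. */
	static unsigned int chunk_startino(unsigned int agino, unsigned int inodes_per_chunk)
	{
		return agino & ~(inodes_per_chunk - 1);
	}

	int main(void)
	{
		printf("%u\n", chunk_startino(1234, 64));	/* prints 1216 */
		return 0;
	}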
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index c282a9af5393..d352862cefa0 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c | |||
@@ -24,14 +24,10 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 75df75f43d48..b1ecc6f97ade 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -25,14 +25,10 @@ | |||
25 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
26 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
27 | #include "xfs_ag.h" | 27 | #include "xfs_ag.h" |
28 | #include "xfs_dir2.h" | ||
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
33 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
38 | #include "xfs_btree.h" | 34 | #include "xfs_btree.h" |
@@ -95,7 +91,7 @@ xfs_inode_alloc( | |||
95 | return ip; | 91 | return ip; |
96 | } | 92 | } |
97 | 93 | ||
98 | STATIC void | 94 | void |
99 | xfs_inode_free( | 95 | xfs_inode_free( |
100 | struct xfs_inode *ip) | 96 | struct xfs_inode *ip) |
101 | { | 97 | { |
@@ -212,7 +208,7 @@ xfs_iget_cache_hit( | |||
212 | ip->i_flags &= ~XFS_INEW; | 208 | ip->i_flags &= ~XFS_INEW; |
213 | ip->i_flags |= XFS_IRECLAIMABLE; | 209 | ip->i_flags |= XFS_IRECLAIMABLE; |
214 | __xfs_inode_set_reclaim_tag(pag, ip); | 210 | __xfs_inode_set_reclaim_tag(pag, ip); |
215 | trace_xfs_iget_reclaim(ip); | 211 | trace_xfs_iget_reclaim_fail(ip); |
216 | goto out_error; | 212 | goto out_error; |
217 | } | 213 | } |
218 | 214 | ||
@@ -227,6 +223,7 @@ xfs_iget_cache_hit( | |||
227 | } else { | 223 | } else { |
228 | /* If the VFS inode is being torn down, pause and try again. */ | 224 | /* If the VFS inode is being torn down, pause and try again. */ |
229 | if (!igrab(inode)) { | 225 | if (!igrab(inode)) { |
226 | trace_xfs_iget_skip(ip); | ||
230 | error = EAGAIN; | 227 | error = EAGAIN; |
231 | goto out_error; | 228 | goto out_error; |
232 | } | 229 | } |
@@ -234,6 +231,7 @@ xfs_iget_cache_hit( | |||
234 | /* We've got a live one. */ | 231 | /* We've got a live one. */ |
235 | spin_unlock(&ip->i_flags_lock); | 232 | spin_unlock(&ip->i_flags_lock); |
236 | read_unlock(&pag->pag_ici_lock); | 233 | read_unlock(&pag->pag_ici_lock); |
234 | trace_xfs_iget_hit(ip); | ||
237 | } | 235 | } |
238 | 236 | ||
239 | if (lock_flags != 0) | 237 | if (lock_flags != 0) |
@@ -242,7 +240,6 @@ xfs_iget_cache_hit( | |||
242 | xfs_iflags_clear(ip, XFS_ISTALE); | 240 | xfs_iflags_clear(ip, XFS_ISTALE); |
243 | XFS_STATS_INC(xs_ig_found); | 241 | XFS_STATS_INC(xs_ig_found); |
244 | 242 | ||
245 | trace_xfs_iget_found(ip); | ||
246 | return 0; | 243 | return 0; |
247 | 244 | ||
248 | out_error: | 245 | out_error: |
@@ -259,24 +256,22 @@ xfs_iget_cache_miss( | |||
259 | xfs_trans_t *tp, | 256 | xfs_trans_t *tp, |
260 | xfs_ino_t ino, | 257 | xfs_ino_t ino, |
261 | struct xfs_inode **ipp, | 258 | struct xfs_inode **ipp, |
262 | xfs_daddr_t bno, | ||
263 | int flags, | 259 | int flags, |
264 | int lock_flags) | 260 | int lock_flags) |
265 | { | 261 | { |
266 | struct xfs_inode *ip; | 262 | struct xfs_inode *ip; |
267 | int error; | 263 | int error; |
268 | unsigned long first_index, mask; | ||
269 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | 264 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); |
270 | 265 | ||
271 | ip = xfs_inode_alloc(mp, ino); | 266 | ip = xfs_inode_alloc(mp, ino); |
272 | if (!ip) | 267 | if (!ip) |
273 | return ENOMEM; | 268 | return ENOMEM; |
274 | 269 | ||
275 | error = xfs_iread(mp, tp, ip, bno, flags); | 270 | error = xfs_iread(mp, tp, ip, flags); |
276 | if (error) | 271 | if (error) |
277 | goto out_destroy; | 272 | goto out_destroy; |
278 | 273 | ||
279 | xfs_itrace_entry(ip); | 274 | trace_xfs_iget_miss(ip); |
280 | 275 | ||
281 | if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { | 276 | if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { |
282 | error = ENOENT; | 277 | error = ENOENT; |
@@ -302,8 +297,6 @@ xfs_iget_cache_miss( | |||
302 | BUG(); | 297 | BUG(); |
303 | } | 298 | } |
304 | 299 | ||
305 | mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); | ||
306 | first_index = agino & mask; | ||
307 | write_lock(&pag->pag_ici_lock); | 300 | write_lock(&pag->pag_ici_lock); |
308 | 301 | ||
309 | /* insert the new inode */ | 302 | /* insert the new inode */ |
@@ -322,7 +315,6 @@ xfs_iget_cache_miss( | |||
322 | write_unlock(&pag->pag_ici_lock); | 315 | write_unlock(&pag->pag_ici_lock); |
323 | radix_tree_preload_end(); | 316 | radix_tree_preload_end(); |
324 | 317 | ||
325 | trace_xfs_iget_alloc(ip); | ||
326 | *ipp = ip; | 318 | *ipp = ip; |
327 | return 0; | 319 | return 0; |
328 | 320 | ||
@@ -358,8 +350,6 @@ out_destroy: | |||
358 | * within the file system for the inode being requested. | 350 | * within the file system for the inode being requested. |
359 | * lock_flags -- flags indicating how to lock the inode. See the comment | 351 | * lock_flags -- flags indicating how to lock the inode. See the comment |
360 | * for xfs_ilock() for a list of valid values. | 352 | * for xfs_ilock() for a list of valid values. |
361 | * bno -- the block number starting the buffer containing the inode, | ||
362 | * if known (as by bulkstat), else 0. | ||
363 | */ | 353 | */ |
364 | int | 354 | int |
365 | xfs_iget( | 355 | xfs_iget( |
@@ -368,8 +358,7 @@ xfs_iget( | |||
368 | xfs_ino_t ino, | 358 | xfs_ino_t ino, |
369 | uint flags, | 359 | uint flags, |
370 | uint lock_flags, | 360 | uint lock_flags, |
371 | xfs_inode_t **ipp, | 361 | xfs_inode_t **ipp) |
372 | xfs_daddr_t bno) | ||
373 | { | 362 | { |
374 | xfs_inode_t *ip; | 363 | xfs_inode_t *ip; |
375 | int error; | 364 | int error; |
@@ -397,7 +386,7 @@ again: | |||
397 | read_unlock(&pag->pag_ici_lock); | 386 | read_unlock(&pag->pag_ici_lock); |
398 | XFS_STATS_INC(xs_ig_missed); | 387 | XFS_STATS_INC(xs_ig_missed); |
399 | 388 | ||
400 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, | 389 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, |
401 | flags, lock_flags); | 390 | flags, lock_flags); |
402 | if (error) | 391 | if (error) |
403 | goto out_error_or_again; | 392 | goto out_error_or_again; |
@@ -426,97 +415,6 @@ out_error_or_again: | |||
426 | } | 415 | } |
427 | 416 | ||
428 | /* | 417 | /* |
429 | * Decrement reference count of an inode structure and unlock it. | ||
430 | * | ||
431 | * ip -- the inode being released | ||
432 | * lock_flags -- this parameter indicates the inode's locks to be | ||
433 | * to be released. See the comment on xfs_iunlock() for a list | ||
434 | * of valid values. | ||
435 | */ | ||
436 | void | ||
437 | xfs_iput(xfs_inode_t *ip, | ||
438 | uint lock_flags) | ||
439 | { | ||
440 | xfs_itrace_entry(ip); | ||
441 | xfs_iunlock(ip, lock_flags); | ||
442 | IRELE(ip); | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | * Special iput for brand-new inodes that are still locked | ||
447 | */ | ||
448 | void | ||
449 | xfs_iput_new( | ||
450 | xfs_inode_t *ip, | ||
451 | uint lock_flags) | ||
452 | { | ||
453 | struct inode *inode = VFS_I(ip); | ||
454 | |||
455 | xfs_itrace_entry(ip); | ||
456 | |||
457 | if ((ip->i_d.di_mode == 0)) { | ||
458 | ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); | ||
459 | make_bad_inode(inode); | ||
460 | } | ||
461 | if (inode->i_state & I_NEW) | ||
462 | unlock_new_inode(inode); | ||
463 | if (lock_flags) | ||
464 | xfs_iunlock(ip, lock_flags); | ||
465 | IRELE(ip); | ||
466 | } | ||
467 | |||
468 | /* | ||
469 | * This is called to free all the memory associated with an inode. | ||
470 | * It must free the inode itself and any buffers allocated for | ||
471 | * if_extents/if_data and if_broot. It must also free the lock | ||
472 | * associated with the inode. | ||
473 | * | ||
474 | * Note: because we don't initialise everything on reallocation out | ||
475 | * of the zone, we must ensure we nullify everything correctly before | ||
476 | * freeing the structure. | ||
477 | */ | ||
478 | void | ||
479 | xfs_ireclaim( | ||
480 | struct xfs_inode *ip) | ||
481 | { | ||
482 | struct xfs_mount *mp = ip->i_mount; | ||
483 | struct xfs_perag *pag; | ||
484 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); | ||
485 | |||
486 | XFS_STATS_INC(xs_ig_reclaims); | ||
487 | |||
488 | /* | ||
489 | * Remove the inode from the per-AG radix tree. | ||
490 | * | ||
491 | * Because radix_tree_delete won't complain even if the item was never | ||
492 | * added to the tree assert that it's been there before to catch | ||
493 | * problems with the inode life time early on. | ||
494 | */ | ||
495 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | ||
496 | write_lock(&pag->pag_ici_lock); | ||
497 | if (!radix_tree_delete(&pag->pag_ici_root, agino)) | ||
498 | ASSERT(0); | ||
499 | write_unlock(&pag->pag_ici_lock); | ||
500 | xfs_perag_put(pag); | ||
501 | |||
502 | /* | ||
503 | * Here we do an (almost) spurious inode lock in order to coordinate | ||
504 | * with inode cache radix tree lookups. This is because the lookup | ||
505 | * can reference the inodes in the cache without taking references. | ||
506 | * | ||
507 | * We make that OK here by ensuring that we wait until the inode is | ||
508 | * unlocked after the lookup before we go ahead and free it. We get | ||
509 | * both the ilock and the iolock because the code may need to drop the | ||
510 | * ilock one but will still hold the iolock. | ||
511 | */ | ||
512 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
513 | xfs_qm_dqdetach(ip); | ||
514 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
515 | |||
516 | xfs_inode_free(ip); | ||
517 | } | ||
518 | |||
519 | /* | ||
520 | * This is a wrapper routine around the xfs_ilock() routine | 418 | * This is a wrapper routine around the xfs_ilock() routine |
521 | * used to centralize some grungy code. It is used in places | 419 | * used to centralize some grungy code. It is used in places |
522 | * that wish to lock the inode solely for reading the extents. | 420 | * that wish to lock the inode solely for reading the extents. |
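Together with the xfs_inode.h hunk further down (which renames XFS_IGET_BULKSTAT to XFS_IGET_UNTRUSTED and drops the xfs_daddr_t parameter from the prototype), the xfs_iget() changes above remove the bulkstat block-number hint entirely. A hedged sketch of how a caller holding an inode number from user space would use the new interface; example_lookup_untrusted is an invented name and the transaction and lock arguments are simplified:

	/*
	 * Sketch: resolve an untrusted (user-supplied) inode number.
	 * No block-number hint is passed any more; XFS_IGET_UNTRUSTED
	 * makes the mapping code verify the number against the inode
	 * btree before the inode buffer is read.
	 */
	static int example_lookup_untrusted(struct xfs_mount *mp, xfs_ino_t ino,
					    struct xfs_inode **ipp)
	{
		return xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, ipp);
	}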
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d53c39de7d05..68415cb4f23c 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -27,13 +27,10 @@ | |||
27 | #include "xfs_trans_priv.h" | 27 | #include "xfs_trans_priv.h" |
28 | #include "xfs_sb.h" | 28 | #include "xfs_sb.h" |
29 | #include "xfs_ag.h" | 29 | #include "xfs_ag.h" |
30 | #include "xfs_dir2.h" | ||
31 | #include "xfs_dmapi.h" | ||
32 | #include "xfs_mount.h" | 30 | #include "xfs_mount.h" |
33 | #include "xfs_bmap_btree.h" | 31 | #include "xfs_bmap_btree.h" |
34 | #include "xfs_alloc_btree.h" | 32 | #include "xfs_alloc_btree.h" |
35 | #include "xfs_ialloc_btree.h" | 33 | #include "xfs_ialloc_btree.h" |
36 | #include "xfs_dir2_sf.h" | ||
37 | #include "xfs_attr_sf.h" | 34 | #include "xfs_attr_sf.h" |
38 | #include "xfs_dinode.h" | 35 | #include "xfs_dinode.h" |
39 | #include "xfs_inode.h" | 36 | #include "xfs_inode.h" |
@@ -44,7 +41,6 @@ | |||
44 | #include "xfs_alloc.h" | 41 | #include "xfs_alloc.h" |
45 | #include "xfs_ialloc.h" | 42 | #include "xfs_ialloc.h" |
46 | #include "xfs_bmap.h" | 43 | #include "xfs_bmap.h" |
47 | #include "xfs_rw.h" | ||
48 | #include "xfs_error.h" | 44 | #include "xfs_error.h" |
49 | #include "xfs_utils.h" | 45 | #include "xfs_utils.h" |
50 | #include "xfs_quota.h" | 46 | #include "xfs_quota.h" |
@@ -177,7 +173,7 @@ xfs_imap_to_bp( | |||
177 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, | 173 | if (unlikely(XFS_TEST_ERROR(!di_ok, mp, |
178 | XFS_ERRTAG_ITOBP_INOTOBP, | 174 | XFS_ERRTAG_ITOBP_INOTOBP, |
179 | XFS_RANDOM_ITOBP_INOTOBP))) { | 175 | XFS_RANDOM_ITOBP_INOTOBP))) { |
180 | if (iget_flags & XFS_IGET_BULKSTAT) { | 176 | if (iget_flags & XFS_IGET_UNTRUSTED) { |
181 | xfs_trans_brelse(tp, bp); | 177 | xfs_trans_brelse(tp, bp); |
182 | return XFS_ERROR(EINVAL); | 178 | return XFS_ERROR(EINVAL); |
183 | } | 179 | } |
@@ -426,7 +422,7 @@ xfs_iformat( | |||
426 | if (!XFS_DFORK_Q(dip)) | 422 | if (!XFS_DFORK_Q(dip)) |
427 | return 0; | 423 | return 0; |
428 | ASSERT(ip->i_afp == NULL); | 424 | ASSERT(ip->i_afp == NULL); |
429 | ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); | 425 | ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS); |
430 | ip->i_afp->if_ext_max = | 426 | ip->i_afp->if_ext_max = |
431 | XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); | 427 | XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); |
432 | switch (dip->di_aformat) { | 428 | switch (dip->di_aformat) { |
@@ -509,7 +505,7 @@ xfs_iformat_local( | |||
509 | ifp->if_u1.if_data = ifp->if_u2.if_inline_data; | 505 | ifp->if_u1.if_data = ifp->if_u2.if_inline_data; |
510 | else { | 506 | else { |
511 | real_size = roundup(size, 4); | 507 | real_size = roundup(size, 4); |
512 | ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); | 508 | ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS); |
513 | } | 509 | } |
514 | ifp->if_bytes = size; | 510 | ifp->if_bytes = size; |
515 | ifp->if_real_bytes = real_size; | 511 | ifp->if_real_bytes = real_size; |
@@ -636,7 +632,7 @@ xfs_iformat_btree( | |||
636 | } | 632 | } |
637 | 633 | ||
638 | ifp->if_broot_bytes = size; | 634 | ifp->if_broot_bytes = size; |
639 | ifp->if_broot = kmem_alloc(size, KM_SLEEP); | 635 | ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS); |
640 | ASSERT(ifp->if_broot != NULL); | 636 | ASSERT(ifp->if_broot != NULL); |
641 | /* | 637 | /* |
642 | * Copy and convert from the on-disk structure | 638 | * Copy and convert from the on-disk structure |
@@ -787,7 +783,6 @@ xfs_iread( | |||
787 | xfs_mount_t *mp, | 783 | xfs_mount_t *mp, |
788 | xfs_trans_t *tp, | 784 | xfs_trans_t *tp, |
789 | xfs_inode_t *ip, | 785 | xfs_inode_t *ip, |
790 | xfs_daddr_t bno, | ||
791 | uint iget_flags) | 786 | uint iget_flags) |
792 | { | 787 | { |
793 | xfs_buf_t *bp; | 788 | xfs_buf_t *bp; |
@@ -797,11 +792,9 @@ xfs_iread( | |||
797 | /* | 792 | /* |
798 | * Fill in the location information in the in-core inode. | 793 | * Fill in the location information in the in-core inode. |
799 | */ | 794 | */ |
800 | ip->i_imap.im_blkno = bno; | ||
801 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); | 795 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); |
802 | if (error) | 796 | if (error) |
803 | return error; | 797 | return error; |
804 | ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); | ||
805 | 798 | ||
806 | /* | 799 | /* |
807 | * Get pointers to the on-disk inode and the buffer containing it. | 800 | * Get pointers to the on-disk inode and the buffer containing it. |
@@ -925,7 +918,6 @@ xfs_iread_extents( | |||
925 | int error; | 918 | int error; |
926 | xfs_ifork_t *ifp; | 919 | xfs_ifork_t *ifp; |
927 | xfs_extnum_t nextents; | 920 | xfs_extnum_t nextents; |
928 | size_t size; | ||
929 | 921 | ||
930 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { | 922 | if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { |
931 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, | 923 | XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, |
@@ -933,7 +925,6 @@ xfs_iread_extents( | |||
933 | return XFS_ERROR(EFSCORRUPTED); | 925 | return XFS_ERROR(EFSCORRUPTED); |
934 | } | 926 | } |
935 | nextents = XFS_IFORK_NEXTENTS(ip, whichfork); | 927 | nextents = XFS_IFORK_NEXTENTS(ip, whichfork); |
936 | size = nextents * sizeof(xfs_bmbt_rec_t); | ||
937 | ifp = XFS_IFORK_PTR(ip, whichfork); | 928 | ifp = XFS_IFORK_PTR(ip, whichfork); |
938 | 929 | ||
939 | /* | 930 | /* |
@@ -1229,7 +1220,7 @@ xfs_isize_check( | |||
1229 | (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - | 1220 | (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - |
1230 | map_first), | 1221 | map_first), |
1231 | XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, | 1222 | XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps, |
1232 | NULL, NULL)) | 1223 | NULL)) |
1233 | return; | 1224 | return; |
1234 | ASSERT(nimaps == 1); | 1225 | ASSERT(nimaps == 1); |
1235 | ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); | 1226 | ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); |
@@ -1463,7 +1454,7 @@ xfs_itruncate_finish( | |||
1463 | ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); | 1454 | ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); |
1464 | ASSERT(ip->i_transp == *tp); | 1455 | ASSERT(ip->i_transp == *tp); |
1465 | ASSERT(ip->i_itemp != NULL); | 1456 | ASSERT(ip->i_itemp != NULL); |
1466 | ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD); | 1457 | ASSERT(ip->i_itemp->ili_lock_flags == 0); |
1467 | 1458 | ||
1468 | 1459 | ||
1469 | ntp = *tp; | 1460 | ntp = *tp; |
@@ -1592,11 +1583,10 @@ xfs_itruncate_finish( | |||
1592 | xfs_bmap_init(&free_list, &first_block); | 1583 | xfs_bmap_init(&free_list, &first_block); |
1593 | error = xfs_bunmapi(ntp, ip, | 1584 | error = xfs_bunmapi(ntp, ip, |
1594 | first_unmap_block, unmap_len, | 1585 | first_unmap_block, unmap_len, |
1595 | xfs_bmapi_aflag(fork) | | 1586 | xfs_bmapi_aflag(fork), |
1596 | (sync ? 0 : XFS_BMAPI_ASYNC), | ||
1597 | XFS_ITRUNC_MAX_EXTENTS, | 1587 | XFS_ITRUNC_MAX_EXTENTS, |
1598 | &first_block, &free_list, | 1588 | &first_block, &free_list, |
1599 | NULL, &done); | 1589 | &done); |
1600 | if (error) { | 1590 | if (error) { |
1601 | /* | 1591 | /* |
1602 | * If the bunmapi call encounters an error, | 1592 | * If the bunmapi call encounters an error, |
@@ -1615,12 +1605,8 @@ xfs_itruncate_finish( | |||
1615 | */ | 1605 | */ |
1616 | error = xfs_bmap_finish(tp, &free_list, &committed); | 1606 | error = xfs_bmap_finish(tp, &free_list, &committed); |
1617 | ntp = *tp; | 1607 | ntp = *tp; |
1618 | if (committed) { | 1608 | if (committed) |
1619 | /* link the inode into the next xact in the chain */ | 1609 | xfs_trans_ijoin(ntp, ip); |
1620 | xfs_trans_ijoin(ntp, ip, | ||
1621 | XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
1622 | xfs_trans_ihold(ntp, ip); | ||
1623 | } | ||
1624 | 1610 | ||
1625 | if (error) { | 1611 | if (error) { |
1626 | /* | 1612 | /* |
@@ -1649,9 +1635,7 @@ xfs_itruncate_finish( | |||
1649 | error = xfs_trans_commit(*tp, 0); | 1635 | error = xfs_trans_commit(*tp, 0); |
1650 | *tp = ntp; | 1636 | *tp = ntp; |
1651 | 1637 | ||
1652 | /* link the inode into the next transaction in the chain */ | 1638 | xfs_trans_ijoin(ntp, ip); |
1653 | xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
1654 | xfs_trans_ihold(ntp, ip); | ||
1655 | 1639 | ||
1656 | if (error) | 1640 | if (error) |
1657 | return error; | 1641 | return error; |
@@ -1988,7 +1972,7 @@ xfs_ifree_cluster( | |||
1988 | if (lip->li_type == XFS_LI_INODE) { | 1972 | if (lip->li_type == XFS_LI_INODE) { |
1989 | iip = (xfs_inode_log_item_t *)lip; | 1973 | iip = (xfs_inode_log_item_t *)lip; |
1990 | ASSERT(iip->ili_logged == 1); | 1974 | ASSERT(iip->ili_logged == 1); |
1991 | lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; | 1975 | lip->li_cb = xfs_istale_done; |
1992 | xfs_trans_ail_copy_lsn(mp->m_ail, | 1976 | xfs_trans_ail_copy_lsn(mp->m_ail, |
1993 | &iip->ili_flush_lsn, | 1977 | &iip->ili_flush_lsn, |
1994 | &iip->ili_item.li_lsn); | 1978 | &iip->ili_item.li_lsn); |
@@ -2058,9 +2042,8 @@ xfs_ifree_cluster( | |||
2058 | xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, | 2042 | xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, |
2059 | &iip->ili_item.li_lsn); | 2043 | &iip->ili_item.li_lsn); |
2060 | 2044 | ||
2061 | xfs_buf_attach_iodone(bp, | 2045 | xfs_buf_attach_iodone(bp, xfs_istale_done, |
2062 | (void(*)(xfs_buf_t*,xfs_log_item_t*)) | 2046 | &iip->ili_item); |
2063 | xfs_istale_done, (xfs_log_item_t *)iip); | ||
2064 | 2047 | ||
2065 | if (ip != free_ip) | 2048 | if (ip != free_ip) |
2066 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 2049 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
@@ -2206,7 +2189,7 @@ xfs_iroot_realloc( | |||
2206 | */ | 2189 | */ |
2207 | if (ifp->if_broot_bytes == 0) { | 2190 | if (ifp->if_broot_bytes == 0) { |
2208 | new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); | 2191 | new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); |
2209 | ifp->if_broot = kmem_alloc(new_size, KM_SLEEP); | 2192 | ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); |
2210 | ifp->if_broot_bytes = (int)new_size; | 2193 | ifp->if_broot_bytes = (int)new_size; |
2211 | return; | 2194 | return; |
2212 | } | 2195 | } |
@@ -2222,7 +2205,7 @@ xfs_iroot_realloc( | |||
2222 | new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); | 2205 | new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); |
2223 | ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, | 2206 | ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, |
2224 | (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ | 2207 | (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ |
2225 | KM_SLEEP); | 2208 | KM_SLEEP | KM_NOFS); |
2226 | op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, | 2209 | op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, |
2227 | ifp->if_broot_bytes); | 2210 | ifp->if_broot_bytes); |
2228 | np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, | 2211 | np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, |
@@ -2248,7 +2231,7 @@ xfs_iroot_realloc( | |||
2248 | else | 2231 | else |
2249 | new_size = 0; | 2232 | new_size = 0; |
2250 | if (new_size > 0) { | 2233 | if (new_size > 0) { |
2251 | new_broot = kmem_alloc(new_size, KM_SLEEP); | 2234 | new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS); |
2252 | /* | 2235 | /* |
2253 | * First copy over the btree block header. | 2236 | * First copy over the btree block header. |
2254 | */ | 2237 | */ |
@@ -2352,7 +2335,8 @@ xfs_idata_realloc( | |||
2352 | real_size = roundup(new_size, 4); | 2335 | real_size = roundup(new_size, 4); |
2353 | if (ifp->if_u1.if_data == NULL) { | 2336 | if (ifp->if_u1.if_data == NULL) { |
2354 | ASSERT(ifp->if_real_bytes == 0); | 2337 | ASSERT(ifp->if_real_bytes == 0); |
2355 | ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); | 2338 | ifp->if_u1.if_data = kmem_alloc(real_size, |
2339 | KM_SLEEP | KM_NOFS); | ||
2356 | } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { | 2340 | } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) { |
2357 | /* | 2341 | /* |
2358 | * Only do the realloc if the underlying size | 2342 | * Only do the realloc if the underlying size |
@@ -2363,11 +2347,12 @@ xfs_idata_realloc( | |||
2363 | kmem_realloc(ifp->if_u1.if_data, | 2347 | kmem_realloc(ifp->if_u1.if_data, |
2364 | real_size, | 2348 | real_size, |
2365 | ifp->if_real_bytes, | 2349 | ifp->if_real_bytes, |
2366 | KM_SLEEP); | 2350 | KM_SLEEP | KM_NOFS); |
2367 | } | 2351 | } |
2368 | } else { | 2352 | } else { |
2369 | ASSERT(ifp->if_real_bytes == 0); | 2353 | ASSERT(ifp->if_real_bytes == 0); |
2370 | ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP); | 2354 | ifp->if_u1.if_data = kmem_alloc(real_size, |
2355 | KM_SLEEP | KM_NOFS); | ||
2371 | memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, | 2356 | memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data, |
2372 | ifp->if_bytes); | 2357 | ifp->if_bytes); |
2373 | } | 2358 | } |
@@ -2734,7 +2719,6 @@ cluster_corrupt_out: | |||
2734 | * mark it as stale and brelse. | 2719 | * mark it as stale and brelse. |
2735 | */ | 2720 | */ |
2736 | if (XFS_BUF_IODONE_FUNC(bp)) { | 2721 | if (XFS_BUF_IODONE_FUNC(bp)) { |
2737 | XFS_BUF_CLR_BDSTRAT_FUNC(bp); | ||
2738 | XFS_BUF_UNDONE(bp); | 2722 | XFS_BUF_UNDONE(bp); |
2739 | XFS_BUF_STALE(bp); | 2723 | XFS_BUF_STALE(bp); |
2740 | XFS_BUF_ERROR(bp,EIO); | 2724 | XFS_BUF_ERROR(bp,EIO); |
@@ -3072,8 +3056,7 @@ xfs_iflush_int( | |||
3072 | * and unlock the inode's flush lock when the inode is | 3056 | * and unlock the inode's flush lock when the inode is |
3073 | * completely written to disk. | 3057 | * completely written to disk. |
3074 | */ | 3058 | */ |
3075 | xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) | 3059 | xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); |
3076 | xfs_iflush_done, (xfs_log_item_t *)iip); | ||
3077 | 3060 | ||
3078 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); | 3061 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); |
3079 | ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); | 3062 | ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); |
@@ -3517,13 +3500,11 @@ xfs_iext_remove_indirect( | |||
3517 | xfs_extnum_t ext_diff; /* extents to remove in current list */ | 3500 | xfs_extnum_t ext_diff; /* extents to remove in current list */ |
3518 | xfs_extnum_t nex1; /* number of extents before idx */ | 3501 | xfs_extnum_t nex1; /* number of extents before idx */ |
3519 | xfs_extnum_t nex2; /* extents after idx + count */ | 3502 | xfs_extnum_t nex2; /* extents after idx + count */ |
3520 | int nlists; /* entries in indirection array */ | ||
3521 | int page_idx = idx; /* index in target extent list */ | 3503 | int page_idx = idx; /* index in target extent list */ |
3522 | 3504 | ||
3523 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); | 3505 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); |
3524 | erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); | 3506 | erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); |
3525 | ASSERT(erp != NULL); | 3507 | ASSERT(erp != NULL); |
3526 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; | ||
3527 | nex1 = page_idx; | 3508 | nex1 = page_idx; |
3528 | ext_cnt = count; | 3509 | ext_cnt = count; |
3529 | while (ext_cnt) { | 3510 | while (ext_cnt) { |
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 9965e40a4615..0898c5417d12 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -442,9 +442,7 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) | |||
442 | * xfs_iget.c prototypes. | 442 | * xfs_iget.c prototypes. |
443 | */ | 443 | */ |
444 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, | 444 | int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, |
445 | uint, uint, xfs_inode_t **, xfs_daddr_t); | 445 | uint, uint, xfs_inode_t **); |
446 | void xfs_iput(xfs_inode_t *, uint); | ||
447 | void xfs_iput_new(xfs_inode_t *, uint); | ||
448 | void xfs_ilock(xfs_inode_t *, uint); | 446 | void xfs_ilock(xfs_inode_t *, uint); |
449 | int xfs_ilock_nowait(xfs_inode_t *, uint); | 447 | int xfs_ilock_nowait(xfs_inode_t *, uint); |
450 | void xfs_iunlock(xfs_inode_t *, uint); | 448 | void xfs_iunlock(xfs_inode_t *, uint); |
@@ -452,7 +450,7 @@ void xfs_ilock_demote(xfs_inode_t *, uint); | |||
452 | int xfs_isilocked(xfs_inode_t *, uint); | 450 | int xfs_isilocked(xfs_inode_t *, uint); |
453 | uint xfs_ilock_map_shared(xfs_inode_t *); | 451 | uint xfs_ilock_map_shared(xfs_inode_t *); |
454 | void xfs_iunlock_map_shared(xfs_inode_t *, uint); | 452 | void xfs_iunlock_map_shared(xfs_inode_t *, uint); |
455 | void xfs_ireclaim(xfs_inode_t *); | 453 | void xfs_inode_free(struct xfs_inode *ip); |
456 | 454 | ||
457 | /* | 455 | /* |
458 | * xfs_inode.c prototypes. | 456 | * xfs_inode.c prototypes. |
@@ -500,7 +498,7 @@ do { \ | |||
500 | * Flags for xfs_iget() | 498 | * Flags for xfs_iget() |
501 | */ | 499 | */ |
502 | #define XFS_IGET_CREATE 0x1 | 500 | #define XFS_IGET_CREATE 0x1 |
503 | #define XFS_IGET_BULKSTAT 0x2 | 501 | #define XFS_IGET_UNTRUSTED 0x2 |
504 | 502 | ||
505 | int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, | 503 | int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, |
506 | xfs_ino_t, struct xfs_dinode **, | 504 | xfs_ino_t, struct xfs_dinode **, |
@@ -509,7 +507,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, | |||
509 | struct xfs_inode *, struct xfs_dinode **, | 507 | struct xfs_inode *, struct xfs_dinode **, |
510 | struct xfs_buf **, uint); | 508 | struct xfs_buf **, uint); |
511 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, | 509 | int xfs_iread(struct xfs_mount *, struct xfs_trans *, |
512 | struct xfs_inode *, xfs_daddr_t, uint); | 510 | struct xfs_inode *, uint); |
513 | void xfs_dinode_to_disk(struct xfs_dinode *, | 511 | void xfs_dinode_to_disk(struct xfs_dinode *, |
514 | struct xfs_icdinode *); | 512 | struct xfs_icdinode *); |
515 | void xfs_idestroy_fork(struct xfs_inode *, int); | 513 | void xfs_idestroy_fork(struct xfs_inode *, int); |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index cf8249a60004..fe00777e2796 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -22,30 +22,26 @@ | |||
22 | #include "xfs_log.h" | 22 | #include "xfs_log.h" |
23 | #include "xfs_inum.h" | 23 | #include "xfs_inum.h" |
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_buf_item.h" | ||
26 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
27 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
28 | #include "xfs_dir2.h" | ||
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
31 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
32 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
33 | #include "xfs_alloc_btree.h" | ||
34 | #include "xfs_ialloc_btree.h" | ||
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | ||
37 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
38 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
39 | #include "xfs_inode_item.h" | 32 | #include "xfs_inode_item.h" |
40 | #include "xfs_btree.h" | ||
41 | #include "xfs_ialloc.h" | ||
42 | #include "xfs_rw.h" | ||
43 | #include "xfs_error.h" | 33 | #include "xfs_error.h" |
44 | #include "xfs_trace.h" | 34 | #include "xfs_trace.h" |
45 | 35 | ||
46 | 36 | ||
47 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ | 37 | kmem_zone_t *xfs_ili_zone; /* inode log item zone */ |
48 | 38 | ||
39 | static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip) | ||
40 | { | ||
41 | return container_of(lip, struct xfs_inode_log_item, ili_item); | ||
42 | } | ||
43 | |||
44 | |||
49 | /* | 45 | /* |
50 | * This returns the number of iovecs needed to log the given inode item. | 46 | * This returns the number of iovecs needed to log the given inode item. |
51 | * | 47 | * |
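The INODE_ITEM() helper added in the hunk above is the container_of idiom: the log-item callbacks now receive the generic struct xfs_log_item embedded in the type-specific item, and the helper recovers the enclosing structure from the address of that member. A self-contained user-space illustration of the same idiom (all names here are invented for the example; the kernel provides container_of() itself):

	#include <stddef.h>
	#include <stdio.h>

	/* User-space stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct log_item   { int type; };			/* generic part            */
	struct inode_item { long ino; struct log_item li; };	/* embeds the generic part */

	static struct inode_item *INODE_ITEM(struct log_item *lip)
	{
		return container_of(lip, struct inode_item, li);
	}

	int main(void)
	{
		struct inode_item ii = { .ino = 42, .li = { .type = 1 } };
		struct log_item *lip = &ii.li;		/* what a callback would get */

		printf("%ld\n", INODE_ITEM(lip)->ino);	/* prints 42 */
		return 0;
	}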
@@ -55,13 +51,11 @@ kmem_zone_t *xfs_ili_zone; /* inode log item zone */ | |||
55 | */ | 51 | */ |
56 | STATIC uint | 52 | STATIC uint |
57 | xfs_inode_item_size( | 53 | xfs_inode_item_size( |
58 | xfs_inode_log_item_t *iip) | 54 | struct xfs_log_item *lip) |
59 | { | 55 | { |
60 | uint nvecs; | 56 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
61 | xfs_inode_t *ip; | 57 | struct xfs_inode *ip = iip->ili_inode; |
62 | 58 | uint nvecs = 2; | |
63 | ip = iip->ili_inode; | ||
64 | nvecs = 2; | ||
65 | 59 | ||
66 | /* | 60 | /* |
67 | * Only log the data/extents/b-tree root if there is something | 61 | * Only log the data/extents/b-tree root if there is something |
@@ -212,21 +206,17 @@ xfs_inode_item_size( | |||
212 | */ | 206 | */ |
213 | STATIC void | 207 | STATIC void |
214 | xfs_inode_item_format( | 208 | xfs_inode_item_format( |
215 | xfs_inode_log_item_t *iip, | 209 | struct xfs_log_item *lip, |
216 | xfs_log_iovec_t *log_vector) | 210 | struct xfs_log_iovec *vecp) |
217 | { | 211 | { |
212 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||
213 | struct xfs_inode *ip = iip->ili_inode; | ||
218 | uint nvecs; | 214 | uint nvecs; |
219 | xfs_log_iovec_t *vecp; | ||
220 | xfs_inode_t *ip; | ||
221 | size_t data_bytes; | 215 | size_t data_bytes; |
222 | xfs_bmbt_rec_t *ext_buffer; | 216 | xfs_bmbt_rec_t *ext_buffer; |
223 | int nrecs; | ||
224 | xfs_mount_t *mp; | 217 | xfs_mount_t *mp; |
225 | 218 | ||
226 | ip = iip->ili_inode; | 219 | vecp->i_addr = &iip->ili_format; |
227 | vecp = log_vector; | ||
228 | |||
229 | vecp->i_addr = (xfs_caddr_t)&iip->ili_format; | ||
230 | vecp->i_len = sizeof(xfs_inode_log_format_t); | 220 | vecp->i_len = sizeof(xfs_inode_log_format_t); |
231 | vecp->i_type = XLOG_REG_TYPE_IFORMAT; | 221 | vecp->i_type = XLOG_REG_TYPE_IFORMAT; |
232 | vecp++; | 222 | vecp++; |
@@ -277,7 +267,7 @@ xfs_inode_item_format( | |||
277 | */ | 267 | */ |
278 | xfs_synchronize_times(ip); | 268 | xfs_synchronize_times(ip); |
279 | 269 | ||
280 | vecp->i_addr = (xfs_caddr_t)&ip->i_d; | 270 | vecp->i_addr = &ip->i_d; |
281 | vecp->i_len = sizeof(struct xfs_icdinode); | 271 | vecp->i_len = sizeof(struct xfs_icdinode); |
282 | vecp->i_type = XLOG_REG_TYPE_ICORE; | 272 | vecp->i_type = XLOG_REG_TYPE_ICORE; |
283 | vecp++; | 273 | vecp++; |
@@ -323,18 +313,17 @@ xfs_inode_item_format( | |||
323 | ASSERT(ip->i_df.if_u1.if_extents != NULL); | 313 | ASSERT(ip->i_df.if_u1.if_extents != NULL); |
324 | ASSERT(ip->i_d.di_nextents > 0); | 314 | ASSERT(ip->i_d.di_nextents > 0); |
325 | ASSERT(iip->ili_extents_buf == NULL); | 315 | ASSERT(iip->ili_extents_buf == NULL); |
326 | nrecs = ip->i_df.if_bytes / | 316 | ASSERT((ip->i_df.if_bytes / |
327 | (uint)sizeof(xfs_bmbt_rec_t); | 317 | (uint)sizeof(xfs_bmbt_rec_t)) > 0); |
328 | ASSERT(nrecs > 0); | ||
329 | #ifdef XFS_NATIVE_HOST | 318 | #ifdef XFS_NATIVE_HOST |
330 | if (nrecs == ip->i_d.di_nextents) { | 319 | if (ip->i_d.di_nextents == ip->i_df.if_bytes / |
320 | (uint)sizeof(xfs_bmbt_rec_t)) { | ||
331 | /* | 321 | /* |
332 | * There are no delayed allocation | 322 | * There are no delayed allocation |
333 | * extents, so just point to the | 323 | * extents, so just point to the |
334 | * real extents array. | 324 | * real extents array. |
335 | */ | 325 | */ |
336 | vecp->i_addr = | 326 | vecp->i_addr = ip->i_df.if_u1.if_extents; |
337 | (char *)(ip->i_df.if_u1.if_extents); | ||
338 | vecp->i_len = ip->i_df.if_bytes; | 327 | vecp->i_len = ip->i_df.if_bytes; |
339 | vecp->i_type = XLOG_REG_TYPE_IEXT; | 328 | vecp->i_type = XLOG_REG_TYPE_IEXT; |
340 | } else | 329 | } else |
@@ -352,7 +341,7 @@ xfs_inode_item_format( | |||
352 | ext_buffer = kmem_alloc(ip->i_df.if_bytes, | 341 | ext_buffer = kmem_alloc(ip->i_df.if_bytes, |
353 | KM_SLEEP); | 342 | KM_SLEEP); |
354 | iip->ili_extents_buf = ext_buffer; | 343 | iip->ili_extents_buf = ext_buffer; |
355 | vecp->i_addr = (xfs_caddr_t)ext_buffer; | 344 | vecp->i_addr = ext_buffer; |
356 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, | 345 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, |
357 | XFS_DATA_FORK); | 346 | XFS_DATA_FORK); |
358 | vecp->i_type = XLOG_REG_TYPE_IEXT; | 347 | vecp->i_type = XLOG_REG_TYPE_IEXT; |
@@ -371,7 +360,7 @@ xfs_inode_item_format( | |||
371 | if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) { | 360 | if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) { |
372 | ASSERT(ip->i_df.if_broot_bytes > 0); | 361 | ASSERT(ip->i_df.if_broot_bytes > 0); |
373 | ASSERT(ip->i_df.if_broot != NULL); | 362 | ASSERT(ip->i_df.if_broot != NULL); |
374 | vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; | 363 | vecp->i_addr = ip->i_df.if_broot; |
375 | vecp->i_len = ip->i_df.if_broot_bytes; | 364 | vecp->i_len = ip->i_df.if_broot_bytes; |
376 | vecp->i_type = XLOG_REG_TYPE_IBROOT; | 365 | vecp->i_type = XLOG_REG_TYPE_IBROOT; |
377 | vecp++; | 366 | vecp++; |
@@ -389,7 +378,7 @@ xfs_inode_item_format( | |||
389 | ASSERT(ip->i_df.if_u1.if_data != NULL); | 378 | ASSERT(ip->i_df.if_u1.if_data != NULL); |
390 | ASSERT(ip->i_d.di_size > 0); | 379 | ASSERT(ip->i_d.di_size > 0); |
391 | 380 | ||
392 | vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data; | 381 | vecp->i_addr = ip->i_df.if_u1.if_data; |
393 | /* | 382 | /* |
394 | * Round i_bytes up to a word boundary. | 383 | * Round i_bytes up to a word boundary. |
395 | * The underlying memory is guaranteed to | 384 | * The underlying memory is guaranteed to |
@@ -437,7 +426,7 @@ xfs_inode_item_format( | |||
437 | * Assert that no attribute-related log flags are set. | 426 | * Assert that no attribute-related log flags are set. |
438 | */ | 427 | */ |
439 | if (!XFS_IFORK_Q(ip)) { | 428 | if (!XFS_IFORK_Q(ip)) { |
440 | ASSERT(nvecs == iip->ili_item.li_desc->lid_size); | 429 | ASSERT(nvecs == lip->li_desc->lid_size); |
441 | iip->ili_format.ilf_size = nvecs; | 430 | iip->ili_format.ilf_size = nvecs; |
442 | ASSERT(!(iip->ili_format.ilf_fields & | 431 | ASSERT(!(iip->ili_format.ilf_fields & |
443 | (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); | 432 | (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT))); |
@@ -449,21 +438,21 @@ xfs_inode_item_format( | |||
449 | ASSERT(!(iip->ili_format.ilf_fields & | 438 | ASSERT(!(iip->ili_format.ilf_fields & |
450 | (XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); | 439 | (XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); |
451 | if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { | 440 | if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { |
452 | ASSERT(ip->i_afp->if_bytes > 0); | ||
453 | ASSERT(ip->i_afp->if_u1.if_extents != NULL); | ||
454 | ASSERT(ip->i_d.di_anextents > 0); | ||
455 | #ifdef DEBUG | 441 | #ifdef DEBUG |
456 | nrecs = ip->i_afp->if_bytes / | 442 | int nrecs = ip->i_afp->if_bytes / |
457 | (uint)sizeof(xfs_bmbt_rec_t); | 443 | (uint)sizeof(xfs_bmbt_rec_t); |
458 | #endif | ||
459 | ASSERT(nrecs > 0); | 444 | ASSERT(nrecs > 0); |
460 | ASSERT(nrecs == ip->i_d.di_anextents); | 445 | ASSERT(nrecs == ip->i_d.di_anextents); |
446 | ASSERT(ip->i_afp->if_bytes > 0); | ||
447 | ASSERT(ip->i_afp->if_u1.if_extents != NULL); | ||
448 | ASSERT(ip->i_d.di_anextents > 0); | ||
449 | #endif | ||
461 | #ifdef XFS_NATIVE_HOST | 450 | #ifdef XFS_NATIVE_HOST |
462 | /* | 451 | /* |
463 | * There are no delayed allocation extents | 452 | * There are no delayed allocation extents |
464 | * for attributes, so just point at the array. | 453 | * for attributes, so just point at the array. |
465 | */ | 454 | */ |
466 | vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents); | 455 | vecp->i_addr = ip->i_afp->if_u1.if_extents; |
467 | vecp->i_len = ip->i_afp->if_bytes; | 456 | vecp->i_len = ip->i_afp->if_bytes; |
468 | #else | 457 | #else |
469 | ASSERT(iip->ili_aextents_buf == NULL); | 458 | ASSERT(iip->ili_aextents_buf == NULL); |
@@ -473,7 +462,7 @@ xfs_inode_item_format( | |||
473 | ext_buffer = kmem_alloc(ip->i_afp->if_bytes, | 462 | ext_buffer = kmem_alloc(ip->i_afp->if_bytes, |
474 | KM_SLEEP); | 463 | KM_SLEEP); |
475 | iip->ili_aextents_buf = ext_buffer; | 464 | iip->ili_aextents_buf = ext_buffer; |
476 | vecp->i_addr = (xfs_caddr_t)ext_buffer; | 465 | vecp->i_addr = ext_buffer; |
477 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, | 466 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, |
478 | XFS_ATTR_FORK); | 467 | XFS_ATTR_FORK); |
479 | #endif | 468 | #endif |
@@ -490,7 +479,7 @@ xfs_inode_item_format( | |||
490 | if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) { | 479 | if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) { |
491 | ASSERT(ip->i_afp->if_broot_bytes > 0); | 480 | ASSERT(ip->i_afp->if_broot_bytes > 0); |
492 | ASSERT(ip->i_afp->if_broot != NULL); | 481 | ASSERT(ip->i_afp->if_broot != NULL); |
493 | vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; | 482 | vecp->i_addr = ip->i_afp->if_broot; |
494 | vecp->i_len = ip->i_afp->if_broot_bytes; | 483 | vecp->i_len = ip->i_afp->if_broot_bytes; |
495 | vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; | 484 | vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT; |
496 | vecp++; | 485 | vecp++; |
@@ -506,7 +495,7 @@ xfs_inode_item_format( | |||
506 | ASSERT(ip->i_afp->if_bytes > 0); | 495 | ASSERT(ip->i_afp->if_bytes > 0); |
507 | ASSERT(ip->i_afp->if_u1.if_data != NULL); | 496 | ASSERT(ip->i_afp->if_u1.if_data != NULL); |
508 | 497 | ||
509 | vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data; | 498 | vecp->i_addr = ip->i_afp->if_u1.if_data; |
510 | /* | 499 | /* |
511 | * Round i_bytes up to a word boundary. | 500 | * Round i_bytes up to a word boundary. |
512 | * The underlying memory is guaranteed to | 501 | * The underlying memory is guaranteed to |
@@ -528,7 +517,7 @@ xfs_inode_item_format( | |||
528 | break; | 517 | break; |
529 | } | 518 | } |
530 | 519 | ||
531 | ASSERT(nvecs == iip->ili_item.li_desc->lid_size); | 520 | ASSERT(nvecs == lip->li_desc->lid_size); |
532 | iip->ili_format.ilf_size = nvecs; | 521 | iip->ili_format.ilf_size = nvecs; |
533 | } | 522 | } |
534 | 523 | ||
@@ -539,12 +528,14 @@ xfs_inode_item_format( | |||
539 | */ | 528 | */ |
540 | STATIC void | 529 | STATIC void |
541 | xfs_inode_item_pin( | 530 | xfs_inode_item_pin( |
542 | xfs_inode_log_item_t *iip) | 531 | struct xfs_log_item *lip) |
543 | { | 532 | { |
544 | ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); | 533 | struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; |
534 | |||
535 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | ||
545 | 536 | ||
546 | trace_xfs_inode_pin(iip->ili_inode, _RET_IP_); | 537 | trace_xfs_inode_pin(ip, _RET_IP_); |
547 | atomic_inc(&iip->ili_inode->i_pincount); | 538 | atomic_inc(&ip->i_pincount); |
548 | } | 539 | } |
549 | 540 | ||
550 | 541 | ||
@@ -554,12 +545,12 @@ xfs_inode_item_pin( | |||
554 | * | 545 | * |
555 | * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0. | 546 | * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0. |
556 | */ | 547 | */ |
557 | /* ARGSUSED */ | ||
558 | STATIC void | 548 | STATIC void |
559 | xfs_inode_item_unpin( | 549 | xfs_inode_item_unpin( |
560 | xfs_inode_log_item_t *iip) | 550 | struct xfs_log_item *lip, |
551 | int remove) | ||
561 | { | 552 | { |
562 | struct xfs_inode *ip = iip->ili_inode; | 553 | struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; |
563 | 554 | ||
564 | trace_xfs_inode_unpin(ip, _RET_IP_); | 555 | trace_xfs_inode_unpin(ip, _RET_IP_); |
565 | ASSERT(atomic_read(&ip->i_pincount) > 0); | 556 | ASSERT(atomic_read(&ip->i_pincount) > 0); |
@@ -567,15 +558,6 @@ xfs_inode_item_unpin( | |||
567 | wake_up(&ip->i_ipin_wait); | 558 | wake_up(&ip->i_ipin_wait); |
568 | } | 559 | } |
569 | 560 | ||
570 | /* ARGSUSED */ | ||
571 | STATIC void | ||
572 | xfs_inode_item_unpin_remove( | ||
573 | xfs_inode_log_item_t *iip, | ||
574 | xfs_trans_t *tp) | ||
575 | { | ||
576 | xfs_inode_item_unpin(iip); | ||
577 | } | ||
578 | |||
579 | /* | 561 | /* |
580 | * This is called to attempt to lock the inode associated with this | 562 | * This is called to attempt to lock the inode associated with this |
581 | * inode log item, in preparation for the push routine which does the actual | 563 | * inode log item, in preparation for the push routine which does the actual |
@@ -591,19 +573,16 @@ xfs_inode_item_unpin_remove( | |||
591 | */ | 573 | */ |
592 | STATIC uint | 574 | STATIC uint |
593 | xfs_inode_item_trylock( | 575 | xfs_inode_item_trylock( |
594 | xfs_inode_log_item_t *iip) | 576 | struct xfs_log_item *lip) |
595 | { | 577 | { |
596 | register xfs_inode_t *ip; | 578 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
597 | 579 | struct xfs_inode *ip = iip->ili_inode; | |
598 | ip = iip->ili_inode; | ||
599 | 580 | ||
600 | if (xfs_ipincount(ip) > 0) { | 581 | if (xfs_ipincount(ip) > 0) |
601 | return XFS_ITEM_PINNED; | 582 | return XFS_ITEM_PINNED; |
602 | } | ||
603 | 583 | ||
604 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { | 584 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) |
605 | return XFS_ITEM_LOCKED; | 585 | return XFS_ITEM_LOCKED; |
606 | } | ||
607 | 586 | ||
608 | if (!xfs_iflock_nowait(ip)) { | 587 | if (!xfs_iflock_nowait(ip)) { |
609 | /* | 588 | /* |
@@ -629,7 +608,7 @@ xfs_inode_item_trylock( | |||
629 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { | 608 | if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
630 | ASSERT(iip->ili_format.ilf_fields != 0); | 609 | ASSERT(iip->ili_format.ilf_fields != 0); |
631 | ASSERT(iip->ili_logged == 0); | 610 | ASSERT(iip->ili_logged == 0); |
632 | ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL); | 611 | ASSERT(lip->li_flags & XFS_LI_IN_AIL); |
633 | } | 612 | } |
634 | #endif | 613 | #endif |
635 | return XFS_ITEM_SUCCESS; | 614 | return XFS_ITEM_SUCCESS; |
@@ -643,26 +622,18 @@ xfs_inode_item_trylock( | |||
643 | */ | 622 | */ |
644 | STATIC void | 623 | STATIC void |
645 | xfs_inode_item_unlock( | 624 | xfs_inode_item_unlock( |
646 | xfs_inode_log_item_t *iip) | 625 | struct xfs_log_item *lip) |
647 | { | 626 | { |
648 | uint hold; | 627 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
649 | uint iolocked; | 628 | struct xfs_inode *ip = iip->ili_inode; |
650 | uint lock_flags; | 629 | unsigned short lock_flags; |
651 | xfs_inode_t *ip; | ||
652 | 630 | ||
653 | ASSERT(iip != NULL); | ||
654 | ASSERT(iip->ili_inode->i_itemp != NULL); | 631 | ASSERT(iip->ili_inode->i_itemp != NULL); |
655 | ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); | 632 | ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); |
656 | ASSERT((!(iip->ili_inode->i_itemp->ili_flags & | 633 | |
657 | XFS_ILI_IOLOCKED_EXCL)) || | ||
658 | xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL)); | ||
659 | ASSERT((!(iip->ili_inode->i_itemp->ili_flags & | ||
660 | XFS_ILI_IOLOCKED_SHARED)) || | ||
661 | xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED)); | ||
662 | /* | 634 | /* |
663 | * Clear the transaction pointer in the inode. | 635 | * Clear the transaction pointer in the inode. |
664 | */ | 636 | */ |
665 | ip = iip->ili_inode; | ||
666 | ip->i_transp = NULL; | 637 | ip->i_transp = NULL; |
667 | 638 | ||
668 | /* | 639 | /* |
@@ -686,34 +657,11 @@ xfs_inode_item_unlock( | |||
686 | iip->ili_aextents_buf = NULL; | 657 | iip->ili_aextents_buf = NULL; |
687 | } | 658 | } |
688 | 659 | ||
689 | /* | 660 | lock_flags = iip->ili_lock_flags; |
690 | * Figure out if we should unlock the inode or not. | 661 | iip->ili_lock_flags = 0; |
691 | */ | 662 | if (lock_flags) { |
692 | hold = iip->ili_flags & XFS_ILI_HOLD; | 663 | xfs_iunlock(iip->ili_inode, lock_flags); |
693 | 664 | IRELE(iip->ili_inode); | |
694 | /* | ||
695 | * Before clearing out the flags, remember whether we | ||
696 | * are holding the inode's IO lock. | ||
697 | */ | ||
698 | iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY; | ||
699 | |||
700 | /* | ||
701 | * Clear out the fields of the inode log item particular | ||
702 | * to the current transaction. | ||
703 | */ | ||
704 | iip->ili_flags = 0; | ||
705 | |||
706 | /* | ||
707 | * Unlock the inode if XFS_ILI_HOLD was not set. | ||
708 | */ | ||
709 | if (!hold) { | ||
710 | lock_flags = XFS_ILOCK_EXCL; | ||
711 | if (iolocked & XFS_ILI_IOLOCKED_EXCL) { | ||
712 | lock_flags |= XFS_IOLOCK_EXCL; | ||
713 | } else if (iolocked & XFS_ILI_IOLOCKED_SHARED) { | ||
714 | lock_flags |= XFS_IOLOCK_SHARED; | ||
715 | } | ||
716 | xfs_iput(iip->ili_inode, lock_flags); | ||
717 | } | 665 | } |
718 | } | 666 | } |
719 | 667 | ||
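The rewritten unlock path replaces the XFS_ILI_HOLD/IOLOCKED flag juggling with a single ili_lock_flags value recording which locks the transaction owns. The join side is not shown in this hunk; a hedged sketch of what a join-with-lock helper is assumed to look like (the function name here is illustrative, not necessarily the one the patch series uses):

```c
/*
 * Illustrative counterpart to the unlock path above (assumed shape, not
 * quoted from the patch): joining an inode and handing its lock to the
 * transaction stashes the lock flags so that ->iop_unlock can release the
 * lock and drop the reference taken here via IRELE().
 */
static void
example_ijoin_with_lock(struct xfs_trans *tp, struct xfs_inode *ip,
			uint lock_flags)
{
	xfs_trans_ijoin(tp, ip);		/* attach the inode log item */
	IHOLD(ip);				/* reference now owned by the transaction */
	ip->i_itemp->ili_lock_flags = lock_flags; /* released in xfs_inode_item_unlock() */
}
```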
@@ -725,13 +673,12 @@ xfs_inode_item_unlock( | |||
725 | * is the only one that matters. Therefore, simply return the | 673 | * is the only one that matters. Therefore, simply return the |
726 | * given lsn. | 674 | * given lsn. |
727 | */ | 675 | */ |
728 | /*ARGSUSED*/ | ||
729 | STATIC xfs_lsn_t | 676 | STATIC xfs_lsn_t |
730 | xfs_inode_item_committed( | 677 | xfs_inode_item_committed( |
731 | xfs_inode_log_item_t *iip, | 678 | struct xfs_log_item *lip, |
732 | xfs_lsn_t lsn) | 679 | xfs_lsn_t lsn) |
733 | { | 680 | { |
734 | return (lsn); | 681 | return lsn; |
735 | } | 682 | } |
736 | 683 | ||
737 | /* | 684 | /* |
@@ -743,13 +690,12 @@ xfs_inode_item_committed( | |||
743 | */ | 690 | */ |
744 | STATIC void | 691 | STATIC void |
745 | xfs_inode_item_pushbuf( | 692 | xfs_inode_item_pushbuf( |
746 | xfs_inode_log_item_t *iip) | 693 | struct xfs_log_item *lip) |
747 | { | 694 | { |
748 | xfs_inode_t *ip; | 695 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
749 | xfs_mount_t *mp; | 696 | struct xfs_inode *ip = iip->ili_inode; |
750 | xfs_buf_t *bp; | 697 | struct xfs_buf *bp; |
751 | 698 | ||
752 | ip = iip->ili_inode; | ||
753 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); | 699 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); |
754 | 700 | ||
755 | /* | 701 | /* |
@@ -757,14 +703,13 @@ xfs_inode_item_pushbuf( | |||
757 | * inode was taken off the AIL. So, just get out. | 703 | * inode was taken off the AIL. So, just get out. |
758 | */ | 704 | */ |
759 | if (completion_done(&ip->i_flush) || | 705 | if (completion_done(&ip->i_flush) || |
760 | ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) { | 706 | !(lip->li_flags & XFS_LI_IN_AIL)) { |
761 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 707 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
762 | return; | 708 | return; |
763 | } | 709 | } |
764 | 710 | ||
765 | mp = ip->i_mount; | 711 | bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, |
766 | bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno, | 712 | iip->ili_format.ilf_len, XBF_TRYLOCK); |
767 | iip->ili_format.ilf_len, XBF_TRYLOCK); | ||
768 | 713 | ||
769 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 714 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
770 | if (!bp) | 715 | if (!bp) |
@@ -772,10 +717,8 @@ xfs_inode_item_pushbuf( | |||
772 | if (XFS_BUF_ISDELAYWRITE(bp)) | 717 | if (XFS_BUF_ISDELAYWRITE(bp)) |
773 | xfs_buf_delwri_promote(bp); | 718 | xfs_buf_delwri_promote(bp); |
774 | xfs_buf_relse(bp); | 719 | xfs_buf_relse(bp); |
775 | return; | ||
776 | } | 720 | } |
777 | 721 | ||
778 | |||
779 | /* | 722 | /* |
780 | * This is called to asynchronously write the inode associated with this | 723 | * This is called to asynchronously write the inode associated with this |
781 | * inode log item out to disk. The inode will already have been locked by | 724 | * inode log item out to disk. The inode will already have been locked by |
@@ -783,14 +726,14 @@ xfs_inode_item_pushbuf( | |||
783 | */ | 726 | */ |
784 | STATIC void | 727 | STATIC void |
785 | xfs_inode_item_push( | 728 | xfs_inode_item_push( |
786 | xfs_inode_log_item_t *iip) | 729 | struct xfs_log_item *lip) |
787 | { | 730 | { |
788 | xfs_inode_t *ip; | 731 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
789 | 732 | struct xfs_inode *ip = iip->ili_inode; | |
790 | ip = iip->ili_inode; | ||
791 | 733 | ||
792 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); | 734 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); |
793 | ASSERT(!completion_done(&ip->i_flush)); | 735 | ASSERT(!completion_done(&ip->i_flush)); |
736 | |||
794 | /* | 737 | /* |
795 | * Since we were able to lock the inode's flush lock and | 738 | * Since we were able to lock the inode's flush lock and |
796 | * we found it on the AIL, the inode must be dirty. This | 739 | * we found it on the AIL, the inode must be dirty. This |
@@ -813,43 +756,34 @@ xfs_inode_item_push( | |||
813 | */ | 756 | */ |
814 | (void) xfs_iflush(ip, 0); | 757 | (void) xfs_iflush(ip, 0); |
815 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 758 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
816 | |||
817 | return; | ||
818 | } | 759 | } |
819 | 760 | ||
820 | /* | 761 | /* |
821 | * XXX rcc - this one really has to do something. Probably needs | 762 | * XXX rcc - this one really has to do something. Probably needs |
822 | * to stamp in a new field in the incore inode. | 763 | * to stamp in a new field in the incore inode. |
823 | */ | 764 | */ |
824 | /* ARGSUSED */ | ||
825 | STATIC void | 765 | STATIC void |
826 | xfs_inode_item_committing( | 766 | xfs_inode_item_committing( |
827 | xfs_inode_log_item_t *iip, | 767 | struct xfs_log_item *lip, |
828 | xfs_lsn_t lsn) | 768 | xfs_lsn_t lsn) |
829 | { | 769 | { |
830 | iip->ili_last_lsn = lsn; | 770 | INODE_ITEM(lip)->ili_last_lsn = lsn; |
831 | return; | ||
832 | } | 771 | } |
833 | 772 | ||
834 | /* | 773 | /* |
835 | * This is the ops vector shared by all buf log items. | 774 | * This is the ops vector shared by all buf log items. |
836 | */ | 775 | */ |
837 | static struct xfs_item_ops xfs_inode_item_ops = { | 776 | static struct xfs_item_ops xfs_inode_item_ops = { |
838 | .iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size, | 777 | .iop_size = xfs_inode_item_size, |
839 | .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*)) | 778 | .iop_format = xfs_inode_item_format, |
840 | xfs_inode_item_format, | 779 | .iop_pin = xfs_inode_item_pin, |
841 | .iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin, | 780 | .iop_unpin = xfs_inode_item_unpin, |
842 | .iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin, | 781 | .iop_trylock = xfs_inode_item_trylock, |
843 | .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*)) | 782 | .iop_unlock = xfs_inode_item_unlock, |
844 | xfs_inode_item_unpin_remove, | 783 | .iop_committed = xfs_inode_item_committed, |
845 | .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock, | 784 | .iop_push = xfs_inode_item_push, |
846 | .iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock, | 785 | .iop_pushbuf = xfs_inode_item_pushbuf, |
847 | .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t)) | 786 | .iop_committing = xfs_inode_item_committing |
848 | xfs_inode_item_committed, | ||
849 | .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push, | ||
850 | .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf, | ||
851 | .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t)) | ||
852 | xfs_inode_item_committing | ||
853 | }; | 787 | }; |
854 | 788 | ||
855 | 789 | ||
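With every handler taking struct xfs_log_item *, the function-pointer casts in the ops vector disappear and the compiler can check each prototype against struct xfs_item_ops. A hedged sketch of how generic code dispatches through the vector; the helper is illustrative, while li_ops is the field the log-item code uses elsewhere:

```c
/*
 * Illustrative dispatch only -- not taken from this patch.  The point is
 * that no per-item-type cast is needed: the same call works for inode,
 * buffer and quota log items alike.
 */
static void
example_pin_log_item(struct xfs_log_item *lip)
{
	lip->li_ops->iop_pin(lip);	/* resolves to xfs_inode_item_pin() here */
}
```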
@@ -858,10 +792,10 @@ static struct xfs_item_ops xfs_inode_item_ops = { | |||
858 | */ | 792 | */ |
859 | void | 793 | void |
860 | xfs_inode_item_init( | 794 | xfs_inode_item_init( |
861 | xfs_inode_t *ip, | 795 | struct xfs_inode *ip, |
862 | xfs_mount_t *mp) | 796 | struct xfs_mount *mp) |
863 | { | 797 | { |
864 | xfs_inode_log_item_t *iip; | 798 | struct xfs_inode_log_item *iip; |
865 | 799 | ||
866 | ASSERT(ip->i_itemp == NULL); | 800 | ASSERT(ip->i_itemp == NULL); |
867 | iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); | 801 | iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP); |
@@ -899,14 +833,14 @@ xfs_inode_item_destroy( | |||
899 | * from the AIL if it has not been re-logged, and unlocking the inode's | 833 | * from the AIL if it has not been re-logged, and unlocking the inode's |
900 | * flush lock. | 834 | * flush lock. |
901 | */ | 835 | */ |
902 | /*ARGSUSED*/ | ||
903 | void | 836 | void |
904 | xfs_iflush_done( | 837 | xfs_iflush_done( |
905 | xfs_buf_t *bp, | 838 | struct xfs_buf *bp, |
906 | xfs_inode_log_item_t *iip) | 839 | struct xfs_log_item *lip) |
907 | { | 840 | { |
841 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | ||
908 | xfs_inode_t *ip = iip->ili_inode; | 842 | xfs_inode_t *ip = iip->ili_inode; |
909 | struct xfs_ail *ailp = iip->ili_item.li_ailp; | 843 | struct xfs_ail *ailp = lip->li_ailp; |
910 | 844 | ||
911 | /* | 845 | /* |
912 | * We only want to pull the item from the AIL if it is | 846 | * We only want to pull the item from the AIL if it is |
@@ -917,12 +851,11 @@ xfs_iflush_done( | |||
917 | * the lock since it's cheaper, and then we recheck while | 851 | * the lock since it's cheaper, and then we recheck while |
918 | * holding the lock before removing the inode from the AIL. | 852 | * holding the lock before removing the inode from the AIL. |
919 | */ | 853 | */ |
920 | if (iip->ili_logged && | 854 | if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) { |
921 | (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { | ||
922 | spin_lock(&ailp->xa_lock); | 855 | spin_lock(&ailp->xa_lock); |
923 | if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { | 856 | if (lip->li_lsn == iip->ili_flush_lsn) { |
924 | /* xfs_trans_ail_delete() drops the AIL lock. */ | 857 | /* xfs_trans_ail_delete() drops the AIL lock. */ |
925 | xfs_trans_ail_delete(ailp, (xfs_log_item_t*)iip); | 858 | xfs_trans_ail_delete(ailp, lip); |
926 | } else { | 859 | } else { |
927 | spin_unlock(&ailp->xa_lock); | 860 | spin_unlock(&ailp->xa_lock); |
928 | } | 861 | } |
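The AIL removal in xfs_iflush_done() keeps the cheap unlocked LSN check followed by a recheck under xa_lock. A minimal sketch of that pattern in isolation, using only the calls visible in this hunk:

```c
/*
 * Minimal sketch of the check / lock / recheck pattern above.  Taking
 * xa_lock is only worthwhile when the unlocked test says the item is still
 * at the LSN we flushed it at; xfs_trans_ail_delete() drops the lock itself.
 */
static void
example_ail_remove_if_unmoved(struct xfs_ail *ailp, struct xfs_log_item *lip,
			      xfs_lsn_t flush_lsn)
{
	if (lip->li_lsn != flush_lsn)
		return;				/* cheap, unlocked check */

	spin_lock(&ailp->xa_lock);
	if (lip->li_lsn == flush_lsn)
		xfs_trans_ail_delete(ailp, lip);	/* drops xa_lock */
	else
		spin_unlock(&ailp->xa_lock);
}
```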
@@ -940,8 +873,6 @@ xfs_iflush_done( | |||
940 | * Release the inode's flush lock since we're done with it. | 873 | * Release the inode's flush lock since we're done with it. |
941 | */ | 874 | */ |
942 | xfs_ifunlock(ip); | 875 | xfs_ifunlock(ip); |
943 | |||
944 | return; | ||
945 | } | 876 | } |
946 | 877 | ||
947 | /* | 878 | /* |
@@ -957,10 +888,8 @@ xfs_iflush_abort( | |||
957 | xfs_inode_t *ip) | 888 | xfs_inode_t *ip) |
958 | { | 889 | { |
959 | xfs_inode_log_item_t *iip = ip->i_itemp; | 890 | xfs_inode_log_item_t *iip = ip->i_itemp; |
960 | xfs_mount_t *mp; | ||
961 | 891 | ||
962 | iip = ip->i_itemp; | 892 | iip = ip->i_itemp; |
963 | mp = ip->i_mount; | ||
964 | if (iip) { | 893 | if (iip) { |
965 | struct xfs_ail *ailp = iip->ili_item.li_ailp; | 894 | struct xfs_ail *ailp = iip->ili_item.li_ailp; |
966 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { | 895 | if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { |
@@ -991,10 +920,10 @@ xfs_iflush_abort( | |||
991 | 920 | ||
992 | void | 921 | void |
993 | xfs_istale_done( | 922 | xfs_istale_done( |
994 | xfs_buf_t *bp, | 923 | struct xfs_buf *bp, |
995 | xfs_inode_log_item_t *iip) | 924 | struct xfs_log_item *lip) |
996 | { | 925 | { |
997 | xfs_iflush_abort(iip->ili_inode); | 926 | xfs_iflush_abort(INODE_ITEM(lip)->ili_inode); |
998 | } | 927 | } |
999 | 928 | ||
1000 | /* | 929 | /* |
@@ -1007,9 +936,8 @@ xfs_inode_item_format_convert( | |||
1007 | xfs_inode_log_format_t *in_f) | 936 | xfs_inode_log_format_t *in_f) |
1008 | { | 937 | { |
1009 | if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { | 938 | if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { |
1010 | xfs_inode_log_format_32_t *in_f32; | 939 | xfs_inode_log_format_32_t *in_f32 = buf->i_addr; |
1011 | 940 | ||
1012 | in_f32 = (xfs_inode_log_format_32_t *)buf->i_addr; | ||
1013 | in_f->ilf_type = in_f32->ilf_type; | 941 | in_f->ilf_type = in_f32->ilf_type; |
1014 | in_f->ilf_size = in_f32->ilf_size; | 942 | in_f->ilf_size = in_f32->ilf_size; |
1015 | in_f->ilf_fields = in_f32->ilf_fields; | 943 | in_f->ilf_fields = in_f32->ilf_fields; |
@@ -1025,9 +953,8 @@ xfs_inode_item_format_convert( | |||
1025 | in_f->ilf_boffset = in_f32->ilf_boffset; | 953 | in_f->ilf_boffset = in_f32->ilf_boffset; |
1026 | return 0; | 954 | return 0; |
1027 | } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ | 955 | } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ |
1028 | xfs_inode_log_format_64_t *in_f64; | 956 | xfs_inode_log_format_64_t *in_f64 = buf->i_addr; |
1029 | 957 | ||
1030 | in_f64 = (xfs_inode_log_format_64_t *)buf->i_addr; | ||
1031 | in_f->ilf_type = in_f64->ilf_type; | 958 | in_f->ilf_type = in_f64->ilf_type; |
1032 | in_f->ilf_size = in_f64->ilf_size; | 959 | in_f->ilf_size = in_f64->ilf_size; |
1033 | in_f->ilf_fields = in_f64->ilf_fields; | 960 | in_f->ilf_fields = in_f64->ilf_fields; |
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 9a467958ecdd..d3dee61e6d91 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h | |||
@@ -103,12 +103,6 @@ typedef struct xfs_inode_log_format_64 { | |||
103 | XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ | 103 | XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ |
104 | XFS_ILOG_ABROOT) | 104 | XFS_ILOG_ABROOT) |
105 | 105 | ||
106 | #define XFS_ILI_HOLD 0x1 | ||
107 | #define XFS_ILI_IOLOCKED_EXCL 0x2 | ||
108 | #define XFS_ILI_IOLOCKED_SHARED 0x4 | ||
109 | |||
110 | #define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) | ||
111 | |||
112 | static inline int xfs_ilog_fbroot(int w) | 106 | static inline int xfs_ilog_fbroot(int w) |
113 | { | 107 | { |
114 | return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); | 108 | return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); |
@@ -137,7 +131,7 @@ typedef struct xfs_inode_log_item { | |||
137 | struct xfs_inode *ili_inode; /* inode ptr */ | 131 | struct xfs_inode *ili_inode; /* inode ptr */ |
138 | xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ | 132 | xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ |
139 | xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ | 133 | xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ |
140 | unsigned short ili_flags; /* misc flags */ | 134 | unsigned short ili_lock_flags; /* lock flags */ |
141 | unsigned short ili_logged; /* flushed logged data */ | 135 | unsigned short ili_logged; /* flushed logged data */ |
142 | unsigned int ili_last_fields; /* fields when flushed */ | 136 | unsigned int ili_last_fields; /* fields when flushed */ |
143 | struct xfs_bmbt_rec *ili_extents_buf; /* array of logged | 137 | struct xfs_bmbt_rec *ili_extents_buf; /* array of logged |
@@ -161,8 +155,8 @@ static inline int xfs_inode_clean(xfs_inode_t *ip) | |||
161 | 155 | ||
162 | extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); | 156 | extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); |
163 | extern void xfs_inode_item_destroy(struct xfs_inode *); | 157 | extern void xfs_inode_item_destroy(struct xfs_inode *); |
164 | extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *); | 158 | extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *); |
165 | extern void xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *); | 159 | extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *); |
166 | extern void xfs_iflush_abort(struct xfs_inode *); | 160 | extern void xfs_iflush_abort(struct xfs_inode *); |
167 | extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, | 161 | extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, |
168 | xfs_inode_log_format_t *); | 162 | xfs_inode_log_format_t *); |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ef14943829da..20576146369f 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -23,19 +23,14 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | ||
27 | #include "xfs_alloc.h" | 26 | #include "xfs_alloc.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_quota.h" | 27 | #include "xfs_quota.h" |
30 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
33 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_btree.h" | 34 | #include "xfs_btree.h" |
40 | #include "xfs_bmap.h" | 35 | #include "xfs_bmap.h" |
41 | #include "xfs_rtalloc.h" | 36 | #include "xfs_rtalloc.h" |
@@ -123,7 +118,7 @@ xfs_iomap( | |||
123 | error = xfs_bmapi(NULL, ip, offset_fsb, | 118 | error = xfs_bmapi(NULL, ip, offset_fsb, |
124 | (xfs_filblks_t)(end_fsb - offset_fsb), | 119 | (xfs_filblks_t)(end_fsb - offset_fsb), |
125 | bmapi_flags, NULL, 0, imap, | 120 | bmapi_flags, NULL, 0, imap, |
126 | nimaps, NULL, NULL); | 121 | nimaps, NULL); |
127 | 122 | ||
128 | if (error) | 123 | if (error) |
129 | goto out; | 124 | goto out; |
@@ -138,7 +133,7 @@ xfs_iomap( | |||
138 | break; | 133 | break; |
139 | } | 134 | } |
140 | 135 | ||
141 | if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) { | 136 | if (flags & BMAPI_DIRECT) { |
142 | error = xfs_iomap_write_direct(ip, offset, count, flags, | 137 | error = xfs_iomap_write_direct(ip, offset, count, flags, |
143 | imap, nimaps); | 138 | imap, nimaps); |
144 | } else { | 139 | } else { |
@@ -247,7 +242,7 @@ xfs_iomap_write_direct( | |||
247 | xfs_off_t offset, | 242 | xfs_off_t offset, |
248 | size_t count, | 243 | size_t count, |
249 | int flags, | 244 | int flags, |
250 | xfs_bmbt_irec_t *ret_imap, | 245 | xfs_bmbt_irec_t *imap, |
251 | int *nmaps) | 246 | int *nmaps) |
252 | { | 247 | { |
253 | xfs_mount_t *mp = ip->i_mount; | 248 | xfs_mount_t *mp = ip->i_mount; |
@@ -261,7 +256,6 @@ xfs_iomap_write_direct( | |||
261 | int quota_flag; | 256 | int quota_flag; |
262 | int rt; | 257 | int rt; |
263 | xfs_trans_t *tp; | 258 | xfs_trans_t *tp; |
264 | xfs_bmbt_irec_t imap; | ||
265 | xfs_bmap_free_t free_list; | 259 | xfs_bmap_free_t free_list; |
266 | uint qblocks, resblks, resrtextents; | 260 | uint qblocks, resblks, resrtextents; |
267 | int committed; | 261 | int committed; |
@@ -285,10 +279,10 @@ xfs_iomap_write_direct( | |||
285 | if (error) | 279 | if (error) |
286 | goto error_out; | 280 | goto error_out; |
287 | } else { | 281 | } else { |
288 | if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK)) | 282 | if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK)) |
289 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) | 283 | last_fsb = MIN(last_fsb, (xfs_fileoff_t) |
290 | ret_imap->br_blockcount + | 284 | imap->br_blockcount + |
291 | ret_imap->br_startoff); | 285 | imap->br_startoff); |
292 | } | 286 | } |
293 | count_fsb = last_fsb - offset_fsb; | 287 | count_fsb = last_fsb - offset_fsb; |
294 | ASSERT(count_fsb > 0); | 288 | ASSERT(count_fsb > 0); |
@@ -334,20 +328,22 @@ xfs_iomap_write_direct( | |||
334 | if (error) | 328 | if (error) |
335 | goto error1; | 329 | goto error1; |
336 | 330 | ||
337 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 331 | xfs_trans_ijoin(tp, ip); |
338 | xfs_trans_ihold(tp, ip); | ||
339 | 332 | ||
340 | bmapi_flag = XFS_BMAPI_WRITE; | 333 | bmapi_flag = XFS_BMAPI_WRITE; |
341 | if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz)) | 334 | if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz)) |
342 | bmapi_flag |= XFS_BMAPI_PREALLOC; | 335 | bmapi_flag |= XFS_BMAPI_PREALLOC; |
343 | 336 | ||
344 | /* | 337 | /* |
345 | * Issue the xfs_bmapi() call to allocate the blocks | 338 | * Issue the xfs_bmapi() call to allocate the blocks. |
339 | * | ||
340 | * From this point onwards we overwrite the imap pointer that the | ||
341 | * caller gave to us. | ||
346 | */ | 342 | */ |
347 | xfs_bmap_init(&free_list, &firstfsb); | 343 | xfs_bmap_init(&free_list, &firstfsb); |
348 | nimaps = 1; | 344 | nimaps = 1; |
349 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, | 345 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, |
350 | &firstfsb, 0, &imap, &nimaps, &free_list, NULL); | 346 | &firstfsb, 0, imap, &nimaps, &free_list); |
351 | if (error) | 347 | if (error) |
352 | goto error0; | 348 | goto error0; |
353 | 349 | ||
@@ -369,12 +365,11 @@ xfs_iomap_write_direct( | |||
369 | goto error_out; | 365 | goto error_out; |
370 | } | 366 | } |
371 | 367 | ||
372 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) { | 368 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { |
373 | error = xfs_cmn_err_fsblock_zero(ip, &imap); | 369 | error = xfs_cmn_err_fsblock_zero(ip, imap); |
374 | goto error_out; | 370 | goto error_out; |
375 | } | 371 | } |
376 | 372 | ||
377 | *ret_imap = imap; | ||
378 | *nmaps = 1; | 373 | *nmaps = 1; |
379 | return 0; | 374 | return 0; |
380 | 375 | ||
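xfs_iomap_write_direct() now allocates straight into the caller's mapping instead of a local imap that had to be copied back. A hedged usage sketch of the new contract; the caller function is hypothetical:

```c
/*
 * Hedged usage sketch: imap is both input (any existing mapping hint from a
 * prior read-only xfs_bmapi() call) and output (the allocated extent), so
 * there is no copy-back step on success.
 */
static int
example_direct_alloc(struct xfs_inode *ip, xfs_off_t offset, size_t count,
		     struct xfs_bmbt_irec *imap)
{
	int	nimaps = 1;

	return xfs_iomap_write_direct(ip, offset, count, BMAPI_DIRECT,
				      imap, &nimaps);
}
```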
@@ -425,7 +420,7 @@ xfs_iomap_eof_want_preallocate( | |||
425 | imaps = nimaps; | 420 | imaps = nimaps; |
426 | firstblock = NULLFSBLOCK; | 421 | firstblock = NULLFSBLOCK; |
427 | error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, | 422 | error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, |
428 | &firstblock, 0, imap, &imaps, NULL, NULL); | 423 | &firstblock, 0, imap, &imaps, NULL); |
429 | if (error) | 424 | if (error) |
430 | return error; | 425 | return error; |
431 | for (n = 0; n < imaps; n++) { | 426 | for (n = 0; n < imaps; n++) { |
@@ -500,7 +495,7 @@ retry: | |||
500 | (xfs_filblks_t)(last_fsb - offset_fsb), | 495 | (xfs_filblks_t)(last_fsb - offset_fsb), |
501 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | | 496 | XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | |
502 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, | 497 | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, |
503 | &nimaps, NULL, NULL); | 498 | &nimaps, NULL); |
504 | if (error && (error != ENOSPC)) | 499 | if (error && (error != ENOSPC)) |
505 | return XFS_ERROR(error); | 500 | return XFS_ERROR(error); |
506 | 501 | ||
@@ -548,7 +543,7 @@ xfs_iomap_write_allocate( | |||
548 | xfs_inode_t *ip, | 543 | xfs_inode_t *ip, |
549 | xfs_off_t offset, | 544 | xfs_off_t offset, |
550 | size_t count, | 545 | size_t count, |
551 | xfs_bmbt_irec_t *map, | 546 | xfs_bmbt_irec_t *imap, |
552 | int *retmap) | 547 | int *retmap) |
553 | { | 548 | { |
554 | xfs_mount_t *mp = ip->i_mount; | 549 | xfs_mount_t *mp = ip->i_mount; |
@@ -557,7 +552,6 @@ xfs_iomap_write_allocate( | |||
557 | xfs_fsblock_t first_block; | 552 | xfs_fsblock_t first_block; |
558 | xfs_bmap_free_t free_list; | 553 | xfs_bmap_free_t free_list; |
559 | xfs_filblks_t count_fsb; | 554 | xfs_filblks_t count_fsb; |
560 | xfs_bmbt_irec_t imap; | ||
561 | xfs_trans_t *tp; | 555 | xfs_trans_t *tp; |
562 | int nimaps, committed; | 556 | int nimaps, committed; |
563 | int error = 0; | 557 | int error = 0; |
@@ -573,8 +567,8 @@ xfs_iomap_write_allocate( | |||
573 | return XFS_ERROR(error); | 567 | return XFS_ERROR(error); |
574 | 568 | ||
575 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 569 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
576 | count_fsb = map->br_blockcount; | 570 | count_fsb = imap->br_blockcount; |
577 | map_start_fsb = map->br_startoff; | 571 | map_start_fsb = imap->br_startoff; |
578 | 572 | ||
579 | XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); | 573 | XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb)); |
580 | 574 | ||
@@ -602,8 +596,7 @@ xfs_iomap_write_allocate( | |||
602 | return XFS_ERROR(error); | 596 | return XFS_ERROR(error); |
603 | } | 597 | } |
604 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 598 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
605 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 599 | xfs_trans_ijoin(tp, ip); |
606 | xfs_trans_ihold(tp, ip); | ||
607 | 600 | ||
608 | xfs_bmap_init(&free_list, &first_block); | 601 | xfs_bmap_init(&free_list, &first_block); |
609 | 602 | ||
@@ -654,10 +647,15 @@ xfs_iomap_write_allocate( | |||
654 | } | 647 | } |
655 | } | 648 | } |
656 | 649 | ||
657 | /* Go get the actual blocks */ | 650 | /* |
651 | * Go get the actual blocks. | ||
652 | * | ||
653 | * From this point onwards we overwrite the imap | ||
654 | * pointer that the caller gave to us. | ||
655 | */ | ||
658 | error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, | 656 | error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, |
659 | XFS_BMAPI_WRITE, &first_block, 1, | 657 | XFS_BMAPI_WRITE, &first_block, 1, |
660 | &imap, &nimaps, &free_list, NULL); | 658 | imap, &nimaps, &free_list); |
661 | if (error) | 659 | if (error) |
662 | goto trans_cancel; | 660 | goto trans_cancel; |
663 | 661 | ||
@@ -676,13 +674,12 @@ xfs_iomap_write_allocate( | |||
676 | * See if we were able to allocate an extent that | 674 | * See if we were able to allocate an extent that |
677 | * covers at least part of the callers request | 675 | * covers at least part of the callers request |
678 | */ | 676 | */ |
679 | if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) | 677 | if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) |
680 | return xfs_cmn_err_fsblock_zero(ip, &imap); | 678 | return xfs_cmn_err_fsblock_zero(ip, imap); |
681 | 679 | ||
682 | if ((offset_fsb >= imap.br_startoff) && | 680 | if ((offset_fsb >= imap->br_startoff) && |
683 | (offset_fsb < (imap.br_startoff + | 681 | (offset_fsb < (imap->br_startoff + |
684 | imap.br_blockcount))) { | 682 | imap->br_blockcount))) { |
685 | *map = imap; | ||
686 | *retmap = 1; | 683 | *retmap = 1; |
687 | XFS_STATS_INC(xs_xstrat_quick); | 684 | XFS_STATS_INC(xs_xstrat_quick); |
688 | return 0; | 685 | return 0; |
@@ -692,8 +689,8 @@ xfs_iomap_write_allocate( | |||
692 | * So far we have not mapped the requested part of the | 689 | * So far we have not mapped the requested part of the |
693 | * file, just surrounding data, try again. | 690 | * file, just surrounding data, try again. |
694 | */ | 691 | */ |
695 | count_fsb -= imap.br_blockcount; | 692 | count_fsb -= imap->br_blockcount; |
696 | map_start_fsb = imap.br_startoff + imap.br_blockcount; | 693 | map_start_fsb = imap->br_startoff + imap->br_blockcount; |
697 | } | 694 | } |
698 | 695 | ||
699 | trans_cancel: | 696 | trans_cancel: |
@@ -766,8 +763,7 @@ xfs_iomap_write_unwritten( | |||
766 | } | 763 | } |
767 | 764 | ||
768 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 765 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
769 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 766 | xfs_trans_ijoin(tp, ip); |
770 | xfs_trans_ihold(tp, ip); | ||
771 | 767 | ||
772 | /* | 768 | /* |
773 | * Modify the unwritten extent state of the buffer. | 769 | * Modify the unwritten extent state of the buffer. |
@@ -776,7 +772,7 @@ xfs_iomap_write_unwritten( | |||
776 | nimaps = 1; | 772 | nimaps = 1; |
777 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, | 773 | error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, |
778 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, | 774 | XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, |
779 | 1, &imap, &nimaps, &free_list, NULL); | 775 | 1, &imap, &nimaps, &free_list); |
780 | if (error) | 776 | if (error) |
781 | goto error_on_bmapi_transaction; | 777 | goto error_on_bmapi_transaction; |
782 | 778 | ||
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index 81ac4afd45b3..7748a430f50d 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h | |||
@@ -18,17 +18,16 @@ | |||
18 | #ifndef __XFS_IOMAP_H__ | 18 | #ifndef __XFS_IOMAP_H__ |
19 | #define __XFS_IOMAP_H__ | 19 | #define __XFS_IOMAP_H__ |
20 | 20 | ||
21 | typedef enum { | 21 | /* base extent manipulation calls */ |
22 | /* base extent manipulation calls */ | 22 | #define BMAPI_READ (1 << 0) /* read extents */ |
23 | BMAPI_READ = (1 << 0), /* read extents */ | 23 | #define BMAPI_WRITE (1 << 1) /* create extents */ |
24 | BMAPI_WRITE = (1 << 1), /* create extents */ | 24 | #define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */ |
25 | BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ | 25 | |
26 | /* modifiers */ | 26 | /* modifiers */ |
27 | BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ | 27 | #define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */ |
28 | BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ | 28 | #define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */ |
29 | BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ | 29 | #define BMAPI_MMA (1 << 6) /* allocate for mmap write */ |
30 | BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ | 30 | #define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */ |
31 | } bmapi_flags_t; | ||
32 | 31 | ||
33 | #define BMAPI_FLAGS \ | 32 | #define BMAPI_FLAGS \ |
34 | { BMAPI_READ, "READ" }, \ | 33 | { BMAPI_READ, "READ" }, \ |
@@ -36,7 +35,6 @@ typedef enum { | |||
36 | { BMAPI_ALLOCATE, "ALLOCATE" }, \ | 35 | { BMAPI_ALLOCATE, "ALLOCATE" }, \ |
37 | { BMAPI_IGNSTATE, "IGNSTATE" }, \ | 36 | { BMAPI_IGNSTATE, "IGNSTATE" }, \ |
38 | { BMAPI_DIRECT, "DIRECT" }, \ | 37 | { BMAPI_DIRECT, "DIRECT" }, \ |
39 | { BMAPI_MMAP, "MMAP" }, \ | ||
40 | { BMAPI_TRYLOCK, "TRYLOCK" } | 38 | { BMAPI_TRYLOCK, "TRYLOCK" } |
41 | 39 | ||
42 | struct xfs_inode; | 40 | struct xfs_inode; |
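The BMAPI bits move from a typedef'd enum to plain #defines, the usual kernel convention for OR-able flags, since a combined value such as write-plus-trylock is not a member of any enum. A tiny illustrative snippet, not from the patch:

```c
/* Illustrative only: combined flag values are ordinary integers. */
static int
example_bmapi_flags(void)
{
	return BMAPI_WRITE | BMAPI_TRYLOCK;	/* (1 << 1) | (1 << 7) == 0x82 */
}
```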
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index b1b801e4a28e..7e3626e5925c 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
@@ -24,20 +24,17 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_ialloc.h" | 33 | #include "xfs_ialloc.h" |
38 | #include "xfs_itable.h" | 34 | #include "xfs_itable.h" |
39 | #include "xfs_error.h" | 35 | #include "xfs_error.h" |
40 | #include "xfs_btree.h" | 36 | #include "xfs_btree.h" |
37 | #include "xfs_trace.h" | ||
41 | 38 | ||
42 | STATIC int | 39 | STATIC int |
43 | xfs_internal_inum( | 40 | xfs_internal_inum( |
@@ -49,24 +46,40 @@ xfs_internal_inum( | |||
49 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); | 46 | (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino))); |
50 | } | 47 | } |
51 | 48 | ||
52 | STATIC int | 49 | /* |
53 | xfs_bulkstat_one_iget( | 50 | * Return stat information for one inode. |
54 | xfs_mount_t *mp, /* mount point for filesystem */ | 51 | * Return 0 if ok, else errno. |
55 | xfs_ino_t ino, /* inode number to get data for */ | 52 | */ |
56 | xfs_daddr_t bno, /* starting bno of inode cluster */ | 53 | int |
57 | xfs_bstat_t *buf, /* return buffer */ | 54 | xfs_bulkstat_one_int( |
58 | int *stat) /* BULKSTAT_RV_... */ | 55 | struct xfs_mount *mp, /* mount point for filesystem */ |
56 | xfs_ino_t ino, /* inode to get data for */ | ||
57 | void __user *buffer, /* buffer to place output in */ | ||
58 | int ubsize, /* size of buffer */ | ||
59 | bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ | ||
60 | int *ubused, /* bytes used by me */ | ||
61 | int *stat) /* BULKSTAT_RV_... */ | ||
59 | { | 62 | { |
60 | xfs_icdinode_t *dic; /* dinode core info pointer */ | 63 | struct xfs_icdinode *dic; /* dinode core info pointer */ |
61 | xfs_inode_t *ip; /* incore inode pointer */ | 64 | struct xfs_inode *ip; /* incore inode pointer */ |
62 | struct inode *inode; | 65 | struct inode *inode; |
63 | int error; | 66 | struct xfs_bstat *buf; /* return buffer */ |
67 | int error = 0; /* error value */ | ||
68 | |||
69 | *stat = BULKSTAT_RV_NOTHING; | ||
70 | |||
71 | if (!buffer || xfs_internal_inum(mp, ino)) | ||
72 | return XFS_ERROR(EINVAL); | ||
73 | |||
74 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); | ||
75 | if (!buf) | ||
76 | return XFS_ERROR(ENOMEM); | ||
64 | 77 | ||
65 | error = xfs_iget(mp, NULL, ino, | 78 | error = xfs_iget(mp, NULL, ino, |
66 | XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno); | 79 | XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip); |
67 | if (error) { | 80 | if (error) { |
68 | *stat = BULKSTAT_RV_NOTHING; | 81 | *stat = BULKSTAT_RV_NOTHING; |
69 | return error; | 82 | goto out_free; |
70 | } | 83 | } |
71 | 84 | ||
72 | ASSERT(ip != NULL); | 85 | ASSERT(ip != NULL); |
@@ -127,77 +140,17 @@ xfs_bulkstat_one_iget( | |||
127 | buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; | 140 | buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; |
128 | break; | 141 | break; |
129 | } | 142 | } |
143 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | ||
144 | IRELE(ip); | ||
130 | 145 | ||
131 | xfs_iput(ip, XFS_ILOCK_SHARED); | 146 | error = formatter(buffer, ubsize, ubused, buf); |
132 | return error; | ||
133 | } | ||
134 | |||
135 | STATIC void | ||
136 | xfs_bulkstat_one_dinode( | ||
137 | xfs_mount_t *mp, /* mount point for filesystem */ | ||
138 | xfs_ino_t ino, /* inode number to get data for */ | ||
139 | xfs_dinode_t *dic, /* dinode inode pointer */ | ||
140 | xfs_bstat_t *buf) /* return buffer */ | ||
141 | { | ||
142 | /* | ||
143 | * The inode format changed when we moved the link count and | ||
144 | * made it 32 bits long. If this is an old format inode, | ||
145 | * convert it in memory to look like a new one. If it gets | ||
146 | * flushed to disk we will convert back before flushing or | ||
147 | * logging it. We zero out the new projid field and the old link | ||
148 | * count field. We'll handle clearing the pad field (the remains | ||
149 | * of the old uuid field) when we actually convert the inode to | ||
150 | * the new format. We don't change the version number so that we | ||
151 | * can distinguish this from a real new format inode. | ||
152 | */ | ||
153 | if (dic->di_version == 1) { | ||
154 | buf->bs_nlink = be16_to_cpu(dic->di_onlink); | ||
155 | buf->bs_projid = 0; | ||
156 | } else { | ||
157 | buf->bs_nlink = be32_to_cpu(dic->di_nlink); | ||
158 | buf->bs_projid = be16_to_cpu(dic->di_projid); | ||
159 | } | ||
160 | 147 | ||
161 | buf->bs_ino = ino; | 148 | if (!error) |
162 | buf->bs_mode = be16_to_cpu(dic->di_mode); | 149 | *stat = BULKSTAT_RV_DIDONE; |
163 | buf->bs_uid = be32_to_cpu(dic->di_uid); | ||
164 | buf->bs_gid = be32_to_cpu(dic->di_gid); | ||
165 | buf->bs_size = be64_to_cpu(dic->di_size); | ||
166 | buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec); | ||
167 | buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec); | ||
168 | buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec); | ||
169 | buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); | ||
170 | buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); | ||
171 | buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); | ||
172 | buf->bs_xflags = xfs_dic2xflags(dic); | ||
173 | buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; | ||
174 | buf->bs_extents = be32_to_cpu(dic->di_nextents); | ||
175 | buf->bs_gen = be32_to_cpu(dic->di_gen); | ||
176 | memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); | ||
177 | buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask); | ||
178 | buf->bs_dmstate = be16_to_cpu(dic->di_dmstate); | ||
179 | buf->bs_aextents = be16_to_cpu(dic->di_anextents); | ||
180 | buf->bs_forkoff = XFS_DFORK_BOFF(dic); | ||
181 | 150 | ||
182 | switch (dic->di_format) { | 151 | out_free: |
183 | case XFS_DINODE_FMT_DEV: | 152 | kmem_free(buf); |
184 | buf->bs_rdev = xfs_dinode_get_rdev(dic); | 153 | return error; |
185 | buf->bs_blksize = BLKDEV_IOSIZE; | ||
186 | buf->bs_blocks = 0; | ||
187 | break; | ||
188 | case XFS_DINODE_FMT_LOCAL: | ||
189 | case XFS_DINODE_FMT_UUID: | ||
190 | buf->bs_rdev = 0; | ||
191 | buf->bs_blksize = mp->m_sb.sb_blocksize; | ||
192 | buf->bs_blocks = 0; | ||
193 | break; | ||
194 | case XFS_DINODE_FMT_EXTENTS: | ||
195 | case XFS_DINODE_FMT_BTREE: | ||
196 | buf->bs_rdev = 0; | ||
197 | buf->bs_blksize = mp->m_sb.sb_blocksize; | ||
198 | buf->bs_blocks = be64_to_cpu(dic->di_nblocks); | ||
199 | break; | ||
200 | } | ||
201 | } | 154 | } |
202 | 155 | ||
203 | /* Return 0 on success or positive error */ | 156 | /* Return 0 on success or positive error */ |
@@ -217,118 +170,17 @@ xfs_bulkstat_one_fmt( | |||
217 | return 0; | 170 | return 0; |
218 | } | 171 | } |
219 | 172 | ||
220 | /* | ||
221 | * Return stat information for one inode. | ||
222 | * Return 0 if ok, else errno. | ||
223 | */ | ||
224 | int /* error status */ | ||
225 | xfs_bulkstat_one_int( | ||
226 | xfs_mount_t *mp, /* mount point for filesystem */ | ||
227 | xfs_ino_t ino, /* inode number to get data for */ | ||
228 | void __user *buffer, /* buffer to place output in */ | ||
229 | int ubsize, /* size of buffer */ | ||
230 | bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ | ||
231 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
232 | int *ubused, /* bytes used by me */ | ||
233 | void *dibuff, /* on-disk inode buffer */ | ||
234 | int *stat) /* BULKSTAT_RV_... */ | ||
235 | { | ||
236 | xfs_bstat_t *buf; /* return buffer */ | ||
237 | int error = 0; /* error value */ | ||
238 | xfs_dinode_t *dip; /* dinode inode pointer */ | ||
239 | |||
240 | dip = (xfs_dinode_t *)dibuff; | ||
241 | *stat = BULKSTAT_RV_NOTHING; | ||
242 | |||
243 | if (!buffer || xfs_internal_inum(mp, ino)) | ||
244 | return XFS_ERROR(EINVAL); | ||
245 | |||
246 | buf = kmem_alloc(sizeof(*buf), KM_SLEEP); | ||
247 | |||
248 | if (dip == NULL) { | ||
249 | /* We're not being passed a pointer to a dinode. This happens | ||
250 | * if BULKSTAT_FG_IGET is selected. Do the iget. | ||
251 | */ | ||
252 | error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat); | ||
253 | if (error) | ||
254 | goto out_free; | ||
255 | } else { | ||
256 | xfs_bulkstat_one_dinode(mp, ino, dip, buf); | ||
257 | } | ||
258 | |||
259 | error = formatter(buffer, ubsize, ubused, buf); | ||
260 | if (error) | ||
261 | goto out_free; | ||
262 | |||
263 | *stat = BULKSTAT_RV_DIDONE; | ||
264 | |||
265 | out_free: | ||
266 | kmem_free(buf); | ||
267 | return error; | ||
268 | } | ||
269 | |||
270 | int | 173 | int |
271 | xfs_bulkstat_one( | 174 | xfs_bulkstat_one( |
272 | xfs_mount_t *mp, /* mount point for filesystem */ | 175 | xfs_mount_t *mp, /* mount point for filesystem */ |
273 | xfs_ino_t ino, /* inode number to get data for */ | 176 | xfs_ino_t ino, /* inode number to get data for */ |
274 | void __user *buffer, /* buffer to place output in */ | 177 | void __user *buffer, /* buffer to place output in */ |
275 | int ubsize, /* size of buffer */ | 178 | int ubsize, /* size of buffer */ |
276 | void *private_data, /* my private data */ | ||
277 | xfs_daddr_t bno, /* starting bno of inode cluster */ | ||
278 | int *ubused, /* bytes used by me */ | 179 | int *ubused, /* bytes used by me */ |
279 | void *dibuff, /* on-disk inode buffer */ | ||
280 | int *stat) /* BULKSTAT_RV_... */ | 180 | int *stat) /* BULKSTAT_RV_... */ |
281 | { | 181 | { |
282 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, | 182 | return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, |
283 | xfs_bulkstat_one_fmt, bno, | 183 | xfs_bulkstat_one_fmt, ubused, stat); |
284 | ubused, dibuff, stat); | ||
285 | } | ||
286 | |||
287 | /* | ||
288 | * Test to see whether we can use the ondisk inode directly, based | ||
289 | * on the given bulkstat flags, filling in dipp accordingly. | ||
290 | * Returns zero if the inode is dodgey. | ||
291 | */ | ||
292 | STATIC int | ||
293 | xfs_bulkstat_use_dinode( | ||
294 | xfs_mount_t *mp, | ||
295 | int flags, | ||
296 | xfs_buf_t *bp, | ||
297 | int clustidx, | ||
298 | xfs_dinode_t **dipp) | ||
299 | { | ||
300 | xfs_dinode_t *dip; | ||
301 | unsigned int aformat; | ||
302 | |||
303 | *dipp = NULL; | ||
304 | if (!bp || (flags & BULKSTAT_FG_IGET)) | ||
305 | return 1; | ||
306 | dip = (xfs_dinode_t *) | ||
307 | xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); | ||
308 | /* | ||
309 | * Check the buffer containing the on-disk inode for di_mode == 0. | ||
310 | * This is to prevent xfs_bulkstat from picking up just reclaimed | ||
311 | * inodes that have their in-core state initialized but not flushed | ||
312 | * to disk yet. This is a temporary hack that would require a proper | ||
313 | * fix in the future. | ||
314 | */ | ||
315 | if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || | ||
316 | !XFS_DINODE_GOOD_VERSION(dip->di_version) || | ||
317 | !dip->di_mode) | ||
318 | return 0; | ||
319 | if (flags & BULKSTAT_FG_QUICK) { | ||
320 | *dipp = dip; | ||
321 | return 1; | ||
322 | } | ||
323 | /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ | ||
324 | aformat = dip->di_aformat; | ||
325 | if ((XFS_DFORK_Q(dip) == 0) || | ||
326 | (aformat == XFS_DINODE_FMT_LOCAL) || | ||
327 | (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) { | ||
328 | *dipp = dip; | ||
329 | return 1; | ||
330 | } | ||
331 | return 1; | ||
332 | } | 184 | } |
333 | 185 | ||
334 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) | 186 | #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) |
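With the dinode fast paths gone, xfs_bulkstat_one() loses the private_data, bno and dinode-buffer arguments and always goes through xfs_iget() with XFS_IGET_UNTRUSTED. A hedged sketch of a caller under the new signature; the wrapper name is hypothetical, and passing NULL for ubused mirrors the 0 passed by xfs_bulkstat_single() later in this diff:

```c
/*
 * Hedged usage sketch of the reduced xfs_bulkstat_one() signature:
 * mount + inode number + user buffer is all a single lookup needs now.
 */
static int
example_stat_one(struct xfs_mount *mp, xfs_ino_t ino,
		 struct xfs_bstat __user *ubuf)
{
	int	stat;

	return xfs_bulkstat_one(mp, ino, ubuf, sizeof(*ubuf), NULL, &stat);
}
```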
@@ -342,10 +194,8 @@ xfs_bulkstat( | |||
342 | xfs_ino_t *lastinop, /* last inode returned */ | 194 | xfs_ino_t *lastinop, /* last inode returned */ |
343 | int *ubcountp, /* size of buffer/count returned */ | 195 | int *ubcountp, /* size of buffer/count returned */ |
344 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ | 196 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ |
345 | void *private_data,/* private data for formatter */ | ||
346 | size_t statstruct_size, /* sizeof struct filling */ | 197 | size_t statstruct_size, /* sizeof struct filling */ |
347 | char __user *ubuffer, /* buffer with inode stats */ | 198 | char __user *ubuffer, /* buffer with inode stats */ |
348 | int flags, /* defined in xfs_itable.h */ | ||
349 | int *done) /* 1 if there are more stats to get */ | 199 | int *done) /* 1 if there are more stats to get */ |
350 | { | 200 | { |
351 | xfs_agblock_t agbno=0;/* allocation group block number */ | 201 | xfs_agblock_t agbno=0;/* allocation group block number */ |
@@ -380,14 +230,12 @@ xfs_bulkstat( | |||
380 | int ubelem; /* spaces used in user's buffer */ | 230 | int ubelem; /* spaces used in user's buffer */ |
381 | int ubused; /* bytes used by formatter */ | 231 | int ubused; /* bytes used by formatter */ |
382 | xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ | 232 | xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ |
383 | xfs_dinode_t *dip; /* ptr into bp for specific inode */ | ||
384 | 233 | ||
385 | /* | 234 | /* |
386 | * Get the last inode value, see if there's nothing to do. | 235 | * Get the last inode value, see if there's nothing to do. |
387 | */ | 236 | */ |
388 | ino = (xfs_ino_t)*lastinop; | 237 | ino = (xfs_ino_t)*lastinop; |
389 | lastino = ino; | 238 | lastino = ino; |
390 | dip = NULL; | ||
391 | agno = XFS_INO_TO_AGNO(mp, ino); | 239 | agno = XFS_INO_TO_AGNO(mp, ino); |
392 | agino = XFS_INO_TO_AGINO(mp, ino); | 240 | agino = XFS_INO_TO_AGINO(mp, ino); |
393 | if (agno >= mp->m_sb.sb_agcount || | 241 | if (agno >= mp->m_sb.sb_agcount || |
@@ -612,37 +460,6 @@ xfs_bulkstat( | |||
612 | irbp->ir_startino) + | 460 | irbp->ir_startino) + |
613 | ((chunkidx & nimask) >> | 461 | ((chunkidx & nimask) >> |
614 | mp->m_sb.sb_inopblog); | 462 | mp->m_sb.sb_inopblog); |
615 | |||
616 | if (flags & (BULKSTAT_FG_QUICK | | ||
617 | BULKSTAT_FG_INLINE)) { | ||
618 | int offset; | ||
619 | |||
620 | ino = XFS_AGINO_TO_INO(mp, agno, | ||
621 | agino); | ||
622 | bno = XFS_AGB_TO_DADDR(mp, agno, | ||
623 | agbno); | ||
624 | |||
625 | /* | ||
626 | * Get the inode cluster buffer | ||
627 | */ | ||
628 | if (bp) | ||
629 | xfs_buf_relse(bp); | ||
630 | |||
631 | error = xfs_inotobp(mp, NULL, ino, &dip, | ||
632 | &bp, &offset, | ||
633 | XFS_IGET_BULKSTAT); | ||
634 | |||
635 | if (!error) | ||
636 | clustidx = offset / mp->m_sb.sb_inodesize; | ||
637 | if (XFS_TEST_ERROR(error != 0, | ||
638 | mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, | ||
639 | XFS_RANDOM_BULKSTAT_READ_CHUNK)) { | ||
640 | bp = NULL; | ||
641 | ubleft = 0; | ||
642 | rval = error; | ||
643 | break; | ||
644 | } | ||
645 | } | ||
646 | } | 463 | } |
647 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 464 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
648 | bno = XFS_AGB_TO_DADDR(mp, agno, agbno); | 465 | bno = XFS_AGB_TO_DADDR(mp, agno, agbno); |
@@ -658,35 +475,13 @@ xfs_bulkstat( | |||
658 | * when the chunk is used up. | 475 | * when the chunk is used up. |
659 | */ | 476 | */ |
660 | irbp->ir_freecount++; | 477 | irbp->ir_freecount++; |
661 | if (!xfs_bulkstat_use_dinode(mp, flags, bp, | ||
662 | clustidx, &dip)) { | ||
663 | lastino = ino; | ||
664 | continue; | ||
665 | } | ||
666 | /* | ||
667 | * If we need to do an iget, cannot hold bp. | ||
668 | * Drop it, until starting the next cluster. | ||
669 | */ | ||
670 | if ((flags & BULKSTAT_FG_INLINE) && !dip) { | ||
671 | if (bp) | ||
672 | xfs_buf_relse(bp); | ||
673 | bp = NULL; | ||
674 | } | ||
675 | 478 | ||
676 | /* | 479 | /* |
677 | * Get the inode and fill in a single buffer. | 480 | * Get the inode and fill in a single buffer. |
678 | * BULKSTAT_FG_QUICK uses dip to fill it in. | ||
679 | * BULKSTAT_FG_IGET uses igets. | ||
680 | * BULKSTAT_FG_INLINE uses dip if we have an | ||
681 | * inline attr fork, else igets. | ||
682 | * See: xfs_bulkstat_one & xfs_dm_bulkstat_one. | ||
683 | * This is also used to count inodes/blks, etc | ||
684 | * in xfs_qm_quotacheck. | ||
685 | */ | 481 | */ |
686 | ubused = statstruct_size; | 482 | ubused = statstruct_size; |
687 | error = formatter(mp, ino, ubufp, | 483 | error = formatter(mp, ino, ubufp, ubleft, |
688 | ubleft, private_data, | 484 | &ubused, &fmterror); |
689 | bno, &ubused, dip, &fmterror); | ||
690 | if (fmterror == BULKSTAT_RV_NOTHING) { | 485 | if (fmterror == BULKSTAT_RV_NOTHING) { |
691 | if (error && error != ENOENT && | 486 | if (error && error != ENOENT && |
692 | error != EINVAL) { | 487 | error != EINVAL) { |
@@ -778,8 +573,7 @@ xfs_bulkstat_single( | |||
778 | */ | 573 | */ |
779 | 574 | ||
780 | ino = (xfs_ino_t)*lastinop; | 575 | ino = (xfs_ino_t)*lastinop; |
781 | error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), | 576 | error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res); |
782 | NULL, 0, NULL, NULL, &res); | ||
783 | if (error) { | 577 | if (error) { |
784 | /* | 578 | /* |
785 | * Special case way failed, do it the "long" way | 579 | * Special case way failed, do it the "long" way |
@@ -788,8 +582,7 @@ xfs_bulkstat_single( | |||
788 | (*lastinop)--; | 582 | (*lastinop)--; |
789 | count = 1; | 583 | count = 1; |
790 | if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, | 584 | if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, |
791 | NULL, sizeof(xfs_bstat_t), buffer, | 585 | sizeof(xfs_bstat_t), buffer, done)) |
792 | BULKSTAT_FG_IGET, done)) | ||
793 | return error; | 586 | return error; |
794 | if (count == 0 || (xfs_ino_t)*lastinop != ino) | 587 | if (count == 0 || (xfs_ino_t)*lastinop != ino) |
795 | return error == EFSCORRUPTED ? | 588 | return error == EFSCORRUPTED ? |
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index 20792bf45946..97295d91d170 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h | |||
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, | |||
27 | xfs_ino_t ino, | 27 | xfs_ino_t ino, |
28 | void __user *buffer, | 28 | void __user *buffer, |
29 | int ubsize, | 29 | int ubsize, |
30 | void *private_data, | ||
31 | xfs_daddr_t bno, | ||
32 | int *ubused, | 30 | int *ubused, |
33 | void *dip, | ||
34 | int *stat); | 31 | int *stat); |
35 | 32 | ||
36 | /* | 33 | /* |
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp, | |||
41 | #define BULKSTAT_RV_GIVEUP 2 | 38 | #define BULKSTAT_RV_GIVEUP 2 |
42 | 39 | ||
43 | /* | 40 | /* |
44 | * Values for bulkstat flag argument. | ||
45 | */ | ||
46 | #define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */ | ||
47 | #define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */ | ||
48 | #define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */ | ||
49 | |||
50 | /* | ||
51 | * Return stat information in bulk (by-inode) for the filesystem. | 41 | * Return stat information in bulk (by-inode) for the filesystem. |
52 | */ | 42 | */ |
53 | int /* error status */ | 43 | int /* error status */ |
@@ -56,10 +46,8 @@ xfs_bulkstat( | |||
56 | xfs_ino_t *lastino, /* last inode returned */ | 46 | xfs_ino_t *lastino, /* last inode returned */ |
57 | int *count, /* size of buffer/count returned */ | 47 | int *count, /* size of buffer/count returned */ |
58 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ | 48 | bulkstat_one_pf formatter, /* func that'd fill a single buf */ |
59 | void *private_data, /* private data for formatter */ | ||
60 | size_t statstruct_size,/* sizeof struct that we're filling */ | 49 | size_t statstruct_size,/* sizeof struct that we're filling */ |
61 | char __user *ubuffer,/* buffer with inode stats */ | 50 | char __user *ubuffer,/* buffer with inode stats */ |
62 | int flags, /* flag to control access method */ | ||
63 | int *done); /* 1 if there are more stats to get */ | 51 | int *done); /* 1 if there are more stats to get */ |
64 | 52 | ||
65 | int | 53 | int |
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int( | |||
82 | void __user *buffer, | 70 | void __user *buffer, |
83 | int ubsize, | 71 | int ubsize, |
84 | bulkstat_one_fmt_pf formatter, | 72 | bulkstat_one_fmt_pf formatter, |
85 | xfs_daddr_t bno, | ||
86 | int *ubused, | 73 | int *ubused, |
87 | void *dibuff, | ||
88 | int *stat); | 74 | int *stat); |
89 | 75 | ||
90 | int | 76 | int |
@@ -93,10 +79,7 @@ xfs_bulkstat_one( | |||
93 | xfs_ino_t ino, | 79 | xfs_ino_t ino, |
94 | void __user *buffer, | 80 | void __user *buffer, |
95 | int ubsize, | 81 | int ubsize, |
96 | void *private_data, | ||
97 | xfs_daddr_t bno, | ||
98 | int *ubused, | 82 | int *ubused, |
99 | void *dibuff, | ||
100 | int *stat); | 83 | int *stat); |
101 | 84 | ||
102 | typedef int (*inumbers_fmt_pf)( | 85 | typedef int (*inumbers_fmt_pf)( |
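The bulkstat_one_pf typedef shrinks to match: per-inode callbacks no longer see the cluster daddr or a raw dinode. A hedged sketch of a callback matching the reduced prototype; the function is hypothetical, written in the spirit of a counting-only user such as quotacheck:

```c
/*
 * Hypothetical formatter matching the reduced bulkstat_one_pf prototype.
 * It only counts the inode and copies nothing to user space, so buffer,
 * ubsize and ubused are deliberately left unused.
 */
static int
example_count_inode(struct xfs_mount *mp, xfs_ino_t ino,
		    void __user *buffer, int ubsize,
		    int *ubused, int *stat)
{
	*stat = BULKSTAT_RV_DIDONE;
	return 0;
}
```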
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 5215abc8023a..925d572bf0f4 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -24,8 +24,6 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_error.h" | 28 | #include "xfs_error.h" |
31 | #include "xfs_log_priv.h" | 29 | #include "xfs_log_priv.h" |
@@ -35,8 +33,6 @@ | |||
35 | #include "xfs_ialloc_btree.h" | 33 | #include "xfs_ialloc_btree.h" |
36 | #include "xfs_log_recover.h" | 34 | #include "xfs_log_recover.h" |
37 | #include "xfs_trans_priv.h" | 35 | #include "xfs_trans_priv.h" |
38 | #include "xfs_dir2_sf.h" | ||
39 | #include "xfs_attr_sf.h" | ||
40 | #include "xfs_dinode.h" | 36 | #include "xfs_dinode.h" |
41 | #include "xfs_inode.h" | 37 | #include "xfs_inode.h" |
42 | #include "xfs_rw.h" | 38 | #include "xfs_rw.h" |
@@ -337,7 +333,6 @@ xfs_log_reserve( | |||
337 | int retval = 0; | 333 | int retval = 0; |
338 | 334 | ||
339 | ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); | 335 | ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); |
340 | ASSERT((flags & XFS_LOG_NOSLEEP) == 0); | ||
341 | 336 | ||
342 | if (XLOG_FORCED_SHUTDOWN(log)) | 337 | if (XLOG_FORCED_SHUTDOWN(log)) |
343 | return XFS_ERROR(EIO); | 338 | return XFS_ERROR(EIO); |
@@ -552,7 +547,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) | |||
552 | .magic = XLOG_UNMOUNT_TYPE, | 547 | .magic = XLOG_UNMOUNT_TYPE, |
553 | }; | 548 | }; |
554 | struct xfs_log_iovec reg = { | 549 | struct xfs_log_iovec reg = { |
555 | .i_addr = (void *)&magic, | 550 | .i_addr = &magic, |
556 | .i_len = sizeof(magic), | 551 | .i_len = sizeof(magic), |
557 | .i_type = XLOG_REG_TYPE_UNMOUNT, | 552 | .i_type = XLOG_REG_TYPE_UNMOUNT, |
558 | }; | 553 | }; |
@@ -1047,7 +1042,6 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1047 | xlog_in_core_t *iclog, *prev_iclog=NULL; | 1042 | xlog_in_core_t *iclog, *prev_iclog=NULL; |
1048 | xfs_buf_t *bp; | 1043 | xfs_buf_t *bp; |
1049 | int i; | 1044 | int i; |
1050 | int iclogsize; | ||
1051 | int error = ENOMEM; | 1045 | int error = ENOMEM; |
1052 | uint log2_size = 0; | 1046 | uint log2_size = 0; |
1053 | 1047 | ||
@@ -1127,7 +1121,6 @@ xlog_alloc_log(xfs_mount_t *mp, | |||
1127 | * with different amounts of memory. See the definition of | 1121 | * with different amounts of memory. See the definition of |
1128 | * xlog_in_core_t in xfs_log_priv.h for details. | 1122 | * xlog_in_core_t in xfs_log_priv.h for details. |
1129 | */ | 1123 | */ |
1130 | iclogsize = log->l_iclog_size; | ||
1131 | ASSERT(log->l_iclog_size >= 4096); | 1124 | ASSERT(log->l_iclog_size >= 4096); |
1132 | for (i=0; i < log->l_iclog_bufs; i++) { | 1125 | for (i=0; i < log->l_iclog_bufs; i++) { |
1133 | *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); | 1126 | *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); |
@@ -1428,11 +1421,8 @@ xlog_sync(xlog_t *log, | |||
1428 | XFS_BUF_BUSY(bp); | 1421 | XFS_BUF_BUSY(bp); |
1429 | XFS_BUF_ASYNC(bp); | 1422 | XFS_BUF_ASYNC(bp); |
1430 | bp->b_flags |= XBF_LOG_BUFFER; | 1423 | bp->b_flags |= XBF_LOG_BUFFER; |
1431 | /* | 1424 | |
1432 | * Do an ordered write for the log block. | 1425 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
1433 | * Its unnecessary to flush the first split block in the log wrap case. | ||
1434 | */ | ||
1435 | if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER)) | ||
1436 | XFS_BUF_ORDERED(bp); | 1426 | XFS_BUF_ORDERED(bp); |
1437 | 1427 | ||
1438 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); | 1428 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 04c78e642cc8..916eb7db14d9 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h | |||
@@ -55,14 +55,10 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) | |||
55 | /* | 55 | /* |
56 | * Flags to xfs_log_reserve() | 56 | * Flags to xfs_log_reserve() |
57 | * | 57 | * |
58 | * XFS_LOG_SLEEP: If space is not available, sleep (default) | ||
59 | * XFS_LOG_NOSLEEP: If space is not available, return error | ||
60 | * XFS_LOG_PERM_RESERV: Permanent reservation. When writes are | 58 | * XFS_LOG_PERM_RESERV: Permanent reservation. When writes are |
61 | * performed against this type of reservation, the reservation | 59 | * performed against this type of reservation, the reservation |
62 | * is not decreased. Long running transactions should use this. | 60 | * is not decreased. Long running transactions should use this. |
63 | */ | 61 | */ |
64 | #define XFS_LOG_SLEEP 0x0 | ||
65 | #define XFS_LOG_NOSLEEP 0x1 | ||
66 | #define XFS_LOG_PERM_RESERV 0x2 | 62 | #define XFS_LOG_PERM_RESERV 0x2 |
67 | 63 | ||
68 | /* | 64 | /* |
@@ -104,7 +100,7 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) | |||
104 | #define XLOG_REG_TYPE_MAX 19 | 100 | #define XLOG_REG_TYPE_MAX 19 |
105 | 101 | ||
106 | typedef struct xfs_log_iovec { | 102 | typedef struct xfs_log_iovec { |
107 | xfs_caddr_t i_addr; /* beginning address of region */ | 103 | void *i_addr; /* beginning address of region */ |
108 | int i_len; /* length in bytes of region */ | 104 | int i_len; /* length in bytes of region */ |
109 | uint i_type; /* type of region */ | 105 | uint i_type; /* type of region */ |
110 | } xfs_log_iovec_t; | 106 | } xfs_log_iovec_t; |
@@ -201,9 +197,4 @@ int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp, | |||
201 | bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); | 197 | bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); |
202 | 198 | ||
203 | #endif | 199 | #endif |
204 | |||
205 | |||
206 | extern int xlog_debug; /* set to 1 to enable real log */ | ||
207 | |||
208 | |||
209 | #endif /* __XFS_LOG_H__ */ | 200 | #endif /* __XFS_LOG_H__ */ |
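The xfs_log_iovec change above, i_addr going from xfs_caddr_t to void *, is what lets callers throughout the rest of this series drop their explicit casts. A minimal caller-side sketch using only names visible in these hunks (the thdr variable is illustrative):

    xfs_trans_header_t thdr;
    struct xfs_log_iovec lhdr;

    thdr.th_type = XFS_TRANS_CHECKPOINT;      /* as in xfs_log_cil.c below */
    lhdr.i_addr  = &thdr;                     /* no (xfs_caddr_t) cast needed any more */
    lhdr.i_len   = sizeof(xfs_trans_header_t);
    lhdr.i_type  = XLOG_REG_TYPE_TRANSHDR;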
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index bb17cc044bf3..31e4ea2d19ac 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -26,8 +26,6 @@ | |||
26 | #include "xfs_log_priv.h" | 26 | #include "xfs_log_priv.h" |
27 | #include "xfs_sb.h" | 27 | #include "xfs_sb.h" |
28 | #include "xfs_ag.h" | 28 | #include "xfs_ag.h" |
29 | #include "xfs_dir2.h" | ||
30 | #include "xfs_dmapi.h" | ||
31 | #include "xfs_mount.h" | 29 | #include "xfs_mount.h" |
32 | #include "xfs_error.h" | 30 | #include "xfs_error.h" |
33 | #include "xfs_alloc.h" | 31 | #include "xfs_alloc.h" |
@@ -554,7 +552,7 @@ xlog_cil_push( | |||
554 | thdr.th_type = XFS_TRANS_CHECKPOINT; | 552 | thdr.th_type = XFS_TRANS_CHECKPOINT; |
555 | thdr.th_tid = tic->t_tid; | 553 | thdr.th_tid = tic->t_tid; |
556 | thdr.th_num_items = num_iovecs; | 554 | thdr.th_num_items = num_iovecs; |
557 | lhdr.i_addr = (xfs_caddr_t)&thdr; | 555 | lhdr.i_addr = &thdr; |
558 | lhdr.i_len = sizeof(xfs_trans_header_t); | 556 | lhdr.i_len = sizeof(xfs_trans_header_t); |
559 | lhdr.i_type = XLOG_REG_TYPE_TRANSHDR; | 557 | lhdr.i_type = XLOG_REG_TYPE_TRANSHDR; |
560 | tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t); | 558 | tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t); |
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ed0684cc50ee..6f3f5fa37acf 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c | |||
@@ -24,15 +24,11 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_error.h" | 28 | #include "xfs_error.h" |
31 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
32 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
33 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
34 | #include "xfs_dir2_sf.h" | ||
35 | #include "xfs_attr_sf.h" | ||
36 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
37 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
38 | #include "xfs_inode_item.h" | 34 | #include "xfs_inode_item.h" |
@@ -1565,9 +1561,7 @@ xlog_recover_reorder_trans( | |||
1565 | 1561 | ||
1566 | list_splice_init(&trans->r_itemq, &sort_list); | 1562 | list_splice_init(&trans->r_itemq, &sort_list); |
1567 | list_for_each_entry_safe(item, n, &sort_list, ri_list) { | 1563 | list_for_each_entry_safe(item, n, &sort_list, ri_list) { |
1568 | xfs_buf_log_format_t *buf_f; | 1564 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
1569 | |||
1570 | buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; | ||
1571 | 1565 | ||
1572 | switch (ITEM_TYPE(item)) { | 1566 | switch (ITEM_TYPE(item)) { |
1573 | case XFS_LI_BUF: | 1567 | case XFS_LI_BUF: |
@@ -1892,9 +1886,8 @@ xlog_recover_do_inode_buffer( | |||
1892 | * current di_next_unlinked field. Extract its value | 1886 | * current di_next_unlinked field. Extract its value |
1893 | * and copy it to the buffer copy. | 1887 | * and copy it to the buffer copy. |
1894 | */ | 1888 | */ |
1895 | logged_nextp = (xfs_agino_t *) | 1889 | logged_nextp = item->ri_buf[item_index].i_addr + |
1896 | ((char *)(item->ri_buf[item_index].i_addr) + | 1890 | next_unlinked_offset - reg_buf_offset; |
1897 | (next_unlinked_offset - reg_buf_offset)); | ||
1898 | if (unlikely(*logged_nextp == 0)) { | 1891 | if (unlikely(*logged_nextp == 0)) { |
1899 | xfs_fs_cmn_err(CE_ALERT, mp, | 1892 | xfs_fs_cmn_err(CE_ALERT, mp, |
1900 | "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", | 1893 | "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", |
@@ -1973,8 +1966,7 @@ xlog_recover_do_reg_buffer( | |||
1973 | item->ri_buf[i].i_len, __func__); | 1966 | item->ri_buf[i].i_len, __func__); |
1974 | goto next; | 1967 | goto next; |
1975 | } | 1968 | } |
1976 | error = xfs_qm_dqcheck((xfs_disk_dquot_t *) | 1969 | error = xfs_qm_dqcheck(item->ri_buf[i].i_addr, |
1977 | item->ri_buf[i].i_addr, | ||
1978 | -1, 0, XFS_QMOPT_DOWARN, | 1970 | -1, 0, XFS_QMOPT_DOWARN, |
1979 | "dquot_buf_recover"); | 1971 | "dquot_buf_recover"); |
1980 | if (error) | 1972 | if (error) |
@@ -2187,7 +2179,7 @@ xlog_recover_do_buffer_trans( | |||
2187 | xlog_recover_item_t *item, | 2179 | xlog_recover_item_t *item, |
2188 | int pass) | 2180 | int pass) |
2189 | { | 2181 | { |
2190 | xfs_buf_log_format_t *buf_f; | 2182 | xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; |
2191 | xfs_mount_t *mp; | 2183 | xfs_mount_t *mp; |
2192 | xfs_buf_t *bp; | 2184 | xfs_buf_t *bp; |
2193 | int error; | 2185 | int error; |
@@ -2197,8 +2189,6 @@ xlog_recover_do_buffer_trans( | |||
2197 | ushort flags; | 2189 | ushort flags; |
2198 | uint buf_flags; | 2190 | uint buf_flags; |
2199 | 2191 | ||
2200 | buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr; | ||
2201 | |||
2202 | if (pass == XLOG_RECOVER_PASS1) { | 2192 | if (pass == XLOG_RECOVER_PASS1) { |
2203 | /* | 2193 | /* |
2204 | * In this pass we're only looking for buf items | 2194 | * In this pass we're only looking for buf items |
@@ -2319,10 +2309,9 @@ xlog_recover_do_inode_trans( | |||
2319 | } | 2309 | } |
2320 | 2310 | ||
2321 | if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { | 2311 | if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { |
2322 | in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr; | 2312 | in_f = item->ri_buf[0].i_addr; |
2323 | } else { | 2313 | } else { |
2324 | in_f = (xfs_inode_log_format_t *)kmem_alloc( | 2314 | in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP); |
2325 | sizeof(xfs_inode_log_format_t), KM_SLEEP); | ||
2326 | need_free = 1; | 2315 | need_free = 1; |
2327 | error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); | 2316 | error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f); |
2328 | if (error) | 2317 | if (error) |
@@ -2370,7 +2359,7 @@ xlog_recover_do_inode_trans( | |||
2370 | error = EFSCORRUPTED; | 2359 | error = EFSCORRUPTED; |
2371 | goto error; | 2360 | goto error; |
2372 | } | 2361 | } |
2373 | dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr); | 2362 | dicp = item->ri_buf[1].i_addr; |
2374 | if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { | 2363 | if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { |
2375 | xfs_buf_relse(bp); | 2364 | xfs_buf_relse(bp); |
2376 | xfs_fs_cmn_err(CE_ALERT, mp, | 2365 | xfs_fs_cmn_err(CE_ALERT, mp, |
@@ -2461,7 +2450,7 @@ xlog_recover_do_inode_trans( | |||
2461 | } | 2450 | } |
2462 | 2451 | ||
2463 | /* The core is in in-core format */ | 2452 | /* The core is in in-core format */ |
2464 | xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr); | 2453 | xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr); |
2465 | 2454 | ||
2466 | /* the rest is in on-disk format */ | 2455 | /* the rest is in on-disk format */ |
2467 | if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { | 2456 | if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { |
@@ -2578,7 +2567,7 @@ xlog_recover_do_quotaoff_trans( | |||
2578 | return (0); | 2567 | return (0); |
2579 | } | 2568 | } |
2580 | 2569 | ||
2581 | qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr; | 2570 | qoff_f = item->ri_buf[0].i_addr; |
2582 | ASSERT(qoff_f); | 2571 | ASSERT(qoff_f); |
2583 | 2572 | ||
2584 | /* | 2573 | /* |
@@ -2622,9 +2611,8 @@ xlog_recover_do_dquot_trans( | |||
2622 | if (mp->m_qflags == 0) | 2611 | if (mp->m_qflags == 0) |
2623 | return (0); | 2612 | return (0); |
2624 | 2613 | ||
2625 | recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; | 2614 | recddq = item->ri_buf[1].i_addr; |
2626 | 2615 | if (recddq == NULL) { | |
2627 | if (item->ri_buf[1].i_addr == NULL) { | ||
2628 | cmn_err(CE_ALERT, | 2616 | cmn_err(CE_ALERT, |
2629 | "XFS: NULL dquot in %s.", __func__); | 2617 | "XFS: NULL dquot in %s.", __func__); |
2630 | return XFS_ERROR(EIO); | 2618 | return XFS_ERROR(EIO); |
@@ -2654,7 +2642,7 @@ xlog_recover_do_dquot_trans( | |||
2654 | * The other possibility, of course, is that the quota subsystem was | 2642 | * The other possibility, of course, is that the quota subsystem was |
2655 | * removed since the last mount - ENOSYS. | 2643 | * removed since the last mount - ENOSYS. |
2656 | */ | 2644 | */ |
2657 | dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr; | 2645 | dq_f = item->ri_buf[0].i_addr; |
2658 | ASSERT(dq_f); | 2646 | ASSERT(dq_f); |
2659 | if ((error = xfs_qm_dqcheck(recddq, | 2647 | if ((error = xfs_qm_dqcheck(recddq, |
2660 | dq_f->qlf_id, | 2648 | dq_f->qlf_id, |
@@ -2721,7 +2709,7 @@ xlog_recover_do_efi_trans( | |||
2721 | return 0; | 2709 | return 0; |
2722 | } | 2710 | } |
2723 | 2711 | ||
2724 | efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr; | 2712 | efi_formatp = item->ri_buf[0].i_addr; |
2725 | 2713 | ||
2726 | mp = log->l_mp; | 2714 | mp = log->l_mp; |
2727 | efip = xfs_efi_init(mp, efi_formatp->efi_nextents); | 2715 | efip = xfs_efi_init(mp, efi_formatp->efi_nextents); |
@@ -2767,7 +2755,7 @@ xlog_recover_do_efd_trans( | |||
2767 | return; | 2755 | return; |
2768 | } | 2756 | } |
2769 | 2757 | ||
2770 | efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr; | 2758 | efd_formatp = item->ri_buf[0].i_addr; |
2771 | ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + | 2759 | ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + |
2772 | ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || | 2760 | ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || |
2773 | (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + | 2761 | (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + |
@@ -3198,7 +3186,7 @@ xlog_recover_process_one_iunlink( | |||
3198 | int error; | 3186 | int error; |
3199 | 3187 | ||
3200 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 3188 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
3201 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); | 3189 | error = xfs_iget(mp, NULL, ino, 0, 0, &ip); |
3202 | if (error) | 3190 | if (error) |
3203 | goto fail; | 3191 | goto fail; |
3204 | 3192 | ||
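Most of the mechanical churn in xfs_log_recover.c above follows from i_addr now being void *: assignments to typed pointers no longer need casts, and the di_next_unlinked hunk does its offset arithmetic directly on the void pointer (a GCC extension the kernel builds with). A condensed sketch of the two patterns, reusing the item/ri_buf names from the hunks above:

    xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;   /* implicit void * conversion */
    xfs_icdinode_t *dicp = item->ri_buf[1].i_addr;

    /* offset arithmetic performed on the void pointer itself */
    xfs_agino_t *logged_nextp = item->ri_buf[item_index].i_addr +
                                next_unlinked_offset - reg_buf_offset;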
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index d59f4e8bedcf..aeb9d72ebf6e 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -25,13 +25,10 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 30 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 31 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 32 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 33 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 34 | #include "xfs_btree.h" |
@@ -1300,7 +1297,7 @@ xfs_mountfs( | |||
1300 | * Get and sanity-check the root inode. | 1297 | * Get and sanity-check the root inode. |
1301 | * Save the pointer to it in the mount structure. | 1298 | * Save the pointer to it in the mount structure. |
1302 | */ | 1299 | */ |
1303 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0); | 1300 | error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); |
1304 | if (error) { | 1301 | if (error) { |
1305 | cmn_err(CE_WARN, "XFS: failed to read root inode"); | 1302 | cmn_err(CE_WARN, "XFS: failed to read root inode"); |
1306 | goto out_log_dealloc; | 1303 | goto out_log_dealloc; |
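The xfs_iget() call sites in the last two files show the same trim: the trailing argument, which every caller in this diff passed as 0, is gone. Illustrative before/after (argument names beyond those shown here are not spelled out in this diff):

    /* before: an extra trailing argument, always 0 in these callers */
    error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, 0);

    /* after */
    error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);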
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1d2c7eed4eda..622da2179a57 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -66,65 +66,6 @@ struct xfs_nameops; | |||
66 | struct xfs_ail; | 66 | struct xfs_ail; |
67 | struct xfs_quotainfo; | 67 | struct xfs_quotainfo; |
68 | 68 | ||
69 | |||
70 | /* | ||
71 | * Prototypes and functions for the Data Migration subsystem. | ||
72 | */ | ||
73 | |||
74 | typedef int (*xfs_send_data_t)(int, struct xfs_inode *, | ||
75 | xfs_off_t, size_t, int, int *); | ||
76 | typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint); | ||
77 | typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t); | ||
78 | typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *, | ||
79 | struct xfs_inode *, dm_right_t, | ||
80 | struct xfs_inode *, dm_right_t, | ||
81 | const unsigned char *, const unsigned char *, | ||
82 | mode_t, int, int); | ||
83 | typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t, | ||
84 | char *, char *); | ||
85 | typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *, | ||
86 | dm_right_t, mode_t, int, int); | ||
87 | |||
88 | typedef struct xfs_dmops { | ||
89 | xfs_send_data_t xfs_send_data; | ||
90 | xfs_send_mmap_t xfs_send_mmap; | ||
91 | xfs_send_destroy_t xfs_send_destroy; | ||
92 | xfs_send_namesp_t xfs_send_namesp; | ||
93 | xfs_send_mount_t xfs_send_mount; | ||
94 | xfs_send_unmount_t xfs_send_unmount; | ||
95 | } xfs_dmops_t; | ||
96 | |||
97 | #define XFS_DMAPI_UNMOUNT_FLAGS(mp) \ | ||
98 | (((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED) | ||
99 | |||
100 | #define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \ | ||
101 | (*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock) | ||
102 | #define XFS_SEND_MMAP(mp, vma,fl) \ | ||
103 | (*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl) | ||
104 | #define XFS_SEND_DESTROY(mp, ip,right) \ | ||
105 | (*(mp)->m_dm_ops->xfs_send_destroy)(ip,right) | ||
106 | #define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \ | ||
107 | (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl) | ||
108 | #define XFS_SEND_MOUNT(mp,right,path,name) \ | ||
109 | (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name) | ||
110 | #define XFS_SEND_PREUNMOUNT(mp) \ | ||
111 | do { \ | ||
112 | if (mp->m_flags & XFS_MOUNT_DMAPI) { \ | ||
113 | (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \ | ||
114 | (mp)->m_rootip, DM_RIGHT_NULL, \ | ||
115 | (mp)->m_rootip, DM_RIGHT_NULL, \ | ||
116 | NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \ | ||
117 | } \ | ||
118 | } while (0) | ||
119 | #define XFS_SEND_UNMOUNT(mp) \ | ||
120 | do { \ | ||
121 | if (mp->m_flags & XFS_MOUNT_DMAPI) { \ | ||
122 | (*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \ | ||
123 | DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \ | ||
124 | } \ | ||
125 | } while (0) | ||
126 | |||
127 | |||
128 | #ifdef HAVE_PERCPU_SB | 69 | #ifdef HAVE_PERCPU_SB |
129 | 70 | ||
130 | /* | 71 | /* |
@@ -241,8 +182,6 @@ typedef struct xfs_mount { | |||
241 | uint m_chsize; /* size of next field */ | 182 | uint m_chsize; /* size of next field */ |
242 | struct xfs_chash *m_chash; /* fs private inode per-cluster | 183 | struct xfs_chash *m_chash; /* fs private inode per-cluster |
243 | * hash table */ | 184 | * hash table */ |
244 | struct xfs_dmops *m_dm_ops; /* vector of DMI ops */ | ||
245 | struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ | ||
246 | atomic_t m_active_trans; /* number trans frozen */ | 185 | atomic_t m_active_trans; /* number trans frozen */ |
247 | #ifdef HAVE_PERCPU_SB | 186 | #ifdef HAVE_PERCPU_SB |
248 | xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ | 187 | xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */ |
@@ -259,7 +198,7 @@ typedef struct xfs_mount { | |||
259 | wait_queue_head_t m_wait_single_sync_task; | 198 | wait_queue_head_t m_wait_single_sync_task; |
260 | __int64_t m_update_flags; /* sb flags we need to update | 199 | __int64_t m_update_flags; /* sb flags we need to update |
261 | on the next remount,rw */ | 200 | on the next remount,rw */ |
262 | struct list_head m_mplist; /* inode shrinker mount list */ | 201 | struct shrinker m_inode_shrink; /* inode reclaim shrinker */ |
263 | } xfs_mount_t; | 202 | } xfs_mount_t; |
264 | 203 | ||
265 | /* | 204 | /* |
@@ -269,7 +208,6 @@ typedef struct xfs_mount { | |||
269 | must be synchronous except | 208 | must be synchronous except |
270 | for space allocations */ | 209 | for space allocations */ |
271 | #define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */ | 210 | #define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */ |
272 | #define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */ | ||
273 | #define XFS_MOUNT_WAS_CLEAN (1ULL << 3) | 211 | #define XFS_MOUNT_WAS_CLEAN (1ULL << 3) |
274 | #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem | 212 | #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem |
275 | operations, typically for | 213 | operations, typically for |
@@ -282,8 +220,6 @@ typedef struct xfs_mount { | |||
282 | #define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */ | 220 | #define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */ |
283 | #define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ | 221 | #define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ |
284 | #define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */ | 222 | #define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */ |
285 | #define XFS_MOUNT_OSYNCISOSYNC (1ULL << 13) /* o_sync is REALLY o_sync */ | ||
286 | /* osyncisdsync is now default*/ | ||
287 | #define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above | 223 | #define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above |
288 | * 32 bits in size */ | 224 | * 32 bits in size */ |
289 | #define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */ | 225 | #define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */ |
@@ -440,11 +376,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); | |||
440 | 376 | ||
441 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); | 377 | extern int xfs_dev_is_read_only(struct xfs_mount *, char *); |
442 | 378 | ||
443 | extern int xfs_dmops_get(struct xfs_mount *); | ||
444 | extern void xfs_dmops_put(struct xfs_mount *); | ||
445 | |||
446 | extern struct xfs_dmops xfs_dmcore_xfs; | ||
447 | |||
448 | #endif /* __KERNEL__ */ | 379 | #endif /* __KERNEL__ */ |
449 | 380 | ||
450 | extern void xfs_mod_sb(struct xfs_trans *, __int64_t); | 381 | extern void xfs_mod_sb(struct xfs_trans *, __int64_t); |
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index fc1cda23b817..8fca957200df 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c | |||
@@ -24,12 +24,9 @@ | |||
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dir2.h" | 26 | #include "xfs_dir2.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_da_btree.h" | 28 | #include "xfs_da_btree.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | ||
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
35 | #include "xfs_inode_item.h" | 32 | #include "xfs_inode_item.h" |
@@ -116,20 +113,7 @@ xfs_rename( | |||
116 | int spaceres; | 113 | int spaceres; |
117 | int num_inodes; | 114 | int num_inodes; |
118 | 115 | ||
119 | xfs_itrace_entry(src_dp); | 116 | trace_xfs_rename(src_dp, target_dp, src_name, target_name); |
120 | xfs_itrace_entry(target_dp); | ||
121 | |||
122 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) || | ||
123 | DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) { | ||
124 | error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME, | ||
125 | src_dp, DM_RIGHT_NULL, | ||
126 | target_dp, DM_RIGHT_NULL, | ||
127 | src_name->name, target_name->name, | ||
128 | 0, 0, 0); | ||
129 | if (error) | ||
130 | return error; | ||
131 | } | ||
132 | /* Return through std_return after this point. */ | ||
133 | 117 | ||
134 | new_parent = (src_dp != target_dp); | 118 | new_parent = (src_dp != target_dp); |
135 | src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); | 119 | src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); |
@@ -184,26 +168,14 @@ xfs_rename( | |||
184 | /* | 168 | /* |
185 | * Join all the inodes to the transaction. From this point on, | 169 | * Join all the inodes to the transaction. From this point on, |
186 | * we can rely on either trans_commit or trans_cancel to unlock | 170 | * we can rely on either trans_commit or trans_cancel to unlock |
187 | * them. Note that we need to add a vnode reference to the | 171 | * them. |
188 | * directories since trans_commit & trans_cancel will decrement | ||
189 | * them when they unlock the inodes. Also, we need to be careful | ||
190 | * not to add an inode to the transaction more than once. | ||
191 | */ | 172 | */ |
192 | IHOLD(src_dp); | 173 | xfs_trans_ijoin_ref(tp, src_dp, XFS_ILOCK_EXCL); |
193 | xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); | 174 | if (new_parent) |
194 | 175 | xfs_trans_ijoin_ref(tp, target_dp, XFS_ILOCK_EXCL); | |
195 | if (new_parent) { | 176 | xfs_trans_ijoin_ref(tp, src_ip, XFS_ILOCK_EXCL); |
196 | IHOLD(target_dp); | 177 | if (target_ip) |
197 | xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); | 178 | xfs_trans_ijoin_ref(tp, target_ip, XFS_ILOCK_EXCL); |
198 | } | ||
199 | |||
200 | IHOLD(src_ip); | ||
201 | xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); | ||
202 | |||
203 | if (target_ip) { | ||
204 | IHOLD(target_ip); | ||
205 | xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); | ||
206 | } | ||
207 | 179 | ||
208 | /* | 180 | /* |
209 | * If we are using project inheritance, we only allow renames | 181 | * If we are using project inheritance, we only allow renames |
@@ -369,26 +341,13 @@ xfs_rename( | |||
369 | * trans_commit will unlock src_ip, target_ip & decrement | 341 | * trans_commit will unlock src_ip, target_ip & decrement |
370 | * the vnode references. | 342 | * the vnode references. |
371 | */ | 343 | */ |
372 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 344 | return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
373 | |||
374 | /* Fall through to std_return with error = 0 or errno from | ||
375 | * xfs_trans_commit */ | ||
376 | std_return: | ||
377 | if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) || | ||
378 | DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) { | ||
379 | (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME, | ||
380 | src_dp, DM_RIGHT_NULL, | ||
381 | target_dp, DM_RIGHT_NULL, | ||
382 | src_name->name, target_name->name, | ||
383 | 0, error, 0); | ||
384 | } | ||
385 | return error; | ||
386 | 345 | ||
387 | abort_return: | 346 | abort_return: |
388 | cancel_flags |= XFS_TRANS_ABORT; | 347 | cancel_flags |= XFS_TRANS_ABORT; |
389 | /* FALLTHROUGH */ | ||
390 | error_return: | 348 | error_return: |
391 | xfs_bmap_cancel(&free_list); | 349 | xfs_bmap_cancel(&free_list); |
392 | xfs_trans_cancel(tp, cancel_flags); | 350 | xfs_trans_cancel(tp, cancel_flags); |
393 | goto std_return; | 351 | std_return: |
352 | return error; | ||
394 | } | 353 | } |
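The rename path above also demonstrates the new inode-join helper: xfs_trans_ijoin_ref() replaces the explicit IHOLD() plus xfs_trans_ijoin() pair (both prototypes appear in the xfs_trans.h hunk further down). Roughly:

    /* old idiom: take a reference, then join with the lock held */
    IHOLD(src_dp);
    xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);

    /* new idiom: one call joins the inode and takes the reference */
    xfs_trans_ijoin_ref(tp, src_dp, XFS_ILOCK_EXCL);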
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 16445518506d..891260fea11e 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c | |||
@@ -25,17 +25,10 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | ||
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_alloc.h" | 32 | #include "xfs_alloc.h" |
40 | #include "xfs_bmap.h" | 33 | #include "xfs_bmap.h" |
41 | #include "xfs_rtalloc.h" | 34 | #include "xfs_rtalloc.h" |
@@ -129,7 +122,7 @@ xfs_growfs_rt_alloc( | |||
129 | cancelflags |= XFS_TRANS_ABORT; | 122 | cancelflags |= XFS_TRANS_ABORT; |
130 | error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks, | 123 | error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks, |
131 | XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock, | 124 | XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock, |
132 | resblks, &map, &nmap, &flist, NULL); | 125 | resblks, &map, &nmap, &flist); |
133 | if (!error && nmap < 1) | 126 | if (!error && nmap < 1) |
134 | error = XFS_ERROR(ENOSPC); | 127 | error = XFS_ERROR(ENOSPC); |
135 | if (error) | 128 | if (error) |
@@ -2277,12 +2270,12 @@ xfs_rtmount_inodes( | |||
2277 | sbp = &mp->m_sb; | 2270 | sbp = &mp->m_sb; |
2278 | if (sbp->sb_rbmino == NULLFSINO) | 2271 | if (sbp->sb_rbmino == NULLFSINO) |
2279 | return 0; | 2272 | return 0; |
2280 | error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0); | 2273 | error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip); |
2281 | if (error) | 2274 | if (error) |
2282 | return error; | 2275 | return error; |
2283 | ASSERT(mp->m_rbmip != NULL); | 2276 | ASSERT(mp->m_rbmip != NULL); |
2284 | ASSERT(sbp->sb_rsumino != NULLFSINO); | 2277 | ASSERT(sbp->sb_rsumino != NULLFSINO); |
2285 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0); | 2278 | error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip); |
2286 | if (error) { | 2279 | if (error) { |
2287 | IRELE(mp->m_rbmip); | 2280 | IRELE(mp->m_rbmip); |
2288 | return error; | 2281 | return error; |
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index e336742a58a4..56861d5daaef 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c | |||
@@ -24,27 +24,12 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | ||
32 | #include "xfs_ialloc_btree.h" | ||
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 29 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 30 | #include "xfs_inode.h" |
37 | #include "xfs_inode_item.h" | ||
38 | #include "xfs_itable.h" | ||
39 | #include "xfs_btree.h" | ||
40 | #include "xfs_alloc.h" | ||
41 | #include "xfs_ialloc.h" | ||
42 | #include "xfs_attr.h" | ||
43 | #include "xfs_bmap.h" | ||
44 | #include "xfs_error.h" | 31 | #include "xfs_error.h" |
45 | #include "xfs_buf_item.h" | ||
46 | #include "xfs_rw.h" | 32 | #include "xfs_rw.h" |
47 | #include "xfs_trace.h" | ||
48 | 33 | ||
49 | /* | 34 | /* |
50 | * Force a shutdown of the filesystem instantly while keeping | 35 | * Force a shutdown of the filesystem instantly while keeping |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 28547dfce037..fdca7416c754 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. | 2 | * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. |
3 | * Copyright (C) 2010 Red Hat, Inc. | ||
3 | * All Rights Reserved. | 4 | * All Rights Reserved. |
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -24,16 +25,12 @@ | |||
24 | #include "xfs_trans.h" | 25 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 27 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
31 | #include "xfs_da_btree.h" | 30 | #include "xfs_da_btree.h" |
32 | #include "xfs_bmap_btree.h" | 31 | #include "xfs_bmap_btree.h" |
33 | #include "xfs_alloc_btree.h" | 32 | #include "xfs_alloc_btree.h" |
34 | #include "xfs_ialloc_btree.h" | 33 | #include "xfs_ialloc_btree.h" |
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | ||
37 | #include "xfs_dinode.h" | 34 | #include "xfs_dinode.h" |
38 | #include "xfs_inode.h" | 35 | #include "xfs_inode.h" |
39 | #include "xfs_btree.h" | 36 | #include "xfs_btree.h" |
@@ -47,6 +44,7 @@ | |||
47 | #include "xfs_trace.h" | 44 | #include "xfs_trace.h" |
48 | 45 | ||
49 | kmem_zone_t *xfs_trans_zone; | 46 | kmem_zone_t *xfs_trans_zone; |
47 | kmem_zone_t *xfs_log_item_desc_zone; | ||
50 | 48 | ||
51 | 49 | ||
52 | /* | 50 | /* |
@@ -597,8 +595,7 @@ _xfs_trans_alloc( | |||
597 | tp->t_magic = XFS_TRANS_MAGIC; | 595 | tp->t_magic = XFS_TRANS_MAGIC; |
598 | tp->t_type = type; | 596 | tp->t_type = type; |
599 | tp->t_mountp = mp; | 597 | tp->t_mountp = mp; |
600 | tp->t_items_free = XFS_LIC_NUM_SLOTS; | 598 | INIT_LIST_HEAD(&tp->t_items); |
601 | xfs_lic_init(&(tp->t_items)); | ||
602 | INIT_LIST_HEAD(&tp->t_busy); | 599 | INIT_LIST_HEAD(&tp->t_busy); |
603 | return tp; | 600 | return tp; |
604 | } | 601 | } |
@@ -643,8 +640,7 @@ xfs_trans_dup( | |||
643 | ntp->t_magic = XFS_TRANS_MAGIC; | 640 | ntp->t_magic = XFS_TRANS_MAGIC; |
644 | ntp->t_type = tp->t_type; | 641 | ntp->t_type = tp->t_type; |
645 | ntp->t_mountp = tp->t_mountp; | 642 | ntp->t_mountp = tp->t_mountp; |
646 | ntp->t_items_free = XFS_LIC_NUM_SLOTS; | 643 | INIT_LIST_HEAD(&ntp->t_items); |
647 | xfs_lic_init(&(ntp->t_items)); | ||
648 | INIT_LIST_HEAD(&ntp->t_busy); | 644 | INIT_LIST_HEAD(&ntp->t_busy); |
649 | 645 | ||
650 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); | 646 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); |
@@ -1124,6 +1120,108 @@ xfs_trans_unreserve_and_mod_sb( | |||
1124 | } | 1120 | } |
1125 | 1121 | ||
1126 | /* | 1122 | /* |
1123 | * Add the given log item to the transaction's list of log items. | ||
1124 | * | ||
1125 | * The log item will now point to its new descriptor with its li_desc field. | ||
1126 | */ | ||
1127 | void | ||
1128 | xfs_trans_add_item( | ||
1129 | struct xfs_trans *tp, | ||
1130 | struct xfs_log_item *lip) | ||
1131 | { | ||
1132 | struct xfs_log_item_desc *lidp; | ||
1133 | |||
1134 | ASSERT(lip->li_mountp == tp->t_mountp); | ||
1135 | ASSERT(lip->li_ailp == tp->t_mountp->m_ail); | ||
1136 | |||
1137 | lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS); | ||
1138 | |||
1139 | lidp->lid_item = lip; | ||
1140 | lidp->lid_flags = 0; | ||
1141 | lidp->lid_size = 0; | ||
1142 | list_add_tail(&lidp->lid_trans, &tp->t_items); | ||
1143 | |||
1144 | lip->li_desc = lidp; | ||
1145 | } | ||
1146 | |||
1147 | STATIC void | ||
1148 | xfs_trans_free_item_desc( | ||
1149 | struct xfs_log_item_desc *lidp) | ||
1150 | { | ||
1151 | list_del_init(&lidp->lid_trans); | ||
1152 | kmem_zone_free(xfs_log_item_desc_zone, lidp); | ||
1153 | } | ||
1154 | |||
1155 | /* | ||
1156 | * Unlink and free the given descriptor. | ||
1157 | */ | ||
1158 | void | ||
1159 | xfs_trans_del_item( | ||
1160 | struct xfs_log_item *lip) | ||
1161 | { | ||
1162 | xfs_trans_free_item_desc(lip->li_desc); | ||
1163 | lip->li_desc = NULL; | ||
1164 | } | ||
1165 | |||
1166 | /* | ||
1167 | * Unlock all of the items of a transaction and free all the descriptors | ||
1168 | * of that transaction. | ||
1169 | */ | ||
1170 | STATIC void | ||
1171 | xfs_trans_free_items( | ||
1172 | struct xfs_trans *tp, | ||
1173 | xfs_lsn_t commit_lsn, | ||
1174 | int flags) | ||
1175 | { | ||
1176 | struct xfs_log_item_desc *lidp, *next; | ||
1177 | |||
1178 | list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { | ||
1179 | struct xfs_log_item *lip = lidp->lid_item; | ||
1180 | |||
1181 | lip->li_desc = NULL; | ||
1182 | |||
1183 | if (commit_lsn != NULLCOMMITLSN) | ||
1184 | IOP_COMMITTING(lip, commit_lsn); | ||
1185 | if (flags & XFS_TRANS_ABORT) | ||
1186 | lip->li_flags |= XFS_LI_ABORTED; | ||
1187 | IOP_UNLOCK(lip); | ||
1188 | |||
1189 | xfs_trans_free_item_desc(lidp); | ||
1190 | } | ||
1191 | } | ||
1192 | |||
1193 | /* | ||
1194 | * Unlock the items associated with a transaction. | ||
1195 | * | ||
1196 | * Items which were not logged should be freed. Those which were logged must | ||
1197 | * still be tracked so they can be unpinned when the transaction commits. | ||
1198 | */ | ||
1199 | STATIC void | ||
1200 | xfs_trans_unlock_items( | ||
1201 | struct xfs_trans *tp, | ||
1202 | xfs_lsn_t commit_lsn) | ||
1203 | { | ||
1204 | struct xfs_log_item_desc *lidp, *next; | ||
1205 | |||
1206 | list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { | ||
1207 | struct xfs_log_item *lip = lidp->lid_item; | ||
1208 | |||
1209 | lip->li_desc = NULL; | ||
1210 | |||
1211 | if (commit_lsn != NULLCOMMITLSN) | ||
1212 | IOP_COMMITTING(lip, commit_lsn); | ||
1213 | IOP_UNLOCK(lip); | ||
1214 | |||
1215 | /* | ||
1216 | * Free the descriptor if the item is not dirty | ||
1217 | * within this transaction. | ||
1218 | */ | ||
1219 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) | ||
1220 | xfs_trans_free_item_desc(lidp); | ||
1221 | } | ||
1222 | } | ||
1223 | |||
1224 | /* | ||
1127 | * Total up the number of log iovecs needed to commit this | 1225 | * Total up the number of log iovecs needed to commit this |
1128 | * transaction. The transaction itself needs one for the | 1226 | * transaction. The transaction itself needs one for the |
1129 | * transaction header. Ask each dirty item in turn how many | 1227 | * transaction header. Ask each dirty item in turn how many |
@@ -1134,30 +1232,27 @@ xfs_trans_count_vecs( | |||
1134 | struct xfs_trans *tp) | 1232 | struct xfs_trans *tp) |
1135 | { | 1233 | { |
1136 | int nvecs; | 1234 | int nvecs; |
1137 | xfs_log_item_desc_t *lidp; | 1235 | struct xfs_log_item_desc *lidp; |
1138 | 1236 | ||
1139 | nvecs = 1; | 1237 | nvecs = 1; |
1140 | lidp = xfs_trans_first_item(tp); | ||
1141 | ASSERT(lidp != NULL); | ||
1142 | 1238 | ||
1143 | /* In the non-debug case we need to start bailing out if we | 1239 | /* In the non-debug case we need to start bailing out if we |
1144 | * didn't find a log_item here, return zero and let trans_commit | 1240 | * didn't find a log_item here, return zero and let trans_commit |
1145 | * deal with it. | 1241 | * deal with it. |
1146 | */ | 1242 | */ |
1147 | if (lidp == NULL) | 1243 | if (list_empty(&tp->t_items)) { |
1244 | ASSERT(0); | ||
1148 | return 0; | 1245 | return 0; |
1246 | } | ||
1149 | 1247 | ||
1150 | while (lidp != NULL) { | 1248 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
1151 | /* | 1249 | /* |
1152 | * Skip items which aren't dirty in this transaction. | 1250 | * Skip items which aren't dirty in this transaction. |
1153 | */ | 1251 | */ |
1154 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) { | 1252 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
1155 | lidp = xfs_trans_next_item(tp, lidp); | ||
1156 | continue; | 1253 | continue; |
1157 | } | ||
1158 | lidp->lid_size = IOP_SIZE(lidp->lid_item); | 1254 | lidp->lid_size = IOP_SIZE(lidp->lid_item); |
1159 | nvecs += lidp->lid_size; | 1255 | nvecs += lidp->lid_size; |
1160 | lidp = xfs_trans_next_item(tp, lidp); | ||
1161 | } | 1256 | } |
1162 | 1257 | ||
1163 | return nvecs; | 1258 | return nvecs; |
@@ -1177,7 +1272,7 @@ xfs_trans_fill_vecs( | |||
1177 | struct xfs_trans *tp, | 1272 | struct xfs_trans *tp, |
1178 | struct xfs_log_iovec *log_vector) | 1273 | struct xfs_log_iovec *log_vector) |
1179 | { | 1274 | { |
1180 | xfs_log_item_desc_t *lidp; | 1275 | struct xfs_log_item_desc *lidp; |
1181 | struct xfs_log_iovec *vecp; | 1276 | struct xfs_log_iovec *vecp; |
1182 | uint nitems; | 1277 | uint nitems; |
1183 | 1278 | ||
@@ -1188,14 +1283,11 @@ xfs_trans_fill_vecs( | |||
1188 | vecp = log_vector + 1; | 1283 | vecp = log_vector + 1; |
1189 | 1284 | ||
1190 | nitems = 0; | 1285 | nitems = 0; |
1191 | lidp = xfs_trans_first_item(tp); | 1286 | ASSERT(!list_empty(&tp->t_items)); |
1192 | ASSERT(lidp); | 1287 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
1193 | while (lidp) { | ||
1194 | /* Skip items which aren't dirty in this transaction. */ | 1288 | /* Skip items which aren't dirty in this transaction. */ |
1195 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) { | 1289 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
1196 | lidp = xfs_trans_next_item(tp, lidp); | ||
1197 | continue; | 1290 | continue; |
1198 | } | ||
1199 | 1291 | ||
1200 | /* | 1292 | /* |
1201 | * The item may be marked dirty but not log anything. This can | 1293 | * The item may be marked dirty but not log anything. This can |
@@ -1206,7 +1298,6 @@ xfs_trans_fill_vecs( | |||
1206 | IOP_FORMAT(lidp->lid_item, vecp); | 1298 | IOP_FORMAT(lidp->lid_item, vecp); |
1207 | vecp += lidp->lid_size; | 1299 | vecp += lidp->lid_size; |
1208 | IOP_PIN(lidp->lid_item); | 1300 | IOP_PIN(lidp->lid_item); |
1209 | lidp = xfs_trans_next_item(tp, lidp); | ||
1210 | } | 1301 | } |
1211 | 1302 | ||
1212 | /* | 1303 | /* |
@@ -1284,7 +1375,7 @@ xfs_trans_item_committed( | |||
1284 | * log item flags, if anyone else stales the buffer we do not want to | 1375 | * log item flags, if anyone else stales the buffer we do not want to |
1285 | * pay any attention to it. | 1376 | * pay any attention to it. |
1286 | */ | 1377 | */ |
1287 | IOP_UNPIN(lip); | 1378 | IOP_UNPIN(lip, 0); |
1288 | } | 1379 | } |
1289 | 1380 | ||
1290 | /* | 1381 | /* |
@@ -1301,24 +1392,15 @@ xfs_trans_committed( | |||
1301 | struct xfs_trans *tp, | 1392 | struct xfs_trans *tp, |
1302 | int abortflag) | 1393 | int abortflag) |
1303 | { | 1394 | { |
1304 | xfs_log_item_desc_t *lidp; | 1395 | struct xfs_log_item_desc *lidp, *next; |
1305 | xfs_log_item_chunk_t *licp; | ||
1306 | xfs_log_item_chunk_t *next_licp; | ||
1307 | 1396 | ||
1308 | /* Call the transaction's completion callback if there is one. */ | 1397 | /* Call the transaction's completion callback if there is one. */ |
1309 | if (tp->t_callback != NULL) | 1398 | if (tp->t_callback != NULL) |
1310 | tp->t_callback(tp, tp->t_callarg); | 1399 | tp->t_callback(tp, tp->t_callarg); |
1311 | 1400 | ||
1312 | for (lidp = xfs_trans_first_item(tp); | 1401 | list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) { |
1313 | lidp != NULL; | ||
1314 | lidp = xfs_trans_next_item(tp, lidp)) { | ||
1315 | xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); | 1402 | xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag); |
1316 | } | 1403 | xfs_trans_free_item_desc(lidp); |
1317 | |||
1318 | /* free the item chunks, ignoring the embedded chunk */ | ||
1319 | for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) { | ||
1320 | next_licp = licp->lic_next; | ||
1321 | kmem_free(licp); | ||
1322 | } | 1404 | } |
1323 | 1405 | ||
1324 | xfs_trans_free(tp); | 1406 | xfs_trans_free(tp); |
@@ -1333,16 +1415,14 @@ xfs_trans_uncommit( | |||
1333 | struct xfs_trans *tp, | 1415 | struct xfs_trans *tp, |
1334 | uint flags) | 1416 | uint flags) |
1335 | { | 1417 | { |
1336 | xfs_log_item_desc_t *lidp; | 1418 | struct xfs_log_item_desc *lidp; |
1337 | 1419 | ||
1338 | for (lidp = xfs_trans_first_item(tp); | 1420 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
1339 | lidp != NULL; | ||
1340 | lidp = xfs_trans_next_item(tp, lidp)) { | ||
1341 | /* | 1421 | /* |
1342 | * Unpin all but those that aren't dirty. | 1422 | * Unpin all but those that aren't dirty. |
1343 | */ | 1423 | */ |
1344 | if (lidp->lid_flags & XFS_LID_DIRTY) | 1424 | if (lidp->lid_flags & XFS_LID_DIRTY) |
1345 | IOP_UNPIN_REMOVE(lidp->lid_item, tp); | 1425 | IOP_UNPIN(lidp->lid_item, 1); |
1346 | } | 1426 | } |
1347 | 1427 | ||
1348 | xfs_trans_unreserve_and_mod_sb(tp); | 1428 | xfs_trans_unreserve_and_mod_sb(tp); |
@@ -1508,33 +1588,28 @@ STATIC struct xfs_log_vec * | |||
1508 | xfs_trans_alloc_log_vecs( | 1588 | xfs_trans_alloc_log_vecs( |
1509 | xfs_trans_t *tp) | 1589 | xfs_trans_t *tp) |
1510 | { | 1590 | { |
1511 | xfs_log_item_desc_t *lidp; | 1591 | struct xfs_log_item_desc *lidp; |
1512 | struct xfs_log_vec *lv = NULL; | 1592 | struct xfs_log_vec *lv = NULL; |
1513 | struct xfs_log_vec *ret_lv = NULL; | 1593 | struct xfs_log_vec *ret_lv = NULL; |
1514 | 1594 | ||
1515 | lidp = xfs_trans_first_item(tp); | ||
1516 | 1595 | ||
1517 | /* Bail out if we didn't find a log item. */ | 1596 | /* Bail out if we didn't find a log item. */ |
1518 | if (!lidp) { | 1597 | if (list_empty(&tp->t_items)) { |
1519 | ASSERT(0); | 1598 | ASSERT(0); |
1520 | return NULL; | 1599 | return NULL; |
1521 | } | 1600 | } |
1522 | 1601 | ||
1523 | while (lidp != NULL) { | 1602 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
1524 | struct xfs_log_vec *new_lv; | 1603 | struct xfs_log_vec *new_lv; |
1525 | 1604 | ||
1526 | /* Skip items which aren't dirty in this transaction. */ | 1605 | /* Skip items which aren't dirty in this transaction. */ |
1527 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) { | 1606 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
1528 | lidp = xfs_trans_next_item(tp, lidp); | ||
1529 | continue; | 1607 | continue; |
1530 | } | ||
1531 | 1608 | ||
1532 | /* Skip items that do not have any vectors for writing */ | 1609 | /* Skip items that do not have any vectors for writing */ |
1533 | lidp->lid_size = IOP_SIZE(lidp->lid_item); | 1610 | lidp->lid_size = IOP_SIZE(lidp->lid_item); |
1534 | if (!lidp->lid_size) { | 1611 | if (!lidp->lid_size) |
1535 | lidp = xfs_trans_next_item(tp, lidp); | ||
1536 | continue; | 1612 | continue; |
1537 | } | ||
1538 | 1613 | ||
1539 | new_lv = kmem_zalloc(sizeof(*new_lv) + | 1614 | new_lv = kmem_zalloc(sizeof(*new_lv) + |
1540 | lidp->lid_size * sizeof(struct xfs_log_iovec), | 1615 | lidp->lid_size * sizeof(struct xfs_log_iovec), |
@@ -1549,7 +1624,6 @@ xfs_trans_alloc_log_vecs( | |||
1549 | else | 1624 | else |
1550 | lv->lv_next = new_lv; | 1625 | lv->lv_next = new_lv; |
1551 | lv = new_lv; | 1626 | lv = new_lv; |
1552 | lidp = xfs_trans_next_item(tp, lidp); | ||
1553 | } | 1627 | } |
1554 | 1628 | ||
1555 | return ret_lv; | 1629 | return ret_lv; |
@@ -1708,12 +1782,6 @@ xfs_trans_cancel( | |||
1708 | int flags) | 1782 | int flags) |
1709 | { | 1783 | { |
1710 | int log_flags; | 1784 | int log_flags; |
1711 | #ifdef DEBUG | ||
1712 | xfs_log_item_chunk_t *licp; | ||
1713 | xfs_log_item_desc_t *lidp; | ||
1714 | xfs_log_item_t *lip; | ||
1715 | int i; | ||
1716 | #endif | ||
1717 | xfs_mount_t *mp = tp->t_mountp; | 1785 | xfs_mount_t *mp = tp->t_mountp; |
1718 | 1786 | ||
1719 | /* | 1787 | /* |
@@ -1732,21 +1800,11 @@ xfs_trans_cancel( | |||
1732 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | 1800 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
1733 | } | 1801 | } |
1734 | #ifdef DEBUG | 1802 | #ifdef DEBUG |
1735 | if (!(flags & XFS_TRANS_ABORT)) { | 1803 | if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) { |
1736 | licp = &(tp->t_items); | 1804 | struct xfs_log_item_desc *lidp; |
1737 | while (licp != NULL) { | 1805 | |
1738 | lidp = licp->lic_descs; | 1806 | list_for_each_entry(lidp, &tp->t_items, lid_trans) |
1739 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | 1807 | ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD)); |
1740 | if (xfs_lic_isfree(licp, i)) { | ||
1741 | continue; | ||
1742 | } | ||
1743 | |||
1744 | lip = lidp->lid_item; | ||
1745 | if (!XFS_FORCED_SHUTDOWN(mp)) | ||
1746 | ASSERT(!(lip->li_type == XFS_LI_EFD)); | ||
1747 | } | ||
1748 | licp = licp->lic_next; | ||
1749 | } | ||
1750 | } | 1808 | } |
1751 | #endif | 1809 | #endif |
1752 | xfs_trans_unreserve_and_mod_sb(tp); | 1810 | xfs_trans_unreserve_and_mod_sb(tp); |
@@ -1834,7 +1892,6 @@ xfs_trans_roll( | |||
1834 | if (error) | 1892 | if (error) |
1835 | return error; | 1893 | return error; |
1836 | 1894 | ||
1837 | xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL); | 1895 | xfs_trans_ijoin(trans, dp); |
1838 | xfs_trans_ihold(trans, dp); | ||
1839 | return 0; | 1896 | return 0; |
1840 | } | 1897 | } |
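The helpers added above replace the old inline chunk array with a plain list of heap-allocated descriptors hanging off tp->t_items. From a log item's point of view the lifecycle reduces to the following sketch (bip is a buf log item pointer, as in the xfs_trans_buf.c hunk below):

    /* allocate a descriptor from xfs_log_item_desc_zone and queue it on
     * tp->t_items; this also sets bip->bli_item.li_desc */
    xfs_trans_add_item(tp, &bip->bli_item);

    /* ... dirtying the item sets XFS_LID_DIRTY on the descriptor ... */

    /* unlink and free the descriptor once the item leaves the transaction */
    xfs_trans_del_item(&bip->bli_item);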
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index e639e8e9a2a9..c13c0f97b494 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -161,105 +161,14 @@ typedef struct xfs_trans_header { | |||
161 | * the amount of space needed to log the item it describes | 161 | * the amount of space needed to log the item it describes |
162 | * once we get to commit processing (see xfs_trans_commit()). | 162 | * once we get to commit processing (see xfs_trans_commit()). |
163 | */ | 163 | */ |
164 | typedef struct xfs_log_item_desc { | 164 | struct xfs_log_item_desc { |
165 | struct xfs_log_item *lid_item; | 165 | struct xfs_log_item *lid_item; |
166 | ushort lid_size; | 166 | ushort lid_size; |
167 | unsigned char lid_flags; | 167 | unsigned char lid_flags; |
168 | unsigned char lid_index; | 168 | struct list_head lid_trans; |
169 | } xfs_log_item_desc_t; | 169 | }; |
170 | 170 | ||
171 | #define XFS_LID_DIRTY 0x1 | 171 | #define XFS_LID_DIRTY 0x1 |
172 | #define XFS_LID_PINNED 0x2 | ||
173 | |||
174 | /* | ||
175 | * This structure is used to maintain a chunk list of log_item_desc | ||
176 | * structures. The free field is a bitmask indicating which descriptors | ||
177 | * in this chunk's array are free. The unused field is the first value | ||
178 | * not used since this chunk was allocated. | ||
179 | */ | ||
180 | #define XFS_LIC_NUM_SLOTS 15 | ||
181 | typedef struct xfs_log_item_chunk { | ||
182 | struct xfs_log_item_chunk *lic_next; | ||
183 | ushort lic_free; | ||
184 | ushort lic_unused; | ||
185 | xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS]; | ||
186 | } xfs_log_item_chunk_t; | ||
187 | |||
188 | #define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1) | ||
189 | #define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1) | ||
190 | |||
191 | |||
192 | /* | ||
193 | * Initialize the given chunk. Set the chunk's free descriptor mask | ||
194 | * to indicate that all descriptors are free. The caller gets to set | ||
195 | * lic_unused to the right value (0 matches all free). The | ||
196 | * lic_descs.lid_index values are set up as each desc is allocated. | ||
197 | */ | ||
198 | static inline void xfs_lic_init(xfs_log_item_chunk_t *cp) | ||
199 | { | ||
200 | cp->lic_free = XFS_LIC_FREEMASK; | ||
201 | } | ||
202 | |||
203 | static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot) | ||
204 | { | ||
205 | cp->lic_descs[slot].lid_index = (unsigned char)(slot); | ||
206 | } | ||
207 | |||
208 | static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp) | ||
209 | { | ||
210 | return cp->lic_free & XFS_LIC_FREEMASK; | ||
211 | } | ||
212 | |||
213 | static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp) | ||
214 | { | ||
215 | cp->lic_free = XFS_LIC_FREEMASK; | ||
216 | } | ||
217 | |||
218 | static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp) | ||
219 | { | ||
220 | return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK); | ||
221 | } | ||
222 | |||
223 | static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot) | ||
224 | { | ||
225 | return (cp->lic_free & (1 << slot)); | ||
226 | } | ||
227 | |||
228 | static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot) | ||
229 | { | ||
230 | cp->lic_free &= ~(1 << slot); | ||
231 | } | ||
232 | |||
233 | static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot) | ||
234 | { | ||
235 | cp->lic_free |= 1 << slot; | ||
236 | } | ||
237 | |||
238 | static inline xfs_log_item_desc_t * | ||
239 | xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot) | ||
240 | { | ||
241 | return &(cp->lic_descs[slot]); | ||
242 | } | ||
243 | |||
244 | static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp) | ||
245 | { | ||
246 | return (uint)dp->lid_index; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Calculate the address of a chunk given a descriptor pointer: | ||
251 | * dp - dp->lid_index give the address of the start of the lic_descs array. | ||
252 | * From this we subtract the offset of the lic_descs field in a chunk. | ||
253 | * All of this yields the address of the chunk, which is | ||
254 | * cast to a chunk pointer. | ||
255 | */ | ||
256 | static inline xfs_log_item_chunk_t * | ||
257 | xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) | ||
258 | { | ||
259 | return (xfs_log_item_chunk_t*) \ | ||
260 | (((xfs_caddr_t)((dp) - (dp)->lid_index)) - \ | ||
261 | (xfs_caddr_t)(((xfs_log_item_chunk_t*)0)->lic_descs)); | ||
262 | } | ||
263 | 172 | ||
264 | #define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ | 173 | #define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ |
265 | /* | 174 | /* |
@@ -275,8 +184,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) | |||
275 | /* | 184 | /* |
276 | * Values for call flags parameter. | 185 | * Values for call flags parameter. |
277 | */ | 186 | */ |
278 | #define XFS_TRANS_NOSLEEP 0x1 | ||
279 | #define XFS_TRANS_WAIT 0x2 | ||
280 | #define XFS_TRANS_RELEASE_LOG_RES 0x4 | 187 | #define XFS_TRANS_RELEASE_LOG_RES 0x4 |
281 | #define XFS_TRANS_ABORT 0x8 | 188 | #define XFS_TRANS_ABORT 0x8 |
282 | 189 | ||
@@ -438,8 +345,7 @@ typedef struct xfs_item_ops { | |||
438 | uint (*iop_size)(xfs_log_item_t *); | 345 | uint (*iop_size)(xfs_log_item_t *); |
439 | void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); | 346 | void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); |
440 | void (*iop_pin)(xfs_log_item_t *); | 347 | void (*iop_pin)(xfs_log_item_t *); |
441 | void (*iop_unpin)(xfs_log_item_t *); | 348 | void (*iop_unpin)(xfs_log_item_t *, int remove); |
442 | void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *); | ||
443 | uint (*iop_trylock)(xfs_log_item_t *); | 349 | uint (*iop_trylock)(xfs_log_item_t *); |
444 | void (*iop_unlock)(xfs_log_item_t *); | 350 | void (*iop_unlock)(xfs_log_item_t *); |
445 | xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); | 351 | xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); |
@@ -451,8 +357,7 @@ typedef struct xfs_item_ops { | |||
451 | #define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) | 357 | #define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) |
452 | #define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) | 358 | #define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) |
453 | #define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) | 359 | #define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) |
454 | #define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip) | 360 | #define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove) |
455 | #define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp) | ||
456 | #define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) | 361 | #define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) |
457 | #define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) | 362 | #define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) |
458 | #define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) | 363 | #define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) |
@@ -516,8 +421,7 @@ typedef struct xfs_trans { | |||
516 | int64_t t_rblocks_delta;/* superblock rblocks change */ | 421 | int64_t t_rblocks_delta;/* superblock rblocks change */ |
517 | int64_t t_rextents_delta;/* superblocks rextents chg */ | 422 | int64_t t_rextents_delta;/* superblocks rextents chg */ |
518 | int64_t t_rextslog_delta;/* superblocks rextslog chg */ | 423 | int64_t t_rextslog_delta;/* superblocks rextslog chg */ |
519 | unsigned int t_items_free; /* log item descs free */ | 424 | struct list_head t_items; /* log item descriptors */ |
520 | xfs_log_item_chunk_t t_items; /* first log item desc chunk */ | ||
521 | xfs_trans_header_t t_header; /* header for in-log trans */ | 425 | xfs_trans_header_t t_header; /* header for in-log trans */ |
522 | struct list_head t_busy; /* list of busy extents */ | 426 | struct list_head t_busy; /* list of busy extents */ |
523 | unsigned long t_pflags; /* saved process flags state */ | 427 | unsigned long t_pflags; /* saved process flags state */ |
@@ -569,8 +473,8 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); | |||
569 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); | 473 | void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); |
570 | int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, | 474 | int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, |
571 | xfs_ino_t , uint, uint, struct xfs_inode **); | 475 | xfs_ino_t , uint, uint, struct xfs_inode **); |
572 | void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint); | 476 | void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); |
573 | void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *); | 477 | void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *); |
574 | void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); | 478 | void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint); |
575 | void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint); | 479 | void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint); |
576 | struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint); | 480 | struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint); |
@@ -595,6 +499,7 @@ int xfs_trans_ail_init(struct xfs_mount *); | |||
595 | void xfs_trans_ail_destroy(struct xfs_mount *); | 499 | void xfs_trans_ail_destroy(struct xfs_mount *); |
596 | 500 | ||
597 | extern kmem_zone_t *xfs_trans_zone; | 501 | extern kmem_zone_t *xfs_trans_zone; |
502 | extern kmem_zone_t *xfs_log_item_desc_zone; | ||
598 | 503 | ||
599 | #endif /* __KERNEL__ */ | 504 | #endif /* __KERNEL__ */ |
600 | 505 | ||
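With xfs_log_item_desc reduced to the structure above (a list_head instead of chunk bookkeeping), every walk over a transaction's items in this patch collapses into the same loop shape; counting log vectors, for instance, becomes roughly:

    struct xfs_log_item_desc *lidp;
    int nvecs = 1;                           /* one iovec for the transaction header */

    list_for_each_entry(lidp, &tp->t_items, lid_trans) {
        if (!(lidp->lid_flags & XFS_LID_DIRTY))
            continue;                        /* clean items log nothing */
        lidp->lid_size = IOP_SIZE(lidp->lid_item);
        nvecs += lidp->lid_size;
    }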
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index e799824f7245..dc9069568ff7 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dmapi.h" | ||
28 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
29 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
30 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 63d81a22f4fd..90af025e6839 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c | |||
@@ -24,14 +24,10 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_buf_item.h" | 33 | #include "xfs_buf_item.h" |
@@ -51,36 +47,17 @@ xfs_trans_buf_item_match( | |||
51 | xfs_daddr_t blkno, | 47 | xfs_daddr_t blkno, |
52 | int len) | 48 | int len) |
53 | { | 49 | { |
54 | xfs_log_item_chunk_t *licp; | 50 | struct xfs_log_item_desc *lidp; |
55 | xfs_log_item_desc_t *lidp; | 51 | struct xfs_buf_log_item *blip; |
56 | xfs_buf_log_item_t *blip; | ||
57 | int i; | ||
58 | 52 | ||
59 | len = BBTOB(len); | 53 | len = BBTOB(len); |
60 | for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) { | 54 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
61 | if (xfs_lic_are_all_free(licp)) { | 55 | blip = (struct xfs_buf_log_item *)lidp->lid_item; |
62 | ASSERT(licp == &tp->t_items); | 56 | if (blip->bli_item.li_type == XFS_LI_BUF && |
63 | ASSERT(licp->lic_next == NULL); | 57 | XFS_BUF_TARGET(blip->bli_buf) == target && |
64 | return NULL; | 58 | XFS_BUF_ADDR(blip->bli_buf) == blkno && |
65 | } | 59 | XFS_BUF_COUNT(blip->bli_buf) == len) |
66 | 60 | return blip->bli_buf; | |
67 | for (i = 0; i < licp->lic_unused; i++) { | ||
68 | /* | ||
69 | * Skip unoccupied slots. | ||
70 | */ | ||
71 | if (xfs_lic_isfree(licp, i)) | ||
72 | continue; | ||
73 | |||
74 | lidp = xfs_lic_slot(licp, i); | ||
75 | blip = (xfs_buf_log_item_t *)lidp->lid_item; | ||
76 | if (blip->bli_item.li_type != XFS_LI_BUF) | ||
77 | continue; | ||
78 | |||
79 | if (XFS_BUF_TARGET(blip->bli_buf) == target && | ||
80 | XFS_BUF_ADDR(blip->bli_buf) == blkno && | ||
81 | XFS_BUF_COUNT(blip->bli_buf) == len) | ||
82 | return blip->bli_buf; | ||
83 | } | ||
84 | } | 61 | } |
85 | 62 | ||
86 | return NULL; | 63 | return NULL; |
@@ -127,7 +104,7 @@ _xfs_trans_bjoin( | |||
127 | /* | 104 | /* |
128 | * Get a log_item_desc to point at the new item. | 105 | * Get a log_item_desc to point at the new item. |
129 | */ | 106 | */ |
130 | (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip); | 107 | xfs_trans_add_item(tp, &bip->bli_item); |
131 | 108 | ||
132 | /* | 109 | /* |
133 | * Initialize b_fsprivate2 so we can find it with incore_match() | 110 | * Initialize b_fsprivate2 so we can find it with incore_match() |
@@ -483,7 +460,6 @@ xfs_trans_brelse(xfs_trans_t *tp, | |||
483 | { | 460 | { |
484 | xfs_buf_log_item_t *bip; | 461 | xfs_buf_log_item_t *bip; |
485 | xfs_log_item_t *lip; | 462 | xfs_log_item_t *lip; |
486 | xfs_log_item_desc_t *lidp; | ||
487 | 463 | ||
488 | /* | 464 | /* |
489 | * Default to a normal brelse() call if the tp is NULL. | 465 | * Default to a normal brelse() call if the tp is NULL. |
@@ -514,13 +490,6 @@ xfs_trans_brelse(xfs_trans_t *tp, | |||
514 | ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); | 490 | ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); |
515 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 491 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
516 | 492 | ||
517 | /* | ||
518 | * Find the item descriptor pointing to this buffer's | ||
519 | * log item. It must be there. | ||
520 | */ | ||
521 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); | ||
522 | ASSERT(lidp != NULL); | ||
523 | |||
524 | trace_xfs_trans_brelse(bip); | 493 | trace_xfs_trans_brelse(bip); |
525 | 494 | ||
526 | /* | 495 | /* |
@@ -536,7 +505,7 @@ xfs_trans_brelse(xfs_trans_t *tp, | |||
536 | * If the buffer is dirty within this transaction, we can't | 505 | * If the buffer is dirty within this transaction, we can't |
537 | * release it until we commit. | 506 | * release it until we commit. |
538 | */ | 507 | */ |
539 | if (lidp->lid_flags & XFS_LID_DIRTY) | 508 | if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY) |
540 | return; | 509 | return; |
541 | 510 | ||
542 | /* | 511 | /* |
@@ -553,7 +522,7 @@ xfs_trans_brelse(xfs_trans_t *tp, | |||
553 | /* | 522 | /* |
554 | * Free up the log item descriptor tracking the released item. | 523 | * Free up the log item descriptor tracking the released item. |
555 | */ | 524 | */ |
556 | xfs_trans_free_item(tp, lidp); | 525 | xfs_trans_del_item(&bip->bli_item); |
557 | 526 | ||
558 | /* | 527 | /* |
559 | * Clear the hold flag in the buf log item if it is set. | 528 | * Clear the hold flag in the buf log item if it is set. |
@@ -665,7 +634,6 @@ xfs_trans_log_buf(xfs_trans_t *tp, | |||
665 | uint last) | 634 | uint last) |
666 | { | 635 | { |
667 | xfs_buf_log_item_t *bip; | 636 | xfs_buf_log_item_t *bip; |
668 | xfs_log_item_desc_t *lidp; | ||
669 | 637 | ||
670 | ASSERT(XFS_BUF_ISBUSY(bp)); | 638 | ASSERT(XFS_BUF_ISBUSY(bp)); |
671 | ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); | 639 | ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); |
@@ -690,7 +658,7 @@ xfs_trans_log_buf(xfs_trans_t *tp, | |||
690 | bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); | 658 | bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); |
691 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 659 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
692 | XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); | 660 | XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); |
693 | bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone; | 661 | bip->bli_item.li_cb = xfs_buf_iodone; |
694 | 662 | ||
695 | trace_xfs_trans_log_buf(bip); | 663 | trace_xfs_trans_log_buf(bip); |
696 | 664 | ||
@@ -707,11 +675,8 @@ xfs_trans_log_buf(xfs_trans_t *tp, | |||
707 | bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL; | 675 | bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL; |
708 | } | 676 | } |
709 | 677 | ||
710 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); | ||
711 | ASSERT(lidp != NULL); | ||
712 | |||
713 | tp->t_flags |= XFS_TRANS_DIRTY; | 678 | tp->t_flags |= XFS_TRANS_DIRTY; |
714 | lidp->lid_flags |= XFS_LID_DIRTY; | 679 | bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
715 | bip->bli_flags |= XFS_BLI_LOGGED; | 680 | bip->bli_flags |= XFS_BLI_LOGGED; |
716 | xfs_buf_item_log(bip, first, last); | 681 | xfs_buf_item_log(bip, first, last); |
717 | } | 682 | } |
@@ -740,7 +705,6 @@ xfs_trans_binval( | |||
740 | xfs_trans_t *tp, | 705 | xfs_trans_t *tp, |
741 | xfs_buf_t *bp) | 706 | xfs_buf_t *bp) |
742 | { | 707 | { |
743 | xfs_log_item_desc_t *lidp; | ||
744 | xfs_buf_log_item_t *bip; | 708 | xfs_buf_log_item_t *bip; |
745 | 709 | ||
746 | ASSERT(XFS_BUF_ISBUSY(bp)); | 710 | ASSERT(XFS_BUF_ISBUSY(bp)); |
@@ -748,8 +712,6 @@ xfs_trans_binval( | |||
748 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); | 712 | ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); |
749 | 713 | ||
750 | bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); | 714 | bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); |
751 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip); | ||
752 | ASSERT(lidp != NULL); | ||
753 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 715 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
754 | 716 | ||
755 | trace_xfs_trans_binval(bip); | 717 | trace_xfs_trans_binval(bip); |
@@ -764,7 +726,7 @@ xfs_trans_binval( | |||
764 | ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY))); | 726 | ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY))); |
765 | ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF)); | 727 | ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF)); |
766 | ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); | 728 | ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); |
767 | ASSERT(lidp->lid_flags & XFS_LID_DIRTY); | 729 | ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY); |
768 | ASSERT(tp->t_flags & XFS_TRANS_DIRTY); | 730 | ASSERT(tp->t_flags & XFS_TRANS_DIRTY); |
769 | return; | 731 | return; |
770 | } | 732 | } |
@@ -797,7 +759,7 @@ xfs_trans_binval( | |||
797 | bip->bli_format.blf_flags |= XFS_BLF_CANCEL; | 759 | bip->bli_format.blf_flags |= XFS_BLF_CANCEL; |
798 | memset((char *)(bip->bli_format.blf_data_map), 0, | 760 | memset((char *)(bip->bli_format.blf_data_map), 0, |
799 | (bip->bli_format.blf_map_size * sizeof(uint))); | 761 | (bip->bli_format.blf_map_size * sizeof(uint))); |
800 | lidp->lid_flags |= XFS_LID_DIRTY; | 762 | bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
801 | tp->t_flags |= XFS_TRANS_DIRTY; | 763 | tp->t_flags |= XFS_TRANS_DIRTY; |
802 | } | 764 | } |
803 | 765 | ||
@@ -853,12 +815,9 @@ xfs_trans_stale_inode_buf( | |||
853 | ASSERT(atomic_read(&bip->bli_refcount) > 0); | 815 | ASSERT(atomic_read(&bip->bli_refcount) > 0); |
854 | 816 | ||
855 | bip->bli_flags |= XFS_BLI_STALE_INODE; | 817 | bip->bli_flags |= XFS_BLI_STALE_INODE; |
856 | bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) | 818 | bip->bli_item.li_cb = xfs_buf_iodone; |
857 | xfs_buf_iodone; | ||
858 | } | 819 | } |
859 | 820 | ||
860 | |||
861 | |||
862 | /* | 821 | /* |
863 | * Mark the buffer as being one which contains newly allocated | 822 | * Mark the buffer as being one which contains newly allocated |
864 | * inodes. We need to make sure that even if this buffer is | 823 | * inodes. We need to make sure that even if this buffer is |
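
In xfs_trans_buf_item_match() above, the nested chunk-and-slot scan collapses into one walk over the transaction's item list, keeping the same match test on buffer target, block number and length. A rough stand-in for that predicate is sketched below; it uses a plain array instead of the kernel's list_for_each_entry() machinery, and all demo_ names are made up for illustration.

#include <stddef.h>
#include <stdio.h>

#define DEMO_LI_BUF 0x123c  /* stands in for XFS_LI_BUF */

struct demo_buf {
        int    b_target;   /* which device the buffer belongs to */
        long   b_blkno;    /* starting disk block */
        size_t b_count;    /* length in bytes */
};

struct demo_buf_log_item {
        unsigned int     li_type;
        struct demo_buf *bli_buf;
};

/* Walk every buf log item joined to the transaction and return the buffer
 * matching target/blkno/len, or NULL -- the same shape as the new loop
 * in the hunk above, minus the list plumbing. */
static struct demo_buf *
demo_buf_item_match(struct demo_buf_log_item *items, int nitems,
                    int target, long blkno, size_t len)
{
        int i;

        for (i = 0; i < nitems; i++) {
                struct demo_buf_log_item *blip = &items[i];

                if (blip->li_type == DEMO_LI_BUF &&
                    blip->bli_buf->b_target == target &&
                    blip->bli_buf->b_blkno == blkno &&
                    blip->bli_buf->b_count == len)
                        return blip->bli_buf;
        }
        return NULL;
}

int main(void)
{
        struct demo_buf a = { 0, 128, 4096 }, b = { 0, 256, 4096 };
        struct demo_buf_log_item items[] = {
                { DEMO_LI_BUF, &a }, { DEMO_LI_BUF, &b },
        };

        printf("match at blkno 256: %s\n",
               demo_buf_item_match(items, 2, 0, 256, 4096) == &b ? "yes" : "no");
        return 0;
}
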
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c index 27cce2a9c7e9..f783d5e9fa70 100644 --- a/fs/xfs/xfs_trans_extfree.c +++ b/fs/xfs/xfs_trans_extfree.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include "xfs_trans.h" | 23 | #include "xfs_trans.h" |
24 | #include "xfs_sb.h" | 24 | #include "xfs_sb.h" |
25 | #include "xfs_ag.h" | 25 | #include "xfs_ag.h" |
26 | #include "xfs_dmapi.h" | ||
27 | #include "xfs_mount.h" | 26 | #include "xfs_mount.h" |
28 | #include "xfs_trans_priv.h" | 27 | #include "xfs_trans_priv.h" |
29 | #include "xfs_extfree_item.h" | 28 | #include "xfs_extfree_item.h" |
@@ -49,9 +48,8 @@ xfs_trans_get_efi(xfs_trans_t *tp, | |||
49 | /* | 48 | /* |
50 | * Get a log_item_desc to point at the new item. | 49 | * Get a log_item_desc to point at the new item. |
51 | */ | 50 | */ |
52 | (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip); | 51 | xfs_trans_add_item(tp, &efip->efi_item); |
53 | 52 | return efip; | |
54 | return (efip); | ||
55 | } | 53 | } |
56 | 54 | ||
57 | /* | 55 | /* |
@@ -65,15 +63,11 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp, | |||
65 | xfs_fsblock_t start_block, | 63 | xfs_fsblock_t start_block, |
66 | xfs_extlen_t ext_len) | 64 | xfs_extlen_t ext_len) |
67 | { | 65 | { |
68 | xfs_log_item_desc_t *lidp; | ||
69 | uint next_extent; | 66 | uint next_extent; |
70 | xfs_extent_t *extp; | 67 | xfs_extent_t *extp; |
71 | 68 | ||
72 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip); | ||
73 | ASSERT(lidp != NULL); | ||
74 | |||
75 | tp->t_flags |= XFS_TRANS_DIRTY; | 69 | tp->t_flags |= XFS_TRANS_DIRTY; |
76 | lidp->lid_flags |= XFS_LID_DIRTY; | 70 | efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
77 | 71 | ||
78 | next_extent = efip->efi_next_extent; | 72 | next_extent = efip->efi_next_extent; |
79 | ASSERT(next_extent < efip->efi_format.efi_nextents); | 73 | ASSERT(next_extent < efip->efi_format.efi_nextents); |
@@ -106,9 +100,8 @@ xfs_trans_get_efd(xfs_trans_t *tp, | |||
106 | /* | 100 | /* |
107 | * Get a log_item_desc to point at the new item. | 101 | * Get a log_item_desc to point at the new item. |
108 | */ | 102 | */ |
109 | (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp); | 103 | xfs_trans_add_item(tp, &efdp->efd_item); |
110 | 104 | return efdp; | |
111 | return (efdp); | ||
112 | } | 105 | } |
113 | 106 | ||
114 | /* | 107 | /* |
@@ -122,15 +115,11 @@ xfs_trans_log_efd_extent(xfs_trans_t *tp, | |||
122 | xfs_fsblock_t start_block, | 115 | xfs_fsblock_t start_block, |
123 | xfs_extlen_t ext_len) | 116 | xfs_extlen_t ext_len) |
124 | { | 117 | { |
125 | xfs_log_item_desc_t *lidp; | ||
126 | uint next_extent; | 118 | uint next_extent; |
127 | xfs_extent_t *extp; | 119 | xfs_extent_t *extp; |
128 | 120 | ||
129 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp); | ||
130 | ASSERT(lidp != NULL); | ||
131 | |||
132 | tp->t_flags |= XFS_TRANS_DIRTY; | 121 | tp->t_flags |= XFS_TRANS_DIRTY; |
133 | lidp->lid_flags |= XFS_LID_DIRTY; | 122 | efdp->efd_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
134 | 123 | ||
135 | next_extent = efdp->efd_next_extent; | 124 | next_extent = efdp->efd_next_extent; |
136 | ASSERT(next_extent < efdp->efd_format.efd_nextents); | 125 | ASSERT(next_extent < efdp->efd_format.efd_nextents); |
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 785ff101da0a..cdc53a1050c5 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c | |||
@@ -24,20 +24,16 @@ | |||
24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | ||
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 27 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 28 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_alloc_btree.h" | 29 | #include "xfs_alloc_btree.h" |
32 | #include "xfs_ialloc_btree.h" | 30 | #include "xfs_ialloc_btree.h" |
33 | #include "xfs_dir2_sf.h" | ||
34 | #include "xfs_attr_sf.h" | ||
35 | #include "xfs_dinode.h" | 31 | #include "xfs_dinode.h" |
36 | #include "xfs_inode.h" | 32 | #include "xfs_inode.h" |
37 | #include "xfs_btree.h" | 33 | #include "xfs_btree.h" |
38 | #include "xfs_ialloc.h" | ||
39 | #include "xfs_trans_priv.h" | 34 | #include "xfs_trans_priv.h" |
40 | #include "xfs_inode_item.h" | 35 | #include "xfs_inode_item.h" |
36 | #include "xfs_trace.h" | ||
41 | 37 | ||
42 | #ifdef XFS_TRANS_DEBUG | 38 | #ifdef XFS_TRANS_DEBUG |
43 | STATIC void | 39 | STATIC void |
@@ -47,7 +43,6 @@ xfs_trans_inode_broot_debug( | |||
47 | #define xfs_trans_inode_broot_debug(ip) | 43 | #define xfs_trans_inode_broot_debug(ip) |
48 | #endif | 44 | #endif |
49 | 45 | ||
50 | |||
51 | /* | 46 | /* |
52 | * Get an inode and join it to the transaction. | 47 | * Get an inode and join it to the transaction. |
53 | */ | 48 | */ |
@@ -62,78 +57,66 @@ xfs_trans_iget( | |||
62 | { | 57 | { |
63 | int error; | 58 | int error; |
64 | 59 | ||
65 | error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0); | 60 | error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp); |
66 | if (!error && tp) | 61 | if (!error && tp) { |
67 | xfs_trans_ijoin(tp, *ipp, lock_flags); | 62 | xfs_trans_ijoin(tp, *ipp); |
63 | (*ipp)->i_itemp->ili_lock_flags = lock_flags; | ||
64 | } | ||
68 | return error; | 65 | return error; |
69 | } | 66 | } |
70 | 67 | ||
71 | /* | 68 | /* |
72 | * Add the locked inode to the transaction. | 69 | * Add a locked inode to the transaction. |
73 | * The inode must be locked, and it cannot be associated with any | 70 | * |
74 | * transaction. The caller must specify the locks already held | 71 | * The inode must be locked, and it cannot be associated with any transaction. |
75 | * on the inode. | ||
76 | */ | 72 | */ |
77 | void | 73 | void |
78 | xfs_trans_ijoin( | 74 | xfs_trans_ijoin( |
79 | xfs_trans_t *tp, | 75 | struct xfs_trans *tp, |
80 | xfs_inode_t *ip, | 76 | struct xfs_inode *ip) |
81 | uint lock_flags) | ||
82 | { | 77 | { |
83 | xfs_inode_log_item_t *iip; | 78 | xfs_inode_log_item_t *iip; |
84 | 79 | ||
85 | ASSERT(ip->i_transp == NULL); | 80 | ASSERT(ip->i_transp == NULL); |
86 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 81 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
87 | ASSERT(lock_flags & XFS_ILOCK_EXCL); | ||
88 | if (ip->i_itemp == NULL) | 82 | if (ip->i_itemp == NULL) |
89 | xfs_inode_item_init(ip, ip->i_mount); | 83 | xfs_inode_item_init(ip, ip->i_mount); |
90 | iip = ip->i_itemp; | 84 | iip = ip->i_itemp; |
91 | ASSERT(iip->ili_flags == 0); | 85 | ASSERT(iip->ili_lock_flags == 0); |
92 | 86 | ||
93 | /* | 87 | /* |
94 | * Get a log_item_desc to point at the new item. | 88 | * Get a log_item_desc to point at the new item. |
95 | */ | 89 | */ |
96 | (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip)); | 90 | xfs_trans_add_item(tp, &iip->ili_item); |
97 | 91 | ||
98 | xfs_trans_inode_broot_debug(ip); | 92 | xfs_trans_inode_broot_debug(ip); |
99 | 93 | ||
100 | /* | 94 | /* |
101 | * If the IO lock is already held, mark that in the inode log item. | ||
102 | */ | ||
103 | if (lock_flags & XFS_IOLOCK_EXCL) { | ||
104 | iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL; | ||
105 | } else if (lock_flags & XFS_IOLOCK_SHARED) { | ||
106 | iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Initialize i_transp so we can find it with xfs_inode_incore() | 95 | * Initialize i_transp so we can find it with xfs_inode_incore() |
111 | * in xfs_trans_iget() above. | 96 | * in xfs_trans_iget() above. |
112 | */ | 97 | */ |
113 | ip->i_transp = tp; | 98 | ip->i_transp = tp; |
114 | } | 99 | } |
115 | 100 | ||
116 | |||
117 | |||
118 | /* | 101 | /* |
119 | * Mark the inode as not needing to be unlocked when the inode item's | 102 | * Add a locked inode to the transaction. |
120 | * IOP_UNLOCK() routine is called. The inode must already be locked | 103 | * |
121 | * and associated with the given transaction. | 104 | * |
105 | * Grabs a reference to the inode which will be dropped when the transaction | ||
106 | * is committed. The inode will also be unlocked at that point. The inode | ||
107 | * must be locked, and it cannot be associated with any transaction. | ||
122 | */ | 108 | */ |
123 | /*ARGSUSED*/ | ||
124 | void | 109 | void |
125 | xfs_trans_ihold( | 110 | xfs_trans_ijoin_ref( |
126 | xfs_trans_t *tp, | 111 | struct xfs_trans *tp, |
127 | xfs_inode_t *ip) | 112 | struct xfs_inode *ip, |
113 | uint lock_flags) | ||
128 | { | 114 | { |
129 | ASSERT(ip->i_transp == tp); | 115 | xfs_trans_ijoin(tp, ip); |
130 | ASSERT(ip->i_itemp != NULL); | 116 | IHOLD(ip); |
131 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 117 | ip->i_itemp->ili_lock_flags = lock_flags; |
132 | |||
133 | ip->i_itemp->ili_flags |= XFS_ILI_HOLD; | ||
134 | } | 118 | } |
135 | 119 | ||
136 | |||
137 | /* | 120 | /* |
138 | * This is called to mark the fields indicated in fieldmask as needing | 121 | * This is called to mark the fields indicated in fieldmask as needing |
139 | * to be logged when the transaction is committed. The inode must | 122 | * to be logged when the transaction is committed. The inode must |
@@ -149,17 +132,12 @@ xfs_trans_log_inode( | |||
149 | xfs_inode_t *ip, | 132 | xfs_inode_t *ip, |
150 | uint flags) | 133 | uint flags) |
151 | { | 134 | { |
152 | xfs_log_item_desc_t *lidp; | ||
153 | |||
154 | ASSERT(ip->i_transp == tp); | 135 | ASSERT(ip->i_transp == tp); |
155 | ASSERT(ip->i_itemp != NULL); | 136 | ASSERT(ip->i_itemp != NULL); |
156 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | 137 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
157 | 138 | ||
158 | lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp)); | ||
159 | ASSERT(lidp != NULL); | ||
160 | |||
161 | tp->t_flags |= XFS_TRANS_DIRTY; | 139 | tp->t_flags |= XFS_TRANS_DIRTY; |
162 | lidp->lid_flags |= XFS_LID_DIRTY; | 140 | ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY; |
163 | 141 | ||
164 | /* | 142 | /* |
165 | * Always OR in the bits from the ili_last_fields field. | 143 | * Always OR in the bits from the ili_last_fields field. |
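
The xfs_trans_inode.c hunks split inode joining in two: the slimmed-down xfs_trans_ijoin() only attaches an already-locked inode, while the new xfs_trans_ijoin_ref() also takes an inode reference and records the held lock flags so they can be dropped when the transaction commits, replacing the old xfs_trans_ihold() dance. The sketch below mirrors that relationship with invented demo_ types; it illustrates the pattern, not the kernel implementation.

#include <assert.h>
#include <stdio.h>

#define DEMO_ILOCK_EXCL 0x1

struct demo_inode {
        int          i_refcount;       /* stands in for the VFS reference */
        unsigned int ili_lock_flags;   /* locks to release at commit time */
        void        *i_transp;         /* transaction this inode is joined to */
};

struct demo_trans { const char *t_name; };

/* Attach an already-locked inode to the transaction; nothing else. */
static void demo_trans_ijoin(struct demo_trans *tp, struct demo_inode *ip)
{
        assert(ip->i_transp == NULL);
        ip->i_transp = tp;
}

/* Attach the inode *and* hold a reference plus the lock flags, so the
 * commit path can unlock and release it -- the xfs_trans_ijoin_ref() idea. */
static void demo_trans_ijoin_ref(struct demo_trans *tp, struct demo_inode *ip,
                                 unsigned int lock_flags)
{
        demo_trans_ijoin(tp, ip);
        ip->i_refcount++;              /* IHOLD() equivalent */
        ip->ili_lock_flags = lock_flags;
}

int main(void)
{
        struct demo_trans tp = { "demo" };
        struct demo_inode ip = { 1, 0, NULL };

        demo_trans_ijoin_ref(&tp, &ip, DEMO_ILOCK_EXCL);
        printf("refs=%d lock_flags=%#x joined=%s\n",
               ip.i_refcount, ip.ili_lock_flags,
               ip.i_transp == &tp ? "yes" : "no");
        return 0;
}
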
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c deleted file mode 100644 index f11d37d06dcc..000000000000 --- a/fs/xfs/xfs_trans_item.c +++ /dev/null | |||
@@ -1,441 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it would be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write the Free Software Foundation, | ||
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | #include "xfs.h" | ||
19 | #include "xfs_fs.h" | ||
20 | #include "xfs_types.h" | ||
21 | #include "xfs_log.h" | ||
22 | #include "xfs_inum.h" | ||
23 | #include "xfs_trans.h" | ||
24 | #include "xfs_trans_priv.h" | ||
25 | /* XXX: from here down needed until struct xfs_trans has its own ailp */ | ||
26 | #include "xfs_bit.h" | ||
27 | #include "xfs_buf_item.h" | ||
28 | #include "xfs_sb.h" | ||
29 | #include "xfs_ag.h" | ||
30 | #include "xfs_dir2.h" | ||
31 | #include "xfs_dmapi.h" | ||
32 | #include "xfs_mount.h" | ||
33 | |||
34 | STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, | ||
35 | int, int, xfs_lsn_t); | ||
36 | |||
37 | /* | ||
38 | * This is called to add the given log item to the transaction's | ||
39 | * list of log items. It must find a free log item descriptor | ||
40 | * or allocate a new one and add the item to that descriptor. | ||
41 | * The function returns a pointer to item descriptor used to point | ||
42 | * to the new item. The log item will now point to its new descriptor | ||
43 | * with its li_desc field. | ||
44 | */ | ||
45 | xfs_log_item_desc_t * | ||
46 | xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) | ||
47 | { | ||
48 | xfs_log_item_desc_t *lidp; | ||
49 | xfs_log_item_chunk_t *licp; | ||
50 | int i=0; | ||
51 | |||
52 | /* | ||
53 | * If there are no free descriptors, allocate a new chunk | ||
54 | * of them and put it at the front of the chunk list. | ||
55 | */ | ||
56 | if (tp->t_items_free == 0) { | ||
57 | licp = (xfs_log_item_chunk_t*) | ||
58 | kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP); | ||
59 | ASSERT(licp != NULL); | ||
60 | /* | ||
61 | * Initialize the chunk, and then | ||
62 | * claim the first slot in the newly allocated chunk. | ||
63 | */ | ||
64 | xfs_lic_init(licp); | ||
65 | xfs_lic_claim(licp, 0); | ||
66 | licp->lic_unused = 1; | ||
67 | xfs_lic_init_slot(licp, 0); | ||
68 | lidp = xfs_lic_slot(licp, 0); | ||
69 | |||
70 | /* | ||
71 | * Link in the new chunk and update the free count. | ||
72 | */ | ||
73 | licp->lic_next = tp->t_items.lic_next; | ||
74 | tp->t_items.lic_next = licp; | ||
75 | tp->t_items_free = XFS_LIC_NUM_SLOTS - 1; | ||
76 | |||
77 | /* | ||
78 | * Initialize the descriptor and the generic portion | ||
79 | * of the log item. | ||
80 | * | ||
81 | * Point the new slot at this item and return it. | ||
82 | * Also point the log item at its currently active | ||
83 | * descriptor and set the item's mount pointer. | ||
84 | */ | ||
85 | lidp->lid_item = lip; | ||
86 | lidp->lid_flags = 0; | ||
87 | lidp->lid_size = 0; | ||
88 | lip->li_desc = lidp; | ||
89 | lip->li_mountp = tp->t_mountp; | ||
90 | lip->li_ailp = tp->t_mountp->m_ail; | ||
91 | return lidp; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Find the free descriptor. It is somewhere in the chunklist | ||
96 | * of descriptors. | ||
97 | */ | ||
98 | licp = &tp->t_items; | ||
99 | while (licp != NULL) { | ||
100 | if (xfs_lic_vacancy(licp)) { | ||
101 | if (licp->lic_unused <= XFS_LIC_MAX_SLOT) { | ||
102 | i = licp->lic_unused; | ||
103 | ASSERT(xfs_lic_isfree(licp, i)); | ||
104 | break; | ||
105 | } | ||
106 | for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) { | ||
107 | if (xfs_lic_isfree(licp, i)) | ||
108 | break; | ||
109 | } | ||
110 | ASSERT(i <= XFS_LIC_MAX_SLOT); | ||
111 | break; | ||
112 | } | ||
113 | licp = licp->lic_next; | ||
114 | } | ||
115 | ASSERT(licp != NULL); | ||
116 | /* | ||
117 | * If we find a free descriptor, claim it, | ||
118 | * initialize it, and return it. | ||
119 | */ | ||
120 | xfs_lic_claim(licp, i); | ||
121 | if (licp->lic_unused <= i) { | ||
122 | licp->lic_unused = i + 1; | ||
123 | xfs_lic_init_slot(licp, i); | ||
124 | } | ||
125 | lidp = xfs_lic_slot(licp, i); | ||
126 | tp->t_items_free--; | ||
127 | lidp->lid_item = lip; | ||
128 | lidp->lid_flags = 0; | ||
129 | lidp->lid_size = 0; | ||
130 | lip->li_desc = lidp; | ||
131 | lip->li_mountp = tp->t_mountp; | ||
132 | lip->li_ailp = tp->t_mountp->m_ail; | ||
133 | return lidp; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Free the given descriptor. | ||
138 | * | ||
139 | * This requires setting the bit in the chunk's free mask corresponding | ||
140 | * to the given slot. | ||
141 | */ | ||
142 | void | ||
143 | xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | ||
144 | { | ||
145 | uint slot; | ||
146 | xfs_log_item_chunk_t *licp; | ||
147 | xfs_log_item_chunk_t **licpp; | ||
148 | |||
149 | slot = xfs_lic_desc_to_slot(lidp); | ||
150 | licp = xfs_lic_desc_to_chunk(lidp); | ||
151 | xfs_lic_relse(licp, slot); | ||
152 | lidp->lid_item->li_desc = NULL; | ||
153 | tp->t_items_free++; | ||
154 | |||
155 | /* | ||
156 | * If there are no more used items in the chunk and this is not | ||
157 | * the chunk embedded in the transaction structure, then free | ||
158 | * the chunk. First pull it from the chunk list and then | ||
159 | * free it back to the heap. We didn't bother with a doubly | ||
160 | * linked list here because the lists should be very short | ||
161 | * and this is not a performance path. It's better to save | ||
162 | * the memory of the extra pointer. | ||
163 | * | ||
164 | * Also decrement the transaction structure's count of free items | ||
165 | * by the number in a chunk since we are freeing an empty chunk. | ||
166 | */ | ||
167 | if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) { | ||
168 | licpp = &(tp->t_items.lic_next); | ||
169 | while (*licpp != licp) { | ||
170 | ASSERT(*licpp != NULL); | ||
171 | licpp = &((*licpp)->lic_next); | ||
172 | } | ||
173 | *licpp = licp->lic_next; | ||
174 | kmem_free(licp); | ||
175 | tp->t_items_free -= XFS_LIC_NUM_SLOTS; | ||
176 | } | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * This is called to find the descriptor corresponding to the given | ||
181 | * log item. It returns a pointer to the descriptor. | ||
182 | * The log item MUST have a corresponding descriptor in the given | ||
183 | * transaction. This routine does not return NULL, it panics. | ||
184 | * | ||
185 | * The descriptor pointer is kept in the log item's li_desc field. | ||
186 | * Just return it. | ||
187 | */ | ||
188 | /*ARGSUSED*/ | ||
189 | xfs_log_item_desc_t * | ||
190 | xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip) | ||
191 | { | ||
192 | ASSERT(lip->li_desc != NULL); | ||
193 | |||
194 | return lip->li_desc; | ||
195 | } | ||
196 | |||
197 | |||
198 | /* | ||
199 | * Return a pointer to the first descriptor in the chunk list. | ||
200 | * This does not return NULL if there are none, it panics. | ||
201 | * | ||
202 | * The first descriptor must be in either the first or second chunk. | ||
203 | * This is because the only chunk allowed to be empty is the first. | ||
204 | * All others are freed when they become empty. | ||
205 | * | ||
206 | * At some point this and xfs_trans_next_item() should be optimized | ||
207 | * to quickly look at the mask to determine if there is anything to | ||
208 | * look at. | ||
209 | */ | ||
210 | xfs_log_item_desc_t * | ||
211 | xfs_trans_first_item(xfs_trans_t *tp) | ||
212 | { | ||
213 | xfs_log_item_chunk_t *licp; | ||
214 | int i; | ||
215 | |||
216 | licp = &tp->t_items; | ||
217 | /* | ||
218 | * If it's not in the first chunk, skip to the second. | ||
219 | */ | ||
220 | if (xfs_lic_are_all_free(licp)) { | ||
221 | licp = licp->lic_next; | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Return the first non-free descriptor in the chunk. | ||
226 | */ | ||
227 | ASSERT(!xfs_lic_are_all_free(licp)); | ||
228 | for (i = 0; i < licp->lic_unused; i++) { | ||
229 | if (xfs_lic_isfree(licp, i)) { | ||
230 | continue; | ||
231 | } | ||
232 | |||
233 | return xfs_lic_slot(licp, i); | ||
234 | } | ||
235 | cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item"); | ||
236 | return NULL; | ||
237 | } | ||
238 | |||
239 | |||
240 | /* | ||
241 | * Given a descriptor, return the next descriptor in the chunk list. | ||
242 | * This returns NULL if there are no more used descriptors in the list. | ||
243 | * | ||
244 | * We do this by first locating the chunk in which the descriptor resides, | ||
245 | * and then scanning forward in the chunk and the list for the next | ||
246 | * used descriptor. | ||
247 | */ | ||
248 | /*ARGSUSED*/ | ||
249 | xfs_log_item_desc_t * | ||
250 | xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp) | ||
251 | { | ||
252 | xfs_log_item_chunk_t *licp; | ||
253 | int i; | ||
254 | |||
255 | licp = xfs_lic_desc_to_chunk(lidp); | ||
256 | |||
257 | /* | ||
258 | * First search the rest of the chunk. The for loop keeps us | ||
259 | * from referencing things beyond the end of the chunk. | ||
260 | */ | ||
261 | for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) { | ||
262 | if (xfs_lic_isfree(licp, i)) { | ||
263 | continue; | ||
264 | } | ||
265 | |||
266 | return xfs_lic_slot(licp, i); | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * Now search the next chunk. It must be there, because the | ||
271 | * next chunk would have been freed if it were empty. | ||
272 | * If there is no next chunk, return NULL. | ||
273 | */ | ||
274 | if (licp->lic_next == NULL) { | ||
275 | return NULL; | ||
276 | } | ||
277 | |||
278 | licp = licp->lic_next; | ||
279 | ASSERT(!xfs_lic_are_all_free(licp)); | ||
280 | for (i = 0; i < licp->lic_unused; i++) { | ||
281 | if (xfs_lic_isfree(licp, i)) { | ||
282 | continue; | ||
283 | } | ||
284 | |||
285 | return xfs_lic_slot(licp, i); | ||
286 | } | ||
287 | ASSERT(0); | ||
288 | /* NOTREACHED */ | ||
289 | return NULL; /* keep gcc quiet */ | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * This is called to unlock all of the items of a transaction and to free | ||
294 | * all the descriptors of that transaction. | ||
295 | * | ||
296 | * It walks the list of descriptors and unlocks each item. It frees | ||
297 | * each chunk except that embedded in the transaction as it goes along. | ||
298 | */ | ||
299 | void | ||
300 | xfs_trans_free_items( | ||
301 | xfs_trans_t *tp, | ||
302 | xfs_lsn_t commit_lsn, | ||
303 | int flags) | ||
304 | { | ||
305 | xfs_log_item_chunk_t *licp; | ||
306 | xfs_log_item_chunk_t *next_licp; | ||
307 | int abort; | ||
308 | |||
309 | abort = flags & XFS_TRANS_ABORT; | ||
310 | licp = &tp->t_items; | ||
311 | /* | ||
312 | * Special case the embedded chunk so we don't free it below. | ||
313 | */ | ||
314 | if (!xfs_lic_are_all_free(licp)) { | ||
315 | (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn); | ||
316 | xfs_lic_all_free(licp); | ||
317 | licp->lic_unused = 0; | ||
318 | } | ||
319 | licp = licp->lic_next; | ||
320 | |||
321 | /* | ||
322 | * Unlock each item in each chunk and free the chunks. | ||
323 | */ | ||
324 | while (licp != NULL) { | ||
325 | ASSERT(!xfs_lic_are_all_free(licp)); | ||
326 | (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn); | ||
327 | next_licp = licp->lic_next; | ||
328 | kmem_free(licp); | ||
329 | licp = next_licp; | ||
330 | } | ||
331 | |||
332 | /* | ||
333 | * Reset the transaction structure's free item count. | ||
334 | */ | ||
335 | tp->t_items_free = XFS_LIC_NUM_SLOTS; | ||
336 | tp->t_items.lic_next = NULL; | ||
337 | } | ||
338 | |||
339 | |||
340 | |||
341 | /* | ||
342 | * This is called to unlock the items associated with a transaction. | ||
343 | * Items which were not logged should be freed. | ||
344 | * Those which were logged must still be tracked so they can be unpinned | ||
345 | * when the transaction commits. | ||
346 | */ | ||
347 | void | ||
348 | xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn) | ||
349 | { | ||
350 | xfs_log_item_chunk_t *licp; | ||
351 | xfs_log_item_chunk_t *next_licp; | ||
352 | xfs_log_item_chunk_t **licpp; | ||
353 | int freed; | ||
354 | |||
355 | freed = 0; | ||
356 | licp = &tp->t_items; | ||
357 | |||
358 | /* | ||
359 | * Special case the embedded chunk so we don't free. | ||
360 | */ | ||
361 | if (!xfs_lic_are_all_free(licp)) { | ||
362 | freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); | ||
363 | } | ||
364 | licpp = &(tp->t_items.lic_next); | ||
365 | licp = licp->lic_next; | ||
366 | |||
367 | /* | ||
368 | * Unlock each item in each chunk, free non-dirty descriptors, | ||
369 | * and free empty chunks. | ||
370 | */ | ||
371 | while (licp != NULL) { | ||
372 | ASSERT(!xfs_lic_are_all_free(licp)); | ||
373 | freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn); | ||
374 | next_licp = licp->lic_next; | ||
375 | if (xfs_lic_are_all_free(licp)) { | ||
376 | *licpp = next_licp; | ||
377 | kmem_free(licp); | ||
378 | freed -= XFS_LIC_NUM_SLOTS; | ||
379 | } else { | ||
380 | licpp = &(licp->lic_next); | ||
381 | } | ||
382 | ASSERT(*licpp == next_licp); | ||
383 | licp = next_licp; | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * Fix the free descriptor count in the transaction. | ||
388 | */ | ||
389 | tp->t_items_free += freed; | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * Unlock each item pointed to by a descriptor in the given chunk. | ||
394 | * Stamp the commit lsn into each item if necessary. | ||
395 | * Free descriptors pointing to items which are not dirty if freeing_chunk | ||
396 | * is zero. If freeing_chunk is non-zero, then we need to unlock all | ||
397 | * items in the chunk. | ||
398 | * | ||
399 | * Return the number of descriptors freed. | ||
400 | */ | ||
401 | STATIC int | ||
402 | xfs_trans_unlock_chunk( | ||
403 | xfs_log_item_chunk_t *licp, | ||
404 | int freeing_chunk, | ||
405 | int abort, | ||
406 | xfs_lsn_t commit_lsn) | ||
407 | { | ||
408 | xfs_log_item_desc_t *lidp; | ||
409 | xfs_log_item_t *lip; | ||
410 | int i; | ||
411 | int freed; | ||
412 | |||
413 | freed = 0; | ||
414 | lidp = licp->lic_descs; | ||
415 | for (i = 0; i < licp->lic_unused; i++, lidp++) { | ||
416 | if (xfs_lic_isfree(licp, i)) { | ||
417 | continue; | ||
418 | } | ||
419 | lip = lidp->lid_item; | ||
420 | lip->li_desc = NULL; | ||
421 | |||
422 | if (commit_lsn != NULLCOMMITLSN) | ||
423 | IOP_COMMITTING(lip, commit_lsn); | ||
424 | if (abort) | ||
425 | lip->li_flags |= XFS_LI_ABORTED; | ||
426 | IOP_UNLOCK(lip); | ||
427 | |||
428 | /* | ||
429 | * Free the descriptor if the item is not dirty | ||
430 | * within this transaction and the caller is not | ||
431 | * going to just free the entire thing regardless. | ||
432 | */ | ||
433 | if (!(freeing_chunk) && | ||
434 | (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) { | ||
435 | xfs_lic_relse(licp, i); | ||
436 | freed++; | ||
437 | } | ||
438 | } | ||
439 | |||
440 | return freed; | ||
441 | } | ||
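
The deleted xfs_trans_item.c above spent most of its code managing descriptor chunks, free masks and slot indices. Its replacement (declared in xfs_trans_priv.h just below) allocates one descriptor per item and simply links or unlinks it on the transaction's list. Here is a compact approximation of that add/del pair; demo_ names are invented, malloc/free stand in for the kernel's descriptor zone, and the toy list helpers are repeated so the example stays self-contained.

#include <stdio.h>
#include <stdlib.h>

struct demo_list { struct demo_list *prev, *next; };

static void demo_list_init(struct demo_list *h) { h->prev = h->next = h; }
static void demo_list_add_tail(struct demo_list *n, struct demo_list *h)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}
static void demo_list_del(struct demo_list *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;
}
static int demo_list_empty(const struct demo_list *h) { return h->next == h; }

struct demo_item_desc {
        void            *lid_item;
        struct demo_list lid_trans;
};

struct demo_item { struct demo_item_desc *li_desc; };

/* Add: one heap-allocated descriptor per item, linked onto the transaction. */
static void demo_trans_add_item(struct demo_list *t_items, struct demo_item *lip)
{
        struct demo_item_desc *lidp = malloc(sizeof(*lidp));

        lidp->lid_item = lip;
        demo_list_add_tail(&lidp->lid_trans, t_items);
        lip->li_desc = lidp;
}

/* Del: unlink the descriptor and free it -- no chunk bitmaps to maintain. */
static void demo_trans_del_item(struct demo_item *lip)
{
        struct demo_item_desc *lidp = lip->li_desc;

        lip->li_desc = NULL;
        demo_list_del(&lidp->lid_trans);
        free(lidp);
}

int main(void)
{
        struct demo_list t_items;
        struct demo_item item = { NULL };

        demo_list_init(&t_items);
        demo_trans_add_item(&t_items, &item);
        demo_trans_del_item(&item);
        printf("transaction empty again: %s\n",
               demo_list_empty(&t_items) ? "yes" : "no");
        return 0;
}
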
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index c6e4f2c8de6e..e2d93d8ead7b 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h | |||
@@ -23,22 +23,8 @@ struct xfs_log_item_desc; | |||
23 | struct xfs_mount; | 23 | struct xfs_mount; |
24 | struct xfs_trans; | 24 | struct xfs_trans; |
25 | 25 | ||
26 | /* | 26 | void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); |
27 | * From xfs_trans_item.c | 27 | void xfs_trans_del_item(struct xfs_log_item *); |
28 | */ | ||
29 | struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *, | ||
30 | struct xfs_log_item *); | ||
31 | void xfs_trans_free_item(struct xfs_trans *, | ||
32 | struct xfs_log_item_desc *); | ||
33 | struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *, | ||
34 | struct xfs_log_item *); | ||
35 | struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *); | ||
36 | struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *, | ||
37 | struct xfs_log_item_desc *); | ||
38 | |||
39 | void xfs_trans_unlock_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn); | ||
40 | void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn, | ||
41 | int flags); | ||
42 | 28 | ||
43 | void xfs_trans_item_committed(struct xfs_log_item *lip, | 29 | void xfs_trans_item_committed(struct xfs_log_item *lip, |
44 | xfs_lsn_t commit_lsn, int aborted); | 30 | xfs_lsn_t commit_lsn, int aborted); |
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 4d88616bde91..b7d5769d2df0 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c | |||
@@ -25,18 +25,14 @@ | |||
25 | #include "xfs_sb.h" | 25 | #include "xfs_sb.h" |
26 | #include "xfs_ag.h" | 26 | #include "xfs_ag.h" |
27 | #include "xfs_dir2.h" | 27 | #include "xfs_dir2.h" |
28 | #include "xfs_dmapi.h" | ||
29 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
30 | #include "xfs_bmap_btree.h" | 29 | #include "xfs_bmap_btree.h" |
31 | #include "xfs_dir2_sf.h" | ||
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 30 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 31 | #include "xfs_inode.h" |
35 | #include "xfs_inode_item.h" | 32 | #include "xfs_inode_item.h" |
36 | #include "xfs_bmap.h" | 33 | #include "xfs_bmap.h" |
37 | #include "xfs_error.h" | 34 | #include "xfs_error.h" |
38 | #include "xfs_quota.h" | 35 | #include "xfs_quota.h" |
39 | #include "xfs_rw.h" | ||
40 | #include "xfs_itable.h" | 36 | #include "xfs_itable.h" |
41 | #include "xfs_utils.h" | 37 | #include "xfs_utils.h" |
42 | 38 | ||
@@ -324,86 +320,3 @@ xfs_bumplink( | |||
324 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 320 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
325 | return 0; | 321 | return 0; |
326 | } | 322 | } |
327 | |||
328 | /* | ||
329 | * Try to truncate the given file to 0 length. Currently called | ||
330 | * only out of xfs_remove when it has to truncate a file to free | ||
331 | * up space for the remove to proceed. | ||
332 | */ | ||
333 | int | ||
334 | xfs_truncate_file( | ||
335 | xfs_mount_t *mp, | ||
336 | xfs_inode_t *ip) | ||
337 | { | ||
338 | xfs_trans_t *tp; | ||
339 | int error; | ||
340 | |||
341 | #ifdef QUOTADEBUG | ||
342 | /* | ||
343 | * This is called to truncate the quotainodes too. | ||
344 | */ | ||
345 | if (XFS_IS_UQUOTA_ON(mp)) { | ||
346 | if (ip->i_ino != mp->m_sb.sb_uquotino) | ||
347 | ASSERT(ip->i_udquot); | ||
348 | } | ||
349 | if (XFS_IS_OQUOTA_ON(mp)) { | ||
350 | if (ip->i_ino != mp->m_sb.sb_gquotino) | ||
351 | ASSERT(ip->i_gdquot); | ||
352 | } | ||
353 | #endif | ||
354 | /* | ||
355 | * Make the call to xfs_itruncate_start before starting the | ||
356 | * transaction, because we cannot make the call while we're | ||
357 | * in a transaction. | ||
358 | */ | ||
359 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | ||
360 | error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0); | ||
361 | if (error) { | ||
362 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
363 | return error; | ||
364 | } | ||
365 | |||
366 | tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); | ||
367 | if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, | ||
368 | XFS_TRANS_PERM_LOG_RES, | ||
369 | XFS_ITRUNCATE_LOG_COUNT))) { | ||
370 | xfs_trans_cancel(tp, 0); | ||
371 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | ||
372 | return error; | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Follow the normal truncate locking protocol. Since we | ||
377 | * hold the inode in the transaction, we know that its number | ||
378 | * of references will stay constant. | ||
379 | */ | ||
380 | xfs_ilock(ip, XFS_ILOCK_EXCL); | ||
381 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
382 | xfs_trans_ihold(tp, ip); | ||
383 | /* | ||
384 | * Signal a sync xaction. The only case where that isn't | ||
385 | * the case is if we're truncating an already unlinked file | ||
386 | * on a wsync fs. In that case, we know the blocks can't | ||
387 | * reappear in the file because the links to file are | ||
388 | * permanently toast. Currently, we're always going to | ||
389 | * want a sync transaction because this code is being | ||
390 | * called from places where nlink is guaranteed to be 1 | ||
391 | * but I'm leaving the tests in to protect against future | ||
392 | * changes -- rcc. | ||
393 | */ | ||
394 | error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0, | ||
395 | XFS_DATA_FORK, | ||
396 | ((ip->i_d.di_nlink != 0 || | ||
397 | !(mp->m_flags & XFS_MOUNT_WSYNC)) | ||
398 | ? 1 : 0)); | ||
399 | if (error) { | ||
400 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | | ||
401 | XFS_TRANS_ABORT); | ||
402 | } else { | ||
403 | xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | ||
404 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | ||
405 | } | ||
406 | xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | ||
407 | |||
408 | return error; | ||
409 | } | ||
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index ef321225d269..f55b9678264f 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h | |||
@@ -18,7 +18,6 @@ | |||
18 | #ifndef __XFS_UTILS_H__ | 18 | #ifndef __XFS_UTILS_H__ |
19 | #define __XFS_UTILS_H__ | 19 | #define __XFS_UTILS_H__ |
20 | 20 | ||
21 | extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); | ||
22 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, | 21 | extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, |
23 | xfs_dev_t, cred_t *, prid_t, int, | 22 | xfs_dev_t, cred_t *, prid_t, int, |
24 | xfs_inode_t **, int *); | 23 | xfs_inode_t **, int *); |
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index a06bd62504fc..3ac137dd531b 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
@@ -26,19 +26,14 @@ | |||
26 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
27 | #include "xfs_ag.h" | 27 | #include "xfs_ag.h" |
28 | #include "xfs_dir2.h" | 28 | #include "xfs_dir2.h" |
29 | #include "xfs_dmapi.h" | ||
30 | #include "xfs_mount.h" | 29 | #include "xfs_mount.h" |
31 | #include "xfs_da_btree.h" | 30 | #include "xfs_da_btree.h" |
32 | #include "xfs_bmap_btree.h" | 31 | #include "xfs_bmap_btree.h" |
33 | #include "xfs_alloc_btree.h" | ||
34 | #include "xfs_ialloc_btree.h" | 32 | #include "xfs_ialloc_btree.h" |
35 | #include "xfs_dir2_sf.h" | ||
36 | #include "xfs_attr_sf.h" | ||
37 | #include "xfs_dinode.h" | 33 | #include "xfs_dinode.h" |
38 | #include "xfs_inode.h" | 34 | #include "xfs_inode.h" |
39 | #include "xfs_inode_item.h" | 35 | #include "xfs_inode_item.h" |
40 | #include "xfs_itable.h" | 36 | #include "xfs_itable.h" |
41 | #include "xfs_btree.h" | ||
42 | #include "xfs_ialloc.h" | 37 | #include "xfs_ialloc.h" |
43 | #include "xfs_alloc.h" | 38 | #include "xfs_alloc.h" |
44 | #include "xfs_bmap.h" | 39 | #include "xfs_bmap.h" |
@@ -73,7 +68,7 @@ xfs_setattr( | |||
73 | struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; | 68 | struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; |
74 | int need_iolock = 1; | 69 | int need_iolock = 1; |
75 | 70 | ||
76 | xfs_itrace_entry(ip); | 71 | trace_xfs_setattr(ip); |
77 | 72 | ||
78 | if (mp->m_flags & XFS_MOUNT_RDONLY) | 73 | if (mp->m_flags & XFS_MOUNT_RDONLY) |
79 | return XFS_ERROR(EROFS); | 74 | return XFS_ERROR(EROFS); |
@@ -143,16 +138,6 @@ xfs_setattr( | |||
143 | goto error_return; | 138 | goto error_return; |
144 | } | 139 | } |
145 | } else { | 140 | } else { |
146 | if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) && | ||
147 | !(flags & XFS_ATTR_DMI)) { | ||
148 | int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR; | ||
149 | code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip, | ||
150 | iattr->ia_size, 0, dmflags, NULL); | ||
151 | if (code) { | ||
152 | lock_flags = 0; | ||
153 | goto error_return; | ||
154 | } | ||
155 | } | ||
156 | if (need_iolock) | 141 | if (need_iolock) |
157 | lock_flags |= XFS_IOLOCK_EXCL; | 142 | lock_flags |= XFS_IOLOCK_EXCL; |
158 | } | 143 | } |
@@ -283,8 +268,7 @@ xfs_setattr( | |||
283 | commit_flags = XFS_TRANS_RELEASE_LOG_RES; | 268 | commit_flags = XFS_TRANS_RELEASE_LOG_RES; |
284 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 269 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
285 | 270 | ||
286 | xfs_trans_ijoin(tp, ip, lock_flags); | 271 | xfs_trans_ijoin(tp, ip); |
287 | xfs_trans_ihold(tp, ip); | ||
288 | 272 | ||
289 | /* | 273 | /* |
290 | * Only change the c/mtime if we are changing the size | 274 | * Only change the c/mtime if we are changing the size |
@@ -334,8 +318,7 @@ xfs_setattr( | |||
334 | xfs_iflags_set(ip, XFS_ITRUNCATED); | 318 | xfs_iflags_set(ip, XFS_ITRUNCATED); |
335 | } | 319 | } |
336 | } else if (tp) { | 320 | } else if (tp) { |
337 | xfs_trans_ijoin(tp, ip, lock_flags); | 321 | xfs_trans_ijoin(tp, ip); |
338 | xfs_trans_ihold(tp, ip); | ||
339 | } | 322 | } |
340 | 323 | ||
341 | /* | 324 | /* |
@@ -470,17 +453,10 @@ xfs_setattr( | |||
470 | return XFS_ERROR(code); | 453 | return XFS_ERROR(code); |
471 | } | 454 | } |
472 | 455 | ||
473 | if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && | ||
474 | !(flags & XFS_ATTR_DMI)) { | ||
475 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, | ||
476 | NULL, DM_RIGHT_NULL, NULL, NULL, | ||
477 | 0, 0, AT_DELAY_FLAG(flags)); | ||
478 | } | ||
479 | return 0; | 456 | return 0; |
480 | 457 | ||
481 | abort_return: | 458 | abort_return: |
482 | commit_flags |= XFS_TRANS_ABORT; | 459 | commit_flags |= XFS_TRANS_ABORT; |
483 | /* FALLTHROUGH */ | ||
484 | error_return: | 460 | error_return: |
485 | xfs_qm_dqrele(udqp); | 461 | xfs_qm_dqrele(udqp); |
486 | xfs_qm_dqrele(gdqp); | 462 | xfs_qm_dqrele(gdqp); |
@@ -516,7 +492,7 @@ xfs_readlink_bmap( | |||
516 | int error = 0; | 492 | int error = 0; |
517 | 493 | ||
518 | error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0, | 494 | error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0, |
519 | mval, &nmaps, NULL, NULL); | 495 | mval, &nmaps, NULL); |
520 | if (error) | 496 | if (error) |
521 | goto out; | 497 | goto out; |
522 | 498 | ||
@@ -557,7 +533,7 @@ xfs_readlink( | |||
557 | int pathlen; | 533 | int pathlen; |
558 | int error = 0; | 534 | int error = 0; |
559 | 535 | ||
560 | xfs_itrace_entry(ip); | 536 | trace_xfs_readlink(ip); |
561 | 537 | ||
562 | if (XFS_FORCED_SHUTDOWN(mp)) | 538 | if (XFS_FORCED_SHUTDOWN(mp)) |
563 | return XFS_ERROR(EIO); | 539 | return XFS_ERROR(EIO); |
@@ -613,14 +589,14 @@ xfs_free_eofblocks( | |||
613 | */ | 589 | */ |
614 | end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size)); | 590 | end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size)); |
615 | last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); | 591 | last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); |
616 | map_len = last_fsb - end_fsb; | 592 | if (last_fsb <= end_fsb) |
617 | if (map_len <= 0) | ||
618 | return 0; | 593 | return 0; |
594 | map_len = last_fsb - end_fsb; | ||
619 | 595 | ||
620 | nimaps = 1; | 596 | nimaps = 1; |
621 | xfs_ilock(ip, XFS_ILOCK_SHARED); | 597 | xfs_ilock(ip, XFS_ILOCK_SHARED); |
622 | error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, | 598 | error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, |
623 | NULL, 0, &imap, &nimaps, NULL, NULL); | 599 | NULL, 0, &imap, &nimaps, NULL); |
624 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 600 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
625 | 601 | ||
626 | if (!error && (nimaps != 0) && | 602 | if (!error && (nimaps != 0) && |
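
The xfs_free_eofblocks() hunk above also reorders the end-of-file check: with unsigned block counters, computing last_fsb - end_fsb first and then testing the result against zero can never catch a wrapped difference, so the new code compares before it subtracts. The small program below (plain stdint types, not the XFS typedefs) is only meant to show why the order matters.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* The blocks covered by i_size exceed the last mappable block, so
         * there is nothing past EOF to trim. */
        uint64_t end_fsb  = 100;   /* blocks covered by i_size */
        uint64_t last_fsb = 80;    /* last mappable block */

        /* Old order: subtract first.  The difference wraps around, and a
         * "<= 0" test on an unsigned value only ever catches exactly zero. */
        uint64_t map_len = last_fsb - end_fsb;
        printf("subtract-first: map_len=%" PRIu64 " (wrapped), test misses it\n",
               map_len);

        /* New order: compare first, subtract only when it cannot wrap. */
        if (last_fsb <= end_fsb) {
                printf("compare-first: nothing past EOF to free\n");
                return 0;
        }
        printf("compare-first: map_len=%" PRIu64 "\n", last_fsb - end_fsb);
        return 0;
}
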
@@ -675,10 +651,7 @@ xfs_free_eofblocks( | |||
675 | } | 651 | } |
676 | 652 | ||
677 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 653 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
678 | xfs_trans_ijoin(tp, ip, | 654 | xfs_trans_ijoin(tp, ip); |
679 | XFS_IOLOCK_EXCL | | ||
680 | XFS_ILOCK_EXCL); | ||
681 | xfs_trans_ihold(tp, ip); | ||
682 | 655 | ||
683 | error = xfs_itruncate_finish(&tp, ip, | 656 | error = xfs_itruncate_finish(&tp, ip, |
684 | ip->i_size, | 657 | ip->i_size, |
@@ -750,8 +723,7 @@ xfs_inactive_symlink_rmt( | |||
750 | xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 723 | xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); |
751 | size = (int)ip->i_d.di_size; | 724 | size = (int)ip->i_d.di_size; |
752 | ip->i_d.di_size = 0; | 725 | ip->i_d.di_size = 0; |
753 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 726 | xfs_trans_ijoin(tp, ip); |
754 | xfs_trans_ihold(tp, ip); | ||
755 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 727 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
756 | /* | 728 | /* |
757 | * Find the block(s) so we can inval and unmap them. | 729 | * Find the block(s) so we can inval and unmap them. |
@@ -761,7 +733,7 @@ xfs_inactive_symlink_rmt( | |||
761 | nmaps = ARRAY_SIZE(mval); | 733 | nmaps = ARRAY_SIZE(mval); |
762 | if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), | 734 | if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size), |
763 | XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, | 735 | XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps, |
764 | &free_list, NULL))) | 736 | &free_list))) |
765 | goto error0; | 737 | goto error0; |
766 | /* | 738 | /* |
767 | * Invalidate the block(s). | 739 | * Invalidate the block(s). |
@@ -776,7 +748,7 @@ xfs_inactive_symlink_rmt( | |||
776 | * Unmap the dead block(s) to the free_list. | 748 | * Unmap the dead block(s) to the free_list. |
777 | */ | 749 | */ |
778 | if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps, | 750 | if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps, |
779 | &first_block, &free_list, NULL, &done))) | 751 | &first_block, &free_list, &done))) |
780 | goto error1; | 752 | goto error1; |
781 | ASSERT(done); | 753 | ASSERT(done); |
782 | /* | 754 | /* |
@@ -795,8 +767,7 @@ xfs_inactive_symlink_rmt( | |||
795 | * Mark it dirty so it will be logged and moved forward in the log as | 767 | * Mark it dirty so it will be logged and moved forward in the log as |
796 | * part of every commit. | 768 | * part of every commit. |
797 | */ | 769 | */ |
798 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 770 | xfs_trans_ijoin(tp, ip); |
799 | xfs_trans_ihold(tp, ip); | ||
800 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 771 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
801 | /* | 772 | /* |
802 | * Get a new, empty transaction to return to our caller. | 773 | * Get a new, empty transaction to return to our caller. |
@@ -929,8 +900,7 @@ xfs_inactive_attrs( | |||
929 | goto error_cancel; | 900 | goto error_cancel; |
930 | 901 | ||
931 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 902 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
932 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 903 | xfs_trans_ijoin(tp, ip); |
933 | xfs_trans_ihold(tp, ip); | ||
934 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | 904 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); |
935 | 905 | ||
936 | ASSERT(ip->i_d.di_anextents == 0); | 906 | ASSERT(ip->i_d.di_anextents == 0); |
@@ -1035,8 +1005,6 @@ xfs_inactive( | |||
1035 | int error; | 1005 | int error; |
1036 | int truncate; | 1006 | int truncate; |
1037 | 1007 | ||
1038 | xfs_itrace_entry(ip); | ||
1039 | |||
1040 | /* | 1008 | /* |
1041 | * If the inode is already free, then there can be nothing | 1009 | * If the inode is already free, then there can be nothing |
1042 | * to clean up here. | 1010 | * to clean up here. |
@@ -1060,9 +1028,6 @@ xfs_inactive( | |||
1060 | 1028 | ||
1061 | mp = ip->i_mount; | 1029 | mp = ip->i_mount; |
1062 | 1030 | ||
1063 | if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY)) | ||
1064 | XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL); | ||
1065 | |||
1066 | error = 0; | 1031 | error = 0; |
1067 | 1032 | ||
1068 | /* If this is a read-only mount, don't do this (would generate I/O) */ | 1033 | /* If this is a read-only mount, don't do this (would generate I/O) */ |
@@ -1120,8 +1085,7 @@ xfs_inactive( | |||
1120 | } | 1085 | } |
1121 | 1086 | ||
1122 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 1087 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
1123 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 1088 | xfs_trans_ijoin(tp, ip); |
1124 | xfs_trans_ihold(tp, ip); | ||
1125 | 1089 | ||
1126 | /* | 1090 | /* |
1127 | * normally, we have to run xfs_itruncate_finish sync. | 1091 | * normally, we have to run xfs_itruncate_finish sync. |
@@ -1154,8 +1118,7 @@ xfs_inactive( | |||
1154 | return VN_INACTIVE_CACHE; | 1118 | return VN_INACTIVE_CACHE; |
1155 | } | 1119 | } |
1156 | 1120 | ||
1157 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 1121 | xfs_trans_ijoin(tp, ip); |
1158 | xfs_trans_ihold(tp, ip); | ||
1159 | } else { | 1122 | } else { |
1160 | error = xfs_trans_reserve(tp, 0, | 1123 | error = xfs_trans_reserve(tp, 0, |
1161 | XFS_IFREE_LOG_RES(mp), | 1124 | XFS_IFREE_LOG_RES(mp), |
@@ -1168,8 +1131,7 @@ xfs_inactive( | |||
1168 | } | 1131 | } |
1169 | 1132 | ||
1170 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); | 1133 | xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); |
1171 | xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL); | 1134 | xfs_trans_ijoin(tp, ip); |
1172 | xfs_trans_ihold(tp, ip); | ||
1173 | } | 1135 | } |
1174 | 1136 | ||
1175 | /* | 1137 | /* |
@@ -1257,7 +1219,7 @@ xfs_lookup( | |||
1257 | int error; | 1219 | int error; |
1258 | uint lock_mode; | 1220 | uint lock_mode; |
1259 | 1221 | ||
1260 | xfs_itrace_entry(dp); | 1222 | trace_xfs_lookup(dp, name); |
1261 | 1223 | ||
1262 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) | 1224 | if (XFS_FORCED_SHUTDOWN(dp->i_mount)) |
1263 | return XFS_ERROR(EIO); | 1225 | return XFS_ERROR(EIO); |
@@ -1269,7 +1231,7 @@ xfs_lookup( | |||
1269 | if (error) | 1231 | if (error) |
1270 | goto out; | 1232 | goto out; |
1271 | 1233 | ||
1272 | error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); | 1234 | error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp); |
1273 | if (error) | 1235 | if (error) |
1274 | goto out_free_name; | 1236 | goto out_free_name; |
1275 | 1237 | ||
@@ -1309,21 +1271,11 @@ xfs_create( | |||
1309 | uint log_res; | 1271 | uint log_res; |
1310 | uint log_count; | 1272 | uint log_count; |
1311 | 1273 | ||
1312 | xfs_itrace_entry(dp); | 1274 | trace_xfs_create(dp, name); |
1313 | 1275 | ||
1314 | if (XFS_FORCED_SHUTDOWN(mp)) | 1276 | if (XFS_FORCED_SHUTDOWN(mp)) |
1315 | return XFS_ERROR(EIO); | 1277 | return XFS_ERROR(EIO); |
1316 | 1278 | ||
1317 | if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) { | ||
1318 | error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, | ||
1319 | dp, DM_RIGHT_NULL, NULL, | ||
1320 | DM_RIGHT_NULL, name->name, NULL, | ||
1321 | mode, 0, 0); | ||
1322 | |||
1323 | if (error) | ||
1324 | return error; | ||
1325 | } | ||
1326 | |||
1327 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 1279 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) |
1328 | prid = dp->i_d.di_projid; | 1280 | prid = dp->i_d.di_projid; |
1329 | else | 1281 | else |
@@ -1427,8 +1379,7 @@ xfs_create( | |||
1427 | * the transaction cancel unlocking dp so don't do it explicitly in the | 1379 | * the transaction cancel unlocking dp so don't do it explicitly in the |
1428 | * error path. | 1380 | * error path. |
1429 | */ | 1381 | */ |
1430 | IHOLD(dp); | 1382 | xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL); |
1431 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | ||
1432 | unlock_dp_on_error = B_FALSE; | 1383 | unlock_dp_on_error = B_FALSE; |
1433 | 1384 | ||
1434 | error = xfs_dir_createname(tp, dp, name, ip->i_ino, | 1385 | error = xfs_dir_createname(tp, dp, name, ip->i_ino, |
@@ -1487,16 +1438,7 @@ xfs_create( | |||
1487 | xfs_qm_dqrele(gdqp); | 1438 | xfs_qm_dqrele(gdqp); |
1488 | 1439 | ||
1489 | *ipp = ip; | 1440 | *ipp = ip; |
1490 | 1441 | return 0; | |
1491 | /* Fallthrough to std_return with error = 0 */ | ||
1492 | std_return: | ||
1493 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) { | ||
1494 | XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL, | ||
1495 | ip, DM_RIGHT_NULL, name->name, NULL, mode, | ||
1496 | error, 0); | ||
1497 | } | ||
1498 | |||
1499 | return error; | ||
1500 | 1442 | ||
1501 | out_bmap_cancel: | 1443 | out_bmap_cancel: |
1502 | xfs_bmap_cancel(&free_list); | 1444 | xfs_bmap_cancel(&free_list); |
@@ -1510,8 +1452,8 @@ xfs_create( | |||
1510 | 1452 | ||
1511 | if (unlock_dp_on_error) | 1453 | if (unlock_dp_on_error) |
1512 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 1454 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
1513 | 1455 | std_return: | |
1514 | goto std_return; | 1456 | return error; |
1515 | 1457 | ||
1516 | out_abort_rele: | 1458 | out_abort_rele: |
1517 | /* | 1459 | /* |
@@ -1726,20 +1668,11 @@ xfs_remove( | |||
1726 | uint resblks; | 1668 | uint resblks; |
1727 | uint log_count; | 1669 | uint log_count; |
1728 | 1670 | ||
1729 | xfs_itrace_entry(dp); | 1671 | trace_xfs_remove(dp, name); |
1730 | xfs_itrace_entry(ip); | ||
1731 | 1672 | ||
1732 | if (XFS_FORCED_SHUTDOWN(mp)) | 1673 | if (XFS_FORCED_SHUTDOWN(mp)) |
1733 | return XFS_ERROR(EIO); | 1674 | return XFS_ERROR(EIO); |
1734 | 1675 | ||
1735 | if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) { | ||
1736 | error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL, | ||
1737 | NULL, DM_RIGHT_NULL, name->name, NULL, | ||
1738 | ip->i_d.di_mode, 0, 0); | ||
1739 | if (error) | ||
1740 | return error; | ||
1741 | } | ||
1742 | |||
1743 | error = xfs_qm_dqattach(dp, 0); | 1676 | error = xfs_qm_dqattach(dp, 0); |
1744 | if (error) | 1677 | if (error) |
1745 | goto std_return; | 1678 | goto std_return; |
@@ -1782,15 +1715,8 @@ xfs_remove( | |||
1782 | 1715 | ||
1783 | xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); | 1716 | xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL); |
1784 | 1717 | ||
1785 | /* | 1718 | xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL); |
1786 | * At this point, we've gotten both the directory and the entry | 1719 | xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); |
1787 | * inodes locked. | ||
1788 | */ | ||
1789 | IHOLD(ip); | ||
1790 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | ||
1791 | |||
1792 | IHOLD(dp); | ||
1793 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | ||
1794 | 1720 | ||
1795 | /* | 1721 | /* |
1796 | * If we're removing a directory perform some additional validation. | 1722 | * If we're removing a directory perform some additional validation. |
@@ -1877,21 +1803,15 @@ xfs_remove( | |||
1877 | if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) | 1803 | if (!is_dir && link_zero && xfs_inode_is_filestream(ip)) |
1878 | xfs_filestream_deassociate(ip); | 1804 | xfs_filestream_deassociate(ip); |
1879 | 1805 | ||
1880 | std_return: | 1806 | return 0; |
1881 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { | ||
1882 | XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL, | ||
1883 | NULL, DM_RIGHT_NULL, name->name, NULL, | ||
1884 | ip->i_d.di_mode, error, 0); | ||
1885 | } | ||
1886 | |||
1887 | return error; | ||
1888 | 1807 | ||
1889 | out_bmap_cancel: | 1808 | out_bmap_cancel: |
1890 | xfs_bmap_cancel(&free_list); | 1809 | xfs_bmap_cancel(&free_list); |
1891 | cancel_flags |= XFS_TRANS_ABORT; | 1810 | cancel_flags |= XFS_TRANS_ABORT; |
1892 | out_trans_cancel: | 1811 | out_trans_cancel: |
1893 | xfs_trans_cancel(tp, cancel_flags); | 1812 | xfs_trans_cancel(tp, cancel_flags); |
1894 | goto std_return; | 1813 | std_return: |
1814 | return error; | ||
1895 | } | 1815 | } |
1896 | 1816 | ||
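The xfs_create() and xfs_remove() hunks above show one of the recurring conversions in this diff: the IHOLD() plus three-argument xfs_trans_ijoin() pairing collapses into a single xfs_trans_ijoin_ref() call. A condensed before/after sketch of just that pattern, with the surrounding transaction setup elided; the comments summarise the apparent intent of the change rather than the full implementation:

        /* Before: take an extra inode reference by hand, then join the
         * locked inode to the transaction; commit/cancel later unlocks
         * the inode and drops that reference.
         */
        IHOLD(dp);
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);

        /* After: one helper joins the inode and holds the reference,
         * keeping the same unlock-on-commit behaviour.
         */
        xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);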
1897 | int | 1817 | int |
@@ -1909,25 +1829,13 @@ xfs_link( | |||
1909 | int committed; | 1829 | int committed; |
1910 | int resblks; | 1830 | int resblks; |
1911 | 1831 | ||
1912 | xfs_itrace_entry(tdp); | 1832 | trace_xfs_link(tdp, target_name); |
1913 | xfs_itrace_entry(sip); | ||
1914 | 1833 | ||
1915 | ASSERT(!S_ISDIR(sip->i_d.di_mode)); | 1834 | ASSERT(!S_ISDIR(sip->i_d.di_mode)); |
1916 | 1835 | ||
1917 | if (XFS_FORCED_SHUTDOWN(mp)) | 1836 | if (XFS_FORCED_SHUTDOWN(mp)) |
1918 | return XFS_ERROR(EIO); | 1837 | return XFS_ERROR(EIO); |
1919 | 1838 | ||
1920 | if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) { | ||
1921 | error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK, | ||
1922 | tdp, DM_RIGHT_NULL, | ||
1923 | sip, DM_RIGHT_NULL, | ||
1924 | target_name->name, NULL, 0, 0, 0); | ||
1925 | if (error) | ||
1926 | return error; | ||
1927 | } | ||
1928 | |||
1929 | /* Return through std_return after this point. */ | ||
1930 | |||
1931 | error = xfs_qm_dqattach(sip, 0); | 1839 | error = xfs_qm_dqattach(sip, 0); |
1932 | if (error) | 1840 | if (error) |
1933 | goto std_return; | 1841 | goto std_return; |
@@ -1953,15 +1861,8 @@ xfs_link( | |||
1953 | 1861 | ||
1954 | xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); | 1862 | xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL); |
1955 | 1863 | ||
1956 | /* | 1864 | xfs_trans_ijoin_ref(tp, sip, XFS_ILOCK_EXCL); |
1957 | * Increment vnode ref counts since xfs_trans_commit & | 1865 | xfs_trans_ijoin_ref(tp, tdp, XFS_ILOCK_EXCL); |
1958 | * xfs_trans_cancel will both unlock the inodes and | ||
1959 | * decrement the associated ref counts. | ||
1960 | */ | ||
1961 | IHOLD(sip); | ||
1962 | IHOLD(tdp); | ||
1963 | xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL); | ||
1964 | xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL); | ||
1965 | 1866 | ||
1966 | /* | 1867 | /* |
1967 | * If the source has too many links, we can't make any more to it. | 1868 | * If the source has too many links, we can't make any more to it. |
@@ -2014,27 +1915,14 @@ xfs_link( | |||
2014 | goto abort_return; | 1915 | goto abort_return; |
2015 | } | 1916 | } |
2016 | 1917 | ||
2017 | error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); | 1918 | return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); |
2018 | if (error) | ||
2019 | goto std_return; | ||
2020 | |||
2021 | /* Fall through to std_return with error = 0. */ | ||
2022 | std_return: | ||
2023 | if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) { | ||
2024 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK, | ||
2025 | tdp, DM_RIGHT_NULL, | ||
2026 | sip, DM_RIGHT_NULL, | ||
2027 | target_name->name, NULL, 0, error, 0); | ||
2028 | } | ||
2029 | return error; | ||
2030 | 1919 | ||
2031 | abort_return: | 1920 | abort_return: |
2032 | cancel_flags |= XFS_TRANS_ABORT; | 1921 | cancel_flags |= XFS_TRANS_ABORT; |
2033 | /* FALLTHROUGH */ | ||
2034 | |||
2035 | error_return: | 1922 | error_return: |
2036 | xfs_trans_cancel(tp, cancel_flags); | 1923 | xfs_trans_cancel(tp, cancel_flags); |
2037 | goto std_return; | 1924 | std_return: |
1925 | return error; | ||
2038 | } | 1926 | } |
2039 | 1927 | ||
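In xfs_link(), as in the other namespace operations in this diff, the DM_EVENT_ENABLED()/XFS_SEND_NAMESP() callouts disappear, so the std_return label no longer has DMAPI post-event work to fall through to and becomes a bare return. The resulting error-path shape, read off the right-hand side of the hunks above:

        /* Success path: commit and return the result directly. */
        return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

 abort_return:
        cancel_flags |= XFS_TRANS_ABORT;
 error_return:
        xfs_trans_cancel(tp, cancel_flags);
 std_return:
        return error;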
2040 | int | 1928 | int |
@@ -2074,7 +1962,7 @@ xfs_symlink( | |||
2074 | ip = NULL; | 1962 | ip = NULL; |
2075 | tp = NULL; | 1963 | tp = NULL; |
2076 | 1964 | ||
2077 | xfs_itrace_entry(dp); | 1965 | trace_xfs_symlink(dp, link_name); |
2078 | 1966 | ||
2079 | if (XFS_FORCED_SHUTDOWN(mp)) | 1967 | if (XFS_FORCED_SHUTDOWN(mp)) |
2080 | return XFS_ERROR(EIO); | 1968 | return XFS_ERROR(EIO); |
@@ -2086,17 +1974,6 @@ xfs_symlink( | |||
2086 | if (pathlen >= MAXPATHLEN) /* total string too long */ | 1974 | if (pathlen >= MAXPATHLEN) /* total string too long */ |
2087 | return XFS_ERROR(ENAMETOOLONG); | 1975 | return XFS_ERROR(ENAMETOOLONG); |
2088 | 1976 | ||
2089 | if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) { | ||
2090 | error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp, | ||
2091 | DM_RIGHT_NULL, NULL, DM_RIGHT_NULL, | ||
2092 | link_name->name, | ||
2093 | (unsigned char *)target_path, 0, 0, 0); | ||
2094 | if (error) | ||
2095 | return error; | ||
2096 | } | ||
2097 | |||
2098 | /* Return through std_return after this point. */ | ||
2099 | |||
2100 | udqp = gdqp = NULL; | 1977 | udqp = gdqp = NULL; |
2101 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) | 1978 | if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) |
2102 | prid = dp->i_d.di_projid; | 1979 | prid = dp->i_d.di_projid; |
@@ -2180,8 +2057,7 @@ xfs_symlink( | |||
2180 | * transaction cancel unlocking dp so don't do it explicitly in the | 2057 | * transaction cancel unlocking dp so don't do it explicitly in the |
2181 | * error path. | 2058 | * error path. |
2182 | */ | 2059 | */ |
2183 | IHOLD(dp); | 2060 | xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL); |
2184 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); | ||
2185 | unlock_dp_on_error = B_FALSE; | 2061 | unlock_dp_on_error = B_FALSE; |
2186 | 2062 | ||
2187 | /* | 2063 | /* |
@@ -2215,7 +2091,7 @@ xfs_symlink( | |||
2215 | error = xfs_bmapi(tp, ip, first_fsb, fs_blocks, | 2091 | error = xfs_bmapi(tp, ip, first_fsb, fs_blocks, |
2216 | XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, | 2092 | XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, |
2217 | &first_block, resblks, mval, &nmaps, | 2093 | &first_block, resblks, mval, &nmaps, |
2218 | &free_list, NULL); | 2094 | &free_list); |
2219 | if (error) { | 2095 | if (error) { |
2220 | goto error1; | 2096 | goto error1; |
2221 | } | 2097 | } |
@@ -2278,21 +2154,8 @@ xfs_symlink( | |||
2278 | xfs_qm_dqrele(udqp); | 2154 | xfs_qm_dqrele(udqp); |
2279 | xfs_qm_dqrele(gdqp); | 2155 | xfs_qm_dqrele(gdqp); |
2280 | 2156 | ||
2281 | /* Fall through to std_return with error = 0 or errno from | 2157 | *ipp = ip; |
2282 | * xfs_trans_commit */ | 2158 | return 0; |
2283 | std_return: | ||
2284 | if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) { | ||
2285 | (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK, | ||
2286 | dp, DM_RIGHT_NULL, | ||
2287 | error ? NULL : ip, | ||
2288 | DM_RIGHT_NULL, link_name->name, | ||
2289 | (unsigned char *)target_path, | ||
2290 | 0, error, 0); | ||
2291 | } | ||
2292 | |||
2293 | if (!error) | ||
2294 | *ipp = ip; | ||
2295 | return error; | ||
2296 | 2159 | ||
2297 | error2: | 2160 | error2: |
2298 | IRELE(ip); | 2161 | IRELE(ip); |
@@ -2306,8 +2169,8 @@ std_return: | |||
2306 | 2169 | ||
2307 | if (unlock_dp_on_error) | 2170 | if (unlock_dp_on_error) |
2308 | xfs_iunlock(dp, XFS_ILOCK_EXCL); | 2171 | xfs_iunlock(dp, XFS_ILOCK_EXCL); |
2309 | 2172 | std_return: | |
2310 | goto std_return; | 2173 | return error; |
2311 | } | 2174 | } |
2312 | 2175 | ||
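Another substitution repeated throughout these hunks: the generic xfs_itrace_entry() markers become operation-specific tracepoints that also record the name being acted on (trace_xfs_create, trace_xfs_remove, trace_xfs_link, trace_xfs_symlink, plus the inode-only trace_xfs_alloc_file_space and trace_xfs_free_file_space further down). A two-line sketch of the substitution as it appears at the top of xfs_create():

        /* Before: generic per-inode entry trace. */
        xfs_itrace_entry(dp);

        /* After: dedicated tracepoint carrying both the directory
         * inode and the name being created.
         */
        trace_xfs_create(dp, name);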
2313 | int | 2176 | int |
@@ -2333,13 +2196,12 @@ xfs_set_dmattrs( | |||
2333 | return error; | 2196 | return error; |
2334 | } | 2197 | } |
2335 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 2198 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2336 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 2199 | xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); |
2337 | 2200 | ||
2338 | ip->i_d.di_dmevmask = evmask; | 2201 | ip->i_d.di_dmevmask = evmask; |
2339 | ip->i_d.di_dmstate = state; | 2202 | ip->i_d.di_dmstate = state; |
2340 | 2203 | ||
2341 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | 2204 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2342 | IHOLD(ip); | ||
2343 | error = xfs_trans_commit(tp, 0); | 2205 | error = xfs_trans_commit(tp, 0); |
2344 | 2206 | ||
2345 | return error; | 2207 | return error; |
@@ -2390,7 +2252,7 @@ xfs_alloc_file_space( | |||
2390 | int committed; | 2252 | int committed; |
2391 | int error; | 2253 | int error; |
2392 | 2254 | ||
2393 | xfs_itrace_entry(ip); | 2255 | trace_xfs_alloc_file_space(ip); |
2394 | 2256 | ||
2395 | if (XFS_FORCED_SHUTDOWN(mp)) | 2257 | if (XFS_FORCED_SHUTDOWN(mp)) |
2396 | return XFS_ERROR(EIO); | 2258 | return XFS_ERROR(EIO); |
@@ -2412,25 +2274,9 @@ xfs_alloc_file_space( | |||
2412 | startoffset_fsb = XFS_B_TO_FSBT(mp, offset); | 2274 | startoffset_fsb = XFS_B_TO_FSBT(mp, offset); |
2413 | allocatesize_fsb = XFS_B_TO_FSB(mp, count); | 2275 | allocatesize_fsb = XFS_B_TO_FSB(mp, count); |
2414 | 2276 | ||
2415 | /* Generate a DMAPI event if needed. */ | ||
2416 | if (alloc_type != 0 && offset < ip->i_size && | ||
2417 | (attr_flags & XFS_ATTR_DMI) == 0 && | ||
2418 | DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { | ||
2419 | xfs_off_t end_dmi_offset; | ||
2420 | |||
2421 | end_dmi_offset = offset+len; | ||
2422 | if (end_dmi_offset > ip->i_size) | ||
2423 | end_dmi_offset = ip->i_size; | ||
2424 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset, | ||
2425 | end_dmi_offset - offset, 0, NULL); | ||
2426 | if (error) | ||
2427 | return error; | ||
2428 | } | ||
2429 | |||
2430 | /* | 2277 | /* |
2431 | * Allocate file space until done or until there is an error | 2278 | * Allocate file space until done or until there is an error |
2432 | */ | 2279 | */ |
2433 | retry: | ||
2434 | while (allocatesize_fsb && !error) { | 2280 | while (allocatesize_fsb && !error) { |
2435 | xfs_fileoff_t s, e; | 2281 | xfs_fileoff_t s, e; |
2436 | 2282 | ||
@@ -2488,8 +2334,7 @@ retry: | |||
2488 | if (error) | 2334 | if (error) |
2489 | goto error1; | 2335 | goto error1; |
2490 | 2336 | ||
2491 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 2337 | xfs_trans_ijoin(tp, ip); |
2492 | xfs_trans_ihold(tp, ip); | ||
2493 | 2338 | ||
2494 | /* | 2339 | /* |
2495 | * Issue the xfs_bmapi() call to allocate the blocks | 2340 | * Issue the xfs_bmapi() call to allocate the blocks |
@@ -2498,7 +2343,7 @@ retry: | |||
2498 | error = xfs_bmapi(tp, ip, startoffset_fsb, | 2343 | error = xfs_bmapi(tp, ip, startoffset_fsb, |
2499 | allocatesize_fsb, bmapi_flag, | 2344 | allocatesize_fsb, bmapi_flag, |
2500 | &firstfsb, 0, imapp, &nimaps, | 2345 | &firstfsb, 0, imapp, &nimaps, |
2501 | &free_list, NULL); | 2346 | &free_list); |
2502 | if (error) { | 2347 | if (error) { |
2503 | goto error0; | 2348 | goto error0; |
2504 | } | 2349 | } |
@@ -2527,17 +2372,6 @@ retry: | |||
2527 | startoffset_fsb += allocated_fsb; | 2372 | startoffset_fsb += allocated_fsb; |
2528 | allocatesize_fsb -= allocated_fsb; | 2373 | allocatesize_fsb -= allocated_fsb; |
2529 | } | 2374 | } |
2530 | dmapi_enospc_check: | ||
2531 | if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 && | ||
2532 | DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) { | ||
2533 | error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, | ||
2534 | ip, DM_RIGHT_NULL, | ||
2535 | ip, DM_RIGHT_NULL, | ||
2536 | NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ | ||
2537 | if (error == 0) | ||
2538 | goto retry; /* Maybe DMAPI app. has made space */ | ||
2539 | /* else fall through with error from XFS_SEND_DATA */ | ||
2540 | } | ||
2541 | 2375 | ||
2542 | return error; | 2376 | return error; |
2543 | 2377 | ||
@@ -2548,7 +2382,7 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ | |||
2548 | error1: /* Just cancel transaction */ | 2382 | error1: /* Just cancel transaction */ |
2549 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); | 2383 | xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); |
2550 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 2384 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2551 | goto dmapi_enospc_check; | 2385 | return error; |
2552 | } | 2386 | } |
2553 | 2387 | ||
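The space-allocation and space-freeing paths show two more mechanical changes visible in the hunks above: xfs_bmapi()/xfs_bunmapi() lose a trailing parameter that every caller here passed as NULL, and the xfs_trans_ijoin()/xfs_trans_ihold() pair becomes a two-argument xfs_trans_ijoin(tp, ip). A sketch of both, taken from the xfs_alloc_file_space() hunks with declarations elided:

        /* Before: join with explicit lock flags plus a separate hold,
         * and a trailing NULL argument to xfs_bmapi().
         */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_ihold(tp, ip);
        error = xfs_bmapi(tp, ip, startoffset_fsb,
                          allocatesize_fsb, bmapi_flag,
                          &firstfsb, 0, imapp, &nimaps,
                          &free_list, NULL);

        /* After: two-argument join and the shortened xfs_bmapi()
         * argument list.
         */
        xfs_trans_ijoin(tp, ip);
        error = xfs_bmapi(tp, ip, startoffset_fsb,
                          allocatesize_fsb, bmapi_flag,
                          &firstfsb, 0, imapp, &nimaps,
                          &free_list);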
2554 | /* | 2388 | /* |
@@ -2598,7 +2432,7 @@ xfs_zero_remaining_bytes( | |||
2598 | offset_fsb = XFS_B_TO_FSBT(mp, offset); | 2432 | offset_fsb = XFS_B_TO_FSBT(mp, offset); |
2599 | nimap = 1; | 2433 | nimap = 1; |
2600 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, | 2434 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, |
2601 | NULL, 0, &imap, &nimap, NULL, NULL); | 2435 | NULL, 0, &imap, &nimap, NULL); |
2602 | if (error || nimap < 1) | 2436 | if (error || nimap < 1) |
2603 | break; | 2437 | break; |
2604 | ASSERT(imap.br_blockcount >= 1); | 2438 | ASSERT(imap.br_blockcount >= 1); |
@@ -2661,7 +2495,6 @@ xfs_free_file_space( | |||
2661 | { | 2495 | { |
2662 | int committed; | 2496 | int committed; |
2663 | int done; | 2497 | int done; |
2664 | xfs_off_t end_dmi_offset; | ||
2665 | xfs_fileoff_t endoffset_fsb; | 2498 | xfs_fileoff_t endoffset_fsb; |
2666 | int error; | 2499 | int error; |
2667 | xfs_fsblock_t firstfsb; | 2500 | xfs_fsblock_t firstfsb; |
@@ -2680,7 +2513,7 @@ xfs_free_file_space( | |||
2680 | 2513 | ||
2681 | mp = ip->i_mount; | 2514 | mp = ip->i_mount; |
2682 | 2515 | ||
2683 | xfs_itrace_entry(ip); | 2516 | trace_xfs_free_file_space(ip); |
2684 | 2517 | ||
2685 | error = xfs_qm_dqattach(ip, 0); | 2518 | error = xfs_qm_dqattach(ip, 0); |
2686 | if (error) | 2519 | if (error) |
@@ -2691,19 +2524,7 @@ xfs_free_file_space( | |||
2691 | return error; | 2524 | return error; |
2692 | rt = XFS_IS_REALTIME_INODE(ip); | 2525 | rt = XFS_IS_REALTIME_INODE(ip); |
2693 | startoffset_fsb = XFS_B_TO_FSB(mp, offset); | 2526 | startoffset_fsb = XFS_B_TO_FSB(mp, offset); |
2694 | end_dmi_offset = offset + len; | 2527 | endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len); |
2695 | endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); | ||
2696 | |||
2697 | if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 && | ||
2698 | DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { | ||
2699 | if (end_dmi_offset > ip->i_size) | ||
2700 | end_dmi_offset = ip->i_size; | ||
2701 | error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, | ||
2702 | offset, end_dmi_offset - offset, | ||
2703 | AT_DELAY_FLAG(attr_flags), NULL); | ||
2704 | if (error) | ||
2705 | return error; | ||
2706 | } | ||
2707 | 2528 | ||
2708 | if (attr_flags & XFS_ATTR_NOLOCK) | 2529 | if (attr_flags & XFS_ATTR_NOLOCK) |
2709 | need_iolock = 0; | 2530 | need_iolock = 0; |
@@ -2731,7 +2552,7 @@ xfs_free_file_space( | |||
2731 | if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) { | 2552 | if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) { |
2732 | nimap = 1; | 2553 | nimap = 1; |
2733 | error = xfs_bmapi(NULL, ip, startoffset_fsb, | 2554 | error = xfs_bmapi(NULL, ip, startoffset_fsb, |
2734 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); | 2555 | 1, 0, NULL, 0, &imap, &nimap, NULL); |
2735 | if (error) | 2556 | if (error) |
2736 | goto out_unlock_iolock; | 2557 | goto out_unlock_iolock; |
2737 | ASSERT(nimap == 0 || nimap == 1); | 2558 | ASSERT(nimap == 0 || nimap == 1); |
@@ -2746,7 +2567,7 @@ xfs_free_file_space( | |||
2746 | } | 2567 | } |
2747 | nimap = 1; | 2568 | nimap = 1; |
2748 | error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, | 2569 | error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, |
2749 | 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); | 2570 | 1, 0, NULL, 0, &imap, &nimap, NULL); |
2750 | if (error) | 2571 | if (error) |
2751 | goto out_unlock_iolock; | 2572 | goto out_unlock_iolock; |
2752 | ASSERT(nimap == 0 || nimap == 1); | 2573 | ASSERT(nimap == 0 || nimap == 1); |
@@ -2814,8 +2635,7 @@ xfs_free_file_space( | |||
2814 | if (error) | 2635 | if (error) |
2815 | goto error1; | 2636 | goto error1; |
2816 | 2637 | ||
2817 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 2638 | xfs_trans_ijoin(tp, ip); |
2818 | xfs_trans_ihold(tp, ip); | ||
2819 | 2639 | ||
2820 | /* | 2640 | /* |
2821 | * issue the bunmapi() call to free the blocks | 2641 | * issue the bunmapi() call to free the blocks |
@@ -2823,7 +2643,7 @@ xfs_free_file_space( | |||
2823 | xfs_bmap_init(&free_list, &firstfsb); | 2643 | xfs_bmap_init(&free_list, &firstfsb); |
2824 | error = xfs_bunmapi(tp, ip, startoffset_fsb, | 2644 | error = xfs_bunmapi(tp, ip, startoffset_fsb, |
2825 | endoffset_fsb - startoffset_fsb, | 2645 | endoffset_fsb - startoffset_fsb, |
2826 | 0, 2, &firstfsb, &free_list, NULL, &done); | 2646 | 0, 2, &firstfsb, &free_list, &done); |
2827 | if (error) { | 2647 | if (error) { |
2828 | goto error0; | 2648 | goto error0; |
2829 | } | 2649 | } |
@@ -2883,8 +2703,6 @@ xfs_change_file_space( | |||
2883 | xfs_trans_t *tp; | 2703 | xfs_trans_t *tp; |
2884 | struct iattr iattr; | 2704 | struct iattr iattr; |
2885 | 2705 | ||
2886 | xfs_itrace_entry(ip); | ||
2887 | |||
2888 | if (!S_ISREG(ip->i_d.di_mode)) | 2706 | if (!S_ISREG(ip->i_d.di_mode)) |
2889 | return XFS_ERROR(EINVAL); | 2707 | return XFS_ERROR(EINVAL); |
2890 | 2708 | ||
@@ -2985,8 +2803,7 @@ xfs_change_file_space( | |||
2985 | 2803 | ||
2986 | xfs_ilock(ip, XFS_ILOCK_EXCL); | 2804 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2987 | 2805 | ||
2988 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | 2806 | xfs_trans_ijoin(tp, ip); |
2989 | xfs_trans_ihold(tp, ip); | ||
2990 | 2807 | ||
2991 | if ((attr_flags & XFS_ATTR_DMI) == 0) { | 2808 | if ((attr_flags & XFS_ATTR_DMI) == 0) { |
2992 | ip->i_d.di_mode &= ~S_ISUID; | 2809 | ip->i_d.di_mode &= ~S_ISUID; |