path: root/fs/xfs/xfs_vnodeops.c
Diffstat (limited to 'fs/xfs/xfs_vnodeops.c')
-rw-r--r--	fs/xfs/xfs_vnodeops.c	908
1 file changed, 250 insertions(+), 658 deletions(-)
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index e475e3717eb3..aa238c8fbd7a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -75,26 +75,23 @@ xfs_open(
75 return 0; 75 return 0;
76} 76}
77 77
78/*
79 * xfs_setattr
80 */
81int 78int
82xfs_setattr( 79xfs_setattr(
83 xfs_inode_t *ip, 80 struct xfs_inode *ip,
84 bhv_vattr_t *vap, 81 struct iattr *iattr,
85 int flags, 82 int flags,
86 cred_t *credp) 83 cred_t *credp)
87{ 84{
88 xfs_mount_t *mp = ip->i_mount; 85 xfs_mount_t *mp = ip->i_mount;
86 struct inode *inode = VFS_I(ip);
87 int mask = iattr->ia_valid;
89 xfs_trans_t *tp; 88 xfs_trans_t *tp;
90 int mask;
91 int code; 89 int code;
92 uint lock_flags; 90 uint lock_flags;
93 uint commit_flags=0; 91 uint commit_flags=0;
94 uid_t uid=0, iuid=0; 92 uid_t uid=0, iuid=0;
95 gid_t gid=0, igid=0; 93 gid_t gid=0, igid=0;
96 int timeflags = 0; 94 int timeflags = 0;
97 xfs_prid_t projid=0, iprojid=0;
98 struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; 95 struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
99 int file_owner; 96 int file_owner;
100 int need_iolock = 1; 97 int need_iolock = 1;
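
The hunk above converts xfs_setattr() from the old bhv_vattr_t/XFS_AT_* interface to the generic VFS struct iattr and ATTR_* mask. A minimal sketch of the new calling convention, assuming a caller that already has ip, attr_flags and credp in scope (new_size is a placeholder); xfs_change_file_space() near the end of this diff does exactly this for ATTR_SIZE:

	struct iattr	iattr;

	iattr.ia_valid = ATTR_SIZE;		/* which ia_* fields are meaningful */
	iattr.ia_size = new_size;		/* placeholder for the new file size */
	error = xfs_setattr(ip, &iattr, attr_flags, credp);
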
@@ -104,30 +101,9 @@ xfs_setattr(
104 if (mp->m_flags & XFS_MOUNT_RDONLY) 101 if (mp->m_flags & XFS_MOUNT_RDONLY)
105 return XFS_ERROR(EROFS); 102 return XFS_ERROR(EROFS);
106 103
107 /*
108 * Cannot set certain attributes.
109 */
110 mask = vap->va_mask;
111 if (mask & XFS_AT_NOSET) {
112 return XFS_ERROR(EINVAL);
113 }
114
115 if (XFS_FORCED_SHUTDOWN(mp)) 104 if (XFS_FORCED_SHUTDOWN(mp))
116 return XFS_ERROR(EIO); 105 return XFS_ERROR(EIO);
117 106
118 /*
119 * Timestamps do not need to be logged and hence do not
120 * need to be done within a transaction.
121 */
122 if (mask & XFS_AT_UPDTIMES) {
123 ASSERT((mask & ~XFS_AT_UPDTIMES) == 0);
124 timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) |
125 ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) |
126 ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0);
127 xfs_ichgtime(ip, timeflags);
128 return 0;
129 }
130
131 olddquot1 = olddquot2 = NULL; 107 olddquot1 = olddquot2 = NULL;
132 udqp = gdqp = NULL; 108 udqp = gdqp = NULL;
133 109
@@ -139,28 +115,22 @@ xfs_setattr(
139 * If the IDs do change before we take the ilock, we're covered 115 * If the IDs do change before we take the ilock, we're covered
140 * because the i_*dquot fields will get updated anyway. 116 * because the i_*dquot fields will get updated anyway.
141 */ 117 */
142 if (XFS_IS_QUOTA_ON(mp) && 118 if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
143 (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID))) {
144 uint qflags = 0; 119 uint qflags = 0;
145 120
146 if ((mask & XFS_AT_UID) && XFS_IS_UQUOTA_ON(mp)) { 121 if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
147 uid = vap->va_uid; 122 uid = iattr->ia_uid;
148 qflags |= XFS_QMOPT_UQUOTA; 123 qflags |= XFS_QMOPT_UQUOTA;
149 } else { 124 } else {
150 uid = ip->i_d.di_uid; 125 uid = ip->i_d.di_uid;
151 } 126 }
152 if ((mask & XFS_AT_GID) && XFS_IS_GQUOTA_ON(mp)) { 127 if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
153 gid = vap->va_gid; 128 gid = iattr->ia_gid;
154 qflags |= XFS_QMOPT_GQUOTA; 129 qflags |= XFS_QMOPT_GQUOTA;
155 } else { 130 } else {
156 gid = ip->i_d.di_gid; 131 gid = ip->i_d.di_gid;
157 } 132 }
158 if ((mask & XFS_AT_PROJID) && XFS_IS_PQUOTA_ON(mp)) { 133
159 projid = vap->va_projid;
160 qflags |= XFS_QMOPT_PQUOTA;
161 } else {
162 projid = ip->i_d.di_projid;
163 }
164 /* 134 /*
165 * We take a reference when we initialize udqp and gdqp, 135 * We take a reference when we initialize udqp and gdqp,
166 * so it is important that we never blindly double trip on 136 * so it is important that we never blindly double trip on
@@ -168,8 +138,8 @@ xfs_setattr(
168 */ 138 */
169 ASSERT(udqp == NULL); 139 ASSERT(udqp == NULL);
170 ASSERT(gdqp == NULL); 140 ASSERT(gdqp == NULL);
171 code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags, 141 code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, ip->i_d.di_projid,
172 &udqp, &gdqp); 142 qflags, &udqp, &gdqp);
173 if (code) 143 if (code)
174 return code; 144 return code;
175 } 145 }
@@ -180,10 +150,10 @@ xfs_setattr(
180 */ 150 */
181 tp = NULL; 151 tp = NULL;
182 lock_flags = XFS_ILOCK_EXCL; 152 lock_flags = XFS_ILOCK_EXCL;
183 if (flags & ATTR_NOLOCK) 153 if (flags & XFS_ATTR_NOLOCK)
184 need_iolock = 0; 154 need_iolock = 0;
185 if (!(mask & XFS_AT_SIZE)) { 155 if (!(mask & ATTR_SIZE)) {
186 if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) || 156 if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
187 (mp->m_flags & XFS_MOUNT_WSYNC)) { 157 (mp->m_flags & XFS_MOUNT_WSYNC)) {
188 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); 158 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
189 commit_flags = 0; 159 commit_flags = 0;
@@ -196,10 +166,10 @@ xfs_setattr(
196 } 166 }
197 } else { 167 } else {
198 if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) && 168 if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
199 !(flags & ATTR_DMI)) { 169 !(flags & XFS_ATTR_DMI)) {
200 int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR; 170 int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
201 code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip, 171 code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
202 vap->va_size, 0, dmflags, NULL); 172 iattr->ia_size, 0, dmflags, NULL);
203 if (code) { 173 if (code) {
204 lock_flags = 0; 174 lock_flags = 0;
205 goto error_return; 175 goto error_return;
@@ -212,16 +182,14 @@ xfs_setattr(
212 xfs_ilock(ip, lock_flags); 182 xfs_ilock(ip, lock_flags);
213 183
214 /* boolean: are we the file owner? */ 184 /* boolean: are we the file owner? */
215 file_owner = (current_fsuid(credp) == ip->i_d.di_uid); 185 file_owner = (current_fsuid() == ip->i_d.di_uid);
216 186
217 /* 187 /*
218 * Change various properties of a file. 188 * Change various properties of a file.
219 * Only the owner or users with CAP_FOWNER 189 * Only the owner or users with CAP_FOWNER
220 * capability may do these things. 190 * capability may do these things.
221 */ 191 */
222 if (mask & 192 if (mask & (ATTR_MODE|ATTR_UID|ATTR_GID)) {
223 (XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID|
224 XFS_AT_GID|XFS_AT_PROJID)) {
225 /* 193 /*
226 * CAP_FOWNER overrides the following restrictions: 194 * CAP_FOWNER overrides the following restrictions:
227 * 195 *
@@ -245,21 +213,21 @@ xfs_setattr(
245 * IDs of the calling process shall match the group owner of 213 * IDs of the calling process shall match the group owner of
246 * the file when setting the set-group-ID bit on that file 214 * the file when setting the set-group-ID bit on that file
247 */ 215 */
248 if (mask & XFS_AT_MODE) { 216 if (mask & ATTR_MODE) {
249 mode_t m = 0; 217 mode_t m = 0;
250 218
251 if ((vap->va_mode & S_ISUID) && !file_owner) 219 if ((iattr->ia_mode & S_ISUID) && !file_owner)
252 m |= S_ISUID; 220 m |= S_ISUID;
253 if ((vap->va_mode & S_ISGID) && 221 if ((iattr->ia_mode & S_ISGID) &&
254 !in_group_p((gid_t)ip->i_d.di_gid)) 222 !in_group_p((gid_t)ip->i_d.di_gid))
255 m |= S_ISGID; 223 m |= S_ISGID;
256#if 0 224#if 0
257 /* Linux allows this, Irix doesn't. */ 225 /* Linux allows this, Irix doesn't. */
258 if ((vap->va_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode)) 226 if ((iattr->ia_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode))
259 m |= S_ISVTX; 227 m |= S_ISVTX;
260#endif 228#endif
261 if (m && !capable(CAP_FSETID)) 229 if (m && !capable(CAP_FSETID))
262 vap->va_mode &= ~m; 230 iattr->ia_mode &= ~m;
263 } 231 }
264 } 232 }
265 233
@@ -270,7 +238,7 @@ xfs_setattr(
270 * and can change the group id only to a group of which he 238 * and can change the group id only to a group of which he
271 * or she is a member. 239 * or she is a member.
272 */ 240 */
273 if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { 241 if (mask & (ATTR_UID|ATTR_GID)) {
274 /* 242 /*
275 * These IDs could have changed since we last looked at them. 243 * These IDs could have changed since we last looked at them.
276 * But, we're assured that if the ownership did change 244 * But, we're assured that if the ownership did change
@@ -278,12 +246,9 @@ xfs_setattr(
278 * would have changed also. 246 * would have changed also.
279 */ 247 */
280 iuid = ip->i_d.di_uid; 248 iuid = ip->i_d.di_uid;
281 iprojid = ip->i_d.di_projid;
282 igid = ip->i_d.di_gid; 249 igid = ip->i_d.di_gid;
283 gid = (mask & XFS_AT_GID) ? vap->va_gid : igid; 250 gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
284 uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid; 251 uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
285 projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid :
286 iprojid;
287 252
288 /* 253 /*
289 * CAP_CHOWN overrides the following restrictions: 254 * CAP_CHOWN overrides the following restrictions:
@@ -303,11 +268,10 @@ xfs_setattr(
303 goto error_return; 268 goto error_return;
304 } 269 }
305 /* 270 /*
306 * Do a quota reservation only if uid/projid/gid is actually 271 * Do a quota reservation only if uid/gid is actually
307 * going to change. 272 * going to change.
308 */ 273 */
309 if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || 274 if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
310 (XFS_IS_PQUOTA_ON(mp) && iprojid != projid) ||
311 (XFS_IS_GQUOTA_ON(mp) && igid != gid)) { 275 (XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
312 ASSERT(tp); 276 ASSERT(tp);
313 code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp, 277 code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
@@ -321,13 +285,13 @@ xfs_setattr(
321 /* 285 /*
322 * Truncate file. Must have write permission and not be a directory. 286 * Truncate file. Must have write permission and not be a directory.
323 */ 287 */
324 if (mask & XFS_AT_SIZE) { 288 if (mask & ATTR_SIZE) {
325 /* Short circuit the truncate case for zero length files */ 289 /* Short circuit the truncate case for zero length files */
326 if ((vap->va_size == 0) && 290 if (iattr->ia_size == 0 &&
327 (ip->i_size == 0) && (ip->i_d.di_nextents == 0)) { 291 ip->i_size == 0 && ip->i_d.di_nextents == 0) {
328 xfs_iunlock(ip, XFS_ILOCK_EXCL); 292 xfs_iunlock(ip, XFS_ILOCK_EXCL);
329 lock_flags &= ~XFS_ILOCK_EXCL; 293 lock_flags &= ~XFS_ILOCK_EXCL;
330 if (mask & XFS_AT_CTIME) 294 if (mask & ATTR_CTIME)
331 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 295 xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
332 code = 0; 296 code = 0;
333 goto error_return; 297 goto error_return;
@@ -350,9 +314,9 @@ xfs_setattr(
350 /* 314 /*
351 * Change file access or modified times. 315 * Change file access or modified times.
352 */ 316 */
353 if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { 317 if (mask & (ATTR_ATIME|ATTR_MTIME)) {
354 if (!file_owner) { 318 if (!file_owner) {
355 if ((flags & ATTR_UTIME) && 319 if ((mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)) &&
356 !capable(CAP_FOWNER)) { 320 !capable(CAP_FOWNER)) {
357 code = XFS_ERROR(EPERM); 321 code = XFS_ERROR(EPERM);
358 goto error_return; 322 goto error_return;
@@ -361,90 +325,23 @@ xfs_setattr(
361 } 325 }
362 326
363 /* 327 /*
364 * Change extent size or realtime flag.
365 */
366 if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
367 /*
368 * Can't change extent size if any extents are allocated.
369 */
370 if (ip->i_d.di_nextents && (mask & XFS_AT_EXTSIZE) &&
371 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
372 vap->va_extsize) ) {
373 code = XFS_ERROR(EINVAL); /* EFBIG? */
374 goto error_return;
375 }
376
377 /*
378 * Can't change realtime flag if any extents are allocated.
379 */
380 if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
381 (mask & XFS_AT_XFLAGS) &&
382 (XFS_IS_REALTIME_INODE(ip)) !=
383 (vap->va_xflags & XFS_XFLAG_REALTIME)) {
384 code = XFS_ERROR(EINVAL); /* EFBIG? */
385 goto error_return;
386 }
387 /*
388 * Extent size must be a multiple of the appropriate block
389 * size, if set at all.
390 */
391 if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) {
392 xfs_extlen_t size;
393
394 if (XFS_IS_REALTIME_INODE(ip) ||
395 ((mask & XFS_AT_XFLAGS) &&
396 (vap->va_xflags & XFS_XFLAG_REALTIME))) {
397 size = mp->m_sb.sb_rextsize <<
398 mp->m_sb.sb_blocklog;
399 } else {
400 size = mp->m_sb.sb_blocksize;
401 }
402 if (vap->va_extsize % size) {
403 code = XFS_ERROR(EINVAL);
404 goto error_return;
405 }
406 }
407 /*
408 * If realtime flag is set then must have realtime data.
409 */
410 if ((mask & XFS_AT_XFLAGS) &&
411 (vap->va_xflags & XFS_XFLAG_REALTIME)) {
412 if ((mp->m_sb.sb_rblocks == 0) ||
413 (mp->m_sb.sb_rextsize == 0) ||
414 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
415 code = XFS_ERROR(EINVAL);
416 goto error_return;
417 }
418 }
419
420 /*
421 * Can't modify an immutable/append-only file unless
422 * we have appropriate permission.
423 */
424 if ((mask & XFS_AT_XFLAGS) &&
425 (ip->i_d.di_flags &
426 (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
427 (vap->va_xflags &
428 (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
429 !capable(CAP_LINUX_IMMUTABLE)) {
430 code = XFS_ERROR(EPERM);
431 goto error_return;
432 }
433 }
434
435 /*
436 * Now we can make the changes. Before we join the inode 328 * Now we can make the changes. Before we join the inode
437 * to the transaction, if XFS_AT_SIZE is set then take care of 329 * to the transaction, if ATTR_SIZE is set then take care of
438 * the part of the truncation that must be done without the 330 * the part of the truncation that must be done without the
439 * inode lock. This needs to be done before joining the inode 331 * inode lock. This needs to be done before joining the inode
440 * to the transaction, because the inode cannot be unlocked 332 * to the transaction, because the inode cannot be unlocked
441 * once it is a part of the transaction. 333 * once it is a part of the transaction.
442 */ 334 */
443 if (mask & XFS_AT_SIZE) { 335 if (mask & ATTR_SIZE) {
444 code = 0; 336 code = 0;
445 if ((vap->va_size > ip->i_size) && 337 if (iattr->ia_size > ip->i_size) {
446 (flags & ATTR_NOSIZETOK) == 0) { 338 /*
447 code = xfs_igrow_start(ip, vap->va_size, credp); 339 * Do the first part of growing a file: zero any data
340 * in the last block that is beyond the old EOF. We
341 * need to do this before the inode is joined to the
342 * transaction to modify the i_size.
343 */
344 code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
448 } 345 }
449 xfs_iunlock(ip, XFS_ILOCK_EXCL); 346 xfs_iunlock(ip, XFS_ILOCK_EXCL);
450 347
@@ -461,10 +358,10 @@ xfs_setattr(
461 * not within the range we care about here. 358 * not within the range we care about here.
462 */ 359 */
463 if (!code && 360 if (!code &&
464 (ip->i_size != ip->i_d.di_size) && 361 ip->i_size != ip->i_d.di_size &&
465 (vap->va_size > ip->i_d.di_size)) { 362 iattr->ia_size > ip->i_d.di_size) {
466 code = xfs_flush_pages(ip, 363 code = xfs_flush_pages(ip,
467 ip->i_d.di_size, vap->va_size, 364 ip->i_d.di_size, iattr->ia_size,
468 XFS_B_ASYNC, FI_NONE); 365 XFS_B_ASYNC, FI_NONE);
469 } 366 }
470 367
@@ -472,7 +369,7 @@ xfs_setattr(
472 vn_iowait(ip); 369 vn_iowait(ip);
473 370
474 if (!code) 371 if (!code)
475 code = xfs_itruncate_data(ip, vap->va_size); 372 code = xfs_itruncate_data(ip, iattr->ia_size);
476 if (code) { 373 if (code) {
477 ASSERT(tp == NULL); 374 ASSERT(tp == NULL);
478 lock_flags &= ~XFS_ILOCK_EXCL; 375 lock_flags &= ~XFS_ILOCK_EXCL;
@@ -501,28 +398,30 @@ xfs_setattr(
501 /* 398 /*
502 * Truncate file. Must have write permission and not be a directory. 399 * Truncate file. Must have write permission and not be a directory.
503 */ 400 */
504 if (mask & XFS_AT_SIZE) { 401 if (mask & ATTR_SIZE) {
505 /* 402 /*
506 * Only change the c/mtime if we are changing the size 403 * Only change the c/mtime if we are changing the size
507 * or we are explicitly asked to change it. This handles 404 * or we are explicitly asked to change it. This handles
508 * the semantic difference between truncate() and ftruncate() 405 * the semantic difference between truncate() and ftruncate()
509 * as implemented in the VFS. 406 * as implemented in the VFS.
510 */ 407 */
511 if (vap->va_size != ip->i_size || (mask & XFS_AT_CTIME)) 408 if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
512 timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; 409 timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
513 410
514 if (vap->va_size > ip->i_size) { 411 if (iattr->ia_size > ip->i_size) {
515 xfs_igrow_finish(tp, ip, vap->va_size, 412 ip->i_d.di_size = iattr->ia_size;
516 !(flags & ATTR_DMI)); 413 ip->i_size = iattr->ia_size;
517 } else if ((vap->va_size <= ip->i_size) || 414 if (!(flags & XFS_ATTR_DMI))
518 ((vap->va_size == 0) && ip->i_d.di_nextents)) { 415 xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
416 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
417 } else if (iattr->ia_size <= ip->i_size ||
418 (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
519 /* 419 /*
520 * signal a sync transaction unless 420 * signal a sync transaction unless
521 * we're truncating an already unlinked 421 * we're truncating an already unlinked
522 * file on a wsync filesystem 422 * file on a wsync filesystem
523 */ 423 */
524 code = xfs_itruncate_finish(&tp, ip, 424 code = xfs_itruncate_finish(&tp, ip, iattr->ia_size,
525 (xfs_fsize_t)vap->va_size,
526 XFS_DATA_FORK, 425 XFS_DATA_FORK,
527 ((ip->i_d.di_nlink != 0 || 426 ((ip->i_d.di_nlink != 0 ||
528 !(mp->m_flags & XFS_MOUNT_WSYNC)) 427 !(mp->m_flags & XFS_MOUNT_WSYNC))
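
The size/ctime test at the top of the hunk above ("iattr->ia_size != ip->i_size || (mask & ATTR_CTIME)") is what preserves the truncate()/ftruncate() distinction its comment mentions. A hedged illustration, assuming the historical VFS behaviour in which ftruncate(2) passes ATTR_CTIME|ATTR_MTIME while truncate(2) passes only ATTR_SIZE (path, fd and cur_size are placeholders):

	/* truncate(2) to the current size: ia_size == i_size and ATTR_CTIME
	 * is unset, so xfs_setattr() skips the c/mtime update.            */
	truncate("/mnt/test/file", cur_size);

	/* ftruncate(2) to the current size: the VFS adds ATTR_CTIME and
	 * ATTR_MTIME, so the timestamps are still bumped.                 */
	ftruncate(fd, cur_size);
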
@@ -544,9 +443,12 @@ xfs_setattr(
544 /* 443 /*
545 * Change file access modes. 444 * Change file access modes.
546 */ 445 */
547 if (mask & XFS_AT_MODE) { 446 if (mask & ATTR_MODE) {
548 ip->i_d.di_mode &= S_IFMT; 447 ip->i_d.di_mode &= S_IFMT;
549 ip->i_d.di_mode |= vap->va_mode & ~S_IFMT; 448 ip->i_d.di_mode |= iattr->ia_mode & ~S_IFMT;
449
450 inode->i_mode &= S_IFMT;
451 inode->i_mode |= iattr->ia_mode & ~S_IFMT;
550 452
551 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); 453 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
552 timeflags |= XFS_ICHGTIME_CHG; 454 timeflags |= XFS_ICHGTIME_CHG;
@@ -559,7 +461,7 @@ xfs_setattr(
559 * and can change the group id only to a group of which he 461 * and can change the group id only to a group of which he
560 * or she is a member. 462 * or she is a member.
561 */ 463 */
562 if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) { 464 if (mask & (ATTR_UID|ATTR_GID)) {
563 /* 465 /*
564 * CAP_FSETID overrides the following restrictions: 466 * CAP_FSETID overrides the following restrictions:
565 * 467 *
@@ -577,39 +479,24 @@ xfs_setattr(
577 */ 479 */
578 if (iuid != uid) { 480 if (iuid != uid) {
579 if (XFS_IS_UQUOTA_ON(mp)) { 481 if (XFS_IS_UQUOTA_ON(mp)) {
580 ASSERT(mask & XFS_AT_UID); 482 ASSERT(mask & ATTR_UID);
581 ASSERT(udqp); 483 ASSERT(udqp);
582 olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 484 olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
583 &ip->i_udquot, udqp); 485 &ip->i_udquot, udqp);
584 } 486 }
585 ip->i_d.di_uid = uid; 487 ip->i_d.di_uid = uid;
488 inode->i_uid = uid;
586 } 489 }
587 if (igid != gid) { 490 if (igid != gid) {
588 if (XFS_IS_GQUOTA_ON(mp)) { 491 if (XFS_IS_GQUOTA_ON(mp)) {
589 ASSERT(!XFS_IS_PQUOTA_ON(mp)); 492 ASSERT(!XFS_IS_PQUOTA_ON(mp));
590 ASSERT(mask & XFS_AT_GID); 493 ASSERT(mask & ATTR_GID);
591 ASSERT(gdqp); 494 ASSERT(gdqp);
592 olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip, 495 olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
593 &ip->i_gdquot, gdqp); 496 &ip->i_gdquot, gdqp);
594 } 497 }
595 ip->i_d.di_gid = gid; 498 ip->i_d.di_gid = gid;
596 } 499 inode->i_gid = gid;
597 if (iprojid != projid) {
598 if (XFS_IS_PQUOTA_ON(mp)) {
599 ASSERT(!XFS_IS_GQUOTA_ON(mp));
600 ASSERT(mask & XFS_AT_PROJID);
601 ASSERT(gdqp);
602 olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
603 &ip->i_gdquot, gdqp);
604 }
605 ip->i_d.di_projid = projid;
606 /*
607 * We may have to rev the inode as well as
608 * the superblock version number since projids didn't
609 * exist before DINODE_VERSION_2 and SB_VERSION_NLINK.
610 */
611 if (ip->i_d.di_version == XFS_DINODE_VERSION_1)
612 xfs_bump_ino_vers2(tp, ip);
613 } 500 }
614 501
615 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); 502 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
@@ -620,82 +507,33 @@ xfs_setattr(
620 /* 507 /*
621 * Change file access or modified times. 508 * Change file access or modified times.
622 */ 509 */
623 if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) { 510 if (mask & (ATTR_ATIME|ATTR_MTIME)) {
624 if (mask & XFS_AT_ATIME) { 511 if (mask & ATTR_ATIME) {
625 ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec; 512 inode->i_atime = iattr->ia_atime;
626 ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec; 513 ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
514 ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
627 ip->i_update_core = 1; 515 ip->i_update_core = 1;
628 timeflags &= ~XFS_ICHGTIME_ACC;
629 } 516 }
630 if (mask & XFS_AT_MTIME) { 517 if (mask & ATTR_MTIME) {
631 ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec; 518 inode->i_mtime = iattr->ia_mtime;
632 ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec; 519 ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
520 ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
633 timeflags &= ~XFS_ICHGTIME_MOD; 521 timeflags &= ~XFS_ICHGTIME_MOD;
634 timeflags |= XFS_ICHGTIME_CHG; 522 timeflags |= XFS_ICHGTIME_CHG;
635 } 523 }
636 if (tp && (flags & ATTR_UTIME)) 524 if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
637 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); 525 xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
638 } 526 }
639 527
640 /* 528 /*
641 * Change XFS-added attributes. 529 * Change file inode change time only if ATTR_CTIME set
642 */
643 if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
644 if (mask & XFS_AT_EXTSIZE) {
645 /*
646 * Converting bytes to fs blocks.
647 */
648 ip->i_d.di_extsize = vap->va_extsize >>
649 mp->m_sb.sb_blocklog;
650 }
651 if (mask & XFS_AT_XFLAGS) {
652 uint di_flags;
653
654 /* can't set PREALLOC this way, just preserve it */
655 di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
656 if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
657 di_flags |= XFS_DIFLAG_IMMUTABLE;
658 if (vap->va_xflags & XFS_XFLAG_APPEND)
659 di_flags |= XFS_DIFLAG_APPEND;
660 if (vap->va_xflags & XFS_XFLAG_SYNC)
661 di_flags |= XFS_DIFLAG_SYNC;
662 if (vap->va_xflags & XFS_XFLAG_NOATIME)
663 di_flags |= XFS_DIFLAG_NOATIME;
664 if (vap->va_xflags & XFS_XFLAG_NODUMP)
665 di_flags |= XFS_DIFLAG_NODUMP;
666 if (vap->va_xflags & XFS_XFLAG_PROJINHERIT)
667 di_flags |= XFS_DIFLAG_PROJINHERIT;
668 if (vap->va_xflags & XFS_XFLAG_NODEFRAG)
669 di_flags |= XFS_DIFLAG_NODEFRAG;
670 if (vap->va_xflags & XFS_XFLAG_FILESTREAM)
671 di_flags |= XFS_DIFLAG_FILESTREAM;
672 if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
673 if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
674 di_flags |= XFS_DIFLAG_RTINHERIT;
675 if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
676 di_flags |= XFS_DIFLAG_NOSYMLINKS;
677 if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
678 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
679 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
680 if (vap->va_xflags & XFS_XFLAG_REALTIME)
681 di_flags |= XFS_DIFLAG_REALTIME;
682 if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
683 di_flags |= XFS_DIFLAG_EXTSIZE;
684 }
685 ip->i_d.di_flags = di_flags;
686 }
687 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
688 timeflags |= XFS_ICHGTIME_CHG;
689 }
690
691 /*
692 * Change file inode change time only if XFS_AT_CTIME set
693 * AND we have been called by a DMI function. 530 * AND we have been called by a DMI function.
694 */ 531 */
695 532
696 if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) { 533 if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
697 ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec; 534 inode->i_ctime = iattr->ia_ctime;
698 ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec; 535 ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
536 ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
699 ip->i_update_core = 1; 537 ip->i_update_core = 1;
700 timeflags &= ~XFS_ICHGTIME_CHG; 538 timeflags &= ~XFS_ICHGTIME_CHG;
701 } 539 }
@@ -704,7 +542,7 @@ xfs_setattr(
704 * Send out timestamp changes that need to be set to the 542 * Send out timestamp changes that need to be set to the
705 * current time. Not done when called by a DMI function. 543 * current time. Not done when called by a DMI function.
706 */ 544 */
707 if (timeflags && !(flags & ATTR_DMI)) 545 if (timeflags && !(flags & XFS_ATTR_DMI))
708 xfs_ichgtime(ip, timeflags); 546 xfs_ichgtime(ip, timeflags);
709 547
710 XFS_STATS_INC(xs_ig_attrchg); 548 XFS_STATS_INC(xs_ig_attrchg);
@@ -742,7 +580,7 @@ xfs_setattr(
742 } 580 }
743 581
744 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) && 582 if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
745 !(flags & ATTR_DMI)) { 583 !(flags & XFS_ATTR_DMI)) {
746 (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL, 584 (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
747 NULL, DM_RIGHT_NULL, NULL, NULL, 585 NULL, DM_RIGHT_NULL, NULL, NULL,
748 0, 0, AT_DELAY_FLAG(flags)); 586 0, 0, AT_DELAY_FLAG(flags));
@@ -875,7 +713,7 @@ xfs_fsync(
875 return XFS_ERROR(EIO); 713 return XFS_ERROR(EIO);
876 714
877 /* capture size updates in I/O completion before writing the inode. */ 715 /* capture size updates in I/O completion before writing the inode. */
878 error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping); 716 error = filemap_fdatawait(VFS_I(ip)->i_mapping);
879 if (error) 717 if (error)
880 return XFS_ERROR(error); 718 return XFS_ERROR(error);
881 719
@@ -1321,7 +1159,6 @@ int
1321xfs_release( 1159xfs_release(
1322 xfs_inode_t *ip) 1160 xfs_inode_t *ip)
1323{ 1161{
1324 bhv_vnode_t *vp = XFS_ITOV(ip);
1325 xfs_mount_t *mp = ip->i_mount; 1162 xfs_mount_t *mp = ip->i_mount;
1326 int error; 1163 int error;
1327 1164
@@ -1356,13 +1193,13 @@ xfs_release(
1356 * be exposed to that problem. 1193 * be exposed to that problem.
1357 */ 1194 */
1358 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1195 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1359 if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0) 1196 if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
1360 xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE); 1197 xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
1361 } 1198 }
1362 1199
1363 if (ip->i_d.di_nlink != 0) { 1200 if (ip->i_d.di_nlink != 0) {
1364 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1201 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1365 ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || 1202 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
1366 ip->i_delayed_blks > 0)) && 1203 ip->i_delayed_blks > 0)) &&
1367 (ip->i_df.if_flags & XFS_IFEXTENTS)) && 1204 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
1368 (!(ip->i_d.di_flags & 1205 (!(ip->i_d.di_flags &
@@ -1388,7 +1225,6 @@ int
1388xfs_inactive( 1225xfs_inactive(
1389 xfs_inode_t *ip) 1226 xfs_inode_t *ip)
1390{ 1227{
1391 bhv_vnode_t *vp = XFS_ITOV(ip);
1392 xfs_bmap_free_t free_list; 1228 xfs_bmap_free_t free_list;
1393 xfs_fsblock_t first_block; 1229 xfs_fsblock_t first_block;
1394 int committed; 1230 int committed;
@@ -1403,7 +1239,7 @@ xfs_inactive(
1403 * If the inode is already free, then there can be nothing 1239 * If the inode is already free, then there can be nothing
1404 * to clean up here. 1240 * to clean up here.
1405 */ 1241 */
1406 if (ip->i_d.di_mode == 0 || VN_BAD(vp)) { 1242 if (ip->i_d.di_mode == 0 || VN_BAD(VFS_I(ip))) {
1407 ASSERT(ip->i_df.if_real_bytes == 0); 1243 ASSERT(ip->i_df.if_real_bytes == 0);
1408 ASSERT(ip->i_df.if_broot_bytes == 0); 1244 ASSERT(ip->i_df.if_broot_bytes == 0);
1409 return VN_INACTIVE_CACHE; 1245 return VN_INACTIVE_CACHE;
@@ -1433,7 +1269,7 @@ xfs_inactive(
1433 1269
1434 if (ip->i_d.di_nlink != 0) { 1270 if (ip->i_d.di_nlink != 0) {
1435 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 1271 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
1436 ((ip->i_size > 0) || (VN_CACHED(vp) > 0 || 1272 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
1437 ip->i_delayed_blks > 0)) && 1273 ip->i_delayed_blks > 0)) &&
1438 (ip->i_df.if_flags & XFS_IFEXTENTS) && 1274 (ip->i_df.if_flags & XFS_IFEXTENTS) &&
1439 (!(ip->i_d.di_flags & 1275 (!(ip->i_d.di_flags &
@@ -1601,12 +1437,18 @@ xfs_inactive(
1601 return VN_INACTIVE_CACHE; 1437 return VN_INACTIVE_CACHE;
1602} 1438}
1603 1439
1604 1440/*
1441 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
1442 * is allowed, otherwise it has to be an exact match. If a CI match is found,
1443 * ci_name->name will point to the actual name (caller must free) or
1444 * will be set to NULL if an exact match is found.
1445 */
1605int 1446int
1606xfs_lookup( 1447xfs_lookup(
1607 xfs_inode_t *dp, 1448 xfs_inode_t *dp,
1608 struct xfs_name *name, 1449 struct xfs_name *name,
1609 xfs_inode_t **ipp) 1450 xfs_inode_t **ipp,
1451 struct xfs_name *ci_name)
1610{ 1452{
1611 xfs_ino_t inum; 1453 xfs_ino_t inum;
1612 int error; 1454 int error;
@@ -1618,7 +1460,7 @@ xfs_lookup(
1618 return XFS_ERROR(EIO); 1460 return XFS_ERROR(EIO);
1619 1461
1620 lock_mode = xfs_ilock_map_shared(dp); 1462 lock_mode = xfs_ilock_map_shared(dp);
1621 error = xfs_dir_lookup(NULL, dp, name, &inum); 1463 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
1622 xfs_iunlock_map_shared(dp, lock_mode); 1464 xfs_iunlock_map_shared(dp, lock_mode);
1623 1465
1624 if (error) 1466 if (error)
@@ -1626,12 +1468,15 @@ xfs_lookup(
1626 1468
1627 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); 1469 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
1628 if (error) 1470 if (error)
1629 goto out; 1471 goto out_free_name;
1630 1472
1631 xfs_itrace_ref(*ipp); 1473 xfs_itrace_ref(*ipp);
1632 return 0; 1474 return 0;
1633 1475
1634 out: 1476out_free_name:
1477 if (ci_name)
1478 kmem_free(ci_name->name);
1479out:
1635 *ipp = NULL; 1480 *ipp = NULL;
1636 return error; 1481 return error;
1637} 1482}
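
The new comment above xfs_lookup() spells out the ci_name ownership rule. A short sketch of a caller honouring that contract (the local names are hypothetical; the kmem_free() call mirrors the out_free_name error path in this hunk):

	struct xfs_name	ci_name = { NULL, 0 };
	xfs_inode_t	*ip;

	error = xfs_lookup(dp, &xname, &ip, &ci_name);
	if (!error && ci_name.name) {
		/* Case-insensitive match: ci_name.name holds the exact
		 * on-disk name and must be freed by the caller. */
		kmem_free(ci_name.name);
	}
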
@@ -1688,7 +1533,7 @@ xfs_create(
1688 * Make sure that we have allocated dquot(s) on disk. 1533 * Make sure that we have allocated dquot(s) on disk.
1689 */ 1534 */
1690 error = XFS_QM_DQVOPALLOC(mp, dp, 1535 error = XFS_QM_DQVOPALLOC(mp, dp,
1691 current_fsuid(credp), current_fsgid(credp), prid, 1536 current_fsuid(), current_fsgid(), prid,
1692 XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp); 1537 XFS_QMOPT_QUOTALL|XFS_QMOPT_INHERIT, &udqp, &gdqp);
1693 if (error) 1538 if (error)
1694 goto std_return; 1539 goto std_return;
@@ -1860,111 +1705,6 @@ std_return:
1860} 1705}
1861 1706
1862#ifdef DEBUG 1707#ifdef DEBUG
1863/*
1864 * Some counters to see if (and how often) we are hitting some deadlock
1865 * prevention code paths.
1866 */
1867
1868int xfs_rm_locks;
1869int xfs_rm_lock_delays;
1870int xfs_rm_attempts;
1871#endif
1872
1873/*
1874 * The following routine will lock the inodes associated with the
1875 * directory and the named entry in the directory. The locks are
1876 * acquired in increasing inode number.
1877 *
1878 * If the entry is "..", then only the directory is locked. The
1879 * vnode ref count will still include that from the .. entry in
1880 * this case.
1881 *
1882 * There is a deadlock we need to worry about. If the locked directory is
1883 * in the AIL, it might be blocking up the log. The next inode we lock
1884 * could be already locked by another thread waiting for log space (e.g
1885 * a permanent log reservation with a long running transaction (see
1886 * xfs_itruncate_finish)). To solve this, we must check if the directory
1887 * is in the ail and use lock_nowait. If we can't lock, we need to
1888 * drop the inode lock on the directory and try again. xfs_iunlock will
1889 * potentially push the tail if we were holding up the log.
1890 */
1891STATIC int
1892xfs_lock_dir_and_entry(
1893 xfs_inode_t *dp,
1894 xfs_inode_t *ip) /* inode of entry 'name' */
1895{
1896 int attempts;
1897 xfs_ino_t e_inum;
1898 xfs_inode_t *ips[2];
1899 xfs_log_item_t *lp;
1900
1901#ifdef DEBUG
1902 xfs_rm_locks++;
1903#endif
1904 attempts = 0;
1905
1906again:
1907 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1908
1909 e_inum = ip->i_ino;
1910
1911 xfs_itrace_ref(ip);
1912
1913 /*
1914 * We want to lock in increasing inum. Since we've already
1915 * acquired the lock on the directory, we may need to release
1916 * if if the inum of the entry turns out to be less.
1917 */
1918 if (e_inum > dp->i_ino) {
1919 /*
1920 * We are already in the right order, so just
1921 * lock on the inode of the entry.
1922 * We need to use nowait if dp is in the AIL.
1923 */
1924
1925 lp = (xfs_log_item_t *)dp->i_itemp;
1926 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1927 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
1928 attempts++;
1929#ifdef DEBUG
1930 xfs_rm_attempts++;
1931#endif
1932
1933 /*
1934 * Unlock dp and try again.
1935 * xfs_iunlock will try to push the tail
1936 * if the inode is in the AIL.
1937 */
1938
1939 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1940
1941 if ((attempts % 5) == 0) {
1942 delay(1); /* Don't just spin the CPU */
1943#ifdef DEBUG
1944 xfs_rm_lock_delays++;
1945#endif
1946 }
1947 goto again;
1948 }
1949 } else {
1950 xfs_ilock(ip, XFS_ILOCK_EXCL);
1951 }
1952 } else if (e_inum < dp->i_ino) {
1953 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1954
1955 ips[0] = ip;
1956 ips[1] = dp;
1957 xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
1958 }
1959 /* else e_inum == dp->i_ino */
1960 /* This can happen if we're asked to lock /x/..
1961 * the entry is "..", which is also the parent directory.
1962 */
1963
1964 return 0;
1965}
1966
1967#ifdef DEBUG
1968int xfs_locked_n; 1708int xfs_locked_n;
1969int xfs_small_retries; 1709int xfs_small_retries;
1970int xfs_middle_retries; 1710int xfs_middle_retries;
@@ -2098,12 +1838,44 @@ again:
2098#endif 1838#endif
2099} 1839}
2100 1840
2101#ifdef DEBUG 1841void
2102#define REMOVE_DEBUG_TRACE(x) {remove_which_error_return = (x);} 1842xfs_lock_two_inodes(
2103int remove_which_error_return = 0; 1843 xfs_inode_t *ip0,
2104#else /* ! DEBUG */ 1844 xfs_inode_t *ip1,
2105#define REMOVE_DEBUG_TRACE(x) 1845 uint lock_mode)
2106#endif /* ! DEBUG */ 1846{
1847 xfs_inode_t *temp;
1848 int attempts = 0;
1849 xfs_log_item_t *lp;
1850
1851 ASSERT(ip0->i_ino != ip1->i_ino);
1852
1853 if (ip0->i_ino > ip1->i_ino) {
1854 temp = ip0;
1855 ip0 = ip1;
1856 ip1 = temp;
1857 }
1858
1859 again:
1860 xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
1861
1862 /*
1863 * If the first lock we have locked is in the AIL, we must TRY to get
1864 * the second lock. If we can't get it, we must release the first one
1865 * and try again.
1866 */
1867 lp = (xfs_log_item_t *)ip0->i_itemp;
1868 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1869 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
1870 xfs_iunlock(ip0, lock_mode);
1871 if ((++attempts % 5) == 0)
1872 delay(1); /* Don't just spin the CPU */
1873 goto again;
1874 }
1875 } else {
1876 xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
1877 }
1878}
2107 1879
2108int 1880int
2109xfs_remove( 1881xfs_remove(
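
xfs_lock_two_inodes() above takes over from both the removed xfs_lock_dir_and_entry() and the open-coded ordering xfs_link() used to do: it always locks in increasing inode-number order and, when the first inode's log item is in the AIL, falls back to a trylock/retry loop so it never blocks the log tail. A sketch of the resulting caller pattern (the explicit unlocks are illustrative; xfs_remove() below instead joins both inodes to a transaction, which then owns the locks):

	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
	/* ... modify both inodes ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
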
@@ -2113,6 +1885,7 @@ xfs_remove(
2113{ 1885{
2114 xfs_mount_t *mp = dp->i_mount; 1886 xfs_mount_t *mp = dp->i_mount;
2115 xfs_trans_t *tp = NULL; 1887 xfs_trans_t *tp = NULL;
1888 int is_dir = S_ISDIR(ip->i_d.di_mode);
2116 int error = 0; 1889 int error = 0;
2117 xfs_bmap_free_t free_list; 1890 xfs_bmap_free_t free_list;
2118 xfs_fsblock_t first_block; 1891 xfs_fsblock_t first_block;
@@ -2120,8 +1893,10 @@ xfs_remove(
2120 int committed; 1893 int committed;
2121 int link_zero; 1894 int link_zero;
2122 uint resblks; 1895 uint resblks;
1896 uint log_count;
2123 1897
2124 xfs_itrace_entry(dp); 1898 xfs_itrace_entry(dp);
1899 xfs_itrace_entry(ip);
2125 1900
2126 if (XFS_FORCED_SHUTDOWN(mp)) 1901 if (XFS_FORCED_SHUTDOWN(mp))
2127 return XFS_ERROR(EIO); 1902 return XFS_ERROR(EIO);
@@ -2134,19 +1909,23 @@ xfs_remove(
2134 return error; 1909 return error;
2135 } 1910 }
2136 1911
2137 xfs_itrace_entry(ip);
2138 xfs_itrace_ref(ip);
2139
2140 error = XFS_QM_DQATTACH(mp, dp, 0); 1912 error = XFS_QM_DQATTACH(mp, dp, 0);
2141 if (!error) 1913 if (error)
2142 error = XFS_QM_DQATTACH(mp, ip, 0);
2143 if (error) {
2144 REMOVE_DEBUG_TRACE(__LINE__);
2145 goto std_return; 1914 goto std_return;
2146 }
2147 1915
2148 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); 1916 error = XFS_QM_DQATTACH(mp, ip, 0);
1917 if (error)
1918 goto std_return;
1919
1920 if (is_dir) {
1921 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
1922 log_count = XFS_DEFAULT_LOG_COUNT;
1923 } else {
1924 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
1925 log_count = XFS_REMOVE_LOG_COUNT;
1926 }
2149 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 1927 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1928
2150 /* 1929 /*
2151 * We try to get the real space reservation first, 1930 * We try to get the real space reservation first,
2152 * allowing for directory btree deletion(s) implying 1931 * allowing for directory btree deletion(s) implying
@@ -2158,25 +1937,19 @@ xfs_remove(
2158 */ 1937 */
2159 resblks = XFS_REMOVE_SPACE_RES(mp); 1938 resblks = XFS_REMOVE_SPACE_RES(mp);
2160 error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0, 1939 error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
2161 XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); 1940 XFS_TRANS_PERM_LOG_RES, log_count);
2162 if (error == ENOSPC) { 1941 if (error == ENOSPC) {
2163 resblks = 0; 1942 resblks = 0;
2164 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0, 1943 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
2165 XFS_TRANS_PERM_LOG_RES, XFS_REMOVE_LOG_COUNT); 1944 XFS_TRANS_PERM_LOG_RES, log_count);
2166 } 1945 }
2167 if (error) { 1946 if (error) {
2168 ASSERT(error != ENOSPC); 1947 ASSERT(error != ENOSPC);
2169 REMOVE_DEBUG_TRACE(__LINE__); 1948 cancel_flags = 0;
2170 xfs_trans_cancel(tp, 0); 1949 goto out_trans_cancel;
2171 return error;
2172 } 1950 }
2173 1951
2174 error = xfs_lock_dir_and_entry(dp, ip); 1952 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2175 if (error) {
2176 REMOVE_DEBUG_TRACE(__LINE__);
2177 xfs_trans_cancel(tp, cancel_flags);
2178 goto std_return;
2179 }
2180 1953
2181 /* 1954 /*
2182 * At this point, we've gotten both the directory and the entry 1955 * At this point, we've gotten both the directory and the entry
@@ -2189,46 +1962,83 @@ xfs_remove(
2189 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 1962 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2190 1963
2191 /* 1964 /*
2192 * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. 1965 * If we're removing a directory perform some additional validation.
2193 */ 1966 */
1967 if (is_dir) {
1968 ASSERT(ip->i_d.di_nlink >= 2);
1969 if (ip->i_d.di_nlink != 2) {
1970 error = XFS_ERROR(ENOTEMPTY);
1971 goto out_trans_cancel;
1972 }
1973 if (!xfs_dir_isempty(ip)) {
1974 error = XFS_ERROR(ENOTEMPTY);
1975 goto out_trans_cancel;
1976 }
1977 }
1978
2194 XFS_BMAP_INIT(&free_list, &first_block); 1979 XFS_BMAP_INIT(&free_list, &first_block);
2195 error = xfs_dir_removename(tp, dp, name, ip->i_ino, 1980 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2196 &first_block, &free_list, resblks); 1981 &first_block, &free_list, resblks);
2197 if (error) { 1982 if (error) {
2198 ASSERT(error != ENOENT); 1983 ASSERT(error != ENOENT);
2199 REMOVE_DEBUG_TRACE(__LINE__); 1984 goto out_bmap_cancel;
2200 goto error1;
2201 } 1985 }
2202 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 1986 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2203 1987
1988 /*
1989 * Bump the in memory generation count on the parent
1990 * directory so that other can know that it has changed.
1991 */
2204 dp->i_gen++; 1992 dp->i_gen++;
2205 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 1993 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2206 1994
2207 error = xfs_droplink(tp, ip); 1995 if (is_dir) {
2208 if (error) { 1996 /*
2209 REMOVE_DEBUG_TRACE(__LINE__); 1997 * Drop the link from ip's "..".
2210 goto error1; 1998 */
1999 error = xfs_droplink(tp, dp);
2000 if (error)
2001 goto out_bmap_cancel;
2002
2003 /*
2004 * Drop the link from dp to ip.
2005 */
2006 error = xfs_droplink(tp, ip);
2007 if (error)
2008 goto out_bmap_cancel;
2009 } else {
2010 /*
2011 * When removing a non-directory we need to log the parent
2012 * inode here for the i_gen update. For a directory this is
2013 * done implicitly by the xfs_droplink call for the ".." entry.
2014 */
2015 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2211 } 2016 }
2212 2017
2213 /* Determine if this is the last link while 2018 /*
2019 * Drop the "." link from ip to self.
2020 */
2021 error = xfs_droplink(tp, ip);
2022 if (error)
2023 goto out_bmap_cancel;
2024
2025 /*
2026 * Determine if this is the last link while
2214 * we are in the transaction. 2027 * we are in the transaction.
2215 */ 2028 */
2216 link_zero = (ip)->i_d.di_nlink==0; 2029 link_zero = (ip->i_d.di_nlink == 0);
2217 2030
2218 /* 2031 /*
2219 * If this is a synchronous mount, make sure that the 2032 * If this is a synchronous mount, make sure that the
2220 * remove transaction goes to disk before returning to 2033 * remove transaction goes to disk before returning to
2221 * the user. 2034 * the user.
2222 */ 2035 */
2223 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 2036 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2224 xfs_trans_set_sync(tp); 2037 xfs_trans_set_sync(tp);
2225 }
2226 2038
2227 error = xfs_bmap_finish(&tp, &free_list, &committed); 2039 error = xfs_bmap_finish(&tp, &free_list, &committed);
2228 if (error) { 2040 if (error)
2229 REMOVE_DEBUG_TRACE(__LINE__); 2041 goto out_bmap_cancel;
2230 goto error_rele;
2231 }
2232 2042
2233 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 2043 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2234 if (error) 2044 if (error)
@@ -2240,38 +2050,26 @@ xfs_remove(
2240 * will get killed on last close in xfs_close() so we don't 2050 * will get killed on last close in xfs_close() so we don't
2241 * have to worry about that. 2051 * have to worry about that.
2242 */ 2052 */
2243 if (link_zero && xfs_inode_is_filestream(ip)) 2053 if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
2244 xfs_filestream_deassociate(ip); 2054 xfs_filestream_deassociate(ip);
2245 2055
2246 xfs_itrace_exit(ip); 2056 xfs_itrace_exit(ip);
2057 xfs_itrace_exit(dp);
2247 2058
2248/* Fall through to std_return with error = 0 */
2249 std_return: 2059 std_return:
2250 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) { 2060 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
2251 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, 2061 XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
2252 dp, DM_RIGHT_NULL, 2062 NULL, DM_RIGHT_NULL, name->name, NULL,
2253 NULL, DM_RIGHT_NULL, 2063 ip->i_d.di_mode, error, 0);
2254 name->name, NULL, ip->i_d.di_mode, error, 0);
2255 } 2064 }
2256 return error;
2257 2065
2258 error1: 2066 return error;
2259 xfs_bmap_cancel(&free_list);
2260 cancel_flags |= XFS_TRANS_ABORT;
2261 xfs_trans_cancel(tp, cancel_flags);
2262 goto std_return;
2263 2067
2264 error_rele: 2068 out_bmap_cancel:
2265 /*
2266 * In this case make sure to not release the inode until after
2267 * the current transaction is aborted. Releasing it beforehand
2268 * can cause us to go to xfs_inactive and start a recursive
2269 * transaction which can easily deadlock with the current one.
2270 */
2271 xfs_bmap_cancel(&free_list); 2069 xfs_bmap_cancel(&free_list);
2272 cancel_flags |= XFS_TRANS_ABORT; 2070 cancel_flags |= XFS_TRANS_ABORT;
2071 out_trans_cancel:
2273 xfs_trans_cancel(tp, cancel_flags); 2072 xfs_trans_cancel(tp, cancel_flags);
2274
2275 goto std_return; 2073 goto std_return;
2276} 2074}
2277 2075
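
The is_dir branch in the rewritten xfs_remove() above encodes the usual link accounting for removing an empty directory; a worked example with assumed starting counts:

	/*
	 * Removing empty directory "d" from parent "p" (counts illustrative):
	 *   before:               d.nlink == 2, and d's ".." contributes one
	 *                         extra link to p
	 *   xfs_droplink(tp, dp): p.nlink -= 1  (d's ".." entry goes away)
	 *   xfs_droplink(tp, ip): d.nlink -= 1  (p's entry for d)
	 *   xfs_droplink(tp, ip): d.nlink -= 1  ("." self link)
	 *   after:                d.nlink == 0, so the inode is freed via
	 *                         xfs_inactive() once the last reference drops
	 */
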
@@ -2283,7 +2081,6 @@ xfs_link(
2283{ 2081{
2284 xfs_mount_t *mp = tdp->i_mount; 2082 xfs_mount_t *mp = tdp->i_mount;
2285 xfs_trans_t *tp; 2083 xfs_trans_t *tp;
2286 xfs_inode_t *ips[2];
2287 int error; 2084 int error;
2288 xfs_bmap_free_t free_list; 2085 xfs_bmap_free_t free_list;
2289 xfs_fsblock_t first_block; 2086 xfs_fsblock_t first_block;
@@ -2331,15 +2128,7 @@ xfs_link(
2331 goto error_return; 2128 goto error_return;
2332 } 2129 }
2333 2130
2334 if (sip->i_ino < tdp->i_ino) { 2131 xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
2335 ips[0] = sip;
2336 ips[1] = tdp;
2337 } else {
2338 ips[0] = tdp;
2339 ips[1] = sip;
2340 }
2341
2342 xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
2343 2132
2344 /* 2133 /*
2345 * Increment vnode ref counts since xfs_trans_commit & 2134 * Increment vnode ref counts since xfs_trans_commit &
@@ -2480,7 +2269,7 @@ xfs_mkdir(
2480 * Make sure that we have allocated dquot(s) on disk. 2269 * Make sure that we have allocated dquot(s) on disk.
2481 */ 2270 */
2482 error = XFS_QM_DQVOPALLOC(mp, dp, 2271 error = XFS_QM_DQVOPALLOC(mp, dp,
2483 current_fsuid(credp), current_fsgid(credp), prid, 2272 current_fsuid(), current_fsgid(), prid,
2484 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 2273 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2485 if (error) 2274 if (error)
2486 goto std_return; 2275 goto std_return;
@@ -2638,186 +2427,6 @@ std_return:
2638} 2427}
2639 2428
2640int 2429int
2641xfs_rmdir(
2642 xfs_inode_t *dp,
2643 struct xfs_name *name,
2644 xfs_inode_t *cdp)
2645{
2646 xfs_mount_t *mp = dp->i_mount;
2647 xfs_trans_t *tp;
2648 int error;
2649 xfs_bmap_free_t free_list;
2650 xfs_fsblock_t first_block;
2651 int cancel_flags;
2652 int committed;
2653 int last_cdp_link;
2654 uint resblks;
2655
2656 xfs_itrace_entry(dp);
2657
2658 if (XFS_FORCED_SHUTDOWN(mp))
2659 return XFS_ERROR(EIO);
2660
2661 if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
2662 error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
2663 dp, DM_RIGHT_NULL,
2664 NULL, DM_RIGHT_NULL, name->name,
2665 NULL, cdp->i_d.di_mode, 0, 0);
2666 if (error)
2667 return XFS_ERROR(error);
2668 }
2669
2670 /*
2671 * Get the dquots for the inodes.
2672 */
2673 error = XFS_QM_DQATTACH(mp, dp, 0);
2674 if (!error)
2675 error = XFS_QM_DQATTACH(mp, cdp, 0);
2676 if (error) {
2677 REMOVE_DEBUG_TRACE(__LINE__);
2678 goto std_return;
2679 }
2680
2681 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
2682 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2683 /*
2684 * We try to get the real space reservation first,
2685 * allowing for directory btree deletion(s) implying
2686 * possible bmap insert(s). If we can't get the space
2687 * reservation then we use 0 instead, and avoid the bmap
2688 * btree insert(s) in the directory code by, if the bmap
2689 * insert tries to happen, instead trimming the LAST
2690 * block from the directory.
2691 */
2692 resblks = XFS_REMOVE_SPACE_RES(mp);
2693 error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
2694 XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT);
2695 if (error == ENOSPC) {
2696 resblks = 0;
2697 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
2698 XFS_TRANS_PERM_LOG_RES, XFS_DEFAULT_LOG_COUNT);
2699 }
2700 if (error) {
2701 ASSERT(error != ENOSPC);
2702 cancel_flags = 0;
2703 goto error_return;
2704 }
2705 XFS_BMAP_INIT(&free_list, &first_block);
2706
2707 /*
2708 * Now lock the child directory inode and the parent directory
2709 * inode in the proper order. This will take care of validating
2710 * that the directory entry for the child directory inode has
2711 * not changed while we were obtaining a log reservation.
2712 */
2713 error = xfs_lock_dir_and_entry(dp, cdp);
2714 if (error) {
2715 xfs_trans_cancel(tp, cancel_flags);
2716 goto std_return;
2717 }
2718
2719 IHOLD(dp);
2720 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2721
2722 IHOLD(cdp);
2723 xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL);
2724
2725 ASSERT(cdp->i_d.di_nlink >= 2);
2726 if (cdp->i_d.di_nlink != 2) {
2727 error = XFS_ERROR(ENOTEMPTY);
2728 goto error_return;
2729 }
2730 if (!xfs_dir_isempty(cdp)) {
2731 error = XFS_ERROR(ENOTEMPTY);
2732 goto error_return;
2733 }
2734
2735 error = xfs_dir_removename(tp, dp, name, cdp->i_ino,
2736 &first_block, &free_list, resblks);
2737 if (error)
2738 goto error1;
2739
2740 xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2741
2742 /*
2743 * Bump the in memory generation count on the parent
2744 * directory so that other can know that it has changed.
2745 */
2746 dp->i_gen++;
2747
2748 /*
2749 * Drop the link from cdp's "..".
2750 */
2751 error = xfs_droplink(tp, dp);
2752 if (error) {
2753 goto error1;
2754 }
2755
2756 /*
2757 * Drop the link from dp to cdp.
2758 */
2759 error = xfs_droplink(tp, cdp);
2760 if (error) {
2761 goto error1;
2762 }
2763
2764 /*
2765 * Drop the "." link from cdp to self.
2766 */
2767 error = xfs_droplink(tp, cdp);
2768 if (error) {
2769 goto error1;
2770 }
2771
2772 /* Determine these before committing transaction */
2773 last_cdp_link = (cdp)->i_d.di_nlink==0;
2774
2775 /*
2776 * If this is a synchronous mount, make sure that the
2777 * rmdir transaction goes to disk before returning to
2778 * the user.
2779 */
2780 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
2781 xfs_trans_set_sync(tp);
2782 }
2783
2784 error = xfs_bmap_finish (&tp, &free_list, &committed);
2785 if (error) {
2786 xfs_bmap_cancel(&free_list);
2787 xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
2788 XFS_TRANS_ABORT));
2789 goto std_return;
2790 }
2791
2792 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2793 if (error) {
2794 goto std_return;
2795 }
2796
2797
2798 /* Fall through to std_return with error = 0 or the errno
2799 * from xfs_trans_commit. */
2800 std_return:
2801 if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
2802 (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
2803 dp, DM_RIGHT_NULL,
2804 NULL, DM_RIGHT_NULL,
2805 name->name, NULL, cdp->i_d.di_mode,
2806 error, 0);
2807 }
2808 return error;
2809
2810 error1:
2811 xfs_bmap_cancel(&free_list);
2812 cancel_flags |= XFS_TRANS_ABORT;
2813 /* FALLTHROUGH */
2814
2815 error_return:
2816 xfs_trans_cancel(tp, cancel_flags);
2817 goto std_return;
2818}
2819
2820int
2821xfs_symlink( 2430xfs_symlink(
2822 xfs_inode_t *dp, 2431 xfs_inode_t *dp,
2823 struct xfs_name *link_name, 2432 struct xfs_name *link_name,
@@ -2886,7 +2495,7 @@ xfs_symlink(
2886 * Make sure that we have allocated dquot(s) on disk. 2495 * Make sure that we have allocated dquot(s) on disk.
2887 */ 2496 */
2888 error = XFS_QM_DQVOPALLOC(mp, dp, 2497 error = XFS_QM_DQVOPALLOC(mp, dp,
2889 current_fsuid(credp), current_fsgid(credp), prid, 2498 current_fsuid(), current_fsgid(), prid,
2890 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 2499 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
2891 if (error) 2500 if (error)
2892 goto std_return; 2501 goto std_return;
@@ -3181,14 +2790,13 @@ int
3181xfs_reclaim( 2790xfs_reclaim(
3182 xfs_inode_t *ip) 2791 xfs_inode_t *ip)
3183{ 2792{
3184 bhv_vnode_t *vp = XFS_ITOV(ip);
3185 2793
3186 xfs_itrace_entry(ip); 2794 xfs_itrace_entry(ip);
3187 2795
3188 ASSERT(!VN_MAPPED(vp)); 2796 ASSERT(!VN_MAPPED(VFS_I(ip)));
3189 2797
3190 /* bad inode, get out here ASAP */ 2798 /* bad inode, get out here ASAP */
3191 if (VN_BAD(vp)) { 2799 if (VN_BAD(VFS_I(ip))) {
3192 xfs_ireclaim(ip); 2800 xfs_ireclaim(ip);
3193 return 0; 2801 return 0;
3194 } 2802 }
@@ -3225,7 +2833,7 @@ xfs_reclaim(
3225 XFS_MOUNT_ILOCK(mp); 2833 XFS_MOUNT_ILOCK(mp);
3226 spin_lock(&ip->i_flags_lock); 2834 spin_lock(&ip->i_flags_lock);
3227 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 2835 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
3228 vn_to_inode(vp)->i_private = NULL; 2836 VFS_I(ip)->i_private = NULL;
3229 ip->i_vnode = NULL; 2837 ip->i_vnode = NULL;
3230 spin_unlock(&ip->i_flags_lock); 2838 spin_unlock(&ip->i_flags_lock);
3231 list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); 2839 list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
@@ -3241,8 +2849,7 @@ xfs_finish_reclaim(
3241 int sync_mode) 2849 int sync_mode)
3242{ 2850{
3243 xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); 2851 xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
3244 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2852 struct inode *vp = VFS_I(ip);
3245 int error;
3246 2853
3247 if (vp && VN_BAD(vp)) 2854 if (vp && VN_BAD(vp))
3248 goto reclaim; 2855 goto reclaim;
@@ -3285,29 +2892,16 @@ xfs_finish_reclaim(
3285 xfs_iflock(ip); 2892 xfs_iflock(ip);
3286 } 2893 }
3287 2894
3288 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 2895 /*
3289 if (ip->i_update_core || 2896 * In the case of a forced shutdown we rely on xfs_iflush() to
3290 ((ip->i_itemp != NULL) && 2897 * wait for the inode to be unpinned before returning an error.
3291 (ip->i_itemp->ili_format.ilf_fields != 0))) { 2898 */
3292 error = xfs_iflush(ip, sync_mode); 2899 if (xfs_iflush(ip, sync_mode) == 0) {
3293 /* 2900 /* synchronize with xfs_iflush_done */
3294 * If we hit an error, typically because of filesystem 2901 xfs_iflock(ip);
3295 * shutdown, we don't need to let vn_reclaim to know 2902 xfs_ifunlock(ip);
3296 * because we're gonna reclaim the inode anyway.
3297 */
3298 if (error) {
3299 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3300 goto reclaim;
3301 }
3302 xfs_iflock(ip); /* synchronize with xfs_iflush_done */
3303 }
3304
3305 ASSERT(ip->i_update_core == 0);
3306 ASSERT(ip->i_itemp == NULL ||
3307 ip->i_itemp->ili_format.ilf_fields == 0);
3308 } 2903 }
3309 2904
3310 xfs_ifunlock(ip);
3311 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2905 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3312 2906
3313 reclaim: 2907 reclaim:
@@ -3418,7 +3012,7 @@ xfs_alloc_file_space(
3418 3012
3419 /* Generate a DMAPI event if needed. */ 3013 /* Generate a DMAPI event if needed. */
3420 if (alloc_type != 0 && offset < ip->i_size && 3014 if (alloc_type != 0 && offset < ip->i_size &&
3421 (attr_flags&ATTR_DMI) == 0 && 3015 (attr_flags & XFS_ATTR_DMI) == 0 &&
3422 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { 3016 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
3423 xfs_off_t end_dmi_offset; 3017 xfs_off_t end_dmi_offset;
3424 3018
@@ -3532,7 +3126,7 @@ retry:
3532 allocatesize_fsb -= allocated_fsb; 3126 allocatesize_fsb -= allocated_fsb;
3533 } 3127 }
3534dmapi_enospc_check: 3128dmapi_enospc_check:
3535 if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 && 3129 if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 &&
3536 DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) { 3130 DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
3537 error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE, 3131 error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
3538 ip, DM_RIGHT_NULL, 3132 ip, DM_RIGHT_NULL,
@@ -3643,7 +3237,6 @@ xfs_free_file_space(
3643 xfs_off_t len, 3237 xfs_off_t len,
3644 int attr_flags) 3238 int attr_flags)
3645{ 3239{
3646 bhv_vnode_t *vp;
3647 int committed; 3240 int committed;
3648 int done; 3241 int done;
3649 xfs_off_t end_dmi_offset; 3242 xfs_off_t end_dmi_offset;
@@ -3663,7 +3256,6 @@ xfs_free_file_space(
3663 xfs_trans_t *tp; 3256 xfs_trans_t *tp;
3664 int need_iolock = 1; 3257 int need_iolock = 1;
3665 3258
3666 vp = XFS_ITOV(ip);
3667 mp = ip->i_mount; 3259 mp = ip->i_mount;
3668 3260
3669 xfs_itrace_entry(ip); 3261 xfs_itrace_entry(ip);
@@ -3679,7 +3271,7 @@ xfs_free_file_space(
3679 end_dmi_offset = offset + len; 3271 end_dmi_offset = offset + len;
3680 endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); 3272 endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
3681 3273
3682 if (offset < ip->i_size && (attr_flags & ATTR_DMI) == 0 && 3274 if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 &&
3683 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) { 3275 DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
3684 if (end_dmi_offset > ip->i_size) 3276 if (end_dmi_offset > ip->i_size)
3685 end_dmi_offset = ip->i_size; 3277 end_dmi_offset = ip->i_size;
@@ -3690,7 +3282,7 @@ xfs_free_file_space(
3690 return error; 3282 return error;
3691 } 3283 }
3692 3284
3693 if (attr_flags & ATTR_NOLOCK) 3285 if (attr_flags & XFS_ATTR_NOLOCK)
3694 need_iolock = 0; 3286 need_iolock = 0;
3695 if (need_iolock) { 3287 if (need_iolock) {
3696 xfs_ilock(ip, XFS_IOLOCK_EXCL); 3288 xfs_ilock(ip, XFS_IOLOCK_EXCL);
@@ -3700,7 +3292,7 @@ xfs_free_file_space(
3700 rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 3292 rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
3701 ioffset = offset & ~(rounding - 1); 3293 ioffset = offset & ~(rounding - 1);
3702 3294
3703 if (VN_CACHED(vp) != 0) { 3295 if (VN_CACHED(VFS_I(ip)) != 0) {
3704 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); 3296 xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
3705 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); 3297 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
3706 if (error) 3298 if (error)
@@ -3867,7 +3459,7 @@ xfs_change_file_space(
3867 xfs_off_t startoffset; 3459 xfs_off_t startoffset;
3868 xfs_off_t llen; 3460 xfs_off_t llen;
3869 xfs_trans_t *tp; 3461 xfs_trans_t *tp;
3870 bhv_vattr_t va; 3462 struct iattr iattr;
3871 3463
3872 xfs_itrace_entry(ip); 3464 xfs_itrace_entry(ip);
3873 3465
@@ -3941,10 +3533,10 @@ xfs_change_file_space(
3941 break; 3533 break;
3942 } 3534 }
3943 3535
3944 va.va_mask = XFS_AT_SIZE; 3536 iattr.ia_valid = ATTR_SIZE;
3945 va.va_size = startoffset; 3537 iattr.ia_size = startoffset;
3946 3538
3947 error = xfs_setattr(ip, &va, attr_flags, credp); 3539 error = xfs_setattr(ip, &iattr, attr_flags, credp);
3948 3540
3949 if (error) 3541 if (error)
3950 return error; 3542 return error;
@@ -3974,7 +3566,7 @@ xfs_change_file_space(
3974 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 3566 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3975 xfs_trans_ihold(tp, ip); 3567 xfs_trans_ihold(tp, ip);
3976 3568
3977 if ((attr_flags & ATTR_DMI) == 0) { 3569 if ((attr_flags & XFS_ATTR_DMI) == 0) {
3978 ip->i_d.di_mode &= ~S_ISUID; 3570 ip->i_d.di_mode &= ~S_ISUID;
3979 3571
3980 /* 3572 /*