Diffstat (limited to 'fs/xfs/xfs_rw.c')
-rw-r--r--	fs/xfs/xfs_rw.c	174
1 file changed, 20 insertions(+), 154 deletions(-)
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 3f816ad7ff19..e336742a58a4 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,48 +44,7 @@
 #include "xfs_error.h"
 #include "xfs_buf_item.h"
 #include "xfs_rw.h"
-
-/*
- * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
- * which clears the setuid and setgid bits when a file is written.
- */
-int
-xfs_write_clear_setuid(
-	xfs_inode_t	*ip)
-{
-	xfs_mount_t	*mp;
-	xfs_trans_t	*tp;
-	int		error;
-
-	mp = ip->i_mount;
-	tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-	if ((error = xfs_trans_reserve(tp, 0,
-				      XFS_WRITEID_LOG_RES(mp),
-				      0, 0, 0))) {
-		xfs_trans_cancel(tp, 0);
-		return error;
-	}
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-	xfs_trans_ihold(tp, ip);
-	ip->i_d.di_mode &= ~S_ISUID;
-
-	/*
-	 * Note that we don't have to worry about mandatory
-	 * file locking being disabled here because we only
-	 * clear the S_ISGID bit if the Group execute bit is
-	 * on, but if it was on then mandatory locking wouldn't
-	 * have been enabled.
-	 */
-	if (ip->i_d.di_mode & S_IXGRP) {
-		ip->i_d.di_mode &= ~S_ISGID;
-	}
-	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-	xfs_trans_set_sync(tp);
-	error = xfs_trans_commit(tp, 0);
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return 0;
-}
+#include "xfs_trace.h"
 
 /*
  * Force a shutdown of the filesystem instantly while keeping
@@ -152,90 +111,6 @@ xfs_do_force_shutdown(
 	}
 }
 
-
-/*
- * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
- * so that the proper iodone callbacks get called.
- */
-int
-xfs_bioerror(
-	xfs_buf_t *bp)
-{
-
-#ifdef XFSERRORDEBUG
-	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
-#endif
-
-	/*
-	 * No need to wait until the buffer is unpinned.
-	 * We aren't flushing it.
-	 */
-	xfs_buftrace("XFS IOERROR", bp);
-	XFS_BUF_ERROR(bp, EIO);
-	/*
-	 * We're calling biodone, so delete B_DONE flag. Either way
-	 * we have to call the iodone callback, and calling biodone
-	 * probably is the best way since it takes care of
-	 * GRIO as well.
-	 */
-	XFS_BUF_UNREAD(bp);
-	XFS_BUF_UNDELAYWRITE(bp);
-	XFS_BUF_UNDONE(bp);
-	XFS_BUF_STALE(bp);
-
-	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
-	xfs_biodone(bp);
-
-	return (EIO);
-}
-
-/*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
-int
-xfs_bioerror_relse(
-	xfs_buf_t *bp)
-{
-	int64_t	fl;
-
-	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
-	ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
-
-	xfs_buftrace("XFS IOERRELSE", bp);
-	fl = XFS_BUF_BFLAGS(bp);
-	/*
-	 * No need to wait until the buffer is unpinned.
-	 * We aren't flushing it.
-	 *
-	 * chunkhold expects B_DONE to be set, whether
-	 * we actually finish the I/O or not. We don't want to
-	 * change that interface.
-	 */
-	XFS_BUF_UNREAD(bp);
-	XFS_BUF_UNDELAYWRITE(bp);
-	XFS_BUF_DONE(bp);
-	XFS_BUF_STALE(bp);
-	XFS_BUF_CLR_IODONE_FUNC(bp);
-	XFS_BUF_CLR_BDSTRAT_FUNC(bp);
-	if (!(fl & XFS_B_ASYNC)) {
-		/*
-		 * Mark b_error and B_ERROR _both_.
-		 * Lot's of chunkcache code assumes that.
-		 * There's no reason to mark error for
-		 * ASYNC buffers.
-		 */
-		XFS_BUF_ERROR(bp, EIO);
-		XFS_BUF_FINISH_IOWAIT(bp);
-	} else {
-		xfs_buf_relse(bp);
-	}
-	return (EIO);
-}
-
 /*
  * Prints out an ALERT message about I/O error.
  */
@@ -277,10 +152,10 @@ xfs_read_buf(
 	xfs_buf_t	 *bp;
 	int		 error;
 
-	if (flags)
-		bp = xfs_buf_read_flags(target, blkno, len, flags);
-	else
-		bp = xfs_buf_read(target, blkno, len, flags);
+	if (!flags)
+		flags = XBF_LOCK | XBF_MAPPED;
+
+	bp = xfs_buf_read(target, blkno, len, flags);
 	if (!bp)
 		return XFS_ERROR(EIO);
 	error = XFS_BUF_GETERROR(bp);
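
Note: the xfs_read_buf() hunk above drops the separate xfs_buf_read_flags() path and instead defaults a zero flags argument to XBF_LOCK | XBF_MAPPED before the single xfs_buf_read() call. A minimal, self-contained sketch of that "default the flags if the caller passed none" pattern follows; the demo_* names and flag values are illustrative stand-ins, not the kernel buffer-cache API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits; the real XBF_LOCK/XBF_MAPPED values live in the XFS buffer cache headers. */
#define DEMO_XBF_LOCK		(1u << 0)
#define DEMO_XBF_MAPPED		(1u << 1)

/* Hypothetical stand-in for xfs_buf_read(): just reports what it was asked to do. */
static int demo_buf_read(uint64_t blkno, int len, unsigned int flags)
{
	printf("read blkno=%llu len=%d flags=0x%x\n",
	       (unsigned long long)blkno, len, flags);
	return 0;
}

/* Mirrors the new xfs_read_buf() behaviour: a zero flags argument means "lock and map the buffer". */
static int demo_read_buf(uint64_t blkno, int len, unsigned int flags)
{
	if (!flags)
		flags = DEMO_XBF_LOCK | DEMO_XBF_MAPPED;

	return demo_buf_read(blkno, len, flags);
}

int main(void)
{
	return demo_read_buf(0, 1, 0);	/* caller passed no flags, takes the default path */
}
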
@@ -307,32 +182,23 @@ xfs_read_buf(
 }
 
 /*
- * Wrapper around bwrite() so that we can trap
- * write errors, and act accordingly.
+ * helper function to extract extent size hint from inode
  */
-int
-xfs_bwrite(
-	struct xfs_mount	*mp,
-	struct xfs_buf		*bp)
+xfs_extlen_t
+xfs_get_extsz_hint(
+	struct xfs_inode	*ip)
 {
-	int	error;
+	xfs_extlen_t		extsz;
 
-	/*
-	 * XXXsup how does this work for quotas.
-	 */
-	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
-	bp->b_mount = mp;
-	XFS_BUF_WRITE(bp);
-
-	if ((error = XFS_bwrite(bp))) {
-		ASSERT(mp);
-		/*
-		 * Cannot put a buftrace here since if the buffer is not
-		 * B_HOLD then we will brelse() the buffer before returning
-		 * from bwrite and we could be tracing a buffer that has
-		 * been reused.
-		 */
-		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
+		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+				? ip->i_d.di_extsize
+				: ip->i_mount->m_sb.sb_rextsize;
+		ASSERT(extsz);
+	} else {
+		extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+				? ip->i_d.di_extsize : 0;
 	}
-	return (error);
+
+	return extsz;
 }
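
Note: for reference on the helper added in the last hunk, xfs_get_extsz_hint() returns the inode's extent size hint in filesystem blocks, preferring the per-inode di_extsize when XFS_DIFLAG_EXTSIZE is set, falling back to the realtime extent size for realtime inodes, and returning 0 otherwise. Below is a hedged, self-contained sketch of the same selection logic together with the typical caller pattern of rounding an allocation length up to the hint; the demo_* struct and helpers are simplified stand-ins, not the kernel types.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the inode flags and fields the helper inspects. */
#define DEMO_DIFLAG_REALTIME	(1u << 0)
#define DEMO_DIFLAG_EXTSIZE	(1u << 1)

struct demo_inode {
	unsigned int	di_flags;	/* XFS_DIFLAG_*-style flags */
	unsigned int	di_extsize;	/* per-inode extent size hint, in fs blocks */
	unsigned int	rextsize;	/* realtime extent size from the superblock */
};

/* Same selection logic as xfs_get_extsz_hint(), expressed on the simplified struct. */
static unsigned int demo_get_extsz_hint(const struct demo_inode *ip)
{
	if (ip->di_flags & DEMO_DIFLAG_REALTIME) {
		unsigned int extsz = (ip->di_flags & DEMO_DIFLAG_EXTSIZE)
				? ip->di_extsize : ip->rextsize;
		assert(extsz);		/* a realtime inode always has a nonzero hint */
		return extsz;
	}
	return (ip->di_flags & DEMO_DIFLAG_EXTSIZE) ? ip->di_extsize : 0;
}

/* Typical caller pattern: round an allocation request up to the hint, if there is one. */
static unsigned int demo_align_alloc(const struct demo_inode *ip, unsigned int len)
{
	unsigned int extsz = demo_get_extsz_hint(ip);

	if (extsz)
		len = ((len + extsz - 1) / extsz) * extsz;
	return len;
}

int main(void)
{
	struct demo_inode ip = { .di_flags = DEMO_DIFLAG_EXTSIZE, .di_extsize = 16 };

	printf("%u\n", demo_align_alloc(&ip, 20));	/* prints 32: 20 blocks rounded up to 2 x 16 */
	return 0;
}
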