Diffstat (limited to 'fs/xfs/linux-2.6')
-rw-r--r--	fs/xfs/linux-2.6/xfs_file.c	723
-rw-r--r--	fs/xfs/linux-2.6/xfs_lrw.c	796
2 files changed, 723 insertions, 796 deletions
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 3805ada98747..51fc510828a4 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -16,6 +16,7 @@
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
@@ -34,16 +35,738 @@
35#include "xfs_dir2_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h"
38#include "xfs_bmap.h"
39#include "xfs_error.h"
40#include "xfs_rw.h"
41#include "xfs_vnodeops.h"
42#include "xfs_da_btree.h"
43#include "xfs_ioctl.h"
44#include "xfs_trace.h"
45
46#include <linux/dcache.h>
47
48static const struct vm_operations_struct xfs_file_vm_ops;
49
50/*
51 * xfs_iozero
52 *
53 * xfs_iozero clears the specified range of buffer supplied,
54 * and marks all the affected blocks as valid and modified. If
55 * an affected block is not allocated, it will be allocated. If
56 * an affected block is not completely overwritten, and is not
57 * valid before the operation, it will be read from disk before
58 * being partially zeroed.
59 */
60STATIC int
61xfs_iozero(
62 struct xfs_inode *ip, /* inode */
63 loff_t pos, /* offset in file */
64 size_t count) /* size of data to zero */
65{
66 struct page *page;
67 struct address_space *mapping;
68 int status;
69
70 mapping = VFS_I(ip)->i_mapping;
71 do {
72 unsigned offset, bytes;
73 void *fsdata;
74
75 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
76 bytes = PAGE_CACHE_SIZE - offset;
77 if (bytes > count)
78 bytes = count;
79
80 status = pagecache_write_begin(NULL, mapping, pos, bytes,
81 AOP_FLAG_UNINTERRUPTIBLE,
82 &page, &fsdata);
83 if (status)
84 break;
85
86 zero_user(page, offset, bytes);
87
88 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
89 page, fsdata);
90 WARN_ON(status <= 0); /* can't return less than zero! */
91 pos += bytes;
92 count -= bytes;
93 status = 0;
94 } while (count);
95
96 return (-status);
97}
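/*
 * A hedged reader's note, not part of the original commit: the generic
 * page cache helpers used above return negative errno values, while the
 * XFS-internal convention in this file is positive errors.  xfs_iozero()
 * therefore negates the status on the way out, its callers
 * (xfs_zero_last_block(), xfs_zero_eof()) pass the positive value up, and
 * the top-level entry point flips the sign once more for the VFS, e.g.:
 *
 *	error = xfs_zero_eof(xip, pos, xip->i_size);	(positive on failure)
 *	...
 *	return -error;					(negative errno to the VFS)
 */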
98
99ssize_t /* bytes read, or (-) error */
100xfs_read(
101 xfs_inode_t *ip,
102 struct kiocb *iocb,
103 const struct iovec *iovp,
104 unsigned int segs,
105 loff_t *offset,
106 int ioflags)
107{
108 struct file *file = iocb->ki_filp;
109 struct inode *inode = file->f_mapping->host;
110 xfs_mount_t *mp = ip->i_mount;
111 size_t size = 0;
112 ssize_t ret = 0;
113 xfs_fsize_t n;
114 unsigned long seg;
115
116
117 XFS_STATS_INC(xs_read_calls);
118
119 /* START copy & waste from filemap.c */
120 for (seg = 0; seg < segs; seg++) {
121 const struct iovec *iv = &iovp[seg];
122
123 /*
124 * If any segment has a negative length, or the cumulative
125 * length ever wraps negative then return -EINVAL.
126 */
127 size += iv->iov_len;
128 if (unlikely((ssize_t)(size|iv->iov_len) < 0))
129 return XFS_ERROR(-EINVAL);
130 }
131 /* END copy & waste from filemap.c */
132
133 if (unlikely(ioflags & IO_ISDIRECT)) {
134 xfs_buftarg_t *target =
135 XFS_IS_REALTIME_INODE(ip) ?
136 mp->m_rtdev_targp : mp->m_ddev_targp;
137 if ((*offset & target->bt_smask) ||
138 (size & target->bt_smask)) {
139 if (*offset == ip->i_size) {
140 return (0);
141 }
142 return -XFS_ERROR(EINVAL);
143 }
144 }
145
146 n = XFS_MAXIOFFSET(mp) - *offset;
147 if ((n <= 0) || (size == 0))
148 return 0;
149
150 if (n < size)
151 size = n;
152
153 if (XFS_FORCED_SHUTDOWN(mp))
154 return -EIO;
155
156 if (unlikely(ioflags & IO_ISDIRECT))
157 mutex_lock(&inode->i_mutex);
158 xfs_ilock(ip, XFS_IOLOCK_SHARED);
159
160 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
161 int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
162 int iolock = XFS_IOLOCK_SHARED;
163
164 ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
165 dmflags, &iolock);
166 if (ret) {
167 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
168 if (unlikely(ioflags & IO_ISDIRECT))
169 mutex_unlock(&inode->i_mutex);
170 return ret;
171 }
172 }
173
174 if (unlikely(ioflags & IO_ISDIRECT)) {
175 if (inode->i_mapping->nrpages)
176 ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
177 -1, FI_REMAPF_LOCKED);
178 mutex_unlock(&inode->i_mutex);
179 if (ret) {
180 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
181 return ret;
182 }
183 }
184
185 trace_xfs_file_read(ip, size, *offset, ioflags);
186
187 iocb->ki_pos = *offset;
188 ret = generic_file_aio_read(iocb, iovp, segs, *offset);
189 if (ret > 0)
190 XFS_STATS_ADD(xs_read_bytes, ret);
191
192 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
193 return ret;
194}
195
196ssize_t
197xfs_splice_read(
198 xfs_inode_t *ip,
199 struct file *infilp,
200 loff_t *ppos,
201 struct pipe_inode_info *pipe,
202 size_t count,
203 int flags,
204 int ioflags)
205{
206 xfs_mount_t *mp = ip->i_mount;
207 ssize_t ret;
208
209 XFS_STATS_INC(xs_read_calls);
210 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
211 return -EIO;
212
213 xfs_ilock(ip, XFS_IOLOCK_SHARED);
214
215 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
216 int iolock = XFS_IOLOCK_SHARED;
217 int error;
218
219 error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
220 FILP_DELAY_FLAG(infilp), &iolock);
221 if (error) {
222 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
223 return -error;
224 }
225 }
226
227 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
228
229 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
230 if (ret > 0)
231 XFS_STATS_ADD(xs_read_bytes, ret);
232
233 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
234 return ret;
235}
236
237ssize_t
238xfs_splice_write(
239 xfs_inode_t *ip,
240 struct pipe_inode_info *pipe,
241 struct file *outfilp,
242 loff_t *ppos,
243 size_t count,
244 int flags,
245 int ioflags)
246{
247 xfs_mount_t *mp = ip->i_mount;
248 ssize_t ret;
249 struct inode *inode = outfilp->f_mapping->host;
250 xfs_fsize_t isize, new_size;
251
252 XFS_STATS_INC(xs_write_calls);
253 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
254 return -EIO;
255
256 xfs_ilock(ip, XFS_IOLOCK_EXCL);
257
258 if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
259 int iolock = XFS_IOLOCK_EXCL;
260 int error;
261
262 error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
263 FILP_DELAY_FLAG(outfilp), &iolock);
264 if (error) {
265 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
266 return -error;
267 }
268 }
269
270 new_size = *ppos + count;
271
272 xfs_ilock(ip, XFS_ILOCK_EXCL);
273 if (new_size > ip->i_size)
274 ip->i_new_size = new_size;
275 xfs_iunlock(ip, XFS_ILOCK_EXCL);
276
277 trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
278
279 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
280 if (ret > 0)
281 XFS_STATS_ADD(xs_write_bytes, ret);
282
283 isize = i_size_read(inode);
284 if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
285 *ppos = isize;
286
287 if (*ppos > ip->i_size) {
288 xfs_ilock(ip, XFS_ILOCK_EXCL);
289 if (*ppos > ip->i_size)
290 ip->i_size = *ppos;
291 xfs_iunlock(ip, XFS_ILOCK_EXCL);
292 }
293
294 if (ip->i_new_size) {
295 xfs_ilock(ip, XFS_ILOCK_EXCL);
296 ip->i_new_size = 0;
297 if (ip->i_d.di_size > ip->i_size)
298 ip->i_d.di_size = ip->i_size;
299 xfs_iunlock(ip, XFS_ILOCK_EXCL);
300 }
301 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
302 return ret;
303}
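/*
 * Interpretation, not text from the original commit: i_new_size records the
 * size this write may extend the file to before any data is copied, so that
 * I/O completion can move the on-disk size toward it even though the in-core
 * i_size has not been updated yet.  It is cleared once the write is done,
 * and if the write ended early the on-disk di_size is pulled back to the
 * in-core i_size, as the cleanup above does.
 */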
304
305/*
306 * This routine is called to handle zeroing any space in the last
307 * block of the file that is beyond the EOF. We do this since the
308 * size is being increased without writing anything to that block
309 * and we don't want anyone to read the garbage on the disk.
310 */
311STATIC int /* error (positive) */
312xfs_zero_last_block(
313 xfs_inode_t *ip,
314 xfs_fsize_t offset,
315 xfs_fsize_t isize)
316{
317 xfs_fileoff_t last_fsb;
318 xfs_mount_t *mp = ip->i_mount;
319 int nimaps;
320 int zero_offset;
321 int zero_len;
322 int error = 0;
323 xfs_bmbt_irec_t imap;
324
325 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
326
327 zero_offset = XFS_B_FSB_OFFSET(mp, isize);
328 if (zero_offset == 0) {
329 /*
330 * There are no extra bytes in the last block on disk to
331 * zero, so return.
332 */
333 return 0;
334 }
335
336 last_fsb = XFS_B_TO_FSBT(mp, isize);
337 nimaps = 1;
338 error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
339 &nimaps, NULL, NULL);
340 if (error) {
341 return error;
342 }
343 ASSERT(nimaps > 0);
344 /*
345 * If the block underlying isize is just a hole, then there
346 * is nothing to zero.
347 */
348 if (imap.br_startblock == HOLESTARTBLOCK) {
349 return 0;
350 }
351 /*
352 * Zero the part of the last block beyond the EOF, and write it
353 * out sync. We need to drop the ilock while we do this so we
354 * don't deadlock when the buffer cache calls back to us.
355 */
356 xfs_iunlock(ip, XFS_ILOCK_EXCL);
357
358 zero_len = mp->m_sb.sb_blocksize - zero_offset;
359 if (isize + zero_len > offset)
360 zero_len = offset - isize;
361 error = xfs_iozero(ip, isize, zero_len);
362
363 xfs_ilock(ip, XFS_ILOCK_EXCL);
364 ASSERT(error >= 0);
365 return error;
366}
367
368/*
369 * Zero any on disk space between the current EOF and the new,
370 * larger EOF. This handles the normal case of zeroing the remainder
371 * of the last block in the file and the unusual case of zeroing blocks
372 * out beyond the size of the file. This second case only happens
373 * with fixed size extents and when the system crashes before the inode
374 * size was updated but after blocks were allocated. If fill is set,
375 * then any holes in the range are filled and zeroed. If not, the holes
376 * are left alone as holes.
377 */
378
379int /* error (positive) */
380xfs_zero_eof(
381 xfs_inode_t *ip,
382 xfs_off_t offset, /* starting I/O offset */
383 xfs_fsize_t isize) /* current inode size */
384{
385 xfs_mount_t *mp = ip->i_mount;
386 xfs_fileoff_t start_zero_fsb;
387 xfs_fileoff_t end_zero_fsb;
388 xfs_fileoff_t zero_count_fsb;
389 xfs_fileoff_t last_fsb;
390 xfs_fileoff_t zero_off;
391 xfs_fsize_t zero_len;
392 int nimaps;
393 int error = 0;
394 xfs_bmbt_irec_t imap;
395
396 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
397 ASSERT(offset > isize);
398
399 /*
400 * First handle zeroing the block on which isize resides.
401 * We only zero a part of that block so it is handled specially.
402 */
403 error = xfs_zero_last_block(ip, offset, isize);
404 if (error) {
405 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
406 return error;
407 }
408
409 /*
410 * Calculate the range between the new size and the old
411 * where blocks needing to be zeroed may exist. To get the
412 * block where the last byte in the file currently resides,
413 * we need to subtract one from the size and truncate back
414 * to a block boundary. We subtract 1 in case the size is
415 * exactly on a block boundary.
416 */
417 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
418 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
419 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
420 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
421 if (last_fsb == end_zero_fsb) {
422 /*
423 * The size was only incremented on its last block.
424 * We took care of that above, so just return.
425 */
426 return 0;
427 }
428
429 ASSERT(start_zero_fsb <= end_zero_fsb);
430 while (start_zero_fsb <= end_zero_fsb) {
431 nimaps = 1;
432 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
433 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
434 0, NULL, 0, &imap, &nimaps, NULL, NULL);
435 if (error) {
436 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
437 return error;
438 }
439 ASSERT(nimaps > 0);
440
441 if (imap.br_state == XFS_EXT_UNWRITTEN ||
442 imap.br_startblock == HOLESTARTBLOCK) {
443 /*
444 * This loop handles initializing pages that were
445 * partially initialized by the code below this
446 * loop. It basically zeroes the part of the page
447 * that sits on a hole and sets the page as P_HOLE
448 * and calls remapf if it is a mapped file.
449 */
450 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
451 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
452 continue;
453 }
454
455 /*
456 * There are blocks we need to zero.
457 * Drop the inode lock while we're doing the I/O.
458 * We'll still have the iolock to protect us.
459 */
460 xfs_iunlock(ip, XFS_ILOCK_EXCL);
461
462 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
463 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
464
465 if ((zero_off + zero_len) > offset)
466 zero_len = offset - zero_off;
467
468 error = xfs_iozero(ip, zero_off, zero_len);
469 if (error) {
470 goto out_lock;
471 }
472
473 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
474 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
475
476 xfs_ilock(ip, XFS_ILOCK_EXCL);
477 }
478
479 return 0;
480
481out_lock:
482 xfs_ilock(ip, XFS_ILOCK_EXCL);
483 ASSERT(error >= 0);
484 return error;
485}
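/*
 * Note on the unit conversions above (interpretation, not part of the
 * original commit): XFS_B_TO_FSBT() truncates a byte offset down to the
 * filesystem block containing it, XFS_B_TO_FSB() rounds up to the next
 * block boundary, and XFS_B_FSB_OFFSET() gives the byte offset within a
 * block.  That is why start_zero_fsb uses the rounded-up form (the first
 * block wholly at or beyond the old EOF, the partial block having been
 * handled by xfs_zero_last_block()) while end_zero_fsb truncates
 * offset - 1 (the block holding the last byte below the new write offset).
 */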
486
487ssize_t /* bytes written, or (-) error */
488xfs_write(
489 struct xfs_inode *xip,
490 struct kiocb *iocb,
491 const struct iovec *iovp,
492 unsigned int nsegs,
493 loff_t *offset,
494 int ioflags)
495{
496 struct file *file = iocb->ki_filp;
497 struct address_space *mapping = file->f_mapping;
498 struct inode *inode = mapping->host;
499 unsigned long segs = nsegs;
500 xfs_mount_t *mp;
501 ssize_t ret = 0, error = 0;
502 xfs_fsize_t isize, new_size;
503 int iolock;
504 int eventsent = 0;
505 size_t ocount = 0, count;
506 loff_t pos;
507 int need_i_mutex;
508
509 XFS_STATS_INC(xs_write_calls);
510
511 error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
512 if (error)
513 return error;
514
515 count = ocount;
516 pos = *offset;
517
518 if (count == 0)
519 return 0;
520
521 mp = xip->i_mount;
522
523 xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
524
525 if (XFS_FORCED_SHUTDOWN(mp))
526 return -EIO;
527
528relock:
529 if (ioflags & IO_ISDIRECT) {
530 iolock = XFS_IOLOCK_SHARED;
531 need_i_mutex = 0;
532 } else {
533 iolock = XFS_IOLOCK_EXCL;
534 need_i_mutex = 1;
535 mutex_lock(&inode->i_mutex);
536 }
537
538 xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
539
540start:
541 error = -generic_write_checks(file, &pos, &count,
542 S_ISBLK(inode->i_mode));
543 if (error) {
544 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
545 goto out_unlock_mutex;
546 }
547
548 if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
549 !(ioflags & IO_INVIS) && !eventsent)) {
550 int dmflags = FILP_DELAY_FLAG(file);
551
552 if (need_i_mutex)
553 dmflags |= DM_FLAGS_IMUX;
554
555 xfs_iunlock(xip, XFS_ILOCK_EXCL);
556 error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
557 pos, count, dmflags, &iolock);
558 if (error) {
559 goto out_unlock_internal;
560 }
561 xfs_ilock(xip, XFS_ILOCK_EXCL);
562 eventsent = 1;
563
564 /*
565 * The iolock was dropped and reacquired in XFS_SEND_DATA
566 * so we have to recheck the size when appending.
567 * We will only "goto start;" once, since having sent the
568 * event prevents another call to XFS_SEND_DATA, which is
569 * what allows the size to change in the first place.
570 */
571 if ((file->f_flags & O_APPEND) && pos != xip->i_size)
572 goto start;
573 }
574
575 if (ioflags & IO_ISDIRECT) {
576 xfs_buftarg_t *target =
577 XFS_IS_REALTIME_INODE(xip) ?
578 mp->m_rtdev_targp : mp->m_ddev_targp;
579
580 if ((pos & target->bt_smask) || (count & target->bt_smask)) {
581 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
582 return XFS_ERROR(-EINVAL);
583 }
584
585 if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
586 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
587 iolock = XFS_IOLOCK_EXCL;
588 need_i_mutex = 1;
589 mutex_lock(&inode->i_mutex);
590 xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
591 goto start;
592 }
593 }
594
595 new_size = pos + count;
596 if (new_size > xip->i_size)
597 xip->i_new_size = new_size;
598
599 if (likely(!(ioflags & IO_INVIS)))
600 file_update_time(file);
601
602 /*
603 * If the offset is beyond the size of the file, we have a couple
604 * of things to do. First, if there is already space allocated
605 * we need to either create holes or zero the disk or ...
606 *
607 * If there is a page where the previous size lands, we need
608 * to zero it out up to the new size.
609 */
610
611 if (pos > xip->i_size) {
612 error = xfs_zero_eof(xip, pos, xip->i_size);
613 if (error) {
614 xfs_iunlock(xip, XFS_ILOCK_EXCL);
615 goto out_unlock_internal;
616 }
617 }
618 xfs_iunlock(xip, XFS_ILOCK_EXCL);
619
620 /*
621 * If we're writing the file then make sure to clear the
622 * setuid and setgid bits if the process is not being run
623 * by root. This keeps people from modifying setuid and
624 * setgid binaries.
625 */
626 error = -file_remove_suid(file);
627 if (unlikely(error))
628 goto out_unlock_internal;
629
630 /* We can write back this queue in page reclaim */
631 current->backing_dev_info = mapping->backing_dev_info;
632
633 if ((ioflags & IO_ISDIRECT)) {
634 if (mapping->nrpages) {
635 WARN_ON(need_i_mutex == 0);
636 error = xfs_flushinval_pages(xip,
637 (pos & PAGE_CACHE_MASK),
638 -1, FI_REMAPF_LOCKED);
639 if (error)
640 goto out_unlock_internal;
641 }
642
643 if (need_i_mutex) {
644 /* demote the lock now the cached pages are gone */
645 xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
646 mutex_unlock(&inode->i_mutex);
647
648 iolock = XFS_IOLOCK_SHARED;
649 need_i_mutex = 0;
650 }
651
652 trace_xfs_file_direct_write(xip, count, *offset, ioflags);
653 ret = generic_file_direct_write(iocb, iovp,
654 &segs, pos, offset, count, ocount);
655
656 /*
657 * direct-io write to a hole: fall through to buffered I/O
658 * for completing the rest of the request.
659 */
660 if (ret >= 0 && ret != count) {
661 XFS_STATS_ADD(xs_write_bytes, ret);
662
663 pos += ret;
664 count -= ret;
665
666 ioflags &= ~IO_ISDIRECT;
667 xfs_iunlock(xip, iolock);
668 goto relock;
669 }
670 } else {
671 int enospc = 0;
672 ssize_t ret2 = 0;
673
674write_retry:
675 trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
676 ret2 = generic_file_buffered_write(iocb, iovp, segs,
677 pos, offset, count, ret);
678 /*
679 * if we just got an ENOSPC, flush the inode now we
680 * aren't holding any page locks and retry *once*
681 */
682 if (ret2 == -ENOSPC && !enospc) {
683 error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
684 if (error)
685 goto out_unlock_internal;
686 enospc = 1;
687 goto write_retry;
688 }
689 ret = ret2;
690 }
691
692 current->backing_dev_info = NULL;
693
694 isize = i_size_read(inode);
695 if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
696 *offset = isize;
697
698 if (*offset > xip->i_size) {
699 xfs_ilock(xip, XFS_ILOCK_EXCL);
700 if (*offset > xip->i_size)
701 xip->i_size = *offset;
702 xfs_iunlock(xip, XFS_ILOCK_EXCL);
703 }
704
705 if (ret == -ENOSPC &&
706 DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
707 xfs_iunlock(xip, iolock);
708 if (need_i_mutex)
709 mutex_unlock(&inode->i_mutex);
710 error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
711 DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
712 0, 0, 0); /* Delay flag intentionally unused */
713 if (need_i_mutex)
714 mutex_lock(&inode->i_mutex);
715 xfs_ilock(xip, iolock);
716 if (error)
717 goto out_unlock_internal;
718 goto start;
719 }
720
721 error = -ret;
722 if (ret <= 0)
723 goto out_unlock_internal;
724
725 XFS_STATS_ADD(xs_write_bytes, ret);
726
727 /* Handle various SYNC-type writes */
728 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
729 loff_t end = pos + ret - 1;
730 int error2;
731
732 xfs_iunlock(xip, iolock);
733 if (need_i_mutex)
734 mutex_unlock(&inode->i_mutex);
735
736 error2 = filemap_write_and_wait_range(mapping, pos, end);
737 if (!error)
738 error = error2;
739 if (need_i_mutex)
740 mutex_lock(&inode->i_mutex);
741 xfs_ilock(xip, iolock);
742
743 error2 = xfs_fsync(xip);
744 if (!error)
745 error = error2;
746 }
747
748 out_unlock_internal:
749 if (xip->i_new_size) {
750 xfs_ilock(xip, XFS_ILOCK_EXCL);
751 xip->i_new_size = 0;
752 /*
753 * If this was a direct or synchronous I/O that failed (such
754 * as ENOSPC) then part of the I/O may have been written to
755 * disk before the error occurred. In this case the on-disk
756 * file size may have been adjusted beyond the in-memory file
757 * size and now needs to be truncated back.
758 */
759 if (xip->i_d.di_size > xip->i_size)
760 xip->i_d.di_size = xip->i_size;
761 xfs_iunlock(xip, XFS_ILOCK_EXCL);
762 }
763 xfs_iunlock(xip, iolock);
764 out_unlock_mutex:
765 if (need_i_mutex)
766 mutex_unlock(&inode->i_mutex);
767 return -error;
768}
769
770STATIC ssize_t
771xfs_file_aio_read(
772 struct kiocb *iocb,
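Both direct I/O paths above (xfs_read and xfs_write) reject transfers that are
not sector-aligned against the backing buffer target.  A minimal sketch of that
check, assuming only the bt_smask field visible in the diff; the helper name is
made up for illustration and is not part of the commit:

static inline int
xfs_dio_is_aligned(
	struct xfs_buftarg	*target,
	loff_t			pos,
	size_t			count)
{
	/* bt_smask is the sector size minus one; any low bit set means misalignment */
	return !((pos & target->bt_smask) || (count & target->bt_smask));
}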
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
deleted file mode 100644
index eac6f80d786d..000000000000
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ /dev/null
@@ -1,796 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_bit.h"
21#include "xfs_log.h"
22#include "xfs_inum.h"
23#include "xfs_trans.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_dir2.h"
27#include "xfs_alloc.h"
28#include "xfs_dmapi.h"
29#include "xfs_quota.h"
30#include "xfs_mount.h"
31#include "xfs_bmap_btree.h"
32#include "xfs_alloc_btree.h"
33#include "xfs_ialloc_btree.h"
34#include "xfs_dir2_sf.h"
35#include "xfs_attr_sf.h"
36#include "xfs_dinode.h"
37#include "xfs_inode.h"
38#include "xfs_bmap.h"
39#include "xfs_btree.h"
40#include "xfs_ialloc.h"
41#include "xfs_rtalloc.h"
42#include "xfs_error.h"
43#include "xfs_itable.h"
44#include "xfs_rw.h"
45#include "xfs_attr.h"
46#include "xfs_inode_item.h"
47#include "xfs_buf_item.h"
48#include "xfs_utils.h"
49#include "xfs_iomap.h"
50#include "xfs_vnodeops.h"
51#include "xfs_trace.h"
52
53#include <linux/capability.h>
54#include <linux/writeback.h>
55
56
57/*
58 * xfs_iozero
59 *
60 * xfs_iozero clears the specified range of buffer supplied,
61 * and marks all the affected blocks as valid and modified. If
62 * an affected block is not allocated, it will be allocated. If
63 * an affected block is not completely overwritten, and is not
64 * valid before the operation, it will be read from disk before
65 * being partially zeroed.
66 */
67STATIC int
68xfs_iozero(
69 struct xfs_inode *ip, /* inode */
70 loff_t pos, /* offset in file */
71 size_t count) /* size of data to zero */
72{
73 struct page *page;
74 struct address_space *mapping;
75 int status;
76
77 mapping = VFS_I(ip)->i_mapping;
78 do {
79 unsigned offset, bytes;
80 void *fsdata;
81
82 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
83 bytes = PAGE_CACHE_SIZE - offset;
84 if (bytes > count)
85 bytes = count;
86
87 status = pagecache_write_begin(NULL, mapping, pos, bytes,
88 AOP_FLAG_UNINTERRUPTIBLE,
89 &page, &fsdata);
90 if (status)
91 break;
92
93 zero_user(page, offset, bytes);
94
95 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
96 page, fsdata);
97 WARN_ON(status <= 0); /* can't return less than zero! */
98 pos += bytes;
99 count -= bytes;
100 status = 0;
101 } while (count);
102
103 return (-status);
104}
105
106ssize_t /* bytes read, or (-) error */
107xfs_read(
108 xfs_inode_t *ip,
109 struct kiocb *iocb,
110 const struct iovec *iovp,
111 unsigned int segs,
112 loff_t *offset,
113 int ioflags)
114{
115 struct file *file = iocb->ki_filp;
116 struct inode *inode = file->f_mapping->host;
117 xfs_mount_t *mp = ip->i_mount;
118 size_t size = 0;
119 ssize_t ret = 0;
120 xfs_fsize_t n;
121 unsigned long seg;
122
123
124 XFS_STATS_INC(xs_read_calls);
125
126 /* START copy & waste from filemap.c */
127 for (seg = 0; seg < segs; seg++) {
128 const struct iovec *iv = &iovp[seg];
129
130 /*
131 * If any segment has a negative length, or the cumulative
132 * length ever wraps negative then return -EINVAL.
133 */
134 size += iv->iov_len;
135 if (unlikely((ssize_t)(size|iv->iov_len) < 0))
136 return XFS_ERROR(-EINVAL);
137 }
138 /* END copy & waste from filemap.c */
139
140 if (unlikely(ioflags & IO_ISDIRECT)) {
141 xfs_buftarg_t *target =
142 XFS_IS_REALTIME_INODE(ip) ?
143 mp->m_rtdev_targp : mp->m_ddev_targp;
144 if ((*offset & target->bt_smask) ||
145 (size & target->bt_smask)) {
146 if (*offset == ip->i_size) {
147 return (0);
148 }
149 return -XFS_ERROR(EINVAL);
150 }
151 }
152
153 n = XFS_MAXIOFFSET(mp) - *offset;
154 if ((n <= 0) || (size == 0))
155 return 0;
156
157 if (n < size)
158 size = n;
159
160 if (XFS_FORCED_SHUTDOWN(mp))
161 return -EIO;
162
163 if (unlikely(ioflags & IO_ISDIRECT))
164 mutex_lock(&inode->i_mutex);
165 xfs_ilock(ip, XFS_IOLOCK_SHARED);
166
167 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
168 int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
169 int iolock = XFS_IOLOCK_SHARED;
170
171 ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *offset, size,
172 dmflags, &iolock);
173 if (ret) {
174 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
175 if (unlikely(ioflags & IO_ISDIRECT))
176 mutex_unlock(&inode->i_mutex);
177 return ret;
178 }
179 }
180
181 if (unlikely(ioflags & IO_ISDIRECT)) {
182 if (inode->i_mapping->nrpages)
183 ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK),
184 -1, FI_REMAPF_LOCKED);
185 mutex_unlock(&inode->i_mutex);
186 if (ret) {
187 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
188 return ret;
189 }
190 }
191
192 trace_xfs_file_read(ip, size, *offset, ioflags);
193
194 iocb->ki_pos = *offset;
195 ret = generic_file_aio_read(iocb, iovp, segs, *offset);
196 if (ret > 0)
197 XFS_STATS_ADD(xs_read_bytes, ret);
198
199 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
200 return ret;
201}
202
203ssize_t
204xfs_splice_read(
205 xfs_inode_t *ip,
206 struct file *infilp,
207 loff_t *ppos,
208 struct pipe_inode_info *pipe,
209 size_t count,
210 int flags,
211 int ioflags)
212{
213 xfs_mount_t *mp = ip->i_mount;
214 ssize_t ret;
215
216 XFS_STATS_INC(xs_read_calls);
217 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
218 return -EIO;
219
220 xfs_ilock(ip, XFS_IOLOCK_SHARED);
221
222 if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
223 int iolock = XFS_IOLOCK_SHARED;
224 int error;
225
226 error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
227 FILP_DELAY_FLAG(infilp), &iolock);
228 if (error) {
229 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
230 return -error;
231 }
232 }
233
234 trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
235
236 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
237 if (ret > 0)
238 XFS_STATS_ADD(xs_read_bytes, ret);
239
240 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
241 return ret;
242}
243
244ssize_t
245xfs_splice_write(
246 xfs_inode_t *ip,
247 struct pipe_inode_info *pipe,
248 struct file *outfilp,
249 loff_t *ppos,
250 size_t count,
251 int flags,
252 int ioflags)
253{
254 xfs_mount_t *mp = ip->i_mount;
255 ssize_t ret;
256 struct inode *inode = outfilp->f_mapping->host;
257 xfs_fsize_t isize, new_size;
258
259 XFS_STATS_INC(xs_write_calls);
260 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
261 return -EIO;
262
263 xfs_ilock(ip, XFS_IOLOCK_EXCL);
264
265 if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
266 int iolock = XFS_IOLOCK_EXCL;
267 int error;
268
269 error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
270 FILP_DELAY_FLAG(outfilp), &iolock);
271 if (error) {
272 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
273 return -error;
274 }
275 }
276
277 new_size = *ppos + count;
278
279 xfs_ilock(ip, XFS_ILOCK_EXCL);
280 if (new_size > ip->i_size)
281 ip->i_new_size = new_size;
282 xfs_iunlock(ip, XFS_ILOCK_EXCL);
283
284 trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
285
286 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
287 if (ret > 0)
288 XFS_STATS_ADD(xs_write_bytes, ret);
289
290 isize = i_size_read(inode);
291 if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
292 *ppos = isize;
293
294 if (*ppos > ip->i_size) {
295 xfs_ilock(ip, XFS_ILOCK_EXCL);
296 if (*ppos > ip->i_size)
297 ip->i_size = *ppos;
298 xfs_iunlock(ip, XFS_ILOCK_EXCL);
299 }
300
301 if (ip->i_new_size) {
302 xfs_ilock(ip, XFS_ILOCK_EXCL);
303 ip->i_new_size = 0;
304 if (ip->i_d.di_size > ip->i_size)
305 ip->i_d.di_size = ip->i_size;
306 xfs_iunlock(ip, XFS_ILOCK_EXCL);
307 }
308 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
309 return ret;
310}
311
312/*
313 * This routine is called to handle zeroing any space in the last
314 * block of the file that is beyond the EOF. We do this since the
315 * size is being increased without writing anything to that block
316 * and we don't want anyone to read the garbage on the disk.
317 */
318STATIC int /* error (positive) */
319xfs_zero_last_block(
320 xfs_inode_t *ip,
321 xfs_fsize_t offset,
322 xfs_fsize_t isize)
323{
324 xfs_fileoff_t last_fsb;
325 xfs_mount_t *mp = ip->i_mount;
326 int nimaps;
327 int zero_offset;
328 int zero_len;
329 int error = 0;
330 xfs_bmbt_irec_t imap;
331
332 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
333
334 zero_offset = XFS_B_FSB_OFFSET(mp, isize);
335 if (zero_offset == 0) {
336 /*
337 * There are no extra bytes in the last block on disk to
338 * zero, so return.
339 */
340 return 0;
341 }
342
343 last_fsb = XFS_B_TO_FSBT(mp, isize);
344 nimaps = 1;
345 error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
346 &nimaps, NULL, NULL);
347 if (error) {
348 return error;
349 }
350 ASSERT(nimaps > 0);
351 /*
352 * If the block underlying isize is just a hole, then there
353 * is nothing to zero.
354 */
355 if (imap.br_startblock == HOLESTARTBLOCK) {
356 return 0;
357 }
358 /*
359 * Zero the part of the last block beyond the EOF, and write it
360 * out sync. We need to drop the ilock while we do this so we
361 * don't deadlock when the buffer cache calls back to us.
362 */
363 xfs_iunlock(ip, XFS_ILOCK_EXCL);
364
365 zero_len = mp->m_sb.sb_blocksize - zero_offset;
366 if (isize + zero_len > offset)
367 zero_len = offset - isize;
368 error = xfs_iozero(ip, isize, zero_len);
369
370 xfs_ilock(ip, XFS_ILOCK_EXCL);
371 ASSERT(error >= 0);
372 return error;
373}
374
375/*
376 * Zero any on disk space between the current EOF and the new,
377 * larger EOF. This handles the normal case of zeroing the remainder
378 * of the last block in the file and the unusual case of zeroing blocks
379 * out beyond the size of the file. This second case only happens
380 * with fixed size extents and when the system crashes before the inode
381 * size was updated but after blocks were allocated. If fill is set,
382 * then any holes in the range are filled and zeroed. If not, the holes
383 * are left alone as holes.
384 */
385
386int /* error (positive) */
387xfs_zero_eof(
388 xfs_inode_t *ip,
389 xfs_off_t offset, /* starting I/O offset */
390 xfs_fsize_t isize) /* current inode size */
391{
392 xfs_mount_t *mp = ip->i_mount;
393 xfs_fileoff_t start_zero_fsb;
394 xfs_fileoff_t end_zero_fsb;
395 xfs_fileoff_t zero_count_fsb;
396 xfs_fileoff_t last_fsb;
397 xfs_fileoff_t zero_off;
398 xfs_fsize_t zero_len;
399 int nimaps;
400 int error = 0;
401 xfs_bmbt_irec_t imap;
402
403 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
404 ASSERT(offset > isize);
405
406 /*
407 * First handle zeroing the block on which isize resides.
408 * We only zero a part of that block so it is handled specially.
409 */
410 error = xfs_zero_last_block(ip, offset, isize);
411 if (error) {
412 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
413 return error;
414 }
415
416 /*
417 * Calculate the range between the new size and the old
418 * where blocks needing to be zeroed may exist. To get the
419 * block where the last byte in the file currently resides,
420 * we need to subtract one from the size and truncate back
421 * to a block boundary. We subtract 1 in case the size is
422 * exactly on a block boundary.
423 */
424 last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
425 start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
426 end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
427 ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
428 if (last_fsb == end_zero_fsb) {
429 /*
430 * The size was only incremented on its last block.
431 * We took care of that above, so just return.
432 */
433 return 0;
434 }
435
436 ASSERT(start_zero_fsb <= end_zero_fsb);
437 while (start_zero_fsb <= end_zero_fsb) {
438 nimaps = 1;
439 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
440 error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
441 0, NULL, 0, &imap, &nimaps, NULL, NULL);
442 if (error) {
443 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
444 return error;
445 }
446 ASSERT(nimaps > 0);
447
448 if (imap.br_state == XFS_EXT_UNWRITTEN ||
449 imap.br_startblock == HOLESTARTBLOCK) {
450 /*
451 * This loop handles initializing pages that were
452 * partially initialized by the code below this
453 * loop. It basically zeroes the part of the page
454 * that sits on a hole and sets the page as P_HOLE
455 * and calls remapf if it is a mapped file.
456 */
457 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
458 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
459 continue;
460 }
461
462 /*
463 * There are blocks we need to zero.
464 * Drop the inode lock while we're doing the I/O.
465 * We'll still have the iolock to protect us.
466 */
467 xfs_iunlock(ip, XFS_ILOCK_EXCL);
468
469 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
470 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
471
472 if ((zero_off + zero_len) > offset)
473 zero_len = offset - zero_off;
474
475 error = xfs_iozero(ip, zero_off, zero_len);
476 if (error) {
477 goto out_lock;
478 }
479
480 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
481 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
482
483 xfs_ilock(ip, XFS_ILOCK_EXCL);
484 }
485
486 return 0;
487
488out_lock:
489 xfs_ilock(ip, XFS_ILOCK_EXCL);
490 ASSERT(error >= 0);
491 return error;
492}
493
494ssize_t /* bytes written, or (-) error */
495xfs_write(
496 struct xfs_inode *xip,
497 struct kiocb *iocb,
498 const struct iovec *iovp,
499 unsigned int nsegs,
500 loff_t *offset,
501 int ioflags)
502{
503 struct file *file = iocb->ki_filp;
504 struct address_space *mapping = file->f_mapping;
505 struct inode *inode = mapping->host;
506 unsigned long segs = nsegs;
507 xfs_mount_t *mp;
508 ssize_t ret = 0, error = 0;
509 xfs_fsize_t isize, new_size;
510 int iolock;
511 int eventsent = 0;
512 size_t ocount = 0, count;
513 loff_t pos;
514 int need_i_mutex;
515
516 XFS_STATS_INC(xs_write_calls);
517
518 error = generic_segment_checks(iovp, &segs, &ocount, VERIFY_READ);
519 if (error)
520 return error;
521
522 count = ocount;
523 pos = *offset;
524
525 if (count == 0)
526 return 0;
527
528 mp = xip->i_mount;
529
530 xfs_wait_for_freeze(mp, SB_FREEZE_WRITE);
531
532 if (XFS_FORCED_SHUTDOWN(mp))
533 return -EIO;
534
535relock:
536 if (ioflags & IO_ISDIRECT) {
537 iolock = XFS_IOLOCK_SHARED;
538 need_i_mutex = 0;
539 } else {
540 iolock = XFS_IOLOCK_EXCL;
541 need_i_mutex = 1;
542 mutex_lock(&inode->i_mutex);
543 }
544
545 xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
546
547start:
548 error = -generic_write_checks(file, &pos, &count,
549 S_ISBLK(inode->i_mode));
550 if (error) {
551 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
552 goto out_unlock_mutex;
553 }
554
555 if ((DM_EVENT_ENABLED(xip, DM_EVENT_WRITE) &&
556 !(ioflags & IO_INVIS) && !eventsent)) {
557 int dmflags = FILP_DELAY_FLAG(file);
558
559 if (need_i_mutex)
560 dmflags |= DM_FLAGS_IMUX;
561
562 xfs_iunlock(xip, XFS_ILOCK_EXCL);
563 error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, xip,
564 pos, count, dmflags, &iolock);
565 if (error) {
566 goto out_unlock_internal;
567 }
568 xfs_ilock(xip, XFS_ILOCK_EXCL);
569 eventsent = 1;
570
571 /*
572 * The iolock was dropped and reacquired in XFS_SEND_DATA
573 * so we have to recheck the size when appending.
574 * We will only "goto start;" once, since having sent the
575 * event prevents another call to XFS_SEND_DATA, which is
576 * what allows the size to change in the first place.
577 */
578 if ((file->f_flags & O_APPEND) && pos != xip->i_size)
579 goto start;
580 }
581
582 if (ioflags & IO_ISDIRECT) {
583 xfs_buftarg_t *target =
584 XFS_IS_REALTIME_INODE(xip) ?
585 mp->m_rtdev_targp : mp->m_ddev_targp;
586
587 if ((pos & target->bt_smask) || (count & target->bt_smask)) {
588 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
589 return XFS_ERROR(-EINVAL);
590 }
591
592 if (!need_i_mutex && (mapping->nrpages || pos > xip->i_size)) {
593 xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
594 iolock = XFS_IOLOCK_EXCL;
595 need_i_mutex = 1;
596 mutex_lock(&inode->i_mutex);
597 xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
598 goto start;
599 }
600 }
601
602 new_size = pos + count;
603 if (new_size > xip->i_size)
604 xip->i_new_size = new_size;
605
606 if (likely(!(ioflags & IO_INVIS)))
607 file_update_time(file);
608
609 /*
610 * If the offset is beyond the size of the file, we have a couple
611 * of things to do. First, if there is already space allocated
612 * we need to either create holes or zero the disk or ...
613 *
614 * If there is a page where the previous size lands, we need
615 * to zero it out up to the new size.
616 */
617
618 if (pos > xip->i_size) {
619 error = xfs_zero_eof(xip, pos, xip->i_size);
620 if (error) {
621 xfs_iunlock(xip, XFS_ILOCK_EXCL);
622 goto out_unlock_internal;
623 }
624 }
625 xfs_iunlock(xip, XFS_ILOCK_EXCL);
626
627 /*
628 * If we're writing the file then make sure to clear the
629 * setuid and setgid bits if the process is not being run
630 * by root. This keeps people from modifying setuid and
631 * setgid binaries.
632 */
633 error = -file_remove_suid(file);
634 if (unlikely(error))
635 goto out_unlock_internal;
636
637 /* We can write back this queue in page reclaim */
638 current->backing_dev_info = mapping->backing_dev_info;
639
640 if ((ioflags & IO_ISDIRECT)) {
641 if (mapping->nrpages) {
642 WARN_ON(need_i_mutex == 0);
643 error = xfs_flushinval_pages(xip,
644 (pos & PAGE_CACHE_MASK),
645 -1, FI_REMAPF_LOCKED);
646 if (error)
647 goto out_unlock_internal;
648 }
649
650 if (need_i_mutex) {
651 /* demote the lock now the cached pages are gone */
652 xfs_ilock_demote(xip, XFS_IOLOCK_EXCL);
653 mutex_unlock(&inode->i_mutex);
654
655 iolock = XFS_IOLOCK_SHARED;
656 need_i_mutex = 0;
657 }
658
659 trace_xfs_file_direct_write(xip, count, *offset, ioflags);
660 ret = generic_file_direct_write(iocb, iovp,
661 &segs, pos, offset, count, ocount);
662
663 /*
664 * direct-io write to a hole: fall through to buffered I/O
665 * for completing the rest of the request.
666 */
667 if (ret >= 0 && ret != count) {
668 XFS_STATS_ADD(xs_write_bytes, ret);
669
670 pos += ret;
671 count -= ret;
672
673 ioflags &= ~IO_ISDIRECT;
674 xfs_iunlock(xip, iolock);
675 goto relock;
676 }
677 } else {
678 int enospc = 0;
679 ssize_t ret2 = 0;
680
681write_retry:
682 trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
683 ret2 = generic_file_buffered_write(iocb, iovp, segs,
684 pos, offset, count, ret);
685 /*
686 * if we just got an ENOSPC, flush the inode now we
687 * aren't holding any page locks and retry *once*
688 */
689 if (ret2 == -ENOSPC && !enospc) {
690 error = xfs_flush_pages(xip, 0, -1, 0, FI_NONE);
691 if (error)
692 goto out_unlock_internal;
693 enospc = 1;
694 goto write_retry;
695 }
696 ret = ret2;
697 }
698
699 current->backing_dev_info = NULL;
700
701 isize = i_size_read(inode);
702 if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
703 *offset = isize;
704
705 if (*offset > xip->i_size) {
706 xfs_ilock(xip, XFS_ILOCK_EXCL);
707 if (*offset > xip->i_size)
708 xip->i_size = *offset;
709 xfs_iunlock(xip, XFS_ILOCK_EXCL);
710 }
711
712 if (ret == -ENOSPC &&
713 DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
714 xfs_iunlock(xip, iolock);
715 if (need_i_mutex)
716 mutex_unlock(&inode->i_mutex);
717 error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, xip,
718 DM_RIGHT_NULL, xip, DM_RIGHT_NULL, NULL, NULL,
719 0, 0, 0); /* Delay flag intentionally unused */
720 if (need_i_mutex)
721 mutex_lock(&inode->i_mutex);
722 xfs_ilock(xip, iolock);
723 if (error)
724 goto out_unlock_internal;
725 goto start;
726 }
727
728 error = -ret;
729 if (ret <= 0)
730 goto out_unlock_internal;
731
732 XFS_STATS_ADD(xs_write_bytes, ret);
733
734 /* Handle various SYNC-type writes */
735 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
736 loff_t end = pos + ret - 1;
737 int error2;
738
739 xfs_iunlock(xip, iolock);
740 if (need_i_mutex)
741 mutex_unlock(&inode->i_mutex);
742
743 error2 = filemap_write_and_wait_range(mapping, pos, end);
744 if (!error)
745 error = error2;
746 if (need_i_mutex)
747 mutex_lock(&inode->i_mutex);
748 xfs_ilock(xip, iolock);
749
750 error2 = xfs_fsync(xip);
751 if (!error)
752 error = error2;
753 }
754
755 out_unlock_internal:
756 if (xip->i_new_size) {
757 xfs_ilock(xip, XFS_ILOCK_EXCL);
758 xip->i_new_size = 0;
759 /*
760 * If this was a direct or synchronous I/O that failed (such
761 * as ENOSPC) then part of the I/O may have been written to
762 * disk before the error occurred. In this case the on-disk
763 * file size may have been adjusted beyond the in-memory file
764 * size and now needs to be truncated back.
765 */
766 if (xip->i_d.di_size > xip->i_size)
767 xip->i_d.di_size = xip->i_size;
768 xfs_iunlock(xip, XFS_ILOCK_EXCL);
769 }
770 xfs_iunlock(xip, iolock);
771 out_unlock_mutex:
772 if (need_i_mutex)
773 mutex_unlock(&inode->i_mutex);
774 return -error;
775}
776
777/*
778 * If the underlying (data/log/rt) device is readonly, there are some
779 * operations that cannot proceed.
780 */
781int
782xfs_dev_is_read_only(
783 xfs_mount_t *mp,
784 char *message)
785{
786 if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
787 xfs_readonly_buftarg(mp->m_logdev_targp) ||
788 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
789 cmn_err(CE_NOTE,
790 "XFS: %s required on read-only device.", message);
791 cmn_err(CE_NOTE,
792 "XFS: write access unavailable, cannot proceed.");
793 return EROFS;
794 }
795 return 0;
796}
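
xfs_dev_is_read_only() follows the same positive-error convention as the rest
of this file, returning EROFS rather than -EROFS.  A hypothetical caller (not
shown in this diff) would use it along these lines:

	if (xfs_dev_is_read_only(mp, "grow"))
		return XFS_ERROR(EROFS);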