author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-12-10 13:18:27 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-12-10 13:18:27 -0500
commit    41f81e88e01eb959f439f8537c58078e4bfc5291 (patch)
tree      3d5dba6982a074bcd5f3248c57679215e5f8b701 /fs/xfs
parent    dc3d532a1792263ec9b26c1cbc7ce566056b5b1f (diff)
parent    cf10e82bdc0d38d09dfaf46d0daf56136138ef3f (diff)
Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6
* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6:
  [XFS] Fix xfs_ichgtime()s broken usage of I_SYNC
  [XFS] Make xfsbufd threads freezable
  [XFS] revert to double-buffering readdir
  [XFS] Fix broken inode cluster setup.
  [XFS] Clear XBF_READ_AHEAD flag on I/O completion.
  [XFS] Fixed a few bugs in xfs_buf_associate_memory()
  [XFS] 971064 Various fixups for xfs_bulkstat().
  [XFS] Fix dbflush panic in xfs_qm_sync.
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/linux-2.6/xfs_buf.c       37
-rw-r--r--   fs/xfs/linux-2.6/xfs_file.c     124
-rw-r--r--   fs/xfs/linux-2.6/xfs_ioctl.c     20
-rw-r--r--   fs/xfs/linux-2.6/xfs_ioctl32.c    3
-rw-r--r--   fs/xfs/linux-2.6/xfs_iops.c       4
-rw-r--r--   fs/xfs/quota/xfs_qm.c             3
-rw-r--r--   fs/xfs/xfs_iget.c                 2
-rw-r--r--   fs/xfs/xfs_itable.c              43
8 files changed, 186 insertions, 50 deletions
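
A quick illustration of the arithmetic behind the "Fixed a few bugs in xfs_buf_associate_memory()" item: the fixed code derives the page count from the page-aligned length of offset + len instead of special-casing an unaligned start. The sketch below is a stand-alone user-space rendering of that calculation only, with a hypothetical helper name (pages_for_region) and a fixed 4 KiB page size assumed; it is not the kernel code itself.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Hypothetical helper: how many pages back an arbitrary memory region? */
static unsigned long pages_for_region(unsigned long mem, unsigned long len)
{
	unsigned long pageaddr = mem & PAGE_MASK;	/* first backing page */
	unsigned long offset   = mem - pageaddr;	/* offset into that page */
	unsigned long buflen   = PAGE_ALIGN(len + offset);

	return buflen / PAGE_SIZE;
}

int main(void)
{
	/* 4100 bytes starting 100 bytes into a page span exactly two pages */
	printf("%lu\n", pages_for_region(0x10064UL, 4100UL));	/* prints 2 */
	return 0;
}
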
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index b9c8589e05c2..a49dd8d4b069 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -725,15 +725,15 @@ xfs_buf_associate_memory(
 {
 	int			rval;
 	int			i = 0;
-	size_t			ptr;
-	size_t			end, end_cur;
-	off_t			offset;
+	unsigned long		pageaddr;
+	unsigned long		offset;
+	size_t			buflen;
 	int			page_count;
 
-	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
-	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
-	if (offset && (len > PAGE_CACHE_SIZE))
-		page_count++;
+	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
+	offset = (unsigned long)mem - pageaddr;
+	buflen = PAGE_CACHE_ALIGN(len + offset);
+	page_count = buflen >> PAGE_CACHE_SHIFT;
 
 	/* Free any previous set of page pointers */
 	if (bp->b_pages)
@@ -747,22 +747,15 @@ xfs_buf_associate_memory(
 		return rval;
 
 	bp->b_offset = offset;
-	ptr = (size_t) mem & PAGE_CACHE_MASK;
-	end = PAGE_CACHE_ALIGN((size_t) mem + len);
-	end_cur = end;
-	/* set up first page */
-	bp->b_pages[0] = mem_to_page(mem);
-
-	ptr += PAGE_CACHE_SIZE;
-	bp->b_page_count = ++i;
-	while (ptr < end) {
-		bp->b_pages[i] = mem_to_page((void *)ptr);
-		bp->b_page_count = ++i;
-		ptr += PAGE_CACHE_SIZE;
-	}
+
+	for (i = 0; i < bp->b_page_count; i++) {
+		bp->b_pages[i] = mem_to_page((void *)pageaddr);
+		pageaddr += PAGE_CACHE_SIZE;
+	}
 	bp->b_locked = 0;
 
-	bp->b_count_desired = bp->b_buffer_length = len;
+	bp->b_count_desired = len;
+	bp->b_buffer_length = buflen;
 	bp->b_flags |= XBF_MAPPED;
 
 	return 0;
@@ -1032,7 +1025,7 @@ xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
 {
-	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
 	if (bp->b_error == 0)
 		bp->b_flags |= XBF_DONE;
 
@@ -1750,6 +1743,8 @@ xfsbufd(
 
 	current->flags |= PF_MEMALLOC;
 
+	set_freezable();
+
 	do {
 		if (unlikely(freezing(current))) {
 			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index fb8dd34041eb..54c564693d93 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -218,6 +218,15 @@ xfs_vm_fault(
 }
 #endif /* CONFIG_XFS_DMAPI */
 
+/*
+ * Unfortunately we can't just use the clean and simple readdir implementation
+ * below, because nfs might call back into ->lookup from the filldir callback
+ * and that will deadlock the low-level btree code.
+ *
+ * Hopefully we'll find a better workaround that allows to use the optimal
+ * version at least for local readdirs for 2.6.25.
+ */
+#if 0
 STATIC int
 xfs_file_readdir(
 	struct file	*filp,
@@ -249,6 +258,121 @@ xfs_file_readdir(
 		return -error;
 	return 0;
 }
+#else
+
+struct hack_dirent {
+	int		namlen;
+	loff_t		offset;
+	u64		ino;
+	unsigned int	d_type;
+	char		name[];
+};
+
+struct hack_callback {
+	char		*dirent;
+	size_t		len;
+	size_t		used;
+};
+
+STATIC int
+xfs_hack_filldir(
+	void		*__buf,
+	const char	*name,
+	int		namlen,
+	loff_t		offset,
+	u64		ino,
+	unsigned int	d_type)
+{
+	struct hack_callback *buf = __buf;
+	struct hack_dirent *de = (struct hack_dirent *)(buf->dirent + buf->used);
+
+	if (buf->used + sizeof(struct hack_dirent) + namlen > buf->len)
+		return -EINVAL;
+
+	de->namlen = namlen;
+	de->offset = offset;
+	de->ino = ino;
+	de->d_type = d_type;
+	memcpy(de->name, name, namlen);
+	buf->used += sizeof(struct hack_dirent) + namlen;
+	return 0;
+}
+
+STATIC int
+xfs_file_readdir(
+	struct file	*filp,
+	void		*dirent,
+	filldir_t	filldir)
+{
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	xfs_inode_t	*ip = XFS_I(inode);
+	struct hack_callback buf;
+	struct hack_dirent *de;
+	int		error;
+	loff_t		size;
+	int		eof = 0;
+	xfs_off_t	start_offset, curr_offset, offset;
+
+	/*
+	 * Try fairly hard to get memory
+	 */
+	buf.len = PAGE_CACHE_SIZE;
+	do {
+		buf.dirent = kmalloc(buf.len, GFP_KERNEL);
+		if (buf.dirent)
+			break;
+		buf.len >>= 1;
+	} while (buf.len >= 1024);
+
+	if (!buf.dirent)
+		return -ENOMEM;
+
+	curr_offset = filp->f_pos;
+	if (curr_offset == 0x7fffffff)
+		offset = 0xffffffff;
+	else
+		offset = filp->f_pos;
+
+	while (!eof) {
+		int reclen;
+		start_offset = offset;
+
+		buf.used = 0;
+		error = -xfs_readdir(ip, &buf, buf.len, &offset,
+				     xfs_hack_filldir);
+		if (error || offset == start_offset) {
+			size = 0;
+			break;
+		}
+
+		size = buf.used;
+		de = (struct hack_dirent *)buf.dirent;
+		while (size > 0) {
+			if (filldir(dirent, de->name, de->namlen,
+					curr_offset & 0x7fffffff,
+					de->ino, de->d_type)) {
+				goto done;
+			}
+
+			reclen = sizeof(struct hack_dirent) + de->namlen;
+			size -= reclen;
+			curr_offset = de->offset /* & 0x7fffffff */;
+			de = (struct hack_dirent *)((char *)de + reclen);
+		}
+	}
+
+ done:
+	if (!error) {
+		if (size == 0)
+			filp->f_pos = offset & 0x7fffffff;
+		else if (de)
+			filp->f_pos = curr_offset;
+	}
+
+	kfree(buf.dirent);
+	return error;
+}
+#endif
 
 STATIC int
 xfs_file_mmap(
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2b34bad48b07..98a56568bb24 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -1047,24 +1047,20 @@ xfs_ioc_bulkstat(
 	if ((count = bulkreq.icount) <= 0)
 		return -XFS_ERROR(EINVAL);
 
+	if (bulkreq.ubuffer == NULL)
+		return -XFS_ERROR(EINVAL);
+
 	if (cmd == XFS_IOC_FSINUMBERS)
 		error = xfs_inumbers(mp, &inlast, &count,
 					bulkreq.ubuffer, xfs_inumbers_fmt);
 	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
 		error = xfs_bulkstat_single(mp, &inlast,
 						bulkreq.ubuffer, &done);
-	else {	/* XFS_IOC_FSBULKSTAT */
-		if (count == 1 && inlast != 0) {
-			inlast++;
-			error = xfs_bulkstat_single(mp, &inlast,
-					bulkreq.ubuffer, &done);
-		} else {
-			error = xfs_bulkstat(mp, &inlast, &count,
-				(bulkstat_one_pf)xfs_bulkstat_one, NULL,
-				sizeof(xfs_bstat_t), bulkreq.ubuffer,
-				BULKSTAT_FG_QUICK, &done);
-		}
-	}
+	else	/* XFS_IOC_FSBULKSTAT */
+		error = xfs_bulkstat(mp, &inlast, &count,
+			(bulkstat_one_pf)xfs_bulkstat_one, NULL,
+			sizeof(xfs_bstat_t), bulkreq.ubuffer,
+			BULKSTAT_FG_QUICK, &done);
 
 	if (error)
 		return -error;
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0046bdd5b7f1..bf2a956b63c2 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -291,6 +291,9 @@ xfs_ioc_bulkstat_compat(
 	if ((count = bulkreq.icount) <= 0)
 		return -XFS_ERROR(EINVAL);
 
+	if (bulkreq.ubuffer == NULL)
+		return -XFS_ERROR(EINVAL);
+
 	if (cmd == XFS_IOC_FSINUMBERS)
 		error = xfs_inumbers(mp, &inlast, &count,
 				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index ac50f8a37582..37e116779eb1 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -117,7 +117,7 @@ xfs_ichgtime(
 	 */
 	SYNCHRONIZE();
 	ip->i_update_core = 1;
-	if (!(inode->i_state & I_SYNC))
+	if (!(inode->i_state & I_NEW))
 		mark_inode_dirty_sync(inode);
 }
 
@@ -169,7 +169,7 @@ xfs_ichgtime_fast(
 	 */
 	SYNCHRONIZE();
 	ip->i_update_core = 1;
-	if (!(inode->i_state & I_SYNC))
+	if (!(inode->i_state & I_NEW))
 		mark_inode_dirty_sync(inode);
 }
 
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index b5f91281b707..d488645f833d 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1008,6 +1008,9 @@ xfs_qm_sync(
 	boolean_t	nowait;
 	int		error;
 
+	if (! XFS_IS_QUOTA_ON(mp))
+		return 0;
+
 	restarts = 0;
 	/*
 	 * We won't block unless we are asked to.
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 488836e204a3..fb69ef180b27 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -267,7 +267,7 @@ finish_inode:
 		icl = NULL;
 		if (radix_tree_gang_lookup(&pag->pag_ici_root, (void**)&iq,
 				first_index, 1)) {
-			if ((iq->i_ino & mask) == first_index)
+			if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) == first_index)
 				icl = iq->i_cluster;
 		}
 
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 9972992fd3c3..9fc4c2886529 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -316,6 +316,8 @@ xfs_bulkstat_use_dinode(
 	return 1;
 }
 
+#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)
+
 /*
  * Return stat information in bulk (by-inode) for the filesystem.
  */
@@ -353,7 +355,7 @@ xfs_bulkstat(
 	xfs_inobt_rec_incore_t *irbp;	/* current irec buffer pointer */
 	xfs_inobt_rec_incore_t *irbuf;	/* start of irec buffer */
 	xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
-	xfs_ino_t		lastino=0; /* last inode number returned */
+	xfs_ino_t		lastino; /* last inode number returned */
 	int			nbcluster; /* # of blocks in a cluster */
 	int			nicluster; /* # of inodes in a cluster */
 	int			nimask;	/* mask for inode clusters */
@@ -373,6 +375,7 @@ xfs_bulkstat(
 	 * Get the last inode value, see if there's nothing to do.
 	 */
 	ino = (xfs_ino_t)*lastinop;
+	lastino = ino;
 	dip = NULL;
 	agno = XFS_INO_TO_AGNO(mp, ino);
 	agino = XFS_INO_TO_AGINO(mp, ino);
@@ -382,6 +385,9 @@ xfs_bulkstat(
 		*ubcountp = 0;
 		return 0;
 	}
+	if (!ubcountp || *ubcountp <= 0) {
+		return EINVAL;
+	}
 	ubcount = *ubcountp; /* statstruct's */
 	ubleft = ubcount * statstruct_size; /* bytes */
 	*ubcountp = ubelem = 0;
@@ -402,7 +408,8 @@ xfs_bulkstat(
 	 * inode returned; 0 means start of the allocation group.
 	 */
 	rval = 0;
-	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
+	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
+		cond_resched();
 		bp = NULL;
 		down_read(&mp->m_peraglock);
 		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
@@ -499,6 +506,7 @@ xfs_bulkstat(
 					break;
 				error = xfs_inobt_lookup_ge(cur, agino, 0, 0,
 							    &tmp);
+				cond_resched();
 			}
 			/*
 			 * If ran off the end of the ag either with an error,
@@ -542,6 +550,7 @@ xfs_bulkstat(
 				 */
 				agino = gino + XFS_INODES_PER_CHUNK;
 				error = xfs_inobt_increment(cur, 0, &tmp);
+				cond_resched();
 			}
 			/*
 			 * Drop the btree buffers and the agi buffer.
@@ -555,12 +564,12 @@ xfs_bulkstat(
 		 */
 		irbufend = irbp;
 		for (irbp = irbuf;
-		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
+		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
 			/*
 			 * Now process this chunk of inodes.
 			 */
 			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
-			     ubleft > 0 &&
+			     XFS_BULKSTAT_UBLEFT(ubleft) &&
 			     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
 			     chunkidx++, clustidx++, agino++) {
 				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
@@ -663,15 +672,13 @@ xfs_bulkstat(
 						ubleft, private_data,
 						bno, &ubused, dip, &fmterror);
 				if (fmterror == BULKSTAT_RV_NOTHING) {
-					if (error == EFAULT) {
-						ubleft = 0;
-						rval = error;
-						break;
-					}
-					else if (error == ENOMEM)
-						ubleft = 0;
-					else
-						lastino = ino;
+					if (error && error != ENOENT &&
+					    error != EINVAL) {
+						ubleft = 0;
+						rval = error;
+						break;
+					}
+					lastino = ino;
 					continue;
 				}
 				if (fmterror == BULKSTAT_RV_GIVEUP) {
@@ -686,6 +693,8 @@ xfs_bulkstat(
 					ubelem++;
 					lastino = ino;
 				}
+
+				cond_resched();
 			}
 
 			if (bp)
@@ -694,11 +703,12 @@ xfs_bulkstat(
 		/*
 		 * Set up for the next loop iteration.
 		 */
-		if (ubleft > 0) {
+		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
 			if (end_of_ag) {
 				agno++;
 				agino = 0;
-			}
+			} else
+				agino = XFS_INO_TO_AGINO(mp, lastino);
 		} else
 			break;
 	}
@@ -707,6 +717,11 @@ xfs_bulkstat(
 	 */
 	kmem_free(irbuf, irbsize);
 	*ubcountp = ubelem;
+	/*
+	 * Found some inodes, return them now and return the error next time.
+	 */
+	if (ubelem)
+		rval = 0;
 	if (agno >= mp->m_sb.sb_agcount) {
 		/*
 		 * If we ran out of filesystem, mark lastino as off
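
As a footnote to the xfs_bulkstat() changes above: the new XFS_BULKSTAT_UBLEFT() checks and the "return them now and return the error next time" exit follow a common bulk-copy pattern -- keep a byte budget for the caller's buffer, continue only while one more whole record fits, and if anything was produced report success immediately so a later call can surface the deferred error. The sketch below shows that pattern in isolation, with hypothetical names (bulk_fill, stat_rec) and a made-up lookup failure; it is only an illustration under those assumptions, not the XFS implementation.

#include <stddef.h>
#include <errno.h>

struct stat_rec { unsigned long long ino; };	/* stand-in for xfs_bstat_t */

/* Continue only while at least one more whole record fits the buffer */
#define REC_FITS(left)	((left) >= sizeof(struct stat_rec))

static int bulk_fill(struct stat_rec *ubuf, size_t ubytes,
		     unsigned long long first_ino, size_t *nfilled)
{
	size_t left = ubytes;			/* byte budget, like "ubleft" */
	unsigned long long ino = first_ino;
	int error = 0;

	*nfilled = 0;
	while (REC_FITS(left)) {
		if (ino >= first_ino + 100) {	/* pretend the lookup runs dry */
			error = ENOENT;
			break;
		}
		ubuf[(*nfilled)++].ino = ino++;
		left -= sizeof(struct stat_rec);
	}

	/*
	 * Got at least one record: report success now; a later call that
	 * makes no progress will surface the error instead.
	 */
	if (*nfilled)
		error = 0;
	return error;
}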