 fs/xfs/xfs_iget.c  | 251 ----------------
 fs/xfs/xfs_inode.c | 250 +++++++++++++++
 2 files changed, 250 insertions(+), 251 deletions(-)
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 069c5ceb9459..ea9a5fa49a48 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -453,254 +453,3 @@ out_error_or_again:
 	return error;
 }
 
-/*
- * This is a wrapper routine around the xfs_ilock() routine
- * used to centralize some grungy code. It is used in places
- * that wish to lock the inode solely for reading the extents.
- * The reason these places can't just call xfs_ilock(SHARED)
- * is that the inode lock also guards to bringing in of the
- * extents from disk for a file in b-tree format. If the inode
- * is in b-tree format, then we need to lock the inode exclusively
- * until the extents are read in. Locking it exclusively all
- * the time would limit our parallelism unnecessarily, though.
- * What we do instead is check to see if the extents have been
- * read in yet, and only lock the inode exclusively if they
- * have not.
- *
- * The function returns a value which should be given to the
- * corresponding xfs_iunlock_map_shared(). This value is
- * the mode in which the lock was actually taken.
- */
-uint
-xfs_ilock_map_shared(
-	xfs_inode_t	*ip)
-{
-	uint	lock_mode;
-
-	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
-	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
-		lock_mode = XFS_ILOCK_EXCL;
-	} else {
-		lock_mode = XFS_ILOCK_SHARED;
-	}
-
-	xfs_ilock(ip, lock_mode);
-
-	return lock_mode;
-}
-
-/*
- * This is simply the unlock routine to go with xfs_ilock_map_shared().
- * All it does is call xfs_iunlock() with the given lock_mode.
- */
-void
-xfs_iunlock_map_shared(
-	xfs_inode_t	*ip,
-	unsigned int	lock_mode)
-{
-	xfs_iunlock(ip, lock_mode);
-}
-
-/*
- * The xfs inode contains 2 locks: a multi-reader lock called the
- * i_iolock and a multi-reader lock called the i_lock. This routine
- * allows either or both of the locks to be obtained.
- *
- * The 2 locks should always be ordered so that the IO lock is
- * obtained first in order to prevent deadlock.
- *
- * ip -- the inode being locked
- * lock_flags -- this parameter indicates the inode's locks
- *	to be locked. It can be:
- *		XFS_IOLOCK_SHARED,
- *		XFS_IOLOCK_EXCL,
- *		XFS_ILOCK_SHARED,
- *		XFS_ILOCK_EXCL,
- *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
- *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
- *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
- *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
- */
-void
-xfs_ilock(
-	xfs_inode_t	*ip,
-	uint		lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_ILOCK_SHARED)
-		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-
-	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
-}
-
-/*
- * This is just like xfs_ilock(), except that the caller
- * is guaranteed not to sleep. It returns 1 if it gets
- * the requested locks and 0 otherwise. If the IO lock is
- * obtained but the inode lock cannot be, then the IO lock
- * is dropped before returning.
- *
- * ip -- the inode being locked
- * lock_flags -- this parameter indicates the inode's locks to be
- *	to be locked. See the comment for xfs_ilock() for a list
- *	of valid values.
- */
-int
-xfs_ilock_nowait(
-	xfs_inode_t	*ip,
-	uint		lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_iolock))
-			goto out;
-	} else if (lock_flags & XFS_IOLOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_iolock))
-			goto out;
-	}
-	if (lock_flags & XFS_ILOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_lock))
-			goto out_undo_iolock;
-	} else if (lock_flags & XFS_ILOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_lock))
-			goto out_undo_iolock;
-	}
-	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
-	return 1;
-
- out_undo_iolock:
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
- out:
-	return 0;
-}
-
-/*
- * xfs_iunlock() is used to drop the inode locks acquired with
- * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
- * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
- * that we know which locks to drop.
- *
- * ip -- the inode being unlocked
- * lock_flags -- this parameter indicates the inode's locks to be
- *	to be unlocked. See the comment for xfs_ilock() for a list
- *	of valid values for this parameter.
- *
- */
-void
-xfs_iunlock(
-	xfs_inode_t	*ip,
-	uint		lock_flags)
-{
-	/*
-	 * You can't set both SHARED and EXCL for the same lock,
-	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
-	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
-	 */
-	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
-	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
-	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
-	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-	ASSERT(lock_flags != 0);
-
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrunlock_excl(&ip->i_lock);
-	else if (lock_flags & XFS_ILOCK_SHARED)
-		mrunlock_shared(&ip->i_lock);
-
-	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
-}
-
-/*
- * give up write locks. the i/o lock cannot be held nested
- * if it is being demoted.
- */
-void
-xfs_ilock_demote(
-	xfs_inode_t	*ip,
-	uint		lock_flags)
-{
-	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
-	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
-
-	if (lock_flags & XFS_ILOCK_EXCL)
-		mrdemote(&ip->i_lock);
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrdemote(&ip->i_iolock);
-
-	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
-}
-
-#ifdef DEBUG
-int
-xfs_isilocked(
-	xfs_inode_t	*ip,
-	uint		lock_flags)
-{
-	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
-		if (!(lock_flags & XFS_ILOCK_SHARED))
-			return !!ip->i_lock.mr_writer;
-		return rwsem_is_locked(&ip->i_lock.mr_lock);
-	}
-
-	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
-		if (!(lock_flags & XFS_IOLOCK_SHARED))
-			return !!ip->i_iolock.mr_writer;
-		return rwsem_is_locked(&ip->i_iolock.mr_lock);
-	}
-
-	ASSERT(0);
-	return 0;
-}
-#endif
-
-void
-__xfs_iflock(
-	struct xfs_inode	*ip)
-{
-	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
-	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
-
-	do {
-		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
-		if (xfs_isiflocked(ip))
-			io_schedule();
-	} while (!xfs_iflock_nowait(ip));
-
-	finish_wait(wq, &wait.wait);
-}
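
The block removed above (and re-added to fs/xfs/xfs_inode.c below) documents two caller-side conventions: always take the IO lock before the inode lock, and hand the mode returned by xfs_ilock_map_shared() back to xfs_iunlock_map_shared(), since the lock may have been taken exclusive while b-tree extents were read in. A minimal caller sketch of both conventions follows; the function name xfs_example_read_extents is invented for illustration and is not part of this patch:

static int
xfs_example_read_extents(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	/* IO lock first, then inode lock, per the documented ordering rule */
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	/*
	 * This may take the ILOCK exclusive if the extents of a b-tree
	 * format fork still have to be read in; the return value records
	 * the mode that was actually taken.
	 */
	lock_mode = xfs_ilock_map_shared(ip);

	/* ... walk the in-core extent list here ... */

	xfs_iunlock_map_shared(ip, lock_mode);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return 0;
}

The same ordering is what makes the out_undo_iolock path in xfs_ilock_nowait() sufficient: only the IO lock can be held when the inode-lock trylock fails, so a caller that gets 0 back holds nothing and can simply fall back to the blocking xfs_ilock().
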
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 2778258fcfa2..ba404e4b9f0c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -74,6 +74,256 @@ xfs_get_extsz_hint(
 	return 0;
 }
 
+/*
+ * This is a wrapper routine around the xfs_ilock() routine used to centralize
+ * some grungy code. It is used in places that wish to lock the inode solely
+ * for reading the extents. The reason these places can't just call
+ * xfs_ilock(SHARED) is that the inode lock also guards to bringing in of the
+ * extents from disk for a file in b-tree format. If the inode is in b-tree
+ * format, then we need to lock the inode exclusively until the extents are read
+ * in. Locking it exclusively all the time would limit our parallelism
+ * unnecessarily, though. What we do instead is check to see if the extents
+ * have been read in yet, and only lock the inode exclusively if they have not.
+ *
+ * The function returns a value which should be given to the corresponding
+ * xfs_iunlock_map_shared(). This value is the mode in which the lock was
+ * actually taken.
+ */
+uint
+xfs_ilock_map_shared(
+	xfs_inode_t	*ip)
+{
+	uint	lock_mode;
+
+	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
+	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
+		lock_mode = XFS_ILOCK_EXCL;
+	} else {
+		lock_mode = XFS_ILOCK_SHARED;
+	}
+
+	xfs_ilock(ip, lock_mode);
+
+	return lock_mode;
+}
+
+/*
+ * This is simply the unlock routine to go with xfs_ilock_map_shared().
+ * All it does is call xfs_iunlock() with the given lock_mode.
+ */
+void
+xfs_iunlock_map_shared(
+	xfs_inode_t	*ip,
+	unsigned int	lock_mode)
+{
+	xfs_iunlock(ip, lock_mode);
+}
+
+/*
+ * The xfs inode contains 2 locks: a multi-reader lock called the
+ * i_iolock and a multi-reader lock called the i_lock. This routine
+ * allows either or both of the locks to be obtained.
+ *
+ * The 2 locks should always be ordered so that the IO lock is
+ * obtained first in order to prevent deadlock.
+ *
+ * ip -- the inode being locked
+ * lock_flags -- this parameter indicates the inode's locks
+ *	to be locked. It can be:
+ *		XFS_IOLOCK_SHARED,
+ *		XFS_IOLOCK_EXCL,
+ *		XFS_ILOCK_SHARED,
+ *		XFS_ILOCK_EXCL,
+ *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
+ *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
+ *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
+ *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
+ */
+void
+xfs_ilock(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
+
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+
+	if (lock_flags & XFS_IOLOCK_EXCL)
+		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+	else if (lock_flags & XFS_IOLOCK_SHARED)
+		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+
+	if (lock_flags & XFS_ILOCK_EXCL)
+		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
+	else if (lock_flags & XFS_ILOCK_SHARED)
+		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
+}
+
+/*
+ * This is just like xfs_ilock(), except that the caller
+ * is guaranteed not to sleep. It returns 1 if it gets
+ * the requested locks and 0 otherwise. If the IO lock is
+ * obtained but the inode lock cannot be, then the IO lock
+ * is dropped before returning.
+ *
+ * ip -- the inode being locked
+ * lock_flags -- this parameter indicates the inode's locks to be
+ *	to be locked. See the comment for xfs_ilock() for a list
+ *	of valid values.
+ */
+int
+xfs_ilock_nowait(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
+
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		if (!mrtryupdate(&ip->i_iolock))
+			goto out;
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		if (!mrtryaccess(&ip->i_iolock))
+			goto out;
+	}
+	if (lock_flags & XFS_ILOCK_EXCL) {
+		if (!mrtryupdate(&ip->i_lock))
+			goto out_undo_iolock;
+	} else if (lock_flags & XFS_ILOCK_SHARED) {
+		if (!mrtryaccess(&ip->i_lock))
+			goto out_undo_iolock;
+	}
+	return 1;
+
+ out_undo_iolock:
+	if (lock_flags & XFS_IOLOCK_EXCL)
+		mrunlock_excl(&ip->i_iolock);
+	else if (lock_flags & XFS_IOLOCK_SHARED)
+		mrunlock_shared(&ip->i_iolock);
+ out:
+	return 0;
+}
+
+/*
+ * xfs_iunlock() is used to drop the inode locks acquired with
+ * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
+ * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
+ * that we know which locks to drop.
+ *
+ * ip -- the inode being unlocked
+ * lock_flags -- this parameter indicates the inode's locks to be
+ *	to be unlocked. See the comment for xfs_ilock() for a list
+ *	of valid values for this parameter.
+ *
+ */
+void
+xfs_iunlock(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	/*
+	 * You can't set both SHARED and EXCL for the same lock,
+	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
+	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
+	 */
+	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
+	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
+	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
+	ASSERT(lock_flags != 0);
+
+	if (lock_flags & XFS_IOLOCK_EXCL)
+		mrunlock_excl(&ip->i_iolock);
+	else if (lock_flags & XFS_IOLOCK_SHARED)
+		mrunlock_shared(&ip->i_iolock);
+
+	if (lock_flags & XFS_ILOCK_EXCL)
+		mrunlock_excl(&ip->i_lock);
+	else if (lock_flags & XFS_ILOCK_SHARED)
+		mrunlock_shared(&ip->i_lock);
+
+	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
+}
+
+/*
+ * give up write locks. the i/o lock cannot be held nested
+ * if it is being demoted.
+ */
+void
+xfs_ilock_demote(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
+	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
+
+	if (lock_flags & XFS_ILOCK_EXCL)
+		mrdemote(&ip->i_lock);
+	if (lock_flags & XFS_IOLOCK_EXCL)
+		mrdemote(&ip->i_iolock);
+
+	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
+}
+
+#ifdef DEBUG
+int
+xfs_isilocked(
+	xfs_inode_t	*ip,
+	uint		lock_flags)
+{
+	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
+		if (!(lock_flags & XFS_ILOCK_SHARED))
+			return !!ip->i_lock.mr_writer;
+		return rwsem_is_locked(&ip->i_lock.mr_lock);
+	}
+
+	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
+		if (!(lock_flags & XFS_IOLOCK_SHARED))
+			return !!ip->i_iolock.mr_writer;
+		return rwsem_is_locked(&ip->i_iolock.mr_lock);
+	}
+
+	ASSERT(0);
+	return 0;
+}
+#endif
+
+void
+__xfs_iflock(
+	struct xfs_inode	*ip)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
+	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
+
+	do {
+		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		if (xfs_isiflocked(ip))
+			io_schedule();
+	} while (!xfs_iflock_nowait(ip));
+
+	finish_wait(wq, &wait.wait);
+}
+
 #ifdef DEBUG
 /*
  * Make sure that the extents in the given memory buffer