author	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 12:38:12 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2010-10-27 16:06:17 -0400
commit	f7347ce4ee7c65415f84be915c018473e7076f31 (patch)
tree	613ce14f088ad00bdbc77cdfb686a40a4851180f /fs
parent	c5b1f0d92c36851aca09ac6c7c0c4f9690ac14f3 (diff)
fasync: re-organize fasync entry insertion to allow it under a spinlock
You currently cannot use "fasync_helper()" in an atomic environment to
insert a new fasync entry, because it will need to allocate the new
"struct fasync_struct".

Yet fcntl_setlease() wants to call this under lock_flocks(), which is in
the process of being converted from the BKL to a spinlock.

To fix this, this patch abstracts out the actual fasync list insertion
and the fasync allocations into functions of their own, and teaches
fs/locks.c to pre-allocate the fasync_struct entry. That way the actual
list insertion can happen while holding the required spinlock.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bfields@redhat.com: rebase on top of my changes to Arnd's patch]
Tested-by: J. Bruce Fields <bfields@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
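The calling pattern this enables, as a minimal sketch in C (assumptions:
'my_lock' and 'my_fasync_list' are hypothetical caller-owned names, not
part of the patch; the helpers behave as defined in the fs/fcntl.c hunks
below):

	struct fasync_struct *new, *old;

	new = fasync_alloc();	/* GFP_KERNEL: may sleep, so allocate before locking */
	if (!new)
		return -ENOMEM;

	spin_lock(&my_lock);	/* atomic from here on; no allocation needed */
	old = fasync_insert_entry(fd, filp, &my_fasync_list, new);
	spin_unlock(&my_lock);

	if (old)		/* an existing entry was updated; 'new' went unused */
		fasync_free(new);

This is exactly the shape of the fcntl_setlease() change in the
fs/locks.c hunks below, with lock_flocks() in place of the hypothetical
my_lock.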
Diffstat (limited to 'fs')
-rw-r--r--	fs/fcntl.c	66
-rw-r--r--	fs/locks.c	18
2 files changed, 67 insertions, 17 deletions
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f8cc34f542c3..dcdbc6f5c33b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
 	struct fasync_struct *fa, **fp;
 	int result = 0;
@@ -666,21 +666,28 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 	return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+void fasync_free(struct fasync_struct *new)
 {
-	struct fasync_struct *new, *fa, **fp;
-	int result = 0;
+	kmem_cache_free(fasync_cache, new);
+}
 
-	new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
+/*
+ * Insert a new entry into the fasync list.  Return the pointer to the
+ * old one if we didn't use the new one.
+ */
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
+{
+	struct fasync_struct *fa, **fp;
 
 	spin_lock(&filp->f_lock);
 	spin_lock(&fasync_lock);
@@ -691,8 +698,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
 		spin_lock_irq(&fa->fa_lock);
 		fa->fa_fd = fd;
 		spin_unlock_irq(&fa->fa_lock);
-
-		kmem_cache_free(fasync_cache, new);
 		goto out;
 	}
 
@@ -702,13 +707,42 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
 	new->fa_fd = fd;
 	new->fa_next = *fapp;
 	rcu_assign_pointer(*fapp, new);
-	result = 1;
 	filp->f_flags |= FASYNC;
 
 out:
 	spin_unlock(&fasync_lock);
 	spin_unlock(&filp->f_lock);
-	return result;
+	return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+	struct fasync_struct *new;
+
+	new = fasync_alloc();
+	if (!new)
+		return -ENOMEM;
+
+	/*
+	 * fasync_insert_entry() returns the old (update) entry if
+	 * it existed.
+	 *
+	 * So free the (unused) new entry and return 0 to let the
+	 * caller know that we didn't add any new fasync entries.
+	 */
+	if (fasync_insert_entry(fd, filp, fapp, new)) {
+		fasync_free(new);
+		return 0;
+	}
+
+	return 1;
 }
 
 /*
diff --git a/fs/locks.c b/fs/locks.c
index 0391d2ff5a4e..85fd9ce1abae 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1505,6 +1505,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
 	struct file_lock *fl;
+	struct fasync_struct *new;
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int error;
 
@@ -1512,12 +1513,25 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	if (IS_ERR(fl))
 		return PTR_ERR(fl);
 
+	new = fasync_alloc();
+	if (!new) {
+		locks_free_lock(fl);
+		return -ENOMEM;
+	}
 	lock_flocks();
 	error = __vfs_setlease(filp, arg, &fl);
 	if (error || arg == F_UNLCK)
 		goto out_unlock;
 
-	error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
+	/*
+	 * fasync_insert_entry() returns the old entry if any.
+	 * If there was no old entry, then it used 'new' and
+	 * inserted it into the fasync list. Clear new so that
+	 * we don't release it here.
+	 */
+	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+		new = NULL;
+
 	if (error < 0) {
 		/* remove lease just inserted by setlease */
 		fl->fl_type = F_UNLCK | F_INPROGRESS;
@@ -1529,6 +1543,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
 	unlock_flocks();
+	if (new)
+		fasync_free(new);
 	return error;
 }
 
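One asymmetry worth noting, sketched under the same assumptions as
above: fasync_free() is only safe for entries that were never published
on a list, which is why the patch also drops the 'static' from
fasync_remove_entry() - an entry that was inserted must be torn down
through it so the actual free is deferred to RCU (see fasync_free_rcu
in fs/fcntl.c).

	if (fasync_insert_entry(fd, filp, &my_fasync_list, new))
		fasync_free(new);	/* never inserted: immediate free is safe */

	fasync_remove_entry(filp, &my_fasync_list);	/* was inserted: RCU-deferred teardown */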