Diffstat (limited to 'kernel')
-rw-r--r--	kernel/futex.c	176
1 file changed, 6 insertions(+), 170 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 98092c9817f4..449def8074fe 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -104,10 +104,6 @@ struct futex_q {
 	/* Key which the futex is hashed on: */
 	union futex_key key;
 
-	/* For fd, sigio sent using these: */
-	int fd;
-	struct file *filp;
-
 	/* Optional priority inheritance state: */
 	struct futex_pi_state *pi_state;
 	struct task_struct *task;
@@ -126,9 +122,6 @@ struct futex_hash_bucket {
 
 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
 
-/* Futex-fs vfsmount entry: */
-static struct vfsmount *futex_mnt;
-
 /*
  * Take mm->mmap_sem, when futex is shared
  */
@@ -610,8 +603,6 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 static void wake_futex(struct futex_q *q)
 {
 	plist_del(&q->list, &q->list.plist);
-	if (q->filp)
-		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
 	/*
 	 * The lock in wake_up_all() is a crucial memory barrier after the
 	 * plist_del() and also before assigning to q->lock_ptr.
@@ -988,14 +979,10 @@ out:
 }
 
 /* The key must be already stored in q->key. */
-static inline struct futex_hash_bucket *
-queue_lock(struct futex_q *q, int fd, struct file *filp)
+static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 {
 	struct futex_hash_bucket *hb;
 
-	q->fd = fd;
-	q->filp = filp;
-
 	init_waitqueue_head(&q->waiters);
 
 	get_futex_key_refs(&q->key);
@@ -1006,7 +993,7 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
 	return hb;
 }
 
-static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
 
@@ -1041,15 +1028,6 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
  * exactly once. They are called with the hashed spinlock held.
  */
 
-/* The key must be already stored in q->key. */
-static void queue_me(struct futex_q *q, int fd, struct file *filp)
-{
-	struct futex_hash_bucket *hb;
-
-	hb = queue_lock(q, fd, filp);
-	__queue_me(q, hb);
-}
-
 /* Return 1 if we were still queued (ie. 0 means we were woken) */
 static int unqueue_me(struct futex_q *q)
 {
@@ -1194,7 +1172,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 	if (unlikely(ret != 0))
 		goto out_release_sem;
 
-	hb = queue_lock(&q, -1, NULL);
+	hb = queue_lock(&q);
 
 	/*
 	 * Access the page AFTER the futex is queued.
@@ -1238,7 +1216,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		goto out_unlock_release_sem;
 
 	/* Only actually queue if *uaddr contained val. */
-	__queue_me(&q, hb);
+	queue_me(&q, hb);
 
 	/*
 	 * Now the futex is queued and we have checked the data, we
@@ -1386,7 +1364,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		goto out_release_sem;
 
  retry_unlocked:
-	hb = queue_lock(&q, -1, NULL);
+	hb = queue_lock(&q);
 
  retry_locked:
 	ret = lock_taken = 0;
@@ -1499,7 +1477,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 	/*
 	 * Only actually queue now that the atomic ops are done:
 	 */
-	__queue_me(&q, hb);
+	queue_me(&q, hb);
 
 	/*
 	 * Now the futex is queued and we have checked the data, we
@@ -1746,121 +1724,6 @@ pi_faulted:
 	return ret;
 }
 
-static int futex_close(struct inode *inode, struct file *filp)
-{
-	struct futex_q *q = filp->private_data;
-
-	unqueue_me(q);
-	kfree(q);
-
-	return 0;
-}
-
-/* This is one-shot: once it's gone off you need a new fd */
-static unsigned int futex_poll(struct file *filp,
-			       struct poll_table_struct *wait)
-{
-	struct futex_q *q = filp->private_data;
-	int ret = 0;
-
-	poll_wait(filp, &q->waiters, wait);
-
-	/*
-	 * plist_node_empty() is safe here without any lock.
-	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
-	 */
-	if (plist_node_empty(&q->list))
-		ret = POLLIN | POLLRDNORM;
-
-	return ret;
-}
-
-static const struct file_operations futex_fops = {
-	.release	= futex_close,
-	.poll		= futex_poll,
-};
-
-/*
- * Signal allows caller to avoid the race which would occur if they
- * set the sigio stuff up afterwards.
- */
-static int futex_fd(u32 __user *uaddr, int signal)
-{
-	struct futex_q *q;
-	struct file *filp;
-	int ret, err;
-	struct rw_semaphore *fshared;
-	static unsigned long printk_interval;
-
-	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
-		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
-		       "will be removed from the kernel in June 2007\n",
-		       current->comm);
-	}
-
-	ret = -EINVAL;
-	if (!valid_signal(signal))
-		goto out;
-
-	ret = get_unused_fd();
-	if (ret < 0)
-		goto out;
-	filp = get_empty_filp();
-	if (!filp) {
-		put_unused_fd(ret);
-		ret = -ENFILE;
-		goto out;
-	}
-	filp->f_op = &futex_fops;
-	filp->f_path.mnt = mntget(futex_mnt);
-	filp->f_path.dentry = dget(futex_mnt->mnt_root);
-	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
-
-	if (signal) {
-		err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
-		if (err < 0) {
-			goto error;
-		}
-		filp->f_owner.signum = signal;
-	}
-
-	q = kmalloc(sizeof(*q), GFP_KERNEL);
-	if (!q) {
-		err = -ENOMEM;
-		goto error;
-	}
-	q->pi_state = NULL;
-
-	fshared = &current->mm->mmap_sem;
-	down_read(fshared);
-	err = get_futex_key(uaddr, fshared, &q->key);
-
-	if (unlikely(err != 0)) {
-		up_read(fshared);
-		kfree(q);
-		goto error;
-	}
-
-	/*
-	 * queue_me() must be called before releasing mmap_sem, because
-	 * key->shared.inode needs to be referenced while holding it.
-	 */
-	filp->private_data = q;
-
-	queue_me(q, ret, filp);
-	up_read(fshared);
-
-	/* Now we map fd to filp, so userspace can access it */
-	fd_install(ret, filp);
-out:
-	return ret;
-error:
-	put_unused_fd(ret);
-	put_filp(filp);
-	ret = err;
-	goto out;
-}
-
 /*
  * Support for robust futexes: the kernel cleans up held futexes at
  * thread exit time.
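
For context, the hunk above deletes the whole FUTEX_FD machinery: futex_close(), futex_poll(), futex_fops and futex_fd() itself. The sketch below is a hypothetical userspace illustration (not code from this patch) of the one-shot pattern that interface supported: FUTEX_FD queued a waiter and returned a pollable fd, futex_poll() reported POLLIN once a wake dequeued the waiter, and the fd then had to be recreated. The futex-word variable name is invented for illustration; once this patch is applied the FUTEX_FD case is gone from do_futex() and the call simply fails.

	/*
	 * Hypothetical sketch of the removed one-shot FUTEX_FD pattern.
	 * Expected to fail on kernels carrying this patch.
	 */
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static int futex_word;		/* illustrative futex word */

	int main(void)
	{
		/* val (here 0) was the signal: non-zero meant
		 * F_SETOWN(getpid()) + F_SETSIG(val), as futex_fd() shows */
		int fd = syscall(SYS_futex, &futex_word, FUTEX_FD, 0,
				 NULL, NULL, 0);
		if (fd < 0) {
			perror("FUTEX_FD");	/* expected after this patch */
			return 1;
		}

		/* futex_poll() reported POLLIN once a FUTEX_WAKE from
		 * another task dequeued us; the fd was one-shot */
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		poll(&pfd, 1, -1);

		close(fd);	/* futex_close() unqueued and freed q */
		return 0;
	}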
@@ -2092,10 +1955,6 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 	case FUTEX_WAKE_BITSET:
 		ret = futex_wake(uaddr, fshared, val, val3);
 		break;
-	case FUTEX_FD:
-		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
-		ret = futex_fd(uaddr, val);
-		break;
 	case FUTEX_REQUEUE:
 		ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
 		break;
@@ -2156,19 +2015,6 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
-static int futexfs_get_sb(struct file_system_type *fs_type,
-			  int flags, const char *dev_name, void *data,
-			  struct vfsmount *mnt)
-{
-	return get_sb_pseudo(fs_type, "futex", NULL, FUTEXFS_SUPER_MAGIC, mnt);
-}
-
-static struct file_system_type futex_fs_type = {
-	.name		= "futexfs",
-	.get_sb		= futexfs_get_sb,
-	.kill_sb	= kill_anon_super,
-};
-
 static int __init futex_init(void)
 {
 	u32 curval;
@@ -2193,16 +2039,6 @@ static int __init futex_init(void)
 		spin_lock_init(&futex_queues[i].lock);
 	}
 
-	i = register_filesystem(&futex_fs_type);
-	if (i)
-		return i;
-
-	futex_mnt = kern_mount(&futex_fs_type);
-	if (IS_ERR(futex_mnt)) {
-		unregister_filesystem(&futex_fs_type);
-		return PTR_ERR(futex_mnt);
-	}
-
 	return 0;
 }
 __initcall(futex_init);
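
The wait/wake path that survives this cleanup is behaviourally unchanged; queue_lock()/queue_me() now serve only futex_wait() and futex_lock_pi(). Below is a minimal, hypothetical userspace sketch (not part of the patch; names and timing invented) of the FUTEX_WAIT/FUTEX_WAKE pairing those functions implement.

	/*
	 * Hypothetical sketch of the surviving wait/wake pairing.
	 * Build with -lpthread.
	 */
	#include <limits.h>
	#include <pthread.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static volatile int word;	/* the futex word at uaddr */

	static void *waker(void *unused)
	{
		sleep(1);
		word = 1;
		/* futex_wake() walks the hash bucket and wakes waiters */
		syscall(SYS_futex, &word, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waker, NULL);
		/* futex_wait() queues us only while *uaddr still equals 0
		 * ("Only actually queue if *uaddr contained val" above) */
		while (word == 0)
			syscall(SYS_futex, &word, FUTEX_WAIT, 0, NULL, NULL, 0);
		pthread_join(t, NULL);
		return 0;
	}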