Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/futex.c | 235
-rw-r--r-- | kernel/hw_breakpoint.c | 2
-rw-r--r-- | kernel/kprobes.c | 565
-rw-r--r-- | kernel/perf_event.c | 573
-rw-r--r-- | kernel/power/suspend.c | 3
-rw-r--r-- | kernel/rcutiny.c | 105
-rw-r--r-- | kernel/rcutiny_plugin.h | 433
-rw-r--r-- | kernel/rcutorture.c | 270
-rw-r--r-- | kernel/rcutree.c | 156
-rw-r--r-- | kernel/rcutree.h | 61
-rw-r--r-- | kernel/rcutree_plugin.h | 135
-rw-r--r-- | kernel/rcutree_trace.c | 12
-rw-r--r-- | kernel/sched.c | 71
-rw-r--r-- | kernel/srcu.c | 8
-rw-r--r-- | kernel/sysctl.c | 16
-rw-r--r-- | kernel/sysctl_binary.c | 1
-rw-r--r-- | kernel/trace/Kconfig | 15
-rw-r--r-- | kernel/trace/power-traces.c | 5
-rw-r--r-- | kernel/trace/trace_event_perf.c | 31
-rw-r--r-- | kernel/trace/trace_events.c | 6
-rw-r--r-- | kernel/trace/trace_export.c | 14
-rw-r--r-- | kernel/watchdog.c | 9
22 files changed, 2000 insertions, 726 deletions
diff --git a/kernel/futex.c b/kernel/futex.c
index 40a8777a27d..3019b92e691 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -69,6 +69,14 @@ int __read_mostly futex_cmpxchg_enabled; | |||
69 | #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) | 69 | #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Futex flags used to encode options to functions and preserve them across | ||
73 | * restarts. | ||
74 | */ | ||
75 | #define FLAGS_SHARED 0x01 | ||
76 | #define FLAGS_CLOCKRT 0x02 | ||
77 | #define FLAGS_HAS_TIMEOUT 0x04 | ||
78 | |||
79 | /* | ||
72 | * Priority Inheritance state: | 80 | * Priority Inheritance state: |
73 | */ | 81 | */ |
74 | struct futex_pi_state { | 82 | struct futex_pi_state { |
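The hunk above hoists the FLAGS_SHARED, FLAGS_CLOCKRT and FLAGS_HAS_TIMEOUT definitions to the top of the file (they previously lived next to the restart_block code) so that every futex function can take a single flags word instead of separate fshared/clockrt arguments. A minimal sketch of how such a flags word is derived from the futex op word, mirroring the do_futex() change near the end of this file's diff; the helper name is made up for illustration:

/* Sketch only: futex_op_to_flags() is a hypothetical helper, not part of this patch. */
static unsigned int futex_op_to_flags(int op)
{
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;	/* PROCESS_SHARED futex: key must work across address spaces */
	if (op & FUTEX_CLOCK_REALTIME)
		flags |= FLAGS_CLOCKRT;	/* absolute timeout measured on CLOCK_REALTIME */

	return flags;
}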
@@ -123,6 +131,12 @@ struct futex_q { | |||
123 | u32 bitset; | 131 | u32 bitset; |
124 | }; | 132 | }; |
125 | 133 | ||
134 | static const struct futex_q futex_q_init = { | ||
135 | /* list gets initialized in queue_me()*/ | ||
136 | .key = FUTEX_KEY_INIT, | ||
137 | .bitset = FUTEX_BITSET_MATCH_ANY | ||
138 | }; | ||
139 | |||
126 | /* | 140 | /* |
127 | * Hash buckets are shared by all the futex_keys that hash to the same | 141 | * Hash buckets are shared by all the futex_keys that hash to the same |
128 | * location. Each key may have multiple futex_q structures, one for each task | 142 | * location. Each key may have multiple futex_q structures, one for each task |
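The futex_q_init template added above pairs with the removals further down: instead of each call site clearing pi_state, rt_waiter and requeue_pi_key by hand, an on-stack futex_q is now declared as struct futex_q q = futex_q_init; and only the fields that differ are set afterwards. A small self-contained sketch of the same const-template initializer idiom (the struct is hypothetical; only the pattern is taken from the patch):

#include <stdio.h>

/* Hypothetical example struct, not the kernel's futex_q. */
struct waiter {
	void *pi_state;
	void *rt_waiter;
	unsigned int bitset;
};

/* Const template: every field not named here is implicitly zero/NULL. */
static const struct waiter waiter_init = {
	.bitset = 0xffffffffu,		/* match-any default */
};

int main(void)
{
	struct waiter w = waiter_init;	/* one assignment replaces several per-field ones */

	w.bitset = 0x1;			/* override only what differs */
	printf("bitset=%#x pi_state=%p rt_waiter=%p\n", w.bitset, w.pi_state, w.rt_waiter);
	return 0;
}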
@@ -283,8 +297,7 @@ again: | |||
283 | return 0; | 297 | return 0; |
284 | } | 298 | } |
285 | 299 | ||
286 | static inline | 300 | static inline void put_futex_key(union futex_key *key) |
287 | void put_futex_key(int fshared, union futex_key *key) | ||
288 | { | 301 | { |
289 | drop_futex_key_refs(key); | 302 | drop_futex_key_refs(key); |
290 | } | 303 | } |
@@ -870,7 +883,8 @@ double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2) | |||
870 | /* | 883 | /* |
871 | * Wake up waiters matching bitset queued on this futex (uaddr). | 884 | * Wake up waiters matching bitset queued on this futex (uaddr). |
872 | */ | 885 | */ |
873 | static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | 886 | static int |
887 | futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) | ||
874 | { | 888 | { |
875 | struct futex_hash_bucket *hb; | 889 | struct futex_hash_bucket *hb; |
876 | struct futex_q *this, *next; | 890 | struct futex_q *this, *next; |
@@ -881,7 +895,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | |||
881 | if (!bitset) | 895 | if (!bitset) |
882 | return -EINVAL; | 896 | return -EINVAL; |
883 | 897 | ||
884 | ret = get_futex_key(uaddr, fshared, &key); | 898 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); |
885 | if (unlikely(ret != 0)) | 899 | if (unlikely(ret != 0)) |
886 | goto out; | 900 | goto out; |
887 | 901 | ||
@@ -907,7 +921,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | |||
907 | } | 921 | } |
908 | 922 | ||
909 | spin_unlock(&hb->lock); | 923 | spin_unlock(&hb->lock); |
910 | put_futex_key(fshared, &key); | 924 | put_futex_key(&key); |
911 | out: | 925 | out: |
912 | return ret; | 926 | return ret; |
913 | } | 927 | } |
@@ -917,7 +931,7 @@ out: | |||
917 | * to this virtual address: | 931 | * to this virtual address: |
918 | */ | 932 | */ |
919 | static int | 933 | static int |
920 | futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | 934 | futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, |
921 | int nr_wake, int nr_wake2, int op) | 935 | int nr_wake, int nr_wake2, int op) |
922 | { | 936 | { |
923 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; | 937 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
@@ -927,10 +941,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | |||
927 | int ret, op_ret; | 941 | int ret, op_ret; |
928 | 942 | ||
929 | retry: | 943 | retry: |
930 | ret = get_futex_key(uaddr1, fshared, &key1); | 944 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); |
931 | if (unlikely(ret != 0)) | 945 | if (unlikely(ret != 0)) |
932 | goto out; | 946 | goto out; |
933 | ret = get_futex_key(uaddr2, fshared, &key2); | 947 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); |
934 | if (unlikely(ret != 0)) | 948 | if (unlikely(ret != 0)) |
935 | goto out_put_key1; | 949 | goto out_put_key1; |
936 | 950 | ||
@@ -962,11 +976,11 @@ retry_private: | |||
962 | if (ret) | 976 | if (ret) |
963 | goto out_put_keys; | 977 | goto out_put_keys; |
964 | 978 | ||
965 | if (!fshared) | 979 | if (!(flags & FLAGS_SHARED)) |
966 | goto retry_private; | 980 | goto retry_private; |
967 | 981 | ||
968 | put_futex_key(fshared, &key2); | 982 | put_futex_key(&key2); |
969 | put_futex_key(fshared, &key1); | 983 | put_futex_key(&key1); |
970 | goto retry; | 984 | goto retry; |
971 | } | 985 | } |
972 | 986 | ||
@@ -996,9 +1010,9 @@ retry_private: | |||
996 | 1010 | ||
997 | double_unlock_hb(hb1, hb2); | 1011 | double_unlock_hb(hb1, hb2); |
998 | out_put_keys: | 1012 | out_put_keys: |
999 | put_futex_key(fshared, &key2); | 1013 | put_futex_key(&key2); |
1000 | out_put_key1: | 1014 | out_put_key1: |
1001 | put_futex_key(fshared, &key1); | 1015 | put_futex_key(&key1); |
1002 | out: | 1016 | out: |
1003 | return ret; | 1017 | return ret; |
1004 | } | 1018 | } |
@@ -1133,13 +1147,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, | |||
1133 | /** | 1147 | /** |
1134 | * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 | 1148 | * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 |
1135 | * @uaddr1: source futex user address | 1149 | * @uaddr1: source futex user address |
1136 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | 1150 | * @flags: futex flags (FLAGS_SHARED, etc.) |
1137 | * @uaddr2: target futex user address | 1151 | * @uaddr2: target futex user address |
1138 | * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) | 1152 | * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) |
1139 | * @nr_requeue: number of waiters to requeue (0-INT_MAX) | 1153 | * @nr_requeue: number of waiters to requeue (0-INT_MAX) |
1140 | * @cmpval: @uaddr1 expected value (or %NULL) | 1154 | * @cmpval: @uaddr1 expected value (or %NULL) |
1141 | * @requeue_pi: if we are attempting to requeue from a non-pi futex to a | 1155 | * @requeue_pi: if we are attempting to requeue from a non-pi futex to a |
1142 | * pi futex (pi to pi requeue is not supported) | 1156 | * pi futex (pi to pi requeue is not supported) |
1143 | * | 1157 | * |
1144 | * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire | 1158 | * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire |
1145 | * uaddr2 atomically on behalf of the top waiter. | 1159 | * uaddr2 atomically on behalf of the top waiter. |
@@ -1148,9 +1162,9 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, | |||
1148 | * >=0 - on success, the number of tasks requeued or woken | 1162 | * >=0 - on success, the number of tasks requeued or woken |
1149 | * <0 - on error | 1163 | * <0 - on error |
1150 | */ | 1164 | */ |
1151 | static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | 1165 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags, |
1152 | int nr_wake, int nr_requeue, u32 *cmpval, | 1166 | u32 __user *uaddr2, int nr_wake, int nr_requeue, |
1153 | int requeue_pi) | 1167 | u32 *cmpval, int requeue_pi) |
1154 | { | 1168 | { |
1155 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; | 1169 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
1156 | int drop_count = 0, task_count = 0, ret; | 1170 | int drop_count = 0, task_count = 0, ret; |
@@ -1191,10 +1205,10 @@ retry: | |||
1191 | pi_state = NULL; | 1205 | pi_state = NULL; |
1192 | } | 1206 | } |
1193 | 1207 | ||
1194 | ret = get_futex_key(uaddr1, fshared, &key1); | 1208 | ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1); |
1195 | if (unlikely(ret != 0)) | 1209 | if (unlikely(ret != 0)) |
1196 | goto out; | 1210 | goto out; |
1197 | ret = get_futex_key(uaddr2, fshared, &key2); | 1211 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); |
1198 | if (unlikely(ret != 0)) | 1212 | if (unlikely(ret != 0)) |
1199 | goto out_put_key1; | 1213 | goto out_put_key1; |
1200 | 1214 | ||
@@ -1216,11 +1230,11 @@ retry_private: | |||
1216 | if (ret) | 1230 | if (ret) |
1217 | goto out_put_keys; | 1231 | goto out_put_keys; |
1218 | 1232 | ||
1219 | if (!fshared) | 1233 | if (!(flags & FLAGS_SHARED)) |
1220 | goto retry_private; | 1234 | goto retry_private; |
1221 | 1235 | ||
1222 | put_futex_key(fshared, &key2); | 1236 | put_futex_key(&key2); |
1223 | put_futex_key(fshared, &key1); | 1237 | put_futex_key(&key1); |
1224 | goto retry; | 1238 | goto retry; |
1225 | } | 1239 | } |
1226 | if (curval != *cmpval) { | 1240 | if (curval != *cmpval) { |
@@ -1260,8 +1274,8 @@ retry_private: | |||
1260 | break; | 1274 | break; |
1261 | case -EFAULT: | 1275 | case -EFAULT: |
1262 | double_unlock_hb(hb1, hb2); | 1276 | double_unlock_hb(hb1, hb2); |
1263 | put_futex_key(fshared, &key2); | 1277 | put_futex_key(&key2); |
1264 | put_futex_key(fshared, &key1); | 1278 | put_futex_key(&key1); |
1265 | ret = fault_in_user_writeable(uaddr2); | 1279 | ret = fault_in_user_writeable(uaddr2); |
1266 | if (!ret) | 1280 | if (!ret) |
1267 | goto retry; | 1281 | goto retry; |
@@ -1269,8 +1283,8 @@ retry_private: | |||
1269 | case -EAGAIN: | 1283 | case -EAGAIN: |
1270 | /* The owner was exiting, try again. */ | 1284 | /* The owner was exiting, try again. */ |
1271 | double_unlock_hb(hb1, hb2); | 1285 | double_unlock_hb(hb1, hb2); |
1272 | put_futex_key(fshared, &key2); | 1286 | put_futex_key(&key2); |
1273 | put_futex_key(fshared, &key1); | 1287 | put_futex_key(&key1); |
1274 | cond_resched(); | 1288 | cond_resched(); |
1275 | goto retry; | 1289 | goto retry; |
1276 | default: | 1290 | default: |
@@ -1352,9 +1366,9 @@ out_unlock: | |||
1352 | drop_futex_key_refs(&key1); | 1366 | drop_futex_key_refs(&key1); |
1353 | 1367 | ||
1354 | out_put_keys: | 1368 | out_put_keys: |
1355 | put_futex_key(fshared, &key2); | 1369 | put_futex_key(&key2); |
1356 | out_put_key1: | 1370 | out_put_key1: |
1357 | put_futex_key(fshared, &key1); | 1371 | put_futex_key(&key1); |
1358 | out: | 1372 | out: |
1359 | if (pi_state != NULL) | 1373 | if (pi_state != NULL) |
1360 | free_pi_state(pi_state); | 1374 | free_pi_state(pi_state); |
@@ -1494,7 +1508,7 @@ static void unqueue_me_pi(struct futex_q *q) | |||
1494 | * private futexes. | 1508 | * private futexes. |
1495 | */ | 1509 | */ |
1496 | static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, | 1510 | static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
1497 | struct task_struct *newowner, int fshared) | 1511 | struct task_struct *newowner) |
1498 | { | 1512 | { |
1499 | u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; | 1513 | u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; |
1500 | struct futex_pi_state *pi_state = q->pi_state; | 1514 | struct futex_pi_state *pi_state = q->pi_state; |
@@ -1587,20 +1601,11 @@ handle_fault: | |||
1587 | goto retry; | 1601 | goto retry; |
1588 | } | 1602 | } |
1589 | 1603 | ||
1590 | /* | ||
1591 | * In case we must use restart_block to restart a futex_wait, | ||
1592 | * we encode in the 'flags' shared capability | ||
1593 | */ | ||
1594 | #define FLAGS_SHARED 0x01 | ||
1595 | #define FLAGS_CLOCKRT 0x02 | ||
1596 | #define FLAGS_HAS_TIMEOUT 0x04 | ||
1597 | |||
1598 | static long futex_wait_restart(struct restart_block *restart); | 1604 | static long futex_wait_restart(struct restart_block *restart); |
1599 | 1605 | ||
1600 | /** | 1606 | /** |
1601 | * fixup_owner() - Post lock pi_state and corner case management | 1607 | * fixup_owner() - Post lock pi_state and corner case management |
1602 | * @uaddr: user address of the futex | 1608 | * @uaddr: user address of the futex |
1603 | * @fshared: whether the futex is shared (1) or not (0) | ||
1604 | * @q: futex_q (contains pi_state and access to the rt_mutex) | 1609 | * @q: futex_q (contains pi_state and access to the rt_mutex) |
1605 | * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) | 1610 | * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) |
1606 | * | 1611 | * |
@@ -1613,8 +1618,7 @@ static long futex_wait_restart(struct restart_block *restart); | |||
1613 | * 0 - success, lock not taken | 1618 | * 0 - success, lock not taken |
1614 | * <0 - on error (-EFAULT) | 1619 | * <0 - on error (-EFAULT) |
1615 | */ | 1620 | */ |
1616 | static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q, | 1621 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
1617 | int locked) | ||
1618 | { | 1622 | { |
1619 | struct task_struct *owner; | 1623 | struct task_struct *owner; |
1620 | int ret = 0; | 1624 | int ret = 0; |
@@ -1625,7 +1629,7 @@ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q, | |||
1625 | * did a lock-steal - fix up the PI-state in that case: | 1629 | * did a lock-steal - fix up the PI-state in that case: |
1626 | */ | 1630 | */ |
1627 | if (q->pi_state->owner != current) | 1631 | if (q->pi_state->owner != current) |
1628 | ret = fixup_pi_state_owner(uaddr, q, current, fshared); | 1632 | ret = fixup_pi_state_owner(uaddr, q, current); |
1629 | goto out; | 1633 | goto out; |
1630 | } | 1634 | } |
1631 | 1635 | ||
@@ -1652,7 +1656,7 @@ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q, | |||
1652 | * lock. Fix the state up. | 1656 | * lock. Fix the state up. |
1653 | */ | 1657 | */ |
1654 | owner = rt_mutex_owner(&q->pi_state->pi_mutex); | 1658 | owner = rt_mutex_owner(&q->pi_state->pi_mutex); |
1655 | ret = fixup_pi_state_owner(uaddr, q, owner, fshared); | 1659 | ret = fixup_pi_state_owner(uaddr, q, owner); |
1656 | goto out; | 1660 | goto out; |
1657 | } | 1661 | } |
1658 | 1662 | ||
@@ -1715,7 +1719,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | |||
1715 | * futex_wait_setup() - Prepare to wait on a futex | 1719 | * futex_wait_setup() - Prepare to wait on a futex |
1716 | * @uaddr: the futex userspace address | 1720 | * @uaddr: the futex userspace address |
1717 | * @val: the expected value | 1721 | * @val: the expected value |
1718 | * @fshared: whether the futex is shared (1) or not (0) | 1722 | * @flags: futex flags (FLAGS_SHARED, etc.) |
1719 | * @q: the associated futex_q | 1723 | * @q: the associated futex_q |
1720 | * @hb: storage for hash_bucket pointer to be returned to caller | 1724 | * @hb: storage for hash_bucket pointer to be returned to caller |
1721 | * | 1725 | * |
@@ -1728,7 +1732,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | |||
1728 | * 0 - uaddr contains val and hb has been locked | 1732 | * 0 - uaddr contains val and hb has been locked |
1729 | * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlcoked | 1733 | * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlcoked |
1730 | */ | 1734 | */ |
1731 | static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, | 1735 | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
1732 | struct futex_q *q, struct futex_hash_bucket **hb) | 1736 | struct futex_q *q, struct futex_hash_bucket **hb) |
1733 | { | 1737 | { |
1734 | u32 uval; | 1738 | u32 uval; |
@@ -1752,8 +1756,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, | |||
1752 | * rare, but normal. | 1756 | * rare, but normal. |
1753 | */ | 1757 | */ |
1754 | retry: | 1758 | retry: |
1755 | q->key = FUTEX_KEY_INIT; | 1759 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key); |
1756 | ret = get_futex_key(uaddr, fshared, &q->key); | ||
1757 | if (unlikely(ret != 0)) | 1760 | if (unlikely(ret != 0)) |
1758 | return ret; | 1761 | return ret; |
1759 | 1762 | ||
@@ -1769,10 +1772,10 @@ retry_private: | |||
1769 | if (ret) | 1772 | if (ret) |
1770 | goto out; | 1773 | goto out; |
1771 | 1774 | ||
1772 | if (!fshared) | 1775 | if (!(flags & FLAGS_SHARED)) |
1773 | goto retry_private; | 1776 | goto retry_private; |
1774 | 1777 | ||
1775 | put_futex_key(fshared, &q->key); | 1778 | put_futex_key(&q->key); |
1776 | goto retry; | 1779 | goto retry; |
1777 | } | 1780 | } |
1778 | 1781 | ||
@@ -1783,32 +1786,29 @@ retry_private: | |||
1783 | 1786 | ||
1784 | out: | 1787 | out: |
1785 | if (ret) | 1788 | if (ret) |
1786 | put_futex_key(fshared, &q->key); | 1789 | put_futex_key(&q->key); |
1787 | return ret; | 1790 | return ret; |
1788 | } | 1791 | } |
1789 | 1792 | ||
1790 | static int futex_wait(u32 __user *uaddr, int fshared, | 1793 | static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, |
1791 | u32 val, ktime_t *abs_time, u32 bitset, int clockrt) | 1794 | ktime_t *abs_time, u32 bitset) |
1792 | { | 1795 | { |
1793 | struct hrtimer_sleeper timeout, *to = NULL; | 1796 | struct hrtimer_sleeper timeout, *to = NULL; |
1794 | struct restart_block *restart; | 1797 | struct restart_block *restart; |
1795 | struct futex_hash_bucket *hb; | 1798 | struct futex_hash_bucket *hb; |
1796 | struct futex_q q; | 1799 | struct futex_q q = futex_q_init; |
1797 | int ret; | 1800 | int ret; |
1798 | 1801 | ||
1799 | if (!bitset) | 1802 | if (!bitset) |
1800 | return -EINVAL; | 1803 | return -EINVAL; |
1801 | |||
1802 | q.pi_state = NULL; | ||
1803 | q.bitset = bitset; | 1804 | q.bitset = bitset; |
1804 | q.rt_waiter = NULL; | ||
1805 | q.requeue_pi_key = NULL; | ||
1806 | 1805 | ||
1807 | if (abs_time) { | 1806 | if (abs_time) { |
1808 | to = &timeout; | 1807 | to = &timeout; |
1809 | 1808 | ||
1810 | hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : | 1809 | hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
1811 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 1810 | CLOCK_REALTIME : CLOCK_MONOTONIC, |
1811 | HRTIMER_MODE_ABS); | ||
1812 | hrtimer_init_sleeper(to, current); | 1812 | hrtimer_init_sleeper(to, current); |
1813 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, | 1813 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
1814 | current->timer_slack_ns); | 1814 | current->timer_slack_ns); |
@@ -1819,7 +1819,7 @@ retry: | |||
1819 | * Prepare to wait on uaddr. On success, holds hb lock and increments | 1819 | * Prepare to wait on uaddr. On success, holds hb lock and increments |
1820 | * q.key refs. | 1820 | * q.key refs. |
1821 | */ | 1821 | */ |
1822 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 1822 | ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
1823 | if (ret) | 1823 | if (ret) |
1824 | goto out; | 1824 | goto out; |
1825 | 1825 | ||
@@ -1852,12 +1852,7 @@ retry: | |||
1852 | restart->futex.val = val; | 1852 | restart->futex.val = val; |
1853 | restart->futex.time = abs_time->tv64; | 1853 | restart->futex.time = abs_time->tv64; |
1854 | restart->futex.bitset = bitset; | 1854 | restart->futex.bitset = bitset; |
1855 | restart->futex.flags = FLAGS_HAS_TIMEOUT; | 1855 | restart->futex.flags = flags; |
1856 | |||
1857 | if (fshared) | ||
1858 | restart->futex.flags |= FLAGS_SHARED; | ||
1859 | if (clockrt) | ||
1860 | restart->futex.flags |= FLAGS_CLOCKRT; | ||
1861 | 1856 | ||
1862 | ret = -ERESTART_RESTARTBLOCK; | 1857 | ret = -ERESTART_RESTARTBLOCK; |
1863 | 1858 | ||
@@ -1873,7 +1868,6 @@ out: | |||
1873 | static long futex_wait_restart(struct restart_block *restart) | 1868 | static long futex_wait_restart(struct restart_block *restart) |
1874 | { | 1869 | { |
1875 | u32 __user *uaddr = restart->futex.uaddr; | 1870 | u32 __user *uaddr = restart->futex.uaddr; |
1876 | int fshared = 0; | ||
1877 | ktime_t t, *tp = NULL; | 1871 | ktime_t t, *tp = NULL; |
1878 | 1872 | ||
1879 | if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { | 1873 | if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
@@ -1881,11 +1875,9 @@ static long futex_wait_restart(struct restart_block *restart) | |||
1881 | tp = &t; | 1875 | tp = &t; |
1882 | } | 1876 | } |
1883 | restart->fn = do_no_restart_syscall; | 1877 | restart->fn = do_no_restart_syscall; |
1884 | if (restart->futex.flags & FLAGS_SHARED) | 1878 | |
1885 | fshared = 1; | 1879 | return (long)futex_wait(uaddr, restart->futex.flags, |
1886 | return (long)futex_wait(uaddr, fshared, restart->futex.val, tp, | 1880 | restart->futex.val, tp, restart->futex.bitset); |
1887 | restart->futex.bitset, | ||
1888 | restart->futex.flags & FLAGS_CLOCKRT); | ||
1889 | } | 1881 | } |
1890 | 1882 | ||
1891 | 1883 | ||
@@ -1895,12 +1887,12 @@ static long futex_wait_restart(struct restart_block *restart) | |||
1895 | * if there are waiters then it will block, it does PI, etc. (Due to | 1887 | * if there are waiters then it will block, it does PI, etc. (Due to |
1896 | * races the kernel might see a 0 value of the futex too.) | 1888 | * races the kernel might see a 0 value of the futex too.) |
1897 | */ | 1889 | */ |
1898 | static int futex_lock_pi(u32 __user *uaddr, int fshared, | 1890 | static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect, |
1899 | int detect, ktime_t *time, int trylock) | 1891 | ktime_t *time, int trylock) |
1900 | { | 1892 | { |
1901 | struct hrtimer_sleeper timeout, *to = NULL; | 1893 | struct hrtimer_sleeper timeout, *to = NULL; |
1902 | struct futex_hash_bucket *hb; | 1894 | struct futex_hash_bucket *hb; |
1903 | struct futex_q q; | 1895 | struct futex_q q = futex_q_init; |
1904 | int res, ret; | 1896 | int res, ret; |
1905 | 1897 | ||
1906 | if (refill_pi_state_cache()) | 1898 | if (refill_pi_state_cache()) |
@@ -1914,12 +1906,8 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | |||
1914 | hrtimer_set_expires(&to->timer, *time); | 1906 | hrtimer_set_expires(&to->timer, *time); |
1915 | } | 1907 | } |
1916 | 1908 | ||
1917 | q.pi_state = NULL; | ||
1918 | q.rt_waiter = NULL; | ||
1919 | q.requeue_pi_key = NULL; | ||
1920 | retry: | 1909 | retry: |
1921 | q.key = FUTEX_KEY_INIT; | 1910 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key); |
1922 | ret = get_futex_key(uaddr, fshared, &q.key); | ||
1923 | if (unlikely(ret != 0)) | 1911 | if (unlikely(ret != 0)) |
1924 | goto out; | 1912 | goto out; |
1925 | 1913 | ||
@@ -1941,7 +1929,7 @@ retry_private: | |||
1941 | * exit to complete. | 1929 | * exit to complete. |
1942 | */ | 1930 | */ |
1943 | queue_unlock(&q, hb); | 1931 | queue_unlock(&q, hb); |
1944 | put_futex_key(fshared, &q.key); | 1932 | put_futex_key(&q.key); |
1945 | cond_resched(); | 1933 | cond_resched(); |
1946 | goto retry; | 1934 | goto retry; |
1947 | default: | 1935 | default: |
@@ -1971,7 +1959,7 @@ retry_private: | |||
1971 | * Fixup the pi_state owner and possibly acquire the lock if we | 1959 | * Fixup the pi_state owner and possibly acquire the lock if we |
1972 | * haven't already. | 1960 | * haven't already. |
1973 | */ | 1961 | */ |
1974 | res = fixup_owner(uaddr, fshared, &q, !ret); | 1962 | res = fixup_owner(uaddr, &q, !ret); |
1975 | /* | 1963 | /* |
1976 | * If fixup_owner() returned an error, proprogate that. If it acquired | 1964 | * If fixup_owner() returned an error, proprogate that. If it acquired |
1977 | * the lock, clear our -ETIMEDOUT or -EINTR. | 1965 | * the lock, clear our -ETIMEDOUT or -EINTR. |
@@ -1995,7 +1983,7 @@ out_unlock_put_key: | |||
1995 | queue_unlock(&q, hb); | 1983 | queue_unlock(&q, hb); |
1996 | 1984 | ||
1997 | out_put_key: | 1985 | out_put_key: |
1998 | put_futex_key(fshared, &q.key); | 1986 | put_futex_key(&q.key); |
1999 | out: | 1987 | out: |
2000 | if (to) | 1988 | if (to) |
2001 | destroy_hrtimer_on_stack(&to->timer); | 1989 | destroy_hrtimer_on_stack(&to->timer); |
@@ -2008,10 +1996,10 @@ uaddr_faulted: | |||
2008 | if (ret) | 1996 | if (ret) |
2009 | goto out_put_key; | 1997 | goto out_put_key; |
2010 | 1998 | ||
2011 | if (!fshared) | 1999 | if (!(flags & FLAGS_SHARED)) |
2012 | goto retry_private; | 2000 | goto retry_private; |
2013 | 2001 | ||
2014 | put_futex_key(fshared, &q.key); | 2002 | put_futex_key(&q.key); |
2015 | goto retry; | 2003 | goto retry; |
2016 | } | 2004 | } |
2017 | 2005 | ||
@@ -2020,7 +2008,7 @@ uaddr_faulted: | |||
2020 | * This is the in-kernel slowpath: we look up the PI state (if any), | 2008 | * This is the in-kernel slowpath: we look up the PI state (if any), |
2021 | * and do the rt-mutex unlock. | 2009 | * and do the rt-mutex unlock. |
2022 | */ | 2010 | */ |
2023 | static int futex_unlock_pi(u32 __user *uaddr, int fshared) | 2011 | static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
2024 | { | 2012 | { |
2025 | struct futex_hash_bucket *hb; | 2013 | struct futex_hash_bucket *hb; |
2026 | struct futex_q *this, *next; | 2014 | struct futex_q *this, *next; |
@@ -2038,7 +2026,7 @@ retry: | |||
2038 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) | 2026 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) |
2039 | return -EPERM; | 2027 | return -EPERM; |
2040 | 2028 | ||
2041 | ret = get_futex_key(uaddr, fshared, &key); | 2029 | ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); |
2042 | if (unlikely(ret != 0)) | 2030 | if (unlikely(ret != 0)) |
2043 | goto out; | 2031 | goto out; |
2044 | 2032 | ||
@@ -2093,14 +2081,14 @@ retry: | |||
2093 | 2081 | ||
2094 | out_unlock: | 2082 | out_unlock: |
2095 | spin_unlock(&hb->lock); | 2083 | spin_unlock(&hb->lock); |
2096 | put_futex_key(fshared, &key); | 2084 | put_futex_key(&key); |
2097 | 2085 | ||
2098 | out: | 2086 | out: |
2099 | return ret; | 2087 | return ret; |
2100 | 2088 | ||
2101 | pi_faulted: | 2089 | pi_faulted: |
2102 | spin_unlock(&hb->lock); | 2090 | spin_unlock(&hb->lock); |
2103 | put_futex_key(fshared, &key); | 2091 | put_futex_key(&key); |
2104 | 2092 | ||
2105 | ret = fault_in_user_writeable(uaddr); | 2093 | ret = fault_in_user_writeable(uaddr); |
2106 | if (!ret) | 2094 | if (!ret) |
@@ -2160,7 +2148,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2160 | /** | 2148 | /** |
2161 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 | 2149 | * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
2162 | * @uaddr: the futex we initially wait on (non-pi) | 2150 | * @uaddr: the futex we initially wait on (non-pi) |
2163 | * @fshared: whether the futexes are shared (1) or not (0). They must be | 2151 | * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be |
2164 | * the same type, no requeueing from private to shared, etc. | 2152 | * the same type, no requeueing from private to shared, etc. |
2165 | * @val: the expected value of uaddr | 2153 | * @val: the expected value of uaddr |
2166 | * @abs_time: absolute timeout | 2154 | * @abs_time: absolute timeout |
@@ -2198,16 +2186,16 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2198 | * 0 - On success | 2186 | * 0 - On success |
2199 | * <0 - On error | 2187 | * <0 - On error |
2200 | */ | 2188 | */ |
2201 | static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | 2189 | static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
2202 | u32 val, ktime_t *abs_time, u32 bitset, | 2190 | u32 val, ktime_t *abs_time, u32 bitset, |
2203 | int clockrt, u32 __user *uaddr2) | 2191 | u32 __user *uaddr2) |
2204 | { | 2192 | { |
2205 | struct hrtimer_sleeper timeout, *to = NULL; | 2193 | struct hrtimer_sleeper timeout, *to = NULL; |
2206 | struct rt_mutex_waiter rt_waiter; | 2194 | struct rt_mutex_waiter rt_waiter; |
2207 | struct rt_mutex *pi_mutex = NULL; | 2195 | struct rt_mutex *pi_mutex = NULL; |
2208 | struct futex_hash_bucket *hb; | 2196 | struct futex_hash_bucket *hb; |
2209 | union futex_key key2; | 2197 | union futex_key key2 = FUTEX_KEY_INIT; |
2210 | struct futex_q q; | 2198 | struct futex_q q = futex_q_init; |
2211 | int res, ret; | 2199 | int res, ret; |
2212 | 2200 | ||
2213 | if (!bitset) | 2201 | if (!bitset) |
@@ -2215,8 +2203,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2215 | 2203 | ||
2216 | if (abs_time) { | 2204 | if (abs_time) { |
2217 | to = &timeout; | 2205 | to = &timeout; |
2218 | hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : | 2206 | hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
2219 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 2207 | CLOCK_REALTIME : CLOCK_MONOTONIC, |
2208 | HRTIMER_MODE_ABS); | ||
2220 | hrtimer_init_sleeper(to, current); | 2209 | hrtimer_init_sleeper(to, current); |
2221 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, | 2210 | hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
2222 | current->timer_slack_ns); | 2211 | current->timer_slack_ns); |
@@ -2229,12 +2218,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2229 | debug_rt_mutex_init_waiter(&rt_waiter); | 2218 | debug_rt_mutex_init_waiter(&rt_waiter); |
2230 | rt_waiter.task = NULL; | 2219 | rt_waiter.task = NULL; |
2231 | 2220 | ||
2232 | key2 = FUTEX_KEY_INIT; | 2221 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2); |
2233 | ret = get_futex_key(uaddr2, fshared, &key2); | ||
2234 | if (unlikely(ret != 0)) | 2222 | if (unlikely(ret != 0)) |
2235 | goto out; | 2223 | goto out; |
2236 | 2224 | ||
2237 | q.pi_state = NULL; | ||
2238 | q.bitset = bitset; | 2225 | q.bitset = bitset; |
2239 | q.rt_waiter = &rt_waiter; | 2226 | q.rt_waiter = &rt_waiter; |
2240 | q.requeue_pi_key = &key2; | 2227 | q.requeue_pi_key = &key2; |
@@ -2243,7 +2230,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2243 | * Prepare to wait on uaddr. On success, increments q.key (key1) ref | 2230 | * Prepare to wait on uaddr. On success, increments q.key (key1) ref |
2244 | * count. | 2231 | * count. |
2245 | */ | 2232 | */ |
2246 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 2233 | ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
2247 | if (ret) | 2234 | if (ret) |
2248 | goto out_key2; | 2235 | goto out_key2; |
2249 | 2236 | ||
@@ -2273,8 +2260,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2273 | */ | 2260 | */ |
2274 | if (q.pi_state && (q.pi_state->owner != current)) { | 2261 | if (q.pi_state && (q.pi_state->owner != current)) { |
2275 | spin_lock(q.lock_ptr); | 2262 | spin_lock(q.lock_ptr); |
2276 | ret = fixup_pi_state_owner(uaddr2, &q, current, | 2263 | ret = fixup_pi_state_owner(uaddr2, &q, current); |
2277 | fshared); | ||
2278 | spin_unlock(q.lock_ptr); | 2264 | spin_unlock(q.lock_ptr); |
2279 | } | 2265 | } |
2280 | } else { | 2266 | } else { |
@@ -2293,7 +2279,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2293 | * Fixup the pi_state owner and possibly acquire the lock if we | 2279 | * Fixup the pi_state owner and possibly acquire the lock if we |
2294 | * haven't already. | 2280 | * haven't already. |
2295 | */ | 2281 | */ |
2296 | res = fixup_owner(uaddr2, fshared, &q, !ret); | 2282 | res = fixup_owner(uaddr2, &q, !ret); |
2297 | /* | 2283 | /* |
2298 | * If fixup_owner() returned an error, proprogate that. If it | 2284 | * If fixup_owner() returned an error, proprogate that. If it |
2299 | * acquired the lock, clear -ETIMEDOUT or -EINTR. | 2285 | * acquired the lock, clear -ETIMEDOUT or -EINTR. |
@@ -2324,9 +2310,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | |||
2324 | } | 2310 | } |
2325 | 2311 | ||
2326 | out_put_keys: | 2312 | out_put_keys: |
2327 | put_futex_key(fshared, &q.key); | 2313 | put_futex_key(&q.key); |
2328 | out_key2: | 2314 | out_key2: |
2329 | put_futex_key(fshared, &key2); | 2315 | put_futex_key(&key2); |
2330 | 2316 | ||
2331 | out: | 2317 | out: |
2332 | if (to) { | 2318 | if (to) { |
@@ -2551,58 +2537,57 @@ void exit_robust_list(struct task_struct *curr) | |||
2551 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, | 2537 | long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
2552 | u32 __user *uaddr2, u32 val2, u32 val3) | 2538 | u32 __user *uaddr2, u32 val2, u32 val3) |
2553 | { | 2539 | { |
2554 | int clockrt, ret = -ENOSYS; | 2540 | int ret = -ENOSYS, cmd = op & FUTEX_CMD_MASK; |
2555 | int cmd = op & FUTEX_CMD_MASK; | 2541 | unsigned int flags = 0; |
2556 | int fshared = 0; | ||
2557 | 2542 | ||
2558 | if (!(op & FUTEX_PRIVATE_FLAG)) | 2543 | if (!(op & FUTEX_PRIVATE_FLAG)) |
2559 | fshared = 1; | 2544 | flags |= FLAGS_SHARED; |
2560 | 2545 | ||
2561 | clockrt = op & FUTEX_CLOCK_REALTIME; | 2546 | if (op & FUTEX_CLOCK_REALTIME) { |
2562 | if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) | 2547 | flags |= FLAGS_CLOCKRT; |
2563 | return -ENOSYS; | 2548 | if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) |
2549 | return -ENOSYS; | ||
2550 | } | ||
2564 | 2551 | ||
2565 | switch (cmd) { | 2552 | switch (cmd) { |
2566 | case FUTEX_WAIT: | 2553 | case FUTEX_WAIT: |
2567 | val3 = FUTEX_BITSET_MATCH_ANY; | 2554 | val3 = FUTEX_BITSET_MATCH_ANY; |
2568 | case FUTEX_WAIT_BITSET: | 2555 | case FUTEX_WAIT_BITSET: |
2569 | ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt); | 2556 | ret = futex_wait(uaddr, flags, val, timeout, val3); |
2570 | break; | 2557 | break; |
2571 | case FUTEX_WAKE: | 2558 | case FUTEX_WAKE: |
2572 | val3 = FUTEX_BITSET_MATCH_ANY; | 2559 | val3 = FUTEX_BITSET_MATCH_ANY; |
2573 | case FUTEX_WAKE_BITSET: | 2560 | case FUTEX_WAKE_BITSET: |
2574 | ret = futex_wake(uaddr, fshared, val, val3); | 2561 | ret = futex_wake(uaddr, flags, val, val3); |
2575 | break; | 2562 | break; |
2576 | case FUTEX_REQUEUE: | 2563 | case FUTEX_REQUEUE: |
2577 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0); | 2564 | ret = futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); |
2578 | break; | 2565 | break; |
2579 | case FUTEX_CMP_REQUEUE: | 2566 | case FUTEX_CMP_REQUEUE: |
2580 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, | 2567 | ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); |
2581 | 0); | ||
2582 | break; | 2568 | break; |
2583 | case FUTEX_WAKE_OP: | 2569 | case FUTEX_WAKE_OP: |
2584 | ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3); | 2570 | ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
2585 | break; | 2571 | break; |
2586 | case FUTEX_LOCK_PI: | 2572 | case FUTEX_LOCK_PI: |
2587 | if (futex_cmpxchg_enabled) | 2573 | if (futex_cmpxchg_enabled) |
2588 | ret = futex_lock_pi(uaddr, fshared, val, timeout, 0); | 2574 | ret = futex_lock_pi(uaddr, flags, val, timeout, 0); |
2589 | break; | 2575 | break; |
2590 | case FUTEX_UNLOCK_PI: | 2576 | case FUTEX_UNLOCK_PI: |
2591 | if (futex_cmpxchg_enabled) | 2577 | if (futex_cmpxchg_enabled) |
2592 | ret = futex_unlock_pi(uaddr, fshared); | 2578 | ret = futex_unlock_pi(uaddr, flags); |
2593 | break; | 2579 | break; |
2594 | case FUTEX_TRYLOCK_PI: | 2580 | case FUTEX_TRYLOCK_PI: |
2595 | if (futex_cmpxchg_enabled) | 2581 | if (futex_cmpxchg_enabled) |
2596 | ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1); | 2582 | ret = futex_lock_pi(uaddr, flags, 0, timeout, 1); |
2597 | break; | 2583 | break; |
2598 | case FUTEX_WAIT_REQUEUE_PI: | 2584 | case FUTEX_WAIT_REQUEUE_PI: |
2599 | val3 = FUTEX_BITSET_MATCH_ANY; | 2585 | val3 = FUTEX_BITSET_MATCH_ANY; |
2600 | ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3, | 2586 | ret = futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, |
2601 | clockrt, uaddr2); | 2587 | uaddr2); |
2602 | break; | 2588 | break; |
2603 | case FUTEX_CMP_REQUEUE_PI: | 2589 | case FUTEX_CMP_REQUEUE_PI: |
2604 | ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3, | 2590 | ret = futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); |
2605 | 1); | ||
2606 | break; | 2591 | break; |
2607 | default: | 2592 | default: |
2608 | ret = -ENOSYS; | 2593 | ret = -ENOSYS; |
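With the conversion complete, do_futex() computes the flags word once from the op argument and hands it unchanged to every operation, including the restart_block used when an interrupted futex_wait() must be restarted. Nothing changes for userspace; the sketch below shows a raw futex call whose op bits map onto the new kernel-side flags (Linux-only userspace code, assumed for illustration and not part of the patch):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>

/*
 * A private wait with an absolute CLOCK_REALTIME timeout. For this op word
 * do_futex() builds flags with FLAGS_CLOCKRT set and FLAGS_SHARED clear
 * (FUTEX_PRIVATE_FLAG is present), then calls
 * futex_wait(uaddr, flags, expected, &abs_timeout, FUTEX_BITSET_MATCH_ANY).
 */
static long futex_wait_abs(uint32_t *uaddr, uint32_t expected,
			   const struct timespec *abs_timeout)
{
	return syscall(SYS_futex, uaddr,
		       FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME,
		       expected, abs_timeout, NULL, FUTEX_BITSET_MATCH_ANY);
}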
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index e5325825aeb..086adf25a55 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -641,7 +641,7 @@ int __init init_hw_breakpoint(void) | |||
641 | 641 | ||
642 | constraints_initialized = 1; | 642 | constraints_initialized = 1; |
643 | 643 | ||
644 | perf_pmu_register(&perf_breakpoint); | 644 | perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT); |
645 | 645 | ||
646 | return register_die_notifier(&hw_breakpoint_exceptions_nb); | 646 | return register_die_notifier(&hw_breakpoint_exceptions_nb); |
647 | 647 | ||
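This one-line update follows an interface change on the perf side: judging only from the call site, perf_pmu_register() now takes a human-readable name and an explicit type id in addition to the struct pmu. A hedged sketch of a caller under that assumption (the prototype below is inferred from this call, not quoted from the perf headers):

/* Prototype inferred from the call site above; treat it as an assumption. */
int perf_pmu_register(struct pmu *pmu, char *name, int type);

static int __init register_breakpoint_pmu(void)
{
	/*
	 * The breakpoint PMU keeps its long-standing fixed id, so existing
	 * users that set perf_event_attr::type = PERF_TYPE_BREAKPOINT keep
	 * working unchanged.
	 */
	return perf_pmu_register(&perf_breakpoint, "breakpoint",
				 PERF_TYPE_BREAKPOINT);
}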
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9737a76e106..7663e5df0e6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -354,13 +354,20 @@ static inline int kprobe_aggrprobe(struct kprobe *p) | |||
354 | return p->pre_handler == aggr_pre_handler; | 354 | return p->pre_handler == aggr_pre_handler; |
355 | } | 355 | } |
356 | 356 | ||
357 | /* Return true(!0) if the kprobe is unused */ | ||
358 | static inline int kprobe_unused(struct kprobe *p) | ||
359 | { | ||
360 | return kprobe_aggrprobe(p) && kprobe_disabled(p) && | ||
361 | list_empty(&p->list); | ||
362 | } | ||
363 | |||
357 | /* | 364 | /* |
358 | * Keep all fields in the kprobe consistent | 365 | * Keep all fields in the kprobe consistent |
359 | */ | 366 | */ |
360 | static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p) | 367 | static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) |
361 | { | 368 | { |
362 | memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t)); | 369 | memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); |
363 | memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn)); | 370 | memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); |
364 | } | 371 | } |
365 | 372 | ||
366 | #ifdef CONFIG_OPTPROBES | 373 | #ifdef CONFIG_OPTPROBES |
@@ -384,6 +391,17 @@ void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
384 | } | 391 | } |
385 | } | 392 | } |
386 | 393 | ||
394 | /* Free optimized instructions and optimized_kprobe */ | ||
395 | static __kprobes void free_aggr_kprobe(struct kprobe *p) | ||
396 | { | ||
397 | struct optimized_kprobe *op; | ||
398 | |||
399 | op = container_of(p, struct optimized_kprobe, kp); | ||
400 | arch_remove_optimized_kprobe(op); | ||
401 | arch_remove_kprobe(p); | ||
402 | kfree(op); | ||
403 | } | ||
404 | |||
387 | /* Return true(!0) if the kprobe is ready for optimization. */ | 405 | /* Return true(!0) if the kprobe is ready for optimization. */ |
388 | static inline int kprobe_optready(struct kprobe *p) | 406 | static inline int kprobe_optready(struct kprobe *p) |
389 | { | 407 | { |
@@ -397,6 +415,33 @@ static inline int kprobe_optready(struct kprobe *p) | |||
397 | return 0; | 415 | return 0; |
398 | } | 416 | } |
399 | 417 | ||
418 | /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */ | ||
419 | static inline int kprobe_disarmed(struct kprobe *p) | ||
420 | { | ||
421 | struct optimized_kprobe *op; | ||
422 | |||
423 | /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ | ||
424 | if (!kprobe_aggrprobe(p)) | ||
425 | return kprobe_disabled(p); | ||
426 | |||
427 | op = container_of(p, struct optimized_kprobe, kp); | ||
428 | |||
429 | return kprobe_disabled(p) && list_empty(&op->list); | ||
430 | } | ||
431 | |||
432 | /* Return true(!0) if the probe is queued on (un)optimizing lists */ | ||
433 | static int __kprobes kprobe_queued(struct kprobe *p) | ||
434 | { | ||
435 | struct optimized_kprobe *op; | ||
436 | |||
437 | if (kprobe_aggrprobe(p)) { | ||
438 | op = container_of(p, struct optimized_kprobe, kp); | ||
439 | if (!list_empty(&op->list)) | ||
440 | return 1; | ||
441 | } | ||
442 | return 0; | ||
443 | } | ||
444 | |||
400 | /* | 445 | /* |
401 | * Return an optimized kprobe whose optimizing code replaces | 446 | * Return an optimized kprobe whose optimizing code replaces |
402 | * instructions including addr (exclude breakpoint). | 447 | * instructions including addr (exclude breakpoint). |
@@ -422,30 +467,23 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) | |||
422 | 467 | ||
423 | /* Optimization staging list, protected by kprobe_mutex */ | 468 | /* Optimization staging list, protected by kprobe_mutex */ |
424 | static LIST_HEAD(optimizing_list); | 469 | static LIST_HEAD(optimizing_list); |
470 | static LIST_HEAD(unoptimizing_list); | ||
425 | 471 | ||
426 | static void kprobe_optimizer(struct work_struct *work); | 472 | static void kprobe_optimizer(struct work_struct *work); |
427 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); | 473 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); |
474 | static DECLARE_COMPLETION(optimizer_comp); | ||
428 | #define OPTIMIZE_DELAY 5 | 475 | #define OPTIMIZE_DELAY 5 |
429 | 476 | ||
430 | /* Kprobe jump optimizer */ | 477 | /* |
431 | static __kprobes void kprobe_optimizer(struct work_struct *work) | 478 | * Optimize (replace a breakpoint with a jump) kprobes listed on |
479 | * optimizing_list. | ||
480 | */ | ||
481 | static __kprobes void do_optimize_kprobes(void) | ||
432 | { | 482 | { |
433 | struct optimized_kprobe *op, *tmp; | 483 | /* Optimization never be done when disarmed */ |
434 | 484 | if (kprobes_all_disarmed || !kprobes_allow_optimization || | |
435 | /* Lock modules while optimizing kprobes */ | 485 | list_empty(&optimizing_list)) |
436 | mutex_lock(&module_mutex); | 486 | return; |
437 | mutex_lock(&kprobe_mutex); | ||
438 | if (kprobes_all_disarmed || !kprobes_allow_optimization) | ||
439 | goto end; | ||
440 | |||
441 | /* | ||
442 | * Wait for quiesence period to ensure all running interrupts | ||
443 | * are done. Because optprobe may modify multiple instructions | ||
444 | * there is a chance that Nth instruction is interrupted. In that | ||
445 | * case, running interrupt can return to 2nd-Nth byte of jump | ||
446 | * instruction. This wait is for avoiding it. | ||
447 | */ | ||
448 | synchronize_sched(); | ||
449 | 487 | ||
450 | /* | 488 | /* |
451 | * The optimization/unoptimization refers online_cpus via | 489 | * The optimization/unoptimization refers online_cpus via |
@@ -459,17 +497,111 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
459 | */ | 497 | */ |
460 | get_online_cpus(); | 498 | get_online_cpus(); |
461 | mutex_lock(&text_mutex); | 499 | mutex_lock(&text_mutex); |
462 | list_for_each_entry_safe(op, tmp, &optimizing_list, list) { | 500 | arch_optimize_kprobes(&optimizing_list); |
463 | WARN_ON(kprobe_disabled(&op->kp)); | 501 | mutex_unlock(&text_mutex); |
464 | if (arch_optimize_kprobe(op) < 0) | 502 | put_online_cpus(); |
465 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; | 503 | } |
466 | list_del_init(&op->list); | 504 | |
505 | /* | ||
506 | * Unoptimize (replace a jump with a breakpoint and remove the breakpoint | ||
507 | * if need) kprobes listed on unoptimizing_list. | ||
508 | */ | ||
509 | static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) | ||
510 | { | ||
511 | struct optimized_kprobe *op, *tmp; | ||
512 | |||
513 | /* Unoptimization must be done anytime */ | ||
514 | if (list_empty(&unoptimizing_list)) | ||
515 | return; | ||
516 | |||
517 | /* Ditto to do_optimize_kprobes */ | ||
518 | get_online_cpus(); | ||
519 | mutex_lock(&text_mutex); | ||
520 | arch_unoptimize_kprobes(&unoptimizing_list, free_list); | ||
521 | /* Loop free_list for disarming */ | ||
522 | list_for_each_entry_safe(op, tmp, free_list, list) { | ||
523 | /* Disarm probes if marked disabled */ | ||
524 | if (kprobe_disabled(&op->kp)) | ||
525 | arch_disarm_kprobe(&op->kp); | ||
526 | if (kprobe_unused(&op->kp)) { | ||
527 | /* | ||
528 | * Remove unused probes from hash list. After waiting | ||
529 | * for synchronization, these probes are reclaimed. | ||
530 | * (reclaiming is done by do_free_cleaned_kprobes.) | ||
531 | */ | ||
532 | hlist_del_rcu(&op->kp.hlist); | ||
533 | } else | ||
534 | list_del_init(&op->list); | ||
467 | } | 535 | } |
468 | mutex_unlock(&text_mutex); | 536 | mutex_unlock(&text_mutex); |
469 | put_online_cpus(); | 537 | put_online_cpus(); |
470 | end: | 538 | } |
539 | |||
540 | /* Reclaim all kprobes on the free_list */ | ||
541 | static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) | ||
542 | { | ||
543 | struct optimized_kprobe *op, *tmp; | ||
544 | |||
545 | list_for_each_entry_safe(op, tmp, free_list, list) { | ||
546 | BUG_ON(!kprobe_unused(&op->kp)); | ||
547 | list_del_init(&op->list); | ||
548 | free_aggr_kprobe(&op->kp); | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* Start optimizer after OPTIMIZE_DELAY passed */ | ||
553 | static __kprobes void kick_kprobe_optimizer(void) | ||
554 | { | ||
555 | if (!delayed_work_pending(&optimizing_work)) | ||
556 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); | ||
557 | } | ||
558 | |||
559 | /* Kprobe jump optimizer */ | ||
560 | static __kprobes void kprobe_optimizer(struct work_struct *work) | ||
561 | { | ||
562 | LIST_HEAD(free_list); | ||
563 | |||
564 | /* Lock modules while optimizing kprobes */ | ||
565 | mutex_lock(&module_mutex); | ||
566 | mutex_lock(&kprobe_mutex); | ||
567 | |||
568 | /* | ||
569 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) | ||
570 | * kprobes before waiting for quiesence period. | ||
571 | */ | ||
572 | do_unoptimize_kprobes(&free_list); | ||
573 | |||
574 | /* | ||
575 | * Step 2: Wait for quiesence period to ensure all running interrupts | ||
576 | * are done. Because optprobe may modify multiple instructions | ||
577 | * there is a chance that Nth instruction is interrupted. In that | ||
578 | * case, running interrupt can return to 2nd-Nth byte of jump | ||
579 | * instruction. This wait is for avoiding it. | ||
580 | */ | ||
581 | synchronize_sched(); | ||
582 | |||
583 | /* Step 3: Optimize kprobes after quiesence period */ | ||
584 | do_optimize_kprobes(); | ||
585 | |||
586 | /* Step 4: Free cleaned kprobes after quiesence period */ | ||
587 | do_free_cleaned_kprobes(&free_list); | ||
588 | |||
471 | mutex_unlock(&kprobe_mutex); | 589 | mutex_unlock(&kprobe_mutex); |
472 | mutex_unlock(&module_mutex); | 590 | mutex_unlock(&module_mutex); |
591 | |||
592 | /* Step 5: Kick optimizer again if needed */ | ||
593 | if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) | ||
594 | kick_kprobe_optimizer(); | ||
595 | else | ||
596 | /* Wake up all waiters */ | ||
597 | complete_all(&optimizer_comp); | ||
598 | } | ||
599 | |||
600 | /* Wait for completing optimization and unoptimization */ | ||
601 | static __kprobes void wait_for_kprobe_optimizer(void) | ||
602 | { | ||
603 | if (delayed_work_pending(&optimizing_work)) | ||
604 | wait_for_completion(&optimizer_comp); | ||
473 | } | 605 | } |
474 | 606 | ||
475 | /* Optimize kprobe if p is ready to be optimized */ | 607 | /* Optimize kprobe if p is ready to be optimized */ |
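The rewritten optimizer above turns what used to be a single pass into a five-step batch job: unoptimize queued probes, wait out a quiescence period, optimize the remaining queue, free the probes collected for reclaim, then kick itself again if more work arrived, otherwise signal a completion so wait_for_kprobe_optimizer() can block on it. A stripped-down sketch of that delayed-work-plus-completion pattern, detached from kprobes (the request list and handler below are made up for illustration):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(pending_list);			/* requests waiting to be batched */
static DEFINE_MUTEX(pending_mutex);
static DECLARE_COMPLETION(batch_done);

static void batch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(batch_work, batch_worker);
#define BATCH_DELAY 5				/* jiffies, like OPTIMIZE_DELAY */

static void kick_batcher(void)
{
	if (!delayed_work_pending(&batch_work))
		schedule_delayed_work(&batch_work, BATCH_DELAY);
}

static void batch_worker(struct work_struct *work)
{
	mutex_lock(&pending_mutex);
	/* ... drain pending_list here, with a synchronize_sched() between steps ... */
	mutex_unlock(&pending_mutex);

	if (!list_empty(&pending_list))
		kick_batcher();			/* new work arrived meanwhile: run again */
	else
		complete_all(&batch_done);	/* wake every waiter in wait_for_batcher() */
}

static void wait_for_batcher(void)
{
	if (delayed_work_pending(&batch_work))
		wait_for_completion(&batch_done);
}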
@@ -495,42 +627,99 @@ static __kprobes void optimize_kprobe(struct kprobe *p) | |||
495 | /* Check if it is already optimized. */ | 627 | /* Check if it is already optimized. */ |
496 | if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) | 628 | if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) |
497 | return; | 629 | return; |
498 | |||
499 | op->kp.flags |= KPROBE_FLAG_OPTIMIZED; | 630 | op->kp.flags |= KPROBE_FLAG_OPTIMIZED; |
500 | list_add(&op->list, &optimizing_list); | 631 | |
501 | if (!delayed_work_pending(&optimizing_work)) | 632 | if (!list_empty(&op->list)) |
502 | schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); | 633 | /* This is under unoptimizing. Just dequeue the probe */ |
634 | list_del_init(&op->list); | ||
635 | else { | ||
636 | list_add(&op->list, &optimizing_list); | ||
637 | kick_kprobe_optimizer(); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | /* Short cut to direct unoptimizing */ | ||
642 | static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op) | ||
643 | { | ||
644 | get_online_cpus(); | ||
645 | arch_unoptimize_kprobe(op); | ||
646 | put_online_cpus(); | ||
647 | if (kprobe_disabled(&op->kp)) | ||
648 | arch_disarm_kprobe(&op->kp); | ||
503 | } | 649 | } |
504 | 650 | ||
505 | /* Unoptimize a kprobe if p is optimized */ | 651 | /* Unoptimize a kprobe if p is optimized */ |
506 | static __kprobes void unoptimize_kprobe(struct kprobe *p) | 652 | static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force) |
507 | { | 653 | { |
508 | struct optimized_kprobe *op; | 654 | struct optimized_kprobe *op; |
509 | 655 | ||
510 | if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) { | 656 | if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) |
511 | op = container_of(p, struct optimized_kprobe, kp); | 657 | return; /* This is not an optprobe nor optimized */ |
512 | if (!list_empty(&op->list)) | 658 | |
513 | /* Dequeue from the optimization queue */ | 659 | op = container_of(p, struct optimized_kprobe, kp); |
660 | if (!kprobe_optimized(p)) { | ||
661 | /* Unoptimized or unoptimizing case */ | ||
662 | if (force && !list_empty(&op->list)) { | ||
663 | /* | ||
664 | * Only if this is unoptimizing kprobe and forced, | ||
665 | * forcibly unoptimize it. (No need to unoptimize | ||
666 | * unoptimized kprobe again :) | ||
667 | */ | ||
514 | list_del_init(&op->list); | 668 | list_del_init(&op->list); |
515 | else | 669 | force_unoptimize_kprobe(op); |
516 | /* Replace jump with break */ | 670 | } |
517 | arch_unoptimize_kprobe(op); | 671 | return; |
518 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; | 672 | } |
673 | |||
674 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; | ||
675 | if (!list_empty(&op->list)) { | ||
676 | /* Dequeue from the optimization queue */ | ||
677 | list_del_init(&op->list); | ||
678 | return; | ||
679 | } | ||
680 | /* Optimized kprobe case */ | ||
681 | if (force) | ||
682 | /* Forcibly update the code: this is a special case */ | ||
683 | force_unoptimize_kprobe(op); | ||
684 | else { | ||
685 | list_add(&op->list, &unoptimizing_list); | ||
686 | kick_kprobe_optimizer(); | ||
519 | } | 687 | } |
520 | } | 688 | } |
521 | 689 | ||
690 | /* Cancel unoptimizing for reusing */ | ||
691 | static void reuse_unused_kprobe(struct kprobe *ap) | ||
692 | { | ||
693 | struct optimized_kprobe *op; | ||
694 | |||
695 | BUG_ON(!kprobe_unused(ap)); | ||
696 | /* | ||
697 | * Unused kprobe MUST be on the way of delayed unoptimizing (means | ||
698 | * there is still a relative jump) and disabled. | ||
699 | */ | ||
700 | op = container_of(ap, struct optimized_kprobe, kp); | ||
701 | if (unlikely(list_empty(&op->list))) | ||
702 | printk(KERN_WARNING "Warning: found a stray unused " | ||
703 | "aggrprobe@%p\n", ap->addr); | ||
704 | /* Enable the probe again */ | ||
705 | ap->flags &= ~KPROBE_FLAG_DISABLED; | ||
706 | /* Optimize it again (remove from op->list) */ | ||
707 | BUG_ON(!kprobe_optready(ap)); | ||
708 | optimize_kprobe(ap); | ||
709 | } | ||
710 | |||
522 | /* Remove optimized instructions */ | 711 | /* Remove optimized instructions */ |
523 | static void __kprobes kill_optimized_kprobe(struct kprobe *p) | 712 | static void __kprobes kill_optimized_kprobe(struct kprobe *p) |
524 | { | 713 | { |
525 | struct optimized_kprobe *op; | 714 | struct optimized_kprobe *op; |
526 | 715 | ||
527 | op = container_of(p, struct optimized_kprobe, kp); | 716 | op = container_of(p, struct optimized_kprobe, kp); |
528 | if (!list_empty(&op->list)) { | 717 | if (!list_empty(&op->list)) |
529 | /* Dequeue from the optimization queue */ | 718 | /* Dequeue from the (un)optimization queue */ |
530 | list_del_init(&op->list); | 719 | list_del_init(&op->list); |
531 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; | 720 | |
532 | } | 721 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
533 | /* Don't unoptimize, because the target code will be freed. */ | 722 | /* Don't touch the code, because it is already freed. */ |
534 | arch_remove_optimized_kprobe(op); | 723 | arch_remove_optimized_kprobe(op); |
535 | } | 724 | } |
536 | 725 | ||
@@ -543,16 +732,6 @@ static __kprobes void prepare_optimized_kprobe(struct kprobe *p) | |||
543 | arch_prepare_optimized_kprobe(op); | 732 | arch_prepare_optimized_kprobe(op); |
544 | } | 733 | } |
545 | 734 | ||
546 | /* Free optimized instructions and optimized_kprobe */ | ||
547 | static __kprobes void free_aggr_kprobe(struct kprobe *p) | ||
548 | { | ||
549 | struct optimized_kprobe *op; | ||
550 | |||
551 | op = container_of(p, struct optimized_kprobe, kp); | ||
552 | arch_remove_optimized_kprobe(op); | ||
553 | kfree(op); | ||
554 | } | ||
555 | |||
556 | /* Allocate new optimized_kprobe and try to prepare optimized instructions */ | 735 | /* Allocate new optimized_kprobe and try to prepare optimized instructions */ |
557 | static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) | 736 | static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) |
558 | { | 737 | { |
@@ -587,7 +766,8 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) | |||
587 | op = container_of(ap, struct optimized_kprobe, kp); | 766 | op = container_of(ap, struct optimized_kprobe, kp); |
588 | if (!arch_prepared_optinsn(&op->optinsn)) { | 767 | if (!arch_prepared_optinsn(&op->optinsn)) { |
589 | /* If failed to setup optimizing, fallback to kprobe */ | 768 | /* If failed to setup optimizing, fallback to kprobe */ |
590 | free_aggr_kprobe(ap); | 769 | arch_remove_optimized_kprobe(op); |
770 | kfree(op); | ||
591 | return; | 771 | return; |
592 | } | 772 | } |
593 | 773 | ||
@@ -631,21 +811,16 @@ static void __kprobes unoptimize_all_kprobes(void) | |||
631 | return; | 811 | return; |
632 | 812 | ||
633 | kprobes_allow_optimization = false; | 813 | kprobes_allow_optimization = false; |
634 | printk(KERN_INFO "Kprobes globally unoptimized\n"); | ||
635 | get_online_cpus(); /* For avoiding text_mutex deadlock */ | ||
636 | mutex_lock(&text_mutex); | ||
637 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 814 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
638 | head = &kprobe_table[i]; | 815 | head = &kprobe_table[i]; |
639 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 816 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
640 | if (!kprobe_disabled(p)) | 817 | if (!kprobe_disabled(p)) |
641 | unoptimize_kprobe(p); | 818 | unoptimize_kprobe(p, false); |
642 | } | 819 | } |
643 | } | 820 | } |
644 | 821 | /* Wait for unoptimizing completion */ | |
645 | mutex_unlock(&text_mutex); | 822 | wait_for_kprobe_optimizer(); |
646 | put_online_cpus(); | 823 | printk(KERN_INFO "Kprobes globally unoptimized\n"); |
647 | /* Allow all currently running kprobes to complete */ | ||
648 | synchronize_sched(); | ||
649 | } | 824 | } |
650 | 825 | ||
651 | int sysctl_kprobes_optimization; | 826 | int sysctl_kprobes_optimization; |
@@ -669,44 +844,60 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, | |||
669 | } | 844 | } |
670 | #endif /* CONFIG_SYSCTL */ | 845 | #endif /* CONFIG_SYSCTL */ |
671 | 846 | ||
847 | /* Put a breakpoint for a probe. Must be called with text_mutex locked */ | ||
672 | static void __kprobes __arm_kprobe(struct kprobe *p) | 848 | static void __kprobes __arm_kprobe(struct kprobe *p) |
673 | { | 849 | { |
674 | struct kprobe *old_p; | 850 | struct kprobe *_p; |
675 | 851 | ||
676 | /* Check collision with other optimized kprobes */ | 852 | /* Check collision with other optimized kprobes */ |
677 | old_p = get_optimized_kprobe((unsigned long)p->addr); | 853 | _p = get_optimized_kprobe((unsigned long)p->addr); |
678 | if (unlikely(old_p)) | 854 | if (unlikely(_p)) |
679 | unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */ | 855 | /* Fallback to unoptimized kprobe */ |
856 | unoptimize_kprobe(_p, true); | ||
680 | 857 | ||
681 | arch_arm_kprobe(p); | 858 | arch_arm_kprobe(p); |
682 | optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ | 859 | optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ |
683 | } | 860 | } |
684 | 861 | ||
685 | static void __kprobes __disarm_kprobe(struct kprobe *p) | 862 | /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ |
863 | static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) | ||
686 | { | 864 | { |
687 | struct kprobe *old_p; | 865 | struct kprobe *_p; |
688 | 866 | ||
689 | unoptimize_kprobe(p); /* Try to unoptimize */ | 867 | unoptimize_kprobe(p, false); /* Try to unoptimize */ |
690 | arch_disarm_kprobe(p); | ||
691 | 868 | ||
692 | /* If another kprobe was blocked, optimize it. */ | 869 | if (!kprobe_queued(p)) { |
693 | old_p = get_optimized_kprobe((unsigned long)p->addr); | 870 | arch_disarm_kprobe(p); |
694 | if (unlikely(old_p)) | 871 | /* If another kprobe was blocked, optimize it. */ |
695 | optimize_kprobe(old_p); | 872 | _p = get_optimized_kprobe((unsigned long)p->addr); |
873 | if (unlikely(_p) && reopt) | ||
874 | optimize_kprobe(_p); | ||
875 | } | ||
876 | /* TODO: reoptimize other probes after unoptimizing this one */ | ||
696 | } | 877 | } |
697 | 878 | ||
698 | #else /* !CONFIG_OPTPROBES */ | 879 | #else /* !CONFIG_OPTPROBES */ |
699 | 880 | ||
700 | #define optimize_kprobe(p) do {} while (0) | 881 | #define optimize_kprobe(p) do {} while (0) |
701 | #define unoptimize_kprobe(p) do {} while (0) | 882 | #define unoptimize_kprobe(p, f) do {} while (0) |
702 | #define kill_optimized_kprobe(p) do {} while (0) | 883 | #define kill_optimized_kprobe(p) do {} while (0) |
703 | #define prepare_optimized_kprobe(p) do {} while (0) | 884 | #define prepare_optimized_kprobe(p) do {} while (0) |
704 | #define try_to_optimize_kprobe(p) do {} while (0) | 885 | #define try_to_optimize_kprobe(p) do {} while (0) |
705 | #define __arm_kprobe(p) arch_arm_kprobe(p) | 886 | #define __arm_kprobe(p) arch_arm_kprobe(p) |
706 | #define __disarm_kprobe(p) arch_disarm_kprobe(p) | 887 | #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) |
888 | #define kprobe_disarmed(p) kprobe_disabled(p) | ||
889 | #define wait_for_kprobe_optimizer() do {} while (0) | ||
890 | |||
891 | /* There should be no unused kprobes; they cannot be reused without optimization */ | ||
892 | static void reuse_unused_kprobe(struct kprobe *ap) | ||
893 | { | ||
894 | printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); | ||
895 | BUG_ON(kprobe_unused(ap)); | ||
896 | } | ||
707 | 897 | ||
708 | static __kprobes void free_aggr_kprobe(struct kprobe *p) | 898 | static __kprobes void free_aggr_kprobe(struct kprobe *p) |
709 | { | 899 | { |
900 | arch_remove_kprobe(p); | ||
710 | kfree(p); | 901 | kfree(p); |
711 | } | 902 | } |
712 | 903 | ||
@@ -732,11 +923,10 @@ static void __kprobes arm_kprobe(struct kprobe *kp) | |||
732 | /* Disarm a kprobe with text_mutex */ | 923 | /* Disarm a kprobe with text_mutex */ |
733 | static void __kprobes disarm_kprobe(struct kprobe *kp) | 924 | static void __kprobes disarm_kprobe(struct kprobe *kp) |
734 | { | 925 | { |
735 | get_online_cpus(); /* For avoiding text_mutex deadlock */ | 926 | /* Ditto */ |
736 | mutex_lock(&text_mutex); | 927 | mutex_lock(&text_mutex); |
737 | __disarm_kprobe(kp); | 928 | __disarm_kprobe(kp, true); |
738 | mutex_unlock(&text_mutex); | 929 | mutex_unlock(&text_mutex); |
739 | put_online_cpus(); | ||
740 | } | 930 | } |
741 | 931 | ||
742 | /* | 932 | /* |
@@ -942,7 +1132,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) | |||
942 | BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); | 1132 | BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); |
943 | 1133 | ||
944 | if (p->break_handler || p->post_handler) | 1134 | if (p->break_handler || p->post_handler) |
945 | unoptimize_kprobe(ap); /* Fall back to normal kprobe */ | 1135 | unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ |
946 | 1136 | ||
947 | if (p->break_handler) { | 1137 | if (p->break_handler) { |
948 | if (ap->break_handler) | 1138 | if (ap->break_handler) |
@@ -993,19 +1183,21 @@ static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) | |||
993 | * This is the second or subsequent kprobe at the address - handle | 1183 | * This is the second or subsequent kprobe at the address - handle |
994 | * the intricacies | 1184 | * the intricacies |
995 | */ | 1185 | */ |
996 | static int __kprobes register_aggr_kprobe(struct kprobe *old_p, | 1186 | static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, |
997 | struct kprobe *p) | 1187 | struct kprobe *p) |
998 | { | 1188 | { |
999 | int ret = 0; | 1189 | int ret = 0; |
1000 | struct kprobe *ap = old_p; | 1190 | struct kprobe *ap = orig_p; |
1001 | 1191 | ||
1002 | if (!kprobe_aggrprobe(old_p)) { | 1192 | if (!kprobe_aggrprobe(orig_p)) { |
1003 | /* If old_p is not an aggr_kprobe, create new aggr_kprobe. */ | 1193 | /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ |
1004 | ap = alloc_aggr_kprobe(old_p); | 1194 | ap = alloc_aggr_kprobe(orig_p); |
1005 | if (!ap) | 1195 | if (!ap) |
1006 | return -ENOMEM; | 1196 | return -ENOMEM; |
1007 | init_aggr_kprobe(ap, old_p); | 1197 | init_aggr_kprobe(ap, orig_p); |
1008 | } | 1198 | } else if (kprobe_unused(ap)) |
1199 | /* This probe is going to die. Rescue it */ | ||
1200 | reuse_unused_kprobe(ap); | ||
1009 | 1201 | ||
1010 | if (kprobe_gone(ap)) { | 1202 | if (kprobe_gone(ap)) { |
1011 | /* | 1203 | /* |
@@ -1039,23 +1231,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p, | |||
1039 | return add_new_kprobe(ap, p); | 1231 | return add_new_kprobe(ap, p); |
1040 | } | 1232 | } |
1041 | 1233 | ||
1042 | /* Try to disable aggr_kprobe, and return 1 if succeeded.*/ | ||
1043 | static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) | ||
1044 | { | ||
1045 | struct kprobe *kp; | ||
1046 | |||
1047 | list_for_each_entry_rcu(kp, &p->list, list) { | ||
1048 | if (!kprobe_disabled(kp)) | ||
1049 | /* | ||
1050 | * There is an active probe on the list. | ||
1051 | * We can't disable aggr_kprobe. | ||
1052 | */ | ||
1053 | return 0; | ||
1054 | } | ||
1055 | p->flags |= KPROBE_FLAG_DISABLED; | ||
1056 | return 1; | ||
1057 | } | ||
1058 | |||
1059 | static int __kprobes in_kprobes_functions(unsigned long addr) | 1234 | static int __kprobes in_kprobes_functions(unsigned long addr) |
1060 | { | 1235 | { |
1061 | struct kprobe_blackpoint *kb; | 1236 | struct kprobe_blackpoint *kb; |
@@ -1098,34 +1273,33 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p) | |||
1098 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ | 1273 | /* Check passed kprobe is valid and return kprobe in kprobe_table. */ |
1099 | static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) | 1274 | static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) |
1100 | { | 1275 | { |
1101 | struct kprobe *old_p, *list_p; | 1276 | struct kprobe *ap, *list_p; |
1102 | 1277 | ||
1103 | old_p = get_kprobe(p->addr); | 1278 | ap = get_kprobe(p->addr); |
1104 | if (unlikely(!old_p)) | 1279 | if (unlikely(!ap)) |
1105 | return NULL; | 1280 | return NULL; |
1106 | 1281 | ||
1107 | if (p != old_p) { | 1282 | if (p != ap) { |
1108 | list_for_each_entry_rcu(list_p, &old_p->list, list) | 1283 | list_for_each_entry_rcu(list_p, &ap->list, list) |
1109 | if (list_p == p) | 1284 | if (list_p == p) |
1110 | /* kprobe p is a valid probe */ | 1285 | /* kprobe p is a valid probe */ |
1111 | goto valid; | 1286 | goto valid; |
1112 | return NULL; | 1287 | return NULL; |
1113 | } | 1288 | } |
1114 | valid: | 1289 | valid: |
1115 | return old_p; | 1290 | return ap; |
1116 | } | 1291 | } |
1117 | 1292 | ||
1118 | /* Return error if the kprobe is being re-registered */ | 1293 | /* Return error if the kprobe is being re-registered */ |
1119 | static inline int check_kprobe_rereg(struct kprobe *p) | 1294 | static inline int check_kprobe_rereg(struct kprobe *p) |
1120 | { | 1295 | { |
1121 | int ret = 0; | 1296 | int ret = 0; |
1122 | struct kprobe *old_p; | ||
1123 | 1297 | ||
1124 | mutex_lock(&kprobe_mutex); | 1298 | mutex_lock(&kprobe_mutex); |
1125 | old_p = __get_valid_kprobe(p); | 1299 | if (__get_valid_kprobe(p)) |
1126 | if (old_p) | ||
1127 | ret = -EINVAL; | 1300 | ret = -EINVAL; |
1128 | mutex_unlock(&kprobe_mutex); | 1301 | mutex_unlock(&kprobe_mutex); |
1302 | |||
1129 | return ret; | 1303 | return ret; |
1130 | } | 1304 | } |
1131 | 1305 | ||
@@ -1229,67 +1403,121 @@ fail_with_jump_label: | |||
1229 | } | 1403 | } |
1230 | EXPORT_SYMBOL_GPL(register_kprobe); | 1404 | EXPORT_SYMBOL_GPL(register_kprobe); |
1231 | 1405 | ||
1406 | /* Check if all probes on the aggrprobe are disabled */ | ||
1407 | static int __kprobes aggr_kprobe_disabled(struct kprobe *ap) | ||
1408 | { | ||
1409 | struct kprobe *kp; | ||
1410 | |||
1411 | list_for_each_entry_rcu(kp, &ap->list, list) | ||
1412 | if (!kprobe_disabled(kp)) | ||
1413 | /* | ||
1414 | * There is an active probe on the list. | ||
1415 | * We can't disable this ap. | ||
1416 | */ | ||
1417 | return 0; | ||
1418 | |||
1419 | return 1; | ||
1420 | } | ||
1421 | |||
1422 | /* Disable one kprobe: Must be called with kprobe_mutex held */ | ||
1423 | static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) | ||
1424 | { | ||
1425 | struct kprobe *orig_p; | ||
1426 | |||
1427 | /* Get an original kprobe for return */ | ||
1428 | orig_p = __get_valid_kprobe(p); | ||
1429 | if (unlikely(orig_p == NULL)) | ||
1430 | return NULL; | ||
1431 | |||
1432 | if (!kprobe_disabled(p)) { | ||
1433 | /* Disable probe if it is a child probe */ | ||
1434 | if (p != orig_p) | ||
1435 | p->flags |= KPROBE_FLAG_DISABLED; | ||
1436 | |||
1437 | /* Try to disarm and disable this/parent probe */ | ||
1438 | if (p == orig_p || aggr_kprobe_disabled(orig_p)) { | ||
1439 | disarm_kprobe(orig_p); | ||
1440 | orig_p->flags |= KPROBE_FLAG_DISABLED; | ||
1441 | } | ||
1442 | } | ||
1443 | |||
1444 | return orig_p; | ||
1445 | } | ||
1446 | |||
1232 | /* | 1447 | /* |
1233 | * Unregister a kprobe without a scheduler synchronization. | 1448 | * Unregister a kprobe without a scheduler synchronization. |
1234 | */ | 1449 | */ |
1235 | static int __kprobes __unregister_kprobe_top(struct kprobe *p) | 1450 | static int __kprobes __unregister_kprobe_top(struct kprobe *p) |
1236 | { | 1451 | { |
1237 | struct kprobe *old_p, *list_p; | 1452 | struct kprobe *ap, *list_p; |
1238 | 1453 | ||
1239 | old_p = __get_valid_kprobe(p); | 1454 | /* Disable kprobe. This will disarm it if needed. */ |
1240 | if (old_p == NULL) | 1455 | ap = __disable_kprobe(p); |
1456 | if (ap == NULL) | ||
1241 | return -EINVAL; | 1457 | return -EINVAL; |
1242 | 1458 | ||
1243 | if (old_p == p || | 1459 | if (ap == p) |
1244 | (kprobe_aggrprobe(old_p) && | ||
1245 | list_is_singular(&old_p->list))) { | ||
1246 | /* | 1460 | /* |
1247 | * Only probe on the hash list. Disarm only if kprobes are | 1461 | * This probe is an independent (and non-optimized) kprobe |
1248 | * enabled and not gone - otherwise, the breakpoint would | 1462 | * (not an aggrprobe). Remove from the hash list. |
1249 | * already have been removed. We save on flushing icache. | ||
1250 | */ | 1463 | */ |
1251 | if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) | 1464 | goto disarmed; |
1252 | disarm_kprobe(old_p); | 1465 | |
1253 | hlist_del_rcu(&old_p->hlist); | 1466 | /* Following process expects this probe is an aggrprobe */ |
1254 | } else { | 1467 | WARN_ON(!kprobe_aggrprobe(ap)); |
1468 | |||
1469 | if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) | ||
1470 | /* | ||
1471 | * !disarmed could happen if the probe is under delayed | ||
1472 | * unoptimization. | ||
1473 | */ | ||
1474 | goto disarmed; | ||
1475 | else { | ||
1476 | /* If the disabling probe has special handlers, update the aggrprobe */ | ||
1255 | if (p->break_handler && !kprobe_gone(p)) | 1477 | if (p->break_handler && !kprobe_gone(p)) |
1256 | old_p->break_handler = NULL; | 1478 | ap->break_handler = NULL; |
1257 | if (p->post_handler && !kprobe_gone(p)) { | 1479 | if (p->post_handler && !kprobe_gone(p)) { |
1258 | list_for_each_entry_rcu(list_p, &old_p->list, list) { | 1480 | list_for_each_entry_rcu(list_p, &ap->list, list) { |
1259 | if ((list_p != p) && (list_p->post_handler)) | 1481 | if ((list_p != p) && (list_p->post_handler)) |
1260 | goto noclean; | 1482 | goto noclean; |
1261 | } | 1483 | } |
1262 | old_p->post_handler = NULL; | 1484 | ap->post_handler = NULL; |
1263 | } | 1485 | } |
1264 | noclean: | 1486 | noclean: |
1487 | /* | ||
1488 | * Remove from the aggrprobe: this path will do nothing in | ||
1489 | * __unregister_kprobe_bottom(). | ||
1490 | */ | ||
1265 | list_del_rcu(&p->list); | 1491 | list_del_rcu(&p->list); |
1266 | if (!kprobe_disabled(old_p)) { | 1492 | if (!kprobe_disabled(ap) && !kprobes_all_disarmed) |
1267 | try_to_disable_aggr_kprobe(old_p); | 1493 | /* |
1268 | if (!kprobes_all_disarmed) { | 1494 | * Try to optimize this probe again, because post |
1269 | if (kprobe_disabled(old_p)) | 1495 | * handler may have been changed. |
1270 | disarm_kprobe(old_p); | 1496 | */ |
1271 | else | 1497 | optimize_kprobe(ap); |
1272 | /* Try to optimize this probe again */ | ||
1273 | optimize_kprobe(old_p); | ||
1274 | } | ||
1275 | } | ||
1276 | } | 1498 | } |
1277 | return 0; | 1499 | return 0; |
1500 | |||
1501 | disarmed: | ||
1502 | BUG_ON(!kprobe_disarmed(ap)); | ||
1503 | hlist_del_rcu(&ap->hlist); | ||
1504 | return 0; | ||
1278 | } | 1505 | } |
1279 | 1506 | ||
1280 | static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) | 1507 | static void __kprobes __unregister_kprobe_bottom(struct kprobe *p) |
1281 | { | 1508 | { |
1282 | struct kprobe *old_p; | 1509 | struct kprobe *ap; |
1283 | 1510 | ||
1284 | if (list_empty(&p->list)) | 1511 | if (list_empty(&p->list)) |
1512 | /* This is an independent kprobe */ | ||
1285 | arch_remove_kprobe(p); | 1513 | arch_remove_kprobe(p); |
1286 | else if (list_is_singular(&p->list)) { | 1514 | else if (list_is_singular(&p->list)) { |
1287 | /* "p" is the last child of an aggr_kprobe */ | 1515 | /* This is the last child of an aggrprobe */ |
1288 | old_p = list_entry(p->list.next, struct kprobe, list); | 1516 | ap = list_entry(p->list.next, struct kprobe, list); |
1289 | list_del(&p->list); | 1517 | list_del(&p->list); |
1290 | arch_remove_kprobe(old_p); | 1518 | free_aggr_kprobe(ap); |
1291 | free_aggr_kprobe(old_p); | ||
1292 | } | 1519 | } |
1520 | /* Otherwise, do nothing. */ | ||
1293 | } | 1521 | } |
1294 | 1522 | ||
1295 | int __kprobes register_kprobes(struct kprobe **kps, int num) | 1523 | int __kprobes register_kprobes(struct kprobe **kps, int num) |
@@ -1607,29 +1835,13 @@ static void __kprobes kill_kprobe(struct kprobe *p) | |||
1607 | int __kprobes disable_kprobe(struct kprobe *kp) | 1835 | int __kprobes disable_kprobe(struct kprobe *kp) |
1608 | { | 1836 | { |
1609 | int ret = 0; | 1837 | int ret = 0; |
1610 | struct kprobe *p; | ||
1611 | 1838 | ||
1612 | mutex_lock(&kprobe_mutex); | 1839 | mutex_lock(&kprobe_mutex); |
1613 | 1840 | ||
1614 | /* Check whether specified probe is valid. */ | 1841 | /* Disable this kprobe */ |
1615 | p = __get_valid_kprobe(kp); | 1842 | if (__disable_kprobe(kp) == NULL) |
1616 | if (unlikely(p == NULL)) { | ||
1617 | ret = -EINVAL; | 1843 | ret = -EINVAL; |
1618 | goto out; | ||
1619 | } | ||
1620 | 1844 | ||
1621 | /* If the probe is already disabled (or gone), just return */ | ||
1622 | if (kprobe_disabled(kp)) | ||
1623 | goto out; | ||
1624 | |||
1625 | kp->flags |= KPROBE_FLAG_DISABLED; | ||
1626 | if (p != kp) | ||
1627 | /* When kp != p, p is always enabled. */ | ||
1628 | try_to_disable_aggr_kprobe(p); | ||
1629 | |||
1630 | if (!kprobes_all_disarmed && kprobe_disabled(p)) | ||
1631 | disarm_kprobe(p); | ||
1632 | out: | ||
1633 | mutex_unlock(&kprobe_mutex); | 1845 | mutex_unlock(&kprobe_mutex); |
1634 | return ret; | 1846 | return ret; |
1635 | } | 1847 | } |
@@ -1927,36 +2139,27 @@ static void __kprobes disarm_all_kprobes(void) | |||
1927 | mutex_lock(&kprobe_mutex); | 2139 | mutex_lock(&kprobe_mutex); |
1928 | 2140 | ||
1929 | /* If kprobes are already disarmed, just return */ | 2141 | /* If kprobes are already disarmed, just return */ |
1930 | if (kprobes_all_disarmed) | 2142 | if (kprobes_all_disarmed) { |
1931 | goto already_disabled; | 2143 | mutex_unlock(&kprobe_mutex); |
2144 | return; | ||
2145 | } | ||
1932 | 2146 | ||
1933 | kprobes_all_disarmed = true; | 2147 | kprobes_all_disarmed = true; |
1934 | printk(KERN_INFO "Kprobes globally disabled\n"); | 2148 | printk(KERN_INFO "Kprobes globally disabled\n"); |
1935 | 2149 | ||
1936 | /* | ||
1937 | * Here we call get_online_cpus() for avoiding text_mutex deadlock, | ||
1938 | * because disarming may also unoptimize kprobes. | ||
1939 | */ | ||
1940 | get_online_cpus(); | ||
1941 | mutex_lock(&text_mutex); | 2150 | mutex_lock(&text_mutex); |
1942 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 2151 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
1943 | head = &kprobe_table[i]; | 2152 | head = &kprobe_table[i]; |
1944 | hlist_for_each_entry_rcu(p, node, head, hlist) { | 2153 | hlist_for_each_entry_rcu(p, node, head, hlist) { |
1945 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) | 2154 | if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) |
1946 | __disarm_kprobe(p); | 2155 | __disarm_kprobe(p, false); |
1947 | } | 2156 | } |
1948 | } | 2157 | } |
1949 | |||
1950 | mutex_unlock(&text_mutex); | 2158 | mutex_unlock(&text_mutex); |
1951 | put_online_cpus(); | ||
1952 | mutex_unlock(&kprobe_mutex); | 2159 | mutex_unlock(&kprobe_mutex); |
1953 | /* Allow all currently running kprobes to complete */ | ||
1954 | synchronize_sched(); | ||
1955 | return; | ||
1956 | 2160 | ||
1957 | already_disabled: | 2161 | /* Wait for the optimizer to finish disarming all kprobes */ |
1958 | mutex_unlock(&kprobe_mutex); | 2162 | wait_for_kprobe_optimizer(); |
1959 | return; | ||
1960 | } | 2163 | } |
1961 | 2164 | ||
1962 | /* | 2165 | /* |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 2870feee81d..11847bf1e8c 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/smp.h> | 15 | #include <linux/smp.h> |
16 | #include <linux/idr.h> | ||
16 | #include <linux/file.h> | 17 | #include <linux/file.h> |
17 | #include <linux/poll.h> | 18 | #include <linux/poll.h> |
18 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
@@ -21,7 +22,9 @@ | |||
21 | #include <linux/dcache.h> | 22 | #include <linux/dcache.h> |
22 | #include <linux/percpu.h> | 23 | #include <linux/percpu.h> |
23 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
25 | #include <linux/reboot.h> | ||
24 | #include <linux/vmstat.h> | 26 | #include <linux/vmstat.h> |
27 | #include <linux/device.h> | ||
25 | #include <linux/vmalloc.h> | 28 | #include <linux/vmalloc.h> |
26 | #include <linux/hardirq.h> | 29 | #include <linux/hardirq.h> |
27 | #include <linux/rculist.h> | 30 | #include <linux/rculist.h> |
@@ -133,6 +136,28 @@ static void unclone_ctx(struct perf_event_context *ctx) | |||
133 | } | 136 | } |
134 | } | 137 | } |
135 | 138 | ||
139 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) | ||
140 | { | ||
141 | /* | ||
142 | * only top level events have the pid namespace they were created in | ||
143 | */ | ||
144 | if (event->parent) | ||
145 | event = event->parent; | ||
146 | |||
147 | return task_tgid_nr_ns(p, event->ns); | ||
148 | } | ||
149 | |||
150 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) | ||
151 | { | ||
152 | /* | ||
153 | * only top level events have the pid namespace they were created in | ||
154 | */ | ||
155 | if (event->parent) | ||
156 | event = event->parent; | ||
157 | |||
158 | return task_pid_nr_ns(p, event->ns); | ||
159 | } | ||
160 | |||
136 | /* | 161 | /* |
137 | * If we inherit events we want to return the parent event id | 162 | * If we inherit events we want to return the parent event id |
138 | * to userspace. | 163 | * to userspace. |
@@ -312,9 +337,84 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) | |||
312 | ctx->nr_stat++; | 337 | ctx->nr_stat++; |
313 | } | 338 | } |
314 | 339 | ||
340 | /* | ||
341 | * Called at perf_event creation and when events are attached/detached from a | ||
342 | * group. | ||
343 | */ | ||
344 | static void perf_event__read_size(struct perf_event *event) | ||
345 | { | ||
346 | int entry = sizeof(u64); /* value */ | ||
347 | int size = 0; | ||
348 | int nr = 1; | ||
349 | |||
350 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
351 | size += sizeof(u64); | ||
352 | |||
353 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
354 | size += sizeof(u64); | ||
355 | |||
356 | if (event->attr.read_format & PERF_FORMAT_ID) | ||
357 | entry += sizeof(u64); | ||
358 | |||
359 | if (event->attr.read_format & PERF_FORMAT_GROUP) { | ||
360 | nr += event->group_leader->nr_siblings; | ||
361 | size += sizeof(u64); | ||
362 | } | ||
363 | |||
364 | size += entry * nr; | ||
365 | event->read_size = size; | ||
366 | } | ||
367 | |||
368 | static void perf_event__header_size(struct perf_event *event) | ||
369 | { | ||
370 | struct perf_sample_data *data; | ||
371 | u64 sample_type = event->attr.sample_type; | ||
372 | u16 size = 0; | ||
373 | |||
374 | perf_event__read_size(event); | ||
375 | |||
376 | if (sample_type & PERF_SAMPLE_IP) | ||
377 | size += sizeof(data->ip); | ||
378 | |||
379 | if (sample_type & PERF_SAMPLE_ADDR) | ||
380 | size += sizeof(data->addr); | ||
381 | |||
382 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
383 | size += sizeof(data->period); | ||
384 | |||
385 | if (sample_type & PERF_SAMPLE_READ) | ||
386 | size += event->read_size; | ||
387 | |||
388 | event->header_size = size; | ||
389 | } | ||
390 | |||
391 | static void perf_event__id_header_size(struct perf_event *event) | ||
392 | { | ||
393 | struct perf_sample_data *data; | ||
394 | u64 sample_type = event->attr.sample_type; | ||
395 | u16 size = 0; | ||
396 | |||
397 | if (sample_type & PERF_SAMPLE_TID) | ||
398 | size += sizeof(data->tid_entry); | ||
399 | |||
400 | if (sample_type & PERF_SAMPLE_TIME) | ||
401 | size += sizeof(data->time); | ||
402 | |||
403 | if (sample_type & PERF_SAMPLE_ID) | ||
404 | size += sizeof(data->id); | ||
405 | |||
406 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
407 | size += sizeof(data->stream_id); | ||
408 | |||
409 | if (sample_type & PERF_SAMPLE_CPU) | ||
410 | size += sizeof(data->cpu_entry); | ||
411 | |||
412 | event->id_header_size = size; | ||
413 | } | ||
414 | |||
315 | static void perf_group_attach(struct perf_event *event) | 415 | static void perf_group_attach(struct perf_event *event) |
316 | { | 416 | { |
317 | struct perf_event *group_leader = event->group_leader; | 417 | struct perf_event *group_leader = event->group_leader, *pos; |
318 | 418 | ||
319 | /* | 419 | /* |
320 | * We can have double attach due to group movement in perf_event_open. | 420 | * We can have double attach due to group movement in perf_event_open. |
@@ -333,6 +433,11 @@ static void perf_group_attach(struct perf_event *event) | |||
333 | 433 | ||
334 | list_add_tail(&event->group_entry, &group_leader->sibling_list); | 434 | list_add_tail(&event->group_entry, &group_leader->sibling_list); |
335 | group_leader->nr_siblings++; | 435 | group_leader->nr_siblings++; |
436 | |||
437 | perf_event__header_size(group_leader); | ||
438 | |||
439 | list_for_each_entry(pos, &group_leader->sibling_list, group_entry) | ||
440 | perf_event__header_size(pos); | ||
336 | } | 441 | } |
337 | 442 | ||
338 | /* | 443 | /* |
@@ -391,7 +496,7 @@ static void perf_group_detach(struct perf_event *event) | |||
391 | if (event->group_leader != event) { | 496 | if (event->group_leader != event) { |
392 | list_del_init(&event->group_entry); | 497 | list_del_init(&event->group_entry); |
393 | event->group_leader->nr_siblings--; | 498 | event->group_leader->nr_siblings--; |
394 | return; | 499 | goto out; |
395 | } | 500 | } |
396 | 501 | ||
397 | if (!list_empty(&event->group_entry)) | 502 | if (!list_empty(&event->group_entry)) |
@@ -410,6 +515,12 @@ static void perf_group_detach(struct perf_event *event) | |||
410 | /* Inherit group flags from the previous leader */ | 515 | /* Inherit group flags from the previous leader */ |
411 | sibling->group_flags = event->group_flags; | 516 | sibling->group_flags = event->group_flags; |
412 | } | 517 | } |
518 | |||
519 | out: | ||
520 | perf_event__header_size(event->group_leader); | ||
521 | |||
522 | list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) | ||
523 | perf_event__header_size(tmp); | ||
413 | } | 524 | } |
414 | 525 | ||
415 | static inline int | 526 | static inline int |
@@ -1073,7 +1184,7 @@ static int perf_event_refresh(struct perf_event *event, int refresh) | |||
1073 | /* | 1184 | /* |
1074 | * not supported on inherited events | 1185 | * not supported on inherited events |
1075 | */ | 1186 | */ |
1076 | if (event->attr.inherit) | 1187 | if (event->attr.inherit || !is_sampling_event(event)) |
1077 | return -EINVAL; | 1188 | return -EINVAL; |
1078 | 1189 | ||
1079 | atomic_add(refresh, &event->event_limit); | 1190 | atomic_add(refresh, &event->event_limit); |
@@ -2289,31 +2400,6 @@ static int perf_release(struct inode *inode, struct file *file) | |||
2289 | return perf_event_release_kernel(event); | 2400 | return perf_event_release_kernel(event); |
2290 | } | 2401 | } |
2291 | 2402 | ||
2292 | static int perf_event_read_size(struct perf_event *event) | ||
2293 | { | ||
2294 | int entry = sizeof(u64); /* value */ | ||
2295 | int size = 0; | ||
2296 | int nr = 1; | ||
2297 | |||
2298 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | ||
2299 | size += sizeof(u64); | ||
2300 | |||
2301 | if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | ||
2302 | size += sizeof(u64); | ||
2303 | |||
2304 | if (event->attr.read_format & PERF_FORMAT_ID) | ||
2305 | entry += sizeof(u64); | ||
2306 | |||
2307 | if (event->attr.read_format & PERF_FORMAT_GROUP) { | ||
2308 | nr += event->group_leader->nr_siblings; | ||
2309 | size += sizeof(u64); | ||
2310 | } | ||
2311 | |||
2312 | size += entry * nr; | ||
2313 | |||
2314 | return size; | ||
2315 | } | ||
2316 | |||
2317 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) | 2403 | u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) |
2318 | { | 2404 | { |
2319 | struct perf_event *child; | 2405 | struct perf_event *child; |
@@ -2428,7 +2514,7 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count) | |||
2428 | if (event->state == PERF_EVENT_STATE_ERROR) | 2514 | if (event->state == PERF_EVENT_STATE_ERROR) |
2429 | return 0; | 2515 | return 0; |
2430 | 2516 | ||
2431 | if (count < perf_event_read_size(event)) | 2517 | if (count < event->read_size) |
2432 | return -ENOSPC; | 2518 | return -ENOSPC; |
2433 | 2519 | ||
2434 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2520 | WARN_ON_ONCE(event->ctx->parent_ctx); |
@@ -2514,7 +2600,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) | |||
2514 | int ret = 0; | 2600 | int ret = 0; |
2515 | u64 value; | 2601 | u64 value; |
2516 | 2602 | ||
2517 | if (!event->attr.sample_period) | 2603 | if (!is_sampling_event(event)) |
2518 | return -EINVAL; | 2604 | return -EINVAL; |
2519 | 2605 | ||
2520 | if (copy_from_user(&value, arg, sizeof(value))) | 2606 | if (copy_from_user(&value, arg, sizeof(value))) |
@@ -3305,6 +3391,73 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle, | |||
3305 | } while (len); | 3391 | } while (len); |
3306 | } | 3392 | } |
3307 | 3393 | ||
3394 | static void __perf_event_header__init_id(struct perf_event_header *header, | ||
3395 | struct perf_sample_data *data, | ||
3396 | struct perf_event *event) | ||
3397 | { | ||
3398 | u64 sample_type = event->attr.sample_type; | ||
3399 | |||
3400 | data->type = sample_type; | ||
3401 | header->size += event->id_header_size; | ||
3402 | |||
3403 | if (sample_type & PERF_SAMPLE_TID) { | ||
3404 | /* namespace issues */ | ||
3405 | data->tid_entry.pid = perf_event_pid(event, current); | ||
3406 | data->tid_entry.tid = perf_event_tid(event, current); | ||
3407 | } | ||
3408 | |||
3409 | if (sample_type & PERF_SAMPLE_TIME) | ||
3410 | data->time = perf_clock(); | ||
3411 | |||
3412 | if (sample_type & PERF_SAMPLE_ID) | ||
3413 | data->id = primary_event_id(event); | ||
3414 | |||
3415 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
3416 | data->stream_id = event->id; | ||
3417 | |||
3418 | if (sample_type & PERF_SAMPLE_CPU) { | ||
3419 | data->cpu_entry.cpu = raw_smp_processor_id(); | ||
3420 | data->cpu_entry.reserved = 0; | ||
3421 | } | ||
3422 | } | ||
3423 | |||
3424 | static void perf_event_header__init_id(struct perf_event_header *header, | ||
3425 | struct perf_sample_data *data, | ||
3426 | struct perf_event *event) | ||
3427 | { | ||
3428 | if (event->attr.sample_id_all) | ||
3429 | __perf_event_header__init_id(header, data, event); | ||
3430 | } | ||
3431 | |||
3432 | static void __perf_event__output_id_sample(struct perf_output_handle *handle, | ||
3433 | struct perf_sample_data *data) | ||
3434 | { | ||
3435 | u64 sample_type = data->type; | ||
3436 | |||
3437 | if (sample_type & PERF_SAMPLE_TID) | ||
3438 | perf_output_put(handle, data->tid_entry); | ||
3439 | |||
3440 | if (sample_type & PERF_SAMPLE_TIME) | ||
3441 | perf_output_put(handle, data->time); | ||
3442 | |||
3443 | if (sample_type & PERF_SAMPLE_ID) | ||
3444 | perf_output_put(handle, data->id); | ||
3445 | |||
3446 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
3447 | perf_output_put(handle, data->stream_id); | ||
3448 | |||
3449 | if (sample_type & PERF_SAMPLE_CPU) | ||
3450 | perf_output_put(handle, data->cpu_entry); | ||
3451 | } | ||
3452 | |||
3453 | static void perf_event__output_id_sample(struct perf_event *event, | ||
3454 | struct perf_output_handle *handle, | ||
3455 | struct perf_sample_data *sample) | ||
3456 | { | ||
3457 | if (event->attr.sample_id_all) | ||
3458 | __perf_event__output_id_sample(handle, sample); | ||
3459 | } | ||
3460 | |||
3308 | int perf_output_begin(struct perf_output_handle *handle, | 3461 | int perf_output_begin(struct perf_output_handle *handle, |
3309 | struct perf_event *event, unsigned int size, | 3462 | struct perf_event *event, unsigned int size, |
3310 | int nmi, int sample) | 3463 | int nmi, int sample) |
@@ -3312,6 +3465,7 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3312 | struct perf_buffer *buffer; | 3465 | struct perf_buffer *buffer; |
3313 | unsigned long tail, offset, head; | 3466 | unsigned long tail, offset, head; |
3314 | int have_lost; | 3467 | int have_lost; |
3468 | struct perf_sample_data sample_data; | ||
3315 | struct { | 3469 | struct { |
3316 | struct perf_event_header header; | 3470 | struct perf_event_header header; |
3317 | u64 id; | 3471 | u64 id; |
@@ -3338,8 +3492,12 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3338 | goto out; | 3492 | goto out; |
3339 | 3493 | ||
3340 | have_lost = local_read(&buffer->lost); | 3494 | have_lost = local_read(&buffer->lost); |
3341 | if (have_lost) | 3495 | if (have_lost) { |
3342 | size += sizeof(lost_event); | 3496 | lost_event.header.size = sizeof(lost_event); |
3497 | perf_event_header__init_id(&lost_event.header, &sample_data, | ||
3498 | event); | ||
3499 | size += lost_event.header.size; | ||
3500 | } | ||
3343 | 3501 | ||
3344 | perf_output_get_handle(handle); | 3502 | perf_output_get_handle(handle); |
3345 | 3503 | ||
@@ -3370,11 +3528,11 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3370 | if (have_lost) { | 3528 | if (have_lost) { |
3371 | lost_event.header.type = PERF_RECORD_LOST; | 3529 | lost_event.header.type = PERF_RECORD_LOST; |
3372 | lost_event.header.misc = 0; | 3530 | lost_event.header.misc = 0; |
3373 | lost_event.header.size = sizeof(lost_event); | ||
3374 | lost_event.id = event->id; | 3531 | lost_event.id = event->id; |
3375 | lost_event.lost = local_xchg(&buffer->lost, 0); | 3532 | lost_event.lost = local_xchg(&buffer->lost, 0); |
3376 | 3533 | ||
3377 | perf_output_put(handle, lost_event); | 3534 | perf_output_put(handle, lost_event); |
3535 | perf_event__output_id_sample(event, handle, &sample_data); | ||
3378 | } | 3536 | } |
3379 | 3537 | ||
3380 | return 0; | 3538 | return 0; |
@@ -3407,28 +3565,6 @@ void perf_output_end(struct perf_output_handle *handle) | |||
3407 | rcu_read_unlock(); | 3565 | rcu_read_unlock(); |
3408 | } | 3566 | } |
3409 | 3567 | ||
3410 | static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) | ||
3411 | { | ||
3412 | /* | ||
3413 | * only top level events have the pid namespace they were created in | ||
3414 | */ | ||
3415 | if (event->parent) | ||
3416 | event = event->parent; | ||
3417 | |||
3418 | return task_tgid_nr_ns(p, event->ns); | ||
3419 | } | ||
3420 | |||
3421 | static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) | ||
3422 | { | ||
3423 | /* | ||
3424 | * only top level events have the pid namespace they were created in | ||
3425 | */ | ||
3426 | if (event->parent) | ||
3427 | event = event->parent; | ||
3428 | |||
3429 | return task_pid_nr_ns(p, event->ns); | ||
3430 | } | ||
3431 | |||
3432 | static void perf_output_read_one(struct perf_output_handle *handle, | 3568 | static void perf_output_read_one(struct perf_output_handle *handle, |
3433 | struct perf_event *event, | 3569 | struct perf_event *event, |
3434 | u64 enabled, u64 running) | 3570 | u64 enabled, u64 running) |
@@ -3603,61 +3739,16 @@ void perf_prepare_sample(struct perf_event_header *header, | |||
3603 | { | 3739 | { |
3604 | u64 sample_type = event->attr.sample_type; | 3740 | u64 sample_type = event->attr.sample_type; |
3605 | 3741 | ||
3606 | data->type = sample_type; | ||
3607 | |||
3608 | header->type = PERF_RECORD_SAMPLE; | 3742 | header->type = PERF_RECORD_SAMPLE; |
3609 | header->size = sizeof(*header); | 3743 | header->size = sizeof(*header) + event->header_size; |
3610 | 3744 | ||
3611 | header->misc = 0; | 3745 | header->misc = 0; |
3612 | header->misc |= perf_misc_flags(regs); | 3746 | header->misc |= perf_misc_flags(regs); |
3613 | 3747 | ||
3614 | if (sample_type & PERF_SAMPLE_IP) { | 3748 | __perf_event_header__init_id(header, data, event); |
3615 | data->ip = perf_instruction_pointer(regs); | ||
3616 | |||
3617 | header->size += sizeof(data->ip); | ||
3618 | } | ||
3619 | |||
3620 | if (sample_type & PERF_SAMPLE_TID) { | ||
3621 | /* namespace issues */ | ||
3622 | data->tid_entry.pid = perf_event_pid(event, current); | ||
3623 | data->tid_entry.tid = perf_event_tid(event, current); | ||
3624 | |||
3625 | header->size += sizeof(data->tid_entry); | ||
3626 | } | ||
3627 | |||
3628 | if (sample_type & PERF_SAMPLE_TIME) { | ||
3629 | data->time = perf_clock(); | ||
3630 | |||
3631 | header->size += sizeof(data->time); | ||
3632 | } | ||
3633 | |||
3634 | if (sample_type & PERF_SAMPLE_ADDR) | ||
3635 | header->size += sizeof(data->addr); | ||
3636 | |||
3637 | if (sample_type & PERF_SAMPLE_ID) { | ||
3638 | data->id = primary_event_id(event); | ||
3639 | |||
3640 | header->size += sizeof(data->id); | ||
3641 | } | ||
3642 | |||
3643 | if (sample_type & PERF_SAMPLE_STREAM_ID) { | ||
3644 | data->stream_id = event->id; | ||
3645 | |||
3646 | header->size += sizeof(data->stream_id); | ||
3647 | } | ||
3648 | |||
3649 | if (sample_type & PERF_SAMPLE_CPU) { | ||
3650 | data->cpu_entry.cpu = raw_smp_processor_id(); | ||
3651 | data->cpu_entry.reserved = 0; | ||
3652 | |||
3653 | header->size += sizeof(data->cpu_entry); | ||
3654 | } | ||
3655 | |||
3656 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
3657 | header->size += sizeof(data->period); | ||
3658 | 3749 | ||
3659 | if (sample_type & PERF_SAMPLE_READ) | 3750 | if (sample_type & PERF_SAMPLE_IP) |
3660 | header->size += perf_event_read_size(event); | 3751 | data->ip = perf_instruction_pointer(regs); |
3661 | 3752 | ||
3662 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { | 3753 | if (sample_type & PERF_SAMPLE_CALLCHAIN) { |
3663 | int size = 1; | 3754 | int size = 1; |
@@ -3722,23 +3813,26 @@ perf_event_read_event(struct perf_event *event, | |||
3722 | struct task_struct *task) | 3813 | struct task_struct *task) |
3723 | { | 3814 | { |
3724 | struct perf_output_handle handle; | 3815 | struct perf_output_handle handle; |
3816 | struct perf_sample_data sample; | ||
3725 | struct perf_read_event read_event = { | 3817 | struct perf_read_event read_event = { |
3726 | .header = { | 3818 | .header = { |
3727 | .type = PERF_RECORD_READ, | 3819 | .type = PERF_RECORD_READ, |
3728 | .misc = 0, | 3820 | .misc = 0, |
3729 | .size = sizeof(read_event) + perf_event_read_size(event), | 3821 | .size = sizeof(read_event) + event->read_size, |
3730 | }, | 3822 | }, |
3731 | .pid = perf_event_pid(event, task), | 3823 | .pid = perf_event_pid(event, task), |
3732 | .tid = perf_event_tid(event, task), | 3824 | .tid = perf_event_tid(event, task), |
3733 | }; | 3825 | }; |
3734 | int ret; | 3826 | int ret; |
3735 | 3827 | ||
3828 | perf_event_header__init_id(&read_event.header, &sample, event); | ||
3736 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); | 3829 | ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); |
3737 | if (ret) | 3830 | if (ret) |
3738 | return; | 3831 | return; |
3739 | 3832 | ||
3740 | perf_output_put(&handle, read_event); | 3833 | perf_output_put(&handle, read_event); |
3741 | perf_output_read(&handle, event); | 3834 | perf_output_read(&handle, event); |
3835 | perf_event__output_id_sample(event, &handle, &sample); | ||
3742 | 3836 | ||
3743 | perf_output_end(&handle); | 3837 | perf_output_end(&handle); |
3744 | } | 3838 | } |
@@ -3768,14 +3862,16 @@ static void perf_event_task_output(struct perf_event *event, | |||
3768 | struct perf_task_event *task_event) | 3862 | struct perf_task_event *task_event) |
3769 | { | 3863 | { |
3770 | struct perf_output_handle handle; | 3864 | struct perf_output_handle handle; |
3865 | struct perf_sample_data sample; | ||
3771 | struct task_struct *task = task_event->task; | 3866 | struct task_struct *task = task_event->task; |
3772 | int size, ret; | 3867 | int ret, size = task_event->event_id.header.size; |
3773 | 3868 | ||
3774 | size = task_event->event_id.header.size; | 3869 | perf_event_header__init_id(&task_event->event_id.header, &sample, event); |
3775 | ret = perf_output_begin(&handle, event, size, 0, 0); | ||
3776 | 3870 | ||
3871 | ret = perf_output_begin(&handle, event, | ||
3872 | task_event->event_id.header.size, 0, 0); | ||
3777 | if (ret) | 3873 | if (ret) |
3778 | return; | 3874 | goto out; |
3779 | 3875 | ||
3780 | task_event->event_id.pid = perf_event_pid(event, task); | 3876 | task_event->event_id.pid = perf_event_pid(event, task); |
3781 | task_event->event_id.ppid = perf_event_pid(event, current); | 3877 | task_event->event_id.ppid = perf_event_pid(event, current); |
@@ -3785,7 +3881,11 @@ static void perf_event_task_output(struct perf_event *event, | |||
3785 | 3881 | ||
3786 | perf_output_put(&handle, task_event->event_id); | 3882 | perf_output_put(&handle, task_event->event_id); |
3787 | 3883 | ||
3884 | perf_event__output_id_sample(event, &handle, &sample); | ||
3885 | |||
3788 | perf_output_end(&handle); | 3886 | perf_output_end(&handle); |
3887 | out: | ||
3888 | task_event->event_id.header.size = size; | ||
3789 | } | 3889 | } |
3790 | 3890 | ||
3791 | static int perf_event_task_match(struct perf_event *event) | 3891 | static int perf_event_task_match(struct perf_event *event) |
@@ -3900,11 +4000,16 @@ static void perf_event_comm_output(struct perf_event *event, | |||
3900 | struct perf_comm_event *comm_event) | 4000 | struct perf_comm_event *comm_event) |
3901 | { | 4001 | { |
3902 | struct perf_output_handle handle; | 4002 | struct perf_output_handle handle; |
4003 | struct perf_sample_data sample; | ||
3903 | int size = comm_event->event_id.header.size; | 4004 | int size = comm_event->event_id.header.size; |
3904 | int ret = perf_output_begin(&handle, event, size, 0, 0); | 4005 | int ret; |
4006 | |||
4007 | perf_event_header__init_id(&comm_event->event_id.header, &sample, event); | ||
4008 | ret = perf_output_begin(&handle, event, | ||
4009 | comm_event->event_id.header.size, 0, 0); | ||
3905 | 4010 | ||
3906 | if (ret) | 4011 | if (ret) |
3907 | return; | 4012 | goto out; |
3908 | 4013 | ||
3909 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); | 4014 | comm_event->event_id.pid = perf_event_pid(event, comm_event->task); |
3910 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); | 4015 | comm_event->event_id.tid = perf_event_tid(event, comm_event->task); |
@@ -3912,7 +4017,12 @@ static void perf_event_comm_output(struct perf_event *event, | |||
3912 | perf_output_put(&handle, comm_event->event_id); | 4017 | perf_output_put(&handle, comm_event->event_id); |
3913 | perf_output_copy(&handle, comm_event->comm, | 4018 | perf_output_copy(&handle, comm_event->comm, |
3914 | comm_event->comm_size); | 4019 | comm_event->comm_size); |
4020 | |||
4021 | perf_event__output_id_sample(event, &handle, &sample); | ||
4022 | |||
3915 | perf_output_end(&handle); | 4023 | perf_output_end(&handle); |
4024 | out: | ||
4025 | comm_event->event_id.header.size = size; | ||
3916 | } | 4026 | } |
3917 | 4027 | ||
3918 | static int perf_event_comm_match(struct perf_event *event) | 4028 | static int perf_event_comm_match(struct perf_event *event) |
@@ -3957,7 +4067,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | |||
3957 | comm_event->comm_size = size; | 4067 | comm_event->comm_size = size; |
3958 | 4068 | ||
3959 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; | 4069 | comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; |
3960 | |||
3961 | rcu_read_lock(); | 4070 | rcu_read_lock(); |
3962 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 4071 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
3963 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); | 4072 | cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); |
@@ -4038,11 +4147,15 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
4038 | struct perf_mmap_event *mmap_event) | 4147 | struct perf_mmap_event *mmap_event) |
4039 | { | 4148 | { |
4040 | struct perf_output_handle handle; | 4149 | struct perf_output_handle handle; |
4150 | struct perf_sample_data sample; | ||
4041 | int size = mmap_event->event_id.header.size; | 4151 | int size = mmap_event->event_id.header.size; |
4042 | int ret = perf_output_begin(&handle, event, size, 0, 0); | 4152 | int ret; |
4043 | 4153 | ||
4154 | perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); | ||
4155 | ret = perf_output_begin(&handle, event, | ||
4156 | mmap_event->event_id.header.size, 0, 0); | ||
4044 | if (ret) | 4157 | if (ret) |
4045 | return; | 4158 | goto out; |
4046 | 4159 | ||
4047 | mmap_event->event_id.pid = perf_event_pid(event, current); | 4160 | mmap_event->event_id.pid = perf_event_pid(event, current); |
4048 | mmap_event->event_id.tid = perf_event_tid(event, current); | 4161 | mmap_event->event_id.tid = perf_event_tid(event, current); |
@@ -4050,7 +4163,12 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
4050 | perf_output_put(&handle, mmap_event->event_id); | 4163 | perf_output_put(&handle, mmap_event->event_id); |
4051 | perf_output_copy(&handle, mmap_event->file_name, | 4164 | perf_output_copy(&handle, mmap_event->file_name, |
4052 | mmap_event->file_size); | 4165 | mmap_event->file_size); |
4166 | |||
4167 | perf_event__output_id_sample(event, &handle, &sample); | ||
4168 | |||
4053 | perf_output_end(&handle); | 4169 | perf_output_end(&handle); |
4170 | out: | ||
4171 | mmap_event->event_id.header.size = size; | ||
4054 | } | 4172 | } |
4055 | 4173 | ||
4056 | static int perf_event_mmap_match(struct perf_event *event, | 4174 | static int perf_event_mmap_match(struct perf_event *event, |
@@ -4205,6 +4323,7 @@ void perf_event_mmap(struct vm_area_struct *vma) | |||
4205 | static void perf_log_throttle(struct perf_event *event, int enable) | 4323 | static void perf_log_throttle(struct perf_event *event, int enable) |
4206 | { | 4324 | { |
4207 | struct perf_output_handle handle; | 4325 | struct perf_output_handle handle; |
4326 | struct perf_sample_data sample; | ||
4208 | int ret; | 4327 | int ret; |
4209 | 4328 | ||
4210 | struct { | 4329 | struct { |
@@ -4226,11 +4345,15 @@ static void perf_log_throttle(struct perf_event *event, int enable) | |||
4226 | if (enable) | 4345 | if (enable) |
4227 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; | 4346 | throttle_event.header.type = PERF_RECORD_UNTHROTTLE; |
4228 | 4347 | ||
4229 | ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); | 4348 | perf_event_header__init_id(&throttle_event.header, &sample, event); |
4349 | |||
4350 | ret = perf_output_begin(&handle, event, | ||
4351 | throttle_event.header.size, 1, 0); | ||
4230 | if (ret) | 4352 | if (ret) |
4231 | return; | 4353 | return; |
4232 | 4354 | ||
4233 | perf_output_put(&handle, throttle_event); | 4355 | perf_output_put(&handle, throttle_event); |
4356 | perf_event__output_id_sample(event, &handle, &sample); | ||
4234 | perf_output_end(&handle); | 4357 | perf_output_end(&handle); |
4235 | } | 4358 | } |
4236 | 4359 | ||
@@ -4246,6 +4369,13 @@ static int __perf_event_overflow(struct perf_event *event, int nmi, | |||
4246 | struct hw_perf_event *hwc = &event->hw; | 4369 | struct hw_perf_event *hwc = &event->hw; |
4247 | int ret = 0; | 4370 | int ret = 0; |
4248 | 4371 | ||
4372 | /* | ||
4373 | * Non-sampling counters might still use the PMI to fold short | ||
4374 | * hardware counters, ignore those. | ||
4375 | */ | ||
4376 | if (unlikely(!is_sampling_event(event))) | ||
4377 | return 0; | ||
4378 | |||
4249 | if (!throttle) { | 4379 | if (!throttle) { |
4250 | hwc->interrupts++; | 4380 | hwc->interrupts++; |
4251 | } else { | 4381 | } else { |
@@ -4391,7 +4521,7 @@ static void perf_swevent_event(struct perf_event *event, u64 nr, | |||
4391 | if (!regs) | 4521 | if (!regs) |
4392 | return; | 4522 | return; |
4393 | 4523 | ||
4394 | if (!hwc->sample_period) | 4524 | if (!is_sampling_event(event)) |
4395 | return; | 4525 | return; |
4396 | 4526 | ||
4397 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) | 4527 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
@@ -4554,7 +4684,7 @@ static int perf_swevent_add(struct perf_event *event, int flags) | |||
4554 | struct hw_perf_event *hwc = &event->hw; | 4684 | struct hw_perf_event *hwc = &event->hw; |
4555 | struct hlist_head *head; | 4685 | struct hlist_head *head; |
4556 | 4686 | ||
4557 | if (hwc->sample_period) { | 4687 | if (is_sampling_event(event)) { |
4558 | hwc->last_period = hwc->sample_period; | 4688 | hwc->last_period = hwc->sample_period; |
4559 | perf_swevent_set_period(event); | 4689 | perf_swevent_set_period(event); |
4560 | } | 4690 | } |
@@ -4811,15 +4941,6 @@ static int perf_tp_event_init(struct perf_event *event) | |||
4811 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | 4941 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
4812 | return -ENOENT; | 4942 | return -ENOENT; |
4813 | 4943 | ||
4814 | /* | ||
4815 | * Raw tracepoint data is a severe data leak, only allow root to | ||
4816 | * have these. | ||
4817 | */ | ||
4818 | if ((event->attr.sample_type & PERF_SAMPLE_RAW) && | ||
4819 | perf_paranoid_tracepoint_raw() && | ||
4820 | !capable(CAP_SYS_ADMIN)) | ||
4821 | return -EPERM; | ||
4822 | |||
4823 | err = perf_trace_init(event); | 4944 | err = perf_trace_init(event); |
4824 | if (err) | 4945 | if (err) |
4825 | return err; | 4946 | return err; |
@@ -4842,7 +4963,7 @@ static struct pmu perf_tracepoint = { | |||
4842 | 4963 | ||
4843 | static inline void perf_tp_register(void) | 4964 | static inline void perf_tp_register(void) |
4844 | { | 4965 | { |
4845 | perf_pmu_register(&perf_tracepoint); | 4966 | perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); |
4846 | } | 4967 | } |
4847 | 4968 | ||
4848 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) | 4969 | static int perf_event_set_filter(struct perf_event *event, void __user *arg) |
@@ -4932,31 +5053,33 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
4932 | static void perf_swevent_start_hrtimer(struct perf_event *event) | 5053 | static void perf_swevent_start_hrtimer(struct perf_event *event) |
4933 | { | 5054 | { |
4934 | struct hw_perf_event *hwc = &event->hw; | 5055 | struct hw_perf_event *hwc = &event->hw; |
5056 | s64 period; | ||
5057 | |||
5058 | if (!is_sampling_event(event)) | ||
5059 | return; | ||
4935 | 5060 | ||
4936 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 5061 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
4937 | hwc->hrtimer.function = perf_swevent_hrtimer; | 5062 | hwc->hrtimer.function = perf_swevent_hrtimer; |
4938 | if (hwc->sample_period) { | ||
4939 | s64 period = local64_read(&hwc->period_left); | ||
4940 | 5063 | ||
4941 | if (period) { | 5064 | period = local64_read(&hwc->period_left); |
4942 | if (period < 0) | 5065 | if (period) { |
4943 | period = 10000; | 5066 | if (period < 0) |
5067 | period = 10000; | ||
4944 | 5068 | ||
4945 | local64_set(&hwc->period_left, 0); | 5069 | local64_set(&hwc->period_left, 0); |
4946 | } else { | 5070 | } else { |
4947 | period = max_t(u64, 10000, hwc->sample_period); | 5071 | period = max_t(u64, 10000, hwc->sample_period); |
4948 | } | 5072 | } |
4949 | __hrtimer_start_range_ns(&hwc->hrtimer, | 5073 | __hrtimer_start_range_ns(&hwc->hrtimer, |
4950 | ns_to_ktime(period), 0, | 5074 | ns_to_ktime(period), 0, |
4951 | HRTIMER_MODE_REL_PINNED, 0); | 5075 | HRTIMER_MODE_REL_PINNED, 0); |
4952 | } | ||
4953 | } | 5076 | } |
4954 | 5077 | ||
4955 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | 5078 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) |
4956 | { | 5079 | { |
4957 | struct hw_perf_event *hwc = &event->hw; | 5080 | struct hw_perf_event *hwc = &event->hw; |
4958 | 5081 | ||
4959 | if (hwc->sample_period) { | 5082 | if (is_sampling_event(event)) { |
4960 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | 5083 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); |
4961 | local64_set(&hwc->period_left, ktime_to_ns(remaining)); | 5084 | local64_set(&hwc->period_left, ktime_to_ns(remaining)); |
4962 | 5085 | ||
@@ -5184,8 +5307,61 @@ static void free_pmu_context(struct pmu *pmu) | |||
5184 | out: | 5307 | out: |
5185 | mutex_unlock(&pmus_lock); | 5308 | mutex_unlock(&pmus_lock); |
5186 | } | 5309 | } |
5310 | static struct idr pmu_idr; | ||
5311 | |||
5312 | static ssize_t | ||
5313 | type_show(struct device *dev, struct device_attribute *attr, char *page) | ||
5314 | { | ||
5315 | struct pmu *pmu = dev_get_drvdata(dev); | ||
5316 | |||
5317 | return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); | ||
5318 | } | ||
5319 | |||
5320 | static struct device_attribute pmu_dev_attrs[] = { | ||
5321 | __ATTR_RO(type), | ||
5322 | __ATTR_NULL, | ||
5323 | }; | ||
5324 | |||
5325 | static int pmu_bus_running; | ||
5326 | static struct bus_type pmu_bus = { | ||
5327 | .name = "event_source", | ||
5328 | .dev_attrs = pmu_dev_attrs, | ||
5329 | }; | ||
5330 | |||
5331 | static void pmu_dev_release(struct device *dev) | ||
5332 | { | ||
5333 | kfree(dev); | ||
5334 | } | ||
5335 | |||
5336 | static int pmu_dev_alloc(struct pmu *pmu) | ||
5337 | { | ||
5338 | int ret = -ENOMEM; | ||
5339 | |||
5340 | pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
5341 | if (!pmu->dev) | ||
5342 | goto out; | ||
5343 | |||
5344 | device_initialize(pmu->dev); | ||
5345 | ret = dev_set_name(pmu->dev, "%s", pmu->name); | ||
5346 | if (ret) | ||
5347 | goto free_dev; | ||
5348 | |||
5349 | dev_set_drvdata(pmu->dev, pmu); | ||
5350 | pmu->dev->bus = &pmu_bus; | ||
5351 | pmu->dev->release = pmu_dev_release; | ||
5352 | ret = device_add(pmu->dev); | ||
5353 | if (ret) | ||
5354 | goto free_dev; | ||
5355 | |||
5356 | out: | ||
5357 | return ret; | ||
5358 | |||
5359 | free_dev: | ||
5360 | put_device(pmu->dev); | ||
5361 | goto out; | ||
5362 | } | ||
5187 | 5363 | ||
5188 | int perf_pmu_register(struct pmu *pmu) | 5364 | int perf_pmu_register(struct pmu *pmu, char *name, int type) |
5189 | { | 5365 | { |
5190 | int cpu, ret; | 5366 | int cpu, ret; |
5191 | 5367 | ||
@@ -5195,13 +5371,38 @@ int perf_pmu_register(struct pmu *pmu) | |||
5195 | if (!pmu->pmu_disable_count) | 5371 | if (!pmu->pmu_disable_count) |
5196 | goto unlock; | 5372 | goto unlock; |
5197 | 5373 | ||
5374 | pmu->type = -1; | ||
5375 | if (!name) | ||
5376 | goto skip_type; | ||
5377 | pmu->name = name; | ||
5378 | |||
5379 | if (type < 0) { | ||
5380 | int err = idr_pre_get(&pmu_idr, GFP_KERNEL); | ||
5381 | if (!err) | ||
5382 | goto free_pdc; | ||
5383 | |||
5384 | err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type); | ||
5385 | if (err) { | ||
5386 | ret = err; | ||
5387 | goto free_pdc; | ||
5388 | } | ||
5389 | } | ||
5390 | pmu->type = type; | ||
5391 | |||
5392 | if (pmu_bus_running) { | ||
5393 | ret = pmu_dev_alloc(pmu); | ||
5394 | if (ret) | ||
5395 | goto free_idr; | ||
5396 | } | ||
5397 | |||
5398 | skip_type: | ||
5198 | pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); | 5399 | pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); |
5199 | if (pmu->pmu_cpu_context) | 5400 | if (pmu->pmu_cpu_context) |
5200 | goto got_cpu_context; | 5401 | goto got_cpu_context; |
5201 | 5402 | ||
5202 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); | 5403 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); |
5203 | if (!pmu->pmu_cpu_context) | 5404 | if (!pmu->pmu_cpu_context) |
5204 | goto free_pdc; | 5405 | goto free_dev; |
5205 | 5406 | ||
5206 | for_each_possible_cpu(cpu) { | 5407 | for_each_possible_cpu(cpu) { |
5207 | struct perf_cpu_context *cpuctx; | 5408 | struct perf_cpu_context *cpuctx; |
@@ -5245,6 +5446,14 @@ unlock: | |||
5245 | 5446 | ||
5246 | return ret; | 5447 | return ret; |
5247 | 5448 | ||
5449 | free_dev: | ||
5450 | device_del(pmu->dev); | ||
5451 | put_device(pmu->dev); | ||
5452 | |||
5453 | free_idr: | ||
5454 | if (pmu->type >= PERF_TYPE_MAX) | ||
5455 | idr_remove(&pmu_idr, pmu->type); | ||
5456 | |||
5248 | free_pdc: | 5457 | free_pdc: |
5249 | free_percpu(pmu->pmu_disable_count); | 5458 | free_percpu(pmu->pmu_disable_count); |
5250 | goto unlock; | 5459 | goto unlock; |
@@ -5264,6 +5473,10 @@ void perf_pmu_unregister(struct pmu *pmu) | |||
5264 | synchronize_rcu(); | 5473 | synchronize_rcu(); |
5265 | 5474 | ||
5266 | free_percpu(pmu->pmu_disable_count); | 5475 | free_percpu(pmu->pmu_disable_count); |
5476 | if (pmu->type >= PERF_TYPE_MAX) | ||
5477 | idr_remove(&pmu_idr, pmu->type); | ||
5478 | device_del(pmu->dev); | ||
5479 | put_device(pmu->dev); | ||
5267 | free_pmu_context(pmu); | 5480 | free_pmu_context(pmu); |
5268 | } | 5481 | } |
5269 | 5482 | ||
@@ -5273,6 +5486,13 @@ struct pmu *perf_init_event(struct perf_event *event) | |||
5273 | int idx; | 5486 | int idx; |
5274 | 5487 | ||
5275 | idx = srcu_read_lock(&pmus_srcu); | 5488 | idx = srcu_read_lock(&pmus_srcu); |
5489 | |||
5490 | rcu_read_lock(); | ||
5491 | pmu = idr_find(&pmu_idr, event->attr.type); | ||
5492 | rcu_read_unlock(); | ||
5493 | if (pmu) | ||
5494 | goto unlock; | ||
5495 | |||
5276 | list_for_each_entry_rcu(pmu, &pmus, entry) { | 5496 | list_for_each_entry_rcu(pmu, &pmus, entry) { |
5277 | int ret = pmu->event_init(event); | 5497 | int ret = pmu->event_init(event); |
5278 | if (!ret) | 5498 | if (!ret) |
@@ -5738,6 +5958,12 @@ SYSCALL_DEFINE5(perf_event_open, | |||
5738 | mutex_unlock(¤t->perf_event_mutex); | 5958 | mutex_unlock(¤t->perf_event_mutex); |
5739 | 5959 | ||
5740 | /* | 5960 | /* |
5961 | * Precalculate sample_data sizes | ||
5962 | */ | ||
5963 | perf_event__header_size(event); | ||
5964 | perf_event__id_header_size(event); | ||
5965 | |||
5966 | /* | ||
5741 | * Drop the reference on the group_event after placing the | 5967 | * Drop the reference on the group_event after placing the |
5742 | * new event on the sibling_list. This ensures destruction | 5968 | * new event on the sibling_list. This ensures destruction |
5743 | * of the group leader will find the pointer to itself in | 5969 | * of the group leader will find the pointer to itself in |
@@ -6090,6 +6316,12 @@ inherit_event(struct perf_event *parent_event, | |||
6090 | child_event->overflow_handler = parent_event->overflow_handler; | 6316 | child_event->overflow_handler = parent_event->overflow_handler; |
6091 | 6317 | ||
6092 | /* | 6318 | /* |
6319 | * Precalculate sample_data sizes | ||
6320 | */ | ||
6321 | perf_event__header_size(child_event); | ||
6322 | perf_event__id_header_size(child_event); | ||
6323 | |||
6324 | /* | ||
6093 | * Link it up in the child's context: | 6325 | * Link it up in the child's context: |
6094 | */ | 6326 | */ |
6095 | raw_spin_lock_irqsave(&child_ctx->lock, flags); | 6327 | raw_spin_lock_irqsave(&child_ctx->lock, flags); |
@@ -6320,7 +6552,7 @@ static void __cpuinit perf_event_init_cpu(int cpu) | |||
6320 | mutex_unlock(&swhash->hlist_mutex); | 6552 | mutex_unlock(&swhash->hlist_mutex); |
6321 | } | 6553 | } |
6322 | 6554 | ||
6323 | #ifdef CONFIG_HOTPLUG_CPU | 6555 | #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC |
6324 | static void perf_pmu_rotate_stop(struct pmu *pmu) | 6556 | static void perf_pmu_rotate_stop(struct pmu *pmu) |
6325 | { | 6557 | { |
6326 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); | 6558 | struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); |
@@ -6374,6 +6606,26 @@ static void perf_event_exit_cpu(int cpu) | |||
6374 | static inline void perf_event_exit_cpu(int cpu) { } | 6606 | static inline void perf_event_exit_cpu(int cpu) { } |
6375 | #endif | 6607 | #endif |
6376 | 6608 | ||
6609 | static int | ||
6610 | perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) | ||
6611 | { | ||
6612 | int cpu; | ||
6613 | |||
6614 | for_each_online_cpu(cpu) | ||
6615 | perf_event_exit_cpu(cpu); | ||
6616 | |||
6617 | return NOTIFY_OK; | ||
6618 | } | ||
6619 | |||
6620 | /* | ||
6621 | * Run the perf reboot notifier at the very last possible moment so that | ||
6622 | * the generic watchdog code runs as long as possible. | ||
6623 | */ | ||
6624 | static struct notifier_block perf_reboot_notifier = { | ||
6625 | .notifier_call = perf_reboot, | ||
6626 | .priority = INT_MIN, | ||
6627 | }; | ||
6628 | |||
6377 | static int __cpuinit | 6629 | static int __cpuinit |
6378 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | 6630 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) |
6379 | { | 6631 | { |
@@ -6402,14 +6654,45 @@ void __init perf_event_init(void) | |||
6402 | { | 6654 | { |
6403 | int ret; | 6655 | int ret; |
6404 | 6656 | ||
6657 | idr_init(&pmu_idr); | ||
6658 | |||
6405 | perf_event_init_all_cpus(); | 6659 | perf_event_init_all_cpus(); |
6406 | init_srcu_struct(&pmus_srcu); | 6660 | init_srcu_struct(&pmus_srcu); |
6407 | perf_pmu_register(&perf_swevent); | 6661 | perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); |
6408 | perf_pmu_register(&perf_cpu_clock); | 6662 | perf_pmu_register(&perf_cpu_clock, NULL, -1); |
6409 | perf_pmu_register(&perf_task_clock); | 6663 | perf_pmu_register(&perf_task_clock, NULL, -1); |
6410 | perf_tp_register(); | 6664 | perf_tp_register(); |
6411 | perf_cpu_notifier(perf_cpu_notify); | 6665 | perf_cpu_notifier(perf_cpu_notify); |
6666 | register_reboot_notifier(&perf_reboot_notifier); | ||
6412 | 6667 | ||
6413 | ret = init_hw_breakpoint(); | 6668 | ret = init_hw_breakpoint(); |
6414 | WARN(ret, "hw_breakpoint initialization failed with: %d", ret); | 6669 | WARN(ret, "hw_breakpoint initialization failed with: %d", ret); |
6415 | } | 6670 | } |
6671 | |||
6672 | static int __init perf_event_sysfs_init(void) | ||
6673 | { | ||
6674 | struct pmu *pmu; | ||
6675 | int ret; | ||
6676 | |||
6677 | mutex_lock(&pmus_lock); | ||
6678 | |||
6679 | ret = bus_register(&pmu_bus); | ||
6680 | if (ret) | ||
6681 | goto unlock; | ||
6682 | |||
6683 | list_for_each_entry(pmu, &pmus, entry) { | ||
6684 | if (!pmu->name || pmu->type < 0) | ||
6685 | continue; | ||
6686 | |||
6687 | ret = pmu_dev_alloc(pmu); | ||
6688 | WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); | ||
6689 | } | ||
6690 | pmu_bus_running = 1; | ||
6691 | ret = 0; | ||
6692 | |||
6693 | unlock: | ||
6694 | mutex_unlock(&pmus_lock); | ||
6695 | |||
6696 | return ret; | ||
6697 | } | ||
6698 | device_initcall(perf_event_sysfs_init); | ||
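The perf_event.c hunks above hand out dynamic PMU type numbers above PERF_TYPE_MAX via an IDR at registration time and then resolve event->attr.type directly in perf_init_event() before falling back to the pmus list walk. The short userspace sketch below is only an illustrative analogue of that register-then-look-up-by-id pattern, not kernel code; FAKE_TYPE_MAX, MAX_DYNAMIC, struct fake_pmu and the function names are made up for the example.

/*
 * Illustrative userspace analogue, NOT kernel code: dynamic type ids
 * handed out above a fixed built-in range and used for direct lookup,
 * mirroring the pmu_idr usage in the hunks above.
 */
#include <stdio.h>

#define FAKE_TYPE_MAX	6	/* ids below this are reserved for built-ins */
#define MAX_DYNAMIC	32

struct fake_pmu {
	const char *name;
	int type;		/* -1 until registered */
};

static struct fake_pmu *registry[FAKE_TYPE_MAX + MAX_DYNAMIC];

/* Hand out the first free id >= FAKE_TYPE_MAX, like idr_get_new_above(). */
static int fake_pmu_register(struct fake_pmu *pmu)
{
	int id;

	for (id = FAKE_TYPE_MAX; id < FAKE_TYPE_MAX + MAX_DYNAMIC; id++) {
		if (registry[id] == NULL) {
			registry[id] = pmu;
			pmu->type = id;
			return 0;
		}
	}
	return -1;		/* table full */
}

/* Direct lookup by type, like the idr_find() added to perf_init_event(). */
static struct fake_pmu *fake_pmu_find(int type)
{
	if (type < 0 || type >= FAKE_TYPE_MAX + MAX_DYNAMIC)
		return NULL;
	return registry[type];
}

int main(void)
{
	struct fake_pmu uncore = { .name = "fake_uncore", .type = -1 };

	if (fake_pmu_register(&uncore) == 0) {
		printf("registered %s as type %d\n", uncore.name, uncore.type);
		printf("lookup(%d) -> %s\n", uncore.type,
		       fake_pmu_find(uncore.type)->name);
	}
	return 0;
}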
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index ecf770509d0..031d5e3a619 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/suspend.h> | 24 | #include <linux/suspend.h> |
25 | #include <trace/events/power.h> | ||
25 | 26 | ||
26 | #include "power.h" | 27 | #include "power.h" |
27 | 28 | ||
@@ -201,6 +202,7 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
201 | if (!suspend_ops) | 202 | if (!suspend_ops) |
202 | return -ENOSYS; | 203 | return -ENOSYS; |
203 | 204 | ||
205 | trace_machine_suspend(state); | ||
204 | if (suspend_ops->begin) { | 206 | if (suspend_ops->begin) { |
205 | error = suspend_ops->begin(state); | 207 | error = suspend_ops->begin(state); |
206 | if (error) | 208 | if (error) |
@@ -229,6 +231,7 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
229 | Close: | 231 | Close: |
230 | if (suspend_ops->end) | 232 | if (suspend_ops->end) |
231 | suspend_ops->end(); | 233 | suspend_ops->end(); |
234 | trace_machine_suspend(PWR_EVENT_EXIT); | ||
232 | return error; | 235 | return error; |
233 | 236 | ||
234 | Recover_platform: | 237 | Recover_platform: |
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c index d806735342a..03449372474 100644 --- a/kernel/rcutiny.c +++ b/kernel/rcutiny.c | |||
@@ -36,31 +36,16 @@ | |||
36 | #include <linux/time.h> | 36 | #include <linux/time.h> |
37 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
38 | 38 | ||
39 | /* Global control variables for rcupdate callback mechanism. */ | 39 | /* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */ |
40 | struct rcu_ctrlblk { | 40 | static struct task_struct *rcu_kthread_task; |
41 | struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ | 41 | static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq); |
42 | struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ | 42 | static unsigned long have_rcu_kthread_work; |
43 | struct rcu_head **curtail; /* ->next pointer of last CB. */ | 43 | static void invoke_rcu_kthread(void); |
44 | }; | ||
45 | |||
46 | /* Definition for rcupdate control block. */ | ||
47 | static struct rcu_ctrlblk rcu_sched_ctrlblk = { | ||
48 | .donetail = &rcu_sched_ctrlblk.rcucblist, | ||
49 | .curtail = &rcu_sched_ctrlblk.rcucblist, | ||
50 | }; | ||
51 | |||
52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | ||
53 | .donetail = &rcu_bh_ctrlblk.rcucblist, | ||
54 | .curtail = &rcu_bh_ctrlblk.rcucblist, | ||
55 | }; | ||
56 | |||
57 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
58 | int rcu_scheduler_active __read_mostly; | ||
59 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
60 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
61 | 44 | ||
62 | /* Forward declarations for rcutiny_plugin.h. */ | 45 | /* Forward declarations for rcutiny_plugin.h. */ |
63 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp); | 46 | struct rcu_ctrlblk; |
47 | static void rcu_process_callbacks(struct rcu_ctrlblk *rcp); | ||
48 | static int rcu_kthread(void *arg); | ||
64 | static void __call_rcu(struct rcu_head *head, | 49 | static void __call_rcu(struct rcu_head *head, |
65 | void (*func)(struct rcu_head *rcu), | 50 | void (*func)(struct rcu_head *rcu), |
66 | struct rcu_ctrlblk *rcp); | 51 | struct rcu_ctrlblk *rcp); |
@@ -123,7 +108,7 @@ void rcu_sched_qs(int cpu) | |||
123 | { | 108 | { |
124 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + | 109 | if (rcu_qsctr_help(&rcu_sched_ctrlblk) + |
125 | rcu_qsctr_help(&rcu_bh_ctrlblk)) | 110 | rcu_qsctr_help(&rcu_bh_ctrlblk)) |
126 | raise_softirq(RCU_SOFTIRQ); | 111 | invoke_rcu_kthread(); |
127 | } | 112 | } |
128 | 113 | ||
129 | /* | 114 | /* |
@@ -132,7 +117,7 @@ void rcu_sched_qs(int cpu) | |||
132 | void rcu_bh_qs(int cpu) | 117 | void rcu_bh_qs(int cpu) |
133 | { | 118 | { |
134 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) | 119 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) |
135 | raise_softirq(RCU_SOFTIRQ); | 120 | invoke_rcu_kthread(); |
136 | } | 121 | } |
137 | 122 | ||
138 | /* | 123 | /* |
@@ -152,13 +137,14 @@ void rcu_check_callbacks(int cpu, int user) | |||
152 | } | 137 | } |
153 | 138 | ||
154 | /* | 139 | /* |
155 | * Helper function for rcu_process_callbacks() that operates on the | 140 | * Invoke the RCU callbacks on the specified rcu_ctrlblk structure |
156 | * specified rcu_ctrlblk structure. | 141 | * whose grace period has elapsed. |
157 | */ | 142 | */ |
158 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | 143 | static void rcu_process_callbacks(struct rcu_ctrlblk *rcp) |
159 | { | 144 | { |
160 | struct rcu_head *next, *list; | 145 | struct rcu_head *next, *list; |
161 | unsigned long flags; | 146 | unsigned long flags; |
147 | RCU_TRACE(int cb_count = 0); | ||
162 | 148 | ||
163 | /* If no RCU callbacks ready to invoke, just return. */ | 149 | /* If no RCU callbacks ready to invoke, just return. */ |
164 | if (&rcp->rcucblist == rcp->donetail) | 150 | if (&rcp->rcucblist == rcp->donetail) |
@@ -180,19 +166,58 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | |||
180 | next = list->next; | 166 | next = list->next; |
181 | prefetch(next); | 167 | prefetch(next); |
182 | debug_rcu_head_unqueue(list); | 168 | debug_rcu_head_unqueue(list); |
169 | local_bh_disable(); | ||
183 | list->func(list); | 170 | list->func(list); |
171 | local_bh_enable(); | ||
184 | list = next; | 172 | list = next; |
173 | RCU_TRACE(cb_count++); | ||
185 | } | 174 | } |
175 | RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count)); | ||
186 | } | 176 | } |
187 | 177 | ||
188 | /* | 178 | /* |
189 | * Invoke any callbacks whose grace period has completed. | 179 | * This kthread invokes RCU callbacks whose grace periods have |
180 | * elapsed. It is awakened as needed, and takes the place of the | ||
181 | * RCU_SOFTIRQ that was used previously for this purpose. | ||
182 | * This is a kthread, but it is never stopped, at least not until | ||
183 | * the system goes down. | ||
190 | */ | 184 | */ |
191 | static void rcu_process_callbacks(struct softirq_action *unused) | 185 | static int rcu_kthread(void *arg) |
192 | { | 186 | { |
193 | __rcu_process_callbacks(&rcu_sched_ctrlblk); | 187 | unsigned long work; |
194 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | 188 | unsigned long morework; |
195 | rcu_preempt_process_callbacks(); | 189 | unsigned long flags; |
190 | |||
191 | for (;;) { | ||
192 | wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0); | ||
193 | morework = rcu_boost(); | ||
194 | local_irq_save(flags); | ||
195 | work = have_rcu_kthread_work; | ||
196 | have_rcu_kthread_work = morework; | ||
197 | local_irq_restore(flags); | ||
198 | if (work) { | ||
199 | rcu_process_callbacks(&rcu_sched_ctrlblk); | ||
200 | rcu_process_callbacks(&rcu_bh_ctrlblk); | ||
201 | rcu_preempt_process_callbacks(); | ||
202 | } | ||
203 | schedule_timeout_interruptible(1); /* Leave CPU for others. */ | ||
204 | } | ||
205 | |||
206 | return 0; /* Not reached, but needed to shut gcc up. */ | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * Wake up rcu_kthread() to process callbacks now eligible for invocation | ||
211 | * or to boost readers. | ||
212 | */ | ||
213 | static void invoke_rcu_kthread(void) | ||
214 | { | ||
215 | unsigned long flags; | ||
216 | |||
217 | local_irq_save(flags); | ||
218 | have_rcu_kthread_work = 1; | ||
219 | wake_up(&rcu_kthread_wq); | ||
220 | local_irq_restore(flags); | ||
196 | } | 221 | } |
197 | 222 | ||
198 | /* | 223 | /* |
@@ -230,6 +255,7 @@ static void __call_rcu(struct rcu_head *head, | |||
230 | local_irq_save(flags); | 255 | local_irq_save(flags); |
231 | *rcp->curtail = head; | 256 | *rcp->curtail = head; |
232 | rcp->curtail = &head->next; | 257 | rcp->curtail = &head->next; |
258 | RCU_TRACE(rcp->qlen++); | ||
233 | local_irq_restore(flags); | 259 | local_irq_restore(flags); |
234 | } | 260 | } |
235 | 261 | ||
@@ -282,7 +308,16 @@ void rcu_barrier_sched(void) | |||
282 | } | 308 | } |
283 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | 309 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
284 | 310 | ||
285 | void __init rcu_init(void) | 311 | /* |
312 | * Spawn the kthread that invokes RCU callbacks. | ||
313 | */ | ||
314 | static int __init rcu_spawn_kthreads(void) | ||
286 | { | 315 | { |
287 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 316 | struct sched_param sp; |
317 | |||
318 | rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread"); | ||
319 | sp.sched_priority = RCU_BOOST_PRIO; | ||
320 | sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp); | ||
321 | return 0; | ||
288 | } | 322 | } |
323 | early_initcall(rcu_spawn_kthreads); | ||
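The rcutiny.c hunks above replace RCU_SOFTIRQ with a dedicated kthread that is woken through a work flag plus a wait queue: invoke_rcu_kthread() sets have_rcu_kthread_work and does the wake-up, and rcu_kthread() sleeps until the flag is set, consumes it, and processes callbacks. The sketch below is an illustrative userspace analogue of that handoff only, not kernel code; a pthread mutex and condition variable stand in for the wait queue and interrupt disabling, and worker()/kick_worker() are made-up names.

/*
 * Illustrative userspace analogue, NOT kernel code, of the
 * flag-plus-wakeup handoff used by invoke_rcu_kthread()/rcu_kthread().
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static int have_work;		/* like have_rcu_kthread_work */
static int shutting_down;	/* lets the example terminate cleanly */

static void *worker(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!have_work && !shutting_down)	/* wait_event() analogue */
			pthread_cond_wait(&wq, &lock);
		if (!have_work && shutting_down) {
			pthread_mutex_unlock(&lock);
			break;
		}
		have_work = 0;				/* consume the flag */
		pthread_mutex_unlock(&lock);
		printf("worker: invoking callbacks\n");
	}
	return NULL;
}

/* invoke_rcu_kthread() analogue: flag the work and wake the worker. */
static void kick_worker(void)
{
	pthread_mutex_lock(&lock);
	have_work = 1;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, worker, NULL);
	for (i = 0; i < 3; i++) {
		kick_worker();
		usleep(10000);
	}
	pthread_mutex_lock(&lock);
	shutting_down = 1;
	pthread_cond_signal(&wq);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}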
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h index 6ceca4f745f..015abaea962 100644 --- a/kernel/rcutiny_plugin.h +++ b/kernel/rcutiny_plugin.h | |||
@@ -22,6 +22,40 @@ | |||
22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 22 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/kthread.h> | ||
26 | #include <linux/debugfs.h> | ||
27 | #include <linux/seq_file.h> | ||
28 | |||
29 | #ifdef CONFIG_RCU_TRACE | ||
30 | #define RCU_TRACE(stmt) stmt | ||
31 | #else /* #ifdef CONFIG_RCU_TRACE */ | ||
32 | #define RCU_TRACE(stmt) | ||
33 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | ||
34 | |||
35 | /* Global control variables for rcupdate callback mechanism. */ | ||
36 | struct rcu_ctrlblk { | ||
37 | struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ | ||
38 | struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ | ||
39 | struct rcu_head **curtail; /* ->next pointer of last CB. */ | ||
40 | RCU_TRACE(long qlen); /* Number of pending CBs. */ | ||
41 | }; | ||
42 | |||
43 | /* Definition for rcupdate control block. */ | ||
44 | static struct rcu_ctrlblk rcu_sched_ctrlblk = { | ||
45 | .donetail = &rcu_sched_ctrlblk.rcucblist, | ||
46 | .curtail = &rcu_sched_ctrlblk.rcucblist, | ||
47 | }; | ||
48 | |||
49 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | ||
50 | .donetail = &rcu_bh_ctrlblk.rcucblist, | ||
51 | .curtail = &rcu_bh_ctrlblk.rcucblist, | ||
52 | }; | ||
53 | |||
54 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
55 | int rcu_scheduler_active __read_mostly; | ||
56 | EXPORT_SYMBOL_GPL(rcu_scheduler_active); | ||
57 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | ||
58 | |||
25 | #ifdef CONFIG_TINY_PREEMPT_RCU | 59 | #ifdef CONFIG_TINY_PREEMPT_RCU |
26 | 60 | ||
27 | #include <linux/delay.h> | 61 | #include <linux/delay.h> |
@@ -46,17 +80,45 @@ struct rcu_preempt_ctrlblk { | |||
46 | struct list_head *gp_tasks; | 80 | struct list_head *gp_tasks; |
47 | /* Pointer to the first task blocking the */ | 81 | /* Pointer to the first task blocking the */ |
48 | /* current grace period, or NULL if there */ | 82 | /* current grace period, or NULL if there */ |
49 | /* is not such task. */ | 83 | /* is no such task. */ |
50 | struct list_head *exp_tasks; | 84 | struct list_head *exp_tasks; |
51 | /* Pointer to first task blocking the */ | 85 | /* Pointer to first task blocking the */ |
52 | /* current expedited grace period, or NULL */ | 86 | /* current expedited grace period, or NULL */ |
53 | /* if there is no such task. If there */ | 87 | /* if there is no such task. If there */ |
54 | /* is no current expedited grace period, */ | 88 | /* is no current expedited grace period, */ |
55 | /* then there cannot be any such task. */ | 89 | /* then there cannot be any such task. */ |
90 | #ifdef CONFIG_RCU_BOOST | ||
91 | struct list_head *boost_tasks; | ||
92 | /* Pointer to first task that needs to be */ | ||
93 | /* priority-boosted, or NULL if no priority */ | ||
94 | /* boosting is needed. If there is no */ | ||
95 | /* current or expedited grace period, there */ | ||
96 | /* can be no such task. */ | ||
97 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
56 | u8 gpnum; /* Current grace period. */ | 98 | u8 gpnum; /* Current grace period. */ |
57 | u8 gpcpu; /* Last grace period blocked by the CPU. */ | 99 | u8 gpcpu; /* Last grace period blocked by the CPU. */ |
58 | u8 completed; /* Last grace period completed. */ | 100 | u8 completed; /* Last grace period completed. */ |
59 | /* If all three are equal, RCU is idle. */ | 101 | /* If all three are equal, RCU is idle. */ |
102 | #ifdef CONFIG_RCU_BOOST | ||
103 | s8 boosted_this_gp; /* Has boosting already happened? */ | ||
104 | unsigned long boost_time; /* When to start boosting (jiffies) */ | ||
105 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
106 | #ifdef CONFIG_RCU_TRACE | ||
107 | unsigned long n_grace_periods; | ||
108 | #ifdef CONFIG_RCU_BOOST | ||
109 | unsigned long n_tasks_boosted; | ||
110 | unsigned long n_exp_boosts; | ||
111 | unsigned long n_normal_boosts; | ||
112 | unsigned long n_normal_balk_blkd_tasks; | ||
113 | unsigned long n_normal_balk_gp_tasks; | ||
114 | unsigned long n_normal_balk_boost_tasks; | ||
115 | unsigned long n_normal_balk_boosted; | ||
116 | unsigned long n_normal_balk_notyet; | ||
117 | unsigned long n_normal_balk_nos; | ||
118 | unsigned long n_exp_balk_blkd_tasks; | ||
119 | unsigned long n_exp_balk_nos; | ||
120 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
121 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
60 | }; | 122 | }; |
61 | 123 | ||
62 | static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { | 124 | static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = { |
@@ -122,6 +184,210 @@ static int rcu_preempt_gp_in_progress(void) | |||
122 | } | 184 | } |
123 | 185 | ||
124 | /* | 186 | /* |
187 | * Advance a ->blkd_tasks-list pointer to the next entry, returning | ||
188 | * NULL instead if at the end of the list. | ||
189 | */ | ||
190 | static struct list_head *rcu_next_node_entry(struct task_struct *t) | ||
191 | { | ||
192 | struct list_head *np; | ||
193 | |||
194 | np = t->rcu_node_entry.next; | ||
195 | if (np == &rcu_preempt_ctrlblk.blkd_tasks) | ||
196 | np = NULL; | ||
197 | return np; | ||
198 | } | ||
199 | |||
200 | #ifdef CONFIG_RCU_TRACE | ||
201 | |||
202 | #ifdef CONFIG_RCU_BOOST | ||
203 | static void rcu_initiate_boost_trace(void); | ||
204 | static void rcu_initiate_exp_boost_trace(void); | ||
205 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
206 | |||
207 | /* | ||
208 | * Dump additional statistics for TINY_PREEMPT_RCU. | ||
209 | */ | ||
210 | static void show_tiny_preempt_stats(struct seq_file *m) | ||
211 | { | ||
212 | seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n", | ||
213 | rcu_preempt_ctrlblk.rcb.qlen, | ||
214 | rcu_preempt_ctrlblk.n_grace_periods, | ||
215 | rcu_preempt_ctrlblk.gpnum, | ||
216 | rcu_preempt_ctrlblk.gpcpu, | ||
217 | rcu_preempt_ctrlblk.completed, | ||
218 | "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)], | ||
219 | "N."[!rcu_preempt_ctrlblk.gp_tasks], | ||
220 | "E."[!rcu_preempt_ctrlblk.exp_tasks]); | ||
221 | #ifdef CONFIG_RCU_BOOST | ||
222 | seq_printf(m, " ttb=%c btg=", | ||
223 | "B."[!rcu_preempt_ctrlblk.boost_tasks]); | ||
224 | switch (rcu_preempt_ctrlblk.boosted_this_gp) { | ||
225 | case -1: | ||
226 | seq_puts(m, "exp"); | ||
227 | break; | ||
228 | case 0: | ||
229 | seq_puts(m, "no"); | ||
230 | break; | ||
231 | case 1: | ||
232 | seq_puts(m, "begun"); | ||
233 | break; | ||
234 | case 2: | ||
235 | seq_puts(m, "done"); | ||
236 | break; | ||
237 | default: | ||
238 | seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp); | ||
239 | } | ||
240 | seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n", | ||
241 | rcu_preempt_ctrlblk.n_tasks_boosted, | ||
242 | rcu_preempt_ctrlblk.n_exp_boosts, | ||
243 | rcu_preempt_ctrlblk.n_normal_boosts, | ||
244 | (int)(jiffies & 0xffff), | ||
245 | (int)(rcu_preempt_ctrlblk.boost_time & 0xffff)); | ||
246 | seq_printf(m, " %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n", | ||
247 | "normal balk", | ||
248 | rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks, | ||
249 | rcu_preempt_ctrlblk.n_normal_balk_gp_tasks, | ||
250 | rcu_preempt_ctrlblk.n_normal_balk_boost_tasks, | ||
251 | rcu_preempt_ctrlblk.n_normal_balk_boosted, | ||
252 | rcu_preempt_ctrlblk.n_normal_balk_notyet, | ||
253 | rcu_preempt_ctrlblk.n_normal_balk_nos); | ||
254 | seq_printf(m, " exp balk: bt=%lu nos=%lu\n", | ||
255 | rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks, | ||
256 | rcu_preempt_ctrlblk.n_exp_balk_nos); | ||
257 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
258 | } | ||
259 | |||
260 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
261 | |||
262 | #ifdef CONFIG_RCU_BOOST | ||
263 | |||
264 | #include "rtmutex_common.h" | ||
265 | |||
266 | /* | ||
267 | * Carry out RCU priority boosting on the task indicated by ->boost_tasks, | ||
268 | * and advance ->boost_tasks to the next task in the ->blkd_tasks list. | ||
269 | */ | ||
270 | static int rcu_boost(void) | ||
271 | { | ||
272 | unsigned long flags; | ||
273 | struct rt_mutex mtx; | ||
274 | struct list_head *np; | ||
275 | struct task_struct *t; | ||
276 | |||
277 | if (rcu_preempt_ctrlblk.boost_tasks == NULL) | ||
278 | return 0; /* Nothing to boost. */ | ||
279 | raw_local_irq_save(flags); | ||
280 | rcu_preempt_ctrlblk.boosted_this_gp++; | ||
281 | t = container_of(rcu_preempt_ctrlblk.boost_tasks, struct task_struct, | ||
282 | rcu_node_entry); | ||
283 | np = rcu_next_node_entry(t); | ||
284 | rt_mutex_init_proxy_locked(&mtx, t); | ||
285 | t->rcu_boost_mutex = &mtx; | ||
286 | t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED; | ||
287 | raw_local_irq_restore(flags); | ||
288 | rt_mutex_lock(&mtx); | ||
289 | RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++); | ||
290 | rcu_preempt_ctrlblk.boosted_this_gp++; | ||
291 | rt_mutex_unlock(&mtx); | ||
292 | return rcu_preempt_ctrlblk.boost_tasks != NULL; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Check to see if it is now time to start boosting RCU readers blocking | ||
297 | * the current grace period, and, if so, tell the rcu_kthread_task to | ||
298 | * start boosting them. If there is an expedited boost in progress, | ||
299 | * we wait for it to complete. | ||
300 | * | ||
301 | * If there are no blocked readers blocking the current grace period, | ||
302 | * return 0 to let the caller know, otherwise return 1. Note that this | ||
303 | * return value is independent of whether or not boosting was done. | ||
304 | */ | ||
305 | static int rcu_initiate_boost(void) | ||
306 | { | ||
307 | if (!rcu_preempt_blocked_readers_cgp()) { | ||
308 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++); | ||
309 | return 0; | ||
310 | } | ||
311 | if (rcu_preempt_ctrlblk.gp_tasks != NULL && | ||
312 | rcu_preempt_ctrlblk.boost_tasks == NULL && | ||
313 | rcu_preempt_ctrlblk.boosted_this_gp == 0 && | ||
314 | ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) { | ||
315 | rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks; | ||
316 | invoke_rcu_kthread(); | ||
317 | RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++); | ||
318 | } else | ||
319 | RCU_TRACE(rcu_initiate_boost_trace()); | ||
320 | return 1; | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * Initiate boosting for an expedited grace period. | ||
325 | */ | ||
326 | static void rcu_initiate_expedited_boost(void) | ||
327 | { | ||
328 | unsigned long flags; | ||
329 | |||
330 | raw_local_irq_save(flags); | ||
331 | if (!list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) { | ||
332 | rcu_preempt_ctrlblk.boost_tasks = | ||
333 | rcu_preempt_ctrlblk.blkd_tasks.next; | ||
334 | rcu_preempt_ctrlblk.boosted_this_gp = -1; | ||
335 | invoke_rcu_kthread(); | ||
336 | RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++); | ||
337 | } else | ||
338 | RCU_TRACE(rcu_initiate_exp_boost_trace()); | ||
339 | raw_local_irq_restore(flags); | ||
340 | } | ||
341 | |||
342 | #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) | ||
343 | |||
344 | /* | ||
345 | * Do priority-boost accounting for the start of a new grace period. | ||
346 | */ | ||
347 | static void rcu_preempt_boost_start_gp(void) | ||
348 | { | ||
349 | rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; | ||
350 | if (rcu_preempt_ctrlblk.boosted_this_gp > 0) | ||
351 | rcu_preempt_ctrlblk.boosted_this_gp = 0; | ||
352 | } | ||
353 | |||
354 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
355 | |||
356 | /* | ||
357 | * If there is no RCU priority boosting, we don't boost. | ||
358 | */ | ||
359 | static int rcu_boost(void) | ||
360 | { | ||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * If there is no RCU priority boosting, we don't initiate boosting, | ||
366 | * but we do indicate whether there are blocked readers blocking the | ||
367 | * current grace period. | ||
368 | */ | ||
369 | static int rcu_initiate_boost(void) | ||
370 | { | ||
371 | return rcu_preempt_blocked_readers_cgp(); | ||
372 | } | ||
373 | |||
374 | /* | ||
375 | * If there is no RCU priority boosting, we don't initiate expedited boosting. | ||
376 | */ | ||
377 | static void rcu_initiate_expedited_boost(void) | ||
378 | { | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * If there is no RCU priority boosting, nothing to do at grace-period start. | ||
383 | */ | ||
384 | static void rcu_preempt_boost_start_gp(void) | ||
385 | { | ||
386 | } | ||
387 | |||
388 | #endif /* else #ifdef CONFIG_RCU_BOOST */ | ||
389 | |||
390 | /* | ||
125 | * Record a preemptible-RCU quiescent state for the specified CPU. Note | 391 | * Record a preemptible-RCU quiescent state for the specified CPU. Note |
126 | * that this just means that the task currently running on the CPU is | 392 | * that this just means that the task currently running on the CPU is |
127 | * in a quiescent state. There might be any number of tasks blocked | 393 | * in a quiescent state. There might be any number of tasks blocked |
@@ -148,11 +414,14 @@ static void rcu_preempt_cpu_qs(void) | |||
148 | rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum; | 414 | rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum; |
149 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; | 415 | current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS; |
150 | 416 | ||
417 | /* If there is no GP then there is nothing more to do. */ | ||
418 | if (!rcu_preempt_gp_in_progress()) | ||
419 | return; | ||
151 | /* | 420 | /* |
152 | * If there is no GP, or if blocked readers are still blocking GP, | 421 | * Check up on boosting. If there are no readers blocking the |
153 | * then there is nothing more to do. | 422 | * current grace period, leave. |
154 | */ | 423 | */ |
155 | if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp()) | 424 | if (rcu_initiate_boost()) |
156 | return; | 425 | return; |
157 | 426 | ||
158 | /* Advance callbacks. */ | 427 | /* Advance callbacks. */ |
@@ -164,9 +433,9 @@ static void rcu_preempt_cpu_qs(void) | |||
164 | if (!rcu_preempt_blocked_readers_any()) | 433 | if (!rcu_preempt_blocked_readers_any()) |
165 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail; | 434 | rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail; |
166 | 435 | ||
167 | /* If there are done callbacks, make RCU_SOFTIRQ process them. */ | 436 | /* If there are done callbacks, cause them to be invoked. */ |
168 | if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) | 437 | if (*rcu_preempt_ctrlblk.rcb.donetail != NULL) |
169 | raise_softirq(RCU_SOFTIRQ); | 438 | invoke_rcu_kthread(); |
170 | } | 439 | } |
171 | 440 | ||
172 | /* | 441 | /* |
@@ -178,12 +447,16 @@ static void rcu_preempt_start_gp(void) | |||
178 | 447 | ||
179 | /* Official start of GP. */ | 448 | /* Official start of GP. */ |
180 | rcu_preempt_ctrlblk.gpnum++; | 449 | rcu_preempt_ctrlblk.gpnum++; |
450 | RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++); | ||
181 | 451 | ||
182 | /* Any blocked RCU readers block new GP. */ | 452 | /* Any blocked RCU readers block new GP. */ |
183 | if (rcu_preempt_blocked_readers_any()) | 453 | if (rcu_preempt_blocked_readers_any()) |
184 | rcu_preempt_ctrlblk.gp_tasks = | 454 | rcu_preempt_ctrlblk.gp_tasks = |
185 | rcu_preempt_ctrlblk.blkd_tasks.next; | 455 | rcu_preempt_ctrlblk.blkd_tasks.next; |
186 | 456 | ||
457 | /* Set up for RCU priority boosting. */ | ||
458 | rcu_preempt_boost_start_gp(); | ||
459 | |||
187 | /* If there is no running reader, CPU is done with GP. */ | 460 | /* If there is no running reader, CPU is done with GP. */ |
188 | if (!rcu_preempt_running_reader()) | 461 | if (!rcu_preempt_running_reader()) |
189 | rcu_preempt_cpu_qs(); | 462 | rcu_preempt_cpu_qs(); |
@@ -304,14 +577,16 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
304 | */ | 577 | */ |
305 | empty = !rcu_preempt_blocked_readers_cgp(); | 578 | empty = !rcu_preempt_blocked_readers_cgp(); |
306 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; | 579 | empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL; |
307 | np = t->rcu_node_entry.next; | 580 | np = rcu_next_node_entry(t); |
308 | if (np == &rcu_preempt_ctrlblk.blkd_tasks) | ||
309 | np = NULL; | ||
310 | list_del(&t->rcu_node_entry); | 581 | list_del(&t->rcu_node_entry); |
311 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) | 582 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks) |
312 | rcu_preempt_ctrlblk.gp_tasks = np; | 583 | rcu_preempt_ctrlblk.gp_tasks = np; |
313 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) | 584 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks) |
314 | rcu_preempt_ctrlblk.exp_tasks = np; | 585 | rcu_preempt_ctrlblk.exp_tasks = np; |
586 | #ifdef CONFIG_RCU_BOOST | ||
587 | if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks) | ||
588 | rcu_preempt_ctrlblk.boost_tasks = np; | ||
589 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
315 | INIT_LIST_HEAD(&t->rcu_node_entry); | 590 | INIT_LIST_HEAD(&t->rcu_node_entry); |
316 | 591 | ||
317 | /* | 592 | /* |
@@ -331,6 +606,14 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
331 | if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL) | 606 | if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL) |
332 | rcu_report_exp_done(); | 607 | rcu_report_exp_done(); |
333 | } | 608 | } |
609 | #ifdef CONFIG_RCU_BOOST | ||
610 | /* Unboost self if was boosted. */ | ||
611 | if (special & RCU_READ_UNLOCK_BOOSTED) { | ||
612 | t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED; | ||
613 | rt_mutex_unlock(t->rcu_boost_mutex); | ||
614 | t->rcu_boost_mutex = NULL; | ||
615 | } | ||
616 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
334 | local_irq_restore(flags); | 617 | local_irq_restore(flags); |
335 | } | 618 | } |
336 | 619 | ||
@@ -374,7 +657,7 @@ static void rcu_preempt_check_callbacks(void) | |||
374 | rcu_preempt_cpu_qs(); | 657 | rcu_preempt_cpu_qs(); |
375 | if (&rcu_preempt_ctrlblk.rcb.rcucblist != | 658 | if (&rcu_preempt_ctrlblk.rcb.rcucblist != |
376 | rcu_preempt_ctrlblk.rcb.donetail) | 659 | rcu_preempt_ctrlblk.rcb.donetail) |
377 | raise_softirq(RCU_SOFTIRQ); | 660 | invoke_rcu_kthread(); |
378 | if (rcu_preempt_gp_in_progress() && | 661 | if (rcu_preempt_gp_in_progress() && |
379 | rcu_cpu_blocking_cur_gp() && | 662 | rcu_cpu_blocking_cur_gp() && |
380 | rcu_preempt_running_reader()) | 663 | rcu_preempt_running_reader()) |
@@ -383,7 +666,7 @@ static void rcu_preempt_check_callbacks(void) | |||
383 | 666 | ||
384 | /* | 667 | /* |
385 | * TINY_PREEMPT_RCU has an extra callback-list tail pointer to | 668 | * TINY_PREEMPT_RCU has an extra callback-list tail pointer to |
386 | * update, so this is invoked from __rcu_process_callbacks() to | 669 | * update, so this is invoked from rcu_process_callbacks() to |
387 | * handle that case. Of course, it is invoked for all flavors of | 670 | * handle that case. Of course, it is invoked for all flavors of |
388 | * RCU, but RCU callbacks can appear only on one of the lists, and | 671 | * RCU, but RCU callbacks can appear only on one of the lists, and |
389 | * neither ->nexttail nor ->donetail can possibly be NULL, so there | 672 | * neither ->nexttail nor ->donetail can possibly be NULL, so there |
@@ -400,7 +683,7 @@ static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp) | |||
400 | */ | 683 | */ |
401 | static void rcu_preempt_process_callbacks(void) | 684 | static void rcu_preempt_process_callbacks(void) |
402 | { | 685 | { |
403 | __rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); | 686 | rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb); |
404 | } | 687 | } |
405 | 688 | ||
406 | /* | 689 | /* |
@@ -417,6 +700,7 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
417 | local_irq_save(flags); | 700 | local_irq_save(flags); |
418 | *rcu_preempt_ctrlblk.nexttail = head; | 701 | *rcu_preempt_ctrlblk.nexttail = head; |
419 | rcu_preempt_ctrlblk.nexttail = &head->next; | 702 | rcu_preempt_ctrlblk.nexttail = &head->next; |
703 | RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++); | ||
420 | rcu_preempt_start_gp(); /* checks to see if GP needed. */ | 704 | rcu_preempt_start_gp(); /* checks to see if GP needed. */ |
421 | local_irq_restore(flags); | 705 | local_irq_restore(flags); |
422 | } | 706 | } |
@@ -532,6 +816,7 @@ void synchronize_rcu_expedited(void) | |||
532 | 816 | ||
533 | /* Wait for tail of ->blkd_tasks list to drain. */ | 817 | /* Wait for tail of ->blkd_tasks list to drain. */ |
534 | if (rcu_preempted_readers_exp()) | 818 | if (rcu_preempted_readers_exp()) |
819 | rcu_initiate_expedited_boost(); | ||
535 | wait_event(sync_rcu_preempt_exp_wq, | 820 | wait_event(sync_rcu_preempt_exp_wq, |
536 | !rcu_preempted_readers_exp()); | 821 | !rcu_preempted_readers_exp()); |
537 | 822 | ||
@@ -572,6 +857,27 @@ void exit_rcu(void) | |||
572 | 857 | ||
573 | #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ | 858 | #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */ |
574 | 859 | ||
860 | #ifdef CONFIG_RCU_TRACE | ||
861 | |||
862 | /* | ||
863 | * Because preemptible RCU does not exist, it is not necessary to | ||
864 | * dump out its statistics. | ||
865 | */ | ||
866 | static void show_tiny_preempt_stats(struct seq_file *m) | ||
867 | { | ||
868 | } | ||
869 | |||
870 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
871 | |||
872 | /* | ||
873 | * Because preemptible RCU does not exist, it is never necessary to | ||
874 | * boost preempted RCU readers. | ||
875 | */ | ||
876 | static int rcu_boost(void) | ||
877 | { | ||
878 | return 0; | ||
879 | } | ||
880 | |||
575 | /* | 881 | /* |
576 | * Because preemptible RCU does not exist, it never has any callbacks | 882 | * Because preemptible RCU does not exist, it never has any callbacks |
577 | * to check. | 883 | * to check. |
@@ -599,17 +905,116 @@ static void rcu_preempt_process_callbacks(void) | |||
599 | #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ | 905 | #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */ |
600 | 906 | ||
601 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 907 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
602 | |||
603 | #include <linux/kernel_stat.h> | 908 | #include <linux/kernel_stat.h> |
604 | 909 | ||
605 | /* | 910 | /* |
606 | * During boot, we forgive RCU lockdep issues. After this function is | 911 | * During boot, we forgive RCU lockdep issues. After this function is |
607 | * invoked, we start taking RCU lockdep issues seriously. | 912 | * invoked, we start taking RCU lockdep issues seriously. |
608 | */ | 913 | */ |
609 | void rcu_scheduler_starting(void) | 914 | void __init rcu_scheduler_starting(void) |
610 | { | 915 | { |
611 | WARN_ON(nr_context_switches() > 0); | 916 | WARN_ON(nr_context_switches() > 0); |
612 | rcu_scheduler_active = 1; | 917 | rcu_scheduler_active = 1; |
613 | } | 918 | } |
614 | 919 | ||
615 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 920 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
921 | |||
922 | #ifdef CONFIG_RCU_BOOST | ||
923 | #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO | ||
924 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
925 | #define RCU_BOOST_PRIO 1 | ||
926 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | ||
927 | |||
928 | #ifdef CONFIG_RCU_TRACE | ||
929 | |||
930 | #ifdef CONFIG_RCU_BOOST | ||
931 | |||
932 | static void rcu_initiate_boost_trace(void) | ||
933 | { | ||
934 | if (rcu_preempt_ctrlblk.gp_tasks == NULL) | ||
935 | rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++; | ||
936 | else if (rcu_preempt_ctrlblk.boost_tasks != NULL) | ||
937 | rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++; | ||
938 | else if (rcu_preempt_ctrlblk.boosted_this_gp != 0) | ||
939 | rcu_preempt_ctrlblk.n_normal_balk_boosted++; | ||
940 | else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) | ||
941 | rcu_preempt_ctrlblk.n_normal_balk_notyet++; | ||
942 | else | ||
943 | rcu_preempt_ctrlblk.n_normal_balk_nos++; | ||
944 | } | ||
945 | |||
946 | static void rcu_initiate_exp_boost_trace(void) | ||
947 | { | ||
948 | if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) | ||
949 | rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++; | ||
950 | else | ||
951 | rcu_preempt_ctrlblk.n_exp_balk_nos++; | ||
952 | } | ||
953 | |||
954 | #endif /* #ifdef CONFIG_RCU_BOOST */ | ||
955 | |||
956 | static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n) | ||
957 | { | ||
958 | unsigned long flags; | ||
959 | |||
960 | raw_local_irq_save(flags); | ||
961 | rcp->qlen -= n; | ||
962 | raw_local_irq_restore(flags); | ||
963 | } | ||
964 | |||
965 | /* | ||
966 | * Dump statistics for TINY_RCU, such as they are. | ||
967 | */ | ||
968 | static int show_tiny_stats(struct seq_file *m, void *unused) | ||
969 | { | ||
970 | show_tiny_preempt_stats(m); | ||
971 | seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen); | ||
972 | seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen); | ||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | static int show_tiny_stats_open(struct inode *inode, struct file *file) | ||
977 | { | ||
978 | return single_open(file, show_tiny_stats, NULL); | ||
979 | } | ||
980 | |||
981 | static const struct file_operations show_tiny_stats_fops = { | ||
982 | .owner = THIS_MODULE, | ||
983 | .open = show_tiny_stats_open, | ||
984 | .read = seq_read, | ||
985 | .llseek = seq_lseek, | ||
986 | .release = single_release, | ||
987 | }; | ||
988 | |||
989 | static struct dentry *rcudir; | ||
990 | |||
991 | static int __init rcutiny_trace_init(void) | ||
992 | { | ||
993 | struct dentry *retval; | ||
994 | |||
995 | rcudir = debugfs_create_dir("rcu", NULL); | ||
996 | if (!rcudir) | ||
997 | goto free_out; | ||
998 | retval = debugfs_create_file("rcudata", 0444, rcudir, | ||
999 | NULL, &show_tiny_stats_fops); | ||
1000 | if (!retval) | ||
1001 | goto free_out; | ||
1002 | return 0; | ||
1003 | free_out: | ||
1004 | debugfs_remove_recursive(rcudir); | ||
1005 | return 1; | ||
1006 | } | ||
1007 | |||
1008 | static void __exit rcutiny_trace_cleanup(void) | ||
1009 | { | ||
1010 | debugfs_remove_recursive(rcudir); | ||
1011 | } | ||
1012 | |||
1013 | module_init(rcutiny_trace_init); | ||
1014 | module_exit(rcutiny_trace_cleanup); | ||
1015 | |||
1016 | MODULE_AUTHOR("Paul E. McKenney"); | ||
1017 | MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation"); | ||
1018 | MODULE_LICENSE("GPL"); | ||
1019 | |||
1020 | #endif /* #ifdef CONFIG_RCU_TRACE */ | ||
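The boosting added above works by making a preempted reader the owner of an rt_mutex that the booster then blocks on, so the reader inherits the booster's real-time priority until it leaves its read-side critical section and unlocks in rcu_read_unlock_special(). The sketch below is only an illustrative userspace analogue of that priority-inheritance handoff, not kernel code: the kernel proxy-locks the mutex on the blocked reader's behalf with rt_mutex_init_proxy_locked(), whereas here the reader simply takes a PTHREAD_PRIO_INHERIT mutex itself, which is a simplification; reader() and booster() are made-up names, and priority inheritance only has a visible effect under real-time scheduling policies.

/*
 * Illustrative userspace analogue, NOT kernel code, of the
 * boost-via-blocking-on-a-PI-mutex idea.  Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t boost_mtx;

static void *reader(void *arg)
{
	pthread_mutex_lock(&boost_mtx);		/* enter read-side critical section */
	printf("reader: in critical section\n");
	usleep(100000);				/* pretend to be preempted for a while */
	printf("reader: done, dropping the boost mutex\n");
	pthread_mutex_unlock(&boost_mtx);	/* like the unboost in rcu_read_unlock_special() */
	return NULL;
}

static void *booster(void *arg)
{
	/* Blocking here lends the booster's priority to the current owner. */
	pthread_mutex_lock(&boost_mtx);
	printf("booster: reader finished, boost cycle complete\n");
	pthread_mutex_unlock(&boost_mtx);
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t r, b;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&boost_mtx, &attr);

	pthread_create(&r, NULL, reader, NULL);
	usleep(10000);			/* let the reader acquire the mutex first */
	pthread_create(&b, NULL, booster, NULL);

	pthread_join(r, NULL);
	pthread_join(b, NULL);
	pthread_mutex_destroy(&boost_mtx);
	return 0;
}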
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 9d8e8fb2515..89613f97ff2 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/srcu.h> | 47 | #include <linux/srcu.h> |
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <asm/byteorder.h> | 49 | #include <asm/byteorder.h> |
50 | #include <linux/sched.h> | ||
50 | 51 | ||
51 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
52 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " | 53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " |
@@ -64,6 +65,9 @@ static int irqreader = 1; /* RCU readers from irq (timers). */ | |||
64 | static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */ | 65 | static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */ |
65 | static int fqs_holdoff = 0; /* Hold time within burst (us). */ | 66 | static int fqs_holdoff = 0; /* Hold time within burst (us). */ |
66 | static int fqs_stutter = 3; /* Wait time between bursts (s). */ | 67 | static int fqs_stutter = 3; /* Wait time between bursts (s). */ |
68 | static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */ | ||
69 | static int test_boost_interval = 7; /* Interval between boost tests, seconds. */ | ||
70 | static int test_boost_duration = 4; /* Duration of each boost test, seconds. */ | ||
67 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ | 71 | static char *torture_type = "rcu"; /* What RCU implementation to torture. */ |
68 | 72 | ||
69 | module_param(nreaders, int, 0444); | 73 | module_param(nreaders, int, 0444); |
@@ -88,6 +92,12 @@ module_param(fqs_holdoff, int, 0444); | |||
88 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); | 92 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); |
89 | module_param(fqs_stutter, int, 0444); | 93 | module_param(fqs_stutter, int, 0444); |
90 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); | 94 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); |
95 | module_param(test_boost, int, 0444); | ||
96 | MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); | ||
97 | module_param(test_boost_interval, int, 0444); | ||
98 | MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); | ||
99 | module_param(test_boost_duration, int, 0444); | ||
100 | MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); | ||
91 | module_param(torture_type, charp, 0444); | 101 | module_param(torture_type, charp, 0444); |
92 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); | 102 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)"); |
93 | 103 | ||
@@ -109,6 +119,7 @@ static struct task_struct *stats_task; | |||
109 | static struct task_struct *shuffler_task; | 119 | static struct task_struct *shuffler_task; |
110 | static struct task_struct *stutter_task; | 120 | static struct task_struct *stutter_task; |
111 | static struct task_struct *fqs_task; | 121 | static struct task_struct *fqs_task; |
122 | static struct task_struct *boost_tasks[NR_CPUS]; | ||
112 | 123 | ||
113 | #define RCU_TORTURE_PIPE_LEN 10 | 124 | #define RCU_TORTURE_PIPE_LEN 10 |
114 | 125 | ||
@@ -134,6 +145,12 @@ static atomic_t n_rcu_torture_alloc_fail; | |||
134 | static atomic_t n_rcu_torture_free; | 145 | static atomic_t n_rcu_torture_free; |
135 | static atomic_t n_rcu_torture_mberror; | 146 | static atomic_t n_rcu_torture_mberror; |
136 | static atomic_t n_rcu_torture_error; | 147 | static atomic_t n_rcu_torture_error; |
148 | static long n_rcu_torture_boost_ktrerror; | ||
149 | static long n_rcu_torture_boost_rterror; | ||
150 | static long n_rcu_torture_boost_allocerror; | ||
151 | static long n_rcu_torture_boost_afferror; | ||
152 | static long n_rcu_torture_boost_failure; | ||
153 | static long n_rcu_torture_boosts; | ||
137 | static long n_rcu_torture_timers; | 154 | static long n_rcu_torture_timers; |
138 | static struct list_head rcu_torture_removed; | 155 | static struct list_head rcu_torture_removed; |
139 | static cpumask_var_t shuffle_tmp_mask; | 156 | static cpumask_var_t shuffle_tmp_mask; |
@@ -147,6 +164,16 @@ static int stutter_pause_test; | |||
147 | #endif | 164 | #endif |
148 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; | 165 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; |
149 | 166 | ||
167 | #ifdef CONFIG_RCU_BOOST | ||
168 | #define rcu_can_boost() 1 | ||
169 | #else /* #ifdef CONFIG_RCU_BOOST */ | ||
170 | #define rcu_can_boost() 0 | ||
171 | #endif /* #else #ifdef CONFIG_RCU_BOOST */ | ||
172 | |||
173 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ | ||
174 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ | ||
175 | /* and boost task create/destroy. */ | ||
176 | |||
150 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ | 177 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ |
151 | 178 | ||
152 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ | 179 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ |
@@ -277,6 +304,7 @@ struct rcu_torture_ops { | |||
277 | void (*fqs)(void); | 304 | void (*fqs)(void); |
278 | int (*stats)(char *page); | 305 | int (*stats)(char *page); |
279 | int irq_capable; | 306 | int irq_capable; |
307 | int can_boost; | ||
280 | char *name; | 308 | char *name; |
281 | }; | 309 | }; |
282 | 310 | ||
@@ -366,6 +394,7 @@ static struct rcu_torture_ops rcu_ops = { | |||
366 | .fqs = rcu_force_quiescent_state, | 394 | .fqs = rcu_force_quiescent_state, |
367 | .stats = NULL, | 395 | .stats = NULL, |
368 | .irq_capable = 1, | 396 | .irq_capable = 1, |
397 | .can_boost = rcu_can_boost(), | ||
369 | .name = "rcu" | 398 | .name = "rcu" |
370 | }; | 399 | }; |
371 | 400 | ||
@@ -408,6 +437,7 @@ static struct rcu_torture_ops rcu_sync_ops = { | |||
408 | .fqs = rcu_force_quiescent_state, | 437 | .fqs = rcu_force_quiescent_state, |
409 | .stats = NULL, | 438 | .stats = NULL, |
410 | .irq_capable = 1, | 439 | .irq_capable = 1, |
440 | .can_boost = rcu_can_boost(), | ||
411 | .name = "rcu_sync" | 441 | .name = "rcu_sync" |
412 | }; | 442 | }; |
413 | 443 | ||
@@ -424,6 +454,7 @@ static struct rcu_torture_ops rcu_expedited_ops = { | |||
424 | .fqs = rcu_force_quiescent_state, | 454 | .fqs = rcu_force_quiescent_state, |
425 | .stats = NULL, | 455 | .stats = NULL, |
426 | .irq_capable = 1, | 456 | .irq_capable = 1, |
457 | .can_boost = rcu_can_boost(), | ||
427 | .name = "rcu_expedited" | 458 | .name = "rcu_expedited" |
428 | }; | 459 | }; |
429 | 460 | ||
@@ -684,6 +715,110 @@ static struct rcu_torture_ops sched_expedited_ops = { | |||
684 | }; | 715 | }; |
685 | 716 | ||
686 | /* | 717 | /* |
718 | * RCU torture priority-boost testing. Runs one real-time thread per | ||
719 | * CPU for moderate bursts, repeatedly registering RCU callbacks and | ||
720 | * spinning waiting for them to be invoked. If a given callback takes | ||
721 | * too long to be invoked, we assume that priority inversion has occurred. | ||
722 | */ | ||
723 | |||
724 | struct rcu_boost_inflight { | ||
725 | struct rcu_head rcu; | ||
726 | int inflight; | ||
727 | }; | ||
728 | |||
729 | static void rcu_torture_boost_cb(struct rcu_head *head) | ||
730 | { | ||
731 | struct rcu_boost_inflight *rbip = | ||
732 | container_of(head, struct rcu_boost_inflight, rcu); | ||
733 | |||
734 | smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */ | ||
735 | rbip->inflight = 0; | ||
736 | } | ||
737 | |||
738 | static int rcu_torture_boost(void *arg) | ||
739 | { | ||
740 | unsigned long call_rcu_time; | ||
741 | unsigned long endtime; | ||
742 | unsigned long oldstarttime; | ||
743 | struct rcu_boost_inflight rbi = { .inflight = 0 }; | ||
744 | struct sched_param sp; | ||
745 | |||
746 | VERBOSE_PRINTK_STRING("rcu_torture_boost started"); | ||
747 | |||
748 | /* Set real-time priority. */ | ||
749 | sp.sched_priority = 1; | ||
750 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { | ||
751 | VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!"); | ||
752 | n_rcu_torture_boost_rterror++; | ||
753 | } | ||
754 | |||
755 | /* Each pass through the following loop does one boost-test cycle. */ | ||
756 | do { | ||
757 | /* Wait for the next test interval. */ | ||
758 | oldstarttime = boost_starttime; | ||
759 | while (jiffies - oldstarttime > ULONG_MAX / 2) { | ||
760 | schedule_timeout_uninterruptible(1); | ||
761 | rcu_stutter_wait("rcu_torture_boost"); | ||
762 | if (kthread_should_stop() || | ||
763 | fullstop != FULLSTOP_DONTSTOP) | ||
764 | goto checkwait; | ||
765 | } | ||
766 | |||
767 | /* Do one boost-test interval. */ | ||
768 | endtime = oldstarttime + test_boost_duration * HZ; | ||
769 | call_rcu_time = jiffies; | ||
770 | while (jiffies - endtime > ULONG_MAX / 2) { | ||
771 | /* If we don't have a callback in flight, post one. */ | ||
772 | if (!rbi.inflight) { | ||
773 | smp_mb(); /* RCU core before ->inflight = 1. */ | ||
774 | rbi.inflight = 1; | ||
775 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); | ||
776 | if (jiffies - call_rcu_time > | ||
777 | test_boost_duration * HZ - HZ / 2) { | ||
778 | VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed"); | ||
779 | n_rcu_torture_boost_failure++; | ||
780 | } | ||
781 | call_rcu_time = jiffies; | ||
782 | } | ||
783 | cond_resched(); | ||
784 | rcu_stutter_wait("rcu_torture_boost"); | ||
785 | if (kthread_should_stop() || | ||
786 | fullstop != FULLSTOP_DONTSTOP) | ||
787 | goto checkwait; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * Set the start time of the next test interval. | ||
792 | * Yes, this is vulnerable to long delays, but such | ||
793 | * delays simply cause a false negative for the next | ||
794 | * interval. Besides, we are running at RT priority, | ||
795 | * so delays should be relatively rare. | ||
796 | */ | ||
797 | while (oldstarttime == boost_starttime) { | ||
798 | if (mutex_trylock(&boost_mutex)) { | ||
799 | boost_starttime = jiffies + | ||
800 | test_boost_interval * HZ; | ||
801 | n_rcu_torture_boosts++; | ||
802 | mutex_unlock(&boost_mutex); | ||
803 | break; | ||
804 | } | ||
805 | schedule_timeout_uninterruptible(1); | ||
806 | } | ||
807 | |||
808 | /* Go do the stutter. */ | ||
809 | checkwait: rcu_stutter_wait("rcu_torture_boost"); | ||
810 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | ||
811 | |||
812 | /* Clean up and exit. */ | ||
813 | VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); | ||
814 | rcutorture_shutdown_absorb("rcu_torture_boost"); | ||
815 | while (!kthread_should_stop() || rbi.inflight) | ||
816 | schedule_timeout_uninterruptible(1); | ||
817 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ | ||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | /* | ||
687 | * RCU torture force-quiescent-state kthread. Repeatedly induces | 822 | * RCU torture force-quiescent-state kthread. Repeatedly induces |
688 | * bursts of calls to force_quiescent_state(), increasing the probability | 823 | * bursts of calls to force_quiescent_state(), increasing the probability |
689 | * of occurrence of some important types of race conditions. | 824 | * of occurrence of some important types of race conditions. |
@@ -933,7 +1068,8 @@ rcu_torture_printk(char *page) | |||
933 | cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); | 1068 | cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG); |
934 | cnt += sprintf(&page[cnt], | 1069 | cnt += sprintf(&page[cnt], |
935 | "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " | 1070 | "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d " |
936 | "rtmbe: %d nt: %ld", | 1071 | "rtmbe: %d rtbke: %ld rtbre: %ld rtbae: %ld rtbafe: %ld " |
1072 | "rtbf: %ld rtb: %ld nt: %ld", | ||
937 | rcu_torture_current, | 1073 | rcu_torture_current, |
938 | rcu_torture_current_version, | 1074 | rcu_torture_current_version, |
939 | list_empty(&rcu_torture_freelist), | 1075 | list_empty(&rcu_torture_freelist), |
@@ -941,8 +1077,19 @@ rcu_torture_printk(char *page) | |||
941 | atomic_read(&n_rcu_torture_alloc_fail), | 1077 | atomic_read(&n_rcu_torture_alloc_fail), |
942 | atomic_read(&n_rcu_torture_free), | 1078 | atomic_read(&n_rcu_torture_free), |
943 | atomic_read(&n_rcu_torture_mberror), | 1079 | atomic_read(&n_rcu_torture_mberror), |
1080 | n_rcu_torture_boost_ktrerror, | ||
1081 | n_rcu_torture_boost_rterror, | ||
1082 | n_rcu_torture_boost_allocerror, | ||
1083 | n_rcu_torture_boost_afferror, | ||
1084 | n_rcu_torture_boost_failure, | ||
1085 | n_rcu_torture_boosts, | ||
944 | n_rcu_torture_timers); | 1086 | n_rcu_torture_timers); |
945 | if (atomic_read(&n_rcu_torture_mberror) != 0) | 1087 | if (atomic_read(&n_rcu_torture_mberror) != 0 || |
1088 | n_rcu_torture_boost_ktrerror != 0 || | ||
1089 | n_rcu_torture_boost_rterror != 0 || | ||
1090 | n_rcu_torture_boost_allocerror != 0 || | ||
1091 | n_rcu_torture_boost_afferror != 0 || | ||
1092 | n_rcu_torture_boost_failure != 0) | ||
946 | cnt += sprintf(&page[cnt], " !!!"); | 1093 | cnt += sprintf(&page[cnt], " !!!"); |
947 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); | 1094 | cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); |
948 | if (i > 1) { | 1095 | if (i > 1) { |
@@ -1094,22 +1241,91 @@ rcu_torture_stutter(void *arg) | |||
1094 | } | 1241 | } |
1095 | 1242 | ||
1096 | static inline void | 1243 | static inline void |
1097 | rcu_torture_print_module_parms(char *tag) | 1244 | rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag) |
1098 | { | 1245 | { |
1099 | printk(KERN_ALERT "%s" TORTURE_FLAG | 1246 | printk(KERN_ALERT "%s" TORTURE_FLAG |
1100 | "--- %s: nreaders=%d nfakewriters=%d " | 1247 | "--- %s: nreaders=%d nfakewriters=%d " |
1101 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " | 1248 | "stat_interval=%d verbose=%d test_no_idle_hz=%d " |
1102 | "shuffle_interval=%d stutter=%d irqreader=%d " | 1249 | "shuffle_interval=%d stutter=%d irqreader=%d " |
1103 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n", | 1250 | "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " |
1251 | "test_boost=%d/%d test_boost_interval=%d " | ||
1252 | "test_boost_duration=%d\n", | ||
1104 | torture_type, tag, nrealreaders, nfakewriters, | 1253 | torture_type, tag, nrealreaders, nfakewriters, |
1105 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, | 1254 | stat_interval, verbose, test_no_idle_hz, shuffle_interval, |
1106 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter); | 1255 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, |
1256 | test_boost, cur_ops->can_boost, | ||
1257 | test_boost_interval, test_boost_duration); | ||
1107 | } | 1258 | } |
1108 | 1259 | ||
1109 | static struct notifier_block rcutorture_nb = { | 1260 | static struct notifier_block rcutorture_shutdown_nb = { |
1110 | .notifier_call = rcutorture_shutdown_notify, | 1261 | .notifier_call = rcutorture_shutdown_notify, |
1111 | }; | 1262 | }; |
1112 | 1263 | ||
1264 | static void rcutorture_booster_cleanup(int cpu) | ||
1265 | { | ||
1266 | struct task_struct *t; | ||
1267 | |||
1268 | if (boost_tasks[cpu] == NULL) | ||
1269 | return; | ||
1270 | mutex_lock(&boost_mutex); | ||
1271 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task"); | ||
1272 | t = boost_tasks[cpu]; | ||
1273 | boost_tasks[cpu] = NULL; | ||
1274 | mutex_unlock(&boost_mutex); | ||
1275 | |||
1276 | /* This must be outside of the mutex, otherwise deadlock! */ | ||
1277 | kthread_stop(t); | ||
1278 | } | ||
1279 | |||
1280 | static int rcutorture_booster_init(int cpu) | ||
1281 | { | ||
1282 | int retval; | ||
1283 | |||
1284 | if (boost_tasks[cpu] != NULL) | ||
1285 | return 0; /* Already created, nothing more to do. */ | ||
1286 | |||
1287 | /* Don't allow time recalculation while creating a new task. */ | ||
1288 | mutex_lock(&boost_mutex); | ||
1289 | VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task"); | ||
1290 | boost_tasks[cpu] = kthread_create(rcu_torture_boost, NULL, | ||
1291 | "rcu_torture_boost"); | ||
1292 | if (IS_ERR(boost_tasks[cpu])) { | ||
1293 | retval = PTR_ERR(boost_tasks[cpu]); | ||
1294 | VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed"); | ||
1295 | n_rcu_torture_boost_ktrerror++; | ||
1296 | boost_tasks[cpu] = NULL; | ||
1297 | mutex_unlock(&boost_mutex); | ||
1298 | return retval; | ||
1299 | } | ||
1300 | kthread_bind(boost_tasks[cpu], cpu); | ||
1301 | wake_up_process(boost_tasks[cpu]); | ||
1302 | mutex_unlock(&boost_mutex); | ||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | static int rcutorture_cpu_notify(struct notifier_block *self, | ||
1307 | unsigned long action, void *hcpu) | ||
1308 | { | ||
1309 | long cpu = (long)hcpu; | ||
1310 | |||
1311 | switch (action) { | ||
1312 | case CPU_ONLINE: | ||
1313 | case CPU_DOWN_FAILED: | ||
1314 | (void)rcutorture_booster_init(cpu); | ||
1315 | break; | ||
1316 | case CPU_DOWN_PREPARE: | ||
1317 | rcutorture_booster_cleanup(cpu); | ||
1318 | break; | ||
1319 | default: | ||
1320 | break; | ||
1321 | } | ||
1322 | return NOTIFY_OK; | ||
1323 | } | ||
1324 | |||
1325 | static struct notifier_block rcutorture_cpu_nb = { | ||
1326 | .notifier_call = rcutorture_cpu_notify, | ||
1327 | }; | ||
1328 | |||
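The cleanup path above deliberately fetches the task pointer under boost_mutex, clears the slot, and only calls kthread_stop() after dropping the mutex: the boost task may itself need boost_mutex to finish, so waiting for it while holding the lock could deadlock. A minimal userspace sketch of the same discipline, using pthreads and hypothetical names (boost_fn, booster_cleanup) rather than the kernel APIs; build with cc -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t boost_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t boost_task;
static bool boost_task_valid;
static bool stop_requested;

static void *boost_fn(void *arg)
{
        bool stop = false;

        (void)arg;
        while (!stop) {
                pthread_mutex_lock(&boost_mutex);  /* the task needs the mutex too */
                stop = stop_requested;
                pthread_mutex_unlock(&boost_mutex);
        }
        return NULL;
}

static void booster_cleanup(void)
{
        pthread_t t;

        pthread_mutex_lock(&boost_mutex);
        if (!boost_task_valid) {
                pthread_mutex_unlock(&boost_mutex);
                return;
        }
        t = boost_task;
        boost_task_valid = false;
        stop_requested = true;
        pthread_mutex_unlock(&boost_mutex);

        pthread_join(t, NULL);          /* wait outside the mutex, as in the patch */
}

int main(void)
{
        pthread_create(&boost_task, NULL, boost_fn, NULL);
        boost_task_valid = true;
        booster_cleanup();
        printf("boost task stopped\n");
        return 0;
}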
1113 | static void | 1329 | static void |
1114 | rcu_torture_cleanup(void) | 1330 | rcu_torture_cleanup(void) |
1115 | { | 1331 | { |
@@ -1127,7 +1343,7 @@ rcu_torture_cleanup(void) | |||
1127 | } | 1343 | } |
1128 | fullstop = FULLSTOP_RMMOD; | 1344 | fullstop = FULLSTOP_RMMOD; |
1129 | mutex_unlock(&fullstop_mutex); | 1345 | mutex_unlock(&fullstop_mutex); |
1130 | unregister_reboot_notifier(&rcutorture_nb); | 1346 | unregister_reboot_notifier(&rcutorture_shutdown_nb); |
1131 | if (stutter_task) { | 1347 | if (stutter_task) { |
1132 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); | 1348 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); |
1133 | kthread_stop(stutter_task); | 1349 | kthread_stop(stutter_task); |
@@ -1184,6 +1400,12 @@ rcu_torture_cleanup(void) | |||
1184 | kthread_stop(fqs_task); | 1400 | kthread_stop(fqs_task); |
1185 | } | 1401 | } |
1186 | fqs_task = NULL; | 1402 | fqs_task = NULL; |
1403 | if ((test_boost == 1 && cur_ops->can_boost) || | ||
1404 | test_boost == 2) { | ||
1405 | unregister_cpu_notifier(&rcutorture_cpu_nb); | ||
1406 | for_each_possible_cpu(i) | ||
1407 | rcutorture_booster_cleanup(i); | ||
1408 | } | ||
1187 | 1409 | ||
1188 | /* Wait for all RCU callbacks to fire. */ | 1410 | /* Wait for all RCU callbacks to fire. */ |
1189 | 1411 | ||
@@ -1195,9 +1417,9 @@ rcu_torture_cleanup(void) | |||
1195 | if (cur_ops->cleanup) | 1417 | if (cur_ops->cleanup) |
1196 | cur_ops->cleanup(); | 1418 | cur_ops->cleanup(); |
1197 | if (atomic_read(&n_rcu_torture_error)) | 1419 | if (atomic_read(&n_rcu_torture_error)) |
1198 | rcu_torture_print_module_parms("End of test: FAILURE"); | 1420 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
1199 | else | 1421 | else |
1200 | rcu_torture_print_module_parms("End of test: SUCCESS"); | 1422 | rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); |
1201 | } | 1423 | } |
1202 | 1424 | ||
1203 | static int __init | 1425 | static int __init |
@@ -1242,7 +1464,7 @@ rcu_torture_init(void) | |||
1242 | nrealreaders = nreaders; | 1464 | nrealreaders = nreaders; |
1243 | else | 1465 | else |
1244 | nrealreaders = 2 * num_online_cpus(); | 1466 | nrealreaders = 2 * num_online_cpus(); |
1245 | rcu_torture_print_module_parms("Start of test"); | 1467 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
1246 | fullstop = FULLSTOP_DONTSTOP; | 1468 | fullstop = FULLSTOP_DONTSTOP; |
1247 | 1469 | ||
1248 | /* Set up the freelist. */ | 1470 | /* Set up the freelist. */ |
@@ -1263,6 +1485,12 @@ rcu_torture_init(void) | |||
1263 | atomic_set(&n_rcu_torture_free, 0); | 1485 | atomic_set(&n_rcu_torture_free, 0); |
1264 | atomic_set(&n_rcu_torture_mberror, 0); | 1486 | atomic_set(&n_rcu_torture_mberror, 0); |
1265 | atomic_set(&n_rcu_torture_error, 0); | 1487 | atomic_set(&n_rcu_torture_error, 0); |
1488 | n_rcu_torture_boost_ktrerror = 0; | ||
1489 | n_rcu_torture_boost_rterror = 0; | ||
1490 | n_rcu_torture_boost_allocerror = 0; | ||
1491 | n_rcu_torture_boost_afferror = 0; | ||
1492 | n_rcu_torture_boost_failure = 0; | ||
1493 | n_rcu_torture_boosts = 0; | ||
1266 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) | 1494 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) |
1267 | atomic_set(&rcu_torture_wcount[i], 0); | 1495 | atomic_set(&rcu_torture_wcount[i], 0); |
1268 | for_each_possible_cpu(cpu) { | 1496 | for_each_possible_cpu(cpu) { |
@@ -1376,7 +1604,27 @@ rcu_torture_init(void) | |||
1376 | goto unwind; | 1604 | goto unwind; |
1377 | } | 1605 | } |
1378 | } | 1606 | } |
1379 | register_reboot_notifier(&rcutorture_nb); | 1607 | if (test_boost_interval < 1) |
1608 | test_boost_interval = 1; | ||
1609 | if (test_boost_duration < 2) | ||
1610 | test_boost_duration = 2; | ||
1611 | if ((test_boost == 1 && cur_ops->can_boost) || | ||
1612 | test_boost == 2) { | ||
1613 | int retval; | ||
1614 | |||
1615 | boost_starttime = jiffies + test_boost_interval * HZ; | ||
1616 | register_cpu_notifier(&rcutorture_cpu_nb); | ||
1617 | for_each_possible_cpu(i) { | ||
1618 | if (cpu_is_offline(i)) | ||
1619 | continue; /* Heuristic: CPU can go offline. */ | ||
1620 | retval = rcutorture_booster_init(i); | ||
1621 | if (retval < 0) { | ||
1622 | firsterr = retval; | ||
1623 | goto unwind; | ||
1624 | } | ||
1625 | } | ||
1626 | } | ||
1627 | register_reboot_notifier(&rcutorture_shutdown_nb); | ||
1380 | mutex_unlock(&fullstop_mutex); | 1628 | mutex_unlock(&fullstop_mutex); |
1381 | return 0; | 1629 | return 0; |
1382 | 1630 | ||
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index ccdc04c4798..d0ddfea6579 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -67,9 +67,6 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | |||
67 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
68 | .completed = -300, \ | 68 | .completed = -300, \ |
69 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ | 69 | .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \ |
70 | .orphan_cbs_list = NULL, \ | ||
71 | .orphan_cbs_tail = &structname.orphan_cbs_list, \ | ||
72 | .orphan_qlen = 0, \ | ||
73 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ | 70 | .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \ |
74 | .n_force_qs = 0, \ | 71 | .n_force_qs = 0, \ |
75 | .n_force_qs_ngp = 0, \ | 72 | .n_force_qs_ngp = 0, \ |
@@ -620,9 +617,17 @@ static void __init check_cpu_stall_init(void) | |||
620 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | 617 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) |
621 | { | 618 | { |
622 | if (rdp->gpnum != rnp->gpnum) { | 619 | if (rdp->gpnum != rnp->gpnum) { |
623 | rdp->qs_pending = 1; | 620 | /* |
624 | rdp->passed_quiesc = 0; | 621 | * If the current grace period is waiting for this CPU, |
622 | * set up to detect a quiescent state, otherwise don't | ||
623 | * go looking for one. | ||
624 | */ | ||
625 | rdp->gpnum = rnp->gpnum; | 625 | rdp->gpnum = rnp->gpnum; |
626 | if (rnp->qsmask & rdp->grpmask) { | ||
627 | rdp->qs_pending = 1; | ||
628 | rdp->passed_quiesc = 0; | ||
629 | } else | ||
630 | rdp->qs_pending = 0; | ||
626 | } | 631 | } |
627 | } | 632 | } |
628 | 633 | ||
@@ -681,6 +686,24 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat | |||
681 | 686 | ||
682 | /* Remember that we saw this grace-period completion. */ | 687 | /* Remember that we saw this grace-period completion. */ |
683 | rdp->completed = rnp->completed; | 688 | rdp->completed = rnp->completed; |
689 | |||
690 | /* | ||
691 | * If we were in an extended quiescent state, we may have | ||
692 | * missed some grace periods that others CPUs handled on | ||
693 | * our behalf. Catch up with this state to avoid noting | ||
694 | * spurious new grace periods. If another grace period | ||
695 | * has started, then rnp->gpnum will have advanced, so | ||
696 | * we will detect this later on. | ||
697 | */ | ||
698 | if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) | ||
699 | rdp->gpnum = rdp->completed; | ||
700 | |||
701 | /* | ||
702 | * If RCU does not need a quiescent state from this CPU, | ||
703 | * then make sure that this CPU doesn't go looking for one. | ||
704 | */ | ||
705 | if ((rnp->qsmask & rdp->grpmask) == 0) | ||
706 | rdp->qs_pending = 0; | ||
684 | } | 707 | } |
685 | } | 708 | } |
686 | 709 | ||
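The catch-up test above depends on wraparound-safe comparison of the grace-period counters. A small standalone illustration of that style of comparison; CMP_LT here is a stand-in macro written in the spirit of ULONG_CMP_LT(), not the kernel's own definition:

#include <stdio.h>

/* CMP_LT reads the unsigned difference as "more than half the range
 * apart", so the ordering is still decided correctly after the
 * counter wraps around zero. */
#define CMP_LT(a, b)    ((~0UL / 2) < (unsigned long)((a) - (b)))

int main(void)
{
        unsigned long completed = 5;            /* counter has wrapped past 0 */
        unsigned long gpnum = ~0UL - 2;         /* still holds a pre-wrap value */

        if (CMP_LT(gpnum, completed))
                printf("gpnum lags completed: catch it up\n");
        return 0;
}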
@@ -984,53 +1007,31 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
984 | #ifdef CONFIG_HOTPLUG_CPU | 1007 | #ifdef CONFIG_HOTPLUG_CPU |
985 | 1008 | ||
986 | /* | 1009 | /* |
987 | * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the | 1010 | * Move a dying CPU's RCU callbacks to an online CPU's callback list. |
988 | * specified flavor of RCU. The callbacks will be adopted by the next | 1011 | * Synchronization is not required because this function executes |
989 | * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever | 1012 | * in stop_machine() context. |
990 | * comes first. Because this is invoked from the CPU_DYING notifier, | ||
991 | * irqs are already disabled. | ||
992 | */ | 1013 | */ |
993 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | 1014 | static void rcu_send_cbs_to_online(struct rcu_state *rsp) |
994 | { | 1015 | { |
995 | int i; | 1016 | int i; |
1017 | /* The dying CPU has already been cleared from cpu_online_mask. */ | ||
1018 | int receive_cpu = cpumask_any(cpu_online_mask); | ||
996 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); | 1019 | struct rcu_data *rdp = this_cpu_ptr(rsp->rda); |
1020 | struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu); | ||
997 | 1021 | ||
998 | if (rdp->nxtlist == NULL) | 1022 | if (rdp->nxtlist == NULL) |
999 | return; /* irqs disabled, so comparison is stable. */ | 1023 | return; /* irqs disabled, so comparison is stable. */ |
1000 | raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ | 1024 | |
1001 | *rsp->orphan_cbs_tail = rdp->nxtlist; | 1025 | *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; |
1002 | rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL]; | 1026 | receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
1027 | receive_rdp->qlen += rdp->qlen; | ||
1028 | receive_rdp->n_cbs_adopted += rdp->qlen; | ||
1029 | rdp->n_cbs_orphaned += rdp->qlen; | ||
1030 | |||
1003 | rdp->nxtlist = NULL; | 1031 | rdp->nxtlist = NULL; |
1004 | for (i = 0; i < RCU_NEXT_SIZE; i++) | 1032 | for (i = 0; i < RCU_NEXT_SIZE; i++) |
1005 | rdp->nxttail[i] = &rdp->nxtlist; | 1033 | rdp->nxttail[i] = &rdp->nxtlist; |
1006 | rsp->orphan_qlen += rdp->qlen; | ||
1007 | rdp->n_cbs_orphaned += rdp->qlen; | ||
1008 | rdp->qlen = 0; | 1034 | rdp->qlen = 0; |
1009 | raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
1010 | } | ||
1011 | |||
1012 | /* | ||
1013 | * Adopt previously orphaned RCU callbacks. | ||
1014 | */ | ||
1015 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
1016 | { | ||
1017 | unsigned long flags; | ||
1018 | struct rcu_data *rdp; | ||
1019 | |||
1020 | raw_spin_lock_irqsave(&rsp->onofflock, flags); | ||
1021 | rdp = this_cpu_ptr(rsp->rda); | ||
1022 | if (rsp->orphan_cbs_list == NULL) { | ||
1023 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
1024 | return; | ||
1025 | } | ||
1026 | *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; | ||
1027 | rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; | ||
1028 | rdp->qlen += rsp->orphan_qlen; | ||
1029 | rdp->n_cbs_adopted += rsp->orphan_qlen; | ||
1030 | rsp->orphan_cbs_list = NULL; | ||
1031 | rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; | ||
1032 | rsp->orphan_qlen = 0; | ||
1033 | raw_spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
1034 | } | 1035 | } |
1035 | 1036 | ||
1036 | /* | 1037 | /* |
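rcu_send_cbs_to_online() splices the dying CPU's entire callback list onto the receiving CPU in O(1) by reusing the tail-pointer representation behind ->nxtlist/->nxttail[]. A self-contained sketch of that tail-pointer splice idiom, with simplified hypothetical types rather than the kernel's rcu_data fields:

#include <stdio.h>

struct cb {
        struct cb *next;
        int id;
};

struct cblist {
        struct cb *head;
        struct cb **tail;       /* points at head, or at the last ->next */
};

static void cblist_init(struct cblist *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
        c->next = NULL;
        *l->tail = c;
        l->tail = &c->next;
}

/* O(1) splice of src onto dst -- the same tail-pointer move that
 * rcu_send_cbs_to_online() performs on the callback lists. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
        if (src->head == NULL)
                return;
        *dst->tail = src->head;
        dst->tail = src->tail;
        cblist_init(src);
}

int main(void)
{
        struct cb a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
        struct cblist dying, online;
        struct cb *p;

        cblist_init(&dying);
        cblist_init(&online);
        cblist_enqueue(&dying, &a);
        cblist_enqueue(&dying, &b);
        cblist_enqueue(&online, &c);

        cblist_splice(&online, &dying);         /* online now holds 3, 1, 2 */
        for (p = online.head; p != NULL; p = p->next)
                printf("%d\n", p->id);
        return 0;
}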
@@ -1081,8 +1082,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
1081 | raw_spin_unlock_irqrestore(&rnp->lock, flags); | 1082 | raw_spin_unlock_irqrestore(&rnp->lock, flags); |
1082 | if (need_report & RCU_OFL_TASKS_EXP_GP) | 1083 | if (need_report & RCU_OFL_TASKS_EXP_GP) |
1083 | rcu_report_exp_rnp(rsp, rnp); | 1084 | rcu_report_exp_rnp(rsp, rnp); |
1084 | |||
1085 | rcu_adopt_orphan_cbs(rsp); | ||
1086 | } | 1085 | } |
1087 | 1086 | ||
1088 | /* | 1087 | /* |
@@ -1100,11 +1099,7 @@ static void rcu_offline_cpu(int cpu) | |||
1100 | 1099 | ||
1101 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 1100 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
1102 | 1101 | ||
1103 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | 1102 | static void rcu_send_cbs_to_online(struct rcu_state *rsp) |
1104 | { | ||
1105 | } | ||
1106 | |||
1107 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
1108 | { | 1103 | { |
1109 | } | 1104 | } |
1110 | 1105 | ||
@@ -1440,22 +1435,11 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1440 | */ | 1435 | */ |
1441 | local_irq_save(flags); | 1436 | local_irq_save(flags); |
1442 | rdp = this_cpu_ptr(rsp->rda); | 1437 | rdp = this_cpu_ptr(rsp->rda); |
1443 | rcu_process_gp_end(rsp, rdp); | ||
1444 | check_for_new_grace_period(rsp, rdp); | ||
1445 | 1438 | ||
1446 | /* Add the callback to our list. */ | 1439 | /* Add the callback to our list. */ |
1447 | *rdp->nxttail[RCU_NEXT_TAIL] = head; | 1440 | *rdp->nxttail[RCU_NEXT_TAIL] = head; |
1448 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; | 1441 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; |
1449 | 1442 | ||
1450 | /* Start a new grace period if one not already started. */ | ||
1451 | if (!rcu_gp_in_progress(rsp)) { | ||
1452 | unsigned long nestflag; | ||
1453 | struct rcu_node *rnp_root = rcu_get_root(rsp); | ||
1454 | |||
1455 | raw_spin_lock_irqsave(&rnp_root->lock, nestflag); | ||
1456 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | ||
1457 | } | ||
1458 | |||
1459 | /* | 1443 | /* |
1460 | * Force the grace period if too many callbacks or too long waiting. | 1444 | * Force the grace period if too many callbacks or too long waiting. |
1461 | * Enforce hysteresis, and don't invoke force_quiescent_state() | 1445 | * Enforce hysteresis, and don't invoke force_quiescent_state() |
@@ -1464,12 +1448,27 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1464 | * is the only one waiting for a grace period to complete. | 1448 | * is the only one waiting for a grace period to complete. |
1465 | */ | 1449 | */ |
1466 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | 1450 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { |
1467 | rdp->blimit = LONG_MAX; | 1451 | |
1468 | if (rsp->n_force_qs == rdp->n_force_qs_snap && | 1452 | /* Are we ignoring a completed grace period? */ |
1469 | *rdp->nxttail[RCU_DONE_TAIL] != head) | 1453 | rcu_process_gp_end(rsp, rdp); |
1470 | force_quiescent_state(rsp, 0); | 1454 | check_for_new_grace_period(rsp, rdp); |
1471 | rdp->n_force_qs_snap = rsp->n_force_qs; | 1455 | |
1472 | rdp->qlen_last_fqs_check = rdp->qlen; | 1456 | /* Start a new grace period if one not already started. */ |
1457 | if (!rcu_gp_in_progress(rsp)) { | ||
1458 | unsigned long nestflag; | ||
1459 | struct rcu_node *rnp_root = rcu_get_root(rsp); | ||
1460 | |||
1461 | raw_spin_lock_irqsave(&rnp_root->lock, nestflag); | ||
1462 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */ | ||
1463 | } else { | ||
1464 | /* Give the grace period a kick. */ | ||
1465 | rdp->blimit = LONG_MAX; | ||
1466 | if (rsp->n_force_qs == rdp->n_force_qs_snap && | ||
1467 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1468 | force_quiescent_state(rsp, 0); | ||
1469 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1470 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1471 | } | ||
1473 | } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) | 1472 | } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) |
1474 | force_quiescent_state(rsp, 1); | 1473 | force_quiescent_state(rsp, 1); |
1475 | local_irq_restore(flags); | 1474 | local_irq_restore(flags); |
@@ -1699,13 +1698,12 @@ static void _rcu_barrier(struct rcu_state *rsp, | |||
1699 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | 1698 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU |
1700 | * might complete its grace period before all of the other CPUs | 1699 | * might complete its grace period before all of the other CPUs |
1701 | * did their increment, causing this function to return too | 1700 | * did their increment, causing this function to return too |
1702 | * early. | 1701 | * early. Note that on_each_cpu() disables irqs, which prevents |
1702 | * any CPUs from coming online or going offline until each online | ||
1703 | * CPU has queued its RCU-barrier callback. | ||
1703 | */ | 1704 | */ |
1704 | atomic_set(&rcu_barrier_cpu_count, 1); | 1705 | atomic_set(&rcu_barrier_cpu_count, 1); |
1705 | preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */ | ||
1706 | rcu_adopt_orphan_cbs(rsp); | ||
1707 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); | 1706 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); |
1708 | preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */ | ||
1709 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | 1707 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) |
1710 | complete(&rcu_barrier_completion); | 1708 | complete(&rcu_barrier_completion); |
1711 | wait_for_completion(&rcu_barrier_completion); | 1709 | wait_for_completion(&rcu_barrier_completion); |
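The comment above explains why rcu_barrier_cpu_count starts at 1: the extra reference keeps the completion from firing until every CPU has had a chance to enqueue its barrier callback. The same bias-by-one pattern, reduced to a hypothetical userspace counter with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;      /* plays the role of rcu_barrier_cpu_count */

static void participant_done(void)
{
        if (atomic_fetch_sub(&pending, 1) == 1)
                printf("last reference dropped: complete\n");
}

int main(void)
{
        int i;

        atomic_store(&pending, 1);              /* bias: hold one extra reference */
        for (i = 0; i < 4; i++)
                atomic_fetch_add(&pending, 1);  /* each participant registers */
        for (i = 0; i < 4; i++)
                participant_done();             /* they may finish in any order */
        participant_done();                     /* drop the bias; fires exactly once */
        return 0;
}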
@@ -1831,18 +1829,13 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
1831 | case CPU_DYING: | 1829 | case CPU_DYING: |
1832 | case CPU_DYING_FROZEN: | 1830 | case CPU_DYING_FROZEN: |
1833 | /* | 1831 | /* |
1834 | * preempt_disable() in _rcu_barrier() prevents stop_machine(), | 1832 | * The whole machine is "stopped" except this CPU, so we can |
1835 | * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" | 1833 | * touch any data without introducing corruption. We send the |
1836 | * returns, all online cpus have queued rcu_barrier_func(). | 1834 | * dying CPU's callbacks to an arbitrarily chosen online CPU. |
1837 | * The dying CPU clears its cpu_online_mask bit and | ||
1838 | * moves all of its RCU callbacks to ->orphan_cbs_list | ||
1839 | * in the context of stop_machine(), so subsequent calls | ||
1840 | * to _rcu_barrier() will adopt these callbacks and only | ||
1841 | * then queue rcu_barrier_func() on all remaining CPUs. | ||
1842 | */ | 1835 | */ |
1843 | rcu_send_cbs_to_orphanage(&rcu_bh_state); | 1836 | rcu_send_cbs_to_online(&rcu_bh_state); |
1844 | rcu_send_cbs_to_orphanage(&rcu_sched_state); | 1837 | rcu_send_cbs_to_online(&rcu_sched_state); |
1845 | rcu_preempt_send_cbs_to_orphanage(); | 1838 | rcu_preempt_send_cbs_to_online(); |
1846 | break; | 1839 | break; |
1847 | case CPU_DEAD: | 1840 | case CPU_DEAD: |
1848 | case CPU_DEAD_FROZEN: | 1841 | case CPU_DEAD_FROZEN: |
@@ -1880,8 +1873,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) | |||
1880 | { | 1873 | { |
1881 | int i; | 1874 | int i; |
1882 | 1875 | ||
1883 | for (i = NUM_RCU_LVLS - 1; i >= 0; i--) | 1876 | for (i = NUM_RCU_LVLS - 1; i > 0; i--) |
1884 | rsp->levelspread[i] = CONFIG_RCU_FANOUT; | 1877 | rsp->levelspread[i] = CONFIG_RCU_FANOUT; |
1878 | rsp->levelspread[0] = RCU_FANOUT_LEAF; | ||
1885 | } | 1879 | } |
1886 | #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ | 1880 | #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ |
1887 | static void __init rcu_init_levelspread(struct rcu_state *rsp) | 1881 | static void __init rcu_init_levelspread(struct rcu_state *rsp) |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 91d4170c5c1..e8f057e44e3 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -31,46 +31,51 @@ | |||
31 | /* | 31 | /* |
32 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. | 32 | * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. |
33 | * In theory, it should be possible to add more levels straightforwardly. | 33 | * In theory, it should be possible to add more levels straightforwardly. |
34 | * In practice, this has not been tested, so there is probably some | 34 | * In practice, this did work well going from three levels to four. |
35 | * bug somewhere. | 35 | * Of course, your mileage may vary. |
36 | */ | 36 | */ |
37 | #define MAX_RCU_LVLS 4 | 37 | #define MAX_RCU_LVLS 4 |
38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | 38 | #if CONFIG_RCU_FANOUT > 16 |
39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | 39 | #define RCU_FANOUT_LEAF 16 |
40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | 40 | #else /* #if CONFIG_RCU_FANOUT > 16 */ |
41 | #define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT) | 41 | #define RCU_FANOUT_LEAF (CONFIG_RCU_FANOUT) |
42 | 42 | #endif /* #else #if CONFIG_RCU_FANOUT > 16 */ | |
43 | #if NR_CPUS <= RCU_FANOUT | 43 | #define RCU_FANOUT_1 (RCU_FANOUT_LEAF) |
44 | #define RCU_FANOUT_2 (RCU_FANOUT_1 * CONFIG_RCU_FANOUT) | ||
45 | #define RCU_FANOUT_3 (RCU_FANOUT_2 * CONFIG_RCU_FANOUT) | ||
46 | #define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT) | ||
47 | |||
48 | #if NR_CPUS <= RCU_FANOUT_1 | ||
44 | # define NUM_RCU_LVLS 1 | 49 | # define NUM_RCU_LVLS 1 |
45 | # define NUM_RCU_LVL_0 1 | 50 | # define NUM_RCU_LVL_0 1 |
46 | # define NUM_RCU_LVL_1 (NR_CPUS) | 51 | # define NUM_RCU_LVL_1 (NR_CPUS) |
47 | # define NUM_RCU_LVL_2 0 | 52 | # define NUM_RCU_LVL_2 0 |
48 | # define NUM_RCU_LVL_3 0 | 53 | # define NUM_RCU_LVL_3 0 |
49 | # define NUM_RCU_LVL_4 0 | 54 | # define NUM_RCU_LVL_4 0 |
50 | #elif NR_CPUS <= RCU_FANOUT_SQ | 55 | #elif NR_CPUS <= RCU_FANOUT_2 |
51 | # define NUM_RCU_LVLS 2 | 56 | # define NUM_RCU_LVLS 2 |
52 | # define NUM_RCU_LVL_0 1 | 57 | # define NUM_RCU_LVL_0 1 |
53 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 58 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
54 | # define NUM_RCU_LVL_2 (NR_CPUS) | 59 | # define NUM_RCU_LVL_2 (NR_CPUS) |
55 | # define NUM_RCU_LVL_3 0 | 60 | # define NUM_RCU_LVL_3 0 |
56 | # define NUM_RCU_LVL_4 0 | 61 | # define NUM_RCU_LVL_4 0 |
57 | #elif NR_CPUS <= RCU_FANOUT_CUBE | 62 | #elif NR_CPUS <= RCU_FANOUT_3 |
58 | # define NUM_RCU_LVLS 3 | 63 | # define NUM_RCU_LVLS 3 |
59 | # define NUM_RCU_LVL_0 1 | 64 | # define NUM_RCU_LVL_0 1 |
60 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 65 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) |
61 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 66 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
62 | # define NUM_RCU_LVL_3 NR_CPUS | 67 | # define NUM_RCU_LVL_3 (NR_CPUS) |
63 | # define NUM_RCU_LVL_4 0 | 68 | # define NUM_RCU_LVL_4 0 |
64 | #elif NR_CPUS <= RCU_FANOUT_FOURTH | 69 | #elif NR_CPUS <= RCU_FANOUT_4 |
65 | # define NUM_RCU_LVLS 4 | 70 | # define NUM_RCU_LVLS 4 |
66 | # define NUM_RCU_LVL_0 1 | 71 | # define NUM_RCU_LVL_0 1 |
67 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE) | 72 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3) |
68 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 73 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2) |
69 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 74 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1) |
70 | # define NUM_RCU_LVL_4 NR_CPUS | 75 | # define NUM_RCU_LVL_4 (NR_CPUS) |
71 | #else | 76 | #else |
72 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | 77 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" |
73 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | 78 | #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */ |
74 | 79 | ||
75 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) | 80 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) |
76 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | 81 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) |
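With the new RCU_FANOUT_LEAF cap the level sizes are easy to work out by hand. For a hypothetical configuration (not taken from the patch) of NR_CPUS = 4096 and CONFIG_RCU_FANOUT = 64, the leaf fanout is capped at 16, a three-level tree is selected, and the result is 261 rcu_node structures. A small program reproducing the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Hypothetical configuration, not taken from the patch. */
        const long nr_cpus = 4096, fanout = 64;
        const long leaf = fanout > 16 ? 16 : fanout;    /* RCU_FANOUT_LEAF */
        const long f1 = leaf;                           /* RCU_FANOUT_1 */
        const long f2 = f1 * fanout;                    /* RCU_FANOUT_2 */
        const long f3 = f2 * fanout;                    /* RCU_FANOUT_3 */

        if (nr_cpus > f2 && nr_cpus <= f3) {            /* three-level tree */
                long lvl1 = DIV_ROUND_UP(nr_cpus, f2);  /* nodes below root: 4 */
                long lvl2 = DIV_ROUND_UP(nr_cpus, f1);  /* leaf rcu_nodes: 256 */
                printf("levels=3 lvl1=%ld lvl2=%ld rcu_nodes=%ld\n",
                       lvl1, lvl2, 1 + lvl1 + lvl2);    /* 1 + 4 + 256 = 261 */
        }
        return 0;
}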
@@ -203,8 +208,8 @@ struct rcu_data { | |||
203 | long qlen_last_fqs_check; | 208 | long qlen_last_fqs_check; |
204 | /* qlen at last check for QS forcing */ | 209 | /* qlen at last check for QS forcing */ |
205 | unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ | 210 | unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */ |
206 | unsigned long n_cbs_orphaned; /* RCU cbs sent to orphanage. */ | 211 | unsigned long n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */ |
207 | unsigned long n_cbs_adopted; /* RCU cbs adopted from orphanage. */ | 212 | unsigned long n_cbs_adopted; /* RCU cbs adopted from dying CPU */ |
208 | unsigned long n_force_qs_snap; | 213 | unsigned long n_force_qs_snap; |
209 | /* did other CPU force QS recently? */ | 214 | /* did other CPU force QS recently? */ |
210 | long blimit; /* Upper limit on a processed batch */ | 215 | long blimit; /* Upper limit on a processed batch */ |
@@ -309,15 +314,7 @@ struct rcu_state { | |||
309 | /* End of fields guarded by root rcu_node's lock. */ | 314 | /* End of fields guarded by root rcu_node's lock. */ |
310 | 315 | ||
311 | raw_spinlock_t onofflock; /* exclude on/offline and */ | 316 | raw_spinlock_t onofflock; /* exclude on/offline and */ |
312 | /* starting new GP. Also */ | 317 | /* starting new GP. */ |
313 | /* protects the following */ | ||
314 | /* orphan_cbs fields. */ | ||
315 | struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */ | ||
316 | /* orphaned by all CPUs in */ | ||
317 | /* a given leaf rcu_node */ | ||
318 | /* going offline. */ | ||
319 | struct rcu_head **orphan_cbs_tail; /* And tail pointer. */ | ||
320 | long orphan_qlen; /* Number of orphaned cbs. */ | ||
321 | raw_spinlock_t fqslock; /* Only one task forcing */ | 318 | raw_spinlock_t fqslock; /* Only one task forcing */ |
322 | /* quiescent states. */ | 319 | /* quiescent states. */ |
323 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 320 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
@@ -390,7 +387,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp); | |||
390 | static int rcu_preempt_pending(int cpu); | 387 | static int rcu_preempt_pending(int cpu); |
391 | static int rcu_preempt_needs_cpu(int cpu); | 388 | static int rcu_preempt_needs_cpu(int cpu); |
392 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | 389 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); |
393 | static void rcu_preempt_send_cbs_to_orphanage(void); | 390 | static void rcu_preempt_send_cbs_to_online(void); |
394 | static void __init __rcu_init_preempt(void); | 391 | static void __init __rcu_init_preempt(void); |
395 | static void rcu_needs_cpu_flush(void); | 392 | static void rcu_needs_cpu_flush(void); |
396 | 393 | ||
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index 71a4147473f..a3638710dc6 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/stop_machine.h> | ||
28 | 29 | ||
29 | /* | 30 | /* |
30 | * Check the RCU kernel configuration parameters and print informative | 31 | * Check the RCU kernel configuration parameters and print informative |
@@ -773,11 +774,11 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
773 | } | 774 | } |
774 | 775 | ||
775 | /* | 776 | /* |
776 | * Move preemptable RCU's callbacks to ->orphan_cbs_list. | 777 | * Move preemptable RCU's callbacks from the dying CPU to another online CPU. |
777 | */ | 778 | */ |
778 | static void rcu_preempt_send_cbs_to_orphanage(void) | 779 | static void rcu_preempt_send_cbs_to_online(void) |
779 | { | 780 | { |
780 | rcu_send_cbs_to_orphanage(&rcu_preempt_state); | 781 | rcu_send_cbs_to_online(&rcu_preempt_state); |
781 | } | 782 | } |
782 | 783 | ||
783 | /* | 784 | /* |
@@ -1001,7 +1002,7 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu) | |||
1001 | /* | 1002 | /* |
1002 | * Because there is no preemptable RCU, there are no callbacks to move. | 1003 | * Because there is no preemptable RCU, there are no callbacks to move. |
1003 | */ | 1004 | */ |
1004 | static void rcu_preempt_send_cbs_to_orphanage(void) | 1005 | static void rcu_preempt_send_cbs_to_online(void) |
1005 | { | 1006 | { |
1006 | } | 1007 | } |
1007 | 1008 | ||
@@ -1014,6 +1015,132 @@ static void __init __rcu_init_preempt(void) | |||
1014 | 1015 | ||
1015 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1016 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1016 | 1017 | ||
1018 | #ifndef CONFIG_SMP | ||
1019 | |||
1020 | void synchronize_sched_expedited(void) | ||
1021 | { | ||
1022 | cond_resched(); | ||
1023 | } | ||
1024 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
1025 | |||
1026 | #else /* #ifndef CONFIG_SMP */ | ||
1027 | |||
1028 | static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0); | ||
1029 | static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0); | ||
1030 | |||
1031 | static int synchronize_sched_expedited_cpu_stop(void *data) | ||
1032 | { | ||
1033 | /* | ||
1034 | * There must be a full memory barrier on each affected CPU | ||
1035 | * between the time that try_stop_cpus() is called and the | ||
1036 | * time that it returns. | ||
1037 | * | ||
1038 | * In the current initial implementation of cpu_stop, the | ||
1039 | * above condition is already met when the control reaches | ||
1040 | * this point and the following smp_mb() is not strictly | ||
1041 | * necessary. Do smp_mb() anyway for documentation and | ||
1042 | * robustness against future implementation changes. | ||
1043 | */ | ||
1044 | smp_mb(); /* See above comment block. */ | ||
1045 | return 0; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" | ||
1050 | * approach to force grace period to end quickly. This consumes | ||
1051 | * significant time on all CPUs, and is thus not recommended for | ||
1052 | * any sort of common-case code. | ||
1053 | * | ||
1054 | * Note that it is illegal to call this function while holding any | ||
1055 | * lock that is acquired by a CPU-hotplug notifier. Failing to | ||
1056 | * observe this restriction will result in deadlock. | ||
1057 | * | ||
1058 | * This implementation can be thought of as an application of ticket | ||
1059 | * locking to RCU, with sync_sched_expedited_started and | ||
1060 | * sync_sched_expedited_done taking on the roles of the halves | ||
1061 | * of the ticket-lock word. Each task atomically increments | ||
1062 | * sync_sched_expedited_started upon entry, snapshotting the old value, | ||
1063 | * then attempts to stop all the CPUs. If this succeeds, then each | ||
1064 | * CPU will have executed a context switch, resulting in an RCU-sched | ||
1065 | * grace period. We are then done, so we use atomic_cmpxchg() to | ||
1066 | * update sync_sched_expedited_done to match our snapshot -- but | ||
1067 | * only if someone else has not already advanced past our snapshot. | ||
1068 | * | ||
1069 | * On the other hand, if try_stop_cpus() fails, we check the value | ||
1070 | * of sync_sched_expedited_done. If it has advanced past our | ||
1071 | * initial snapshot, then someone else must have forced a grace period | ||
1072 | * some time after we took our snapshot. In this case, our work is | ||
1073 | * done for us, and we can simply return. Otherwise, we try again, | ||
1074 | * but keep our initial snapshot for purposes of checking for someone | ||
1075 | * doing our work for us. | ||
1076 | * | ||
1077 | * If we fail too many times in a row, we fall back to synchronize_sched(). | ||
1078 | */ | ||
1079 | void synchronize_sched_expedited(void) | ||
1080 | { | ||
1081 | int firstsnap, s, snap, trycount = 0; | ||
1082 | |||
1083 | /* Note that atomic_inc_return() implies full memory barrier. */ | ||
1084 | firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started); | ||
1085 | get_online_cpus(); | ||
1086 | |||
1087 | /* | ||
1088 | * Each pass through the following loop attempts to force a | ||
1089 | * context switch on each CPU. | ||
1090 | */ | ||
1091 | while (try_stop_cpus(cpu_online_mask, | ||
1092 | synchronize_sched_expedited_cpu_stop, | ||
1093 | NULL) == -EAGAIN) { | ||
1094 | put_online_cpus(); | ||
1095 | |||
1096 | /* No joy, try again later. Or just synchronize_sched(). */ | ||
1097 | if (trycount++ < 10) | ||
1098 | udelay(trycount * num_online_cpus()); | ||
1099 | else { | ||
1100 | synchronize_sched(); | ||
1101 | return; | ||
1102 | } | ||
1103 | |||
1104 | /* Check to see if someone else did our work for us. */ | ||
1105 | s = atomic_read(&sync_sched_expedited_done); | ||
1106 | if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) { | ||
1107 | smp_mb(); /* ensure test happens before caller kfree */ | ||
1108 | return; | ||
1109 | } | ||
1110 | |||
1111 | /* | ||
1112 | * Refetching sync_sched_expedited_started allows later | ||
1113 | * callers to piggyback on our grace period. We subtract | ||
1114 | * 1 to get the same token that the last incrementer got. | ||
1115 | * We retry after they started, so our grace period works | ||
1116 | * for them, and they started after our first try, so their | ||
1117 | * grace period works for us. | ||
1118 | */ | ||
1119 | get_online_cpus(); | ||
1120 | snap = atomic_read(&sync_sched_expedited_started) - 1; | ||
1121 | smp_mb(); /* ensure read is before try_stop_cpus(). */ | ||
1122 | } | ||
1123 | |||
1124 | /* | ||
1125 | * Everyone up to our most recent fetch is covered by our grace | ||
1126 | * period. Update the counter, but only if our work is still | ||
1127 | * relevant -- which it won't be if someone who started later | ||
1128 | * than we did beat us to the punch. | ||
1129 | */ | ||
1130 | do { | ||
1131 | s = atomic_read(&sync_sched_expedited_done); | ||
1132 | if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) { | ||
1133 | smp_mb(); /* ensure test happens before caller kfree */ | ||
1134 | break; | ||
1135 | } | ||
1136 | } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s); | ||
1137 | |||
1138 | put_online_cpus(); | ||
1139 | } | ||
1140 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
1141 | |||
1142 | #endif /* #else #ifndef CONFIG_SMP */ | ||
1143 | |||
1017 | #if !defined(CONFIG_RCU_FAST_NO_HZ) | 1144 | #if !defined(CONFIG_RCU_FAST_NO_HZ) |
1018 | 1145 | ||
1019 | /* | 1146 | /* |
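The synchronize_sched_expedited() rework above replaces the old single counter with a started/done pair that behaves like the two halves of a ticket lock: each caller snapshots "started" on entry and later advances "done" to its snapshot, unless someone else has already moved it further, in which case the other caller's grace period covered this one too. A hypothetical userspace analogue of just that counter protocol (C11 atomics; the actual grace-period forcing via try_stop_cpus() is elided):

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for sync_sched_expedited_started/_done. */
static atomic_int started;
static atomic_int done;

static void expedite(void)
{
        int snap = atomic_fetch_add(&started, 1) + 1;   /* take a ticket */
        int s;

        /* ... force the grace period here ... */

        /* Advance "done" to our ticket unless someone already went past it. */
        s = atomic_load(&done);
        while (s - snap < 0 &&
               !atomic_compare_exchange_weak(&done, &s, snap))
                ;       /* a failed CAS reloads s; retry or give up */
}

int main(void)
{
        expedite();
        printf("started=%d done=%d\n",
               atomic_load(&started), atomic_load(&done));
        return 0;
}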
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index d15430b9d12..c8e97853b97 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -166,13 +166,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
166 | 166 | ||
167 | gpnum = rsp->gpnum; | 167 | gpnum = rsp->gpnum; |
168 | seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x " | 168 | seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x " |
169 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", | 169 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", |
170 | rsp->completed, gpnum, rsp->signaled, | 170 | rsp->completed, gpnum, rsp->signaled, |
171 | (long)(rsp->jiffies_force_qs - jiffies), | 171 | (long)(rsp->jiffies_force_qs - jiffies), |
172 | (int)(jiffies & 0xffff), | 172 | (int)(jiffies & 0xffff), |
173 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 173 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
174 | rsp->n_force_qs - rsp->n_force_qs_ngp, | 174 | rsp->n_force_qs - rsp->n_force_qs_ngp, |
175 | rsp->n_force_qs_lh, rsp->orphan_qlen); | 175 | rsp->n_force_qs_lh); |
176 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { | 176 | for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { |
177 | if (rnp->level != level) { | 177 | if (rnp->level != level) { |
178 | seq_puts(m, "\n"); | 178 | seq_puts(m, "\n"); |
@@ -300,7 +300,7 @@ static const struct file_operations rcu_pending_fops = { | |||
300 | 300 | ||
301 | static struct dentry *rcudir; | 301 | static struct dentry *rcudir; |
302 | 302 | ||
303 | static int __init rcuclassic_trace_init(void) | 303 | static int __init rcutree_trace_init(void) |
304 | { | 304 | { |
305 | struct dentry *retval; | 305 | struct dentry *retval; |
306 | 306 | ||
@@ -337,14 +337,14 @@ free_out: | |||
337 | return 1; | 337 | return 1; |
338 | } | 338 | } |
339 | 339 | ||
340 | static void __exit rcuclassic_trace_cleanup(void) | 340 | static void __exit rcutree_trace_cleanup(void) |
341 | { | 341 | { |
342 | debugfs_remove_recursive(rcudir); | 342 | debugfs_remove_recursive(rcudir); |
343 | } | 343 | } |
344 | 344 | ||
345 | 345 | ||
346 | module_init(rcuclassic_trace_init); | 346 | module_init(rcutree_trace_init); |
347 | module_exit(rcuclassic_trace_cleanup); | 347 | module_exit(rcutree_trace_cleanup); |
348 | 348 | ||
349 | MODULE_AUTHOR("Paul E. McKenney"); | 349 | MODULE_AUTHOR("Paul E. McKenney"); |
350 | MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation"); | 350 | MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation"); |
diff --git a/kernel/sched.c b/kernel/sched.c index 114a0deb2b0..04949089e76 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -8067,8 +8067,6 @@ void __init sched_init(void) | |||
8067 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | 8067 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
8068 | #endif /* SMP */ | 8068 | #endif /* SMP */ |
8069 | 8069 | ||
8070 | perf_event_init(); | ||
8071 | |||
8072 | scheduler_running = 1; | 8070 | scheduler_running = 1; |
8073 | } | 8071 | } |
8074 | 8072 | ||
@@ -9241,72 +9239,3 @@ struct cgroup_subsys cpuacct_subsys = { | |||
9241 | }; | 9239 | }; |
9242 | #endif /* CONFIG_CGROUP_CPUACCT */ | 9240 | #endif /* CONFIG_CGROUP_CPUACCT */ |
9243 | 9241 | ||
9244 | #ifndef CONFIG_SMP | ||
9245 | |||
9246 | void synchronize_sched_expedited(void) | ||
9247 | { | ||
9248 | barrier(); | ||
9249 | } | ||
9250 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
9251 | |||
9252 | #else /* #ifndef CONFIG_SMP */ | ||
9253 | |||
9254 | static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0); | ||
9255 | |||
9256 | static int synchronize_sched_expedited_cpu_stop(void *data) | ||
9257 | { | ||
9258 | /* | ||
9259 | * There must be a full memory barrier on each affected CPU | ||
9260 | * between the time that try_stop_cpus() is called and the | ||
9261 | * time that it returns. | ||
9262 | * | ||
9263 | * In the current initial implementation of cpu_stop, the | ||
9264 | * above condition is already met when the control reaches | ||
9265 | * this point and the following smp_mb() is not strictly | ||
9266 | * necessary. Do smp_mb() anyway for documentation and | ||
9267 | * robustness against future implementation changes. | ||
9268 | */ | ||
9269 | smp_mb(); /* See above comment block. */ | ||
9270 | return 0; | ||
9271 | } | ||
9272 | |||
9273 | /* | ||
9274 | * Wait for an rcu-sched grace period to elapse, but use "big hammer" | ||
9275 | * approach to force grace period to end quickly. This consumes | ||
9276 | * significant time on all CPUs, and is thus not recommended for | ||
9277 | * any sort of common-case code. | ||
9278 | * | ||
9279 | * Note that it is illegal to call this function while holding any | ||
9280 | * lock that is acquired by a CPU-hotplug notifier. Failing to | ||
9281 | * observe this restriction will result in deadlock. | ||
9282 | */ | ||
9283 | void synchronize_sched_expedited(void) | ||
9284 | { | ||
9285 | int snap, trycount = 0; | ||
9286 | |||
9287 | smp_mb(); /* ensure prior mod happens before capturing snap. */ | ||
9288 | snap = atomic_read(&synchronize_sched_expedited_count) + 1; | ||
9289 | get_online_cpus(); | ||
9290 | while (try_stop_cpus(cpu_online_mask, | ||
9291 | synchronize_sched_expedited_cpu_stop, | ||
9292 | NULL) == -EAGAIN) { | ||
9293 | put_online_cpus(); | ||
9294 | if (trycount++ < 10) | ||
9295 | udelay(trycount * num_online_cpus()); | ||
9296 | else { | ||
9297 | synchronize_sched(); | ||
9298 | return; | ||
9299 | } | ||
9300 | if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) { | ||
9301 | smp_mb(); /* ensure test happens before caller kfree */ | ||
9302 | return; | ||
9303 | } | ||
9304 | get_online_cpus(); | ||
9305 | } | ||
9306 | atomic_inc(&synchronize_sched_expedited_count); | ||
9307 | smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */ | ||
9308 | put_online_cpus(); | ||
9309 | } | ||
9310 | EXPORT_SYMBOL_GPL(synchronize_sched_expedited); | ||
9311 | |||
9312 | #endif /* #else #ifndef CONFIG_SMP */ | ||
diff --git a/kernel/srcu.c b/kernel/srcu.c index c71e0750053..98d8c1e80ed 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/rcupdate.h> | 31 | #include <linux/rcupdate.h> |
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/smp.h> | 33 | #include <linux/smp.h> |
34 | #include <linux/delay.h> | ||
34 | #include <linux/srcu.h> | 35 | #include <linux/srcu.h> |
35 | 36 | ||
36 | static int init_srcu_struct_fields(struct srcu_struct *sp) | 37 | static int init_srcu_struct_fields(struct srcu_struct *sp) |
@@ -203,9 +204,14 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) | |||
203 | * all srcu_read_lock() calls using the old counters have completed. | 204 | * all srcu_read_lock() calls using the old counters have completed. |
204 | * Their corresponding critical sections might well be still | 205 | * Their corresponding critical sections might well be still |
205 | * executing, but the srcu_read_lock() primitives themselves | 206 | * executing, but the srcu_read_lock() primitives themselves |
206 | * will have finished executing. | 207 | * will have finished executing. We initially give readers |
208 | * an arbitrarily chosen 10 microseconds to get out of their | ||
209 | * SRCU read-side critical sections, then loop waiting 1/HZ | ||
210 | * seconds per iteration. | ||
207 | */ | 211 | */ |
208 | 212 | ||
213 | if (srcu_readers_active_idx(sp, idx)) | ||
214 | udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY); | ||
209 | while (srcu_readers_active_idx(sp, idx)) | 215 | while (srcu_readers_active_idx(sp, idx)) |
210 | schedule_timeout_interruptible(1); | 216 | schedule_timeout_interruptible(1); |
211 | 217 | ||
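The SRCU change gives readers a short initial delay before falling back to the coarse once-per-tick polling loop, so short read-side critical sections no longer cost a full jiffy of grace-period latency. A userspace sketch of that two-phase wait, with a simulated readers_active() standing in for srcu_readers_active_idx():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Pretend reader census: drains after three polls. */
static bool readers_active(void)
{
        static int remaining = 3;
        return remaining-- > 0;
}

static void sleep_us(long us)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = us * 1000 };
        nanosleep(&ts, NULL);
}

static void wait_for_readers(void)
{
        if (readers_active())
                sleep_us(10);           /* cheap first chance for readers */
        while (readers_active())
                sleep_us(10 * 1000);    /* then poll at a coarse period */
}

int main(void)
{
        wait_for_readers();
        printf("no readers left\n");
        return 0;
}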
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 121e4fff03d..ae5cbb1e3ce 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -744,21 +744,21 @@ static struct ctl_table kern_table[] = { | |||
744 | .extra1 = &zero, | 744 | .extra1 = &zero, |
745 | .extra2 = &one, | 745 | .extra2 = &one, |
746 | }, | 746 | }, |
747 | #endif | ||
748 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_LOCKUP_DETECTOR) | ||
749 | { | 747 | { |
750 | .procname = "unknown_nmi_panic", | 748 | .procname = "nmi_watchdog", |
751 | .data = &unknown_nmi_panic, | 749 | .data = &watchdog_enabled, |
752 | .maxlen = sizeof (int), | 750 | .maxlen = sizeof (int), |
753 | .mode = 0644, | 751 | .mode = 0644, |
754 | .proc_handler = proc_dointvec, | 752 | .proc_handler = proc_dowatchdog_enabled, |
755 | }, | 753 | }, |
754 | #endif | ||
755 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) | ||
756 | { | 756 | { |
757 | .procname = "nmi_watchdog", | 757 | .procname = "unknown_nmi_panic", |
758 | .data = &nmi_watchdog_enabled, | 758 | .data = &unknown_nmi_panic, |
759 | .maxlen = sizeof (int), | 759 | .maxlen = sizeof (int), |
760 | .mode = 0644, | 760 | .mode = 0644, |
761 | .proc_handler = proc_nmi_enabled, | 761 | .proc_handler = proc_dointvec, |
762 | }, | 762 | }, |
763 | #endif | 763 | #endif |
764 | #if defined(CONFIG_X86) | 764 | #if defined(CONFIG_X86) |
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 1357c578606..4b2545a136f 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c | |||
@@ -136,7 +136,6 @@ static const struct bin_table bin_kern_table[] = { | |||
136 | { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" }, | 136 | { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" }, |
137 | { CTL_INT, KERN_COMPAT_LOG, "compat-log" }, | 137 | { CTL_INT, KERN_COMPAT_LOG, "compat-log" }, |
138 | { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, | 138 | { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, |
139 | { CTL_INT, KERN_NMI_WATCHDOG, "nmi_watchdog" }, | ||
140 | { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, | 139 | { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, |
141 | {} | 140 | {} |
142 | }; | 141 | }; |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index ea37e2ff416..14674dce77a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -69,6 +69,21 @@ config EVENT_TRACING | |||
69 | select CONTEXT_SWITCH_TRACER | 69 | select CONTEXT_SWITCH_TRACER |
70 | bool | 70 | bool |
71 | 71 | ||
72 | config EVENT_POWER_TRACING_DEPRECATED | ||
73 | depends on EVENT_TRACING | ||
74 | bool "Deprecated power event trace API, to be removed" | ||
75 | default y | ||
76 | help | ||
77 | Provides old power event types: | ||
78 | C-state/idle accounting events: | ||
79 | power:power_start | ||
80 | power:power_end | ||
81 | and old cpufreq accounting event: | ||
82 | power:power_frequency | ||
83 | This is for userspace compatibility | ||
84 | and will be removed after five kernel releases, | ||
85 | namely in 2.6.41. | ||
86 | |||
72 | config CONTEXT_SWITCH_TRACER | 87 | config CONTEXT_SWITCH_TRACER |
73 | bool | 88 | bool |
74 | 89 | ||
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c index a22582a0616..f55fcf61b22 100644 --- a/kernel/trace/power-traces.c +++ b/kernel/trace/power-traces.c | |||
@@ -13,5 +13,8 @@ | |||
13 | #define CREATE_TRACE_POINTS | 13 | #define CREATE_TRACE_POINTS |
14 | #include <trace/events/power.h> | 14 | #include <trace/events/power.h> |
15 | 15 | ||
16 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency); | 16 | #ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED |
17 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_start); | ||
18 | #endif | ||
19 | EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle); | ||
17 | 20 | ||
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 39c059ca670..19a359d5e6d 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -21,17 +21,46 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)]) | |||
21 | /* Count the events in use (per event id, not per instance) */ | 21 | /* Count the events in use (per event id, not per instance) */ |
22 | static int total_ref_count; | 22 | static int total_ref_count; |
23 | 23 | ||
24 | static int perf_trace_event_perm(struct ftrace_event_call *tp_event, | ||
25 | struct perf_event *p_event) | ||
26 | { | ||
27 | /* No tracing, just counting, so no obvious leak */ | ||
28 | if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) | ||
29 | return 0; | ||
30 | |||
31 | /* Some events are ok to be traced by non-root users... */ | ||
32 | if (p_event->attach_state == PERF_ATTACH_TASK) { | ||
33 | if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY) | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | /* | ||
38 | * ...otherwise raw tracepoint data can be a severe data leak, | ||
39 | * so only allow root to have these. | ||
40 | */ | ||
41 | if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) | ||
42 | return -EPERM; | ||
43 | |||
44 | return 0; | ||
45 | } | ||
46 | |||
24 | static int perf_trace_event_init(struct ftrace_event_call *tp_event, | 47 | static int perf_trace_event_init(struct ftrace_event_call *tp_event, |
25 | struct perf_event *p_event) | 48 | struct perf_event *p_event) |
26 | { | 49 | { |
27 | struct hlist_head __percpu *list; | 50 | struct hlist_head __percpu *list; |
28 | int ret = -ENOMEM; | 51 | int ret; |
29 | int cpu; | 52 | int cpu; |
30 | 53 | ||
54 | ret = perf_trace_event_perm(tp_event, p_event); | ||
55 | if (ret) | ||
56 | return ret; | ||
57 | |||
31 | p_event->tp_event = tp_event; | 58 | p_event->tp_event = tp_event; |
32 | if (tp_event->perf_refcount++ > 0) | 59 | if (tp_event->perf_refcount++ > 0) |
33 | return 0; | 60 | return 0; |
34 | 61 | ||
62 | ret = -ENOMEM; | ||
63 | |||
35 | list = alloc_percpu(struct hlist_head); | 64 | list = alloc_percpu(struct hlist_head); |
36 | if (!list) | 65 | if (!list) |
37 | goto fail; | 66 | goto fail; |
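perf_trace_event_perm() gates access to raw tracepoint payloads: pure counting is always allowed, per-task events pass when the event is flagged TRACE_EVENT_FL_CAP_ANY, and otherwise raw data requires CAP_SYS_ADMIN whenever the paranoid tracepoint setting is on. A hypothetical restatement of that decision tree with plain booleans (the parameter names are stand-ins, not the kernel's types):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int perm_check(bool wants_raw_samples, bool per_task_attach,
                      bool event_cap_any, bool paranoid_tp_raw, bool is_admin)
{
        if (!wants_raw_samples)
                return 0;               /* counting only: nothing leaks */
        if (per_task_attach && event_cap_any)
                return 0;               /* event explicitly opened to this case */
        if (paranoid_tp_raw && !is_admin)
                return -EPERM;          /* raw payloads are root-only */
        return 0;
}

int main(void)
{
        printf("%d\n", perm_check(true, false, false, true, false));   /* -EPERM */
        printf("%d\n", perm_check(true, true, true, true, false));     /* 0 */
        return 0;
}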
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 0725eeab193..35fde09b81d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -27,6 +27,12 @@ | |||
27 | 27 | ||
28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
29 | 29 | ||
30 | DEFINE_MUTEX(event_storage_mutex); | ||
31 | EXPORT_SYMBOL_GPL(event_storage_mutex); | ||
32 | |||
33 | char event_storage[EVENT_STORAGE_SIZE]; | ||
34 | EXPORT_SYMBOL_GPL(event_storage); | ||
35 | |||
30 | LIST_HEAD(ftrace_events); | 36 | LIST_HEAD(ftrace_events); |
31 | LIST_HEAD(ftrace_common_fields); | 37 | LIST_HEAD(ftrace_common_fields); |
32 | 38 | ||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 4ba44deaac2..4b74d71705c 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -83,13 +83,19 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
83 | 83 | ||
84 | #undef __array | 84 | #undef __array |
85 | #define __array(type, item, len) \ | 85 | #define __array(type, item, len) \ |
86 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 86 | do { \ |
87 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 87 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
88 | mutex_lock(&event_storage_mutex); \ | ||
89 | snprintf(event_storage, sizeof(event_storage), \ | ||
90 | "%s[%d]", #type, len); \ | ||
91 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
88 | offsetof(typeof(field), item), \ | 92 | offsetof(typeof(field), item), \ |
89 | sizeof(field.item), \ | 93 | sizeof(field.item), \ |
90 | is_signed_type(type), FILTER_OTHER); \ | 94 | is_signed_type(type), FILTER_OTHER); \ |
91 | if (ret) \ | 95 | mutex_unlock(&event_storage_mutex); \ |
92 | return ret; | 96 | if (ret) \ |
97 | return ret; \ | ||
98 | } while (0); | ||
93 | 99 | ||
94 | #undef __array_desc | 100 | #undef __array_desc |
95 | #define __array_desc(type, container, item, len) \ | 101 | #define __array_desc(type, container, item, len) \ |
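The __array() rework wraps its now multi-statement body in do { ... } while (0) so the macro still expands to a single statement, which matters once it takes event_storage_mutex and builds the type string in the shared event_storage buffer. A standalone reminder of why that idiom is needed; LOG_AND_COUNT is a made-up example macro:

#include <stdio.h>

/* Without the do/while wrapper, the macro's two statements would break
 * apart under an un-braced if/else. */
#define LOG_AND_COUNT(msg, counter)             \
        do {                                    \
                printf("%s\n", msg);            \
                (counter)++;                    \
        } while (0)

int main(void)
{
        int events = 0;

        if (events == 0)
                LOG_AND_COUNT("first event", events);   /* expands as one statement */
        else
                LOG_AND_COUNT("another event", events);

        printf("events=%d\n", events);
        return 0;
}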
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index c812c4927ca..6e7b575ac33 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -57,6 +57,8 @@ static int __init hardlockup_panic_setup(char *str) | |||
57 | { | 57 | { |
58 | if (!strncmp(str, "panic", 5)) | 58 | if (!strncmp(str, "panic", 5)) |
59 | hardlockup_panic = 1; | 59 | hardlockup_panic = 1; |
60 | else if (!strncmp(str, "0", 1)) | ||
61 | no_watchdog = 1; | ||
60 | return 1; | 62 | return 1; |
61 | } | 63 | } |
62 | __setup("nmi_watchdog=", hardlockup_panic_setup); | 64 | __setup("nmi_watchdog=", hardlockup_panic_setup); |
@@ -548,13 +550,13 @@ static struct notifier_block __cpuinitdata cpu_nfb = { | |||
548 | .notifier_call = cpu_callback | 550 | .notifier_call = cpu_callback |
549 | }; | 551 | }; |
550 | 552 | ||
551 | static int __init spawn_watchdog_task(void) | 553 | void __init lockup_detector_init(void) |
552 | { | 554 | { |
553 | void *cpu = (void *)(long)smp_processor_id(); | 555 | void *cpu = (void *)(long)smp_processor_id(); |
554 | int err; | 556 | int err; |
555 | 557 | ||
556 | if (no_watchdog) | 558 | if (no_watchdog) |
557 | return 0; | 559 | return; |
558 | 560 | ||
559 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 561 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
560 | WARN_ON(notifier_to_errno(err)); | 562 | WARN_ON(notifier_to_errno(err)); |
@@ -562,6 +564,5 @@ static int __init spawn_watchdog_task(void) | |||
562 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | 564 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); |
563 | register_cpu_notifier(&cpu_nfb); | 565 | register_cpu_notifier(&cpu_nfb); |
564 | 566 | ||
565 | return 0; | 567 | return; |
566 | } | 568 | } |
567 | early_initcall(spawn_watchdog_task); | ||