Diffstat (limited to 'kernel')
44 files changed, 2604 insertions, 765 deletions
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
1 | # | ||
2 | # The ARCH_INLINE foo is necessary because select ignores "depends on" | ||
3 | # | ||
4 | config ARCH_INLINE_SPIN_TRYLOCK | ||
5 | bool | ||
6 | |||
7 | config ARCH_INLINE_SPIN_TRYLOCK_BH | ||
8 | bool | ||
9 | |||
10 | config ARCH_INLINE_SPIN_LOCK | ||
11 | bool | ||
12 | |||
13 | config ARCH_INLINE_SPIN_LOCK_BH | ||
14 | bool | ||
15 | |||
16 | config ARCH_INLINE_SPIN_LOCK_IRQ | ||
17 | bool | ||
18 | |||
19 | config ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
20 | bool | ||
21 | |||
22 | config ARCH_INLINE_SPIN_UNLOCK | ||
23 | bool | ||
24 | |||
25 | config ARCH_INLINE_SPIN_UNLOCK_BH | ||
26 | bool | ||
27 | |||
28 | config ARCH_INLINE_SPIN_UNLOCK_IRQ | ||
29 | bool | ||
30 | |||
31 | config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
32 | bool | ||
33 | |||
34 | |||
35 | config ARCH_INLINE_READ_TRYLOCK | ||
36 | bool | ||
37 | |||
38 | config ARCH_INLINE_READ_LOCK | ||
39 | bool | ||
40 | |||
41 | config ARCH_INLINE_READ_LOCK_BH | ||
42 | bool | ||
43 | |||
44 | config ARCH_INLINE_READ_LOCK_IRQ | ||
45 | bool | ||
46 | |||
47 | config ARCH_INLINE_READ_LOCK_IRQSAVE | ||
48 | bool | ||
49 | |||
50 | config ARCH_INLINE_READ_UNLOCK | ||
51 | bool | ||
52 | |||
53 | config ARCH_INLINE_READ_UNLOCK_BH | ||
54 | bool | ||
55 | |||
56 | config ARCH_INLINE_READ_UNLOCK_IRQ | ||
57 | bool | ||
58 | |||
59 | config ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
60 | bool | ||
61 | |||
62 | |||
63 | config ARCH_INLINE_WRITE_TRYLOCK | ||
64 | bool | ||
65 | |||
66 | config ARCH_INLINE_WRITE_LOCK | ||
67 | bool | ||
68 | |||
69 | config ARCH_INLINE_WRITE_LOCK_BH | ||
70 | bool | ||
71 | |||
72 | config ARCH_INLINE_WRITE_LOCK_IRQ | ||
73 | bool | ||
74 | |||
75 | config ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
76 | bool | ||
77 | |||
78 | config ARCH_INLINE_WRITE_UNLOCK | ||
79 | bool | ||
80 | |||
81 | config ARCH_INLINE_WRITE_UNLOCK_BH | ||
82 | bool | ||
83 | |||
84 | config ARCH_INLINE_WRITE_UNLOCK_IRQ | ||
85 | bool | ||
86 | |||
87 | config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
88 | bool | ||
89 | |||
90 | # | ||
91 | # lock_* functions are inlined when: | ||
92 | # - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y | ||
93 | # | ||
94 | # trylock_* functions are inlined when: | ||
95 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
96 | # | ||
97 | # unlock and unlock_irq functions are inlined when: | ||
98 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
99 | # or | ||
100 | # - DEBUG_SPINLOCK=n and PREEMPT=n | ||
101 | # | ||
102 | # unlock_bh and unlock_irqrestore functions are inlined when: | ||
103 | # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y | ||
104 | # | ||
105 | |||
106 | config INLINE_SPIN_TRYLOCK | ||
107 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK | ||
108 | |||
109 | config INLINE_SPIN_TRYLOCK_BH | ||
110 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH | ||
111 | |||
112 | config INLINE_SPIN_LOCK | ||
113 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK | ||
114 | |||
115 | config INLINE_SPIN_LOCK_BH | ||
116 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
117 | ARCH_INLINE_SPIN_LOCK_BH | ||
118 | |||
119 | config INLINE_SPIN_LOCK_IRQ | ||
120 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
121 | ARCH_INLINE_SPIN_LOCK_IRQ | ||
122 | |||
123 | config INLINE_SPIN_LOCK_IRQSAVE | ||
124 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
125 | ARCH_INLINE_SPIN_LOCK_IRQSAVE | ||
126 | |||
127 | config INLINE_SPIN_UNLOCK | ||
128 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK) | ||
129 | |||
130 | config INLINE_SPIN_UNLOCK_BH | ||
131 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH | ||
132 | |||
133 | config INLINE_SPIN_UNLOCK_IRQ | ||
134 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH) | ||
135 | |||
136 | config INLINE_SPIN_UNLOCK_IRQRESTORE | ||
137 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE | ||
138 | |||
139 | |||
140 | config INLINE_READ_TRYLOCK | ||
141 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK | ||
142 | |||
143 | config INLINE_READ_LOCK | ||
144 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK | ||
145 | |||
146 | config INLINE_READ_LOCK_BH | ||
147 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
148 | ARCH_INLINE_READ_LOCK_BH | ||
149 | |||
150 | config INLINE_READ_LOCK_IRQ | ||
151 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
152 | ARCH_INLINE_READ_LOCK_IRQ | ||
153 | |||
154 | config INLINE_READ_LOCK_IRQSAVE | ||
155 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
156 | ARCH_INLINE_READ_LOCK_IRQSAVE | ||
157 | |||
158 | config INLINE_READ_UNLOCK | ||
159 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK) | ||
160 | |||
161 | config INLINE_READ_UNLOCK_BH | ||
162 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH | ||
163 | |||
164 | config INLINE_READ_UNLOCK_IRQ | ||
165 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH) | ||
166 | |||
167 | config INLINE_READ_UNLOCK_IRQRESTORE | ||
168 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE | ||
169 | |||
170 | |||
171 | config INLINE_WRITE_TRYLOCK | ||
172 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK | ||
173 | |||
174 | config INLINE_WRITE_LOCK | ||
175 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK | ||
176 | |||
177 | config INLINE_WRITE_LOCK_BH | ||
178 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
179 | ARCH_INLINE_WRITE_LOCK_BH | ||
180 | |||
181 | config INLINE_WRITE_LOCK_IRQ | ||
182 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
183 | ARCH_INLINE_WRITE_LOCK_IRQ | ||
184 | |||
185 | config INLINE_WRITE_LOCK_IRQSAVE | ||
186 | def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ | ||
187 | ARCH_INLINE_WRITE_LOCK_IRQSAVE | ||
188 | |||
189 | config INLINE_WRITE_UNLOCK | ||
190 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK) | ||
191 | |||
192 | config INLINE_WRITE_UNLOCK_BH | ||
193 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH | ||
194 | |||
195 | config INLINE_WRITE_UNLOCK_IRQ | ||
196 | def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH) | ||
197 | |||
198 | config INLINE_WRITE_UNLOCK_IRQRESTORE | ||
199 | def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
200 | |||
201 | config MUTEX_SPIN_ON_OWNER | ||
202 | def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES | ||
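Note: the comment block around line 90 of the new file is the policy; each generated CONFIG_INLINE_* symbol decides whether the corresponding lock operation is expanded inline in the headers or left as a call into the shared out-of-line implementation. The following is a standalone sketch of that pattern using made-up names (my_lock, MY_INLINE_UNLOCK), not the kernel's actual spinlock headers.

/* Sketch of the inline-vs-out-of-line choice these symbols control. */
#include <stdatomic.h>

struct my_lock { atomic_flag taken; };

static inline void __my_unlock(struct my_lock *l)
{
	atomic_flag_clear_explicit(&l->taken, memory_order_release);
}

#ifdef MY_INLINE_UNLOCK               /* analogous to CONFIG_INLINE_SPIN_UNLOCK=y */
#define my_unlock(l) __my_unlock(l)   /* expanded at every call site */
#else
void my_unlock(struct my_lock *l);    /* one shared out-of-line copy in a .c file */
#endif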
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..dcf6789bf547 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
82 | obj-$(CONFIG_TREE_RCU) += rcutree.o | 82 | obj-$(CONFIG_TREE_RCU) += rcutree.o |
83 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o | 83 | obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o |
84 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o | 84 | obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o |
85 | obj-$(CONFIG_TINY_RCU) += rcutiny.o | ||
85 | obj-$(CONFIG_RELAY) += relay.o | 86 | obj-$(CONFIG_RELAY) += relay.o |
86 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o | 87 | obj-$(CONFIG_SYSCTL) += utsname_sysctl.o |
87 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | 88 | obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o |
@@ -94,6 +95,7 @@ obj-$(CONFIG_X86_DS) += trace/
94 | obj-$(CONFIG_RING_BUFFER) += trace/ | 95 | obj-$(CONFIG_RING_BUFFER) += trace/ |
95 | obj-$(CONFIG_SMP) += sched_cpupri.o | 96 | obj-$(CONFIG_SMP) += sched_cpupri.o |
96 | obj-$(CONFIG_SLOW_WORK) += slow-work.o | 97 | obj-$(CONFIG_SLOW_WORK) += slow-work.o |
98 | obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o | ||
97 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | 99 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o |
98 | 100 | ||
99 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) | 101 | ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) |
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e17041963f5..7f876e60521f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set);
29 | EXPORT_SYMBOL(__cap_full_set); | 29 | EXPORT_SYMBOL(__cap_full_set); |
30 | EXPORT_SYMBOL(__cap_init_eff_set); | 30 | EXPORT_SYMBOL(__cap_init_eff_set); |
31 | 31 | ||
32 | #ifdef CONFIG_SECURITY_FILE_CAPABILITIES | ||
33 | int file_caps_enabled = 1; | 32 | int file_caps_enabled = 1; |
34 | 33 | ||
35 | static int __init file_caps_disable(char *str) | 34 | static int __init file_caps_disable(char *str) |
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str)
38 | return 1; | 37 | return 1; |
39 | } | 38 | } |
40 | __setup("no_file_caps", file_caps_disable); | 39 | __setup("no_file_caps", file_caps_disable); |
41 | #endif | ||
42 | 40 | ||
43 | /* | 41 | /* |
44 | * More recent versions of libcap are available from: | 42 | * More recent versions of libcap are available from: |
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
169 | kernel_cap_t pE, pI, pP; | 167 | kernel_cap_t pE, pI, pP; |
170 | 168 | ||
171 | ret = cap_validate_magic(header, &tocopy); | 169 | ret = cap_validate_magic(header, &tocopy); |
172 | if (ret != 0) | 170 | if ((dataptr == NULL) || (ret != 0)) |
173 | return ret; | 171 | return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret; |
174 | 172 | ||
175 | if (get_user(pid, &header->pid)) | 173 | if (get_user(pid, &header->pid)) |
176 | return -EFAULT; | 174 | return -EFAULT; |
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
238 | SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) | 236 | SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) |
239 | { | 237 | { |
240 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 238 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
241 | unsigned i, tocopy; | 239 | unsigned i, tocopy, copybytes; |
242 | kernel_cap_t inheritable, permitted, effective; | 240 | kernel_cap_t inheritable, permitted, effective; |
243 | struct cred *new; | 241 | struct cred *new; |
244 | int ret; | 242 | int ret; |
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
255 | if (pid != 0 && pid != task_pid_vnr(current)) | 253 | if (pid != 0 && pid != task_pid_vnr(current)) |
256 | return -EPERM; | 254 | return -EPERM; |
257 | 255 | ||
258 | if (copy_from_user(&kdata, data, | 256 | copybytes = tocopy * sizeof(struct __user_cap_data_struct); |
259 | tocopy * sizeof(struct __user_cap_data_struct))) | 257 | if (copybytes > sizeof(kdata)) |
258 | return -EFAULT; | ||
259 | |||
260 | if (copy_from_user(&kdata, data, copybytes)) | ||
260 | return -EFAULT; | 261 | return -EFAULT; |
261 | 262 | ||
262 | for (i = 0; i < tocopy; i++) { | 263 | for (i = 0; i < tocopy; i++) { |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1710 | return -EFAULT; | 1710 | return -EFAULT; |
1711 | 1711 | ||
1712 | buffer[nbytes] = 0; /* nul-terminate */ | 1712 | buffer[nbytes] = 0; /* nul-terminate */ |
1713 | strstrip(buffer); | ||
1714 | if (cft->write_u64) { | 1713 | if (cft->write_u64) { |
1715 | u64 val = simple_strtoull(buffer, &end, 0); | 1714 | u64 val = simple_strtoull(strstrip(buffer), &end, 0); |
1716 | if (*end) | 1715 | if (*end) |
1717 | return -EINVAL; | 1716 | return -EINVAL; |
1718 | retval = cft->write_u64(cgrp, cft, val); | 1717 | retval = cft->write_u64(cgrp, cft, val); |
1719 | } else { | 1718 | } else { |
1720 | s64 val = simple_strtoll(buffer, &end, 0); | 1719 | s64 val = simple_strtoll(strstrip(buffer), &end, 0); |
1721 | if (*end) | 1720 | if (*end) |
1722 | return -EINVAL; | 1721 | return -EINVAL; |
1723 | retval = cft->write_s64(cgrp, cft, val); | 1722 | retval = cft->write_s64(cgrp, cft, val); |
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
1753 | } | 1752 | } |
1754 | 1753 | ||
1755 | buffer[nbytes] = 0; /* nul-terminate */ | 1754 | buffer[nbytes] = 0; /* nul-terminate */ |
1756 | strstrip(buffer); | 1755 | retval = cft->write_string(cgrp, cft, strstrip(buffer)); |
1757 | retval = cft->write_string(cgrp, cft, buffer); | ||
1758 | if (!retval) | 1756 | if (!retval) |
1759 | retval = nbytes; | 1757 | retval = nbytes; |
1760 | out: | 1758 | out: |
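Note: the cgroup_write_X64()/cgroup_write_string() change depends on strstrip()'s contract: trailing whitespace is removed in place, but leading whitespace is only skipped through the returned pointer, so discarding that return value (as the old code did) leaves leading spaces in front of the data being parsed. A standalone illustration with a local trim helper that follows the same contract (not the kernel's strstrip):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same contract as strstrip(): trailing whitespace is cut in place,
 * leading whitespace is skipped only via the returned pointer. */
static char *trim(char *s)
{
	char *end = s + strlen(s);

	while (end > s && isspace((unsigned char)end[-1]))
		*--end = '\0';
	while (isspace((unsigned char)*s))
		s++;
	return s;
}

int main(void)
{
	char buf[] = "  42\n";
	char *p = trim(buf);

	printf("buffer itself: \"%s\"\n", buf); /* "  42" - what the old code parsed */
	printf("return value:  \"%s\"\n", p);   /* "42"   - what the new code parses */
	return 0;
}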
diff --git a/kernel/exit.c b/kernel/exit.c
index e61891f80123..f7864ac2ecc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -359,10 +359,8 @@ void __set_special_pids(struct pid *pid)
359 | { | 359 | { |
360 | struct task_struct *curr = current->group_leader; | 360 | struct task_struct *curr = current->group_leader; |
361 | 361 | ||
362 | if (task_session(curr) != pid) { | 362 | if (task_session(curr) != pid) |
363 | change_pid(curr, PIDTYPE_SID, pid); | 363 | change_pid(curr, PIDTYPE_SID, pid); |
364 | proc_sid_connector(curr); | ||
365 | } | ||
366 | 364 | ||
367 | if (task_pgrp(curr) != pid) | 365 | if (task_pgrp(curr) != pid) |
368 | change_pid(curr, PIDTYPE_PGID, pid); | 366 | change_pid(curr, PIDTYPE_PGID, pid); |
diff --git a/kernel/fork.c b/kernel/fork.c
index 4c20fff8c13a..166b8c49257c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -91,7 +91,7 @@ int nr_processes(void)
91 | int cpu; | 91 | int cpu; |
92 | int total = 0; | 92 | int total = 0; |
93 | 93 | ||
94 | for_each_online_cpu(cpu) | 94 | for_each_possible_cpu(cpu) |
95 | total += per_cpu(process_counts, cpu); | 95 | total += per_cpu(process_counts, cpu); |
96 | 96 | ||
97 | return total; | 97 | return total; |
diff --git a/kernel/futex.c b/kernel/futex.c
index 4949d336d88d..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
150 | */ | 150 | */ |
151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) | 151 | static inline int match_futex(union futex_key *key1, union futex_key *key2) |
152 | { | 152 | { |
153 | return (key1->both.word == key2->both.word | 153 | return (key1 && key2 |
154 | && key1->both.word == key2->both.word | ||
154 | && key1->both.ptr == key2->both.ptr | 155 | && key1->both.ptr == key2->both.ptr |
155 | && key1->both.offset == key2->both.offset); | 156 | && key1->both.offset == key2->both.offset); |
156 | } | 157 | } |
@@ -1028,7 +1029,6 @@ static inline
1028 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | 1029 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
1029 | struct futex_hash_bucket *hb) | 1030 | struct futex_hash_bucket *hb) |
1030 | { | 1031 | { |
1031 | drop_futex_key_refs(&q->key); | ||
1032 | get_futex_key_refs(key); | 1032 | get_futex_key_refs(key); |
1033 | q->key = *key; | 1033 | q->key = *key; |
1034 | 1034 | ||
@@ -1226,6 +1226,7 @@ retry_private:
1226 | */ | 1226 | */ |
1227 | if (ret == 1) { | 1227 | if (ret == 1) { |
1228 | WARN_ON(pi_state); | 1228 | WARN_ON(pi_state); |
1229 | drop_count++; | ||
1229 | task_count++; | 1230 | task_count++; |
1230 | ret = get_futex_value_locked(&curval2, uaddr2); | 1231 | ret = get_futex_value_locked(&curval2, uaddr2); |
1231 | if (!ret) | 1232 | if (!ret) |
@@ -1304,6 +1305,7 @@ retry_private:
1304 | if (ret == 1) { | 1305 | if (ret == 1) { |
1305 | /* We got the lock. */ | 1306 | /* We got the lock. */ |
1306 | requeue_pi_wake_futex(this, &key2, hb2); | 1307 | requeue_pi_wake_futex(this, &key2, hb2); |
1308 | drop_count++; | ||
1307 | continue; | 1309 | continue; |
1308 | } else if (ret) { | 1310 | } else if (ret) { |
1309 | /* -EDEADLK */ | 1311 | /* -EDEADLK */ |
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1791 | current->timer_slack_ns); | 1793 | current->timer_slack_ns); |
1792 | } | 1794 | } |
1793 | 1795 | ||
1796 | retry: | ||
1794 | /* Prepare to wait on uaddr. */ | 1797 | /* Prepare to wait on uaddr. */ |
1795 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); | 1798 | ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); |
1796 | if (ret) | 1799 | if (ret) |
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1808 | goto out_put_key; | 1811 | goto out_put_key; |
1809 | 1812 | ||
1810 | /* | 1813 | /* |
1811 | * We expect signal_pending(current), but another thread may | 1814 | * We expect signal_pending(current), but we might be the |
1812 | * have handled it for us already. | 1815 | * victim of a spurious wakeup as well. |
1813 | */ | 1816 | */ |
1817 | if (!signal_pending(current)) { | ||
1818 | put_futex_key(fshared, &q.key); | ||
1819 | goto retry; | ||
1820 | } | ||
1821 | |||
1814 | ret = -ERESTARTSYS; | 1822 | ret = -ERESTARTSYS; |
1815 | if (!abs_time) | 1823 | if (!abs_time) |
1816 | goto out_put_key; | 1824 | goto out_put_key; |
@@ -2118,9 +2126,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2118 | */ | 2126 | */ |
2119 | plist_del(&q->list, &q->list.plist); | 2127 | plist_del(&q->list, &q->list.plist); |
2120 | 2128 | ||
2129 | /* Handle spurious wakeups gracefully */ | ||
2130 | ret = -EWOULDBLOCK; | ||
2121 | if (timeout && !timeout->task) | 2131 | if (timeout && !timeout->task) |
2122 | ret = -ETIMEDOUT; | 2132 | ret = -ETIMEDOUT; |
2123 | else | 2133 | else if (signal_pending(current)) |
2124 | ret = -ERESTARTNOINTR; | 2134 | ret = -ERESTARTNOINTR; |
2125 | } | 2135 | } |
2126 | return ret; | 2136 | return ret; |
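Note: the futex_wait() hunks teach the kernel side to tolerate wakeups that deliver neither a signal nor a timeout by looping back to futex_wait_setup(). Userspace callers of the syscall need the same discipline, since FUTEX_WAIT is allowed to return spuriously; below is a minimal caller-side sketch (ordinary userspace C, assuming a 32-bit futex word and no timeout), not code from this patch.

/* FUTEX_WAIT may return early - EINTR, EAGAIN, or a spurious wakeup -
 * so the futex word is re-checked in a loop instead of trusting a
 * single return. */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void wait_while_equal(atomic_int *uaddr, int val)
{
	while (atomic_load(uaddr) == val) {
		/* 0 on wakeup, -1/EAGAIN if *uaddr != val, -1/EINTR on signal */
		syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
		/* loop: even a 0 return does not guarantee the value changed */
	}
}

int main(void)
{
	atomic_int word = 1;

	/* In real use another thread changes the word and calls FUTEX_WAKE;
	 * flip it here so the sketch terminates when run stand-alone. */
	atomic_store(&word, 0);
	wait_while_equal(&word, 1);
	return 0;
}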
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d4e841747400..0c642d51aac2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
144 | 144 | ||
145 | rcu_read_lock(); | 145 | rcu_read_lock(); |
146 | do_each_thread(g, t) { | 146 | do_each_thread(g, t) { |
147 | if (!--max_count) | 147 | if (!max_count--) |
148 | goto unlock; | 148 | goto unlock; |
149 | if (!--batch_count) { | 149 | if (!--batch_count) { |
150 | batch_count = HUNG_TASK_BATCHING; | 150 | batch_count = HUNG_TASK_BATCHING; |
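Note: the one-character hung_task fix swaps a pre-decrement for a post-decrement. The old test bails one task short of the configured limit and, if the limit starts at 0, underflows and never stops early; the new test examines exactly "limit" tasks and stops immediately for a limit of 0. A standalone demonstration (plain C, not kernel code):

#include <stdio.h>

static unsigned int visited_pre(unsigned int limit, unsigned int tasks)
{
	unsigned int n = 0;
	for (unsigned int t = 0; t < tasks; t++) {
		if (!--limit)      /* old: decrement first, stop when it hits 0 */
			break;
		n++;
	}
	return n;
}

static unsigned int visited_post(unsigned int limit, unsigned int tasks)
{
	unsigned int n = 0;
	for (unsigned int t = 0; t < tasks; t++) {
		if (!limit--)      /* new: test the old value, then decrement */
			break;
		n++;
	}
	return n;
}

int main(void)
{
	printf("limit=3: pre=%u post=%u\n", visited_pre(3, 10), visited_post(3, 10));
	/* limit=0 underflows in the pre form, so it never stops early */
	printf("limit=0: pre=%u post=%u\n", visited_pre(0, 10), visited_post(0, 10));
	return 0;
}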
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 8996b98f9eb2..22b0a6eedf24 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_spurious_irqs(unsigned long dummy)
121 | if (!(status & IRQ_SPURIOUS_DISABLED)) | 121 | if (!(status & IRQ_SPURIOUS_DISABLED)) |
122 | continue; | 122 | continue; |
123 | 123 | ||
124 | local_irq_disable(); | ||
124 | try_one_irq(i, desc); | 125 | try_one_irq(i, desc); |
126 | local_irq_enable(); | ||
125 | } | 127 | } |
126 | 128 | ||
127 | mod_timer(&poll_spurious_irq_timer, | 129 | mod_timer(&poll_spurious_irq_timer, |
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 9fcb53a11f87..25b103190364 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...)
80 | #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ | 80 | #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ |
81 | static int kmod_loop_msg; | 81 | static int kmod_loop_msg; |
82 | 82 | ||
83 | ret = security_kernel_module_request(); | ||
84 | if (ret) | ||
85 | return ret; | ||
86 | |||
87 | va_start(args, fmt); | 83 | va_start(args, fmt); |
88 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); | 84 | ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); |
89 | va_end(args); | 85 | va_end(args); |
90 | if (ret >= MODULE_NAME_LEN) | 86 | if (ret >= MODULE_NAME_LEN) |
91 | return -ENAMETOOLONG; | 87 | return -ENAMETOOLONG; |
92 | 88 | ||
89 | ret = security_kernel_module_request(module_name); | ||
90 | if (ret) | ||
91 | return ret; | ||
92 | |||
93 | /* If modprobe needs a service that is in a module, we get a recursive | 93 | /* If modprobe needs a service that is in a module, we get a recursive |
94 | * loop. Limit the number of running kmod threads to max_threads/2 or | 94 | * loop. Limit the number of running kmod threads to max_threads/2 or |
95 | * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method | 95 | * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method |
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
150 | EXPORT_SYMBOL(kthread_create); | 150 | EXPORT_SYMBOL(kthread_create); |
151 | 151 | ||
152 | /** | 152 | /** |
153 | * kthread_bind - bind a just-created kthread to a cpu. | ||
154 | * @k: thread created by kthread_create(). | ||
155 | * @cpu: cpu (might not be online, must be possible) for @k to run on. | ||
156 | * | ||
157 | * Description: This function is equivalent to set_cpus_allowed(), | ||
158 | * except that @cpu doesn't need to be online, and the thread must be | ||
159 | * stopped (i.e., just returned from kthread_create()). | ||
160 | */ | ||
161 | void kthread_bind(struct task_struct *k, unsigned int cpu) | ||
162 | { | ||
163 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
164 | if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) { | ||
165 | WARN_ON(1); | ||
166 | return; | ||
167 | } | ||
168 | set_task_cpu(k, cpu); | ||
169 | k->cpus_allowed = cpumask_of_cpu(cpu); | ||
170 | k->rt.nr_cpus_allowed = 1; | ||
171 | k->flags |= PF_THREAD_BOUND; | ||
172 | } | ||
173 | EXPORT_SYMBOL(kthread_bind); | ||
174 | |||
175 | /** | ||
176 | * kthread_stop - stop a thread created by kthread_create(). | 153 | * kthread_stop - stop a thread created by kthread_create(). |
177 | * @k: thread created by kthread_create(). | 154 | * @k: thread created by kthread_create(). |
178 | * | 155 | * |
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1187 | 1187 | ||
1188 | /* Count loaded sections and allocate structures */ | 1188 | /* Count loaded sections and allocate structures */ |
1189 | for (i = 0; i < nsect; i++) | 1189 | for (i = 0; i < nsect; i++) |
1190 | if (sechdrs[i].sh_flags & SHF_ALLOC) | 1190 | if (sechdrs[i].sh_flags & SHF_ALLOC |
1191 | && sechdrs[i].sh_size) | ||
1191 | nloaded++; | 1192 | nloaded++; |
1192 | size[0] = ALIGN(sizeof(*sect_attrs) | 1193 | size[0] = ALIGN(sizeof(*sect_attrs) |
1193 | + nloaded * sizeof(sect_attrs->attrs[0]), | 1194 | + nloaded * sizeof(sect_attrs->attrs[0]), |
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1207 | for (i = 0; i < nsect; i++) { | 1208 | for (i = 0; i < nsect; i++) { |
1208 | if (! (sechdrs[i].sh_flags & SHF_ALLOC)) | 1209 | if (! (sechdrs[i].sh_flags & SHF_ALLOC)) |
1209 | continue; | 1210 | continue; |
1211 | if (!sechdrs[i].sh_size) | ||
1212 | continue; | ||
1210 | sattr->address = sechdrs[i].sh_addr; | 1213 | sattr->address = sechdrs[i].sh_addr; |
1211 | sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, | 1214 | sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, |
1212 | GFP_KERNEL); | 1215 | GFP_KERNEL); |
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
148 | 148 | ||
149 | preempt_disable(); | 149 | preempt_disable(); |
150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); | 150 | mutex_acquire(&lock->dep_map, subclass, 0, ip); |
151 | #if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ | 151 | |
152 | !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) | 152 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
153 | /* | 153 | /* |
154 | * Optimistic spinning. | 154 | * Optimistic spinning. |
155 | * | 155 | * |
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
218 | return -ENOSPC; | 218 | return -ENOSPC; |
219 | } | 219 | } |
220 | 220 | ||
221 | if (kp->flags & KPARAM_KMALLOCED) | ||
222 | kfree(*(char **)kp->arg); | ||
223 | |||
224 | /* This is a hack. We can't need to strdup in early boot, and we | 221 | /* This is a hack. We can't need to strdup in early boot, and we |
225 | * don't need to; this mangled commandline is preserved. */ | 222 | * don't need to; this mangled commandline is preserved. */ |
226 | if (slab_is_available()) { | 223 | if (slab_is_available()) { |
227 | kp->flags |= KPARAM_KMALLOCED; | ||
228 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); | 224 | *(char **)kp->arg = kstrdup(val, GFP_KERNEL); |
229 | if (!kp->arg) | 225 | if (!*(char **)kp->arg) |
230 | return -ENOMEM; | 226 | return -ENOMEM; |
231 | } else | 227 | } else |
232 | *(const char **)kp->arg = val; | 228 | *(const char **)kp->arg = val; |
@@ -304,6 +300,7 @@ static int param_array(const char *name,
304 | unsigned int min, unsigned int max, | 300 | unsigned int min, unsigned int max, |
305 | void *elem, int elemsize, | 301 | void *elem, int elemsize, |
306 | int (*set)(const char *, struct kernel_param *kp), | 302 | int (*set)(const char *, struct kernel_param *kp), |
303 | u16 flags, | ||
307 | unsigned int *num) | 304 | unsigned int *num) |
308 | { | 305 | { |
309 | int ret; | 306 | int ret; |
@@ -313,6 +310,7 @@ static int param_array(const char *name,
313 | /* Get the name right for errors. */ | 310 | /* Get the name right for errors. */ |
314 | kp.name = name; | 311 | kp.name = name; |
315 | kp.arg = elem; | 312 | kp.arg = elem; |
313 | kp.flags = flags; | ||
316 | 314 | ||
317 | /* No equals sign? */ | 315 | /* No equals sign? */ |
318 | if (!val) { | 316 | if (!val) { |
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
358 | unsigned int temp_num; | 356 | unsigned int temp_num; |
359 | 357 | ||
360 | return param_array(kp->name, val, 1, arr->max, arr->elem, | 358 | return param_array(kp->name, val, 1, arr->max, arr->elem, |
361 | arr->elemsize, arr->set, arr->num ?: &temp_num); | 359 | arr->elemsize, arr->set, kp->flags, |
360 | arr->num ?: &temp_num); | ||
362 | } | 361 | } |
363 | 362 | ||
364 | int param_array_get(char *buffer, struct kernel_param *kp) | 363 | int param_array_get(char *buffer, struct kernel_param *kp) |
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
605 | 604 | ||
606 | void destroy_params(const struct kernel_param *params, unsigned num) | 605 | void destroy_params(const struct kernel_param *params, unsigned num) |
607 | { | 606 | { |
608 | unsigned int i; | 607 | /* FIXME: This should free kmalloced charp parameters. It doesn't. */ |
609 | |||
610 | for (i = 0; i < num; i++) | ||
611 | if (params[i].flags & KPARAM_KMALLOCED) | ||
612 | kfree(*(char **)params[i].arg); | ||
613 | } | 608 | } |
614 | 609 | ||
615 | static void __init kernel_add_sysfs_param(const char *name, | 610 | static void __init kernel_add_sysfs_param(const char *name, |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..7f29643c8985 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1355,7 +1355,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1355 | u64 interrupts, freq; | 1355 | u64 interrupts, freq; |
1356 | 1356 | ||
1357 | spin_lock(&ctx->lock); | 1357 | spin_lock(&ctx->lock); |
1358 | list_for_each_entry(event, &ctx->group_list, group_entry) { | 1358 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
1359 | if (event->state != PERF_EVENT_STATE_ACTIVE) | 1359 | if (event->state != PERF_EVENT_STATE_ACTIVE) |
1360 | continue; | 1360 | continue; |
1361 | 1361 | ||
@@ -3959,8 +3959,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3959 | regs = task_pt_regs(current); | 3959 | regs = task_pt_regs(current); |
3960 | 3960 | ||
3961 | if (regs) { | 3961 | if (regs) { |
3962 | if (perf_event_overflow(event, 0, &data, regs)) | 3962 | if (!(event->attr.exclude_idle && current->pid == 0)) |
3963 | ret = HRTIMER_NORESTART; | 3963 | if (perf_event_overflow(event, 0, &data, regs)) |
3964 | ret = HRTIMER_NORESTART; | ||
3964 | } | 3965 | } |
3965 | 3966 | ||
3966 | period = max_t(u64, 10000, event->hw.sample_period); | 3967 | period = max_t(u64, 10000, event->hw.sample_period); |
@@ -3969,6 +3970,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3969 | return ret; | 3970 | return ret; |
3970 | } | 3971 | } |
3971 | 3972 | ||
3973 | static void perf_swevent_start_hrtimer(struct perf_event *event) | ||
3974 | { | ||
3975 | struct hw_perf_event *hwc = &event->hw; | ||
3976 | |||
3977 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
3978 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3979 | if (hwc->sample_period) { | ||
3980 | u64 period; | ||
3981 | |||
3982 | if (hwc->remaining) { | ||
3983 | if (hwc->remaining < 0) | ||
3984 | period = 10000; | ||
3985 | else | ||
3986 | period = hwc->remaining; | ||
3987 | hwc->remaining = 0; | ||
3988 | } else { | ||
3989 | period = max_t(u64, 10000, hwc->sample_period); | ||
3990 | } | ||
3991 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3992 | ns_to_ktime(period), 0, | ||
3993 | HRTIMER_MODE_REL, 0); | ||
3994 | } | ||
3995 | } | ||
3996 | |||
3997 | static void perf_swevent_cancel_hrtimer(struct perf_event *event) | ||
3998 | { | ||
3999 | struct hw_perf_event *hwc = &event->hw; | ||
4000 | |||
4001 | if (hwc->sample_period) { | ||
4002 | ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); | ||
4003 | hwc->remaining = ktime_to_ns(remaining); | ||
4004 | |||
4005 | hrtimer_cancel(&hwc->hrtimer); | ||
4006 | } | ||
4007 | } | ||
4008 | |||
3972 | /* | 4009 | /* |
3973 | * Software event: cpu wall time clock | 4010 | * Software event: cpu wall time clock |
3974 | */ | 4011 | */ |
@@ -3991,22 +4028,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
3991 | int cpu = raw_smp_processor_id(); | 4028 | int cpu = raw_smp_processor_id(); |
3992 | 4029 | ||
3993 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4030 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); |
3994 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4031 | perf_swevent_start_hrtimer(event); |
3995 | hwc->hrtimer.function = perf_swevent_hrtimer; | ||
3996 | if (hwc->sample_period) { | ||
3997 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
3998 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
3999 | ns_to_ktime(period), 0, | ||
4000 | HRTIMER_MODE_REL, 0); | ||
4001 | } | ||
4002 | 4032 | ||
4003 | return 0; | 4033 | return 0; |
4004 | } | 4034 | } |
4005 | 4035 | ||
4006 | static void cpu_clock_perf_event_disable(struct perf_event *event) | 4036 | static void cpu_clock_perf_event_disable(struct perf_event *event) |
4007 | { | 4037 | { |
4008 | if (event->hw.sample_period) | 4038 | perf_swevent_cancel_hrtimer(event); |
4009 | hrtimer_cancel(&event->hw.hrtimer); | ||
4010 | cpu_clock_perf_event_update(event); | 4039 | cpu_clock_perf_event_update(event); |
4011 | } | 4040 | } |
4012 | 4041 | ||
@@ -4043,22 +4072,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
4043 | now = event->ctx->time; | 4072 | now = event->ctx->time; |
4044 | 4073 | ||
4045 | atomic64_set(&hwc->prev_count, now); | 4074 | atomic64_set(&hwc->prev_count, now); |
4046 | hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 4075 | |
4047 | hwc->hrtimer.function = perf_swevent_hrtimer; | 4076 | perf_swevent_start_hrtimer(event); |
4048 | if (hwc->sample_period) { | ||
4049 | u64 period = max_t(u64, 10000, hwc->sample_period); | ||
4050 | __hrtimer_start_range_ns(&hwc->hrtimer, | ||
4051 | ns_to_ktime(period), 0, | ||
4052 | HRTIMER_MODE_REL, 0); | ||
4053 | } | ||
4054 | 4077 | ||
4055 | return 0; | 4078 | return 0; |
4056 | } | 4079 | } |
4057 | 4080 | ||
4058 | static void task_clock_perf_event_disable(struct perf_event *event) | 4081 | static void task_clock_perf_event_disable(struct perf_event *event) |
4059 | { | 4082 | { |
4060 | if (event->hw.sample_period) | 4083 | perf_swevent_cancel_hrtimer(event); |
4061 | hrtimer_cancel(&event->hw.hrtimer); | ||
4062 | task_clock_perf_event_update(event, event->ctx->time); | 4084 | task_clock_perf_event_update(event, event->ctx->time); |
4063 | 4085 | ||
4064 | } | 4086 | } |
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..04a9e90d248f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,21 +693,22 @@ static int software_resume(void)
693 | /* The snapshot device should not be opened while we're running */ | 693 | /* The snapshot device should not be opened while we're running */ |
694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { | 694 | if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
695 | error = -EBUSY; | 695 | error = -EBUSY; |
696 | swsusp_close(FMODE_READ); | ||
696 | goto Unlock; | 697 | goto Unlock; |
697 | } | 698 | } |
698 | 699 | ||
699 | pm_prepare_console(); | 700 | pm_prepare_console(); |
700 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); | 701 | error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
701 | if (error) | 702 | if (error) |
702 | goto Finish; | 703 | goto close_finish; |
703 | 704 | ||
704 | error = usermodehelper_disable(); | 705 | error = usermodehelper_disable(); |
705 | if (error) | 706 | if (error) |
706 | goto Finish; | 707 | goto close_finish; |
707 | 708 | ||
708 | error = create_basic_memory_bitmaps(); | 709 | error = create_basic_memory_bitmaps(); |
709 | if (error) | 710 | if (error) |
710 | goto Finish; | 711 | goto close_finish; |
711 | 712 | ||
712 | pr_debug("PM: Preparing processes for restore.\n"); | 713 | pr_debug("PM: Preparing processes for restore.\n"); |
713 | error = prepare_processes(); | 714 | error = prepare_processes(); |
@@ -719,6 +720,7 @@ static int software_resume(void)
719 | pr_debug("PM: Reading hibernation image.\n"); | 720 | pr_debug("PM: Reading hibernation image.\n"); |
720 | 721 | ||
721 | error = swsusp_read(&flags); | 722 | error = swsusp_read(&flags); |
723 | swsusp_close(FMODE_READ); | ||
722 | if (!error) | 724 | if (!error) |
723 | hibernation_restore(flags & SF_PLATFORM_MODE); | 725 | hibernation_restore(flags & SF_PLATFORM_MODE); |
724 | 726 | ||
@@ -737,6 +739,9 @@ static int software_resume(void)
737 | mutex_unlock(&pm_mutex); | 739 | mutex_unlock(&pm_mutex); |
738 | pr_debug("PM: Resume from disk failed.\n"); | 740 | pr_debug("PM: Resume from disk failed.\n"); |
739 | return error; | 741 | return error; |
742 | close_finish: | ||
743 | swsusp_close(FMODE_READ); | ||
744 | goto Finish; | ||
740 | } | 745 | } |
741 | 746 | ||
742 | late_initcall(software_resume); | 747 | late_initcall(software_resume); |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..890f6b11b1d3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -314,7 +314,6 @@ static int save_image(struct swap_map_handle *handle,
314 | { | 314 | { |
315 | unsigned int m; | 315 | unsigned int m; |
316 | int ret; | 316 | int ret; |
317 | int error = 0; | ||
318 | int nr_pages; | 317 | int nr_pages; |
319 | int err2; | 318 | int err2; |
320 | struct bio *bio; | 319 | struct bio *bio; |
@@ -329,26 +328,27 @@ static int save_image(struct swap_map_handle *handle,
329 | nr_pages = 0; | 328 | nr_pages = 0; |
330 | bio = NULL; | 329 | bio = NULL; |
331 | do_gettimeofday(&start); | 330 | do_gettimeofday(&start); |
332 | do { | 331 | while (1) { |
333 | ret = snapshot_read_next(snapshot, PAGE_SIZE); | 332 | ret = snapshot_read_next(snapshot, PAGE_SIZE); |
334 | if (ret > 0) { | 333 | if (ret <= 0) |
335 | error = swap_write_page(handle, data_of(*snapshot), | 334 | break; |
336 | &bio); | 335 | ret = swap_write_page(handle, data_of(*snapshot), &bio); |
337 | if (error) | 336 | if (ret) |
338 | break; | 337 | break; |
339 | if (!(nr_pages % m)) | 338 | if (!(nr_pages % m)) |
340 | printk("\b\b\b\b%3d%%", nr_pages / m); | 339 | printk("\b\b\b\b%3d%%", nr_pages / m); |
341 | nr_pages++; | 340 | nr_pages++; |
342 | } | 341 | } |
343 | } while (ret > 0); | ||
344 | err2 = wait_on_bio_chain(&bio); | 342 | err2 = wait_on_bio_chain(&bio); |
345 | do_gettimeofday(&stop); | 343 | do_gettimeofday(&stop); |
346 | if (!error) | 344 | if (!ret) |
347 | error = err2; | 345 | ret = err2; |
348 | if (!error) | 346 | if (!ret) |
349 | printk("\b\b\b\bdone\n"); | 347 | printk("\b\b\b\bdone\n"); |
348 | else | ||
349 | printk("\n"); | ||
350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); | 350 | swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); |
351 | return error; | 351 | return ret; |
352 | } | 352 | } |
353 | 353 | ||
354 | /** | 354 | /** |
@@ -536,7 +536,8 @@ static int load_image(struct swap_map_handle *handle,
536 | snapshot_write_finalize(snapshot); | 536 | snapshot_write_finalize(snapshot); |
537 | if (!snapshot_image_loaded(snapshot)) | 537 | if (!snapshot_image_loaded(snapshot)) |
538 | error = -ENODATA; | 538 | error = -ENODATA; |
539 | } | 539 | } else |
540 | printk("\n"); | ||
540 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); | 541 | swsusp_show_speed(&start, &stop, nr_to_read, "Read"); |
541 | return error; | 542 | return error; |
542 | } | 543 | } |
@@ -572,8 +573,6 @@ int swsusp_read(unsigned int *flags_p)
572 | error = load_image(&handle, &snapshot, header->pages - 1); | 573 | error = load_image(&handle, &snapshot, header->pages - 1); |
573 | release_swap_reader(&handle); | 574 | release_swap_reader(&handle); |
574 | 575 | ||
575 | blkdev_put(resume_bdev, FMODE_READ); | ||
576 | |||
577 | if (!error) | 576 | if (!error) |
578 | pr_debug("PM: Image successfully loaded\n"); | 577 | pr_debug("PM: Image successfully loaded\n"); |
579 | else | 578 | else |
@@ -596,7 +595,7 @@ int swsusp_check(void)
596 | error = bio_read_page(swsusp_resume_block, | 595 | error = bio_read_page(swsusp_resume_block, |
597 | swsusp_header, NULL); | 596 | swsusp_header, NULL); |
598 | if (error) | 597 | if (error) |
599 | return error; | 598 | goto put; |
600 | 599 | ||
601 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { | 600 | if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { |
602 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); | 601 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
@@ -604,8 +603,10 @@ int swsusp_check(void)
604 | error = bio_write_page(swsusp_resume_block, | 603 | error = bio_write_page(swsusp_resume_block, |
605 | swsusp_header, NULL); | 604 | swsusp_header, NULL); |
606 | } else { | 605 | } else { |
607 | return -EINVAL; | 606 | error = -EINVAL; |
608 | } | 607 | } |
608 | |||
609 | put: | ||
609 | if (error) | 610 | if (error) |
610 | blkdev_put(resume_bdev, FMODE_READ); | 611 | blkdev_put(resume_bdev, FMODE_READ); |
611 | else | 612 | else |
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..b5ac4d99c667 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/syscalls.h> | 34 | #include <linux/syscalls.h> |
35 | #include <linux/kexec.h> | 35 | #include <linux/kexec.h> |
36 | #include <linux/ratelimit.h> | ||
36 | 37 | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | 39 | ||
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles);
1376 | */ | 1377 | */ |
1377 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); | 1378 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
1378 | 1379 | ||
1379 | int printk_ratelimit(void) | 1380 | int __printk_ratelimit(const char *func) |
1380 | { | 1381 | { |
1381 | return __ratelimit(&printk_ratelimit_state); | 1382 | return ___ratelimit(&printk_ratelimit_state, func); |
1382 | } | 1383 | } |
1383 | EXPORT_SYMBOL(printk_ratelimit); | 1384 | EXPORT_SYMBOL(__printk_ratelimit); |
1384 | 1385 | ||
1385 | /** | 1386 | /** |
1386 | * printk_timed_ratelimit - caller-controlled printk ratelimiting | 1387 | * printk_timed_ratelimit - caller-controlled printk ratelimiting |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 400183346ad2..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/kernel_stat.h> | ||
48 | 47 | ||
49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 48 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
50 | static struct lock_class_key rcu_lock_key; | 49 | static struct lock_class_key rcu_lock_key; |
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | 52 | EXPORT_SYMBOL_GPL(rcu_lock_map); |
54 | #endif | 53 | #endif |
55 | 54 | ||
56 | int rcu_scheduler_active __read_mostly; | ||
57 | |||
58 | /* | 55 | /* |
59 | * Awaken the corresponding synchronize_rcu() instance now that a | 56 | * Awaken the corresponding synchronize_rcu() instance now that a |
60 | * grace period has elapsed. | 57 | * grace period has elapsed. |
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
66 | rcu = container_of(head, struct rcu_synchronize, head); | 63 | rcu = container_of(head, struct rcu_synchronize, head); |
67 | complete(&rcu->completion); | 64 | complete(&rcu->completion); |
68 | } | 65 | } |
69 | |||
70 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
71 | |||
72 | /** | ||
73 | * synchronize_rcu - wait until a grace period has elapsed. | ||
74 | * | ||
75 | * Control will return to the caller some time after a full grace | ||
76 | * period has elapsed, in other words after all currently executing RCU | ||
77 | * read-side critical sections have completed. RCU read-side critical | ||
78 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
79 | * and may be nested. | ||
80 | */ | ||
81 | void synchronize_rcu(void) | ||
82 | { | ||
83 | struct rcu_synchronize rcu; | ||
84 | |||
85 | if (!rcu_scheduler_active) | ||
86 | return; | ||
87 | |||
88 | init_completion(&rcu.completion); | ||
89 | /* Will wake me after RCU finished. */ | ||
90 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
91 | /* Wait for it. */ | ||
92 | wait_for_completion(&rcu.completion); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
95 | |||
96 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | ||
97 | |||
98 | /** | ||
99 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
100 | * | ||
101 | * Control will return to the caller some time after a full rcu-sched | ||
102 | * grace period has elapsed, in other words after all currently executing | ||
103 | * rcu-sched read-side critical sections have completed. These read-side | ||
104 | * critical sections are delimited by rcu_read_lock_sched() and | ||
105 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
106 | * local_irq_disable(), and so on may be used in place of | ||
107 | * rcu_read_lock_sched(). | ||
108 | * | ||
109 | * This means that all preempt_disable code sequences, including NMI and | ||
110 | * hardware-interrupt handlers, in progress on entry will have completed | ||
111 | * before this primitive returns. However, this does not guarantee that | ||
112 | * softirq handlers will have completed, since in some kernels, these | ||
113 | * handlers can run in process context, and can block. | ||
114 | * | ||
115 | * This primitive provides the guarantees made by the (now removed) | ||
116 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
117 | * guarantees that rcu_read_lock() sections will have completed. | ||
118 | * In "classic RCU", these two guarantees happen to be one and | ||
119 | * the same, but can differ in realtime RCU implementations. | ||
120 | */ | ||
121 | void synchronize_sched(void) | ||
122 | { | ||
123 | struct rcu_synchronize rcu; | ||
124 | |||
125 | if (rcu_blocking_is_gp()) | ||
126 | return; | ||
127 | |||
128 | init_completion(&rcu.completion); | ||
129 | /* Will wake me after RCU finished. */ | ||
130 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
131 | /* Wait for it. */ | ||
132 | wait_for_completion(&rcu.completion); | ||
133 | } | ||
134 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
135 | |||
136 | /** | ||
137 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
138 | * | ||
139 | * Control will return to the caller some time after a full rcu_bh grace | ||
140 | * period has elapsed, in other words after all currently executing rcu_bh | ||
141 | * read-side critical sections have completed. RCU read-side critical | ||
142 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
143 | * and may be nested. | ||
144 | */ | ||
145 | void synchronize_rcu_bh(void) | ||
146 | { | ||
147 | struct rcu_synchronize rcu; | ||
148 | |||
149 | if (rcu_blocking_is_gp()) | ||
150 | return; | ||
151 | |||
152 | init_completion(&rcu.completion); | ||
153 | /* Will wake me after RCU finished. */ | ||
154 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
155 | /* Wait for it. */ | ||
156 | wait_for_completion(&rcu.completion); | ||
157 | } | ||
158 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
159 | |||
160 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | ||
161 | unsigned long action, void *hcpu) | ||
162 | { | ||
163 | return rcu_cpu_notify(self, action, hcpu); | ||
164 | } | ||
165 | |||
166 | void __init rcu_init(void) | ||
167 | { | ||
168 | int i; | ||
169 | |||
170 | __rcu_init(); | ||
171 | cpu_notifier(rcu_barrier_cpu_hotplug, 0); | ||
172 | |||
173 | /* | ||
174 | * We don't need protection against CPU-hotplug here because | ||
175 | * this is called early in boot, before either interrupts | ||
176 | * or the scheduler are operational. | ||
177 | */ | ||
178 | for_each_online_cpu(i) | ||
179 | rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
180 | } | ||
181 | |||
182 | void rcu_scheduler_starting(void) | ||
183 | { | ||
184 | WARN_ON(num_online_cpus() != 1); | ||
185 | WARN_ON(nr_context_switches() > 0); | ||
186 | rcu_scheduler_active = 1; | ||
187 | } | ||
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
new file mode 100644
index 000000000000..9f6d9ff2572c
--- /dev/null
+++ b/kernel/rcutiny.c
@@ -0,0 +1,282 @@
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2008 | ||
19 | * | ||
20 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | ||
21 | * | ||
22 | * For detailed explanation of Read-Copy Update mechanism see - | ||
23 | * Documentation/RCU | ||
24 | */ | ||
25 | #include <linux/moduleparam.h> | ||
26 | #include <linux/completion.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/notifier.h> | ||
29 | #include <linux/rcupdate.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/time.h> | ||
37 | #include <linux/cpu.h> | ||
38 | |||
39 | /* Global control variables for rcupdate callback mechanism. */ | ||
40 | struct rcu_ctrlblk { | ||
41 | struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ | ||
42 | struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ | ||
43 | struct rcu_head **curtail; /* ->next pointer of last CB. */ | ||
44 | }; | ||
45 | |||
46 | /* Definition for rcupdate control block. */ | ||
47 | static struct rcu_ctrlblk rcu_ctrlblk = { | ||
48 | .donetail = &rcu_ctrlblk.rcucblist, | ||
49 | .curtail = &rcu_ctrlblk.rcucblist, | ||
50 | }; | ||
51 | |||
52 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | ||
53 | .donetail = &rcu_bh_ctrlblk.rcucblist, | ||
54 | .curtail = &rcu_bh_ctrlblk.rcucblist, | ||
55 | }; | ||
56 | |||
57 | #ifdef CONFIG_NO_HZ | ||
58 | |||
59 | static long rcu_dynticks_nesting = 1; | ||
60 | |||
61 | /* | ||
62 | * Enter dynticks-idle mode, which is an extended quiescent state | ||
63 | * if we have fully entered that mode (i.e., if the new value of | ||
64 | * dynticks_nesting is zero). | ||
65 | */ | ||
66 | void rcu_enter_nohz(void) | ||
67 | { | ||
68 | if (--rcu_dynticks_nesting == 0) | ||
69 | rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ | ||
70 | } | ||
71 | |||
72 | /* | ||
73 | * Exit dynticks-idle mode, so that we are no longer in an extended | ||
74 | * quiescent state. | ||
75 | */ | ||
76 | void rcu_exit_nohz(void) | ||
77 | { | ||
78 | rcu_dynticks_nesting++; | ||
79 | } | ||
80 | |||
81 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
82 | |||
83 | /* | ||
84 | * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc(). | ||
85 | * Also disable irqs to avoid confusion due to interrupt handlers | ||
86 | * invoking call_rcu(). | ||
87 | */ | ||
88 | static int rcu_qsctr_help(struct rcu_ctrlblk *rcp) | ||
89 | { | ||
90 | unsigned long flags; | ||
91 | |||
92 | local_irq_save(flags); | ||
93 | if (rcp->rcucblist != NULL && | ||
94 | rcp->donetail != rcp->curtail) { | ||
95 | rcp->donetail = rcp->curtail; | ||
96 | local_irq_restore(flags); | ||
97 | return 1; | ||
98 | } | ||
99 | local_irq_restore(flags); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Record an rcu quiescent state. And an rcu_bh quiescent state while we | ||
106 | * are at it, given that any rcu quiescent state is also an rcu_bh | ||
107 | * quiescent state. Use "+" instead of "||" to defeat short circuiting. | ||
108 | */ | ||
109 | void rcu_sched_qs(int cpu) | ||
110 | { | ||
111 | if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
112 | raise_softirq(RCU_SOFTIRQ); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Record an rcu_bh quiescent state. | ||
117 | */ | ||
118 | void rcu_bh_qs(int cpu) | ||
119 | { | ||
120 | if (rcu_qsctr_help(&rcu_bh_ctrlblk)) | ||
121 | raise_softirq(RCU_SOFTIRQ); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Check to see if the scheduling-clock interrupt came from an extended | ||
126 | * quiescent state, and, if so, tell RCU about it. | ||
127 | */ | ||
128 | void rcu_check_callbacks(int cpu, int user) | ||
129 | { | ||
130 | if (user || | ||
131 | (idle_cpu(cpu) && | ||
132 | !in_softirq() && | ||
133 | hardirq_count() <= (1 << HARDIRQ_SHIFT))) | ||
134 | rcu_sched_qs(cpu); | ||
135 | else if (!in_softirq()) | ||
136 | rcu_bh_qs(cpu); | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * Helper function for rcu_process_callbacks() that operates on the | ||
141 | * specified rcu_ctrlkblk structure. | ||
142 | */ | ||
143 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) | ||
144 | { | ||
145 | struct rcu_head *next, *list; | ||
146 | unsigned long flags; | ||
147 | |||
148 | /* If no RCU callbacks ready to invoke, just return. */ | ||
149 | if (&rcp->rcucblist == rcp->donetail) | ||
150 | return; | ||
151 | |||
152 | /* Move the ready-to-invoke callbacks to a local list. */ | ||
153 | local_irq_save(flags); | ||
154 | list = rcp->rcucblist; | ||
155 | rcp->rcucblist = *rcp->donetail; | ||
156 | *rcp->donetail = NULL; | ||
157 | if (rcp->curtail == rcp->donetail) | ||
158 | rcp->curtail = &rcp->rcucblist; | ||
159 | rcp->donetail = &rcp->rcucblist; | ||
160 | local_irq_restore(flags); | ||
161 | |||
162 | /* Invoke the callbacks on the local list. */ | ||
163 | while (list) { | ||
164 | next = list->next; | ||
165 | prefetch(next); | ||
166 | list->func(list); | ||
167 | list = next; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Invoke any callbacks whose grace period has completed. | ||
173 | */ | ||
174 | static void rcu_process_callbacks(struct softirq_action *unused) | ||
175 | { | ||
176 | __rcu_process_callbacks(&rcu_ctrlblk); | ||
177 | __rcu_process_callbacks(&rcu_bh_ctrlblk); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * Wait for a grace period to elapse. But it is illegal to invoke | ||
182 | * synchronize_sched() from within an RCU read-side critical section. | ||
183 | * Therefore, any legal call to synchronize_sched() is a quiescent | ||
184 | * state, and so on a UP system, synchronize_sched() need do nothing. | ||
185 | * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the | ||
186 | * benefits of doing might_sleep() to reduce latency.) | ||
187 | * | ||
188 | * Cool, huh? (Due to Josh Triplett.) | ||
189 | * | ||
190 | * But we want to make this a static inline later. | ||
191 | */ | ||
192 | void synchronize_sched(void) | ||
193 | { | ||
194 | cond_resched(); | ||
195 | } | ||
196 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
197 | |||
198 | void synchronize_rcu_bh(void) | ||
199 | { | ||
200 | synchronize_sched(); | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
203 | |||
204 | /* | ||
205 | * Helper function for call_rcu() and call_rcu_bh(). | ||
206 | */ | ||
207 | static void __call_rcu(struct rcu_head *head, | ||
208 | void (*func)(struct rcu_head *rcu), | ||
209 | struct rcu_ctrlblk *rcp) | ||
210 | { | ||
211 | unsigned long flags; | ||
212 | |||
213 | head->func = func; | ||
214 | head->next = NULL; | ||
215 | |||
216 | local_irq_save(flags); | ||
217 | *rcp->curtail = head; | ||
218 | rcp->curtail = &head->next; | ||
219 | local_irq_restore(flags); | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Post an RCU callback to be invoked after the end of an RCU grace | ||
224 | * period. But since we have but one CPU, that would be after any | ||
225 | * quiescent state. | ||
226 | */ | ||
227 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
228 | { | ||
229 | __call_rcu(head, func, &rcu_ctrlblk); | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(call_rcu); | ||
232 | |||
233 | /* | ||
234 | * Post an RCU bottom-half callback to be invoked after any subsequent | ||
235 | * quiescent state. | ||
236 | */ | ||
237 | void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
238 | { | ||
239 | __call_rcu(head, func, &rcu_bh_ctrlblk); | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(call_rcu_bh); | ||
242 | |||
243 | void rcu_barrier(void) | ||
244 | { | ||
245 | struct rcu_synchronize rcu; | ||
246 | |||
247 | init_completion(&rcu.completion); | ||
248 | /* Will wake me after RCU finished. */ | ||
249 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
250 | /* Wait for it. */ | ||
251 | wait_for_completion(&rcu.completion); | ||
252 | } | ||
253 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
254 | |||
255 | void rcu_barrier_bh(void) | ||
256 | { | ||
257 | struct rcu_synchronize rcu; | ||
258 | |||
259 | init_completion(&rcu.completion); | ||
260 | /* Will wake me after RCU finished. */ | ||
261 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
262 | /* Wait for it. */ | ||
263 | wait_for_completion(&rcu.completion); | ||
264 | } | ||
265 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
266 | |||
267 | void rcu_barrier_sched(void) | ||
268 | { | ||
269 | struct rcu_synchronize rcu; | ||
270 | |||
271 | init_completion(&rcu.completion); | ||
272 | /* Will wake me after RCU finished. */ | ||
273 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
274 | /* Wait for it. */ | ||
275 | wait_for_completion(&rcu.completion); | ||
276 | } | ||
277 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
278 | |||
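
All three *_barrier() functions above rely on the same trick: post a callback whose only job is to complete() a completion living on the caller's stack, then sleep on that completion. The helper pair comes from rcupdate.h and rcupdate.c rather than from this file, so the sketch below is a reconstruction, not a quote:

	struct rcu_synchronize {
		struct rcu_head head;
		struct completion completion;
	};

	/* Invoked as an ordinary RCU callback; wakes up the waiter. */
	static void wakeme_after_rcu(struct rcu_head *head)
	{
		struct rcu_synchronize *rcu =
			container_of(head, struct rcu_synchronize, head);

		complete(&rcu->completion);
	}

Because rcutiny invokes callbacks strictly in queue order on the one and only CPU, waiting for this single callback also waits for every callback queued before it, which is exactly the rcu_barrier() guarantee.
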
279 | void __init rcu_init(void) | ||
280 | { | ||
281 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
282 | } | ||
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 697c0a0229d4..a621a67ef4e3 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p) | |||
327 | cur_ops->deferred_free(rp); | 327 | cur_ops->deferred_free(rp); |
328 | } | 328 | } |
329 | 329 | ||
330 | static int rcu_no_completed(void) | ||
331 | { | ||
332 | return 0; | ||
333 | } | ||
334 | |||
330 | static void rcu_torture_deferred_free(struct rcu_torture *p) | 335 | static void rcu_torture_deferred_free(struct rcu_torture *p) |
331 | { | 336 | { |
332 | call_rcu(&p->rtort_rcu, rcu_torture_cb); | 337 | call_rcu(&p->rtort_rcu, rcu_torture_cb); |
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = { | |||
388 | .name = "rcu_sync" | 393 | .name = "rcu_sync" |
389 | }; | 394 | }; |
390 | 395 | ||
396 | static struct rcu_torture_ops rcu_expedited_ops = { | ||
397 | .init = rcu_sync_torture_init, | ||
398 | .cleanup = NULL, | ||
399 | .readlock = rcu_torture_read_lock, | ||
400 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
401 | .readunlock = rcu_torture_read_unlock, | ||
402 | .completed = rcu_no_completed, | ||
403 | .deferred_free = rcu_sync_torture_deferred_free, | ||
404 | .sync = synchronize_rcu_expedited, | ||
405 | .cb_barrier = NULL, | ||
406 | .stats = NULL, | ||
407 | .irq_capable = 1, | ||
408 | .name = "rcu_expedited" | ||
409 | }; | ||
410 | |||
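
rcu_expedited_ops points the torture writer's synchronous path at synchronize_rcu_expedited(), so the standard RCU readers above now also get run against expedited grace periods. For orientation, a hypothetical updater that would exercise that primitive outside of rcutorture (struct cfg, global_cfg, and install_new_cfg are invented names, and the caller is assumed to hold whatever lock serializes updaters):

	struct cfg *global_cfg;			/* readers dereference under rcu_read_lock() */

	void install_new_cfg(struct cfg *newp)
	{
		struct cfg *oldp = global_cfg;

		rcu_assign_pointer(global_cfg, newp);
		synchronize_rcu_expedited();	/* shorter wait than synchronize_rcu(), */
						/* at higher CPU/IPI cost */
		kfree(oldp);			/* no reader can still see oldp */
	}
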
391 | /* | 411 | /* |
392 | * Definitions for rcu_bh torture testing. | 412 | * Definitions for rcu_bh torture testing. |
393 | */ | 413 | */ |
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = { | |||
547 | .name = "srcu" | 567 | .name = "srcu" |
548 | }; | 568 | }; |
549 | 569 | ||
570 | static void srcu_torture_synchronize_expedited(void) | ||
571 | { | ||
572 | synchronize_srcu_expedited(&srcu_ctl); | ||
573 | } | ||
574 | |||
575 | static struct rcu_torture_ops srcu_expedited_ops = { | ||
576 | .init = srcu_torture_init, | ||
577 | .cleanup = srcu_torture_cleanup, | ||
578 | .readlock = srcu_torture_read_lock, | ||
579 | .read_delay = srcu_read_delay, | ||
580 | .readunlock = srcu_torture_read_unlock, | ||
581 | .completed = srcu_torture_completed, | ||
582 | .deferred_free = rcu_sync_torture_deferred_free, | ||
583 | .sync = srcu_torture_synchronize_expedited, | ||
584 | .cb_barrier = NULL, | ||
585 | .stats = srcu_torture_stats, | ||
586 | .name = "srcu_expedited" | ||
587 | }; | ||
588 | |||
550 | /* | 589 | /* |
551 | * Definitions for sched torture testing. | 590 | * Definitions for sched torture testing. |
552 | */ | 591 | */ |
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx) | |||
562 | preempt_enable(); | 601 | preempt_enable(); |
563 | } | 602 | } |
564 | 603 | ||
565 | static int sched_torture_completed(void) | ||
566 | { | ||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) | 604 | static void rcu_sched_torture_deferred_free(struct rcu_torture *p) |
571 | { | 605 | { |
572 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); | 606 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); |
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = { | |||
583 | .readlock = sched_torture_read_lock, | 617 | .readlock = sched_torture_read_lock, |
584 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 618 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
585 | .readunlock = sched_torture_read_unlock, | 619 | .readunlock = sched_torture_read_unlock, |
586 | .completed = sched_torture_completed, | 620 | .completed = rcu_no_completed, |
587 | .deferred_free = rcu_sched_torture_deferred_free, | 621 | .deferred_free = rcu_sched_torture_deferred_free, |
588 | .sync = sched_torture_synchronize, | 622 | .sync = sched_torture_synchronize, |
589 | .cb_barrier = rcu_barrier_sched, | 623 | .cb_barrier = rcu_barrier_sched, |
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = { | |||
592 | .name = "sched" | 626 | .name = "sched" |
593 | }; | 627 | }; |
594 | 628 | ||
595 | static struct rcu_torture_ops sched_ops_sync = { | 629 | static struct rcu_torture_ops sched_sync_ops = { |
596 | .init = rcu_sync_torture_init, | 630 | .init = rcu_sync_torture_init, |
597 | .cleanup = NULL, | 631 | .cleanup = NULL, |
598 | .readlock = sched_torture_read_lock, | 632 | .readlock = sched_torture_read_lock, |
599 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 633 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
600 | .readunlock = sched_torture_read_unlock, | 634 | .readunlock = sched_torture_read_unlock, |
601 | .completed = sched_torture_completed, | 635 | .completed = rcu_no_completed, |
602 | .deferred_free = rcu_sync_torture_deferred_free, | 636 | .deferred_free = rcu_sync_torture_deferred_free, |
603 | .sync = sched_torture_synchronize, | 637 | .sync = sched_torture_synchronize, |
604 | .cb_barrier = NULL, | 638 | .cb_barrier = NULL, |
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = { | |||
612 | .readlock = sched_torture_read_lock, | 646 | .readlock = sched_torture_read_lock, |
613 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | 647 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ |
614 | .readunlock = sched_torture_read_unlock, | 648 | .readunlock = sched_torture_read_unlock, |
615 | .completed = sched_torture_completed, | 649 | .completed = rcu_no_completed, |
616 | .deferred_free = rcu_sync_torture_deferred_free, | 650 | .deferred_free = rcu_sync_torture_deferred_free, |
617 | .sync = synchronize_sched_expedited, | 651 | .sync = synchronize_sched_expedited, |
618 | .cb_barrier = NULL, | 652 | .cb_barrier = NULL, |
@@ -1097,9 +1131,10 @@ rcu_torture_init(void) | |||
1097 | int cpu; | 1131 | int cpu; |
1098 | int firsterr = 0; | 1132 | int firsterr = 0; |
1099 | static struct rcu_torture_ops *torture_ops[] = | 1133 | static struct rcu_torture_ops *torture_ops[] = |
1100 | { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, | 1134 | { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, |
1101 | &sched_expedited_ops, | 1135 | &rcu_bh_ops, &rcu_bh_sync_ops, |
1102 | &srcu_ops, &sched_ops, &sched_ops_sync, }; | 1136 | &srcu_ops, &srcu_expedited_ops, |
1137 | &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; | ||
1103 | 1138 | ||
1104 | mutex_lock(&fullstop_mutex); | 1139 | mutex_lock(&fullstop_mutex); |
1105 | 1140 | ||
@@ -1110,8 +1145,12 @@ rcu_torture_init(void) | |||
1110 | break; | 1145 | break; |
1111 | } | 1146 | } |
1112 | if (i == ARRAY_SIZE(torture_ops)) { | 1147 | if (i == ARRAY_SIZE(torture_ops)) { |
1113 | printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", | 1148 | printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n", |
1114 | torture_type); | 1149 | torture_type); |
1150 | printk(KERN_ALERT "rcu-torture types:"); | ||
1151 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | ||
1152 | printk(KERN_ALERT " %s", torture_ops[i]->name); | ||
1153 | printk(KERN_ALERT "\n"); | ||
1115 | mutex_unlock(&fullstop_mutex); | 1154 | mutex_unlock(&fullstop_mutex); |
1116 | return -EINVAL; | 1155 | return -EINVAL; |
1117 | } | 1156 | } |
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 705f02ac7433..53ae9598f798 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c | |||
@@ -46,20 +46,24 @@ | |||
46 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
47 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
48 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
49 | 50 | ||
50 | #include "rcutree.h" | 51 | #include "rcutree.h" |
51 | 52 | ||
52 | /* Data structures. */ | 53 | /* Data structures. */ |
53 | 54 | ||
55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | ||
56 | |||
54 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(name) { \ |
55 | .level = { &name.node[0] }, \ | 58 | .level = { &name.node[0] }, \ |
56 | .levelcnt = { \ | 59 | .levelcnt = { \ |
57 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
58 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
59 | NUM_RCU_LVL_2, \ | 62 | NUM_RCU_LVL_2, \ |
60 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | 63 | NUM_RCU_LVL_3, \ |
64 | NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ | ||
61 | }, \ | 65 | }, \ |
62 | .signaled = RCU_SIGNAL_INIT, \ | 66 | .signaled = RCU_GP_IDLE, \ |
63 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
64 | .completed = -300, \ | 68 | .completed = -300, \ |
65 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ |
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); | |||
77 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
78 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
79 | 83 | ||
84 | static int rcu_scheduler_active __read_mostly; | ||
85 | |||
80 | 86 | ||
81 | /* | 87 | /* |
82 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | 88 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s |
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu) | |||
98 | struct rcu_data *rdp; | 104 | struct rcu_data *rdp; |
99 | 105 | ||
100 | rdp = &per_cpu(rcu_sched_data, cpu); | 106 | rdp = &per_cpu(rcu_sched_data, cpu); |
101 | rdp->passed_quiesc_completed = rdp->completed; | 107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
102 | barrier(); | 108 | barrier(); |
103 | rdp->passed_quiesc = 1; | 109 | rdp->passed_quiesc = 1; |
104 | rcu_preempt_note_context_switch(cpu); | 110 | rcu_preempt_note_context_switch(cpu); |
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu) | |||
109 | struct rcu_data *rdp; | 115 | struct rcu_data *rdp; |
110 | 116 | ||
111 | rdp = &per_cpu(rcu_bh_data, cpu); | 117 | rdp = &per_cpu(rcu_bh_data, cpu); |
112 | rdp->passed_quiesc_completed = rdp->completed; | 118 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
113 | barrier(); | 119 | barrier(); |
114 | rdp->passed_quiesc = 1; | 120 | rdp->passed_quiesc = 1; |
115 | } | 121 | } |
@@ -335,28 +341,9 @@ void rcu_irq_exit(void) | |||
335 | set_need_resched(); | 341 | set_need_resched(); |
336 | } | 342 | } |
337 | 343 | ||
338 | /* | ||
339 | * Record the specified "completed" value, which is later used to validate | ||
340 | * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
341 | * unconditionally invalidate any future dynticks manipulations (which is | ||
342 | * useful at the beginning of a grace period). | ||
343 | */ | ||
344 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
345 | { | ||
346 | rsp->dynticks_completed = comp; | ||
347 | } | ||
348 | |||
349 | #ifdef CONFIG_SMP | 344 | #ifdef CONFIG_SMP |
350 | 345 | ||
351 | /* | 346 | /* |
352 | * Recall the previously recorded value of the completion for dynticks. | ||
353 | */ | ||
354 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
355 | { | ||
356 | return rsp->dynticks_completed; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Snapshot the specified CPU's dynticks counter so that we can later | 347 | * Snapshot the specified CPU's dynticks counter so that we can later |
361 | * credit them with an implicit quiescent state. Return 1 if this CPU | 348 | * credit them with an implicit quiescent state. Return 1 if this CPU |
362 | * is in dynticks idle mode, which is an extended quiescent state. | 349 | * is in dynticks idle mode, which is an extended quiescent state. |
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
419 | 406 | ||
420 | #else /* #ifdef CONFIG_NO_HZ */ | 407 | #else /* #ifdef CONFIG_NO_HZ */ |
421 | 408 | ||
422 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
423 | { | ||
424 | } | ||
425 | |||
426 | #ifdef CONFIG_SMP | 409 | #ifdef CONFIG_SMP |
427 | 410 | ||
428 | /* | ||
429 | * If there are no dynticks, then the only way that a CPU can passively | ||
430 | * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
431 | * is a point in time during the prior (already finished) grace period, | ||
432 | * an offline CPU is always in a quiescent state, and thus can be | ||
433 | * unconditionally applied. So just return the current value of completed. | ||
434 | */ | ||
435 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
436 | { | ||
437 | return rsp->completed; | ||
438 | } | ||
439 | |||
440 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 411 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
441 | { | 412 | { |
442 | return 0; | 413 | return 0; |
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
553 | /* | 524 | /* |
554 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 525 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
555 | * This is used both when we started the grace period and when we notice | 526 | * This is used both when we started the grace period and when we notice |
556 | * that someone else started the grace period. | 527 | * that someone else started the grace period. The caller must hold the |
528 | * ->lock of the leaf rcu_node structure corresponding to the current CPU, | ||
529 | * and must have irqs disabled. | ||
557 | */ | 530 | */ |
531 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
532 | { | ||
533 | if (rdp->gpnum != rnp->gpnum) { | ||
534 | rdp->qs_pending = 1; | ||
535 | rdp->passed_quiesc = 0; | ||
536 | rdp->gpnum = rnp->gpnum; | ||
537 | } | ||
538 | } | ||
539 | |||
558 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | 540 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) |
559 | { | 541 | { |
560 | rdp->qs_pending = 1; | 542 | unsigned long flags; |
561 | rdp->passed_quiesc = 0; | 543 | struct rcu_node *rnp; |
562 | rdp->gpnum = rsp->gpnum; | 544 | |
545 | local_irq_save(flags); | ||
546 | rnp = rdp->mynode; | ||
547 | if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ | ||
548 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
549 | local_irq_restore(flags); | ||
550 | return; | ||
551 | } | ||
552 | __note_new_gpnum(rsp, rnp, rdp); | ||
553 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
563 | } | 554 | } |
564 | 555 | ||
565 | /* | 556 | /* |
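
note_new_gpnum() above, and rcu_process_gp_end() in the next hunk, both switch to the same lock-avoidance idiom: sample the relevant rcu_node field outside the lock via ACCESS_ONCE(), take the lock only opportunistically with spin_trylock(), and simply return if either step fails, trusting a later softirq pass to retry. A stripped-down sketch of the idiom (the struct and field names here are illustrative, not the real rcu_node/rcu_data layout):

	struct shared_node {
		spinlock_t lock;
		unsigned long value;		/* stands in for ->gpnum or ->completed */
	};

	struct percpu_state {
		unsigned long snapshot;		/* this CPU's last-seen copy of ->value */
	};

	static void maybe_resync(struct shared_node *np, struct percpu_state *p)
	{
		unsigned long flags;

		local_irq_save(flags);
		if (p->snapshot == ACCESS_ONCE(np->value) ||	/* nothing new (checked outside lock), */
		    !spin_trylock(&np->lock)) {			/* or lock contended: retry later. */
			local_irq_restore(flags);
			return;
		}
		p->snapshot = np->value;	/* safe: lock held, irqs disabled */
		spin_unlock_irqrestore(&np->lock, flags);
	}

Giving up on contention is harmless here because the per-CPU state is re-examined on every pass through the RCU softirq, so the update is merely deferred, never lost.
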
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) | |||
583 | } | 574 | } |
584 | 575 | ||
585 | /* | 576 | /* |
577 | * Advance this CPU's callbacks, but only if the current grace period | ||
578 | * has ended. This may be called only from the CPU to whom the rdp | ||
579 | * belongs. In addition, the corresponding leaf rcu_node structure's | ||
580 | * ->lock must be held by the caller, with irqs disabled. | ||
581 | */ | ||
582 | static void | ||
583 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
584 | { | ||
585 | /* Did another grace period end? */ | ||
586 | if (rdp->completed != rnp->completed) { | ||
587 | |||
588 | /* Advance callbacks. No harm if list empty. */ | ||
589 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
590 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
591 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
592 | |||
593 | /* Remember that we saw this grace-period completion. */ | ||
594 | rdp->completed = rnp->completed; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Advance this CPU's callbacks, but only if the current grace period | ||
600 | * has ended. This may be called only from the CPU to whom the rdp | ||
601 | * belongs. | ||
602 | */ | ||
603 | static void | ||
604 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
605 | { | ||
606 | unsigned long flags; | ||
607 | struct rcu_node *rnp; | ||
608 | |||
609 | local_irq_save(flags); | ||
610 | rnp = rdp->mynode; | ||
611 | if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ | ||
612 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
613 | local_irq_restore(flags); | ||
614 | return; | ||
615 | } | ||
616 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
617 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * Do per-CPU grace-period initialization for running CPU. The caller | ||
622 | * must hold the lock of the leaf rcu_node structure corresponding to | ||
623 | * this CPU. | ||
624 | */ | ||
625 | static void | ||
626 | rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
627 | { | ||
628 | /* Prior grace period ended, so advance callbacks for current CPU. */ | ||
629 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
630 | |||
631 | /* | ||
632 | * Because this CPU just now started the new grace period, we know | ||
633 | * that all of its callbacks will be covered by this upcoming grace | ||
634 | * period, even the ones that were registered arbitrarily recently. | ||
635 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
636 | * | ||
637 | * Other CPUs cannot be sure exactly when the grace period started. | ||
638 | * Therefore, their recently registered callbacks must pass through | ||
639 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
640 | * by the next RCU grace period. | ||
641 | */ | ||
642 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
643 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
644 | |||
645 | /* Set state so that this CPU will detect the next quiescent state. */ | ||
646 | __note_new_gpnum(rsp, rnp, rdp); | ||
647 | } | ||
648 | |||
649 | /* | ||
586 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | 650 | * Start a new RCU grace period if warranted, re-initializing the hierarchy |
587 | * in preparation for detecting the next grace period. The caller must hold | 651 | * in preparation for detecting the next grace period. The caller must hold |
588 | * the root node's ->lock, which is released before return. Hard irqs must | 652 | * the root node's ->lock, which is released before return. Hard irqs must |
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
596 | struct rcu_node *rnp = rcu_get_root(rsp); | 660 | struct rcu_node *rnp = rcu_get_root(rsp); |
597 | 661 | ||
598 | if (!cpu_needs_another_gp(rsp, rdp)) { | 662 | if (!cpu_needs_another_gp(rsp, rdp)) { |
599 | spin_unlock_irqrestore(&rnp->lock, flags); | 663 | if (rnp->completed == rsp->completed) { |
664 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
665 | return; | ||
666 | } | ||
667 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | |||
669 | /* | ||
670 | * Propagate new ->completed value to rcu_node structures | ||
671 | * so that other CPUs don't have to wait until the start | ||
672 | * of the next grace period to process their callbacks. | ||
673 | */ | ||
674 | rcu_for_each_node_breadth_first(rsp, rnp) { | ||
675 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
676 | rnp->completed = rsp->completed; | ||
677 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
678 | } | ||
679 | local_irq_restore(flags); | ||
600 | return; | 680 | return; |
601 | } | 681 | } |
602 | 682 | ||
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
606 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 686 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
607 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 687 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
608 | record_gp_stall_check_time(rsp); | 688 | record_gp_stall_check_time(rsp); |
609 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
610 | note_new_gpnum(rsp, rdp); | ||
611 | |||
612 | /* | ||
613 | * Because this CPU just now started the new grace period, we know | ||
614 | * that all of its callbacks will be covered by this upcoming grace | ||
615 | * period, even the ones that were registered arbitrarily recently. | ||
616 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
617 | * | ||
618 | * Other CPUs cannot be sure exactly when the grace period started. | ||
619 | * Therefore, their recently registered callbacks must pass through | ||
620 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
621 | * by the next RCU grace period. | ||
622 | */ | ||
623 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
624 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
625 | 689 | ||
626 | /* Special-case the common single-level case. */ | 690 | /* Special-case the common single-level case. */ |
627 | if (NUM_RCU_NODES == 1) { | 691 | if (NUM_RCU_NODES == 1) { |
628 | rcu_preempt_check_blocked_tasks(rnp); | 692 | rcu_preempt_check_blocked_tasks(rnp); |
629 | rnp->qsmask = rnp->qsmaskinit; | 693 | rnp->qsmask = rnp->qsmaskinit; |
630 | rnp->gpnum = rsp->gpnum; | 694 | rnp->gpnum = rsp->gpnum; |
695 | rnp->completed = rsp->completed; | ||
631 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 696 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
697 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
632 | spin_unlock_irqrestore(&rnp->lock, flags); | 698 | spin_unlock_irqrestore(&rnp->lock, flags); |
633 | return; | 699 | return; |
634 | } | 700 | } |
@@ -657,69 +723,50 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
657 | * irqs disabled. | 723 | * irqs disabled. |
658 | */ | 724 | */ |
659 | rcu_for_each_node_breadth_first(rsp, rnp) { | 725 | rcu_for_each_node_breadth_first(rsp, rnp) { |
660 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 726 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
661 | rcu_preempt_check_blocked_tasks(rnp); | 727 | rcu_preempt_check_blocked_tasks(rnp); |
662 | rnp->qsmask = rnp->qsmaskinit; | 728 | rnp->qsmask = rnp->qsmaskinit; |
663 | rnp->gpnum = rsp->gpnum; | 729 | rnp->gpnum = rsp->gpnum; |
664 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 730 | rnp->completed = rsp->completed; |
731 | if (rnp == rdp->mynode) | ||
732 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
733 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
665 | } | 734 | } |
666 | 735 | ||
736 | rnp = rcu_get_root(rsp); | ||
737 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
667 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | 738 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ |
739 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 740 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
669 | } | 741 | } |
670 | 742 | ||
671 | /* | 743 | /* |
672 | * Advance this CPU's callbacks, but only if the current grace period | 744 | * Report a full set of quiescent states to the specified rcu_state |
673 | * has ended. This may be called only from the CPU to whom the rdp | 745 | * data structure. This involves cleaning up after the prior grace |
674 | * belongs. | 746 | * period and letting rcu_start_gp() start up the next grace period |
675 | */ | 747 | * if one is needed. Note that the caller must hold rnp->lock, as |
676 | static void | 748 | * required by rcu_start_gp(), which will release it. |
677 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
678 | { | ||
679 | long completed_snap; | ||
680 | unsigned long flags; | ||
681 | |||
682 | local_irq_save(flags); | ||
683 | completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
684 | |||
685 | /* Did another grace period end? */ | ||
686 | if (rdp->completed != completed_snap) { | ||
687 | |||
688 | /* Advance callbacks. No harm if list empty. */ | ||
689 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
690 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
691 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
692 | |||
693 | /* Remember that we saw this grace-period completion. */ | ||
694 | rdp->completed = completed_snap; | ||
695 | } | ||
696 | local_irq_restore(flags); | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Clean up after the prior grace period and let rcu_start_gp() start up | ||
701 | * the next grace period if one is needed. Note that the caller must | ||
702 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | ||
703 | */ | 749 | */ |
704 | static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | 750 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
705 | __releases(rcu_get_root(rsp)->lock) | 751 | __releases(rcu_get_root(rsp)->lock) |
706 | { | 752 | { |
707 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | 753 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
708 | rsp->completed = rsp->gpnum; | 754 | rsp->completed = rsp->gpnum; |
709 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | 755 | rsp->signaled = RCU_GP_IDLE; |
710 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | 756 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
711 | } | 757 | } |
712 | 758 | ||
713 | /* | 759 | /* |
714 | * Similar to cpu_quiet(), for which it is a helper function. Allows | 760 | * Similar to rcu_report_qs_rdp(), for which it is a helper function. |
715 | * a group of CPUs to be quieted at one go, though all the CPUs in the | 761 | * Allows quiescent states for a group of CPUs to be reported at one go |
716 | * group must be represented by the same leaf rcu_node structure. | 762 | * to the specified rcu_node structure, though all the CPUs in the group |
717 | * That structure's lock must be held upon entry, and it is released | 763 | * must be represented by the same rcu_node structure (which need not be |
718 | * before return. | 764 | * a leaf rcu_node structure, though it often will be). That structure's |
765 | * lock must be held upon entry, and it is released before return. | ||
719 | */ | 766 | */ |
720 | static void | 767 | static void |
721 | cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | 768 | rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, |
722 | unsigned long flags) | 769 | struct rcu_node *rnp, unsigned long flags) |
723 | __releases(rnp->lock) | 770 | __releases(rnp->lock) |
724 | { | 771 | { |
725 | struct rcu_node *rnp_c; | 772 | struct rcu_node *rnp_c; |
@@ -755,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
755 | 802 | ||
756 | /* | 803 | /* |
757 | * Get here if we are the last CPU to pass through a quiescent | 804 | * Get here if we are the last CPU to pass through a quiescent |
758 | * state for this grace period. Invoke cpu_quiet_msk_finish() | 805 | * state for this grace period. Invoke rcu_report_qs_rsp() |
759 | * to clean up and start the next grace period if one is needed. | 806 | * to clean up and start the next grace period if one is needed. |
760 | */ | 807 | */ |
761 | cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ | 808 | rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ |
762 | } | 809 | } |
763 | 810 | ||
764 | /* | 811 | /* |
765 | * Record a quiescent state for the specified CPU, which must either be | 812 | * Record a quiescent state for the specified CPU to that CPU's rcu_data |
766 | * the current CPU. The lastcomp argument is used to make sure we are | 813 | * structure. This must be either called from the specified CPU, or |
767 | * still in the grace period of interest. We don't want to end the current | 814 | * called when the specified CPU is known to be offline (and when it is |
768 | * grace period based on quiescent states detected in an earlier grace | 815 | * also known that no other CPU is concurrently trying to help the offline |
769 | * period! | 816 | * CPU). The lastcomp argument is used to make sure we are still in the |
817 | * grace period of interest. We don't want to end the current grace period | ||
818 | * based on quiescent states detected in an earlier grace period! | ||
770 | */ | 819 | */ |
771 | static void | 820 | static void |
772 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | 821 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) |
773 | { | 822 | { |
774 | unsigned long flags; | 823 | unsigned long flags; |
775 | unsigned long mask; | 824 | unsigned long mask; |
@@ -777,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
777 | 826 | ||
778 | rnp = rdp->mynode; | 827 | rnp = rdp->mynode; |
779 | spin_lock_irqsave(&rnp->lock, flags); | 828 | spin_lock_irqsave(&rnp->lock, flags); |
780 | if (lastcomp != ACCESS_ONCE(rsp->completed)) { | 829 | if (lastcomp != rnp->completed) { |
781 | 830 | ||
782 | /* | 831 | /* |
783 | * Someone beat us to it for this grace period, so leave. | 832 | * Someone beat us to it for this grace period, so leave. |
784 | * The race with GP start is resolved by the fact that we | 833 | * The race with GP start is resolved by the fact that we |
785 | * hold the leaf rcu_node lock, so that the per-CPU bits | 834 | * hold the leaf rcu_node lock, so that the per-CPU bits |
786 | * cannot yet be initialized -- so we would simply find our | 835 | * cannot yet be initialized -- so we would simply find our |
787 | * CPU's bit already cleared in cpu_quiet_msk() if this race | 836 | * CPU's bit already cleared in rcu_report_qs_rnp() if this |
788 | * occurred. | 837 | * race occurred. |
789 | */ | 838 | */ |
790 | rdp->passed_quiesc = 0; /* try again later! */ | 839 | rdp->passed_quiesc = 0; /* try again later! */ |
791 | spin_unlock_irqrestore(&rnp->lock, flags); | 840 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -803,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
803 | */ | 852 | */ |
804 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 853 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
805 | 854 | ||
806 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | 855 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */ |

807 | } | 856 | } |
808 | } | 857 | } |
809 | 858 | ||
@@ -834,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
834 | if (!rdp->passed_quiesc) | 883 | if (!rdp->passed_quiesc) |
835 | return; | 884 | return; |
836 | 885 | ||
837 | /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ | 886 | /* |
838 | cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | 887 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the |
888 | * judge of that). | ||
889 | */ | ||
890 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | ||
839 | } | 891 | } |
840 | 892 | ||
841 | #ifdef CONFIG_HOTPLUG_CPU | 893 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -895,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | |||
895 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 947 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
896 | { | 948 | { |
897 | unsigned long flags; | 949 | unsigned long flags; |
898 | long lastcomp; | ||
899 | unsigned long mask; | 950 | unsigned long mask; |
951 | int need_report = 0; | ||
900 | struct rcu_data *rdp = rsp->rda[cpu]; | 952 | struct rcu_data *rdp = rsp->rda[cpu]; |
901 | struct rcu_node *rnp; | 953 | struct rcu_node *rnp; |
902 | 954 | ||
@@ -910,17 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
910 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 962 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
911 | rnp->qsmaskinit &= ~mask; | 963 | rnp->qsmaskinit &= ~mask; |
912 | if (rnp->qsmaskinit != 0) { | 964 | if (rnp->qsmaskinit != 0) { |
913 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 965 | if (rnp != rdp->mynode) |
966 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
914 | break; | 967 | break; |
915 | } | 968 | } |
916 | rcu_preempt_offline_tasks(rsp, rnp, rdp); | 969 | if (rnp == rdp->mynode) |
970 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); | ||
971 | else | ||
972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
917 | mask = rnp->grpmask; | 973 | mask = rnp->grpmask; |
918 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
919 | rnp = rnp->parent; | 974 | rnp = rnp->parent; |
920 | } while (rnp != NULL); | 975 | } while (rnp != NULL); |
921 | lastcomp = rsp->completed; | ||
922 | 976 | ||
923 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 977 | /* |
978 | * We still hold the leaf rcu_node structure lock here, and | ||
979 | * irqs are still disabled. The reason for this subterfuge is | ||
980 | * because invoking rcu_report_unblock_qs_rnp() with ->onofflock | ||
981 | * held leads to deadlock. | ||
982 | */ | ||
983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
984 | rnp = rdp->mynode; | ||
985 | if (need_report & RCU_OFL_TASKS_NORM_GP) | ||
986 | rcu_report_unblock_qs_rnp(rnp, flags); | ||
987 | else | ||
988 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
989 | if (need_report & RCU_OFL_TASKS_EXP_GP) | ||
990 | rcu_report_exp_rnp(rsp, rnp); | ||
924 | 991 | ||
925 | rcu_adopt_orphan_cbs(rsp); | 992 | rcu_adopt_orphan_cbs(rsp); |
926 | } | 993 | } |
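
rcu_preempt_offline_tasks() now returns a mask describing which flavors of grace period the outgoing CPU's blocked tasks were holding up, and the caller reports each flavor only after ->onofflock has been dropped (the subterfuge described in the comment above). The flag definitions live in rcutree.h and are not part of this excerpt; they are presumably along these lines:

	#define RCU_OFL_TASKS_NORM_GP	0x1	/* Tasks blocking a normal grace period. */
	#define RCU_OFL_TASKS_EXP_GP	0x2	/* Tasks blocking an expedited grace period. */
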
@@ -958,7 +1025,7 @@ static void rcu_offline_cpu(int cpu) | |||
958 | * Invoke any RCU callbacks that have made it to the end of their grace | 1025 | * Invoke any RCU callbacks that have made it to the end of their grace |
959 | * period. Throttle as specified by rdp->blimit. | 1026 | * period. Throttle as specified by rdp->blimit. |
960 | */ | 1027 | */ |
961 | static void rcu_do_batch(struct rcu_data *rdp) | 1028 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
962 | { | 1029 | { |
963 | unsigned long flags; | 1030 | unsigned long flags; |
964 | struct rcu_head *next, *list, **tail; | 1031 | struct rcu_head *next, *list, **tail; |
@@ -1011,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
1011 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1078 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
1012 | rdp->blimit = blimit; | 1079 | rdp->blimit = blimit; |
1013 | 1080 | ||
1081 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1082 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1083 | rdp->qlen_last_fqs_check = 0; | ||
1084 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1085 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1086 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1087 | |||
1014 | local_irq_restore(flags); | 1088 | local_irq_restore(flags); |
1015 | 1089 | ||
1016 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1090 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -1085,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
1085 | rcu_for_each_leaf_node(rsp, rnp) { | 1159 | rcu_for_each_leaf_node(rsp, rnp) { |
1086 | mask = 0; | 1160 | mask = 0; |
1087 | spin_lock_irqsave(&rnp->lock, flags); | 1161 | spin_lock_irqsave(&rnp->lock, flags); |
1088 | if (rsp->completed != lastcomp) { | 1162 | if (rnp->completed != lastcomp) { |
1089 | spin_unlock_irqrestore(&rnp->lock, flags); | 1163 | spin_unlock_irqrestore(&rnp->lock, flags); |
1090 | return 1; | 1164 | return 1; |
1091 | } | 1165 | } |
@@ -1099,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
1099 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1173 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) |
1100 | mask |= bit; | 1174 | mask |= bit; |
1101 | } | 1175 | } |
1102 | if (mask != 0 && rsp->completed == lastcomp) { | 1176 | if (mask != 0 && rnp->completed == lastcomp) { |
1103 | 1177 | ||
1104 | /* cpu_quiet_msk() releases rnp->lock. */ | 1178 | /* rcu_report_qs_rnp() releases rnp->lock. */ |
1105 | cpu_quiet_msk(mask, rsp, rnp, flags); | 1179 | rcu_report_qs_rnp(mask, rsp, rnp, flags); |
1106 | continue; | 1180 | continue; |
1107 | } | 1181 | } |
1108 | spin_unlock_irqrestore(&rnp->lock, flags); | 1182 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -1120,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1120 | long lastcomp; | 1194 | long lastcomp; |
1121 | struct rcu_node *rnp = rcu_get_root(rsp); | 1195 | struct rcu_node *rnp = rcu_get_root(rsp); |
1122 | u8 signaled; | 1196 | u8 signaled; |
1197 | u8 forcenow; | ||
1123 | 1198 | ||
1124 | if (!rcu_gp_in_progress(rsp)) | 1199 | if (!rcu_gp_in_progress(rsp)) |
1125 | return; /* No grace period in progress, nothing to force. */ | 1200 | return; /* No grace period in progress, nothing to force. */ |
@@ -1132,19 +1207,20 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1132 | goto unlock_ret; /* no emergency and done recently. */ | 1207 | goto unlock_ret; /* no emergency and done recently. */ |
1133 | rsp->n_force_qs++; | 1208 | rsp->n_force_qs++; |
1134 | spin_lock(&rnp->lock); | 1209 | spin_lock(&rnp->lock); |
1135 | lastcomp = rsp->completed; | 1210 | lastcomp = rsp->gpnum - 1; |
1136 | signaled = rsp->signaled; | 1211 | signaled = rsp->signaled; |
1137 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1212 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
1138 | if (lastcomp == rsp->gpnum) { | 1213 | if (!rcu_gp_in_progress(rsp)) { |
1139 | rsp->n_force_qs_ngp++; | 1214 | rsp->n_force_qs_ngp++; |
1140 | spin_unlock(&rnp->lock); | 1215 | spin_unlock(&rnp->lock); |
1141 | goto unlock_ret; /* no GP in progress, time updated. */ | 1216 | goto unlock_ret; /* no GP in progress, time updated. */ |
1142 | } | 1217 | } |
1143 | spin_unlock(&rnp->lock); | 1218 | spin_unlock(&rnp->lock); |
1144 | switch (signaled) { | 1219 | switch (signaled) { |
1220 | case RCU_GP_IDLE: | ||
1145 | case RCU_GP_INIT: | 1221 | case RCU_GP_INIT: |
1146 | 1222 | ||
1147 | break; /* grace period still initializing, ignore. */ | 1223 | break; /* grace period idle or initializing, ignore. */ |
1148 | 1224 | ||
1149 | case RCU_SAVE_DYNTICK: | 1225 | case RCU_SAVE_DYNTICK: |
1150 | 1226 | ||
@@ -1155,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1155 | if (rcu_process_dyntick(rsp, lastcomp, | 1231 | if (rcu_process_dyntick(rsp, lastcomp, |
1156 | dyntick_save_progress_counter)) | 1232 | dyntick_save_progress_counter)) |
1157 | goto unlock_ret; | 1233 | goto unlock_ret; |
1234 | /* fall into next case. */ | ||
1235 | |||
1236 | case RCU_SAVE_COMPLETED: | ||
1158 | 1237 | ||
1159 | /* Update state, record completion counter. */ | 1238 | /* Update state, record completion counter. */ |
1239 | forcenow = 0; | ||
1160 | spin_lock(&rnp->lock); | 1240 | spin_lock(&rnp->lock); |
1161 | if (lastcomp == rsp->completed) { | 1241 | if (lastcomp + 1 == rsp->gpnum && |
1242 | lastcomp == rsp->completed && | ||
1243 | rsp->signaled == signaled) { | ||
1162 | rsp->signaled = RCU_FORCE_QS; | 1244 | rsp->signaled = RCU_FORCE_QS; |
1163 | dyntick_record_completed(rsp, lastcomp); | 1245 | rsp->completed_fqs = lastcomp; |
1246 | forcenow = signaled == RCU_SAVE_COMPLETED; | ||
1164 | } | 1247 | } |
1165 | spin_unlock(&rnp->lock); | 1248 | spin_unlock(&rnp->lock); |
1166 | break; | 1249 | if (!forcenow) |
1250 | break; | ||
1251 | /* fall into next case. */ | ||
1167 | 1252 | ||
1168 | case RCU_FORCE_QS: | 1253 | case RCU_FORCE_QS: |
1169 | 1254 | ||
1170 | /* Check dyntick-idle state, send IPI to laggarts. */ | 1255 | /* Check dyntick-idle state, send IPI to laggarts. */ |
1171 | if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), | 1256 | if (rcu_process_dyntick(rsp, rsp->completed_fqs, |
1172 | rcu_implicit_dynticks_qs)) | 1257 | rcu_implicit_dynticks_qs)) |
1173 | goto unlock_ret; | 1258 | goto unlock_ret; |
1174 | 1259 | ||
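
With RCU_GP_IDLE and RCU_SAVE_COMPLETED in the mix, ->signaled now walks a five-state machine, and force_quiescent_state() falls through from the RCU_SAVE_DYNTICK case into the new RCU_SAVE_COMPLETED case, and from there into RCU_FORCE_QS when forcenow is set, all within a single invocation. A summary of the states as used here (the authoritative values and comments are in rcutree.h; this list is a reader's reconstruction):

	/* Values for rsp->signaled, in order of progression. */
	#define RCU_GP_IDLE		0	/* No grace period in progress. */
	#define RCU_GP_INIT		1	/* GP being initialized; hold off force_quiescent_state(). */
	#define RCU_SAVE_DYNTICK	2	/* Need to snapshot dyntick counters (CONFIG_NO_HZ). */
	#define RCU_SAVE_COMPLETED	3	/* Need to record ->completed_fqs. */
	#define RCU_FORCE_QS		4	/* Check dyntick state, push laggard CPUs. */

RCU_SIGNAL_INIT presumably maps to RCU_SAVE_DYNTICK under CONFIG_NO_HZ and to RCU_SAVE_COMPLETED otherwise, which is why the dyntick-scan case can be skipped entirely on non-NO_HZ builds.
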
@@ -1224,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1224 | } | 1309 | } |
1225 | 1310 | ||
1226 | /* If there are callbacks ready, invoke them. */ | 1311 | /* If there are callbacks ready, invoke them. */ |
1227 | rcu_do_batch(rdp); | 1312 | rcu_do_batch(rsp, rdp); |
1228 | } | 1313 | } |
1229 | 1314 | ||
1230 | /* | 1315 | /* |
@@ -1288,10 +1373,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1288 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1373 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1289 | } | 1374 | } |
1290 | 1375 | ||
1291 | /* Force the grace period if too many callbacks or too long waiting. */ | 1376 | /* |
1292 | if (unlikely(++rdp->qlen > qhimark)) { | 1377 | * Force the grace period if too many callbacks or too long waiting. |
1378 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1379 | * if some other CPU has recently done so. Also, don't bother | ||
1380 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1381 | * is the only one waiting for a grace period to complete. | ||
1382 | */ | ||
1383 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1293 | rdp->blimit = LONG_MAX; | 1384 | rdp->blimit = LONG_MAX; |
1294 | force_quiescent_state(rsp, 0); | 1385 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1386 | *rdp->nxttail[RCU_DONE_TAIL] != head) | ||
1387 | force_quiescent_state(rsp, 0); | ||
1388 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1389 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1295 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | 1390 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1296 | force_quiescent_state(rsp, 1); | 1391 | force_quiescent_state(rsp, 1); |
1297 | local_irq_restore(flags); | 1392 | local_irq_restore(flags); |
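
The effect of the hysteresis above is that a CPU that keeps enqueueing callbacks no longer invokes force_quiescent_state() on every call_rcu() once qhimark is exceeded: the queue has to grow by another qhimark beyond ->qlen_last_fqs_check, and no other CPU may have forced a quiescent state in the meantime (tracked via ->n_force_qs_snap). A worked trace, assuming the default qhimark of 10000 (the default is set by a module parameter near the top of this file and is quoted here from memory):

	/*
	 * qlen reaches 10001  -> 10001 > 0 + 10000 holds: blimit = LONG_MAX,
	 *                        maybe force a QS, qlen_last_fqs_check = 10001.
	 * qlen reaches 15000  -> 15000 <= 10001 + 10000, nothing forced.
	 * qlen reaches 20002  -> 20002 > 10001 + 10000, forced again
	 *                        (unless some other CPU already did).
	 * rcu_do_batch() later drains the queue to 0
	 *                     -> qlen_last_fqs_check reset to 0, rearming
	 *                        the original threshold.
	 */
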
@@ -1315,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
1315 | } | 1410 | } |
1316 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 1411 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
1317 | 1412 | ||
1413 | /** | ||
1414 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
1415 | * | ||
1416 | * Control will return to the caller some time after a full rcu-sched | ||
1417 | * grace period has elapsed, in other words after all currently executing | ||
1418 | * rcu-sched read-side critical sections have completed. These read-side | ||
1419 | * critical sections are delimited by rcu_read_lock_sched() and | ||
1420 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
1421 | * local_irq_disable(), and so on may be used in place of | ||
1422 | * rcu_read_lock_sched(). | ||
1423 | * | ||
1424 | * This means that all preempt_disable code sequences, including NMI and | ||
1425 | * hardware-interrupt handlers, in progress on entry will have completed | ||
1426 | * before this primitive returns. However, this does not guarantee that | ||
1427 | * softirq handlers will have completed, since in some kernels, these | ||
1428 | * handlers can run in process context, and can block. | ||
1429 | * | ||
1430 | * This primitive provides the guarantees made by the (now removed) | ||
1431 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
1432 | * guarantees that rcu_read_lock() sections will have completed. | ||
1433 | * In "classic RCU", these two guarantees happen to be one and | ||
1434 | * the same, but can differ in realtime RCU implementations. | ||
1435 | */ | ||
1436 | void synchronize_sched(void) | ||
1437 | { | ||
1438 | struct rcu_synchronize rcu; | ||
1439 | |||
1440 | if (rcu_blocking_is_gp()) | ||
1441 | return; | ||
1442 | |||
1443 | init_completion(&rcu.completion); | ||
1444 | /* Will wake me after RCU finished. */ | ||
1445 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
1446 | /* Wait for it. */ | ||
1447 | wait_for_completion(&rcu.completion); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
1450 | |||
1451 | /** | ||
1452 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
1453 | * | ||
1454 | * Control will return to the caller some time after a full rcu_bh grace | ||
1455 | * period has elapsed, in other words after all currently executing rcu_bh | ||
1456 | * read-side critical sections have completed. RCU read-side critical | ||
1457 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
1458 | * and may be nested. | ||
1459 | */ | ||
1460 | void synchronize_rcu_bh(void) | ||
1461 | { | ||
1462 | struct rcu_synchronize rcu; | ||
1463 | |||
1464 | if (rcu_blocking_is_gp()) | ||
1465 | return; | ||
1466 | |||
1467 | init_completion(&rcu.completion); | ||
1468 | /* Will wake me after RCU finished. */ | ||
1469 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
1470 | /* Wait for it. */ | ||
1471 | wait_for_completion(&rcu.completion); | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
1474 | |||
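
These kernel-doc blocks make the classic updater pattern explicit for the sched and bh flavors: unpublish the item, wait for all pre-existing readers, then free. A hypothetical example for the rcu_bh flavor (struct foo and foo_remove are invented names, and the caller is assumed to hold the lock that serializes updaters):

	struct foo {
		int data;
		struct list_head list;		/* on an RCU-protected list */
	};

	void foo_remove(struct foo *p)
	{
		list_del_rcu(&p->list);		/* new rcu_bh readers cannot find p... */
		synchronize_rcu_bh();		/* ...and all pre-existing ones have finished */
		kfree(p);			/* so nothing can still reference p */
	}
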
1318 | /* | 1475 | /* |
1319 | * Check to see if there is any immediate RCU-related work to be done | 1476 | * Check to see if there is any immediate RCU-related work to be done |
1320 | * by the current CPU, for the specified type of RCU, returning 1 if so. | 1477 | * by the current CPU, for the specified type of RCU, returning 1 if so. |
@@ -1324,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); | |||
1324 | */ | 1481 | */ |
1325 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | 1482 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) |
1326 | { | 1483 | { |
1484 | struct rcu_node *rnp = rdp->mynode; | ||
1485 | |||
1327 | rdp->n_rcu_pending++; | 1486 | rdp->n_rcu_pending++; |
1328 | 1487 | ||
1329 | /* Check for CPU stalls, if enabled. */ | 1488 | /* Check for CPU stalls, if enabled. */ |
@@ -1348,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1348 | } | 1507 | } |
1349 | 1508 | ||
1350 | /* Has another RCU grace period completed? */ | 1509 | /* Has another RCU grace period completed? */ |
1351 | if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ | 1510 | if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
1352 | rdp->n_rp_gp_completed++; | 1511 | rdp->n_rp_gp_completed++; |
1353 | return 1; | 1512 | return 1; |
1354 | } | 1513 | } |
1355 | 1514 | ||
1356 | /* Has a new RCU grace period started? */ | 1515 | /* Has a new RCU grace period started? */ |
1357 | if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ | 1516 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
1358 | rdp->n_rp_gp_started++; | 1517 | rdp->n_rp_gp_started++; |
1359 | return 1; | 1518 | return 1; |
1360 | } | 1519 | } |
@@ -1397,6 +1556,21 @@ int rcu_needs_cpu(int cpu) | |||
1397 | rcu_preempt_needs_cpu(cpu); | 1556 | rcu_preempt_needs_cpu(cpu); |
1398 | } | 1557 | } |
1399 | 1558 | ||
1559 | /* | ||
1560 | * This function is invoked towards the end of the scheduler's initialization | ||
1561 | * process. Before this is called, the idle task might contain | ||
1562 | * RCU read-side critical sections (during which time, this idle | ||
1563 | * task is booting the system). After this function is called, the | ||
1564 | * idle tasks are prohibited from containing RCU read-side critical | ||
1565 | * sections. | ||
1566 | */ | ||
1567 | void rcu_scheduler_starting(void) | ||
1568 | { | ||
1569 | WARN_ON(num_online_cpus() != 1); | ||
1570 | WARN_ON(nr_context_switches() > 0); | ||
1571 | rcu_scheduler_active = 1; | ||
1572 | } | ||
1573 | |||
1400 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | 1574 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; |
1401 | static atomic_t rcu_barrier_cpu_count; | 1575 | static atomic_t rcu_barrier_cpu_count; |
1402 | static DEFINE_MUTEX(rcu_barrier_mutex); | 1576 | static DEFINE_MUTEX(rcu_barrier_mutex); |
@@ -1508,21 +1682,18 @@ static void __cpuinit | |||
1508 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | 1682 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) |
1509 | { | 1683 | { |
1510 | unsigned long flags; | 1684 | unsigned long flags; |
1511 | long lastcomp; | ||
1512 | unsigned long mask; | 1685 | unsigned long mask; |
1513 | struct rcu_data *rdp = rsp->rda[cpu]; | 1686 | struct rcu_data *rdp = rsp->rda[cpu]; |
1514 | struct rcu_node *rnp = rcu_get_root(rsp); | 1687 | struct rcu_node *rnp = rcu_get_root(rsp); |
1515 | 1688 | ||
1516 | /* Set up local state, ensuring consistent view of global state. */ | 1689 | /* Set up local state, ensuring consistent view of global state. */ |
1517 | spin_lock_irqsave(&rnp->lock, flags); | 1690 | spin_lock_irqsave(&rnp->lock, flags); |
1518 | lastcomp = rsp->completed; | ||
1519 | rdp->completed = lastcomp; | ||
1520 | rdp->gpnum = lastcomp; | ||
1521 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | 1691 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ |
1522 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | 1692 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ |
1523 | rdp->beenonline = 1; /* We have now been online. */ | 1693 | rdp->beenonline = 1; /* We have now been online. */ |
1524 | rdp->preemptable = preemptable; | 1694 | rdp->preemptable = preemptable; |
1525 | rdp->passed_quiesc_completed = lastcomp - 1; | 1695 | rdp->qlen_last_fqs_check = 0; |
1696 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1526 | rdp->blimit = blimit; | 1697 | rdp->blimit = blimit; |
1527 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1698 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1528 | 1699 | ||
@@ -1542,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | |||
1542 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 1713 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
1543 | rnp->qsmaskinit |= mask; | 1714 | rnp->qsmaskinit |= mask; |
1544 | mask = rnp->grpmask; | 1715 | mask = rnp->grpmask; |
1716 | if (rnp == rdp->mynode) { | ||
1717 | rdp->gpnum = rnp->completed; /* if GP in progress... */ | ||
1718 | rdp->completed = rnp->completed; | ||
1719 | rdp->passed_quiesc_completed = rnp->completed - 1; | ||
1720 | } | ||
1545 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 1721 | spin_unlock(&rnp->lock); /* irqs already disabled. */ |
1546 | rnp = rnp->parent; | 1722 | rnp = rnp->parent; |
1547 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | 1723 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
@@ -1559,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu) | |||
1559 | /* | 1735 | /* |
1560 | * Handle CPU online/offline notification events. | 1736 | * Handle CPU online/offline notification events. |
1561 | */ | 1737 | */ |
1562 | int __cpuinit rcu_cpu_notify(struct notifier_block *self, | 1738 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1563 | unsigned long action, void *hcpu) | 1739 | unsigned long action, void *hcpu) |
1564 | { | 1740 | { |
1565 | long cpu = (long)hcpu; | 1741 | long cpu = (long)hcpu; |
1566 | 1742 | ||
@@ -1647,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1647 | cpustride *= rsp->levelspread[i]; | 1823 | cpustride *= rsp->levelspread[i]; |
1648 | rnp = rsp->level[i]; | 1824 | rnp = rsp->level[i]; |
1649 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1825 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1650 | if (rnp != rcu_get_root(rsp)) | 1826 | spin_lock_init(&rnp->lock); |
1651 | spin_lock_init(&rnp->lock); | 1827 | lockdep_set_class(&rnp->lock, &rcu_node_class[i]); |
1652 | rnp->gpnum = 0; | 1828 | rnp->gpnum = 0; |
1653 | rnp->qsmask = 0; | 1829 | rnp->qsmask = 0; |
1654 | rnp->qsmaskinit = 0; | 1830 | rnp->qsmaskinit = 0; |
@@ -1669,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1669 | rnp->level = i; | 1845 | rnp->level = i; |
1670 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | 1846 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); |
1671 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | 1847 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); |
1848 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1672 | } | 1850 | } |
1673 | } | 1851 | } |
1674 | spin_lock_init(&rcu_get_root(rsp)->lock); | ||
1675 | } | 1852 | } |
1676 | 1853 | ||
1677 | /* | 1854 | /* |
@@ -1697,16 +1874,30 @@ do { \ | |||
1697 | } \ | 1874 | } \ |
1698 | } while (0) | 1875 | } while (0) |
1699 | 1876 | ||
1700 | void __init __rcu_init(void) | 1877 | void __init rcu_init(void) |
1701 | { | 1878 | { |
1879 | int i; | ||
1880 | |||
1702 | rcu_bootup_announce(); | 1881 | rcu_bootup_announce(); |
1703 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1704 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
1705 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
1885 | #if NUM_RCU_LVL_4 != 0 | ||
1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); | ||
1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ | ||
1706 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); | 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1707 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | 1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); |
1708 | __rcu_init_preempt(); | 1890 | __rcu_init_preempt(); |
1709 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1891 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
1892 | |||
1893 | /* | ||
1894 | * We don't need protection against CPU-hotplug here because | ||
1895 | * this is called early in boot, before either interrupts | ||
1896 | * or the scheduler are operational. | ||
1897 | */ | ||
1898 | cpu_notifier(rcu_cpu_notify, 0); | ||
1899 | for_each_online_cpu(i) | ||
1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i); | ||
1710 | } | 1901 | } |
1711 | 1902 | ||
1712 | #include "rcutree_plugin.h" | 1903 | #include "rcutree_plugin.h" |
diff --git a/kernel/rcutree.h b/kernel/rcutree.h index b40ac5706040..d2a0046f63b2 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h | |||
@@ -34,10 +34,11 @@ | |||
34 | * In practice, this has not been tested, so there is probably some | 34 | * In practice, this has not been tested, so there is probably some |
35 | * bug somewhere. | 35 | * bug somewhere. |
36 | */ | 36 | */ |
37 | #define MAX_RCU_LVLS 3 | 37 | #define MAX_RCU_LVLS 4 |
38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) | 38 | #define RCU_FANOUT (CONFIG_RCU_FANOUT) |
39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) | 39 | #define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) |
40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) | 40 | #define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) |
41 | #define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT) | ||
41 | 42 | ||
42 | #if NR_CPUS <= RCU_FANOUT | 43 | #if NR_CPUS <= RCU_FANOUT |
43 | # define NUM_RCU_LVLS 1 | 44 | # define NUM_RCU_LVLS 1 |
@@ -45,23 +46,33 @@ | |||
45 | # define NUM_RCU_LVL_1 (NR_CPUS) | 46 | # define NUM_RCU_LVL_1 (NR_CPUS) |
46 | # define NUM_RCU_LVL_2 0 | 47 | # define NUM_RCU_LVL_2 0 |
47 | # define NUM_RCU_LVL_3 0 | 48 | # define NUM_RCU_LVL_3 0 |
49 | # define NUM_RCU_LVL_4 0 | ||
48 | #elif NR_CPUS <= RCU_FANOUT_SQ | 50 | #elif NR_CPUS <= RCU_FANOUT_SQ |
49 | # define NUM_RCU_LVLS 2 | 51 | # define NUM_RCU_LVLS 2 |
50 | # define NUM_RCU_LVL_0 1 | 52 | # define NUM_RCU_LVL_0 1 |
51 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 53 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
52 | # define NUM_RCU_LVL_2 (NR_CPUS) | 54 | # define NUM_RCU_LVL_2 (NR_CPUS) |
53 | # define NUM_RCU_LVL_3 0 | 55 | # define NUM_RCU_LVL_3 0 |
56 | # define NUM_RCU_LVL_4 0 | ||
54 | #elif NR_CPUS <= RCU_FANOUT_CUBE | 57 | #elif NR_CPUS <= RCU_FANOUT_CUBE |
55 | # define NUM_RCU_LVLS 3 | 58 | # define NUM_RCU_LVLS 3 |
56 | # define NUM_RCU_LVL_0 1 | 59 | # define NUM_RCU_LVL_0 1 |
57 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | 60 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) |
58 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | 61 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) |
59 | # define NUM_RCU_LVL_3 NR_CPUS | 62 | # define NUM_RCU_LVL_3 NR_CPUS |
63 | # define NUM_RCU_LVL_4 0 | ||
64 | #elif NR_CPUS <= RCU_FANOUT_FOURTH | ||
65 | # define NUM_RCU_LVLS 4 | ||
66 | # define NUM_RCU_LVL_0 1 | ||
67 | # define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE) | ||
68 | # define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) | ||
69 | # define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) | ||
70 | # define NUM_RCU_LVL_4 NR_CPUS | ||
60 | #else | 71 | #else |
61 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" | 72 | # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" |
62 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ | 73 | #endif /* #if (NR_CPUS) <= RCU_FANOUT */ |
63 | 74 | ||
64 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) | 75 | #define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4) |
65 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) | 76 | #define NUM_RCU_NODES (RCU_SUM - NR_CPUS) |
66 | 77 | ||
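For concreteness, the level sizes produced by the new four-level case can be reproduced with a small user-space sketch. The NR_CPUS and RCU_FANOUT values below are illustrative only, not taken from any particular configuration:

#include <stdio.h>

/* Illustrative stand-ins; the kernel takes these from Kconfig. */
#define NR_CPUS		300000		/* > 64^3, so four levels are needed */
#define RCU_FANOUT	64
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	long fanout_sq = (long)RCU_FANOUT * RCU_FANOUT;
	long fanout_cube = fanout_sq * RCU_FANOUT;
	long lvl1 = DIV_ROUND_UP(NR_CPUS, fanout_cube);	/* NUM_RCU_LVL_1 */
	long lvl2 = DIV_ROUND_UP(NR_CPUS, fanout_sq);	/* NUM_RCU_LVL_2 */
	long lvl3 = DIV_ROUND_UP(NR_CPUS, RCU_FANOUT);	/* NUM_RCU_LVL_3 */
	long rcu_sum = 1 + lvl1 + lvl2 + lvl3 + NR_CPUS;	/* RCU_SUM */

	printf("level sizes: 1 %ld %ld %ld %d\n", lvl1, lvl2, lvl3, NR_CPUS);
	printf("NUM_RCU_NODES = %ld\n", rcu_sum - NR_CPUS);
	return 0;
}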
67 | /* | 78 | /* |
@@ -84,14 +95,21 @@ struct rcu_node { | |||
84 | long gpnum; /* Current grace period for this node. */ | 95 | long gpnum; /* Current grace period for this node. */ |
85 | /* This will either be equal to or one */ | 96 | /* This will either be equal to or one */ |
86 | /* behind the root rcu_node's gpnum. */ | 97 | /* behind the root rcu_node's gpnum. */ |
98 | long completed; /* Last grace period completed for this node. */ | ||
99 | /* This will either be equal to or one */ | ||
100 | /* behind the root rcu_node's gpnum. */ | ||
87 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | 101 | unsigned long qsmask; /* CPUs or groups that need to switch in */ |
88 | /* order for current grace period to proceed.*/ | 102 | /* order for current grace period to proceed.*/ |
89 | /* In leaf rcu_node, each bit corresponds to */ | 103 | /* In leaf rcu_node, each bit corresponds to */ |
90 | /* an rcu_data structure, otherwise, each */ | 104 | /* an rcu_data structure, otherwise, each */ |
91 | /* bit corresponds to a child rcu_node */ | 105 | /* bit corresponds to a child rcu_node */ |
92 | /* structure. */ | 106 | /* structure. */ |
107 | unsigned long expmask; /* Groups that have ->blocked_tasks[] */ | ||
108 | /* elements that need to drain to allow the */ | ||
109 | /* current expedited grace period to */ | ||
110 | /* complete (only for TREE_PREEMPT_RCU). */ | ||
93 | unsigned long qsmaskinit; | 111 | unsigned long qsmaskinit; |
94 | /* Per-GP initialization for qsmask. */ | 112 | /* Per-GP initial value for qsmask & expmask. */ |
95 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ | 113 | unsigned long grpmask; /* Mask to apply to parent qsmask. */ |
96 | /* Only one bit will be set in this mask. */ | 114 | /* Only one bit will be set in this mask. */ |
97 | int grplo; /* lowest-numbered CPU or group here. */ | 115 | int grplo; /* lowest-numbered CPU or group here. */ |
@@ -99,7 +117,7 @@ struct rcu_node { | |||
99 | u8 grpnum; /* CPU/group number for next level up. */ | 117 | u8 grpnum; /* CPU/group number for next level up. */ |
100 | u8 level; /* root is at level 0. */ | 118 | u8 level; /* root is at level 0. */ |
101 | struct rcu_node *parent; | 119 | struct rcu_node *parent; |
102 | struct list_head blocked_tasks[2]; | 120 | struct list_head blocked_tasks[4]; |
103 | /* Tasks blocked in RCU read-side critsect. */ | 121 | /* Tasks blocked in RCU read-side critsect. */ |
104 | /* Grace period number (->gpnum) x blocked */ | 122 | /* Grace period number (->gpnum) x blocked */ |
105 | /* by tasks on the (x & 0x1) element of the */ | 123 | /* by tasks on the (x & 0x1) element of the */ |
@@ -114,6 +132,21 @@ struct rcu_node { | |||
114 | for ((rnp) = &(rsp)->node[0]; \ | 132 | for ((rnp) = &(rsp)->node[0]; \ |
115 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | 133 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) |
116 | 134 | ||
135 | /* | ||
136 | * Do a breadth-first scan of the non-leaf rcu_node structures for the | ||
137 | * specified rcu_state structure. Note that if there is a singleton | ||
138 | * rcu_node tree with but one rcu_node structure, this loop is a no-op. | ||
139 | */ | ||
140 | #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \ | ||
141 | for ((rnp) = &(rsp)->node[0]; \ | ||
142 | (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++) | ||
143 | |||
144 | /* | ||
145 | * Scan the leaves of the rcu_node hierarchy for the specified rcu_state | ||
146 | * structure. Note that if there is a singleton rcu_node tree with but | ||
147 | * one rcu_node structure, this loop -will- visit the rcu_node structure. | ||
148 | * It is still a leaf node, even if it is also the root node. | ||
149 | */ | ||
117 | #define rcu_for_each_leaf_node(rsp, rnp) \ | 150 | #define rcu_for_each_leaf_node(rsp, rnp) \ |
118 | for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ | 151 | for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
119 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) | 152 | (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) |
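The two iterators above rely on the rcu_state layout in which node[] stores the root first, then each lower level contiguously, with level[i] pointing at the first node of level i. A toy user-space model (level sizes are made up) shows why stopping at level[NUM_RCU_LVLS - 1] visits exactly the non-leaf nodes:

#include <stdio.h>

/* Toy model of the node[]/level[] layout; three levels, made-up sizes. */
struct toy_node { int level; };

#define LVL0	1			/* root */
#define LVL1	2			/* interior */
#define LVL2	6			/* leaves */
#define NNODES	(LVL0 + LVL1 + LVL2)

int main(void)
{
	struct toy_node node[NNODES];
	struct toy_node *level[3] = { &node[0], &node[LVL0], &node[LVL0 + LVL1] };
	struct toy_node *rnp;
	int i;

	for (i = 0; i < NNODES; i++)
		node[i].level = i == 0 ? 0 : (i < LVL0 + LVL1 ? 1 : 2);

	/* rcu_for_each_nonleaf_node_breadth_first(): stop at the leaf level. */
	for (rnp = &node[0]; rnp < level[2]; rnp++)
		printf("non-leaf node at level %d\n", rnp->level);

	/* rcu_for_each_leaf_node(): start at the leaf level. */
	for (rnp = level[2]; rnp < &node[NNODES]; rnp++)
		printf("leaf node at level %d\n", rnp->level);
	return 0;
}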
@@ -167,6 +200,10 @@ struct rcu_data { | |||
167 | struct rcu_head *nxtlist; | 200 | struct rcu_head *nxtlist; |
168 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; | 201 | struct rcu_head **nxttail[RCU_NEXT_SIZE]; |
169 | long qlen; /* # of queued callbacks */ | 202 | long qlen; /* # of queued callbacks */ |
203 | long qlen_last_fqs_check; | ||
204 | /* qlen at last check for QS forcing */ | ||
205 | unsigned long n_force_qs_snap; | ||
206 | /* did other CPU force QS recently? */ | ||
170 | long blimit; /* Upper limit on a processed batch */ | 207 | long blimit; /* Upper limit on a processed batch */ |
171 | 208 | ||
172 | #ifdef CONFIG_NO_HZ | 209 | #ifdef CONFIG_NO_HZ |
@@ -197,13 +234,15 @@ struct rcu_data { | |||
197 | }; | 234 | }; |
198 | 235 | ||
199 | /* Values for signaled field in struct rcu_state. */ | 236 | /* Values for signaled field in struct rcu_state. */ |
200 | #define RCU_GP_INIT 0 /* Grace period being initialized. */ | 237 | #define RCU_GP_IDLE 0 /* No grace period in progress. */ |
201 | #define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ | 238 | #define RCU_GP_INIT 1 /* Grace period being initialized. */ |
202 | #define RCU_FORCE_QS 2 /* Need to force quiescent state. */ | 239 | #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ |
240 | #define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */ | ||
241 | #define RCU_FORCE_QS 4 /* Need to force quiescent state. */ | ||
203 | #ifdef CONFIG_NO_HZ | 242 | #ifdef CONFIG_NO_HZ |
204 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | 243 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK |
205 | #else /* #ifdef CONFIG_NO_HZ */ | 244 | #else /* #ifdef CONFIG_NO_HZ */ |
206 | #define RCU_SIGNAL_INIT RCU_FORCE_QS | 245 | #define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED |
207 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 246 | #endif /* #else #ifdef CONFIG_NO_HZ */ |
208 | 247 | ||
209 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | 248 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ |
@@ -241,7 +280,7 @@ struct rcu_state { | |||
241 | long gpnum; /* Current gp number. */ | 280 | long gpnum; /* Current gp number. */ |
242 | long completed; /* # of last completed gp. */ | 281 | long completed; /* # of last completed gp. */ |
243 | 282 | ||
244 | /* End of fields guarded by root rcu_node's lock. */ | 283 | /* End of fields guarded by root rcu_node's lock. */ |
245 | 284 | ||
246 | spinlock_t onofflock; /* exclude on/offline and */ | 285 | spinlock_t onofflock; /* exclude on/offline and */ |
247 | /* starting new GP. Also */ | 286 | /* starting new GP. Also */ |
@@ -255,6 +294,8 @@ struct rcu_state { | |||
255 | long orphan_qlen; /* Number of orphaned cbs. */ | 294 | long orphan_qlen; /* Number of orphaned cbs. */ |
256 | spinlock_t fqslock; /* Only one task forcing */ | 295 | spinlock_t fqslock; /* Only one task forcing */ |
257 | /* quiescent states. */ | 296 | /* quiescent states. */ |
297 | long completed_fqs; /* Value of completed @ snap. */ | ||
298 | /* Protected by fqslock. */ | ||
258 | unsigned long jiffies_force_qs; /* Time at which to invoke */ | 299 | unsigned long jiffies_force_qs; /* Time at which to invoke */ |
259 | /* force_quiescent_state(). */ | 300 | /* force_quiescent_state(). */ |
260 | unsigned long n_force_qs; /* Number of calls to */ | 301 | unsigned long n_force_qs; /* Number of calls to */ |
@@ -269,11 +310,15 @@ struct rcu_state { | |||
269 | unsigned long jiffies_stall; /* Time at which to check */ | 310 | unsigned long jiffies_stall; /* Time at which to check */ |
270 | /* for CPU stalls. */ | 311 | /* for CPU stalls. */ |
271 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 312 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
272 | #ifdef CONFIG_NO_HZ | ||
273 | long dynticks_completed; /* Value of completed @ snap. */ | ||
274 | #endif /* #ifdef CONFIG_NO_HZ */ | ||
275 | }; | 313 | }; |
276 | 314 | ||
315 | /* Return values for rcu_preempt_offline_tasks(). */ | ||
316 | |||
317 | #define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ | ||
318 | /* GP were moved to root. */ | ||
319 | #define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */ | ||
320 | /* GP were moved to root. */ | ||
321 | |||
277 | #ifdef RCU_TREE_NONCORE | 322 | #ifdef RCU_TREE_NONCORE |
278 | 323 | ||
279 | /* | 324 | /* |
@@ -293,23 +338,30 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | |||
293 | #else /* #ifdef RCU_TREE_NONCORE */ | 338 | #else /* #ifdef RCU_TREE_NONCORE */ |
294 | 339 | ||
295 | /* Forward declarations for rcutree_plugin.h */ | 340 | /* Forward declarations for rcutree_plugin.h */ |
296 | static inline void rcu_bootup_announce(void); | 341 | static void rcu_bootup_announce(void); |
297 | long rcu_batches_completed(void); | 342 | long rcu_batches_completed(void); |
298 | static void rcu_preempt_note_context_switch(int cpu); | 343 | static void rcu_preempt_note_context_switch(int cpu); |
299 | static int rcu_preempted_readers(struct rcu_node *rnp); | 344 | static int rcu_preempted_readers(struct rcu_node *rnp); |
345 | #ifdef CONFIG_HOTPLUG_CPU | ||
346 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, | ||
347 | unsigned long flags); | ||
348 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
300 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 349 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
301 | static void rcu_print_task_stall(struct rcu_node *rnp); | 350 | static void rcu_print_task_stall(struct rcu_node *rnp); |
302 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 351 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
303 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); | 352 | static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); |
304 | #ifdef CONFIG_HOTPLUG_CPU | 353 | #ifdef CONFIG_HOTPLUG_CPU |
305 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 354 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
306 | struct rcu_node *rnp, | 355 | struct rcu_node *rnp, |
307 | struct rcu_data *rdp); | 356 | struct rcu_data *rdp); |
308 | static void rcu_preempt_offline_cpu(int cpu); | 357 | static void rcu_preempt_offline_cpu(int cpu); |
309 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | 358 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ |
310 | static void rcu_preempt_check_callbacks(int cpu); | 359 | static void rcu_preempt_check_callbacks(int cpu); |
311 | static void rcu_preempt_process_callbacks(void); | 360 | static void rcu_preempt_process_callbacks(void); |
312 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 361 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
362 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) | ||
363 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp); | ||
364 | #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ | ||
313 | static int rcu_preempt_pending(int cpu); | 365 | static int rcu_preempt_pending(int cpu); |
314 | static int rcu_preempt_needs_cpu(int cpu); | 366 | static int rcu_preempt_needs_cpu(int cpu); |
315 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); | 367 | static void __cpuinit rcu_preempt_init_percpu_data(int cpu); |
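The growth of blocked_tasks[] from two to four lists follows a simple indexing rule: lists 0 and 1 hold tasks blocking the normal grace period for the even and odd ->gpnum phases, while lists 2 and 3 hold the snapshot taken for an expedited grace period. A small sketch of the index arithmetic (list contents are faked with booleans):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the four struct list_head lists in struct rcu_node. */
static bool blocked_nonempty[4];

static bool preempted_readers(long gpnum)
{
	int phase = gpnum & 0x1;

	/* Normal-GP list for this phase plus its expedited counterpart. */
	return blocked_nonempty[phase] || blocked_nonempty[phase + 2];
}

int main(void)
{
	blocked_nonempty[1] = true;	/* task queued during an odd-numbered GP */

	printf("blocks gp 7? %d\n", preempted_readers(7));	/* 1: phase 1 occupied */
	printf("blocks gp 8? %d\n", preempted_readers(8));	/* 0: phase 0 empty */
	return 0;
}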
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h index c0cb783aa16a..37fbccdf41d5 100644 --- a/kernel/rcutree_plugin.h +++ b/kernel/rcutree_plugin.h | |||
@@ -24,16 +24,19 @@ | |||
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/delay.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_TREE_PREEMPT_RCU | 29 | #ifdef CONFIG_TREE_PREEMPT_RCU |
29 | 30 | ||
30 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 31 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
31 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | 32 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
32 | 33 | ||
34 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | ||
35 | |||
33 | /* | 36 | /* |
34 | * Tell them what RCU they are running. | 37 | * Tell them what RCU they are running. |
35 | */ | 38 | */ |
36 | static inline void rcu_bootup_announce(void) | 39 | static void __init rcu_bootup_announce(void) |
37 | { | 40 | { |
38 | printk(KERN_INFO | 41 | printk(KERN_INFO |
39 | "Experimental preemptable hierarchical RCU implementation.\n"); | 42 | "Experimental preemptable hierarchical RCU implementation.\n"); |
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
67 | static void rcu_preempt_qs(int cpu) | 70 | static void rcu_preempt_qs(int cpu) |
68 | { | 71 | { |
69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 72 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
70 | rdp->passed_quiesc_completed = rdp->completed; | 73 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
71 | barrier(); | 74 | barrier(); |
72 | rdp->passed_quiesc = 1; | 75 | rdp->passed_quiesc = 1; |
73 | } | 76 | } |
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock); | |||
157 | */ | 160 | */ |
158 | static int rcu_preempted_readers(struct rcu_node *rnp) | 161 | static int rcu_preempted_readers(struct rcu_node *rnp) |
159 | { | 162 | { |
160 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | 163 | int phase = rnp->gpnum & 0x1; |
164 | |||
165 | return !list_empty(&rnp->blocked_tasks[phase]) || | ||
166 | !list_empty(&rnp->blocked_tasks[phase + 2]); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Record a quiescent state for all tasks that were previously queued | ||
171 | * on the specified rcu_node structure and that were blocking the current | ||
172 | * RCU grace period. The caller must hold the specified rnp->lock with | ||
173 | * irqs disabled, and this lock is released upon return, but irqs remain | ||
174 | * disabled. | ||
175 | */ | ||
176 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
177 | __releases(rnp->lock) | ||
178 | { | ||
179 | unsigned long mask; | ||
180 | struct rcu_node *rnp_p; | ||
181 | |||
182 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { | ||
183 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
184 | return; /* Still need more quiescent states! */ | ||
185 | } | ||
186 | |||
187 | rnp_p = rnp->parent; | ||
188 | if (rnp_p == NULL) { | ||
189 | /* | ||
190 | * Either there is only one rcu_node in the tree, | ||
191 | * or tasks were kicked up to root rcu_node due to | ||
192 | * CPUs going offline. | ||
193 | */ | ||
194 | rcu_report_qs_rsp(&rcu_preempt_state, flags); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | /* Report up the rest of the hierarchy. */ | ||
199 | mask = rnp->grpmask; | ||
200 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
201 | spin_lock(&rnp_p->lock); /* irqs already disabled. */ | ||
202 | rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); | ||
161 | } | 203 | } |
162 | 204 | ||
205 | /* | ||
206 | * Handle special cases during rcu_read_unlock(), such as needing to | ||
207 | * notify RCU core processing, or the task having blocked during the | ||
208 | * RCU read-side critical section. | ||
209 | */ | ||
163 | static void rcu_read_unlock_special(struct task_struct *t) | 210 | static void rcu_read_unlock_special(struct task_struct *t) |
164 | { | 211 | { |
165 | int empty; | 212 | int empty; |
213 | int empty_exp; | ||
166 | unsigned long flags; | 214 | unsigned long flags; |
167 | unsigned long mask; | ||
168 | struct rcu_node *rnp; | 215 | struct rcu_node *rnp; |
169 | int special; | 216 | int special; |
170 | 217 | ||
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
207 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 254 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
208 | } | 255 | } |
209 | empty = !rcu_preempted_readers(rnp); | 256 | empty = !rcu_preempted_readers(rnp); |
257 | empty_exp = !rcu_preempted_readers_exp(rnp); | ||
258 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | ||
210 | list_del_init(&t->rcu_node_entry); | 259 | list_del_init(&t->rcu_node_entry); |
211 | t->rcu_blocked_node = NULL; | 260 | t->rcu_blocked_node = NULL; |
212 | 261 | ||
213 | /* | 262 | /* |
214 | * If this was the last task on the current list, and if | 263 | * If this was the last task on the current list, and if |
215 | * we aren't waiting on any CPUs, report the quiescent state. | 264 | * we aren't waiting on any CPUs, report the quiescent state. |
216 | * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() | 265 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. |
217 | * drop rnp->lock and restore irq. | ||
218 | */ | 266 | */ |
219 | if (!empty && rnp->qsmask == 0 && | 267 | if (empty) |
220 | !rcu_preempted_readers(rnp)) { | ||
221 | struct rcu_node *rnp_p; | ||
222 | |||
223 | if (rnp->parent == NULL) { | ||
224 | /* Only one rcu_node in the tree. */ | ||
225 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); | ||
226 | return; | ||
227 | } | ||
228 | /* Report up the rest of the hierarchy. */ | ||
229 | mask = rnp->grpmask; | ||
230 | spin_unlock_irqrestore(&rnp->lock, flags); | 268 | spin_unlock_irqrestore(&rnp->lock, flags); |
231 | rnp_p = rnp->parent; | 269 | else |
232 | spin_lock_irqsave(&rnp_p->lock, flags); | 270 | rcu_report_unblock_qs_rnp(rnp, flags); |
233 | WARN_ON_ONCE(rnp->qsmask); | 271 | |
234 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); | 272 | /* |
235 | return; | 273 | * If this was the last task on the expedited lists, |
236 | } | 274 | * then we need to report up the rcu_node hierarchy. |
237 | spin_unlock(&rnp->lock); | 275 | */ |
276 | if (!empty_exp && !rcu_preempted_readers_exp(rnp)) | ||
277 | rcu_report_exp_rnp(&rcu_preempt_state, rnp); | ||
278 | } else { | ||
279 | local_irq_restore(flags); | ||
238 | } | 280 | } |
239 | local_irq_restore(flags); | ||
240 | } | 281 | } |
241 | 282 | ||
242 | /* | 283 | /* |
@@ -303,26 +344,34 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
303 | * rcu_node. The reason for not just moving them to the immediate | 344 | * rcu_node. The reason for not just moving them to the immediate |
304 | * parent is to remove the need for rcu_read_unlock_special() to | 345 | * parent is to remove the need for rcu_read_unlock_special() to |
305 | * make more than two attempts to acquire the target rcu_node's lock. | 346 | * make more than two attempts to acquire the target rcu_node's lock. |
347 | * Returns a bitmask indicating whether tasks were blocking the | ||
348 | * current normal grace period (RCU_OFL_TASKS_NORM_GP) and/or the | ||
349 | * current expedited grace period (RCU_OFL_TASKS_EXP_GP) on the | ||
350 | * specified rcu_node structure. | ||
351 | * | ||
306 | * | 352 | * |
307 | * The caller must hold rnp->lock with irqs disabled. | 353 | * The caller must hold rnp->lock with irqs disabled. |
308 | */ | 354 | */ |
309 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 355 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
310 | struct rcu_node *rnp, | 356 | struct rcu_node *rnp, |
311 | struct rcu_data *rdp) | 357 | struct rcu_data *rdp) |
312 | { | 358 | { |
313 | int i; | 359 | int i; |
314 | struct list_head *lp; | 360 | struct list_head *lp; |
315 | struct list_head *lp_root; | 361 | struct list_head *lp_root; |
362 | int retval = 0; | ||
316 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 363 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
317 | struct task_struct *tp; | 364 | struct task_struct *tp; |
318 | 365 | ||
319 | if (rnp == rnp_root) { | 366 | if (rnp == rnp_root) { |
320 | WARN_ONCE(1, "Last CPU thought to be offlined?"); | 367 | WARN_ONCE(1, "Last CPU thought to be offlined?"); |
321 | return; /* Shouldn't happen: at least one CPU online. */ | 368 | return 0; /* Shouldn't happen: at least one CPU online. */ |
322 | } | 369 | } |
323 | WARN_ON_ONCE(rnp != rdp->mynode && | 370 | WARN_ON_ONCE(rnp != rdp->mynode && |
324 | (!list_empty(&rnp->blocked_tasks[0]) || | 371 | (!list_empty(&rnp->blocked_tasks[0]) || |
325 | !list_empty(&rnp->blocked_tasks[1]))); | 372 | !list_empty(&rnp->blocked_tasks[1]) || |
373 | !list_empty(&rnp->blocked_tasks[2]) || | ||
374 | !list_empty(&rnp->blocked_tasks[3]))); | ||
326 | 375 | ||
327 | /* | 376 | /* |
328 | * Move tasks up to root rcu_node. Rely on the fact that the | 377 | * Move tasks up to root rcu_node. Rely on the fact that the |
@@ -330,7 +379,11 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
330 | * rcu_nodes in terms of gp_num value. This fact allows us to | 379 | * rcu_nodes in terms of gp_num value. This fact allows us to |
331 | * move the blocked_tasks[] array directly, element by element. | 380 | * move the blocked_tasks[] array directly, element by element. |
332 | */ | 381 | */ |
333 | for (i = 0; i < 2; i++) { | 382 | if (rcu_preempted_readers(rnp)) |
383 | retval |= RCU_OFL_TASKS_NORM_GP; | ||
384 | if (rcu_preempted_readers_exp(rnp)) | ||
385 | retval |= RCU_OFL_TASKS_EXP_GP; | ||
386 | for (i = 0; i < 4; i++) { | ||
334 | lp = &rnp->blocked_tasks[i]; | 387 | lp = &rnp->blocked_tasks[i]; |
335 | lp_root = &rnp_root->blocked_tasks[i]; | 388 | lp_root = &rnp_root->blocked_tasks[i]; |
336 | while (!list_empty(lp)) { | 389 | while (!list_empty(lp)) { |
@@ -342,6 +395,7 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
342 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 395 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
343 | } | 396 | } |
344 | } | 397 | } |
398 | return retval; | ||
345 | } | 399 | } |
346 | 400 | ||
347 | /* | 401 | /* |
@@ -392,6 +446,186 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
392 | } | 446 | } |
393 | EXPORT_SYMBOL_GPL(call_rcu); | 447 | EXPORT_SYMBOL_GPL(call_rcu); |
394 | 448 | ||
449 | /** | ||
450 | * synchronize_rcu - wait until a grace period has elapsed. | ||
451 | * | ||
452 | * Control will return to the caller some time after a full grace | ||
453 | * period has elapsed, in other words after all currently executing RCU | ||
454 | * read-side critical sections have completed. RCU read-side critical | ||
455 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
456 | * and may be nested. | ||
457 | */ | ||
458 | void synchronize_rcu(void) | ||
459 | { | ||
460 | struct rcu_synchronize rcu; | ||
461 | |||
462 | if (!rcu_scheduler_active) | ||
463 | return; | ||
464 | |||
465 | init_completion(&rcu.completion); | ||
466 | /* Will wake me after RCU finished. */ | ||
467 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
468 | /* Wait for it. */ | ||
469 | wait_for_completion(&rcu.completion); | ||
470 | } | ||
471 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
472 | |||
473 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); | ||
474 | static long sync_rcu_preempt_exp_count; | ||
475 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | ||
476 | |||
477 | /* | ||
478 | * Return non-zero if there are any tasks in RCU read-side critical | ||
479 | * sections blocking the current preemptible-RCU expedited grace period. | ||
480 | * If there is no preemptible-RCU expedited grace period currently in | ||
481 | * progress, returns zero unconditionally. | ||
482 | */ | ||
483 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | ||
484 | { | ||
485 | return !list_empty(&rnp->blocked_tasks[2]) || | ||
486 | !list_empty(&rnp->blocked_tasks[3]); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Return non-zero if there is no RCU expedited grace period in progress | ||
491 | * for the specified rcu_node structure, in other words, if all CPUs and | ||
492 | * tasks covered by the specified rcu_node structure have done their bit | ||
493 | * for the current expedited grace period. Works only for preemptible | ||
494 | * RCU -- other RCU implementations use other means. | ||
495 | * | ||
496 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
497 | */ | ||
498 | static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | ||
499 | { | ||
500 | return !rcu_preempted_readers_exp(rnp) && | ||
501 | ACCESS_ONCE(rnp->expmask) == 0; | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Report the exit from RCU read-side critical section for the last task | ||
506 | * that queued itself during or before the current expedited preemptible-RCU | ||
507 | * grace period. This event is reported either to the rcu_node structure on | ||
508 | * which the task was queued or to one of that rcu_node structure's ancestors, | ||
509 | * recursively up the tree. (Calm down, calm down, we do the recursion | ||
510 | * iteratively!) | ||
511 | * | ||
512 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
513 | */ | ||
514 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
515 | { | ||
516 | unsigned long flags; | ||
517 | unsigned long mask; | ||
518 | |||
519 | spin_lock_irqsave(&rnp->lock, flags); | ||
520 | for (;;) { | ||
521 | if (!sync_rcu_preempt_exp_done(rnp)) | ||
522 | break; | ||
523 | if (rnp->parent == NULL) { | ||
524 | wake_up(&sync_rcu_preempt_exp_wq); | ||
525 | break; | ||
526 | } | ||
527 | mask = rnp->grpmask; | ||
528 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
529 | rnp = rnp->parent; | ||
530 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
531 | rnp->expmask &= ~mask; | ||
532 | } | ||
533 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * Snapshot the tasks blocking the newly started preemptible-RCU expedited | ||
538 | * grace period for the specified rcu_node structure. If there are no such | ||
539 | * tasks, report it up the rcu_node hierarchy. | ||
540 | * | ||
541 | * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock. | ||
542 | */ | ||
543 | static void | ||
544 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | ||
545 | { | ||
546 | int must_wait; | ||
547 | |||
548 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
549 | list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); | ||
550 | list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); | ||
551 | must_wait = rcu_preempted_readers_exp(rnp); | ||
552 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
553 | if (!must_wait) | ||
554 | rcu_report_exp_rnp(rsp, rnp); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea | ||
559 | * is to invoke synchronize_sched_expedited() to push all the tasks to | ||
560 | * the ->blocked_tasks[] lists, move all entries from the first set of | ||
561 | * ->blocked_tasks[] lists to the second set, and finally wait for this | ||
562 | * second set to drain. | ||
563 | */ | ||
564 | void synchronize_rcu_expedited(void) | ||
565 | { | ||
566 | unsigned long flags; | ||
567 | struct rcu_node *rnp; | ||
568 | struct rcu_state *rsp = &rcu_preempt_state; | ||
569 | long snap; | ||
570 | int trycount = 0; | ||
571 | |||
572 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | ||
573 | snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; | ||
574 | smp_mb(); /* Above access cannot bleed into critical section. */ | ||
575 | |||
576 | /* | ||
577 | * Acquire lock, falling back to synchronize_rcu() if too many | ||
578 | * lock-acquisition failures. Of course, if someone does the | ||
579 | * expedited grace period for us, just leave. | ||
580 | */ | ||
581 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | ||
582 | if (trycount++ < 10) | ||
583 | udelay(trycount * num_online_cpus()); | ||
584 | else { | ||
585 | synchronize_rcu(); | ||
586 | return; | ||
587 | } | ||
588 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
589 | goto mb_ret; /* Others did our work for us. */ | ||
590 | } | ||
591 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
592 | goto unlock_mb_ret; /* Others did our work for us. */ | ||
593 | |||
594 | /* Force all RCU readers onto blocked_tasks[]. */ | ||
595 | synchronize_sched_expedited(); | ||
596 | |||
597 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
598 | |||
599 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | ||
600 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | ||
601 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
602 | rnp->expmask = rnp->qsmaskinit; | ||
603 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
604 | } | ||
605 | |||
606 | /* Snapshot current state of ->blocked_tasks[] lists. */ | ||
607 | rcu_for_each_leaf_node(rsp, rnp) | ||
608 | sync_rcu_preempt_exp_init(rsp, rnp); | ||
609 | if (NUM_RCU_NODES > 1) | ||
610 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | ||
611 | |||
612 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
613 | |||
614 | /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ | ||
615 | rnp = rcu_get_root(rsp); | ||
616 | wait_event(sync_rcu_preempt_exp_wq, | ||
617 | sync_rcu_preempt_exp_done(rnp)); | ||
618 | |||
619 | /* Clean up and exit. */ | ||
620 | smp_mb(); /* ensure expedited GP seen before counter increment. */ | ||
621 | ACCESS_ONCE(sync_rcu_preempt_exp_count)++; | ||
622 | unlock_mb_ret: | ||
623 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | ||
624 | mb_ret: | ||
625 | smp_mb(); /* ensure subsequent action seen after grace period. */ | ||
626 | } | ||
627 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
628 | |||
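From an updater's point of view, the new primitive is a drop-in for synchronize_rcu() when grace-period latency matters more than CPU overhead. The following is only a hypothetical usage sketch (my_entry, my_list and my_lock are invented names, not code from this patch):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Invented example structure, not part of the patch. */
struct my_entry {
	struct list_head list;		/* traversed under rcu_read_lock() */
	int data;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);	/* serializes updaters */

static void remove_entry(struct my_entry *e)
{
	spin_lock(&my_lock);
	list_del_rcu(&e->list);
	spin_unlock(&my_lock);

	/* Wait for pre-existing readers, pushing hard to finish quickly. */
	synchronize_rcu_expedited();
	kfree(e);
}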
395 | /* | 629 | /* |
396 | * Check to see if there is any immediate preemptable-RCU-related work | 630 | * Check to see if there is any immediate preemptable-RCU-related work |
397 | * to be done. | 631 | * to be done. |
@@ -464,7 +698,7 @@ void exit_rcu(void) | |||
464 | /* | 698 | /* |
465 | * Tell them what RCU they are running. | 699 | * Tell them what RCU they are running. |
466 | */ | 700 | */ |
467 | static inline void rcu_bootup_announce(void) | 701 | static void __init rcu_bootup_announce(void) |
468 | { | 702 | { |
469 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 703 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); |
470 | } | 704 | } |
@@ -495,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp) | |||
495 | return 0; | 729 | return 0; |
496 | } | 730 | } |
497 | 731 | ||
732 | #ifdef CONFIG_HOTPLUG_CPU | ||
733 | |||
734 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | ||
735 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
736 | { | ||
737 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
738 | } | ||
739 | |||
740 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
741 | |||
498 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 742 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
499 | 743 | ||
500 | /* | 744 | /* |
@@ -521,12 +765,15 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
521 | 765 | ||
522 | /* | 766 | /* |
523 | * Because preemptable RCU does not exist, it never needs to migrate | 767 | * Because preemptable RCU does not exist, it never needs to migrate |
524 | * tasks that were blocked within RCU read-side critical sections. | 768 | * tasks that were blocked within RCU read-side critical sections, and |
769 | * such non-existent tasks cannot possibly have been blocking the current | ||
770 | * grace period. | ||
525 | */ | 771 | */ |
526 | static void rcu_preempt_offline_tasks(struct rcu_state *rsp, | 772 | static int rcu_preempt_offline_tasks(struct rcu_state *rsp, |
527 | struct rcu_node *rnp, | 773 | struct rcu_node *rnp, |
528 | struct rcu_data *rdp) | 774 | struct rcu_data *rdp) |
529 | { | 775 | { |
776 | return 0; | ||
530 | } | 777 | } |
531 | 778 | ||
532 | /* | 779 | /* |
@@ -565,6 +812,30 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
565 | EXPORT_SYMBOL_GPL(call_rcu); | 812 | EXPORT_SYMBOL_GPL(call_rcu); |
566 | 813 | ||
567 | /* | 814 | /* |
815 | * Wait for an rcu-preempt grace period, but make it happen quickly. | ||
816 | * Because preemptable RCU does not exist, map to rcu-sched. | ||
817 | */ | ||
818 | void synchronize_rcu_expedited(void) | ||
819 | { | ||
820 | synchronize_sched_expedited(); | ||
821 | } | ||
822 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | ||
823 | |||
824 | #ifdef CONFIG_HOTPLUG_CPU | ||
825 | |||
826 | /* | ||
827 | * Because preemptable RCU does not exist, there is never any need to | ||
828 | * report on tasks preempted in RCU read-side critical sections during | ||
829 | * expedited RCU grace periods. | ||
830 | */ | ||
831 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
832 | { | ||
833 | return; | ||
834 | } | ||
835 | |||
836 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
837 | |||
838 | /* | ||
568 | * Because preemptable RCU does not exist, it never has any work to do. | 839 | * Because preemptable RCU does not exist, it never has any work to do. |
569 | */ | 840 | */ |
570 | static int rcu_preempt_pending(int cpu) | 841 | static int rcu_preempt_pending(int cpu) |
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c index 4b31c779e62e..9d2c88423b31 100644 --- a/kernel/rcutree_trace.c +++ b/kernel/rcutree_trace.c | |||
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = { | |||
155 | 155 | ||
156 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | 156 | static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) |
157 | { | 157 | { |
158 | long gpnum; | ||
158 | int level = 0; | 159 | int level = 0; |
160 | int phase; | ||
159 | struct rcu_node *rnp; | 161 | struct rcu_node *rnp; |
160 | 162 | ||
163 | gpnum = rsp->gpnum; | ||
161 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " | 164 | seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " |
162 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", | 165 | "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", |
163 | rsp->completed, rsp->gpnum, rsp->signaled, | 166 | rsp->completed, gpnum, rsp->signaled, |
164 | (long)(rsp->jiffies_force_qs - jiffies), | 167 | (long)(rsp->jiffies_force_qs - jiffies), |
165 | (int)(jiffies & 0xffff), | 168 | (int)(jiffies & 0xffff), |
166 | rsp->n_force_qs, rsp->n_force_qs_ngp, | 169 | rsp->n_force_qs, rsp->n_force_qs_ngp, |
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) | |||
171 | seq_puts(m, "\n"); | 174 | seq_puts(m, "\n"); |
172 | level = rnp->level; | 175 | level = rnp->level; |
173 | } | 176 | } |
174 | seq_printf(m, "%lx/%lx %d:%d ^%d ", | 177 | phase = gpnum & 0x1; |
178 | seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ", | ||
175 | rnp->qsmask, rnp->qsmaskinit, | 179 | rnp->qsmask, rnp->qsmaskinit, |
180 | "T."[list_empty(&rnp->blocked_tasks[phase])], | ||
181 | "E."[list_empty(&rnp->blocked_tasks[phase + 2])], | ||
182 | "T."[list_empty(&rnp->blocked_tasks[!phase])], | ||
183 | "E."[list_empty(&rnp->blocked_tasks[!phase + 2])], | ||
176 | rnp->grplo, rnp->grphi, rnp->grpnum); | 184 | rnp->grplo, rnp->grphi, rnp->grpnum); |
177 | } | 185 | } |
178 | seq_puts(m, "\n"); | 186 | seq_puts(m, "\n"); |
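The new per-node columns use the "T."[cond] string-indexing idiom: index 0 selects 'T' (tasks queued on that list), index 1 selects '.' (list empty). A minimal user-space check of the idiom:

#include <stdio.h>

int main(void)
{
	int list_is_empty = 1;		/* pretend the blocked-tasks list is empty */

	/* Prints '.'; with list_is_empty == 0 it would print 'T'. */
	printf("%c\n", "T."[list_is_empty]);
	return 0;
}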
diff --git a/kernel/sched.c b/kernel/sched.c index e88689522e66..6ae2739b8f19 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | |||
309 | */ | 309 | */ |
310 | static DEFINE_SPINLOCK(task_group_lock); | 310 | static DEFINE_SPINLOCK(task_group_lock); |
311 | 311 | ||
312 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
313 | |||
312 | #ifdef CONFIG_SMP | 314 | #ifdef CONFIG_SMP |
313 | static int root_task_group_empty(void) | 315 | static int root_task_group_empty(void) |
314 | { | 316 | { |
@@ -316,7 +318,6 @@ static int root_task_group_empty(void) | |||
316 | } | 318 | } |
317 | #endif | 319 | #endif |
318 | 320 | ||
319 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
320 | #ifdef CONFIG_USER_SCHED | 321 | #ifdef CONFIG_USER_SCHED |
321 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) | 322 | # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) |
322 | #else /* !CONFIG_USER_SCHED */ | 323 | #else /* !CONFIG_USER_SCHED */ |
@@ -1564,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) | |||
1564 | 1565 | ||
1565 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1566 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1566 | 1567 | ||
1567 | struct update_shares_data { | 1568 | static __read_mostly unsigned long *update_shares_data; |
1568 | unsigned long rq_weight[NR_CPUS]; | ||
1569 | }; | ||
1570 | |||
1571 | static DEFINE_PER_CPU(struct update_shares_data, update_shares_data); | ||
1572 | 1569 | ||
1573 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1570 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1574 | 1571 | ||
@@ -1578,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); | |||
1578 | static void update_group_shares_cpu(struct task_group *tg, int cpu, | 1575 | static void update_group_shares_cpu(struct task_group *tg, int cpu, |
1579 | unsigned long sd_shares, | 1576 | unsigned long sd_shares, |
1580 | unsigned long sd_rq_weight, | 1577 | unsigned long sd_rq_weight, |
1581 | struct update_shares_data *usd) | 1578 | unsigned long *usd_rq_weight) |
1582 | { | 1579 | { |
1583 | unsigned long shares, rq_weight; | 1580 | unsigned long shares, rq_weight; |
1584 | int boost = 0; | 1581 | int boost = 0; |
1585 | 1582 | ||
1586 | rq_weight = usd->rq_weight[cpu]; | 1583 | rq_weight = usd_rq_weight[cpu]; |
1587 | if (!rq_weight) { | 1584 | if (!rq_weight) { |
1588 | boost = 1; | 1585 | boost = 1; |
1589 | rq_weight = NICE_0_LOAD; | 1586 | rq_weight = NICE_0_LOAD; |
@@ -1618,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1618 | static int tg_shares_up(struct task_group *tg, void *data) | 1615 | static int tg_shares_up(struct task_group *tg, void *data) |
1619 | { | 1616 | { |
1620 | unsigned long weight, rq_weight = 0, shares = 0; | 1617 | unsigned long weight, rq_weight = 0, shares = 0; |
1621 | struct update_shares_data *usd; | 1618 | unsigned long *usd_rq_weight; |
1622 | struct sched_domain *sd = data; | 1619 | struct sched_domain *sd = data; |
1623 | unsigned long flags; | 1620 | unsigned long flags; |
1624 | int i; | 1621 | int i; |
@@ -1627,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1627 | return 0; | 1624 | return 0; |
1628 | 1625 | ||
1629 | local_irq_save(flags); | 1626 | local_irq_save(flags); |
1630 | usd = &__get_cpu_var(update_shares_data); | 1627 | usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id()); |
1631 | 1628 | ||
1632 | for_each_cpu(i, sched_domain_span(sd)) { | 1629 | for_each_cpu(i, sched_domain_span(sd)) { |
1633 | weight = tg->cfs_rq[i]->load.weight; | 1630 | weight = tg->cfs_rq[i]->load.weight; |
1634 | usd->rq_weight[i] = weight; | 1631 | usd_rq_weight[i] = weight; |
1635 | 1632 | ||
1636 | /* | 1633 | /* |
1637 | * If there are currently no tasks on the cpu pretend there | 1634 | * If there are currently no tasks on the cpu pretend there |
@@ -1652,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1652 | shares = tg->shares; | 1649 | shares = tg->shares; |
1653 | 1650 | ||
1654 | for_each_cpu(i, sched_domain_span(sd)) | 1651 | for_each_cpu(i, sched_domain_span(sd)) |
1655 | update_group_shares_cpu(tg, i, shares, rq_weight, usd); | 1652 | update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight); |
1656 | 1653 | ||
1657 | local_irq_restore(flags); | 1654 | local_irq_restore(flags); |
1658 | 1655 | ||
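Here the fixed NR_CPUS-sized per-CPU structure is replaced by a per-CPU buffer sized at boot from nr_cpu_ids, which can be much smaller. A hypothetical sketch of the same pattern (weights and record_weight are invented names, not from this patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static unsigned long *weights;		/* one nr_cpu_ids-sized array per CPU */

static int __init weights_init(void)
{
	weights = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
				 __alignof__(unsigned long));
	return weights ? 0 : -ENOMEM;
}

static void record_weight(int cpu, unsigned long w)
{
	unsigned long flags;
	unsigned long *my;

	local_irq_save(flags);		/* pin this CPU, as tg_shares_up() does */
	my = per_cpu_ptr(weights, smp_processor_id());
	my[cpu] = w;
	local_irq_restore(flags);
}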
@@ -1996,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p, | |||
1996 | p->sched_class->prio_changed(rq, p, oldprio, running); | 1993 | p->sched_class->prio_changed(rq, p, oldprio, running); |
1997 | } | 1994 | } |
1998 | 1995 | ||
1996 | /** | ||
1997 | * kthread_bind - bind a just-created kthread to a cpu. | ||
1998 | * @p: thread created by kthread_create(). | ||
1999 | * @cpu: cpu (might not be online, must be possible) for @p to run on. | ||
2000 | * | ||
2001 | * Description: This function is equivalent to set_cpus_allowed(), | ||
2002 | * except that @cpu doesn't need to be online, and the thread must be | ||
2003 | * stopped (i.e., just returned from kthread_create()). | ||
2004 | * | ||
2005 | * Function lives here instead of kthread.c because it messes with | ||
2006 | * scheduler internals which require locking. | ||
2007 | */ | ||
2008 | void kthread_bind(struct task_struct *p, unsigned int cpu) | ||
2009 | { | ||
2010 | struct rq *rq = cpu_rq(cpu); | ||
2011 | unsigned long flags; | ||
2012 | |||
2013 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
2014 | if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { | ||
2015 | WARN_ON(1); | ||
2016 | return; | ||
2017 | } | ||
2018 | |||
2019 | spin_lock_irqsave(&rq->lock, flags); | ||
2020 | set_task_cpu(p, cpu); | ||
2021 | p->cpus_allowed = cpumask_of_cpu(cpu); | ||
2022 | p->rt.nr_cpus_allowed = 1; | ||
2023 | p->flags |= PF_THREAD_BOUND; | ||
2024 | spin_unlock_irqrestore(&rq->lock, flags); | ||
2025 | } | ||
2026 | EXPORT_SYMBOL(kthread_bind); | ||
2027 | |||
1999 | #ifdef CONFIG_SMP | 2028 | #ifdef CONFIG_SMP |
2000 | /* | 2029 | /* |
2001 | * Is this task likely cache-hot: | 2030 | * Is this task likely cache-hot: |
@@ -2008,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | |||
2008 | /* | 2037 | /* |
2009 | * Buddy candidates are cache hot: | 2038 | * Buddy candidates are cache hot: |
2010 | */ | 2039 | */ |
2011 | if (sched_feat(CACHE_HOT_BUDDY) && | 2040 | if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running && |
2012 | (&p->se == cfs_rq_of(&p->se)->next || | 2041 | (&p->se == cfs_rq_of(&p->se)->next || |
2013 | &p->se == cfs_rq_of(&p->se)->last)) | 2042 | &p->se == cfs_rq_of(&p->se)->last)) |
2014 | return 1; | 2043 | return 1; |
@@ -5452,7 +5481,7 @@ need_resched_nonpreemptible: | |||
5452 | } | 5481 | } |
5453 | EXPORT_SYMBOL(schedule); | 5482 | EXPORT_SYMBOL(schedule); |
5454 | 5483 | ||
5455 | #ifdef CONFIG_SMP | 5484 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
5456 | /* | 5485 | /* |
5457 | * Look out! "owner" is an entirely speculative pointer | 5486 | * Look out! "owner" is an entirely speculative pointer |
5458 | * access and not reliable. | 5487 | * access and not reliable. |
@@ -9407,6 +9436,10 @@ void __init sched_init(void) | |||
9407 | #endif /* CONFIG_USER_SCHED */ | 9436 | #endif /* CONFIG_USER_SCHED */ |
9408 | #endif /* CONFIG_GROUP_SCHED */ | 9437 | #endif /* CONFIG_GROUP_SCHED */ |
9409 | 9438 | ||
9439 | #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP | ||
9440 | update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long), | ||
9441 | __alignof__(unsigned long)); | ||
9442 | #endif | ||
9410 | for_each_possible_cpu(i) { | 9443 | for_each_possible_cpu(i) { |
9411 | struct rq *rq; | 9444 | struct rq *rq; |
9412 | 9445 | ||
@@ -9532,13 +9565,13 @@ void __init sched_init(void) | |||
9532 | current->sched_class = &fair_sched_class; | 9565 | current->sched_class = &fair_sched_class; |
9533 | 9566 | ||
9534 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | 9567 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ |
9535 | alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); | 9568 | zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); |
9536 | #ifdef CONFIG_SMP | 9569 | #ifdef CONFIG_SMP |
9537 | #ifdef CONFIG_NO_HZ | 9570 | #ifdef CONFIG_NO_HZ |
9538 | alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | 9571 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); |
9539 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | 9572 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); |
9540 | #endif | 9573 | #endif |
9541 | alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | 9574 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
9542 | #endif /* SMP */ | 9575 | #endif /* SMP */ |
9543 | 9576 | ||
9544 | perf_event_init(); | 9577 | perf_event_init(); |
@@ -10868,6 +10901,7 @@ void synchronize_sched_expedited(void) | |||
10868 | spin_unlock_irqrestore(&rq->lock, flags); | 10901 | spin_unlock_irqrestore(&rq->lock, flags); |
10869 | } | 10902 | } |
10870 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; | 10903 | rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; |
10904 | synchronize_sched_expedited_count++; | ||
10871 | mutex_unlock(&rcu_sched_expedited_mutex); | 10905 | mutex_unlock(&rcu_sched_expedited_mutex); |
10872 | put_online_cpus(); | 10906 | put_online_cpus(); |
10873 | if (need_full_sync) | 10907 | if (need_full_sync) |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 4e777b47eeda..37087a7fac22 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
822 | * re-elected due to buddy favours. | 822 | * re-elected due to buddy favours. |
823 | */ | 823 | */ |
824 | clear_buddies(cfs_rq, curr); | 824 | clear_buddies(cfs_rq, curr); |
825 | return; | ||
826 | } | ||
827 | |||
828 | /* | ||
829 | * Ensure that a task that missed wakeup preemption by a | ||
830 | * narrow margin doesn't have to wait for a full slice. | ||
831 | * This also mitigates buddy induced latencies under load. | ||
832 | */ | ||
833 | if (!sched_feat(WAKEUP_PREEMPT)) | ||
834 | return; | ||
835 | |||
836 | if (delta_exec < sysctl_sched_min_granularity) | ||
837 | return; | ||
838 | |||
839 | if (cfs_rq->nr_running > 1) { | ||
840 | struct sched_entity *se = __pick_next_entity(cfs_rq); | ||
841 | s64 delta = curr->vruntime - se->vruntime; | ||
842 | |||
843 | if (delta > ideal_runtime) | ||
844 | resched_task(rq_of(cfs_rq)->curr); | ||
825 | } | 845 | } |
826 | } | 846 | } |
827 | 847 | ||
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); | |||
861 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) | 881 | static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) |
862 | { | 882 | { |
863 | struct sched_entity *se = __pick_next_entity(cfs_rq); | 883 | struct sched_entity *se = __pick_next_entity(cfs_rq); |
884 | struct sched_entity *left = se; | ||
864 | 885 | ||
865 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1) | 886 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) |
866 | return cfs_rq->next; | 887 | se = cfs_rq->next; |
867 | 888 | ||
868 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1) | 889 | /* |
869 | return cfs_rq->last; | 890 | * Prefer last buddy, try to return the CPU to a preempted task. |
891 | */ | ||
892 | if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) | ||
893 | se = cfs_rq->last; | ||
894 | |||
895 | clear_buddies(cfs_rq, se); | ||
870 | 896 | ||
871 | return se; | 897 | return se; |
872 | } | 898 | } |
@@ -1568,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1568 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1594 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1569 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1595 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1570 | int sync = wake_flags & WF_SYNC; | 1596 | int sync = wake_flags & WF_SYNC; |
1597 | int scale = cfs_rq->nr_running >= sched_nr_latency; | ||
1571 | 1598 | ||
1572 | update_curr(cfs_rq); | 1599 | update_curr(cfs_rq); |
1573 | 1600 | ||
@@ -1582,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1582 | if (unlikely(se == pse)) | 1609 | if (unlikely(se == pse)) |
1583 | return; | 1610 | return; |
1584 | 1611 | ||
1585 | /* | 1612 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) |
1586 | * Only set the backward buddy when the current task is still on the | ||
1587 | * rq. This can happen when a wakeup gets interleaved with schedule on | ||
1588 | * the ->pre_schedule() or idle_balance() point, either of which can | ||
1589 | * drop the rq lock. | ||
1590 | * | ||
1591 | * Also, during early boot the idle thread is in the fair class, for | ||
1592 | * obvious reasons its a bad idea to schedule back to the idle thread. | ||
1593 | */ | ||
1594 | if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle)) | ||
1595 | set_last_buddy(se); | ||
1596 | if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) | ||
1597 | set_next_buddy(pse); | 1613 | set_next_buddy(pse); |
1598 | 1614 | ||
1599 | /* | 1615 | /* |
@@ -1639,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | |||
1639 | 1655 | ||
1640 | BUG_ON(!pse); | 1656 | BUG_ON(!pse); |
1641 | 1657 | ||
1642 | if (wakeup_preempt_entity(se, pse) == 1) | 1658 | if (wakeup_preempt_entity(se, pse) == 1) { |
1643 | resched_task(curr); | 1659 | resched_task(curr); |
1660 | /* | ||
1661 | * Only set the backward buddy when the current task is still | ||
1662 | * on the rq. This can happen when a wakeup gets interleaved | ||
1663 | * with schedule on the ->pre_schedule() or idle_balance() | ||
1664 | * point, either of which can drop the rq lock. | ||
1665 | * | ||
1666 | * Also, during early boot the idle thread is in the fair class, | ||
1667 | * for obvious reasons it's a bad idea to schedule back to it. | ||
1668 | */ | ||
1669 | if (unlikely(!se->on_rq || curr == rq->idle)) | ||
1670 | return; | ||
1671 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) | ||
1672 | set_last_buddy(se); | ||
1673 | } | ||
1644 | } | 1674 | } |
1645 | 1675 | ||
1646 | static struct task_struct *pick_next_task_fair(struct rq *rq) | 1676 | static struct task_struct *pick_next_task_fair(struct rq *rq) |
@@ -1654,16 +1684,6 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) | |||
1654 | 1684 | ||
1655 | do { | 1685 | do { |
1656 | se = pick_next_entity(cfs_rq); | 1686 | se = pick_next_entity(cfs_rq); |
1657 | /* | ||
1658 | * If se was a buddy, clear it so that it will have to earn | ||
1659 | * the favour again. | ||
1660 | * | ||
1661 | * If se was not a buddy, clear the buddies because neither | ||
1662 | * was elegible to run, let them earn it again. | ||
1663 | * | ||
1664 | * IOW. unconditionally clear buddies. | ||
1665 | */ | ||
1666 | __clear_buddies(cfs_rq, NULL); | ||
1667 | set_next_entity(cfs_rq, se); | 1687 | set_next_entity(cfs_rq, se); |
1668 | cfs_rq = group_cfs_rq(se); | 1688 | cfs_rq = group_cfs_rq(se); |
1669 | } while (cfs_rq); | 1689 | } while (cfs_rq); |
diff --git a/kernel/signal.c b/kernel/signal.c index 6705320784fd..fe08008133da 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/signalfd.h> | 24 | #include <linux/signalfd.h> |
25 | #include <linux/ratelimit.h> | ||
25 | #include <linux/tracehook.h> | 26 | #include <linux/tracehook.h> |
26 | #include <linux/capability.h> | 27 | #include <linux/capability.h> |
27 | #include <linux/freezer.h> | 28 | #include <linux/freezer.h> |
@@ -41,6 +42,8 @@ | |||
41 | 42 | ||
42 | static struct kmem_cache *sigqueue_cachep; | 43 | static struct kmem_cache *sigqueue_cachep; |
43 | 44 | ||
45 | int print_fatal_signals __read_mostly; | ||
46 | |||
44 | static void __user *sig_handler(struct task_struct *t, int sig) | 47 | static void __user *sig_handler(struct task_struct *t, int sig) |
45 | { | 48 | { |
46 | return t->sighand->action[sig - 1].sa.sa_handler; | 49 | return t->sighand->action[sig - 1].sa.sa_handler; |
@@ -159,7 +162,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
159 | { | 162 | { |
160 | unsigned long i, *s, *m, x; | 163 | unsigned long i, *s, *m, x; |
161 | int sig = 0; | 164 | int sig = 0; |
162 | 165 | ||
163 | s = pending->signal.sig; | 166 | s = pending->signal.sig; |
164 | m = mask->sig; | 167 | m = mask->sig; |
165 | switch (_NSIG_WORDS) { | 168 | switch (_NSIG_WORDS) { |
@@ -184,17 +187,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
184 | sig = ffz(~x) + 1; | 187 | sig = ffz(~x) + 1; |
185 | break; | 188 | break; |
186 | } | 189 | } |
187 | 190 | ||
188 | return sig; | 191 | return sig; |
189 | } | 192 | } |
190 | 193 | ||
194 | static inline void print_dropped_signal(int sig) | ||
195 | { | ||
196 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | ||
197 | |||
198 | if (!print_fatal_signals) | ||
199 | return; | ||
200 | |||
201 | if (!__ratelimit(&ratelimit_state)) | ||
202 | return; | ||
203 | |||
204 | printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", | ||
205 | current->comm, current->pid, sig); | ||
206 | } | ||
207 | |||
191 | /* | 208 | /* |
192 | * allocate a new signal queue record | 209 | * allocate a new signal queue record |
193 | * - this may be called without locks if and only if t == current, otherwise an | 210 | * - this may be called without locks if and only if t == current, otherwise an |
194 | * appropriate lock must be held to stop the target task from exiting | 211 | * appropriate lock must be held to stop the target task from exiting |
195 | */ | 212 | */ |
196 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | 213 | static struct sigqueue * |
197 | int override_rlimit) | 214 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) |
198 | { | 215 | { |
199 | struct sigqueue *q = NULL; | 216 | struct sigqueue *q = NULL; |
200 | struct user_struct *user; | 217 | struct user_struct *user; |
@@ -207,10 +224,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
207 | */ | 224 | */ |
208 | user = get_uid(__task_cred(t)->user); | 225 | user = get_uid(__task_cred(t)->user); |
209 | atomic_inc(&user->sigpending); | 226 | atomic_inc(&user->sigpending); |
227 | |||
210 | if (override_rlimit || | 228 | if (override_rlimit || |
211 | atomic_read(&user->sigpending) <= | 229 | atomic_read(&user->sigpending) <= |
212 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) | 230 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) { |
213 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 231 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
232 | } else { | ||
233 | print_dropped_signal(sig); | ||
234 | } | ||
235 | |||
214 | if (unlikely(q == NULL)) { | 236 | if (unlikely(q == NULL)) { |
215 | atomic_dec(&user->sigpending); | 237 | atomic_dec(&user->sigpending); |
216 | free_uid(user); | 238 | free_uid(user); |
@@ -869,7 +891,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
869 | else | 891 | else |
870 | override_rlimit = 0; | 892 | override_rlimit = 0; |
871 | 893 | ||
872 | q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, | 894 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, |
873 | override_rlimit); | 895 | override_rlimit); |
874 | if (q) { | 896 | if (q) { |
875 | list_add_tail(&q->list, &pending->list); | 897 | list_add_tail(&q->list, &pending->list); |
@@ -925,8 +947,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
925 | return __send_signal(sig, info, t, group, from_ancestor_ns); | 947 | return __send_signal(sig, info, t, group, from_ancestor_ns); |
926 | } | 948 | } |
927 | 949 | ||
928 | int print_fatal_signals; | ||
929 | |||
930 | static void print_fatal_signal(struct pt_regs *regs, int signr) | 950 | static void print_fatal_signal(struct pt_regs *regs, int signr) |
931 | { | 951 | { |
932 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 952 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
@@ -1293,19 +1313,19 @@ EXPORT_SYMBOL(kill_pid); | |||
1293 | * These functions support sending signals using preallocated sigqueue | 1313 | * These functions support sending signals using preallocated sigqueue |
1294 | * structures. This is needed "because realtime applications cannot | 1314 | * structures. This is needed "because realtime applications cannot |
1295 | * afford to lose notifications of asynchronous events, like timer | 1315 | * afford to lose notifications of asynchronous events, like timer |
1296 | * expirations or I/O completions". In the case of Posix Timers | 1316 | * expirations or I/O completions". In the case of Posix Timers |
1297 | * we allocate the sigqueue structure from the timer_create. If this | 1317 | * we allocate the sigqueue structure from the timer_create. If this |
1298 | * allocation fails we are able to report the failure to the application | 1318 | * allocation fails we are able to report the failure to the application |
1299 | * with an EAGAIN error. | 1319 | * with an EAGAIN error. |
1300 | */ | 1320 | */ |
1301 | |||
1302 | struct sigqueue *sigqueue_alloc(void) | 1321 | struct sigqueue *sigqueue_alloc(void) |
1303 | { | 1322 | { |
1304 | struct sigqueue *q; | 1323 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
1305 | 1324 | ||
1306 | if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) | 1325 | if (q) |
1307 | q->flags |= SIGQUEUE_PREALLOC; | 1326 | q->flags |= SIGQUEUE_PREALLOC; |
1308 | return(q); | 1327 | |
1328 | return q; | ||
1309 | } | 1329 | } |
1310 | 1330 | ||
1311 | void sigqueue_free(struct sigqueue *q) | 1331 | void sigqueue_free(struct sigqueue *q) |
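The print_dropped_signal() helper added above is an instance of the generic printk ratelimiting pattern from <linux/ratelimit.h>. A minimal sketch of that pattern, not part of this patch (report_overflow() and its message are invented for illustration):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Allow at most 10 messages per 5-second window, then stay silent until
 * the window rolls over (the same bounds used by print_dropped_signal). */
static void report_overflow(int value)
{
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

	if (!__ratelimit(&rs))
		return;

	printk(KERN_INFO "overflow detected, value %d\n", value);
}

__ratelimit() returns non-zero while the burst quota for the current interval is still available, so dropped events beyond the quota are silently discarded rather than flooding the log.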
diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c new file mode 100644 index 000000000000..e45c43645298 --- /dev/null +++ b/kernel/slow-work-debugfs.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* Slow work debugging | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/slow-work.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/time.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include "slow-work.h" | ||
18 | |||
19 | #define ITERATOR_SHIFT (BITS_PER_LONG - 4) | ||
20 | #define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) | ||
21 | #define ITERATOR_COUNTER (~ITERATOR_SELECTOR) | ||
22 | |||
23 | void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) | ||
24 | { | ||
25 | seq_puts(m, "Slow-work: New thread"); | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * Render the time mark field on a work item into a 5-char time with units plus | ||
30 | * a space | ||
31 | */ | ||
32 | static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) | ||
33 | { | ||
34 | struct timespec now, diff; | ||
35 | |||
36 | now = CURRENT_TIME; | ||
37 | diff = timespec_sub(now, work->mark); | ||
38 | |||
39 | if (diff.tv_sec < 0) | ||
40 | seq_puts(m, " -ve "); | ||
41 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) | ||
42 | seq_printf(m, "%3luns ", diff.tv_nsec); | ||
43 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) | ||
44 | seq_printf(m, "%3luus ", diff.tv_nsec / 1000); | ||
45 | else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) | ||
46 | seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); | ||
47 | else if (diff.tv_sec <= 1) | ||
48 | seq_puts(m, " 1s "); | ||
49 | else if (diff.tv_sec < 60) | ||
50 | seq_printf(m, "%4lus ", diff.tv_sec); | ||
51 | else if (diff.tv_sec < 60 * 60) | ||
52 | seq_printf(m, "%4lum ", diff.tv_sec / 60); | ||
53 | else if (diff.tv_sec < 60 * 60 * 24) | ||
54 | seq_printf(m, "%4luh ", diff.tv_sec / 3600); | ||
55 | else | ||
56 | seq_puts(m, "exces "); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Describe a slow work item for debugfs | ||
61 | */ | ||
62 | static int slow_work_runqueue_show(struct seq_file *m, void *v) | ||
63 | { | ||
64 | struct slow_work *work; | ||
65 | struct list_head *p = v; | ||
66 | unsigned long id; | ||
67 | |||
68 | switch ((unsigned long) v) { | ||
69 | case 1: | ||
70 | seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); | ||
71 | return 0; | ||
72 | case 2: | ||
73 | seq_puts(m, "=== ===== ================ == ===== ==========\n"); | ||
74 | return 0; | ||
75 | |||
76 | case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: | ||
77 | id = (unsigned long) v - 3; | ||
78 | |||
79 | read_lock(&slow_work_execs_lock); | ||
80 | work = slow_work_execs[id]; | ||
81 | if (work) { | ||
82 | smp_read_barrier_depends(); | ||
83 | |||
84 | seq_printf(m, "%3lu %5d %16p %2lx ", | ||
85 | id, slow_work_pids[id], work, work->flags); | ||
86 | slow_work_print_mark(m, work); | ||
87 | |||
88 | if (work->ops->desc) | ||
89 | work->ops->desc(work, m); | ||
90 | seq_putc(m, '\n'); | ||
91 | } | ||
92 | read_unlock(&slow_work_execs_lock); | ||
93 | return 0; | ||
94 | |||
95 | default: | ||
96 | work = list_entry(p, struct slow_work, link); | ||
97 | seq_printf(m, "%3s - %16p %2lx ", | ||
98 | work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq", | ||
99 | work, work->flags); | ||
100 | slow_work_print_mark(m, work); | ||
101 | |||
102 | if (work->ops->desc) | ||
103 | work->ops->desc(work, m); | ||
104 | seq_putc(m, '\n'); | ||
105 | return 0; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * map the iterator to a work item | ||
111 | */ | ||
112 | static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) | ||
113 | { | ||
114 | struct list_head *p; | ||
115 | unsigned long count, id; | ||
116 | |||
117 | switch (*_pos >> ITERATOR_SHIFT) { | ||
118 | case 0x0: | ||
119 | if (*_pos == 0) | ||
120 | *_pos = 1; | ||
121 | if (*_pos < 3) | ||
122 | return (void *)(unsigned long) *_pos; | ||
123 | if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) | ||
124 | for (id = *_pos - 3; | ||
125 | id < SLOW_WORK_THREAD_LIMIT; | ||
126 | id++, (*_pos)++) | ||
127 | if (slow_work_execs[id]) | ||
128 | return (void *)(unsigned long) *_pos; | ||
129 | *_pos = 0x1UL << ITERATOR_SHIFT; | ||
130 | |||
131 | case 0x1: | ||
132 | count = *_pos & ITERATOR_COUNTER; | ||
133 | list_for_each(p, &slow_work_queue) { | ||
134 | if (count == 0) | ||
135 | return p; | ||
136 | count--; | ||
137 | } | ||
138 | *_pos = 0x2UL << ITERATOR_SHIFT; | ||
139 | |||
140 | case 0x2: | ||
141 | count = *_pos & ITERATOR_COUNTER; | ||
142 | list_for_each(p, &vslow_work_queue) { | ||
143 | if (count == 0) | ||
144 | return p; | ||
145 | count--; | ||
146 | } | ||
147 | *_pos = 0x3UL << ITERATOR_SHIFT; | ||
148 | |||
149 | default: | ||
150 | return NULL; | ||
151 | } | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * set up the iterator to start reading from the first line | ||
156 | */ | ||
157 | static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) | ||
158 | { | ||
159 | spin_lock_irq(&slow_work_queue_lock); | ||
160 | return slow_work_runqueue_index(m, _pos); | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * move to the next line | ||
165 | */ | ||
166 | static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) | ||
167 | { | ||
168 | struct list_head *p = v; | ||
169 | unsigned long selector = *_pos >> ITERATOR_SHIFT; | ||
170 | |||
171 | (*_pos)++; | ||
172 | switch (selector) { | ||
173 | case 0x0: | ||
174 | return slow_work_runqueue_index(m, _pos); | ||
175 | |||
176 | case 0x1: | ||
177 | if (*_pos >> ITERATOR_SHIFT == 0x1) { | ||
178 | p = p->next; | ||
179 | if (p != &slow_work_queue) | ||
180 | return p; | ||
181 | } | ||
182 | *_pos = 0x2UL << ITERATOR_SHIFT; | ||
183 | p = &vslow_work_queue; | ||
184 | |||
185 | case 0x2: | ||
186 | if (*_pos >> ITERATOR_SHIFT == 0x2) { | ||
187 | p = p->next; | ||
188 | if (p != &vslow_work_queue) | ||
189 | return p; | ||
190 | } | ||
191 | *_pos = 0x3UL << ITERATOR_SHIFT; | ||
192 | |||
193 | default: | ||
194 | return NULL; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * clean up after reading | ||
200 | */ | ||
201 | static void slow_work_runqueue_stop(struct seq_file *m, void *v) | ||
202 | { | ||
203 | spin_unlock_irq(&slow_work_queue_lock); | ||
204 | } | ||
205 | |||
206 | static const struct seq_operations slow_work_runqueue_ops = { | ||
207 | .start = slow_work_runqueue_start, | ||
208 | .stop = slow_work_runqueue_stop, | ||
209 | .next = slow_work_runqueue_next, | ||
210 | .show = slow_work_runqueue_show, | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents | ||
215 | */ | ||
216 | static int slow_work_runqueue_open(struct inode *inode, struct file *file) | ||
217 | { | ||
218 | return seq_open(file, &slow_work_runqueue_ops); | ||
219 | } | ||
220 | |||
221 | const struct file_operations slow_work_runqueue_fops = { | ||
222 | .owner = THIS_MODULE, | ||
223 | .open = slow_work_runqueue_open, | ||
224 | .read = seq_read, | ||
225 | .llseek = seq_lseek, | ||
226 | .release = seq_release, | ||
227 | }; | ||
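The runqueue listing above fills its DESC column from each item's ->desc() operation, so a slow-work user only shows up usefully in /sys/kernel/debug/slow_work/runqueue if it supplies one. A hedged sketch of such an ops table (struct my_work, my_work_execute() and my_work_desc() are invented; .owner and .desc are the fields this patch series adds to struct slow_work_ops):

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/seq_file.h>

struct my_work {
	struct slow_work	slow;
	unsigned int		id;
};

static void my_work_execute(struct slow_work *work)
{
	/* the actual slow operation would go here */
}

#ifdef CONFIG_SLOW_WORK_DEBUG
/* Emitted as one line of the runqueue file per queued/executing item */
static void my_work_desc(struct slow_work *work, struct seq_file *m)
{
	struct my_work *w = container_of(work, struct my_work, slow);

	seq_printf(m, "my-module: object %u", w->id);
}
#endif

static const struct slow_work_ops my_work_ops = {
	.owner		= THIS_MODULE,
	.execute	= my_work_execute,
#ifdef CONFIG_SLOW_WORK_DEBUG
	.desc		= my_work_desc,
#endif
};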
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 0d31135efbf4..00889bd3c590 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
@@ -16,11 +16,8 @@ | |||
16 | #include <linux/kthread.h> | 16 | #include <linux/kthread.h> |
17 | #include <linux/freezer.h> | 17 | #include <linux/freezer.h> |
18 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
19 | 19 | #include <linux/debugfs.h> | |
20 | #define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of | 20 | #include "slow-work.h" |
21 | * things to do */ | ||
22 | #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after | ||
23 | * OOM */ | ||
24 | 21 | ||
25 | static void slow_work_cull_timeout(unsigned long); | 22 | static void slow_work_cull_timeout(unsigned long); |
26 | static void slow_work_oom_timeout(unsigned long); | 23 | static void slow_work_oom_timeout(unsigned long); |
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process | |||
46 | 43 | ||
47 | #ifdef CONFIG_SYSCTL | 44 | #ifdef CONFIG_SYSCTL |
48 | static const int slow_work_min_min_threads = 2; | 45 | static const int slow_work_min_min_threads = 2; |
49 | static int slow_work_max_max_threads = 255; | 46 | static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; |
50 | static const int slow_work_min_vslow = 1; | 47 | static const int slow_work_min_vslow = 1; |
51 | static const int slow_work_max_vslow = 99; | 48 | static const int slow_work_max_vslow = 99; |
52 | 49 | ||
@@ -98,6 +95,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); | |||
98 | static struct slow_work slow_work_new_thread; /* new thread starter */ | 95 | static struct slow_work slow_work_new_thread; /* new thread starter */ |
99 | 96 | ||
100 | /* | 97 | /* |
98 | * slow work ID allocation (use slow_work_queue_lock) | ||
99 | */ | ||
100 | static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); | ||
101 | |||
102 | /* | ||
103 | * Unregistration tracking to prevent put_ref() from disappearing during module | ||
104 | * unload | ||
105 | */ | ||
106 | #ifdef CONFIG_MODULES | ||
107 | static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; | ||
108 | static struct module *slow_work_unreg_module; | ||
109 | static struct slow_work *slow_work_unreg_work_item; | ||
110 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); | ||
111 | static DEFINE_MUTEX(slow_work_unreg_sync_lock); | ||
112 | |||
113 | static void slow_work_set_thread_processing(int id, struct slow_work *work) | ||
114 | { | ||
115 | if (work) | ||
116 | slow_work_thread_processing[id] = work->owner; | ||
117 | } | ||
118 | static void slow_work_done_thread_processing(int id, struct slow_work *work) | ||
119 | { | ||
120 | struct module *module = slow_work_thread_processing[id]; | ||
121 | |||
122 | slow_work_thread_processing[id] = NULL; | ||
123 | smp_mb(); | ||
124 | if (slow_work_unreg_work_item == work || | ||
125 | slow_work_unreg_module == module) | ||
126 | wake_up_all(&slow_work_unreg_wq); | ||
127 | } | ||
128 | static void slow_work_clear_thread_processing(int id) | ||
129 | { | ||
130 | slow_work_thread_processing[id] = NULL; | ||
131 | } | ||
132 | #else | ||
133 | static void slow_work_set_thread_processing(int id, struct slow_work *work) {} | ||
134 | static void slow_work_done_thread_processing(int id, struct slow_work *work) {} | ||
135 | static void slow_work_clear_thread_processing(int id) {} | ||
136 | #endif | ||
137 | |||
138 | /* | ||
139 | * Data for tracking currently executing items for indication through /proc | ||
140 | */ | ||
141 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
142 | struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; | ||
143 | pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; | ||
144 | DEFINE_RWLOCK(slow_work_execs_lock); | ||
145 | #endif | ||
146 | |||
147 | /* | ||
101 | * The queues of work items and the lock governing access to them. These are | 148 | * The queues of work items and the lock governing access to them. These are |
102 | * shared between all the CPUs. It doesn't make sense to have per-CPU queues | 149 | * shared between all the CPUs. It doesn't make sense to have per-CPU queues |
103 | * as the number of threads bears no relation to the number of CPUs. | 150 | * as the number of threads bears no relation to the number of CPUs. |
@@ -105,9 +152,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */ | |||
105 | * There are two queues of work items: one for slow work items, and one for | 152 | * There are two queues of work items: one for slow work items, and one for |
106 | * very slow work items. | 153 | * very slow work items. |
107 | */ | 154 | */ |
108 | static LIST_HEAD(slow_work_queue); | 155 | LIST_HEAD(slow_work_queue); |
109 | static LIST_HEAD(vslow_work_queue); | 156 | LIST_HEAD(vslow_work_queue); |
110 | static DEFINE_SPINLOCK(slow_work_queue_lock); | 157 | DEFINE_SPINLOCK(slow_work_queue_lock); |
158 | |||
159 | /* | ||
160 | * The following are two wait queues that get pinged when a work item is placed | ||
161 | * on an empty queue. These allow work items that are hogging a thread by | ||
162 | * sleeping in a way that could be deferred to yield their thread and enqueue | ||
163 | * themselves. | ||
164 | */ | ||
165 | static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation); | ||
166 | static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation); | ||
111 | 167 | ||
112 | /* | 168 | /* |
113 | * The thread controls. A variable used to signal to the threads that they | 169 | * The thread controls. A variable used to signal to the threads that they |
@@ -126,6 +182,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited); | |||
126 | static int slow_work_user_count; | 182 | static int slow_work_user_count; |
127 | static DEFINE_MUTEX(slow_work_user_lock); | 183 | static DEFINE_MUTEX(slow_work_user_lock); |
128 | 184 | ||
185 | static inline int slow_work_get_ref(struct slow_work *work) | ||
186 | { | ||
187 | if (work->ops->get_ref) | ||
188 | return work->ops->get_ref(work); | ||
189 | |||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static inline void slow_work_put_ref(struct slow_work *work) | ||
194 | { | ||
195 | if (work->ops->put_ref) | ||
196 | work->ops->put_ref(work); | ||
197 | } | ||
198 | |||
129 | /* | 199 | /* |
130 | * Calculate the maximum number of active threads in the pool that are | 200 | * Calculate the maximum number of active threads in the pool that are |
131 | * permitted to process very slow work items. | 201 | * permitted to process very slow work items. |
@@ -149,7 +219,7 @@ static unsigned slow_work_calc_vsmax(void) | |||
149 | * Attempt to execute stuff queued on a slow thread. Return true if we managed | 219 | * Attempt to execute stuff queued on a slow thread. Return true if we managed |
150 | * it, false if there was nothing to do. | 220 | * it, false if there was nothing to do. |
151 | */ | 221 | */ |
152 | static bool slow_work_execute(void) | 222 | static noinline bool slow_work_execute(int id) |
153 | { | 223 | { |
154 | struct slow_work *work = NULL; | 224 | struct slow_work *work = NULL; |
155 | unsigned vsmax; | 225 | unsigned vsmax; |
@@ -186,6 +256,13 @@ static bool slow_work_execute(void) | |||
186 | } else { | 256 | } else { |
187 | very_slow = false; /* avoid the compiler warning */ | 257 | very_slow = false; /* avoid the compiler warning */ |
188 | } | 258 | } |
259 | |||
260 | slow_work_set_thread_processing(id, work); | ||
261 | if (work) { | ||
262 | slow_work_mark_time(work); | ||
263 | slow_work_begin_exec(id, work); | ||
264 | } | ||
265 | |||
189 | spin_unlock_irq(&slow_work_queue_lock); | 266 | spin_unlock_irq(&slow_work_queue_lock); |
190 | 267 | ||
191 | if (!work) | 268 | if (!work) |
@@ -194,12 +271,19 @@ static bool slow_work_execute(void) | |||
194 | if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) | 271 | if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) |
195 | BUG(); | 272 | BUG(); |
196 | 273 | ||
197 | work->ops->execute(work); | 274 | /* don't execute if the work is in the process of being cancelled */ |
275 | if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
276 | work->ops->execute(work); | ||
198 | 277 | ||
199 | if (very_slow) | 278 | if (very_slow) |
200 | atomic_dec(&vslow_work_executing_count); | 279 | atomic_dec(&vslow_work_executing_count); |
201 | clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); | 280 | clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); |
202 | 281 | ||
282 | /* wake up anyone waiting for this work to be complete */ | ||
283 | wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); | ||
284 | |||
285 | slow_work_end_exec(id, work); | ||
286 | |||
203 | /* if someone tried to enqueue the item whilst we were executing it, | 287 | /* if someone tried to enqueue the item whilst we were executing it, |
204 | * then it'll be left unenqueued to avoid multiple threads trying to | 288 | * then it'll be left unenqueued to avoid multiple threads trying to |
205 | * execute it simultaneously | 289 | * execute it simultaneously |
@@ -219,7 +303,10 @@ static bool slow_work_execute(void) | |||
219 | spin_unlock_irq(&slow_work_queue_lock); | 303 | spin_unlock_irq(&slow_work_queue_lock); |
220 | } | 304 | } |
221 | 305 | ||
222 | work->ops->put_ref(work); | 306 | /* sort out the race between module unloading and put_ref() */ |
307 | slow_work_put_ref(work); | ||
308 | slow_work_done_thread_processing(id, work); | ||
309 | |||
223 | return true; | 310 | return true; |
224 | 311 | ||
225 | auto_requeue: | 312 | auto_requeue: |
@@ -227,15 +314,61 @@ auto_requeue: | |||
227 | * - we transfer our ref on the item back to the appropriate queue | 314 | * - we transfer our ref on the item back to the appropriate queue |
228 | * - don't wake another thread up as we're awake already | 315 | * - don't wake another thread up as we're awake already |
229 | */ | 316 | */ |
317 | slow_work_mark_time(work); | ||
230 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) | 318 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) |
231 | list_add_tail(&work->link, &vslow_work_queue); | 319 | list_add_tail(&work->link, &vslow_work_queue); |
232 | else | 320 | else |
233 | list_add_tail(&work->link, &slow_work_queue); | 321 | list_add_tail(&work->link, &slow_work_queue); |
234 | spin_unlock_irq(&slow_work_queue_lock); | 322 | spin_unlock_irq(&slow_work_queue_lock); |
323 | slow_work_clear_thread_processing(id); | ||
235 | return true; | 324 | return true; |
236 | } | 325 | } |
237 | 326 | ||
238 | /** | 327 | /** |
328 | * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work | ||
329 | * @work: The work item under execution that wants to sleep | ||
330 | * @_timeout: Scheduler sleep timeout | ||
331 | * | ||
332 | * Allow a requeueable work item to sleep on a slow-work processor thread until | ||
333 | * that thread is needed to do some other work or the sleep is interrupted by | ||
334 | * some other event. | ||
335 | * | ||
336 | * The caller must set up a wake up event before calling this and must have set | ||
337 | * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own | ||
338 | * condition before calling this function as no test is made here. | ||
339 | * | ||
340 | * False is returned if there is nothing on the queue; true is returned if the | ||
341 | * work item should be requeued | ||
342 | */ | ||
343 | bool slow_work_sleep_till_thread_needed(struct slow_work *work, | ||
344 | signed long *_timeout) | ||
345 | { | ||
346 | wait_queue_head_t *wfo_wq; | ||
347 | struct list_head *queue; | ||
348 | |||
349 | DEFINE_WAIT(wait); | ||
350 | |||
351 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
352 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
353 | queue = &vslow_work_queue; | ||
354 | } else { | ||
355 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
356 | queue = &slow_work_queue; | ||
357 | } | ||
358 | |||
359 | if (!list_empty(queue)) | ||
360 | return true; | ||
361 | |||
362 | add_wait_queue_exclusive(wfo_wq, &wait); | ||
363 | if (list_empty(queue)) | ||
364 | *_timeout = schedule_timeout(*_timeout); | ||
365 | finish_wait(wfo_wq, &wait); | ||
366 | |||
367 | return !list_empty(queue); | ||
368 | } | ||
369 | EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); | ||
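As the comment block says, the caller prepares its own wait and sleep state first; slow_work_sleep_till_thread_needed() only adds the queue-occupation wakeup and reports whether the item should hand its thread back. A rough sketch of the intended calling pattern from inside an item's ->execute() routine (my_wq, my_event and my_item are invented stand-ins for the caller's own wakeup source and object, reusing the struct my_work sketched earlier):

	/* inside my_work_execute(), waiting for my_event with a 5s budget */
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);
	bool requeue = false;

	while (timeout > 0 && !my_event) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_event)
			break;
		if (slow_work_sleep_till_thread_needed(&my_item->slow,
						       &timeout)) {
			/* another item wants a thread: stop waiting and let
			 * the caller requeue itself rather than hog kslowd */
			requeue = true;
			break;
		}
	}
	finish_wait(&my_wq, &wait);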
370 | |||
371 | /** | ||
239 | * slow_work_enqueue - Schedule a slow work item for processing | 372 | * slow_work_enqueue - Schedule a slow work item for processing |
240 | * @work: The work item to queue | 373 | * @work: The work item to queue |
241 | * | 374 | * |
@@ -260,16 +393,22 @@ auto_requeue: | |||
260 | * allowed to pick items to execute. This ensures that very slow items won't | 393 | * allowed to pick items to execute. This ensures that very slow items won't |
261 | * overly block ones that are just ordinarily slow. | 394 | * overly block ones that are just ordinarily slow. |
262 | * | 395 | * |
263 | * Returns 0 if successful, -EAGAIN if not. | 396 | * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is |
397 | attempted to be queued) | ||
264 | */ | 398 | */ |
265 | int slow_work_enqueue(struct slow_work *work) | 399 | int slow_work_enqueue(struct slow_work *work) |
266 | { | 400 | { |
401 | wait_queue_head_t *wfo_wq; | ||
402 | struct list_head *queue; | ||
267 | unsigned long flags; | 403 | unsigned long flags; |
404 | int ret; | ||
405 | |||
406 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
407 | return -ECANCELED; | ||
268 | 408 | ||
269 | BUG_ON(slow_work_user_count <= 0); | 409 | BUG_ON(slow_work_user_count <= 0); |
270 | BUG_ON(!work); | 410 | BUG_ON(!work); |
271 | BUG_ON(!work->ops); | 411 | BUG_ON(!work->ops); |
272 | BUG_ON(!work->ops->get_ref); | ||
273 | 412 | ||
274 | /* when honouring an enqueue request, we only promise that we will run | 413 | /* when honouring an enqueue request, we only promise that we will run |
275 | * the work function in the future; we do not promise to run it once | 414 | * the work function in the future; we do not promise to run it once |
@@ -280,8 +419,19 @@ int slow_work_enqueue(struct slow_work *work) | |||
280 | * maintaining our promise | 419 | * maintaining our promise |
281 | */ | 420 | */ |
282 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { | 421 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { |
422 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
423 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
424 | queue = &vslow_work_queue; | ||
425 | } else { | ||
426 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
427 | queue = &slow_work_queue; | ||
428 | } | ||
429 | |||
283 | spin_lock_irqsave(&slow_work_queue_lock, flags); | 430 | spin_lock_irqsave(&slow_work_queue_lock, flags); |
284 | 431 | ||
432 | if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) | ||
433 | goto cancelled; | ||
434 | |||
285 | /* we promise that we will not attempt to execute the work | 435 | /* we promise that we will not attempt to execute the work |
286 | * function in more than one thread simultaneously | 436 | * function in more than one thread simultaneously |
287 | * | 437 | * |
@@ -299,25 +449,221 @@ int slow_work_enqueue(struct slow_work *work) | |||
299 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { | 449 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { |
300 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); | 450 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); |
301 | } else { | 451 | } else { |
302 | if (work->ops->get_ref(work) < 0) | 452 | ret = slow_work_get_ref(work); |
303 | goto cant_get_ref; | 453 | if (ret < 0) |
304 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) | 454 | goto failed; |
305 | list_add_tail(&work->link, &vslow_work_queue); | 455 | slow_work_mark_time(work); |
306 | else | 456 | list_add_tail(&work->link, queue); |
307 | list_add_tail(&work->link, &slow_work_queue); | ||
308 | wake_up(&slow_work_thread_wq); | 457 | wake_up(&slow_work_thread_wq); |
458 | |||
459 | /* if someone who could be requeued is sleeping on a | ||
460 | * thread, then ask them to yield their thread */ | ||
461 | if (work->link.prev == queue) | ||
462 | wake_up(wfo_wq); | ||
309 | } | 463 | } |
310 | 464 | ||
311 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | 465 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); |
312 | } | 466 | } |
313 | return 0; | 467 | return 0; |
314 | 468 | ||
315 | cant_get_ref: | 469 | cancelled: |
470 | ret = -ECANCELED; | ||
471 | failed: | ||
316 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | 472 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); |
317 | return -EAGAIN; | 473 | return ret; |
318 | } | 474 | } |
319 | EXPORT_SYMBOL(slow_work_enqueue); | 475 | EXPORT_SYMBOL(slow_work_enqueue); |
320 | 476 | ||
477 | static int slow_work_wait(void *word) | ||
478 | { | ||
479 | schedule(); | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * slow_work_cancel - Cancel a slow work item | ||
485 | * @work: The work item to cancel | ||
486 | * | ||
487 | * This function will cancel a previously enqueued work item. If we cannot | ||
488 | * cancel the work item, it is guaranteed to have run when this function | ||
489 | * returns. | ||
490 | */ | ||
491 | void slow_work_cancel(struct slow_work *work) | ||
492 | { | ||
493 | bool wait = true, put = false; | ||
494 | |||
495 | set_bit(SLOW_WORK_CANCELLING, &work->flags); | ||
496 | smp_mb(); | ||
497 | |||
498 | /* if the work item is a delayed work item with an active timer, we | ||
499 | * need to wait for the timer to finish _before_ getting the spinlock, | ||
500 | * lest we deadlock against the timer routine | ||
501 | * | ||
502 | * the timer routine will leave DELAYED set if it notices the | ||
503 | * CANCELLING flag in time | ||
504 | */ | ||
505 | if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { | ||
506 | struct delayed_slow_work *dwork = | ||
507 | container_of(work, struct delayed_slow_work, work); | ||
508 | del_timer_sync(&dwork->timer); | ||
509 | } | ||
510 | |||
511 | spin_lock_irq(&slow_work_queue_lock); | ||
512 | |||
513 | if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { | ||
514 | /* the timer routine aborted or never happened, so we are left | ||
515 | * holding the timer's reference on the item and should just | ||
516 | * drop the pending flag and wait for any ongoing execution to | ||
517 | * finish */ | ||
518 | struct delayed_slow_work *dwork = | ||
519 | container_of(work, struct delayed_slow_work, work); | ||
520 | |||
521 | BUG_ON(timer_pending(&dwork->timer)); | ||
522 | BUG_ON(!list_empty(&work->link)); | ||
523 | |||
524 | clear_bit(SLOW_WORK_DELAYED, &work->flags); | ||
525 | put = true; | ||
526 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
527 | |||
528 | } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && | ||
529 | !list_empty(&work->link)) { | ||
530 | /* the link in the pending queue holds a reference on the item | ||
531 | * that we will need to release */ | ||
532 | list_del_init(&work->link); | ||
533 | wait = false; | ||
534 | put = true; | ||
535 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
536 | |||
537 | } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { | ||
538 | /* the executor is holding our only reference on the item, so | ||
539 | * we merely need to wait for it to finish executing */ | ||
540 | clear_bit(SLOW_WORK_PENDING, &work->flags); | ||
541 | } | ||
542 | |||
543 | spin_unlock_irq(&slow_work_queue_lock); | ||
544 | |||
545 | /* the EXECUTING flag is set by the executor whilst the spinlock is set | ||
546 | * and before the item is dequeued - so assuming the above doesn't | ||
547 | * actually dequeue it, simply waiting for the EXECUTING flag to be | ||
548 | * released here should be sufficient */ | ||
549 | if (wait) | ||
550 | wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, | ||
551 | TASK_UNINTERRUPTIBLE); | ||
552 | |||
553 | clear_bit(SLOW_WORK_CANCELLING, &work->flags); | ||
554 | if (put) | ||
555 | slow_work_put_ref(work); | ||
556 | } | ||
557 | EXPORT_SYMBOL(slow_work_cancel); | ||
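Because slow_work_cancel() only returns once the item is neither queued nor executing, a tear-down path may release the containing object immediately afterwards, provided nothing else can still requeue it. Sketch (my_item is the invented object from the earlier example):

	slow_work_cancel(&my_item->slow);	/* idle or finished on return */
	kfree(my_item);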
558 | |||
559 | /* | ||
560 | * Handle expiry of the delay timer, indicating that a delayed slow work item | ||
561 | * should now be queued if not cancelled | ||
562 | */ | ||
563 | static void delayed_slow_work_timer(unsigned long data) | ||
564 | { | ||
565 | wait_queue_head_t *wfo_wq; | ||
566 | struct list_head *queue; | ||
567 | struct slow_work *work = (struct slow_work *) data; | ||
568 | unsigned long flags; | ||
569 | bool queued = false, put = false, first = false; | ||
570 | |||
571 | if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { | ||
572 | wfo_wq = &vslow_work_queue_waits_for_occupation; | ||
573 | queue = &vslow_work_queue; | ||
574 | } else { | ||
575 | wfo_wq = &slow_work_queue_waits_for_occupation; | ||
576 | queue = &slow_work_queue; | ||
577 | } | ||
578 | |||
579 | spin_lock_irqsave(&slow_work_queue_lock, flags); | ||
580 | if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { | ||
581 | clear_bit(SLOW_WORK_DELAYED, &work->flags); | ||
582 | |||
583 | if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { | ||
584 | /* we discard the reference the timer was holding in | ||
585 | * favour of the one the executor holds */ | ||
586 | set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); | ||
587 | put = true; | ||
588 | } else { | ||
589 | slow_work_mark_time(work); | ||
590 | list_add_tail(&work->link, queue); | ||
591 | queued = true; | ||
592 | if (work->link.prev == queue) | ||
593 | first = true; | ||
594 | } | ||
595 | } | ||
596 | |||
597 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
598 | if (put) | ||
599 | slow_work_put_ref(work); | ||
600 | if (first) | ||
601 | wake_up(wfo_wq); | ||
602 | if (queued) | ||
603 | wake_up(&slow_work_thread_wq); | ||
604 | } | ||
605 | |||
606 | /** | ||
607 | * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing | ||
608 | * @dwork: The delayed work item to queue | ||
609 | * @delay: When to start executing the work, in jiffies from now | ||
610 | * | ||
611 | * This is similar to slow_work_enqueue(), but it adds a delay before the work | ||
612 | * is actually queued for processing. | ||
613 | * | ||
614 | * The item can have delayed processing requested on it whilst it is being | ||
615 | * executed. The delay will begin immediately, and if it expires before the | ||
616 | * item finishes executing, the item will be placed back on the queue when it | ||
617 | * has done executing. | ||
618 | */ | ||
619 | int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | ||
620 | unsigned long delay) | ||
621 | { | ||
622 | struct slow_work *work = &dwork->work; | ||
623 | unsigned long flags; | ||
624 | int ret; | ||
625 | |||
626 | if (delay == 0) | ||
627 | return slow_work_enqueue(&dwork->work); | ||
628 | |||
629 | BUG_ON(slow_work_user_count <= 0); | ||
630 | BUG_ON(!work); | ||
631 | BUG_ON(!work->ops); | ||
632 | |||
633 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
634 | return -ECANCELED; | ||
635 | |||
636 | if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { | ||
637 | spin_lock_irqsave(&slow_work_queue_lock, flags); | ||
638 | |||
639 | if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) | ||
640 | goto cancelled; | ||
641 | |||
642 | /* the timer holds a reference whilst it is pending */ | ||
643 | ret = work->ops->get_ref(work); | ||
644 | if (ret < 0) | ||
645 | goto cant_get_ref; | ||
646 | |||
647 | if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) | ||
648 | BUG(); | ||
649 | dwork->timer.expires = jiffies + delay; | ||
650 | dwork->timer.data = (unsigned long) work; | ||
651 | dwork->timer.function = delayed_slow_work_timer; | ||
652 | add_timer(&dwork->timer); | ||
653 | |||
654 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
655 | } | ||
656 | |||
657 | return 0; | ||
658 | |||
659 | cancelled: | ||
660 | ret = -ECANCELED; | ||
661 | cant_get_ref: | ||
662 | spin_unlock_irqrestore(&slow_work_queue_lock, flags); | ||
663 | return ret; | ||
664 | } | ||
665 | EXPORT_SYMBOL(delayed_slow_work_enqueue); | ||
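A hedged sketch of the delayed variant in use, assuming a delayed_slow_work_init() helper on the include/linux/slow-work.h side of this series (my_dwork, my_schedule_scan() and the two-second delay are invented; my_work_ops is the ops table sketched earlier):

static struct delayed_slow_work my_dwork;

static int my_schedule_scan(void)
{
	delayed_slow_work_init(&my_dwork, &my_work_ops);

	/* runs roughly two seconds from now; -ECANCELED means the item is
	 * currently being cancelled, so treat it as not queued */
	return delayed_slow_work_enqueue(&my_dwork, 2 * HZ);
}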
666 | |||
321 | /* | 667 | /* |
322 | * Schedule a cull of the thread pool at some time in the near future | 668 | * Schedule a cull of the thread pool at some time in the near future |
323 | */ | 669 | */ |
@@ -368,13 +714,23 @@ static inline bool slow_work_available(int vsmax) | |||
368 | */ | 714 | */ |
369 | static int slow_work_thread(void *_data) | 715 | static int slow_work_thread(void *_data) |
370 | { | 716 | { |
371 | int vsmax; | 717 | int vsmax, id; |
372 | 718 | ||
373 | DEFINE_WAIT(wait); | 719 | DEFINE_WAIT(wait); |
374 | 720 | ||
375 | set_freezable(); | 721 | set_freezable(); |
376 | set_user_nice(current, -5); | 722 | set_user_nice(current, -5); |
377 | 723 | ||
724 | /* allocate ourselves an ID */ | ||
725 | spin_lock_irq(&slow_work_queue_lock); | ||
726 | id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); | ||
727 | BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); | ||
728 | __set_bit(id, slow_work_ids); | ||
729 | slow_work_set_thread_pid(id, current->pid); | ||
730 | spin_unlock_irq(&slow_work_queue_lock); | ||
731 | |||
732 | sprintf(current->comm, "kslowd%03u", id); | ||
733 | |||
378 | for (;;) { | 734 | for (;;) { |
379 | vsmax = vslow_work_proportion; | 735 | vsmax = vslow_work_proportion; |
380 | vsmax *= atomic_read(&slow_work_thread_count); | 736 | vsmax *= atomic_read(&slow_work_thread_count); |
@@ -395,7 +751,7 @@ static int slow_work_thread(void *_data) | |||
395 | vsmax *= atomic_read(&slow_work_thread_count); | 751 | vsmax *= atomic_read(&slow_work_thread_count); |
396 | vsmax /= 100; | 752 | vsmax /= 100; |
397 | 753 | ||
398 | if (slow_work_available(vsmax) && slow_work_execute()) { | 754 | if (slow_work_available(vsmax) && slow_work_execute(id)) { |
399 | cond_resched(); | 755 | cond_resched(); |
400 | if (list_empty(&slow_work_queue) && | 756 | if (list_empty(&slow_work_queue) && |
401 | list_empty(&vslow_work_queue) && | 757 | list_empty(&vslow_work_queue) && |
@@ -412,6 +768,11 @@ static int slow_work_thread(void *_data) | |||
412 | break; | 768 | break; |
413 | } | 769 | } |
414 | 770 | ||
771 | spin_lock_irq(&slow_work_queue_lock); | ||
772 | slow_work_set_thread_pid(id, 0); | ||
773 | __clear_bit(id, slow_work_ids); | ||
774 | spin_unlock_irq(&slow_work_queue_lock); | ||
775 | |||
415 | if (atomic_dec_and_test(&slow_work_thread_count)) | 776 | if (atomic_dec_and_test(&slow_work_thread_count)) |
416 | complete_and_exit(&slow_work_last_thread_exited, 0); | 777 | complete_and_exit(&slow_work_last_thread_exited, 0); |
417 | return 0; | 778 | return 0; |
@@ -427,21 +788,6 @@ static void slow_work_cull_timeout(unsigned long data) | |||
427 | } | 788 | } |
428 | 789 | ||
429 | /* | 790 | /* |
430 | * Get a reference on slow work thread starter | ||
431 | */ | ||
432 | static int slow_work_new_thread_get_ref(struct slow_work *work) | ||
433 | { | ||
434 | return 0; | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * Drop a reference on slow work thread starter | ||
439 | */ | ||
440 | static void slow_work_new_thread_put_ref(struct slow_work *work) | ||
441 | { | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * Start a new slow work thread | 791 | * Start a new slow work thread |
446 | */ | 792 | */ |
447 | static void slow_work_new_thread_execute(struct slow_work *work) | 793 | static void slow_work_new_thread_execute(struct slow_work *work) |
@@ -475,9 +821,11 @@ static void slow_work_new_thread_execute(struct slow_work *work) | |||
475 | } | 821 | } |
476 | 822 | ||
477 | static const struct slow_work_ops slow_work_new_thread_ops = { | 823 | static const struct slow_work_ops slow_work_new_thread_ops = { |
478 | .get_ref = slow_work_new_thread_get_ref, | 824 | .owner = THIS_MODULE, |
479 | .put_ref = slow_work_new_thread_put_ref, | ||
480 | .execute = slow_work_new_thread_execute, | 825 | .execute = slow_work_new_thread_execute, |
826 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
827 | .desc = slow_work_new_thread_desc, | ||
828 | #endif | ||
481 | }; | 829 | }; |
482 | 830 | ||
483 | /* | 831 | /* |
@@ -546,12 +894,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, | |||
546 | 894 | ||
547 | /** | 895 | /** |
548 | * slow_work_register_user - Register a user of the facility | 896 | * slow_work_register_user - Register a user of the facility |
897 | * @module: The module about to make use of the facility | ||
549 | * | 898 | * |
550 | * Register a user of the facility, starting up the initial threads if there | 899 | * Register a user of the facility, starting up the initial threads if there |
551 | * aren't any other users at this point. This will return 0 if successful, or | 900 | * aren't any other users at this point. This will return 0 if successful, or |
552 | * an error if not. | 901 | * an error if not. |
553 | */ | 902 | */ |
554 | int slow_work_register_user(void) | 903 | int slow_work_register_user(struct module *module) |
555 | { | 904 | { |
556 | struct task_struct *p; | 905 | struct task_struct *p; |
557 | int loop; | 906 | int loop; |
@@ -598,14 +947,81 @@ error: | |||
598 | } | 947 | } |
599 | EXPORT_SYMBOL(slow_work_register_user); | 948 | EXPORT_SYMBOL(slow_work_register_user); |
600 | 949 | ||
950 | /* | ||
951 | * wait for all outstanding items from the calling module to complete | ||
952 | * - note that more items may be queued whilst we're waiting | ||
953 | */ | ||
954 | static void slow_work_wait_for_items(struct module *module) | ||
955 | { | ||
956 | #ifdef CONFIG_MODULES | ||
957 | DECLARE_WAITQUEUE(myself, current); | ||
958 | struct slow_work *work; | ||
959 | int loop; | ||
960 | |||
961 | mutex_lock(&slow_work_unreg_sync_lock); | ||
962 | add_wait_queue(&slow_work_unreg_wq, &myself); | ||
963 | |||
964 | for (;;) { | ||
965 | spin_lock_irq(&slow_work_queue_lock); | ||
966 | |||
967 | /* first of all, we wait for the last queued item in each list | ||
968 | * to be processed */ | ||
969 | list_for_each_entry_reverse(work, &vslow_work_queue, link) { | ||
970 | if (work->owner == module) { | ||
971 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
972 | slow_work_unreg_work_item = work; | ||
973 | goto do_wait; | ||
974 | } | ||
975 | } | ||
976 | list_for_each_entry_reverse(work, &slow_work_queue, link) { | ||
977 | if (work->owner == module) { | ||
978 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
979 | slow_work_unreg_work_item = work; | ||
980 | goto do_wait; | ||
981 | } | ||
982 | } | ||
983 | |||
984 | /* then we wait for the items being processed to finish */ | ||
985 | slow_work_unreg_module = module; | ||
986 | smp_mb(); | ||
987 | for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { | ||
988 | if (slow_work_thread_processing[loop] == module) | ||
989 | goto do_wait; | ||
990 | } | ||
991 | spin_unlock_irq(&slow_work_queue_lock); | ||
992 | break; /* okay, we're done */ | ||
993 | |||
994 | do_wait: | ||
995 | spin_unlock_irq(&slow_work_queue_lock); | ||
996 | schedule(); | ||
997 | slow_work_unreg_work_item = NULL; | ||
998 | slow_work_unreg_module = NULL; | ||
999 | } | ||
1000 | |||
1001 | remove_wait_queue(&slow_work_unreg_wq, &myself); | ||
1002 | mutex_unlock(&slow_work_unreg_sync_lock); | ||
1003 | #endif /* CONFIG_MODULES */ | ||
1004 | } | ||
1005 | |||
601 | /** | 1006 | /** |
602 | * slow_work_unregister_user - Unregister a user of the facility | 1007 | * slow_work_unregister_user - Unregister a user of the facility |
1008 | * @module: The module whose items should be cleared | ||
603 | * | 1009 | * |
604 | * Unregister a user of the facility, killing all the threads if this was the | 1010 | * Unregister a user of the facility, killing all the threads if this was the |
605 | * last one. | 1011 | * last one. |
1012 | * | ||
1013 | * This waits for all the work items belonging to the nominated module to go | ||
1014 | * away before proceeding. | ||
606 | */ | 1015 | */ |
607 | void slow_work_unregister_user(void) | 1016 | void slow_work_unregister_user(struct module *module) |
608 | { | 1017 | { |
1018 | /* first of all, wait for all outstanding items from the calling module | ||
1019 | * to complete */ | ||
1020 | if (module) | ||
1021 | slow_work_wait_for_items(module); | ||
1022 | |||
1023 | /* then we can actually go about shutting down the facility if need | ||
1024 | * be */ | ||
609 | mutex_lock(&slow_work_user_lock); | 1025 | mutex_lock(&slow_work_user_lock); |
610 | 1026 | ||
611 | BUG_ON(slow_work_user_count <= 0); | 1027 | BUG_ON(slow_work_user_count <= 0); |
@@ -639,6 +1055,16 @@ static int __init init_slow_work(void) | |||
639 | if (slow_work_max_max_threads < nr_cpus * 2) | 1055 | if (slow_work_max_max_threads < nr_cpus * 2) |
640 | slow_work_max_max_threads = nr_cpus * 2; | 1056 | slow_work_max_max_threads = nr_cpus * 2; |
641 | #endif | 1057 | #endif |
1058 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
1059 | { | ||
1060 | struct dentry *dbdir; | ||
1061 | |||
1062 | dbdir = debugfs_create_dir("slow_work", NULL); | ||
1063 | if (dbdir && !IS_ERR(dbdir)) | ||
1064 | debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, | ||
1065 | NULL, &slow_work_runqueue_fops); | ||
1066 | } | ||
1067 | #endif | ||
642 | return 0; | 1068 | return 0; |
643 | } | 1069 | } |
644 | 1070 | ||
diff --git a/kernel/slow-work.h b/kernel/slow-work.h new file mode 100644 index 000000000000..321f3c59d732 --- /dev/null +++ b/kernel/slow-work.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* Slow work private definitions | ||
2 | * | ||
3 | * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of | ||
13 | * things to do */ | ||
14 | #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after | ||
15 | * OOM */ | ||
16 | |||
17 | #define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ | ||
18 | |||
19 | /* | ||
20 | * slow-work.c | ||
21 | */ | ||
22 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
23 | extern struct slow_work *slow_work_execs[]; | ||
24 | extern pid_t slow_work_pids[]; | ||
25 | extern rwlock_t slow_work_execs_lock; | ||
26 | #endif | ||
27 | |||
28 | extern struct list_head slow_work_queue; | ||
29 | extern struct list_head vslow_work_queue; | ||
30 | extern spinlock_t slow_work_queue_lock; | ||
31 | |||
32 | /* | ||
33 | * slow-work-debugfs.c | ||
34 | */ | ||
35 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
36 | extern const struct file_operations slow_work_runqueue_fops; | ||
37 | |||
38 | extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); | ||
39 | #endif | ||
40 | |||
41 | /* | ||
42 | * Helper functions | ||
43 | */ | ||
44 | static inline void slow_work_set_thread_pid(int id, pid_t pid) | ||
45 | { | ||
46 | #ifdef CONFIG_SLOW_WORK_PROC | ||
47 | slow_work_pids[id] = pid; | ||
48 | #endif | ||
49 | } | ||
50 | |||
51 | static inline void slow_work_mark_time(struct slow_work *work) | ||
52 | { | ||
53 | #ifdef CONFIG_SLOW_WORK_PROC | ||
54 | work->mark = CURRENT_TIME; | ||
55 | #endif | ||
56 | } | ||
57 | |||
58 | static inline void slow_work_begin_exec(int id, struct slow_work *work) | ||
59 | { | ||
60 | #ifdef CONFIG_SLOW_WORK_PROC | ||
61 | slow_work_execs[id] = work; | ||
62 | #endif | ||
63 | } | ||
64 | |||
65 | static inline void slow_work_end_exec(int id, struct slow_work *work) | ||
66 | { | ||
67 | #ifdef CONFIG_SLOW_WORK_PROC | ||
68 | write_lock(&slow_work_execs_lock); | ||
69 | slow_work_execs[id] = NULL; | ||
70 | write_unlock(&slow_work_execs_lock); | ||
71 | #endif | ||
72 | } | ||
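With the module-aware signatures introduced above, a user of the facility brackets its lifetime roughly as follows (sketch only; my_module_init/my_module_exit are invented and error paths are trimmed):

static int __init my_module_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	/* ... queue work items whose ops carry .owner = THIS_MODULE ... */
	return 0;
}

static void __exit my_module_exit(void)
{
	/* waits for every outstanding item owned by this module, so no
	 * kslowd thread is left running our code after unload */
	slow_work_unregister_user(THIS_MODULE);
}

module_init(my_module_init);
module_exit(my_module_exit);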
diff --git a/kernel/smp.c b/kernel/smp.c index c9d1c7835c2f..a8c76069cf50 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data); | |||
265 | * @info: An arbitrary pointer to pass to the function. | 265 | * @info: An arbitrary pointer to pass to the function. |
266 | * @wait: If true, wait until function has completed on other CPUs. | 266 | * @wait: If true, wait until function has completed on other CPUs. |
267 | * | 267 | * |
268 | * Returns 0 on success, else a negative status code. Note that @wait | 268 | * Returns 0 on success, else a negative status code. |
269 | * will be implicitly turned on in case of allocation failures, since | ||
270 | * we fall back to on-stack allocation. | ||
271 | */ | 269 | */ |
272 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 270 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
273 | int wait) | 271 | int wait) |
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
321 | } | 319 | } |
322 | EXPORT_SYMBOL(smp_call_function_single); | 320 | EXPORT_SYMBOL(smp_call_function_single); |
323 | 321 | ||
322 | /* | ||
323 | * smp_call_function_any - Run a function on any of the given cpus | ||
324 | * @mask: The mask of cpus it can run on. | ||
325 | * @func: The function to run. This must be fast and non-blocking. | ||
326 | * @info: An arbitrary pointer to pass to the function. | ||
327 | * @wait: If true, wait until function has completed. | ||
328 | * | ||
329 | * Returns 0 on success, else a negative status code (if no cpus were online). | ||
330 | * Note that @wait will be implicitly turned on in case of allocation failures, | ||
331 | * since we fall back to on-stack allocation. | ||
332 | * | ||
333 | * Selection preference: | ||
334 | * 1) current cpu if in @mask | ||
335 | * 2) any cpu of current node if in @mask | ||
336 | * 3) any other online cpu in @mask | ||
337 | */ | ||
338 | int smp_call_function_any(const struct cpumask *mask, | ||
339 | void (*func)(void *info), void *info, int wait) | ||
340 | { | ||
341 | unsigned int cpu; | ||
342 | const struct cpumask *nodemask; | ||
343 | int ret; | ||
344 | |||
345 | /* Try for same CPU (cheapest) */ | ||
346 | cpu = get_cpu(); | ||
347 | if (cpumask_test_cpu(cpu, mask)) | ||
348 | goto call; | ||
349 | |||
350 | /* Try for same node. */ | ||
351 | nodemask = cpumask_of_node(cpu); | ||
352 | for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids; | ||
353 | cpu = cpumask_next_and(cpu, nodemask, mask)) { | ||
354 | if (cpu_online(cpu)) | ||
355 | goto call; | ||
356 | } | ||
357 | |||
358 | /* Any online will do: smp_call_function_single handles nr_cpu_ids. */ | ||
359 | cpu = cpumask_any_and(mask, cpu_online_mask); | ||
360 | call: | ||
361 | ret = smp_call_function_single(cpu, func, info, wait); | ||
362 | put_cpu(); | ||
363 | return ret; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(smp_call_function_any); | ||
366 | |||
324 | /** | 367 | /** |
325 | * __smp_call_function_single(): Run a function on another CPU | 368 | * __smp_call_function_single(): Run a function on another CPU |
326 | * @cpu: The CPU to run on. | 369 | * @cpu: The CPU to run on. |
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data, | |||
355 | * @wait: If true, wait (atomically) until function has completed | 398 | * @wait: If true, wait (atomically) until function has completed |
356 | * on other CPUs. | 399 | * on other CPUs. |
357 | * | 400 | * |
358 | * If @wait is true, then returns once @func has returned. Note that @wait | 401 | * If @wait is true, then returns once @func has returned. |
359 | * will be implicitly turned on in case of allocation failures, since | ||
360 | * we fall back to on-stack allocation. | ||
361 | * | 402 | * |
362 | * You must not call this function with disabled interrupts or from a | 403 | * You must not call this function with disabled interrupts or from a |
363 | * hardware interrupt handler or from a bottom half handler. Preemption | 404 | * hardware interrupt handler or from a bottom half handler. Preemption |
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many); | |||
443 | * Returns 0. | 484 | * Returns 0. |
444 | * | 485 | * |
445 | * If @wait is true, then returns once @func has returned; otherwise | 486 | * If @wait is true, then returns once @func has returned; otherwise |
446 | * it returns just before the target cpu calls @func. In case of allocation | 487 | * it returns just before the target cpu calls @func. |
447 | * failure, @wait will be implicitly turned on. | ||
448 | * | 488 | * |
449 | * You must not call this function with disabled interrupts or from a | 489 | * You must not call this function with disabled interrupts or from a |
450 | * hardware interrupt handler or from a bottom half handler. | 490 | * hardware interrupt handler or from a bottom half handler. |
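A brief sketch of the new smp_call_function_any() helper in use: run a quick, non-blocking callback on whichever CPU in a mask is cheapest to reach (read_node_counter(), sample_counter() and the mask parameter are invented for illustration):

#include <linux/smp.h>
#include <linux/cpumask.h>

static void read_node_counter(void *info)
{
	*(u64 *)info = 42;	/* stand-in for reading a per-CPU resource */
}

static u64 sample_counter(const struct cpumask *allowed)
{
	u64 value = 0;

	/* prefers the current CPU, then the local node, then any online
	 * CPU in @allowed; @wait=1 so the result is valid on return */
	if (smp_call_function_any(allowed, read_node_counter, &value, 1) < 0)
		return 0;	/* no online CPU in the mask */

	return value;
}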
diff --git a/kernel/softirq.c b/kernel/softirq.c index f8749e5216e0..21939d9e830e 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -302,9 +302,9 @@ void irq_exit(void) | |||
302 | if (!in_interrupt() && local_softirq_pending()) | 302 | if (!in_interrupt() && local_softirq_pending()) |
303 | invoke_softirq(); | 303 | invoke_softirq(); |
304 | 304 | ||
305 | rcu_irq_exit(); | ||
305 | #ifdef CONFIG_NO_HZ | 306 | #ifdef CONFIG_NO_HZ |
306 | /* Make sure that timer wheel updates are propagated */ | 307 | /* Make sure that timer wheel updates are propagated */ |
307 | rcu_irq_exit(); | ||
308 | if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) | 308 | if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) |
309 | tick_nohz_stop_sched_tick(0); | 309 | tick_nohz_stop_sched_tick(0); |
310 | #endif | 310 | #endif |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 5ddab730cb2f..41e042219ff6 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -21,145 +21,28 @@ | |||
21 | #include <linux/debug_locks.h> | 21 | #include <linux/debug_locks.h> |
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
24 | #ifndef _spin_trylock | ||
25 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
26 | { | ||
27 | return __spin_trylock(lock); | ||
28 | } | ||
29 | EXPORT_SYMBOL(_spin_trylock); | ||
30 | #endif | ||
31 | |||
32 | #ifndef _read_trylock | ||
33 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
34 | { | ||
35 | return __read_trylock(lock); | ||
36 | } | ||
37 | EXPORT_SYMBOL(_read_trylock); | ||
38 | #endif | ||
39 | |||
40 | #ifndef _write_trylock | ||
41 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
42 | { | ||
43 | return __write_trylock(lock); | ||
44 | } | ||
45 | EXPORT_SYMBOL(_write_trylock); | ||
46 | #endif | ||
47 | |||
48 | /* | 24 | /* |
49 | * If lockdep is enabled then we use the non-preemption spin-ops | 25 | * If lockdep is enabled then we use the non-preemption spin-ops |
50 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | 26 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
51 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | 27 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): |
52 | */ | 28 | */ |
53 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 29 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
54 | |||
55 | #ifndef _read_lock | ||
56 | void __lockfunc _read_lock(rwlock_t *lock) | ||
57 | { | ||
58 | __read_lock(lock); | ||
59 | } | ||
60 | EXPORT_SYMBOL(_read_lock); | ||
61 | #endif | ||
62 | |||
63 | #ifndef _spin_lock_irqsave | ||
64 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
65 | { | ||
66 | return __spin_lock_irqsave(lock); | ||
67 | } | ||
68 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
69 | #endif | ||
70 | |||
71 | #ifndef _spin_lock_irq | ||
72 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
73 | { | ||
74 | __spin_lock_irq(lock); | ||
75 | } | ||
76 | EXPORT_SYMBOL(_spin_lock_irq); | ||
77 | #endif | ||
78 | |||
79 | #ifndef _spin_lock_bh | ||
80 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
81 | { | ||
82 | __spin_lock_bh(lock); | ||
83 | } | ||
84 | EXPORT_SYMBOL(_spin_lock_bh); | ||
85 | #endif | ||
86 | |||
87 | #ifndef _read_lock_irqsave | ||
88 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
89 | { | ||
90 | return __read_lock_irqsave(lock); | ||
91 | } | ||
92 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
93 | #endif | ||
94 | |||
95 | #ifndef _read_lock_irq | ||
96 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
97 | { | ||
98 | __read_lock_irq(lock); | ||
99 | } | ||
100 | EXPORT_SYMBOL(_read_lock_irq); | ||
101 | #endif | ||
102 | |||
103 | #ifndef _read_lock_bh | ||
104 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
105 | { | ||
106 | __read_lock_bh(lock); | ||
107 | } | ||
108 | EXPORT_SYMBOL(_read_lock_bh); | ||
109 | #endif | ||
110 | |||
111 | #ifndef _write_lock_irqsave | ||
112 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
113 | { | ||
114 | return __write_lock_irqsave(lock); | ||
115 | } | ||
116 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
117 | #endif | ||
118 | |||
119 | #ifndef _write_lock_irq | ||
120 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
121 | { | ||
122 | __write_lock_irq(lock); | ||
123 | } | ||
124 | EXPORT_SYMBOL(_write_lock_irq); | ||
125 | #endif | ||
126 | |||
127 | #ifndef _write_lock_bh | ||
128 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
129 | { | ||
130 | __write_lock_bh(lock); | ||
131 | } | ||
132 | EXPORT_SYMBOL(_write_lock_bh); | ||
133 | #endif | ||
134 | |||
135 | #ifndef _spin_lock | ||
136 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
137 | { | ||
138 | __spin_lock(lock); | ||
139 | } | ||
140 | EXPORT_SYMBOL(_spin_lock); | ||
141 | #endif | ||
142 | |||
143 | #ifndef _write_lock | ||
144 | void __lockfunc _write_lock(rwlock_t *lock) | ||
145 | { | ||
146 | __write_lock(lock); | ||
147 | } | ||
148 | EXPORT_SYMBOL(_write_lock); | ||
149 | #endif | ||
150 | |||
151 | #else /* CONFIG_PREEMPT: */ | ||
152 | |||
153 | /* | 30 | /* |
31 | * The __lock_function inlines are taken from | ||
32 | * include/linux/spinlock_api_smp.h | ||
33 | */ | ||
34 | #else | ||
35 | /* | ||
36 | * We build the __lock_function inlines here. They are too large for | ||
37 | * inlining all over the place, but here is only one user per function | ||
38 | * which embeds them into the calling _lock_function below. | ||
39 | * | ||
154 | * This could be a long-held lock. We both prepare to spin for a long | 40 | * This could be a long-held lock. We both prepare to spin for a long |
155 | * time (making _this_ CPU preemptable if possible), and we also signal | 41 | * time (making _this_ CPU preemptable if possible), and we also signal |
156 | * towards that other CPU that it should break the lock ASAP. | 42 | * towards that other CPU that it should break the lock ASAP. |
157 | * | ||
158 | * (We do this in a function because inlining it would be excessive.) | ||
159 | */ | 43 | */ |
160 | |||
161 | #define BUILD_LOCK_OPS(op, locktype) \ | 44 | #define BUILD_LOCK_OPS(op, locktype) \ |
162 | void __lockfunc _##op##_lock(locktype##_t *lock) \ | 45 | void __lockfunc __##op##_lock(locktype##_t *lock) \ |
163 | { \ | 46 | { \ |
164 | for (;;) { \ | 47 | for (;;) { \ |
165 | preempt_disable(); \ | 48 | preempt_disable(); \ |
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \ | |||
175 | (lock)->break_lock = 0; \ | 58 | (lock)->break_lock = 0; \ |
176 | } \ | 59 | } \ |
177 | \ | 60 | \ |
178 | EXPORT_SYMBOL(_##op##_lock); \ | 61 | unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ |
179 | \ | ||
180 | unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | ||
181 | { \ | 62 | { \ |
182 | unsigned long flags; \ | 63 | unsigned long flags; \ |
183 | \ | 64 | \ |
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ | |||
198 | return flags; \ | 79 | return flags; \ |
199 | } \ | 80 | } \ |
200 | \ | 81 | \ |
201 | EXPORT_SYMBOL(_##op##_lock_irqsave); \ | 82 | void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ |
202 | \ | ||
203 | void __lockfunc _##op##_lock_irq(locktype##_t *lock) \ | ||
204 | { \ | 83 | { \ |
205 | _##op##_lock_irqsave(lock); \ | 84 | _##op##_lock_irqsave(lock); \ |
206 | } \ | 85 | } \ |
207 | \ | 86 | \ |
208 | EXPORT_SYMBOL(_##op##_lock_irq); \ | 87 | void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ |
209 | \ | ||
210 | void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | ||
211 | { \ | 88 | { \ |
212 | unsigned long flags; \ | 89 | unsigned long flags; \ |
213 | \ | 90 | \ |
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \ | |||
220 | local_bh_disable(); \ | 97 | local_bh_disable(); \ |
221 | local_irq_restore(flags); \ | 98 | local_irq_restore(flags); \ |
222 | } \ | 99 | } \ |
223 | \ | ||
224 | EXPORT_SYMBOL(_##op##_lock_bh) | ||
225 | 100 | ||
226 | /* | 101 | /* |
227 | * Build preemption-friendly versions of the following | 102 | * Build preemption-friendly versions of the following |
228 | * lock-spinning functions: | 103 | * lock-spinning functions: |
229 | * | 104 | * |
230 | * _[spin|read|write]_lock() | 105 | * __[spin|read|write]_lock() |
231 | * _[spin|read|write]_lock_irq() | 106 | * __[spin|read|write]_lock_irq() |
232 | * _[spin|read|write]_lock_irqsave() | 107 | * __[spin|read|write]_lock_irqsave() |
233 | * _[spin|read|write]_lock_bh() | 108 | * __[spin|read|write]_lock_bh() |
234 | */ | 109 | */ |
235 | BUILD_LOCK_OPS(spin, spinlock); | 110 | BUILD_LOCK_OPS(spin, spinlock); |
236 | BUILD_LOCK_OPS(read, rwlock); | 111 | BUILD_LOCK_OPS(read, rwlock); |
237 | BUILD_LOCK_OPS(write, rwlock); | 112 | BUILD_LOCK_OPS(write, rwlock); |
238 | 113 | ||
239 | #endif /* CONFIG_PREEMPT */ | 114 | #endif |
240 | 115 | ||
241 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 116 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
242 | 117 | ||
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
248 | } | 123 | } |
249 | EXPORT_SYMBOL(_spin_lock_nested); | 124 | EXPORT_SYMBOL(_spin_lock_nested); |
250 | 125 | ||
251 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 126 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, |
127 | int subclass) | ||
252 | { | 128 | { |
253 | unsigned long flags; | 129 | unsigned long flags; |
254 | 130 | ||
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock); | |||
272 | 148 | ||
273 | #endif | 149 | #endif |
274 | 150 | ||
275 | #ifndef _spin_unlock | 151 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK |
152 | int __lockfunc _spin_trylock(spinlock_t *lock) | ||
153 | { | ||
154 | return __spin_trylock(lock); | ||
155 | } | ||
156 | EXPORT_SYMBOL(_spin_trylock); | ||
157 | #endif | ||
158 | |||
159 | #ifndef CONFIG_INLINE_READ_TRYLOCK | ||
160 | int __lockfunc _read_trylock(rwlock_t *lock) | ||
161 | { | ||
162 | return __read_trylock(lock); | ||
163 | } | ||
164 | EXPORT_SYMBOL(_read_trylock); | ||
165 | #endif | ||
166 | |||
167 | #ifndef CONFIG_INLINE_WRITE_TRYLOCK | ||
168 | int __lockfunc _write_trylock(rwlock_t *lock) | ||
169 | { | ||
170 | return __write_trylock(lock); | ||
171 | } | ||
172 | EXPORT_SYMBOL(_write_trylock); | ||
173 | #endif | ||
174 | |||
175 | #ifndef CONFIG_INLINE_READ_LOCK | ||
176 | void __lockfunc _read_lock(rwlock_t *lock) | ||
177 | { | ||
178 | __read_lock(lock); | ||
179 | } | ||
180 | EXPORT_SYMBOL(_read_lock); | ||
181 | #endif | ||
182 | |||
183 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | ||
184 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | ||
185 | { | ||
186 | return __spin_lock_irqsave(lock); | ||
187 | } | ||
188 | EXPORT_SYMBOL(_spin_lock_irqsave); | ||
189 | #endif | ||
190 | |||
191 | #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ | ||
192 | void __lockfunc _spin_lock_irq(spinlock_t *lock) | ||
193 | { | ||
194 | __spin_lock_irq(lock); | ||
195 | } | ||
196 | EXPORT_SYMBOL(_spin_lock_irq); | ||
197 | #endif | ||
198 | |||
199 | #ifndef CONFIG_INLINE_SPIN_LOCK_BH | ||
200 | void __lockfunc _spin_lock_bh(spinlock_t *lock) | ||
201 | { | ||
202 | __spin_lock_bh(lock); | ||
203 | } | ||
204 | EXPORT_SYMBOL(_spin_lock_bh); | ||
205 | #endif | ||
206 | |||
207 | #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
208 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | ||
209 | { | ||
210 | return __read_lock_irqsave(lock); | ||
211 | } | ||
212 | EXPORT_SYMBOL(_read_lock_irqsave); | ||
213 | #endif | ||
214 | |||
215 | #ifndef CONFIG_INLINE_READ_LOCK_IRQ | ||
216 | void __lockfunc _read_lock_irq(rwlock_t *lock) | ||
217 | { | ||
218 | __read_lock_irq(lock); | ||
219 | } | ||
220 | EXPORT_SYMBOL(_read_lock_irq); | ||
221 | #endif | ||
222 | |||
223 | #ifndef CONFIG_INLINE_READ_LOCK_BH | ||
224 | void __lockfunc _read_lock_bh(rwlock_t *lock) | ||
225 | { | ||
226 | __read_lock_bh(lock); | ||
227 | } | ||
228 | EXPORT_SYMBOL(_read_lock_bh); | ||
229 | #endif | ||
230 | |||
231 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
232 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | ||
233 | { | ||
234 | return __write_lock_irqsave(lock); | ||
235 | } | ||
236 | EXPORT_SYMBOL(_write_lock_irqsave); | ||
237 | #endif | ||
238 | |||
239 | #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
240 | void __lockfunc _write_lock_irq(rwlock_t *lock) | ||
241 | { | ||
242 | __write_lock_irq(lock); | ||
243 | } | ||
244 | EXPORT_SYMBOL(_write_lock_irq); | ||
245 | #endif | ||
246 | |||
247 | #ifndef CONFIG_INLINE_WRITE_LOCK_BH | ||
248 | void __lockfunc _write_lock_bh(rwlock_t *lock) | ||
249 | { | ||
250 | __write_lock_bh(lock); | ||
251 | } | ||
252 | EXPORT_SYMBOL(_write_lock_bh); | ||
253 | #endif | ||
254 | |||
255 | #ifndef CONFIG_INLINE_SPIN_LOCK | ||
256 | void __lockfunc _spin_lock(spinlock_t *lock) | ||
257 | { | ||
258 | __spin_lock(lock); | ||
259 | } | ||
260 | EXPORT_SYMBOL(_spin_lock); | ||
261 | #endif | ||
262 | |||
263 | #ifndef CONFIG_INLINE_WRITE_LOCK | ||
264 | void __lockfunc _write_lock(rwlock_t *lock) | ||
265 | { | ||
266 | __write_lock(lock); | ||
267 | } | ||
268 | EXPORT_SYMBOL(_write_lock); | ||
269 | #endif | ||
270 | |||
271 | #ifndef CONFIG_INLINE_SPIN_UNLOCK | ||
276 | void __lockfunc _spin_unlock(spinlock_t *lock) | 272 | void __lockfunc _spin_unlock(spinlock_t *lock) |
277 | { | 273 | { |
278 | __spin_unlock(lock); | 274 | __spin_unlock(lock); |
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock) | |||
280 | EXPORT_SYMBOL(_spin_unlock); | 276 | EXPORT_SYMBOL(_spin_unlock); |
281 | #endif | 277 | #endif |
282 | 278 | ||
283 | #ifndef _write_unlock | 279 | #ifndef CONFIG_INLINE_WRITE_UNLOCK |
284 | void __lockfunc _write_unlock(rwlock_t *lock) | 280 | void __lockfunc _write_unlock(rwlock_t *lock) |
285 | { | 281 | { |
286 | __write_unlock(lock); | 282 | __write_unlock(lock); |
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock) | |||
288 | EXPORT_SYMBOL(_write_unlock); | 284 | EXPORT_SYMBOL(_write_unlock); |
289 | #endif | 285 | #endif |
290 | 286 | ||
291 | #ifndef _read_unlock | 287 | #ifndef CONFIG_INLINE_READ_UNLOCK |
292 | void __lockfunc _read_unlock(rwlock_t *lock) | 288 | void __lockfunc _read_unlock(rwlock_t *lock) |
293 | { | 289 | { |
294 | __read_unlock(lock); | 290 | __read_unlock(lock); |
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock) | |||
296 | EXPORT_SYMBOL(_read_unlock); | 292 | EXPORT_SYMBOL(_read_unlock); |
297 | #endif | 293 | #endif |
298 | 294 | ||
299 | #ifndef _spin_unlock_irqrestore | 295 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
300 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | 296 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
301 | { | 297 | { |
302 | __spin_unlock_irqrestore(lock, flags); | 298 | __spin_unlock_irqrestore(lock, flags); |
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |||
304 | EXPORT_SYMBOL(_spin_unlock_irqrestore); | 300 | EXPORT_SYMBOL(_spin_unlock_irqrestore); |
305 | #endif | 301 | #endif |
306 | 302 | ||
307 | #ifndef _spin_unlock_irq | 303 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
308 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) | 304 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) |
309 | { | 305 | { |
310 | __spin_unlock_irq(lock); | 306 | __spin_unlock_irq(lock); |
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock) | |||
312 | EXPORT_SYMBOL(_spin_unlock_irq); | 308 | EXPORT_SYMBOL(_spin_unlock_irq); |
313 | #endif | 309 | #endif |
314 | 310 | ||
315 | #ifndef _spin_unlock_bh | 311 | #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH |
316 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) | 312 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) |
317 | { | 313 | { |
318 | __spin_unlock_bh(lock); | 314 | __spin_unlock_bh(lock); |
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock) | |||
320 | EXPORT_SYMBOL(_spin_unlock_bh); | 316 | EXPORT_SYMBOL(_spin_unlock_bh); |
321 | #endif | 317 | #endif |
322 | 318 | ||
323 | #ifndef _read_unlock_irqrestore | 319 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE |
324 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 320 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
325 | { | 321 | { |
326 | __read_unlock_irqrestore(lock, flags); | 322 | __read_unlock_irqrestore(lock, flags); |
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
328 | EXPORT_SYMBOL(_read_unlock_irqrestore); | 324 | EXPORT_SYMBOL(_read_unlock_irqrestore); |
329 | #endif | 325 | #endif |
330 | 326 | ||
331 | #ifndef _read_unlock_irq | 327 | #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ |
332 | void __lockfunc _read_unlock_irq(rwlock_t *lock) | 328 | void __lockfunc _read_unlock_irq(rwlock_t *lock) |
333 | { | 329 | { |
334 | __read_unlock_irq(lock); | 330 | __read_unlock_irq(lock); |
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock) | |||
336 | EXPORT_SYMBOL(_read_unlock_irq); | 332 | EXPORT_SYMBOL(_read_unlock_irq); |
337 | #endif | 333 | #endif |
338 | 334 | ||
339 | #ifndef _read_unlock_bh | 335 | #ifndef CONFIG_INLINE_READ_UNLOCK_BH |
340 | void __lockfunc _read_unlock_bh(rwlock_t *lock) | 336 | void __lockfunc _read_unlock_bh(rwlock_t *lock) |
341 | { | 337 | { |
342 | __read_unlock_bh(lock); | 338 | __read_unlock_bh(lock); |
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock) | |||
344 | EXPORT_SYMBOL(_read_unlock_bh); | 340 | EXPORT_SYMBOL(_read_unlock_bh); |
345 | #endif | 341 | #endif |
346 | 342 | ||
347 | #ifndef _write_unlock_irqrestore | 343 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE |
348 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | 344 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) |
349 | { | 345 | { |
350 | __write_unlock_irqrestore(lock, flags); | 346 | __write_unlock_irqrestore(lock, flags); |
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |||
352 | EXPORT_SYMBOL(_write_unlock_irqrestore); | 348 | EXPORT_SYMBOL(_write_unlock_irqrestore); |
353 | #endif | 349 | #endif |
354 | 350 | ||
355 | #ifndef _write_unlock_irq | 351 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ |
356 | void __lockfunc _write_unlock_irq(rwlock_t *lock) | 352 | void __lockfunc _write_unlock_irq(rwlock_t *lock) |
357 | { | 353 | { |
358 | __write_unlock_irq(lock); | 354 | __write_unlock_irq(lock); |
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock) | |||
360 | EXPORT_SYMBOL(_write_unlock_irq); | 356 | EXPORT_SYMBOL(_write_unlock_irq); |
361 | #endif | 357 | #endif |
362 | 358 | ||
363 | #ifndef _write_unlock_bh | 359 | #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH |
364 | void __lockfunc _write_unlock_bh(rwlock_t *lock) | 360 | void __lockfunc _write_unlock_bh(rwlock_t *lock) |
365 | { | 361 | { |
366 | __write_unlock_bh(lock); | 362 | __write_unlock_bh(lock); |
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock) | |||
368 | EXPORT_SYMBOL(_write_unlock_bh); | 364 | EXPORT_SYMBOL(_write_unlock_bh); |
369 | #endif | 365 | #endif |
370 | 366 | ||
371 | #ifndef _spin_trylock_bh | 367 | #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH |
372 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | 368 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) |
373 | { | 369 | { |
374 | return __spin_trylock_bh(lock); | 370 | return __spin_trylock_bh(lock); |
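The block of #ifndef CONFIG_INLINE_* wrappers above replaces the old per-symbol "#ifndef _spin_lock" style: each out-of-line _*_lock()/_*_unlock() entry point is compiled only when the matching CONFIG_INLINE_* option is left unset, otherwise callers expand the __*_lock() inline from include/linux/spinlock_api_smp.h directly. A minimal sketch of that opt-in pattern follows; the function body is abbreviated and this is not a verbatim copy of the kernel headers.

/* Header side (roughly spinlock_api_smp.h): the always-inline fast path. */
static inline void __spin_lock(spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

#ifdef CONFIG_INLINE_SPIN_LOCK
/* Callers of spin_lock() get the inline body instead of a function call. */
#define _spin_lock(lock) __spin_lock(lock)
#endif

/* kernel/spinlock.c side: out-of-line copy, built only when the
 * configuration did not ask for inlining. */
#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _spin_lock(spinlock_t *lock)
{
        __spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);
#endif

Either way the slow path stays in one place; the Kconfig option only decides whether the tiny wrapper above exists as an exported symbol or disappears into each call site.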
diff --git a/kernel/srcu.c b/kernel/srcu.c index b0aeeaf22ce4..818d7d9aa03c 100644 --- a/kernel/srcu.c +++ b/kernel/srcu.c | |||
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp) | |||
49 | sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); | 49 | sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); |
50 | return (sp->per_cpu_ref ? 0 : -ENOMEM); | 50 | return (sp->per_cpu_ref ? 0 : -ENOMEM); |
51 | } | 51 | } |
52 | EXPORT_SYMBOL_GPL(init_srcu_struct); | ||
52 | 53 | ||
53 | /* | 54 | /* |
54 | * srcu_readers_active_idx -- returns approximate number of readers | 55 | * srcu_readers_active_idx -- returns approximate number of readers |
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp) | |||
97 | free_percpu(sp->per_cpu_ref); | 98 | free_percpu(sp->per_cpu_ref); |
98 | sp->per_cpu_ref = NULL; | 99 | sp->per_cpu_ref = NULL; |
99 | } | 100 | } |
101 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); | ||
100 | 102 | ||
101 | /** | 103 | /** |
102 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 104 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp) | |||
118 | preempt_enable(); | 120 | preempt_enable(); |
119 | return idx; | 121 | return idx; |
120 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(srcu_read_lock); | ||
121 | 124 | ||
122 | /** | 125 | /** |
123 | * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. | 126 | * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. |
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx) | |||
136 | per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; | 139 | per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; |
137 | preempt_enable(); | 140 | preempt_enable(); |
138 | } | 141 | } |
142 | EXPORT_SYMBOL_GPL(srcu_read_unlock); | ||
139 | 143 | ||
140 | /** | 144 | /* |
141 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | 145 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
142 | * @sp: srcu_struct with which to synchronize. | ||
143 | * | ||
144 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
145 | * As with classic RCU, the updater must use some separate means of | ||
146 | * synchronizing concurrent updates. Can block; must be called from | ||
147 | * process context. | ||
148 | * | ||
149 | * Note that it is illegal to call synchornize_srcu() from the corresponding | ||
150 | * SRCU read-side critical section; doing so will result in deadlock. | ||
151 | * However, it is perfectly legal to call synchronize_srcu() on one | ||
152 | * srcu_struct from some other srcu_struct's read-side critical section. | ||
153 | */ | 146 | */ |
154 | void synchronize_srcu(struct srcu_struct *sp) | 147 | void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void)) |
155 | { | 148 | { |
156 | int idx; | 149 | int idx; |
157 | 150 | ||
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
173 | return; | 166 | return; |
174 | } | 167 | } |
175 | 168 | ||
176 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 169 | sync_func(); /* Force memory barrier on all CPUs. */ |
177 | 170 | ||
178 | /* | 171 | /* |
179 | * The preceding synchronize_sched() ensures that any CPU that | 172 | * The preceding synchronize_sched() ensures that any CPU that |
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
190 | idx = sp->completed & 0x1; | 183 | idx = sp->completed & 0x1; |
191 | sp->completed++; | 184 | sp->completed++; |
192 | 185 | ||
193 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 186 | sync_func(); /* Force memory barrier on all CPUs. */ |
194 | 187 | ||
195 | /* | 188 | /* |
196 | * At this point, because of the preceding synchronize_sched(), | 189 | * At this point, because of the preceding synchronize_sched(), |
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
203 | while (srcu_readers_active_idx(sp, idx)) | 196 | while (srcu_readers_active_idx(sp, idx)) |
204 | schedule_timeout_interruptible(1); | 197 | schedule_timeout_interruptible(1); |
205 | 198 | ||
206 | synchronize_sched(); /* Force memory barrier on all CPUs. */ | 199 | sync_func(); /* Force memory barrier on all CPUs. */ |
207 | 200 | ||
208 | /* | 201 | /* |
209 | * The preceding synchronize_sched() forces all srcu_read_unlock() | 202 | * The preceding synchronize_sched() forces all srcu_read_unlock() |
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp) | |||
237 | } | 230 | } |
238 | 231 | ||
239 | /** | 232 | /** |
233 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion | ||
234 | * @sp: srcu_struct with which to synchronize. | ||
235 | * | ||
236 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
237 | * As with classic RCU, the updater must use some separate means of | ||
238 | * synchronizing concurrent updates. Can block; must be called from | ||
239 | * process context. | ||
240 | * | ||
241 | * Note that it is illegal to call synchronize_srcu() from the corresponding | ||
242 | * SRCU read-side critical section; doing so will result in deadlock. | ||
243 | * However, it is perfectly legal to call synchronize_srcu() on one | ||
244 | * srcu_struct from some other srcu_struct's read-side critical section. | ||
245 | */ | ||
246 | void synchronize_srcu(struct srcu_struct *sp) | ||
247 | { | ||
248 | __synchronize_srcu(sp, synchronize_sched); | ||
249 | } | ||
250 | EXPORT_SYMBOL_GPL(synchronize_srcu); | ||
251 | |||
252 | /** | ||
253 | * synchronize_srcu_expedited - like synchronize_srcu, but less patient | ||
254 | * @sp: srcu_struct with which to synchronize. | ||
255 | * | ||
256 | * Flip the completed counter, and wait for the old count to drain to zero. | ||
257 | * As with classic RCU, the updater must use some separate means of | ||
258 | * synchronizing concurrent updates. Can block; must be called from | ||
259 | * process context. | ||
260 | * | ||
261 | * Note that it is illegal to call synchronize_srcu_expedited() | ||
262 | * from the corresponding SRCU read-side critical section; doing so | ||
263 | * will result in deadlock. However, it is perfectly legal to call | ||
264 | * synchronize_srcu_expedited() on one srcu_struct from some other | ||
265 | * srcu_struct's read-side critical section. | ||
266 | */ | ||
267 | void synchronize_srcu_expedited(struct srcu_struct *sp) | ||
268 | { | ||
269 | __synchronize_srcu(sp, synchronize_sched_expedited); | ||
270 | } | ||
271 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); | ||
272 | |||
273 | /** | ||
240 | * srcu_batches_completed - return batches completed. | 274 | * srcu_batches_completed - return batches completed. |
241 | * @sp: srcu_struct on which to report batch completion. | 275 | * @sp: srcu_struct on which to report batch completion. |
242 | * | 276 | * |
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp) | |||
248 | { | 282 | { |
249 | return sp->completed; | 283 | return sp->completed; |
250 | } | 284 | } |
251 | |||
252 | EXPORT_SYMBOL_GPL(init_srcu_struct); | ||
253 | EXPORT_SYMBOL_GPL(cleanup_srcu_struct); | ||
254 | EXPORT_SYMBOL_GPL(srcu_read_lock); | ||
255 | EXPORT_SYMBOL_GPL(srcu_read_unlock); | ||
256 | EXPORT_SYMBOL_GPL(synchronize_srcu); | ||
257 | EXPORT_SYMBOL_GPL(srcu_batches_completed); | 285 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
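With the refactoring above, synchronize_srcu() and the new synchronize_srcu_expedited() are thin wrappers that pass either synchronize_sched or synchronize_sched_expedited into the shared __synchronize_srcu() slow path, and each SRCU entry point is now exported next to its definition. A hedged usage sketch of the exported API from an updater's point of view; struct gadget, my_srcu and the field names are illustrative, not part of the patch.

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gadget {
        int value;
};

/* init_srcu_struct(&my_srcu) at setup, cleanup_srcu_struct(&my_srcu) at teardown. */
static struct srcu_struct my_srcu;
static struct gadget *global_gadget;

static int read_value(void)
{
        int idx, val;

        idx = srcu_read_lock(&my_srcu);            /* remember which side to unlock */
        val = rcu_dereference(global_gadget)->value;
        srcu_read_unlock(&my_srcu, idx);
        return val;
}

static void replace_gadget(struct gadget *newg)
{
        struct gadget *oldg = global_gadget;

        rcu_assign_pointer(global_gadget, newg);
        /* Same semantics as synchronize_srcu(), but burns CPU to shorten the
         * wait; must never be called from a read-side section on my_srcu. */
        synchronize_srcu_expedited(&my_srcu);
        kfree(oldg);
}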
diff --git a/kernel/sys.c b/kernel/sys.c index 255475d163e0..ce17760d9c51 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1110,6 +1110,8 @@ SYSCALL_DEFINE0(setsid) | |||
1110 | err = session; | 1110 | err = session; |
1111 | out: | 1111 | out: |
1112 | write_unlock_irq(&tasklist_lock); | 1112 | write_unlock_irq(&tasklist_lock); |
1113 | if (err > 0) | ||
1114 | proc_sid_connector(group_leader); | ||
1113 | return err; | 1115 | return err; |
1114 | } | 1116 | } |
1115 | 1117 | ||
@@ -1546,24 +1548,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
1546 | if (arg4 | arg5) | 1548 | if (arg4 | arg5) |
1547 | return -EINVAL; | 1549 | return -EINVAL; |
1548 | switch (arg2) { | 1550 | switch (arg2) { |
1549 | case 0: | 1551 | case PR_MCE_KILL_CLEAR: |
1550 | if (arg3 != 0) | 1552 | if (arg3 != 0) |
1551 | return -EINVAL; | 1553 | return -EINVAL; |
1552 | current->flags &= ~PF_MCE_PROCESS; | 1554 | current->flags &= ~PF_MCE_PROCESS; |
1553 | break; | 1555 | break; |
1554 | case 1: | 1556 | case PR_MCE_KILL_SET: |
1555 | current->flags |= PF_MCE_PROCESS; | 1557 | current->flags |= PF_MCE_PROCESS; |
1556 | if (arg3 != 0) | 1558 | if (arg3 == PR_MCE_KILL_EARLY) |
1557 | current->flags |= PF_MCE_EARLY; | 1559 | current->flags |= PF_MCE_EARLY; |
1558 | else | 1560 | else if (arg3 == PR_MCE_KILL_LATE) |
1559 | current->flags &= ~PF_MCE_EARLY; | 1561 | current->flags &= ~PF_MCE_EARLY; |
1562 | else if (arg3 == PR_MCE_KILL_DEFAULT) | ||
1563 | current->flags &= | ||
1564 | ~(PF_MCE_EARLY|PF_MCE_PROCESS); | ||
1565 | else | ||
1566 | return -EINVAL; | ||
1560 | break; | 1567 | break; |
1561 | default: | 1568 | default: |
1562 | return -EINVAL; | 1569 | return -EINVAL; |
1563 | } | 1570 | } |
1564 | error = 0; | 1571 | error = 0; |
1565 | break; | 1572 | break; |
1566 | 1573 | case PR_MCE_KILL_GET: | |
1574 | if (arg2 | arg3 | arg4 | arg5) | ||
1575 | return -EINVAL; | ||
1576 | if (current->flags & PF_MCE_PROCESS) | ||
1577 | error = (current->flags & PF_MCE_EARLY) ? | ||
1578 | PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; | ||
1579 | else | ||
1580 | error = PR_MCE_KILL_DEFAULT; | ||
1581 | break; | ||
1567 | default: | 1582 | default: |
1568 | error = -EINVAL; | 1583 | error = -EINVAL; |
1569 | break; | 1584 | break; |
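The reworked PR_MCE_KILL handler above replaces the bare 0/1 constants with named PR_MCE_KILL_* arguments and adds a PR_MCE_KILL_GET sub-command that reports the current policy. A small userspace sketch of driving the interface; the fallback #define values are assumptions taken from the prctl numbering of this kernel series, not from the hunk itself.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MCE_KILL                     /* fallbacks for older headers */
#define PR_MCE_KILL          33
#define PR_MCE_KILL_CLEAR     0
#define PR_MCE_KILL_SET       1
#define PR_MCE_KILL_LATE      0
#define PR_MCE_KILL_EARLY     1
#define PR_MCE_KILL_DEFAULT   2
#define PR_MCE_KILL_GET      34
#endif

int main(void)
{
        /* Ask for early (synchronous) kill on detected memory corruption. */
        if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
                perror("PR_MCE_KILL");

        /* Read the policy back; all unused arguments must be zero. */
        printf("policy = %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
        return 0;
}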
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0d949c517412..4dbf93a52ee9 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/sysrq.h> | 36 | #include <linux/sysrq.h> |
37 | #include <linux/highuid.h> | 37 | #include <linux/highuid.h> |
38 | #include <linux/writeback.h> | 38 | #include <linux/writeback.h> |
39 | #include <linux/ratelimit.h> | ||
39 | #include <linux/hugetlb.h> | 40 | #include <linux/hugetlb.h> |
40 | #include <linux/initrd.h> | 41 | #include <linux/initrd.h> |
41 | #include <linux/key.h> | 42 | #include <linux/key.h> |
@@ -158,6 +159,8 @@ extern int no_unaligned_warning; | |||
158 | extern int unaligned_dump_stack; | 159 | extern int unaligned_dump_stack; |
159 | #endif | 160 | #endif |
160 | 161 | ||
162 | extern struct ratelimit_state printk_ratelimit_state; | ||
163 | |||
161 | #ifdef CONFIG_RT_MUTEXES | 164 | #ifdef CONFIG_RT_MUTEXES |
162 | extern int max_lock_depth; | 165 | extern int max_lock_depth; |
163 | #endif | 166 | #endif |
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index b38423ca711a..b6e7aaea4604 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
@@ -1521,7 +1521,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
1521 | if (!table->ctl_name && table->strategy) | 1521 | if (!table->ctl_name && table->strategy) |
1522 | set_fail(&fail, table, "Strategy without ctl_name"); | 1522 | set_fail(&fail, table, "Strategy without ctl_name"); |
1523 | #endif | 1523 | #endif |
1524 | #ifdef CONFIG_PROC_FS | 1524 | #ifdef CONFIG_PROC_SYSCTL |
1525 | if (table->procname && !table->proc_handler) | 1525 | if (table->procname && !table->proc_handler) |
1526 | set_fail(&fail, table, "No proc_handler"); | 1526 | set_fail(&fail, table, "No proc_handler"); |
1527 | #endif | 1527 | #endif |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 37ba67e33265..6dc4e5ef7a01 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -740,7 +740,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, | |||
740 | out: | 740 | out: |
741 | mutex_unlock(&ftrace_profile_lock); | 741 | mutex_unlock(&ftrace_profile_lock); |
742 | 742 | ||
743 | filp->f_pos += cnt; | 743 | *ppos += cnt; |
744 | 744 | ||
745 | return cnt; | 745 | return cnt; |
746 | } | 746 | } |
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
2222 | ret = ftrace_process_regex(parser->buffer, | 2222 | ret = ftrace_process_regex(parser->buffer, |
2223 | parser->idx, enable); | 2223 | parser->idx, enable); |
2224 | if (ret) | 2224 | if (ret) |
2225 | goto out; | 2225 | goto out_unlock; |
2226 | 2226 | ||
2227 | trace_parser_clear(parser); | 2227 | trace_parser_clear(parser); |
2228 | } | 2228 | } |
2229 | 2229 | ||
2230 | ret = read; | 2230 | ret = read; |
2231 | 2231 | out_unlock: | |
2232 | mutex_unlock(&ftrace_regex_lock); | 2232 | mutex_unlock(&ftrace_regex_lock); |
2233 | out: | 2233 | |
2234 | return ret; | 2234 | return ret; |
2235 | } | 2235 | } |
2236 | 2236 | ||
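Two small fixes above: ftrace_profile_write() now advances the *ppos pointer the VFS hands it instead of poking filp->f_pos directly, and ftrace_regex_write() renames its exit label so the early-error path still releases ftrace_regex_lock. A generic sketch of the second fix; example_lock and do_parse are illustrative stand-ins, not names from the patch.

#include <linux/types.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static ssize_t do_parse(const char *buf, size_t cnt)
{
        return 0;                       /* stand-in for the real parsing step */
}

static ssize_t example_write(const char *buf, size_t cnt)
{
        ssize_t ret;

        mutex_lock(&example_lock);
        ret = do_parse(buf, cnt);
        if (ret)
                goto out_unlock;        /* previously jumped past the unlock */
        ret = cnt;
out_unlock:
        mutex_unlock(&example_lock);
        return ret;
}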
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d4ff01970547..5dd017fea6f5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -483,7 +483,7 @@ struct ring_buffer_iter { | |||
483 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 483 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
484 | #define DEBUG_SHIFT 0 | 484 | #define DEBUG_SHIFT 0 |
485 | 485 | ||
486 | static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) | 486 | static inline u64 rb_time_stamp(struct ring_buffer *buffer) |
487 | { | 487 | { |
488 | /* shift to debug/test normalization and TIME_EXTENTS */ | 488 | /* shift to debug/test normalization and TIME_EXTENTS */ |
489 | return buffer->clock() << DEBUG_SHIFT; | 489 | return buffer->clock() << DEBUG_SHIFT; |
@@ -494,7 +494,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) | |||
494 | u64 time; | 494 | u64 time; |
495 | 495 | ||
496 | preempt_disable_notrace(); | 496 | preempt_disable_notrace(); |
497 | time = rb_time_stamp(buffer, cpu); | 497 | time = rb_time_stamp(buffer); |
498 | preempt_enable_no_resched_notrace(); | 498 | preempt_enable_no_resched_notrace(); |
499 | 499 | ||
500 | return time; | 500 | return time; |
@@ -599,7 +599,7 @@ static struct list_head *rb_list_head(struct list_head *list) | |||
599 | } | 599 | } |
600 | 600 | ||
601 | /* | 601 | /* |
602 | * rb_is_head_page - test if the give page is the head page | 602 | * rb_is_head_page - test if the given page is the head page |
603 | * | 603 | * |
604 | * Because the reader may move the head_page pointer, we can | 604 | * Because the reader may move the head_page pointer, we can |
605 | * not trust what the head page is (it may be pointing to | 605 | * not trust what the head page is (it may be pointing to |
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1193 | atomic_inc(&cpu_buffer->record_disabled); | 1193 | atomic_inc(&cpu_buffer->record_disabled); |
1194 | synchronize_sched(); | 1194 | synchronize_sched(); |
1195 | 1195 | ||
1196 | spin_lock_irq(&cpu_buffer->reader_lock); | ||
1196 | rb_head_page_deactivate(cpu_buffer); | 1197 | rb_head_page_deactivate(cpu_buffer); |
1197 | 1198 | ||
1198 | for (i = 0; i < nr_pages; i++) { | 1199 | for (i = 0; i < nr_pages; i++) { |
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
1207 | return; | 1208 | return; |
1208 | 1209 | ||
1209 | rb_reset_cpu(cpu_buffer); | 1210 | rb_reset_cpu(cpu_buffer); |
1211 | spin_unlock_irq(&cpu_buffer->reader_lock); | ||
1210 | 1212 | ||
1211 | rb_check_pages(cpu_buffer); | 1213 | rb_check_pages(cpu_buffer); |
1212 | 1214 | ||
@@ -1868,7 +1870,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, | |||
1868 | * Nested commits always have zero deltas, so | 1870 | * Nested commits always have zero deltas, so |
1869 | * just reread the time stamp | 1871 | * just reread the time stamp |
1870 | */ | 1872 | */ |
1871 | *ts = rb_time_stamp(buffer, cpu_buffer->cpu); | 1873 | *ts = rb_time_stamp(buffer); |
1872 | next_page->page->time_stamp = *ts; | 1874 | next_page->page->time_stamp = *ts; |
1873 | } | 1875 | } |
1874 | 1876 | ||
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2111 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) | 2113 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
2112 | goto out_fail; | 2114 | goto out_fail; |
2113 | 2115 | ||
2114 | ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); | 2116 | ts = rb_time_stamp(cpu_buffer->buffer); |
2115 | 2117 | ||
2116 | /* | 2118 | /* |
2117 | * Only the first commit can update the timestamp. | 2119 | * Only the first commit can update the timestamp. |
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
2681 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | 2683 | EXPORT_SYMBOL_GPL(ring_buffer_entries); |
2682 | 2684 | ||
2683 | /** | 2685 | /** |
2684 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 2686 | * ring_buffer_overruns - get the number of overruns in buffer |
2685 | * @buffer: The ring buffer | 2687 | * @buffer: The ring buffer |
2686 | * | 2688 | * |
2687 | * Returns the total number of overruns in the ring buffer | 2689 | * Returns the total number of overruns in the ring buffer |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c820b0310a12..b20d3ec75de9 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -2440,7 +2440,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2440 | return ret; | 2440 | return ret; |
2441 | } | 2441 | } |
2442 | 2442 | ||
2443 | filp->f_pos += cnt; | 2443 | *ppos += cnt; |
2444 | 2444 | ||
2445 | return cnt; | 2445 | return cnt; |
2446 | } | 2446 | } |
@@ -2582,7 +2582,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2582 | } | 2582 | } |
2583 | mutex_unlock(&trace_types_lock); | 2583 | mutex_unlock(&trace_types_lock); |
2584 | 2584 | ||
2585 | filp->f_pos += cnt; | 2585 | *ppos += cnt; |
2586 | 2586 | ||
2587 | return cnt; | 2587 | return cnt; |
2588 | } | 2588 | } |
@@ -2764,7 +2764,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2764 | if (err) | 2764 | if (err) |
2765 | return err; | 2765 | return err; |
2766 | 2766 | ||
2767 | filp->f_pos += ret; | 2767 | *ppos += ret; |
2768 | 2768 | ||
2769 | return ret; | 2769 | return ret; |
2770 | } | 2770 | } |
@@ -3299,7 +3299,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3299 | } | 3299 | } |
3300 | } | 3300 | } |
3301 | 3301 | ||
3302 | filp->f_pos += cnt; | 3302 | *ppos += cnt; |
3303 | 3303 | ||
3304 | /* If check pages failed, return ENOMEM */ | 3304 | /* If check pages failed, return ENOMEM */ |
3305 | if (tracing_disabled) | 3305 | if (tracing_disabled) |
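All four trace.c hunks above are the same fix: a file's .write handler must advance the offset through the *ppos argument it was given, because the position the caller is actually using (for pwrite() and similar paths) is passed in via that pointer and need not be filp->f_pos. A minimal sketch of the corrected handler shape; my_write and my_buf are illustrative names.

#include <linux/fs.h>
#include <linux/uaccess.h>

static char my_buf[64];

static ssize_t my_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        if (cnt > sizeof(my_buf))
                cnt = sizeof(my_buf);
        if (copy_from_user(my_buf, ubuf, cnt))
                return -EFAULT;

        *ppos += cnt;                   /* not filp->f_pos += cnt */
        return cnt;
}

The handler is wired up through a struct file_operations .write pointer as usual; only the way the offset is advanced changes.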
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ed17565826b0..b6c12c6a1bcd 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
69 | * @s: trace sequence descriptor | 69 | * @s: trace sequence descriptor |
70 | * @fmt: printf format string | 70 | * @fmt: printf format string |
71 | * | 71 | * |
72 | * It returns 0 if the formatted string would not fit in the buffer's | ||
73 | * free space, and 1 otherwise. | ||
74 | * | ||
72 | * The tracer may use either sequence operations or its own | 75 | * The tracer may use either sequence operations or its own |
73 | * copy to user routines. To simplify formating of a trace | 76 | * copy to user routines. To simplify formating of a trace |
74 | * trace_seq_printf is used to store strings into a special | 77 | * trace_seq_printf is used to store strings into a special |
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
95 | 98 | ||
96 | s->len += ret; | 99 | s->len += ret; |
97 | 100 | ||
98 | return len; | 101 | return 1; |
99 | } | 102 | } |
100 | EXPORT_SYMBOL_GPL(trace_seq_printf); | 103 | EXPORT_SYMBOL_GPL(trace_seq_printf); |
101 | 104 | ||
diff --git a/kernel/user.c b/kernel/user.c index 2c000e7132ac..46d0165ca70c 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -330,9 +330,9 @@ done: | |||
330 | */ | 330 | */ |
331 | static void free_user(struct user_struct *up, unsigned long flags) | 331 | static void free_user(struct user_struct *up, unsigned long flags) |
332 | { | 332 | { |
333 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
334 | INIT_DELAYED_WORK(&up->work, cleanup_user_struct); | 333 | INIT_DELAYED_WORK(&up->work, cleanup_user_struct); |
335 | schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); | 334 | schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); |
335 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
336 | } | 336 | } |
337 | 337 | ||
338 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ | 338 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 47cdd7e76f2b..67e526b6ae81 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -685,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); | |||
685 | int schedule_on_each_cpu(work_func_t func) | 685 | int schedule_on_each_cpu(work_func_t func) |
686 | { | 686 | { |
687 | int cpu; | 687 | int cpu; |
688 | int orig = -1; | ||
688 | struct work_struct *works; | 689 | struct work_struct *works; |
689 | 690 | ||
690 | works = alloc_percpu(struct work_struct); | 691 | works = alloc_percpu(struct work_struct); |
@@ -692,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func) | |||
692 | return -ENOMEM; | 693 | return -ENOMEM; |
693 | 694 | ||
694 | get_online_cpus(); | 695 | get_online_cpus(); |
696 | |||
697 | /* | ||
698 | * When running in keventd don't schedule a work item on | ||
699 | * itself. Can just call directly because the work queue is | ||
700 | * already bound. This also is faster. | ||
701 | */ | ||
702 | if (current_is_keventd()) | ||
703 | orig = raw_smp_processor_id(); | ||
704 | |||
695 | for_each_online_cpu(cpu) { | 705 | for_each_online_cpu(cpu) { |
696 | struct work_struct *work = per_cpu_ptr(works, cpu); | 706 | struct work_struct *work = per_cpu_ptr(works, cpu); |
697 | 707 | ||
698 | INIT_WORK(work, func); | 708 | INIT_WORK(work, func); |
699 | schedule_work_on(cpu, work); | 709 | if (cpu != orig) |
710 | schedule_work_on(cpu, work); | ||
700 | } | 711 | } |
712 | if (orig >= 0) | ||
713 | func(per_cpu_ptr(works, orig)); | ||
714 | |||
701 | for_each_online_cpu(cpu) | 715 | for_each_online_cpu(cpu) |
702 | flush_work(per_cpu_ptr(works, cpu)); | 716 | flush_work(per_cpu_ptr(works, cpu)); |
717 | |||
703 | put_online_cpus(); | 718 | put_online_cpus(); |
704 | free_percpu(works); | 719 | free_percpu(works); |
705 | return 0; | 720 | return 0; |
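The schedule_on_each_cpu() change above notices when the caller is already running in keventd and, as the added comment explains, runs the function directly on the local CPU instead of queueing and flushing a work item on the very queue the caller is executing from; the other CPUs are handled as before. A hedged sketch of a caller, assuming a trivial per-cpu counter; reset_all_counters, reset_my_counter and my_counter are illustrative names.

#include <linux/workqueue.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

/* Runs once on every online CPU, bound to that CPU by the workqueue. */
static void reset_my_counter(struct work_struct *unused)
{
        __get_cpu_var(my_counter) = 0;
}

static int reset_all_counters(void)
{
        /* Returns 0 on success or -ENOMEM if the per-cpu work items could
         * not be allocated; with the change above it is also usable from
         * keventd context. */
        return schedule_on_each_cpu(reset_my_counter);
}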