path: root/kernel
author	Ingo Molnar <mingo@elte.hu>	2008-11-03 02:57:41 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-03 02:57:41 -0500
commit	db5935001a43528e673ad26ffec9d98c60a496a9 (patch)
tree	8e735327a97beccabb5d94ef93df25d2bacda705 /kernel
parent	34f3a814eef8069a24e5b3ebcf27aba9dabac2ea (diff)
parent	45beca08dd8b6d6a65c5ffd730af2eac7a2c7a03 (diff)
Merge commit 'v2.6.28-rc3' into sched/core
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup_freezer.c	51
-rw-r--r--	kernel/freezer.c	20
-rw-r--r--	kernel/power/Kconfig	2
-rw-r--r--	kernel/profile.c	2
-rw-r--r--	kernel/resource.c	6
-rw-r--r--	kernel/sched_debug.c	2
-rw-r--r--	kernel/signal.c	3
-rw-r--r--	kernel/trace/Kconfig	2
-rw-r--r--	kernel/trace/ftrace.c	8
-rw-r--r--	kernel/trace/trace.c	7
-rw-r--r--	kernel/trace/trace.h	20
11 files changed, 64 insertions, 59 deletions
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index e95056954498..7fa476f01d05 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -162,9 +162,13 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 			      struct task_struct *task)
 {
 	struct freezer *freezer;
-	int retval;
 
-	/* Anything frozen can't move or be moved to/from */
+	/*
+	 * Anything frozen can't move or be moved to/from.
+	 *
+	 * Since orig_freezer->state == FROZEN means that @task has been
+	 * frozen, so it's sufficient to check the latter condition.
+	 */
 
 	if (is_task_frozen_enough(task))
 		return -EBUSY;
@@ -173,13 +177,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 	if (freezer->state == CGROUP_FROZEN)
 		return -EBUSY;
 
-	retval = 0;
-	task_lock(task);
-	freezer = task_freezer(task);
-	if (freezer->state == CGROUP_FROZEN)
-		retval = -EBUSY;
-	task_unlock(task);
-	return retval;
+	return 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -190,8 +188,9 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	freezer = task_freezer(task);
 	task_unlock(task);
 
-	BUG_ON(freezer->state == CGROUP_FROZEN);
 	spin_lock_irq(&freezer->lock);
+	BUG_ON(freezer->state == CGROUP_FROZEN);
+
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
 		freeze_task(task, true);
@@ -276,25 +275,18 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	return num_cant_freeze_now ? -EBUSY : 0;
 }
 
-static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 {
 	struct cgroup_iter it;
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		int do_wake;
-
-		task_lock(task);
-		do_wake = __thaw_process(task);
-		task_unlock(task);
-		if (do_wake)
-			wake_up_process(task);
+		thaw_process(task);
 	}
 	cgroup_iter_end(cgroup, &it);
-	freezer->state = CGROUP_THAWED;
 
-	return 0;
+	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -304,27 +296,22 @@ static int freezer_change_state(struct cgroup *cgroup,
 	int retval = 0;
 
 	freezer = cgroup_freezer(cgroup);
+
 	spin_lock_irq(&freezer->lock);
+
 	update_freezer_state(cgroup, freezer);
 	if (goal_state == freezer->state)
 		goto out;
-	switch (freezer->state) {
+
+	switch (goal_state) {
 	case CGROUP_THAWED:
-		retval = try_to_freeze_cgroup(cgroup, freezer);
+		unfreeze_cgroup(cgroup, freezer);
 		break;
-	case CGROUP_FREEZING:
-		if (goal_state == CGROUP_FROZEN) {
-			/* Userspace is retrying after
-			 * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
-			retval = try_to_freeze_cgroup(cgroup, freezer);
-			break;
-		}
-		/* state == FREEZING and goal_state == THAWED, so unfreeze */
 	case CGROUP_FROZEN:
-		retval = unfreeze_cgroup(cgroup, freezer);
+		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
-		break;
+		BUG();
 	}
 out:
 	spin_unlock_irq(&freezer->lock);
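
The dropped CGROUP_FREEZING case encoded the expectation that userspace retries after writing "FROZEN" to freezer.state returns -EBUSY; with the switch now keyed on goal_state, a retry simply takes the CGROUP_FROZEN branch again. A minimal userspace sketch of that retry loop, assuming a hypothetical freezer hierarchy mounted at /dev/cgroup with a group named "myjob" (neither is part of this patch):

/* Hypothetical example, not part of the patch. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a state string ("FROZEN" or "THAWED") to a freezer.state file. */
static int write_state(const char *path, const char *state)
{
	int fd, err = 0;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -errno;
	if (write(fd, state, strlen(state)) < 0)
		err = -errno;
	close(fd);
	return err;
}

int main(void)
{
	/* Assumed mount point and group name; adjust for the real hierarchy. */
	const char *path = "/dev/cgroup/myjob/freezer.state";
	int err = 0, tries;

	for (tries = 0; tries < 10; tries++) {
		err = write_state(path, "FROZEN");
		if (err != -EBUSY)
			break;
		usleep(10000);	/* some tasks could not be frozen yet; retry */
	}

	if (err)
		fprintf(stderr, "freeze failed: %s\n", strerror(-err));
	else
		puts("cgroup frozen");
	return 0;
}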
diff --git a/kernel/freezer.c b/kernel/freezer.c
index ba6248b323ef..2f4936cf7083 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -121,16 +121,7 @@ void cancel_freezing(struct task_struct *p)
 	}
 }
 
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int __thaw_process(struct task_struct *p)
+static int __thaw_process(struct task_struct *p)
 {
 	if (frozen(p)) {
 		p->flags &= ~PF_FROZEN;
@@ -140,6 +131,15 @@ int __thaw_process(struct task_struct *p)
 	return 0;
 }
 
+/*
+ * Wake up a frozen process
+ *
+ * task_lock() is needed to prevent the race with refrigerator() which may
+ * occur if the freezing of tasks fails. Namely, without the lock, if the
+ * freezing of tasks failed, thaw_tasks() might have run before a task in
+ * refrigerator() could call frozen_process(), in which case the task would be
+ * frozen and no one would thaw it.
+ */
 int thaw_process(struct task_struct *p)
 {
 	task_lock(p);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index dcd165f92a88..23bd4daeb96b 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -96,7 +96,7 @@ config SUSPEND
 
 config PM_TEST_SUSPEND
 	bool "Test suspend/resume and wakealarm during bootup"
-	depends on SUSPEND && PM_DEBUG && RTC_LIB=y
+	depends on SUSPEND && PM_DEBUG && RTC_CLASS=y
 	---help---
 	  This option will let you suspend your machine during bootup, and
 	  make it wake up a few seconds later using an RTC wakeup alarm.
diff --git a/kernel/profile.c b/kernel/profile.c
index a9e422df6bf6..9830a037d8db 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -102,7 +102,7 @@ int profile_setup(char *str)
 __setup("profile=", profile_setup);
 
 
-int profile_init(void)
+int __ref profile_init(void)
 {
 	int buffer_bytes;
 	if (!prof_on)
diff --git a/kernel/resource.c b/kernel/resource.c
index 7fec0e427234..4337063663ef 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -17,6 +17,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/device.h>
+#include <linux/pfn.h>
 #include <asm/io.h>
 
 
@@ -522,7 +523,7 @@ static void __init __reserve_region_with_split(struct resource *root,
 {
 	struct resource *parent = root;
 	struct resource *conflict;
-	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 
 	if (!res)
 		return;
@@ -849,7 +850,8 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 			continue;
 		if (p->end < addr)
 			continue;
-		if (p->start <= addr && (p->end >= addr + size - 1))
+		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
+		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
 			continue;
 		printk(KERN_WARNING "resource map sanity check conflict: "
 		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ad958c1ec708..5ae17762ec32 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -319,7 +319,7 @@ static int __init init_sched_debug_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("sched_debug", 0644, NULL, &sched_debug_fops);
+	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index 105217da5c82..4530fc654455 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1144,7 +1144,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 		struct task_struct * p;
 
 		for_each_process(p) {
-			if (p->pid > 1 && !same_thread_group(p, current)) {
+			if (task_pid_vnr(p) > 1 &&
+					!same_thread_group(p, current)) {
 				int err = group_send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e0cea282e0c5..b58f43bec363 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -8,7 +8,6 @@ config NOP_TRACER
 
 config HAVE_FUNCTION_TRACER
 	bool
-	select NOP_TRACER
 
 config HAVE_DYNAMIC_FTRACE
 	bool
@@ -28,6 +27,7 @@ config TRACING
 	select RING_BUFFER
 	select STACKTRACE
 	select TRACEPOINTS
+	select NOP_TRACER
 
 menu "Tracers"
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7618c528756b..4a39d24568c8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1339,6 +1339,14 @@ void __init ftrace_init(void)
 }
 
 #else
+
+static int __init ftrace_nodyn_init(void)
+{
+	ftrace_enabled = 1;
+	return 0;
+}
+device_initcall(ftrace_nodyn_init);
+
 # define ftrace_startup()		do { } while (0)
 # define ftrace_shutdown()		do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a610ca771558..8a499e2adaec 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -656,7 +656,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 	entry->preempt_count = pc & 0xff;
 	entry->pid = (tsk) ? tsk->pid : 0;
 	entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#else
+		TRACE_FLAG_IRQS_NOSUPPORT |
+#endif
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -1244,7 +1248,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
 	trace_seq_printf(s, "%3d", cpu);
 	trace_seq_printf(s, "%c%c",
-			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+			 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
 			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
 
 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6889ca48f1f1..8465ad052707 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -120,18 +120,20 @@ struct trace_boot {
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
  *  IRQS_OFF	- interrupts were disabled
- *  NEED_RESCED	- reschedule is requested
- *  HARDIRQ	- inside an interrupt handler
- *  SOFTIRQ	- inside a softirq handler
- *  CONT	- multiple entries hold the trace item
+ *  IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
+ *  NEED_RESCED	- reschedule is requested
+ *  HARDIRQ	- inside an interrupt handler
+ *  SOFTIRQ	- inside a softirq handler
+ *  CONT	- multiple entries hold the trace item
  */
 enum trace_flag_type {
-	TRACE_FLAG_IRQS_OFF		= 0x01,
-	TRACE_FLAG_NEED_RESCHED		= 0x02,
-	TRACE_FLAG_HARDIRQ		= 0x04,
-	TRACE_FLAG_SOFTIRQ		= 0x08,
-	TRACE_FLAG_CONT			= 0x10,
+	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
+	TRACE_FLAG_NEED_RESCHED		= 0x04,
+	TRACE_FLAG_HARDIRQ		= 0x08,
+	TRACE_FLAG_SOFTIRQ		= 0x10,
+	TRACE_FLAG_CONT			= 0x20,
 };
 
 #define TRACE_BUF_SIZE		1024
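
With TRACE_FLAG_IRQS_NOSUPPORT inserted at 0x02, every later flag moves up one bit, and the latency format can report 'X' when the architecture cannot say whether interrupts were off. A standalone sketch of the renumbered bits and of the character selection used by lat_print_generic() above; the irq_char() helper is illustrative only, not kernel code:

/* Illustration only; mirrors the enum above and the expression in trace.c. */
#include <stdio.h>

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};

/* First character of the latency format: 'd', 'X', or '.'. */
static char irq_char(unsigned int flags)
{
	return (flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
	       (flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
}

int main(void)
{
	printf("%c\n", irq_char(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ));	/* d */
	printf("%c\n", irq_char(TRACE_FLAG_IRQS_NOSUPPORT));			/* X */
	printf("%c\n", irq_char(TRACE_FLAG_SOFTIRQ));				/* . */
	return 0;
}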