author     Jeff Garzik <jgarzik@pobox.com>  2006-02-20 02:16:23 -0500
committer  Jeff Garzik <jgarzik@pobox.com>  2006-02-20 02:16:23 -0500
commit     5b2ffed906a3ebd4e52a5bbef06b99a517c53e4b (patch)
tree       2f900f89d93db6b0822d8bdf4f49851c581c12a6 /kernel
parent     f1b318793dcd2d9ff6b5ac06e7762098fa079cee (diff)
parent     bd71c2b17468a2531fb4c81ec1d73520845e97e1 (diff)
Merge branch 'master'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c          35
-rw-r--r--  kernel/fork.c             9
-rw-r--r--  kernel/hrtimer.c         13
-rw-r--r--  kernel/power/snapshot.c   4
-rw-r--r--  kernel/power/swsusp.c     4
-rw-r--r--  kernel/ptrace.c          28
-rw-r--r--  kernel/sched.c           23
-rw-r--r--  kernel/sysctl.c           2
-rw-r--r--  kernel/timer.c           39
9 files changed, 115 insertions, 42 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ba42b0a76961..12815d3f1a05 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1977,6 +1977,39 @@ void cpuset_fork(struct task_struct *child)
  * We don't need to task_lock() this reference to tsk->cpuset,
  * because tsk is already marked PF_EXITING, so attach_task() won't
  * mess with it, or task is a failed fork, never visible to attach_task.
+ *
+ * Hack:
+ *
+ * Set the exiting tasks cpuset to the root cpuset (top_cpuset).
+ *
+ * Don't leave a task unable to allocate memory, as that is an
+ * accident waiting to happen should someone add a callout in
+ * do_exit() after the cpuset_exit() call that might allocate.
+ * If a task tries to allocate memory with an invalid cpuset,
+ * it will oops in cpuset_update_task_memory_state().
+ *
+ * We call cpuset_exit() while the task is still competent to
+ * handle notify_on_release(), then leave the task attached to
+ * the root cpuset (top_cpuset) for the remainder of its exit.
+ *
+ * To do this properly, we would increment the reference count on
+ * top_cpuset, and near the very end of the kernel/exit.c do_exit()
+ * code we would add a second cpuset function call, to drop that
+ * reference.  This would just create an unnecessary hot spot on
+ * the top_cpuset reference count, to no avail.
+ *
+ * Normally, holding a reference to a cpuset without bumping its
+ * count is unsafe.  The cpuset could go away, or someone could
+ * attach us to a different cpuset, decrementing the count on
+ * the first cpuset that we never incremented.  But in this case,
+ * top_cpuset isn't going away, and either task has PF_EXITING set,
+ * which wards off any attach_task() attempts, or task is a failed
+ * fork, never visible to attach_task.
+ *
+ * Another way to do this would be to set the cpuset pointer
+ * to NULL here, and check in cpuset_update_task_memory_state()
+ * for a NULL pointer.  This hack avoids that NULL check, for no
+ * cost (other than this way too long comment ;).
  **/

 void cpuset_exit(struct task_struct *tsk)
@@ -1984,7 +2017,7 @@ void cpuset_exit(struct task_struct *tsk)
 	struct cpuset *cs;

 	cs = tsk->cpuset;
-	tsk->cpuset = NULL;
+	tsk->cpuset = &top_cpuset;	/* Hack - see comment above */

 	if (notify_on_release(cs)) {
 		char *pathbuf = NULL;
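The comment block added above argues for parking an exiting task on a permanent object rather than on NULL, so that late allocations in do_exit() stay safe without a NULL check in cpuset_update_task_memory_state(). A minimal userspace sketch of that pattern follows; the names (struct group, root_group, task_exit) are invented for illustration and this is not kernel code:

#include <stdio.h>

struct group { const char *name; };

static struct group root_group = { "root" };	/* permanent, never freed */

struct task { struct group *grp; };

static void task_exit(struct task *t)
{
	struct group *old = t->grp;

	/* Park the task on the permanent root group instead of
	 * setting t->grp = NULL: anything that still dereferences
	 * t->grp after this point keeps working. */
	t->grp = &root_group;
	printf("released group %s\n", old->name);
}

int main(void)
{
	struct group g = { "users" };
	struct task t = { &g };

	task_exit(&t);
	/* Late access is still safe, with no NULL check needed: */
	printf("task now in %s\n", t.grp->name);
	return 0;
}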
diff --git a/kernel/fork.c b/kernel/fork.c
index 8e88b374cee9..fbea12d7a943 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1123,8 +1123,8 @@ static task_t *copy_process(unsigned long clone_flags,
 	p->real_parent = current;
 	p->parent = p->real_parent;

+	spin_lock(&current->sighand->siglock);
 	if (clone_flags & CLONE_THREAD) {
-		spin_lock(&current->sighand->siglock);
 		/*
 		 * Important: if an exit-all has been started then
 		 * do not create this new thread - the whole thread
@@ -1162,8 +1162,6 @@ static task_t *copy_process(unsigned long clone_flags,
 			 */
 			p->it_prof_expires = jiffies_to_cputime(1);
 		}
-
-		spin_unlock(&current->sighand->siglock);
 	}

 	/*
@@ -1175,8 +1173,6 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (unlikely(p->ptrace & PT_PTRACED))
 		__ptrace_link(p, current->parent);

-	attach_pid(p, PIDTYPE_PID, p->pid);
-	attach_pid(p, PIDTYPE_TGID, p->tgid);
 	if (thread_group_leader(p)) {
 		p->signal->tty = current->signal->tty;
 		p->signal->pgrp = process_group(current);
@@ -1186,9 +1182,12 @@ static task_t *copy_process(unsigned long clone_flags,
 		if (p->pid)
 			__get_cpu_var(process_counts)++;
 	}
+	attach_pid(p, PIDTYPE_TGID, p->tgid);
+	attach_pid(p, PIDTYPE_PID, p->pid);

 	nr_threads++;
 	total_forks++;
+	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	return p;
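The fork.c change widens the sighand->siglock critical section: the lock is now taken unconditionally before the CLONE_THREAD block, and the attach_pid() calls plus nr_threads++ move inside it, so code walking the PID hashes under that lock cannot observe a half-attached task. A rough userspace analogy with a pthread mutex, invented names throughout, is:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_items;
static struct node { struct node *next; } *head;

static void insert(struct node *n)
{
	pthread_mutex_lock(&lock);
	/* Both updates happen inside one critical section, mirroring
	 * how the list attach and the counter bump now both run under
	 * current->sighand->siglock in copy_process(). */
	n->next = head;
	head = n;
	nr_items++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct node n = { 0 };

	insert(&n);
	printf("%d item(s)\n", nr_items);
	return 0;
}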
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2b6e1757aedd..5ae51f1bc7c8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -418,8 +418,19 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	/* Switch the timer base, if necessary: */
 	new_base = switch_hrtimer_base(timer, base);

-	if (mode == HRTIMER_REL)
+	if (mode == HRTIMER_REL) {
 		tim = ktime_add(tim, new_base->get_time());
+		/*
+		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
+		 * to signal that they simply return xtime in
+		 * do_gettimeoffset(). In this case we want to round up by
+		 * resolution when starting a relative timer, to avoid short
+		 * timeouts. This will go away with the GTOD framework.
+		 */
+#ifdef CONFIG_TIME_LOW_RES
+		tim = ktime_add(tim, base->resolution);
+#endif
+	}
 	timer->expires = tim;

 	enqueue_hrtimer(timer, new_base);
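The hrtimer change rounds relative expiry times up by one clock resolution on CONFIG_TIME_LOW_RES architectures, so a timer can never fire earlier than requested. A standalone sketch of the arithmetic, assuming a 10 ms resolution (the real value is per-architecture):

#include <stdio.h>
#include <stdint.h>

#define RESOLUTION_NS 10000000LL	/* assumed 10 ms tick */

static int64_t expiry(int64_t now_ns, int64_t rel_ns, int low_res)
{
	int64_t t = now_ns + rel_ns;

	if (low_res)		/* the CONFIG_TIME_LOW_RES case */
		t += RESOLUTION_NS;
	return t;
}

int main(void)
{
	/* A 1 ms relative timer on a 10 ms clock: without the round-up
	 * it could expire on the very next tick, i.e. too early. */
	printf("raw:     %lld ns\n", (long long)expiry(0, 1000000, 0));
	printf("rounded: %lld ns\n", (long long)expiry(0, 1000000, 1));
	return 0;
}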
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 41f66365f0d8..8d5a5986d621 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -91,10 +91,8 @@ static int save_highmem_zone(struct zone *zone)
 	 * corrected eventually when the cases giving rise to this
 	 * are better understood.
 	 */
-	if (PageReserved(page)) {
-		printk("highmem reserved page?!\n");
+	if (PageReserved(page))
 		continue;
-	}
 	BUG_ON(PageNosave(page));
 	if (PageNosaveFree(page))
 		continue;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 4e90905f0e87..2d9d08f72f76 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -153,13 +153,11 @@ static int swsusp_swap_check(void) /* This is called before saving image */
 {
 	int i;

-	if (!swsusp_resume_device)
-		return -ENODEV;
 	spin_lock(&swap_lock);
 	for (i = 0; i < MAX_SWAPFILES; i++) {
 		if (!(swap_info[i].flags & SWP_WRITEOK))
 			continue;
-		if (is_resume_device(swap_info + i)) {
+		if (!swsusp_resume_device || is_resume_device(swap_info + i)) {
 			spin_unlock(&swap_lock);
 			root_swap = i;
 			return 0;
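The swsusp change inverts the fallback: instead of failing with -ENODEV when no resume device is configured, the scan now accepts the first writable swap entry. A self-contained mock-up of the new selection rule, with invented device numbers:

#include <stdio.h>

#define MAX_SWAPFILES 4
#define SWP_WRITEOK   1

struct swap { int flags; int dev; };

static struct swap swap_info[MAX_SWAPFILES] = {
	{ 0, 10 }, { SWP_WRITEOK, 11 }, { SWP_WRITEOK, 12 },
};

static int pick_swap(int resume_dev)	/* 0 means "not configured" */
{
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (!(swap_info[i].flags & SWP_WRITEOK))
			continue;
		/* No configured device: first writable entry wins. */
		if (!resume_dev || swap_info[i].dev == resume_dev)
			return i;
	}
	return -1;
}

int main(void)
{
	printf("no resume device -> slot %d\n", pick_swap(0));
	printf("resume device 12 -> slot %d\n", pick_swap(12));
	return 0;
}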
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5f33cdb6fff5..d95a72c9279d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -72,8 +72,8 @@ void ptrace_untrace(task_t *child)
  */
 void __ptrace_unlink(task_t *child)
 {
-	if (!child->ptrace)
-		BUG();
+	BUG_ON(!child->ptrace);
+
 	child->ptrace = 0;
 	if (!list_empty(&child->ptrace_list)) {
 		list_del_init(&child->ptrace_list);
@@ -184,22 +184,27 @@ bad:
 	return retval;
 }

+void __ptrace_detach(struct task_struct *child, unsigned int data)
+{
+	child->exit_code = data;
+	/* .. re-parent .. */
+	__ptrace_unlink(child);
+	/* .. and wake it up. */
+	if (child->exit_state != EXIT_ZOMBIE)
+		wake_up_process(child);
+}
+
 int ptrace_detach(struct task_struct *child, unsigned int data)
 {
 	if (!valid_signal(data))
 		return -EIO;

 	/* Architecture-specific hardware disable .. */
 	ptrace_disable(child);

-	/* .. re-parent .. */
-	child->exit_code = data;
-
 	write_lock_irq(&tasklist_lock);
-	__ptrace_unlink(child);
-	/* .. and wake it up. */
-	if (child->exit_state != EXIT_ZOMBIE)
-		wake_up_process(child);
+	if (child->ptrace)
+		__ptrace_detach(child, data);
 	write_unlock_irq(&tasklist_lock);

 	return 0;
@@ -242,8 +247,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		if (write) {
 			copy_to_user_page(vma, page, addr,
 					  maddr + offset, buf, bytes);
-			if (!PageCompound(page))
-				set_page_dirty_lock(page);
+			set_page_dirty_lock(page);
 		} else {
 			copy_from_user_page(vma, page, addr,
 					    buf, maddr + offset, bytes);
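The ptrace.c refactor follows the common kernel convention of a double-underscore helper that assumes the caller holds the lock (__ptrace_detach, called under tasklist_lock) plus a locking wrapper that re-checks child->ptrace once the lock is held. A userspace sketch of that split, with invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;

struct child { int traced; int exit_code; };

/* Caller must hold tasklist_lock. */
static void __detach(struct child *c, int data)
{
	c->exit_code = data;
	c->traced = 0;
}

static int detach(struct child *c, int data)
{
	pthread_mutex_lock(&tasklist_lock);
	/* State may have changed before we took the lock, so
	 * re-check it here, mirroring the child->ptrace test. */
	if (c->traced)
		__detach(c, data);
	pthread_mutex_unlock(&tasklist_lock);
	return 0;
}

int main(void)
{
	struct child c = { 1, 0 };

	detach(&c, 9);
	printf("traced=%d exit_code=%d\n", c.traced, c.exit_code);
	return 0;
}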
diff --git a/kernel/sched.c b/kernel/sched.c
index 87d93be336a1..12d291bf3379 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1204,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 		}
 	}

-	if (p->last_waker_cpu != this_cpu)
-		goto out_set_cpu;
-
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;

@@ -1277,8 +1274,6 @@ out_set_cpu:
 		cpu = task_cpu(p);
 	}

-	p->last_waker_cpu = this_cpu;
-
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1360,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP)
-	p->last_waker_cpu = cpu;
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
-#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
@@ -5066,7 +5058,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 #define MAX_DOMAIN_DISTANCE 32

 static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
-		{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
+		{ [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
+/*
+ * Architectures may override the migration cost and thus avoid
+ * boot-time calibration. Unit is nanoseconds. Mostly useful for
+ * virtualized hardware:
+ */
+#ifdef CONFIG_DEFAULT_MIGRATION_COST
+			CONFIG_DEFAULT_MIGRATION_COST
+#else
+			-1LL
+#endif
+};

 /*
  * Allow override of migration cost - in units of microseconds.
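The migration_cost initializer relies on a GCC range designator ([0 ... N-1]) so every slot gets either the build-time CONFIG_DEFAULT_MIGRATION_COST or the -1 "calibrate at boot" sentinel. A compilable stand-alone version of the pattern; here CONFIG_DEFAULT_MIGRATION_COST is just an ordinary -D macro, not Kconfig, and the range designator is a GNU extension:

#include <stdio.h>

#define MAX_DOMAIN_DISTANCE 32

static long long migration_cost[MAX_DOMAIN_DISTANCE] =
	{ [0 ... MAX_DOMAIN_DISTANCE - 1] =
#ifdef CONFIG_DEFAULT_MIGRATION_COST
		CONFIG_DEFAULT_MIGRATION_COST
#else
		-1LL		/* -1 means "calibrate at boot" */
#endif
	};

int main(void)
{
	/* Every slot carries the same default until calibration. */
	printf("migration_cost[5] = %lld\n", migration_cost[5]);
	return 0;
}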
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 71dd6f62efec..7654d55c47f5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -126,8 +126,6 @@ extern int sysctl_hz_timer;
 extern int acct_parm[];
 #endif

-int randomize_va_space = 1;
-
 static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
 		ctl_table *, void **);
 static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
diff --git a/kernel/timer.c b/kernel/timer.c
index b9dad3994676..fe3a9a9f8328 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -717,12 +717,16 @@ static void second_overflow(void)
 #endif
 }

-/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
+/*
+ * Returns how many microseconds we need to add to xtime this tick
+ * in doing an adjustment requested with adjtime.
+ */
+static long adjtime_adjustment(void)
 {
-	long time_adjust_step, delta_nsec;
+	long time_adjust_step;

-	if ((time_adjust_step = time_adjust) != 0 ) {
+	time_adjust_step = time_adjust;
+	if (time_adjust_step) {
 		/*
 		 * We are doing an adjtime thing. Prepare time_adjust_step to
 		 * be within bounds. Note that a positive time_adjust means we
@@ -733,10 +737,19 @@ static void update_wall_time_one_tick(void)
 		 */
 		time_adjust_step = min(time_adjust_step, (long)tickadj);
 		time_adjust_step = max(time_adjust_step, (long)-tickadj);
+	}
+	return time_adjust_step;
+}

+/* in the NTP reference this is called "hardclock()" */
+static void update_wall_time_one_tick(void)
+{
+	long time_adjust_step, delta_nsec;
+
+	time_adjust_step = adjtime_adjustment();
+	if (time_adjust_step)
 		/* Reduce by this step the amount of time left */
 		time_adjust -= time_adjust_step;
-	}
 	delta_nsec = tick_nsec + time_adjust_step * 1000;
 	/*
 	 * Advance the phase, once it gets to one microsecond, then
@@ -759,6 +772,22 @@ static void update_wall_time_one_tick(void)
 }

 /*
+ * Return how long ticks are at the moment, that is, how much time
+ * update_wall_time_one_tick will add to xtime next time we call it
+ * (assuming no calls to do_adjtimex in the meantime).
+ * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
+ * bits to the right of the binary point.
+ * This function has no side-effects.
+ */
+u64 current_tick_length(void)
+{
+	long delta_nsec;
+
+	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
+	return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
+}
+
+/*
  * Using a loop looks inefficient, but "ticks" is
  * usually just one (we shouldn't be losing ticks,
  * we're doing this this way mainly for interrupt
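current_tick_length() returns the tick length as fixed-point nanoseconds with SHIFT_SCALE-10 fractional bits. A worked standalone example of that computation follows; SHIFT_SCALE = 22 is an assumption taken from 2.6-era <linux/timex.h>, giving 12 fractional bits, and the other constants are chosen for illustration:

#include <stdio.h>
#include <stdint.h>

#define SHIFT_SCALE 22			/* assumed, per 2.6-era timex.h */

static long tick_nsec = 1000000;	/* 1 ms tick at HZ=1000 */
static long time_adj;			/* NTP phase term, 0 in this demo */

static uint64_t current_tick_length(long adjtime_usec)
{
	long delta_nsec = tick_nsec + adjtime_usec * 1000;

	/* Fixed point: SHIFT_SCALE-10 = 12 fractional bits. */
	return ((uint64_t)delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}

int main(void)
{
	/* 1000000 ns << 12 = 4096000000: one tick, 12 fractional bits. */
	printf("%llu\n", (unsigned long long)current_tick_length(0));
	return 0;
}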