about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--include/asm-x86_64/pci.h6
-rw-r--r--kernel/exit.c8
-rw-r--r--kernel/posix-cpu-timers.c47
4 files changed, 39 insertions, 28 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e1df376e709e..2ed5c4363b53 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -315,9 +315,9 @@ static void dbs_check_cpu(int cpu)
315 policy = this_dbs_info->cur_policy; 315 policy = this_dbs_info->cur_policy;
316 316
317 if ( init_flag == 0 ) { 317 if ( init_flag == 0 ) {
318 for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) { 318 for_each_online_cpu(j) {
319 dbs_info = &per_cpu(cpu_dbs_info, init_flag); 319 dbs_info = &per_cpu(cpu_dbs_info, j);
320 requested_freq[cpu] = dbs_info->cur_policy->cur; 320 requested_freq[j] = dbs_info->cur_policy->cur;
321 } 321 }
322 init_flag = 1; 322 init_flag = 1;
323 } 323 }
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index 5a82a6762c21..eeb3088a1c9e 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -50,10 +50,10 @@ extern int iommu_setup(char *opt);
50 * address space. The networking and block device layers use 50 * address space. The networking and block device layers use
51 * this boolean for bounce buffer decisions 51 * this boolean for bounce buffer decisions
52 * 52 *
53 * On x86-64 it mostly equals, but we set it to zero to tell some subsystems 53 * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
54 * that an hard or soft IOMMU is available. 54 * that an IOMMU is available.
55 */ 55 */
56#define PCI_DMA_BUS_IS_PHYS 0 56#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
57 57
58/* 58/*
59 * x86-64 always supports DAC, but sometimes it is useful to force 59 * x86-64 always supports DAC, but sometimes it is useful to force
diff --git a/kernel/exit.c b/kernel/exit.c
index 4897977a1f4b..3b25b182d2be 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -825,6 +825,14 @@ fastcall NORET_TYPE void do_exit(long code)
825 825
826 tsk->flags |= PF_EXITING; 826 tsk->flags |= PF_EXITING;
827 827
828 /*
829 * Make sure we don't try to process any timer firings
830 * while we are already exiting.
831 */
832 tsk->it_virt_expires = cputime_zero;
833 tsk->it_prof_expires = cputime_zero;
834 tsk->it_sched_expires = 0;
835
828 if (unlikely(in_atomic())) 836 if (unlikely(in_atomic()))
829 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 837 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
830 current->comm, current->pid, 838 current->comm, current->pid,
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 383ba22f0b62..bf374fceb39c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
497 left = cputime_div(cputime_sub(expires.cpu, val.cpu), 497 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
498 nthreads); 498 nthreads);
499 do { 499 do {
500 if (!unlikely(t->exit_state)) { 500 if (!unlikely(t->flags & PF_EXITING)) {
501 ticks = cputime_add(prof_ticks(t), left); 501 ticks = cputime_add(prof_ticks(t), left);
502 if (cputime_eq(t->it_prof_expires, 502 if (cputime_eq(t->it_prof_expires,
503 cputime_zero) || 503 cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
512 left = cputime_div(cputime_sub(expires.cpu, val.cpu), 512 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
513 nthreads); 513 nthreads);
514 do { 514 do {
515 if (!unlikely(t->exit_state)) { 515 if (!unlikely(t->flags & PF_EXITING)) {
516 ticks = cputime_add(virt_ticks(t), left); 516 ticks = cputime_add(virt_ticks(t), left);
517 if (cputime_eq(t->it_virt_expires, 517 if (cputime_eq(t->it_virt_expires,
518 cputime_zero) || 518 cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
527 nsleft = expires.sched - val.sched; 527 nsleft = expires.sched - val.sched;
528 do_div(nsleft, nthreads); 528 do_div(nsleft, nthreads);
529 do { 529 do {
530 if (!unlikely(t->exit_state)) { 530 if (!unlikely(t->flags & PF_EXITING)) {
531 ns = t->sched_time + nsleft; 531 ns = t->sched_time + nsleft;
532 if (t->it_sched_expires == 0 || 532 if (t->it_sched_expires == 0 ||
533 t->it_sched_expires > ns) { 533 t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
566 struct cpu_timer_list *next; 566 struct cpu_timer_list *next;
567 unsigned long i; 567 unsigned long i;
568 568
569 if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
570 return;
571
569 head = (CPUCLOCK_PERTHREAD(timer->it_clock) ? 572 head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
570 p->cpu_timers : p->signal->cpu_timers); 573 p->cpu_timers : p->signal->cpu_timers);
571 head += CPUCLOCK_WHICH(timer->it_clock); 574 head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1204,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
1204 1207
1205 do { 1208 do {
1206 t = next_thread(t); 1209 t = next_thread(t);
1207 } while (unlikely(t->exit_state)); 1210 } while (unlikely(t->flags & PF_EXITING));
1208 } while (t != tsk); 1211 } while (t != tsk);
1209 } 1212 }
1210} 1213}
@@ -1293,30 +1296,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1293 1296
1294#undef UNEXPIRED 1297#undef UNEXPIRED
1295 1298
1299 BUG_ON(tsk->exit_state);
1300
1296 /* 1301 /*
1297 * Double-check with locks held. 1302 * Double-check with locks held.
1298 */ 1303 */
1299 read_lock(&tasklist_lock); 1304 read_lock(&tasklist_lock);
1300 if (likely(tsk->signal != NULL)) { 1305 spin_lock(&tsk->sighand->siglock);
1301 spin_lock(&tsk->sighand->siglock);
1302 1306
1303 /* 1307 /*
1304 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] 1308 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
1305 * all the timers that are firing, and put them on the firing list. 1309 * all the timers that are firing, and put them on the firing list.
1306 */ 1310 */
1307 check_thread_timers(tsk, &firing); 1311 check_thread_timers(tsk, &firing);
1308 check_process_timers(tsk, &firing); 1312 check_process_timers(tsk, &firing);
1309 1313
1310 /* 1314 /*
1311 * We must release these locks before taking any timer's lock. 1315 * We must release these locks before taking any timer's lock.
1312 * There is a potential race with timer deletion here, as the 1316 * There is a potential race with timer deletion here, as the
1313 * siglock now protects our private firing list. We have set 1317 * siglock now protects our private firing list. We have set
1314 * the firing flag in each timer, so that a deletion attempt 1318 * the firing flag in each timer, so that a deletion attempt
1315 * that gets the timer lock before we do will give it up and 1319 * that gets the timer lock before we do will give it up and
1316 * spin until we've taken care of that timer below. 1320 * spin until we've taken care of that timer below.
1317 */ 1321 */
1318 spin_unlock(&tsk->sighand->siglock); 1322 spin_unlock(&tsk->sighand->siglock);
1319 }
1320 read_unlock(&tasklist_lock); 1323 read_unlock(&tasklist_lock);
1321 1324
1322 /* 1325 /*