author    James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-14 15:18:01 -0500
committer James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-03-14 15:18:01 -0500
commit    f33b5d783b4f56be5ace6a1c98fb5f76b2d2d07d (patch)
tree      b027b5f3429d416b3da5b9195024007dab062a5e /kernel
parent    e935d5da8e5d12fabe5b632736c50eae0427e8c8 (diff)
parent    67963132638e67ad3c5aa16765e6f3f2f3cdd85c (diff)
Merge ../linux-2.6
Diffstat (limited to 'kernel')
 kernel/fork.c     | 10
 kernel/hrtimer.c  | 35
 kernel/rcupdate.c | 76
 kernel/sched.c    | 10
 kernel/sysctl.c   | 19
 kernel/timer.c    | 22
 6 files changed, 143 insertions(+), 29 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index fbea12d7a943..ccdfbb16c86d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -108,8 +108,10 @@ void free_task(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(free_task);
 
-void __put_task_struct(struct task_struct *tsk)
+void __put_task_struct_cb(struct rcu_head *rhp)
 {
+	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
 	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
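[The callback now receives only the embedded rcu_head, so it steps back to the enclosing task_struct with container_of(). For readers unfamiliar with the idiom, a minimal userspace sketch — struct task and free_task_cb are hypothetical stand-ins, not kernel code:

#include <stddef.h>
#include <stdio.h>

/* Same trick as the kernel macro: subtract the member's offset from the
 * member's address to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *); };
struct task { int pid; struct rcu_head rcu; };

static void free_task_cb(struct rcu_head *rhp)
{
	struct task *t = container_of(rhp, struct task, rcu);
	printf("freeing task %d\n", t->pid);
}

int main(void)
{
	struct task t = { .pid = 42 };
	free_task_cb(&t.rcu);	/* prints: freeing task 42 */
	return 0;
}
]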
@@ -1060,6 +1062,12 @@ static task_t *copy_process(unsigned long clone_flags,
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
 
 	/*
+	 * sigaltstack should be cleared when sharing the same VM
+	 */
+	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
+		p->sas_ss_sp = p->sas_ss_size = 0;
+
+	/*
 	 * Syscall tracing should be turned off in the child regardless
 	 * of CLONE_PTRACE.
 	 */
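[Note the flag test: (clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM holds only when the child shares the VM without vfork() semantics — a long-lived thread rather than a transient vfork() child. A standalone sketch using the real flag values but a hypothetical helper name:

#include <assert.h>

#define CLONE_VM    0x00000100
#define CLONE_VFORK 0x00004000

/* True only for CLONE_VM without CLONE_VFORK: a vfork() child execs
 * almost immediately, so only thread-like children need a fresh
 * sigaltstack. */
static int shares_vm_long_term(unsigned long flags)
{
	return (flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM;
}

int main(void)
{
	assert(shares_vm_long_term(CLONE_VM));                /* thread */
	assert(!shares_vm_long_term(CLONE_VM | CLONE_VFORK)); /* vfork() */
	assert(!shares_vm_long_term(0));                      /* fork() */
	return 0;
}
]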
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 5ae51f1bc7c8..14bc9cfa6399 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -505,6 +505,41 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 	return rem;
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+/**
+ * hrtimer_get_next_event - get the time until next expiry event
+ *
+ * Returns the delta to the next expiry event or KTIME_MAX if no timer
+ * is pending.
+ */
+ktime_t hrtimer_get_next_event(void)
+{
+	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+		struct hrtimer *timer;
+
+		spin_lock_irqsave(&base->lock, flags);
+		if (!base->first) {
+			spin_unlock_irqrestore(&base->lock, flags);
+			continue;
+		}
+		timer = rb_entry(base->first, struct hrtimer, node);
+		delta.tv64 = timer->expires.tv64;
+		spin_unlock_irqrestore(&base->lock, flags);
+		delta = ktime_sub(delta, base->get_time());
+		if (delta.tv64 < mindelta.tv64)
+			mindelta.tv64 = delta.tv64;
+	}
+	if (mindelta.tv64 < 0)
+		mindelta.tv64 = 0;
+	return mindelta;
+}
+#endif
+
 /**
  * hrtimer_init - initialize a timer to the given clock
  *
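[hrtimer_get_next_event() takes each base's lock only long enough to snapshot the earliest expiry, converts to a delta outside the lock, and keeps the minimum, clamped at zero. A toy userspace model of that scan, with a plain array standing in for the per-base rb-trees:

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

/* expiry[i] is the earliest timer on base i, or KTIME_MAX when empty. */
static int64_t next_event(const int64_t *expiry, int nbases, int64_t now)
{
	int64_t mindelta = KTIME_MAX;

	for (int i = 0; i < nbases; i++) {
		if (expiry[i] == KTIME_MAX)
			continue;		/* no timer queued here */
		int64_t delta = expiry[i] - now;
		if (delta < mindelta)
			mindelta = delta;
	}
	return mindelta < 0 ? 0 : mindelta;	/* already expired: fire now */
}

int main(void)
{
	int64_t expiry[] = { 1500, KTIME_MAX, 1200 };

	printf("%lld\n", (long long)next_event(expiry, 3, 1000)); /* 200 */
	printf("%lld\n", (long long)next_event(expiry, 3, 2000)); /* 0 */
	return 0;
}
]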
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 0cf8146bd585..8cf15a569fcd 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -67,7 +67,43 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
 
 /* Fake initialization required by compiler */
 static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10000;
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+#ifdef CONFIG_SMP
+static int rsinterval = 1000;
+#endif
+
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	int cpu;
+	cpumask_t cpumask;
+	set_need_resched();
+	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
+		rdp->last_rs_qlen = rdp->qlen;
+		/*
+		 * Don't send IPI to itself. With irqs disabled,
+		 * rdp->cpu is the current cpu.
+		 */
+		cpumask = rcp->cpumask;
+		cpu_clear(rdp->cpu, cpumask);
+		for_each_cpu_mask(cpu, cpumask)
+			smp_send_reschedule(cpu);
+	}
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+			struct rcu_ctrlblk *rcp)
+{
+	set_need_resched();
+}
+#endif
 
 /**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
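[force_quiescent_state() rate-limits its reschedule IPIs: it only kicks the other CPUs when the queue has grown by more than rsinterval entries since the last kick. The pattern in isolation — maybe_kick() and the printf are stand-ins for the IPI broadcast:

#include <stdio.h>

static int rsinterval = 1000;

struct queue { long qlen, last_rs_qlen; };

static void maybe_kick(struct queue *q)
{
	/* Act only once per rsinterval enqueues, as in the code above. */
	if (q->qlen - q->last_rs_qlen > rsinterval) {
		q->last_rs_qlen = q->qlen;
		printf("kick at qlen=%ld\n", q->qlen);
	}
}

int main(void)
{
	struct queue q = { 0, 0 };

	for (int i = 0; i < 3500; i++) {
		q.qlen++;
		maybe_kick(&q);		/* kicks at 1001, 2002, 3003 */
	}
	return 0;
}
]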
@@ -92,17 +128,13 @@ void fastcall call_rcu(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-
-	if (unlikely(++rdp->count > 10000))
-		set_need_resched();
-
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_ctrlblk);
+	}
 	local_irq_restore(flags);
 }
 
-static atomic_t rcu_barrier_cpu_count;
-static struct semaphore rcu_barrier_sema;
-static struct completion rcu_barrier_completion;
-
 /**
 * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
@@ -131,12 +163,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
 	rdp = &__get_cpu_var(rcu_bh_data);
 	*rdp->nxttail = head;
 	rdp->nxttail = &head->next;
-	rdp->count++;
-/*
- *  Should we directly call rcu_do_batch() here ?
- *  if (unlikely(rdp->count > 10000))
- *      rcu_do_batch(rdp);
- */
+
+	if (unlikely(++rdp->qlen > qhimark)) {
+		rdp->blimit = INT_MAX;
+		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
+	}
+
 	local_irq_restore(flags);
 }
 
@@ -199,10 +231,12 @@ static void rcu_do_batch(struct rcu_data *rdp)
 		next = rdp->donelist = list->next;
 		list->func(list);
 		list = next;
-		rdp->count--;
-		if (++count >= maxbatch)
+		rdp->qlen--;
+		if (++count >= rdp->blimit)
 			break;
 	}
+	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+		rdp->blimit = blimit;
 	if (!rdp->donelist)
 		rdp->donetail = &rdp->donelist;
 	else
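[Taken together, the two sides implement batch limiting with hysteresis: enqueueing past qhimark lifts the per-pass cap to INT_MAX so the backlog drains unthrottled, and draining back below qlowmark restores the default blimit. A compact single-threaded model of that behavior — struct rdp here is a stand-in, not the kernel's:

#include <limits.h>
#include <stdio.h>

static int blimit = 10, qhimark = 10000, qlowmark = 100;

struct rdp { long qlen; int blimit; };

/* Enqueue side, as in call_rcu() above. */
static void enqueue(struct rdp *r)
{
	if (++r->qlen > qhimark)
		r->blimit = INT_MAX;
}

/* Dequeue side, as in rcu_do_batch() above. */
static void do_batch(struct rdp *r)
{
	int count = 0;

	while (r->qlen > 0) {
		r->qlen--;			/* invoke one callback */
		if (++count >= r->blimit)
			break;
	}
	if (r->blimit == INT_MAX && r->qlen <= qlowmark)
		r->blimit = blimit;		/* backlog gone: re-throttle */
}

int main(void)
{
	struct rdp r = { .qlen = 0, .blimit = 10 };

	for (int i = 0; i < 10001; i++)
		enqueue(&r);
	printf("blimit=%d\n", r.blimit);		/* INT_MAX: cap lifted */
	do_batch(&r);					/* drains everything */
	printf("blimit=%d qlen=%ld\n", r.blimit, r.qlen); /* 10, 0 */
	return 0;
}
]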
@@ -473,6 +507,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 	rdp->quiescbatch = rcp->completed;
 	rdp->qs_pending = 0;
 	rdp->cpu = cpu;
+	rdp->blimit = blimit;
 }
 
 static void __devinit rcu_online_cpu(int cpu)
@@ -567,7 +602,12 @@ void synchronize_kernel(void)
 	synchronize_rcu();
 }
 
-module_param(maxbatch, int, 0);
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+#ifdef CONFIG_SMP
+module_param(rsinterval, int, 0);
+#endif
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
 EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
diff --git a/kernel/sched.c b/kernel/sched.c
index 12d291bf3379..4d46e90f59c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -178,13 +178,6 @@ static unsigned int task_timeslice(task_t *p)
 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)	\
 				< (long long) (sd)->cache_hot_time)
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-	__put_task_struct(container_of(rhp, struct task_struct, rcu));
-}
-
-EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-
 /*
  * These are the runqueue data structures:
  */
@@ -4028,6 +4021,8 @@ static inline void __cond_resched(void)
 	 */
 	if (unlikely(preempt_count()))
 		return;
+	if (unlikely(system_state != SYSTEM_RUNNING))
+		return;
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		schedule();
@@ -4333,6 +4328,7 @@ void __devinit init_idle(task_t *idle, int cpu)
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	idle->timestamp = sched_clock();
 	idle->sleep_avg = 0;
 	idle->array = NULL;
 	idle->prio = MAX_PRIO;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c05a2b7125e1..32b48e8ee36e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,9 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 
+extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
+                     void __user *buffer, size_t *lenp, loff_t *ppos);
+
 #if defined(CONFIG_SYSCTL)
 
 /* External variables not in a header file. */
@@ -124,6 +127,10 @@ extern int sysctl_hz_timer;
 extern int acct_parm[];
 #endif
 
+#ifdef CONFIG_IA64
+extern int no_unaligned_warning;
+#endif
+
 static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
 		       ctl_table *, void **);
 static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -663,6 +670,16 @@ static ctl_table kern_table[] = {
 		.data		= &acpi_video_flags,
 		.maxlen		= sizeof (unsigned long),
 		.mode		= 0644,
+		.proc_handler	= &proc_doulongvec_minmax,
+	},
+#endif
+#ifdef CONFIG_IA64
+	{
+		.ctl_name	= KERN_IA64_UNALIGNED,
+		.procname	= "ignore-unaligned-usertrap",
+		.data		= &no_unaligned_warning,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
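[The handler swap for acpi_video_flags fixes a width mismatch: the variable is an unsigned long, but proc_dointvec reads and writes ints. A userspace sketch of why that matters on LP64, with memcpy standing in for the handler's store:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long flags = ~0UL;	/* pretend the high bits are stale */
	int v = 1;

	/* An int-sized store only touches 4 of the 8 bytes on LP64. */
	memcpy(&flags, &v, sizeof(v));
	printf("flags=%#lx\n", flags);	/* 0xffffffff00000001 on LE LP64 */
	return 0;
}
]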
@@ -929,7 +946,7 @@ static ctl_table fs_table[] = {
 		.data		= &files_stat,
 		.maxlen		= 3*sizeof(int),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_nr_files,
 	},
 	{
 		.ctl_name	= FS_MAXFILE,
diff --git a/kernel/timer.c b/kernel/timer.c
index fe3a9a9f8328..bf7c4193b936 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -489,9 +489,21 @@ unsigned long next_timer_interrupt(void)
 	struct list_head *list;
 	struct timer_list *nte;
 	unsigned long expires;
+	unsigned long hr_expires = MAX_JIFFY_OFFSET;
+	ktime_t hr_delta;
 	tvec_t *varray[4];
 	int i, j;
 
+	hr_delta = hrtimer_get_next_event();
+	if (hr_delta.tv64 != KTIME_MAX) {
+		struct timespec tsdelta;
+		tsdelta = ktime_to_timespec(hr_delta);
+		hr_expires = timespec_to_jiffies(&tsdelta);
+		if (hr_expires < 3)
+			return hr_expires + jiffies;
+	}
+	hr_expires += jiffies;
+
 	base = &__get_cpu_var(tvec_bases);
 	spin_lock(&base->t_base.lock);
 	expires = base->timer_jiffies + (LONG_MAX >> 1);
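[The new prologue asks the hrtimer layer (hrtimer_get_next_event() from the hunk above) for its next event, converts the ktime_t delta to jiffies, and returns immediately when it is under 3 ticks away, so conversion rounding cannot push the wakeup past the hrtimer's deadline. Roughly what the conversion does, as a userspace sketch — ns_to_jiffies approximates the ktime_to_timespec + timespec_to_jiffies pair, and the HZ value is an assumption:

#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000LL

/* Round a nanosecond delta up to whole ticks. */
static unsigned long ns_to_jiffies(long long ns)
{
	return (unsigned long)((ns * HZ + NSEC_PER_SEC - 1) / NSEC_PER_SEC);
}

int main(void)
{
	printf("%lu\n", ns_to_jiffies(4000000));	/* 4 ms   -> 1 tick   */
	printf("%lu\n", ns_to_jiffies(100000000));	/* 100 ms -> 25 ticks */
	return 0;
}
]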
@@ -542,6 +554,10 @@ found:
 		}
 	}
 	spin_unlock(&base->t_base.lock);
+
+	if (time_before(hr_expires, expires))
+		return hr_expires;
+
 	return expires;
 }
 #endif
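[time_before() is used rather than a plain < so the comparison stays correct when jiffies wraps around: it compares the signed difference instead of the raw values. Minimal restatement:

#include <assert.h>

#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long a = (unsigned long)-5;	/* jiffies about to wrap */
	unsigned long b = a + 10;		/* wraps to a small value */

	assert(time_before(a, b));	/* still ordered across the wrap */
	assert(!time_before(b, a));
	return 0;
}
]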
@@ -925,6 +941,8 @@ static inline void update_times(void)
 void do_timer(struct pt_regs *regs)
 {
 	jiffies_64++;
+	/* prevent loading jiffies before storing new jiffies_64 value. */
+	barrier();
 	update_times();
 	softlockup_tick(regs);
 }
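[barrier() is a compiler-only fence: it stops the compiler from caching jiffies-derived values across the jiffies_64 store, without emitting any CPU instruction. The kernel's GCC definition boils down to the macro below; tick() is a hypothetical stand-in for the do_timer()/update_times() situation above:

#include <stdio.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")

static unsigned long long jiffies_64;
static unsigned long snapshot;

static void tick(void)
{
	jiffies_64++;
	/* Forbid the compiler from loading jiffies_64 (or anything derived
	 * from it) before the increment above is stored. */
	barrier();
	snapshot = (unsigned long)jiffies_64;
}

int main(void)
{
	tick();
	printf("%lu\n", snapshot);	/* 1 */
	return 0;
}
]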
@@ -1351,10 +1369,10 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
 		return x();
 
 	case TIME_SOURCE_MMIO64	:
-		return readq((void __iomem *) time_interpolator->addr);
+		return readq_relaxed((void __iomem *)time_interpolator->addr);
 
 	case TIME_SOURCE_MMIO32	:
-		return readl((void __iomem *) time_interpolator->addr);
+		return readl_relaxed((void __iomem *)time_interpolator->addr);
 
 	default: return get_cycles();
 	}
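[The _relaxed MMIO readers skip the ordering barrier that plain readq()/readl() imply on some architectures (notably ia64); reading a free-running clock register doesn't need that ordering, so the cheaper variant suffices. A sketch of the distinction with hypothetical names, not the real ia64 implementation:

#include <stdint.h>

/* Just the volatile MMIO load. */
static inline uint64_t my_readq_relaxed(const volatile void *addr)
{
	return *(const volatile uint64_t *)addr;
}

/* Ordered variant: the same load followed by a full memory fence. */
static inline uint64_t my_readq(const volatile void *addr)
{
	uint64_t v = my_readq_relaxed(addr);
	__sync_synchronize();		/* e.g. mf on ia64 */
	return v;
}

int main(void)
{
	volatile uint64_t fake_reg = 42;	/* stands in for a device register */
	return my_readq(&fake_reg) == my_readq_relaxed(&fake_reg) ? 0 : 1;
}
]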