author    Paul Mackerras <paulus@samba.org>    2007-09-19 20:09:27 -0400
committer Paul Mackerras <paulus@samba.org>    2007-09-19 20:09:27 -0400
commit    0ce49a3945474fc942ec37c0c0efece60f592f80 (patch)
tree      f42b821b2d9e2d8775bc22f56d444c2cc7b7b7dd /kernel
parent    9e4859ef5462193643fd2b3c8ffb298e5a4a4319 (diff)
parent    a88a8eff1e6e32d3288986a9d36c6a449c032d3a (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/tick-broadcast.c  24
-rw-r--r--  kernel/time/tick-sched.c      12
-rw-r--r--  kernel/time/timekeeping.c     10
-rw-r--r--  kernel/user.c                 45
-rw-r--r--  kernel/user_namespace.c        4
-rw-r--r--  kernel/utsname.c               2
6 files changed, 74 insertions(+), 23 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index db8e0f3d409b..aab881c86a1a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -382,12 +382,23 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 
 int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * If the CPU is marked for broadcast, enforce oneshot
+	 * broadcast mode. The jinxed VAIO does not resume otherwise.
+	 * No idea why it ends up in a lower C State during resume
+	 * without notifying the clock events layer.
+	 */
+	if (cpu_isset(cpu, tick_broadcast_mask))
+		cpu_set(cpu, tick_broadcast_oneshot_mask);
+
 	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 	if(!cpus_empty(tick_broadcast_oneshot_mask))
 		tick_broadcast_set_event(ktime_get(), 1);
 
-	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
+	return cpu_isset(cpu, tick_broadcast_oneshot_mask);
 }
 
 /*
@@ -549,20 +560,17 @@ void tick_broadcast_switch_to_oneshot(void)
  */
 void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
 {
-	struct clock_event_device *bc;
 	unsigned long flags;
 	unsigned int cpu = *cpup;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
 
-	bc = tick_broadcast_device.evtdev;
+	/*
+	 * Clear the broadcast mask flag for the dead cpu, but do not
+	 * stop the broadcast device!
+	 */
 	cpu_clear(cpu, tick_broadcast_oneshot_mask);
 
-	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
-		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
-			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
-	}
-
 	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b416995b9757..8c3fef1db09c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -160,6 +160,18 @@ void tick_nohz_stop_sched_tick(void)
 	cpu = smp_processor_id();
 	ts = &per_cpu(tick_cpu_sched, cpu);
 
+	/*
+	 * If this cpu is offline and it is the one which updates
+	 * jiffies, then give up the assignment and let it be taken by
+	 * the cpu which runs the tick timer next. If we don't drop
+	 * this here the jiffies might be stale and do_timer() never
+	 * invoked.
+	 */
+	if (unlikely(!cpu_online(cpu))) {
+		if (cpu == tick_do_timer_cpu)
+			tick_do_timer_cpu = -1;
+	}
+
 	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 		goto end;
 
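
The added block gives up the do_timer() assignment when the owning CPU is offline, so another CPU can take it over. Below is a standalone C sketch of that handoff, under the assumption stated in the comment above that the next CPU to run its tick claims an unowned duty; names like NO_OWNER are illustrative only.

/*
 * Standalone sketch (userspace, not kernel code): the CPU that owns
 * the jiffies-update duty drops it when offline, and the next online
 * CPU to tick picks it up, so jiffies keep advancing.
 */
#include <stdio.h>

#define NO_OWNER (-1)

static int tick_do_timer_cpu = 0;	/* CPU 0 owns the duty at boot */
static unsigned long jiffies;

static void tick_on_cpu(int cpu, int online)
{
	if (!online) {
		/* Offline owner gives the assignment up, as in the hunk. */
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = NO_OWNER;
		return;
	}

	/* An unowned duty is claimed by whoever runs its tick next. */
	if (tick_do_timer_cpu == NO_OWNER)
		tick_do_timer_cpu = cpu;

	if (cpu == tick_do_timer_cpu)
		jiffies++;	/* stand-in for do_timer() */
}

int main(void)
{
	tick_on_cpu(0, 1);	/* owner ticks, jiffies advance */
	tick_on_cpu(0, 0);	/* owner goes offline, duty dropped */
	tick_on_cpu(1, 1);	/* CPU 1 claims the duty and ticks */
	printf("owner=%d jiffies=%lu\n", tick_do_timer_cpu, jiffies);
	return 0;
}
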
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index acc417b5a9b7..4ad79f6bdec6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -217,6 +217,7 @@ static void change_clocksource(void)
 }
 #else
 static inline void change_clocksource(void) { }
+static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
 /**
@@ -280,6 +281,8 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
+/* xtime offset when we went into suspend */
+static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -305,6 +308,8 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic.tv_sec -= sleep_length;
 		total_sleep_time += sleep_length;
 	}
+	/* Make sure that we have the correct xtime reference */
+	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 	/* re-base the last cycle value */
 	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
@@ -325,9 +330,12 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 {
 	unsigned long flags;
 
+	timekeeping_suspend_time = read_persistent_clock();
+
 	write_seqlock_irqsave(&xtime_lock, flags);
+	/* Get the current xtime offset */
+	timekeeping_suspend_nsecs = __get_nsec_offset();
 	timekeeping_suspended = 1;
-	timekeeping_suspend_time = read_persistent_clock();
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
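
Taken together, these timekeeping hunks sample the nanoseconds accumulated past xtime at suspend and fold them back into xtime at resume, so no time is silently dropped across a suspend cycle; note that read_persistent_clock() also moved ahead of the seqlock-protected section. A self-contained sketch of that arithmetic follows; get_nsec_offset() is a fixed-value stand-in for __get_nsec_offset().

/*
 * Standalone sketch (userspace, not kernel code) of the suspend/resume
 * nanosecond bookkeeping: save the sub-second offset at suspend and
 * add it back at resume, mirroring timespec_add_ns(&xtime, ...).
 */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec xtime = { 1000, 0 };	/* illustrative value */
static long long suspend_nsecs;

/* Stand-in: nanoseconds elapsed since xtime was last updated. */
static long long get_nsec_offset(void) { return 123456; }

static void add_ns(struct timespec *ts, long long ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= NSEC_PER_SEC) {	/* normalize into tv_sec */
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}

int main(void)
{
	suspend_nsecs = get_nsec_offset();	/* at suspend */
	add_ns(&xtime, suspend_nsecs);		/* at resume */
	printf("xtime = %ld.%09ld\n", (long)xtime.tv_sec, xtime.tv_nsec);
	return 0;
}
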
diff --git a/kernel/user.c b/kernel/user.c
index e7d11cef6998..9ca2848fc356 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -55,25 +55,22 @@ struct user_struct root_user = {
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
+static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 {
-	list_add(&up->uidhash_list, hashent);
+	hlist_add_head(&up->uidhash_node, hashent);
 }
 
 static inline void uid_hash_remove(struct user_struct *up)
 {
-	list_del(&up->uidhash_list);
+	hlist_del_init(&up->uidhash_node);
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 {
-	struct list_head *up;
-
-	list_for_each(up, hashent) {
-		struct user_struct *user;
-
-		user = list_entry(up, struct user_struct, uidhash_list);
+	struct user_struct *user;
+	struct hlist_node *h;
 
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
 		if(user->uid == uid) {
 			atomic_inc(&user->__count);
 			return user;
@@ -122,7 +119,7 @@ void free_uid(struct user_struct *up)
 
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
-	struct list_head *hashent = uidhashentry(ns, uid);
+	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up;
 
 	spin_lock_irq(&uidhash_lock);
@@ -202,6 +199,30 @@ void switch_uid(struct user_struct *new_user)
 	suid_keys(current);
 }
 
+void release_uids(struct user_namespace *ns)
+{
+	int i;
+	unsigned long flags;
+	struct hlist_head *head;
+	struct hlist_node *nd;
+
+	spin_lock_irqsave(&uidhash_lock, flags);
+	/*
+	 * collapse the chains so that the user_struct-s will
+	 * be still alive, but not in hashes. subsequent free_uid()
+	 * will free them.
+	 */
+	for (i = 0; i < UIDHASH_SZ; i++) {
+		head = ns->uidhash_table + i;
+		while (!hlist_empty(head)) {
+			nd = head->first;
+			hlist_del_init(nd);
+		}
+	}
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+
+	free_uid(ns->root_user);
+}
 
 static int __init uid_cache_init(void)
 {
@@ -211,7 +232,7 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
+		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
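
The kernel/user.c changes convert the uid hash from a doubly linked list to an hlist and add release_uids(), which empties every chain while leaving the user_structs alive so a later free_uid() can drop them. A standalone C sketch of that intrusive-hash pattern follows; the hnode/hhead types and the open-coded container_of are simplified stand-ins for the kernel's <linux/list.h> machinery.

/*
 * Standalone sketch (userspace, not kernel code): entries hang off
 * hash buckets via an embedded node, lookup walks a chain and bumps a
 * refcount, and release collapses the chains without freeing entries.
 */
#include <stddef.h>
#include <stdio.h>

struct hnode { struct hnode *next; };
struct hhead { struct hnode *first; };

struct user {
	unsigned int uid;
	int count;			/* like user_struct.__count */
	struct hnode node;		/* embedded, like uidhash_node */
};

#define UIDHASH_SZ 4
static struct hhead table[UIDHASH_SZ];

static void hash_insert(struct user *u)
{
	struct hhead *h = &table[u->uid % UIDHASH_SZ];
	u->node.next = h->first;	/* like hlist_add_head() */
	h->first = &u->node;
}

static struct user *hash_find(unsigned int uid)
{
	struct hnode *n;

	for (n = table[uid % UIDHASH_SZ].first; n; n = n->next) {
		/* open-coded container_of: node -> enclosing struct */
		struct user *u = (struct user *)
			((char *)n - offsetof(struct user, node));
		if (u->uid == uid) {
			u->count++;
			return u;
		}
	}
	return NULL;
}

static void release_all(void)
{
	int i;

	/* collapse the chains; entries stay alive for their refcounts */
	for (i = 0; i < UIDHASH_SZ; i++)
		table[i].first = NULL;
}

int main(void)
{
	struct user root = { 0, 1, { NULL } };

	hash_insert(&root);
	printf("found: %d\n", hash_find(0) != NULL);		/* 1 */
	release_all();
	printf("found after release: %d\n", hash_find(0) != NULL); /* 0 */
	return 0;
}
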
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 85af9422ea6e..7af90fc4f0fd 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -39,7 +39,7 @@ static struct user_namespace *clone_user_ns(struct user_namespace *old_ns)
 	kref_init(&ns->kref);
 
 	for (n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(ns->uidhash_table + n);
+		INIT_HLIST_HEAD(ns->uidhash_table + n);
 
 	/* Insert new root user. */
 	ns->root_user = alloc_uid(ns, 0);
@@ -81,7 +81,7 @@ void free_user_ns(struct kref *kref)
 	struct user_namespace *ns;
 
 	ns = container_of(kref, struct user_namespace, kref);
-	free_uid(ns->root_user);
+	release_uids(ns);
 	kfree(ns);
 }
 
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 9d8180a0f0d8..816d7b24fa03 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -28,7 +28,9 @@ static struct uts_namespace *clone_uts_ns(struct uts_namespace *old_ns)
 	if (!ns)
 		return ERR_PTR(-ENOMEM);
 
+	down_read(&uts_sem);
 	memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
+	up_read(&uts_sem);
 	kref_init(&ns->kref);
 	return ns;
 }
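
The utsname fix takes uts_sem for reading around the memcpy() so the clone sees a consistent snapshot of the name even while a writer (e.g. a sethostname() caller) may hold the semaphore for writing. Below is a userspace analogy using a POSIX rwlock in place of the kernel rwsem; the correspondence is assumed for illustration, not the kernel API.

/*
 * Standalone sketch (userspace, not kernel code): a read lock around
 * the copy guarantees a torn-free snapshot against concurrent writers,
 * mirroring down_read(&uts_sem) / up_read(&uts_sem) above.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct uts_name { char nodename[65]; };

static struct uts_name global_name = { "oldhost" };
static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;

static void clone_name(struct uts_name *copy)
{
	pthread_rwlock_rdlock(&uts_lock);	/* like down_read(&uts_sem) */
	memcpy(copy, &global_name, sizeof(*copy));
	pthread_rwlock_unlock(&uts_lock);	/* like up_read(&uts_sem) */
}

int main(void)
{
	struct uts_name snap;

	clone_name(&snap);
	printf("cloned nodename: %s\n", snap.nodename);
	return 0;
}
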