path: root/kernel/timer.c
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c  49
1 file changed, 11 insertions(+), 38 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 05809c2e2fd6..1d7dd6267c2d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -84,7 +84,7 @@ typedef struct tvec_t_base_s tvec_base_t;
 
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
 
 static inline void set_running_timer(tvec_base_t *base,
 				struct timer_list *timer)
@@ -408,7 +408,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index)
  * This function cascades all vectors and executes all expired timer
  * vectors.
  */
-#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
+#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 static inline void __run_timers(tvec_base_t *base)
 {
@@ -1324,46 +1324,19 @@ asmlinkage long sys_getpid(void)
 }
 
 /*
- * Accessing ->group_leader->real_parent is not SMP-safe, it could
- * change from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer (it was and remains a dereferencable kernel pointer
- * no matter what): we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * NOTE2: ->group_leader never changes from under us.
+ * Accessing ->real_parent is not SMP-safe, it could
+ * change from under us. However, we can use a stale
+ * value of ->real_parent under rcu_read_lock(), see
+ * release_task()->call_rcu(delayed_put_task_struct).
  */
 asmlinkage long sys_getppid(void)
 {
 	int pid;
-	struct task_struct *me = current;
-	struct task_struct *parent;
 
-	parent = me->group_leader->real_parent;
-	for (;;) {
-		pid = parent->tgid;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-{
-		struct task_struct *old = parent;
+	rcu_read_lock();
+	pid = rcu_dereference(current->real_parent)->tgid;
+	rcu_read_unlock();
 
-		/*
-		 * Make sure we read the pid before re-reading the
-		 * parent pointer:
-		 */
-		smp_rmb();
-		parent = me->group_leader->real_parent;
-		if (old != parent)
-			continue;
-}
-#endif
-		break;
-	}
 	return pid;
 }
 
@@ -1688,7 +1661,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int __cpuinit timer_cpu_notify(struct notifier_block *self,
 						unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1708,7 +1681,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block __cpuinitdata timers_nb = {
 	.notifier_call = timer_cpu_notify,
 };
 
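
A note on the INDEX() change above: it is standard macro hygiene, giving the parameter N and the whole expansion their own parentheses so the macro still evaluates as intended when N is an expression or when the result is combined with other operators. A small stand-alone sketch of the failure mode follows; the constant values below are made-up demo stand-ins, not the real timer configuration:

#include <stdio.h>

/* Demo stand-ins only; the real values live in kernel/timer.c. */
#define TVR_BITS 8
#define TVN_BITS 6
#define TVN_MASK 63

/* Old form: neither N nor the whole expression is parenthesized. */
#define INDEX_OLD(N) (timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK
/* New form from the diff above. */
#define INDEX_NEW(N) ((timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

int main(void)
{
	unsigned long timer_jiffies = 0x12345678UL;
	int i = 1;

	/*
	 * With an expression argument the old macro shifts by
	 * TVR_BITS + i + 1 * TVN_BITS (15) instead of the intended
	 * TVR_BITS + (i + 1) * TVN_BITS (20), so the two disagree.
	 */
	printf("old: %lu\n", INDEX_OLD(i + 1));
	printf("new: %lu\n", INDEX_NEW(i + 1));
	return 0;
}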
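
The sys_getppid() rewrite above leans on the usual RCU read-side contract: a reader may see a stale ->real_parent, but the task_struct it points at is not freed until a grace period has elapsed, because release_task() defers the final put through call_rcu(delayed_put_task_struct). Below is a minimal kernel-style sketch of that reader/updater pairing against a hypothetical RCU-protected pointer; struct cfg, cur_cfg and both helpers are invented for illustration and are not part of timer.c, and this is a pattern sketch rather than a drop-in file:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int val;
};

static struct cfg *cur_cfg;		/* RCU-protected; may be replaced at any time */

/* Reader: same shape as the new sys_getppid() above. */
static int cfg_read_val(void)
{
	struct cfg *c;
	int v = 0;

	rcu_read_lock();
	c = rcu_dereference(cur_cfg);	/* possibly stale, but safe to dereference */
	if (c)
		v = c->val;
	rcu_read_unlock();
	return v;
}

/* Updater: publish a new object, wait out existing readers, free the old one.
 * For task_structs this "wait, then free" step is what
 * release_task()->call_rcu(delayed_put_task_struct) provides. */
static int cfg_update_val(int val)
{
	struct cfg *new_cfg, *old_cfg;

	new_cfg = kmalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return -ENOMEM;
	new_cfg->val = val;

	old_cfg = cur_cfg;		/* assumes the caller holds an update-side lock */
	rcu_assign_pointer(cur_cfg, new_cfg);
	synchronize_rcu();		/* all readers that could still see old_cfg are done */
	kfree(old_cfg);
	return 0;
}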