Diffstat (limited to 'kernel/pid.c')
-rw-r--r--	kernel/pid.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index 36aa02ff17d6..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -270,7 +270,6 @@ void free_pid(struct pid *pid)
 			wake_up_process(ns->child_reaper);
 			break;
 		case 0:
-			ns->nr_hashed = -1;
 			schedule_work(&ns->proc_work);
 			break;
 		}
@@ -319,7 +318,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 
 	upid = pid->numbers + ns->level;
 	spin_lock_irq(&pidmap_lock);
-	if (ns->nr_hashed < 0)
+	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
 		goto out_unlock;
 	for ( ; upid >= pid->numbers; --upid) {
 		hlist_add_head_rcu(&upid->pid_chain,
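The rewritten test treats nr_hashed as two fields in one word: the low bits still count the pids hashed into this namespace, while a high flag bit (PIDNS_HASH_ADDING) records whether new allocations are permitted, replacing the negative-value sentinel removed in the first hunk. A minimal sketch of that encoding, assuming the flag is the top bit of an unsigned int (its definition is outside this kernel/pid.c-only diffstat); the two helpers are hypothetical, purely for illustration:

#define PIDNS_HASH_ADDING (1U << 31)	/* assumed value, not shown in this diff */

/* Allocation stays possible while the flag bit is set ... */
static inline int pidns_accepting_pids(unsigned int nr_hashed)
{
	return !!(nr_hashed & PIDNS_HASH_ADDING);
}

/* ... and the live pid count is everything below the flag bit. */
static inline unsigned int pidns_hashed_count(unsigned int nr_hashed)
{
	return nr_hashed & ~PIDNS_HASH_ADDING;
}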
@@ -332,7 +331,7 @@ out:
 	return pid;
 
 out_unlock:
-	spin_unlock(&pidmap_lock);
+	spin_unlock_irq(&pidmap_lock);
 out_free:
 	while (++i <= ns->level)
 		free_pidmap(pid->numbers + i);
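The out_unlock hunk is a small locking correction folded into the same patch: pidmap_lock is taken a few lines earlier with spin_lock_irq(), so the error path has to release it with spin_unlock_irq() as well, otherwise local interrupts would stay disabled after bailing out of alloc_pid(). A rough sketch of the pairing, with the surrounding context reconstructed rather than quoted from the diff:

	spin_lock_irq(&pidmap_lock);		/* disables local interrupts */
	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
		goto out_unlock;		/* namespace no longer accepts pids */
	/* hash the new pid under the lock */
	spin_unlock_irq(&pidmap_lock);		/* success path restores interrupts */
	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);		/* error path now uses the same pairing */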
@@ -342,12 +341,18 @@ out_free:
 	goto out;
 }
 
+void disable_pid_allocation(struct pid_namespace *ns)
+{
+	spin_lock_irq(&pidmap_lock);
+	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+	spin_unlock_irq(&pidmap_lock);
+}
+
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-	struct hlist_node *elem;
 	struct upid *pnr;
 
-	hlist_for_each_entry_rcu(pnr, elem,
+	hlist_for_each_entry_rcu(pnr,
 			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
 		if (pnr->nr == nr && pnr->ns == ns)
 			return container_of(pnr, struct pid,
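The new disable_pid_allocation() clears the flag under pidmap_lock, so it serializes against the PIDNS_HASH_ADDING test in alloc_pid(). Its caller is not visible here because the diffstat is limited to kernel/pid.c; a plausible usage sketch, assuming it is invoked from the namespace-teardown path (for example zap_pid_ns_processes() in kernel/pid_namespace.c), might look like:

/* Assumed caller, not part of this file's diff. */
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	/* Refuse any further pids in this namespace before reaping it;
	 * from here on alloc_pid() takes the out_unlock path above.
	 */
	disable_pid_allocation(pid_ns);

	/* ... signal the remaining tasks and wait for them to exit ... */
}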
@@ -573,6 +578,9 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
+	/* Verify no one has done anything silly */
+	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+
 	/* bump default and minimum pid_max based on number of cpus */
 	pid_max = min(pid_max_max, max_t(int, pid_max,
 				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
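The BUILD_BUG_ON is a compile-time assertion: the build only succeeds if PID_MAX_LIMIT is strictly below the flag bit, which guarantees the count portion of nr_hashed can never carry into PIDNS_HASH_ADDING. With the values assumed here (both definitions live outside this diff), the margin is large:

/* Worked numbers, assumed rather than quoted:
 *   PIDNS_HASH_ADDING = 1U << 31       = 0x80000000
 *   PID_MAX_LIMIT     <= 4*1024*1024   = 0x00400000  (64-bit configs of this era)
 * so even a namespace holding the maximum number of pids keeps the count bits
 * well clear of the flag bit; BUILD_BUG_ON() rejects any config where it would not.
 */
BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);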
@@ -584,7 +592,7 @@ void __init pidmap_init(void)
 	/* Reserve PID 0. We never call free_pidmap(0) */
 	set_bit(0, init_pid_ns.pidmap[0].page);
 	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
-	init_pid_ns.nr_hashed = 1;
+	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;
 
 	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
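Starting init_pid_ns.nr_hashed at PIDNS_HASH_ADDING instead of 1 means the counter part now begins at zero, and while the flag is set the decrement in free_pid() can never land on the 0 or 1 cases, so the reaper wake-up and proc_work only trigger once disable_pid_allocation() has cleared the flag. A small userspace-style demonstration of that arithmetic, assuming the flag value above and that free_pid() switches on --ns->nr_hashed as in the surrounding code:

#include <assert.h>

#define PIDNS_HASH_ADDING (1U << 31)	/* assumed flag value */

int main(void)
{
	unsigned int nr_hashed = PIDNS_HASH_ADDING;	/* boot-time state after this patch */

	nr_hashed++;				/* first pid hashed into the namespace */
	nr_hashed++;				/* second pid */
	--nr_hashed;				/* one pid freed */
	assert(nr_hashed != 0 && nr_hashed != 1);	/* flag set: cases 0/1 unreachable */

	nr_hashed &= ~PIDNS_HASH_ADDING;	/* disable_pid_allocation() clears the flag */
	--nr_hashed;				/* last pid freed */
	assert(nr_hashed == 0);			/* now case 0 fires: schedule proc_work */
	return 0;
}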