Diffstat (limited to 'kernel/pid.c')
-rw-r--r--  kernel/pid.c | 49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index a48879b0b921..25807e1b98dd 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -26,7 +26,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
-#include <linux/pspace.h>
+#include <linux/pid_namespace.h>
 
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
 static struct hlist_head *pid_hash;
@@ -43,9 +43,10 @@ int pid_max_max = PID_MAX_LIMIT;
 #define BITS_PER_PAGE		(PAGE_SIZE*8)
 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
 
-static inline int mk_pid(struct pspace *pspace, struct pidmap *map, int off)
+static inline int mk_pid(struct pid_namespace *pid_ns,
+		struct pidmap *map, int off)
 {
-	return (map - pspace->pidmap)*BITS_PER_PAGE + off;
+	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
 }
 
 #define find_next_offset(map, off)					\
@@ -57,7 +58,7 @@ static inline int mk_pid(struct pspace *pspace, struct pidmap *map, int off)
  * value does not cause lots of bitmaps to be allocated, but
  * the scheme scales to up to 4 million PIDs, runtime.
  */
-struct pspace init_pspace = {
+struct pid_namespace init_pid_ns = {
 	.pidmap = {
 	[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
 	},
@@ -80,25 +81,25 @@ struct pspace init_pspace = {
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static fastcall void free_pidmap(struct pspace *pspace, int pid)
+static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
 {
-	struct pidmap *map = pspace->pidmap + pid / BITS_PER_PAGE;
+	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
 	int offset = pid & BITS_PER_PAGE_MASK;
 
 	clear_bit(offset, map->page);
 	atomic_inc(&map->nr_free);
 }
 
-static int alloc_pidmap(struct pspace *pspace)
+static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
-	int i, offset, max_scan, pid, last = pspace->last_pid;
+	int i, offset, max_scan, pid, last = pid_ns->last_pid;
 	struct pidmap *map;
 
 	pid = last + 1;
 	if (pid >= pid_max)
 		pid = RESERVED_PIDS;
 	offset = pid & BITS_PER_PAGE_MASK;
-	map = &pspace->pidmap[pid/BITS_PER_PAGE];
+	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
 	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
 	for (i = 0; i <= max_scan; ++i) {
 		if (unlikely(!map->page)) {
@@ -120,11 +121,11 @@ static int alloc_pidmap(struct pspace *pspace)
 			do {
 				if (!test_and_set_bit(offset, map->page)) {
 					atomic_dec(&map->nr_free);
-					pspace->last_pid = pid;
+					pid_ns->last_pid = pid;
 					return pid;
 				}
 				offset = find_next_offset(map, offset);
-				pid = mk_pid(pspace, map, offset);
+				pid = mk_pid(pid_ns, map, offset);
 			/*
 			 * find_next_offset() found a bit, the pid from it
 			 * is in-bounds, and if we fell back to the last
@@ -135,34 +136,34 @@ static int alloc_pidmap(struct pspace *pspace)
 					(i != max_scan || pid < last ||
 					    !((last+1) & BITS_PER_PAGE_MASK)));
 		}
-		if (map < &pspace->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
+		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
 			++map;
 			offset = 0;
 		} else {
-			map = &pspace->pidmap[0];
+			map = &pid_ns->pidmap[0];
 			offset = RESERVED_PIDS;
 			if (unlikely(last == offset))
 				break;
 		}
-		pid = mk_pid(pspace, map, offset);
+		pid = mk_pid(pid_ns, map, offset);
 	}
 	return -1;
 }
 
-static int next_pidmap(struct pspace *pspace, int last)
+static int next_pidmap(struct pid_namespace *pid_ns, int last)
 {
 	int offset;
 	struct pidmap *map, *end;
 
 	offset = (last + 1) & BITS_PER_PAGE_MASK;
-	map = &pspace->pidmap[(last + 1)/BITS_PER_PAGE];
-	end = &pspace->pidmap[PIDMAP_ENTRIES];
+	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
+	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
 	for (; map < end; map++, offset = 0) {
 		if (unlikely(!map->page))
 			continue;
 		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
 		if (offset < BITS_PER_PAGE)
-			return mk_pid(pspace, map, offset);
+			return mk_pid(pid_ns, map, offset);
 	}
 	return -1;
 }
@@ -192,7 +193,7 @@ fastcall void free_pid(struct pid *pid)
 	hlist_del_rcu(&pid->pid_chain);
 	spin_unlock_irqrestore(&pidmap_lock, flags);
 
-	free_pidmap(&init_pspace, pid->nr);
+	free_pidmap(&init_pid_ns, pid->nr);
 	call_rcu(&pid->rcu, delayed_put_pid);
 }
 
@@ -206,7 +207,7 @@ struct pid *alloc_pid(void)
 	if (!pid)
 		goto out;
 
-	nr = alloc_pidmap(&init_pspace);
+	nr = alloc_pidmap(&init_pid_ns);
 	if (nr < 0)
 		goto out_free;
 
@@ -348,7 +349,7 @@ struct pid *find_ge_pid(int nr)
 		pid = find_pid(nr);
 		if (pid)
 			break;
-		nr = next_pidmap(&init_pspace, nr);
+		nr = next_pidmap(&init_pid_ns, nr);
 	} while (nr > 0);
 
 	return pid;
@@ -382,10 +383,10 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-	init_pspace.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	/* Reserve PID 0. We never call free_pidmap(0) */
-	set_bit(0, init_pspace.pidmap[0].page);
-	atomic_dec(&init_pspace.pidmap[0].nr_free);
+	set_bit(0, init_pid_ns.pidmap[0].page);
+	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
 
 	pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
 			__alignof__(struct pid),
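
The hunks above are a pure rename (struct pspace becomes struct pid_namespace); the bitmap allocator itself is unchanged. As a reading aid, below is a minimal user-space sketch of the scheme those functions implement: one bit per PID, grouped into page-sized blocks, with allocation scanning forward from the last PID handed out. Everything here is a simplified stand-in for illustration, not the kernel code: it is single-threaded and omits the atomic bitops, locking, lazy page allocation, and RESERVED_PIDS wrap-around policy that alloc_pidmap() above handles.

/*
 * Toy model of the pidmap: a bit per PID in page-sized blocks, plus a
 * per-block free count.  Names mirror the kernel's for readability.
 */
#include <stdio.h>

#define PAGE_SIZE	4096
#define BITS_PER_PAGE	(PAGE_SIZE * 8)
#define PIDMAP_ENTRIES	4			/* toy pid_max of 131072 */
#define PID_MAX		(PIDMAP_ENTRIES * BITS_PER_PAGE)

struct pidmap {
	int nr_free;				/* free bits left in this block */
	unsigned char page[PAGE_SIZE];		/* one bit per PID */
};

static struct pidmap pidmap[PIDMAP_ENTRIES];
static int last_pid;

static int test_and_set_bit(int nr, unsigned char *addr)
{
	int old = (addr[nr / 8] >> (nr % 8)) & 1;

	addr[nr / 8] |= 1 << (nr % 8);
	return old;
}

static void clear_bit(int nr, unsigned char *addr)
{
	addr[nr / 8] &= ~(1 << (nr % 8));
}

/* Same arithmetic as the kernel's mk_pid(): block index times block size. */
static int mk_pid(struct pidmap *map, int off)
{
	return (int)(map - pidmap) * BITS_PER_PAGE + off;
}

static int alloc_pidmap(void)
{
	int pid = (last_pid + 1) % PID_MAX;
	int scanned = 0;

	while (scanned < PID_MAX) {
		struct pidmap *map = &pidmap[pid / BITS_PER_PAGE];
		int off = pid & (BITS_PER_PAGE - 1);

		if (map->nr_free == 0) {
			/* O(1) full-block check: skip the whole block. */
			scanned += BITS_PER_PAGE - off;
			pid = (pid + BITS_PER_PAGE - off) % PID_MAX;
			continue;
		}
		if (!test_and_set_bit(off, map->page)) {
			map->nr_free--;
			last_pid = pid;
			return mk_pid(map, off);
		}
		pid = (pid + 1) % PID_MAX;
		scanned++;
	}
	return -1;				/* every PID is in use */
}

static void free_pidmap(int pid)
{
	struct pidmap *map = &pidmap[pid / BITS_PER_PAGE];

	clear_bit(pid & (BITS_PER_PAGE - 1), map->page);
	map->nr_free++;
}

int main(void)
{
	int i, a, b, c;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		pidmap[i].nr_free = BITS_PER_PAGE;

	/* Reserve PID 0, as pidmap_init() above does for the real map. */
	test_and_set_bit(0, pidmap[0].page);
	pidmap[0].nr_free--;

	a = alloc_pidmap();
	b = alloc_pidmap();
	c = alloc_pidmap();
	printf("first pids: %d %d %d\n", a, b, c);	/* 1 2 3 */

	free_pidmap(2);
	/* The scan resumes after last_pid, so PID 2 is not reused yet. */
	printf("next pid: %d\n", alloc_pidmap());	/* 4 */
	return 0;
}

Two properties of the real allocator survive even in this sketch: the per-block nr_free count gives a constant-time "any bit free here?" test so a full block can be skipped without reading its bitmap, and scanning forward from last_pid delays PID reuse so a freshly freed PID is not handed straight back out.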