Diffstat (limited to 'arch/sparc/mm/tsb.c')

 -rw-r--r--  arch/sparc/mm/tsb.c | 503
 1 file changed, 503 insertions, 0 deletions
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
new file mode 100644
index 000000000000..36a0813f9517
--- /dev/null
+++ b/arch/sparc/mm/tsb.c
@@ -0,0 +1,503 @@
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
        vaddr >>= hash_shift;
        return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}
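
/* Worked example (illustrative, assuming 8K base pages and the smallest
 * 8K base TSB, i.e. 8192 / sizeof(struct tsb) = 512 entries):
 *
 *      hash = tsb_hash(vaddr, PAGE_SHIFT, 512);        -> (vaddr >> 13) & 511
 *      tag  = vaddr >> 22;
 *
 * The bits just above the page offset select the TSB slot, while the high
 * virtual address bits form the tag that tag_compare() checks to confirm
 * the entry actually belongs to that address.
 */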

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, PAGE_SHIFT,
                                              KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v))
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
        }
}

static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
        unsigned long i;

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                unsigned long tag, ent, hash;

                v &= ~0x1UL;

                hash = tsb_hash(v, hash_shift, nentries);
                ent = tsb + (hash * sizeof(struct tsb));
                tag = (v >> 22UL);

                tsb_flush(ent, tag);
        }
}

void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(base);
        __flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE        HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE       HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE        HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE       HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE        HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE       HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE        HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE       HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE        HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE       HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);

        base = TSBMAP_BASE;
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;

        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;

        default:
                printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
                       current->comm, current->pid, tsb_bytes);
                do_exit(SIGSEGV);
        };
        tte |= pte_sz_bits(page_sz);

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB. */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
        }

        /* Setup the Hypervisor TSB descriptor. */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_idx = HV_PGSZ_IDX_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
                        break;
#endif
                default:
                        BUG();
                };
                hp->assoc = 1;
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_mask = HV_PGSZ_MASK_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
                        break;
#endif
                default:
                        BUG();
                };
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}
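
/* Illustrative summary of the two mapping cases above, assuming a freshly
 * allocated 8K base TSB (size field 0x0, 512 entries):
 *
 *   - cheetah_plus / hypervisor: the TSB is referenced by physical
 *     address, so tsb_reg_val = __pa(tsb) | 0x0 and no locked mapping is
 *     recorded (tsb_map_vaddr == tsb_map_pte == 0).  On sun4v the
 *     hv_tsb_descr additionally reports assoc = 1 and
 *     num_ttes = 8192 / 16 = 512 to the hypervisor.
 *
 *   - older cpus (e.g. spitfire): the TSB is accessed through a virtual
 *     mapping at TSBMAP_BASE, so tsb_reg_val encodes that virtual base
 *     plus the size field, and tsb_map_pte holds the PAGE_KERNEL_LOCKED
 *     TTE used to map the TSB there.
 */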

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
        "tsb_8KB",
        "tsb_16KB",
        "tsb_32KB",
        "tsb_64KB",
        "tsb_128KB",
        "tsb_256KB",
        "tsb_512KB",
        "tsb_1MB",
};

void __init pgtable_cache_init(void)
{
        unsigned long i;

        for (i = 0; i < 8; i++) {
                unsigned long size = 8192 << i;
                const char *name = tsb_cache_names[i];

                tsb_caches[i] = kmem_cache_create(name,
                                                  size, size,
                                                  0, NULL);
                if (!tsb_caches[i]) {
                        prom_printf("Could not create %s cache\n", name);
                        prom_halt();
                }
        }
}

int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
        unsigned long num_ents = (new_size / sizeof(struct tsb));

        if (sysctl_tsb_ratio < 0)
                return num_ents - (num_ents >> -sysctl_tsb_ratio);
        else
                return num_ents + (num_ents >> sysctl_tsb_ratio);
}
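
/* Worked example (illustrative): with the default sysctl_tsb_ratio of -2
 * and an 8K TSB (512 entries), the limit is 512 - (512 >> 2) = 384
 * entries, i.e. 3/4 of the TSB capacity as described in the comment
 * below.  A positive ratio instead lets RSS exceed the entry count
 * before growing, e.g. a ratio of 2 gives 512 + (512 >> 2) = 640.
 */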

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long new_size, old_size, flags;
        struct tsb *old_tsb, *new_tsb;
        unsigned long new_cache_index, old_cache_index;
        unsigned long new_rss_limit;
        gfp_t gfp_flags;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);

        new_cache_index = 0;
        for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
                new_rss_limit = tsb_size_to_rss_limit(new_size);
                if (new_rss_limit > rss)
                        break;
                new_cache_index++;
        }

        if (new_size == max_tsb_size)
                new_rss_limit = ~0UL;

retry_tsb_alloc:
        gfp_flags = GFP_KERNEL;
        if (new_size > (PAGE_SIZE * 2))
                gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

        new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
                                        gfp_flags, numa_node_id());
        if (unlikely(!new_tsb)) {
                /* Not being able to fork due to a high-order TSB
                 * allocation failure is very bad behavior.  Just back
                 * down to a 0-order allocation and force no TSB
                 * growing for this address space.
                 */
                if (mm->context.tsb_block[tsb_index].tsb == NULL &&
                    new_cache_index > 0) {
                        new_cache_index = 0;
                        new_size = 8192;
                        new_rss_limit = ~0UL;
                        goto retry_tsb_alloc;
                }

                /* If we failed on a TSB grow, we are under serious
                 * memory pressure so don't try to grow any more.
                 */
                if (mm->context.tsb_block[tsb_index].tsb != NULL)
                        mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
                return;
        }

        /* Mark all tags as invalid. */
        tsb_init(new_tsb, new_size);

        /* OK, we are about to commit the changes.  If we are
         * growing an existing TSB the locking is very tricky,
         * so WATCH OUT!
         *
         * We have to hold mm->context.lock while committing to the
         * new TSB; this synchronizes us with processors in
         * flush_tsb_user() and switch_mm() for this address space.
         *
         * But even with that lock held, processors run asynchronously
         * accessing the old TSB via TLB miss handling.  This is OK
         * because those actions are just propagating state from the
         * Linux page tables into the TSB; the page table mappings
         * themselves are not being changed.  If a real fault occurs,
         * the processor will synchronize with us when it hits
         * flush_tsb_user(); the same is true when vmscan is modifying
         * the page tables.  The only thing we need to be careful with
         * is to skip any locked TSB entries during copy_tsb().
         *
         * When we finish committing to the new TSB, we have to drop
         * the lock and ask all other cpus running this address space
         * to run tsb_context_switch() to see the new TSB table.
         */
        spin_lock_irqsave(&mm->context.lock, flags);

        old_tsb = mm->context.tsb_block[tsb_index].tsb;
        old_cache_index =
                (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
        old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
                    sizeof(struct tsb));


        /* Handle multiple threads trying to grow the TSB at the same time.
         * One will get in here first, and bump the size and the RSS limit.
         * The others will get in here next and hit this check.
         */
        if (unlikely(old_tsb &&
                     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
                spin_unlock_irqrestore(&mm->context.lock, flags);

                kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
                return;
        }

        mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

        if (old_tsb) {
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
                                     unsigned long new_tsb_size);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;

                if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
                copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
        }

        mm->context.tsb_block[tsb_index].tsb = new_tsb;
        setup_tsb_params(mm, tsb_index, new_size);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Reload it on the local cpu. */
                tsb_context_switch(mm);

                /* Now force other processors to do the same. */
                preempt_disable();
                smp_tsb_sync(mm);
                preempt_enable();

                /* Now it is safe to free the old tsb. */
                kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
        }
}
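
/* Sketch of the sequence tsb_grow() performs when growing an existing
 * TSB (a summary of the code above, not an additional code path):
 *
 *      do_sparc64_fault()
 *        tsb_grow(mm, MM_TSB_BASE, rss)
 *          kmem_cache_alloc_node()     allocate the larger TSB
 *          tsb_init()                  mark every new tag invalid
 *          spin_lock_irqsave()         commit under mm->context.lock
 *          copy_tsb()                  migrate entries, skipping locked ones
 *          setup_tsb_params()          recompute tsb_reg_val / HV descriptor
 *          spin_unlock_irqrestore()
 *          tsb_context_switch()        reload the TSB on the local cpu
 *          smp_tsb_sync()              force other cpus to reload it
 *          kmem_cache_free()           free the old TSB
 */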

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned long huge_pte_count;
#endif
        unsigned int i;

        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
        /* We reset it to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
        huge_pte_count = mm->context.huge_pte_count;
        mm->context.huge_pte_count = 0;
#endif

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        for (i = 0; i < MM_NUM_TSBS; i++)
                mm->context.tsb_block[i].tsb = NULL;

        /* If this is fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyway.
         */
        tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
        if (unlikely(huge_pte_count))
                tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
                return -ENOMEM;

        return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
        unsigned long cache_index;

        if (!tp->tsb)
                return;
        cache_index = tp->tsb_reg_val & 0x7UL;
        kmem_cache_free(tsb_caches[cache_index], tp->tsb);
        tp->tsb = NULL;
        tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
        unsigned long flags, i;

        for (i = 0; i < MM_NUM_TSBS; i++)
                tsb_destroy_one(&mm->context.tsb_block[i]);

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}