Diffstat (limited to 'arch/arm/mm/mmu.c')

 -rw-r--r--  arch/arm/mm/mmu.c | 349
 1 file changed, 171 insertions(+), 178 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 94fd4bf5cb9e..2ba1530d1ce1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -176,28 +176,42 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-struct mem_types {
-	unsigned int	prot_pte;
-	unsigned int	prot_l1;
-	unsigned int	prot_sect;
-	unsigned int	domain;
-};
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
 
-static struct mem_types mem_types[] __initdata = {
-	[MT_DEVICE] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-				PMD_SECT_AP_WRITE,
-		.domain    = DOMAIN_IO,
+static struct mem_type mem_types[] = {
+	[MT_DEVICE] = {			/* Strongly ordered / ARMv6 shared device */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_NONSHARED] = {	/* ARMv6 non-shared device */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte_ext	= PTE_EXT_TEX(2),
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_CACHED] = {		/* ioremap_cached */
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
+		.domain		= DOMAIN_IO,
+	},
+	[MT_DEVICE_IXP2000] = {		/* IXP2400 requires XCB=101 for on-chip I/O */
+		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_l1	= PMD_TYPE_TABLE,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
+				  PMD_SECT_TEX(1),
+		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_LOW_VECTORS] = {
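The hunk above replaces the file-local struct mem_types with the shared struct mem_type (moved to a common header elsewhere in this series) and folds the repeated device-mapping bits into the PROT_PTE_DEVICE/PROT_SECT_DEVICE macros, so each entry states only what makes it different. A standalone sketch of that composition pattern, using placeholder bit values rather than the authoritative ARM page-table definitions:

/* Sketch of the mem_types[] composition pattern; bit values are
 * placeholders, not the real ARM definitions. */
#include <stdio.h>

#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_XN		(1 << 4)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_UNCACHED	(0)

#define PROT_SECT_DEVICE	(PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE)

struct mem_type {
	unsigned int prot_sect;
	unsigned int domain;
};

static const struct mem_type mt_device = {
	.prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
	.domain    = 2,				/* stand-in for DOMAIN_IO */
};

int main(void)
{
	printf("MT_DEVICE prot_sect = %#x\n", mt_device.prot_sect);	/* 0x412 */
	return 0;
}

One entry per memory type also lets build_mem_type_table() apply CPU-specific fixups (PMD_BIT4, domain bits) uniformly, as the later hunks do.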
@@ -213,30 +227,20 @@ static struct mem_types mem_types[] __initdata = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
+		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
-				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
-				PMD_SECT_TEX(1),
-		.domain    = DOMAIN_IO,
-	},
-	[MT_NONSHARED_DEVICE] = {
-		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
-				PMD_SECT_AP_WRITE,
-		.domain    = DOMAIN_IO,
-	}
 };
 
+const struct mem_type *get_mem_type(unsigned int type)
+{
+	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
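get_mem_type() is the other addition here: callers outside this file get bounds-checked, read-only access to the table instead of indexing mem_types[] directly. A minimal runnable sketch of the same accessor shape, with placeholder table contents:

#include <stdio.h>

struct mem_type { unsigned int prot_sect, domain; };

static struct mem_type mem_types[] = {
	{ 0x412, 2 },			/* placeholder entries */
	{ 0x412, 2 },
};

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Same shape as the new kernel accessor: NULL for an out-of-range type. */
static const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}

int main(void)
{
	printf("valid:   %p\n", (void *)get_mem_type(0));
	printf("invalid: %p\n", (void *)get_mem_type(99));	/* NULL */
	return 0;
}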
@@ -262,20 +266,23 @@ static void __init build_mem_type_table(void)
 	}
 
 	/*
-	 * Xscale must not have PMD bit 4 set for section mappings.
+	 * ARMv5 and lower, bit 4 must be set for page tables.
+	 * (was: cache "update-able on write" bit on ARM610)
+	 * However, Xscale cores require this bit to be cleared.
 	 */
-	if (cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+	if (cpu_is_xscale()) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
-
-	/*
-	 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
-	 * page tables.
-	 */
-	if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
-		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_l1 &= ~PMD_BIT4;
+		}
+	} else if (cpu_arch < CPU_ARCH_ARMv6) {
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			if (mem_types[i].prot_l1)
 				mem_types[i].prot_l1 |= PMD_BIT4;
+			if (mem_types[i].prot_sect)
+				mem_types[i].prot_sect |= PMD_BIT4;
+		}
+	}
 
 	cp = &cache_policies[cachepolicy];
 	kern_pgprot = user_pgprot = cp->pte;
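Two behavioural changes hide in this hunk: XScale now clears bit 4 from prot_l1 as well as prot_sect, and other pre-ARMv6 cores set it on prot_sect as well as prot_l1. The latter is needed because the first hunk dropped PMD_BIT4 from the static prot_sect initializers (ARMv6 reuses that bit as XN, spelled PMD_SECT_XN above). A standalone re-implementation of the adjustment pass, with placeholder flag values:

#include <stdbool.h>
#include <stdio.h>

#define PMD_BIT4	(1 << 4)
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct mem_type { unsigned int prot_l1, prot_sect; };

static struct mem_type mem_types[] = {
	{ 0x01, 0x402 },	/* placeholder: type with an L1 page table */
	{ 0x00, 0x002 },	/* placeholder: section-only type */
};

/* Mirrors the new build_mem_type_table() bit-4 logic. */
static void adjust_bit4(bool is_xscale, bool pre_v6)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		if (is_xscale) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		} else if (pre_v6) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}
}

int main(void)
{
	adjust_bit4(false, true);		/* e.g. an ARM926 */
	printf("prot_l1=%#x prot_sect=%#x\n",
	       mem_types[0].prot_l1, mem_types[0].prot_sect);	/* 0x11 0x412 */
	return 0;
}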
@@ -296,13 +303,6 @@ static void __init build_mem_type_table(void)
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 		/*
-		 * bit 4 becomes XN which we must clear for the
-		 * kernel memory mapping.
-		 */
-		mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
-		mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
-
-		/*
 		 * Mark cache clean areas and XIP ROM read only
 		 * from SVC mode and no access from userspace.
 		 */
@@ -368,64 +368,126 @@ static void __init build_mem_type_table(void)
 	}
 	printk("Memory policy: ECC %sabled, Data cache %s\n",
 		ecc_mask ? "en" : "dis", cp->policy);
+
+	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+		struct mem_type *t = &mem_types[i];
+		if (t->prot_l1)
+			t->prot_l1 |= PMD_DOMAIN(t->domain);
+		if (t->prot_sect)
+			t->prot_sect |= PMD_DOMAIN(t->domain);
+	}
 }
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
+	pte_t *pte;
 
-	if (virt & (1 << 20))
-		pmdp++;
+	if (pmd_none(*pmd)) {
+		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+	}
 
-	*pmdp = __pmd(phys | prot);
-	flush_pmd_entry(pmdp);
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
+			    type->prot_pte_ext);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-/*
- * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
- */
-static inline void
-alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, unsigned long phys,
+				      const struct mem_type *type)
 {
-	int i;
+	pmd_t *pmd = pmd_offset(pgd, addr);
+
+	/*
+	 * Try a section mapping - end, addr and phys must all be aligned
+	 * to a section boundary.  Note that PMDs refer to the individual
+	 * L1 entries, whereas PGDs refer to a group of L1 entries making
+	 * up one logical pointer to an L2 table.
+	 */
+	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+		pmd_t *p = pmd;
+
+		if (addr & SECTION_SIZE)
+			pmd++;
 
-	for (i = 0; i < 16; i += 1) {
-		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
+		do {
+			*pmd = __pmd(phys | type->prot_sect);
+			phys += SECTION_SIZE;
+		} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-		virt += (PGDIR_SIZE / 2);
+		flush_pmd_entry(p);
+	} else {
+		/*
+		 * No need to loop; pte's aren't interested in the
+		 * individual L1 entries.
+		 */
+		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
 	}
 }
 
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
+static void __init create_36bit_mapping(struct map_desc *md,
+					const struct mem_type *type)
 {
-	pmd_t *pmdp = pmd_off_k(virt);
-	pte_t *ptep;
+	unsigned long phys, addr, length, end;
+	pgd_t *pgd;
+
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
+
+	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
+		printk(KERN_ERR "MM: CPU does not support supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
 
-	if (pmd_none(*pmdp)) {
-		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-					       sizeof(pte_t));
+	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
+	 *	Since domain assignments can in fact be arbitrary, the
+	 *	'domain == 0' check below is required to insure that ARMv6
+	 *	supersections are only allocated for domain 0 regardless
+	 *	of the actual domain assignments in use.
+	 */
+	if (type->domain) {
+		printk(KERN_ERR "MM: invalid domain in supersection "
+		       "mapping for 0x%08llx at 0x%08lx\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
+	}
 
-		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
+	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
+		printk(KERN_ERR "MM: cannot create mapping for "
+		       "0x%08llx at 0x%08lx invalid alignment\n",
+		       __pfn_to_phys((u64)md->pfn), addr);
+		return;
 	}
-	ptep = pte_offset_kernel(pmdp, virt);
 
-	set_pte_ext(ptep, pfn_pte(phys >> PAGE_SHIFT, prot), 0);
+	/*
+	 * Shift bits [35:32] of address into bits [23:20] of PMD
+	 * (See ARMv6 spec).
+	 */
+	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		pmd_t *pmd = pmd_offset(pgd, addr);
+		int i;
+
+		for (i = 0; i < 16; i++)
+			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
+
+		addr += SUPERSECTION_SIZE;
+		phys += SUPERSECTION_SIZE;
+		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
+	} while (addr != end);
 }
 
 /*
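The bits [35:32] to bits [23:20] shift in create_36bit_mapping() is easiest to verify with a concrete pfn. A self-contained check assuming the usual PAGE_SHIFT of 12; the pfn value is arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long pfn = 0x110000;	/* physical address 0x110000000, beyond 4GB */
	uint32_t phys = (uint32_t)(pfn << PAGE_SHIFT);	/* low 32 bits only: 0x10000000 */
	unsigned long extn = (unsigned long)(((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	/* bits [35:32] of the 36-bit address (here 0x1) land in PMD bits [23:20] */
	assert(extn == 0x100000);
	printf("low 32 bits 0x%08x, supersection extension bits %#lx\n",
	       (unsigned int)phys, extn);
	return 0;
}

Each loop iteration in the new function then writes 16 identical PMD entries, since a 16 MiB supersection must be replicated across all sixteen 1 MiB L1 slots it spans.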
@@ -437,10 +499,9 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
  */
 void __init create_mapping(struct map_desc *md)
 {
-	unsigned long virt, length;
-	int prot_sect, prot_l1, domain;
-	pgprot_t prot_pte;
-	unsigned long off = (u32)__pfn_to_phys(md->pfn);
+	unsigned long phys, addr, length, end;
+	const struct mem_type *type;
+	pgd_t *pgd;
 
 	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 		printk(KERN_WARNING "BUG: not creating mapping for "
@@ -456,105 +517,37 @@ void __init create_mapping(struct map_desc *md)
 		       __pfn_to_phys((u64)md->pfn), md->virtual);
 	}
 
-	domain	  = mem_types[md->type].domain;
-	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
-	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
-	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
+	type = &mem_types[md->type];
 
 	/*
 	 * Catch 36-bit addresses
 	 */
-	if(md->pfn >= 0x100000) {
-		if(domain) {
-			printk(KERN_ERR "MM: invalid domain in supersection "
-				"mapping for 0x%08llx at 0x%08lx\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-		if((md->virtual | md->length | __pfn_to_phys(md->pfn))
-			& ~SUPERSECTION_MASK) {
-			printk(KERN_ERR "MM: cannot create mapping for "
-				"0x%08llx at 0x%08lx invalid alignment\n",
-				__pfn_to_phys((u64)md->pfn), md->virtual);
-			return;
-		}
-
-		/*
-		 * Shift bits [35:32] of address into bits [23:20] of PMD
-		 * (See ARMv6 spec).
-		 */
-		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
+	if (md->pfn >= 0x100000) {
+		create_36bit_mapping(md, type);
+		return;
 	}
 
-	virt   = md->virtual;
-	off   -= virt;
-	length = md->length;
+	addr = md->virtual;
+	phys = (unsigned long)__pfn_to_phys(md->pfn);
+	length = PAGE_ALIGN(md->length);
 
-	if (mem_types[md->type].prot_l1 == 0 &&
-	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
+	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
 		       "be mapped using pages, ignoring.\n",
-		       __pfn_to_phys(md->pfn), md->virtual);
+		       __pfn_to_phys(md->pfn), addr);
 		return;
 	}
 
-	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
+	pgd = pgd_offset_k(addr);
+	end = addr + length;
+	do {
+		unsigned long next = pgd_addr_end(addr, end);
 
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
-
-	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
-	 *	Since domain assignments can in fact be arbitrary, the
-	 *	'domain == 0' check below is required to insure that ARMv6
-	 *	supersections are only allocated for domain 0 regardless
-	 *	of the actual domain assignments in use.
-	 */
-	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
-		&& domain == 0) {
-		/*
-		 * Align to supersection boundary if !high pages.
-		 * High pages have already been checked for proper
-		 * alignment above and they will fail the SUPSERSECTION_MASK
-		 * check because of the way the address is encoded into
-		 * offset.
-		 */
-		if (md->pfn <= 0x100000) {
-			while ((virt & ~SUPERSECTION_MASK ||
-				(virt + off) & ~SUPERSECTION_MASK) &&
-				length >= (PGDIR_SIZE / 2)) {
-				alloc_init_section(virt, virt + off, prot_sect);
-
-				virt   += (PGDIR_SIZE / 2);
-				length -= (PGDIR_SIZE / 2);
-			}
-		}
+		alloc_init_section(pgd, addr, next, phys, type);
 
-		while (length >= SUPERSECTION_SIZE) {
-			alloc_init_supersection(virt, virt + off, prot_sect);
-
-			virt   += SUPERSECTION_SIZE;
-			length -= SUPERSECTION_SIZE;
-		}
-	}
-
-	/*
-	 * A section mapping covers half a "pgdir" entry.
-	 */
-	while (length >= (PGDIR_SIZE / 2)) {
-		alloc_init_section(virt, virt + off, prot_sect);
-
-		virt   += (PGDIR_SIZE / 2);
-		length -= (PGDIR_SIZE / 2);
-	}
-
-	while (length >= PAGE_SIZE) {
-		alloc_init_page(virt, virt + off, prot_l1, prot_pte);
-
-		virt   += PAGE_SIZE;
-		length -= PAGE_SIZE;
-	}
+		phys += next - addr;
+		addr = next;
+	} while (pgd++, addr != end);
 }
 
 /*
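create_mapping() now walks the kernel pgd with the generic pgd_addr_end() clamping idiom: each iteration covers at most one pgd entry, and alloc_init_section() chooses per chunk between section and page mappings. A self-contained sketch of the idiom; the overflow-safe comparison mirrors the asm-generic definition as best I recall, so treat the exact form as an assumption:

#include <stdio.h>

#define PGDIR_SHIFT	21	/* ARM: one pgd entry covers 2 MiB (two 1 MiB sections) */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Next pgd boundary after addr, clamped to end. */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
	return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xc0100000, end = 0xc0500000;

	do {
		unsigned long next = pgd_addr_end(addr, end);
		/* one alloc_init_section() call per chunk in the real code */
		printf("chunk 0x%08lx..0x%08lx\n", addr, next);
		addr = next;
	} while (addr != end);
	return 0;
}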