Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/cmm.c	8
-rw-r--r--	arch/s390/mm/fault.c	9
-rw-r--r--	arch/s390/mm/hugetlbpage.c	2
-rw-r--r--	arch/s390/mm/init.c	45
-rw-r--r--	arch/s390/mm/pageattr.c	24
-rw-r--r--	arch/s390/mm/pgtable.c	235
-rw-r--r--	arch/s390/mm/vmem.c	15
7 files changed, 201 insertions, 137 deletions
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 479e94282910..9d84a1feefef 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -458,12 +458,10 @@ static int __init cmm_init(void)
 	if (rc)
 		goto out_pm;
 	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
-	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
-	if (rc)
-		goto out_kthread;
-	return 0;
+	if (!IS_ERR(cmm_thread_ptr))
+		return 0;
 
-out_kthread:
+	rc = PTR_ERR(cmm_thread_ptr);
 	unregister_pm_notifier(&cmm_power_notifier);
 out_pm:
 	unregister_oom_notifier(&cmm_oom_nb);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2fb9e63b8fc4..047c3e4c59a2 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -395,8 +395,13 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
 	int fault;
 
 	trans_exc_code = regs->int_parm_long;
-	/* Protection exception is suppressing, decrement psw address. */
-	regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+	/*
+	 * Protection exceptions are suppressing, decrement psw address.
+	 * The exception to this rule are aborted transactions, for these
+	 * the PSW already points to the correct location.
+	 */
+	if (!(regs->int_code & 0x200))
+		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 532525ec88c1..121089d57802 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -39,7 +39,7 @@ int arch_prepare_hugepage(struct page *page)
 	if (!ptep)
 		return -ENOMEM;
 
-	pte = mk_pte(page, PAGE_RW);
+	pte_val(pte) = addr;
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
 		pte_val(pte) += PAGE_SIZE;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 49ce6bb2c641..0b09b2342302 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,11 +42,10 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
-static unsigned long __init setup_zero_pages(void)
+static void __init setup_zero_pages(void)
 {
 	struct cpuid cpu_id;
 	unsigned int order;
-	unsigned long size;
 	struct page *page;
 	int i;
 
@@ -63,10 +62,18 @@ static unsigned long __init setup_zero_pages(void)
 		break;
 	case 0x2097:	/* z10 */
 	case 0x2098:	/* z10 */
+	case 0x2817:	/* z196 */
+	case 0x2818:	/* z196 */
+		order = 2;
+		break;
+	case 0x2827:	/* zEC12 */
 	default:
-		order = 2;
+		order = 5;
 		break;
 	}
+	/* Limit number of empty zero pages for small memory sizes */
+	if (order > 2 && totalram_pages <= 16384)
+		order = 2;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!empty_zero_page)
@@ -75,14 +82,11 @@ static unsigned long __init setup_zero_pages(void)
 	page = virt_to_page((void *) empty_zero_page);
 	split_page(page, order);
 	for (i = 1 << order; i > 0; i--) {
-		SetPageReserved(page);
+		mark_page_reserved(page);
 		page++;
 	}
 
-	size = PAGE_SIZE << order;
-	zero_page_mask = (size - 1) & PAGE_MASK;
-
-	return 1UL << order;
+	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
 }
 
 /*
@@ -139,7 +143,7 @@ void __init mem_init(void)
 
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem();
-	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages. */
+	setup_zero_pages();	/* Setup zeroed pages. */
 
 	reservedpages = 0;
 
@@ -158,34 +162,15 @@ void __init mem_init(void)
 		  PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-	unsigned long addr = begin;
-
-	if (begin >= end)
-		return;
-	for (; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
-		       PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
 void free_initmem(void)
 {
-	free_init_pages("unused kernel memory",
-			(unsigned long)&__init_begin,
-			(unsigned long)&__init_end);
+	free_initmem_default(0);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
 }
 #endif
 
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index d21040ed5e59..80adfbf75065 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -9,31 +9,25 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 
+static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
+{
+	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
+		     : [addr] "+a" (addr) : [skey] "d" (skey));
+	return addr;
+}
+
 void storage_key_init_range(unsigned long start, unsigned long end)
 {
-	unsigned long boundary, function, size;
+	unsigned long boundary, size;
 
 	while (start < end) {
-		if (MACHINE_HAS_EDAT2) {
-			/* set storage keys for a 2GB frame */
-			function = 0x22000 | PAGE_DEFAULT_KEY;
-			size = 1UL << 31;
-			boundary = (start + size) & ~(size - 1);
-			if (boundary <= end) {
-				do {
-					start = pfmf(function, start);
-				} while (start < boundary);
-				continue;
-			}
-		}
 		if (MACHINE_HAS_EDAT1) {
 			/* set storage keys for a 1MB frame */
-			function = 0x21000 | PAGE_DEFAULT_KEY;
 			size = 1UL << 20;
 			boundary = (start + size) & ~(size - 1);
 			if (boundary <= end) {
 				do {
-					start = pfmf(function, start);
+					start = sske_frame(start, PAGE_DEFAULT_KEY);
 				} while (start < boundary);
 				continue;
 			}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index ae44d2a34313..bd954e96f51c 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -379,75 +379,183 @@ out_unmap:
 }
 EXPORT_SYMBOL_GPL(gmap_map_segment);
 
-/*
- * this function is assumed to be called with mmap_sem held
- */
-unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
 {
-	unsigned long *table, vmaddr, segment;
-	struct mm_struct *mm;
-	struct gmap_pgtable *mp;
-	struct gmap_rmap *rmap;
-	struct vm_area_struct *vma;
-	struct page *page;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	unsigned long *table;
 
-	current->thread.gmap_addr = address;
-	mm = gmap->mm;
-	/* Walk the gmap address space page table */
 	table = gmap->table + ((address >> 53) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 42) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 31) & 0x7ff);
 	if (unlikely(*table & _REGION_ENTRY_INV))
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 20) & 0x7ff);
+	return table;
+}
+
+/**
+ * __gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ * The mmap_sem of the mm that belongs to the address space must be held
+ * when this function gets called.
+ */
+unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *segment_ptr, vmaddr, segment;
+	struct gmap_pgtable *mp;
+	struct page *page;
 
+	current->thread.gmap_addr = address;
+	segment_ptr = gmap_table_walk(address, gmap);
+	if (IS_ERR(segment_ptr))
+		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
-	segment = *table;
-	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
+	segment = *segment_ptr;
+	if (!(segment & _SEGMENT_ENTRY_INV)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		return mp->vmaddr | (address & ~PMD_MASK);
 	} else if (segment & _SEGMENT_ENTRY_RO) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
-		vma = find_vma(mm, vmaddr);
-		if (!vma || vma->vm_start > vmaddr)
-			return -EFAULT;
-
-		/* Walk the parent mm page table */
-		pgd = pgd_offset(mm, vmaddr);
-		pud = pud_alloc(mm, pgd, vmaddr);
-		if (!pud)
-			return -ENOMEM;
-		pmd = pmd_alloc(mm, pud, vmaddr);
-		if (!pmd)
-			return -ENOMEM;
-		if (!pmd_present(*pmd) &&
-		    __pte_alloc(mm, vma, pmd, vmaddr))
-			return -ENOMEM;
-		/* pmd now points to a valid segment table entry. */
-		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
-		if (!rmap)
-			return -ENOMEM;
-		/* Link gmap segment table entry location to page table. */
-		page = pmd_page(*pmd);
-		mp = (struct gmap_pgtable *) page->index;
-		rmap->entry = table;
-		spin_lock(&mm->page_table_lock);
+		return vmaddr | (address & ~PMD_MASK);
+	}
+	return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__gmap_translate);
+
+/**
+ * gmap_translate - translate a guest address to a user space address
+ * @address: guest address
+ * @gmap: pointer to guest mapping meta data structure
+ *
+ * Returns user space address which corresponds to the guest address or
+ * -EFAULT if no such mapping exists.
+ * This function does not establish potentially missing page table entries.
+ */
+unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
+{
+	unsigned long rc;
+
+	down_read(&gmap->mm->mmap_sem);
+	rc = __gmap_translate(address, gmap);
+	up_read(&gmap->mm->mmap_sem);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(gmap_translate);
+
+static int gmap_connect_pgtable(unsigned long segment,
+				unsigned long *segment_ptr,
+				struct gmap *gmap)
+{
+	unsigned long vmaddr;
+	struct vm_area_struct *vma;
+	struct gmap_pgtable *mp;
+	struct gmap_rmap *rmap;
+	struct mm_struct *mm;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	mm = gmap->mm;
+	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
+	vma = find_vma(mm, vmaddr);
+	if (!vma || vma->vm_start > vmaddr)
+		return -EFAULT;
+	/* Walk the parent mm page table */
+	pgd = pgd_offset(mm, vmaddr);
+	pud = pud_alloc(mm, pgd, vmaddr);
+	if (!pud)
+		return -ENOMEM;
+	pmd = pmd_alloc(mm, pud, vmaddr);
+	if (!pmd)
+		return -ENOMEM;
+	if (!pmd_present(*pmd) &&
+	    __pte_alloc(mm, vma, pmd, vmaddr))
+		return -ENOMEM;
+	/* pmd now points to a valid segment table entry. */
+	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
+	if (!rmap)
+		return -ENOMEM;
+	/* Link gmap segment table entry location to page table. */
+	page = pmd_page(*pmd);
+	mp = (struct gmap_pgtable *) page->index;
+	rmap->entry = segment_ptr;
+	spin_lock(&mm->page_table_lock);
+	if (*segment_ptr == segment) {
 		list_add(&rmap->list, &mp->mapper);
-		spin_unlock(&mm->page_table_lock);
 		/* Set gmap segment table entry to page table. */
-		*table = pmd_val(*pmd) & PAGE_MASK;
-		return vmaddr | (address & ~PMD_MASK);
+		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
+		rmap = NULL;
+	}
+	spin_unlock(&mm->page_table_lock);
+	kfree(rmap);
+	return 0;
+}
+
+static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
+{
+	struct gmap_rmap *rmap, *next;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int flush;
+
+	flush = 0;
+	spin_lock(&mm->page_table_lock);
+	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+	mp = (struct gmap_pgtable *) page->index;
+	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
+		*rmap->entry =
+			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		list_del(&rmap->list);
+		kfree(rmap);
+		flush = 1;
+	}
+	spin_unlock(&mm->page_table_lock);
+	if (flush)
+		__tlb_flush_global();
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
+{
+	unsigned long *segment_ptr, segment;
+	struct gmap_pgtable *mp;
+	struct page *page;
+	int rc;
+
+	current->thread.gmap_addr = address;
+	segment_ptr = gmap_table_walk(address, gmap);
+	if (IS_ERR(segment_ptr))
+		return -EFAULT;
+	/* Convert the gmap address to an mm address. */
+	while (1) {
+		segment = *segment_ptr;
+		if (!(segment & _SEGMENT_ENTRY_INV)) {
+			/* Page table is present */
+			page = pfn_to_page(segment >> PAGE_SHIFT);
+			mp = (struct gmap_pgtable *) page->index;
+			return mp->vmaddr | (address & ~PMD_MASK);
+		}
+		if (!(segment & _SEGMENT_ENTRY_RO))
+			/* Nothing mapped in the gmap address space. */
+			break;
+		rc = gmap_connect_pgtable(segment, segment_ptr, gmap);
+		if (rc)
+			return rc;
 	}
 	return -EFAULT;
 }
@@ -511,29 +619,6 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_discard);
 
-void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
-{
-	struct gmap_rmap *rmap, *next;
-	struct gmap_pgtable *mp;
-	struct page *page;
-	int flush;
-
-	flush = 0;
-	spin_lock(&mm->page_table_lock);
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	mp = (struct gmap_pgtable *) page->index;
-	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
-		list_del(&rmap->list);
-		kfree(rmap);
-		flush = 1;
-	}
-	spin_unlock(&mm->page_table_lock);
-	if (flush)
-		__tlb_flush_global();
-}
-
 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 						unsigned long vmaddr)
 {
@@ -586,8 +671,8 @@ static inline void page_table_free_pgste(unsigned long *table)
 {
 }
 
-static inline void gmap_unmap_notifier(struct mm_struct *mm,
+static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
 					unsigned long *table)
 {
 }
 
@@ -653,7 +738,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	unsigned int bit, mask;
 
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		return page_table_free_pgste(table);
 	}
 	/* Free 1K/2K page table fragment of a 4K page */
@@ -696,7 +781,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
 
 	mm = tlb->mm;
 	if (mm_has_pgste(mm)) {
-		gmap_unmap_notifier(mm, table);
+		gmap_disconnect_pgtable(mm, table);
 		table = (unsigned long *) (__pa(table) | FRAG_MASK);
 		tlb_remove_table(tlb, table);
 		return;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db6907..35837054f734 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	unsigned long address, start_addr, end_addr;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
 	pte_t *pt_dir;
 	int ret = -ENOMEM;
 
-	start_addr = (unsigned long) start;
-	end_addr = (unsigned long) (start + nr);
-
-	for (address = start_addr; address < end_addr;) {
+	for (address = start; address < end;) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 		}
 		address += PAGE_SIZE;
 	}
-	memset(start, 0, nr * sizeof(struct page));
+	memset((void *)start, 0, end - start);
 	ret = 0;
 out:
-	flush_tlb_kernel_range(start_addr, end_addr);
+	flush_tlb_kernel_range(start, end);
 	return ret;
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 