Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/hugetlbpage.c |  9 ++++++++-
-rw-r--r--  arch/ia64/mm/init.c        |  4 ++--
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index eee5c1cfbe32..0c7e94edc20e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,14 +64,21 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
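The huge_pmd_unshare() stub simply reports that no hugetlb page-table sharing took place on ia64, and the new pgoff check in prepare_hugepage_range() rejects mappings whose file offset is not aligned to a huge-page boundary: pgoff counts base pages, so ~HPAGE_MASK >> PAGE_SHIFT masks exactly the low bits that must be zero. A minimal userspace sketch of that alignment arithmetic follows; the PAGE_SHIFT/HPAGE_SHIFT values are illustrative assumptions, not taken from the real ia64 configuration.

/*
 * Standalone sketch of the pgoff alignment check, outside the kernel.
 * PAGE_SHIFT and HPAGE_SHIFT below are illustrative assumptions
 * (16 KB base pages, 256 MB huge pages), not values from the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT	14
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPAGE_SHIFT	28
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

/* pgoff counts base pages; it must land on a huge-page boundary. */
static int pgoff_is_hugepage_aligned(unsigned long pgoff)
{
	return !(pgoff & (~HPAGE_MASK >> PAGE_SHIFT));
}

int main(void)
{
	/* Offset of exactly one huge page, expressed in base pages: aligned. */
	printf("%d\n", pgoff_is_hugepage_aligned(HPAGE_SIZE >> PAGE_SHIFT));
	/* One base page into a huge page: rejected, as -EINVAL in the patch. */
	printf("%d\n", pgoff_is_hugepage_aligned((HPAGE_SIZE >> PAGE_SHIFT) + 1));
	return 0;
}

With these assumed shifts the program prints 1 then 0, mirroring the aligned and -EINVAL cases in the hunk above.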
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba399..56dc2024220e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
 	 * the problem. When the process attempts to write to the register backing store
 	 * for the first time, it will get a SEGFAULT in this case.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
 		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
 			memset(vma, 0, sizeof(*vma));
 			vma->vm_mm = current->mm;
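The init.c hunks only swap the allocation flag: SLAB_KERNEL was a legacy alias for GFP_KERNEL, and kmem_cache_alloc() takes gfp_t flags directly, so behaviour is unchanged. A kernel-context sketch of the resulting pattern, shown only to illustrate the substitution and not as an addition to the patch:

/*
 * Sketch of the allocation pattern after the change: kmem_cache_alloc()
 * takes gfp_t flags, so the legacy SLAB_KERNEL alias becomes GFP_KERNEL.
 * Kernel-context code; the helper name is hypothetical.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct vm_area_struct *alloc_zeroed_vma(void)
{
	struct vm_area_struct *vma;

	/* GFP_KERNEL: normal process-context allocation, may sleep. */
	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (vma)
		memset(vma, 0, sizeof(*vma));
	return vma;
}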