Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |   6
-rw-r--r--  arch/ia64/configs/sn2_defconfig      |   6
-rw-r--r--  arch/ia64/defconfig                  |   6
-rw-r--r--  arch/ia64/kernel/Makefile            |   1
-rw-r--r--  arch/ia64/kernel/efi.c               |  32
-rw-r--r--  arch/ia64/kernel/entry.S             |   2
-rw-r--r--  arch/ia64/kernel/uncached.c          | 246
-rw-r--r--  arch/ia64/mm/hugetlbpage.c           | 158
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  |   6
9 files changed, 303 insertions(+), 160 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 992bcfff7913..ce4dfa8b834d 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -50,6 +50,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
 	bool
 	default y
 
+config IA64_UNCACHED_ALLOCATOR
+	bool
+	select GENERIC_ALLOCATOR
+
 choice
 	prompt "System type"
 	default IA64_GENERIC
@@ -223,7 +227,7 @@ config IA64_SGI_SN_SIM
 
 config IA64_SGI_SN_XP
 	tristate "Support communication between SGI SSIs"
-	depends on MSPEC
+	select IA64_UNCACHED_ALLOCATOR
 	help
 	  An SGI machine can be divided into multiple Single System
 	  Images which act independently of each other and have
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 6ff7107fee4d..a01bb02d074d 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -588,6 +588,7 @@ CONFIG_SGI_MBCS=m
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_SGI_L1_CONSOLE=y
+CONFIG_SERIAL_SGI_IOC4=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
@@ -788,6 +789,11 @@ CONFIG_INFINIBAND_IPOIB=m
 # CONFIG_INFINIBAND_IPOIB_DEBUG is not set
 
 #
+# SN Devices
+#
+CONFIG_SGI_IOC4=y
+
+#
 # File systems
 #
 CONFIG_EXT2_FS=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 2aea0f9e6e1d..7be8096e0561 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -657,6 +657,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 CONFIG_SERIAL_SGI_L1_CONSOLE=y
+CONFIG_SERIAL_SGI_IOC4=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
@@ -976,6 +977,11 @@ CONFIG_INFINIBAND_IPOIB=m
 # CONFIG_INFINIBAND_IPOIB_DEBUG is not set
 
 #
+# SN Devices
+#
+CONFIG_SGI_IOC4=y
+
+#
 # File systems
 #
 CONFIG_EXT2_FS=y
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c1a02bbc252c..4c73d8ba2e3d 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
+obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 
 # The gate DSO image is built using a special linker script.
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 4a3b1aac43e7..179f230816ed 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -410,6 +410,38 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 }
 
 /*
+ * Walk the EFI memory map to pull out leftover pages in the lower
+ * memory regions which do not end up in the regular memory map and
+ * stick them into the uncached allocator.
+ *
+ * The regular walk function is significantly more complex than the
+ * uncached walk, which means it really doesn't make sense to try and
+ * merge the two.
+ */
+void __init
+efi_memmap_walk_uc (efi_freemem_callback_t callback)
+{
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size, start, end;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+		if (md->attribute == EFI_MEMORY_UC) {
+			start = PAGE_ALIGN(md->phys_addr);
+			end = PAGE_ALIGN((md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
+			if ((*callback)(start, end, NULL) < 0)
+				return;
+		}
+	}
+}
+
+
+/*
  * Look for the PAL_CODE region reported by EFI and map it using an
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
  * Abstraction Layer chapter 11 in ADAG
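
For illustration only (not part of the patch), a callback handed to efi_memmap_walk_uc() follows the signature used by uncached_build_memmap() in uncached.c below: it receives the page-aligned physical start and end of one EFI_MEMORY_UC region, and returning a negative value stops the walk. The function name here is hypothetical:

	/* Hypothetical callback sketch: log each EFI_MEMORY_UC region. */
	static int __init log_uc_region(unsigned long start, unsigned long end, void *arg)
	{
		printk(KERN_INFO "EFI UC region: %lx-%lx (%lu pages)\n",
		       start, end, (end - start) >> PAGE_SHIFT);
		return 0;	/* non-negative: continue the walk */
	}
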
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d99316c9be28..b1d5d3d5276c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1579,7 +1579,7 @@ sys_call_table:
 	data8 sys_keyctl
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall			// 1275
-	data8 sys_ni_syscall
+	data8 sys_set_zone_reclaim
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
new file mode 100644
index 000000000000..490dfc9ab47f
--- /dev/null
+++ b/arch/ia64/kernel/uncached.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * A simple uncached page allocator using the generic allocator. This
+ * allocator first utilizes the spare (spill) pages found in the EFI
+ * memmap and will then start converting cached pages to uncached ones
+ * one granule at a time. Node awareness is implemented by having a
+ * pool of pages per node.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/efi.h>
+#include <linux/genalloc.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/sn/arch.h>
+
+#define DEBUG	0
+
+#if DEBUG
+#define dprintk			printk
+#else
+#define dprintk(x...)		do { } while (0)
+#endif
+
+void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+
+#define MAX_UNCACHED_GRANULES	5
+static int allocated_granules;
+
+struct gen_pool *uncached_pool[MAX_NUMNODES];
+
+
+static void uncached_ipi_visibility(void *data)
+{
+	int status;
+
+	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
+	if ((status != PAL_VISIBILITY_OK) &&
+	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
+		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
+		       "CPU %i\n", status, get_cpu());
+}
+
+
+static void uncached_ipi_mc_drain(void *data)
+{
+	int status;
+	status = ia64_pal_mc_drain();
+	if (status)
+		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
+		       "CPU %i\n", status, get_cpu());
+}
+
+
+static unsigned long
+uncached_get_new_chunk(struct gen_pool *poolp)
+{
+	struct page *page;
+	void *tmp;
+	int status, i;
+	unsigned long addr, node;
+
+	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+		return 0;
+
+	node = poolp->private;
+	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+				IA64_GRANULE_SHIFT-PAGE_SHIFT);
+
+	dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
+		page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+
+	/*
+	 * Do magic if no mem on local node! XXX
+	 */
+	if (!page)
+		return 0;
+	tmp = page_address(page);
+
+	/*
+	 * There's a small race here where it's possible for someone to
+	 * access the page through /dev/mem halfway through the conversion
+	 * to uncached - not sure it's really worth bothering about
+	 */
+	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+		SetPageUncached(&page[i]);
+
+	flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+
+	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
+
+	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
+		status, get_cpu());
+
+	if (!status) {
+		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
+		if (status)
+			printk(KERN_WARNING "smp_call_function failed for "
+			       "uncached_ipi_visibility! (%i)\n", status);
+	}
+
+	if (ia64_platform_is("sn2"))
+		sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+	else
+		flush_icache_range((unsigned long)tmp,
+				   (unsigned long)tmp+IA64_GRANULE_SIZE);
+
+	ia64_pal_mc_drain();
+	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
+	if (status)
+		printk(KERN_WARNING "smp_call_function failed for "
+		       "uncached_ipi_mc_drain! (%i)\n", status);
+
+	addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+
+	allocated_granules++;
+	return addr;
+}
+
+
+/*
+ * uncached_alloc_page
+ *
+ * Allocate one uncached page on the requested node. If no uncached
+ * pages are available there, search the other nodes round-robin,
+ * starting from the highest.
+ */
+unsigned long
+uncached_alloc_page(int nid)
+{
+	unsigned long maddr;
+
+	maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+
+	dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
+		maddr, nid);
+
+	/*
+	 * If no memory is available on our local node, try the
+	 * remaining nodes in the system.
+	 */
+	if (!maddr) {
+		int i;
+
+		for (i = MAX_NUMNODES - 1; i >= 0; i--) {
+			if (i == nid || !node_online(i))
+				continue;
+			maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
+			dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
+				"returns %lx on node %i\n", maddr, i);
+			if (maddr) {
+				break;
+			}
+		}
+	}
+
+	return maddr;
+}
+EXPORT_SYMBOL(uncached_alloc_page);
+
+
+/*
+ * uncached_free_page
+ *
+ * Free a single uncached page.
+ */
+void
+uncached_free_page(unsigned long maddr)
+{
+	int node;
+
+	node = nasid_to_cnodeid(NASID_GET(maddr));
+
+	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+
+	if ((maddr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
+		panic("uncached_free_page invalid address %lx\n", maddr);
+
+	gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+}
+EXPORT_SYMBOL(uncached_free_page);
+
+
+/*
+ * uncached_build_memmap
+ *
+ * Called at boot time to build a map of pages that can be used for
+ * memory special operations.
+ */
+static int __init
+uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+{
+	long length;
+	unsigned long vstart, vend;
+	int node;
+
+	length = end - start;
+	vstart = start + __IA64_UNCACHED_OFFSET;
+	vend = end + __IA64_UNCACHED_OFFSET;
+
+	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+
+	memset((char *)vstart, 0, length);
+
+	node = nasid_to_cnodeid(NASID_GET(start));
+
+	for (; vstart < vend ; vstart += PAGE_SIZE) {
+		dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
+		gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
+	}
+
+	return 0;
+}
+
+
+static int __init uncached_init(void) {
+	int i;
+
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		if (!node_online(i))
+			continue;
+		uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
+						   &uncached_get_new_chunk, i);
+	}
+
+	efi_memmap_walk_uc(uncached_build_memmap);
+
+	return 0;
+}
+
+__initcall(uncached_init);
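
The resulting API is small. A minimal usage sketch (hypothetical caller, not part of the patch): uncached_alloc_page() returns an uncached kernel virtual address inside the __IA64_UNCACHED_OFFSET region, or 0 when the pools are empty and no further granule can be converted; uncached_free_page() returns the page to its node's pool.

	/* Sketch only: allocate one uncached page near node 0, then free it. */
	unsigned long uc_addr = uncached_alloc_page(0);

	if (!uc_addr)
		return -ENOMEM;	/* no spill pages and no convertible granule */
	/* ... perform memory special operations on the uncached page ... */
	uncached_free_page(uc_addr);
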
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index df08ae7634b6..e0a776a3044c 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -24,7 +24,7 @@
 
 unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
 
-static pte_t *
+pte_t *
 huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
@@ -43,7 +43,7 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-static pte_t *
+pte_t *
 huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
@@ -67,23 +67,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
-static void
-set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
-	struct page *page, pte_t * page_table, int write_access)
-{
-	pte_t entry;
-
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-	if (write_access) {
-		entry =
-		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-	} else
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	entry = pte_mkyoung(entry);
-	mk_pte_huge(entry);
-	set_pte(page_table, entry);
-	return;
-}
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -99,68 +82,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto nomem;
-		src_pte = huge_pte_offset(src, addr);
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		set_pte(dst_pte, entry);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		addr += HPAGE_SIZE;
-	}
-	return 0;
-nomem:
-	return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		    struct page **pages, struct vm_area_struct **vmas,
-		    unsigned long *st, int *length, int i)
-{
-	pte_t *ptep, pte;
-	unsigned long start = *st;
-	unsigned long pstart;
-	int len = *length;
-	struct page *page;
-
-	do {
-		pstart = start & HPAGE_MASK;
-		ptep = huge_pte_offset(mm, start);
-		pte = *ptep;
-
-back1:
-		page = pte_page(pte);
-		if (pages) {
-			page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
-			get_page(page);
-			pages[i] = page;
-		}
-		if (vmas)
-			vmas[i] = vma;
-		i++;
-		len--;
-		start += PAGE_SIZE;
-		if (((start & HPAGE_MASK) == pstart) && len &&
-		    (start < vma->vm_end))
-			goto back1;
-	} while (len && start < vma->vm_end);
-	*length = len;
-	*st = start;
-	return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
 {
 	struct page *page;
@@ -212,81 +133,6 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
-	pte_t *pte;
-	struct page *page;
-
-	BUG_ON(start & (HPAGE_SIZE - 1));
-	BUG_ON(end & (HPAGE_SIZE - 1));
-
-	for (address = start; address < end; address += HPAGE_SIZE) {
-		pte = huge_pte_offset(mm, address);
-		if (pte_none(*pte))
-			continue;
-		page = pte_page(*pte);
-		put_page(page);
-		pte_clear(mm, address, pte);
-	}
-	add_mm_counter(mm, rss, - ((end - start) >> PAGE_SHIFT));
-	flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
-	BUG_ON(vma->vm_start & ~HPAGE_MASK);
-	BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (!pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				page_cache_release(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
-}
-
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 2c3c4a8af553..cd7ed73f0e7a 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -22,6 +22,7 @@
 #include <linux/cache.h>
 #include <linux/mmzone.h>
 #include <linux/nodemask.h>
+#include <asm/uncached.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
@@ -183,7 +184,7 @@ xpc_rsvd_page_init(void)
 	 * memory protections are never restricted.
 	 */
 	if ((amos_page = xpc_vars->amos_page) == NULL) {
-		amos_page = (AMO_t *) mspec_kalloc_page(0);
+		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
 		if (amos_page == NULL) {
 			dev_err(xpc_part, "can't allocate page of AMOs\n");
 			return NULL;
@@ -200,7 +201,8 @@ xpc_rsvd_page_init(void)
 		if (ret != 0) {
 			dev_err(xpc_part, "can't change memory "
 				"protections\n");
-			mspec_kfree_page((unsigned long) amos_page);
+			uncached_free_page(__IA64_UNCACHED_OFFSET |
+					   TO_PHYS((u64) amos_page));
 			return NULL;
 		}
 	}
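
The free path in this hunk reconstructs the address that uncached_alloc_page() originally returned, since TO_AMO() moved it into the AMO addressing region. Schematically (TO_AMO and TO_PHYS are pre-existing SN macros, not added by this patch):

	/* sketch of the address round-trip assumed above */
	unsigned long uc = uncached_alloc_page(0);	/* uncached kernel address */
	AMO_t *amos_page = (AMO_t *) TO_AMO(uc);	/* AMO-space alias of the page */

	/* to free, undo the aliasing: back to physical, then to uncached virtual */
	uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64) amos_page));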