Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c        99
-rw-r--r--  arch/ia64/mm/discontig.c    130
-rw-r--r--  arch/ia64/mm/hugetlbpage.c    1
-rw-r--r--  arch/ia64/mm/init.c          13
-rw-r--r--  arch/ia64/mm/ioremap.c       11
-rw-r--r--  arch/ia64/mm/tlb.c           33
6 files changed, 231 insertions, 56 deletions
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-        int cpu;
-        static int first_time=1;
+        static bool first_time = true;
+        void *cpu0_data = __cpu0_per_cpu;
+        unsigned int cpu;
+
+        if (!first_time)
+                goto skip;
+        first_time = false;
 
         /*
-         * get_free_pages() cannot be used before cpu_init() done.  BSP
-         * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-         * get_zeroed_page().
+         * get_free_pages() cannot be used before cpu_init() done.
+         * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+         * to avoid that AP calls get_zeroed_page().
          */
-        if (first_time) {
-                void *cpu0_data = __cpu0_per_cpu;
+        for_each_possible_cpu(cpu) {
+                void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-                first_time=0;
+                memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+                __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+                per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-                __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-                per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+                /*
+                 * percpu area for cpu0 is moved from the __init area
+                 * which is setup by head.S and used till this point.
+                 * Update ar.k3.  This move is ensures that percpu
+                 * area for cpu0 is on the correct node and its
+                 * virtual address isn't insanely far from other
+                 * percpu areas which is important for congruent
+                 * percpu allocator.
+                 */
+                if (cpu == 0)
+                        ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+                                    (unsigned long)__per_cpu_start);
 
-                for (cpu = 1; cpu < NR_CPUS; cpu++) {
-                        memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-                        __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-                        cpu_data += PERCPU_PAGE_SIZE;
-                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-                }
+                cpu_data += PERCPU_PAGE_SIZE;
         }
+skip:
         return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-        cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+        cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
                                    PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+        struct pcpu_alloc_info *ai;
+        struct pcpu_group_info *gi;
+        unsigned int cpu;
+        ssize_t static_size, reserved_size, dyn_size;
+        int rc;
+
+        ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+        if (!ai)
+                panic("failed to allocate pcpu_alloc_info");
+        gi = &ai->groups[0];
+
+        /* units are assigned consecutively to possible cpus */
+        for_each_possible_cpu(cpu)
+                gi->cpu_map[gi->nr_units++] = cpu;
+
+        /* set parameters */
+        static_size = __per_cpu_end - __per_cpu_start;
+        reserved_size = PERCPU_MODULE_RESERVE;
+        dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+        if (dyn_size < 0)
+                panic("percpu area overflow static=%zd reserved=%zd\n",
+                      static_size, reserved_size);
+
+        ai->static_size = static_size;
+        ai->reserved_size = reserved_size;
+        ai->dyn_size = dyn_size;
+        ai->unit_size = PERCPU_PAGE_SIZE;
+        ai->atom_size = PAGE_SIZE;
+        ai->alloc_size = PERCPU_PAGE_SIZE;
+
+        rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+        if (rc)
+                panic("failed to setup percpu area (err=%d)", rc);
+
+        pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
         map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                 sizeof(struct page));
-        vmalloc_end -= map_size;
-        vmem_map = (struct page *) vmalloc_end;
+        VMALLOC_END -= map_size;
+        vmem_map = (struct page *) VMALLOC_END;
         efi_memmap_walk(create_mem_map_page_table, NULL);
 
         /*
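
The new setup_per_cpu_areas() above simply partitions each PERCPU_PAGE_SIZE unit into a static, a reserved and a dynamic region and panics if they do not fit. A minimal user-space sketch of that bookkeeping, with made-up sizes standing in for PERCPU_PAGE_SIZE, the linker-provided static size and PERCPU_MODULE_RESERVE (this is an illustration, not kernel code):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        long percpu_page_size = 64 * 1024;   /* assumed stand-in for PERCPU_PAGE_SIZE */
        long static_size      = 22 * 1024;   /* assumed __per_cpu_end - __per_cpu_start */
        long reserved_size    =  8 * 1024;   /* assumed PERCPU_MODULE_RESERVE */
        long dyn_size = percpu_page_size - static_size - reserved_size;

        if (dyn_size < 0) {
                /* mirrors the panic() in the patch: one unit cannot hold the layout */
                fprintf(stderr, "percpu area overflow static=%ld reserved=%ld\n",
                        static_size, reserved_size);
                return EXIT_FAILURE;
        }
        /* every unit is one page: static | reserved | dynamic */
        printf("unit=%ld static=%ld reserved=%ld dyn=%ld\n",
               percpu_page_size, static_size, reserved_size, dyn_size);
        return 0;
}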
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..61620323bb60 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -22,6 +22,7 @@
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
+#include <linux/slab.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
@@ -143,22 +144,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
         int cpu;
 
         for_each_possible_early_cpu(cpu) {
-                if (cpu == 0) {
-                        void *cpu0_data = __cpu0_per_cpu;
-                        __per_cpu_offset[cpu] = (char*)cpu0_data -
-                                __per_cpu_start;
-                } else if (node == node_cpuid[cpu].nid) {
-                        memcpy(__va(cpu_data), __phys_per_cpu_start,
-                               __per_cpu_end - __per_cpu_start);
-                        __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-                                __per_cpu_start;
-                        cpu_data += PERCPU_PAGE_SIZE;
-                }
+                void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+                if (node != node_cpuid[cpu].nid)
+                        continue;
+
+                memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+                __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+                        __per_cpu_start;
+
+                /*
+                 * percpu area for cpu0 is moved from the __init area
+                 * which is setup by head.S and used till this point.
+                 * Update ar.k3.  This move is ensures that percpu
+                 * area for cpu0 is on the correct node and its
+                 * virtual address isn't insanely far from other
+                 * percpu areas which is important for congruent
+                 * percpu allocator.
+                 */
+                if (cpu == 0)
+                        ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                                    (unsigned long)cpu_data -
+                                    (unsigned long)__per_cpu_start);
+
+                cpu_data += PERCPU_PAGE_SIZE;
         }
 #endif
         return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+        struct pcpu_alloc_info *ai;
+        struct pcpu_group_info *uninitialized_var(gi);
+        unsigned int *cpu_map;
+        void *base;
+        unsigned long base_offset;
+        unsigned int cpu;
+        ssize_t static_size, reserved_size, dyn_size;
+        int node, prev_node, unit, nr_units, rc;
+
+        ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+        if (!ai)
+                panic("failed to allocate pcpu_alloc_info");
+        cpu_map = ai->groups[0].cpu_map;
+
+        /* determine base */
+        base = (void *)ULONG_MAX;
+        for_each_possible_cpu(cpu)
+                base = min(base,
+                           (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+        base_offset = (void *)__per_cpu_start - base;
+
+        /* build cpu_map, units are grouped by node */
+        unit = 0;
+        for_each_node(node)
+                for_each_possible_cpu(cpu)
+                        if (node == node_cpuid[cpu].nid)
+                                cpu_map[unit++] = cpu;
+        nr_units = unit;
+
+        /* set basic parameters */
+        static_size = __per_cpu_end - __per_cpu_start;
+        reserved_size = PERCPU_MODULE_RESERVE;
+        dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+        if (dyn_size < 0)
+                panic("percpu area overflow static=%zd reserved=%zd\n",
+                      static_size, reserved_size);
+
+        ai->static_size = static_size;
+        ai->reserved_size = reserved_size;
+        ai->dyn_size = dyn_size;
+        ai->unit_size = PERCPU_PAGE_SIZE;
+        ai->atom_size = PAGE_SIZE;
+        ai->alloc_size = PERCPU_PAGE_SIZE;
+
+        /*
+         * CPUs are put into groups according to node.  Walk cpu_map
+         * and create new groups at node boundaries.
+         */
+        prev_node = -1;
+        ai->nr_groups = 0;
+        for (unit = 0; unit < nr_units; unit++) {
+                cpu = cpu_map[unit];
+                node = node_cpuid[cpu].nid;
+
+                if (node == prev_node) {
+                        gi->nr_units++;
+                        continue;
+                }
+                prev_node = node;
+
+                gi = &ai->groups[ai->nr_groups++];
+                gi->nr_units = 1;
+                gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+                gi->cpu_map = &cpu_map[unit];
+        }
+
+        rc = pcpu_setup_first_chunk(ai, base);
+        if (rc)
+                panic("failed to setup percpu area (err=%d)", rc);
+
+        pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +451,8 @@ static void __init initialize_pernode_data(void)
         /* Set the node_data pointer for each per-cpu struct */
         for_each_possible_early_cpu(cpu) {
                 node = node_cpuid[cpu].nid;
-                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+                per_cpu(ia64_cpu_info, cpu).node_data =
+                        mem_data[node].node_data;
         }
 #else
         {
@@ -360,7 +460,7 @@ static void __init initialize_pernode_data(void)
                 cpu = 0;
                 node = node_cpuid[cpu].nid;
                 cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-                        ((char *)&per_cpu__cpu_info - __per_cpu_start));
+                        ((char *)&ia64_cpu_info - __per_cpu_start));
                 cpu0_cpu_info->node_data = mem_data[node].node_data;
         }
 #endif /* CONFIG_SMP */
@@ -666,9 +766,9 @@ void __init paging_init(void)
         sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-        vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+        VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
                 sizeof(struct page));
-        vmem_map = (struct page *) vmalloc_end;
+        vmem_map = (struct page *) VMALLOC_END;
         efi_memmap_walk(create_mem_map_page_table, NULL);
         printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
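
In the NUMA variant of setup_per_cpu_areas() above, cpu_map is filled node by node, so units for CPUs on the same node end up adjacent and a new pcpu group is opened whenever the node id changes during the walk. A stand-alone sketch of that group-building walk, using a made-up cpu-to-node map in place of node_cpuid[] (an illustration under those assumptions, not the kernel routine):

#include <stdio.h>

#define NR_UNITS 6

int main(void)
{
        int cpu_map[NR_UNITS] = { 0, 2, 4, 1, 3, 5 };        /* hypothetical: cpus already ordered by node */
        int node_of[8]        = { 0, 1, 0, 1, 0, 1, 0, 1 };  /* hypothetical cpu -> node map */
        int prev_node = -1, nr_groups = 0;

        for (int unit = 0; unit < NR_UNITS; unit++) {
                int node = node_of[cpu_map[unit]];

                /* a node boundary in cpu_map starts a new group */
                if (node != prev_node) {
                        prev_node = node;
                        nr_groups++;
                        printf("group %d starts at unit %d (node %d)\n",
                               nr_groups - 1, unit, node);
                }
                printf("  unit %d -> cpu %d\n", unit, cpu_map[unit]);
        }
        printf("%d groups\n", nr_groups);
        return 0;
}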
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index b0f615759e97..1841ee7e65f9 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -14,7 +14,6 @@
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/log2.h>
 #include <asm/mman.h>
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..ed41759efcac 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -22,7 +22,6 @@
 #include <linux/kexec.h>
 
 #include <asm/dma.h>
-#include <asm/ia32.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
 #include <asm/numa.h>
@@ -44,8 +43,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
@@ -91,7 +90,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-        unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
         if (stack_size > MAX_USER_STACK_SIZE)
                 stack_size = MAX_USER_STACK_SIZE;
@@ -118,6 +117,7 @@ ia64_init_addr_space (void)
          */
         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
         if (vma) {
+                INIT_LIST_HEAD(&vma->anon_vma_chain);
                 vma->vm_mm = current->mm;
                 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                 vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -136,6 +136,7 @@ ia64_init_addr_space (void)
         if (!(current->personality & MMAP_PAGE_ZERO)) {
                 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                 if (vma) {
+                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                         vma->vm_mm = current->mm;
                         vma->vm_end = PAGE_SIZE;
                         vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -668,10 +669,6 @@ mem_init (void)
                 fsyscall_table[i] = sys_call_table[i] | 1;
         }
         setup_gate();
-
-#ifdef CONFIG_IA32_SUPPORT
-        ia32_mem_init();
-#endif
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd6..3dccdd8eb275 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
 }
 
 void __iomem *
+early_ioremap (unsigned long phys_addr, unsigned long size)
+{
+        return __ioremap(phys_addr);
+}
+
+void __iomem *
 ioremap (unsigned long phys_addr, unsigned long size)
 {
         void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
 EXPORT_SYMBOL(ioremap_nocache);
 
 void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
 iounmap (volatile void __iomem *addr)
 {
         if (REGION_NUMBER(addr) == RGN_GATE)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..5dfd916e9ea6 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -22,6 +22,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
@@ -48,7 +49,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +430,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
         struct ia64_tr_entry *p;
         int cpu = smp_processor_id();
 
+        if (!ia64_idtrs[cpu]) {
+                ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+                                sizeof (struct ia64_tr_entry), GFP_KERNEL);
+                if (!ia64_idtrs[cpu])
+                        return -ENOMEM;
+        }
         r = -EINVAL;
         /*Check overlap with existing TR entries*/
         if (target_mask & 0x1) {
-                p = &__per_cpu_idtrs[cpu][0][0];
+                p = ia64_idtrs[cpu];
                 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                 i++, p++) {
                         if (p->pte & 0x1)
@@ -444,7 +451,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
                 }
         }
         if (target_mask & 0x2) {
-                p = &__per_cpu_idtrs[cpu][1][0];
+                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
                 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                 i++, p++) {
                         if (p->pte & 0x1)
@@ -459,16 +466,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
         for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
                 switch (target_mask & 0x3) {
                 case 1:
-                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+                        if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
                                 goto found;
                         continue;
                 case 2:
-                        if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+                        if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                 goto found;
                         continue;
                 case 3:
-                        if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-                            !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+                        if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+                            !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                 goto found;
                         continue;
                 default:
@@ -488,7 +495,7 @@ found:
         if (target_mask & 0x1) {
                 ia64_itr(0x1, i, va, pte, log_size);
                 ia64_srlz_i();
-                p = &__per_cpu_idtrs[cpu][0][i];
+                p = ia64_idtrs[cpu] + i;
                 p->ifa = va;
                 p->pte = pte;
                 p->itir = log_size << 2;
@@ -497,7 +504,7 @@ found:
         if (target_mask & 0x2) {
                 ia64_itr(0x2, i, va, pte, log_size);
                 ia64_srlz_i();
-                p = &__per_cpu_idtrs[cpu][1][i];
+                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
                 p->ifa = va;
                 p->pte = pte;
                 p->itir = log_size << 2;
@@ -528,7 +535,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
                 return;
 
         if (target_mask & 0x1) {
-                p = &__per_cpu_idtrs[cpu][0][slot];
+                p = ia64_idtrs[cpu] + slot;
                 if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                         p->pte = 0;
                         ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +544,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
         }
 
         if (target_mask & 0x2) {
-                p = &__per_cpu_idtrs[cpu][1][slot];
+                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
                 if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
                         p->pte = 0;
                         ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +553,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
         }
 
         for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-                if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-                        (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+                if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+                    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                         break;
         }
         per_cpu(ia64_tr_used, cpu) = i;
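
The tlb.c hunks drop the NR_CPUS-sized static __per_cpu_idtrs table in favour of a per-cpu array that is kmalloc'ed on first use in ia64_itr_entry(); the itr slots occupy the first IA64_TR_ALLOC_MAX entries and the dtr slots the second half. A user-space sketch of that flat indexing, with a stand-in entry layout and slot count rather than the kernel definitions:

#include <stdio.h>
#include <stdlib.h>

#define TR_ALLOC_MAX 64                                 /* stand-in for IA64_TR_ALLOC_MAX */

struct tr_entry { unsigned long ifa, itir, pte, rr; };  /* stand-in layout, not the kernel struct */

static struct tr_entry *itr_slot(struct tr_entry *base, int slot)
{
        return base + slot;                     /* old __per_cpu_idtrs[cpu][0][slot] */
}

static struct tr_entry *dtr_slot(struct tr_entry *base, int slot)
{
        return base + TR_ALLOC_MAX + slot;      /* old __per_cpu_idtrs[cpu][1][slot] */
}

int main(void)
{
        /* one flat allocation of 2 * TR_ALLOC_MAX entries, as in the patch */
        struct tr_entry *trs = calloc(2 * TR_ALLOC_MAX, sizeof(*trs));

        if (!trs)
                return EXIT_FAILURE;            /* mirrors the -ENOMEM path in ia64_itr_entry() */
        itr_slot(trs, 3)->pte = 0x1;
        dtr_slot(trs, 3)->pte = 0x1;
        printf("itr[3] at offset %td, dtr[3] at offset %td\n",
               itr_slot(trs, 3) - trs, dtr_slot(trs, 3) - trs);
        free(trs);
        return 0;
}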