Diffstat (limited to 'arch/ia64/kernel/setup.c')
 -rw-r--r--  arch/ia64/kernel/setup.c | 723
 1 files changed, 723 insertions, 0 deletions
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
new file mode 100644
index 000000000000..f05650c801d2
--- /dev/null
+++ b/arch/ia64/kernel/setup.c
@@ -0,0 +1,723 @@
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
77 | |||
78 | /* | ||
79 | * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This | ||
80 | * mask specifies a mask of address bits that must be 0 in order for two buffers to be | ||
81 | * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start | ||
82 | * address of the second buffer must be aligned to (merge_mask+1) in order to be | ||
83 | * mergeable). By default, we assume there is no I/O MMU which can merge physically | ||
84 | * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu | ||
85 | * page-size of 2^64. | ||
86 | */ | ||
87 | unsigned long ia64_max_iommu_merge_mask = ~0UL; | ||
88 | EXPORT_SYMBOL(ia64_max_iommu_merge_mask); | ||
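
/*
 * Illustrative sketch, not part of the original patch: a platform whose
 * I/O MMU merges at, say, a 4KB page size would set
 * ia64_max_iommu_merge_mask to 0xfff.  The hypothetical helper below just
 * spells out the mergeability check described above.
 */
static inline int
iommu_buffers_mergeable (unsigned long first_end, unsigned long second_start)
{
	/* both addresses must be (merge_mask+1)-aligned; with the default
	   mask of ~0UL this is never true, as intended */
	return ((first_end | second_start) & ia64_max_iommu_merge_mask) == 0;
}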
89 | |||
90 | /* | ||
91 | * We use a special marker for the end of memory and it uses the extra (+1) slot | ||
92 | */ | ||
93 | struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1]; | ||
94 | int num_rsvd_regions; | ||
95 | |||
96 | |||
97 | /* | ||
98 | * Filter incoming memory segments based on the primitive map created from the boot | ||
99 | * parameters. Segments contained in the map are removed from the memory ranges. A | ||
100 | * caller-specified function is called with the memory ranges that remain after filtering. | ||
101 | * This routine does not assume the incoming segments are sorted. | ||
102 | */ | ||
103 | int | ||
104 | filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) | ||
105 | { | ||
106 | unsigned long range_start, range_end, prev_start; | ||
107 | void (*func)(unsigned long, unsigned long, int); | ||
108 | int i; | ||
109 | |||
110 | #if IGNORE_PFN0 | ||
111 | if (start == PAGE_OFFSET) { | ||
112 | printk(KERN_WARNING "warning: skipping physical page 0\n"); | ||
113 | start += PAGE_SIZE; | ||
114 | if (start >= end) return 0; | ||
115 | } | ||
116 | #endif | ||
117 | /* | ||
118 | * lowest possible address(walker uses virtual) | ||
119 | */ | ||
120 | prev_start = PAGE_OFFSET; | ||
121 | func = arg; | ||
122 | |||
123 | for (i = 0; i < num_rsvd_regions; ++i) { | ||
124 | range_start = max(start, prev_start); | ||
125 | range_end = min(end, rsvd_region[i].start); | ||
126 | |||
127 | if (range_start < range_end) | ||
128 | call_pernode_memory(__pa(range_start), range_end - range_start, func); | ||
129 | |||
130 | /* nothing more available in this segment */ | ||
131 | if (range_end == end) return 0; | ||
132 | |||
133 | prev_start = rsvd_region[i].end; | ||
134 | } | ||
135 | /* end of memory marker allows full processing inside loop body */ | ||
136 | return 0; | ||
137 | } | ||
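
/*
 * Usage sketch (an assumption, not shown in this file): filter_rsvd_memory()
 * has the signature of an EFI memmap walker callback, with the real consumer
 * passed through the opaque argument, e.g.:
 *
 *	static void
 *	count_node_pages (unsigned long start_pa, unsigned long len, int node)
 *	{
 *		... account 'len' bytes at 'start_pa' to 'node' ...
 *	}
 *
 *	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 *
 * count_node_pages() is a hypothetical consumer; the real callers live in
 * arch/ia64/mm/.
 */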
138 | |||
139 | static void | ||
140 | sort_regions (struct rsvd_region *rsvd_region, int max) | ||
141 | { | ||
142 | int j; | ||
143 | |||
144 | /* simple bubble sorting */ | ||
145 | while (max--) { | ||
146 | for (j = 0; j < max; ++j) { | ||
147 | if (rsvd_region[j].start > rsvd_region[j+1].start) { | ||
148 | struct rsvd_region tmp; | ||
149 | tmp = rsvd_region[j]; | ||
150 | rsvd_region[j] = rsvd_region[j + 1]; | ||
151 | rsvd_region[j + 1] = tmp; | ||
152 | } | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
157 | /** | ||
158 | * reserve_memory - setup reserved memory areas | ||
159 | * | ||
160 | * Setup the reserved memory areas set aside for the boot parameters, | ||
161 | * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, | ||
162 | * see include/asm-ia64/meminit.h if you need to define more. | ||
163 | */ | ||
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end = (rsvd_region[n].start
			      + strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}
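
/*
 * Editorial note: after sort_regions() the table is ordered by ascending
 * start address and terminated by the ~0UL end-of-memory marker, which is
 * exactly the invariant filter_rsvd_memory() relies on to walk the
 * reservations in a single pass.
 */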
206 | |||
207 | /** | ||
208 | * find_initrd - get initrd parameters from the boot parameter structure | ||
209 | * | ||
210 | * Grab the initrd start and end from the boot parameter struct given us by | ||
211 | * the boot loader. | ||
212 | */ | ||
213 | void | ||
214 | find_initrd (void) | ||
215 | { | ||
216 | #ifdef CONFIG_BLK_DEV_INITRD | ||
217 | if (ia64_boot_param->initrd_start) { | ||
218 | initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); | ||
219 | initrd_end = initrd_start+ia64_boot_param->initrd_size; | ||
220 | |||
221 | printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", | ||
222 | initrd_start, ia64_boot_param->initrd_size); | ||
223 | } | ||
224 | #endif | ||
225 | } | ||
226 | |||
227 | static void __init | ||
228 | io_port_init (void) | ||
229 | { | ||
230 | extern unsigned long ia64_iobase; | ||
231 | unsigned long phys_iobase; | ||
232 | |||
233 | /* | ||
234 | * Set `iobase' to the appropriate address in region 6 (uncached access range). | ||
235 | * | ||
236 | * The EFI memory map is the "preferred" location to get the I/O port space base, | ||
237 | * rather the relying on AR.KR0. This should become more clear in future SAL | ||
238 | * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is | ||
239 | * found in the memory map. | ||
240 | */ | ||
241 | phys_iobase = efi_get_iobase(); | ||
242 | if (phys_iobase) | ||
243 | /* set AR.KR0 since this is all we use it for anyway */ | ||
244 | ia64_set_kr(IA64_KR_IO_BASE, phys_iobase); | ||
245 | else { | ||
246 | phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); | ||
247 | printk(KERN_INFO "No I/O port range found in EFI memory map, falling back " | ||
248 | "to AR.KR0\n"); | ||
249 | printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase); | ||
250 | } | ||
251 | ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); | ||
252 | |||
253 | /* setup legacy IO port space */ | ||
254 | io_space[0].mmio_base = ia64_iobase; | ||
255 | io_space[0].sparse = 1; | ||
256 | num_io_spaces = 1; | ||
257 | } | ||
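
/*
 * Sketch of how this base is consumed (an assumption about the sparse
 * encoding used by the ia64 I/O access macros of this era, not code from
 * this file): a legacy port access computes its uncached MMIO address from
 * io_space[0].mmio_base roughly as
 *
 *	offset = ((port >> 2) << 12) | (port & 0xfff);
 *	addr   = io_space[0].mmio_base | offset;
 *
 * e.g. port 0x3f8 (COM1) would yield offset 0xfe3f8, giving each 4-byte
 * port group its own 4KB page in the uncached region.
 */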
258 | |||
259 | /** | ||
260 | * early_console_setup - setup debugging console | ||
261 | * | ||
262 | * Consoles started here require little enough setup that we can start using | ||
263 | * them very early in the boot process, either right after the machine | ||
264 | * vector initialization, or even before if the drivers can detect their hw. | ||
265 | * | ||
266 | * Returns non-zero if a console couldn't be setup. | ||
267 | */ | ||
268 | static inline int __init | ||
269 | early_console_setup (char *cmdline) | ||
270 | { | ||
271 | #ifdef CONFIG_SERIAL_SGI_L1_CONSOLE | ||
272 | { | ||
273 | extern int sn_serial_console_early_setup(void); | ||
274 | if (!sn_serial_console_early_setup()) | ||
275 | return 0; | ||
276 | } | ||
277 | #endif | ||
278 | #ifdef CONFIG_EFI_PCDP | ||
279 | if (!efi_setup_pcdp_console(cmdline)) | ||
280 | return 0; | ||
281 | #endif | ||
282 | #ifdef CONFIG_SERIAL_8250_CONSOLE | ||
283 | if (!early_serial_console_init(cmdline)) | ||
284 | return 0; | ||
285 | #endif | ||
286 | |||
287 | return -1; | ||
288 | } | ||
289 | |||
290 | static inline void | ||
291 | mark_bsp_online (void) | ||
292 | { | ||
293 | #ifdef CONFIG_SMP | ||
294 | /* If we register an early console, allow CPU 0 to printk */ | ||
295 | cpu_set(smp_processor_id(), cpu_online_map); | ||
296 | #endif | ||
297 | } | ||
298 | |||
299 | void __init | ||
300 | setup_arch (char **cmdline_p) | ||
301 | { | ||
302 | unw_init(); | ||
303 | |||
304 | ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); | ||
305 | |||
306 | *cmdline_p = __va(ia64_boot_param->command_line); | ||
307 | strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE); | ||
308 | |||
309 | efi_init(); | ||
310 | io_port_init(); | ||
311 | |||
312 | #ifdef CONFIG_IA64_GENERIC | ||
313 | { | ||
314 | const char *mvec_name = strstr (*cmdline_p, "machvec="); | ||
315 | char str[64]; | ||
316 | |||
317 | if (mvec_name) { | ||
318 | const char *end; | ||
319 | size_t len; | ||
320 | |||
321 | mvec_name += 8; | ||
322 | end = strchr (mvec_name, ' '); | ||
323 | if (end) | ||
324 | len = end - mvec_name; | ||
325 | else | ||
326 | len = strlen (mvec_name); | ||
327 | len = min(len, sizeof (str) - 1); | ||
328 | strncpy (str, mvec_name, len); | ||
329 | str[len] = '\0'; | ||
330 | mvec_name = str; | ||
331 | } else | ||
332 | mvec_name = acpi_get_sysname(); | ||
333 | machvec_init(mvec_name); | ||
334 | } | ||
335 | #endif | ||
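
	/*
	 * Usage note (editorial): on a generic kernel the machine vector can
	 * thus be forced from the boot command line, e.g. "machvec=dig",
	 * assuming that machvec is compiled in; otherwise it is taken from
	 * acpi_get_sysname().
	 */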
336 | |||
337 | if (early_console_setup(*cmdline_p) == 0) | ||
338 | mark_bsp_online(); | ||
339 | |||
340 | #ifdef CONFIG_ACPI_BOOT | ||
341 | /* Initialize the ACPI boot-time table parser */ | ||
342 | acpi_table_init(); | ||
343 | # ifdef CONFIG_ACPI_NUMA | ||
344 | acpi_numa_init(); | ||
345 | # endif | ||
346 | #else | ||
347 | # ifdef CONFIG_SMP | ||
348 | smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */ | ||
349 | # endif | ||
350 | #endif /* CONFIG_APCI_BOOT */ | ||
351 | |||
352 | find_memory(); | ||
353 | |||
354 | /* process SAL system table: */ | ||
355 | ia64_sal_init(efi.sal_systab); | ||
356 | |||
357 | #ifdef CONFIG_SMP | ||
358 | cpu_physical_id(0) = hard_smp_processor_id(); | ||
359 | #endif | ||
360 | |||
361 | cpu_init(); /* initialize the bootstrap CPU */ | ||
362 | |||
363 | #ifdef CONFIG_ACPI_BOOT | ||
364 | acpi_boot_init(); | ||
365 | #endif | ||
366 | |||
367 | #ifdef CONFIG_VT | ||
368 | if (!conswitchp) { | ||
369 | # if defined(CONFIG_DUMMY_CONSOLE) | ||
370 | conswitchp = &dummy_con; | ||
371 | # endif | ||
372 | # if defined(CONFIG_VGA_CONSOLE) | ||
373 | /* | ||
374 | * Non-legacy systems may route legacy VGA MMIO range to system | ||
375 | * memory. vga_con probes the MMIO hole, so memory looks like | ||
376 | * a VGA device to it. The EFI memory map can tell us if it's | ||
377 | * memory so we can avoid this problem. | ||
378 | */ | ||
379 | if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) | ||
380 | conswitchp = &vga_con; | ||
381 | # endif | ||
382 | } | ||
383 | #endif | ||
384 | |||
385 | /* enable IA-64 Machine Check Abort Handling unless disabled */ | ||
386 | if (!strstr(saved_command_line, "nomca")) | ||
387 | ia64_mca_init(); | ||
388 | |||
389 | platform_setup(cmdline_p); | ||
390 | paging_init(); | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * Display cpu info for all cpu's. | ||
395 | */ | ||
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
# define lpj	c->loops_per_jiffy
# define cpunum	c->cpu
#else
# define lpj	loops_per_jiffy
# define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;

	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}

	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %s\n"
		   "model      : %u\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   :%s\n"	/* don't change this---it _is_ right! */
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%06lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n\n",
		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   c->proc_freq / 1000000, c->proc_freq % 1000000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
	return 0;
}
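
/*
 * Arithmetic note (editorial; the sample values are hypothetical): the
 * BogoMIPS expression above prints lpj*HZ/500000, i.e. twice the number of
 * delay-loop iterations per second, in millions.  E.g. with HZ == 1024 and
 * loops_per_jiffy == 1468006:  1468006*1024/500000 == 3006 and
 * (1468006*1024/5000) % 100 == 47, so the line reads "BogoMIPS : 3006.47".
 */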
470 | |||
471 | static void * | ||
472 | c_start (struct seq_file *m, loff_t *pos) | ||
473 | { | ||
474 | #ifdef CONFIG_SMP | ||
475 | while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) | ||
476 | ++*pos; | ||
477 | #endif | ||
478 | return *pos < NR_CPUS ? cpu_data(*pos) : NULL; | ||
479 | } | ||
480 | |||
481 | static void * | ||
482 | c_next (struct seq_file *m, void *v, loff_t *pos) | ||
483 | { | ||
484 | ++*pos; | ||
485 | return c_start(m, pos); | ||
486 | } | ||
487 | |||
488 | static void | ||
489 | c_stop (struct seq_file *m, void *v) | ||
490 | { | ||
491 | } | ||
492 | |||
493 | struct seq_operations cpuinfo_op = { | ||
494 | .start = c_start, | ||
495 | .next = c_next, | ||
496 | .stop = c_stop, | ||
497 | .show = show_cpuinfo | ||
498 | }; | ||
499 | |||
500 | void | ||
501 | identify_cpu (struct cpuinfo_ia64 *c) | ||
502 | { | ||
503 | union { | ||
504 | unsigned long bits[5]; | ||
505 | struct { | ||
506 | /* id 0 & 1: */ | ||
507 | char vendor[16]; | ||
508 | |||
509 | /* id 2 */ | ||
510 | u64 ppn; /* processor serial number */ | ||
511 | |||
512 | /* id 3: */ | ||
513 | unsigned number : 8; | ||
514 | unsigned revision : 8; | ||
515 | unsigned model : 8; | ||
516 | unsigned family : 8; | ||
517 | unsigned archrev : 8; | ||
518 | unsigned reserved : 24; | ||
519 | |||
520 | /* id 4: */ | ||
521 | u64 features; | ||
522 | } field; | ||
523 | } cpuid; | ||
524 | pal_vm_info_1_u_t vm1; | ||
525 | pal_vm_info_2_u_t vm2; | ||
526 | pal_status_t status; | ||
527 | unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ | ||
528 | int i; | ||
529 | |||
530 | for (i = 0; i < 5; ++i) | ||
531 | cpuid.bits[i] = ia64_get_cpuid(i); | ||
532 | |||
533 | memcpy(c->vendor, cpuid.field.vendor, 16); | ||
534 | #ifdef CONFIG_SMP | ||
535 | c->cpu = smp_processor_id(); | ||
536 | #endif | ||
537 | c->ppn = cpuid.field.ppn; | ||
538 | c->number = cpuid.field.number; | ||
539 | c->revision = cpuid.field.revision; | ||
540 | c->model = cpuid.field.model; | ||
541 | c->family = cpuid.field.family; | ||
542 | c->archrev = cpuid.field.archrev; | ||
543 | c->features = cpuid.field.features; | ||
544 | |||
545 | status = ia64_pal_vm_summary(&vm1, &vm2); | ||
546 | if (status == PAL_STATUS_SUCCESS) { | ||
547 | impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; | ||
548 | phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; | ||
549 | } | ||
550 | c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); | ||
551 | c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); | ||
552 | } | ||
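
/*
 * Worked example (editorial, derived from the two expressions above): with
 * the Itanium defaults impl_va_msb == 50 and phys_addr_size == 44,
 * unimpl_va_mask covers bits 51..60 (bits 61..63 select the region, bits
 * 0..50 are implemented) and unimpl_pa_mask covers bits 44..62 (bit 63 is
 * the uncached bit), so an address intersecting either mask is
 * unimplemented on such a CPU.
 */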
553 | |||
554 | void | ||
555 | setup_per_cpu_areas (void) | ||
556 | { | ||
557 | /* start_kernel() requires this... */ | ||
558 | } | ||
559 | |||
560 | static void | ||
561 | get_max_cacheline_size (void) | ||
562 | { | ||
563 | unsigned long line_size, max = 1; | ||
564 | u64 l, levels, unique_caches; | ||
565 | pal_cache_config_info_t cci; | ||
566 | s64 status; | ||
567 | |||
568 | status = ia64_pal_cache_summary(&levels, &unique_caches); | ||
569 | if (status != 0) { | ||
570 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", | ||
571 | __FUNCTION__, status); | ||
572 | max = SMP_CACHE_BYTES; | ||
573 | goto out; | ||
574 | } | ||
575 | |||
576 | for (l = 0; l < levels; ++l) { | ||
577 | status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2, | ||
578 | &cci); | ||
579 | if (status != 0) { | ||
580 | printk(KERN_ERR | ||
581 | "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n", | ||
582 | __FUNCTION__, l, status); | ||
583 | max = SMP_CACHE_BYTES; | ||
584 | } | ||
585 | line_size = 1 << cci.pcci_line_size; | ||
586 | if (line_size > max) | ||
587 | max = line_size; | ||
588 | } | ||
589 | out: | ||
590 | if (max > ia64_max_cacheline_size) | ||
591 | ia64_max_cacheline_size = max; | ||
592 | } | ||
593 | |||
594 | /* | ||
595 | * cpu_init() initializes state that is per-CPU. This function acts | ||
596 | * as a 'CPU state barrier', nothing should get across. | ||
597 | */ | ||
598 | void | ||
599 | cpu_init (void) | ||
600 | { | ||
601 | extern void __devinit ia64_mmu_init (void *); | ||
602 | unsigned long num_phys_stacked; | ||
603 | pal_vm_info_2_u_t vmi; | ||
604 | unsigned int max_ctx; | ||
605 | struct cpuinfo_ia64 *cpu_info; | ||
606 | void *cpu_data; | ||
607 | |||
608 | cpu_data = per_cpu_init(); | ||
609 | |||
610 | /* | ||
611 | * We set ar.k3 so that assembly code in MCA handler can compute | ||
612 | * physical addresses of per cpu variables with a simple: | ||
613 | * phys = ar.k3 + &per_cpu_var | ||
614 | */ | ||
615 | ia64_set_kr(IA64_KR_PER_CPU_DATA, | ||
616 | ia64_tpa(cpu_data) - (long) __per_cpu_start); | ||
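
	/*
	 * Editorial sketch of the identity above (not part of the original
	 * comment): ar.k3 = tpa(cpu_data) - __per_cpu_start, and this CPU's
	 * copy of a per-CPU variable lives at cpu_data + offset, where
	 * offset is relative to __per_cpu_start.  Hence
	 *	phys = tpa(cpu_data) + offset
	 *	     = ar.k3 + (__per_cpu_start + offset)
	 * i.e. ar.k3 plus the variable's canonical link-time address.
	 */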
617 | |||
618 | get_max_cacheline_size(); | ||
619 | |||
620 | /* | ||
621 | * We can't pass "local_cpu_data" to identify_cpu() because we haven't called | ||
622 | * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it | ||
623 | * depends on the data returned by identify_cpu(). We break the dependency by | ||
624 | * accessing cpu_data() through the canonical per-CPU address. | ||
625 | */ | ||
626 | cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); | ||
627 | identify_cpu(cpu_info); | ||
628 | |||
629 | #ifdef CONFIG_MCKINLEY | ||
630 | { | ||
631 | # define FEATURE_SET 16 | ||
632 | struct ia64_pal_retval iprv; | ||
633 | |||
634 | if (cpu_info->family == 0x1f) { | ||
635 | PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); | ||
636 | if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) | ||
637 | PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, | ||
638 | (iprv.v1 | 0x80), FEATURE_SET, 0); | ||
639 | } | ||
640 | } | ||
641 | #endif | ||
642 | |||
643 | /* Clear the stack memory reserved for pt_regs: */ | ||
644 | memset(ia64_task_regs(current), 0, sizeof(struct pt_regs)); | ||
645 | |||
646 | ia64_set_kr(IA64_KR_FPU_OWNER, 0); | ||
647 | |||
648 | /* | ||
649 | * Initialize the page-table base register to a global | ||
650 | * directory with all zeroes. This ensure that we can handle | ||
651 | * TLB-misses to user address-space even before we created the | ||
652 | * first user address-space. This may happen, e.g., due to | ||
653 | * aggressive use of lfetch.fault. | ||
654 | */ | ||
655 | ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); | ||
656 | |||
657 | /* | ||
658 | * Initialize default control register to defer all speculative faults. The | ||
659 | * kernel MUST NOT depend on a particular setting of these bits (in other words, | ||
660 | * the kernel must have recovery code for all speculative accesses). Turn on | ||
661 | * dcr.lc as per recommendation by the architecture team. Most IA-32 apps | ||
662 | * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll | ||
663 | * be fine). | ||
664 | */ | ||
665 | ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | ||
666 | | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); | ||
667 | atomic_inc(&init_mm.mm_count); | ||
668 | current->active_mm = &init_mm; | ||
669 | if (current->mm) | ||
670 | BUG(); | ||
671 | |||
672 | ia64_mmu_init(ia64_imva(cpu_data)); | ||
673 | ia64_mca_cpu_init(ia64_imva(cpu_data)); | ||
674 | |||
675 | #ifdef CONFIG_IA32_SUPPORT | ||
676 | ia32_cpu_init(); | ||
677 | #endif | ||
678 | |||
679 | /* Clear ITC to eliminiate sched_clock() overflows in human time. */ | ||
680 | ia64_set_itc(0); | ||
681 | |||
682 | /* disable all local interrupt sources: */ | ||
683 | ia64_set_itv(1 << 16); | ||
684 | ia64_set_lrr0(1 << 16); | ||
685 | ia64_set_lrr1(1 << 16); | ||
686 | ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); | ||
687 | ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); | ||
688 | |||
689 | /* clear TPR & XTP to enable all interrupt classes: */ | ||
690 | ia64_setreg(_IA64_REG_CR_TPR, 0); | ||
691 | #ifdef CONFIG_SMP | ||
692 | normal_xtp(); | ||
693 | #endif | ||
694 | |||
695 | /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ | ||
696 | if (ia64_pal_vm_summary(NULL, &vmi) == 0) | ||
697 | max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; | ||
698 | else { | ||
699 | printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); | ||
700 | max_ctx = (1U << 15) - 1; /* use architected minimum */ | ||
701 | } | ||
702 | while (max_ctx < ia64_ctx.max_ctx) { | ||
703 | unsigned int old = ia64_ctx.max_ctx; | ||
704 | if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) | ||
705 | break; | ||
706 | } | ||
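
	/*
	 * Editorial note: the loop above is a lock-free "atomic min" on the
	 * shared ia64_ctx.max_ctx.  If another CPU lowers max_ctx between
	 * the read of 'old' and the cmpxchg, the cmpxchg fails and the new
	 * value is re-checked; the loop exits once the shared value is
	 * already low enough or our store wins.
	 */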
707 | |||
708 | if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { | ||
709 | printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " | ||
710 | "stacked regs\n"); | ||
711 | num_phys_stacked = 96; | ||
712 | } | ||
713 | /* size of physical stacked register partition plus 8 bytes: */ | ||
714 | __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8; | ||
715 | platform_cpu_init(); | ||
716 | } | ||
717 | |||
718 | void | ||
719 | check_bugs (void) | ||
720 | { | ||
721 | ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, | ||
722 | (unsigned long) __end___mckinley_e9_bundles); | ||
723 | } | ||