author     Len Brown <len.brown@intel.com>	2005-12-06 17:31:30 -0500
committer  Len Brown <len.brown@intel.com>	2005-12-06 17:31:30 -0500
commit     3d5271f9883cba7b54762bc4fe027d4172f06db7 (patch)
tree       ab8a881a14478598a0c8bda0d26c62cdccfffd6d /arch/ia64/kernel
parent     378b2556f4e09fa6f87ff0cb5c4395ff28257d02 (diff)
parent     9115a6c787596e687df03010d97fccc5e0762506 (diff)

Pull release into acpica branch
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c          13
-rw-r--r--  arch/ia64/kernel/cyclone.c        1
-rw-r--r--  arch/ia64/kernel/efi.c          510
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c     1
-rw-r--r--  arch/ia64/kernel/irq.c           12
-rw-r--r--  arch/ia64/kernel/ivt.S          142
-rw-r--r--  arch/ia64/kernel/kprobes.c      144
-rw-r--r--  arch/ia64/kernel/mca.c          129
-rw-r--r--  arch/ia64/kernel/mca_asm.S       96
-rw-r--r--  arch/ia64/kernel/mca_drv.c       39
-rw-r--r--  arch/ia64/kernel/module.c         6
-rw-r--r--  arch/ia64/kernel/patch.c         16
-rw-r--r--  arch/ia64/kernel/perfmon.c        5
-rw-r--r--  arch/ia64/kernel/process.c       42
-rw-r--r--  arch/ia64/kernel/ptrace.c        28
-rw-r--r--  arch/ia64/kernel/setup.c         71
-rw-r--r--  arch/ia64/kernel/signal.c        11
-rw-r--r--  arch/ia64/kernel/smp.c           10
-rw-r--r--  arch/ia64/kernel/smpboot.c        7
-rw-r--r--  arch/ia64/kernel/time.c           4
-rw-r--r--  arch/ia64/kernel/traps.c         62
-rw-r--r--  arch/ia64/kernel/uncached.c      17
22 files changed, 884 insertions, 482 deletions
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 7e926471e4ec..9ad94ddf6687 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif				/* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
-acpi_status __devinit
+static acpi_status __devinit
 acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -890,7 +890,16 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	map_iosapic_to_node(gsi_base, node);
 	return AE_OK;
 }
-#endif /* CONFIG_NUMA */
+
+static int __init
+acpi_map_iosapics (void)
+{
+	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
+	return 0;
+}
+
+fs_initcall(acpi_map_iosapics);
+#endif /* CONFIG_ACPI_NUMA */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
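The new fs_initcall() means the IOSAPIC-to-node walk now runs automatically at boot, at initcall level 5 (after core and arch setup, before ordinary device_initcall code). As a minimal sketch of the same pattern, where only acpi_get_devices() and fs_initcall() come from the hunk above and the handler names are hypothetical:

	/* hypothetical walker, mirroring acpi_map_iosapics() above */
	static acpi_status
	my_visit(acpi_handle handle, u32 depth, void *context, void **ret)
	{
		/* inspect the device behind 'handle'; AE_OK continues the walk */
		return AE_OK;
	}

	static int __init my_walk_init(void)
	{
		acpi_get_devices(NULL, my_visit, NULL, NULL);	/* NULL HID: visit every device */
		return 0;
	}
	fs_initcall(my_walk_init);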
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 768c7e46957c..6ade3790ce07 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -2,6 +2,7 @@
 #include <linux/smp.h>
 #include <linux/time.h>
 #include <linux/errno.h>
+#include <linux/timex.h>
 #include <asm/io.h>
 
 /* IBM Summit (EXA) Cyclone counter code*/
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 179f230816ed..a3aa45cbcfa0 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -239,57 +239,30 @@ is_available_memory (efi_memory_desc_t *md)
 	return 0;
 }
 
-/*
- * Trim descriptor MD so its starts at address START_ADDR.  If the descriptor covers
- * memory that is normally available to the kernel, issue a warning that some memory
- * is being ignored.
- */
-static void
-trim_bottom (efi_memory_desc_t *md, u64 start_addr)
-{
-	u64 num_skipped_pages;
+typedef struct kern_memdesc {
+	u64 attribute;
+	u64 start;
+	u64 num_pages;
+} kern_memdesc_t;
 
-	if (md->phys_addr >= start_addr || !md->num_pages)
-		return;
-
-	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-	if (num_skipped_pages > md->num_pages)
-		num_skipped_pages = md->num_pages;
-
-	if (is_available_memory(md))
-		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-		       "at 0x%lx\n", __FUNCTION__,
-		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
-		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
-	/*
-	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
-	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
-	 * zero, so the Right Thing will happen.
-	 */
-	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
-	md->num_pages -= num_skipped_pages;
-}
+static kern_memdesc_t *kern_memmap;
 
 static void
-trim_top (efi_memory_desc_t *md, u64 end_addr)
+walk (efi_freemem_callback_t callback, void *arg, u64 attr)
 {
-	u64 num_dropped_pages, md_end_addr;
-
-	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-	if (md_end_addr <= end_addr || !md->num_pages)
-		return;
+	kern_memdesc_t *k;
+	u64 start, end, voff;
 
-	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
-	if (num_dropped_pages > md->num_pages)
-		num_dropped_pages = md->num_pages;
-
-	if (is_available_memory(md))
-		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
-		       "at 0x%lx\n", __FUNCTION__,
-		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
-		       md->phys_addr, end_addr);
-	md->num_pages -= num_dropped_pages;
+	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
+	for (k = kern_memmap; k->start != ~0UL; k++) {
+		if (k->attribute != attr)
+			continue;
+		start = PAGE_ALIGN(k->start);
+		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
+		if (start < end)
+			if ((*callback)(start + voff, end + voff, arg) < 0)
+				return;
+	}
 }
 
 /*
@@ -299,148 +272,19 @@ trim_top (efi_memory_desc_t *md, u64 end_addr)
 void
 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 {
-	int prev_valid = 0;
-	struct range {
-		u64 start;
-		u64 end;
-	} prev, curr;
-	void *efi_map_start, *efi_map_end, *p, *q;
-	efi_memory_desc_t *md, *check_md;
-	u64 efi_desc_size, start, end, granule_addr, last_granule_addr, first_non_wb_addr = 0;
-	unsigned long total_mem = 0;
-
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-
-		/* skip over non-WB memory descriptors; that's all we're interested in... */
-		if (!(md->attribute & EFI_MEMORY_WB))
-			continue;
-
-		/*
-		 * granule_addr is the base of md's first granule.
-		 * [granule_addr - first_non_wb_addr) is guaranteed to
-		 * be contiguous WB memory.
-		 */
-		granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-		first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-
-		if (first_non_wb_addr < md->phys_addr) {
-			trim_bottom(md, granule_addr + IA64_GRANULE_SIZE);
-			granule_addr = GRANULEROUNDDOWN(md->phys_addr);
-			first_non_wb_addr = max(first_non_wb_addr, granule_addr);
-		}
-
-		for (q = p; q < efi_map_end; q += efi_desc_size) {
-			check_md = q;
-
-			if ((check_md->attribute & EFI_MEMORY_WB) &&
-			    (check_md->phys_addr == first_non_wb_addr))
-				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
-			else
-				break;		/* non-WB or hole */
-		}
-
-		last_granule_addr = GRANULEROUNDDOWN(first_non_wb_addr);
-		if (last_granule_addr < md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT))
-			trim_top(md, last_granule_addr);
-
-		if (is_available_memory(md)) {
-			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) >= max_addr) {
-				if (md->phys_addr >= max_addr)
-					continue;
-				md->num_pages = (max_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
-				first_non_wb_addr = max_addr;
-			}
-
-			if (total_mem >= mem_limit)
-				continue;
-
-			if (total_mem + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
-				unsigned long limit_addr = md->phys_addr;
-
-				limit_addr += mem_limit - total_mem;
-				limit_addr = GRANULEROUNDDOWN(limit_addr);
-
-				if (md->phys_addr > limit_addr)
-					continue;
-
-				md->num_pages = (limit_addr - md->phys_addr) >>
-				                EFI_PAGE_SHIFT;
-				first_non_wb_addr = max_addr = md->phys_addr +
-				              (md->num_pages << EFI_PAGE_SHIFT);
-			}
-			total_mem += (md->num_pages << EFI_PAGE_SHIFT);
-
-			if (md->num_pages == 0)
-				continue;
-
-			curr.start = PAGE_OFFSET + md->phys_addr;
-			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);
-
-			if (!prev_valid) {
-				prev = curr;
-				prev_valid = 1;
-			} else {
-				if (curr.start < prev.start)
-					printk(KERN_ERR "Oops: EFI memory table not ordered!\n");
-
-				if (prev.end == curr.start) {
-					/* merge two consecutive memory ranges */
-					prev.end = curr.end;
-				} else {
-					start = PAGE_ALIGN(prev.start);
-					end = prev.end & PAGE_MASK;
-					if ((end > start) && (*callback)(start, end, arg) < 0)
-						return;
-					prev = curr;
-				}
-			}
-		}
-	}
-	if (prev_valid) {
-		start = PAGE_ALIGN(prev.start);
-		end = prev.end & PAGE_MASK;
-		if (end > start)
-			(*callback)(start, end, arg);
-	}
+	walk(callback, arg, EFI_MEMORY_WB);
 }
 
 /*
- * Walk the EFI memory map to pull out leftover pages in the lower
- * memory regions which do not end up in the regular memory map and
- * stick them into the uncached allocator
- *
- * The regular walk function is significantly more complex than the
- * uncached walk which means it really doesn't make sense to try and
- * marge the two.
+ * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
+ * has memory that is available for uncached allocator.
  */
-void __init
-efi_memmap_walk_uc (efi_freemem_callback_t callback)
+void
+efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
 {
-	void *efi_map_start, *efi_map_end, *p;
-	efi_memory_desc_t *md;
-	u64 efi_desc_size, start, end;
-
-	efi_map_start = __va(ia64_boot_param->efi_memmap);
-	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-	efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-		md = p;
-		if (md->attribute == EFI_MEMORY_UC) {
-			start = PAGE_ALIGN(md->phys_addr);
-			end = PAGE_ALIGN((md->phys_addr+(md->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK);
-			if ((*callback)(start, end, NULL) < 0)
-				return;
-		}
-	}
+	walk(callback, arg, EFI_MEMORY_UC);
 }
 
-
 /*
  * Look for the PAL_CODE region reported by EFI and maps it using an
  * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
@@ -862,3 +706,307 @@ efi_uart_console_only(void)
 	printk(KERN_ERR "Malformed %s value\n", name);
 	return 0;
 }
+
+#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+	return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+	return (md->attribute & EFI_MEMORY_UC);
+}
+
+/*
+ * Look for the first granule aligned memory descriptor memory
+ * that is big enough to hold EFI memory map. Make sure this
+ * descriptor is at least granule sized so it does not get trimmed
+ */
+struct kern_memdesc *
+find_memmap_space (void)
+{
+	u64	contig_low=0, contig_high=0;
+	u64	as = 0, ae;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *pmd = NULL, *check_md;
+	u64	space_needed, efi_desc_size;
+	unsigned long total_mem = 0;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	/*
+	 * Worst case: we need 3 kernel descriptors for each efi descriptor
+	 * (if every entry has a WB part in the middle, and UC head and tail),
+	 * plus one for the end marker.
+	 */
+	space_needed = sizeof(kern_memdesc_t) *
+		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
+
+	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+		md = p;
+		if (!efi_wb(md)) {
+			continue;
+		}
+		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+			contig_low = GRANULEROUNDUP(md->phys_addr);
+			contig_high = efi_md_end(md);
+			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+				if (!efi_wb(check_md))
+					break;
+				if (contig_high != check_md->phys_addr)
+					break;
+				contig_high = efi_md_end(check_md);
+			}
+			contig_high = GRANULEROUNDDOWN(contig_high);
+		}
+		if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
+			continue;
+
+		/* Round ends inward to granule boundaries */
+		as = max(contig_low, md->phys_addr);
+		ae = min(contig_high, efi_md_end(md));
+
+		/* keep within max_addr= command line arg */
+		ae = min(ae, max_addr);
+		if (ae <= as)
+			continue;
+
+		/* avoid going over mem= command line arg */
+		if (total_mem + (ae - as) > mem_limit)
+			ae -= total_mem + (ae - as) - mem_limit;
+
+		if (ae <= as)
+			continue;
+
+		if (ae - as > space_needed)
+			break;
+	}
+	if (p >= efi_map_end)
+		panic("Can't allocate space for kernel memory descriptors");
+
+	return __va(as);
+}
+
+/*
+ * Walk the EFI memory map and gather all memory available for kernel
+ * to use.  We can allocate partial granules only if the unavailable
+ * parts exist, and are WB.
+ */
+void
+efi_memmap_init(unsigned long *s, unsigned long *e)
+{
+	struct kern_memdesc *k, *prev = 0;
+	u64	contig_low=0, contig_high=0;
+	u64	as, ae, lim;
+	void *efi_map_start, *efi_map_end, *p, *q;
+	efi_memory_desc_t *md, *pmd = NULL, *check_md;
+	u64	efi_desc_size;
+	unsigned long total_mem = 0;
+
+	k = kern_memmap = find_memmap_space();
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
+		md = p;
+		if (!efi_wb(md)) {
+			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
+				    md->type == EFI_BOOT_SERVICES_DATA)) {
+				k->attribute = EFI_MEMORY_UC;
+				k->start = md->phys_addr;
+				k->num_pages = md->num_pages;
+				k++;
+			}
+			continue;
+		}
+		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+			contig_low = GRANULEROUNDUP(md->phys_addr);
+			contig_high = efi_md_end(md);
+			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+				check_md = q;
+				if (!efi_wb(check_md))
+					break;
+				if (contig_high != check_md->phys_addr)
+					break;
+				contig_high = efi_md_end(check_md);
+			}
+			contig_high = GRANULEROUNDDOWN(contig_high);
+		}
+		if (!is_available_memory(md))
+			continue;
+
+		/*
+		 * Round ends inward to granule boundaries
+		 * Give trimmings to uncached allocator
+		 */
+		if (md->phys_addr < contig_low) {
+			lim = min(efi_md_end(md), contig_low);
+			if (efi_uc(md)) {
+				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
+				    kmd_end(k-1) == md->phys_addr) {
+					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+				} else {
+					k->attribute = EFI_MEMORY_UC;
+					k->start = md->phys_addr;
+					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+					k++;
+				}
+			}
+			as = contig_low;
+		} else
+			as = md->phys_addr;
+
+		if (efi_md_end(md) > contig_high) {
+			lim = max(md->phys_addr, contig_high);
+			if (efi_uc(md)) {
+				if (lim == md->phys_addr && k > kern_memmap &&
+				    (k-1)->attribute == EFI_MEMORY_UC &&
+				    kmd_end(k-1) == md->phys_addr) {
+					(k-1)->num_pages += md->num_pages;
+				} else {
+					k->attribute = EFI_MEMORY_UC;
+					k->start = lim;
+					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
+					k++;
+				}
+			}
+			ae = contig_high;
+		} else
+			ae = efi_md_end(md);
+
+		/* keep within max_addr= command line arg */
+		ae = min(ae, max_addr);
+		if (ae <= as)
+			continue;
+
+		/* avoid going over mem= command line arg */
+		if (total_mem + (ae - as) > mem_limit)
+			ae -= total_mem + (ae - as) - mem_limit;
+
+		if (ae <= as)
+			continue;
+		if (prev && kmd_end(prev) == md->phys_addr) {
+			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
+			total_mem += ae - as;
+			continue;
+		}
+		k->attribute = EFI_MEMORY_WB;
+		k->start = as;
+		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
+		total_mem += ae - as;
+		prev = k++;
+	}
+	k->start = ~0L;		/* end-marker */
+
+	/* reserve the memory we are using for kern_memmap */
+	*s = (u64)kern_memmap;
+	*e = (u64)++k;
+}
+
+void
+efi_initialize_iomem_resources(struct resource *code_resource,
+			       struct resource *data_resource)
+{
+	struct resource *res;
+	void *efi_map_start, *efi_map_end, *p;
+	efi_memory_desc_t *md;
+	u64 efi_desc_size;
+	char *name;
+	unsigned long flags;
+
+	efi_map_start = __va(ia64_boot_param->efi_memmap);
+	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+	efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+	res = NULL;
+
+	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+		md = p;
+
+		if (md->num_pages == 0) /* should not happen */
+			continue;
+
+		flags = IORESOURCE_MEM;
+		switch (md->type) {
+
+			case EFI_MEMORY_MAPPED_IO:
+			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+				continue;
+
+			case EFI_LOADER_CODE:
+			case EFI_LOADER_DATA:
+			case EFI_BOOT_SERVICES_DATA:
+			case EFI_BOOT_SERVICES_CODE:
+			case EFI_CONVENTIONAL_MEMORY:
+				if (md->attribute & EFI_MEMORY_WP) {
+					name = "System ROM";
+					flags |= IORESOURCE_READONLY;
+				} else {
+					name = "System RAM";
+				}
+				break;
+
+			case EFI_ACPI_MEMORY_NVS:
+				name = "ACPI Non-volatile Storage";
+				flags |= IORESOURCE_BUSY;
+				break;
+
+			case EFI_UNUSABLE_MEMORY:
+				name = "reserved";
+				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
+				break;
+
+			case EFI_RESERVED_TYPE:
+			case EFI_RUNTIME_SERVICES_CODE:
+			case EFI_RUNTIME_SERVICES_DATA:
+			case EFI_ACPI_RECLAIM_MEMORY:
+			default:
+				name = "reserved";
+				flags |= IORESOURCE_BUSY;
+				break;
+		}
+
+		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
+			printk(KERN_ERR "failed to allocate resource for iomem\n");
+			return;
+		}
+
+		res->name = name;
+		res->start = md->phys_addr;
+		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+		res->flags = flags;
+
+		if (insert_resource(&iomem_resource, res) < 0)
+			kfree(res);
+		else {
+			/*
+			 * We don't know which region contains
+			 * kernel data so we try it repeatedly and
+			 * let the resource manager test it.
+			 */
+			insert_resource(res, code_resource);
+			insert_resource(res, data_resource);
+		}
+	}
+}
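The consolidated walk() gives both walkers one callback contract: each contiguous range is delivered as a virtual [start, end) pair, and a negative return value stops the walk early. A sketch of a caller follows; the callback shown is hypothetical (ia64 boot code uses this same shape, for example to total up free pages):

	static int count_pages(u64 start, u64 end, void *arg)
	{
		unsigned long *count = arg;

		*count += (end - start) >> PAGE_SHIFT;
		return 0;	/* >= 0: keep walking; < 0 would stop the walk */
	}

	unsigned long n = 0;
	efi_memmap_walk(count_pages, &n);	/* WB ranges, offset by PAGE_OFFSET */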
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 01572814abe4..5db9d3bcbbcb 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -42,6 +42,7 @@ EXPORT_SYMBOL(clear_page);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #include <linux/bootmem.h>
+EXPORT_SYMBOL(min_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 #endif
 
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d98028261..d33244c32759 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j) {
+			seq_printf(p, "CPU%d ",j);
+		}
 		seq_putc(p, '\n');
 	}
 
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j) {
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	}
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
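for_each_online_cpu() is the standard helper for exactly this open-coded loop: it iterates only over CPUs set in the online map, so the explicit cpu_online() test disappears. A minimal sketch of the replacement pattern:

	int j;

	for_each_online_cpu(j)
		seq_printf(p, "CPU%d ", j);	/* body runs once per online CPU */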
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index c13ca0d49c4a..301f2e9d262e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -91,16 +91,17 @@ ENTRY(vhpt_miss)
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
-	 * What we do here is normal TLB miss handing for the _original_ miss, followed
-	 * by inserting the TLB entry for the virtual page table page that the VHPT
-	 * walker was attempting to access.  The latter gets inserted as long
-	 * as both L1 and L2 have valid mappings for the faulting address.
-	 * The TLB entry for the original miss gets inserted only if
-	 * the L3 entry indicates that the page is present.
+	 * What we do here is normal TLB miss handling for the _original_ miss,
+	 * followed by inserting the TLB entry for the virtual page table page
+	 * that the VHPT walker was attempting to access.  The latter gets
+	 * inserted as long as page table entries above the pte level have valid
+	 * mappings for the faulting address.  The TLB entry for the original
+	 * miss gets inserted only if the pte entry indicates that the page is
+	 * present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
-	 *	- the faulting virtual address has no L1, L2, or L3 mapping
+	 *	- the faulting virtual address has no valid page table mapping
	 */
	mov r16=cr.ifa				// get address that caused the TLB miss
 #ifdef CONFIG_HUGETLB_PAGE
@@ -114,7 +115,7 @@ ENTRY(vhpt_miss)
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
-	shr r22=r21,3
+	shr.u r22=r21,3
 #ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
@@ -126,7 +127,7 @@
 #endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into to region 5?
-	shr.u r18=r22,PGDIR_SHIFT		// get bits 33-63 of the faulting address
+	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bit
	;;
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 
@@ -137,24 +138,38 @@
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
+#ifdef CONFIG_PGTABLE_4
+	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
+#else
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+#endif
+	;;
+	ld8 r17=[r17]				// get *pgd (may be 0)
+	;;
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
+#ifdef CONFIG_PGTABLE_4
+	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
+#else
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
+#endif
	;;
-(p7)	ld8 r20=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
+(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
-(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was L2 entry NULL?
-	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
+	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
-(p7)	ld8 r18=[r21]				// read the L3 PTE
-	mov r19=cr.isr				// cr.isr bit 0 tells us if this is an insn miss
+(p7)	ld8 r18=[r21]				// read *pte
+	mov r19=cr.isr				// cr.isr bit 32 tells us if this is an insn miss
	;;
 (p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
@@ -188,18 +203,33 @@ ENTRY(vhpt_miss)
	dv_serialize_data
 
	/*
-	 * Re-check L2 and L3 pagetable.  If they changed, we may have received a ptc.g
+	 * Re-check pagetable entry.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
-	 * inserted and retry.
+	 * inserted and retry.  At this point, we have:
+	 *
+	 * r28 = equivalent of pud_offset(pgd, ifa)
+	 * r17 = equivalent of pmd_offset(pud, ifa)
+	 * r21 = equivalent of pte_offset(pmd, ifa)
+	 *
+	 * r29 = *pud
+	 * r20 = *pmd
+	 * r18 = *pte
	 */
-	ld8 r25=[r21]				// read L3 PTE again
-	ld8 r26=[r17]				// read L2 entry again
+	ld8 r25=[r21]				// read *pte again
+	ld8 r26=[r17]				// read *pmd again
+#ifdef CONFIG_PGTABLE_4
+	ld8 r19=[r28]				// read *pud again
+#endif
+	cmp.ne p6,p7=r0,r0
	;;
-	cmp.ne p6,p7=r26,r20			// did L2 entry change
+	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
+#ifdef CONFIG_PGTABLE_4
+	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
+#endif
	mov r27=PAGE_SHIFT<<2
	;;
 (p6)	ptc.l r22,r27				// purge PTE page translation
-(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did L3 PTE change
+(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
 (p6)	ptc.l r16,r27				// purge translation
 #endif
@@ -214,19 +244,19 @@ END(vhpt_miss)
 ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
-	 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
 .itlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -241,7 +271,7 @@ ENTRY(itlb_miss)
	 */
	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
@@ -258,19 +288,19 @@ END(itlb_miss)
 ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
-	 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
-	 * mode, walk the page table, and then re-execute the L3 PTE read
-	 * and go on normally after that.
+	 * mode, walk the page table, and then re-execute the PTE read and
+	 * go on normally after that.
	 */
	mov r16=cr.ifa				// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
 dtlb_fault:
-	mov r17=cr.iha				// get virtual address of L3 PTE
+	mov r17=cr.iha				// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
-1:	ld8 r18=[r17]				// read L3 PTE
+1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
@@ -285,7 +315,7 @@ dtlb_fault:
	 */
	dv_serialize_data
 
-	ld8 r19=[r17]				// read L3 PTE again and see if same
+	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
@@ -399,7 +429,7 @@ ENTRY(nested_dtlb_miss)
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
-	 * Output:	r17:	physical address of L3 PTE of faulting address
+	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
@@ -429,21 +459,33 @@ ENTRY(nested_dtlb_miss)
 (p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
 (p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
-(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
-(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
+(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
+(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
+#ifdef CONFIG_PGTABLE_4
+	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
+#else
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+#endif
	;;
-	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
+	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
-(p7)	cmp.eq p6,p7=r17,r0			// was L1 entry NULL?
-	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
+(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
-(p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
+#ifdef CONFIG_PGTABLE_4
+(p7)	ld8 r17=[r17]				// get *pud (may be 0)
+	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
+	;;
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
+	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
+	;;
+#endif
+(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
+	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
-(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
-	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
+(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
+	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
 (p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
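A C rendering of the walk these handlers perform may help. This is only a sketch against the generic page-table API (the helper name is hypothetical); without CONFIG_PGTABLE_4 the pud level is folded into the pgd, which is why the assembly only grows an extra load and compare under that #ifdef:

	/* hypothetical C equivalent of the vhpt_miss/nested_dtlb_miss chase */
	static pte_t *walk_page_table(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd))
			return NULL;			/* assembly: branch to page_fault */
		pud = pud_offset(pgd, addr);		/* folds away without CONFIG_PGTABLE_4 */
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* caller still checks the present bit */
	}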
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 471086b808a4..2895d6e6062f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -26,7 +26,6 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/preempt.h>
@@ -38,13 +37,8 @@
 
 extern void jprobe_inst_return(void);
 
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE	0x00000001
-#define KPROBE_HIT_SS		0x00000002
-
-static struct kprobe *current_kprobe, *kprobe_prev;
-static unsigned long kprobe_status, kprobe_status_prev;
-static struct pt_regs jprobe_saved_regs;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 enum instruction_type {A, I, M, F, B, L, X, u};
 static enum instruction_type bundle_encoding[32][3] = {
@@ -313,21 +307,22 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
 	return 0;
 }
 
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	kprobe_prev = current_kprobe;
-	kprobe_status_prev = kprobe_status;
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
 }
 
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	current_kprobe = kprobe_prev;
-	kprobe_status = kprobe_status_prev;
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
-static inline void set_current_kprobe(struct kprobe *p)
+static inline void set_current_kprobe(struct kprobe *p,
+			struct kprobe_ctlblk *kcb)
 {
-	current_kprobe = p;
+	__get_cpu_var(current_kprobe) = p;
 }
 
 static void kretprobe_trampoline(void)
@@ -347,11 +342,12 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
 
-	head = kretprobe_inst_table_head(current);
+	spin_lock_irqsave(&kretprobe_lock, flags);
+	head = kretprobe_inst_table_head(current);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
@@ -367,9 +363,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * kretprobe_trampoline
 	 */
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-		if (ri->task != current)
+		if (ri->task != current)
 			/* another task is sharing our hash bucket */
-			continue;
+			continue;
 
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
@@ -389,17 +385,19 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
 	regs->cr_iip = orig_ret_address;
 
-	unlock_kprobes();
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption.
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				      struct pt_regs *regs)
 {
@@ -606,17 +604,22 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 	int ret = 0;
 	struct pt_regs *regs = args->regs;
 	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+	struct kprobe_ctlblk *kcb;
 
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
 	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Handle recursion cases */
 	if (kprobe_running()) {
 		p = get_kprobe(addr);
 		if (p) {
-			if ( (kprobe_status == KPROBE_HIT_SS) &&
+			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
 			     (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
 				ia64_psr(regs)->ss = 0;
-				unlock_kprobes();
 				goto no_kprobe;
 			}
 			/* We have reentered the pre_kprobe_handler(), since
@@ -625,17 +628,17 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 			 * just single step on the instruction of the new probe
 			 * without calling any user handlers.
 			 */
-			save_previous_kprobe();
-			set_current_kprobe(p);
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, kcb);
 			p->nmissed++;
 			prepare_ss(p, regs);
-			kprobe_status = KPROBE_REENTER;
+			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else if (args->err == __IA64_BREAK_JPROBE) {
 			/*
 			 * jprobe instrumented function just completed
 			 */
-			p = current_kprobe;
+			p = __get_cpu_var(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}
@@ -645,10 +648,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		}
 	}
 
-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (!is_ia64_break_inst(regs)) {
 			/*
 			 * The breakpoint instruction was removed right
@@ -665,8 +666,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 		goto no_kprobe;
 	}
 
-	kprobe_status = KPROBE_HIT_ACTIVE;
-	set_current_kprobe(p);
+	set_current_kprobe(p, kcb);
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
 
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/*
@@ -678,7 +679,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 
 ss_probe:
 	prepare_ss(p, regs);
-	kprobe_status = KPROBE_HIT_SS;
+	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
 
 no_kprobe:
@@ -688,23 +689,25 @@ no_kprobe:
 
 static int __kprobes post_kprobes_handler(struct pt_regs *regs)
 {
-	if (!kprobe_running())
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
 		return 0;
 
-	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
-		kprobe_status = KPROBE_HIT_SSDONE;
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
 	}
 
-	resume_execution(current_kprobe, regs);
+	resume_execution(cur, regs);
 
 	/*Restore back the original saved kprobes variables and continue. */
-	if (kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe();
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
 		goto out;
 	}
-
-	unlock_kprobes();
+	reset_current_kprobe();
 
 out:
 	preempt_enable_no_resched();
@@ -713,16 +716,15 @@ out:
 
 static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (!kprobe_running())
-		return 0;
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (current_kprobe->fault_handler &&
-	    current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 		return 1;
 
-	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
-		unlock_kprobes();
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
+		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
 
@@ -733,31 +735,42 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
 	switch(val) {
 	case DIE_BREAK:
-		if (pre_kprobes_handler(args))
-			return NOTIFY_STOP;
+		/* err is break number from ia64_bad_break() */
+		if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
+			if (pre_kprobes_handler(args))
+				ret = NOTIFY_STOP;
 		break;
-	case DIE_SS:
-		if (post_kprobes_handler(args->regs))
-			return NOTIFY_STOP;
+	case DIE_FAULT:
+		/* err is vector number from ia64_fault() */
+		if (args->err == 36)
+			if (post_kprobes_handler(args->regs))
+				ret = NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
-		if (kprobes_fault_handler(args->regs, args->trapnr))
-			return NOTIFY_STOP;
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+		if (kprobe_running() &&
+			kprobes_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
 	default:
 		break;
 	}
-	return NOTIFY_DONE;
+	return ret;
 }
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
 	/* save architectural state */
-	jprobe_saved_regs = *regs;
+	kcb->jprobe_saved_regs = *regs;
 
 	/* after rfi, execute the jprobe instrumented function */
 	regs->cr_iip = addr & ~0xFULL;
@@ -775,7 +788,10 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	*regs = jprobe_saved_regs;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	*regs = kcb->jprobe_saved_regs;
+	preempt_enable_no_resched();
 	return 1;
 }
 
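The per-CPU conversion is what lets the old global locking (lock_kprobes()/unlock_kprobes()) disappear: with preemption disabled, each CPU owns its own kprobe_ctlblk, and recursion is handled by saving one level of state rather than taking a lock. A sketch of the reentry protocol, using only helpers visible in the diff above:

	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();	/* valid: preemption is off */

	save_previous_kprobe(kcb);		/* stash the probe we interrupted */
	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_REENTER;
	/* ... single-step the nested probe ... */
	restore_previous_kprobe(kcb);		/* resume handling the outer probe */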
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6dc726ad7137..355af15287c7 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -51,6 +51,9 @@ | |||
51 | * | 51 | * |
52 | * 2005-08-12 Keith Owens <kaos@sgi.com> | 52 | * 2005-08-12 Keith Owens <kaos@sgi.com> |
53 | * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. | 53 | * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. |
54 | * | ||
55 | * 2005-10-07 Keith Owens <kaos@sgi.com> | ||
56 | * Add notify_die() hooks. | ||
54 | */ | 57 | */ |
55 | #include <linux/config.h> | 58 | #include <linux/config.h> |
56 | #include <linux/types.h> | 59 | #include <linux/types.h> |
@@ -58,7 +61,6 @@ | |||
58 | #include <linux/sched.h> | 61 | #include <linux/sched.h> |
59 | #include <linux/interrupt.h> | 62 | #include <linux/interrupt.h> |
60 | #include <linux/irq.h> | 63 | #include <linux/irq.h> |
61 | #include <linux/kallsyms.h> | ||
62 | #include <linux/smp_lock.h> | 64 | #include <linux/smp_lock.h> |
63 | #include <linux/bootmem.h> | 65 | #include <linux/bootmem.h> |
64 | #include <linux/acpi.h> | 66 | #include <linux/acpi.h> |
@@ -69,6 +71,7 @@ | |||
69 | #include <linux/workqueue.h> | 71 | #include <linux/workqueue.h> |
70 | 72 | ||
71 | #include <asm/delay.h> | 73 | #include <asm/delay.h> |
74 | #include <asm/kdebug.h> | ||
72 | #include <asm/machvec.h> | 75 | #include <asm/machvec.h> |
73 | #include <asm/meminit.h> | 76 | #include <asm/meminit.h> |
74 | #include <asm/page.h> | 77 | #include <asm/page.h> |
@@ -132,6 +135,14 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); | |||
132 | 135 | ||
133 | static int mca_init; | 136 | static int mca_init; |
134 | 137 | ||
138 | |||
139 | static inline void | ||
140 | ia64_mca_spin(const char *func) | ||
141 | { | ||
142 | printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func); | ||
143 | while (1) | ||
144 | cpu_relax(); | ||
145 | } | ||
135 | /* | 146 | /* |
136 | * IA64_MCA log support | 147 | * IA64_MCA log support |
137 | */ | 148 | */ |
@@ -508,9 +519,7 @@ ia64_mca_wakeup_all(void) | |||
508 | int cpu; | 519 | int cpu; |
509 | 520 | ||
510 | /* Clear the Rendez checkin flag for all cpus */ | 521 | /* Clear the Rendez checkin flag for all cpus */ |
511 | for(cpu = 0; cpu < NR_CPUS; cpu++) { | 522 | for_each_online_cpu(cpu) { |
512 | if (!cpu_online(cpu)) | ||
513 | continue; | ||
514 | if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE) | 523 | if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE) |
515 | ia64_mca_wakeup(cpu); | 524 | ia64_mca_wakeup(cpu); |
516 | } | 525 | } |
@@ -528,13 +537,16 @@ ia64_mca_wakeup_all(void) | |||
528 | * Outputs : None | 537 | * Outputs : None |
529 | */ | 538 | */ |
530 | static irqreturn_t | 539 | static irqreturn_t |
531 | ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) | 540 | ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs) |
532 | { | 541 | { |
533 | unsigned long flags; | 542 | unsigned long flags; |
534 | int cpu = smp_processor_id(); | 543 | int cpu = smp_processor_id(); |
535 | 544 | ||
536 | /* Mask all interrupts */ | 545 | /* Mask all interrupts */ |
537 | local_irq_save(flags); | 546 | local_irq_save(flags); |
547 | if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0) | ||
548 | == NOTIFY_STOP) | ||
549 | ia64_mca_spin(__FUNCTION__); | ||
538 | 550 | ||
539 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; | 551 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; |
540 | /* Register with the SAL monarch that the slave has | 552 | /* Register with the SAL monarch that the slave has |
@@ -542,10 +554,18 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) | |||
542 | */ | 554 | */ |
543 | ia64_sal_mc_rendez(); | 555 | ia64_sal_mc_rendez(); |
544 | 556 | ||
557 | if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0) | ||
558 | == NOTIFY_STOP) | ||
559 | ia64_mca_spin(__FUNCTION__); | ||
560 | |||
545 | /* Wait for the monarch cpu to exit. */ | 561 | /* Wait for the monarch cpu to exit. */ |
546 | while (monarch_cpu != -1) | 562 | while (monarch_cpu != -1) |
547 | cpu_relax(); /* spin until monarch leaves */ | 563 | cpu_relax(); /* spin until monarch leaves */ |
548 | 564 | ||
565 | if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0) | ||
566 | == NOTIFY_STOP) | ||
567 | ia64_mca_spin(__FUNCTION__); | ||
568 | |||
549 | /* Enable all interrupts */ | 569 | /* Enable all interrupts */ |
550 | local_irq_restore(flags); | 570 | local_irq_restore(flags); |
551 | return IRQ_HANDLED; | 571 | return IRQ_HANDLED; |
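The three notify_die() calls bracket a slave's path through the rendezvous: check-in, return from ia64_sal_mc_rendez(), and release by the monarch. A hedged sketch of a consumer such as a kernel debugger (the handler and its registration are hypothetical; only the event codes and the NOTIFY_STOP -> ia64_mca_spin() contract come from this patch):

    static int mca_slave_watch(struct notifier_block *nb, unsigned long val,
                               void *data)
    {
            struct die_args *args = (struct die_args *)data;

            switch (val) {
            case DIE_MCA_RENDZVOUS_ENTER:   /* slave about to check in */
            case DIE_MCA_RENDZVOUS_PROCESS: /* slave back from SAL rendezvous */
            case DIE_MCA_RENDZVOUS_LEAVE:   /* monarch released the slaves */
                    printk(KERN_DEBUG "MCA event %lu, ip=0x%lx\n",
                           val, args->regs->cr_iip);
                    break;
            }
            return NOTIFY_DONE;     /* NOTIFY_STOP would park this cpu */
    }

    static struct notifier_block mca_slave_watch_nb = {
            .notifier_call = mca_slave_watch,
    };

    /* at boot: register_die_notifier(&mca_slave_watch_nb); */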
@@ -935,6 +955,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
935 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ | 955 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ |
936 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); | 956 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); |
937 | monarch_cpu = cpu; | 957 | monarch_cpu = cpu; |
958 | if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0) | ||
959 | == NOTIFY_STOP) | ||
960 | ia64_mca_spin(__FUNCTION__); | ||
938 | ia64_wait_for_slaves(cpu); | 961 | ia64_wait_for_slaves(cpu); |
939 | 962 | ||
940 | /* Wakeup all the processors which are spinning in the rendezvous loop. | 963 | /* Wakeup all the processors which are spinning in the rendezvous loop. |
@@ -944,6 +967,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
944 | * spinning in SAL does not work. | 967 | * spinning in SAL does not work. |
945 | */ | 968 | */ |
946 | ia64_mca_wakeup_all(); | 969 | ia64_mca_wakeup_all(); |
970 | if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0) | ||
971 | == NOTIFY_STOP) | ||
972 | ia64_mca_spin(__FUNCTION__); | ||
947 | 973 | ||
948 | /* Get the MCA error record and log it */ | 974 | /* Get the MCA error record and log it */ |
949 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); | 975 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); |
@@ -962,6 +988,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
962 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); | 988 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); |
963 | sos->os_status = IA64_MCA_CORRECTED; | 989 | sos->os_status = IA64_MCA_CORRECTED; |
964 | } | 990 | } |
991 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover) | ||
992 | == NOTIFY_STOP) | ||
993 | ia64_mca_spin(__FUNCTION__); | ||
965 | 994 | ||
966 | set_curr_task(cpu, previous_current); | 995 | set_curr_task(cpu, previous_current); |
967 | monarch_cpu = -1; | 996 | monarch_cpu = -1; |
@@ -1016,6 +1045,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs) | |||
1016 | 1045 | ||
1017 | cmc_polling_enabled = 1; | 1046 | cmc_polling_enabled = 1; |
1018 | spin_unlock(&cmc_history_lock); | 1047 | spin_unlock(&cmc_history_lock); |
1048 | /* If we're being hit with CMC interrupts, we won't | ||
1049 | * ever execute the schedule_work() below. Need to | ||
1050 | * disable CMC interrupts on this processor now. | ||
1051 | */ | ||
1052 | ia64_mca_cmc_vector_disable(NULL); | ||
1019 | schedule_work(&cmc_disable_work); | 1053 | schedule_work(&cmc_disable_work); |
1020 | 1054 | ||
1021 | /* | 1055 | /* |
@@ -1185,6 +1219,37 @@ ia64_mca_cpe_poll (unsigned long dummy) | |||
1185 | 1219 | ||
1186 | #endif /* CONFIG_ACPI */ | 1220 | #endif /* CONFIG_ACPI */ |
1187 | 1221 | ||
1222 | static int | ||
1223 | default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data) | ||
1224 | { | ||
1225 | int c; | ||
1226 | struct task_struct *g, *t; | ||
1227 | if (val != DIE_INIT_MONARCH_PROCESS) | ||
1228 | return NOTIFY_DONE; | ||
1229 | printk(KERN_ERR "Processes interrupted by INIT -"); | ||
1230 | for_each_online_cpu(c) { | ||
1231 | struct ia64_sal_os_state *s; | ||
1232 | t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); | ||
1233 | s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); | ||
1234 | g = s->prev_task; | ||
1235 | if (g) { | ||
1236 | if (g->pid) | ||
1237 | printk(" %d", g->pid); | ||
1238 | else | ||
1239 | printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); | ||
1240 | } | ||
1241 | } | ||
1242 | printk("\n\n"); | ||
1243 | if (read_trylock(&tasklist_lock)) { | ||
1244 | do_each_thread (g, t) { | ||
1245 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
1246 | show_stack(t, NULL); | ||
1247 | } while_each_thread (g, t); | ||
1248 | read_unlock(&tasklist_lock); | ||
1249 | } | ||
1250 | return NOTIFY_DONE; | ||
1251 | } | ||
1252 | |||
1188 | /* | 1253 | /* |
1189 | * C portion of the OS INIT handler | 1254 | * C portion of the OS INIT handler |
1190 | * | 1255 | * |
@@ -1209,8 +1274,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1209 | static atomic_t slaves; | 1274 | static atomic_t slaves; |
1210 | static atomic_t monarchs; | 1275 | static atomic_t monarchs; |
1211 | task_t *previous_current; | 1276 | task_t *previous_current; |
1212 | int cpu = smp_processor_id(), c; | 1277 | int cpu = smp_processor_id(); |
1213 | struct task_struct *g, *t; | ||
1214 | 1278 | ||
1215 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ | 1279 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ |
1216 | console_loglevel = 15; /* make sure printks make it to console */ | 1280 | console_loglevel = 15; /* make sure printks make it to console */ |
@@ -1250,8 +1314,17 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1250 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; | 1314 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; |
1251 | while (monarch_cpu == -1) | 1315 | while (monarch_cpu == -1) |
1252 | cpu_relax(); /* spin until monarch enters */ | 1316 | cpu_relax(); /* spin until monarch enters */ |
1317 | if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0) | ||
1318 | == NOTIFY_STOP) | ||
1319 | ia64_mca_spin(__FUNCTION__); | ||
1320 | if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0) | ||
1321 | == NOTIFY_STOP) | ||
1322 | ia64_mca_spin(__FUNCTION__); | ||
1253 | while (monarch_cpu != -1) | 1323 | while (monarch_cpu != -1) |
1254 | cpu_relax(); /* spin until monarch leaves */ | 1324 | cpu_relax(); /* spin until monarch leaves */ |
1325 | if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0) | ||
1326 | == NOTIFY_STOP) | ||
1327 | ia64_mca_spin(__FUNCTION__); | ||
1255 | printk("Slave on cpu %d returning to normal service.\n", cpu); | 1328 | printk("Slave on cpu %d returning to normal service.\n", cpu); |
1256 | set_curr_task(cpu, previous_current); | 1329 | set_curr_task(cpu, previous_current); |
1257 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | 1330 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; |
@@ -1260,6 +1333,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1260 | } | 1333 | } |
1261 | 1334 | ||
1262 | monarch_cpu = cpu; | 1335 | monarch_cpu = cpu; |
1336 | if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0) | ||
1337 | == NOTIFY_STOP) | ||
1338 | ia64_mca_spin(__FUNCTION__); | ||
1263 | 1339 | ||
1264 | /* | 1340 | /* |
1265 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be | 1341 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be |
@@ -1270,27 +1346,16 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1270 | printk("Delaying for 5 seconds...\n"); | 1346 | printk("Delaying for 5 seconds...\n"); |
1271 | udelay(5*1000000); | 1347 | udelay(5*1000000); |
1272 | ia64_wait_for_slaves(cpu); | 1348 | ia64_wait_for_slaves(cpu); |
1273 | printk(KERN_ERR "Processes interrupted by INIT -"); | 1349 | /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through |
1274 | for_each_online_cpu(c) { | 1350 | * to default_monarch_init_process() above and just print all the |
1275 | struct ia64_sal_os_state *s; | 1351 | * tasks. |
1276 | t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); | 1352 | */ |
1277 | s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); | 1353 | if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0) |
1278 | g = s->prev_task; | 1354 | == NOTIFY_STOP) |
1279 | if (g) { | 1355 | ia64_mca_spin(__FUNCTION__); |
1280 | if (g->pid) | 1356 | if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0) |
1281 | printk(" %d", g->pid); | 1357 | == NOTIFY_STOP) |
1282 | else | 1358 | ia64_mca_spin(__FUNCTION__); |
1283 | printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); | ||
1284 | } | ||
1285 | } | ||
1286 | printk("\n\n"); | ||
1287 | if (read_trylock(&tasklist_lock)) { | ||
1288 | do_each_thread (g, t) { | ||
1289 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
1290 | show_stack(t, NULL); | ||
1291 | } while_each_thread (g, t); | ||
1292 | read_unlock(&tasklist_lock); | ||
1293 | } | ||
1294 | printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); | 1359 | printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); |
1295 | atomic_dec(&monarchs); | 1360 | atomic_dec(&monarchs); |
1296 | set_curr_task(cpu, previous_current); | 1361 | set_curr_task(cpu, previous_current); |
@@ -1459,6 +1524,10 @@ ia64_mca_init(void) | |||
1459 | s64 rc; | 1524 | s64 rc; |
1460 | struct ia64_sal_retval isrv; | 1525 | struct ia64_sal_retval isrv; |
1461 | u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ | 1526 | u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ |
1527 | static struct notifier_block default_init_monarch_nb = { | ||
1528 | .notifier_call = default_monarch_init_process, | ||
1529 | .priority = 0 /* we need to be notified last */ | ||
1530 | }; | ||
1462 | 1531 | ||
1463 | IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); | 1532 | IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); |
1464 | 1533 | ||
@@ -1552,6 +1621,10 @@ ia64_mca_init(void) | |||
1552 | "(status %ld)\n", rc); | 1621 | "(status %ld)\n", rc); |
1553 | return; | 1622 | return; |
1554 | } | 1623 | } |
1624 | if (register_die_notifier(&default_init_monarch_nb)) { | ||
1625 | printk(KERN_ERR "Failed to register default monarch INIT process\n"); | ||
1626 | return; | ||
1627 | } | ||
1555 | 1628 | ||
1556 | IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); | 1629 | IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); |
1557 | 1630 | ||
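default_monarch_init_process() is deliberately registered at priority 0 so that any other consumer of DIE_INIT_MONARCH_PROCESS registered with a higher priority runs first. A sketch of such an override (the function is hypothetical): returning NOTIFY_DONE lets the default task dump still run afterwards, while NOTIFY_STOP would both short-circuit the chain and leave the monarch spinning in ia64_mca_spin().

    static int my_init_monarch(struct notifier_block *nb, unsigned long val,
                               void *data)
    {
            if (val != DIE_INIT_MONARCH_PROCESS)
                    return NOTIFY_DONE;
            /* capture whatever state is wanted before the default dump */
            return NOTIFY_DONE;     /* fall through to the priority-0 default */
    }

    static struct notifier_block my_init_nb = {
            .notifier_call = my_init_monarch,
            .priority = 1,          /* runs before the priority-0 default */
    };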
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index 499a065f4e60..db32fc1d3935 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
@@ -489,24 +489,27 @@ ia64_state_save: | |||
489 | ;; | 489 | ;; |
490 | st8 [temp1]=r17,16 // pal_min_state | 490 | st8 [temp1]=r17,16 // pal_min_state |
491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT | 491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT |
492 | mov r6=IA64_KR(CURRENT_STACK) | ||
493 | ;; | ||
494 | st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK | ||
495 | st8 [temp2]=r0,16 // prev_task, starts off as NULL | ||
492 | mov r6=cr.ifa | 496 | mov r6=cr.ifa |
493 | ;; | 497 | ;; |
494 | st8 [temp1]=r0,16 // prev_task, starts off as NULL | 498 | st8 [temp1]=r12,16 // cr.isr |
495 | st8 [temp2]=r12,16 // cr.isr | 499 | st8 [temp2]=r6,16 // cr.ifa |
496 | mov r12=cr.itir | 500 | mov r12=cr.itir |
497 | ;; | 501 | ;; |
498 | st8 [temp1]=r6,16 // cr.ifa | 502 | st8 [temp1]=r12,16 // cr.itir |
499 | st8 [temp2]=r12,16 // cr.itir | 503 | st8 [temp2]=r11,16 // cr.iipa |
500 | mov r12=cr.iim | 504 | mov r12=cr.iim |
501 | ;; | 505 | ;; |
502 | st8 [temp1]=r11,16 // cr.iipa | 506 | st8 [temp1]=r12,16 // cr.iim |
503 | st8 [temp2]=r12,16 // cr.iim | ||
504 | mov r6=cr.iha | ||
505 | (p1) mov r12=IA64_MCA_COLD_BOOT | 507 | (p1) mov r12=IA64_MCA_COLD_BOOT |
506 | (p2) mov r12=IA64_INIT_WARM_BOOT | 508 | (p2) mov r12=IA64_INIT_WARM_BOOT |
509 | mov r6=cr.iha | ||
507 | ;; | 510 | ;; |
508 | st8 [temp1]=r6,16 // cr.iha | 511 | st8 [temp2]=r6,16 // cr.iha |
509 | st8 [temp2]=r12 // os_status, default is cold boot | 512 | st8 [temp1]=r12 // os_status, default is cold boot |
510 | mov r6=IA64_MCA_SAME_CONTEXT | 513 | mov r6=IA64_MCA_SAME_CONTEXT |
511 | ;; | 514 | ;; |
512 | st8 [temp1]=r6 // context, default is same context | 515 | st8 [temp1]=r6 // context, default is same context |
@@ -823,9 +826,12 @@ ia64_state_restore: | |||
823 | ld8 r12=[temp1],16 // sal_ra | 826 | ld8 r12=[temp1],16 // sal_ra |
824 | ld8 r9=[temp2],16 // sal_gp | 827 | ld8 r9=[temp2],16 // sal_gp |
825 | ;; | 828 | ;; |
826 | ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task | 829 | ld8 r22=[temp1],16 // pal_min_state, virtual |
827 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT | 830 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT |
828 | ;; | 831 | ;; |
832 | ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK | ||
833 | ld8 r20=[temp2],16 // prev_task | ||
834 | ;; | ||
829 | ld8 temp3=[temp1],16 // cr.isr | 835 | ld8 temp3=[temp1],16 // cr.isr |
830 | ld8 temp4=[temp2],16 // cr.ifa | 836 | ld8 temp4=[temp2],16 // cr.ifa |
831 | ;; | 837 | ;; |
@@ -846,6 +852,45 @@ ia64_state_restore: | |||
846 | ld8 r8=[temp1] // os_status | 852 | ld8 r8=[temp1] // os_status |
847 | ld8 r10=[temp2] // context | 853 | ld8 r10=[temp2] // context |
848 | 854 | ||
855 | /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To | ||
856 | * avoid any dependencies on the algorithm in ia64_switch_to(), just | ||
857 | * purge any existing CURRENT_STACK mapping and insert the new one. | ||
858 | * | ||
859 | * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains | ||
860 | * prev_IA64_KR_CURRENT, these values may have been changed by the C | ||
861 | * code. Do not use r8, r9, r10, r22, they contain values ready for | ||
862 | * the return to SAL. | ||
863 | */ | ||
864 | |||
865 | mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK | ||
866 | ;; | ||
867 | shl r15=r15,IA64_GRANULE_SHIFT | ||
868 | ;; | ||
869 | dep r15=-1,r15,61,3 // virtual granule | ||
870 | mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps | ||
871 | ;; | ||
872 | ptr.d r15,r18 | ||
873 | ;; | ||
874 | srlz.d | ||
875 | |||
876 | extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT | ||
877 | shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK | ||
878 | movl r21=PAGE_KERNEL // page properties | ||
879 | ;; | ||
880 | mov IA64_KR(CURRENT_STACK)=r16 | ||
881 | cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region? | ||
882 | or r21=r20,r21 // construct PA | page properties | ||
883 | (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:( | ||
884 | ;; | ||
885 | mov cr.itir=r18 | ||
886 | mov cr.ifa=r21 | ||
887 | mov r20=IA64_TR_CURRENT_STACK | ||
888 | ;; | ||
889 | itr.d dtr[r20]=r21 | ||
890 | ;; | ||
891 | srlz.d | ||
892 | 1: | ||
893 | |||
849 | br.sptk b0 | 894 | br.sptk b0 |
850 | 895 | ||
851 | //EndStub////////////////////////////////////////////////////////////////////// | 896 | //EndStub////////////////////////////////////////////////////////////////////// |
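The new tail of ia64_state_restore pins IA64_TR_CURRENT_STACK to the granule of the stack being resumed: purge whatever the TR maps now, update the kernel register, then insert the new translation, skipping the insert for region-5 stacks (the cpu 0 idle task case). Roughly the same sequence in C, as a sketch only; the ia64_ptrd()/ia64_itr() wrapper names and the omission of the psr.ic/interrupt discipline are assumptions:

    /* new_granule: physical granule number of the stack being resumed */
    unsigned long old_va, new_va;

    old_va = (unsigned long) __va(ia64_get_kr(IA64_KR_CURRENT_STACK)
                                  << IA64_GRANULE_SHIFT);
    new_va = (unsigned long) __va(new_granule << IA64_GRANULE_SHIFT);

    ia64_ptrd(old_va, IA64_GRANULE_SHIFT << 2);  /* ptr.d: purge old TR */
    ia64_srlz_d();
    ia64_set_kr(IA64_KR_CURRENT_STACK, new_granule);
    ia64_itr(0x2, IA64_TR_CURRENT_STACK, new_va, /* itr.d: insert new TR */
             pte_val(pfn_pte((new_granule << IA64_GRANULE_SHIFT) >> PAGE_SHIFT,
                             PAGE_KERNEL)),
             IA64_GRANULE_SHIFT);
    ia64_srlz_d();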
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers: | |||
982 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp | 1027 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp |
983 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack | 1028 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack |
984 | add r13=temp1, r3 // set current to start of MCA/INIT stack | 1029 | add r13=temp1, r3 // set current to start of MCA/INIT stack |
1030 | add r20=temp1, r3 // physical start of MCA/INIT stack | ||
985 | ;; | 1031 | ;; |
986 | ld8 r1=[temp4] // OS GP from SAL OS state | 1032 | ld8 r1=[temp4] // OS GP from SAL OS state |
987 | ;; | 1033 | ;; |
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers: | |||
991 | ;; | 1037 | ;; |
992 | mov IA64_KR(CURRENT)=r13 | 1038 | mov IA64_KR(CURRENT)=r13 |
993 | 1039 | ||
994 | // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? | 1040 | /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid |
1041 | * any dependencies on the algorithm in ia64_switch_to(), just purge | ||
1042 | * any existing CURRENT_STACK mapping and insert the new one. | ||
1043 | */ | ||
1044 | |||
1045 | mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK | ||
1046 | ;; | ||
1047 | shl r16=r16,IA64_GRANULE_SHIFT | ||
1048 | ;; | ||
1049 | dep r16=-1,r16,61,3 // virtual granule | ||
1050 | mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps | ||
1051 | ;; | ||
1052 | ptr.d r16,r18 | ||
1053 | ;; | ||
1054 | srlz.d | ||
1055 | |||
1056 | shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack | ||
1057 | movl r21=PAGE_KERNEL // page properties | ||
1058 | ;; | ||
1059 | mov IA64_KR(CURRENT_STACK)=r16 | ||
1060 | or r21=r20,r21 // construct PA | page properties | ||
1061 | ;; | ||
1062 | mov cr.itir=r18 | ||
1063 | mov cr.ifa=r13 | ||
1064 | mov r20=IA64_TR_CURRENT_STACK | ||
1065 | ;; | ||
1066 | itr.d dtr[r20]=r21 | ||
1067 | ;; | ||
1068 | srlz.d | ||
995 | 1069 | ||
996 | br.sptk b0 | 1070 | br.sptk b0 |
997 | 1071 | ||
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 80f83d6cdbfc..3492e3211a44 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c | |||
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE]; | |||
56 | static int num_page_isolate = 0; | 56 | static int num_page_isolate = 0; |
57 | 57 | ||
58 | typedef enum { | 58 | typedef enum { |
59 | ISOLATE_NG = 0, | 59 | ISOLATE_NG, |
60 | ISOLATE_OK = 1 | 60 | ISOLATE_OK, |
61 | ISOLATE_NONE | ||
61 | } isolate_status_t; | 62 | } isolate_status_t; |
62 | 63 | ||
63 | /* | 64 | /* |
@@ -74,7 +75,7 @@ static struct { | |||
74 | * @paddr: poisoned memory location | 75 | * @paddr: poisoned memory location |
75 | * | 76 | * |
76 | * Return value: | 77 | * Return value: |
77 | * ISOLATE_OK / ISOLATE_NG | 78 | * one of isolate_status_t, ISOLATE_OK/NG/NONE. |
78 | */ | 79 | */ |
79 | 80 | ||
80 | static isolate_status_t | 81 | static isolate_status_t |
@@ -85,7 +86,10 @@ mca_page_isolate(unsigned long paddr) | |||
85 | 86 | ||
86 | /* whether physical address is valid or not */ | 87 | /* whether physical address is valid or not */ |
87 | if (!ia64_phys_addr_valid(paddr)) | 88 | if (!ia64_phys_addr_valid(paddr)) |
88 | return ISOLATE_NG; | 89 | return ISOLATE_NONE; |
90 | |||
91 | if (!pfn_valid(paddr >> PAGE_SHIFT)) | ||
92 | return ISOLATE_NONE; | ||
89 | 93 | ||
90 | /* convert physical address to physical page number */ | 94 | /* convert physical address to physical page number */ |
91 | p = pfn_to_page(paddr>>PAGE_SHIFT); | 95 | p = pfn_to_page(paddr>>PAGE_SHIFT); |
@@ -104,6 +108,7 @@ mca_page_isolate(unsigned long paddr) | |||
104 | return ISOLATE_NG; | 108 | return ISOLATE_NG; |
105 | 109 | ||
106 | /* add attribute 'Reserved' and register the page */ | 110 | /* add attribute 'Reserved' and register the page */ |
111 | get_page(p); | ||
107 | SetPageReserved(p); | 112 | SetPageReserved(p); |
108 | page_isolate[num_page_isolate++] = p; | 113 | page_isolate[num_page_isolate++] = p; |
109 | 114 | ||
@@ -122,10 +127,15 @@ mca_handler_bh(unsigned long paddr) | |||
122 | current->pid, current->comm); | 127 | current->pid, current->comm); |
123 | 128 | ||
124 | spin_lock(&mca_bh_lock); | 129 | spin_lock(&mca_bh_lock); |
125 | if (mca_page_isolate(paddr) == ISOLATE_OK) { | 130 | switch (mca_page_isolate(paddr)) { |
131 | case ISOLATE_OK: | ||
126 | printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); | 132 | printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); |
127 | } else { | 133 | break; |
134 | case ISOLATE_NG: | ||
128 | printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); | 135 | printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); |
136 | break; | ||
137 | default: | ||
138 | break; | ||
129 | } | 139 | } |
130 | spin_unlock(&mca_bh_lock); | 140 | spin_unlock(&mca_bh_lock); |
131 | 141 | ||
@@ -537,9 +547,20 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, | |||
537 | (pal_processor_state_info_t*)peidx_psp(peidx); | 547 | (pal_processor_state_info_t*)peidx_psp(peidx); |
538 | 548 | ||
539 | /* | 549 | /* |
540 | * We cannot recover errors with other than bus_check. | 550 | * Processor recovery status must key off of the PAL recovery |
551 | * status in the Processor State Parameter. | ||
552 | */ | ||
553 | |||
554 | /* | ||
555 | * The machine check is corrected. | ||
541 | */ | 556 | */ |
542 | if (psp->cc || psp->rc || psp->uc) | 557 | if (psp->cm == 1) |
558 | return 1; | ||
559 | |||
560 | /* | ||
561 | * The error was not contained. Software must be reset. | ||
562 | */ | ||
563 | if (psp->us || psp->ci == 0) | ||
543 | return 0; | 564 | return 0; |
544 | 565 | ||
545 | /* | 566 | /* |
@@ -560,8 +581,6 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, | |||
560 | return 0; | 581 | return 0; |
561 | if (pbci->eb && pbci->bsi > 0) | 582 | if (pbci->eb && pbci->bsi > 0) |
562 | return 0; | 583 | return 0; |
563 | if (psp->ci == 0) | ||
564 | return 0; | ||
565 | 584 | ||
566 | /* | 585 | /* |
567 | * This is a local MCA and estimated as recoverable external bus error. | 586 | * This is a local MCA and estimated as recoverable external bus error. |
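Recovery now keys strictly off the PAL bits in the Processor State Parameter, in the order the new comments describe. As a condensed decision sketch (psp is the pal_processor_state_info_t taken from the record):

    if (psp->cm == 1)               /* machine check already corrected */
            return 1;               /* recovered */
    if (psp->us || psp->ci == 0)    /* uncontained, or not contained   */
            return 0;               /* software must be reset          */
    /* ... only then consult the bus_check heuristics below ... */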
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index f1aca7cffd12..7a2f0a798d12 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c | |||
@@ -947,8 +947,8 @@ void | |||
947 | percpu_modcopy (void *pcpudst, const void *src, unsigned long size) | 947 | percpu_modcopy (void *pcpudst, const void *src, unsigned long size) |
948 | { | 948 | { |
949 | unsigned int i; | 949 | unsigned int i; |
950 | for (i = 0; i < NR_CPUS; i++) | 950 | for_each_cpu(i) { |
951 | if (cpu_possible(i)) | 951 | memcpy(pcpudst + __per_cpu_offset[i], src, size); |
952 | memcpy(pcpudst + __per_cpu_offset[i], src, size); | 952 | } |
953 | } | 953 | } |
954 | #endif /* CONFIG_SMP */ | 954 | #endif /* CONFIG_SMP */ |
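The NR_CPUS conversions through this series all follow one shape: an index scan guarded by cpu_possible()/cpu_online() becomes a cpumask iterator; in this kernel for_each_cpu() walks cpu_possible_map and for_each_online_cpu() walks cpu_online_map. The pattern, with a hypothetical touch_cpu() action:

    int i;

    /* before: probe every index up to NR_CPUS */
    for (i = 0; i < NR_CPUS; i++)
            if (cpu_possible(i))
                    touch_cpu(i);

    /* after: visit only the set bits of the mask */
    for_each_cpu(i) {
            touch_cpu(i);
    }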
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index 367804a605fa..6a4ac7d70b35 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c | |||
@@ -64,22 +64,30 @@ ia64_patch (u64 insn_addr, u64 mask, u64 val) | |||
64 | void | 64 | void |
65 | ia64_patch_imm64 (u64 insn_addr, u64 val) | 65 | ia64_patch_imm64 (u64 insn_addr, u64 val) |
66 | { | 66 | { |
67 | ia64_patch(insn_addr, | 67 | /* The assembler may generate an offset pointing to either slot 1 |
68 | or slot 2 for a long (2-slot) instruction, occupying slots 1 | ||
69 | and 2. */ | ||
70 | insn_addr &= -16UL; | ||
71 | ia64_patch(insn_addr + 2, | ||
68 | 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ | 72 | 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ |
69 | | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ | 73 | | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ |
70 | | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ | 74 | | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ |
71 | | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ | 75 | | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ |
72 | | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); | 76 | | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); |
73 | ia64_patch(insn_addr - 1, 0x1ffffffffffUL, val >> 22); | 77 | ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); |
74 | } | 78 | } |
75 | 79 | ||
76 | void | 80 | void |
77 | ia64_patch_imm60 (u64 insn_addr, u64 val) | 81 | ia64_patch_imm60 (u64 insn_addr, u64 val) |
78 | { | 82 | { |
79 | ia64_patch(insn_addr, | 83 | /* The assembler may generate an offset pointing to either slot 1 |
84 | or slot 2 for a long (2-slot) instruction, occupying slots 1 | ||
85 | and 2. */ | ||
86 | insn_addr &= -16UL; | ||
87 | ia64_patch(insn_addr + 2, | ||
80 | 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ | 88 | 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ |
81 | | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); | 89 | | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); |
82 | ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18); | 90 | ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18); |
83 | } | 91 | } |
84 | 92 | ||
85 | /* | 93 | /* |
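A 64-bit immediate is carried by movl, an X-unit instruction spanning slots 1 and 2 of one 16-byte bundle, and ia64_patch() takes a bundle address with the slot index in the low bits. Masking the incoming address with -16UL first makes the patch site independent of whether the tool chain's relocation pointed at slot 1 or slot 2. The imm64 decomposition, restated as a sketch:

    /* How ia64_patch_imm64() addresses the two slots after the fix. */
    u64 bundle  = insn_addr & -16UL;          /* 16-byte aligned bundle  */
    u64 hi41    = val >> 22;                  /* 41 contiguous high bits */
    u64 scatter =   ((val & 0x8000000000000000UL) >> 27)  /* bit 63 -> 36 */
                  | ((val & 0x0000000000200000UL) <<  0)  /* bit 21 -> 21 */
                  | ((val & 0x00000000001f0000UL) <<  6)  /* bit 16 -> 22 */
                  | ((val & 0x000000000000ff80UL) << 20)  /* bit  7 -> 27 */
                  | ((val & 0x000000000000007fUL) << 13); /* bit  0 -> 13 */

    ia64_patch(bundle + 1, 0x1ffffffffffUL, hi41);     /* slot 1 */
    ia64_patch(bundle + 2, 0x01fffefe000UL, scatter);  /* slot 2 */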
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index d71731ee5b61..410d4804fa6e 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -2352,7 +2352,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon | |||
2352 | insert_vm_struct(mm, vma); | 2352 | insert_vm_struct(mm, vma); |
2353 | 2353 | ||
2354 | mm->total_vm += size >> PAGE_SHIFT; | 2354 | mm->total_vm += size >> PAGE_SHIFT; |
2355 | vm_stat_account(vma); | 2355 | vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, |
2356 | vma_pages(vma)); | ||
2356 | up_write(&task->mm->mmap_sem); | 2357 | up_write(&task->mm->mmap_sem); |
2357 | 2358 | ||
2358 | /* | 2359 | /* |
@@ -4939,7 +4940,7 @@ abort_locked: | |||
4939 | if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; | 4940 | if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; |
4940 | 4941 | ||
4941 | error_args: | 4942 | error_args: |
4942 | if (args_k) kfree(args_k); | 4943 | kfree(args_k); |
4943 | 4944 | ||
4944 | DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); | 4945 | DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); |
4945 | 4946 | ||
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 051e050359e4..2e33665d9c18 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -4,6 +4,9 @@ | |||
4 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support | 6 | * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support |
7 | * | ||
8 | * 2005-10-07 Keith Owens <kaos@sgi.com> | ||
9 | * Add notify_die() hooks. | ||
7 | */ | 10 | */ |
8 | #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ | 11 | #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ |
9 | #include <linux/config.h> | 12 | #include <linux/config.h> |
@@ -34,6 +37,7 @@ | |||
34 | #include <asm/elf.h> | 37 | #include <asm/elf.h> |
35 | #include <asm/ia32.h> | 38 | #include <asm/ia32.h> |
36 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
40 | #include <asm/kdebug.h> | ||
37 | #include <asm/pgalloc.h> | 41 | #include <asm/pgalloc.h> |
38 | #include <asm/processor.h> | 42 | #include <asm/processor.h> |
39 | #include <asm/sal.h> | 43 | #include <asm/sal.h> |
@@ -197,11 +201,12 @@ void | |||
197 | default_idle (void) | 201 | default_idle (void) |
198 | { | 202 | { |
199 | local_irq_enable(); | 203 | local_irq_enable(); |
200 | while (!need_resched()) | 204 | while (!need_resched()) { |
201 | if (can_do_pal_halt) | 205 | if (can_do_pal_halt) |
202 | safe_halt(); | 206 | safe_halt(); |
203 | else | 207 | else |
204 | cpu_relax(); | 208 | cpu_relax(); |
209 | } | ||
205 | } | 210 | } |
206 | 211 | ||
207 | #ifdef CONFIG_HOTPLUG_CPU | 212 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -263,16 +268,20 @@ void __attribute__((noreturn)) | |||
263 | cpu_idle (void) | 268 | cpu_idle (void) |
264 | { | 269 | { |
265 | void (*mark_idle)(int) = ia64_mark_idle; | 270 | void (*mark_idle)(int) = ia64_mark_idle; |
271 | int cpu = smp_processor_id(); | ||
266 | 272 | ||
267 | /* endless idle loop with no priority at all */ | 273 | /* endless idle loop with no priority at all */ |
268 | while (1) { | 274 | while (1) { |
275 | if (can_do_pal_halt) | ||
276 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
277 | else | ||
278 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
279 | |||
280 | if (!need_resched()) { | ||
281 | void (*idle)(void); | ||
269 | #ifdef CONFIG_SMP | 282 | #ifdef CONFIG_SMP |
270 | if (!need_resched()) | ||
271 | min_xtp(); | 283 | min_xtp(); |
272 | #endif | 284 | #endif |
273 | while (!need_resched()) { | ||
274 | void (*idle)(void); | ||
275 | |||
276 | if (__get_cpu_var(cpu_idle_state)) | 285 | if (__get_cpu_var(cpu_idle_state)) |
277 | __get_cpu_var(cpu_idle_state) = 0; | 286 | __get_cpu_var(cpu_idle_state) = 0; |
278 | 287 | ||
@@ -284,17 +293,17 @@ cpu_idle (void) | |||
284 | if (!idle) | 293 | if (!idle) |
285 | idle = default_idle; | 294 | idle = default_idle; |
286 | (*idle)(); | 295 | (*idle)(); |
287 | } | 296 | if (mark_idle) |
288 | 297 | (*mark_idle)(0); | |
289 | if (mark_idle) | ||
290 | (*mark_idle)(0); | ||
291 | |||
292 | #ifdef CONFIG_SMP | 298 | #ifdef CONFIG_SMP |
293 | normal_xtp(); | 299 | normal_xtp(); |
294 | #endif | 300 | #endif |
301 | } | ||
302 | preempt_enable_no_resched(); | ||
295 | schedule(); | 303 | schedule(); |
304 | preempt_disable(); | ||
296 | check_pgt_cache(); | 305 | check_pgt_cache(); |
297 | if (cpu_is_offline(smp_processor_id())) | 306 | if (cpu_is_offline(cpu)) |
298 | play_dead(); | 307 | play_dead(); |
299 | } | 308 | } |
300 | } | 309 | } |
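The restructured loop also encodes the scheduler wakeup contract: idling in PAL_HALT requires a resched IPI, so TIF_POLLING_NRFLAG is cleared; spinning on need_resched() keeps the flag set so the waking cpu may skip the IPI. The preempt_enable_no_resched()/preempt_disable() pair around schedule() reflects idle now running with preemption disabled. The skeleton:

    /* Skeleton of the contract above; details (xtp, mark_idle) elided. */
    while (1) {
            if (can_do_pal_halt)
                    clear_thread_flag(TIF_POLLING_NRFLAG); /* halt: IPI needed */
            else
                    set_thread_flag(TIF_POLLING_NRFLAG);   /* spin: IPI optional */

            while (!need_resched()) {
                    if (can_do_pal_halt)
                            safe_halt();
                    else
                            cpu_relax();
            }

            preempt_enable_no_resched();
            schedule();
            preempt_disable();
    }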
@@ -709,13 +718,6 @@ kernel_thread_helper (int (*fn)(void *), void *arg) | |||
709 | void | 718 | void |
710 | flush_thread (void) | 719 | flush_thread (void) |
711 | { | 720 | { |
712 | /* | ||
713 | * Remove function-return probe instances associated with this task | ||
714 | * and put them back on the free list. Do not insert an exit probe for | ||
715 | * this function, it will be disabled by kprobe_flush_task if you do. | ||
716 | */ | ||
717 | kprobe_flush_task(current); | ||
718 | |||
719 | /* drop floating-point and debug-register state if it exists: */ | 721 | /* drop floating-point and debug-register state if it exists: */ |
720 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); | 722 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); |
721 | ia64_drop_fpu(current); | 723 | ia64_drop_fpu(current); |
@@ -804,12 +806,14 @@ cpu_halt (void) | |||
804 | void | 806 | void |
805 | machine_restart (char *restart_cmd) | 807 | machine_restart (char *restart_cmd) |
806 | { | 808 | { |
809 | (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); | ||
807 | (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); | 810 | (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); |
808 | } | 811 | } |
809 | 812 | ||
810 | void | 813 | void |
811 | machine_halt (void) | 814 | machine_halt (void) |
812 | { | 815 | { |
816 | (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0); | ||
813 | cpu_halt(); | 817 | cpu_halt(); |
814 | } | 818 | } |
815 | 819 | ||
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index bbb8bc7c0552..4b19d0410632 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -587,8 +587,9 @@ thread_matches (struct task_struct *thread, unsigned long addr) | |||
587 | static struct task_struct * | 587 | static struct task_struct * |
588 | find_thread_for_addr (struct task_struct *child, unsigned long addr) | 588 | find_thread_for_addr (struct task_struct *child, unsigned long addr) |
589 | { | 589 | { |
590 | struct task_struct *g, *p; | 590 | struct task_struct *p; |
591 | struct mm_struct *mm; | 591 | struct mm_struct *mm; |
592 | struct list_head *this, *next; | ||
592 | int mm_users; | 593 | int mm_users; |
593 | 594 | ||
594 | if (!(mm = get_task_mm(child))) | 595 | if (!(mm = get_task_mm(child))) |
@@ -600,28 +601,21 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr) | |||
600 | goto out; /* not multi-threaded */ | 601 | goto out; /* not multi-threaded */ |
601 | 602 | ||
602 | /* | 603 | /* |
603 | * First, traverse the child's thread-list. Good for scalability with | 604 | * Traverse the current process' children list. Every task that |
604 | * NPTL-threads. | 605 | * one attaches to becomes a child. And it is only attached children |
606 | * of the debugger that are of interest (ptrace_check_attach checks | ||
607 | * for this). | ||
605 | */ | 608 | */ |
606 | p = child; | 609 | list_for_each_safe(this, next, ¤t->children) { |
607 | do { | 610 | p = list_entry(this, struct task_struct, sibling); |
608 | if (thread_matches(p, addr)) { | 611 | if (p->mm != mm) |
609 | child = p; | ||
610 | goto out; | ||
611 | } | ||
612 | if (mm_users-- <= 1) | ||
613 | goto out; | ||
614 | } while ((p = next_thread(p)) != child); | ||
615 | |||
616 | do_each_thread(g, p) { | ||
617 | if (child->mm != mm) | ||
618 | continue; | 612 | continue; |
619 | |||
620 | if (thread_matches(p, addr)) { | 613 | if (thread_matches(p, addr)) { |
621 | child = p; | 614 | child = p; |
622 | goto out; | 615 | goto out; |
623 | } | 616 | } |
624 | } while_each_thread(g, p); | 617 | } |
618 | |||
625 | out: | 619 | out: |
626 | mmput(mm); | 620 | mmput(mm); |
627 | return child; | 621 | return child; |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 1f5c26dbe705..5add0bcf87a7 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -78,7 +78,27 @@ struct screen_info screen_info; | |||
78 | unsigned long vga_console_iobase; | 78 | unsigned long vga_console_iobase; |
79 | unsigned long vga_console_membase; | 79 | unsigned long vga_console_membase; |
80 | 80 | ||
81 | static struct resource data_resource = { | ||
82 | .name = "Kernel data", | ||
83 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
84 | }; | ||
85 | |||
86 | static struct resource code_resource = { | ||
87 | .name = "Kernel code", | ||
88 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
89 | }; | ||
90 | extern void efi_initialize_iomem_resources(struct resource *, | ||
91 | struct resource *); | ||
92 | extern char _text[], _end[], _etext[]; | ||
93 | |||
81 | unsigned long ia64_max_cacheline_size; | 94 | unsigned long ia64_max_cacheline_size; |
95 | |||
96 | int dma_get_cache_alignment(void) | ||
97 | { | ||
98 | return ia64_max_cacheline_size; | ||
99 | } | ||
100 | EXPORT_SYMBOL(dma_get_cache_alignment); | ||
101 | |||
82 | unsigned long ia64_iobase; /* virtual address for I/O accesses */ | 102 | unsigned long ia64_iobase; /* virtual address for I/O accesses */ |
83 | EXPORT_SYMBOL(ia64_iobase); | 103 | EXPORT_SYMBOL(ia64_iobase); |
84 | struct io_space io_space[MAX_IO_SPACES]; | 104 | struct io_space io_space[MAX_IO_SPACES]; |
@@ -171,6 +191,22 @@ sort_regions (struct rsvd_region *rsvd_region, int max) | |||
171 | } | 191 | } |
172 | } | 192 | } |
173 | 193 | ||
194 | /* | ||
195 | * Request address space for all standard resources | ||
196 | */ | ||
197 | static int __init register_memory(void) | ||
198 | { | ||
199 | code_resource.start = ia64_tpa(_text); | ||
200 | code_resource.end = ia64_tpa(_etext) - 1; | ||
201 | data_resource.start = ia64_tpa(_etext); | ||
202 | data_resource.end = ia64_tpa(_end) - 1; | ||
203 | efi_initialize_iomem_resources(&code_resource, &data_resource); | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | __initcall(register_memory); | ||
209 | |||
174 | /** | 210 | /** |
175 | * reserve_memory - setup reserved memory areas | 211 | * reserve_memory - setup reserved memory areas |
176 | * | 212 | * |
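register_memory() only fills in the kernel text and data ranges; the visible /proc/iomem tree comes from efi_initialize_iomem_resources() walking the EFI memory map. A sketch of the per-descriptor pattern such a walker typically follows (the md_start/md_end locals are assumptions; the nesting mirrors what other architectures do):

    /* for each EFI descriptor describing conventional memory ... */
    struct resource *res = alloc_bootmem(sizeof(*res));

    res->name  = "System RAM";
    res->start = md_start;
    res->end   = md_end;
    res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    request_resource(&iomem_resource, res);

    /* nest the kernel ranges under the RAM block containing them */
    if (code_resource.start >= res->start && code_resource.end <= res->end)
            request_resource(res, &code_resource);
    if (data_resource.start >= res->start && data_resource.end <= res->end)
            request_resource(res, &data_resource);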
@@ -211,6 +247,9 @@ reserve_memory (void) | |||
211 | } | 247 | } |
212 | #endif | 248 | #endif |
213 | 249 | ||
250 | efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); | ||
251 | n++; | ||
252 | |||
214 | /* end of memory marker */ | 253 | /* end of memory marker */ |
215 | rsvd_region[n].start = ~0UL; | 254 | rsvd_region[n].start = ~0UL; |
216 | rsvd_region[n].end = ~0UL; | 255 | rsvd_region[n].end = ~0UL; |
@@ -244,28 +283,31 @@ find_initrd (void) | |||
244 | static void __init | 283 | static void __init |
245 | io_port_init (void) | 284 | io_port_init (void) |
246 | { | 285 | { |
247 | extern unsigned long ia64_iobase; | ||
248 | unsigned long phys_iobase; | 286 | unsigned long phys_iobase; |
249 | 287 | ||
250 | /* | 288 | /* |
251 | * Set `iobase' to the appropriate address in region 6 (uncached access range). | 289 | * Set `iobase' based on the EFI memory map or, failing that, the |
290 | * value firmware left in ar.k0. | ||
252 | * | 291 | * |
253 | * The EFI memory map is the "preferred" location to get the I/O port space base, | 292 | * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute |
254 | * rather the relying on AR.KR0. This should become more clear in future SAL | 293 | * the port's virtual address, so ia32_load_state() loads it with a |
255 | * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is | 294 | * user virtual address. But in ia64 mode, glibc uses the |
256 | * found in the memory map. | 295 | * *physical* address in ar.k0 to mmap the appropriate area from |
296 | * /dev/mem, and the inX()/outX() interfaces use MMIO. In both | ||
297 | * cases, user-mode can only use the legacy 0-64K I/O port space. | ||
298 | * | ||
299 | * ar.k0 is not involved in kernel I/O port accesses, which can use | ||
300 | * any of the I/O port spaces and are done via MMIO using the | ||
301 | * virtual mmio_base from the appropriate io_space[]. | ||
257 | */ | 302 | */ |
258 | phys_iobase = efi_get_iobase(); | 303 | phys_iobase = efi_get_iobase(); |
259 | if (phys_iobase) | 304 | if (!phys_iobase) { |
260 | /* set AR.KR0 since this is all we use it for anyway */ | ||
261 | ia64_set_kr(IA64_KR_IO_BASE, phys_iobase); | ||
262 | else { | ||
263 | phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); | 305 | phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); |
264 | printk(KERN_INFO "No I/O port range found in EFI memory map, falling back " | 306 | printk(KERN_INFO "No I/O port range found in EFI memory map, " |
265 | "to AR.KR0\n"); | 307 | "falling back to AR.KR0 (0x%lx)\n", phys_iobase); |
266 | printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase); | ||
267 | } | 308 | } |
268 | ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); | 309 | ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); |
310 | ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); | ||
269 | 311 | ||
270 | /* setup legacy IO port space */ | 312 | /* setup legacy IO port space */ |
271 | io_space[0].mmio_base = ia64_iobase; | 313 | io_space[0].mmio_base = ia64_iobase; |
@@ -419,6 +461,7 @@ setup_arch (char **cmdline_p) | |||
419 | #endif | 461 | #endif |
420 | 462 | ||
421 | cpu_init(); /* initialize the bootstrap CPU */ | 463 | cpu_init(); /* initialize the bootstrap CPU */ |
464 | mmu_context_init(); /* initialize context_id bitmap */ | ||
422 | 465 | ||
423 | #ifdef CONFIG_ACPI | 466 | #ifdef CONFIG_ACPI |
424 | acpi_boot_init(); | 467 | acpi_boot_init(); |
@@ -526,7 +569,7 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
526 | c->itc_freq / 1000000, c->itc_freq % 1000000, | 569 | c->itc_freq / 1000000, c->itc_freq % 1000000, |
527 | lpj*HZ/500000, (lpj*HZ/5000) % 100); | 570 | lpj*HZ/500000, (lpj*HZ/5000) % 100); |
528 | #ifdef CONFIG_SMP | 571 | #ifdef CONFIG_SMP |
529 | seq_printf(m, "siblings : %u\n", c->num_log); | 572 | seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum])); |
530 | if (c->threads_per_core > 1 || c->cores_per_socket > 1) | 573 | if (c->threads_per_core > 1 || c->cores_per_socket > 1) |
531 | seq_printf(m, | 574 | seq_printf(m, |
532 | "physical id: %u\n" | 575 | "physical id: %u\n" |
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 774f34b675cf..58ce07efc56e 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -387,15 +387,14 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
387 | struct sigscratch *scr) | 387 | struct sigscratch *scr) |
388 | { | 388 | { |
389 | extern char __kernel_sigtramp[]; | 389 | extern char __kernel_sigtramp[]; |
390 | unsigned long tramp_addr, new_rbs = 0; | 390 | unsigned long tramp_addr, new_rbs = 0, new_sp; |
391 | struct sigframe __user *frame; | 391 | struct sigframe __user *frame; |
392 | long err; | 392 | long err; |
393 | 393 | ||
394 | frame = (void __user *) scr->pt.r12; | 394 | new_sp = scr->pt.r12; |
395 | tramp_addr = (unsigned long) __kernel_sigtramp; | 395 | tramp_addr = (unsigned long) __kernel_sigtramp; |
396 | if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) { | 396 | if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) { |
397 | frame = (void __user *) ((current->sas_ss_sp + current->sas_ss_size) | 397 | new_sp = current->sas_ss_sp + current->sas_ss_size; |
398 | & ~(STACK_ALIGN - 1)); | ||
399 | /* | 398 | /* |
400 | * We need to check for the register stack being on the signal stack | 399 | * We need to check for the register stack being on the signal stack |
401 | * separately, because it's switched separately (memory stack is switched | 400 | * separately, because it's switched separately (memory stack is switched |
@@ -404,7 +403,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, | |||
404 | if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) | 403 | if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) |
405 | new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1); | 404 | new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1); |
406 | } | 405 | } |
407 | frame = (void __user *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1)); | 406 | frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); |
408 | 407 | ||
409 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 408 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
410 | return force_sigsegv_info(sig, frame); | 409 | return force_sigsegv_info(sig, frame); |
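Frame placement on the downward-growing memory stack is now one expression: make room, then align down; since STACK_ALIGN is a power of two, & -STACK_ALIGN clears the low bits. A worked example with illustrative numbers (the frame size is invented):

    /* new_sp = 0x9ffffff8, sizeof(*frame) = 0x2e0, STACK_ALIGN = 16     */
    frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
    /* 0x9ffffff8 - 0x2e0 = 0x9ffffd18; & -16 -> 0x9ffffd10: 16-aligned  */
    /* and wholly below the original sp, so the frame fits               */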
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 0166a9847095..657ac99a451c 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c | |||
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op) | |||
185 | { | 185 | { |
186 | unsigned int i; | 186 | unsigned int i; |
187 | 187 | ||
188 | for (i = 0; i < NR_CPUS; i++) { | 188 | for_each_online_cpu(i) { |
189 | if (cpu_online(i) && i != smp_processor_id()) | 189 | if (i != smp_processor_id()) |
190 | send_IPI_single(i, op); | 190 | send_IPI_single(i, op); |
191 | } | 191 | } |
192 | } | 192 | } |
@@ -199,9 +199,9 @@ send_IPI_all (int op) | |||
199 | { | 199 | { |
200 | int i; | 200 | int i; |
201 | 201 | ||
202 | for (i = 0; i < NR_CPUS; i++) | 202 | for_each_online_cpu(i) { |
203 | if (cpu_online(i)) | 203 | send_IPI_single(i, op); |
204 | send_IPI_single(i, op); | 204 | } |
205 | } | 205 | } |
206 | 206 | ||
207 | /* | 207 | /* |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 7d72c0d872b3..8f44e7d2df66 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -399,6 +399,7 @@ start_secondary (void *unused) | |||
399 | Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); | 399 | Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); |
400 | efi_map_pal_code(); | 400 | efi_map_pal_code(); |
401 | cpu_init(); | 401 | cpu_init(); |
402 | preempt_disable(); | ||
402 | smp_callin(); | 403 | smp_callin(); |
403 | 404 | ||
404 | cpu_idle(); | 405 | cpu_idle(); |
@@ -694,9 +695,9 @@ smp_cpus_done (unsigned int dummy) | |||
694 | * Allow the user to impress friends. | 695 | * Allow the user to impress friends. |
695 | */ | 696 | */ |
696 | 697 | ||
697 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 698 | for_each_online_cpu(cpu) { |
698 | if (cpu_online(cpu)) | 699 | bogosum += cpu_data(cpu)->loops_per_jiffy; |
699 | bogosum += cpu_data(cpu)->loops_per_jiffy; | 700 | } |
700 | 701 | ||
701 | printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", | 702 | printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", |
702 | (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); | 703 | (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 8b8a5a45b621..5b7e736f3b49 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -32,10 +32,6 @@ | |||
32 | 32 | ||
33 | extern unsigned long wall_jiffies; | 33 | extern unsigned long wall_jiffies; |
34 | 34 | ||
35 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; | ||
36 | |||
37 | EXPORT_SYMBOL(jiffies_64); | ||
38 | |||
39 | #define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */ | 35 | #define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */ |
40 | 36 | ||
41 | #ifdef CONFIG_IA64_DEBUG_IRQ | 37 | #ifdef CONFIG_IA64_DEBUG_IRQ |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index f970359e7edf..d3e0ecb56d62 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -30,17 +30,20 @@ fpswa_interface_t *fpswa_interface; | |||
30 | EXPORT_SYMBOL(fpswa_interface); | 30 | EXPORT_SYMBOL(fpswa_interface); |
31 | 31 | ||
32 | struct notifier_block *ia64die_chain; | 32 | struct notifier_block *ia64die_chain; |
33 | static DEFINE_SPINLOCK(die_notifier_lock); | ||
34 | 33 | ||
35 | int register_die_notifier(struct notifier_block *nb) | 34 | int |
35 | register_die_notifier(struct notifier_block *nb) | ||
36 | { | 36 | { |
37 | int err = 0; | 37 | return notifier_chain_register(&ia64die_chain, nb); |
38 | unsigned long flags; | ||
39 | spin_lock_irqsave(&die_notifier_lock, flags); | ||
40 | err = notifier_chain_register(&ia64die_chain, nb); | ||
41 | spin_unlock_irqrestore(&die_notifier_lock, flags); | ||
42 | return err; | ||
43 | } | 38 | } |
39 | EXPORT_SYMBOL_GPL(register_die_notifier); | ||
40 | |||
41 | int | ||
42 | unregister_die_notifier(struct notifier_block *nb) | ||
43 | { | ||
44 | return notifier_chain_unregister(&ia64die_chain, nb); | ||
45 | } | ||
46 | EXPORT_SYMBOL_GPL(unregister_die_notifier); | ||
44 | 47 | ||
45 | void __init | 48 | void __init |
46 | trap_init (void) | 49 | trap_init (void) |
@@ -105,6 +108,7 @@ die (const char *str, struct pt_regs *regs, long err) | |||
105 | if (++die.lock_owner_depth < 3) { | 108 | if (++die.lock_owner_depth < 3) { |
106 | printk("%s[%d]: %s %ld [%d]\n", | 109 | printk("%s[%d]: %s %ld [%d]\n", |
107 | current->comm, current->pid, str, err, ++die_counter); | 110 | current->comm, current->pid, str, err, ++die_counter); |
111 | (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); | ||
108 | show_regs(regs); | 112 | show_regs(regs); |
109 | } else | 113 | } else |
110 | printk(KERN_ERR "Recursive die() failure, output suppressed\n"); | 114 | printk(KERN_ERR "Recursive die() failure, output suppressed\n"); |
@@ -128,24 +132,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | |||
128 | siginfo_t siginfo; | 132 | siginfo_t siginfo; |
129 | int sig, code; | 133 | int sig, code; |
130 | 134 | ||
131 | /* break.b always sets cr.iim to 0, which causes problems for | ||
132 | * debuggers. Get the real break number from the original instruction, | ||
133 | * but only for kernel code. User space break.b is left alone, to | ||
134 | * preserve the existing behaviour. All break codings have the same | ||
135 | * format, so there is no need to check the slot type. | ||
136 | */ | ||
137 | if (break_num == 0 && !user_mode(regs)) { | ||
138 | struct ia64_psr *ipsr = ia64_psr(regs); | ||
139 | unsigned long *bundle = (unsigned long *)regs->cr_iip; | ||
140 | unsigned long slot; | ||
141 | switch (ipsr->ri) { | ||
142 | case 0: slot = (bundle[0] >> 5); break; | ||
143 | case 1: slot = (bundle[0] >> 46) | (bundle[1] << 18); break; | ||
144 | default: slot = (bundle[1] >> 23); break; | ||
145 | } | ||
146 | break_num = ((slot >> 36 & 1) << 20) | (slot >> 6 & 0xfffff); | ||
147 | } | ||
148 | |||
149 | /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */ | 135 | /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */ |
150 | siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); | 136 | siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); |
151 | siginfo.si_imm = break_num; | 137 | siginfo.si_imm = break_num; |
@@ -155,9 +141,8 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | |||
155 | switch (break_num) { | 141 | switch (break_num) { |
156 | case 0: /* unknown error (used by GCC for __builtin_abort()) */ | 142 | case 0: /* unknown error (used by GCC for __builtin_abort()) */ |
157 | if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) | 143 | if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) |
158 | == NOTIFY_STOP) { | 144 | == NOTIFY_STOP) |
159 | return; | 145 | return; |
160 | } | ||
161 | die_if_kernel("bugcheck!", regs, break_num); | 146 | die_if_kernel("bugcheck!", regs, break_num); |
162 | sig = SIGILL; code = ILL_ILLOPC; | 147 | sig = SIGILL; code = ILL_ILLOPC; |
163 | break; | 148 | break; |
@@ -210,15 +195,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | |||
210 | sig = SIGILL; code = __ILL_BNDMOD; | 195 | sig = SIGILL; code = __ILL_BNDMOD; |
211 | break; | 196 | break; |
212 | 197 | ||
213 | case 0x80200: | ||
214 | case 0x80300: | ||
215 | if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP) | ||
216 | == NOTIFY_STOP) { | ||
217 | return; | ||
218 | } | ||
219 | sig = SIGTRAP; code = TRAP_BRKPT; | ||
220 | break; | ||
221 | |||
222 | default: | 198 | default: |
223 | if (break_num < 0x40000 || break_num > 0x100000) | 199 | if (break_num < 0x40000 || break_num > 0x100000) |
224 | die_if_kernel("Bad break", regs, break_num); | 200 | die_if_kernel("Bad break", regs, break_num); |
@@ -226,6 +202,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | |||
226 | if (break_num < 0x80000) { | 202 | if (break_num < 0x80000) { |
227 | sig = SIGILL; code = __ILL_BREAK; | 203 | sig = SIGILL; code = __ILL_BREAK; |
228 | } else { | 204 | } else { |
205 | if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP) | ||
206 | == NOTIFY_STOP) | ||
207 | return; | ||
229 | sig = SIGTRAP; code = TRAP_BRKPT; | 208 | sig = SIGTRAP; code = TRAP_BRKPT; |
230 | } | 209 | } |
231 | } | 210 | } |
@@ -578,12 +557,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | |||
578 | #endif | 557 | #endif |
579 | break; | 558 | break; |
580 | case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break; | 559 | case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break; |
581 | case 36: | 560 | case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break; |
582 | if (notify_die(DIE_SS, "ss", ®s, vector, | ||
583 | vector, SIGTRAP) == NOTIFY_STOP) | ||
584 | return; | ||
585 | siginfo.si_code = TRAP_TRACE; ifa = 0; break; | ||
586 | } | 561 | } |
562 | if (notify_die(DIE_FAULT, "ia64_fault", ®s, vector, siginfo.si_code, SIGTRAP) | ||
563 | == NOTIFY_STOP) | ||
564 | return; | ||
587 | siginfo.si_signo = SIGTRAP; | 565 | siginfo.si_signo = SIGTRAP; |
588 | siginfo.si_errno = 0; | 566 | siginfo.si_errno = 0; |
589 | siginfo.si_addr = (void __user *) ifa; | 567 | siginfo.si_addr = (void __user *) ifa; |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 4e9d06c48a8b..c6d40446c2c4 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -205,23 +205,18 @@ EXPORT_SYMBOL(uncached_free_page); | |||
205 | static int __init | 205 | static int __init |
206 | uncached_build_memmap(unsigned long start, unsigned long end, void *arg) | 206 | uncached_build_memmap(unsigned long start, unsigned long end, void *arg) |
207 | { | 207 | { |
208 | long length; | 208 | long length = end - start; |
209 | unsigned long vstart, vend; | ||
210 | int node; | 209 | int node; |
211 | 210 | ||
212 | length = end - start; | ||
213 | vstart = start + __IA64_UNCACHED_OFFSET; | ||
214 | vend = end + __IA64_UNCACHED_OFFSET; | ||
215 | |||
216 | dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); | 211 | dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end); |
217 | 212 | ||
218 | memset((char *)vstart, 0, length); | 213 | memset((char *)start, 0, length); |
219 | 214 | ||
220 | node = paddr_to_nid(start); | 215 | node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET); |
221 | 216 | ||
222 | for (; vstart < vend ; vstart += PAGE_SIZE) { | 217 | for (; start < end ; start += PAGE_SIZE) { |
223 | dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart); | 218 | dprintk(KERN_INFO "sticking %lx into the pool!\n", start); |
224 | gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE); | 219 | gen_pool_free(uncached_pool[node], start, PAGE_SIZE); |
225 | } | 220 | } |
226 | 221 | ||
227 | return 0; | 222 | return 0; |
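With the callback handed uncached (region-6) addresses directly, the pool's contents can be returned to callers as-is. A usage sketch; uncached_free_page() is the export above, and the matching allocator's signature (node id in, 0 on failure) is an assumption:

    unsigned long uc;

    uc = uncached_alloc_page(0);    /* uncached virtual address, node 0 */
    if (uc) {
            /* ... non-coherent, device-visible use of the page ... */
            uncached_free_page(uc);
    }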