Diffstat (limited to 'arch/ia64')
72 files changed, 3853 insertions, 2211 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 80988136f26d..ed25d66c8d50 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -60,6 +60,7 @@ choice
 
 config IA64_GENERIC
 	bool "generic"
+	select ACPI
 	select NUMA
 	select ACPI_NUMA
 	select VIRTUAL_MEM_MAP
@@ -338,11 +339,6 @@ config IA64_PALINFO
 	  To use this option, you have to ensure that the "/proc file system
 	  support" (CONFIG_PROC_FS) is enabled, too.
 
-config ACPI_DEALLOCATE_IRQ
-	bool
-	depends on IOSAPIC && EXPERIMENTAL
-	default y
-
 source "drivers/firmware/Kconfig"
 
 source "fs/Kconfig.binfmt"
@@ -351,36 +347,14 @@ endmenu
 
 menu "Power management and ACPI"
 
-config PM
-	bool "Power Management support"
-	depends on !IA64_HP_SIM
-	default y
-	help
-	  "Power Management" means that parts of your computer are shut
-	  off or put into a power conserving "sleep" mode if they are not
-	  being used.  There are two competing standards for doing this: APM
-	  and ACPI.  If you want to use either one, say Y here and then also
-	  to the requisite support below.
-
-	  Power Management is most important for battery powered laptop
-	  computers; if you have a laptop, check out the Linux Laptop home
-	  page on the WWW at <http://www.linux-on-laptops.com/> and the
-	  Battery Powered Linux mini-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  Note that, even if you say N here, Linux on the x86 architecture
-	  will issue the hlt instruction if nothing is to be done, thereby
-	  sending the processor to sleep and saving power.
-
-config ACPI
-	bool
-	depends on !IA64_HP_SIM
-	default y
-
-if !IA64_HP_SIM
+source "kernel/power/Kconfig"
 
 source "drivers/acpi/Kconfig"
 
+if PM
+
+source "arch/ia64/kernel/cpufreq/Kconfig"
+
 endif
 
 endmenu
@@ -428,6 +402,11 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+config GENERIC_PENDING_IRQ
+	bool
+	depends on GENERIC_HARDIRQS && SMP
+	default y
+
 source "arch/ia64/hp/sim/Kconfig"
 
 source "arch/ia64/oprofile/Kconfig"
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f9bd88ada708..7ed678cf5e41 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -82,25 +82,18 @@ unwcheck: vmlinux
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
 
-CLEAN_FILES += include/asm-ia64/.offsets.h.stamp vmlinux.gz bootloader
-
-MRPROPER_FILES += include/asm-ia64/offsets.h
-
-prepare: include/asm-ia64/offsets.h
-
-arch/ia64/kernel/asm-offsets.s: include/asm include/linux/version.h include/config/MARKER
-
-include/asm-ia64/offsets.h: arch/ia64/kernel/asm-offsets.s
-	$(call filechk,gen-asm-offsets)
-
-arch/ia64/kernel/asm-offsets.s: include/asm-ia64/.offsets.h.stamp
+prepare: include/asm-ia64/.offsets.h.stamp
 
 include/asm-ia64/.offsets.h.stamp:
 	mkdir -p include/asm-ia64
-	[ -s include/asm-ia64/offsets.h ] \
-	|| echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/offsets.h
+	[ -s include/asm-ia64/asm-offsets.h ] \
+	|| echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/asm-offsets.h
 	touch $@
 
+
+
+CLEAN_FILES += vmlinux.gz bootloader include/asm-ia64/.offsets.h.stamp
+
 boot: lib/lib.a vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $@
 
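The stamp rule above exists to break a bootstrap cycle: arch/ia64/kernel/asm-offsets.s cannot be compiled until some include/asm-ia64/asm-offsets.h exists, yet the real header is generated from that very file, so the first build is seeded with a dummy "#define IA64_TASK_SIZE 0" that the generated header later replaces (note the rename from offsets.h to asm-offsets.h throughout this patch). A minimal user-space sketch of the generation idea, with illustrative struct and macro names rather than the kernel's:

	/* asm-offsets in miniature: C computes layout constants and the
	 * build captures the output as a header that assembly can use. */
	#include <stdio.h>
	#include <stddef.h>

	struct task_example {		/* stand-in for a kernel struct */
		long state;
		long flags;
	};

	int main(void)
	{
		printf("#define EXAMPLE_TASK_FLAGS_OFFSET %zu\n",
		       offsetof(struct task_example, flags));
		printf("#define EXAMPLE_TASK_SIZE %zu\n",
		       sizeof(struct task_example));
		return 0;
	}

Redirecting this program's output to a header gives assembly sources the same structure offsets the C compiler used.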
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index b95fcf86ea00..3b65cbb31b1d 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -107,18 +107,12 @@ CONFIG_ACPI=y
 #
 # ACPI (Advanced Configuration and Power Interface) Support
 #
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
 CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_VIDEO=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
 CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
 CONFIG_ACPI_SYSTEM=y
 
 #
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index dccf35c60b94..08112ab38468 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -111,7 +111,6 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
-CONFIG_ACPI_DEALLOCATE_IRQ=y
 
 #
 # Firmware Drivers
@@ -130,19 +129,12 @@ CONFIG_ACPI=y
 #
 # ACPI (Advanced Configuration and Power Interface) Support
 #
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
 # CONFIG_ACPI_BUTTON is not set
-CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_HOTKEY=m
 # CONFIG_ACPI_FAN is not set
 # CONFIG_ACPI_PROCESSOR is not set
 CONFIG_ACPI_NUMA=y
-CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
 CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
 CONFIG_ACPI_SYSTEM=y
 # CONFIG_ACPI_CONTAINER is not set
 
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index c853cfcd2d11..d452e18ac494 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -109,7 +109,6 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
-CONFIG_ACPI_DEALLOCATE_IRQ=y
 
 #
 # Firmware Drivers
@@ -128,20 +127,13 @@ CONFIG_ACPI=y
 #
 # ACPI (Advanced Configuration and Power Interface) Support
 #
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
 CONFIG_ACPI_BUTTON=m
-# CONFIG_ACPI_VIDEO is not set
-# CONFIG_ACPI_HOTKEY is not set
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 # CONFIG_ACPI_HOTPLUG_CPU is not set
 CONFIG_ACPI_THERMAL=m
-CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
 CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
 CONFIG_ACPI_SYSTEM=y
 # CONFIG_ACPI_CONTAINER is not set
 
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 88e8867fa8e8..80b0e9eb7fb3 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -109,7 +109,6 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
-CONFIG_ACPI_DEALLOCATE_IRQ=y
 
 #
 # Firmware Drivers
@@ -128,19 +127,12 @@ CONFIG_ACPI=y
 #
 # ACPI (Advanced Configuration and Power Interface) Support
 #
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
 CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_VIDEO=m
-CONFIG_ACPI_HOTKEY=m
 CONFIG_ACPI_FAN=y
 CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_THERMAL=y
-CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
 CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
 CONFIG_ACPI_SYSTEM=y
 # CONFIG_ACPI_CONTAINER is not set
 
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 8444add76380..5da208115ea1 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -99,7 +99,6 @@ CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
-CONFIG_ACPI_DEALLOCATE_IRQ=y
 
 #
 # Firmware Drivers
@@ -118,20 +117,14 @@ CONFIG_ACPI=y
 #
 # ACPI (Advanced Configuration and Power Interface) Support
 #
-CONFIG_ACPI_BOOT=y
-CONFIG_ACPI_INTERPRETER=y
 CONFIG_ACPI_BUTTON=m
-CONFIG_ACPI_VIDEO=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_THERMAL=m
 CONFIG_ACPI_NUMA=y
-CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_BUS=y
 CONFIG_ACPI_POWER=y
-CONFIG_ACPI_PCI=y
 CONFIG_ACPI_SYSTEM=y
 CONFIG_ACPI_CONTAINER=m
 
@@ -341,7 +334,7 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
 CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
 # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
 # CONFIG_SCSI_IPR is not set
-CONFIG_SCSI_QLOGIC_FC=y
+# CONFIG_SCSI_QLOGIC_FC is not set
 # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
 CONFIG_SCSI_QLOGIC_1280=y
 # CONFIG_SCSI_QLOGIC_1280_1040 is not set
diff --git a/arch/ia64/hp/sim/boot/boot_head.S b/arch/ia64/hp/sim/boot/boot_head.S
index 1c8c7e6a9a5e..a9bd71ac78e2 100644
--- a/arch/ia64/hp/sim/boot/boot_head.S
+++ b/arch/ia64/hp/sim/boot/boot_head.S
@@ -4,6 +4,7 @@
  */
 
 #include <asm/asmmacro.h>
+#include <asm/pal.h>
 
 	.bss
 	.align 16
@@ -49,7 +50,11 @@ GLOBAL_ENTRY(jmp_to_kernel)
 	br.sptk.few b7
 END(jmp_to_kernel)
 
-
+/*
+ * r28 contains the index of the PAL function
+ * r29--31 the args
+ * Return values in ret0--3 (r8--11)
+ */
 GLOBAL_ENTRY(pal_emulator_static)
 	mov r8=-1
 	mov r9=256
@@ -62,7 +67,7 @@ GLOBAL_ENTRY(pal_emulator_static)
 	cmp.gtu p6,p7=r9,r28
 (p6)	br.cond.sptk.few stacked
 	;;
-static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	;;
 	mov r8=0			/* status = 0 */
@@ -70,21 +75,21 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	movl r10=0x0000000200000003	/* count[0], count[1] */
 	movl r11=0x1000000000002000	/* stride[0], stride[1] */
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+1:	cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x100000064		/* proc_ratio (1/100) */
 	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
 	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
 	;;
-1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
+1:	cmp.eq p6,p7=PAL_RSE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	mov r9=96			/* num phys stacked */
 	mov r10=0			/* hints */
 	mov r11=0
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
+1:	cmp.eq p6,p7=PAL_CACHE_FLUSH,r28	/* PAL_CACHE_FLUSH */
 (p7)	br.cond.sptk.few 1f
 	mov r9=ar.lc
 	movl r8=524288			/* flush 512k million cache lines (16MB) */
@@ -102,7 +107,7 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	mov ar.lc=r9
 	mov r8=r0
 	;;
-1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
+1:	cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
@@ -138,6 +143,20 @@ static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
 	st8 [r29]=r0,16	/* clear remaining bits */
 	st8 [r18]=r0,16	/* clear remaining bits */
 	;;
+1:	cmp.eq p6,p7=PAL_VM_SUMMARY,r28
+(p7)	br.cond.sptk.few 1f
+	mov	r8=0		/* status = 0 */
+	movl	r9=0x2044040020F1865	/* num_tc_levels=2, num_unique_tcs=4 */
+					/* max_itr_entry=64, max_dtr_entry=64 */
+					/* hash_tag_id=2, max_pkr=15 */
+					/* key_size=24, phys_add_size=50, vw=1 */
+	movl	r10=0x183C		/* rid_size=24, impl_va_msb=60 */
+	;;
+1:	cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
+(p7)	br.cond.sptk.few 1f
+	mov	r8=0			/* status = 0 */
+	mov	r9=0x80|0x01		/* NatPage|WB */
+	;;
 1:	br.cond.sptk.few rp
 stacked:
 	br.ret.sptk.few rp
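The emulator above answers each PAL call with a status in r8 and data in r9-r11, packing two 32-bit fields into one register where the real firmware would; for PAL_FREQ_RATIOS, for instance, 0x100000064 encodes the ratio 1/100 (numerator in the upper half, denominator in the lower half, per the comments in the hunk). A small C sketch of that decoding — the struct and function names here are mine, not from <asm/pal.h>:

	#include <stdio.h>
	#include <stdint.h>

	struct ratio {
		uint32_t num;	/* upper 32 bits of the packed value */
		uint32_t den;	/* lower 32 bits */
	};

	static struct ratio decode_ratio(uint64_t packed)
	{
		struct ratio r = { (uint32_t)(packed >> 32), (uint32_t)packed };
		return r;
	}

	int main(void)
	{
		struct ratio proc = decode_ratio(0x100000064ULL);	/* proc_ratio */
		struct ratio bus  = decode_ratio(0x100000100ULL);	/* bus_ratio */
		/* prints: proc 1/100, bus 1/256 */
		printf("proc %u/%u, bus %u/%u\n",
		       proc.num, proc.den, bus.num, bus.den);
		return 0;
	}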
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 5c46928e3dc6..30fdfb1d0a53 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -237,17 +237,6 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
 	return ((struct sal_ret_values) {status, r9, r10, r11});
 }
 
-
-/*
- * This is here to work around a bug in egcs-1.1.1b that causes the
- * compiler to crash (seems like a bug in the new alias analysis code.
- */
-void *
-id (long addr)
-{
-	return (void *) addr;
-}
-
 struct ia64_boot_param *
 sys_fw_init (const char *args, int arglen)
 {
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 7dcb8582ae0d..b42ec37be51c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -130,7 +130,7 @@ static void rs_stop(struct tty_struct *tty)
 
 static void rs_start(struct tty_struct *tty)
 {
-#if SIMSERIAL_DEBUG
+#ifdef SIMSERIAL_DEBUG
 	printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
 	       tty->stopped, tty->hw_stopped, tty->flow_stopped);
 #endif
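The one-line fix above matters because the two preprocessor tests behave differently: #ifdef only asks whether the name is defined, while #if expands the macro and evaluates it, so a bare "#define SIMSERIAL_DEBUG" (empty expansion) makes "#if SIMSERIAL_DEBUG" a preprocessor error, and leaving the macro undefined merely relied on the undefined-identifiers-evaluate-to-0 rule. A standalone illustration — compile with and without -DSIMSERIAL_DEBUG:

	#include <stdio.h>

	int main(void)
	{
	#ifdef SIMSERIAL_DEBUG
		printf("debug tracing enabled\n");
	#else
		printf("debug tracing disabled\n");
	#endif
		return 0;
	}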
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 829a6d80711c..494fad6bf376 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -1,6 +1,6 @@
 #include <asm/asmmacro.h>
 #include <asm/ia32.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/signal.h>
 #include <asm/thread_info.h>
 
@@ -215,7 +215,7 @@ ia32_syscall_table:
 	data8 sys32_fork
 	data8 sys_read
 	data8 sys_write
-	data8 sys32_open	/* 5 */
+	data8 compat_sys_open	/* 5 */
 	data8 sys_close
 	data8 sys32_waitpid
 	data8 sys_creat
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index ebb89be2aa2d..aa891c9bc9b6 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -29,7 +29,6 @@
 #include <asm/uaccess.h>
 #include <asm/rse.h>
 #include <asm/sigcontext.h>
-#include <asm/segment.h>
 
 #include "ia32priv.h"
 
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index c1e20d65dd6c..e29a8a55486a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk)
 	return ret;
 }
 
-/*
- * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
- */
-asmlinkage long
-sys32_open (const char __user * filename, int flags, int mode)
-{
-	char * tmp;
-	int fd, error;
-
-	tmp = getname(filename);
-	fd = PTR_ERR(tmp);
-	if (!IS_ERR(tmp)) {
-		fd = get_unused_fd();
-		if (fd >= 0) {
-			struct file *f = filp_open(tmp, flags, mode);
-			error = PTR_ERR(f);
-			if (IS_ERR(f))
-				goto out_error;
-			fd_install(fd, f);
-		}
-	  out:
-		putname(tmp);
-	}
-	return fd;
-
-  out_error:
-	put_unused_fd(fd);
-	fd = error;
-	goto out;
-}
-
 /* Structure for ia32 emulation on ia64 */
 struct epoll_event32
 {
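The deleted sys32_open existed, per its own comment, only because the native sys_open forces O_LARGEFILE for 64-bit callers, which a 32-bit emulated task must not receive implicitly; the ia32 syscall table now points at the generic compat_sys_open instead (see the ia32_entry.S hunk above). The distinction in miniature, as a user-space sketch with my own helper names rather than the kernel's:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* 64-bit native ABI: large-file support is implied. */
	static int open_native64(const char *path, int flags, int mode)
	{
		return open(path, flags | O_LARGEFILE, mode);
	}

	/* 32-bit compat ABI: pass the caller's flags through untouched. */
	static int open_compat32(const char *path, int flags, int mode)
	{
		return open(path, flags, mode);
	}

	int main(void)
	{
		int a = open_native64("/dev/null", O_RDONLY, 0);
		int b = open_compat32("/dev/null", O_RDONLY, 0);
		printf("native fd %d (O_LARGEFILE forced), compat fd %d\n", a, b);
		if (a >= 0)
			close(a);
		if (b >= 0)
			close(b);
		return 0;
	}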
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index e1fb68ddec26..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,10 +16,11 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
 obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_SMP)		+= smp.o smpboot.o domain.o
+obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
+obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 2623df5e2633..13a5b3b49bf8 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -17,20 +17,20 @@
 #include <asm/acpi-ext.h>
 
 struct acpi_vendor_descriptor {
 	u8 guid_id;
 	efi_guid_t guid;
 };
 
 struct acpi_vendor_info {
 	struct acpi_vendor_descriptor *descriptor;
 	u8 *data;
 	u32 length;
 };
 
 acpi_status
 acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 {
-	struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
+	struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
 	struct acpi_resource_vendor *vendor;
 	struct acpi_vendor_descriptor *descriptor;
 	u32 length;
@@ -38,8 +38,8 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (resource->id != ACPI_RSTYPE_VENDOR)
 		return AE_OK;
 
-	vendor = (struct acpi_resource_vendor *) &resource->data;
-	descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
+	vendor = (struct acpi_resource_vendor *)&resource->data;
+	descriptor = (struct acpi_vendor_descriptor *)vendor->reserved;
 	if (vendor->length <= sizeof(*info->descriptor) ||
 	    descriptor->guid_id != info->descriptor->guid_id ||
 	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
@@ -50,21 +50,24 @@ acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 	if (!info->data)
 		return AE_NO_MEMORY;
 
-	memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
+	memcpy(info->data,
+	       vendor->reserved + sizeof(struct acpi_vendor_descriptor),
+	       length);
 	info->length = length;
 	return AE_CTRL_TERMINATE;
 }
 
 acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
-			  u8 **data, u32 *length)
+acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
+			  u8 ** data, u32 * length)
 {
 	struct acpi_vendor_info info;
 
 	info.descriptor = id;
 	info.data = NULL;
 
-	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
+	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
+			    &info);
 	if (!info.data)
 		return AE_NOT_FOUND;
 
@@ -75,17 +78,19 @@ acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
 
 struct acpi_vendor_descriptor hp_ccsr_descriptor = {
 	.guid_id = 2,
-	.guid = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+	.guid =
+	    EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
+		     0x37, 0x0e, 0xad)
 };
 
-acpi_status
-hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
 {
 	acpi_status status;
 	u8 *data;
 	u32 length;
 
-	status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+	status =
+	    acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
 
 	if (ACPI_FAILURE(status) || length != 16)
 		return AE_NOT_FOUND;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 9609f243e5d0..28a4529fdd60 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -74,12 +74,11 @@ unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
 #define MAX_SAPICS 256
-u16 ia64_acpiid_to_sapicid[MAX_SAPICS] =
-	{ [0 ... MAX_SAPICS - 1] = -1 };
+u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
+
 EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
 
-const char *
-acpi_get_sysname (void)
+const char *acpi_get_sysname(void)
 {
 #ifdef CONFIG_IA64_GENERIC
 	unsigned long rsdp_phys;
@@ -89,27 +88,29 @@ acpi_get_sysname (void)
 
 	rsdp_phys = acpi_find_rsdp();
 	if (!rsdp_phys) {
-		printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP not found, default to \"dig\"\n");
 		return "dig";
 	}
 
-	rsdp = (struct acpi20_table_rsdp *) __va(rsdp_phys);
+	rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
 	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
-	xsdt = (struct acpi_table_xsdt *) __va(rsdp->xsdt_address);
+	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
 	hdr = &xsdt->header;
 	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
-		printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
+		printk(KERN_ERR
+		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
 	if (!strcmp(hdr->oem_id, "HP")) {
 		return "hpzx1";
-	}
-	else if (!strcmp(hdr->oem_id, "SGI")) {
+	} else if (!strcmp(hdr->oem_id, "SGI")) {
 		return "sn2";
 	}
 
@@ -131,7 +132,7 @@ acpi_get_sysname (void)
 #endif
 }
 
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS	256
 
@@ -146,8 +147,7 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
  * Interrupt routing API for device drivers.  Provides interrupt vector for
  * a generic platform event.  Currently only CPEI is implemented.
  */
-int
-acpi_request_vector (u32 int_type)
+int acpi_request_vector(u32 int_type)
 {
 	int vector = -1;
 
@@ -155,12 +155,12 @@ acpi_request_vector (u32 int_type)
 		/* corrected platform error interrupt */
 		vector = platform_intr_list[int_type];
 	} else
-		printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n");
+		printk(KERN_ERR
+		       "acpi_request_vector(): invalid interrupt type\n");
 	return vector;
 }
 
-char *
-__acpi_map_table (unsigned long phys_addr, unsigned long size)
+char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
 {
 	return __va(phys_addr);
 }
@@ -169,19 +169,18 @@ __acpi_map_table (unsigned long phys_addr, unsigned long size)
    Boot-time Table Parsing
    -------------------------------------------------------------------------- */
 
 static int total_cpus __initdata;
 static int available_cpus __initdata;
-struct acpi_table_madt * acpi_madt __initdata;
+struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
-
 
 static int __init
-acpi_parse_lapic_addr_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+			  const unsigned long end)
 {
 	struct acpi_table_lapic_addr_ovr *lapic;
 
-	lapic = (struct acpi_table_lapic_addr_ovr *) header;
+	lapic = (struct acpi_table_lapic_addr_ovr *)header;
 
 	if (BAD_MADT_ENTRY(lapic, end))
 		return -EINVAL;
@@ -193,22 +192,23 @@ acpi_parse_lapic_addr_ovr (
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lsapic *lsapic;
 
-	lsapic = (struct acpi_table_lsapic *) header;
+	lsapic = (struct acpi_table_lsapic *)header;
 
 	if (BAD_MADT_ENTRY(lsapic, end))
 		return -EINVAL;
 
 	if (lsapic->flags.enabled) {
 #ifdef CONFIG_SMP
-		smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    (lsapic->id << 8) | lsapic->eid;
 #endif
-		ia64_acpiid_to_sapicid[lsapic->acpi_id] = (lsapic->id << 8) | lsapic->eid;
+		ia64_acpiid_to_sapicid[lsapic->acpi_id] =
+		    (lsapic->id << 8) | lsapic->eid;
 		++available_cpus;
 	}
 
@@ -216,13 +216,12 @@ acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_lapic_nmi *lacpi_nmi;
 
-	lacpi_nmi = (struct acpi_table_lapic_nmi*) header;
+	lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
 
 	if (BAD_MADT_ENTRY(lacpi_nmi, end))
 		return -EINVAL;
@@ -231,13 +230,12 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-
 static int __init
-acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_iosapic *iosapic;
 
-	iosapic = (struct acpi_table_iosapic *) header;
+	iosapic = (struct acpi_table_iosapic *)header;
 
 	if (BAD_MADT_ENTRY(iosapic, end))
 		return -EINVAL;
@@ -245,15 +243,14 @@ acpi_parse_iosapic (acpi_table_entry_header *header, const unsigned long end)
 	return iosapic_init(iosapic->address, iosapic->global_irq_base);
 }
 
-
 static int __init
-acpi_parse_plat_int_src (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_plat_int_src(acpi_table_entry_header * header,
+			const unsigned long end)
 {
 	struct acpi_table_plat_int_src *plintsrc;
 	int vector;
 
-	plintsrc = (struct acpi_table_plat_int_src *) header;
+	plintsrc = (struct acpi_table_plat_int_src *)header;
 
 	if (BAD_MADT_ENTRY(plintsrc, end))
 		return -EINVAL;
@@ -267,8 +264,12 @@ acpi_parse_plat_int_src (
 					plintsrc->iosapic_vector,
 					plintsrc->eid,
 					plintsrc->id,
-					(plintsrc->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-					(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+					(plintsrc->flags.polarity ==
+					 1) ? IOSAPIC_POL_HIGH :
+					IOSAPIC_POL_LOW,
+					(plintsrc->flags.trigger ==
+					 1) ? IOSAPIC_EDGE :
+					IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
 	if (acpi_madt_rev > 1) {
@@ -283,7 +284,6 @@ acpi_parse_plat_int_src (
 	return 0;
 }
 
-
 unsigned int can_cpei_retarget(void)
 {
 	extern int cpe_vector;
@@ -322,29 +322,30 @@ unsigned int get_cpei_target_cpu(void)
 }
 
 static int __init
-acpi_parse_int_src_ovr (
-	acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+		       const unsigned long end)
 {
 	struct acpi_table_int_src_ovr *p;
 
-	p = (struct acpi_table_int_src_ovr *) header;
+	p = (struct acpi_table_int_src_ovr *)header;
 
 	if (BAD_MADT_ENTRY(p, end))
 		return -EINVAL;
 
 	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
-		(p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-		(p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+				 (p->flags.polarity ==
+				  1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				 (p->flags.trigger ==
+				  1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 	return 0;
 }
 
-
 static int __init
-acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
+acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
 {
 	struct acpi_table_nmi_src *nmi_src;
 
-	nmi_src = (struct acpi_table_nmi_src*) header;
+	nmi_src = (struct acpi_table_nmi_src *)header;
 
 	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
@@ -353,11 +354,9 @@ acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 	return 0;
 }
 
-static void __init
-acpi_madt_oem_check (char *oem_id, char *oem_table_id)
+static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-	if (!strncmp(oem_id, "IBM", 3) &&
-	    (!strncmp(oem_table_id, "SERMOW", 6))) {
+	if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) {
 
 		/*
 		 * Unfortunately ITC_DRIFT is not yet part of the
@@ -370,19 +369,18 @@ acpi_madt_oem_check (char *oem_id, char *oem_table_id)
 	}
 }
 
-static int __init
-acpi_parse_madt (unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
+	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
 
 	acpi_madt_rev = acpi_madt->header.revision;
 
 	/* remember the value for reference after free_initmem() */
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1;		/* Firmware on old Itanium systems is broken */
 #else
 	has_8259 = acpi_madt->flags.pcat_compat;
 #endif
@@ -396,19 +394,18 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
 	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
 
 	acpi_madt_oem_check(acpi_madt->header.oem_id,
 			    acpi_madt->header.oem_table_id);
 
 	return 0;
 }
 
-
 #ifdef CONFIG_ACPI_NUMA
 
 #undef SLIT_DEBUG
 
 #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
 
 static int __initdata srat_num_cpus;	/* number of cpus */
 static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_set(bit)	(set_bit(bit,(void *)pxm_flag))
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
@@ -421,15 +418,15 @@ static struct acpi_table_slit __initdata *slit_table;
  * ACPI 2.0 SLIT (System Locality Information Table)
  * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
  */
-void __init
-acpi_numa_slit_init (struct acpi_table_slit *slit)
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	u32 len;
 
 	len = sizeof(struct acpi_table_header) + 8
 	    + slit->localities * slit->localities;
 	if (slit->header.length != len) {
-		printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
+		printk(KERN_ERR
+		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
 		       len, slit->header.length);
 		memset(numa_slit, 10, sizeof(numa_slit));
 		return;
@@ -438,19 +435,20 @@ acpi_numa_slit_init (struct acpi_table_slit *slit)
 }
 
 void __init
-acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 {
 	/* record this node in proximity bitmap */
 	pxm_bit_set(pa->proximity_domain);
 
-	node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
+	node_cpuid[srat_num_cpus].phys_id =
+	    (pa->apic_id << 8) | (pa->lsapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
 	srat_num_cpus++;
 }
 
 void __init
-acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 {
 	unsigned long paddr, size;
 	u8 pxm;
@@ -487,8 +485,7 @@ acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
 	num_node_memblks++;
 }
 
-void __init
-acpi_numa_arch_fixup (void)
+void __init acpi_numa_arch_fixup(void)
 {
 	int i, j, node_from, node_to;
 
@@ -534,21 +531,24 @@ acpi_numa_arch_fixup (void)
 	for (i = 0; i < srat_num_cpus; i++)
 		node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
 
-	printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes());
-	printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks);
+	printk(KERN_INFO "Number of logical nodes in system = %d\n",
+	       num_online_nodes());
+	printk(KERN_INFO "Number of memory chunks in system = %d\n",
+	       num_node_memblks);
 
-	if (!slit_table) return;
+	if (!slit_table)
+		return;
 	memset(numa_slit, -1, sizeof(numa_slit));
-	for (i=0; i<slit_table->localities; i++) {
+	for (i = 0; i < slit_table->localities; i++) {
 		if (!pxm_bit_test(i))
 			continue;
 		node_from = pxm_to_nid_map[i];
-		for (j=0; j<slit_table->localities; j++) {
+		for (j = 0; j < slit_table->localities; j++) {
 			if (!pxm_bit_test(j))
 				continue;
 			node_to = pxm_to_nid_map[j];
 			node_distance(node_from, node_to) =
-				slit_table->entry[i*slit_table->localities + j];
+			    slit_table->entry[i * slit_table->localities + j];
 		}
 	}
 
@@ -556,36 +556,41 @@ acpi_numa_arch_fixup (void)
 	printk("ACPI 2.0 SLIT locality table:\n");
 	for_each_online_node(i) {
 		for_each_online_node(j)
-			printk("%03d ", node_distance(i,j));
+		    printk("%03d ", node_distance(i, j));
 		printk("\n");
 	}
 #endif
 }
 #endif				/* CONFIG_ACPI_NUMA */
 
-unsigned int
-acpi_register_gsi (u32 gsi, int edge_level, int active_high_low)
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 {
 	if (has_8259 && gsi < 16)
 		return isa_irq_to_vector(gsi);
 
 	return iosapic_register_intr(gsi,
-				     (active_high_low == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-				     (edge_level == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+				     (active_high_low ==
+				      ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH :
+				     IOSAPIC_POL_LOW,
+				     (edge_level ==
+				      ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE :
+				     IOSAPIC_LEVEL);
 }
+
 EXPORT_SYMBOL(acpi_register_gsi);
 
-#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
-void
-acpi_unregister_gsi (u32 gsi)
+void acpi_unregister_gsi(u32 gsi)
 {
 	iosapic_unregister_intr(gsi);
 }
+
 EXPORT_SYMBOL(acpi_unregister_gsi);
-#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
 
-static int __init
-acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_header *fadt_header;
 	struct fadt_descriptor_rev2 *fadt;
@@ -593,11 +598,11 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	if (!phys_addr || !size)
 		return -EINVAL;
 
-	fadt_header = (struct acpi_table_header *) __va(phys_addr);
+	fadt_header = (struct acpi_table_header *)__va(phys_addr);
 	if (fadt_header->revision != 3)
 		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
 
-	fadt = (struct fadt_descriptor_rev2 *) fadt_header;
+	fadt = (struct fadt_descriptor_rev2 *)fadt_header;
 
 	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
 		acpi_kbd_controller_present = 0;
@@ -609,22 +614,19 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 	return 0;
 }
 
-
-unsigned long __init
-acpi_find_rsdp (void)
+unsigned long __init acpi_find_rsdp(void)
 {
 	unsigned long rsdp_phys = 0;
 
 	if (efi.acpi20)
 		rsdp_phys = __pa(efi.acpi20);
 	else if (efi.acpi)
-		printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
+		printk(KERN_WARNING PREFIX
+		       "v1.0/r0.71 tables no longer supported\n");
 	return rsdp_phys;
 }
 
-
-int __init
-acpi_boot_init (void)
+int __init acpi_boot_init(void)
 {
 
 	/*
@@ -642,31 +644,43 @@ acpi_boot_init (void)
 
 	/* Local APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing LAPIC address override entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
+	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+	    < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no LAPIC entries\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) < 0)
+	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
-	if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
-		printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+		printk(KERN_ERR PREFIX
+		       "Error parsing MADT - no IOSAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
-	if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
-		printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing platform interrupt source entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
-		printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
+	if (acpi_table_parse_madt
+	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+		printk(KERN_ERR PREFIX
+		       "Error parsing interrupt source overrides entry\n");
 
 	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
       skip_madt:
 
 	/*
 	 * FADT says whether a legacy keyboard controller is present.
@@ -681,8 +695,9 @@ acpi_boot_init (void)
 	if (available_cpus == 0) {
 		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
 		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
-		available_cpus = 1; /* We've got at least one of these, no? */
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
 	}
 	smp_boot_data.cpu_count = available_cpus;
 
@@ -691,8 +706,10 @@ acpi_boot_init (void)
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
-			if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
-				node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+			if (smp_boot_data.cpu_phys_id[cpu] !=
+			    hard_smp_processor_id())
+				node_cpuid[i++].phys_id =
+				    smp_boot_data.cpu_phys_id[cpu];
 	}
 # endif
 #endif
@@ -700,12 +717,12 @@ acpi_boot_init (void)
 	build_cpu_to_node_map();
 #endif
 	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
 	return 0;
 }
 
-int
-acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	int vector;
 
@@ -726,11 +743,10 @@ acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
  */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
 static
-int
-acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
+int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int pxm_id;
 
 	pxm_id = acpi_get_pxm(handle);
 
@@ -738,31 +754,28 @@ acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
 	 * Assuming that the container driver would have set the proximity
 	 * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
 	 */
-	node_cpuid[cpu].nid = (pxm_id < 0) ? 0:
-		pxm_to_nid_map[pxm_id];
+	node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
 
 	node_cpuid[cpu].phys_id = physid;
 #endif
-	return(0);
+	return (0);
 }
 
-
-int
-acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_table_lsapic *lsapic;
 	cpumask_t tmp_map;
 	long physid;
 	int cpu;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
761 | return -EINVAL; | 774 | return -EINVAL; |
762 | 775 | ||
763 | if (!buffer.length || !buffer.pointer) | 776 | if (!buffer.length || !buffer.pointer) |
764 | return -EINVAL; | 777 | return -EINVAL; |
765 | 778 | ||
766 | obj = buffer.pointer; | 779 | obj = buffer.pointer; |
767 | if (obj->type != ACPI_TYPE_BUFFER || | 780 | if (obj->type != ACPI_TYPE_BUFFER || |
768 | obj->buffer.length < sizeof(*lsapic)) { | 781 | obj->buffer.length < sizeof(*lsapic)) { |
@@ -778,7 +791,7 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
778 | return -EINVAL; | 791 | return -EINVAL; |
779 | } | 792 | } |
780 | 793 | ||
781 | physid = ((lsapic->id <<8) | (lsapic->eid)); | 794 | physid = ((lsapic->id << 8) | (lsapic->eid)); |
782 | 795 | ||
783 | acpi_os_free(buffer.pointer); | 796 | acpi_os_free(buffer.pointer); |
784 | buffer.length = ACPI_ALLOCATE_BUFFER; | 797 | buffer.length = ACPI_ALLOCATE_BUFFER; |
@@ -786,50 +799,49 @@ acpi_map_lsapic(acpi_handle handle, int *pcpu) | |||
786 | 799 | ||
787 | cpus_complement(tmp_map, cpu_present_map); | 800 | cpus_complement(tmp_map, cpu_present_map); |
788 | cpu = first_cpu(tmp_map); | 801 | cpu = first_cpu(tmp_map); |
789 | if(cpu >= NR_CPUS) | 802 | if (cpu >= NR_CPUS) |
790 | return -EINVAL; | 803 | return -EINVAL; |
791 | 804 | ||
792 | acpi_map_cpu2node(handle, cpu, physid); | 805 | acpi_map_cpu2node(handle, cpu, physid); |
793 | 806 | ||
794 | cpu_set(cpu, cpu_present_map); | 807 | cpu_set(cpu, cpu_present_map); |
795 | ia64_cpu_to_sapicid[cpu] = physid; | 808 | ia64_cpu_to_sapicid[cpu] = physid; |
796 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu]; | 809 | ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu]; |
797 | 810 | ||
798 | *pcpu = cpu; | 811 | *pcpu = cpu; |
799 | return(0); | 812 | return (0); |
800 | } | 813 | } |
801 | EXPORT_SYMBOL(acpi_map_lsapic); | ||
802 | 814 | ||
815 | EXPORT_SYMBOL(acpi_map_lsapic); | ||
803 | 816 | ||
804 | int | 817 | int acpi_unmap_lsapic(int cpu) |
805 | acpi_unmap_lsapic(int cpu) | ||
806 | { | 818 | { |
807 | int i; | 819 | int i; |
808 | 820 | ||
809 | for (i=0; i<MAX_SAPICS; i++) { | 821 | for (i = 0; i < MAX_SAPICS; i++) { |
810 | if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) { | 822 | if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) { |
811 | ia64_acpiid_to_sapicid[i] = -1; | 823 | ia64_acpiid_to_sapicid[i] = -1; |
812 | break; | 824 | break; |
813 | } | 825 | } |
814 | } | 826 | } |
815 | ia64_cpu_to_sapicid[cpu] = -1; | 827 | ia64_cpu_to_sapicid[cpu] = -1; |
816 | cpu_clear(cpu,cpu_present_map); | 828 | cpu_clear(cpu, cpu_present_map); |
817 | 829 | ||
818 | #ifdef CONFIG_ACPI_NUMA | 830 | #ifdef CONFIG_ACPI_NUMA |
819 | /* NUMA specific cleanups */ | 831 | /* NUMA specific cleanups */ |
820 | #endif | 832 | #endif |
821 | 833 | ||
822 | return(0); | 834 | return (0); |
823 | } | 835 | } |
836 | |||
824 | EXPORT_SYMBOL(acpi_unmap_lsapic); | 837 | EXPORT_SYMBOL(acpi_unmap_lsapic); |
825 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 838 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
826 | |||
827 | 839 | ||
828 | #ifdef CONFIG_ACPI_NUMA | 840 | #ifdef CONFIG_ACPI_NUMA |
829 | acpi_status __devinit | 841 | acpi_status __devinit |
830 | acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) | 842 | acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret) |
831 | { | 843 | { |
832 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 844 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
833 | union acpi_object *obj; | 845 | union acpi_object *obj; |
834 | struct acpi_table_iosapic *iosapic; | 846 | struct acpi_table_iosapic *iosapic; |
835 | unsigned int gsi_base; | 847 | unsigned int gsi_base; |
@@ -878,10 +890,9 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) | |||
878 | map_iosapic_to_node(gsi_base, node); | 890 | map_iosapic_to_node(gsi_base, node); |
879 | return AE_OK; | 891 | return AE_OK; |
880 | } | 892 | } |
881 | #endif /* CONFIG_NUMA */ | 893 | #endif /* CONFIG_NUMA */ |
882 | 894 | ||
883 | int | 895 | int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) |
884 | acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base) | ||
885 | { | 896 | { |
886 | int err; | 897 | int err; |
887 | 898 | ||
@@ -890,17 +901,18 @@ acpi_register_ioapic (acpi_handle handle, u64 phys_addr, u32 gsi_base) | |||
890 | 901 | ||
891 | #ifdef CONFIG_ACPI_NUMA | 902 | #ifdef CONFIG_ACPI_NUMA |
892 | acpi_map_iosapic(handle, 0, NULL, NULL); | 903 | acpi_map_iosapic(handle, 0, NULL, NULL); |
893 | #endif /* CONFIG_ACPI_NUMA */ | 904 | #endif /* CONFIG_ACPI_NUMA */ |
894 | 905 | ||
895 | return 0; | 906 | return 0; |
896 | } | 907 | } |
908 | |||
897 | EXPORT_SYMBOL(acpi_register_ioapic); | 909 | EXPORT_SYMBOL(acpi_register_ioapic); |
898 | 910 | ||
899 | int | 911 | int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) |
900 | acpi_unregister_ioapic (acpi_handle handle, u32 gsi_base) | ||
901 | { | 912 | { |
902 | return iosapic_remove(gsi_base); | 913 | return iosapic_remove(gsi_base); |
903 | } | 914 | } |
915 | |||
904 | EXPORT_SYMBOL(acpi_unregister_ioapic); | 916 | EXPORT_SYMBOL(acpi_unregister_ioapic); |
905 | 917 | ||
906 | #endif /* CONFIG_ACPI_BOOT */ | 918 | #endif /* CONFIG_ACPI */ |
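A note on acpi_map_lsapic() above: the physical SAPIC id is recovered from the _MAT buffer by packing the two 8-bit LSAPIC fields, and the hot-added CPU takes the first logical id missing from cpu_present_map. A standalone sketch of the packing (values are illustrative only):

	/* physid layout used above: bits 15..8 = id, bits 7..0 = eid */
	static inline long lsapic_physid(unsigned char id, unsigned char eid)
	{
		return ((long)id << 8) | eid;
	}

	/* e.g. id 0x12, eid 0x34 yields physid 0x1234 */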
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 7d1ae2982c53..f6a234289341 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -211,17 +211,41 @@ void foo(void) | |||
211 | #endif | 211 | #endif |
212 | 212 | ||
213 | BLANK(); | 213 | BLANK(); |
214 | DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, | 214 | DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET, |
215 | offsetof (struct ia64_mca_cpu, proc_state_dump)); | 215 | offsetof (struct ia64_mca_cpu, mca_stack)); |
216 | DEFINE(IA64_MCA_CPU_STACK_OFFSET, | ||
217 | offsetof (struct ia64_mca_cpu, stack)); | ||
218 | DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET, | ||
219 | offsetof (struct ia64_mca_cpu, stackframe)); | ||
220 | DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET, | ||
221 | offsetof (struct ia64_mca_cpu, rbstore)); | ||
222 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, | 216 | DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, |
223 | offsetof (struct ia64_mca_cpu, init_stack)); | 217 | offsetof (struct ia64_mca_cpu, init_stack)); |
224 | BLANK(); | 218 | BLANK(); |
219 | DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET, | ||
220 | offsetof (struct ia64_sal_os_state, sal_ra)); | ||
221 | DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET, | ||
222 | offsetof (struct ia64_sal_os_state, os_gp)); | ||
223 | DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, | ||
224 | offsetof (struct ia64_sal_os_state, pal_min_state)); | ||
225 | DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, | ||
226 | offsetof (struct ia64_sal_os_state, proc_state_param)); | ||
227 | DEFINE(IA64_SAL_OS_STATE_SIZE, | ||
228 | sizeof (struct ia64_sal_os_state)); | ||
229 | DEFINE(IA64_PMSA_GR_OFFSET, | ||
230 | offsetof (struct pal_min_state_area_s, pmsa_gr)); | ||
231 | DEFINE(IA64_PMSA_BANK1_GR_OFFSET, | ||
232 | offsetof (struct pal_min_state_area_s, pmsa_bank1_gr)); | ||
233 | DEFINE(IA64_PMSA_PR_OFFSET, | ||
234 | offsetof (struct pal_min_state_area_s, pmsa_pr)); | ||
235 | DEFINE(IA64_PMSA_BR0_OFFSET, | ||
236 | offsetof (struct pal_min_state_area_s, pmsa_br0)); | ||
237 | DEFINE(IA64_PMSA_RSC_OFFSET, | ||
238 | offsetof (struct pal_min_state_area_s, pmsa_rsc)); | ||
239 | DEFINE(IA64_PMSA_IIP_OFFSET, | ||
240 | offsetof (struct pal_min_state_area_s, pmsa_iip)); | ||
241 | DEFINE(IA64_PMSA_IPSR_OFFSET, | ||
242 | offsetof (struct pal_min_state_area_s, pmsa_ipsr)); | ||
243 | DEFINE(IA64_PMSA_IFS_OFFSET, | ||
244 | offsetof (struct pal_min_state_area_s, pmsa_ifs)); | ||
245 | DEFINE(IA64_PMSA_XIP_OFFSET, | ||
246 | offsetof (struct pal_min_state_area_s, pmsa_xip)); | ||
247 | BLANK(); | ||
248 | |||
225 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ | 249 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ |
226 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); | 250 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); |
227 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); | 251 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); |
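asm-offsets.c is never linked into the kernel; it is compiled to assembly so that each DEFINE() emits a marker that kbuild turns into a #define in asm-offsets.h, letting assembly code (such as the MCA entry paths that consume the new IA64_SAL_OS_STATE_* and IA64_PMSA_* constants) address C struct fields by immediate offset. A sketch of the mechanism in its conventional kbuild form (assumed here, not quoted from this tree):

	/* asm-offsets.c side: emit a "->SYMBOL value" marker into the .s file */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/* generated asm-offsets.h side (the offset value is illustrative):
	 *   #define IA64_MCA_CPU_INIT_STACK_OFFSET 1024 */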
diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig new file mode 100644 index 000000000000..2d9d5279b981 --- /dev/null +++ b/arch/ia64/kernel/cpufreq/Kconfig | |||
@@ -0,0 +1,29 @@ | |||
1 | |||
2 | # | ||
3 | # CPU Frequency scaling | ||
4 | # | ||
5 | |||
6 | menu "CPU Frequency scaling" | ||
7 | |||
8 | source "drivers/cpufreq/Kconfig" | ||
9 | |||
10 | if CPU_FREQ | ||
11 | |||
12 | comment "CPUFreq processor drivers" | ||
13 | |||
14 | config IA64_ACPI_CPUFREQ | ||
15 | tristate "ACPI Processor P-States driver" | ||
16 | select CPU_FREQ_TABLE | ||
17 | depends on ACPI_PROCESSOR | ||
18 | help | ||
19 | This driver adds a CPUFreq driver which utilizes the ACPI | ||
20 | Processor Performance States. | ||
21 | |||
22 | For details, take a look at <file:Documentation/cpu-freq/>. | ||
23 | |||
24 | If in doubt, say N. | ||
25 | |||
26 | endif # CPU_FREQ | ||
27 | |||
28 | endmenu | ||
29 | |||
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile new file mode 100644 index 000000000000..f748d34c02f0 --- /dev/null +++ b/arch/ia64/kernel/cpufreq/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += acpi-cpufreq.o | |||
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c new file mode 100644 index 000000000000..da4d5cf80a48 --- /dev/null +++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c | |||
@@ -0,0 +1,499 @@ | |||
1 | /* | ||
2 | * arch/ia64/kernel/cpufreq/acpi-cpufreq.c | ||
3 | * This file provides the ACPI based P-state support. This | ||
4 | * module works with generic cpufreq infrastructure. Most of | ||
5 | * the code is based on i386 version | ||
6 | * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) | ||
7 | * | ||
8 | * Copyright (C) 2005 Intel Corp | ||
9 | * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/cpufreq.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/seq_file.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/uaccess.h> | ||
21 | #include <asm/pal.h> | ||
22 | |||
23 | #include <linux/acpi.h> | ||
24 | #include <acpi/processor.h> | ||
25 | |||
26 | #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg) | ||
27 | |||
28 | MODULE_AUTHOR("Venkatesh Pallipadi"); | ||
29 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | ||
30 | MODULE_LICENSE("GPL"); | ||
31 | |||
32 | |||
33 | struct cpufreq_acpi_io { | ||
34 | struct acpi_processor_performance acpi_data; | ||
35 | struct cpufreq_frequency_table *freq_table; | ||
36 | unsigned int resume; | ||
37 | }; | ||
38 | |||
39 | static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS]; | ||
40 | |||
41 | static struct cpufreq_driver acpi_cpufreq_driver; | ||
42 | |||
43 | |||
44 | static int | ||
45 | processor_set_pstate ( | ||
46 | u32 value) | ||
47 | { | ||
48 | s64 retval; | ||
49 | |||
50 | dprintk("processor_set_pstate\n"); | ||
51 | |||
52 | retval = ia64_pal_set_pstate((u64)value); | ||
53 | |||
54 | if (retval) { | ||
55 | dprintk("Failed to set freq to 0x%x, with error 0x%lx\n", | ||
56 | value, retval); | ||
57 | return -ENODEV; | ||
58 | } | ||
59 | return (int)retval; | ||
60 | } | ||
61 | |||
62 | |||
63 | static int | ||
64 | processor_get_pstate ( | ||
65 | u32 *value) | ||
66 | { | ||
67 | u64 pstate_index = 0; | ||
68 | s64 retval; | ||
69 | |||
70 | dprintk("processor_get_pstate\n"); | ||
71 | |||
72 | retval = ia64_pal_get_pstate(&pstate_index); | ||
73 | *value = (u32) pstate_index; | ||
74 | |||
75 | if (retval) | ||
76 | dprintk("Failed to get current freq with " | ||
77 | "error 0x%x, idx 0x%x\n", retval, *value); | ||
78 | |||
79 | return (int)retval; | ||
80 | } | ||
81 | |||
82 | |||
83 | /* To be used only after data->acpi_data is initialized */ | ||
84 | static unsigned | ||
85 | extract_clock ( | ||
86 | struct cpufreq_acpi_io *data, | ||
87 | unsigned value, | ||
88 | unsigned int cpu) | ||
89 | { | ||
90 | unsigned long i; | ||
91 | |||
92 | dprintk("extract_clock\n"); | ||
93 | |||
94 | for (i = 0; i < data->acpi_data.state_count; i++) { | ||
95 | if (value >= data->acpi_data.states[i].control) | ||
96 | return data->acpi_data.states[i].core_frequency; | ||
97 | } | ||
98 | return data->acpi_data.states[i-1].core_frequency; | ||
99 | } | ||
100 | |||
101 | |||
102 | static unsigned int | ||
103 | processor_get_freq ( | ||
104 | struct cpufreq_acpi_io *data, | ||
105 | unsigned int cpu) | ||
106 | { | ||
107 | int ret = 0; | ||
108 | u32 value = 0; | ||
109 | cpumask_t saved_mask; | ||
110 | unsigned long clock_freq; | ||
111 | |||
112 | dprintk("processor_get_freq\n"); | ||
113 | |||
114 | saved_mask = current->cpus_allowed; | ||
115 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
116 | if (smp_processor_id() != cpu) { | ||
117 | ret = -EAGAIN; | ||
118 | goto migrate_end; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * processor_get_pstate gets the average frequency since the | ||
123 | * last read, so issue the call twice; the second result is current. | ||
124 | */ | ||
125 | ret = processor_get_pstate(&value); | ||
126 | ret = processor_get_pstate(&value); | ||
127 | |||
128 | if (ret) { | ||
129 | set_cpus_allowed(current, saved_mask); | ||
130 | printk(KERN_WARNING "get performance failed with error %d\n", | ||
131 | ret); | ||
132 | ret = -EAGAIN; | ||
133 | goto migrate_end; | ||
134 | } | ||
135 | clock_freq = extract_clock(data, value, cpu); | ||
136 | ret = (clock_freq*1000); | ||
137 | |||
138 | migrate_end: | ||
139 | set_cpus_allowed(current, saved_mask); | ||
140 | return ret; | ||
141 | } | ||
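processor_get_freq() and processor_set_freq() share the same migrate-and-call idiom: PAL P-state calls act on the CPU that issues them, so the caller pins itself to the target CPU first and restores its mask afterwards. A stripped-down sketch of the idiom (error handling trimmed, PAL call elided):

	static int on_cpu_example(unsigned int cpu)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		int ret = 0;

		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		if (smp_processor_id() != cpu)
			ret = -EAGAIN;	/* could not migrate to target */
		/* else: issue the per-CPU PAL call here */
		set_cpus_allowed(current, saved_mask);
		return ret;
	}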
142 | |||
143 | |||
144 | static int | ||
145 | processor_set_freq ( | ||
146 | struct cpufreq_acpi_io *data, | ||
147 | unsigned int cpu, | ||
148 | int state) | ||
149 | { | ||
150 | int ret = 0; | ||
151 | u32 value = 0; | ||
152 | struct cpufreq_freqs cpufreq_freqs; | ||
153 | cpumask_t saved_mask; | ||
154 | int retval; | ||
155 | |||
156 | dprintk("processor_set_freq\n"); | ||
157 | |||
158 | saved_mask = current->cpus_allowed; | ||
159 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
160 | if (smp_processor_id() != cpu) { | ||
161 | retval = -EAGAIN; | ||
162 | goto migrate_end; | ||
163 | } | ||
164 | |||
165 | if (state == data->acpi_data.state) { | ||
166 | if (unlikely(data->resume)) { | ||
167 | dprintk("Called after resume, resetting to P%d\n", state); | ||
168 | data->resume = 0; | ||
169 | } else { | ||
170 | dprintk("Already at target state (P%d)\n", state); | ||
171 | retval = 0; | ||
172 | goto migrate_end; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | dprintk("Transitioning from P%d to P%d\n", | ||
177 | data->acpi_data.state, state); | ||
178 | |||
179 | /* cpufreq frequency struct */ | ||
180 | cpufreq_freqs.cpu = cpu; | ||
181 | cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; | ||
182 | cpufreq_freqs.new = data->freq_table[state].frequency; | ||
183 | |||
184 | /* notify cpufreq */ | ||
185 | cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); | ||
186 | |||
187 | /* | ||
188 | * First we write the target state's 'control' value to the | ||
189 | * control_register. | ||
190 | */ | ||
191 | |||
192 | value = (u32) data->acpi_data.states[state].control; | ||
193 | |||
194 | dprintk("Transitioning to state: 0x%08x\n", value); | ||
195 | |||
196 | ret = processor_set_pstate(value); | ||
197 | if (ret) { | ||
198 | unsigned int tmp = cpufreq_freqs.new; | ||
199 | cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); | ||
200 | cpufreq_freqs.new = cpufreq_freqs.old; | ||
201 | cpufreq_freqs.old = tmp; | ||
202 | cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); | ||
203 | cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); | ||
204 | printk(KERN_WARNING "Transition failed with error %d\n", ret); | ||
205 | retval = -ENODEV; | ||
206 | goto migrate_end; | ||
207 | } | ||
208 | |||
209 | cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); | ||
210 | |||
211 | data->acpi_data.state = state; | ||
212 | |||
213 | retval = 0; | ||
214 | |||
215 | migrate_end: | ||
216 | set_cpus_allowed(current, saved_mask); | ||
217 | return (retval); | ||
218 | } | ||
219 | |||
220 | |||
221 | static unsigned int | ||
222 | acpi_cpufreq_get ( | ||
223 | unsigned int cpu) | ||
224 | { | ||
225 | struct cpufreq_acpi_io *data = acpi_io_data[cpu]; | ||
226 | |||
227 | dprintk("acpi_cpufreq_get\n"); | ||
228 | |||
229 | return processor_get_freq(data, cpu); | ||
230 | } | ||
231 | |||
232 | |||
233 | static int | ||
234 | acpi_cpufreq_target ( | ||
235 | struct cpufreq_policy *policy, | ||
236 | unsigned int target_freq, | ||
237 | unsigned int relation) | ||
238 | { | ||
239 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
240 | unsigned int next_state = 0; | ||
241 | unsigned int result = 0; | ||
242 | |||
243 | dprintk("acpi_cpufreq_target\n"); | ||
244 | |||
245 | result = cpufreq_frequency_table_target(policy, | ||
246 | data->freq_table, target_freq, relation, &next_state); | ||
247 | if (result) | ||
248 | return (result); | ||
249 | |||
250 | result = processor_set_freq(data, policy->cpu, next_state); | ||
251 | |||
252 | return (result); | ||
253 | } | ||
254 | |||
255 | |||
256 | static int | ||
257 | acpi_cpufreq_verify ( | ||
258 | struct cpufreq_policy *policy) | ||
259 | { | ||
260 | unsigned int result = 0; | ||
261 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
262 | |||
263 | dprintk("acpi_cpufreq_verify\n"); | ||
264 | |||
265 | result = cpufreq_frequency_table_verify(policy, | ||
266 | data->freq_table); | ||
267 | |||
268 | return (result); | ||
269 | } | ||
270 | |||
271 | |||
272 | /* | ||
273 | * processor_init_pdc - let BIOS know about the SMP capabilities | ||
274 | * of this driver | ||
275 | * @perf: processor-specific acpi_io_data struct | ||
276 | * @cpu: CPU being initialized | ||
277 | * | ||
278 | * To avoid issues with legacy OSes, some BIOSes need to be informed of | ||
279 | * the SMP capabilities of OS P-state driver. Here we set the bits in _PDC | ||
280 | * accordingly. The actual call to _PDC is done in drivers/acpi/processor.c | ||
281 | */ | ||
282 | static void | ||
283 | processor_init_pdc ( | ||
284 | struct acpi_processor_performance *perf, | ||
285 | unsigned int cpu, | ||
286 | struct acpi_object_list *obj_list | ||
287 | ) | ||
288 | { | ||
289 | union acpi_object *obj; | ||
290 | u32 *buf; | ||
291 | |||
292 | dprintk("processor_init_pdc\n"); | ||
293 | |||
294 | perf->pdc = NULL; | ||
295 | /* Initialize pdc. It will be used later. */ | ||
296 | if (!obj_list) | ||
297 | return; | ||
298 | |||
299 | if (!(obj_list->count && obj_list->pointer)) | ||
300 | return; | ||
301 | |||
302 | obj = obj_list->pointer; | ||
303 | if ((obj->buffer.length == 12) && obj->buffer.pointer) { | ||
304 | buf = (u32 *)obj->buffer.pointer; | ||
305 | buf[0] = ACPI_PDC_REVISION_ID; | ||
306 | buf[1] = 1; | ||
307 | buf[2] = ACPI_PDC_EST_CAPABILITY_SMP; | ||
308 | perf->pdc = obj_list; | ||
309 | } | ||
310 | return; | ||
311 | } | ||
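The 12-byte buffer that processor_init_pdc() fills in has a fixed three-word layout; the sketch below only restates what the code above writes, it adds no new requirements:

	/* _PDC input buffer as initialized above */
	u32 pdc_buf[3] = {
		ACPI_PDC_REVISION_ID,		/* interface revision */
		1,				/* number of capability words */
		ACPI_PDC_EST_CAPABILITY_SMP,	/* capability bits */
	};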
312 | |||
313 | |||
314 | static int | ||
315 | acpi_cpufreq_cpu_init ( | ||
316 | struct cpufreq_policy *policy) | ||
317 | { | ||
318 | unsigned int i; | ||
319 | unsigned int cpu = policy->cpu; | ||
320 | struct cpufreq_acpi_io *data; | ||
321 | unsigned int result = 0; | ||
322 | |||
323 | union acpi_object arg0 = {ACPI_TYPE_BUFFER}; | ||
324 | u32 arg0_buf[3]; | ||
325 | struct acpi_object_list arg_list = {1, &arg0}; | ||
326 | |||
327 | dprintk("acpi_cpufreq_cpu_init\n"); | ||
328 | /* setup arg_list for _PDC settings */ | ||
329 | arg0.buffer.length = 12; | ||
330 | arg0.buffer.pointer = (u8 *) arg0_buf; | ||
331 | |||
332 | data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); | ||
333 | if (!data) | ||
334 | return (-ENOMEM); | ||
335 | |||
336 | memset(data, 0, sizeof(struct cpufreq_acpi_io)); | ||
337 | |||
338 | acpi_io_data[cpu] = data; | ||
339 | |||
340 | processor_init_pdc(&data->acpi_data, cpu, &arg_list); | ||
341 | result = acpi_processor_register_performance(&data->acpi_data, cpu); | ||
342 | data->acpi_data.pdc = NULL; | ||
343 | |||
344 | if (result) | ||
345 | goto err_free; | ||
346 | |||
347 | /* capability check */ | ||
348 | if (data->acpi_data.state_count <= 1) { | ||
349 | dprintk("No P-States\n"); | ||
350 | result = -ENODEV; | ||
351 | goto err_unreg; | ||
352 | } | ||
353 | |||
354 | if ((data->acpi_data.control_register.space_id != | ||
355 | ACPI_ADR_SPACE_FIXED_HARDWARE) || | ||
356 | (data->acpi_data.status_register.space_id != | ||
357 | ACPI_ADR_SPACE_FIXED_HARDWARE)) { | ||
358 | dprintk("Unsupported address space [%d, %d]\n", | ||
359 | (u32) (data->acpi_data.control_register.space_id), | ||
360 | (u32) (data->acpi_data.status_register.space_id)); | ||
361 | result = -ENODEV; | ||
362 | goto err_unreg; | ||
363 | } | ||
364 | |||
365 | /* alloc freq_table */ | ||
366 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * | ||
367 | (data->acpi_data.state_count + 1), | ||
368 | GFP_KERNEL); | ||
369 | if (!data->freq_table) { | ||
370 | result = -ENOMEM; | ||
371 | goto err_unreg; | ||
372 | } | ||
373 | |||
374 | /* detect transition latency */ | ||
375 | policy->cpuinfo.transition_latency = 0; | ||
376 | for (i=0; i<data->acpi_data.state_count; i++) { | ||
377 | if ((data->acpi_data.states[i].transition_latency * 1000) > | ||
378 | policy->cpuinfo.transition_latency) { | ||
379 | policy->cpuinfo.transition_latency = | ||
380 | data->acpi_data.states[i].transition_latency * 1000; | ||
381 | } | ||
382 | } | ||
383 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | ||
384 | |||
385 | policy->cur = processor_get_freq(data, policy->cpu); | ||
386 | |||
387 | /* table init */ | ||
388 | for (i = 0; i <= data->acpi_data.state_count; i++) | ||
389 | { | ||
390 | data->freq_table[i].index = i; | ||
391 | if (i < data->acpi_data.state_count) { | ||
392 | data->freq_table[i].frequency = | ||
393 | data->acpi_data.states[i].core_frequency * 1000; | ||
394 | } else { | ||
395 | data->freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
396 | } | ||
397 | } | ||
398 | |||
399 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | ||
400 | if (result) { | ||
401 | goto err_freqfree; | ||
402 | } | ||
403 | |||
404 | /* notify BIOS that we exist */ | ||
405 | acpi_processor_notify_smm(THIS_MODULE); | ||
406 | |||
407 | printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " | ||
408 | "activated.\n", cpu); | ||
409 | |||
410 | for (i = 0; i < data->acpi_data.state_count; i++) | ||
411 | dprintk(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", | ||
412 | (i == data->acpi_data.state?'*':' '), i, | ||
413 | (u32) data->acpi_data.states[i].core_frequency, | ||
414 | (u32) data->acpi_data.states[i].power, | ||
415 | (u32) data->acpi_data.states[i].transition_latency, | ||
416 | (u32) data->acpi_data.states[i].bus_master_latency, | ||
417 | (u32) data->acpi_data.states[i].status, | ||
418 | (u32) data->acpi_data.states[i].control); | ||
419 | |||
420 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | ||
421 | |||
422 | /* the first call to ->target() should result in us actually | ||
423 | * writing something to the appropriate registers. */ | ||
424 | data->resume = 1; | ||
425 | |||
426 | return (result); | ||
427 | |||
428 | err_freqfree: | ||
429 | kfree(data->freq_table); | ||
430 | err_unreg: | ||
431 | acpi_processor_unregister_performance(&data->acpi_data, cpu); | ||
432 | err_free: | ||
433 | kfree(data); | ||
434 | acpi_io_data[cpu] = NULL; | ||
435 | |||
436 | return (result); | ||
437 | } | ||
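The table built in acpi_cpufreq_cpu_init() is one entry longer than state_count because the cpufreq core walks it up to a CPUFREQ_TABLE_END sentinel instead of taking an explicit length. An illustrative walker (a sketch, not the core's actual loop):

	static void walk_freq_table(struct cpufreq_frequency_table *table)
	{
		unsigned int i;

		for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
			if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
				continue;	/* skip unusable entries */
			printk(KERN_DEBUG "P%u: %u kHz\n",
			       table[i].index, table[i].frequency);
		}
	}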
438 | |||
439 | |||
440 | static int | ||
441 | acpi_cpufreq_cpu_exit ( | ||
442 | struct cpufreq_policy *policy) | ||
443 | { | ||
444 | struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; | ||
445 | |||
446 | dprintk("acpi_cpufreq_cpu_exit\n"); | ||
447 | |||
448 | if (data) { | ||
449 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
450 | acpi_io_data[policy->cpu] = NULL; | ||
451 | acpi_processor_unregister_performance(&data->acpi_data, | ||
452 | policy->cpu); | ||
453 | kfree(data); | ||
454 | } | ||
455 | |||
456 | return (0); | ||
457 | } | ||
458 | |||
459 | |||
460 | static struct freq_attr* acpi_cpufreq_attr[] = { | ||
461 | &cpufreq_freq_attr_scaling_available_freqs, | ||
462 | NULL, | ||
463 | }; | ||
464 | |||
465 | |||
466 | static struct cpufreq_driver acpi_cpufreq_driver = { | ||
467 | .verify = acpi_cpufreq_verify, | ||
468 | .target = acpi_cpufreq_target, | ||
469 | .get = acpi_cpufreq_get, | ||
470 | .init = acpi_cpufreq_cpu_init, | ||
471 | .exit = acpi_cpufreq_cpu_exit, | ||
472 | .name = "acpi-cpufreq", | ||
473 | .owner = THIS_MODULE, | ||
474 | .attr = acpi_cpufreq_attr, | ||
475 | }; | ||
476 | |||
477 | |||
478 | static int __init | ||
479 | acpi_cpufreq_init (void) | ||
480 | { | ||
481 | dprintk("acpi_cpufreq_init\n"); | ||
482 | |||
483 | return cpufreq_register_driver(&acpi_cpufreq_driver); | ||
484 | } | ||
485 | |||
486 | |||
487 | static void __exit | ||
488 | acpi_cpufreq_exit (void) | ||
489 | { | ||
490 | dprintk("acpi_cpufreq_exit\n"); | ||
491 | |||
492 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | ||
493 | return; | ||
494 | } | ||
495 | |||
496 | |||
497 | late_initcall(acpi_cpufreq_init); | ||
498 | module_exit(acpi_cpufreq_exit); | ||
499 | |||
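For orientation: a governor's frequency request reaches acpi_cpufreq_target() through the cpufreq core, and cpufreq_frequency_table_target() converts the request plus a relation into a table index. A hedged sketch of a direct request, mainly to show the parameter semantics (the kHz value is arbitrary):

	/* CPUFREQ_RELATION_L: lowest frequency at or above the target;
	 * CPUFREQ_RELATION_H: highest frequency at or below it. */
	static int request_800mhz(struct cpufreq_policy *policy)
	{
		return cpufreq_driver_target(policy, 800000 /* kHz */,
					     CPUFREQ_RELATION_L);
	}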
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c deleted file mode 100644 index bbb8efe126b7..000000000000 --- a/arch/ia64/kernel/domain.c +++ /dev/null | |||
@@ -1,396 +0,0 @@ | |||
1 | /* | ||
2 | * arch/ia64/kernel/domain.c | ||
3 | * Architecture specific sched-domains builder. | ||
4 | * | ||
5 | * Copyright (C) 2004 Jesse Barnes | ||
6 | * Copyright (C) 2004 Silicon Graphics, Inc. | ||
7 | */ | ||
8 | |||
9 | #include <linux/sched.h> | ||
10 | #include <linux/percpu.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/topology.h> | ||
15 | #include <linux/nodemask.h> | ||
16 | |||
17 | #define SD_NODES_PER_DOMAIN 16 | ||
18 | |||
19 | #ifdef CONFIG_NUMA | ||
20 | /** | ||
21 | * find_next_best_node - find the next node to include in a sched_domain | ||
22 | * @node: node whose sched_domain we're building | ||
23 | * @used_nodes: nodes already in the sched_domain | ||
24 | * | ||
25 | * Find the next node to include in a given scheduling domain. Simply | ||
26 | * finds the closest node not already in the @used_nodes map. | ||
27 | * | ||
28 | * Should use nodemask_t. | ||
29 | */ | ||
30 | static int find_next_best_node(int node, unsigned long *used_nodes) | ||
31 | { | ||
32 | int i, n, val, min_val, best_node = 0; | ||
33 | |||
34 | min_val = INT_MAX; | ||
35 | |||
36 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
37 | /* Start at @node */ | ||
38 | n = (node + i) % MAX_NUMNODES; | ||
39 | |||
40 | if (!nr_cpus_node(n)) | ||
41 | continue; | ||
42 | |||
43 | /* Skip already used nodes */ | ||
44 | if (test_bit(n, used_nodes)) | ||
45 | continue; | ||
46 | |||
47 | /* Simple min distance search */ | ||
48 | val = node_distance(node, n); | ||
49 | |||
50 | if (val < min_val) { | ||
51 | min_val = val; | ||
52 | best_node = n; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | set_bit(best_node, used_nodes); | ||
57 | return best_node; | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * sched_domain_node_span - get a cpumask for a node's sched_domain | ||
62 | * @node: node whose cpumask we're constructing | ||
63 | * @size: number of nodes to include in this span | ||
64 | * | ||
65 | * Given a node, construct a good cpumask for its sched_domain to span. It | ||
66 | * should be one that prevents unnecessary balancing, but also spreads tasks | ||
67 | * out optimally. | ||
68 | */ | ||
69 | static cpumask_t sched_domain_node_span(int node) | ||
70 | { | ||
71 | int i; | ||
72 | cpumask_t span, nodemask; | ||
73 | DECLARE_BITMAP(used_nodes, MAX_NUMNODES); | ||
74 | |||
75 | cpus_clear(span); | ||
76 | bitmap_zero(used_nodes, MAX_NUMNODES); | ||
77 | |||
78 | nodemask = node_to_cpumask(node); | ||
79 | cpus_or(span, span, nodemask); | ||
80 | set_bit(node, used_nodes); | ||
81 | |||
82 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | ||
83 | int next_node = find_next_best_node(node, used_nodes); | ||
84 | nodemask = node_to_cpumask(next_node); | ||
85 | cpus_or(span, span, nodemask); | ||
86 | } | ||
87 | |||
88 | return span; | ||
89 | } | ||
90 | #endif | ||
91 | |||
92 | /* | ||
93 | * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we | ||
94 | * can switch it on easily if needed. | ||
95 | */ | ||
96 | #ifdef CONFIG_SCHED_SMT | ||
97 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | ||
98 | static struct sched_group sched_group_cpus[NR_CPUS]; | ||
99 | static int cpu_to_cpu_group(int cpu) | ||
100 | { | ||
101 | return cpu; | ||
102 | } | ||
103 | #endif | ||
104 | |||
105 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | ||
106 | static struct sched_group sched_group_phys[NR_CPUS]; | ||
107 | static int cpu_to_phys_group(int cpu) | ||
108 | { | ||
109 | #ifdef CONFIG_SCHED_SMT | ||
110 | return first_cpu(cpu_sibling_map[cpu]); | ||
111 | #else | ||
112 | return cpu; | ||
113 | #endif | ||
114 | } | ||
115 | |||
116 | #ifdef CONFIG_NUMA | ||
117 | /* | ||
118 | * The init_sched_build_groups can't handle what we want to do with node | ||
119 | * groups, so roll our own. Now each node has its own list of groups which | ||
120 | * gets dynamically allocated. | ||
121 | */ | ||
122 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | ||
123 | static struct sched_group *sched_group_nodes[MAX_NUMNODES]; | ||
124 | |||
125 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | ||
126 | static struct sched_group sched_group_allnodes[MAX_NUMNODES]; | ||
127 | |||
128 | static int cpu_to_allnodes_group(int cpu) | ||
129 | { | ||
130 | return cpu_to_node(cpu); | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | /* | ||
135 | * Build sched domains for a given set of cpus and attach the sched domains | ||
136 | * to the individual cpus | ||
137 | */ | ||
138 | void build_sched_domains(const cpumask_t *cpu_map) | ||
139 | { | ||
140 | int i; | ||
141 | |||
142 | /* | ||
143 | * Set up domains for cpus specified by the cpu_map. | ||
144 | */ | ||
145 | for_each_cpu_mask(i, *cpu_map) { | ||
146 | int group; | ||
147 | struct sched_domain *sd = NULL, *p; | ||
148 | cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); | ||
149 | |||
150 | cpus_and(nodemask, nodemask, *cpu_map); | ||
151 | |||
152 | #ifdef CONFIG_NUMA | ||
153 | if (num_online_cpus() | ||
154 | > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { | ||
155 | sd = &per_cpu(allnodes_domains, i); | ||
156 | *sd = SD_ALLNODES_INIT; | ||
157 | sd->span = *cpu_map; | ||
158 | group = cpu_to_allnodes_group(i); | ||
159 | sd->groups = &sched_group_allnodes[group]; | ||
160 | p = sd; | ||
161 | } else | ||
162 | p = NULL; | ||
163 | |||
164 | sd = &per_cpu(node_domains, i); | ||
165 | *sd = SD_NODE_INIT; | ||
166 | sd->span = sched_domain_node_span(cpu_to_node(i)); | ||
167 | sd->parent = p; | ||
168 | cpus_and(sd->span, sd->span, *cpu_map); | ||
169 | #endif | ||
170 | |||
171 | p = sd; | ||
172 | sd = &per_cpu(phys_domains, i); | ||
173 | group = cpu_to_phys_group(i); | ||
174 | *sd = SD_CPU_INIT; | ||
175 | sd->span = nodemask; | ||
176 | sd->parent = p; | ||
177 | sd->groups = &sched_group_phys[group]; | ||
178 | |||
179 | #ifdef CONFIG_SCHED_SMT | ||
180 | p = sd; | ||
181 | sd = &per_cpu(cpu_domains, i); | ||
182 | group = cpu_to_cpu_group(i); | ||
183 | *sd = SD_SIBLING_INIT; | ||
184 | sd->span = cpu_sibling_map[i]; | ||
185 | cpus_and(sd->span, sd->span, *cpu_map); | ||
186 | sd->parent = p; | ||
187 | sd->groups = &sched_group_cpus[group]; | ||
188 | #endif | ||
189 | } | ||
190 | |||
191 | #ifdef CONFIG_SCHED_SMT | ||
192 | /* Set up CPU (sibling) groups */ | ||
193 | for_each_cpu_mask(i, *cpu_map) { | ||
194 | cpumask_t this_sibling_map = cpu_sibling_map[i]; | ||
195 | cpus_and(this_sibling_map, this_sibling_map, *cpu_map); | ||
196 | if (i != first_cpu(this_sibling_map)) | ||
197 | continue; | ||
198 | |||
199 | init_sched_build_groups(sched_group_cpus, this_sibling_map, | ||
200 | &cpu_to_cpu_group); | ||
201 | } | ||
202 | #endif | ||
203 | |||
204 | /* Set up physical groups */ | ||
205 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
206 | cpumask_t nodemask = node_to_cpumask(i); | ||
207 | |||
208 | cpus_and(nodemask, nodemask, *cpu_map); | ||
209 | if (cpus_empty(nodemask)) | ||
210 | continue; | ||
211 | |||
212 | init_sched_build_groups(sched_group_phys, nodemask, | ||
213 | &cpu_to_phys_group); | ||
214 | } | ||
215 | |||
216 | #ifdef CONFIG_NUMA | ||
217 | init_sched_build_groups(sched_group_allnodes, *cpu_map, | ||
218 | &cpu_to_allnodes_group); | ||
219 | |||
220 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
221 | /* Set up node groups */ | ||
222 | struct sched_group *sg, *prev; | ||
223 | cpumask_t nodemask = node_to_cpumask(i); | ||
224 | cpumask_t domainspan; | ||
225 | cpumask_t covered = CPU_MASK_NONE; | ||
226 | int j; | ||
227 | |||
228 | cpus_and(nodemask, nodemask, *cpu_map); | ||
229 | if (cpus_empty(nodemask)) | ||
230 | continue; | ||
231 | |||
232 | domainspan = sched_domain_node_span(i); | ||
233 | cpus_and(domainspan, domainspan, *cpu_map); | ||
234 | |||
235 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | ||
236 | sched_group_nodes[i] = sg; | ||
237 | for_each_cpu_mask(j, nodemask) { | ||
238 | struct sched_domain *sd; | ||
239 | sd = &per_cpu(node_domains, j); | ||
240 | sd->groups = sg; | ||
241 | if (sd->groups == NULL) { | ||
242 | /* Turn off balancing if we have no groups */ | ||
243 | sd->flags = 0; | ||
244 | } | ||
245 | } | ||
246 | if (!sg) { | ||
247 | printk(KERN_WARNING | ||
248 | "Can not alloc domain group for node %d\n", i); | ||
249 | continue; | ||
250 | } | ||
251 | sg->cpu_power = 0; | ||
252 | sg->cpumask = nodemask; | ||
253 | cpus_or(covered, covered, nodemask); | ||
254 | prev = sg; | ||
255 | |||
256 | for (j = 0; j < MAX_NUMNODES; j++) { | ||
257 | cpumask_t tmp, notcovered; | ||
258 | int n = (i + j) % MAX_NUMNODES; | ||
259 | |||
260 | cpus_complement(notcovered, covered); | ||
261 | cpus_and(tmp, notcovered, *cpu_map); | ||
262 | cpus_and(tmp, tmp, domainspan); | ||
263 | if (cpus_empty(tmp)) | ||
264 | break; | ||
265 | |||
266 | nodemask = node_to_cpumask(n); | ||
267 | cpus_and(tmp, tmp, nodemask); | ||
268 | if (cpus_empty(tmp)) | ||
269 | continue; | ||
270 | |||
271 | sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); | ||
272 | if (!sg) { | ||
273 | printk(KERN_WARNING | ||
274 | "Can not alloc domain group for node %d\n", j); | ||
275 | break; | ||
276 | } | ||
277 | sg->cpu_power = 0; | ||
278 | sg->cpumask = tmp; | ||
279 | cpus_or(covered, covered, tmp); | ||
280 | prev->next = sg; | ||
281 | prev = sg; | ||
282 | } | ||
283 | prev->next = sched_group_nodes[i]; | ||
284 | } | ||
285 | #endif | ||
286 | |||
287 | /* Calculate CPU power for physical packages and nodes */ | ||
288 | for_each_cpu_mask(i, *cpu_map) { | ||
289 | int power; | ||
290 | struct sched_domain *sd; | ||
291 | #ifdef CONFIG_SCHED_SMT | ||
292 | sd = &per_cpu(cpu_domains, i); | ||
293 | power = SCHED_LOAD_SCALE; | ||
294 | sd->groups->cpu_power = power; | ||
295 | #endif | ||
296 | |||
297 | sd = &per_cpu(phys_domains, i); | ||
298 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
299 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
300 | sd->groups->cpu_power = power; | ||
301 | |||
302 | #ifdef CONFIG_NUMA | ||
303 | sd = &per_cpu(allnodes_domains, i); | ||
304 | if (sd->groups) { | ||
305 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
306 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
307 | sd->groups->cpu_power = power; | ||
308 | } | ||
309 | #endif | ||
310 | } | ||
311 | |||
312 | #ifdef CONFIG_NUMA | ||
313 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
314 | struct sched_group *sg = sched_group_nodes[i]; | ||
315 | int j; | ||
316 | |||
317 | if (sg == NULL) | ||
318 | continue; | ||
319 | next_sg: | ||
320 | for_each_cpu_mask(j, sg->cpumask) { | ||
321 | struct sched_domain *sd; | ||
322 | int power; | ||
323 | |||
324 | sd = &per_cpu(phys_domains, j); | ||
325 | if (j != first_cpu(sd->groups->cpumask)) { | ||
326 | /* | ||
327 | * Only add "power" once for each | ||
328 | * physical package. | ||
329 | */ | ||
330 | continue; | ||
331 | } | ||
332 | power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * | ||
333 | (cpus_weight(sd->groups->cpumask)-1) / 10; | ||
334 | |||
335 | sg->cpu_power += power; | ||
336 | } | ||
337 | sg = sg->next; | ||
338 | if (sg != sched_group_nodes[i]) | ||
339 | goto next_sg; | ||
340 | } | ||
341 | #endif | ||
342 | |||
343 | /* Attach the domains */ | ||
344 | for_each_cpu_mask(i, *cpu_map) { | ||
345 | struct sched_domain *sd; | ||
346 | #ifdef CONFIG_SCHED_SMT | ||
347 | sd = &per_cpu(cpu_domains, i); | ||
348 | #else | ||
349 | sd = &per_cpu(phys_domains, i); | ||
350 | #endif | ||
351 | cpu_attach_domain(sd, i); | ||
352 | } | ||
353 | } | ||
354 | /* | ||
355 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | ||
356 | */ | ||
357 | void arch_init_sched_domains(const cpumask_t *cpu_map) | ||
358 | { | ||
359 | cpumask_t cpu_default_map; | ||
360 | |||
361 | /* | ||
362 | * Setup mask for cpus without special case scheduling requirements. | ||
363 | * For now this just excludes isolated cpus, but could be used to | ||
364 | * exclude other special cases in the future. | ||
365 | */ | ||
366 | cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); | ||
367 | |||
368 | build_sched_domains(&cpu_default_map); | ||
369 | } | ||
370 | |||
371 | void arch_destroy_sched_domains(const cpumask_t *cpu_map) | ||
372 | { | ||
373 | #ifdef CONFIG_NUMA | ||
374 | int i; | ||
375 | for (i = 0; i < MAX_NUMNODES; i++) { | ||
376 | cpumask_t nodemask = node_to_cpumask(i); | ||
377 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | ||
378 | |||
379 | cpus_and(nodemask, nodemask, *cpu_map); | ||
380 | if (cpus_empty(nodemask)) | ||
381 | continue; | ||
382 | |||
383 | if (sg == NULL) | ||
384 | continue; | ||
385 | sg = sg->next; | ||
386 | next_sg: | ||
387 | oldsg = sg; | ||
388 | sg = sg->next; | ||
389 | kfree(oldsg); | ||
390 | if (oldsg != sched_group_nodes[i]) | ||
391 | goto next_sg; | ||
392 | sched_group_nodes[i] = NULL; | ||
393 | } | ||
394 | #endif | ||
395 | } | ||
396 | |||
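find_next_best_node() in the deleted domain.c concedes in its own comment that it "should use nodemask_t". A sketch of what that variant might look like, assuming only the bookkeeping type changes:

	static int find_next_best_node(int node, nodemask_t *used_nodes)
	{
		int i, n, val, min_val = INT_MAX, best_node = 0;

		for (i = 0; i < MAX_NUMNODES; i++) {
			/* start at @node and wrap around */
			n = (node + i) % MAX_NUMNODES;

			if (!nr_cpus_node(n) || node_isset(n, *used_nodes))
				continue;

			/* simple min-distance search */
			val = node_distance(node, n);
			if (val < min_val) {
				min_val = val;
				best_node = n;
			}
		}

		node_set(best_node, *used_nodes);
		return best_node;
	}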
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 9be53e1ea404..ba0b6a1f429f 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/cache.h> | 37 | #include <asm/cache.h> |
38 | #include <asm/errno.h> | 38 | #include <asm/errno.h> |
39 | #include <asm/kregs.h> | 39 | #include <asm/kregs.h> |
40 | #include <asm/offsets.h> | 40 | #include <asm/asm-offsets.h> |
41 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
42 | #include <asm/percpu.h> | 42 | #include <asm/percpu.h> |
43 | #include <asm/processor.h> | 43 | #include <asm/processor.h> |
@@ -204,9 +204,6 @@ GLOBAL_ENTRY(ia64_switch_to) | |||
204 | (p6) br.cond.dpnt .map | 204 | (p6) br.cond.dpnt .map |
205 | ;; | 205 | ;; |
206 | .done: | 206 | .done: |
207 | (p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!! | ||
208 | ;; | ||
209 | (p6) srlz.d | ||
210 | ld8 sp=[r21] // load kernel stack pointer of new task | 207 | ld8 sp=[r21] // load kernel stack pointer of new task |
211 | mov IA64_KR(CURRENT)=in0 // update "current" application register | 208 | mov IA64_KR(CURRENT)=in0 // update "current" application register |
212 | mov r8=r13 // return pointer to previously running task | 209 | mov r8=r13 // return pointer to previously running task |
@@ -234,6 +231,9 @@ GLOBAL_ENTRY(ia64_switch_to) | |||
234 | mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... | 231 | mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped... |
235 | ;; | 232 | ;; |
236 | itr.d dtr[r25]=r23 // wire in new mapping... | 233 | itr.d dtr[r25]=r23 // wire in new mapping... |
234 | ssm psr.ic // reenable the psr.ic bit | ||
235 | ;; | ||
236 | srlz.d | ||
237 | br.cond.sptk .done | 237 | br.cond.sptk .done |
238 | END(ia64_switch_to) | 238 | END(ia64_switch_to) |
239 | 239 | ||
@@ -470,6 +470,29 @@ ENTRY(load_switch_stack) | |||
470 | br.cond.sptk.many b7 | 470 | br.cond.sptk.many b7 |
471 | END(load_switch_stack) | 471 | END(load_switch_stack) |
472 | 472 | ||
473 | GLOBAL_ENTRY(prefetch_stack) | ||
474 | add r14 = -IA64_SWITCH_STACK_SIZE, sp | ||
475 | add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0 | ||
476 | ;; | ||
477 | ld8 r16 = [r15] // load next's stack pointer | ||
478 | lfetch.fault.excl [r14], 128 | ||
479 | ;; | ||
480 | lfetch.fault.excl [r14], 128 | ||
481 | lfetch.fault [r16], 128 | ||
482 | ;; | ||
483 | lfetch.fault.excl [r14], 128 | ||
484 | lfetch.fault [r16], 128 | ||
485 | ;; | ||
486 | lfetch.fault.excl [r14], 128 | ||
487 | lfetch.fault [r16], 128 | ||
488 | ;; | ||
489 | lfetch.fault.excl [r14], 128 | ||
490 | lfetch.fault [r16], 128 | ||
491 | ;; | ||
492 | lfetch.fault [r16], 128 | ||
493 | br.ret.sptk.many rp | ||
494 | END(prefetch_stack) | ||
495 | |||
473 | GLOBAL_ENTRY(execve) | 496 | GLOBAL_ENTRY(execve) |
474 | mov r15=__NR_execve // put syscall number in place | 497 | mov r15=__NR_execve // put syscall number in place |
475 | break __BREAK_SYSCALL | 498 | break __BREAK_SYSCALL |
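The new prefetch_stack routine above warms the outgoing switch stack with exclusive prefetches (it is about to be written) and the incoming task's kernel stack with plain ones, 128 bytes per line. A rough C rendering of the idea, assuming the generic <linux/prefetch.h> helpers; the real routine stays in assembly so it costs no stack frame on the context-switch path:

	#include <linux/prefetch.h>

	static inline void prefetch_stacks(void *switch_stack, void *next_ksp,
					   int lines)
	{
		int i;

		for (i = 0; i < lines; i++) {
			/* outgoing stack will be stored to: exclusive */
			prefetchw((char *)switch_stack + i * 128);
			/* incoming stack is only read ahead of the switch */
			prefetch((char *)next_ksp + i * 128);
		}
	}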
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 7d7684a369d3..2ddbac6f4999 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/asmmacro.h> | 15 | #include <asm/asmmacro.h> |
16 | #include <asm/errno.h> | 16 | #include <asm/errno.h> |
17 | #include <asm/offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <asm/percpu.h> | 18 | #include <asm/percpu.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | #include <asm/sal.h> | 20 | #include <asm/sal.h> |
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index 86948ce63e43..86064ca98952 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #include <asm/asmmacro.h> | 11 | #include <asm/asmmacro.h> |
12 | #include <asm/errno.h> | 12 | #include <asm/errno.h> |
13 | #include <asm/offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/sigcontext.h> | 14 | #include <asm/sigcontext.h> |
15 | #include <asm/system.h> | 15 | #include <asm/system.h> |
16 | #include <asm/unistd.h> | 16 | #include <asm/unistd.h> |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 8d3a9291b47f..bfe65b2e8621 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/fpu.h> | 25 | #include <asm/fpu.h> |
26 | #include <asm/kregs.h> | 26 | #include <asm/kregs.h> |
27 | #include <asm/mmu_context.h> | 27 | #include <asm/mmu_context.h> |
28 | #include <asm/offsets.h> | 28 | #include <asm/asm-offsets.h> |
29 | #include <asm/pal.h> | 29 | #include <asm/pal.h> |
30 | #include <asm/pgtable.h> | 30 | #include <asm/pgtable.h> |
31 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 7936b62f7a2e..574084f343fa 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -561,7 +561,7 @@ static inline int vector_is_shared (int vector) | |||
561 | return (iosapic_intr_info[vector].count > 1); | 561 | return (iosapic_intr_info[vector].count > 1); |
562 | } | 562 | } |
563 | 563 | ||
564 | static void | 564 | static int |
565 | register_intr (unsigned int gsi, int vector, unsigned char delivery, | 565 | register_intr (unsigned int gsi, int vector, unsigned char delivery, |
566 | unsigned long polarity, unsigned long trigger) | 566 | unsigned long polarity, unsigned long trigger) |
567 | { | 567 | { |
@@ -576,7 +576,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
576 | index = find_iosapic(gsi); | 576 | index = find_iosapic(gsi); |
577 | if (index < 0) { | 577 | if (index < 0) { |
578 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi); | 578 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __FUNCTION__, gsi); |
579 | return; | 579 | return -ENODEV; |
580 | } | 580 | } |
581 | 581 | ||
582 | iosapic_address = iosapic_lists[index].addr; | 582 | iosapic_address = iosapic_lists[index].addr; |
@@ -587,7 +587,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
587 | rte = iosapic_alloc_rte(); | 587 | rte = iosapic_alloc_rte(); |
588 | if (!rte) { | 588 | if (!rte) { |
589 | printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); | 589 | printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); |
590 | return; | 590 | return -ENOMEM; |
591 | } | 591 | } |
592 | 592 | ||
593 | rte_index = gsi - gsi_base; | 593 | rte_index = gsi - gsi_base; |
@@ -603,7 +603,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
603 | struct iosapic_intr_info *info = &iosapic_intr_info[vector]; | 603 | struct iosapic_intr_info *info = &iosapic_intr_info[vector]; |
604 | if (info->trigger != trigger || info->polarity != polarity) { | 604 | if (info->trigger != trigger || info->polarity != polarity) { |
605 | printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); | 605 | printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); |
606 | return; | 606 | return -EINVAL; |
607 | } | 607 | } |
608 | } | 608 | } |
609 | 609 | ||
@@ -623,6 +623,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
623 | __FUNCTION__, vector, idesc->handler->typename, irq_type->typename); | 623 | __FUNCTION__, vector, idesc->handler->typename, irq_type->typename); |
624 | idesc->handler = irq_type; | 624 | idesc->handler = irq_type; |
625 | } | 625 | } |
626 | return 0; | ||
626 | } | 627 | } |
627 | 628 | ||
628 | static unsigned int | 629 | static unsigned int |
@@ -710,7 +711,7 @@ int | |||
710 | iosapic_register_intr (unsigned int gsi, | 711 | iosapic_register_intr (unsigned int gsi, |
711 | unsigned long polarity, unsigned long trigger) | 712 | unsigned long polarity, unsigned long trigger) |
712 | { | 713 | { |
713 | int vector, mask = 1; | 714 | int vector, mask = 1, err; |
714 | unsigned int dest; | 715 | unsigned int dest; |
715 | unsigned long flags; | 716 | unsigned long flags; |
716 | struct iosapic_rte_info *rte; | 717 | struct iosapic_rte_info *rte; |
@@ -737,8 +738,8 @@ again: | |||
737 | vector = assign_irq_vector(AUTO_ASSIGN); | 738 | vector = assign_irq_vector(AUTO_ASSIGN); |
738 | if (vector < 0) { | 739 | if (vector < 0) { |
739 | vector = iosapic_find_sharable_vector(trigger, polarity); | 740 | vector = iosapic_find_sharable_vector(trigger, polarity); |
740 | if (vector < 0) | 741 | if (vector < 0) |
741 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | 742 | return -ENOSPC; |
742 | } | 743 | } |
743 | 744 | ||
744 | spin_lock_irqsave(&irq_descp(vector)->lock, flags); | 745 | spin_lock_irqsave(&irq_descp(vector)->lock, flags); |
@@ -753,8 +754,13 @@ again: | |||
753 | } | 754 | } |
754 | 755 | ||
755 | dest = get_target_cpu(gsi, vector); | 756 | dest = get_target_cpu(gsi, vector); |
756 | register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, | 757 | err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, |
757 | polarity, trigger); | 758 | polarity, trigger); |
759 | if (err < 0) { | ||
760 | spin_unlock(&iosapic_lock); | ||
761 | spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); | ||
762 | return err; | ||
763 | } | ||
758 | 764 | ||
759 | /* | 765 | /* |
760 | * If the vector is shared and already unmasked for | 766 | * If the vector is shared and already unmasked for |
@@ -776,7 +782,6 @@ again: | |||
776 | return vector; | 782 | return vector; |
777 | } | 783 | } |
778 | 784 | ||
779 | #ifdef CONFIG_ACPI_DEALLOCATE_IRQ | ||
780 | void | 785 | void |
781 | iosapic_unregister_intr (unsigned int gsi) | 786 | iosapic_unregister_intr (unsigned int gsi) |
782 | { | 787 | { |
@@ -859,7 +864,6 @@ iosapic_unregister_intr (unsigned int gsi) | |||
859 | spin_unlock(&iosapic_lock); | 864 | spin_unlock(&iosapic_lock); |
860 | spin_unlock_irqrestore(&idesc->lock, flags); | 865 | spin_unlock_irqrestore(&idesc->lock, flags); |
861 | } | 866 | } |
862 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | ||
863 | 867 | ||
864 | /* | 868 | /* |
865 | * ACPI calls this when it finds an entry for a platform interrupt. | 869 | * ACPI calls this when it finds an entry for a platform interrupt. |
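The new error path in iosapic_register_intr() drops iosapic_lock before the irq descriptor lock, the reverse of the order in which they were taken, which is the usual discipline for nested spinlocks. A generic sketch of the shape (all names here are placeholders):

	static int locked_op(spinlock_t *outer, spinlock_t *inner,
			     int (*op)(void))
	{
		unsigned long flags;
		int err;

		spin_lock_irqsave(outer, flags);
		spin_lock(inner);
		err = op();
		spin_unlock(inner);			/* inner lock first */
		spin_unlock_irqrestore(outer, flags);	/* then the outer */
		return err;
	}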
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 28f2aadc38d0..205d98028261 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -91,23 +91,8 @@ skip: | |||
91 | } | 91 | } |
92 | 92 | ||
93 | #ifdef CONFIG_SMP | 93 | #ifdef CONFIG_SMP |
94 | /* | ||
95 | * This is updated when the user sets irq affinity via /proc | ||
96 | */ | ||
97 | static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS]; | ||
98 | static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)]; | ||
99 | |||
100 | static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; | 94 | static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; |
101 | 95 | ||
102 | /* | ||
103 | * Arch specific routine for deferred write to iosapic rte to reprogram | ||
104 | * intr destination. | ||
105 | */ | ||
106 | void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) | ||
107 | { | ||
108 | pending_irq_cpumask[irq] = mask_val; | ||
109 | } | ||
110 | |||
111 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | 96 | void set_irq_affinity_info (unsigned int irq, int hwid, int redir) |
112 | { | 97 | { |
113 | cpumask_t mask = CPU_MASK_NONE; | 98 | cpumask_t mask = CPU_MASK_NONE; |
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | |||
116 | 101 | ||
117 | if (irq < NR_IRQS) { | 102 | if (irq < NR_IRQS) { |
118 | irq_affinity[irq] = mask; | 103 | irq_affinity[irq] = mask; |
104 | set_irq_info(irq, mask); | ||
119 | irq_redir[irq] = (char) (redir & 0xff); | 105 | irq_redir[irq] = (char) (redir & 0xff); |
120 | } | 106 | } |
121 | } | 107 | } |
122 | |||
123 | |||
124 | void move_irq(int irq) | ||
125 | { | ||
126 | /* note - we hold desc->lock */ | ||
127 | cpumask_t tmp; | ||
128 | irq_desc_t *desc = irq_descp(irq); | ||
129 | int redir = test_bit(irq, pending_irq_redir); | ||
130 | |||
131 | if (unlikely(!desc->handler->set_affinity)) | ||
132 | return; | ||
133 | |||
134 | if (!cpus_empty(pending_irq_cpumask[irq])) { | ||
135 | cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map); | ||
136 | if (unlikely(!cpus_empty(tmp))) { | ||
137 | desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0), | ||
138 | pending_irq_cpumask[irq]); | ||
139 | } | ||
140 | cpus_clear(pending_irq_cpumask[irq]); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | |||
145 | #endif /* CONFIG_SMP */ | 108 | #endif /* CONFIG_SMP */ |
146 | 109 | ||
147 | #ifdef CONFIG_HOTPLUG_CPU | 110 | #ifdef CONFIG_HOTPLUG_CPU |
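The deleted move_irq()/pending_irq_cpumask code implemented deferred affinity: writing /proc/irq/N/smp_affinity only records the requested mask, and the IOSAPIC RTE is reprogrammed later, from interrupt context where it is safe. The generic kernel IRQ code (CONFIG_GENERIC_PENDING_IRQ) now provides this; a sketch of the pattern itself, with hypothetical names:

	static cpumask_t pending_mask[NR_IRQS];	/* hypothetical bookkeeping */

	static void record_affinity(unsigned int irq, cpumask_t mask)
	{
		pending_mask[irq] = mask;	/* cheap: remember the request */
	}

	static void apply_affinity(unsigned int irq, irq_desc_t *desc)
	{
		cpumask_t tmp;

		/* runs with desc->lock held, from interrupt context */
		cpus_and(tmp, pending_mask[irq], cpu_online_map);
		if (!cpus_empty(tmp) && desc->handler->set_affinity)
			desc->handler->set_affinity(irq, tmp);
		cpus_clear(pending_mask[irq]);
	}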
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 3bb3a13c4047..c13ca0d49c4a 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <asm/break.h> | 44 | #include <asm/break.h> |
45 | #include <asm/ia32.h> | 45 | #include <asm/ia32.h> |
46 | #include <asm/kregs.h> | 46 | #include <asm/kregs.h> |
47 | #include <asm/offsets.h> | 47 | #include <asm/asm-offsets.h> |
48 | #include <asm/pgtable.h> | 48 | #include <asm/pgtable.h> |
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | #include <asm/ptrace.h> | 50 | #include <asm/ptrace.h> |
@@ -69,7 +69,6 @@ | |||
69 | # define DBG_FAULT(i) | 69 | # define DBG_FAULT(i) |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | #define MINSTATE_VIRT /* needed by minstate.h */ | ||
73 | #include "minstate.h" | 72 | #include "minstate.h" |
74 | 73 | ||
75 | #define FAULT(n) \ | 74 | #define FAULT(n) \ |
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S index b7fa3ccd2b0f..2323377e3695 100644 --- a/arch/ia64/kernel/jprobes.S +++ b/arch/ia64/kernel/jprobes.S | |||
@@ -49,6 +49,7 @@ | |||
49 | /* | 49 | /* |
50 | * void jprobe_break(void) | 50 | * void jprobe_break(void) |
51 | */ | 51 | */ |
52 | .section .kprobes.text, "ax" | ||
52 | ENTRY(jprobe_break) | 53 | ENTRY(jprobe_break) |
53 | break.m 0x80300 | 54 | break.m 0x80300 |
54 | END(jprobe_break) | 55 | END(jprobe_break) |
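[Moving jprobe_break into .kprobes.text pairs with the __kprobes annotations added to the C handlers below: everything that executes while servicing a probe is collected into one section, so the registration path can refuse probes on kprobes' own code, which would recurse. A sketch of the mechanism, assuming the attribute definition and the usual linker-script boundary symbols of kernels of this vintage:

	/* Place a function in the protected section. */
	#define __kprobes __attribute__((__section__(".kprobes.text")))

	/* Registration-time check (simplified); the boundary symbols are
	 * assumed to be provided by the linker script. */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_text(unsigned long addr)
	{
		return addr >= (unsigned long)__kprobes_text_start &&
		       addr <  (unsigned long)__kprobes_text_end;
	}
]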
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 884f5cd27d8a..471086b808a4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c | |||
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = { | |||
87 | * is IP relative instruction and update the kprobe | 87 | * is IP relative instruction and update the kprobe |
88 | * inst flag accordingly | 88 | * inst flag accordingly |
89 | */ | 89 | */ |
90 | static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, | 90 | static void __kprobes update_kprobe_inst_flag(uint template, uint slot, |
91 | unsigned long kprobe_inst, struct kprobe *p) | 91 | uint major_opcode, |
92 | unsigned long kprobe_inst, | ||
93 | struct kprobe *p) | ||
92 | { | 94 | { |
93 | p->ainsn.inst_flag = 0; | 95 | p->ainsn.inst_flag = 0; |
94 | p->ainsn.target_br_reg = 0; | 96 | p->ainsn.target_br_reg = 0; |
95 | 97 | ||
98 | /* Check for Break instruction | ||
99 | * Bits 37:40 Major opcode to be zero | ||
100 | * Bits 27:32 X6 to be zero | ||
101 | * Bits 33:35 X3 to be zero | ||
102 | */ | ||
103 | if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { | ||
104 | /* is a break instruction */ | ||
105 | p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; | ||
106 | return; | ||
107 | } | ||
108 | |||
96 | if (bundle_encoding[template][slot] == B) { | 109 | if (bundle_encoding[template][slot] == B) { |
97 | switch (major_opcode) { | 110 | switch (major_opcode) { |
98 | case INDIRECT_CALL_OPCODE: | 111 | case INDIRECT_CALL_OPCODE: |
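[The new early return recognizes probes placed on an instruction that is already a break. The test folds the X6 (bits 27:32) and X3 (bits 33:35) fields into a single 9-bit mask; factored out as a standalone sketch:

	/* A slot holds a break iff the major opcode (bits 37:40) is zero
	 * and bits 27..35 (X6 and X3 together) are zero; 0x1FF masks
	 * exactly those 9 bits. */
	static int is_break_slot(unsigned long inst, unsigned int major_opcode)
	{
		return major_opcode == 0 && ((inst >> 27) & 0x1FF) == 0;
	}
]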
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode | |||
126 | * Returns 0 if supported | 139 | * Returns 0 if supported |
127 | * Returns -EINVAL if unsupported | 140 | * Returns -EINVAL if unsupported |
128 | */ | 141 | */ |
129 | static int unsupported_inst(uint template, uint slot, uint major_opcode, | 142 | static int __kprobes unsupported_inst(uint template, uint slot, |
130 | unsigned long kprobe_inst, struct kprobe *p) | 143 | uint major_opcode, |
144 | unsigned long kprobe_inst, | ||
145 | struct kprobe *p) | ||
131 | { | 146 | { |
132 | unsigned long addr = (unsigned long)p->addr; | 147 | unsigned long addr = (unsigned long)p->addr; |
133 | 148 | ||
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode, | |||
168 | * on which we are inserting kprobe is cmp instruction | 183 | * on which we are inserting kprobe is cmp instruction |
169 | * with ctype as unc. | 184 | * with ctype as unc. |
170 | */ | 185 | */ |
171 | static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, | 186 | static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, |
172 | unsigned long kprobe_inst) | 187 | uint major_opcode, |
188 | unsigned long kprobe_inst) | ||
173 | { | 189 | { |
174 | cmp_inst_t cmp_inst; | 190 | cmp_inst_t cmp_inst; |
175 | uint ctype_unc = 0; | 191 | uint ctype_unc = 0; |
@@ -201,8 +217,10 @@ out: | |||
201 | * In this function we override the bundle with | 217 | * In this function we override the bundle with |
202 | * the break instruction at the given slot. | 218 | * the break instruction at the given slot. |
203 | */ | 219 | */ |
204 | static void prepare_break_inst(uint template, uint slot, uint major_opcode, | 220 | static void __kprobes prepare_break_inst(uint template, uint slot, |
205 | unsigned long kprobe_inst, struct kprobe *p) | 221 | uint major_opcode, |
222 | unsigned long kprobe_inst, | ||
223 | struct kprobe *p) | ||
206 | { | 224 | { |
207 | unsigned long break_inst = BREAK_INST; | 225 | unsigned long break_inst = BREAK_INST; |
208 | bundle_t *bundle = &p->ainsn.insn.bundle; | 226 | bundle_t *bundle = &p->ainsn.insn.bundle; |
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr) | |||
271 | && addr < (unsigned long)__end_ivt_text); | 289 | && addr < (unsigned long)__end_ivt_text); |
272 | } | 290 | } |
273 | 291 | ||
274 | static int valid_kprobe_addr(int template, int slot, unsigned long addr) | 292 | static int __kprobes valid_kprobe_addr(int template, int slot, |
293 | unsigned long addr) | ||
275 | { | 294 | { |
276 | if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { | 295 | if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { |
277 | printk(KERN_WARNING "Attempting to insert unaligned kprobe " | 296 | printk(KERN_WARNING "Attempting to insert unaligned kprobe " |
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void) | |||
323 | * - cleanup by marking the instance as unused | 342 | * - cleanup by marking the instance as unused |
324 | * - long jump back to the original return address | 343 | * - long jump back to the original return address |
325 | */ | 344 | */ |
326 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 345 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) |
327 | { | 346 | { |
328 | struct kretprobe_instance *ri = NULL; | 347 | struct kretprobe_instance *ri = NULL; |
329 | struct hlist_head *head; | 348 | struct hlist_head *head; |
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
381 | return 1; | 400 | return 1; |
382 | } | 401 | } |
383 | 402 | ||
384 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | 403 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, |
404 | struct pt_regs *regs) | ||
385 | { | 405 | { |
386 | struct kretprobe_instance *ri; | 406 | struct kretprobe_instance *ri; |
387 | 407 | ||
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | |||
399 | } | 419 | } |
400 | } | 420 | } |
401 | 421 | ||
402 | int arch_prepare_kprobe(struct kprobe *p) | 422 | int __kprobes arch_prepare_kprobe(struct kprobe *p) |
403 | { | 423 | { |
404 | unsigned long addr = (unsigned long) p->addr; | 424 | unsigned long addr = (unsigned long) p->addr; |
405 | unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); | 425 | unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); |
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p) | |||
430 | return 0; | 450 | return 0; |
431 | } | 451 | } |
432 | 452 | ||
433 | void arch_arm_kprobe(struct kprobe *p) | 453 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
434 | { | 454 | { |
435 | unsigned long addr = (unsigned long)p->addr; | 455 | unsigned long addr = (unsigned long)p->addr; |
436 | unsigned long arm_addr = addr & ~0xFULL; | 456 | unsigned long arm_addr = addr & ~0xFULL; |
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p) | |||
439 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); | 459 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); |
440 | } | 460 | } |
441 | 461 | ||
442 | void arch_disarm_kprobe(struct kprobe *p) | 462 | void __kprobes arch_disarm_kprobe(struct kprobe *p) |
443 | { | 463 | { |
444 | unsigned long addr = (unsigned long)p->addr; | 464 | unsigned long addr = (unsigned long)p->addr; |
445 | unsigned long arm_addr = addr & ~0xFULL; | 465 | unsigned long arm_addr = addr & ~0xFULL; |
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p) | |||
449 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); | 469 | flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); |
450 | } | 470 | } |
451 | 471 | ||
452 | void arch_remove_kprobe(struct kprobe *p) | 472 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
453 | { | 473 | { |
454 | } | 474 | } |
455 | 475 | ||
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p) | |||
461 | * to original stack address, handle the case where we need to fixup the | 481 | * to original stack address, handle the case where we need to fixup the |
462 | * relative IP address and/or fixup branch register. | 482 | * relative IP address and/or fixup branch register. |
463 | */ | 483 | */ |
464 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | 484 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) |
465 | { | 485 | { |
466 | unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; | 486 | unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; |
467 | unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; | 487 | unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; |
@@ -528,13 +548,16 @@ turn_ss_off: | |||
528 | ia64_psr(regs)->ss = 0; | 548 | ia64_psr(regs)->ss = 0; |
529 | } | 549 | } |
530 | 550 | ||
531 | static void prepare_ss(struct kprobe *p, struct pt_regs *regs) | 551 | static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) |
532 | { | 552 | { |
533 | unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; | 553 | unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; |
534 | unsigned long slot = (unsigned long)p->addr & 0xf; | 554 | unsigned long slot = (unsigned long)p->addr & 0xf; |
535 | 555 | ||
536 | /* Update instruction pointer (IIP) and slot number (IPSR.ri) */ | 556 | /* single step inline if break instruction */ |
537 | regs->cr_iip = bundle_addr & ~0xFULL; | 557 | if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) |
558 | regs->cr_iip = (unsigned long)p->addr & ~0xFULL; | ||
559 | else | ||
560 | regs->cr_iip = bundle_addr & ~0xFULL; | ||
538 | 561 | ||
539 | if (slot > 2) | 562 | if (slot > 2) |
540 | slot = 0; | 563 | slot = 0; |
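[prepare_ss() now single-steps a probed break instruction "inline", i.e. at its original address rather than from the copied bundle, so the break's own semantics fire and are caught by the KPROBE_HIT_SS path added below. The slot decode it relies on, as a self-contained sketch:

	/* IA-64 bundles are 16-byte aligned; kprobes encode the slot
	 * number (0-2) in the low 4 bits of the probe address. */
	static void decode_probe_addr(unsigned long addr,
				      unsigned long *bundle, unsigned long *slot)
	{
		*bundle = addr & ~0xFULL;
		*slot = addr & 0xF;
		if (*slot > 2)		/* defensive, mirrors the code above */
			*slot = 0;
	}
]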
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs) | |||
545 | ia64_psr(regs)->ss = 1; | 568 | ia64_psr(regs)->ss = 1; |
546 | } | 569 | } |
547 | 570 | ||
548 | static int pre_kprobes_handler(struct die_args *args) | 571 | static int __kprobes is_ia64_break_inst(struct pt_regs *regs) |
572 | { | ||
573 | unsigned int slot = ia64_psr(regs)->ri; | ||
574 | unsigned int template, major_opcode; | ||
575 | unsigned long kprobe_inst; | ||
576 | unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; | ||
577 | bundle_t bundle; | ||
578 | |||
579 | memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); | ||
580 | template = bundle.quad0.template; | ||
581 | |||
582 | /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ | ||
583 | if (slot == 1 && bundle_encoding[template][1] == L) | ||
584 | slot++; | ||
585 | |||
586 | /* Get Kprobe probe instruction at given slot*/ | ||
587 | get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); | ||
588 | |||
589 | /* For break instruction, | ||
590 | * Bits 37:40 Major opcode to be zero | ||
591 | * Bits 27:32 X6 to be zero | ||
592 | * Bits 33:35 X3 to be zero | ||
593 | */ | ||
594 | if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) { | ||
595 | /* Not a break instruction */ | ||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* Is a break instruction */ | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | static int __kprobes pre_kprobes_handler(struct die_args *args) | ||
549 | { | 604 | { |
550 | struct kprobe *p; | 605 | struct kprobe *p; |
551 | int ret = 0; | 606 | int ret = 0; |
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args) | |||
558 | if (kprobe_running()) { | 613 | if (kprobe_running()) { |
559 | p = get_kprobe(addr); | 614 | p = get_kprobe(addr); |
560 | if (p) { | 615 | if (p) { |
561 | if (kprobe_status == KPROBE_HIT_SS) { | 616 | if ( (kprobe_status == KPROBE_HIT_SS) && |
617 | (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { | ||
618 | ia64_psr(regs)->ss = 0; | ||
562 | unlock_kprobes(); | 619 | unlock_kprobes(); |
563 | goto no_kprobe; | 620 | goto no_kprobe; |
564 | } | 621 | } |
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args) | |||
592 | p = get_kprobe(addr); | 649 | p = get_kprobe(addr); |
593 | if (!p) { | 650 | if (!p) { |
594 | unlock_kprobes(); | 651 | unlock_kprobes(); |
652 | if (!is_ia64_break_inst(regs)) { | ||
653 | /* | ||
654 | * The breakpoint instruction was removed right | ||
655 | * after we hit it. Another cpu has removed | ||
656 | * either a probepoint or a debugger breakpoint | ||
657 | * at this address. In either case, no further | ||
658 | * handling of this interrupt is appropriate. | ||
659 | */ | ||
660 | ret = 1; | ||
661 | |||
662 | } | ||
663 | |||
664 | /* Not one of our breaks, let the kernel handle it */ | ||
595 | goto no_kprobe; | 665 | goto no_kprobe; |
596 | } | 666 | } |
597 | 667 | ||
@@ -616,7 +686,7 @@ no_kprobe: | |||
616 | return ret; | 686 | return ret; |
617 | } | 687 | } |
618 | 688 | ||
619 | static int post_kprobes_handler(struct pt_regs *regs) | 689 | static int __kprobes post_kprobes_handler(struct pt_regs *regs) |
620 | { | 690 | { |
621 | if (!kprobe_running()) | 691 | if (!kprobe_running()) |
622 | return 0; | 692 | return 0; |
@@ -641,7 +711,7 @@ out: | |||
641 | return 1; | 711 | return 1; |
642 | } | 712 | } |
643 | 713 | ||
644 | static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) | 714 | static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) |
645 | { | 715 | { |
646 | if (!kprobe_running()) | 716 | if (!kprobe_running()) |
647 | return 0; | 717 | return 0; |
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) | |||
659 | return 0; | 729 | return 0; |
660 | } | 730 | } |
661 | 731 | ||
662 | int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, | 732 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, |
663 | void *data) | 733 | unsigned long val, void *data) |
664 | { | 734 | { |
665 | struct die_args *args = (struct die_args *)data; | 735 | struct die_args *args = (struct die_args *)data; |
666 | switch(val) { | 736 | switch(val) { |
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, | |||
681 | return NOTIFY_DONE; | 751 | return NOTIFY_DONE; |
682 | } | 752 | } |
683 | 753 | ||
684 | int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | 754 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
685 | { | 755 | { |
686 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 756 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
687 | unsigned long addr = ((struct fnptr *)(jp->entry))->ip; | 757 | unsigned long addr = ((struct fnptr *)(jp->entry))->ip; |
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
703 | return 1; | 773 | return 1; |
704 | } | 774 | } |
705 | 775 | ||
706 | int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 776 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
707 | { | 777 | { |
708 | *regs = jprobe_saved_regs; | 778 | *regs = jprobe_saved_regs; |
709 | return 1; | 779 | return 1; |
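[With the handlers above tagged __kprobes, a client module looks roughly like this. Illustrative only: the module boilerplate and the probed symbol are assumptions, kallsyms_lookup_name() may not be exported to modules on every kernel of this era (a hard-coded System.map address is the fallback), and on ia64 the address must name a valid slot as enforced by valid_kprobe_addr() above:

	#include <linux/module.h>
	#include <linux/kallsyms.h>
	#include <linux/kprobes.h>

	static int __kprobes my_pre(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
		return 0;	/* continue with the probed instruction */
	}

	static struct kprobe my_kp = {
		.pre_handler = my_pre,
	};

	static int __init my_probe_init(void)
	{
		my_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
		if (!my_kp.addr)
			return -EINVAL;
		return register_kprobe(&my_kp);
	}

	static void __exit my_probe_exit(void)
	{
		unregister_kprobe(&my_kp);
	}

	module_init(my_probe_init);
	module_exit(my_probe_exit);
	MODULE_LICENSE("GPL");
]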
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 4ebbf3974381..6dc726ad7137 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -48,6 +48,9 @@ | |||
48 | * Delete dead variables and functions. | 48 | * Delete dead variables and functions. |
49 | * Reorder to remove the need for forward declarations and to consolidate | 49 | * Reorder to remove the need for forward declarations and to consolidate |
50 | * related code. | 50 | * related code. |
51 | * | ||
52 | * 2005-08-12 Keith Owens <kaos@sgi.com> | ||
53 | * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. | ||
51 | */ | 54 | */ |
52 | #include <linux/config.h> | 55 | #include <linux/config.h> |
53 | #include <linux/types.h> | 56 | #include <linux/types.h> |
@@ -77,6 +80,8 @@ | |||
77 | #include <asm/irq.h> | 80 | #include <asm/irq.h> |
78 | #include <asm/hw_irq.h> | 81 | #include <asm/hw_irq.h> |
79 | 82 | ||
83 | #include "entry.h" | ||
84 | |||
80 | #if defined(IA64_MCA_DEBUG_INFO) | 85 | #if defined(IA64_MCA_DEBUG_INFO) |
81 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) | 86 | # define IA64_MCA_DEBUG(fmt...) printk(fmt) |
82 | #else | 87 | #else |
@@ -84,9 +89,7 @@ | |||
84 | #endif | 89 | #endif |
85 | 90 | ||
86 | /* Used by mca_asm.S */ | 91 | /* Used by mca_asm.S */ |
87 | ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state; | 92 | u32 ia64_mca_serialize; |
88 | ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state; | ||
89 | u64 ia64_mca_serialize; | ||
90 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ | 93 | DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ |
91 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ | 94 | DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ |
92 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ | 95 | DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ |
@@ -95,8 +98,10 @@ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ | |||
95 | unsigned long __per_cpu_mca[NR_CPUS]; | 98 | unsigned long __per_cpu_mca[NR_CPUS]; |
96 | 99 | ||
97 | /* In mca_asm.S */ | 100 | /* In mca_asm.S */ |
98 | extern void ia64_monarch_init_handler (void); | 101 | extern void ia64_os_init_dispatch_monarch (void); |
99 | extern void ia64_slave_init_handler (void); | 102 | extern void ia64_os_init_dispatch_slave (void); |
103 | |||
104 | static int monarch_cpu = -1; | ||
100 | 105 | ||
101 | static ia64_mc_info_t ia64_mc_info; | 106 | static ia64_mc_info_t ia64_mc_info; |
102 | 107 | ||
@@ -234,7 +239,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) | |||
234 | * This function retrieves a specified error record type from SAL | 239 | * This function retrieves a specified error record type from SAL |
235 | * and wakes up any processes waiting for error records. | 240 | * and wakes up any processes waiting for error records. |
236 | * | 241 | * |
237 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT) | 242 | * Inputs : sal_info_type (Type of error record MCA/CMC/CPE) |
243 | * FIXME: remove MCA and irq_safe. | ||
238 | */ | 244 | */ |
239 | static void | 245 | static void |
240 | ia64_mca_log_sal_error_record(int sal_info_type) | 246 | ia64_mca_log_sal_error_record(int sal_info_type) |
@@ -242,7 +248,7 @@ ia64_mca_log_sal_error_record(int sal_info_type) | |||
242 | u8 *buffer; | 248 | u8 *buffer; |
243 | sal_log_record_header_t *rh; | 249 | sal_log_record_header_t *rh; |
244 | u64 size; | 250 | u64 size; |
245 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT; | 251 | int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA; |
246 | #ifdef IA64_MCA_DEBUG_INFO | 252 | #ifdef IA64_MCA_DEBUG_INFO |
247 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; | 253 | static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; |
248 | #endif | 254 | #endif |
@@ -330,191 +336,6 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) | |||
330 | 336 | ||
331 | #endif /* CONFIG_ACPI */ | 337 | #endif /* CONFIG_ACPI */ |
332 | 338 | ||
333 | static void | ||
334 | show_min_state (pal_min_state_area_t *minstate) | ||
335 | { | ||
336 | u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri; | ||
337 | u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri; | ||
338 | |||
339 | printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits); | ||
340 | printk("pr\t\t%016lx\n", minstate->pmsa_pr); | ||
341 | printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0); | ||
342 | printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc); | ||
343 | printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip); | ||
344 | printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr); | ||
345 | printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs); | ||
346 | printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip); | ||
347 | printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr); | ||
348 | printk("xfs\t\t%016lx\n", minstate->pmsa_xfs); | ||
349 | printk("b1\t\t%016lx ", minstate->pmsa_br1); | ||
350 | print_symbol("%s\n", minstate->pmsa_br1); | ||
351 | |||
352 | printk("\nstatic registers r0-r15:\n"); | ||
353 | printk(" r0- 3 %016lx %016lx %016lx %016lx\n", | ||
354 | 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]); | ||
355 | printk(" r4- 7 %016lx %016lx %016lx %016lx\n", | ||
356 | minstate->pmsa_gr[3], minstate->pmsa_gr[4], | ||
357 | minstate->pmsa_gr[5], minstate->pmsa_gr[6]); | ||
358 | printk(" r8-11 %016lx %016lx %016lx %016lx\n", | ||
359 | minstate->pmsa_gr[7], minstate->pmsa_gr[8], | ||
360 | minstate->pmsa_gr[9], minstate->pmsa_gr[10]); | ||
361 | printk("r12-15 %016lx %016lx %016lx %016lx\n", | ||
362 | minstate->pmsa_gr[11], minstate->pmsa_gr[12], | ||
363 | minstate->pmsa_gr[13], minstate->pmsa_gr[14]); | ||
364 | |||
365 | printk("\nbank 0:\n"); | ||
366 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
367 | minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1], | ||
368 | minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]); | ||
369 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
370 | minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5], | ||
371 | minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]); | ||
372 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
373 | minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9], | ||
374 | minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]); | ||
375 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
376 | minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13], | ||
377 | minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]); | ||
378 | |||
379 | printk("\nbank 1:\n"); | ||
380 | printk("r16-19 %016lx %016lx %016lx %016lx\n", | ||
381 | minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1], | ||
382 | minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]); | ||
383 | printk("r20-23 %016lx %016lx %016lx %016lx\n", | ||
384 | minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5], | ||
385 | minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]); | ||
386 | printk("r24-27 %016lx %016lx %016lx %016lx\n", | ||
387 | minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9], | ||
388 | minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]); | ||
389 | printk("r28-31 %016lx %016lx %016lx %016lx\n", | ||
390 | minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13], | ||
391 | minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]); | ||
392 | } | ||
393 | |||
394 | static void | ||
395 | fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw) | ||
396 | { | ||
397 | u64 *dst_banked, *src_banked, bit, shift, nat_bits; | ||
398 | int i; | ||
399 | |||
400 | /* | ||
401 | * First, update the pt-regs and switch-stack structures with the contents stored | ||
402 | * in the min-state area: | ||
403 | */ | ||
404 | if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) { | ||
405 | pt->cr_ipsr = ms->pmsa_xpsr; | ||
406 | pt->cr_iip = ms->pmsa_xip; | ||
407 | pt->cr_ifs = ms->pmsa_xfs; | ||
408 | } else { | ||
409 | pt->cr_ipsr = ms->pmsa_ipsr; | ||
410 | pt->cr_iip = ms->pmsa_iip; | ||
411 | pt->cr_ifs = ms->pmsa_ifs; | ||
412 | } | ||
413 | pt->ar_rsc = ms->pmsa_rsc; | ||
414 | pt->pr = ms->pmsa_pr; | ||
415 | pt->r1 = ms->pmsa_gr[0]; | ||
416 | pt->r2 = ms->pmsa_gr[1]; | ||
417 | pt->r3 = ms->pmsa_gr[2]; | ||
418 | sw->r4 = ms->pmsa_gr[3]; | ||
419 | sw->r5 = ms->pmsa_gr[4]; | ||
420 | sw->r6 = ms->pmsa_gr[5]; | ||
421 | sw->r7 = ms->pmsa_gr[6]; | ||
422 | pt->r8 = ms->pmsa_gr[7]; | ||
423 | pt->r9 = ms->pmsa_gr[8]; | ||
424 | pt->r10 = ms->pmsa_gr[9]; | ||
425 | pt->r11 = ms->pmsa_gr[10]; | ||
426 | pt->r12 = ms->pmsa_gr[11]; | ||
427 | pt->r13 = ms->pmsa_gr[12]; | ||
428 | pt->r14 = ms->pmsa_gr[13]; | ||
429 | pt->r15 = ms->pmsa_gr[14]; | ||
430 | dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */ | ||
431 | src_banked = ms->pmsa_bank1_gr; | ||
432 | for (i = 0; i < 16; ++i) | ||
433 | dst_banked[i] = src_banked[i]; | ||
434 | pt->b0 = ms->pmsa_br0; | ||
435 | sw->b1 = ms->pmsa_br1; | ||
436 | |||
437 | /* construct the NaT bits for the pt-regs structure: */ | ||
438 | # define PUT_NAT_BIT(dst, addr) \ | ||
439 | do { \ | ||
440 | bit = nat_bits & 1; nat_bits >>= 1; \ | ||
441 | shift = ((unsigned long) addr >> 3) & 0x3f; \ | ||
442 | dst = ((dst) & ~(1UL << shift)) | (bit << shift); \ | ||
443 | } while (0) | ||
444 | |||
445 | /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */ | ||
446 | shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f; | ||
447 | nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift)); | ||
448 | |||
449 | PUT_NAT_BIT(sw->caller_unat, &pt->r1); | ||
450 | PUT_NAT_BIT(sw->caller_unat, &pt->r2); | ||
451 | PUT_NAT_BIT(sw->caller_unat, &pt->r3); | ||
452 | PUT_NAT_BIT(sw->ar_unat, &sw->r4); | ||
453 | PUT_NAT_BIT(sw->ar_unat, &sw->r5); | ||
454 | PUT_NAT_BIT(sw->ar_unat, &sw->r6); | ||
455 | PUT_NAT_BIT(sw->ar_unat, &sw->r7); | ||
456 | PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9); | ||
457 | PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11); | ||
458 | PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13); | ||
459 | PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15); | ||
460 | nat_bits >>= 16; /* skip over bank0 NaT bits */ | ||
461 | PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17); | ||
462 | PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19); | ||
463 | PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21); | ||
464 | PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23); | ||
465 | PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25); | ||
466 | PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27); | ||
467 | PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29); | ||
468 | PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31); | ||
469 | } | ||
470 | |||
471 | static void | ||
472 | init_handler_platform (pal_min_state_area_t *ms, | ||
473 | struct pt_regs *pt, struct switch_stack *sw) | ||
474 | { | ||
475 | struct unw_frame_info info; | ||
476 | |||
477 | /* if a kernel debugger is available call it here else just dump the registers */ | ||
478 | |||
479 | /* | ||
480 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | ||
481 | * generated via the BMC's command-line interface, but since the console is on the | ||
482 | * same serial line, the user will need some time to switch out of the BMC before | ||
483 | * the dump begins. | ||
484 | */ | ||
485 | printk("Delaying for 5 seconds...\n"); | ||
486 | udelay(5*1000000); | ||
487 | show_min_state(ms); | ||
488 | |||
489 | printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm); | ||
490 | fetch_min_state(ms, pt, sw); | ||
491 | unw_init_from_interruption(&info, current, pt, sw); | ||
492 | ia64_do_show_stack(&info, NULL); | ||
493 | |||
494 | #ifdef CONFIG_SMP | ||
495 | /* read_trylock() would be handy... */ | ||
496 | if (!tasklist_lock.write_lock) | ||
497 | read_lock(&tasklist_lock); | ||
498 | #endif | ||
499 | { | ||
500 | struct task_struct *g, *t; | ||
501 | do_each_thread (g, t) { | ||
502 | if (t == current) | ||
503 | continue; | ||
504 | |||
505 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
506 | show_stack(t, NULL); | ||
507 | } while_each_thread (g, t); | ||
508 | } | ||
509 | #ifdef CONFIG_SMP | ||
510 | if (!tasklist_lock.write_lock) | ||
511 | read_unlock(&tasklist_lock); | ||
512 | #endif | ||
513 | |||
514 | printk("\nINIT dump complete. Please reboot now.\n"); | ||
515 | while (1); /* hang city if no debugger */ | ||
516 | } | ||
517 | |||
518 | #ifdef CONFIG_ACPI | 339 | #ifdef CONFIG_ACPI |
519 | /* | 340 | /* |
520 | * ia64_mca_register_cpev | 341 | * ia64_mca_register_cpev |
@@ -657,42 +478,6 @@ ia64_mca_cmc_vector_enable_keventd(void *unused) | |||
657 | } | 478 | } |
658 | 479 | ||
659 | /* | 480 | /* |
660 | * ia64_mca_wakeup_ipi_wait | ||
661 | * | ||
662 | * Wait for the inter-cpu interrupt to be sent by the | ||
663 | * monarch processor once it is done with handling the | ||
664 | * MCA. | ||
665 | * | ||
666 | * Inputs : None | ||
667 | * Outputs : None | ||
668 | */ | ||
669 | static void | ||
670 | ia64_mca_wakeup_ipi_wait(void) | ||
671 | { | ||
672 | int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6); | ||
673 | int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f); | ||
674 | u64 irr = 0; | ||
675 | |||
676 | do { | ||
677 | switch(irr_num) { | ||
678 | case 0: | ||
679 | irr = ia64_getreg(_IA64_REG_CR_IRR0); | ||
680 | break; | ||
681 | case 1: | ||
682 | irr = ia64_getreg(_IA64_REG_CR_IRR1); | ||
683 | break; | ||
684 | case 2: | ||
685 | irr = ia64_getreg(_IA64_REG_CR_IRR2); | ||
686 | break; | ||
687 | case 3: | ||
688 | irr = ia64_getreg(_IA64_REG_CR_IRR3); | ||
689 | break; | ||
690 | } | ||
691 | cpu_relax(); | ||
692 | } while (!(irr & (1UL << irr_bit))) ; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * ia64_mca_wakeup | 481 | * ia64_mca_wakeup |
697 | * | 482 | * |
698 | * Send an inter-cpu interrupt to wake-up a particular cpu | 483 | * Send an inter-cpu interrupt to wake-up a particular cpu |
@@ -757,11 +542,9 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) | |||
757 | */ | 542 | */ |
758 | ia64_sal_mc_rendez(); | 543 | ia64_sal_mc_rendez(); |
759 | 544 | ||
760 | /* Wait for the wakeup IPI from the monarch | 545 | /* Wait for the monarch cpu to exit. */ |
761 | * This waiting is done by polling on the wakeup-interrupt | 546 | while (monarch_cpu != -1) |
762 | * vector bit in the processor's IRRs | 547 | cpu_relax(); /* spin until monarch leaves */ |
763 | */ | ||
764 | ia64_mca_wakeup_ipi_wait(); | ||
765 | 548 | ||
766 | /* Enable all interrupts */ | 549 | /* Enable all interrupts */ |
767 | local_irq_restore(flags); | 550 | local_irq_restore(flags); |
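[Slaves no longer poll their IRR for the wakeup vector; they spin on monarch_cpu, which the monarch sets on entry and clears on exit. The handshake reduced to its essentials (a sketch; in the real code cpu_relax() supplies the compiler barrier that makes the plain int work):

	static int monarch_cpu = -1;	/* -1: no event in progress */

	void monarch_side(int cpu)
	{
		monarch_cpu = cpu;		/* publish: event in progress */
		ia64_wait_for_slaves(cpu);	/* let the other cpus check in */
		/* ... log, recover, backtrace the other cpus ... */
		monarch_cpu = -1;		/* release the spinning slaves */
	}

	void slave_side(void)
	{
		while (monarch_cpu != -1)
			cpu_relax();		/* spin until monarch leaves */
	}
]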
@@ -789,53 +572,13 @@ ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs) | |||
789 | return IRQ_HANDLED; | 572 | return IRQ_HANDLED; |
790 | } | 573 | } |
791 | 574 | ||
792 | /* | ||
793 | * ia64_return_to_sal_check | ||
794 | * | ||
795 | * This is function called before going back from the OS_MCA handler | ||
796 | * to the OS_MCA dispatch code which finally takes the control back | ||
797 | * to the SAL. | ||
798 | * The main purpose of this routine is to setup the OS_MCA to SAL | ||
799 | * return state which can be used by the OS_MCA dispatch code | ||
800 | * just before going back to SAL. | ||
801 | * | ||
802 | * Inputs : None | ||
803 | * Outputs : None | ||
804 | */ | ||
805 | |||
806 | static void | ||
807 | ia64_return_to_sal_check(int recover) | ||
808 | { | ||
809 | |||
810 | /* Copy over some relevant stuff from the sal_to_os_mca_handoff | ||
811 | * so that it can be used at the time of os_mca_to_sal_handoff | ||
812 | */ | ||
813 | ia64_os_to_sal_handoff_state.imots_sal_gp = | ||
814 | ia64_sal_to_os_handoff_state.imsto_sal_gp; | ||
815 | |||
816 | ia64_os_to_sal_handoff_state.imots_sal_check_ra = | ||
817 | ia64_sal_to_os_handoff_state.imsto_sal_check_ra; | ||
818 | |||
819 | if (recover) | ||
820 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED; | ||
821 | else | ||
822 | ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT; | ||
823 | |||
824 | /* Default = tell SAL to return to same context */ | ||
825 | ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT; | ||
826 | |||
827 | ia64_os_to_sal_handoff_state.imots_new_min_state = | ||
828 | (u64 *)ia64_sal_to_os_handoff_state.pal_min_state; | ||
829 | |||
830 | } | ||
831 | |||
832 | /* Function pointer for extra MCA recovery */ | 575 | /* Function pointer for extra MCA recovery */ |
833 | int (*ia64_mca_ucmc_extension) | 576 | int (*ia64_mca_ucmc_extension) |
834 | (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*) | 577 | (void*,struct ia64_sal_os_state*) |
835 | = NULL; | 578 | = NULL; |
836 | 579 | ||
837 | int | 580 | int |
838 | ia64_reg_MCA_extension(void *fn) | 581 | ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)) |
839 | { | 582 | { |
840 | if (ia64_mca_ucmc_extension) | 583 | if (ia64_mca_ucmc_extension) |
841 | return 1; | 584 | return 1; |
@@ -854,8 +597,321 @@ ia64_unreg_MCA_extension(void) | |||
854 | EXPORT_SYMBOL(ia64_reg_MCA_extension); | 597 | EXPORT_SYMBOL(ia64_reg_MCA_extension); |
855 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); | 598 | EXPORT_SYMBOL(ia64_unreg_MCA_extension); |
856 | 599 | ||
600 | |||
601 | static inline void | ||
602 | copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat) | ||
603 | { | ||
604 | u64 fslot, tslot, nat; | ||
605 | *tr = *fr; | ||
606 | fslot = ((unsigned long)fr >> 3) & 63; | ||
607 | tslot = ((unsigned long)tr >> 3) & 63; | ||
608 | *tnat &= ~(1UL << tslot); | ||
609 | nat = (fnat >> fslot) & 1; | ||
610 | *tnat |= (nat << tslot); | ||
611 | } | ||
612 | |||
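[copy_reg() above moves a register value together with its NaT bit: on ia64 the NaT bit for a value spilled at address A lives at bit (A >> 3) & 63 of the matching UNAT/RNAT word, so the bit must be re-homed whenever the value changes address. The same computation as a standalone, userspace-compilable sketch:

	#include <stdint.h>

	static void copy_reg_sketch(const uint64_t *fr, uint64_t fnat,
				    uint64_t *tr, uint64_t *tnat)
	{
		uint64_t fslot = ((uintptr_t)fr >> 3) & 63;	/* source bit */
		uint64_t tslot = ((uintptr_t)tr >> 3) & 63;	/* dest bit */
		uint64_t nat = (fnat >> fslot) & 1;

		*tr = *fr;
		*tnat = (*tnat & ~(UINT64_C(1) << tslot)) | (nat << tslot);
	}
]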
613 | /* On entry to this routine, we are running on the per cpu stack, see | ||
614 | * mca_asm.h. The original stack has not been touched by this event. Some of | ||
615 | * the original stack's registers will be in the RBS on this stack. This stack | ||
616 | * also contains a partial pt_regs and switch_stack, the rest of the data is in | ||
617 | * PAL minstate. | ||
618 | * | ||
619 | * The first thing to do is modify the original stack to look like a blocked | ||
620 | * task so we can run backtrace on the original task. Also mark the per cpu | ||
621 | * stack as current to ensure that we use the correct task state; it also means | ||
622 | * that we can do backtrace on the MCA/INIT handler code itself. | ||
623 | */ | ||
624 | |||
625 | static task_t * | ||
626 | ia64_mca_modify_original_stack(struct pt_regs *regs, | ||
627 | const struct switch_stack *sw, | ||
628 | struct ia64_sal_os_state *sos, | ||
629 | const char *type) | ||
630 | { | ||
631 | char *p, comm[sizeof(current->comm)]; | ||
632 | ia64_va va; | ||
633 | extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ | ||
634 | const pal_min_state_area_t *ms = sos->pal_min_state; | ||
635 | task_t *previous_current; | ||
636 | struct pt_regs *old_regs; | ||
637 | struct switch_stack *old_sw; | ||
638 | unsigned size = sizeof(struct pt_regs) + | ||
639 | sizeof(struct switch_stack) + 16; | ||
640 | u64 *old_bspstore, *old_bsp; | ||
641 | u64 *new_bspstore, *new_bsp; | ||
642 | u64 old_unat, old_rnat, new_rnat, nat; | ||
643 | u64 slots, loadrs = regs->loadrs; | ||
644 | u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; | ||
645 | u64 ar_bspstore = regs->ar_bspstore; | ||
646 | u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); | ||
647 | const u64 *bank; | ||
648 | const char *msg; | ||
649 | int cpu = smp_processor_id(); | ||
650 | |||
651 | previous_current = curr_task(cpu); | ||
652 | set_curr_task(cpu, current); | ||
653 | if ((p = strchr(current->comm, ' '))) | ||
654 | *p = '\0'; | ||
655 | |||
656 | /* Best effort attempt to cope with MCA/INIT delivered while in | ||
657 | * physical mode. | ||
658 | */ | ||
659 | regs->cr_ipsr = ms->pmsa_ipsr; | ||
660 | if (ia64_psr(regs)->dt == 0) { | ||
661 | va.l = r12; | ||
662 | if (va.f.reg == 0) { | ||
663 | va.f.reg = 7; | ||
664 | r12 = va.l; | ||
665 | } | ||
666 | va.l = r13; | ||
667 | if (va.f.reg == 0) { | ||
668 | va.f.reg = 7; | ||
669 | r13 = va.l; | ||
670 | } | ||
671 | } | ||
672 | if (ia64_psr(regs)->rt == 0) { | ||
673 | va.l = ar_bspstore; | ||
674 | if (va.f.reg == 0) { | ||
675 | va.f.reg = 7; | ||
676 | ar_bspstore = va.l; | ||
677 | } | ||
678 | va.l = ar_bsp; | ||
679 | if (va.f.reg == 0) { | ||
680 | va.f.reg = 7; | ||
681 | ar_bsp = va.l; | ||
682 | } | ||
683 | } | ||
684 | |||
685 | /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers | ||
686 | * have been copied to the old stack, the old stack may fail the | ||
687 | * validation tests below. So ia64_old_stack() must restore the dirty | ||
688 | * registers from the new stack. The old and new bspstore probably | ||
689 | * have different alignments, so loadrs calculated on the old bsp | ||
690 | * cannot be used to restore from the new bsp. Calculate a suitable | ||
691 | * loadrs for the new stack and save it in the new pt_regs, where | ||
692 | * ia64_old_stack() can get it. | ||
693 | */ | ||
694 | old_bspstore = (u64 *)ar_bspstore; | ||
695 | old_bsp = (u64 *)ar_bsp; | ||
696 | slots = ia64_rse_num_regs(old_bspstore, old_bsp); | ||
697 | new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET); | ||
698 | new_bsp = ia64_rse_skip_regs(new_bspstore, slots); | ||
699 | regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; | ||
700 | |||
701 | /* Verify the previous stack state before we change it */ | ||
702 | if (user_mode(regs)) { | ||
703 | msg = "occurred in user space"; | ||
704 | goto no_mod; | ||
705 | } | ||
706 | if (r13 != sos->prev_IA64_KR_CURRENT) { | ||
707 | msg = "inconsistent previous current and r13"; | ||
708 | goto no_mod; | ||
709 | } | ||
710 | if ((r12 - r13) >= KERNEL_STACK_SIZE) { | ||
711 | msg = "inconsistent r12 and r13"; | ||
712 | goto no_mod; | ||
713 | } | ||
714 | if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { | ||
715 | msg = "inconsistent ar.bspstore and r13"; | ||
716 | goto no_mod; | ||
717 | } | ||
718 | va.p = old_bspstore; | ||
719 | if (va.f.reg < 5) { | ||
720 | msg = "old_bspstore is in the wrong region"; | ||
721 | goto no_mod; | ||
722 | } | ||
723 | if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { | ||
724 | msg = "inconsistent ar.bsp and r13"; | ||
725 | goto no_mod; | ||
726 | } | ||
727 | size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; | ||
728 | if (ar_bspstore + size > r12) { | ||
729 | msg = "no room for blocked state"; | ||
730 | goto no_mod; | ||
731 | } | ||
732 | |||
733 | /* Change the comm field on the MCA/INIT task to include the pid that | ||
734 | * was interrupted, it makes for easier debugging. If that pid was 0 | ||
735 | * (swapper or nested MCA/INIT) then use the start of the previous comm | ||
736 | * field suffixed with its cpu. | ||
737 | */ | ||
738 | if (previous_current->pid) | ||
739 | snprintf(comm, sizeof(comm), "%s %d", | ||
740 | current->comm, previous_current->pid); | ||
741 | else { | ||
742 | int l; | ||
743 | if ((p = strchr(previous_current->comm, ' '))) | ||
744 | l = p - previous_current->comm; | ||
745 | else | ||
746 | l = strlen(previous_current->comm); | ||
747 | snprintf(comm, sizeof(comm), "%s %*s %d", | ||
748 | current->comm, l, previous_current->comm, | ||
749 | previous_current->thread_info->cpu); | ||
750 | } | ||
751 | memcpy(current->comm, comm, sizeof(current->comm)); | ||
752 | |||
753 | /* Make the original task look blocked. First stack a struct pt_regs, | ||
754 | * describing the state at the time of interrupt. mca_asm.S built a | ||
755 | * partial pt_regs, copy it and fill in the blanks using minstate. | ||
756 | */ | ||
757 | p = (char *)r12 - sizeof(*regs); | ||
758 | old_regs = (struct pt_regs *)p; | ||
759 | memcpy(old_regs, regs, sizeof(*regs)); | ||
760 | /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use | ||
761 | * pmsa_{xip,xpsr,xfs} | ||
762 | */ | ||
763 | if (ia64_psr(regs)->ic) { | ||
764 | old_regs->cr_iip = ms->pmsa_iip; | ||
765 | old_regs->cr_ipsr = ms->pmsa_ipsr; | ||
766 | old_regs->cr_ifs = ms->pmsa_ifs; | ||
767 | } else { | ||
768 | old_regs->cr_iip = ms->pmsa_xip; | ||
769 | old_regs->cr_ipsr = ms->pmsa_xpsr; | ||
770 | old_regs->cr_ifs = ms->pmsa_xfs; | ||
771 | } | ||
772 | old_regs->pr = ms->pmsa_pr; | ||
773 | old_regs->b0 = ms->pmsa_br0; | ||
774 | old_regs->loadrs = loadrs; | ||
775 | old_regs->ar_rsc = ms->pmsa_rsc; | ||
776 | old_unat = old_regs->ar_unat; | ||
777 | copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat); | ||
778 | copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat); | ||
779 | copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat); | ||
780 | copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat); | ||
781 | copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat); | ||
782 | copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat); | ||
783 | copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat); | ||
784 | copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat); | ||
785 | copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat); | ||
786 | copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat); | ||
787 | copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat); | ||
788 | if (ia64_psr(old_regs)->bn) | ||
789 | bank = ms->pmsa_bank1_gr; | ||
790 | else | ||
791 | bank = ms->pmsa_bank0_gr; | ||
792 | copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat); | ||
793 | copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat); | ||
794 | copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat); | ||
795 | copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat); | ||
796 | copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat); | ||
797 | copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat); | ||
798 | copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat); | ||
799 | copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat); | ||
800 | copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat); | ||
801 | copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat); | ||
802 | copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat); | ||
803 | copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat); | ||
804 | copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat); | ||
805 | copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat); | ||
806 | copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat); | ||
807 | copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat); | ||
808 | |||
809 | /* Next stack a struct switch_stack. mca_asm.S built a partial | ||
810 | * switch_stack, copy it and fill in the blanks using pt_regs and | ||
811 | * minstate. | ||
812 | * | ||
813 | * In the synthesized switch_stack, b0 points to ia64_leave_kernel, | ||
814 | * ar.pfs is set to 0. | ||
815 | * | ||
816 | * unwind.c::unw_unwind() does special processing for interrupt frames. | ||
817 | * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate | ||
818 | * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not | ||
819 | * that this is documented, of course. Set PRED_NON_SYSCALL in the | ||
820 | * switch_stack on the original stack so it will unwind correctly when | ||
821 | * unwind.c reads pt_regs. | ||
822 | * | ||
823 | * thread.ksp is updated to point to the synthesized switch_stack. | ||
824 | */ | ||
825 | p -= sizeof(struct switch_stack); | ||
826 | old_sw = (struct switch_stack *)p; | ||
827 | memcpy(old_sw, sw, sizeof(*sw)); | ||
828 | old_sw->caller_unat = old_unat; | ||
829 | old_sw->ar_fpsr = old_regs->ar_fpsr; | ||
830 | copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat); | ||
831 | copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat); | ||
832 | copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat); | ||
833 | copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat); | ||
834 | old_sw->b0 = (u64)ia64_leave_kernel; | ||
835 | old_sw->b1 = ms->pmsa_br1; | ||
836 | old_sw->ar_pfs = 0; | ||
837 | old_sw->ar_unat = old_unat; | ||
838 | old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL); | ||
839 | previous_current->thread.ksp = (u64)p - 16; | ||
840 | |||
841 | /* Finally copy the original stack's registers back to its RBS. | ||
842 | * Registers from ar.bspstore through ar.bsp at the time of the event | ||
843 | * are in the current RBS, copy them back to the original stack. The | ||
844 | * copy must be done register by register because the original bspstore | ||
845 | * and the current one have different alignments, so the saved RNAT | ||
846 | * data occurs at different places. | ||
847 | * | ||
848 | * mca_asm does cover, so the old_bsp already includes all registers at | ||
849 | * the time of MCA/INIT. It also does flushrs, so all registers before | ||
850 | * this function have been written to backing store on the MCA/INIT | ||
851 | * stack. | ||
852 | */ | ||
853 | new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore)); | ||
854 | old_rnat = regs->ar_rnat; | ||
855 | while (slots--) { | ||
856 | if (ia64_rse_is_rnat_slot(new_bspstore)) { | ||
857 | new_rnat = ia64_get_rnat(new_bspstore++); | ||
858 | } | ||
859 | if (ia64_rse_is_rnat_slot(old_bspstore)) { | ||
860 | *old_bspstore++ = old_rnat; | ||
861 | old_rnat = 0; | ||
862 | } | ||
863 | nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL; | ||
864 | old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore)); | ||
865 | old_rnat |= (nat << ia64_rse_slot_num(old_bspstore)); | ||
866 | *old_bspstore++ = *new_bspstore++; | ||
867 | } | ||
868 | old_sw->ar_bspstore = (unsigned long)old_bspstore; | ||
869 | old_sw->ar_rnat = old_rnat; | ||
870 | |||
871 | sos->prev_task = previous_current; | ||
872 | return previous_current; | ||
873 | |||
874 | no_mod: | ||
875 | printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", | ||
876 | smp_processor_id(), type, msg); | ||
877 | return previous_current; | ||
878 | } | ||
879 | |||
880 | /* The monarch/slave interaction is based on monarch_cpu and requires that all | ||
881 | * slaves have entered rendezvous before the monarch leaves. If any cpu has | ||
882 | * not entered rendezvous yet then wait a bit. The assumption is that any | ||
883 | * slave that has not rendezvoused after a reasonable time is never going to do | ||
884 | * so. In this context, slave includes cpus that respond to the MCA rendezvous | ||
885 | * interrupt, as well as cpus that receive the INIT slave event. | ||
886 | */ | ||
887 | |||
888 | static void | ||
889 | ia64_wait_for_slaves(int monarch) | ||
890 | { | ||
891 | int c, wait = 0; | ||
892 | for_each_online_cpu(c) { | ||
893 | if (c == monarch) | ||
894 | continue; | ||
895 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
896 | udelay(1000); /* short wait first */ | ||
897 | wait = 1; | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | if (!wait) | ||
902 | return; | ||
903 | for_each_online_cpu(c) { | ||
904 | if (c == monarch) | ||
905 | continue; | ||
906 | if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { | ||
907 | udelay(5*1000000); /* wait 5 seconds for slaves (arbitrary) */ | ||
908 | break; | ||
909 | } | ||
910 | } | ||
911 | } | ||
912 | |||
857 | /* | 913 | /* |
858 | * ia64_mca_ucmc_handler | 914 | * ia64_mca_handler |
859 | * | 915 | * |
860 | * This is uncorrectable machine check handler called from OS_MCA | 916 | * This is uncorrectable machine check handler called from OS_MCA |
861 | * dispatch code which is in turn called from SAL_CHECK(). | 917 | * dispatch code which is in turn called from SAL_CHECK(). |
@@ -866,16 +922,28 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension); | |||
866 | * further MCA logging is enabled by clearing logs. | 922 | * further MCA logging is enabled by clearing logs. |
867 | * Monarch also has the duty of sending wakeup-IPIs to pull the | 923 | * Monarch also has the duty of sending wakeup-IPIs to pull the |
868 | * slave processors out of rendezvous spinloop. | 924 | * slave processors out of rendezvous spinloop. |
869 | * | ||
870 | * Inputs : None | ||
871 | * Outputs : None | ||
872 | */ | 925 | */ |
873 | void | 926 | void |
874 | ia64_mca_ucmc_handler(void) | 927 | ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, |
928 | struct ia64_sal_os_state *sos) | ||
875 | { | 929 | { |
876 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) | 930 | pal_processor_state_info_t *psp = (pal_processor_state_info_t *) |
877 | &ia64_sal_to_os_handoff_state.proc_state_param; | 931 | &sos->proc_state_param; |
878 | int recover; | 932 | int recover, cpu = smp_processor_id(); |
933 | task_t *previous_current; | ||
934 | |||
935 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ | ||
936 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); | ||
937 | monarch_cpu = cpu; | ||
938 | ia64_wait_for_slaves(cpu); | ||
939 | |||
940 | /* Wakeup all the processors which are spinning in the rendezvous loop. | ||
941 | * They will leave SAL, then spin in the OS with interrupts disabled | ||
942 | * until this monarch cpu leaves the MCA handler. That gets control | ||
943 | * back to the OS so we can backtrace the other cpus, backtrace when | ||
944 | * spinning in SAL does not work. | ||
945 | */ | ||
946 | ia64_mca_wakeup_all(); | ||
879 | 947 | ||
880 | /* Get the MCA error record and log it */ | 948 | /* Get the MCA error record and log it */ |
881 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); | 949 | ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); |
@@ -883,25 +951,20 @@ ia64_mca_ucmc_handler(void) | |||
883 | /* A TLB error is the only error in this SAL error record */ | 951 | /* A TLB error is the only error in this SAL error record */ |
884 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) | 952 | recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) |
885 | /* other error recovery */ | 953 | /* other error recovery */ |
886 | || (ia64_mca_ucmc_extension | 954 | || (ia64_mca_ucmc_extension |
887 | && ia64_mca_ucmc_extension( | 955 | && ia64_mca_ucmc_extension( |
888 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), | 956 | IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), |
889 | &ia64_sal_to_os_handoff_state, | 957 | sos)); |
890 | &ia64_os_to_sal_handoff_state)); | ||
891 | 958 | ||
892 | if (recover) { | 959 | if (recover) { |
893 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); | 960 | sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); |
894 | rh->severity = sal_log_severity_corrected; | 961 | rh->severity = sal_log_severity_corrected; |
895 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); | 962 | ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); |
963 | sos->os_status = IA64_MCA_CORRECTED; | ||
896 | } | 964 | } |
897 | /* | ||
898 | * Wakeup all the processors which are spinning in the rendezvous | ||
899 | * loop. | ||
900 | */ | ||
901 | ia64_mca_wakeup_all(); | ||
902 | 965 | ||
903 | /* Return to SAL */ | 966 | set_curr_task(cpu, previous_current); |
904 | ia64_return_to_sal_check(recover); | 967 | monarch_cpu = -1; |
905 | } | 968 | } |
906 | 969 | ||
907 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); | 970 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); |
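[The state that previously flowed through ia64_return_to_sal_check() now travels in *sos, but the recovery decision itself is unchanged. Spelled out as a sketch (the psp field names are the PAL processor-state-parameter check bits: tc TLB, cc cache, bc bus, rc register file, uc micro-architectural):

	static int mca_recover_decision(const pal_processor_state_info_t *psp,
					void *log, struct ia64_sal_os_state *sos)
	{
		/* recoverable iff a pure TLB check with no other checks... */
		if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
			return 1;
		/* ...or a registered MCA extension claims the event */
		return ia64_mca_ucmc_extension &&
		       ia64_mca_ucmc_extension(log, sos);
	}
]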
@@ -1125,34 +1188,114 @@ ia64_mca_cpe_poll (unsigned long dummy) | |||
1125 | /* | 1188 | /* |
1126 | * C portion of the OS INIT handler | 1189 | * C portion of the OS INIT handler |
1127 | * | 1190 | * |
1128 | * Called from ia64_monarch_init_handler | 1191 | * Called from ia64_os_init_dispatch |
1129 | * | ||
1130 | * Inputs: pointer to pt_regs where processor info was saved. | ||
1131 | * | 1192 | * |
1132 | * Returns: | 1193 | * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for |
1133 | * 0 if SAL must warm boot the System | 1194 | * this event. This code is used for both monarch and slave INIT events, see |
1134 | * 1 if SAL must return to interrupted context using PAL_MC_RESUME | 1195 | * sos->monarch. |
1135 | * | 1196 | * |
1197 | * All INIT events switch to the INIT stack and change the previous process to | ||
1198 | * blocked status. If one of the INIT events is the monarch then we are | ||
1199 | * probably processing the nmi button/command. Use the monarch cpu to dump all | ||
1200 | * the processes. The slave INIT events all spin until the monarch cpu | ||
1201 | * returns. We can also get INIT slave events for MCA, in which case the MCA | ||
1202 | * process is the monarch. | ||
1136 | */ | 1203 | */ |
1204 | |||
1137 | void | 1205 | void |
1138 | ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw) | 1206 | ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, |
1207 | struct ia64_sal_os_state *sos) | ||
1139 | { | 1208 | { |
1140 | pal_min_state_area_t *ms; | 1209 | static atomic_t slaves; |
1210 | static atomic_t monarchs; | ||
1211 | task_t *previous_current; | ||
1212 | int cpu = smp_processor_id(), c; | ||
1213 | struct task_struct *g, *t; | ||
1141 | 1214 | ||
1142 | oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */ | 1215 | oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */ |
1143 | console_loglevel = 15; /* make sure printks make it to console */ | 1216 | console_loglevel = 15; /* make sure printks make it to console */ |
1144 | 1217 | ||
1145 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n", | 1218 | printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", |
1146 | ia64_sal_to_os_handoff_state.proc_state_param); | 1219 | sos->proc_state_param, cpu, sos->monarch); |
1220 | salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); | ||
1147 | 1221 | ||
1148 | /* | 1222 | previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); |
1149 | * Address of minstate area provided by PAL is physical, | 1223 | sos->os_status = IA64_INIT_RESUME; |
1150 | * uncacheable (bit 63 set). Convert to Linux virtual | 1224 | |
1151 | * address in region 6. | 1225 | /* FIXME: Workaround for broken proms that drive all INIT events as |
1226 | * slaves. The last slave that enters is promoted to be a monarch. | ||
1227 | * Remove this code in September 2006, that gives platforms a year to | ||
1228 | * fix their proms and get their customers updated. | ||
1152 | */ | 1229 | */ |
1153 | ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61)); | 1230 | if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { |
1231 | printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", | ||
1232 | __FUNCTION__, cpu); | ||
1233 | atomic_dec(&slaves); | ||
1234 | sos->monarch = 1; | ||
1235 | } | ||
1236 | |||
1237 | /* FIXME: Workaround for broken proms that drive all INIT events as | ||
1238 | * monarchs. Second and subsequent monarchs are demoted to slaves. | ||
1239 | * Remove this code in September 2006, that gives platforms a year to | ||
1240 | * fix their proms and get their customers updated. | ||
1241 | */ | ||
1242 | if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { | ||
1243 | printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", | ||
1244 | __FUNCTION__, cpu); | ||
1245 | atomic_dec(&monarchs); | ||
1246 | sos->monarch = 0; | ||
1247 | } | ||
1154 | 1248 | ||
1155 | init_handler_platform(ms, pt, sw); /* call platform specific routines */ | 1249 | if (!sos->monarch) { |
1250 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; | ||
1251 | while (monarch_cpu == -1) | ||
1252 | cpu_relax(); /* spin until monarch enters */ | ||
1253 | while (monarch_cpu != -1) | ||
1254 | cpu_relax(); /* spin until monarch leaves */ | ||
1255 | printk("Slave on cpu %d returning to normal service.\n", cpu); | ||
1256 | set_curr_task(cpu, previous_current); | ||
1257 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | ||
1258 | atomic_dec(&slaves); | ||
1259 | return; | ||
1260 | } | ||
1261 | |||
1262 | monarch_cpu = cpu; | ||
1263 | |||
1264 | /* | ||
1265 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000), INIT can be | ||
1266 | * generated via the BMC's command-line interface, but since the console is on the | ||
1267 | * same serial line, the user will need some time to switch out of the BMC before | ||
1268 | * the dump begins. | ||
1269 | */ | ||
1270 | printk("Delaying for 5 seconds...\n"); | ||
1271 | udelay(5*1000000); | ||
1272 | ia64_wait_for_slaves(cpu); | ||
1273 | printk(KERN_ERR "Processes interrupted by INIT -"); | ||
1274 | for_each_online_cpu(c) { | ||
1275 | struct ia64_sal_os_state *s; | ||
1276 | t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); | ||
1277 | s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); | ||
1278 | g = s->prev_task; | ||
1279 | if (g) { | ||
1280 | if (g->pid) | ||
1281 | printk(" %d", g->pid); | ||
1282 | else | ||
1283 | printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); | ||
1284 | } | ||
1285 | } | ||
1286 | printk("\n\n"); | ||
1287 | if (read_trylock(&tasklist_lock)) { | ||
1288 | do_each_thread (g, t) { | ||
1289 | printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); | ||
1290 | show_stack(t, NULL); | ||
1291 | } while_each_thread (g, t); | ||
1292 | read_unlock(&tasklist_lock); | ||
1293 | } | ||
1294 | printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); | ||
1295 | atomic_dec(&monarchs); | ||
1296 | set_curr_task(cpu, previous_current); | ||
1297 | monarch_cpu = -1; | ||
1298 | return; | ||
1156 | } | 1299 | } |
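The monarch/slave protocol in ia64_init_handler above is easier to follow with the prom workarounds stripped out. A condensed C sketch of the rendezvous, using the file-scope atomics (slaves, monarchs) and the monarch_cpu flag the handler already relies on; the function itself is hypothetical, a reading aid rather than a drop-in replacement:

    static void init_rendezvous_sketch(struct ia64_sal_os_state *sos, int cpu)
    {
            if (!sos->monarch) {
                    /* Slave: check in, then spin while the monarch works. */
                    ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
                    while (monarch_cpu == -1)
                            cpu_relax();    /* wait for a monarch to arrive */
                    while (monarch_cpu != -1)
                            cpu_relax();    /* wait for the monarch to finish */
                    ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
                    atomic_dec(&slaves);
                    return;
            }
            /* Monarch: release the first slave spin, collect everyone, dump. */
            monarch_cpu = cpu;
            ia64_wait_for_slaves(cpu);
            /* ... print interrupted tasks and backtraces, as above ... */
            atomic_dec(&monarchs);
            monarch_cpu = -1;       /* release the second slave spin */
    }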
1157 | 1300 | ||
1158 | static int __init | 1301 | static int __init |
@@ -1202,6 +1345,34 @@ static struct irqaction mca_cpep_irqaction = { | |||
1202 | }; | 1345 | }; |
1203 | #endif /* CONFIG_ACPI */ | 1346 | #endif /* CONFIG_ACPI */ |
1204 | 1347 | ||
1348 | /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on | ||
1349 | * these stacks can never sleep, they cannot return from the kernel to user | ||
1350 | * space, they do not appear in a normal ps listing. So there is no need to | ||
1351 | * format most of the fields. | ||
1352 | */ | ||
1353 | |||
1354 | static void | ||
1355 | format_mca_init_stack(void *mca_data, unsigned long offset, | ||
1356 | const char *type, int cpu) | ||
1357 | { | ||
1358 | struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); | ||
1359 | struct thread_info *ti; | ||
1360 | memset(p, 0, KERNEL_STACK_SIZE); | ||
1361 | ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE); | ||
1362 | ti->flags = _TIF_MCA_INIT; | ||
1363 | ti->preempt_count = 1; | ||
1364 | ti->task = p; | ||
1365 | ti->cpu = cpu; | ||
1366 | p->thread_info = ti; | ||
1367 | p->state = TASK_UNINTERRUPTIBLE; | ||
1368 | __set_bit(cpu, &p->cpus_allowed); | ||
1369 | INIT_LIST_HEAD(&p->tasks); | ||
1370 | p->parent = p->real_parent = p->group_leader = p; | ||
1371 | INIT_LIST_HEAD(&p->children); | ||
1372 | INIT_LIST_HEAD(&p->sibling); | ||
1373 | strncpy(p->comm, type, sizeof(p->comm)-1); | ||
1374 | } | ||
1375 | |||
1205 | /* Do per-CPU MCA-related initialization. */ | 1376 | /* Do per-CPU MCA-related initialization. */ |
1206 | 1377 | ||
1207 | void __devinit | 1378 | void __devinit |
@@ -1214,19 +1385,28 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1214 | int cpu; | 1385 | int cpu; |
1215 | 1386 | ||
1216 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) | 1387 | mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) |
1217 | * NR_CPUS); | 1388 | * NR_CPUS + KERNEL_STACK_SIZE); |
1389 | mca_data = (void *)(((unsigned long)mca_data + | ||
1390 | KERNEL_STACK_SIZE - 1) & | ||
1391 | (-KERNEL_STACK_SIZE)); | ||
1218 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 1392 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
1393 | format_mca_init_stack(mca_data, | ||
1394 | offsetof(struct ia64_mca_cpu, mca_stack), | ||
1395 | "MCA", cpu); | ||
1396 | format_mca_init_stack(mca_data, | ||
1397 | offsetof(struct ia64_mca_cpu, init_stack), | ||
1398 | "INIT", cpu); | ||
1219 | __per_cpu_mca[cpu] = __pa(mca_data); | 1399 | __per_cpu_mca[cpu] = __pa(mca_data); |
1220 | mca_data += sizeof(struct ia64_mca_cpu); | 1400 | mca_data += sizeof(struct ia64_mca_cpu); |
1221 | } | 1401 | } |
1222 | } | 1402 | } |
1223 | 1403 | ||
1224 | /* | 1404 | /* |
1225 | * The MCA info structure was allocated earlier and its | 1405 | * The MCA info structure was allocated earlier and its |
1226 | * physical address saved in __per_cpu_mca[cpu]. Copy that | 1406 | * physical address saved in __per_cpu_mca[cpu]. Copy that |
1227 | * address to ia64_mca_data so we can access it as a per-CPU | 1407 | * address to ia64_mca_data so we can access it as a per-CPU |
1228 | * variable. | 1408 | * variable. |
1229 | */ | 1409 | */ |
1230 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; | 1410 | __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; |
1231 | 1411 | ||
1232 | /* | 1412 | /* |
@@ -1236,11 +1416,11 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1236 | __get_cpu_var(ia64_mca_per_cpu_pte) = | 1416 | __get_cpu_var(ia64_mca_per_cpu_pte) = |
1237 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); | 1417 | pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); |
1238 | 1418 | ||
1239 | /* | 1419 | /* |
1240 | * Also, stash away a copy of the PAL address and the PTE | 1420 | * Also, stash away a copy of the PAL address and the PTE |
1241 | * needed to map it. | 1421 | * needed to map it. |
1242 | */ | 1422 | */ |
1243 | pal_vaddr = efi_get_pal_addr(); | 1423 | pal_vaddr = efi_get_pal_addr(); |
1244 | if (!pal_vaddr) | 1424 | if (!pal_vaddr) |
1245 | return; | 1425 | return; |
1246 | __get_cpu_var(ia64_mca_pal_base) = | 1426 | __get_cpu_var(ia64_mca_pal_base) = |
@@ -1272,8 +1452,8 @@ ia64_mca_cpu_init(void *cpu_data) | |||
1272 | void __init | 1452 | void __init |
1273 | ia64_mca_init(void) | 1453 | ia64_mca_init(void) |
1274 | { | 1454 | { |
1275 | ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler; | 1455 | ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; |
1276 | ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler; | 1456 | ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; |
1277 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; | 1457 | ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; |
1278 | int i; | 1458 | int i; |
1279 | s64 rc; | 1459 | s64 rc; |
@@ -1351,9 +1531,9 @@ ia64_mca_init(void) | |||
1351 | * XXX - disable SAL checksum by setting size to 0, should be | 1531 | * XXX - disable SAL checksum by setting size to 0, should be |
1352 | * size of the actual init handler in mca_asm.S. | 1532 | * size of the actual init handler in mca_asm.S. |
1353 | */ | 1533 | */ |
1354 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp); | 1534 | ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); |
1355 | ia64_mc_info.imi_monarch_init_handler_size = 0; | 1535 | ia64_mc_info.imi_monarch_init_handler_size = 0; |
1356 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp); | 1536 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); |
1357 | ia64_mc_info.imi_slave_init_handler_size = 0; | 1537 | ia64_mc_info.imi_slave_init_handler_size = 0; |
1358 | 1538 | ||
1359 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, | 1539 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, |
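On ia64 a C function symbol names a function descriptor (entry address plus gp), not the code itself, which is why the registration code above casts through ia64_fptr_t, dereferences ->fp, and converts with ia64_tpa before handing the result to SAL. The idiom as a hypothetical helper:

    /* SAL wants a physical code address, not a virtual function descriptor. */
    static unsigned long sal_entry_point(void *func)
    {
            ia64_fptr_t *fdesc = (ia64_fptr_t *)func;       /* { fp, gp } pair */
            return ia64_tpa(fdesc->fp);                     /* virtual -> physical */
    }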
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index ef3fd7265b67..499a065f4e60 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
@@ -16,6 +16,9 @@ | |||
16 | // 04/11/12 Russ Anderson <rja@sgi.com> | 16 | // 04/11/12 Russ Anderson <rja@sgi.com> |
17 | // Added per cpu MCA/INIT stack save areas. | 17 | // Added per cpu MCA/INIT stack save areas. |
18 | // | 18 | // |
19 | // 12/08/05 Keith Owens <kaos@sgi.com> | ||
20 | // Use per cpu MCA/INIT stacks for all data. | ||
21 | // | ||
19 | #include <linux/config.h> | 22 | #include <linux/config.h> |
20 | #include <linux/threads.h> | 23 | #include <linux/threads.h> |
21 | 24 | ||
@@ -25,96 +28,23 @@ | |||
25 | #include <asm/mca_asm.h> | 28 | #include <asm/mca_asm.h> |
26 | #include <asm/mca.h> | 29 | #include <asm/mca.h> |
27 | 30 | ||
28 | /* | 31 | #include "entry.h" |
29 | * When we get a machine check, the kernel stack pointer is no longer | ||
30 | * valid, so we need to set a new stack pointer. | ||
31 | */ | ||
32 | #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */ | ||
33 | |||
34 | /* | ||
35 | * Needed for return context to SAL | ||
36 | */ | ||
37 | #define IA64_MCA_SAME_CONTEXT 0 | ||
38 | #define IA64_MCA_COLD_BOOT -2 | ||
39 | |||
40 | #include "minstate.h" | ||
41 | |||
42 | /* | ||
43 | * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec) | ||
44 | * 1. GR1 = OS GP | ||
45 | * 2. GR8 = PAL_PROC physical address | ||
46 | * 3. GR9 = SAL_PROC physical address | ||
47 | * 4. GR10 = SAL GP (physical) | ||
48 | * 5. GR11 = Rendez state | ||
49 | * 6. GR12 = Return address to location within SAL_CHECK | ||
50 | */ | ||
51 | #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \ | ||
52 | LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \ | ||
53 | st8 [_tmp]=r1,0x08;; \ | ||
54 | st8 [_tmp]=r8,0x08;; \ | ||
55 | st8 [_tmp]=r9,0x08;; \ | ||
56 | st8 [_tmp]=r10,0x08;; \ | ||
57 | st8 [_tmp]=r11,0x08;; \ | ||
58 | st8 [_tmp]=r12,0x08;; \ | ||
59 | st8 [_tmp]=r17,0x08;; \ | ||
60 | st8 [_tmp]=r18,0x08 | ||
61 | |||
62 | /* | ||
63 | * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec) | ||
64 | * (p6) is executed if we never entered virtual mode (TLB error) | ||
65 | * (p7) is executed if we entered virtual mode as expected (normal case) | ||
66 | * 1. GR8 = OS_MCA return status | ||
67 | * 2. GR9 = SAL GP (physical) | ||
68 | * 3. GR10 = 0/1 returning same/new context | ||
69 | * 4. GR22 = New min state save area pointer | ||
70 | * returns ptr to SAL rtn save loc in _tmp | ||
71 | */ | ||
72 | #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \ | ||
73 | movl _tmp=ia64_os_to_sal_handoff_state;; \ | ||
74 | DATA_VA_TO_PA(_tmp);; \ | ||
75 | ld8 r8=[_tmp],0x08;; \ | ||
76 | ld8 r9=[_tmp],0x08;; \ | ||
77 | ld8 r10=[_tmp],0x08;; \ | ||
78 | ld8 r22=[_tmp],0x08;; | ||
79 | // now _tmp is pointing to SAL rtn save location | ||
80 | |||
81 | /* | ||
82 | * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state | ||
83 | * imots_os_status=IA64_MCA_COLD_BOOT | ||
84 | * imots_sal_gp=SAL GP | ||
85 | * imots_context=IA64_MCA_SAME_CONTEXT | ||
86 | * imots_new_min_state=Min state save area pointer | ||
87 | * imots_sal_check_ra=Return address to location within SAL_CHECK | ||
88 | * | ||
89 | */ | ||
90 | #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\ | ||
91 | movl tmp=IA64_MCA_COLD_BOOT; \ | ||
92 | movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \ | ||
93 | movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \ | ||
94 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
95 | ld8 tmp=[sal_to_os_handoff],48;; \ | ||
96 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
97 | movl tmp=IA64_MCA_SAME_CONTEXT;; \ | ||
98 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
99 | ld8 tmp=[sal_to_os_handoff],-8;; \ | ||
100 | st8 [os_to_sal_handoff]=tmp,8;; \ | ||
101 | ld8 tmp=[sal_to_os_handoff];; \ | ||
102 | st8 [os_to_sal_handoff]=tmp;; | ||
103 | 32 | ||
104 | #define GET_IA64_MCA_DATA(reg) \ | 33 | #define GET_IA64_MCA_DATA(reg) \ |
105 | GET_THIS_PADDR(reg, ia64_mca_data) \ | 34 | GET_THIS_PADDR(reg, ia64_mca_data) \ |
106 | ;; \ | 35 | ;; \ |
107 | ld8 reg=[reg] | 36 | ld8 reg=[reg] |
108 | 37 | ||
109 | .global ia64_os_mca_dispatch | ||
110 | .global ia64_os_mca_dispatch_end | ||
111 | .global ia64_sal_to_os_handoff_state | ||
112 | .global ia64_os_to_sal_handoff_state | ||
113 | .global ia64_do_tlb_purge | 38 | .global ia64_do_tlb_purge |
39 | .global ia64_os_mca_dispatch | ||
40 | .global ia64_os_init_dispatch_monarch | ||
41 | .global ia64_os_init_dispatch_slave | ||
114 | 42 | ||
115 | .text | 43 | .text |
116 | .align 16 | 44 | .align 16 |
117 | 45 | ||
46 | //StartMain//////////////////////////////////////////////////////////////////// | ||
47 | |||
118 | /* | 48 | /* |
119 | * Just the TLB purge part is moved to a separate function | 49 | * Just the TLB purge part is moved to a separate function |
120 | * so we can re-use the code for cpu hotplug code as well | 50 | * so we can re-use the code for cpu hotplug code as well |
@@ -207,34 +137,31 @@ ia64_do_tlb_purge: | |||
207 | br.sptk.many b1 | 137 | br.sptk.many b1 |
208 | ;; | 138 | ;; |
209 | 139 | ||
210 | ia64_os_mca_dispatch: | 140 | //EndMain////////////////////////////////////////////////////////////////////// |
141 | |||
142 | //StartMain//////////////////////////////////////////////////////////////////// | ||
211 | 143 | ||
144 | ia64_os_mca_dispatch: | ||
212 | // Serialize all MCA processing | 145 | // Serialize all MCA processing |
213 | mov r3=1;; | 146 | mov r3=1;; |
214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | 147 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; |
215 | ia64_os_mca_spin: | 148 | ia64_os_mca_spin: |
216 | xchg8 r4=[r2],r3;; | 149 | xchg4 r4=[r2],r3;; |
217 | cmp.ne p6,p0=r4,r0 | 150 | cmp.ne p6,p0=r4,r0 |
218 | (p6) br ia64_os_mca_spin | 151 | (p6) br ia64_os_mca_spin |
219 | 152 | ||
220 | // Save the SAL to OS MCA handoff state as defined | 153 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
221 | // by SAL SPEC 3.0 | 154 | LOAD_PHYSICAL(p0,r2,1f) // return address |
222 | // NOTE : The order in which the state gets saved | 155 | mov r19=1 // All MCA events are treated as monarch (for now) |
223 | // is dependent on the way the C-structure | 156 | br.sptk ia64_state_save // save the state that is not in minstate |
224 | // for ia64_mca_sal_to_os_state_t has been | 157 | 1: |
225 | // defined in include/asm/mca.h | ||
226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
227 | ;; | ||
228 | |||
229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
230 | begin_os_mca_dump: | ||
231 | br ia64_os_mca_proc_state_dump;; | ||
232 | 158 | ||
233 | ia64_os_mca_done_dump: | 159 | GET_IA64_MCA_DATA(r2) |
234 | 160 | // Using MCA stack, struct ia64_sal_os_state, variable proc_state_param | |
235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | 161 | ;; |
162 | add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2 | ||
236 | ;; | 163 | ;; |
237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | 164 | ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK. |
238 | ;; | 165 | ;; |
239 | tbit.nz p6,p7=r18,60 | 166 | tbit.nz p6,p7=r18,60 |
240 | (p7) br.spnt done_tlb_purge_and_reload | 167 | (p7) br.spnt done_tlb_purge_and_reload |
@@ -323,624 +250,775 @@ ia64_reload_tr: | |||
323 | itr.d dtr[r20]=r16 | 250 | itr.d dtr[r20]=r16 |
324 | ;; | 251 | ;; |
325 | srlz.d | 252 | srlz.d |
326 | ;; | ||
327 | br.sptk.many done_tlb_purge_and_reload | ||
328 | err: | ||
329 | COLD_BOOT_HANDOFF_STATE(r20,r21,r22) | ||
330 | br.sptk.many ia64_os_mca_done_restore | ||
331 | 253 | ||
332 | done_tlb_purge_and_reload: | 254 | done_tlb_purge_and_reload: |
333 | 255 | ||
334 | // Setup new stack frame for OS_MCA handling | 256 | // switch to per cpu MCA stack |
335 | GET_IA64_MCA_DATA(r2) | 257 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack |
336 | ;; | 258 | LOAD_PHYSICAL(p0,r2,1f) // return address |
337 | add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 259 | br.sptk ia64_new_stack |
338 | add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 | 260 | 1: |
339 | ;; | 261 | |
340 | rse_switch_context(r6,r3,r2);; // RSC management in this new context | 262 | // everything saved, now we can set the kernel registers |
263 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
264 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
265 | br.sptk ia64_set_kernel_registers | ||
266 | 1: | ||
341 | 267 | ||
268 | // This must be done in physical mode | ||
342 | GET_IA64_MCA_DATA(r2) | 269 | GET_IA64_MCA_DATA(r2) |
343 | ;; | 270 | ;; |
344 | add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 | 271 | mov r7=r2 |
345 | ;; | ||
346 | mov r12=r2 // establish new stack-pointer | ||
347 | 272 | ||
348 | // Enter virtual mode from physical mode | 273 | // Enter virtual mode from physical mode |
349 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) | 274 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) |
350 | ia64_os_mca_virtual_begin: | 275 | |
276 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
277 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
278 | // handler, set a dummy return address of 0 in this routine. That | ||
279 | // requires that ia64_os_mca_virtual_begin be a global function. | ||
280 | ENTRY(ia64_os_mca_virtual_begin) | ||
281 | .prologue | ||
282 | .save rp,r0 | ||
283 | .body | ||
284 | |||
285 | mov ar.rsc=3 // set eager mode for C handler | ||
286 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
287 | ;; | ||
351 | 288 | ||
352 | // Call virtual mode handler | 289 | // Call virtual mode handler |
353 | movl r2=ia64_mca_ucmc_handler;; | 290 | alloc r14=ar.pfs,0,0,3,0 |
354 | mov b6=r2;; | 291 | ;; |
355 | br.call.sptk.many b0=b6;; | 292 | DATA_PA_TO_VA(r2,r7) |
356 | .ret0: | 293 | ;; |
294 | add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 | ||
295 | add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 | ||
296 | add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
297 | br.call.sptk.many b0=ia64_mca_handler | ||
298 | |||
357 | // Revert back to physical mode before going back to SAL | 299 | // Revert back to physical mode before going back to SAL |
358 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) | 300 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) |
359 | ia64_os_mca_virtual_end: | 301 | ia64_os_mca_virtual_end: |
360 | 302 | ||
361 | // restore the original stack frame here | 303 | END(ia64_os_mca_virtual_begin) |
304 | |||
305 | // switch back to previous stack | ||
306 | alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame | ||
307 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
308 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
309 | br.sptk ia64_old_stack | ||
310 | 1: | ||
311 | |||
312 | mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack | ||
313 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
314 | br.sptk ia64_state_restore // restore the SAL state | ||
315 | 1: | ||
316 | |||
317 | mov b0=r12 // SAL_CHECK return address | ||
318 | |||
319 | // release lock | ||
320 | LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);; | ||
321 | st4.rel [r3]=r0 | ||
322 | |||
323 | br b0 | ||
324 | |||
325 | //EndMain////////////////////////////////////////////////////////////////////// | ||
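The serialization at the top and bottom of ia64_os_mca_dispatch is a bare test-and-set lock: xchg4 swaps 1 into ia64_mca_serialize and spins while the old value is non-zero, and st4.rel drops the lock with a release-ordered store of 0. It cannot be a spinlock_t, since this path runs in physical mode before any C environment exists. A C sketch of the same protocol, with stand-in names:

    static u32 mca_serialize;               /* stands in for ia64_mca_serialize */

    static void mca_serialize_lock(void)    /* xchg4 r4=[r2],r3 + spin */
    {
            while (xchg(&mca_serialize, 1))
                    ;                       /* a previous MCA is still in flight */
    }

    static void mca_serialize_unlock(void)  /* st4.rel [r3]=r0 */
    {
            smp_mb();                       /* st4.rel only needs release order;
                                             * a full barrier over-approximates it */
            mca_serialize = 0;
    }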
326 | |||
327 | //StartMain//////////////////////////////////////////////////////////////////// | ||
328 | |||
329 | // | ||
330 | // SAL to OS entry point for INIT on all processors. This has been defined for | ||
331 | // registration purposes with SAL as a part of ia64_mca_init. Monarch and | ||
332 | // slave INIT have identical processing, except for the value of the | ||
333 | // sos->monarch flag in r19. | ||
334 | // | ||
335 | |||
336 | ia64_os_init_dispatch_monarch: | ||
337 | mov r19=1 // Bow, bow, ye lower middle classes! | ||
338 | br.sptk ia64_os_init_dispatch | ||
339 | |||
340 | ia64_os_init_dispatch_slave: | ||
341 | mov r19=0 // <igor>yeth, mathter</igor> | ||
342 | |||
343 | ia64_os_init_dispatch: | ||
344 | |||
345 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
346 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
347 | br.sptk ia64_state_save // save the state that is not in minstate | ||
348 | 1: | ||
349 | |||
350 | // switch to per cpu INIT stack | ||
351 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
352 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
353 | br.sptk ia64_new_stack | ||
354 | 1: | ||
355 | |||
356 | // everything saved, now we can set the kernel registers | ||
357 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
358 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
359 | br.sptk ia64_set_kernel_registers | ||
360 | 1: | ||
361 | |||
362 | // This must be done in physical mode | ||
362 | GET_IA64_MCA_DATA(r2) | 363 | GET_IA64_MCA_DATA(r2) |
363 | ;; | 364 | ;; |
364 | add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | 365 | mov r7=r2 |
365 | ;; | 366 | |
366 | movl r4=IA64_PSR_MC | 367 | // Enter virtual mode from physical mode |
368 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) | ||
369 | |||
370 | // This code returns to SAL via SOS r2, in general SAL has no unwind | ||
371 | // data. To get a clean termination when backtracing the C MCA/INIT | ||
372 | // handler, set a dummy return address of 0 in this routine. That | ||
373 | // requires that ia64_os_init_virtual_begin be a global function. | ||
374 | ENTRY(ia64_os_init_virtual_begin) | ||
375 | .prologue | ||
376 | .save rp,r0 | ||
377 | .body | ||
378 | |||
379 | mov ar.rsc=3 // set eager mode for C handler | ||
380 | mov r2=r7 // see GET_IA64_MCA_DATA above | ||
367 | ;; | 381 | ;; |
368 | rse_return_context(r4,r3,r2) // switch from interrupt context for RSE | ||
369 | 382 | ||
370 | // let us restore all the registers from our PSI structure | 383 | // Call virtual mode handler |
371 | mov r8=gp | 384 | alloc r14=ar.pfs,0,0,3,0 |
385 | ;; | ||
386 | DATA_PA_TO_VA(r2,r7) | ||
372 | ;; | 387 | ;; |
373 | begin_os_mca_restore: | 388 | add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 |
374 | br ia64_os_mca_proc_state_restore;; | 389 | add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 |
390 | add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 | ||
391 | br.call.sptk.many b0=ia64_init_handler | ||
375 | 392 | ||
376 | ia64_os_mca_done_restore: | 393 | // Revert back to physical mode before going back to SAL |
377 | OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; | 394 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) |
378 | // branch back to SALE_CHECK | 395 | ia64_os_init_virtual_end: |
379 | ld8 r3=[r2];; | ||
380 | mov b0=r3;; // SAL_CHECK return address | ||
381 | 396 | ||
382 | // release lock | 397 | END(ia64_os_init_virtual_begin) |
383 | movl r3=ia64_mca_serialize;; | 398 | |
384 | DATA_VA_TO_PA(r3);; | 399 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack |
385 | st8.rel [r3]=r0 | 400 | LOAD_PHYSICAL(p0,r2,1f) // return address |
401 | br.sptk ia64_state_restore // restore the SAL state | ||
402 | 1: | ||
386 | 403 | ||
404 | // switch back to previous stack | ||
405 | alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame | ||
406 | mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack | ||
407 | LOAD_PHYSICAL(p0,r2,1f) // return address | ||
408 | br.sptk ia64_old_stack | ||
409 | 1: | ||
410 | |||
411 | mov b0=r12 // SAL_CHECK return address | ||
387 | br b0 | 412 | br b0 |
388 | ;; | 413 | |
389 | ia64_os_mca_dispatch_end: | ||
390 | //EndMain////////////////////////////////////////////////////////////////////// | 414 | //EndMain////////////////////////////////////////////////////////////////////// |
391 | 415 | ||
416 | // common defines for the stubs | ||
417 | #define ms r4 | ||
418 | #define regs r5 | ||
419 | #define temp1 r2 /* careful, it overlaps with input registers */ | ||
420 | #define temp2 r3 /* careful, it overlaps with input registers */ | ||
421 | #define temp3 r7 | ||
422 | #define temp4 r14 | ||
423 | |||
392 | 424 | ||
393 | //++ | 425 | //++ |
394 | // Name: | 426 | // Name: |
395 | // ia64_os_mca_proc_state_dump() | 427 | // ia64_state_save() |
396 | // | 428 | // |
397 | // Stub Description: | 429 | // Stub Description: |
398 | // | 430 | // |
399 | // This stub dumps the processor state during MCHK to a data area | 431 | // Save the state that is not in minstate. This is sensitive to the layout of |
432 | // struct ia64_sal_os_state in mca.h. | ||
433 | // | ||
434 | // r2 contains the return address, r3 contains either | ||
435 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
436 | // | ||
437 | // The OS to SAL section of struct ia64_sal_os_state is set to a default | ||
438 | // value of cold boot (MCA) or warm boot (INIT) and return to the same | ||
439 | // context. ia64_sal_os_state is also used to hold some registers that | ||
440 | // need to be saved and restored across the stack switches. | ||
441 | // | ||
442 | // Most input registers to this stub come from PAL/SAL | ||
443 | // r1 os gp, physical | ||
444 | // r8 pal_proc entry point | ||
445 | // r9 sal_proc entry point | ||
446 | // r10 sal gp | ||
447 | // r11 MCA - rendezvous state, INIT - reason code | ||
448 | // r12 sal return address | ||
449 | // r17 pal min_state | ||
450 | // r18 processor state parameter | ||
451 | // r19 monarch flag, set by the caller of this routine | ||
452 | // | ||
453 | // In addition to the SAL to OS state, this routine saves all the | ||
454 | // registers that appear in struct pt_regs and struct switch_stack, | ||
455 | // excluding those that are already in the PAL minstate area. This | ||
456 | // results in a partial pt_regs and switch_stack, the C code copies the | ||
457 | // remaining registers from PAL minstate to pt_regs and switch_stack. The | ||
458 | // resulting structures contain all the state of the original process when | ||
459 | // MCA/INIT occurred. | ||
400 | // | 460 | // |
401 | //-- | 461 | //-- |
402 | 462 | ||
403 | ia64_os_mca_proc_state_dump: | 463 | ia64_state_save: |
404 | // Save bank 1 GRs 16-31 which will be used by c-language code when we switch | 464 | add regs=MCA_SOS_OFFSET, r3 |
405 | // to virtual addressing mode. | 465 | add ms=MCA_SOS_OFFSET+8, r3 |
406 | GET_IA64_MCA_DATA(r2) | 466 | mov b0=r2 // save return address |
467 | cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 | ||
468 | ;; | ||
469 | GET_IA64_MCA_DATA(temp2) | ||
470 | ;; | ||
471 | add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack | ||
472 | add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack | ||
473 | ;; | ||
474 | mov regs=temp1 // save the start of sos | ||
475 | st8 [temp1]=r1,16 // os_gp | ||
476 | st8 [temp2]=r8,16 // pal_proc | ||
477 | ;; | ||
478 | st8 [temp1]=r9,16 // sal_proc | ||
479 | st8 [temp2]=r11,16 // rv_rc | ||
480 | mov r11=cr.iipa | ||
481 | ;; | ||
482 | st8 [temp1]=r18,16 // proc_state_param | ||
483 | st8 [temp2]=r19,16 // monarch | ||
484 | mov r6=IA64_KR(CURRENT) | ||
485 | ;; | ||
486 | st8 [temp1]=r12,16 // sal_ra | ||
487 | st8 [temp2]=r10,16 // sal_gp | ||
488 | mov r12=cr.isr | ||
489 | ;; | ||
490 | st8 [temp1]=r17,16 // pal_min_state | ||
491 | st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT | ||
492 | mov r6=cr.ifa | ||
493 | ;; | ||
494 | st8 [temp1]=r0,16 // prev_task, starts off as NULL | ||
495 | st8 [temp2]=r12,16 // cr.isr | ||
496 | mov r12=cr.itir | ||
497 | ;; | ||
498 | st8 [temp1]=r6,16 // cr.ifa | ||
499 | st8 [temp2]=r12,16 // cr.itir | ||
500 | mov r12=cr.iim | ||
501 | ;; | ||
502 | st8 [temp1]=r11,16 // cr.iipa | ||
503 | st8 [temp2]=r12,16 // cr.iim | ||
504 | mov r6=cr.iha | ||
505 | (p1) mov r12=IA64_MCA_COLD_BOOT | ||
506 | (p2) mov r12=IA64_INIT_WARM_BOOT | ||
507 | ;; | ||
508 | st8 [temp1]=r6,16 // cr.iha | ||
509 | st8 [temp2]=r12 // os_status, default is cold boot | ||
510 | mov r6=IA64_MCA_SAME_CONTEXT | ||
511 | ;; | ||
512 | st8 [temp1]=r6 // context, default is same context | ||
513 | |||
514 | // Save the pt_regs data that is not in minstate. The previous code | ||
515 | // left regs at sos. | ||
516 | add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs | ||
517 | ;; | ||
518 | add temp1=PT(B6), regs | ||
519 | mov temp3=b6 | ||
520 | mov temp4=b7 | ||
521 | add temp2=PT(B7), regs | ||
522 | ;; | ||
523 | st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 | ||
524 | st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 | ||
525 | mov temp3=ar.csd | ||
526 | mov temp4=ar.ssd | ||
527 | cover // must be last in group | ||
407 | ;; | 528 | ;; |
408 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 529 | st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd |
409 | ;; | 530 | st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd |
410 | // save ar.NaT | 531 | mov temp3=ar.unat |
411 | mov r5=ar.unat // ar.unat | 532 | mov temp4=ar.pfs |
412 | 533 | ;; | |
413 | // save banked GRs 16-31 along with NaT bits | 534 | st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat |
414 | bsw.1;; | 535 | st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs |
415 | st8.spill [r2]=r16,8;; | 536 | mov temp3=ar.rnat |
416 | st8.spill [r2]=r17,8;; | 537 | mov temp4=ar.bspstore |
417 | st8.spill [r2]=r18,8;; | 538 | ;; |
418 | st8.spill [r2]=r19,8;; | 539 | st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat |
419 | st8.spill [r2]=r20,8;; | 540 | st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore |
420 | st8.spill [r2]=r21,8;; | 541 | mov temp3=ar.bsp |
421 | st8.spill [r2]=r22,8;; | 542 | ;; |
422 | st8.spill [r2]=r23,8;; | 543 | sub temp3=temp3, temp4 // ar.bsp - ar.bspstore |
423 | st8.spill [r2]=r24,8;; | 544 | mov temp4=ar.fpsr |
424 | st8.spill [r2]=r25,8;; | 545 | ;; |
425 | st8.spill [r2]=r26,8;; | 546 | shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" |
426 | st8.spill [r2]=r27,8;; | 547 | ;; |
427 | st8.spill [r2]=r28,8;; | 548 | st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs |
428 | st8.spill [r2]=r29,8;; | 549 | st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr |
429 | st8.spill [r2]=r30,8;; | 550 | mov temp3=ar.ccv |
430 | st8.spill [r2]=r31,8;; | 551 | ;; |
431 | 552 | st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv | |
432 | mov r4=ar.unat;; | 553 | stf.spill [temp2]=f6,PT(F8)-PT(F6) |
433 | st8 [r2]=r4,8 // save User NaT bits for r16-r31 | 554 | ;; |
434 | mov ar.unat=r5 // restore original unat | 555 | stf.spill [temp1]=f7,PT(F9)-PT(F7) |
435 | bsw.0;; | 556 | stf.spill [temp2]=f8,PT(F10)-PT(F8) |
436 | 557 | ;; | |
437 | //save BRs | 558 | stf.spill [temp1]=f9,PT(F11)-PT(F9) |
438 | add r4=8,r2 // duplicate r2 in r4 | 559 | stf.spill [temp2]=f10 |
439 | add r6=2*8,r2 // duplicate r2 in r4 | 560 | ;; |
440 | 561 | stf.spill [temp1]=f11 | |
441 | mov r3=b0 | 562 | |
442 | mov r5=b1 | 563 | // Save the switch_stack data that is not in minstate nor pt_regs. The |
443 | mov r7=b2;; | 564 | // previous code left regs at pt_regs. |
444 | st8 [r2]=r3,3*8 | 565 | add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs |
445 | st8 [r4]=r5,3*8 | 566 | ;; |
446 | st8 [r6]=r7,3*8;; | 567 | add temp1=SW(F2), regs |
447 | 568 | add temp2=SW(F3), regs | |
448 | mov r3=b3 | 569 | ;; |
449 | mov r5=b4 | 570 | stf.spill [temp1]=f2,32 |
450 | mov r7=b5;; | 571 | stf.spill [temp2]=f3,32 |
451 | st8 [r2]=r3,3*8 | 572 | ;; |
452 | st8 [r4]=r5,3*8 | 573 | stf.spill [temp1]=f4,32 |
453 | st8 [r6]=r7,3*8;; | 574 | stf.spill [temp2]=f5,32 |
454 | 575 | ;; | |
455 | mov r3=b6 | 576 | stf.spill [temp1]=f12,32 |
456 | mov r5=b7;; | 577 | stf.spill [temp2]=f13,32 |
457 | st8 [r2]=r3,2*8 | 578 | ;; |
458 | st8 [r4]=r5,2*8;; | 579 | stf.spill [temp1]=f14,32 |
459 | 580 | stf.spill [temp2]=f15,32 | |
460 | cSaveCRs: | 581 | ;; |
461 | // save CRs | 582 | stf.spill [temp1]=f16,32 |
462 | add r4=8,r2 // duplicate r2 in r4 | 583 | stf.spill [temp2]=f17,32 |
463 | add r6=2*8,r2 // duplicate r2 in r4 | 584 | ;; |
464 | 585 | stf.spill [temp1]=f18,32 | |
465 | mov r3=cr.dcr | 586 | stf.spill [temp2]=f19,32 |
466 | mov r5=cr.itm | 587 | ;; |
467 | mov r7=cr.iva;; | 588 | stf.spill [temp1]=f20,32 |
468 | 589 | stf.spill [temp2]=f21,32 | |
469 | st8 [r2]=r3,8*8 | 590 | ;; |
470 | st8 [r4]=r5,3*8 | 591 | stf.spill [temp1]=f22,32 |
471 | st8 [r6]=r7,3*8;; // 48 byte increments | 592 | stf.spill [temp2]=f23,32 |
472 | 593 | ;; | |
473 | mov r3=cr.pta;; | 594 | stf.spill [temp1]=f24,32 |
474 | st8 [r2]=r3,8*8;; // 64 byte increments | 595 | stf.spill [temp2]=f25,32 |
475 | 596 | ;; | |
476 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | 597 | stf.spill [temp1]=f26,32 |
477 | mov r3=psr;; | 598 | stf.spill [temp2]=f27,32 |
478 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | 599 | ;; |
479 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes | 600 | stf.spill [temp1]=f28,32 |
480 | begin_skip_intr_regs: | 601 | stf.spill [temp2]=f29,32 |
481 | (p6) br SkipIntrRegs;; | 602 | ;; |
482 | 603 | stf.spill [temp1]=f30,SW(B2)-SW(F30) | |
483 | add r4=8,r2 // duplicate r2 in r4 | 604 | stf.spill [temp2]=f31,SW(B3)-SW(F31) |
484 | add r6=2*8,r2 // duplicate r2 in r6 | 605 | mov temp3=b2 |
485 | 606 | mov temp4=b3 | |
486 | mov r3=cr.ipsr | 607 | ;; |
487 | mov r5=cr.isr | 608 | st8 [temp1]=temp3,16 // save b2 |
488 | mov r7=r0;; | 609 | st8 [temp2]=temp4,16 // save b3 |
489 | st8 [r2]=r3,3*8 | 610 | mov temp3=b4 |
490 | st8 [r4]=r5,3*8 | 611 | mov temp4=b5 |
491 | st8 [r6]=r7,3*8;; | 612 | ;; |
492 | 613 | st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 | |
493 | mov r3=cr.iip | 614 | st8 [temp2]=temp4 // save b5 |
494 | mov r5=cr.ifa | 615 | mov temp3=ar.lc |
495 | mov r7=cr.itir;; | 616 | ;; |
496 | st8 [r2]=r3,3*8 | 617 | st8 [temp1]=temp3 // save ar.lc |
497 | st8 [r4]=r5,3*8 | 618 | |
498 | st8 [r6]=r7,3*8;; | 619 | // FIXME: Some proms are incorrectly accessing the minstate area as |
499 | 620 | // cached data. The C code uses region 6, uncached virtual. Ensure | |
500 | mov r3=cr.iipa | 621 | // that there is no cache data lying around for the first 1K of the |
501 | mov r5=cr.ifs | 622 | // minstate area. |
502 | mov r7=cr.iim;; | 623 | // Remove this code in September 2006; that gives platforms a year to |
503 | st8 [r2]=r3,3*8 | 624 | // fix their proms and get their customers updated. |
504 | st8 [r4]=r5,3*8 | 625 | |
505 | st8 [r6]=r7,3*8;; | 626 | add r1=32*1,r17 |
506 | 627 | add r2=32*2,r17 | |
507 | mov r3=cr25;; // cr.iha | 628 | add r3=32*3,r17 |
508 | st8 [r2]=r3,160;; // 160 byte rement | 629 | add r4=32*4,r17 |
509 | 630 | add r5=32*5,r17 | |
510 | SkipIntrRegs: | 631 | add r6=32*6,r17 |
511 | st8 [r2]=r0,152;; // another 152 byte inc. | 632 | add r7=32*7,r17 |
512 | 633 | ;; | |
513 | add r4=8,r2 // duplicate r2 in r4 | 634 | fc r17 |
514 | add r6=2*8,r2 // duplicate r2 in r6 | 635 | fc r1 |
515 | 636 | fc r2 | |
516 | mov r3=cr.lid | 637 | fc r3 |
517 | // mov r5=cr.ivr // cr.ivr, don't read it | 638 | fc r4 |
518 | mov r7=cr.tpr;; | 639 | fc r5 |
519 | st8 [r2]=r3,3*8 | 640 | fc r6 |
520 | st8 [r4]=r5,3*8 | 641 | fc r7 |
521 | st8 [r6]=r7,3*8;; | 642 | add r17=32*8,r17 |
522 | 643 | add r1=32*8,r1 | |
523 | mov r3=r0 // cr.eoi => cr67 | 644 | add r2=32*8,r2 |
524 | mov r5=r0 // cr.irr0 => cr68 | 645 | add r3=32*8,r3 |
525 | mov r7=r0;; // cr.irr1 => cr69 | 646 | add r4=32*8,r4 |
526 | st8 [r2]=r3,3*8 | 647 | add r5=32*8,r5 |
527 | st8 [r4]=r5,3*8 | 648 | add r6=32*8,r6 |
528 | st8 [r6]=r7,3*8;; | 649 | add r7=32*8,r7 |
529 | 650 | ;; | |
530 | mov r3=r0 // cr.irr2 => cr70 | 651 | fc r17 |
531 | mov r5=r0 // cr.irr3 => cr71 | 652 | fc r1 |
532 | mov r7=cr.itv;; | 653 | fc r2 |
533 | st8 [r2]=r3,3*8 | 654 | fc r3 |
534 | st8 [r4]=r5,3*8 | 655 | fc r4 |
535 | st8 [r6]=r7,3*8;; | 656 | fc r5 |
536 | 657 | fc r6 | |
537 | mov r3=cr.pmv | 658 | fc r7 |
538 | mov r5=cr.cmcv;; | 659 | add r17=32*8,r17 |
539 | st8 [r2]=r3,7*8 | 660 | add r1=32*8,r1 |
540 | st8 [r4]=r5,7*8;; | 661 | add r2=32*8,r2 |
541 | 662 | add r3=32*8,r3 | |
542 | mov r3=r0 // cr.lrr0 => cr80 | 663 | add r4=32*8,r4 |
543 | mov r5=r0;; // cr.lrr1 => cr81 | 664 | add r5=32*8,r5 |
544 | st8 [r2]=r3,23*8 | 665 | add r6=32*8,r6 |
545 | st8 [r4]=r5,23*8;; | 666 | add r7=32*8,r7 |
546 | 667 | ;; | |
547 | adds r2=25*8,r2;; | 668 | fc r17 |
548 | 669 | fc r1 | |
549 | cSaveARs: | 670 | fc r2 |
550 | // save ARs | 671 | fc r3 |
551 | add r4=8,r2 // duplicate r2 in r4 | 672 | fc r4 |
552 | add r6=2*8,r2 // duplicate r2 in r6 | 673 | fc r5 |
553 | 674 | fc r6 | |
554 | mov r3=ar.k0 | 675 | fc r7 |
555 | mov r5=ar.k1 | 676 | add r17=32*8,r17 |
556 | mov r7=ar.k2;; | 677 | add r1=32*8,r1 |
557 | st8 [r2]=r3,3*8 | 678 | add r2=32*8,r2 |
558 | st8 [r4]=r5,3*8 | 679 | add r3=32*8,r3 |
559 | st8 [r6]=r7,3*8;; | 680 | add r4=32*8,r4 |
560 | 681 | add r5=32*8,r5 | |
561 | mov r3=ar.k3 | 682 | add r6=32*8,r6 |
562 | mov r5=ar.k4 | 683 | add r7=32*8,r7 |
563 | mov r7=ar.k5;; | 684 | ;; |
564 | st8 [r2]=r3,3*8 | 685 | fc r17 |
565 | st8 [r4]=r5,3*8 | 686 | fc r1 |
566 | st8 [r6]=r7,3*8;; | 687 | fc r2 |
567 | 688 | fc r3 | |
568 | mov r3=ar.k6 | 689 | fc r4 |
569 | mov r5=ar.k7 | 690 | fc r5 |
570 | mov r7=r0;; // ar.kr8 | 691 | fc r6 |
571 | st8 [r2]=r3,10*8 | 692 | fc r7 |
572 | st8 [r4]=r5,10*8 | 693 | |
573 | st8 [r6]=r7,10*8;; // increment by 72 bytes | 694 | br.sptk b0 |
574 | |||
575 | mov r3=ar.rsc | ||
576 | mov ar.rsc=r0 // put RSE in enforced lazy mode | ||
577 | mov r5=ar.bsp | ||
578 | ;; | ||
579 | mov r7=ar.bspstore;; | ||
580 | st8 [r2]=r3,3*8 | ||
581 | st8 [r4]=r5,3*8 | ||
582 | st8 [r6]=r7,3*8;; | ||
583 | |||
584 | mov r3=ar.rnat;; | ||
585 | st8 [r2]=r3,8*13 // increment by 13x8 bytes | ||
586 | |||
587 | mov r3=ar.ccv;; | ||
588 | st8 [r2]=r3,8*4 | ||
589 | |||
590 | mov r3=ar.unat;; | ||
591 | st8 [r2]=r3,8*4 | ||
592 | |||
593 | mov r3=ar.fpsr;; | ||
594 | st8 [r2]=r3,8*4 | ||
595 | |||
596 | mov r3=ar.itc;; | ||
597 | st8 [r2]=r3,160 // 160 | ||
598 | |||
599 | mov r3=ar.pfs;; | ||
600 | st8 [r2]=r3,8 | ||
601 | |||
602 | mov r3=ar.lc;; | ||
603 | st8 [r2]=r3,8 | ||
604 | |||
605 | mov r3=ar.ec;; | ||
606 | st8 [r2]=r3 | ||
607 | add r2=8*62,r2 //padding | ||
608 | |||
609 | // save RRs | ||
610 | mov ar.lc=0x08-1 | ||
611 | movl r4=0x00;; | ||
612 | |||
613 | cStRR: | ||
614 | dep.z r5=r4,61,3;; | ||
615 | mov r3=rr[r5];; | ||
616 | st8 [r2]=r3,8 | ||
617 | add r4=1,r4 | ||
618 | br.cloop.sptk.few cStRR | ||
619 | ;; | ||
620 | end_os_mca_dump: | ||
621 | br ia64_os_mca_done_dump;; | ||
622 | 695 | ||
623 | //EndStub////////////////////////////////////////////////////////////////////// | 696 | //EndStub////////////////////////////////////////////////////////////////////// |
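ia64_state_save above fills struct ia64_sal_os_state with two interleaved pointers: temp1 covers the even 8-byte slots, temp2 the odd ones, and each store post-increments by 16. Reconstructing the field order those stores imply gives the following sketch; the authoritative definition is struct ia64_sal_os_state in mca.h, this is only a reading aid:

    struct ia64_sal_os_state_layout {               /* inferred, not authoritative */
            /* SAL to OS */
            u64 os_gp;                              /* r1 */
            u64 pal_proc;                           /* r8 */
            u64 sal_proc;                           /* r9 */
            u64 rv_rc;                              /* r11: rendezvous state / INIT reason */
            u64 proc_state_param;                   /* r18 */
            u64 monarch;                            /* r19 */
            u64 sal_ra;                             /* r12: return address into SAL_CHECK */
            u64 sal_gp;                             /* r10 */
            u64 pal_min_state;                      /* r17: physical on entry */
            u64 prev_IA64_KR_CURRENT;               /* IA64_KR(CURRENT) at the event */
            u64 prev_task;                          /* NULL here; the C handler fills it in */
            u64 isr, ifa, itir, iipa, iim, iha;     /* cr.* captured at the event */
            /* OS to SAL */
            u64 os_status;                          /* default: cold boot (MCA), warm boot (INIT) */
            u64 context;                            /* default: IA64_MCA_SAME_CONTEXT */
    };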
624 | 697 | ||
625 | 698 | ||
626 | //++ | 699 | //++ |
627 | // Name: | 700 | // Name: |
628 | // ia64_os_mca_proc_state_restore() | 701 | // ia64_state_restore() |
629 | // | 702 | // |
630 | // Stub Description: | 703 | // Stub Description: |
631 | // | 704 | // |
632 | // This is a stub to restore the saved processor state during MCHK | 705 | // Restore the SAL/OS state. This is sensitive to the layout of struct |
706 | // ia64_sal_os_state in mca.h. | ||
707 | // | ||
708 | // r2 contains the return address, r3 contains either | ||
709 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
710 | // | ||
711 | // In addition to the SAL to OS state, this routine restores all the | ||
712 | // registers that appear in struct pt_regs and struct switch_stack, | ||
713 | // excluding those in the PAL minstate area. | ||
633 | // | 714 | // |
634 | //-- | 715 | //-- |
635 | 716 | ||
636 | ia64_os_mca_proc_state_restore: | 717 | ia64_state_restore: |
718 | // Restore the switch_stack data that is not in minstate nor pt_regs. | ||
719 | add regs=MCA_SWITCH_STACK_OFFSET, r3 | ||
720 | mov b0=r2 // save return address | ||
721 | ;; | ||
722 | GET_IA64_MCA_DATA(temp2) | ||
723 | ;; | ||
724 | add regs=temp2, regs | ||
725 | ;; | ||
726 | add temp1=SW(F2), regs | ||
727 | add temp2=SW(F3), regs | ||
728 | ;; | ||
729 | ldf.fill f2=[temp1],32 | ||
730 | ldf.fill f3=[temp2],32 | ||
731 | ;; | ||
732 | ldf.fill f4=[temp1],32 | ||
733 | ldf.fill f5=[temp2],32 | ||
734 | ;; | ||
735 | ldf.fill f12=[temp1],32 | ||
736 | ldf.fill f13=[temp2],32 | ||
737 | ;; | ||
738 | ldf.fill f14=[temp1],32 | ||
739 | ldf.fill f15=[temp2],32 | ||
740 | ;; | ||
741 | ldf.fill f16=[temp1],32 | ||
742 | ldf.fill f17=[temp2],32 | ||
743 | ;; | ||
744 | ldf.fill f18=[temp1],32 | ||
745 | ldf.fill f19=[temp2],32 | ||
746 | ;; | ||
747 | ldf.fill f20=[temp1],32 | ||
748 | ldf.fill f21=[temp2],32 | ||
749 | ;; | ||
750 | ldf.fill f22=[temp1],32 | ||
751 | ldf.fill f23=[temp2],32 | ||
752 | ;; | ||
753 | ldf.fill f24=[temp1],32 | ||
754 | ldf.fill f25=[temp2],32 | ||
755 | ;; | ||
756 | ldf.fill f26=[temp1],32 | ||
757 | ldf.fill f27=[temp2],32 | ||
758 | ;; | ||
759 | ldf.fill f28=[temp1],32 | ||
760 | ldf.fill f29=[temp2],32 | ||
761 | ;; | ||
762 | ldf.fill f30=[temp1],SW(B2)-SW(F30) | ||
763 | ldf.fill f31=[temp2],SW(B3)-SW(F31) | ||
764 | ;; | ||
765 | ld8 temp3=[temp1],16 // restore b2 | ||
766 | ld8 temp4=[temp2],16 // restore b3 | ||
767 | ;; | ||
768 | mov b2=temp3 | ||
769 | mov b3=temp4 | ||
770 | ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 | ||
771 | ld8 temp4=[temp2] // restore b5 | ||
772 | ;; | ||
773 | mov b4=temp3 | ||
774 | mov b5=temp4 | ||
775 | ld8 temp3=[temp1] // restore ar.lc | ||
776 | ;; | ||
777 | mov ar.lc=temp3 | ||
637 | 778 | ||
638 | // Restore bank1 GR16-31 | 779 | // Restore the pt_regs data that is not in minstate. The previous code |
639 | GET_IA64_MCA_DATA(r2) | 780 | // left regs at switch_stack. |
781 | add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs | ||
782 | ;; | ||
783 | add temp1=PT(B6), regs | ||
784 | add temp2=PT(B7), regs | ||
785 | ;; | ||
786 | ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 | ||
787 | ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 | ||
788 | ;; | ||
789 | mov b6=temp3 | ||
790 | mov b7=temp4 | ||
791 | ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd | ||
792 | ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd | ||
793 | ;; | ||
794 | mov ar.csd=temp3 | ||
795 | mov ar.ssd=temp4 | ||
796 | ld8 temp3=[temp1] // restore ar.unat | ||
797 | add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 | ||
798 | ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs | ||
799 | ;; | ||
800 | mov ar.unat=temp3 | ||
801 | mov ar.pfs=temp4 | ||
802 | // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack. | ||
803 | ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv | ||
804 | ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr | ||
805 | ;; | ||
806 | mov ar.ccv=temp3 | ||
807 | mov ar.fpsr=temp4 | ||
808 | ldf.fill f6=[temp1],PT(F8)-PT(F6) | ||
809 | ldf.fill f7=[temp2],PT(F9)-PT(F7) | ||
810 | ;; | ||
811 | ldf.fill f8=[temp1],PT(F10)-PT(F8) | ||
812 | ldf.fill f9=[temp2],PT(F11)-PT(F9) | ||
813 | ;; | ||
814 | ldf.fill f10=[temp1] | ||
815 | ldf.fill f11=[temp2] | ||
816 | |||
817 | // Restore the SAL to OS state. The previous code left regs at pt_regs. | ||
818 | add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs | ||
640 | ;; | 819 | ;; |
641 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | 820 | add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs |
642 | 821 | add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs | |
643 | restore_GRs: // restore bank-1 GRs 16-31 | 822 | ;; |
644 | bsw.1;; | 823 | ld8 r12=[temp1],16 // sal_ra |
645 | add r3=16*8,r2;; // to get to NaT of GR 16-31 | 824 | ld8 r9=[temp2],16 // sal_gp |
646 | ld8 r3=[r3];; | 825 | ;; |
647 | mov ar.unat=r3;; // first restore NaT | 826 | ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task |
648 | 827 | ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT | |
649 | ld8.fill r16=[r2],8;; | 828 | ;; |
650 | ld8.fill r17=[r2],8;; | 829 | ld8 temp3=[temp1],16 // cr.isr |
651 | ld8.fill r18=[r2],8;; | 830 | ld8 temp4=[temp2],16 // cr.ifa |
652 | ld8.fill r19=[r2],8;; | 831 | ;; |
653 | ld8.fill r20=[r2],8;; | 832 | mov cr.isr=temp3 |
654 | ld8.fill r21=[r2],8;; | 833 | mov cr.ifa=temp4 |
655 | ld8.fill r22=[r2],8;; | 834 | ld8 temp3=[temp1],16 // cr.itir |
656 | ld8.fill r23=[r2],8;; | 835 | ld8 temp4=[temp2],16 // cr.iipa |
657 | ld8.fill r24=[r2],8;; | 836 | ;; |
658 | ld8.fill r25=[r2],8;; | 837 | mov cr.itir=temp3 |
659 | ld8.fill r26=[r2],8;; | 838 | mov cr.iipa=temp4 |
660 | ld8.fill r27=[r2],8;; | 839 | ld8 temp3=[temp1],16 // cr.iim |
661 | ld8.fill r28=[r2],8;; | 840 | ld8 temp4=[temp2],16 // cr.iha |
662 | ld8.fill r29=[r2],8;; | 841 | ;; |
663 | ld8.fill r30=[r2],8;; | 842 | mov cr.iim=temp3 |
664 | ld8.fill r31=[r2],8;; | 843 | mov cr.iha=temp4 |
665 | 844 | dep r22=0,r22,62,2 // pal_min_state, physical, uncached | |
666 | ld8 r3=[r2],8;; // increment to skip NaT | 845 | mov IA64_KR(CURRENT)=r21 |
667 | bsw.0;; | 846 | ld8 r8=[temp1] // os_status |
668 | 847 | ld8 r10=[temp2] // context | |
669 | restore_BRs: | 848 | |
670 | add r4=8,r2 // duplicate r2 in r4 | 849 | br.sptk b0 |
671 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
672 | |||
673 | ld8 r3=[r2],3*8 | ||
674 | ld8 r5=[r4],3*8 | ||
675 | ld8 r7=[r6],3*8;; | ||
676 | mov b0=r3 | ||
677 | mov b1=r5 | ||
678 | mov b2=r7;; | ||
679 | |||
680 | ld8 r3=[r2],3*8 | ||
681 | ld8 r5=[r4],3*8 | ||
682 | ld8 r7=[r6],3*8;; | ||
683 | mov b3=r3 | ||
684 | mov b4=r5 | ||
685 | mov b5=r7;; | ||
686 | |||
687 | ld8 r3=[r2],2*8 | ||
688 | ld8 r5=[r4],2*8;; | ||
689 | mov b6=r3 | ||
690 | mov b7=r5;; | ||
691 | |||
692 | restore_CRs: | ||
693 | add r4=8,r2 // duplicate r2 in r4 | ||
694 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
695 | |||
696 | ld8 r3=[r2],8*8 | ||
697 | ld8 r5=[r4],3*8 | ||
698 | ld8 r7=[r6],3*8;; // 48 byte increments | ||
699 | mov cr.dcr=r3 | ||
700 | mov cr.itm=r5 | ||
701 | mov cr.iva=r7;; | ||
702 | |||
703 | ld8 r3=[r2],8*8;; // 64 byte increments | ||
704 | // mov cr.pta=r3 | ||
705 | |||
706 | |||
707 | // if PSR.ic=1, writing interruption registers causes an illegal operation fault | ||
708 | mov r3=psr;; | ||
709 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | ||
710 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes | ||
711 | |||
712 | begin_rskip_intr_regs: | ||
713 | (p6) br rSkipIntrRegs;; | ||
714 | |||
715 | add r4=8,r2 // duplicate r2 in r4 | ||
716 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
717 | |||
718 | ld8 r3=[r2],3*8 | ||
719 | ld8 r5=[r4],3*8 | ||
720 | ld8 r7=[r6],3*8;; | ||
721 | mov cr.ipsr=r3 | ||
722 | // mov cr.isr=r5 // cr.isr is read only | ||
723 | |||
724 | ld8 r3=[r2],3*8 | ||
725 | ld8 r5=[r4],3*8 | ||
726 | ld8 r7=[r6],3*8;; | ||
727 | mov cr.iip=r3 | ||
728 | mov cr.ifa=r5 | ||
729 | mov cr.itir=r7;; | ||
730 | |||
731 | ld8 r3=[r2],3*8 | ||
732 | ld8 r5=[r4],3*8 | ||
733 | ld8 r7=[r6],3*8;; | ||
734 | mov cr.iipa=r3 | ||
735 | mov cr.ifs=r5 | ||
736 | mov cr.iim=r7 | ||
737 | |||
738 | ld8 r3=[r2],160;; // 160 byte increment | ||
739 | mov cr.iha=r3 | ||
740 | |||
741 | rSkipIntrRegs: | ||
742 | ld8 r3=[r2],152;; // another 152 byte inc. | ||
743 | |||
744 | add r4=8,r2 // duplicate r2 in r4 | ||
745 | add r6=2*8,r2;; // duplicate r2 in r6 | ||
746 | |||
747 | ld8 r3=[r2],8*3 | ||
748 | ld8 r5=[r4],8*3 | ||
749 | ld8 r7=[r6],8*3;; | ||
750 | mov cr.lid=r3 | ||
751 | // mov cr.ivr=r5 // cr.ivr is read only | ||
752 | mov cr.tpr=r7;; | ||
753 | |||
754 | ld8 r3=[r2],8*3 | ||
755 | ld8 r5=[r4],8*3 | ||
756 | ld8 r7=[r6],8*3;; | ||
757 | // mov cr.eoi=r3 | ||
758 | // mov cr.irr0=r5 // cr.irr0 is read only | ||
759 | // mov cr.irr1=r7;; // cr.irr1 is read only | ||
760 | |||
761 | ld8 r3=[r2],8*3 | ||
762 | ld8 r5=[r4],8*3 | ||
763 | ld8 r7=[r6],8*3;; | ||
764 | // mov cr.irr2=r3 // cr.irr2 is read only | ||
765 | // mov cr.irr3=r5 // cr.irr3 is read only | ||
766 | mov cr.itv=r7;; | ||
767 | |||
768 | ld8 r3=[r2],8*7 | ||
769 | ld8 r5=[r4],8*7;; | ||
770 | mov cr.pmv=r3 | ||
771 | mov cr.cmcv=r5;; | ||
772 | |||
773 | ld8 r3=[r2],8*23 | ||
774 | ld8 r5=[r4],8*23;; | ||
775 | adds r2=8*23,r2 | ||
776 | adds r4=8*23,r4;; | ||
777 | // mov cr.lrr0=r3 | ||
778 | // mov cr.lrr1=r5 | ||
779 | |||
780 | adds r2=8*2,r2;; | ||
781 | |||
782 | restore_ARs: | ||
783 | add r4=8,r2 // duplicate r2 in r4 | ||
784 | add r6=2*8,r2;; // duplicate r2 in r4 | ||
785 | |||
786 | ld8 r3=[r2],3*8 | ||
787 | ld8 r5=[r4],3*8 | ||
788 | ld8 r7=[r6],3*8;; | ||
789 | mov ar.k0=r3 | ||
790 | mov ar.k1=r5 | ||
791 | mov ar.k2=r7;; | ||
792 | |||
793 | ld8 r3=[r2],3*8 | ||
794 | ld8 r5=[r4],3*8 | ||
795 | ld8 r7=[r6],3*8;; | ||
796 | mov ar.k3=r3 | ||
797 | mov ar.k4=r5 | ||
798 | mov ar.k5=r7;; | ||
799 | |||
800 | ld8 r3=[r2],10*8 | ||
801 | ld8 r5=[r4],10*8 | ||
802 | ld8 r7=[r6],10*8;; | ||
803 | mov ar.k6=r3 | ||
804 | mov ar.k7=r5 | ||
805 | ;; | ||
806 | |||
807 | ld8 r3=[r2],3*8 | ||
808 | ld8 r5=[r4],3*8 | ||
809 | ld8 r7=[r6],3*8;; | ||
810 | // mov ar.rsc=r3 | ||
811 | // mov ar.bsp=r5 // ar.bsp is read only | ||
812 | mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode | ||
813 | ;; | ||
814 | mov ar.bspstore=r7;; | ||
815 | |||
816 | ld8 r9=[r2],8*13;; | ||
817 | mov ar.rnat=r9 | ||
818 | |||
819 | mov ar.rsc=r3 | ||
820 | ld8 r3=[r2],8*4;; | ||
821 | mov ar.ccv=r3 | ||
822 | |||
823 | ld8 r3=[r2],8*4;; | ||
824 | mov ar.unat=r3 | ||
825 | |||
826 | ld8 r3=[r2],8*4;; | ||
827 | mov ar.fpsr=r3 | ||
828 | |||
829 | ld8 r3=[r2],160;; // 160 | ||
830 | // mov ar.itc=r3 | ||
831 | |||
832 | ld8 r3=[r2],8;; | ||
833 | mov ar.pfs=r3 | ||
834 | |||
835 | ld8 r3=[r2],8;; | ||
836 | mov ar.lc=r3 | ||
837 | |||
838 | ld8 r3=[r2];; | ||
839 | mov ar.ec=r3 | ||
840 | add r2=8*62,r2;; // padding | ||
841 | |||
842 | restore_RRs: | ||
843 | mov r5=ar.lc | ||
844 | mov ar.lc=0x08-1 | ||
845 | movl r4=0x00;; | ||
846 | cStRRr: | ||
847 | dep.z r7=r4,61,3 | ||
848 | ld8 r3=[r2],8;; | ||
849 | mov rr[r7]=r3 // what are its access privileges? | ||
850 | add r4=1,r4 | ||
851 | br.cloop.sptk.few cStRRr | ||
852 | ;; | ||
853 | mov ar.lc=r5 | ||
854 | ;; | ||
855 | end_os_mca_restore: | ||
856 | br ia64_os_mca_done_restore;; | ||
857 | 850 | ||
858 | //EndStub////////////////////////////////////////////////////////////////////// | 851 | //EndStub////////////////////////////////////////////////////////////////////// |
859 | 852 | ||
860 | 853 | ||
861 | // ok, the issue here is that we need to save state information so | 854 | //++ |
862 | // it can be useable by the kernel debugger and show regs routines. | 855 | // Name: |
863 | // In order to do this, our best bet is save the current state (plus | 856 | // ia64_new_stack() |
864 | // the state information obtain from the MIN_STATE_AREA) into a pt_regs | ||
865 | // format. This way we can pass it on in a useable format. | ||
866 | // | 857 | // |
867 | 858 | // Stub Description: | |
868 | // | 859 | // |
869 | // SAL to OS entry point for INIT on the monarch processor | 860 | // Switch to the MCA/INIT stack. |
870 | // This has been defined for registration purposes with SAL | ||
871 | // as a part of ia64_mca_init. | ||
872 | // | 861 | // |
873 | // When we get here, the following registers have been | 862 | // r2 contains the return address, r3 contains either |
874 | // set by the SAL for our use | 863 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. |
875 | // | 864 | // |
876 | // 1. GR1 = OS INIT GP | 865 | // On entry RBS is still on the original stack; this routine switches RBS |
877 | // 2. GR8 = PAL_PROC physical address | 866 | // to use the MCA/INIT stack. |
878 | // 3. GR9 = SAL_PROC physical address | ||
879 | // 4. GR10 = SAL GP (physical) | ||
880 | // 5. GR11 = Init Reason | ||
881 | // 0 = Received INIT for event other than crash dump switch | ||
882 | // 1 = Received wakeup at the end of an OS_MCA corrected machine check | ||
883 | // 2 = Received INIT due to CrashDump switch assertion | ||
884 | // | 867 | // |
885 | // 6. GR12 = Return address to location within SAL_INIT procedure | 868 | // On entry, sos->pal_min_state is physical, on exit it is virtual. |
886 | 869 | // | |
870 | //-- | ||
887 | 871 | ||
888 | GLOBAL_ENTRY(ia64_monarch_init_handler) | 872 | ia64_new_stack: |
889 | .prologue | 873 | add regs=MCA_PT_REGS_OFFSET, r3 |
890 | // stash the information the SAL passed to os | 874 | add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 |
891 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | 875 | mov b0=r2 // save return address |
876 | GET_IA64_MCA_DATA(temp1) | ||
877 | invala | ||
892 | ;; | 878 | ;; |
893 | SAVE_MIN_WITH_COVER | 879 | add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack |
880 | add regs=regs, temp1 // struct pt_regs on MCA or INIT stack | ||
894 | ;; | 881 | ;; |
895 | mov r8=cr.ifa | 882 | // Address of minstate area provided by PAL is physical, uncacheable. |
896 | mov r9=cr.isr | 883 | // Convert to Linux virtual address in region 6 for C code. |
897 | adds r3=8,r2 // set up second base pointer | 884 | ld8 ms=[temp2] // pal_min_state, physical |
898 | ;; | 885 | ;; |
899 | SAVE_REST | 886 | dep temp1=-1,ms,62,2 // set region 6 |
900 | 887 | mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET | |
901 | // ok, enough should be saved at this point to be dangerous, and supply | 888 | ;; |
902 | // information for a dump | 889 | st8 [temp2]=temp1 // pal_min_state, virtual |
903 | // We need to switch to Virtual mode before hitting the C functions. | ||
904 | 890 | ||
905 | movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN | 891 | add temp4=temp3, regs // start of bspstore on new stack |
906 | mov r3=psr // get the current psr, minimum enabled at this point | ||
907 | ;; | 892 | ;; |
908 | or r2=r2,r3 | 893 | mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack |
909 | ;; | 894 | ;; |
910 | movl r3=IVirtual_Switch | 895 | flushrs // must be first in group |
896 | br.sptk b0 | ||
897 | |||
898 | //EndStub////////////////////////////////////////////////////////////////////// | ||
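The two dep instructions that move pal_min_state between its physical and uncached-virtual forms are plain operations on the region bits (63:61) of an ia64 address. Since PAL hands over a physical address with bit 61 clear, setting bits 63:62 selects region 6, the uncached kernel alias. As C (helper names hypothetical):

    /* dep temp1=-1,ms,62,2 in ia64_new_stack: region 6 (uncached) alias */
    static inline unsigned long min_state_virt(unsigned long paddr)
    {
            return paddr | (0x3UL << 62);
    }

    /* dep r22=0,r22,62,2 in ia64_state_restore: back to the physical address */
    static inline unsigned long min_state_phys(unsigned long vaddr)
    {
            return vaddr & ~(0x3UL << 62);
    }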
899 | |||
900 | |||
901 | //++ | ||
902 | // Name: | ||
903 | // ia64_old_stack() | ||
904 | // | ||
905 | // Stub Description: | ||
906 | // | ||
907 | // Switch to the old stack. | ||
908 | // | ||
909 | // r2 contains the return address, r3 contains either | ||
910 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
911 | // | ||
912 | // On entry, pal_min_state is virtual, on exit it is physical. | ||
913 | // | ||
914 | // On entry RBS is on the MCA/INIT stack; this routine switches RBS | ||
915 | // back to the previous stack. | ||
916 | // | ||
917 | // The psr is set to all zeroes. SAL return requires either all zeroes or | ||
918 | // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this | ||
919 | // code does not perform correctly. | ||
920 | // | ||
921 | // The dirty registers at the time of the event were flushed to the | ||
922 | // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers | ||
923 | // before reverting to the previous bspstore. | ||
924 | //-- | ||
925 | |||
926 | ia64_old_stack: | ||
927 | add regs=MCA_PT_REGS_OFFSET, r3 | ||
928 | mov b0=r2 // save return address | ||
929 | GET_IA64_MCA_DATA(temp2) | ||
930 | LOAD_PHYSICAL(p0,temp1,1f) | ||
911 | ;; | 931 | ;; |
912 | mov cr.iip=r3 // short return to set the appropriate bits | 932 | mov cr.ipsr=r0 |
913 | mov cr.ipsr=r2 // need to do an rfi to set appropriate bits | 933 | mov cr.ifs=r0 |
934 | mov cr.iip=temp1 | ||
914 | ;; | 935 | ;; |
936 | invala | ||
915 | rfi | 937 | rfi |
938 | 1: | ||
939 | |||
940 | add regs=regs, temp2 // struct pt_regs on MCA or INIT stack | ||
916 | ;; | 941 | ;; |
917 | IVirtual_Switch: | 942 | add temp1=PT(LOADRS), regs |
918 | // | ||
919 | // We should now be running virtual | ||
920 | // | ||
921 | // Let's call the C handler to get the rest of the state info | ||
922 | // | ||
923 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | ||
924 | ;; | 943 | ;; |
925 | adds out0=16,sp // out0 = pointer to pt_regs | 944 | ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs |
926 | ;; | 945 | ;; |
927 | DO_SAVE_SWITCH_STACK | 946 | ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore |
928 | .body | 947 | mov ar.rsc=temp2 |
929 | adds out1=16,sp // out0 = pointer to switch_stack | 948 | ;; |
949 | loadrs | ||
950 | ld8 temp4=[temp1] // restore ar.rnat | ||
951 | ;; | ||
952 | mov ar.bspstore=temp3 // back to old stack | ||
953 | ;; | ||
954 | mov ar.rnat=temp4 | ||
955 | ;; | ||
956 | |||
957 | br.sptk b0 | ||
930 | 958 | ||
931 | br.call.sptk.many rp=ia64_init_handler | 959 | //EndStub////////////////////////////////////////////////////////////////////// |
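The loadrs value that ia64_old_stack feeds into ar.rsc above was precomputed in ia64_state_save: the size of the dirty RSE partition, ar.bsp - ar.bspstore (a byte count that already includes the interleaved NaT collection slots), shifted left 16 into the ar.rsc.loadrs field. Writing that value to ar.rsc and issuing loadrs reloads exactly the dirty registers before bspstore is switched back. The arithmetic as a sketch:

    /* shl temp3=temp3,16 in ia64_state_save: position (ar.bsp - ar.bspstore)
     * in the loadrs field of ar.rsc. */
    static inline unsigned long rsc_loadrs(unsigned long bsp, unsigned long bspstore)
    {
            return (bsp - bspstore) << 16;
    }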
932 | .ret1: | ||
933 | 960 | ||
934 | return_from_init: | ||
935 | br.sptk return_from_init | ||
936 | END(ia64_monarch_init_handler) | ||
937 | 961 | ||
962 | //++ | ||
963 | // Name: | ||
964 | // ia64_set_kernel_registers() | ||
938 | // | 965 | // |
939 | // SAL to OS entry point for INIT on the slave processor | 966 | // Stub Description: |
940 | // This has been defined for registration purposes with SAL | 967 | // |
941 | // as a part of ia64_mca_init. | 968 | // Set the registers that are required by the C code in order to run on an |
969 | // MCA/INIT stack. | ||
970 | // | ||
971 | // r2 contains the return address, r3 contains either | ||
972 | // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. | ||
942 | // | 973 | // |
974 | //-- | ||
975 | |||
976 | ia64_set_kernel_registers: | ||
977 | add temp3=MCA_SP_OFFSET, r3 | ||
978 | add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 | ||
979 | mov b0=r2 // save return address | ||
980 | GET_IA64_MCA_DATA(temp1) | ||
981 | ;; | ||
982 | add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp | ||
983 | add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack | ||
984 | add r13=temp1, r3 // set current to start of MCA/INIT stack | ||
985 | ;; | ||
986 | ld8 r1=[temp4] // OS GP from SAL OS state | ||
987 | ;; | ||
988 | DATA_PA_TO_VA(r1,temp1) | ||
989 | DATA_PA_TO_VA(r12,temp2) | ||
990 | DATA_PA_TO_VA(r13,temp3) | ||
991 | ;; | ||
992 | mov IA64_KR(CURRENT)=r13 | ||
993 | |||
994 | // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? | ||
995 | |||
996 | br.sptk b0 | ||
997 | |||
998 | //EndStub////////////////////////////////////////////////////////////////////// | ||
999 | |||
1000 | #undef ms | ||
1001 | #undef regs | ||
1002 | #undef temp1 | ||
1003 | #undef temp2 | ||
1004 | #undef temp3 | ||
1005 | #undef temp4 | ||
1006 | |||
943 | 1007 | ||
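For readers following the ia64_set_kernel_registers stub above, the address arithmetic is easier to see in C. A minimal sketch under stated assumptions: mca_data stands in for the per-cpu area returned by GET_IA64_MCA_DATA, stack_off for r3, the offsets are made-up placeholders (not the real layout), and the DATA_PA_TO_VA physical-to-virtual conversion is elided.

        #include <stdint.h>

        /* Hypothetical offsets, for illustration only. */
        enum { MCA_SP_OFF = 0x1000, MCA_SOS_OFF = 0x2000, OS_GP_OFF = 0 };

        struct regs_sketch { uint64_t r1, r12, r13; };  /* gp, sp, current */

        void set_kernel_registers_sketch(uint64_t mca_data, uint64_t stack_off,
                                         struct regs_sketch *out)
        {
                out->r12 = mca_data + stack_off + MCA_SP_OFF;  /* kernel sp on MCA/INIT stack */
                out->r13 = mca_data + stack_off;               /* current = start of stack */
                out->r1  = *(uint64_t *)(mca_data + stack_off
                                         + MCA_SOS_OFF + OS_GP_OFF); /* OS gp from SAL OS state */
        }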
944 | GLOBAL_ENTRY(ia64_slave_init_handler) | 1008 | // Support function for mca.c, it is here to avoid using inline asm. Given the |
945 | 1: br.sptk 1b | 1009 | // address of an rnat slot, if that address is below the current ar.bspstore |
946 | END(ia64_slave_init_handler) | 1010 | // then return the contents of that slot, otherwise return the contents of |
1011 | // ar.rnat. | ||
1012 | GLOBAL_ENTRY(ia64_get_rnat) | ||
1013 | alloc r14=ar.pfs,1,0,0,0 | ||
1014 | mov ar.rsc=0 | ||
1015 | ;; | ||
1016 | mov r14=ar.bspstore | ||
1017 | ;; | ||
1018 | cmp.lt p6,p7=in0,r14 | ||
1019 | ;; | ||
1020 | (p6) ld8 r8=[in0] | ||
1021 | (p7) mov r8=ar.rnat | ||
1022 | mov ar.rsc=3 | ||
1023 | br.ret.sptk.many rp | ||
1024 | END(ia64_get_rnat) | ||
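The decision ia64_get_rnat encodes is small enough to model in C. A userspace sketch, with ar.bspstore and ar.rnat passed in as plain values since they are registers the model cannot read, and the ar.rsc serialization around the access elided:

        #include <stdint.h>

        /* Slots below ar.bspstore have been spilled to memory, so their NaT
         * bits live in the in-memory rnat slot; otherwise they are still in
         * the register file and the NaT bits are in ar.rnat itself. */
        uint64_t get_rnat_model(const uint64_t *rnat_slot,
                                const uint64_t *bspstore, uint64_t ar_rnat)
        {
                return (rnat_slot < bspstore) ? *rnat_slot : ar_rnat;
        }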
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index abc0113a821d..6e683745af49 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * | 4 | * |
5 | * Copyright (C) 2004 FUJITSU LIMITED | 5 | * Copyright (C) 2004 FUJITSU LIMITED |
6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) | 6 | * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) |
7 | * Copyright (C) 2005 Silicon Graphics, Inc | ||
8 | * Copyright (C) 2005 Keith Owens <kaos@sgi.com> | ||
7 | */ | 9 | */ |
8 | #include <linux/config.h> | 10 | #include <linux/config.h> |
9 | #include <linux/types.h> | 11 | #include <linux/types.h> |
@@ -38,10 +40,6 @@ | |||
38 | /* max size of SAL error record (default) */ | 40 | /* max size of SAL error record (default) */ |
39 | static int sal_rec_max = 10000; | 41 | static int sal_rec_max = 10000; |
40 | 42 | ||
41 | /* from mca.c */ | ||
42 | static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state; | ||
43 | static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state; | ||
44 | |||
45 | /* from mca_drv_asm.S */ | 43 | /* from mca_drv_asm.S */ |
46 | extern void *mca_handler_bhhook(void); | 44 | extern void *mca_handler_bhhook(void); |
47 | 45 | ||
@@ -316,7 +314,8 @@ init_record_index_pools(void) | |||
316 | */ | 314 | */ |
317 | 315 | ||
318 | static mca_type_t | 316 | static mca_type_t |
319 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 317 | is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
318 | struct ia64_sal_os_state *sos) | ||
320 | { | 319 | { |
321 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 320 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
322 | 321 | ||
@@ -327,7 +326,7 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | |||
327 | * Therefore it is a local MCA when rendezvous has not been requested. | 326 | * Therefore it is a local MCA when rendezvous has not been requested. |
328 | * If rendezvous failed, the system must be down. | 327 | * If rendezvous failed, the system must be down. |
329 | */ | 328 | */ |
330 | switch (sal_to_os_handoff_state->imsto_rendez_state) { | 329 | switch (sos->rv_rc) { |
331 | case -1: /* SAL rendezvous unsuccessful */ | 330 | case -1: /* SAL rendezvous unsuccessful */ |
332 | return MCA_IS_GLOBAL; | 331 | return MCA_IS_GLOBAL; |
333 | case 0: /* SAL rendezvous not required */ | 332 | case 0: /* SAL rendezvous not required */ |
@@ -388,7 +387,8 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) | |||
388 | */ | 387 | */ |
389 | 388 | ||
390 | static int | 389 | static int |
391 | recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 390 | recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
391 | struct ia64_sal_os_state *sos) | ||
392 | { | 392 | { |
393 | sal_log_mod_error_info_t *smei; | 393 | sal_log_mod_error_info_t *smei; |
394 | pal_min_state_area_t *pmsa; | 394 | pal_min_state_area_t *pmsa; |
@@ -426,7 +426,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
426 | * setup for resume to bottom half of MCA, | 426 | * setup for resume to bottom half of MCA, |
427 | * "mca_handler_bhhook" | 427 | * "mca_handler_bhhook" |
428 | */ | 428 | */ |
429 | pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61)); | 429 | pmsa = sos->pal_min_state; |
430 | /* pass to bhhook as 1st argument (gr8) */ | 430 | /* pass to bhhook as 1st argument (gr8) */ |
431 | pmsa->pmsa_gr[8-1] = smei->target_identifier; | 431 | pmsa->pmsa_gr[8-1] = smei->target_identifier; |
432 | /* set interrupted return address (but no use) */ | 432 | /* set interrupted return address (but no use) */ |
@@ -459,7 +459,8 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
459 | */ | 459 | */ |
460 | 460 | ||
461 | static int | 461 | static int |
462 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 462 | recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
463 | struct ia64_sal_os_state *sos) | ||
463 | { | 464 | { |
464 | int status = 0; | 465 | int status = 0; |
465 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 466 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
@@ -469,7 +470,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
469 | case 1: /* partial read */ | 470 | case 1: /* partial read */ |
470 | case 3: /* full line(cpu) read */ | 471 | case 3: /* full line(cpu) read */ |
471 | case 9: /* I/O space read */ | 472 | case 9: /* I/O space read */ |
472 | status = recover_from_read_error(slidx, peidx, pbci); | 473 | status = recover_from_read_error(slidx, peidx, pbci, sos); |
473 | break; | 474 | break; |
474 | case 0: /* unknown */ | 475 | case 0: /* unknown */ |
475 | case 2: /* partial write */ | 476 | case 2: /* partial write */ |
@@ -508,7 +509,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_ | |||
508 | */ | 509 | */ |
509 | 510 | ||
510 | static int | 511 | static int |
511 | recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) | 512 | recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, |
513 | struct ia64_sal_os_state *sos) | ||
512 | { | 514 | { |
513 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); | 515 | pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); |
514 | 516 | ||
@@ -545,7 +547,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
545 | * This means "there are some platform errors". | 547 | * This means "there are some platform errors". |
546 | */ | 548 | */ |
547 | if (platform) | 549 | if (platform) |
548 | return recover_from_platform_error(slidx, peidx, pbci); | 550 | return recover_from_platform_error(slidx, peidx, pbci, sos); |
549 | /* | 551 | /* |
550 | * On account of strange SAL error record, we cannot recover. | 552 | * On account of strange SAL error record, we cannot recover. |
551 | */ | 553 | */ |
@@ -562,8 +564,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t * | |||
562 | 564 | ||
563 | static int | 565 | static int |
564 | mca_try_to_recover(void *rec, | 566 | mca_try_to_recover(void *rec, |
565 | ia64_mca_sal_to_os_state_t *sal_to_os_state, | 567 | struct ia64_sal_os_state *sos) |
566 | ia64_mca_os_to_sal_state_t *os_to_sal_state) | ||
567 | { | 568 | { |
568 | int platform_err; | 569 | int platform_err; |
569 | int n_proc_err; | 570 | int n_proc_err; |
@@ -571,10 +572,6 @@ mca_try_to_recover(void *rec, | |||
571 | peidx_table_t peidx; | 572 | peidx_table_t peidx; |
572 | pal_bus_check_info_t pbci; | 573 | pal_bus_check_info_t pbci; |
573 | 574 | ||
574 | /* handoff state from/to mca.c */ | ||
575 | sal_to_os_handoff_state = sal_to_os_state; | ||
576 | os_to_sal_handoff_state = os_to_sal_state; | ||
577 | |||
578 | /* Make index of SAL error record */ | 575 | /* Make index of SAL error record */ |
579 | platform_err = mca_make_slidx(rec, &slidx); | 576 | platform_err = mca_make_slidx(rec, &slidx); |
580 | 577 | ||
@@ -597,11 +594,11 @@ mca_try_to_recover(void *rec, | |||
597 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); | 594 | *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); |
598 | 595 | ||
599 | /* Check whether MCA is global or not */ | 596 | /* Check whether MCA is global or not */ |
600 | if (is_mca_global(&peidx, &pbci)) | 597 | if (is_mca_global(&peidx, &pbci, sos)) |
601 | return 0; | 598 | return 0; |
602 | 599 | ||
603 | /* Try to recover a processor error */ | 600 | /* Try to recover a processor error */ |
604 | return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci); | 601 | return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos); |
605 | } | 602 | } |
606 | 603 | ||
607 | /* | 604 | /* |
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index f6d8a010d99b..85ed54179afa 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h | |||
@@ -5,73 +5,6 @@ | |||
5 | #include "entry.h" | 5 | #include "entry.h" |
6 | 6 | ||
7 | /* | 7 | /* |
8 | * For ivt.s we want to access the stack virtually so we don't have to disable translation | ||
9 | * on interrupts. | ||
10 | * | ||
11 | * On entry: | ||
12 | * r1: pointer to current task (ar.k6) | ||
13 | */ | ||
14 | #define MINSTATE_START_SAVE_MIN_VIRT \ | ||
15 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
16 | ;; \ | ||
17 | (pUStk) mov.m r24=ar.rnat; \ | ||
18 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
19 | (pKStk) mov r1=sp; /* get sp */ \ | ||
20 | ;; \ | ||
21 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
22 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
23 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
24 | ;; \ | ||
25 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
26 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
27 | ;; \ | ||
28 | (pUStk) mov r18=ar.bsp; \ | ||
29 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ | ||
30 | |||
31 | #define MINSTATE_END_SAVE_MIN_VIRT \ | ||
32 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ | ||
33 | ;; | ||
34 | |||
35 | /* | ||
36 | * For mca_asm.S we want to access the stack physically since the state is saved before we | ||
37 | * go virtual and don't want to destroy the iip or ipsr. | ||
38 | */ | ||
39 | #define MINSTATE_START_SAVE_MIN_PHYS \ | ||
40 | (pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \ | ||
41 | (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \ | ||
42 | (pKStk) ld8 r3 = [r3];; \ | ||
43 | (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \ | ||
44 | (pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \ | ||
45 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ | ||
46 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \ | ||
47 | ;; \ | ||
48 | (pUStk) mov r24=ar.rnat; \ | ||
49 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
50 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
51 | (pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \ | ||
52 | ;; \ | ||
53 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
54 | ;; \ | ||
55 | (pUStk) mov r18=ar.bsp; \ | ||
56 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
57 | |||
58 | #define MINSTATE_END_SAVE_MIN_PHYS \ | ||
59 | dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \ | ||
60 | ;; | ||
61 | |||
62 | #ifdef MINSTATE_VIRT | ||
63 | # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT) | ||
64 | # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT | ||
65 | # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT | ||
66 | #endif | ||
67 | |||
68 | #ifdef MINSTATE_PHYS | ||
69 | # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg | ||
70 | # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS | ||
71 | # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS | ||
72 | #endif | ||
73 | |||
74 | /* | ||
75 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves | 8 | * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves |
76 | * the minimum state necessary that allows us to turn psr.ic back | 9 | * the minimum state necessary that allows us to turn psr.ic back |
77 | * on. | 10 | * on. |
@@ -97,7 +30,7 @@ | |||
97 | * we can pass interruption state as arguments to a handler. | 30 | * we can pass interruption state as arguments to a handler. |
98 | */ | 31 | */ |
99 | #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ | 32 | #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ |
100 | MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ | 33 | mov r16=IA64_KR(CURRENT); /* M */ \ |
101 | mov r27=ar.rsc; /* M */ \ | 34 | mov r27=ar.rsc; /* M */ \ |
102 | mov r20=r1; /* A */ \ | 35 | mov r20=r1; /* A */ \ |
103 | mov r25=ar.unat; /* M */ \ | 36 | mov r25=ar.unat; /* M */ \ |
@@ -118,7 +51,21 @@ | |||
118 | SAVE_IFS; \ | 51 | SAVE_IFS; \ |
119 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ | 52 | cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \ |
120 | ;; \ | 53 | ;; \ |
121 | MINSTATE_START_SAVE_MIN \ | 54 | (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ |
55 | ;; \ | ||
56 | (pUStk) mov.m r24=ar.rnat; \ | ||
57 | (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ | ||
58 | (pKStk) mov r1=sp; /* get sp */ \ | ||
59 | ;; \ | ||
60 | (pUStk) lfetch.fault.excl.nt1 [r22]; \ | ||
61 | (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | ||
62 | (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ | ||
63 | ;; \ | ||
64 | (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ | ||
65 | (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ | ||
66 | ;; \ | ||
67 | (pUStk) mov r18=ar.bsp; \ | ||
68 | (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ | ||
122 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ | 69 | adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ |
123 | adds r16=PT(CR_IPSR),r1; \ | 70 | adds r16=PT(CR_IPSR),r1; \ |
124 | ;; \ | 71 | ;; \ |
@@ -181,7 +128,8 @@ | |||
181 | EXTRA; \ | 128 | EXTRA; \ |
182 | movl r1=__gp; /* establish kernel global pointer */ \ | 129 | movl r1=__gp; /* establish kernel global pointer */ \ |
183 | ;; \ | 130 | ;; \ |
184 | MINSTATE_END_SAVE_MIN | 131 | bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ |
132 | ;; | ||
185 | 133 | ||
186 | /* | 134 | /* |
187 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). | 135 | * SAVE_REST saves the remainder of pt_regs (with psr.ic on). |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 25e7c8344564..89faa603c6be 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -307,11 +307,9 @@ vm_info(char *page) | |||
307 | 307 | ||
308 | if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { | 308 | if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { |
309 | printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); | 309 | printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); |
310 | return 0; | 310 | } else { |
311 | } | ||
312 | 311 | ||
313 | 312 | p += sprintf(p, | |
314 | p += sprintf(p, | ||
315 | "Physical Address Space : %d bits\n" | 313 | "Physical Address Space : %d bits\n" |
316 | "Virtual Address Space : %d bits\n" | 314 | "Virtual Address Space : %d bits\n" |
317 | "Protection Key Registers(PKR) : %d\n" | 315 | "Protection Key Registers(PKR) : %d\n" |
@@ -319,92 +317,99 @@ vm_info(char *page) | |||
319 | "Hash Tag ID : 0x%x\n" | 317 | "Hash Tag ID : 0x%x\n" |
320 | "Size of RR.rid : %d\n", | 318 | "Size of RR.rid : %d\n", |
321 | vm_info_1.pal_vm_info_1_s.phys_add_size, | 319 | vm_info_1.pal_vm_info_1_s.phys_add_size, |
322 | vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1, | 320 | vm_info_2.pal_vm_info_2_s.impl_va_msb+1, |
323 | vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id, | 321 | vm_info_1.pal_vm_info_1_s.max_pkr+1, |
322 | vm_info_1.pal_vm_info_1_s.key_size, | ||
323 | vm_info_1.pal_vm_info_1_s.hash_tag_id, | ||
324 | vm_info_2.pal_vm_info_2_s.rid_size); | 324 | vm_info_2.pal_vm_info_2_s.rid_size); |
325 | } | ||
325 | 326 | ||
326 | if (ia64_pal_mem_attrib(&attrib) != 0) | 327 | if (ia64_pal_mem_attrib(&attrib) == 0) { |
327 | return 0; | 328 | p += sprintf(p, "Supported memory attributes : "); |
328 | 329 | sep = ""; | |
329 | p += sprintf(p, "Supported memory attributes : "); | 330 | for (i = 0; i < 8; i++) { |
330 | sep = ""; | 331 | if (attrib & (1 << i)) { |
331 | for (i = 0; i < 8; i++) { | 332 | p += sprintf(p, "%s%s", sep, mem_attrib[i]); |
332 | if (attrib & (1 << i)) { | 333 | sep = ", "; |
333 | p += sprintf(p, "%s%s", sep, mem_attrib[i]); | 334 | } |
334 | sep = ", "; | ||
335 | } | 335 | } |
336 | p += sprintf(p, "\n"); | ||
336 | } | 337 | } |
337 | p += sprintf(p, "\n"); | ||
338 | 338 | ||
339 | if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { | 339 | if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { |
340 | printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); | 340 | printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); |
341 | return 0; | 341 | } else { |
342 | } | ||
343 | |||
344 | p += sprintf(p, | ||
345 | "\nTLB walker : %simplemented\n" | ||
346 | "Number of DTR : %d\n" | ||
347 | "Number of ITR : %d\n" | ||
348 | "TLB insertable page sizes : ", | ||
349 | vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", | ||
350 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, | ||
351 | vm_info_1.pal_vm_info_1_s.max_itr_entry+1); | ||
352 | 342 | ||
343 | p += sprintf(p, | ||
344 | "\nTLB walker : %simplemented\n" | ||
345 | "Number of DTR : %d\n" | ||
346 | "Number of ITR : %d\n" | ||
347 | "TLB insertable page sizes : ", | ||
348 | vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", | ||
349 | vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, | ||
350 | vm_info_1.pal_vm_info_1_s.max_itr_entry+1); | ||
353 | 351 | ||
354 | p = bitvector_process(p, tr_pages); | ||
355 | 352 | ||
356 | p += sprintf(p, "\nTLB purgeable page sizes : "); | 353 | p = bitvector_process(p, tr_pages); |
357 | 354 | ||
358 | p = bitvector_process(p, vw_pages); | 355 | p += sprintf(p, "\nTLB purgeable page sizes : "); |
359 | 356 | ||
357 | p = bitvector_process(p, vw_pages); | ||
358 | } | ||
360 | if ((status=ia64_get_ptce(&ptce)) != 0) { | 359 | if ((status=ia64_get_ptce(&ptce)) != 0) { |
361 | printk(KERN_ERR "ia64_get_ptce=%ld\n", status); | 360 | printk(KERN_ERR "ia64_get_ptce=%ld\n", status); |
362 | return 0; | 361 | } else { |
363 | } | 362 | p += sprintf(p, |
364 | |||
365 | p += sprintf(p, | ||
366 | "\nPurge base address : 0x%016lx\n" | 363 | "\nPurge base address : 0x%016lx\n" |
367 | "Purge outer loop count : %d\n" | 364 | "Purge outer loop count : %d\n" |
368 | "Purge inner loop count : %d\n" | 365 | "Purge inner loop count : %d\n" |
369 | "Purge outer loop stride : %d\n" | 366 | "Purge outer loop stride : %d\n" |
370 | "Purge inner loop stride : %d\n", | 367 | "Purge inner loop stride : %d\n", |
371 | ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); | 368 | ptce.base, ptce.count[0], ptce.count[1], |
369 | ptce.stride[0], ptce.stride[1]); | ||
372 | 370 | ||
373 | p += sprintf(p, | 371 | p += sprintf(p, |
374 | "TC Levels : %d\n" | 372 | "TC Levels : %d\n" |
375 | "Unique TC(s) : %d\n", | 373 | "Unique TC(s) : %d\n", |
376 | vm_info_1.pal_vm_info_1_s.num_tc_levels, | 374 | vm_info_1.pal_vm_info_1_s.num_tc_levels, |
377 | vm_info_1.pal_vm_info_1_s.max_unique_tcs); | 375 | vm_info_1.pal_vm_info_1_s.max_unique_tcs); |
378 | 376 | ||
379 | for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { | 377 | for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { |
380 | for (j=2; j>0 ; j--) { | 378 | for (j=2; j>0 ; j--) { |
381 | tc_pages = 0; /* just in case */ | 379 | tc_pages = 0; /* just in case */ |
382 | 380 | ||
383 | 381 | ||
384 | /* even without unification, some levels may not be present */ | 382 | /* even without unification, some levels may not be present */ |
385 | if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { | 383 | if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { |
386 | continue; | 384 | continue; |
387 | } | 385 | } |
388 | 386 | ||
389 | p += sprintf(p, | 387 | p += sprintf(p, |
390 | "\n%s Translation Cache Level %d:\n" | 388 | "\n%s Translation Cache Level %d:\n" |
391 | "\tHash sets : %d\n" | 389 | "\tHash sets : %d\n" |
392 | "\tAssociativity : %d\n" | 390 | "\tAssociativity : %d\n" |
393 | "\tNumber of entries : %d\n" | 391 | "\tNumber of entries : %d\n" |
394 | "\tFlags : ", | 392 | "\tFlags : ", |
395 | cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, | 393 | cache_types[j+tc_info.tc_unified], i+1, |
396 | tc_info.tc_associativity, tc_info.tc_num_entries); | 394 | tc_info.tc_num_sets, |
395 | tc_info.tc_associativity, | ||
396 | tc_info.tc_num_entries); | ||
397 | 397 | ||
398 | if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); | 398 | if (tc_info.tc_pf) |
399 | if (tc_info.tc_unified) p += sprintf(p, "Unified "); | 399 | p += sprintf(p, "PreferredPageSizeOptimized "); |
400 | if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction"); | 400 | if (tc_info.tc_unified) |
401 | p += sprintf(p, "Unified "); | ||
402 | if (tc_info.tc_reduce_tr) | ||
403 | p += sprintf(p, "TCReduction"); | ||
401 | 404 | ||
402 | p += sprintf(p, "\n\tSupported page sizes: "); | 405 | p += sprintf(p, "\n\tSupported page sizes: "); |
403 | 406 | ||
404 | p = bitvector_process(p, tc_pages); | 407 | p = bitvector_process(p, tc_pages); |
405 | 408 | ||
406 | /* when unified data (j=2) is enough */ | 409 | /* when unified data (j=2) is enough */ |
407 | if (tc_info.tc_unified) break; | 410 | if (tc_info.tc_unified) |
411 | break; | ||
412 | } | ||
408 | } | 413 | } |
409 | } | 414 | } |
410 | p += sprintf(p, "\n"); | 415 | p += sprintf(p, "\n"); |
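The restructuring above replaces early returns with else-blocks, so one failed PAL call no longer truncates the rest of the /proc output. The pattern, in miniature (a sketch of the structure, not the exact kernel text):

        if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0)
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
        else
                p += sprintf(p, "...");  /* emit this section */
        /* fall through to the next PAL query either way */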
@@ -440,14 +445,14 @@ register_info(char *page) | |||
440 | p += sprintf(p, "\n"); | 445 | p += sprintf(p, "\n"); |
441 | } | 446 | } |
442 | 447 | ||
443 | if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; | 448 | if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) { |
444 | 449 | ||
445 | p += sprintf(p, | 450 | p += sprintf(p, |
446 | "RSE stacked physical registers : %ld\n" | 451 | "RSE stacked physical registers : %ld\n" |
447 | "RSE load/store hints : %ld (%s)\n", | 452 | "RSE load/store hints : %ld (%s)\n", |
448 | phys_stacked, hints.ph_data, | 453 | phys_stacked, hints.ph_data, |
449 | hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); | 454 | hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); |
450 | 455 | } | |
451 | if (ia64_pal_debug_info(&iregs, &dregs)) | 456 | if (ia64_pal_debug_info(&iregs, &dregs)) |
452 | return 0; | 457 | return 0; |
453 | 458 | ||
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1201ac8a116..1650353e3f77 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/pagemap.h> | 38 | #include <linux/pagemap.h> |
39 | #include <linux/mount.h> | 39 | #include <linux/mount.h> |
40 | #include <linux/bitops.h> | 40 | #include <linux/bitops.h> |
41 | #include <linux/rcupdate.h> | ||
41 | 42 | ||
42 | #include <asm/errno.h> | 43 | #include <asm/errno.h> |
43 | #include <asm/intrinsics.h> | 44 | #include <asm/intrinsics.h> |
@@ -496,7 +497,7 @@ typedef struct { | |||
496 | static pfm_stats_t pfm_stats[NR_CPUS]; | 497 | static pfm_stats_t pfm_stats[NR_CPUS]; |
497 | static pfm_session_t pfm_sessions; /* global sessions information */ | 498 | static pfm_session_t pfm_sessions; /* global sessions information */ |
498 | 499 | ||
499 | static spinlock_t pfm_alt_install_check = SPIN_LOCK_UNLOCKED; | 500 | static DEFINE_SPINLOCK(pfm_alt_install_check); |
500 | static pfm_intr_handler_desc_t *pfm_alt_intr_handler; | 501 | static pfm_intr_handler_desc_t *pfm_alt_intr_handler; |
501 | 502 | ||
502 | static struct proc_dir_entry *perfmon_dir; | 503 | static struct proc_dir_entry *perfmon_dir; |
@@ -2217,15 +2218,17 @@ static void | |||
2217 | pfm_free_fd(int fd, struct file *file) | 2218 | pfm_free_fd(int fd, struct file *file) |
2218 | { | 2219 | { |
2219 | struct files_struct *files = current->files; | 2220 | struct files_struct *files = current->files; |
2221 | struct fdtable *fdt = files_fdtable(files); | ||
2220 | 2222 | ||
2221 | /* | 2223 | /* |
2222 | * there is no fd_uninstall(), so we do it here | 2224 | * there is no fd_uninstall(), so we do it here |
2223 | */ | 2225 | */ |
2224 | spin_lock(&files->file_lock); | 2226 | spin_lock(&files->file_lock); |
2225 | files->fd[fd] = NULL; | 2227 | rcu_assign_pointer(fdt->fd[fd], NULL); |
2226 | spin_unlock(&files->file_lock); | 2228 | spin_unlock(&files->file_lock); |
2227 | 2229 | ||
2228 | if (file) put_filp(file); | 2230 | if (file) |
2231 | put_filp(file); | ||
2229 | put_unused_fd(fd); | 2232 | put_unused_fd(fd); |
2230 | } | 2233 | } |
2231 | 2234 | ||
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 6f0cc7a6634e..ca68e6e44a72 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -22,6 +22,11 @@ | |||
22 | * | 22 | * |
23 | * Dec 5 2004 kaos@sgi.com | 23 | * Dec 5 2004 kaos@sgi.com |
24 | * Standardize which records are cleared automatically. | 24 | * Standardize which records are cleared automatically. |
25 | * | ||
26 | * Aug 18 2005 kaos@sgi.com | ||
27 | * mca.c may not pass a buffer; a NULL buffer just indicates that a new | ||
28 | * record is available in SAL. | ||
29 | * Replace some NR_CPUS uses with online-cpu checks, for cpu hotplug. | ||
25 | */ | 30 | */ |
26 | 31 | ||
27 | #include <linux/types.h> | 32 | #include <linux/types.h> |
@@ -193,7 +198,7 @@ shift1_data_saved (struct salinfo_data *data, int shift) | |||
193 | * The buffer passed from mca.c points to the output from ia64_log_get. This is | 198 | * The buffer passed from mca.c points to the output from ia64_log_get. This is |
194 | * a persistent buffer but its contents can change between the interrupt and | 199 | * a persistent buffer but its contents can change between the interrupt and |
195 | * when user space processes the record. Save the record id to identify | 200 | * when user space processes the record. Save the record id to identify |
196 | * changes. | 201 | * changes. If the buffer is NULL then just update the bitmap. |
197 | */ | 202 | */ |
198 | void | 203 | void |
199 | salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) | 204 | salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) |
@@ -206,27 +211,29 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) | |||
206 | 211 | ||
207 | BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); | 212 | BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); |
208 | 213 | ||
209 | if (irqsafe) | 214 | if (buffer) { |
210 | spin_lock_irqsave(&data_saved_lock, flags); | 215 | if (irqsafe) |
211 | for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { | 216 | spin_lock_irqsave(&data_saved_lock, flags); |
212 | if (!data_saved->buffer) | 217 | for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { |
213 | break; | 218 | if (!data_saved->buffer) |
214 | } | 219 | break; |
215 | if (i == saved_size) { | 220 | } |
216 | if (!data->saved_num) { | 221 | if (i == saved_size) { |
217 | shift1_data_saved(data, 0); | 222 | if (!data->saved_num) { |
218 | data_saved = data->data_saved + saved_size - 1; | 223 | shift1_data_saved(data, 0); |
219 | } else | 224 | data_saved = data->data_saved + saved_size - 1; |
220 | data_saved = NULL; | 225 | } else |
221 | } | 226 | data_saved = NULL; |
222 | if (data_saved) { | 227 | } |
223 | data_saved->cpu = smp_processor_id(); | 228 | if (data_saved) { |
224 | data_saved->id = ((sal_log_record_header_t *)buffer)->id; | 229 | data_saved->cpu = smp_processor_id(); |
225 | data_saved->size = size; | 230 | data_saved->id = ((sal_log_record_header_t *)buffer)->id; |
226 | data_saved->buffer = buffer; | 231 | data_saved->size = size; |
232 | data_saved->buffer = buffer; | ||
233 | } | ||
234 | if (irqsafe) | ||
235 | spin_unlock_irqrestore(&data_saved_lock, flags); | ||
227 | } | 236 | } |
228 | if (irqsafe) | ||
229 | spin_unlock_irqrestore(&data_saved_lock, flags); | ||
230 | 237 | ||
231 | if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { | 238 | if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { |
232 | if (irqsafe) | 239 | if (irqsafe) |
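With the NULL-buffer case split out, a caller that only needs to flag that a new record is waiting in SAL can skip the save-area bookkeeping entirely. A hedged sketch of such a call; the argument values are illustrative, not taken from mca.c:

        /* just mark the event for this cpu; nothing to snapshot */
        salinfo_log_wakeup(SAL_INFO_TYPE_MCA, NULL, 0, 0);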
@@ -244,7 +251,7 @@ salinfo_timeout_check(struct salinfo_data *data) | |||
244 | int i; | 251 | int i; |
245 | if (!data->open) | 252 | if (!data->open) |
246 | return; | 253 | return; |
247 | for (i = 0; i < NR_CPUS; ++i) { | 254 | for_each_online_cpu(i) { |
248 | if (test_bit(i, &data->cpu_event)) { | 255 | if (test_bit(i, &data->cpu_event)) { |
249 | /* double up() is not a problem, user space will see no | 256 | /* double up() is not a problem, user space will see no |
250 | * records for the additional "events". | 257 | * records for the additional "events". |
@@ -291,7 +298,7 @@ retry: | |||
291 | 298 | ||
292 | n = data->cpu_check; | 299 | n = data->cpu_check; |
293 | for (i = 0; i < NR_CPUS; i++) { | 300 | for (i = 0; i < NR_CPUS; i++) { |
294 | if (test_bit(n, &data->cpu_event)) { | 301 | if (test_bit(n, &data->cpu_event) && cpu_online(n)) { |
295 | cpu = n; | 302 | cpu = n; |
296 | break; | 303 | break; |
297 | } | 304 | } |
@@ -585,11 +592,10 @@ salinfo_init(void) | |||
585 | 592 | ||
586 | /* we missed any events before now */ | 593 | /* we missed any events before now */ |
587 | online = 0; | 594 | online = 0; |
588 | for (j = 0; j < NR_CPUS; j++) | 595 | for_each_online_cpu(j) { |
589 | if (cpu_online(j)) { | 596 | set_bit(j, &data->cpu_event); |
590 | set_bit(j, &data->cpu_event); | 597 | ++online; |
591 | ++online; | 598 | } |
592 | } | ||
593 | sema_init(&data->sem, online); | 599 | sema_init(&data->sem, online); |
594 | 600 | ||
595 | *sdir++ = dir; | 601 | *sdir++ = dir; |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 84f89da7c640..1f5c26dbe705 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -384,7 +384,7 @@ setup_arch (char **cmdline_p) | |||
384 | if (early_console_setup(*cmdline_p) == 0) | 384 | if (early_console_setup(*cmdline_p) == 0) |
385 | mark_bsp_online(); | 385 | mark_bsp_online(); |
386 | 386 | ||
387 | #ifdef CONFIG_ACPI_BOOT | 387 | #ifdef CONFIG_ACPI |
388 | /* Initialize the ACPI boot-time table parser */ | 388 | /* Initialize the ACPI boot-time table parser */ |
389 | acpi_table_init(); | 389 | acpi_table_init(); |
390 | # ifdef CONFIG_ACPI_NUMA | 390 | # ifdef CONFIG_ACPI_NUMA |
@@ -420,7 +420,7 @@ setup_arch (char **cmdline_p) | |||
420 | 420 | ||
421 | cpu_init(); /* initialize the bootstrap CPU */ | 421 | cpu_init(); /* initialize the bootstrap CPU */ |
422 | 422 | ||
423 | #ifdef CONFIG_ACPI_BOOT | 423 | #ifdef CONFIG_ACPI |
424 | acpi_boot_init(); | 424 | acpi_boot_init(); |
425 | #endif | 425 | #endif |
426 | 426 | ||
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 770fab37928e..f2dbcd1db0d4 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c | |||
@@ -35,7 +35,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |||
35 | return -ENOMEM; | 35 | return -ENOMEM; |
36 | 36 | ||
37 | #ifdef CONFIG_HUGETLB_PAGE | 37 | #ifdef CONFIG_HUGETLB_PAGE |
38 | if (REGION_NUMBER(addr) == REGION_HPAGE) | 38 | if (REGION_NUMBER(addr) == RGN_HPAGE) |
39 | addr = 0; | 39 | addr = 0; |
40 | #endif | 40 | #endif |
41 | if (!addr) | 41 | if (!addr) |
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 92ff46ad21e2..706b7734e191 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -36,7 +36,7 @@ int arch_register_cpu(int num) | |||
36 | parent = &sysfs_nodes[cpu_to_node(num)]; | 36 | parent = &sysfs_nodes[cpu_to_node(num)]; |
37 | #endif /* CONFIG_NUMA */ | 37 | #endif /* CONFIG_NUMA */ |
38 | 38 | ||
39 | #ifdef CONFIG_ACPI_BOOT | 39 | #ifdef CONFIG_ACPI |
40 | /* | 40 | /* |
41 | * If CPEI cannot be re-targeted, and this is | 41 | * If CPEI cannot be re-targeted, and this is |
42 | * the CPEI target, then don't create the control file | 42 | * the CPEI target, then don't create the control file |
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 4440c8343fa4..f970359e7edf 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/vt_kern.h> /* For unblank_screen() */ | 15 | #include <linux/vt_kern.h> /* For unblank_screen() */ |
16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ | 16 | #include <linux/module.h> /* for EXPORT_SYMBOL */ |
17 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
18 | #include <linux/kprobes.h> | ||
18 | 19 | ||
19 | #include <asm/fpswa.h> | 20 | #include <asm/fpswa.h> |
20 | #include <asm/ia32.h> | 21 | #include <asm/ia32.h> |
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err) | |||
122 | } | 123 | } |
123 | 124 | ||
124 | void | 125 | void |
125 | ia64_bad_break (unsigned long break_num, struct pt_regs *regs) | 126 | __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) |
126 | { | 127 | { |
127 | siginfo_t siginfo; | 128 | siginfo_t siginfo; |
128 | int sig, code; | 129 | int sig, code; |
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, | |||
444 | return rv; | 445 | return rv; |
445 | } | 446 | } |
446 | 447 | ||
447 | void | 448 | void __kprobes |
448 | ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, | 449 | ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, |
449 | unsigned long iim, unsigned long itir, long arg5, long arg6, | 450 | unsigned long iim, unsigned long itir, long arg5, long arg6, |
450 | long arg7, struct pt_regs regs) | 451 | long arg7, struct pt_regs regs) |
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 490dfc9ab47f..4e9d06c48a8b 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -184,7 +184,7 @@ uncached_free_page(unsigned long maddr) | |||
184 | { | 184 | { |
185 | int node; | 185 | int node; |
186 | 186 | ||
187 | node = nasid_to_cnodeid(NASID_GET(maddr)); | 187 | node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET); |
188 | 188 | ||
189 | dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node); | 189 | dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node); |
190 | 190 | ||
@@ -217,7 +217,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg) | |||
217 | 217 | ||
218 | memset((char *)vstart, 0, length); | 218 | memset((char *)vstart, 0, length); |
219 | 219 | ||
220 | node = nasid_to_cnodeid(NASID_GET(start)); | 220 | node = paddr_to_nid(start); |
221 | 221 | ||
222 | for (; vstart < vend ; vstart += PAGE_SIZE) { | 222 | for (; vstart < vend ; vstart += PAGE_SIZE) { |
223 | dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart); | 223 | dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart); |
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index 3288be47bc75..93d5a3b41f69 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c | |||
@@ -2020,28 +2020,6 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t, | |||
2020 | } | 2020 | } |
2021 | 2021 | ||
2022 | void | 2022 | void |
2023 | unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, | ||
2024 | struct pt_regs *pt, struct switch_stack *sw) | ||
2025 | { | ||
2026 | unsigned long sof; | ||
2027 | |||
2028 | init_frame_info(info, t, sw, pt->r12); | ||
2029 | info->cfm_loc = &pt->cr_ifs; | ||
2030 | info->unat_loc = &pt->ar_unat; | ||
2031 | info->pfs_loc = &pt->ar_pfs; | ||
2032 | sof = *info->cfm_loc & 0x7f; | ||
2033 | info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof); | ||
2034 | info->ip = pt->cr_iip + ia64_psr(pt)->ri; | ||
2035 | info->pt = (unsigned long) pt; | ||
2036 | UNW_DPRINT(3, "unwind.%s:\n" | ||
2037 | " bsp 0x%lx\n" | ||
2038 | " sof 0x%lx\n" | ||
2039 | " ip 0x%lx\n", | ||
2040 | __FUNCTION__, info->bsp, sof, info->ip); | ||
2041 | find_save_locs(info); | ||
2042 | } | ||
2043 | |||
2044 | void | ||
2045 | unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) | 2023 | unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) |
2046 | { | 2024 | { |
2047 | unsigned long sol; | 2025 | unsigned long sol; |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index a676e79e0681..30d8564e9603 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -48,6 +48,7 @@ SECTIONS | |||
48 | *(.text) | 48 | *(.text) |
49 | SCHED_TEXT | 49 | SCHED_TEXT |
50 | LOCK_TEXT | 50 | LOCK_TEXT |
51 | KPROBES_TEXT | ||
51 | *(.gnu.linkonce.t*) | 52 | *(.gnu.linkonce.t*) |
52 | } | 53 | } |
53 | .text2 : AT(ADDR(.text2) - LOAD_OFFSET) | 54 | .text2 : AT(ADDR(.text2) - LOAD_OFFSET) |
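KPROBES_TEXT gathers functions placed in the .kprobes.text section; this is how the __kprobes annotations in traps.c above and mm/fault.c below keep the trap and fault handlers off-limits to kprobes, since probing them would recurse when a probe itself faults. The marker is roughly a section attribute (a paraphrase of the header of this era, not a verbatim quote):

        #define __kprobes  __attribute__((__section__(".kprobes.text")))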
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 1902c3c2ef92..799407e7726f 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile | |||
@@ -6,7 +6,7 @@ obj-y := io.o | |||
6 | 6 | ||
7 | lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ | 7 | lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ |
8 | __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ | 8 | __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ |
9 | bitop.o checksum.o clear_page.o csum_partial_copy.o copy_page.o \ | 9 | bitop.o checksum.o clear_page.o csum_partial_copy.o \ |
10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ | 10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ |
11 | flush.o ip_fast_csum.o do_csum.o \ | 11 | flush.o ip_fast_csum.o do_csum.o \ |
12 | memset.o strlen.o swiotlb.o | 12 | memset.o strlen.o swiotlb.o |
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 3e2cfa2c6d39..2a0d27f2f21b 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S | |||
@@ -20,6 +20,7 @@ | |||
20 | * | 20 | * |
21 | * Note: "in0" and "in1" are preserved for debugging purposes. | 21 | * Note: "in0" and "in1" are preserved for debugging purposes. |
22 | */ | 22 | */ |
23 | .section .kprobes.text,"ax" | ||
23 | GLOBAL_ENTRY(flush_icache_range) | 24 | GLOBAL_ENTRY(flush_icache_range) |
24 | 25 | ||
25 | .prologue | 26 | .prologue |
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index 6f308e62c137..46c9331e7ab5 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S | |||
@@ -625,8 +625,11 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ | |||
625 | clrrrb | 625 | clrrrb |
626 | ;; | 626 | ;; |
627 | alloc saved_pfs_stack=ar.pfs,3,3,3,0 | 627 | alloc saved_pfs_stack=ar.pfs,3,3,3,0 |
628 | cmp.lt p8,p0=A,r0 | ||
628 | sub B = dst0, saved_in0 // how many byte copied so far | 629 | sub B = dst0, saved_in0 // how many byte copied so far |
629 | ;; | 630 | ;; |
631 | (p8) mov A = 0; // A shouldn't be negative, cap it | ||
632 | ;; | ||
630 | sub C = A, B | 633 | sub C = A, B |
631 | sub D = saved_in2, A | 634 | sub D = saved_in2, A |
632 | ;; | 635 | ;; |
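The surrounding recovery code derives both C (A - B) and D (saved_in2 - A) from A, so a negative A would poison both; the added compare-and-move clamps it first. Restated in C, with the variable roles as the neighbouring comments describe them:

        if (A < 0)
                A = 0;          /* A shouldn't be negative, cap it */
        C = A - B;
        D = saved_in2 - A;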
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c index ab7b3ad99a7f..dbc0b3e449c5 100644 --- a/arch/ia64/lib/swiotlb.c +++ b/arch/ia64/lib/swiotlb.c | |||
@@ -93,8 +93,7 @@ static int __init | |||
93 | setup_io_tlb_npages(char *str) | 93 | setup_io_tlb_npages(char *str) |
94 | { | 94 | { |
95 | if (isdigit(*str)) { | 95 | if (isdigit(*str)) { |
96 | io_tlb_nslabs = simple_strtoul(str, &str, 0) << | 96 | io_tlb_nslabs = simple_strtoul(str, &str, 0); |
97 | (PAGE_SHIFT - IO_TLB_SHIFT); | ||
98 | /* avoid tail segment of size < IO_TLB_SEGSIZE */ | 97 | /* avoid tail segment of size < IO_TLB_SEGSIZE */ |
99 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | 98 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); |
100 | } | 99 | } |
@@ -117,7 +116,7 @@ swiotlb_init_with_default_size (size_t default_size) | |||
117 | unsigned long i; | 116 | unsigned long i; |
118 | 117 | ||
119 | if (!io_tlb_nslabs) { | 118 | if (!io_tlb_nslabs) { |
120 | io_tlb_nslabs = (default_size >> PAGE_SHIFT); | 119 | io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); |
121 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); | 120 | io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); |
122 | } | 121 | } |
123 | 122 | ||
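Both hunks make the units consistent: io_tlb_nslabs counts slabs of 1 << IO_TLB_SHIFT bytes, so the boot parameter is now taken as a slab count directly and the default pool size is divided by the slab size rather than the page size. A quick userspace check of the difference, with illustrative values (2 KB slabs, 16 KB pages, a 64 MB default):

        #include <stdio.h>

        int main(void)
        {
                unsigned long default_size = 64UL << 20;  /* e.g. 64 MB */
                int io_tlb_shift = 11, page_shift = 14;   /* illustrative */

                printf("old: %lu slabs\n", default_size >> page_shift);   /* too few */
                printf("new: %lu slabs\n", default_size >> io_tlb_shift); /* correct */
                return 0;
        }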
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index ff62551eb3a1..3c32af910d60 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/smp_lock.h> | 10 | #include <linux/smp_lock.h> |
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/kprobes.h> | ||
12 | 13 | ||
13 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
14 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
@@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address) | |||
76 | return pte_present(pte); | 77 | return pte_present(pte); |
77 | } | 78 | } |
78 | 79 | ||
79 | void | 80 | void __kprobes |
80 | ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) | 81 | ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) |
81 | { | 82 | { |
82 | int signal = SIGSEGV, code = SEGV_MAPERR; | 83 | int signal = SIGSEGV, code = SEGV_MAPERR; |
@@ -229,9 +230,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re | |||
229 | return; | 230 | return; |
230 | } | 231 | } |
231 | 232 | ||
232 | if (ia64_done_with_exception(regs)) | ||
233 | return; | ||
234 | |||
235 | /* | 233 | /* |
236 | * Since we have no vma's for region 5, we might get here even if the address is | 234 | * Since we have no vma's for region 5, we might get here even if the address is |
237 | * valid, due to the VHPT walker inserting a non present translation that becomes | 235 | * valid, due to the VHPT walker inserting a non present translation that becomes |
@@ -242,6 +240,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re | |||
242 | if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address)) | 240 | if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address)) |
243 | return; | 241 | return; |
244 | 242 | ||
243 | if (ia64_done_with_exception(regs)) | ||
244 | return; | ||
245 | |||
245 | /* | 246 | /* |
246 | * Oops. The kernel tried to access some bad page. We'll have to terminate things | 247 | * Oops. The kernel tried to access some bad page. We'll have to terminate things |
247 | * with extreme prejudice. | 248 | * with extreme prejudice. |
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index e0a776a3044c..2d13889d0a99 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c | |||
@@ -76,7 +76,7 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len) | |||
76 | return -EINVAL; | 76 | return -EINVAL; |
77 | if (addr & ~HPAGE_MASK) | 77 | if (addr & ~HPAGE_MASK) |
78 | return -EINVAL; | 78 | return -EINVAL; |
79 | if (REGION_NUMBER(addr) != REGION_HPAGE) | 79 | if (REGION_NUMBER(addr) != RGN_HPAGE) |
80 | return -EINVAL; | 80 | return -EINVAL; |
81 | 81 | ||
82 | return 0; | 82 | return 0; |
@@ -87,7 +87,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ | |||
87 | struct page *page; | 87 | struct page *page; |
88 | pte_t *ptep; | 88 | pte_t *ptep; |
89 | 89 | ||
90 | if (REGION_NUMBER(addr) != REGION_HPAGE) | 90 | if (REGION_NUMBER(addr) != RGN_HPAGE) |
91 | return ERR_PTR(-EINVAL); | 91 | return ERR_PTR(-EINVAL); |
92 | 92 | ||
93 | ptep = huge_pte_offset(mm, addr); | 93 | ptep = huge_pte_offset(mm, addr); |
@@ -142,8 +142,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u | |||
142 | return -ENOMEM; | 142 | return -ENOMEM; |
143 | if (len & ~HPAGE_MASK) | 143 | if (len & ~HPAGE_MASK) |
144 | return -EINVAL; | 144 | return -EINVAL; |
145 | /* This code assumes that REGION_HPAGE != 0. */ | 145 | /* This code assumes that RGN_HPAGE != 0. */ |
146 | if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1))) | 146 | if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) |
147 | addr = HPAGE_REGION_BASE; | 147 | addr = HPAGE_REGION_BASE; |
148 | else | 148 | else |
149 | addr = ALIGN(addr, HPAGE_SIZE); | 149 | addr = ALIGN(addr, HPAGE_SIZE); |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 65f9958db9f0..1281c609ee98 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -382,13 +382,22 @@ ia64_mmu_init (void *my_cpu_data) | |||
382 | 382 | ||
383 | if (impl_va_bits < 51 || impl_va_bits > 61) | 383 | if (impl_va_bits < 51 || impl_va_bits > 61) |
384 | panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); | 384 | panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); |
385 | /* | ||
386 | * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, | ||
387 | * which must fit into "vmlpt_bits - pte_bits" slots. Second half of | ||
388 | * the test makes sure that our mapped space doesn't overlap the | ||
389 | * unimplemented hole in the middle of the region. | ||
390 | */ | ||
391 | if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || | ||
392 | (mapped_space_bits > impl_va_bits - 1)) | ||
393 | panic("Cannot build a big enough virtual-linear page table" | ||
394 | " to cover mapped address space.\n" | ||
395 | " Try using a smaller page size.\n"); | ||
396 | |||
385 | 397 | ||
386 | /* place the VMLPT at the end of each page-table mapped region: */ | 398 | /* place the VMLPT at the end of each page-table mapped region: */ |
387 | pta = POW2(61) - POW2(vmlpt_bits); | 399 | pta = POW2(61) - POW2(vmlpt_bits); |
388 | 400 | ||
389 | if (POW2(mapped_space_bits) >= pta) | ||
390 | panic("mm/init: overlap between virtually mapped linear page table and " | ||
391 | "mapped kernel space!"); | ||
392 | /* | 401 | /* |
393 | * Set the (virtually mapped linear) page table address. Bit | 402 | * Set the (virtually mapped linear) page table address. Bit |
394 | * 8 selects between the short and long format, bits 2-7 the | 403 | * 8 selects between the short and long format, bits 2-7 the |
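The replacement check can be evaluated by hand. A sketch with placeholder parameters (8-byte PTEs give pte_bits = 3 and 16 KB pages give PAGE_SHIFT = 14; the remaining values are illustrative, not measured from a real configuration):

        #include <stdio.h>

        int main(void)
        {
                int page_shift = 14, pte_bits = 3;  /* illustrative */
                int mapped_space_bits = 50, vmlpt_bits = 40, impl_va_bits = 51;

                if (mapped_space_bits - page_shift > vmlpt_bits - pte_bits ||
                    mapped_space_bits > impl_va_bits - 1)
                        printf("panic: VMLPT cannot cover mapped space\n");
                else
                        printf("ok: %d pte-slot bits needed, %d available\n",
                               mapped_space_bits - page_shift,
                               vmlpt_bits - pte_bits);
                return 0;
        }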
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index f9472c50ab42..9b5de589b82f 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -24,7 +24,6 @@ | |||
24 | 24 | ||
25 | #include <asm/machvec.h> | 25 | #include <asm/machvec.h> |
26 | #include <asm/page.h> | 26 | #include <asm/page.h> |
27 | #include <asm/segment.h> | ||
28 | #include <asm/system.h> | 27 | #include <asm/system.h> |
29 | #include <asm/io.h> | 28 | #include <asm/io.h> |
30 | #include <asm/sal.h> | 29 | #include <asm/sal.h> |
@@ -499,13 +498,11 @@ pcibios_enable_device (struct pci_dev *dev, int mask) | |||
499 | return acpi_pci_irq_enable(dev); | 498 | return acpi_pci_irq_enable(dev); |
500 | } | 499 | } |
501 | 500 | ||
502 | #ifdef CONFIG_ACPI_DEALLOCATE_IRQ | ||
503 | void | 501 | void |
504 | pcibios_disable_device (struct pci_dev *dev) | 502 | pcibios_disable_device (struct pci_dev *dev) |
505 | { | 503 | { |
506 | acpi_pci_irq_disable(dev); | 504 | acpi_pci_irq_disable(dev); |
507 | } | 505 | } |
508 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | ||
509 | 506 | ||
510 | void | 507 | void |
511 | pcibios_align_resource (void *data, struct resource *res, | 508 | pcibios_align_resource (void *data, struct resource *res, |
diff --git a/arch/ia64/sn/include/tio.h b/arch/ia64/sn/include/tio.h index 0139124dd54a..6b2e7b75eb19 100644 --- a/arch/ia64/sn/include/tio.h +++ b/arch/ia64/sn/include/tio.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #ifndef _ASM_IA64_SN_TIO_H | 9 | #ifndef _ASM_IA64_SN_TIO_H |
@@ -26,6 +26,10 @@ | |||
26 | #define TIO_ITTE_VALID_MASK 0x1 | 26 | #define TIO_ITTE_VALID_MASK 0x1 |
27 | #define TIO_ITTE_VALID_SHIFT 16 | 27 | #define TIO_ITTE_VALID_SHIFT 16 |
28 | 28 | ||
29 | #define TIO_ITTE_WIDGET(itte) \ | ||
30 | (((itte) >> TIO_ITTE_WIDGET_SHIFT) & TIO_ITTE_WIDGET_MASK) | ||
31 | #define TIO_ITTE_VALID(itte) \ | ||
32 | (((itte) >> TIO_ITTE_VALID_SHIFT) & TIO_ITTE_VALID_MASK) | ||
29 | 33 | ||
30 | #define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \ | 34 | #define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \ |
31 | REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \ | 35 | REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \ |
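The new accessors are plain shift-and-mask field extractions, mirroring the TIO_ITTE_PUT encoding. A userspace demonstration; the VALID shift and mask match the context above and the widget shift matches the IIO definition in hubdev.h below, while the widget width is a placeholder:

        #include <stdio.h>
        #include <stdint.h>

        #define ITTE_WIDGET_BITS   4    /* placeholder width */
        #define ITTE_WIDGET_MASK   ((1 << ITTE_WIDGET_BITS) - 1)
        #define ITTE_WIDGET_SHIFT  8
        #define ITTE_VALID_MASK    0x1
        #define ITTE_VALID_SHIFT   16

        #define ITTE_WIDGET(itte) (((itte) >> ITTE_WIDGET_SHIFT) & ITTE_WIDGET_MASK)
        #define ITTE_VALID(itte)  (((itte) >> ITTE_VALID_SHIFT) & ITTE_VALID_MASK)

        int main(void)
        {
                uint64_t itte = (1UL << 16) | (0xcUL << 8);  /* valid, widget 0xc */
                printf("widget=%#lx valid=%lu\n",
                       (unsigned long)ITTE_WIDGET(itte),
                       (unsigned long)ITTE_VALID(itte));
                return 0;
        }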
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h index 580a1c0403a7..71c2b271b4c6 100644 --- a/arch/ia64/sn/include/xtalk/hubdev.h +++ b/arch/ia64/sn/include/xtalk/hubdev.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H | 8 | #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H |
9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H | 9 | #define _ASM_IA64_SN_XTALK_HUBDEV_H |
@@ -16,6 +16,9 @@ | |||
16 | #define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1) | 16 | #define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1) |
17 | #define IIO_ITTE_WIDGET_SHIFT 8 | 17 | #define IIO_ITTE_WIDGET_SHIFT 8 |
18 | 18 | ||
19 | #define IIO_ITTE_WIDGET(itte) \ | ||
20 | (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK) | ||
21 | |||
19 | /* | 22 | /* |
20 | * Use the top big window as a surrogate for the first small window | 23 | * Use the top big window as a surrogate for the first small window |
21 | */ | 24 | */ |
@@ -34,7 +37,8 @@ struct sn_flush_device_list { | |||
34 | unsigned long sfdl_force_int_addr; | 37 | unsigned long sfdl_force_int_addr; |
35 | unsigned long sfdl_flush_value; | 38 | unsigned long sfdl_flush_value; |
36 | volatile unsigned long *sfdl_flush_addr; | 39 | volatile unsigned long *sfdl_flush_addr; |
37 | uint64_t sfdl_persistent_busnum; | 40 | uint32_t sfdl_persistent_busnum; |
41 | uint32_t sfdl_persistent_segment; | ||
38 | struct pcibus_info *sfdl_pcibus_info; | 42 | struct pcibus_info *sfdl_pcibus_info; |
39 | spinlock_t sfdl_flush_lock; | 43 | spinlock_t sfdl_flush_lock; |
40 | }; | 44 | }; |
@@ -58,7 +62,8 @@ struct hubdev_info { | |||
58 | 62 | ||
59 | void *hdi_nodepda; | 63 | void *hdi_nodepda; |
60 | void *hdi_node_vertex; | 64 | void *hdi_node_vertex; |
61 | void *hdi_xtalk_vertex; | 65 | uint32_t max_segment_number; |
66 | uint32_t max_pcibus_number; | ||
62 | }; | 67 | }; |
63 | 68 | ||
64 | extern void hubdev_init_node(nodepda_t *, cnodeid_t); | 69 | extern void hubdev_init_node(nodepda_t *, cnodeid_t); |
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index 647deae9bfcd..45854c637e9c 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c | |||
@@ -29,16 +29,30 @@ | |||
29 | 29 | ||
30 | /* two interfaces on two btes */ | 30 | /* two interfaces on two btes */ |
31 | #define MAX_INTERFACES_TO_TRY 4 | 31 | #define MAX_INTERFACES_TO_TRY 4 |
32 | #define MAX_NODES_TO_TRY 2 | ||
32 | 33 | ||
33 | static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface) | 34 | static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface) |
34 | { | 35 | { |
35 | nodepda_t *tmp_nodepda; | 36 | nodepda_t *tmp_nodepda; |
36 | 37 | ||
38 | if (nasid_to_cnodeid(nasid) == -1) | ||
39 | return (struct bteinfo_s *)NULL; | ||
40 | |||
37 | tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid)); | 41 | tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid)); |
38 | return &tmp_nodepda->bte_if[interface]; | 42 | return &tmp_nodepda->bte_if[interface]; |
39 | 43 | ||
40 | } | 44 | } |
41 | 45 | ||
46 | static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode) | ||
47 | { | ||
48 | if (is_shub2()) { | ||
49 | BTE_CTRL_STORE(bte, (IBLS_BUSY | ((len) | (mode) << 24))); | ||
50 | } else { | ||
51 | BTE_LNSTAT_STORE(bte, len); | ||
52 | BTE_CTRL_STORE(bte, mode); | ||
53 | } | ||
54 | } | ||
55 | |||
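On shub2 the busy bit, length, and mode are packed into a single control store instead of the two separate stores shub1 needs. A userspace model of the packing; IBLS_BUSY's real bit position is not shown in this patch, so a placeholder is used:

        #include <stdio.h>
        #include <stdint.h>

        #define IBLS_BUSY (1UL << 20)   /* placeholder bit position */

        int main(void)
        {
                uint64_t len = 0x40, mode = 0x1;
                uint64_t ctrl = IBLS_BUSY | len | (mode << 24);  /* shub2: one store */

                printf("ctrl=%#lx\n", (unsigned long)ctrl);
                return 0;
        }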
42 | /************************************************************************ | 56 | /************************************************************************ |
43 | * Block Transfer Engine copy related functions. | 57 | * Block Transfer Engine copy related functions. |
44 | * | 58 | * |
@@ -67,13 +81,15 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | |||
67 | { | 81 | { |
68 | u64 transfer_size; | 82 | u64 transfer_size; |
69 | u64 transfer_stat; | 83 | u64 transfer_stat; |
84 | u64 notif_phys_addr; | ||
70 | struct bteinfo_s *bte; | 85 | struct bteinfo_s *bte; |
71 | bte_result_t bte_status; | 86 | bte_result_t bte_status; |
72 | unsigned long irq_flags; | 87 | unsigned long irq_flags; |
73 | unsigned long itc_end = 0; | 88 | unsigned long itc_end = 0; |
74 | struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY]; | 89 | int nasid_to_try[MAX_NODES_TO_TRY]; |
75 | int bte_if_index; | 90 | int my_nasid = get_nasid(); |
76 | int bte_pri, bte_sec; | 91 | int bte_if_index, nasid_index; |
92 | int bte_first, btes_per_node = BTES_PER_NODE; | ||
77 | 93 | ||
78 | BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n", | 94 | BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n", |
79 | src, dest, len, mode, notification)); | 95 | src, dest, len, mode, notification)); |
@@ -86,36 +102,26 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | |||
86 | (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)); | 102 | (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)); |
87 | BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT))); | 103 | BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT))); |
88 | 104 | ||
89 | /* CPU 0 (per node) tries bte0 first, CPU 1 try bte1 first */ | 105 | /* |
90 | if (cpuid_to_subnode(smp_processor_id()) == 0) { | 106 | * Start with interface corresponding to cpu number |
91 | bte_pri = 0; | 107 | */ |
92 | bte_sec = 1; | 108 | bte_first = raw_smp_processor_id() % btes_per_node; |
93 | } else { | ||
94 | bte_pri = 1; | ||
95 | bte_sec = 0; | ||
96 | } | ||
97 | 109 | ||
98 | if (mode & BTE_USE_DEST) { | 110 | if (mode & BTE_USE_DEST) { |
99 | /* try remote then local */ | 111 | /* try remote then local */ |
100 | btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri); | 112 | nasid_to_try[0] = NASID_GET(dest); |
101 | btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec); | ||
102 | if (mode & BTE_USE_ANY) { | 113 | if (mode & BTE_USE_ANY) { |
103 | btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri); | 114 | nasid_to_try[1] = my_nasid; |
104 | btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec); | ||
105 | } else { | 115 | } else { |
106 | btes_to_try[2] = NULL; | 116 | nasid_to_try[1] = 0; |
107 | btes_to_try[3] = NULL; | ||
108 | } | 117 | } |
109 | } else { | 118 | } else { |
110 | /* try local then remote */ | 119 | /* try local then remote */ |
111 | btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri); | 120 | nasid_to_try[0] = my_nasid; |
112 | btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec); | ||
113 | if (mode & BTE_USE_ANY) { | 121 | if (mode & BTE_USE_ANY) { |
114 | btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri); | 122 | nasid_to_try[1] = NASID_GET(dest); |
115 | btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec); | ||
116 | } else { | 123 | } else { |
117 | btes_to_try[2] = NULL; | 124 | nasid_to_try[1] = 0;
118 | btes_to_try[3] = NULL; | ||
119 | } | 125 | } |
120 | } | 126 | } |
121 | 127 | ||
@@ -123,11 +129,12 @@ retry_bteop: | |||
123 | do { | 129 | do { |
124 | local_irq_save(irq_flags); | 130 | local_irq_save(irq_flags); |
125 | 131 | ||
126 | bte_if_index = 0; | 132 | bte_if_index = bte_first; |
133 | nasid_index = 0; | ||
127 | 134 | ||
128 | /* Attempt to lock one of the BTE interfaces. */ | 135 | /* Attempt to lock one of the BTE interfaces. */ |
129 | while (bte_if_index < MAX_INTERFACES_TO_TRY) { | 136 | while (nasid_index < MAX_NODES_TO_TRY) { |
130 | bte = btes_to_try[bte_if_index++]; | 137 | bte = bte_if_on_node(nasid_to_try[nasid_index], bte_if_index);
131 | 138 | ||
132 | if (bte == NULL) { | 139 | if (bte == NULL) { |
133 | continue; | 140 | continue; |
@@ -143,6 +150,15 @@ retry_bteop: | |||
143 | break; | 150 | break; |
144 | } | 151 | } |
145 | } | 152 | } |
153 | |||
154 | bte_if_index = (bte_if_index + 1) % btes_per_node; /* Next interface */ | ||
155 | if (bte_if_index == bte_first) { | ||
156 | /* | ||
157 | * We've tried all interfaces on this node | ||
158 | */ | ||
159 | nasid_index++; | ||
160 | } | ||
161 | |||
146 | bte = NULL; | 162 | bte = NULL; |
147 | } | 163 | } |
148 | 164 | ||
@@ -169,7 +185,13 @@ retry_bteop: | |||
169 | 185 | ||
170 | /* Initialize the notification to a known value. */ | 186 | /* Initialize the notification to a known value. */ |
171 | *bte->most_rcnt_na = BTE_WORD_BUSY; | 187 | *bte->most_rcnt_na = BTE_WORD_BUSY; |
188 | notif_phys_addr = TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)); | ||
172 | 189 | ||
190 | if (is_shub2()) { | ||
191 | src = SH2_TIO_PHYS_TO_DMA(src); | ||
192 | dest = SH2_TIO_PHYS_TO_DMA(dest); | ||
193 | notif_phys_addr = SH2_TIO_PHYS_TO_DMA(notif_phys_addr); | ||
194 | } | ||
173 | /* Set the source and destination registers */ | 195 | /* Set the source and destination registers */ |
174 | BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); | 196 | BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); |
175 | BTE_SRC_STORE(bte, TO_PHYS(src)); | 197 | BTE_SRC_STORE(bte, TO_PHYS(src)); |
@@ -177,14 +199,12 @@ retry_bteop: | |||
177 | BTE_DEST_STORE(bte, TO_PHYS(dest)); | 199 | BTE_DEST_STORE(bte, TO_PHYS(dest)); |
178 | 200 | ||
179 | /* Set the notification register */ | 201 | /* Set the notification register */ |
180 | BTE_PRINTKV(("IBNA = 0x%lx)\n", | 202 | BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr)); |
181 | TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)))); | 203 | BTE_NOTIF_STORE(bte, notif_phys_addr); |
182 | BTE_NOTIF_STORE(bte, | ||
183 | TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))); | ||
184 | 204 | ||
185 | /* Initiate the transfer */ | 205 | /* Initiate the transfer */ |
186 | BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); | 206 | BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); |
187 | BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode)); | 207 | bte_start_transfer(bte, transfer_size, BTE_VALID_MODE(mode)); |
188 | 208 | ||
189 | itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); | 209 | itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); |
190 | 210 | ||
@@ -195,6 +215,7 @@ retry_bteop: | |||
195 | } | 215 | } |
196 | 216 | ||
197 | while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) { | 217 | while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) { |
218 | cpu_relax(); | ||
198 | if (ia64_get_itc() > itc_end) { | 219 | if (ia64_get_itc() > itc_end) { |
199 | BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n", | 220 | BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n", |
200 | NASID_GET(bte->bte_base_addr), bte->bte_num, | 221 | NASID_GET(bte->bte_base_addr), bte->bte_num, |
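
The selection loop above walks the candidate nasids and, on each node, rotates through the BTE interfaces starting at the one matching the CPU number (raw_smp_processor_id() % BTES_PER_NODE), wrapping around before advancing to the next nasid. A minimal standalone sketch of that ordering follows; the node count, interface count, and nasid values are illustrative, not the kernel's.

#include <stdio.h>

#define BTES_PER_NODE    2
#define MAX_NODES_TO_TRY 2

static void show_try_order(int cpu, const int nasid_to_try[MAX_NODES_TO_TRY])
{
	int bte_first = cpu % BTES_PER_NODE;
	int bte_if_index = bte_first;
	int nasid_index = 0;

	while (nasid_index < MAX_NODES_TO_TRY) {
		printf("try nasid %d, interface %d\n",
		       nasid_to_try[nasid_index], bte_if_index);
		/* next interface; wrapping back to bte_first exhausts the node */
		bte_if_index = (bte_if_index + 1) % BTES_PER_NODE;
		if (bte_if_index == bte_first)
			nasid_index++;
	}
}

int main(void)
{
	int nasids[MAX_NODES_TO_TRY] = { 0, 4 };	/* e.g. preferred, then fallback */

	show_try_order(1, nasids);
	return 0;
}

For CPU 1 this prints (nasid 0, interface 1), (0, 0), (4, 1), (4, 0): every interface on the preferred node is tried before the fallback node is touched.
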
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c index 5c39b43ba3c0..5c5eb01c50f0 100644 --- a/arch/ia64/sn/kernel/huberror.c +++ b/arch/ia64/sn/kernel/huberror.c | |||
@@ -76,7 +76,7 @@ void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum) | |||
76 | */ | 76 | */ |
77 | REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum)); | 77 | REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum)); |
78 | while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND) | 78 | while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND) |
79 | udelay(1); | 79 | cpu_relax(); |
80 | 80 | ||
81 | } | 81 | } |
82 | 82 | ||
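
The hunk above replaces a timed udelay(1) with cpu_relax() while the ICDR pending bit drains: the loop still polls, but issues a pause hint instead of burning a fixed delay. A userspace approximation of the pattern, assuming a plain compiler barrier as a stand-in for the real cpu_relax() (which on ia64 expands to a hint @pause):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 1;	/* stands in for the IIO_ICDR_PND bit */

static inline void relax(void)	/* stand-in for cpu_relax() */
{
	__asm__ __volatile__("" ::: "memory");
}

static void *clearer(void *arg)	/* stands in for the hub hardware */
{
	(void)arg;
	atomic_store_explicit(&pending, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, clearer, NULL);
	while (atomic_load_explicit(&pending, memory_order_acquire))
		relax();	/* poll until the "hardware" clears the bit */
	pthread_join(t, NULL);
	puts("pending cleared");
	return 0;
}
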
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 414cdf2e3c96..906622d9f933 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/sn/simulator.h> | 18 | #include <asm/sn/simulator.h> |
19 | #include <asm/sn/sn_sal.h> | 19 | #include <asm/sn/sn_sal.h> |
20 | #include <asm/sn/tioca_provider.h> | 20 | #include <asm/sn/tioca_provider.h> |
21 | #include <asm/sn/tioce_provider.h> | ||
21 | #include "xtalk/hubdev.h" | 22 | #include "xtalk/hubdev.h" |
22 | #include "xtalk/xwidgetdev.h" | 23 | #include "xtalk/xwidgetdev.h" |
23 | 24 | ||
@@ -44,6 +45,9 @@ int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ | |||
44 | 45 | ||
45 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ | 46 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ |
46 | 47 | ||
48 | static int max_segment_number = 0; /* Default highest segment number */ | ||
49 | static int max_pcibus_number = 255; /* Default highest pci bus number */ | ||
50 | |||
47 | /* | 51 | /* |
48 | * Hooks and struct for unsupported pci providers | 52 | * Hooks and struct for unsupported pci providers |
49 | */ | 53 | */ |
@@ -157,13 +161,28 @@ static void sn_fixup_ionodes(void) | |||
157 | uint64_t nasid; | 161 | uint64_t nasid; |
158 | int i, widget; | 162 | int i, widget; |
159 | 163 | ||
164 | /* | ||
165 | * Get SGI Specific HUB chipset information. | ||
166 | * Inform Prom that this kernel can support domain bus numbering. | ||
167 | */ | ||
160 | for (i = 0; i < numionodes; i++) { | 168 | for (i = 0; i < numionodes; i++) { |
161 | hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); | 169 | hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); |
162 | nasid = cnodeid_to_nasid(i); | 170 | nasid = cnodeid_to_nasid(i); |
171 | hubdev->max_segment_number = 0xffffffff; | ||
172 | hubdev->max_pcibus_number = 0xff; | ||
163 | status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev)); | 173 | status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev)); |
164 | if (status) | 174 | if (status) |
165 | continue; | 175 | continue; |
166 | 176 | ||
177 | /* Save the largest Domain and pcibus numbers found. */ | ||
178 | if (hubdev->max_segment_number) { | ||
179 | /* | ||
180 | * Dealing with a Prom that supports segments. | ||
181 | */ | ||
182 | max_segment_number = hubdev->max_segment_number; | ||
183 | max_pcibus_number = hubdev->max_pcibus_number; | ||
184 | } | ||
185 | |||
167 | /* Attach the error interrupt handlers */ | 186 | /* Attach the error interrupt handlers */ |
168 | if (nasid & 1) | 187 | if (nasid & 1) |
169 | ice_error_init(hubdev); | 188 | ice_error_init(hubdev); |
@@ -230,7 +249,7 @@ void sn_pci_unfixup_slot(struct pci_dev *dev) | |||
230 | void sn_pci_fixup_slot(struct pci_dev *dev) | 249 | void sn_pci_fixup_slot(struct pci_dev *dev) |
231 | { | 250 | { |
232 | int idx; | 251 | int idx; |
233 | int segment = 0; | 252 | int segment = pci_domain_nr(dev->bus); |
234 | int status = 0; | 253 | int status = 0; |
235 | struct pcibus_bussoft *bs; | 254 | struct pcibus_bussoft *bs; |
236 | struct pci_bus *host_pci_bus; | 255 | struct pci_bus *host_pci_bus; |
@@ -283,9 +302,9 @@ void sn_pci_fixup_slot(struct pci_dev *dev) | |||
283 | * PCI host_pci_dev struct and set up host bus linkages | 302 | * PCI host_pci_dev struct and set up host bus linkages |
284 | */ | 303 | */ |
285 | 304 | ||
286 | bus_no = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32; | 305 | bus_no = (SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32) & 0xff; |
287 | devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff; | 306 | devfn = SN_PCIDEV_INFO(dev)->pdi_slot_host_handle & 0xffffffff; |
288 | host_pci_bus = pci_find_bus(pci_domain_nr(dev->bus), bus_no); | 307 | host_pci_bus = pci_find_bus(segment, bus_no); |
289 | host_pci_dev = pci_get_slot(host_pci_bus, devfn); | 308 | host_pci_dev = pci_get_slot(host_pci_bus, devfn); |
290 | 309 | ||
291 | SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev; | 310 | SN_PCIDEV_INFO(dev)->host_pci_dev = host_pci_dev; |
@@ -333,6 +352,7 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | |||
333 | prom_bussoft_ptr = __va(prom_bussoft_ptr); | 352 | prom_bussoft_ptr = __va(prom_bussoft_ptr); |
334 | 353 | ||
335 | controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL); | 354 | controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL); |
355 | controller->segment = segment; | ||
336 | if (!controller) | 356 | if (!controller) |
337 | BUG(); | 357 | BUG(); |
338 | 358 | ||
@@ -390,7 +410,7 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) | |||
390 | if (controller->node >= num_online_nodes()) { | 410 | if (controller->node >= num_online_nodes()) { |
391 | struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); | 411 | struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); |
392 | 412 | ||
393 | printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%lu" | 413 | printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u" |
394 | "L_IO=%lx L_MEM=%lx BASE=%lx\n", | 414 | "L_IO=%lx L_MEM=%lx BASE=%lx\n", |
395 | b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, | 415 | b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, |
396 | b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); | 416 | b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); |
@@ -411,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev) | |||
411 | { | 431 | { |
412 | struct sysdata_el *element; | 432 | struct sysdata_el *element; |
413 | 433 | ||
414 | element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); | 434 | element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); |
415 | if (!element) { | 435 | if (!element) { |
416 | dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); | 436 | dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); |
417 | return; | 437 | return; |
@@ -445,6 +465,7 @@ sn_sysdata_free_start: | |||
445 | static int __init sn_pci_init(void) | 465 | static int __init sn_pci_init(void) |
446 | { | 466 | { |
447 | int i = 0; | 467 | int i = 0; |
468 | int j = 0; | ||
448 | struct pci_dev *pci_dev = NULL; | 469 | struct pci_dev *pci_dev = NULL; |
449 | extern void sn_init_cpei_timer(void); | 470 | extern void sn_init_cpei_timer(void); |
450 | #ifdef CONFIG_PROC_FS | 471 | #ifdef CONFIG_PROC_FS |
@@ -464,6 +485,7 @@ static int __init sn_pci_init(void) | |||
464 | 485 | ||
465 | pcibr_init_provider(); | 486 | pcibr_init_provider(); |
466 | tioca_init_provider(); | 487 | tioca_init_provider(); |
488 | tioce_init_provider(); | ||
467 | 489 | ||
468 | /* | 490 | /* |
469 | * This is needed to avoid bounce limit checks in the blk layer | 491 | * This is needed to avoid bounce limit checks in the blk layer |
@@ -479,8 +501,9 @@ static int __init sn_pci_init(void) | |||
479 | #endif | 501 | #endif |
480 | 502 | ||
481 | /* busses are not known yet ... */ | 503 | /* busses are not known yet ... */ |
482 | for (i = 0; i < PCI_BUSES_TO_SCAN; i++) | 504 | for (i = 0; i <= max_segment_number; i++) |
483 | sn_pci_controller_fixup(0, i, NULL); | 505 | for (j = 0; j <= max_pcibus_number; j++) |
506 | sn_pci_controller_fixup(i, j, NULL); | ||
484 | 507 | ||
485 | /* | 508 | /* |
486 | * Generic Linux PCI Layer has created the pci_bus and pci_dev | 509 | * Generic Linux PCI Layer has created the pci_bus and pci_dev |
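
Two threads run through the io_init.c changes above: sn_pci_fixup_slot() now masks the bus number to the low byte of the handle's upper word, and sn_pci_init() scans every (segment, bus) pair the PROM reported rather than a fixed bus count on segment 0. A small sketch of the handle decoding, using a made-up pdi_slot_host_handle value (the field layout — devfn in the low 32 bits, bus number in the low byte of the high 32 — is as the code above reads it):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical handle: upper word 0x0103, devfn 0x20 */
	uint64_t handle = ((uint64_t)0x0103 << 32) | 0x20;

	unsigned int bus_no = (handle >> 32) & 0xff;			/* -> 0x03 */
	unsigned int devfn  = (unsigned int)(handle & 0xffffffff);	/* -> 0x20 */

	printf("bus %02x devfn %02x\n", bus_no, devfn);
	return 0;
}

Without the new & 0xff, any extra bits a newer PROM packs into the handle's upper word would bleed into bus_no (0x103 instead of 0x03 here).
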
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 84d276a14ecb..01d18b7b5bb3 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 6 | * for more details. |
7 | * | 7 | * |
8 | * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. | 8 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
@@ -23,7 +23,7 @@ static void force_interrupt(int irq); | |||
23 | static void register_intr_pda(struct sn_irq_info *sn_irq_info); | 23 | static void register_intr_pda(struct sn_irq_info *sn_irq_info); |
24 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); | 24 | static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); |
25 | 25 | ||
26 | extern int sn_force_interrupt_flag; | 26 | int sn_force_interrupt_flag = 1; |
27 | extern int sn_ioif_inited; | 27 | extern int sn_ioif_inited; |
28 | static struct list_head **sn_irq_lh; | 28 | static struct list_head **sn_irq_lh; |
29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ | 29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ |
@@ -76,16 +76,14 @@ static void sn_enable_irq(unsigned int irq) | |||
76 | 76 | ||
77 | static void sn_ack_irq(unsigned int irq) | 77 | static void sn_ack_irq(unsigned int irq) |
78 | { | 78 | { |
79 | uint64_t event_occurred, mask = 0; | 79 | u64 event_occurred, mask = 0; |
80 | int nasid; | ||
81 | 80 | ||
82 | irq = irq & 0xff; | 81 | irq = irq & 0xff; |
83 | nasid = get_nasid(); | ||
84 | event_occurred = | 82 | event_occurred = |
85 | HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED)); | 83 | HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); |
86 | mask = event_occurred & SH_ALL_INT_MASK; | 84 | mask = event_occurred & SH_ALL_INT_MASK; |
87 | HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), | 85 | HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), |
88 | mask); | 86 | mask); |
89 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); | 87 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); |
90 | 88 | ||
91 | move_irq(irq); | 89 | move_irq(irq); |
@@ -93,15 +91,12 @@ static void sn_ack_irq(unsigned int irq) | |||
93 | 91 | ||
94 | static void sn_end_irq(unsigned int irq) | 92 | static void sn_end_irq(unsigned int irq) |
95 | { | 93 | { |
96 | int nasid; | ||
97 | int ivec; | 94 | int ivec; |
98 | uint64_t event_occurred; | 95 | u64 event_occurred; |
99 | 96 | ||
100 | ivec = irq & 0xff; | 97 | ivec = irq & 0xff; |
101 | if (ivec == SGI_UART_VECTOR) { | 98 | if (ivec == SGI_UART_VECTOR) { |
102 | nasid = get_nasid(); | 99 | event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); |
103 | event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR | ||
104 | (nasid, SH_EVENT_OCCURRED)); | ||
105 | /* If the UART bit is set here, we may have received an | 100 | /* If the UART bit is set here, we may have received an |
106 | * interrupt from the UART that the driver missed. To | 101 | * interrupt from the UART that the driver missed. To |
107 | * make sure, we IPI ourselves to force us to look again. | 102 | * make sure, we IPI ourselves to force us to look again. |
@@ -132,6 +127,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | |||
132 | int local_widget, status; | 127 | int local_widget, status; |
133 | nasid_t local_nasid; | 128 | nasid_t local_nasid; |
134 | struct sn_irq_info *new_irq_info; | 129 | struct sn_irq_info *new_irq_info; |
130 | struct sn_pcibus_provider *pci_provider; | ||
135 | 131 | ||
136 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); | 132 | new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); |
137 | if (new_irq_info == NULL) | 133 | if (new_irq_info == NULL) |
@@ -171,8 +167,9 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | |||
171 | new_irq_info->irq_cpuid = cpuid; | 167 | new_irq_info->irq_cpuid = cpuid; |
172 | register_intr_pda(new_irq_info); | 168 | register_intr_pda(new_irq_info); |
173 | 169 | ||
174 | if (IS_PCI_BRIDGE_ASIC(new_irq_info->irq_bridge_type)) | 170 | pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; |
175 | pcibr_change_devices_irq(new_irq_info); | 171 | if (pci_provider && pci_provider->target_interrupt) |
172 | (pci_provider->target_interrupt)(new_irq_info); | ||
176 | 173 | ||
177 | spin_lock(&sn_irq_info_lock); | 174 | spin_lock(&sn_irq_info_lock); |
178 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); | 175 | list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); |
@@ -317,6 +314,16 @@ void sn_irq_unfixup(struct pci_dev *pci_dev) | |||
317 | pci_dev_put(pci_dev); | 314 | pci_dev_put(pci_dev); |
318 | } | 315 | } |
319 | 316 | ||
317 | static inline void | ||
318 | sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) | ||
319 | { | ||
320 | struct sn_pcibus_provider *pci_provider; | ||
321 | |||
322 | pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; | ||
323 | if (pci_provider && pci_provider->force_interrupt) | ||
324 | (*pci_provider->force_interrupt)(sn_irq_info); | ||
325 | } | ||
326 | |||
320 | static void force_interrupt(int irq) | 327 | static void force_interrupt(int irq) |
321 | { | 328 | { |
322 | struct sn_irq_info *sn_irq_info; | 329 | struct sn_irq_info *sn_irq_info; |
@@ -325,11 +332,9 @@ static void force_interrupt(int irq) | |||
325 | return; | 332 | return; |
326 | 333 | ||
327 | rcu_read_lock(); | 334 | rcu_read_lock(); |
328 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) { | 335 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) |
329 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | 336 | sn_call_force_intr_provider(sn_irq_info); |
330 | (sn_irq_info->irq_bridge != NULL)) | 337 | |
331 | pcibr_force_interrupt(sn_irq_info); | ||
332 | } | ||
333 | rcu_read_unlock(); | 338 | rcu_read_unlock(); |
334 | } | 339 | } |
335 | 340 | ||
@@ -351,6 +356,14 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | |||
351 | struct pcidev_info *pcidev_info; | 356 | struct pcidev_info *pcidev_info; |
352 | struct pcibus_info *pcibus_info; | 357 | struct pcibus_info *pcibus_info; |
353 | 358 | ||
359 | /* | ||
360 | * Bridge types attached to TIO (anything but PIC) do not need this WAR | ||
361 | * since they do not target Shub II interrupt registers. If that | ||
361 | * ever changes, this check needs to accommodate. | ||
363 | */ | ||
364 | if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) | ||
365 | return; | ||
366 | |||
354 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | 367 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; |
355 | if (!pcidev_info) | 368 | if (!pcidev_info) |
356 | return; | 369 | return; |
@@ -377,16 +390,12 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | |||
377 | break; | 390 | break; |
378 | } | 391 | } |
379 | if (!test_bit(irr_bit, &irr_reg)) { | 392 | if (!test_bit(irr_bit, &irr_reg)) { |
380 | if (!test_bit(irq, pda->sn_soft_irr)) { | 393 | if (!test_bit(irq, pda->sn_in_service_ivecs)) { |
381 | if (!test_bit(irq, pda->sn_in_service_ivecs)) { | 394 | regval &= 0xff; |
382 | regval &= 0xff; | 395 | if (sn_irq_info->irq_int_bit & regval & |
383 | if (sn_irq_info->irq_int_bit & regval & | 396 | sn_irq_info->irq_last_intr) { |
384 | sn_irq_info->irq_last_intr) { | 397 | regval &= ~(sn_irq_info->irq_int_bit & regval); |
385 | regval &= | 398 | sn_call_force_intr_provider(sn_irq_info); |
386 | ~(sn_irq_info-> | ||
387 | irq_int_bit & regval); | ||
388 | pcibr_force_interrupt(sn_irq_info); | ||
389 | } | ||
390 | } | 399 | } |
391 | } | 400 | } |
392 | } | 401 | } |
@@ -404,13 +413,7 @@ void sn_lb_int_war_check(void) | |||
404 | rcu_read_lock(); | 413 | rcu_read_lock(); |
405 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { | 414 | for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { |
406 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { | 415 | list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { |
407 | /* | 416 | sn_check_intr(i, sn_irq_info); |
408 | * Only call for PCI bridges that are fully | ||
409 | * initialized. | ||
410 | */ | ||
411 | if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) && | ||
412 | (sn_irq_info->irq_bridge != NULL)) | ||
413 | sn_check_intr(i, sn_irq_info); | ||
414 | } | 417 | } |
415 | } | 418 | } |
416 | rcu_read_unlock(); | 419 | rcu_read_unlock(); |
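
The irq.c rework above stops calling pcibr_* directly: the bridge's entry in sn_pci_provider[] is consulted and its force_interrupt/target_interrupt op invoked only if present, so new bridge types (such as the TIOCE added elsewhere in this series) plug in without touching this file. A standalone sketch of that dispatch shape; the types and table contents are simplified stand-ins, not the kernel's definitions:

#include <stdio.h>

struct irq_info { int bridge_type; int irq; };

struct pcibus_provider {
	void (*force_interrupt)(struct irq_info *);
	void (*target_interrupt)(struct irq_info *);
};

static void pic_force(struct irq_info *info)
{
	printf("PIC: forcing irq %d\n", info->irq);
}

/* index 0: a PIC-like bridge; index 1: a bridge with no force op */
static struct pcibus_provider providers[] = {
	{ .force_interrupt = pic_force },
	{ 0 },
};

static void call_force_intr_provider(struct irq_info *info)
{
	struct pcibus_provider *p = &providers[info->bridge_type];

	if (p->force_interrupt)		/* each op is optional per provider */
		p->force_interrupt(info);
}

int main(void)
{
	struct irq_info a = { .bridge_type = 0, .irq = 55 };
	struct irq_info b = { .bridge_type = 1, .irq = 56 };

	call_force_intr_provider(&a);	/* dispatches to pic_force */
	call_force_intr_provider(&b);	/* no op registered: silently skipped */
	return 0;
}
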
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 981928f35a8c..6f8c5883716b 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -57,7 +57,7 @@ | |||
57 | 57 | ||
58 | DEFINE_PER_CPU(struct pda_s, pda_percpu); | 58 | DEFINE_PER_CPU(struct pda_s, pda_percpu); |
59 | 59 | ||
60 | #define MAX_PHYS_MEMORY (1UL << 49) /* 1 TB */ | 60 | #define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */ |
61 | 61 | ||
62 | lboard_t *root_lboard[MAX_COMPACT_NODES]; | 62 | lboard_t *root_lboard[MAX_COMPACT_NODES]; |
63 | 63 | ||
@@ -81,8 +81,6 @@ EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); | |||
81 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); | 81 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); |
82 | EXPORT_PER_CPU_SYMBOL(__sn_nodepda); | 82 | EXPORT_PER_CPU_SYMBOL(__sn_nodepda); |
83 | 83 | ||
84 | partid_t sn_partid = -1; | ||
85 | EXPORT_SYMBOL(sn_partid); | ||
86 | char sn_system_serial_number_string[128]; | 84 | char sn_system_serial_number_string[128]; |
87 | EXPORT_SYMBOL(sn_system_serial_number_string); | 85 | EXPORT_SYMBOL(sn_system_serial_number_string); |
88 | u64 sn_partition_serial_number; | 86 | u64 sn_partition_serial_number; |
@@ -398,6 +396,7 @@ static void __init sn_init_pdas(char **cmdline_p) | |||
398 | memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); | 396 | memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); |
399 | memset(nodepdaindr[cnode]->phys_cpuid, -1, | 397 | memset(nodepdaindr[cnode]->phys_cpuid, -1, |
400 | sizeof(nodepdaindr[cnode]->phys_cpuid)); | 398 | sizeof(nodepdaindr[cnode]->phys_cpuid)); |
399 | spin_lock_init(&nodepdaindr[cnode]->ptc_lock); | ||
401 | } | 400 | } |
402 | 401 | ||
403 | /* | 402 | /* |
@@ -531,8 +530,8 @@ void __init sn_cpu_init(void) | |||
531 | */ | 530 | */ |
532 | { | 531 | { |
533 | u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; | 532 | u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; |
534 | u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1, | 533 | u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2, |
535 | SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3}; | 534 | SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3}; |
536 | u64 *pio; | 535 | u64 *pio; |
537 | pio = is_shub1() ? pio1 : pio2; | 536 | pio = is_shub1() ? pio1 : pio2; |
538 | pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]); | 537 | pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]); |
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S index 96cb71d15682..3fa95065a446 100644 --- a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S +++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <asm/types.h> | 9 | #include <asm/types.h> |
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT | 12 | #define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT |
13 | #define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK | 13 | #define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK |
14 | #define ALIAS_OFFSET (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0) | 14 | #define ALIAS_OFFSET 8 |
15 | 15 | ||
16 | 16 | ||
17 | .global sn2_ptc_deadlock_recovery_core | 17 | .global sn2_ptc_deadlock_recovery_core |
@@ -36,13 +36,15 @@ sn2_ptc_deadlock_recovery_core: | |||
36 | extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address | 36 | extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address |
37 | dep piowcphy=-1,piowcphy,63,1 | 37 | dep piowcphy=-1,piowcphy,63,1 |
38 | movl mask=WRITECOUNTMASK | 38 | movl mask=WRITECOUNTMASK |
39 | mov r8=r0 | ||
39 | 40 | ||
40 | 1: | 41 | 1: |
41 | add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register | 42 | add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register |
42 | mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR | 43 | ;; |
43 | st8.rel [scr2]=scr1;; | 44 | ld8.acq scr1=[scr2];; |
44 | 45 | ||
45 | 5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete. | 46 | 5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete. |
47 | hint @pause | ||
46 | and scr2=scr1,mask;; // mask of writecount bits | 48 | and scr2=scr1,mask;; // mask of writecount bits |
47 | cmp.ne p6,p0=zeroval,scr2 | 49 | cmp.ne p6,p0=zeroval,scr2 |
48 | (p6) br.cond.sptk 5b | 50 | (p6) br.cond.sptk 5b |
@@ -57,6 +59,7 @@ sn2_ptc_deadlock_recovery_core: | |||
57 | st8.rel [ptc0]=data0 // Write PTC0 & wait for completion. | 59 | st8.rel [ptc0]=data0 // Write PTC0 & wait for completion. |
58 | 60 | ||
59 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. | 61 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. |
62 | hint @pause | ||
60 | and scr2=scr1,mask;; // mask of writecount bits | 63 | and scr2=scr1,mask;; // mask of writecount bits |
61 | cmp.ne p6,p0=zeroval,scr2 | 64 | cmp.ne p6,p0=zeroval,scr2 |
62 | (p6) br.cond.sptk 5b;; | 65 | (p6) br.cond.sptk 5b;; |
@@ -67,6 +70,7 @@ sn2_ptc_deadlock_recovery_core: | |||
67 | (p7) st8.rel [ptc1]=data1;; // Now write PTC1. | 70 | (p7) st8.rel [ptc1]=data1;; // Now write PTC1. |
68 | 71 | ||
69 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. | 72 | 5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. |
73 | hint @pause | ||
70 | and scr2=scr1,mask;; // mask of writecount bits | 74 | and scr2=scr1,mask;; // mask of writecount bits |
71 | cmp.ne p6,p0=zeroval,scr2 | 75 | cmp.ne p6,p0=zeroval,scr2 |
72 | (p6) br.cond.sptk 5b | 76 | (p6) br.cond.sptk 5b |
@@ -77,6 +81,7 @@ sn2_ptc_deadlock_recovery_core: | |||
77 | srlz.i;; | 81 | srlz.i;; |
78 | ////////////// END PHYSICAL MODE //////////////////// | 82 | ////////////// END PHYSICAL MODE //////////////////// |
79 | 83 | ||
84 | (p8) add r8=1,r8 | ||
80 | (p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred. | 85 | (p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred. |
81 | 86 | ||
82 | br.ret.sptk rp | 87 | br.ret.sptk rp |
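
In rough C terms, the amended recovery loop now samples the WRITE_STATUS alias with a load instead of storing to it, spins (with a pause hint) until the pending-write count drains, reissues the PTC stores, and counts each deadlock-driven repeat in r8. A toy model of that control flow; the "register" behavior below is simulated and the mask value is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define WRITECOUNTMASK 0x3fULL		/* hypothetical count field */

static int writes_pending;		/* fake PIO_WRITE_STATUS state */
static int deadlocks_to_inject = 1;	/* first reissue "deadlocks" once */

static uint64_t read_piowc(void)	/* each poll drains one write */
{
	if (writes_pending)
		writes_pending--;
	return (uint64_t)writes_pending;
}

static int issue_ptc_writes(void)	/* nonzero return models p8 set */
{
	writes_pending = 2;
	return deadlocks_to_inject ? deadlocks_to_inject-- : 0;
}

int main(void)
{
	unsigned long retries = 0;	/* plays the role of the new r8 */
	int deadlocked;

	do {
		while (read_piowc() & WRITECOUNTMASK)
			;		/* spin; hint @pause in the assembly */
		deadlocked = issue_ptc_writes();
		if (deadlocked)
			retries++;	/* one increment per trip back to 1b */
	} while (deadlocked);

	printf("recovered after %lu repeat(s)\n", retries);
	return 0;
}
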
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index 7af05a7ac743..0a4ee50c302f 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * License. See the file "COPYING" in the main directory of this archive | 5 | * License. See the file "COPYING" in the main directory of this archive |
6 | * for more details. | 6 | * for more details. |
7 | * | 7 | * |
8 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | 8 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -20,6 +20,8 @@ | |||
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
22 | #include <linux/nodemask.h> | 22 | #include <linux/nodemask.h> |
23 | #include <linux/proc_fs.h> | ||
24 | #include <linux/seq_file.h> | ||
23 | 25 | ||
24 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
25 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
@@ -39,12 +41,120 @@ | |||
39 | #include <asm/sn/nodepda.h> | 41 | #include <asm/sn/nodepda.h> |
40 | #include <asm/sn/rw_mmr.h> | 42 | #include <asm/sn/rw_mmr.h> |
41 | 43 | ||
42 | void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0, | 44 | DEFINE_PER_CPU(struct ptc_stats, ptcstats); |
43 | volatile unsigned long *, unsigned long data1); | 45 | DECLARE_PER_CPU(struct ptc_stats, ptcstats); |
44 | 46 | ||
45 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); | 47 | static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); |
46 | 48 | ||
47 | static unsigned long sn2_ptc_deadlock_count; | 49 | void sn2_ptc_deadlock_recovery(short *, short, int, volatile unsigned long *, unsigned long data0, |
50 | volatile unsigned long *, unsigned long data1); | ||
51 | |||
52 | #ifdef DEBUG_PTC | ||
53 | /* | ||
54 | * ptctest: | ||
55 | * | ||
56 | * xyz - 3-digit hex number: | ||
57 | * x - Force PTC purges to use shub: | ||
58 | * 0 - no force | ||
59 | * 1 - force | ||
60 | * y - interrupt enable | ||
61 | * 0 - disable interrupts | ||
62 | * 1 - leave interrupts enabled | ||
63 | * z - type of lock: | ||
64 | * 0 - global lock | ||
65 | * 1 - node local lock | ||
66 | * 2 - no lock | ||
67 | * | ||
68 | * Note: on shub1, only ptctest == 0 is supported. Don't try other values! | ||
69 | */ | ||
70 | |||
71 | static unsigned int sn2_ptctest = 0; | ||
72 | |||
73 | static int __init ptc_test(char *str) | ||
74 | { | ||
75 | get_option(&str, &sn2_ptctest); | ||
76 | return 1; | ||
77 | } | ||
78 | __setup("ptctest=", ptc_test); | ||
79 | |||
80 | static inline int ptc_lock(unsigned long *flagp) | ||
81 | { | ||
82 | unsigned long opt = sn2_ptctest & 255; | ||
83 | |||
84 | switch (opt) { | ||
85 | case 0x00: | ||
86 | spin_lock_irqsave(&sn2_global_ptc_lock, *flagp); | ||
87 | break; | ||
88 | case 0x01: | ||
89 | spin_lock_irqsave(&sn_nodepda->ptc_lock, *flagp); | ||
90 | break; | ||
91 | case 0x02: | ||
92 | local_irq_save(*flagp); | ||
93 | break; | ||
94 | case 0x10: | ||
95 | spin_lock(&sn2_global_ptc_lock); | ||
96 | break; | ||
97 | case 0x11: | ||
98 | spin_lock(&sn_nodepda->ptc_lock); | ||
99 | break; | ||
100 | case 0x12: | ||
101 | break; | ||
102 | default: | ||
103 | BUG(); | ||
104 | } | ||
105 | return opt; | ||
106 | } | ||
107 | |||
108 | static inline void ptc_unlock(unsigned long flags, int opt) | ||
109 | { | ||
110 | switch (opt) { | ||
111 | case 0x00: | ||
112 | spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); | ||
113 | break; | ||
114 | case 0x01: | ||
115 | spin_unlock_irqrestore(&sn_nodepda->ptc_lock, flags); | ||
116 | break; | ||
117 | case 0x02: | ||
118 | local_irq_restore(flags); | ||
119 | break; | ||
120 | case 0x10: | ||
121 | spin_unlock(&sn2_global_ptc_lock); | ||
122 | break; | ||
123 | case 0x11: | ||
124 | spin_unlock(&sn_nodepda->ptc_lock); | ||
125 | break; | ||
126 | case 0x12: | ||
127 | break; | ||
128 | default: | ||
129 | BUG(); | ||
130 | } | ||
131 | } | ||
132 | #else | ||
133 | |||
134 | #define sn2_ptctest 0 | ||
135 | |||
136 | static inline int ptc_lock(unsigned long *flagp) | ||
137 | { | ||
138 | spin_lock_irqsave(&sn2_global_ptc_lock, *flagp); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | static inline void ptc_unlock(unsigned long flags, int opt) | ||
143 | { | ||
144 | spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); | ||
145 | } | ||
146 | #endif | ||
147 | |||
148 | struct ptc_stats { | ||
149 | unsigned long ptc_l; | ||
150 | unsigned long change_rid; | ||
151 | unsigned long shub_ptc_flushes; | ||
152 | unsigned long nodes_flushed; | ||
153 | unsigned long deadlocks; | ||
154 | unsigned long lock_itc_clocks; | ||
155 | unsigned long shub_itc_clocks; | ||
156 | unsigned long shub_itc_clocks_max; | ||
157 | }; | ||
48 | 158 | ||
49 | static inline unsigned long wait_piowc(void) | 159 | static inline unsigned long wait_piowc(void) |
50 | { | 160 | { |
@@ -89,9 +199,9 @@ void | |||
89 | sn2_global_tlb_purge(unsigned long start, unsigned long end, | 199 | sn2_global_tlb_purge(unsigned long start, unsigned long end, |
90 | unsigned long nbits) | 200 | unsigned long nbits) |
91 | { | 201 | { |
92 | int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0; | 202 | int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0; |
93 | volatile unsigned long *ptc0, *ptc1; | 203 | volatile unsigned long *ptc0, *ptc1; |
94 | unsigned long flags = 0, data0 = 0, data1 = 0; | 204 | unsigned long itc, itc2, flags, data0 = 0, data1 = 0; |
95 | struct mm_struct *mm = current->active_mm; | 205 | struct mm_struct *mm = current->active_mm; |
96 | short nasids[MAX_NUMNODES], nix; | 206 | short nasids[MAX_NUMNODES], nix; |
97 | nodemask_t nodes_flushed; | 207 | nodemask_t nodes_flushed; |
@@ -114,16 +224,19 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end, | |||
114 | start += (1UL << nbits); | 224 | start += (1UL << nbits); |
115 | } while (start < end); | 225 | } while (start < end); |
116 | ia64_srlz_i(); | 226 | ia64_srlz_i(); |
227 | __get_cpu_var(ptcstats).ptc_l++; | ||
117 | preempt_enable(); | 228 | preempt_enable(); |
118 | return; | 229 | return; |
119 | } | 230 | } |
120 | 231 | ||
121 | if (atomic_read(&mm->mm_users) == 1) { | 232 | if (atomic_read(&mm->mm_users) == 1) { |
122 | flush_tlb_mm(mm); | 233 | flush_tlb_mm(mm); |
234 | __get_cpu_var(ptcstats).change_rid++; | ||
123 | preempt_enable(); | 235 | preempt_enable(); |
124 | return; | 236 | return; |
125 | } | 237 | } |
126 | 238 | ||
239 | itc = ia64_get_itc(); | ||
127 | nix = 0; | 240 | nix = 0; |
128 | for_each_node_mask(cnode, nodes_flushed) | 241 | for_each_node_mask(cnode, nodes_flushed) |
129 | nasids[nix++] = cnodeid_to_nasid(cnode); | 242 | nasids[nix++] = cnodeid_to_nasid(cnode); |
@@ -148,7 +261,12 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end, | |||
148 | 261 | ||
149 | mynasid = get_nasid(); | 262 | mynasid = get_nasid(); |
150 | 263 | ||
151 | spin_lock_irqsave(&sn2_global_ptc_lock, flags); | 264 | itc = ia64_get_itc(); |
265 | opt = ptc_lock(&flags); | ||
266 | itc2 = ia64_get_itc(); | ||
267 | __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; | ||
268 | __get_cpu_var(ptcstats).shub_ptc_flushes++; | ||
269 | __get_cpu_var(ptcstats).nodes_flushed += nix; | ||
152 | 270 | ||
153 | do { | 271 | do { |
154 | if (shub1) | 272 | if (shub1) |
@@ -157,7 +275,7 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end, | |||
157 | data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); | 275 | data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); |
158 | for (i = 0; i < nix; i++) { | 276 | for (i = 0; i < nix; i++) { |
159 | nasid = nasids[i]; | 277 | nasid = nasids[i]; |
160 | if (unlikely(nasid == mynasid)) { | 278 | if ((!(sn2_ptctest & 3)) && unlikely(nasid == mynasid)) { |
161 | ia64_ptcga(start, nbits << 2); | 279 | ia64_ptcga(start, nbits << 2); |
162 | ia64_srlz_i(); | 280 | ia64_srlz_i(); |
163 | } else { | 281 | } else { |
@@ -169,18 +287,22 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end, | |||
169 | flushed = 1; | 287 | flushed = 1; |
170 | } | 288 | } |
171 | } | 289 | } |
172 | |||
173 | if (flushed | 290 | if (flushed |
174 | && (wait_piowc() & | 291 | && (wait_piowc() & |
175 | SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) { | 292 | (SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK))) { |
176 | sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1); | 293 | sn2_ptc_deadlock_recovery(nasids, nix, mynasid, ptc0, data0, ptc1, data1); |
177 | } | 294 | } |
178 | 295 | ||
179 | start += (1UL << nbits); | 296 | start += (1UL << nbits); |
180 | 297 | ||
181 | } while (start < end); | 298 | } while (start < end); |
182 | 299 | ||
183 | spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); | 300 | itc2 = ia64_get_itc() - itc2; |
301 | __get_cpu_var(ptcstats).shub_itc_clocks += itc2; | ||
302 | if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) | ||
303 | __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2; | ||
304 | |||
305 | ptc_unlock(flags, opt); | ||
184 | 306 | ||
185 | preempt_enable(); | 307 | preempt_enable(); |
186 | } | 308 | } |
@@ -192,31 +314,29 @@ sn2_global_tlb_purge(unsigned long start, unsigned long end, | |||
192 | * TLB flush transaction. The recovery sequence is somewhat tricky & is | 314 | * TLB flush transaction. The recovery sequence is somewhat tricky & is |
193 | * coded in assembly language. | 315 | * coded in assembly language. |
194 | */ | 316 | */ |
195 | void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0, | 317 | void sn2_ptc_deadlock_recovery(short *nasids, short nix, int mynasid, volatile unsigned long *ptc0, unsigned long data0, |
196 | volatile unsigned long *ptc1, unsigned long data1) | 318 | volatile unsigned long *ptc1, unsigned long data1) |
197 | { | 319 | { |
198 | extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, | 320 | extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, |
199 | volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); | 321 | volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long); |
200 | int cnode, mycnode, nasid; | 322 | short nasid, i; |
201 | volatile unsigned long *piows; | 323 | unsigned long *piows, zeroval; |
202 | volatile unsigned long zeroval; | ||
203 | 324 | ||
204 | sn2_ptc_deadlock_count++; | 325 | __get_cpu_var(ptcstats).deadlocks++; |
205 | 326 | ||
206 | piows = pda->pio_write_status_addr; | 327 | piows = (unsigned long *) pda->pio_write_status_addr; |
207 | zeroval = pda->pio_write_status_val; | 328 | zeroval = pda->pio_write_status_val; |
208 | 329 | ||
209 | mycnode = numa_node_id(); | 330 | for (i=0; i < nix; i++) { |
210 | 331 | nasid = nasids[i]; | |
211 | for_each_online_node(cnode) { | 332 | if (!(sn2_ptctest & 3) && nasid == mynasid) |
212 | if (is_headless_node(cnode) || cnode == mycnode) | ||
213 | continue; | 333 | continue; |
214 | nasid = cnodeid_to_nasid(cnode); | ||
215 | ptc0 = CHANGE_NASID(nasid, ptc0); | 334 | ptc0 = CHANGE_NASID(nasid, ptc0); |
216 | if (ptc1) | 335 | if (ptc1) |
217 | ptc1 = CHANGE_NASID(nasid, ptc1); | 336 | ptc1 = CHANGE_NASID(nasid, ptc1); |
218 | sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); | 337 | sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); |
219 | } | 338 | } |
339 | |||
220 | } | 340 | } |
221 | 341 | ||
222 | /** | 342 | /** |
@@ -293,3 +413,93 @@ void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) | |||
293 | 413 | ||
294 | sn_send_IPI_phys(nasid, physid, vector, delivery_mode); | 414 | sn_send_IPI_phys(nasid, physid, vector, delivery_mode); |
295 | } | 415 | } |
416 | |||
417 | #ifdef CONFIG_PROC_FS | ||
418 | |||
419 | #define PTC_BASENAME "sgi_sn/ptc_statistics" | ||
420 | |||
421 | static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) | ||
422 | { | ||
423 | if (*offset < NR_CPUS) | ||
424 | return offset; | ||
425 | return NULL; | ||
426 | } | ||
427 | |||
428 | static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) | ||
429 | { | ||
430 | (*offset)++; | ||
431 | if (*offset < NR_CPUS) | ||
432 | return offset; | ||
433 | return NULL; | ||
434 | } | ||
435 | |||
436 | static void sn2_ptc_seq_stop(struct seq_file *file, void *data) | ||
437 | { | ||
438 | } | ||
439 | |||
440 | static int sn2_ptc_seq_show(struct seq_file *file, void *data) | ||
441 | { | ||
442 | struct ptc_stats *stat; | ||
443 | int cpu; | ||
444 | |||
445 | cpu = *(loff_t *) data; | ||
446 | |||
447 | if (!cpu) { | ||
448 | seq_printf(file, "# ptc_l change_rid shub_ptc_flushes shub_nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max\n"); | ||
449 | seq_printf(file, "# ptctest %d\n", sn2_ptctest); | ||
450 | } | ||
451 | |||
452 | if (cpu < NR_CPUS && cpu_online(cpu)) { | ||
453 | stat = &per_cpu(ptcstats, cpu); | ||
454 | seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, | ||
455 | stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, | ||
456 | stat->deadlocks, | ||
457 | 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | ||
458 | 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, | ||
459 | 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec); | ||
460 | } | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static struct seq_operations sn2_ptc_seq_ops = { | ||
466 | .start = sn2_ptc_seq_start, | ||
467 | .next = sn2_ptc_seq_next, | ||
468 | .stop = sn2_ptc_seq_stop, | ||
469 | .show = sn2_ptc_seq_show | ||
470 | }; | ||
471 | |||
472 | int sn2_ptc_proc_open(struct inode *inode, struct file *file) | ||
473 | { | ||
474 | return seq_open(file, &sn2_ptc_seq_ops); | ||
475 | } | ||
476 | |||
477 | static struct file_operations proc_sn2_ptc_operations = { | ||
478 | .open = sn2_ptc_proc_open, | ||
479 | .read = seq_read, | ||
480 | .llseek = seq_lseek, | ||
481 | .release = seq_release, | ||
482 | }; | ||
483 | |||
484 | static struct proc_dir_entry *proc_sn2_ptc; | ||
485 | |||
486 | static int __init sn2_ptc_init(void) | ||
487 | { | ||
488 | if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) { | ||
489 | printk(KERN_ERR "unable to create %s proc entry\n", PTC_BASENAME); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations; | ||
493 | spin_lock_init(&sn2_global_ptc_lock); | ||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | static void __exit sn2_ptc_exit(void) | ||
498 | { | ||
499 | remove_proc_entry(PTC_BASENAME, NULL); | ||
500 | } | ||
501 | |||
502 | module_init(sn2_ptc_init); | ||
503 | module_exit(sn2_ptc_exit); | ||
504 | #endif /* CONFIG_PROC_FS */ | ||
505 | |||
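
The lock_nsec/shub_nsec columns in the seq_file output above come from scaling raw ITC clock counts: ns = 1000 * clocks / cyc_per_usec. A quick worked check, with both numbers invented (a 1.5 GHz ITC, i.e. 1500 cycles per microsecond):

#include <stdio.h>

int main(void)
{
	unsigned long cyc_per_usec = 1500;	/* hypothetical 1.5 GHz ITC */
	unsigned long lock_itc_clocks = 45000;	/* sample accumulated count */

	/* 45000 cyc / 1500 cyc/us = 30 us, so 30000 ns */
	printf("lock_nsec %lu\n", 1000 * lock_itc_clocks / cyc_per_usec);
	return 0;
}
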
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 833e700fdac9..0513aacac8c1 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <asm/topology.h> | 36 | #include <asm/topology.h> |
37 | #include <asm/smp.h> | 37 | #include <asm/smp.h> |
38 | #include <asm/semaphore.h> | 38 | #include <asm/semaphore.h> |
39 | #include <asm/segment.h> | ||
40 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
41 | #include <asm/sal.h> | 40 | #include <asm/sal.h> |
42 | #include <asm/sn/io.h> | 41 | #include <asm/sn/io.h> |
@@ -59,7 +58,7 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret) | |||
59 | struct sn_hwperf_object_info *objbuf = NULL; | 58 | struct sn_hwperf_object_info *objbuf = NULL; |
60 | 59 | ||
61 | if ((e = sn_hwperf_init()) < 0) { | 60 | if ((e = sn_hwperf_init()) < 0) { |
62 | printk("sn_hwperf_init failed: err %d\n", e); | 61 | printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e); |
63 | goto out; | 62 | goto out; |
64 | } | 63 | } |
65 | 64 | ||
@@ -111,7 +110,7 @@ static int sn_hwperf_geoid_to_cnode(char *location) | |||
111 | if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) | 110 | if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) |
112 | return -1; | 111 | return -1; |
113 | 112 | ||
114 | for (cnode = 0; cnode < numionodes; cnode++) { | 113 | for_each_node(cnode) { |
115 | geoid = cnodeid_get_geoid(cnode); | 114 | geoid = cnodeid_get_geoid(cnode); |
116 | module_id = geo_module(geoid); | 115 | module_id = geo_module(geoid); |
117 | this_rack = MODULE_GET_RACK(module_id); | 116 | this_rack = MODULE_GET_RACK(module_id); |
@@ -124,11 +123,13 @@ static int sn_hwperf_geoid_to_cnode(char *location) | |||
124 | } | 123 | } |
125 | } | 124 | } |
126 | 125 | ||
127 | return cnode < numionodes ? cnode : -1; | 126 | return node_possible(cnode) ? cnode : -1; |
128 | } | 127 | } |
129 | 128 | ||
130 | static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj) | 129 | static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj) |
131 | { | 130 | { |
131 | if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) | ||
132 | BUG(); | ||
132 | if (!obj->sn_hwp_this_part) | 133 | if (!obj->sn_hwp_this_part) |
133 | return -1; | 134 | return -1; |
134 | return sn_hwperf_geoid_to_cnode(obj->location); | 135 | return sn_hwperf_geoid_to_cnode(obj->location); |
@@ -174,31 +175,199 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj, | |||
174 | return slabname; | 175 | return slabname; |
175 | } | 176 | } |
176 | 177 | ||
177 | static void print_pci_topology(struct seq_file *s, | 178 | static void print_pci_topology(struct seq_file *s) |
178 | struct sn_hwperf_object_info *obj, int *ordinal, | 179 | { |
179 | u64 rack, u64 bay, u64 slot, u64 slab) | 180 | char *p; |
181 | size_t sz; | ||
182 | int e; | ||
183 | |||
184 | for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { | ||
185 | if (!(p = (char *)kmalloc(sz, GFP_KERNEL))) | ||
186 | break; | ||
187 | e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); | ||
188 | if (e == SALRET_OK) | ||
189 | seq_puts(s, p); | ||
190 | kfree(p); | ||
191 | if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED) | ||
192 | break; | ||
193 | } | ||
194 | } | ||
195 | |||
196 | static inline int sn_hwperf_has_cpus(cnodeid_t node) | ||
197 | { | ||
198 | return node_online(node) && nr_cpus_node(node); | ||
199 | } | ||
200 | |||
201 | static inline int sn_hwperf_has_mem(cnodeid_t node) | ||
202 | { | ||
203 | return node_online(node) && NODE_DATA(node)->node_present_pages; | ||
204 | } | ||
205 | |||
206 | static struct sn_hwperf_object_info * | ||
207 | sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf, | ||
208 | int nobj, int id) | ||
180 | { | 209 | { |
181 | char *p1; | 210 | int i; |
182 | char *p2; | 211 | struct sn_hwperf_object_info *p = objbuf; |
183 | char *pg; | 212 | |
184 | 213 | for (i=0; i < nobj; i++, p++) { | |
185 | if (!(pg = (char *)get_zeroed_page(GFP_KERNEL))) | 214 | if (p->id == id) |
186 | return; /* ignore */ | 215 | return p; |
187 | if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab, | 216 | } |
188 | __pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) { | 217 | |
189 | for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) { | 218 | return NULL; |
190 | if (!(p2 = strchr(p1, '\n'))) | 219 | |
220 | } | ||
221 | |||
222 | static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf, | ||
223 | int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) | ||
224 | { | ||
225 | int e; | ||
226 | struct sn_hwperf_object_info *nodeobj = NULL; | ||
227 | struct sn_hwperf_object_info *op; | ||
228 | struct sn_hwperf_object_info *dest; | ||
229 | struct sn_hwperf_object_info *router; | ||
230 | struct sn_hwperf_port_info ptdata[16]; | ||
231 | int sz, i, j; | ||
232 | cnodeid_t c; | ||
233 | int found_mem = 0; | ||
234 | int found_cpu = 0; | ||
235 | |||
236 | if (!node_possible(node)) | ||
237 | return -EINVAL; | ||
238 | |||
239 | if (sn_hwperf_has_cpus(node)) { | ||
240 | if (near_cpu_node) | ||
241 | *near_cpu_node = node; | ||
242 | found_cpu++; | ||
243 | } | ||
244 | |||
245 | if (sn_hwperf_has_mem(node)) { | ||
246 | if (near_mem_node) | ||
247 | *near_mem_node = node; | ||
248 | found_mem++; | ||
249 | } | ||
250 | |||
251 | if (found_cpu && found_mem) | ||
252 | return 0; /* trivially successful */ | ||
253 | |||
254 | /* find the argument node object */ | ||
255 | for (i=0, op=objbuf; i < nobj; i++, op++) { | ||
256 | if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op)) | ||
257 | continue; | ||
258 | if (node == sn_hwperf_obj_to_cnode(op)) { | ||
259 | nodeobj = op; | ||
260 | break; | ||
261 | } | ||
262 | } | ||
263 | if (!nodeobj) { | ||
264 | e = -ENOENT; | ||
265 | goto err; | ||
266 | } | ||
267 | |||
268 | /* get its interconnect topology */ | ||
269 | sz = op->ports * sizeof(struct sn_hwperf_port_info); | ||
270 | if (sz > sizeof(ptdata)) | ||
271 | BUG(); | ||
272 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
273 | SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, | ||
274 | (u64)&ptdata, 0, 0, NULL); | ||
275 | if (e != SN_HWPERF_OP_OK) { | ||
276 | e = -EINVAL; | ||
277 | goto err; | ||
278 | } | ||
279 | |||
280 | /* find nearest node with cpus and nearest memory */ | ||
281 | for (router=NULL, j=0; j < op->ports; j++) { | ||
282 | dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); | ||
283 | if (!dest || SN_HWPERF_FOREIGN(dest) || | ||
284 | !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { | ||
285 | continue; | ||
286 | } | ||
287 | c = sn_hwperf_obj_to_cnode(dest); | ||
288 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
289 | if (near_cpu_node) | ||
290 | *near_cpu_node = c; | ||
291 | found_cpu++; | ||
292 | } | ||
293 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
294 | if (near_mem_node) | ||
295 | *near_mem_node = c; | ||
296 | found_mem++; | ||
297 | } | ||
298 | if (SN_HWPERF_IS_ROUTER(dest)) | ||
299 | router = dest; | ||
300 | } | ||
301 | |||
302 | if (router && (!found_cpu || !found_mem)) { | ||
303 | /* search for a node connected to the same router */ | ||
304 | sz = router->ports * sizeof(struct sn_hwperf_port_info); | ||
305 | if (sz > sizeof(ptdata)) | ||
306 | BUG(); | ||
307 | e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, | ||
308 | SN_HWPERF_ENUM_PORTS, router->id, sz, | ||
309 | (u64)&ptdata, 0, 0, NULL); | ||
310 | if (e != SN_HWPERF_OP_OK) { | ||
311 | e = -EINVAL; | ||
312 | goto err; | ||
313 | } | ||
314 | for (j=0; j < router->ports; j++) { | ||
315 | dest = sn_hwperf_findobj_id(objbuf, nobj, | ||
316 | ptdata[j].conn_id); | ||
317 | if (!dest || dest->id == node || | ||
318 | SN_HWPERF_FOREIGN(dest) || | ||
319 | !SN_HWPERF_IS_NODE(dest) || | ||
320 | SN_HWPERF_IS_IONODE(dest)) { | ||
321 | continue; | ||
322 | } | ||
323 | c = sn_hwperf_obj_to_cnode(dest); | ||
324 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
325 | if (near_cpu_node) | ||
326 | *near_cpu_node = c; | ||
327 | found_cpu++; | ||
328 | } | ||
329 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
330 | if (near_mem_node) | ||
331 | *near_mem_node = c; | ||
332 | found_mem++; | ||
333 | } | ||
334 | if (found_cpu && found_mem) | ||
335 | break; | ||
336 | } | ||
337 | } | ||
338 | |||
339 | if (!found_cpu || !found_mem) { | ||
340 | /* resort to _any_ node with CPUs and memory */ | ||
341 | for (i=0, op=objbuf; i < nobj; i++, op++) { | ||
342 | if (SN_HWPERF_FOREIGN(op) || | ||
343 | SN_HWPERF_IS_IONODE(op) || | ||
344 | !SN_HWPERF_IS_NODE(op)) { | ||
345 | continue; | ||
346 | } | ||
347 | c = sn_hwperf_obj_to_cnode(op); | ||
348 | if (!found_cpu && sn_hwperf_has_cpus(c)) { | ||
349 | if (near_cpu_node) | ||
350 | *near_cpu_node = c; | ||
351 | found_cpu++; | ||
352 | } | ||
353 | if (!found_mem && sn_hwperf_has_mem(c)) { | ||
354 | if (near_mem_node) | ||
355 | *near_mem_node = c; | ||
356 | found_mem++; | ||
357 | } | ||
358 | if (found_cpu && found_mem) | ||
191 | break; | 359 | break; |
192 | *p2 = '\0'; | ||
193 | seq_printf(s, "pcibus %d %s-%s\n", | ||
194 | *ordinal, obj->location, p1); | ||
195 | (*ordinal)++; | ||
196 | p1 = p2 + 1; | ||
197 | } | 360 | } |
198 | } | 361 | } |
199 | free_page((unsigned long)pg); | 362 | |
363 | if (!found_cpu || !found_mem) | ||
364 | e = -ENODATA; | ||
365 | |||
366 | err: | ||
367 | return e; | ||
200 | } | 368 | } |
201 | 369 | ||
370 | |||
202 | static int sn_topology_show(struct seq_file *s, void *d) | 371 | static int sn_topology_show(struct seq_file *s, void *d) |
203 | { | 372 | { |
204 | int sz; | 373 | int sz; |
@@ -215,7 +384,6 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
215 | struct sn_hwperf_object_info *p; | 384 | struct sn_hwperf_object_info *p; |
216 | struct sn_hwperf_object_info *obj = d; /* this object */ | 385 | struct sn_hwperf_object_info *obj = d; /* this object */ |
217 | struct sn_hwperf_object_info *objs = s->private; /* all objects */ | 386 | struct sn_hwperf_object_info *objs = s->private; /* all objects */ |
218 | int rack, bay, slot, slab; | ||
219 | u8 shubtype; | 387 | u8 shubtype; |
220 | u8 system_size; | 388 | u8 system_size; |
221 | u8 sharing_size; | 389 | u8 sharing_size; |
@@ -225,7 +393,6 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
225 | u8 region_size; | 393 | u8 region_size; |
226 | u16 nasid_mask; | 394 | u16 nasid_mask; |
227 | int nasid_msb; | 395 | int nasid_msb; |
228 | int pci_bus_ordinal = 0; | ||
229 | 396 | ||
230 | if (obj == objs) { | 397 | if (obj == objs) { |
231 | seq_printf(s, "# sn_topology version 2\n"); | 398 | seq_printf(s, "# sn_topology version 2\n"); |
@@ -253,6 +420,8 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
253 | shubtype ? "shub2" : "shub1", | 420 | shubtype ? "shub2" : "shub1", |
254 | (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, | 421 | (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, |
255 | system_size, sharing_size, coher, region_size); | 422 | system_size, sharing_size, coher, region_size); |
423 | |||
424 | print_pci_topology(s); | ||
256 | } | 425 | } |
257 | 426 | ||
258 | if (SN_HWPERF_FOREIGN(obj)) { | 427 | if (SN_HWPERF_FOREIGN(obj)) { |
@@ -272,11 +441,24 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
272 | if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) | 441 | if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) |
273 | seq_putc(s, '\n'); | 442 | seq_putc(s, '\n'); |
274 | else { | 443 | else { |
444 | cnodeid_t near_mem = -1; | ||
445 | cnodeid_t near_cpu = -1; | ||
446 | |||
275 | seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal)); | 447 | seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal)); |
276 | for (i=0; i < numionodes; i++) { | 448 | |
277 | seq_printf(s, i ? ":%d" : ", dist %d", | 449 | if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt, |
278 | node_distance(ordinal, i)); | 450 | ordinal, &near_mem, &near_cpu) == 0) { |
451 | seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d", | ||
452 | near_mem, near_cpu); | ||
453 | } | ||
454 | |||
455 | if (!SN_HWPERF_IS_IONODE(obj)) { | ||
456 | for_each_online_node(i) { | ||
457 | seq_printf(s, i ? ":%d" : ", dist %d", | ||
458 | node_distance(ordinal, i)); | ||
459 | } | ||
279 | } | 460 | } |
461 | |||
280 | seq_putc(s, '\n'); | 462 | seq_putc(s, '\n'); |
281 | 463 | ||
282 | /* | 464 | /* |
@@ -300,17 +482,6 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
300 | seq_putc(s, '\n'); | 482 | seq_putc(s, '\n'); |
301 | } | 483 | } |
302 | } | 484 | } |
303 | |||
304 | /* | ||
305 | * PCI busses attached to this node, if any | ||
306 | */ | ||
307 | if (sn_hwperf_location_to_bpos(obj->location, | ||
308 | &rack, &bay, &slot, &slab)) { | ||
309 | /* export pci bus info */ | ||
310 | print_pci_topology(s, obj, &pci_bus_ordinal, | ||
311 | rack, bay, slot, slab); | ||
312 | |||
313 | } | ||
314 | } | 485 | } |
315 | 486 | ||
316 | if (obj->ports) { | 487 | if (obj->ports) { |
@@ -572,6 +743,8 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg) | |||
572 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { | 743 | if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { |
573 | memset(p, 0, a.sz); | 744 | memset(p, 0, a.sz); |
574 | for (i = 0; i < nobj; i++) { | 745 | for (i = 0; i < nobj; i++) { |
746 | if (!SN_HWPERF_IS_NODE(objs + i)) | ||
747 | continue; | ||
575 | node = sn_hwperf_obj_to_cnode(objs + i); | 748 | node = sn_hwperf_obj_to_cnode(objs + i); |
576 | for_each_online_cpu(j) { | 749 | for_each_online_cpu(j) { |
577 | if (node != cpu_to_node(j)) | 750 | if (node != cpu_to_node(j)) |
@@ -598,7 +771,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg) | |||
598 | 771 | ||
599 | case SN_HWPERF_GET_NODE_NASID: | 772 | case SN_HWPERF_GET_NODE_NASID: |
600 | if (a.sz != sizeof(u64) || | 773 | if (a.sz != sizeof(u64) || |
601 | (node = a.arg) < 0 || node >= numionodes) { | 774 | (node = a.arg) < 0 || !node_possible(node)) { |
602 | r = -EINVAL; | 775 | r = -EINVAL; |
603 | goto error; | 776 | goto error; |
604 | } | 777 | } |
@@ -627,6 +800,14 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg) | |||
627 | vfree(objs); | 800 | vfree(objs); |
628 | goto error; | 801 | goto error; |
629 | } | 802 | } |
803 | |||
804 | if (!SN_HWPERF_IS_NODE(objs + i) && | ||
805 | !SN_HWPERF_IS_IONODE(objs + i)) { | ||
806 | r = -ENOENT; | ||
807 | vfree(objs); | ||
808 | goto error; | ||
809 | } | ||
810 | |||
630 | *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i); | 811 | *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i); |
631 | vfree(objs); | 812 | vfree(objs); |
632 | } | 813 | } |
@@ -692,6 +873,7 @@ static int sn_hwperf_init(void) | |||
692 | 873 | ||
693 | /* single threaded, once-only initialization */ | 874 | /* single threaded, once-only initialization */ |
694 | down(&sn_hwperf_init_mutex); | 875 | down(&sn_hwperf_init_mutex); |
876 | |||
695 | if (sn_hwperf_salheap) { | 877 | if (sn_hwperf_salheap) { |
696 | up(&sn_hwperf_init_mutex); | 878 | up(&sn_hwperf_init_mutex); |
697 | return e; | 879 | return e; |
@@ -742,19 +924,6 @@ out: | |||
742 | sn_hwperf_salheap = NULL; | 924 | sn_hwperf_salheap = NULL; |
743 | sn_hwperf_obj_cnt = 0; | 925 | sn_hwperf_obj_cnt = 0; |
744 | } | 926 | } |
745 | |||
746 | if (!e) { | ||
747 | /* | ||
748 | * Register a dynamic misc device for ioctl. Platforms | ||
749 | * supporting hotplug will create /dev/sn_hwperf, else | ||
750 | * users can look up the minor number in /proc/misc. | ||
751 | */ | ||
752 | if ((e = misc_register(&sn_hwperf_dev)) != 0) { | ||
753 | printk(KERN_ERR "sn_hwperf_init: misc register " | ||
754 | "for \"sn_hwperf\" failed, err %d\n", e); | ||
755 | } | ||
756 | } | ||
757 | |||
758 | up(&sn_hwperf_init_mutex); | 927 | up(&sn_hwperf_init_mutex); |
759 | return e; | 928 | return e; |
760 | } | 929 | } |
@@ -782,3 +951,41 @@ int sn_topology_release(struct inode *inode, struct file *file) | |||
782 | vfree(seq->private); | 951 | vfree(seq->private); |
783 | return seq_release(inode, file); | 952 | return seq_release(inode, file); |
784 | } | 953 | } |
954 | |||
955 | int sn_hwperf_get_nearest_node(cnodeid_t node, | ||
956 | cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) | ||
957 | { | ||
958 | int e; | ||
959 | int nobj; | ||
960 | struct sn_hwperf_object_info *objbuf; | ||
961 | |||
962 | if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { | ||
963 | e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj, | ||
964 | node, near_mem_node, near_cpu_node); | ||
965 | vfree(objbuf); | ||
966 | } | ||
967 | |||
968 | return e; | ||
969 | } | ||
970 | |||
971 | static int __devinit sn_hwperf_misc_register_init(void) | ||
972 | { | ||
973 | int e; | ||
974 | |||
975 | sn_hwperf_init(); | ||
976 | |||
977 | /* | ||
978 | * Register a dynamic misc device for hwperf ioctls. Platforms | ||
979 | * supporting hotplug will create /dev/sn_hwperf, else user | ||
980 | * can look up the minor number in /proc/misc. | ||
981 | */ | ||
982 | if ((e = misc_register(&sn_hwperf_dev)) != 0) { | ||
983 | printk(KERN_ERR "sn_hwperf_misc_register_init: failed to " | ||
984 | "register misc device for \"%s\"\n", sn_hwperf_dev.name); | ||
985 | } | ||
986 | |||
987 | return e; | ||
988 | } | ||
989 | |||
990 | device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */ | ||
991 | EXPORT_SYMBOL(sn_hwperf_get_nearest_node); | ||
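
The misc-device registration moves out of sn_hwperf_init() into its own device_initcall(), and sn_hwperf_get_nearest_node() is exported for other kernel code; pcibr_bus_fixup() later in this patch uses it to home TIO-hosted buses. A minimal hedged sketch of a caller (helper name is hypothetical):

    /* Hypothetical helper: pick a home node for a device on a headless
     * (CPU-less) TIO node, falling back to "any node" on error. */
    static cnodeid_t choose_home_node(cnodeid_t cnode)
    {
            cnodeid_t near_mem = -1, near_cpu = -1;

            if (sn_hwperf_get_nearest_node(cnode, &near_mem, &near_cpu) == 0)
                    return near_cpu;        /* nearest node with CPUs */
            return (cnodeid_t)-1;           /* caller may use any node */
    }
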
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index 6a80fca807b9..a06719d752a0 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | #include <linux/config.h> | 8 | #include <linux/config.h> |
9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
@@ -15,7 +15,7 @@ | |||
15 | 15 | ||
16 | static int partition_id_show(struct seq_file *s, void *p) | 16 | static int partition_id_show(struct seq_file *s, void *p) |
17 | { | 17 | { |
18 | seq_printf(s, "%d\n", sn_local_partid()); | 18 | seq_printf(s, "%d\n", sn_partition_id); |
19 | return 0; | 19 | return 0; |
20 | } | 20 | } |
21 | 21 | ||
@@ -52,7 +52,7 @@ static int licenseID_open(struct inode *inode, struct file *file) | |||
52 | * the bridge chip. The hardware will then send an interrupt message if the | 52 | * the bridge chip. The hardware will then send an interrupt message if the |
53 | * interrupt line is active. This mimics a level sensitive interrupt. | 53 | * interrupt line is active. This mimics a level sensitive interrupt. |
54 | */ | 54 | */ |
55 | int sn_force_interrupt_flag = 1; | 55 | extern int sn_force_interrupt_flag; |
56 | 56 | ||
57 | static int sn_force_interrupt_show(struct seq_file *s, void *p) | 57 | static int sn_force_interrupt_show(struct seq_file *s, void *p) |
58 | { | 58 | { |
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c index cde7375390b0..adf5db2e2afe 100644 --- a/arch/ia64/sn/kernel/sn2/timer_interrupt.c +++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * | 3 | * |
4 | * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved. | 4 | * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of version 2 of the GNU General Public License | 7 | * under the terms of version 2 of the GNU General Public License |
@@ -50,14 +50,16 @@ void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
50 | LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT); | 50 | LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT); |
51 | } | 51 | } |
52 | 52 | ||
53 | if (enable_shub_wars_1_1()) { | 53 | if (is_shub1()) { |
54 | /* Bugfix code for SHUB 1.1 */ | 54 | if (enable_shub_wars_1_1()) { |
55 | if (pda->pio_shub_war_cam_addr) | 55 | /* Bugfix code for SHUB 1.1 */ |
56 | *pda->pio_shub_war_cam_addr = 0x8000000000000010UL; | 56 | if (pda->pio_shub_war_cam_addr) |
57 | *pda->pio_shub_war_cam_addr = 0x8000000000000010UL; | ||
58 | } | ||
59 | if (pda->sn_lb_int_war_ticks == 0) | ||
60 | sn_lb_int_war_check(); | ||
61 | pda->sn_lb_int_war_ticks++; | ||
62 | if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL) | ||
63 | pda->sn_lb_int_war_ticks = 0; | ||
57 | } | 64 | } |
58 | if (pda->sn_lb_int_war_ticks == 0) | ||
59 | sn_lb_int_war_check(); | ||
60 | pda->sn_lb_int_war_ticks++; | ||
61 | if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL) | ||
62 | pda->sn_lb_int_war_ticks = 0; | ||
63 | } | 65 | } |
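
The restructuring scopes both timer workarounds to shub1 hardware. A condensed sketch of the resulting control flow (equivalent logic, not the literal patch):

    if (is_shub1()) {
            /* SHUB 1.1 PIO CAM workaround, only when enabled */
            if (enable_shub_wars_1_1() && pda->pio_shub_war_cam_addr)
                    *pda->pio_shub_war_cam_addr = 0x8000000000000010UL;

            /* run the LB interrupt check once per SN_LB_INT_WAR_INTERVAL */
            if (pda->sn_lb_int_war_ticks == 0)
                    sn_lb_int_war_check();
            if (++pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
                    pda->sn_lb_int_war_ticks = 0;
    }
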
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 254fe15c064b..b45db5133f55 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c | |||
@@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num, | |||
191 | { | 191 | { |
192 | struct cx_dev *cx_dev; | 192 | struct cx_dev *cx_dev; |
193 | 193 | ||
194 | cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL); | 194 | cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL); |
195 | DBG("cx_dev= 0x%p\n", cx_dev); | 195 | DBG("cx_dev= 0x%p\n", cx_dev); |
196 | if (cx_dev == NULL) | 196 | if (cx_dev == NULL) |
197 | return -ENOMEM; | 197 | return -ENOMEM; |
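
This hunk, like several below in tioca_provider.c, converts kcalloc(1, size, flags) to kzalloc(size, flags): both return size zeroed bytes, but kzalloc states the single-object intent and skips the redundant multiply. Equivalent forms:

    /* Both yield sizeof(struct cx_dev) zeroed bytes on success. */
    cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
    cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
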
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c index bb1d5cf30440..ed7c21586e98 100644 --- a/arch/ia64/sn/kernel/xpc_main.c +++ b/arch/ia64/sn/kernel/xpc_main.c | |||
@@ -885,6 +885,10 @@ xpc_init(void) | |||
885 | pid_t pid; | 885 | pid_t pid; |
886 | 886 | ||
887 | 887 | ||
888 | if (!ia64_platform_is("sn2")) { | ||
889 | return -ENODEV; | ||
890 | } | ||
891 | |||
888 | /* | 892 | /* |
889 | * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng | 893 | * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng |
890 | * both a partition's reserved page and its XPC variables. Its size was | 894 | * both a partition's reserved page and its XPC variables. Its size was |
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c index 78c13d676fa6..e5c6d3c0a8e9 100644 --- a/arch/ia64/sn/kernel/xpnet.c +++ b/arch/ia64/sn/kernel/xpnet.c | |||
@@ -130,7 +130,7 @@ struct net_device *xpnet_device; | |||
130 | */ | 130 | */ |
131 | static u64 xpnet_broadcast_partitions; | 131 | static u64 xpnet_broadcast_partitions; |
132 | /* protect above */ | 132 | /* protect above */ |
133 | static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED; | 133 | static DEFINE_SPINLOCK(xpnet_broadcast_lock); |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * Since the Block Transfer Engine (BTE) is being used for the transfer | 136 | * Since the Block Transfer Engine (BTE) is being used for the transfer |
@@ -636,6 +636,10 @@ xpnet_init(void) | |||
636 | int result = -ENOMEM; | 636 | int result = -ENOMEM; |
637 | 637 | ||
638 | 638 | ||
639 | if (!ia64_platform_is("sn2")) { | ||
640 | return -ENODEV; | ||
641 | } | ||
642 | |||
639 | dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); | 643 | dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); |
640 | 644 | ||
641 | /* | 645 | /* |
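
xpc_init() and xpnet_init() now refuse to load on non-sn2 platforms, and the broadcast lock switches to the modern static initializer. A minimal sketch combining both idioms (module body elided):

    static DEFINE_SPINLOCK(example_lock);  /* replaces SPIN_LOCK_UNLOCKED */

    static int __init example_init(void)
    {
            if (!ia64_platform_is("sn2"))
                    return -ENODEV;        /* not an sn2 box: bail early */
            /* ... normal registration ... */
            return 0;
    }
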
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile index 2f915bce25f9..321576b1b425 100644 --- a/arch/ia64/sn/pci/Makefile +++ b/arch/ia64/sn/pci/Makefile | |||
@@ -7,4 +7,4 @@ | |||
7 | # | 7 | # |
8 | # Makefile for the sn pci general routines. | 8 | # Makefile for the sn pci general routines. |
9 | 9 | ||
10 | obj-y := pci_dma.o tioca_provider.o pcibr/ | 10 | obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/ |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index b058dc2a0b9d..34093476e965 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
@@ -215,8 +215,8 @@ void sn_dma_flush(uint64_t addr) | |||
215 | int is_tio; | 215 | int is_tio; |
216 | int wid_num; | 216 | int wid_num; |
217 | int i, j; | 217 | int i, j; |
218 | int bwin; | ||
219 | uint64_t flags; | 218 | uint64_t flags; |
219 | uint64_t itte; | ||
220 | struct hubdev_info *hubinfo; | 220 | struct hubdev_info *hubinfo; |
221 | volatile struct sn_flush_device_list *p; | 221 | volatile struct sn_flush_device_list *p; |
222 | struct sn_flush_nasid_entry *flush_nasid_list; | 222 | struct sn_flush_nasid_entry *flush_nasid_list; |
@@ -233,31 +233,36 @@ void sn_dma_flush(uint64_t addr) | |||
233 | if (!hubinfo) { | 233 | if (!hubinfo) { |
234 | BUG(); | 234 | BUG(); |
235 | } | 235 | } |
236 | is_tio = (nasid & 1); | ||
237 | if (is_tio) { | ||
238 | wid_num = TIO_SWIN_WIDGETNUM(addr); | ||
239 | bwin = TIO_BWIN_WINDOWNUM(addr); | ||
240 | } else { | ||
241 | wid_num = SWIN_WIDGETNUM(addr); | ||
242 | bwin = BWIN_WINDOWNUM(addr); | ||
243 | } | ||
244 | 236 | ||
245 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; | 237 | flush_nasid_list = &hubinfo->hdi_flush_nasid_list; |
246 | if (flush_nasid_list->widget_p == NULL) | 238 | if (flush_nasid_list->widget_p == NULL) |
247 | return; | 239 | return; |
248 | if (bwin > 0) { | ||
249 | uint64_t itte = flush_nasid_list->iio_itte[bwin]; | ||
250 | 240 | ||
251 | if (is_tio) { | 241 | is_tio = (nasid & 1); |
252 | wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) & | 242 | if (is_tio) { |
253 | TIO_ITTE_WIDGET_MASK; | 243 | int itte_index; |
254 | } else { | 244 | |
255 | wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) & | 245 | if (TIO_HWIN(addr)) |
256 | IIO_ITTE_WIDGET_MASK; | 246 | itte_index = 0; |
257 | } | 247 | else if (TIO_BWIN_WINDOWNUM(addr)) |
248 | itte_index = TIO_BWIN_WINDOWNUM(addr); | ||
249 | else | ||
250 | itte_index = -1; | ||
251 | |||
252 | if (itte_index >= 0) { | ||
253 | itte = flush_nasid_list->iio_itte[itte_index]; | ||
254 | if (!TIO_ITTE_VALID(itte)) | ||
255 | return; | ||
256 | wid_num = TIO_ITTE_WIDGET(itte); | ||
257 | } else | ||
258 | wid_num = TIO_SWIN_WIDGETNUM(addr); | ||
259 | } else { | ||
260 | if (BWIN_WINDOWNUM(addr)) { | ||
261 | itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)]; | ||
262 | wid_num = IIO_ITTE_WIDGET(itte); | ||
263 | } else | ||
264 | wid_num = SWIN_WIDGETNUM(addr); | ||
258 | } | 265 | } |
259 | if (flush_nasid_list->widget_p == NULL) | ||
260 | return; | ||
261 | if (flush_nasid_list->widget_p[wid_num] == NULL) | 266 | if (flush_nasid_list->widget_p[wid_num] == NULL) |
262 | return; | 267 | return; |
263 | p = &flush_nasid_list->widget_p[wid_num][0]; | 268 | p = &flush_nasid_list->widget_p[wid_num][0]; |
@@ -283,10 +288,16 @@ void sn_dma_flush(uint64_t addr) | |||
283 | /* | 288 | /* |
284 | * For TIOCP use the Device(x) Write Request Buffer Flush Bridge | 289 | * For TIOCP use the Device(x) Write Request Buffer Flush Bridge |
285 | * register since it ensures the data has entered the coherence | 290 | * register since it ensures the data has entered the coherence |
286 | * domain, unlike PIC | 291 | * domain, unlike PIC. |
287 | */ | 292 | */ |
288 | if (is_tio) { | 293 | if (is_tio) { |
289 | uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID); | 294 | /* |
295 | * Note: devices behind TIOCE should never be matched in the | ||
296 | * above code, and so the following code is PIC/CP centric. | ||
297 | * If CE ever needs the sn_dma_flush mechanism, we will have | ||
298 | * to account for that here and in tioce_bus_fixup(). | ||
299 | */ | ||
300 | uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); | ||
290 | uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id); | 301 | uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id); |
291 | 302 | ||
292 | /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ | 303 | /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ |
@@ -306,7 +317,8 @@ void sn_dma_flush(uint64_t addr) | |||
306 | *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; | 317 | *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; |
307 | 318 | ||
308 | /* wait for the interrupt to come back. */ | 319 | /* wait for the interrupt to come back. */ |
309 | while (*(p->sfdl_flush_addr) != 0x10f) ; | 320 | while (*(p->sfdl_flush_addr) != 0x10f) |
321 | cpu_relax(); | ||
310 | 322 | ||
311 | /* okay, everything is synched up. */ | 323 | /* okay, everything is synched up. */ |
312 | spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags); | 324 | spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags); |
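
Besides routing big-window decode through the ITTE accessor macros, this hunk adds cpu_relax() to the flush-completion busy-wait, the standard kernel spin idiom. A hedged generic sketch (DONE_VALUE is illustrative):

    /* Spin until the handshake word is written by the interrupt path;
     * cpu_relax() marks this as a busy-wait (lower power, SMT-friendly). */
    while (*flush_addr != DONE_VALUE)
            cpu_relax();
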
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index b95e928636a1..7b03b8084ffc 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/sn/pcibus_provider_defs.h> | 15 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include <asm/sn/pcidev.h> | 16 | #include <asm/sn/pcidev.h> |
17 | #include <asm/sn/sn_sal.h> | 17 | #include <asm/sn/sn_sal.h> |
18 | #include <asm/sn/sn2/sn_hwperf.h> | ||
18 | #include "xtalk/xwidgetdev.h" | 19 | #include "xtalk/xwidgetdev.h" |
19 | #include "xtalk/hubdev.h" | 20 | #include "xtalk/hubdev.h" |
20 | 21 | ||
@@ -60,7 +61,7 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft) | |||
60 | ret_stuff.status = 0; | 61 | ret_stuff.status = 0; |
61 | ret_stuff.v0 = 0; | 62 | ret_stuff.v0 = 0; |
62 | 63 | ||
63 | segment = 0; | 64 | segment = soft->pbi_buscommon.bs_persist_segment; |
64 | busnum = soft->pbi_buscommon.bs_persist_busnum; | 65 | busnum = soft->pbi_buscommon.bs_persist_busnum; |
65 | SAL_CALL_NOLOCK(ret_stuff, | 66 | SAL_CALL_NOLOCK(ret_stuff, |
66 | (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | 67 | (u64) SN_SAL_IOIF_ERROR_INTERRUPT, |
@@ -88,6 +89,7 @@ void * | |||
88 | pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | 89 | pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) |
89 | { | 90 | { |
90 | int nasid, cnode, j; | 91 | int nasid, cnode, j; |
92 | cnodeid_t near_cnode; | ||
91 | struct hubdev_info *hubdev_info; | 93 | struct hubdev_info *hubdev_info; |
92 | struct pcibus_info *soft; | 94 | struct pcibus_info *soft; |
93 | struct sn_flush_device_list *sn_flush_device_list; | 95 | struct sn_flush_device_list *sn_flush_device_list; |
@@ -115,7 +117,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
115 | /* | 117 | /* |
116 | * register the bridge's error interrupt handler | 118 | * register the bridge's error interrupt handler |
117 | */ | 119 | */ |
118 | if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, | 120 | if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler, |
119 | SA_SHIRQ, "PCIBR error", (void *)(soft))) { | 121 | SA_SHIRQ, "PCIBR error", (void *)(soft))) { |
120 | printk(KERN_WARNING | 122 | printk(KERN_WARNING |
121 | "pcibr cannot allocate interrupt for error handler\n"); | 123 | "pcibr cannot allocate interrupt for error handler\n"); |
@@ -142,9 +144,12 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
142 | j++, sn_flush_device_list++) { | 144 | j++, sn_flush_device_list++) { |
143 | if (sn_flush_device_list->sfdl_slot == -1) | 145 | if (sn_flush_device_list->sfdl_slot == -1) |
144 | continue; | 146 | continue; |
145 | if (sn_flush_device_list-> | 147 | if ((sn_flush_device_list-> |
146 | sfdl_persistent_busnum == | 148 | sfdl_persistent_segment == |
147 | soft->pbi_buscommon.bs_persist_busnum) | 149 | soft->pbi_buscommon.bs_persist_segment) && |
150 | (sn_flush_device_list-> | ||
151 | sfdl_persistent_busnum == | ||
152 | soft->pbi_buscommon.bs_persist_busnum)) | ||
148 | sn_flush_device_list->sfdl_pcibus_info = | 153 | sn_flush_device_list->sfdl_pcibus_info = |
149 | soft; | 154 | soft; |
150 | } | 155 | } |
@@ -158,12 +163,18 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
158 | memset(soft->pbi_int_ate_resource.ate, 0, | 163 | memset(soft->pbi_int_ate_resource.ate, 0, |
159 | (soft->pbi_int_ate_size * sizeof(uint64_t))); | 164 | (soft->pbi_int_ate_size * sizeof(uint64_t))); |
160 | 165 | ||
161 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) | 166 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { |
162 | /* | 167 | /* TIO PCI Bridge: find nearest node with CPUs */ |
163 | * TIO PCI Bridge with no closest node information. | 168 | int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode); |
164 | * FIXME: Find another way to determine the closest node | 169 | |
165 | */ | 170 | if (e < 0) { |
166 | controller->node = -1; | 171 | near_cnode = (cnodeid_t)-1; /* use any node */ |
172 | printk(KERN_WARNING "pcibr_bus_fixup: failed to find " | ||
173 | "near node with CPUs to TIO node %d, err=%d\n", | ||
174 | cnode, e); | ||
175 | } | ||
176 | controller->node = near_cnode; | ||
177 | } | ||
167 | else | 178 | else |
168 | controller->node = cnode; | 179 | controller->node = cnode; |
169 | return soft; | 180 | return soft; |
@@ -175,6 +186,9 @@ void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info) | |||
175 | struct pcibus_info *pcibus_info; | 186 | struct pcibus_info *pcibus_info; |
176 | int bit = sn_irq_info->irq_int_bit; | 187 | int bit = sn_irq_info->irq_int_bit; |
177 | 188 | ||
189 | if (!sn_irq_info->irq_bridge) | ||
190 | return; | ||
191 | |||
178 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | 192 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; |
179 | if (pcidev_info) { | 193 | if (pcidev_info) { |
180 | pcibus_info = | 194 | pcibus_info = |
@@ -184,7 +198,7 @@ void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info) | |||
184 | } | 198 | } |
185 | } | 199 | } |
186 | 200 | ||
187 | void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info) | 201 | void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info) |
188 | { | 202 | { |
189 | struct pcidev_info *pcidev_info; | 203 | struct pcidev_info *pcidev_info; |
190 | struct pcibus_info *pcibus_info; | 204 | struct pcibus_info *pcibus_info; |
@@ -219,6 +233,8 @@ struct sn_pcibus_provider pcibr_provider = { | |||
219 | .dma_map_consistent = pcibr_dma_map_consistent, | 233 | .dma_map_consistent = pcibr_dma_map_consistent, |
220 | .dma_unmap = pcibr_dma_unmap, | 234 | .dma_unmap = pcibr_dma_unmap, |
221 | .bus_fixup = pcibr_bus_fixup, | 235 | .bus_fixup = pcibr_bus_fixup, |
236 | .force_interrupt = pcibr_force_interrupt, | ||
237 | .target_interrupt = pcibr_target_interrupt | ||
222 | }; | 238 | }; |
223 | 239 | ||
224 | int | 240 | int |
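
With .force_interrupt and .target_interrupt added to struct sn_pcibus_provider (tioca leaves both NULL below; tioce fills both in), generic SN IRQ code can dispatch per ASIC. A hedged sketch of the call-site pattern (dispatch helper is illustrative):

    /* Providers may leave either hook NULL, so check before calling. */
    static void sn_retarget_irq(struct sn_pcibus_provider *prov,
                                struct sn_irq_info *info)
    {
            if (prov && prov->target_interrupt)
                    prov->target_interrupt(info);
    }
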
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 5d76a7581465..19bced34d5f1 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) | |||
148 | tioca_kern->ca_pcigart_entries = | 148 | tioca_kern->ca_pcigart_entries = |
149 | tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; | 149 | tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; |
150 | tioca_kern->ca_pcigart_pagemap = | 150 | tioca_kern->ca_pcigart_pagemap = |
151 | kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); | 151 | kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); |
152 | if (!tioca_kern->ca_pcigart_pagemap) { | 152 | if (!tioca_kern->ca_pcigart_pagemap) { |
153 | free_pages((unsigned long)tioca_kern->ca_gart, | 153 | free_pages((unsigned long)tioca_kern->ca_gart, |
154 | get_order(tioca_kern->ca_gart_size)); | 154 | get_order(tioca_kern->ca_gart_size)); |
@@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) | |||
392 | * allocate a map struct | 392 | * allocate a map struct |
393 | */ | 393 | */ |
394 | 394 | ||
395 | ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC); | 395 | ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC); |
396 | if (!ca_dmamap) | 396 | if (!ca_dmamap) |
397 | goto map_return; | 397 | goto map_return; |
398 | 398 | ||
@@ -559,7 +559,7 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | |||
559 | ret_stuff.status = 0; | 559 | ret_stuff.status = 0; |
560 | ret_stuff.v0 = 0; | 560 | ret_stuff.v0 = 0; |
561 | 561 | ||
562 | segment = 0; | 562 | segment = soft->ca_common.bs_persist_segment; |
563 | busnum = soft->ca_common.bs_persist_busnum; | 563 | busnum = soft->ca_common.bs_persist_busnum; |
564 | 564 | ||
565 | SAL_CALL_NOLOCK(ret_stuff, | 565 | SAL_CALL_NOLOCK(ret_stuff, |
@@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
600 | * Allocate kernel bus soft and copy from prom. | 600 | * Allocate kernel bus soft and copy from prom. |
601 | */ | 601 | */ |
602 | 602 | ||
603 | tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL); | 603 | tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL); |
604 | if (!tioca_common) | 604 | if (!tioca_common) |
605 | return NULL; | 605 | return NULL; |
606 | 606 | ||
@@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
609 | 609 | ||
610 | /* init kernel-private area */ | 610 | /* init kernel-private area */ |
611 | 611 | ||
612 | tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL); | 612 | tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL); |
613 | if (!tioca_kern) { | 613 | if (!tioca_kern) { |
614 | kfree(tioca_common); | 614 | kfree(tioca_common); |
615 | return NULL; | 615 | return NULL; |
@@ -622,7 +622,8 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
622 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); | 622 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); |
623 | tioca_common->ca_kernel_private = (uint64_t) tioca_kern; | 623 | tioca_common->ca_kernel_private = (uint64_t) tioca_kern; |
624 | 624 | ||
625 | bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum); | 625 | bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, |
626 | tioca_common->ca_common.bs_persist_busnum); | ||
626 | BUG_ON(!bus); | 627 | BUG_ON(!bus); |
627 | tioca_kern->ca_devices = &bus->devices; | 628 | tioca_kern->ca_devices = &bus->devices; |
628 | 629 | ||
@@ -656,6 +657,8 @@ static struct sn_pcibus_provider tioca_pci_interfaces = { | |||
656 | .dma_map_consistent = tioca_dma_map, | 657 | .dma_map_consistent = tioca_dma_map, |
657 | .dma_unmap = tioca_dma_unmap, | 658 | .dma_unmap = tioca_dma_unmap, |
658 | .bus_fixup = tioca_bus_fixup, | 659 | .bus_fixup = tioca_bus_fixup, |
660 | .force_interrupt = NULL, | ||
661 | .target_interrupt = NULL | ||
659 | }; | 662 | }; |
660 | 663 | ||
661 | /** | 664 | /** |
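
The tioca fixup now passes the persistent PCI segment to pci_find_bus(); once multiple segments exist, a bus number alone no longer identifies a bus. In API terms:

    /* pci_find_bus(domain, busnr): bus 0 of segment 1 is distinct from
     * bus 0 of segment 0, so both coordinates are required. */
    bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
                       tioca_common->ca_common.bs_persist_busnum);
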
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c new file mode 100644 index 000000000000..8e75db2b825d --- /dev/null +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -0,0 +1,771 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <asm/sn/sn_sal.h> | ||
13 | #include <asm/sn/addrs.h> | ||
14 | #include <asm/sn/pcidev.h> | ||
15 | #include <asm/sn/pcibus_provider_defs.h> | ||
16 | #include <asm/sn/tioce_provider.h> | ||
17 | |||
18 | /** | ||
19 | * Bus address ranges for the 5 flavors of TIOCE DMA | ||
20 | */ | ||
21 | |||
22 | #define TIOCE_D64_MIN 0x8000000000000000UL | ||
23 | #define TIOCE_D64_MAX 0xffffffffffffffffUL | ||
24 | #define TIOCE_D64_ADDR(a) ((a) >= TIOCE_D64_MIN) | ||
25 | |||
26 | #define TIOCE_D32_MIN 0x0000000080000000UL | ||
27 | #define TIOCE_D32_MAX 0x00000000ffffffffUL | ||
28 | #define TIOCE_D32_ADDR(a) ((a) >= TIOCE_D32_MIN && (a) <= TIOCE_D32_MAX) | ||
29 | |||
30 | #define TIOCE_M32_MIN 0x0000000000000000UL | ||
31 | #define TIOCE_M32_MAX 0x000000007fffffffUL | ||
32 | #define TIOCE_M32_ADDR(a) ((a) >= TIOCE_M32_MIN && (a) <= TIOCE_M32_MAX) | ||
33 | |||
34 | #define TIOCE_M40_MIN 0x0000004000000000UL | ||
35 | #define TIOCE_M40_MAX 0x0000007fffffffffUL | ||
36 | #define TIOCE_M40_ADDR(a) ((a) >= TIOCE_M40_MIN && (a) <= TIOCE_M40_MAX) | ||
37 | |||
38 | #define TIOCE_M40S_MIN 0x0000008000000000UL | ||
39 | #define TIOCE_M40S_MAX 0x000000ffffffffffUL | ||
40 | #define TIOCE_M40S_ADDR(a) ((a) >= TIOCE_M40S_MIN && (a) <= TIOCE_M40S_MAX) | ||
41 | |||
42 | /* | ||
43 | * ATE manipulation macros. | ||
44 | */ | ||
45 | |||
46 | #define ATE_PAGESHIFT(ps) (__ffs(ps)) | ||
47 | #define ATE_PAGEMASK(ps) ((ps)-1) | ||
48 | |||
49 | #define ATE_PAGE(x, ps) ((x) >> ATE_PAGESHIFT(ps)) | ||
50 | #define ATE_NPAGES(start, len, pagesize) \ | ||
51 | (ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1) | ||
52 | |||
53 | #define ATE_VALID(ate) ((ate) & (1UL << 63)) | ||
54 | #define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63)) | ||
55 | |||
56 | /* | ||
57 | * Flavors of ate-based mapping supported by tioce_alloc_map() | ||
58 | */ | ||
59 | |||
60 | #define TIOCE_ATE_M32 1 | ||
61 | #define TIOCE_ATE_M40 2 | ||
62 | #define TIOCE_ATE_M40S 3 | ||
63 | |||
64 | #define KB(x) ((x) << 10) | ||
65 | #define MB(x) ((x) << 20) | ||
66 | #define GB(x) ((x) << 30) | ||
67 | |||
68 | /** | ||
69 | * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode | ||
70 | * @ct_addr: system coretalk address | ||
71 | * | ||
72 | * Map @ct_addr into 64-bit CE bus space. No device context is necessary | ||
73 | * and no CE mappings are consumed. | ||
74 | * | ||
75 | * Bits 53:0 come from the coretalk address. The remaining bits are set as | ||
76 | * follows: | ||
77 | * | ||
78 | * 63 - must be 1 to indicate d64 mode to CE hardware | ||
79 | * 62 - barrier bit ... controlled with tioce_dma_barrier() | ||
80 | * 61 - 0 since this is not an MSI transaction | ||
81 | * 60:54 - reserved, MBZ | ||
82 | */ | ||
83 | static uint64_t | ||
84 | tioce_dma_d64(unsigned long ct_addr) | ||
85 | { | ||
86 | uint64_t bus_addr; | ||
87 | |||
88 | bus_addr = ct_addr | (1UL << 63); | ||
89 | |||
90 | return bus_addr; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * pcidev_to_tioce - return misc ce related pointers given a pci_dev | ||
95 | * @pdev: pci device context | ||
96 | * @base: ptr to store struct tioce * for the CE holding this device | ||
97 | * @kernel: ptr to store struct tioce_kernel * for the CE holding this device | ||
98 | * @port: ptr to store the CE port number that this device is on | ||
99 | * | ||
100 | * Return pointers to various CE-related structures for the CE upstream of | ||
101 | * @pci_dev. | ||
102 | */ | ||
103 | static inline void | ||
104 | pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, | ||
105 | struct tioce_kernel **kernel, int *port) | ||
106 | { | ||
107 | struct pcidev_info *pcidev_info; | ||
108 | struct tioce_common *ce_common; | ||
109 | struct tioce_kernel *ce_kernel; | ||
110 | |||
111 | pcidev_info = SN_PCIDEV_INFO(pdev); | ||
112 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
113 | ce_kernel = (struct tioce_kernel *)ce_common->ce_kernel_private; | ||
114 | |||
115 | if (base) | ||
116 | *base = (struct tioce *)ce_common->ce_pcibus.bs_base; | ||
117 | if (kernel) | ||
118 | *kernel = ce_kernel; | ||
119 | |||
120 | /* | ||
121 | * we use port as a zero-based value internally, even though the | ||
122 | * documentation is 1-based. | ||
123 | */ | ||
124 | if (port) | ||
125 | *port = | ||
126 | (pdev->bus->number < ce_kernel->ce_port1_secondary) ? 0 : 1; | ||
127 | } | ||
128 | |||
129 | /** | ||
130 | * tioce_alloc_map - Given a coretalk address, map it to pcie bus address | ||
131 | * space using one of the various ATE-based address modes. | ||
132 | * @ce_kern: tioce context | ||
133 | * @type: map mode to use | ||
134 | * @port: 0-based port that the requesting device is downstream of | ||
135 | * @ct_addr: the coretalk address to map | ||
136 | * @len: number of bytes to map | ||
137 | * | ||
138 | * Given the addressing type, set up various parameters that define the | ||
139 | * ATE pool to use. Search for a contiguous block of entries to cover the | ||
140 | * length, and if enough resources exist, fill in the ATEs and construct a | ||
141 | * tioce_dmamap struct to track the mapping. | ||
142 | */ | ||
143 | static uint64_t | ||
144 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | ||
145 | uint64_t ct_addr, int len) | ||
146 | { | ||
147 | int i; | ||
148 | int j; | ||
149 | int first; | ||
150 | int last; | ||
151 | int entries; | ||
152 | int nates; | ||
153 | int pagesize; | ||
154 | uint64_t *ate_shadow; | ||
155 | uint64_t *ate_reg; | ||
156 | uint64_t addr; | ||
157 | struct tioce *ce_mmr; | ||
158 | uint64_t bus_base; | ||
159 | struct tioce_dmamap *map; | ||
160 | |||
161 | ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; | ||
162 | |||
163 | switch (type) { | ||
164 | case TIOCE_ATE_M32: | ||
165 | /* | ||
166 | * The first 64 entries of the ate3240 pool are dedicated to | ||
167 | * super-page (TIOCE_ATE_M40S) mode. | ||
168 | */ | ||
169 | first = 64; | ||
170 | entries = TIOCE_NUM_M3240_ATES - 64; | ||
171 | ate_shadow = ce_kern->ce_ate3240_shadow; | ||
172 | ate_reg = ce_mmr->ce_ure_ate3240; | ||
173 | pagesize = ce_kern->ce_ate3240_pagesize; | ||
174 | bus_base = TIOCE_M32_MIN; | ||
175 | break; | ||
176 | case TIOCE_ATE_M40: | ||
177 | first = 0; | ||
178 | entries = TIOCE_NUM_M40_ATES; | ||
179 | ate_shadow = ce_kern->ce_ate40_shadow; | ||
180 | ate_reg = ce_mmr->ce_ure_ate40; | ||
181 | pagesize = MB(64); | ||
182 | bus_base = TIOCE_M40_MIN; | ||
183 | break; | ||
184 | case TIOCE_ATE_M40S: | ||
185 | /* | ||
186 | * ate3240 entries 0-31 are dedicated to port1 super-page | ||
187 | * mappings. ate3240 entries 32-63 are dedicated to port2. | ||
188 | */ | ||
189 | first = port * 32; | ||
190 | entries = 32; | ||
191 | ate_shadow = ce_kern->ce_ate3240_shadow; | ||
192 | ate_reg = ce_mmr->ce_ure_ate3240; | ||
193 | pagesize = GB(16); | ||
194 | bus_base = TIOCE_M40S_MIN; | ||
195 | break; | ||
196 | default: | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | nates = ATE_NPAGES(ct_addr, len, pagesize); | ||
201 | if (nates > entries) | ||
202 | return 0; | ||
203 | |||
204 | last = first + entries - nates; | ||
205 | for (i = first; i <= last; i++) { | ||
206 | if (ATE_VALID(ate_shadow[i])) | ||
207 | continue; | ||
208 | |||
209 | for (j = i; j < i + nates; j++) | ||
210 | if (ATE_VALID(ate_shadow[j])) | ||
211 | break; | ||
212 | |||
213 | if (j >= i + nates) | ||
214 | break; | ||
215 | } | ||
216 | |||
217 | if (i > last) | ||
218 | return 0; | ||
219 | |||
220 | map = kcalloc(1, sizeof(struct tioce_dmamap), GFP_ATOMIC); | ||
221 | if (!map) | ||
222 | return 0; | ||
223 | |||
224 | addr = ct_addr; | ||
225 | for (j = 0; j < nates; j++) { | ||
226 | uint64_t ate; | ||
227 | |||
228 | ate = ATE_MAKE(addr, pagesize); | ||
229 | ate_shadow[i + j] = ate; | ||
230 | ate_reg[i + j] = ate; | ||
231 | addr += pagesize; | ||
232 | } | ||
233 | |||
234 | map->refcnt = 1; | ||
235 | map->nbytes = nates * pagesize; | ||
236 | map->ct_start = ct_addr & ~ATE_PAGEMASK(pagesize); | ||
237 | map->pci_start = bus_base + (i * pagesize); | ||
238 | map->ate_hw = &ate_reg[i]; | ||
239 | map->ate_shadow = &ate_shadow[i]; | ||
240 | map->ate_count = nates; | ||
241 | |||
242 | list_add(&map->ce_dmamap_list, &ce_kern->ce_dmamap_list); | ||
243 | |||
244 | return (map->pci_start + (ct_addr - map->ct_start)); | ||
245 | } | ||
246 | |||
247 | /** | ||
248 | * tioce_dma_d32 - create a DMA mapping using 32-bit direct mode | ||
249 | * @pdev: linux pci_dev representing the function | ||
250 | * @ct_addr: system coretalk address to map | ||
251 | * | ||
252 | * Map @ct_addr into 32-bit bus space of the CE upstream of @pdev. | ||
253 | */ | ||
254 | static uint64_t | ||
255 | tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | ||
256 | { | ||
257 | int dma_ok; | ||
258 | int port; | ||
259 | struct tioce *ce_mmr; | ||
260 | struct tioce_kernel *ce_kern; | ||
261 | uint64_t ct_upper; | ||
262 | uint64_t ct_lower; | ||
263 | dma_addr_t bus_addr; | ||
264 | |||
265 | ct_upper = ct_addr & ~0x3fffffffUL; | ||
266 | ct_lower = ct_addr & 0x3fffffffUL; | ||
267 | |||
268 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | ||
269 | |||
270 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { | ||
271 | volatile uint64_t tmp; | ||
272 | |||
273 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; | ||
274 | ce_mmr->ce_ure_dir_map[port] = ct_upper; | ||
275 | tmp = ce_mmr->ce_ure_dir_map[port]; | ||
276 | dma_ok = 1; | ||
277 | } else | ||
278 | dma_ok = (ce_kern->ce_port[port].dirmap_shadow == ct_upper); | ||
279 | |||
280 | if (dma_ok) { | ||
281 | ce_kern->ce_port[port].dirmap_refcnt++; | ||
282 | bus_addr = TIOCE_D32_MIN + ct_lower; | ||
283 | } else | ||
284 | bus_addr = 0; | ||
285 | |||
286 | return bus_addr; | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * tioce_dma_barrier - swizzle a TIOCE bus address to include or exclude | ||
291 | * the barrier bit. | ||
292 | * @bus_addr: bus address to swizzle | ||
293 | * | ||
294 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier | ||
295 | * attributes. | ||
296 | */ | ||
297 | static uint64_t | ||
298 | tioce_dma_barrier(uint64_t bus_addr, int on) | ||
299 | { | ||
300 | uint64_t barrier_bit; | ||
301 | |||
302 | /* barrier not supported in M40/M40S mode */ | ||
303 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) | ||
304 | return bus_addr; | ||
305 | |||
306 | if (TIOCE_D64_ADDR(bus_addr)) | ||
307 | barrier_bit = (1UL << 62); | ||
308 | else /* must be m32 or d32 */ | ||
309 | barrier_bit = (1UL << 30); | ||
310 | |||
311 | return (on) ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit); | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * tioce_dma_unmap - release CE mapping resources | ||
316 | * @pdev: linux pci_dev representing the function | ||
317 | * @bus_addr: bus address returned by an earlier tioce_dma_map | ||
318 | * @dir: mapping direction (unused) | ||
319 | * | ||
320 | * Locate mapping resources associated with @bus_addr and release them. | ||
321 | * For mappings created using the direct modes there are no resources | ||
322 | * to release. | ||
323 | */ | ||
324 | void | ||
325 | tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | ||
326 | { | ||
327 | int i; | ||
328 | int port; | ||
329 | struct tioce_kernel *ce_kern; | ||
330 | struct tioce *ce_mmr; | ||
331 | unsigned long flags; | ||
332 | |||
333 | bus_addr = tioce_dma_barrier(bus_addr, 0); | ||
334 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | ||
335 | |||
336 | /* nothing to do for D64 */ | ||
337 | |||
338 | if (TIOCE_D64_ADDR(bus_addr)) | ||
339 | return; | ||
340 | |||
341 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | ||
342 | |||
343 | if (TIOCE_D32_ADDR(bus_addr)) { | ||
344 | if (--ce_kern->ce_port[port].dirmap_refcnt == 0) { | ||
345 | ce_kern->ce_port[port].dirmap_shadow = 0; | ||
346 | ce_mmr->ce_ure_dir_map[port] = 0; | ||
347 | } | ||
348 | } else { | ||
349 | struct tioce_dmamap *map; | ||
350 | |||
351 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, | ||
352 | ce_dmamap_list) { | ||
353 | uint64_t last; | ||
354 | |||
355 | last = map->pci_start + map->nbytes - 1; | ||
356 | if (bus_addr >= map->pci_start && bus_addr <= last) | ||
357 | break; | ||
358 | } | ||
359 | |||
360 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { | ||
361 | printk(KERN_WARNING | ||
362 | "%s: %s - no map found for bus_addr 0x%lx\n", | ||
363 | __FUNCTION__, pci_name(pdev), bus_addr); | ||
364 | } else if (--map->refcnt == 0) { | ||
365 | for (i = 0; i < map->ate_count; i++) { | ||
366 | map->ate_shadow[i] = 0; | ||
367 | map->ate_hw[i] = 0; | ||
368 | } | ||
369 | |||
370 | list_del(&map->ce_dmamap_list); | ||
371 | kfree(map); | ||
372 | } | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | ||
376 | } | ||
377 | |||
378 | /** | ||
379 | * tioce_do_dma_map - map pages for PCI DMA | ||
380 | * @pdev: linux pci_dev representing the function | ||
381 | * @paddr: host physical address to map | ||
382 | * @byte_count: bytes to map | ||
383 | * | ||
384 | * This is the main wrapper for mapping host physical pages to CE PCI space. | ||
385 | * The mapping mode used is based on the device's dma_mask. | ||
386 | */ | ||
387 | static uint64_t | ||
388 | tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, | ||
389 | int barrier) | ||
390 | { | ||
391 | unsigned long flags; | ||
392 | uint64_t ct_addr; | ||
393 | uint64_t mapaddr = 0; | ||
394 | struct tioce_kernel *ce_kern; | ||
395 | struct tioce_dmamap *map; | ||
396 | int port; | ||
397 | uint64_t dma_mask; | ||
398 | |||
399 | dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; | ||
400 | |||
401 | /* cards must be able to address at least 31 bits */ | ||
402 | if (dma_mask < 0x7fffffffUL) | ||
403 | return 0; | ||
404 | |||
405 | ct_addr = PHYS_TO_TIODMA(paddr); | ||
406 | |||
407 | /* | ||
408 | * If the device can generate 64 bit addresses, create a D64 map. | ||
409 | * Since this should never fail, bypass the rest of the checks. | ||
410 | */ | ||
411 | if (dma_mask == ~0UL) { | ||
412 | mapaddr = tioce_dma_d64(ct_addr); | ||
413 | goto dma_map_done; | ||
414 | } | ||
415 | |||
416 | pcidev_to_tioce(pdev, NULL, &ce_kern, &port); | ||
417 | |||
418 | spin_lock_irqsave(&ce_kern->ce_lock, flags); | ||
419 | |||
420 | /* | ||
421 | * D64 didn't work ... See if we have an existing map that covers | ||
422 | * this address range. Must account for devices dma_mask here since | ||
423 | * an existing map might have been done in a mode using more pci | ||
424 | * address bits than this device can support. | ||
425 | */ | ||
426 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { | ||
427 | uint64_t last; | ||
428 | |||
429 | last = map->ct_start + map->nbytes - 1; | ||
430 | if (ct_addr >= map->ct_start && | ||
431 | ct_addr + byte_count - 1 <= last && | ||
432 | map->pci_start <= dma_mask) { | ||
433 | map->refcnt++; | ||
434 | mapaddr = map->pci_start + (ct_addr - map->ct_start); | ||
435 | break; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * If we don't have a map yet, and the card can generate 40 | ||
441 | * bit addresses, try the M40/M40S modes. Note these modes do not | ||
442 | * support a barrier bit, so if we need a consistent map these | ||
443 | * won't work. | ||
444 | */ | ||
445 | if (!mapaddr && !barrier && dma_mask >= 0xffffffffffUL) { | ||
446 | /* | ||
447 | * We have two options for 40-bit mappings: 16GB "super" ATEs | ||
448 | * and 64MB "regular" ATEs. We'll try both if needed for a | ||
449 | * given mapping, but which one we try first depends on the | ||
450 | * size. For requests >64MB, try a super page first and fall | ||
451 | * back to regular pages; otherwise, try in the reverse order. | ||
452 | */ | ||
453 | |||
454 | if (byte_count > MB(64)) { | ||
455 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | ||
456 | port, ct_addr, byte_count); | ||
457 | if (!mapaddr) | ||
458 | mapaddr = | ||
459 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | ||
460 | ct_addr, byte_count); | ||
461 | } else { | ||
462 | mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1, | ||
463 | ct_addr, byte_count); | ||
464 | if (!mapaddr) | ||
465 | mapaddr = | ||
466 | tioce_alloc_map(ce_kern, TIOCE_ATE_M40S, | ||
467 | port, ct_addr, byte_count); | ||
468 | } | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * 32-bit direct is the next mode to try | ||
473 | */ | ||
474 | if (!mapaddr && dma_mask >= 0xffffffffUL) | ||
475 | mapaddr = tioce_dma_d32(pdev, ct_addr); | ||
476 | |||
477 | /* | ||
478 | * Last resort, try 32-bit ATE-based map. | ||
479 | */ | ||
480 | if (!mapaddr) | ||
481 | mapaddr = | ||
482 | tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr, | ||
483 | byte_count); | ||
484 | |||
485 | spin_unlock_irqrestore(&ce_kern->ce_lock, flags); | ||
486 | |||
487 | dma_map_done: | ||
488 | if (mapaddr && barrier) | ||
489 | mapaddr = tioce_dma_barrier(mapaddr, 1); | ||
490 | |||
491 | return mapaddr; | ||
492 | } | ||
493 | |||
494 | /** | ||
495 | * tioce_dma - standard pci dma map interface | ||
496 | * @pdev: pci device requesting the map | ||
497 | * @paddr: system physical address to map into pci space | ||
498 | * @byte_count: # bytes to map | ||
499 | * | ||
500 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear | ||
501 | * in the address. | ||
502 | */ | ||
503 | static uint64_t | ||
504 | tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | ||
505 | { | ||
506 | return tioce_do_dma_map(pdev, paddr, byte_count, 0); | ||
507 | } | ||
508 | |||
509 | /** | ||
510 | * tioce_dma_consistent - consistent pci dma map interface | ||
511 | * @pdev: pci device requesting the map | ||
512 | * @paddr: system physical address to map into pci space | ||
513 | * @byte_count: # bytes to map | ||
514 | * | ||
515 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | ||
516 | * in the address. | ||
517 | */ static uint64_t | ||
518 | tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | ||
519 | { | ||
520 | return tioce_do_dma_map(pdev, paddr, byte_count, 1); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * tioce_error_intr_handler - SGI TIO CE error interrupt handler | ||
525 | * @irq: unused | ||
526 | * @arg: pointer to tioce_common struct for the given CE | ||
527 | * @pt: unused | ||
528 | * | ||
529 | * Handle a CE error interrupt. Simply a wrapper around a SAL call which | ||
530 | * defers processing to the SGI prom. | ||
531 | */ static irqreturn_t | ||
532 | tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | ||
533 | { | ||
534 | struct tioce_common *soft = arg; | ||
535 | struct ia64_sal_retval ret_stuff; | ||
536 | ret_stuff.status = 0; | ||
537 | ret_stuff.v0 = 0; | ||
538 | |||
539 | SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | ||
540 | soft->ce_pcibus.bs_persist_segment, | ||
541 | soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0); | ||
542 | |||
543 | return IRQ_HANDLED; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * tioce_kern_init - init kernel structures related to a given TIOCE | ||
548 | * @tioce_common: ptr to a cached tioce_common struct that originated in prom | ||
549 | */ static struct tioce_kernel * | ||
550 | tioce_kern_init(struct tioce_common *tioce_common) | ||
551 | { | ||
552 | int i; | ||
553 | uint32_t tmp; | ||
554 | struct tioce *tioce_mmr; | ||
555 | struct tioce_kernel *tioce_kern; | ||
556 | |||
557 | tioce_kern = kcalloc(1, sizeof(struct tioce_kernel), GFP_KERNEL); | ||
558 | if (!tioce_kern) { | ||
559 | return NULL; | ||
560 | } | ||
561 | |||
562 | tioce_kern->ce_common = tioce_common; | ||
563 | spin_lock_init(&tioce_kern->ce_lock); | ||
564 | INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); | ||
565 | tioce_common->ce_kernel_private = (uint64_t) tioce_kern; | ||
566 | |||
567 | /* | ||
568 | * Determine the secondary bus number of the port2 logical PPB. | ||
569 | * This is used to decide whether a given pci device resides on | ||
570 | * port1 or port2. Note: We don't have enough plumbing set up | ||
571 | * here to use pci_read_config_xxx() so use the raw_pci_ops vector. | ||
572 | */ | ||
573 | |||
574 | raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, | ||
575 | tioce_common->ce_pcibus.bs_persist_busnum, | ||
576 | PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); | ||
577 | tioce_kern->ce_port1_secondary = (uint8_t) tmp; | ||
578 | |||
579 | /* | ||
580 | * Set PMU pagesize to the largest size available, and zero out | ||
581 | * the ATEs. | ||
582 | */ | ||
583 | |||
584 | tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base; | ||
585 | tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK; | ||
586 | tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE; | ||
587 | tioce_kern->ce_ate3240_pagesize = KB(256); | ||
588 | |||
589 | for (i = 0; i < TIOCE_NUM_M40_ATES; i++) { | ||
590 | tioce_kern->ce_ate40_shadow[i] = 0; | ||
591 | tioce_mmr->ce_ure_ate40[i] = 0; | ||
592 | } | ||
593 | |||
594 | for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) { | ||
595 | tioce_kern->ce_ate3240_shadow[i] = 0; | ||
596 | tioce_mmr->ce_ure_ate3240[i] = 0; | ||
597 | } | ||
598 | |||
599 | return tioce_kern; | ||
600 | } | ||
601 | |||
602 | /** | ||
603 | * tioce_force_interrupt - implement altix force_interrupt() backend for CE | ||
604 | * @sn_irq_info: sn asic irq for which an interrupt should be forced | ||
605 | * | ||
606 | * Given an sn_irq_info struct, set the proper bit in ce_adm_force_int to | ||
607 | * force a secondary interrupt to be generated. This is to work around an | ||
608 | * asic issue where there is a small window of opportunity for a legacy device | ||
609 | * interrupt to be lost. | ||
610 | */ | ||
611 | static void | ||
612 | tioce_force_interrupt(struct sn_irq_info *sn_irq_info) | ||
613 | { | ||
614 | struct pcidev_info *pcidev_info; | ||
615 | struct tioce_common *ce_common; | ||
616 | struct tioce *ce_mmr; | ||
617 | uint64_t force_int_val; | ||
618 | |||
619 | if (!sn_irq_info->irq_bridge) | ||
620 | return; | ||
621 | |||
622 | if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_TIOCE) | ||
623 | return; | ||
624 | |||
625 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
626 | if (!pcidev_info) | ||
627 | return; | ||
628 | |||
629 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
630 | ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; | ||
631 | |||
632 | /* | ||
633 | * irq_int_bit is originally set up by prom, and holds the interrupt | ||
634 | * bit shift (not mask) as defined by the bit definitions in the | ||
635 | * ce_adm_int mmr. These shifts are not the same for the | ||
636 | * ce_adm_force_int register, so do an explicit mapping here to make | ||
637 | * things clearer. | ||
638 | */ | ||
639 | |||
640 | switch (sn_irq_info->irq_int_bit) { | ||
641 | case CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT: | ||
642 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT; | ||
643 | break; | ||
644 | case CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT: | ||
645 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT; | ||
646 | break; | ||
647 | case CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT: | ||
648 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT; | ||
649 | break; | ||
650 | case CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT: | ||
651 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT; | ||
652 | break; | ||
653 | case CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT: | ||
654 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT; | ||
655 | break; | ||
656 | case CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT: | ||
657 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT; | ||
658 | break; | ||
659 | case CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT: | ||
660 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT; | ||
661 | break; | ||
662 | case CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT: | ||
663 | force_int_val = 1UL << CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT; | ||
664 | break; | ||
665 | default: | ||
666 | return; | ||
667 | } | ||
668 | ce_mmr->ce_adm_force_int = force_int_val; | ||
669 | } | ||
670 | |||
671 | /** | ||
672 | * tioce_target_interrupt - implement set_irq_affinity for tioce-resident | ||
673 | * functions. Note: only applies to line interrupts, not MSIs. | ||
674 | * | ||
675 | * @sn_irq_info: SN IRQ context | ||
676 | * | ||
677 | * Given an sn_irq_info, set the associated CE device's interrupt destination | ||
678 | * register. Since the interrupt destination registers are on a per-ce-slot | ||
679 | * basis, this will retarget line interrupts for all functions downstream of | ||
680 | * the slot. | ||
681 | */ | ||
682 | static void | ||
683 | tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | ||
684 | { | ||
685 | struct pcidev_info *pcidev_info; | ||
686 | struct tioce_common *ce_common; | ||
687 | struct tioce *ce_mmr; | ||
688 | int bit; | ||
689 | |||
690 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | ||
691 | if (!pcidev_info) | ||
692 | return; | ||
693 | |||
694 | ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info; | ||
695 | ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base; | ||
696 | |||
697 | bit = sn_irq_info->irq_int_bit; | ||
698 | |||
699 | ce_mmr->ce_adm_int_mask |= (1UL << bit); | ||
700 | ce_mmr->ce_adm_int_dest[bit] = | ||
701 | ((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) | | ||
702 | sn_irq_info->irq_xtalkaddr; | ||
703 | ce_mmr->ce_adm_int_mask &= ~(1UL << bit); | ||
704 | |||
705 | tioce_force_interrupt(sn_irq_info); | ||
706 | } | ||
707 | |||
708 | /** | ||
709 | * tioce_bus_fixup - perform final PCI fixup for a TIO CE bus | ||
710 | * @prom_bussoft: Common prom/kernel struct representing the bus | ||
711 | * | ||
712 | * Replicates the tioce_common pointed to by @prom_bussoft in kernel | ||
713 | * space. Allocates and initializes a kernel-only area for a given CE, | ||
714 | * and sets up an irq for handling CE error interrupts. | ||
715 | * | ||
716 | * On successful setup, returns the kernel version of tioce_common back to | ||
717 | * the caller. | ||
718 | */ | ||
719 | static void * | ||
720 | tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) | ||
721 | { | ||
722 | struct tioce_common *tioce_common; | ||
723 | |||
724 | /* | ||
725 | * Allocate kernel bus soft and copy from prom. | ||
726 | */ | ||
727 | |||
728 | tioce_common = kcalloc(1, sizeof(struct tioce_common), GFP_KERNEL); | ||
729 | if (!tioce_common) | ||
730 | return NULL; | ||
731 | |||
732 | memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common)); | ||
733 | tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET; | ||
734 | |||
735 | if (tioce_kern_init(tioce_common) == NULL) { | ||
736 | kfree(tioce_common); | ||
737 | return NULL; | ||
738 | } | ||
739 | |||
740 | if (request_irq(SGI_PCIASIC_ERROR, | ||
741 | tioce_error_intr_handler, | ||
742 | SA_SHIRQ, "TIOCE error", (void *)tioce_common)) | ||
743 | printk(KERN_WARNING | ||
744 | "%s: Unable to get irq %d. " | ||
745 | "Error interrupts won't be routed for " | ||
746 | "TIOCE bus %04x:%02x\n", | ||
747 | __FUNCTION__, SGI_PCIASIC_ERROR, | ||
748 | tioce_common->ce_pcibus.bs_persist_segment, | ||
749 | tioce_common->ce_pcibus.bs_persist_busnum); | ||
750 | |||
751 | return tioce_common; | ||
752 | } | ||
753 | |||
754 | static struct sn_pcibus_provider tioce_pci_interfaces = { | ||
755 | .dma_map = tioce_dma, | ||
756 | .dma_map_consistent = tioce_dma_consistent, | ||
757 | .dma_unmap = tioce_dma_unmap, | ||
758 | .bus_fixup = tioce_bus_fixup, | ||
759 | .force_interrupt = tioce_force_interrupt, | ||
760 | .target_interrupt = tioce_target_interrupt | ||
761 | }; | ||
762 | |||
763 | /** | ||
764 | * tioce_init_provider - init SN PCI provider ops for TIO CE | ||
765 | */ | ||
766 | int | ||
767 | tioce_init_provider(void) | ||
768 | { | ||
769 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCE] = &tioce_pci_interfaces; | ||
770 | return 0; | ||
771 | } | ||
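
The ATE sizing macros at the top of tioce_provider.c are plain bit arithmetic and can be sanity-checked in user space. A self-contained demo with userspace stand-ins for KB() and __ffs() (request values illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define KB(x) ((uint64_t)(x) << 10)
    #define ATE_PAGESHIFT(ps)  (__builtin_ctzll(ps))   /* __ffs stand-in */
    #define ATE_PAGE(x, ps)    ((x) >> ATE_PAGESHIFT(ps))
    #define ATE_NPAGES(start, len, ps) \
            (ATE_PAGE((start) + (len) - 1, ps) - ATE_PAGE(start, ps) + 1)

    int main(void)
    {
            uint64_t ps = KB(256);                  /* ce_ate3240_pagesize */
            uint64_t start = KB(4), len = KB(600);  /* illustrative request */

            /* 256KB pages -> shift 18; 600KB starting 4KB in -> 3 ATEs */
            printf("shift=%d npages=%llu\n", ATE_PAGESHIFT(ps),
                   (unsigned long long)ATE_NPAGES(start, len, ps));
            return 0;
    }
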