Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Kconfig              |  10
-rw-r--r--  arch/ia64/defconfig            |   1
-rw-r--r--  arch/ia64/kernel/Makefile      |   1
-rw-r--r--  arch/ia64/kernel/efi.c         |  46
-rw-r--r--  arch/ia64/kernel/entry.S       |   7
-rw-r--r--  arch/ia64/kernel/err_inject.c  | 293
-rw-r--r--  arch/ia64/kernel/ivt.S         |  19
-rw-r--r--  arch/ia64/kernel/mca_asm.S     |  24
-rw-r--r--  arch/ia64/kernel/patch.c       |  20
-rw-r--r--  arch/ia64/kernel/setup.c       |   7
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S |   7
-rw-r--r--  arch/ia64/mm/init.c            |  11
-rw-r--r--  arch/ia64/mm/ioremap.c         |  78
-rw-r--r--  arch/ia64/pci/pci.c            |   2
14 files changed, 461 insertions, 65 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3b71f97d0b60..e23af4b6ae8c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -439,6 +439,16 @@ config IA64_PALINFO
439 | To use this option, you have to ensure that the "/proc file system | 439 | To use this option, you have to ensure that the "/proc file system |
440 | support" (CONFIG_PROC_FS) is enabled, too. | 440 | support" (CONFIG_PROC_FS) is enabled, too. |
441 | 441 | ||
442 | config IA64_MC_ERR_INJECT | ||
443 | tristate "MC error injection support" | ||
444 | help | ||
445 | Selects whether to build support for MC error injection. With this | ||
446 | support, the kernel provides a sysfs interface for user applications | ||
447 | to call the MC error injection PAL procedure to inject various errors. | ||
448 | This is a useful tool for MCA testing. | ||
449 | |||
450 | If you're unsure, do not select this option. | ||
451 | |||
442 | config SGI_SN | 452 | config SGI_SN |
443 | def_bool y if (IA64_SGI_SN2 || IA64_GENERIC) | 453 | def_bool y if (IA64_SGI_SN2 || IA64_GENERIC) |
444 | 454 | ||
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 153bfdc0182d..90bd9601cdde 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -164,6 +164,7 @@ CONFIG_COMPAT=y
164 | CONFIG_IA64_MCA_RECOVERY=y | 164 | CONFIG_IA64_MCA_RECOVERY=y |
165 | CONFIG_PERFMON=y | 165 | CONFIG_PERFMON=y |
166 | CONFIG_IA64_PALINFO=y | 166 | CONFIG_IA64_PALINFO=y |
167 | # CONFIG_IA64_MC_ERR_INJECT is not set | ||
167 | CONFIG_SGI_SN=y | 168 | CONFIG_SGI_SN=y |
168 | # CONFIG_IA64_ESI is not set | 169 | # CONFIG_IA64_ESI is not set |
169 | 170 | ||
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 098ee605bf5e..33e5a598672d 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
34 | obj-$(CONFIG_AUDIT) += audit.o | 34 | obj-$(CONFIG_AUDIT) += audit.o |
35 | obj-$(CONFIG_PCI_MSI) += msi_ia64.o | 35 | obj-$(CONFIG_PCI_MSI) += msi_ia64.o |
36 | mca_recovery-y += mca_drv.o mca_drv_asm.o | 36 | mca_recovery-y += mca_drv.o mca_drv_asm.o |
37 | obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o | ||
37 | 38 | ||
38 | obj-$(CONFIG_IA64_ESI) += esi.o | 39 | obj-$(CONFIG_IA64_ESI) += esi.o |
39 | ifneq ($(CONFIG_IA64_ESI),) | 40 | ifneq ($(CONFIG_IA64_ESI),) |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f45f91d38cab..78d29b79947d 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -660,6 +660,29 @@ efi_memory_descriptor (unsigned long phys_addr)
660 | return NULL; | 660 | return NULL; |
661 | } | 661 | } |
662 | 662 | ||
663 | static int | ||
664 | efi_memmap_intersects (unsigned long phys_addr, unsigned long size) | ||
665 | { | ||
666 | void *efi_map_start, *efi_map_end, *p; | ||
667 | efi_memory_desc_t *md; | ||
668 | u64 efi_desc_size; | ||
669 | unsigned long end; | ||
670 | |||
671 | efi_map_start = __va(ia64_boot_param->efi_memmap); | ||
672 | efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; | ||
673 | efi_desc_size = ia64_boot_param->efi_memdesc_size; | ||
674 | |||
675 | end = phys_addr + size; | ||
676 | |||
677 | for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { | ||
678 | md = p; | ||
679 | |||
680 | if (md->phys_addr < end && efi_md_end(md) > phys_addr) | ||
681 | return 1; | ||
682 | } | ||
683 | return 0; | ||
684 | } | ||
685 | |||
663 | u32 | 686 | u32 |
664 | efi_mem_type (unsigned long phys_addr) | 687 | efi_mem_type (unsigned long phys_addr) |
665 | { | 688 | { |
@@ -766,11 +789,28 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
766 | int | 789 | int |
767 | valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size) | 790 | valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size) |
768 | { | 791 | { |
792 | unsigned long phys_addr = pfn << PAGE_SHIFT; | ||
793 | u64 attr; | ||
794 | |||
795 | attr = efi_mem_attribute(phys_addr, size); | ||
796 | |||
769 | /* | 797 | /* |
770 | * MMIO regions are often missing from the EFI memory map. | 798 | * /dev/mem mmap uses normal user pages, so we don't need the entire |
771 | * We must allow mmap of them for programs like X, so we | 799 | * granule, but the entire region we're mapping must support the same |
772 | * currently can't do any useful validation. | 800 | * attribute. |
773 | */ | 801 | */ |
802 | if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) | ||
803 | return 1; | ||
804 | |||
805 | /* | ||
806 | * Intel firmware doesn't tell us about all the MMIO regions, so | ||
807 | * in general we have to allow mmap requests. But if EFI *does* | ||
808 | * tell us about anything inside this region, we should deny it. | ||
809 | * The user can always map a smaller region to avoid the overlap. | ||
810 | */ | ||
811 | if (efi_memmap_intersects(phys_addr, size)) | ||
812 | return 0; | ||
813 | |||
774 | return 1; | 814 | return 1; |
775 | } | 815 | } |
776 | 816 | ||
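The two comments in this hunk describe the policy the new code enforces for /dev/mem mappings: a request whose whole range carries a WB or UC attribute is allowed, a request that intersects a region the EFI memory map does describe (without such coverage) is refused, and ranges the firmware says nothing about are still permitted. A minimal user-space sketch of how a caller sees this, assuming /dev/mem access; the physical address and window sizes are placeholders, not values taken from the patch:

/* Hypothetical example: map a physical window through /dev/mem.  With this
 * change the kernel refuses the mmap if the window overlaps an EFI-described
 * region that is not uniformly WB or UC; mapping a smaller window that avoids
 * the overlap is the expected fallback. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t phys = 0x80000000UL;   /* placeholder physical address */
	size_t len = 128 * 1024;           /* arbitrary window size */
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0)
		return 1;

	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED) {
		/* Refused by valid_mmap_phys_addr_range(); retry smaller. */
		len = 64 * 1024;
		p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, phys);
	}
	if (p != MAP_FAILED)
		munmap(p, len);
	close(fd);
	return 0;
}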
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e7873eeae448..55fd2d5471e1 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -767,7 +767,7 @@ ENTRY(ia64_leave_syscall)
767 | ld8.fill r15=[r3] // M0|1 restore r15 | 767 | ld8.fill r15=[r3] // M0|1 restore r15 |
768 | mov b6=r18 // I0 restore b6 | 768 | mov b6=r18 // I0 restore b6 |
769 | 769 | ||
770 | addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A | 770 | LOAD_PHYS_STACK_REG_SIZE(r17) |
771 | mov f9=f0 // F clear f9 | 771 | mov f9=f0 // F clear f9 |
772 | (pKStk) br.cond.dpnt.many skip_rbs_switch // B | 772 | (pKStk) br.cond.dpnt.many skip_rbs_switch // B |
773 | 773 | ||
@@ -775,7 +775,6 @@ ENTRY(ia64_leave_syscall)
775 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition | 775 | shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition |
776 | cover // B add current frame into dirty partition & set cr.ifs | 776 | cover // B add current frame into dirty partition & set cr.ifs |
777 | ;; | 777 | ;; |
778 | (pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8 | ||
779 | mov r19=ar.bsp // M2 get new backing store pointer | 778 | mov r19=ar.bsp // M2 get new backing store pointer |
780 | mov f10=f0 // F clear f10 | 779 | mov f10=f0 // F clear f10 |
781 | 780 | ||
@@ -953,9 +952,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
953 | shr.u r18=r19,16 // get byte size of existing "dirty" partition | 952 | shr.u r18=r19,16 // get byte size of existing "dirty" partition |
954 | ;; | 953 | ;; |
955 | mov r16=ar.bsp // get existing backing store pointer | 954 | mov r16=ar.bsp // get existing backing store pointer |
956 | addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 | 955 | LOAD_PHYS_STACK_REG_SIZE(r17) |
957 | ;; | ||
958 | ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 | ||
959 | (pKStk) br.cond.dpnt skip_rbs_switch | 956 | (pKStk) br.cond.dpnt skip_rbs_switch |
960 | 957 | ||
961 | /* | 958 | /* |
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
new file mode 100644
index 000000000000..d3e9f33e8bdd
--- /dev/null
+++ b/arch/ia64/kernel/err_inject.c
@@ -0,0 +1,293 @@
1 | /* | ||
2 | * err_inject.c - | ||
3 | * 1.) Inject errors into a processor. | ||
4 | * 2.) Query error injection capabilities. | ||
5 | * This driver, along with user space code, can act as an error | ||
6 | * injection tool. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
16 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | * | ||
23 | * Written by: Fenghua Yu <fenghua.yu@intel.com>, Intel Corporation | ||
24 | * Copyright (C) 2006, Intel Corp. All rights reserved. | ||
25 | * | ||
26 | */ | ||
27 | #include <linux/sysdev.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/cpu.h> | ||
31 | #include <linux/module.h> | ||
32 | |||
33 | #define ERR_INJ_DEBUG | ||
34 | |||
35 | #define ERR_DATA_BUFFER_SIZE 3 // three 8-byte values | ||
36 | |||
37 | #define define_one_ro(name) \ | ||
38 | static SYSDEV_ATTR(name, 0444, show_##name, NULL) | ||
39 | |||
40 | #define define_one_rw(name) \ | ||
41 | static SYSDEV_ATTR(name, 0644, show_##name, store_##name) | ||
42 | |||
43 | static u64 call_start[NR_CPUS]; | ||
44 | static u64 phys_addr[NR_CPUS]; | ||
45 | static u64 err_type_info[NR_CPUS]; | ||
46 | static u64 err_struct_info[NR_CPUS]; | ||
47 | static struct { | ||
48 | u64 data1; | ||
49 | u64 data2; | ||
50 | u64 data3; | ||
51 | } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS]; | ||
52 | static s64 status[NR_CPUS]; | ||
53 | static u64 capabilities[NR_CPUS]; | ||
54 | static u64 resources[NR_CPUS]; | ||
55 | |||
56 | #define show(name) \ | ||
57 | static ssize_t \ | ||
58 | show_##name(struct sys_device *dev, char *buf) \ | ||
59 | { \ | ||
60 | u32 cpu=dev->id; \ | ||
61 | return sprintf(buf, "%lx\n", name[cpu]); \ | ||
62 | } | ||
63 | |||
64 | #define store(name) \ | ||
65 | static ssize_t \ | ||
66 | store_##name(struct sys_device *dev, const char *buf, size_t size) \ | ||
67 | { \ | ||
68 | unsigned int cpu=dev->id; \ | ||
69 | name[cpu] = simple_strtoull(buf, NULL, 16); \ | ||
70 | return size; \ | ||
71 | } | ||
72 | |||
73 | show(call_start) | ||
74 | |||
75 | /* It's the user's responsibility to call the PAL procedure on a specific | ||
76 | * processor. The cpu number in the driver is only used for storing data. | ||
77 | */ | ||
78 | static ssize_t | ||
79 | store_call_start(struct sys_device *dev, const char *buf, size_t size) | ||
80 | { | ||
81 | unsigned int cpu=dev->id; | ||
82 | unsigned long call_start = simple_strtoull(buf, NULL, 16); | ||
83 | |||
84 | #ifdef ERR_INJ_DEBUG | ||
85 | printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); | ||
86 | printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); | ||
87 | printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); | ||
88 | printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n", | ||
89 | err_data_buffer[cpu].data1, | ||
90 | err_data_buffer[cpu].data2, | ||
91 | err_data_buffer[cpu].data3); | ||
92 | #endif | ||
93 | switch (call_start) { | ||
94 | case 0: /* Do nothing. */ | ||
95 | break; | ||
96 | case 1: /* Call pal_mc_error_inject in physical mode. */ | ||
97 | status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu], | ||
98 | err_struct_info[cpu], | ||
99 | ia64_tpa(&err_data_buffer[cpu]), | ||
100 | &capabilities[cpu], | ||
101 | &resources[cpu]); | ||
102 | break; | ||
103 | case 2: /* Call pal_mc_error_inject in virtual mode. */ | ||
104 | status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu], | ||
105 | err_struct_info[cpu], | ||
106 | ia64_tpa(&err_data_buffer[cpu]), | ||
107 | &capabilities[cpu], | ||
108 | &resources[cpu]); | ||
109 | break; | ||
110 | default: | ||
111 | status[cpu] = -EINVAL; | ||
112 | break; | ||
113 | } | ||
114 | |||
115 | #ifdef ERR_INJ_DEBUG | ||
116 | printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); | ||
117 | printk(KERN_DEBUG "capapbilities=%lx,\n", capabilities[cpu]); | ||
118 | printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); | ||
119 | #endif | ||
120 | return size; | ||
121 | } | ||
122 | |||
123 | show(err_type_info) | ||
124 | store(err_type_info) | ||
125 | |||
126 | static ssize_t | ||
127 | show_virtual_to_phys(struct sys_device *dev, char *buf) | ||
128 | { | ||
129 | unsigned int cpu=dev->id; | ||
130 | return sprintf(buf, "%lx\n", phys_addr[cpu]); | ||
131 | } | ||
132 | |||
133 | static ssize_t | ||
134 | store_virtual_to_phys(struct sys_device *dev, const char *buf, size_t size) | ||
135 | { | ||
136 | unsigned int cpu=dev->id; | ||
137 | u64 virt_addr=simple_strtoull(buf, NULL, 16); | ||
138 | int ret; | ||
139 | |||
140 | ret = get_user_pages(current, current->mm, virt_addr, | ||
141 | 1, VM_READ, 0, NULL, NULL); | ||
142 | if (ret<=0) { | ||
143 | #ifdef ERR_INJ_DEBUG | ||
144 | printk("Virtual address %lx is not existing.\n",virt_addr); | ||
145 | #endif | ||
146 | return -EINVAL; | ||
147 | } | ||
148 | |||
149 | phys_addr[cpu] = ia64_tpa(virt_addr); | ||
150 | return size; | ||
151 | } | ||
152 | |||
153 | show(err_struct_info) | ||
154 | store(err_struct_info) | ||
155 | |||
156 | static ssize_t | ||
157 | show_err_data_buffer(struct sys_device *dev, char *buf) | ||
158 | { | ||
159 | unsigned int cpu=dev->id; | ||
160 | |||
161 | return sprintf(buf, "%lx, %lx, %lx\n", | ||
162 | err_data_buffer[cpu].data1, | ||
163 | err_data_buffer[cpu].data2, | ||
164 | err_data_buffer[cpu].data3); | ||
165 | } | ||
166 | |||
167 | static ssize_t | ||
168 | store_err_data_buffer(struct sys_device *dev, const char *buf, size_t size) | ||
169 | { | ||
170 | unsigned int cpu=dev->id; | ||
171 | int ret; | ||
172 | |||
173 | #ifdef ERR_INJ_DEBUG | ||
174 | printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", | ||
175 | err_data_buffer[cpu].data1, | ||
176 | err_data_buffer[cpu].data2, | ||
177 | err_data_buffer[cpu].data3, | ||
178 | cpu); | ||
179 | #endif | ||
180 | ret=sscanf(buf, "%lx, %lx, %lx", | ||
181 | &err_data_buffer[cpu].data1, | ||
182 | &err_data_buffer[cpu].data2, | ||
183 | &err_data_buffer[cpu].data3); | ||
184 | if (ret!=ERR_DATA_BUFFER_SIZE) | ||
185 | return -EINVAL; | ||
186 | |||
187 | return size; | ||
188 | } | ||
189 | |||
190 | show(status) | ||
191 | show(capabilities) | ||
192 | show(resources) | ||
193 | |||
194 | define_one_rw(call_start); | ||
195 | define_one_rw(err_type_info); | ||
196 | define_one_rw(err_struct_info); | ||
197 | define_one_rw(err_data_buffer); | ||
198 | define_one_rw(virtual_to_phys); | ||
199 | define_one_ro(status); | ||
200 | define_one_ro(capabilities); | ||
201 | define_one_ro(resources); | ||
202 | |||
203 | static struct attribute *default_attrs[] = { | ||
204 | &attr_call_start.attr, | ||
205 | &attr_virtual_to_phys.attr, | ||
206 | &attr_err_type_info.attr, | ||
207 | &attr_err_struct_info.attr, | ||
208 | &attr_err_data_buffer.attr, | ||
209 | &attr_status.attr, | ||
210 | &attr_capabilities.attr, | ||
211 | &attr_resources.attr, | ||
212 | NULL | ||
213 | }; | ||
214 | |||
215 | static struct attribute_group err_inject_attr_group = { | ||
216 | .attrs = default_attrs, | ||
217 | .name = "err_inject" | ||
218 | }; | ||
219 | /* Add/Remove err_inject interface for CPU device */ | ||
220 | static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev) | ||
221 | { | ||
222 | return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group); | ||
223 | } | ||
224 | |||
225 | static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev) | ||
226 | { | ||
227 | sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); | ||
228 | return 0; | ||
229 | } | ||
230 | static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, | ||
231 | unsigned long action, void *hcpu) | ||
232 | { | ||
233 | unsigned int cpu = (unsigned long)hcpu; | ||
234 | struct sys_device *sys_dev; | ||
235 | |||
236 | sys_dev = get_cpu_sysdev(cpu); | ||
237 | switch (action) { | ||
238 | case CPU_ONLINE: | ||
239 | err_inject_add_dev(sys_dev); | ||
240 | break; | ||
241 | case CPU_DEAD: | ||
242 | err_inject_remove_dev(sys_dev); | ||
243 | break; | ||
244 | } | ||
245 | |||
246 | return NOTIFY_OK; | ||
247 | } | ||
248 | |||
249 | static struct notifier_block __cpuinitdata err_inject_cpu_notifier = | ||
250 | { | ||
251 | .notifier_call = err_inject_cpu_callback, | ||
252 | }; | ||
253 | |||
254 | static int __init | ||
255 | err_inject_init(void) | ||
256 | { | ||
257 | int i; | ||
258 | |||
259 | #ifdef ERR_INJ_DEBUG | ||
260 | printk(KERN_INFO "Enter error injection driver.\n"); | ||
261 | #endif | ||
262 | for_each_online_cpu(i) { | ||
263 | err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE, | ||
264 | (void *)(long)i); | ||
265 | } | ||
266 | |||
267 | register_hotcpu_notifier(&err_inject_cpu_notifier); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static void __exit | ||
273 | err_inject_exit(void) | ||
274 | { | ||
275 | int i; | ||
276 | struct sys_device *sys_dev; | ||
277 | |||
278 | #ifdef ERR_INJ_DEBUG | ||
279 | printk(KERN_INFO "Exit error injection driver.\n"); | ||
280 | #endif | ||
281 | for_each_online_cpu(i) { | ||
282 | sys_dev = get_cpu_sysdev(i); | ||
283 | sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); | ||
284 | } | ||
285 | unregister_hotcpu_notifier(&err_inject_cpu_notifier); | ||
286 | } | ||
287 | |||
288 | module_init(err_inject_init); | ||
289 | module_exit(err_inject_exit); | ||
290 | |||
291 | MODULE_AUTHOR("Fenghua Yu <fenghua.yu@intel.com>"); | ||
292 | MODULE_DESCRIPTION("MC error injection kernel sysfs interface"); | ||
293 | MODULE_LICENSE("GPL"); | ||
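Since the attribute group above is registered against each CPU's sysdev, the interface should appear as /sys/devices/system/cpu/cpuN/err_inject/ once the driver is loaded. A rough user-space sketch of the intended flow follows; the attribute names and base path are taken from the code above, but the values written to err_type_info and err_struct_info are placeholders (their real encodings come from the PAL specification, not from this patch):

/* Hedged sketch: exercise the err_inject sysfs attributes for cpu0.
 * The store handlers parse values as hex (simple_strtoull(..., 16)). */
#include <stdio.h>
#include <stdlib.h>

static void write_attr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/err_inject/%s", attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	write_attr("err_type_info", "1");    /* placeholder error type */
	write_attr("err_struct_info", "0");  /* placeholder structure info */
	write_attr("call_start", "1");       /* 1 = physical mode, 2 = virtual mode */
	/* Read back status, capabilities and resources to see the PAL result. */
	return 0;
}

As the driver comment notes, the cpu number only selects where the data is stored; the caller is responsible for actually running on that CPU (for example via sched_setaffinity()) before writing call_start.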
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 6b7fcbd3f6f1..34f44d8be00d 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -374,6 +374,7 @@ ENTRY(alt_dtlb_miss)
374 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 374 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
375 | mov r21=cr.ipsr | 375 | mov r21=cr.ipsr |
376 | mov r31=pr | 376 | mov r31=pr |
377 | mov r24=PERCPU_ADDR | ||
377 | ;; | 378 | ;; |
378 | #ifdef CONFIG_DISABLE_VHPT | 379 | #ifdef CONFIG_DISABLE_VHPT |
379 | shr.u r22=r16,61 // get the region number into r21 | 380 | shr.u r22=r16,61 // get the region number into r21 |
@@ -386,22 +387,30 @@ ENTRY(alt_dtlb_miss)
386 | (p8) mov r29=b0 // save b0 | 387 | (p8) mov r29=b0 // save b0 |
387 | (p8) br.cond.dptk dtlb_fault | 388 | (p8) br.cond.dptk dtlb_fault |
388 | #endif | 389 | #endif |
390 | cmp.ge p10,p11=r16,r24 // access to per_cpu_data? | ||
391 | tbit.z p12,p0=r16,61 // access to region 6? | ||
392 | mov r25=PERCPU_PAGE_SHIFT << 2 | ||
393 | mov r26=PERCPU_PAGE_SIZE | ||
394 | nop.m 0 | ||
395 | nop.b 0 | ||
396 | ;; | ||
397 | (p10) mov r19=IA64_KR(PER_CPU_DATA) | ||
398 | (p11) and r19=r19,r16 // clear non-ppn fields | ||
389 | extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl | 399 | extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl |
390 | and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field | 400 | and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field |
391 | tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? | 401 | tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? |
392 | shr.u r18=r16,57 // move address bit 61 to bit 4 | ||
393 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | ||
394 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? | 402 | tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? |
395 | ;; | 403 | ;; |
396 | andcm r18=0x10,r18 // bit 4=~address-bit(61) | 404 | (p10) sub r19=r19,r26 |
405 | (p10) mov cr.itir=r25 | ||
397 | cmp.ne p8,p0=r0,r23 | 406 | cmp.ne p8,p0=r0,r23 |
398 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field | 407 | (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field |
408 | (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr | ||
399 | (p8) br.cond.spnt page_fault | 409 | (p8) br.cond.spnt page_fault |
400 | 410 | ||
401 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 | 411 | dep r21=-1,r21,IA64_PSR_ED_BIT,1 |
402 | or r19=r19,r17 // insert PTE control bits into r19 | ||
403 | ;; | 412 | ;; |
404 | or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 | 413 | or r19=r19,r17 // insert PTE control bits into r19 |
405 | (p6) mov cr.ipsr=r21 | 414 | (p6) mov cr.ipsr=r21 |
406 | ;; | 415 | ;; |
407 | (p7) itc.d r19 // insert the TLB entry | 416 | (p7) itc.d r19 // insert the TLB entry |
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index c6b607c00dee..8c9c26aa6ae0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -101,14 +101,6 @@ ia64_do_tlb_purge:
101 | ;; | 101 | ;; |
102 | srlz.d | 102 | srlz.d |
103 | ;; | 103 | ;; |
104 | // 2. Purge DTR for PERCPU data. | ||
105 | movl r16=PERCPU_ADDR | ||
106 | mov r18=PERCPU_PAGE_SHIFT<<2 | ||
107 | ;; | ||
108 | ptr.d r16,r18 | ||
109 | ;; | ||
110 | srlz.d | ||
111 | ;; | ||
112 | // 3. Purge ITR for PAL code. | 104 | // 3. Purge ITR for PAL code. |
113 | GET_THIS_PADDR(r2, ia64_mca_pal_base) | 105 | GET_THIS_PADDR(r2, ia64_mca_pal_base) |
114 | ;; | 106 | ;; |
@@ -196,22 +188,6 @@ ia64_reload_tr:
196 | srlz.i | 188 | srlz.i |
197 | srlz.d | 189 | srlz.d |
198 | ;; | 190 | ;; |
199 | // 2. Reload DTR register for PERCPU data. | ||
200 | GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte) | ||
201 | ;; | ||
202 | movl r16=PERCPU_ADDR // vaddr | ||
203 | movl r18=PERCPU_PAGE_SHIFT<<2 | ||
204 | ;; | ||
205 | mov cr.itir=r18 | ||
206 | mov cr.ifa=r16 | ||
207 | ;; | ||
208 | ld8 r18=[r2] // load per-CPU PTE | ||
209 | mov r16=IA64_TR_PERCPU_DATA; | ||
210 | ;; | ||
211 | itr.d dtr[r16]=r18 | ||
212 | ;; | ||
213 | srlz.d | ||
214 | ;; | ||
215 | // 3. Reload ITR for PAL code. | 191 | // 3. Reload ITR for PAL code. |
216 | GET_THIS_PADDR(r2, ia64_mca_pal_pte) | 192 | GET_THIS_PADDR(r2, ia64_mca_pal_pte) |
217 | ;; | 193 | ;; |
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index bc11bb096f58..e796e29f8e15 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -195,3 +195,23 @@ ia64_patch_gate (void)
195 | ia64_patch_vtop(START(vtop), END(vtop)); | 195 | ia64_patch_vtop(START(vtop), END(vtop)); |
196 | ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); | 196 | ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); |
197 | } | 197 | } |
198 | |||
199 | void ia64_patch_phys_stack_reg(unsigned long val) | ||
200 | { | ||
201 | s32 * offp = (s32 *) __start___phys_stack_reg_patchlist; | ||
202 | s32 * end = (s32 *) __end___phys_stack_reg_patchlist; | ||
203 | u64 ip, mask, imm; | ||
204 | |||
205 | /* see instruction format A4: adds r1 = imm13, r3 */ | ||
206 | mask = (0x3fUL << 27) | (0x7f << 13); | ||
207 | imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13; | ||
208 | |||
209 | while (offp < end) { | ||
210 | ip = (u64) offp + *offp; | ||
211 | ia64_patch(ip, mask, imm); | ||
212 | ia64_fc(ip); | ||
213 | ++offp; | ||
214 | } | ||
215 | ia64_sync_i(); | ||
216 | ia64_srlz_i(); | ||
217 | } | ||
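For reference, the mask and immediate built above place the low 7 bits of the value in the imm7b field (bits 13-19 of the instruction slot) and the next 6 bits in imm6d (bits 27-32), matching the A4 "adds r1 = imm13, r3" form noted in the comment. Below is a standalone sketch of that encoding with a round-trip check; the helper names are mine, and the sample value mirrors the 96*8+8 default used in cpu_init() further down:

/* Illustration only: encode/decode the 13-bit immediate the same way
 * ia64_patch_phys_stack_reg() builds its mask and imm words. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t encode_adds_imm13(uint64_t val)
{
	return (((val >> 7) & 0x3f) << 27) | ((val & 0x7f) << 13);
}

static uint64_t decode_adds_imm13(uint64_t slot)
{
	return (((slot >> 27) & 0x3f) << 7) | ((slot >> 13) & 0x7f);
}

int main(void)
{
	const uint64_t mask = (0x3fUL << 27) | (0x7fUL << 13);
	uint64_t val = 96 * 8 + 8;          /* default phys stacked size + 8 */
	uint64_t imm = encode_adds_imm13(val);

	assert((imm & ~mask) == 0);         /* only the two immediate fields are set */
	assert(decode_adds_imm13(imm) == val);
	printf("val=%lu -> imm=%#lx\n", (unsigned long)val, (unsigned long)imm);
	return 0;
}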
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index dc7dd7648ec5..6e19da122ae3 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -75,7 +75,6 @@ extern void ia64_setup_printk_clock(void);
75 | 75 | ||
76 | DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 76 | DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); |
77 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); | 77 | DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); |
78 | DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8); | ||
79 | unsigned long ia64_cycles_per_usec; | 78 | unsigned long ia64_cycles_per_usec; |
80 | struct ia64_boot_param *ia64_boot_param; | 79 | struct ia64_boot_param *ia64_boot_param; |
81 | struct screen_info screen_info; | 80 | struct screen_info screen_info; |
@@ -869,6 +868,7 @@ void __cpuinit
869 | cpu_init (void) | 868 | cpu_init (void) |
870 | { | 869 | { |
871 | extern void __cpuinit ia64_mmu_init (void *); | 870 | extern void __cpuinit ia64_mmu_init (void *); |
871 | static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; | ||
872 | unsigned long num_phys_stacked; | 872 | unsigned long num_phys_stacked; |
873 | pal_vm_info_2_u_t vmi; | 873 | pal_vm_info_2_u_t vmi; |
874 | unsigned int max_ctx; | 874 | unsigned int max_ctx; |
@@ -982,7 +982,10 @@ cpu_init (void)
982 | num_phys_stacked = 96; | 982 | num_phys_stacked = 96; |
983 | } | 983 | } |
984 | /* size of physical stacked register partition plus 8 bytes: */ | 984 | /* size of physical stacked register partition plus 8 bytes: */ |
985 | __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8; | 985 | if (num_phys_stacked > max_num_phys_stacked) { |
986 | ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); | ||
987 | max_num_phys_stacked = num_phys_stacked; | ||
988 | } | ||
986 | platform_cpu_init(); | 989 | platform_cpu_init(); |
987 | pm_idle = default_idle; | 990 | pm_idle = default_idle; |
988 | } | 991 | } |
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 25dd55e4db24..692382642118 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -78,6 +78,13 @@ SECTIONS
78 | __stop___mca_table = .; | 78 | __stop___mca_table = .; |
79 | } | 79 | } |
80 | 80 | ||
81 | .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET) | ||
82 | { | ||
83 | __start___phys_stack_reg_patchlist = .; | ||
84 | *(.data.patch.phys_stack_reg) | ||
85 | __end___phys_stack_reg_patchlist = .; | ||
86 | } | ||
87 | |||
81 | /* Global data */ | 88 | /* Global data */ |
82 | _data = .; | 89 | _data = .; |
83 | 90 | ||
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2da841110727..cffb1e8325e8 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -355,7 +355,7 @@ setup_gate (void)
355 | void __devinit | 355 | void __devinit |
356 | ia64_mmu_init (void *my_cpu_data) | 356 | ia64_mmu_init (void *my_cpu_data) |
357 | { | 357 | { |
358 | unsigned long psr, pta, impl_va_bits; | 358 | unsigned long pta, impl_va_bits; |
359 | extern void __devinit tlb_init (void); | 359 | extern void __devinit tlb_init (void); |
360 | 360 | ||
361 | #ifdef CONFIG_DISABLE_VHPT | 361 | #ifdef CONFIG_DISABLE_VHPT |
@@ -364,15 +364,6 @@ ia64_mmu_init (void *my_cpu_data)
364 | # define VHPT_ENABLE_BIT 1 | 364 | # define VHPT_ENABLE_BIT 1 |
365 | #endif | 365 | #endif |
366 | 366 | ||
367 | /* Pin mapping for percpu area into TLB */ | ||
368 | psr = ia64_clear_ic(); | ||
369 | ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR, | ||
370 | pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)), | ||
371 | PERCPU_PAGE_SHIFT); | ||
372 | |||
373 | ia64_set_psr(psr); | ||
374 | ia64_srlz_i(); | ||
375 | |||
376 | /* | 367 | /* |
377 | * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped | 368 | * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped |
378 | * address space. The IA-64 architecture guarantees that at least 50 bits of | 369 | * address space. The IA-64 architecture guarantees that at least 50 bits of |
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64e..2a140627dfd6 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
1 | /* | 1 | /* |
2 | * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. | 2 | * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P. |
3 | * Bjorn Helgaas <bjorn.helgaas@hp.com> | 3 | * Bjorn Helgaas <bjorn.helgaas@hp.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
@@ -10,51 +10,101 @@
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/efi.h> | 12 | #include <linux/efi.h> |
13 | #include <linux/io.h> | ||
14 | #include <linux/vmalloc.h> | ||
13 | #include <asm/io.h> | 15 | #include <asm/io.h> |
14 | #include <asm/meminit.h> | 16 | #include <asm/meminit.h> |
15 | 17 | ||
16 | static inline void __iomem * | 18 | static inline void __iomem * |
17 | __ioremap (unsigned long offset, unsigned long size) | 19 | __ioremap (unsigned long phys_addr) |
18 | { | 20 | { |
19 | return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset); | 21 | return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); |
20 | } | 22 | } |
21 | 23 | ||
22 | void __iomem * | 24 | void __iomem * |
23 | ioremap (unsigned long offset, unsigned long size) | 25 | ioremap (unsigned long phys_addr, unsigned long size) |
24 | { | 26 | { |
27 | void __iomem *addr; | ||
28 | struct vm_struct *area; | ||
29 | unsigned long offset; | ||
30 | pgprot_t prot; | ||
25 | u64 attr; | 31 | u64 attr; |
26 | unsigned long gran_base, gran_size; | 32 | unsigned long gran_base, gran_size; |
33 | unsigned long page_base; | ||
27 | 34 | ||
28 | /* | 35 | /* |
29 | * For things in kern_memmap, we must use the same attribute | 36 | * For things in kern_memmap, we must use the same attribute |
30 | * as the rest of the kernel. For more details, see | 37 | * as the rest of the kernel. For more details, see |
31 | * Documentation/ia64/aliasing.txt. | 38 | * Documentation/ia64/aliasing.txt. |
32 | */ | 39 | */ |
33 | attr = kern_mem_attribute(offset, size); | 40 | attr = kern_mem_attribute(phys_addr, size); |
34 | if (attr & EFI_MEMORY_WB) | 41 | if (attr & EFI_MEMORY_WB) |
35 | return (void __iomem *) phys_to_virt(offset); | 42 | return (void __iomem *) phys_to_virt(phys_addr); |
36 | else if (attr & EFI_MEMORY_UC) | 43 | else if (attr & EFI_MEMORY_UC) |
37 | return __ioremap(offset, size); | 44 | return __ioremap(phys_addr); |
38 | 45 | ||
39 | /* | 46 | /* |
40 | * Some chipsets don't support UC access to memory. If | 47 | * Some chipsets don't support UC access to memory. If |
41 | * WB is supported for the whole granule, we prefer that. | 48 | * WB is supported for the whole granule, we prefer that. |
42 | */ | 49 | */ |
43 | gran_base = GRANULEROUNDDOWN(offset); | 50 | gran_base = GRANULEROUNDDOWN(phys_addr); |
44 | gran_size = GRANULEROUNDUP(offset + size) - gran_base; | 51 | gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base; |
45 | if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) | 52 | if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) |
46 | return (void __iomem *) phys_to_virt(offset); | 53 | return (void __iomem *) phys_to_virt(phys_addr); |
47 | 54 | ||
48 | return __ioremap(offset, size); | 55 | /* |
56 | * WB is not supported for the whole granule, so we can't use | ||
57 | * the region 7 identity mapping. If we can safely cover the | ||
58 | * area with kernel page table mappings, we can use those | ||
59 | * instead. | ||
60 | */ | ||
61 | page_base = phys_addr & PAGE_MASK; | ||
62 | size = PAGE_ALIGN(phys_addr + size) - page_base; | ||
63 | if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) { | ||
64 | prot = PAGE_KERNEL; | ||
65 | |||
66 | /* | ||
67 | * Mappings have to be page-aligned | ||
68 | */ | ||
69 | offset = phys_addr & ~PAGE_MASK; | ||
70 | phys_addr &= PAGE_MASK; | ||
71 | |||
72 | /* | ||
73 | * Ok, go for it.. | ||
74 | */ | ||
75 | area = get_vm_area(size, VM_IOREMAP); | ||
76 | if (!area) | ||
77 | return NULL; | ||
78 | |||
79 | area->phys_addr = phys_addr; | ||
80 | addr = (void __iomem *) area->addr; | ||
81 | if (ioremap_page_range((unsigned long) addr, | ||
82 | (unsigned long) addr + size, phys_addr, prot)) { | ||
83 | vunmap((void __force *) addr); | ||
84 | return NULL; | ||
85 | } | ||
86 | |||
87 | return (void __iomem *) (offset + (char __iomem *)addr); | ||
88 | } | ||
89 | |||
90 | return __ioremap(phys_addr); | ||
49 | } | 91 | } |
50 | EXPORT_SYMBOL(ioremap); | 92 | EXPORT_SYMBOL(ioremap); |
51 | 93 | ||
52 | void __iomem * | 94 | void __iomem * |
53 | ioremap_nocache (unsigned long offset, unsigned long size) | 95 | ioremap_nocache (unsigned long phys_addr, unsigned long size) |
54 | { | 96 | { |
55 | if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB) | 97 | if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) |
56 | return NULL; | 98 | return NULL; |
57 | 99 | ||
58 | return __ioremap(offset, size); | 100 | return __ioremap(phys_addr); |
59 | } | 101 | } |
60 | EXPORT_SYMBOL(ioremap_nocache); | 102 | EXPORT_SYMBOL(ioremap_nocache); |
103 | |||
104 | void | ||
105 | iounmap (volatile void __iomem *addr) | ||
106 | { | ||
107 | if (REGION_NUMBER(addr) == RGN_GATE) | ||
108 | vunmap((void *) ((unsigned long) addr & PAGE_MASK)); | ||
109 | } | ||
110 | EXPORT_SYMBOL(iounmap); | ||
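A short driver-style sketch of how the reworked ioremap()/iounmap() pair is meant to be used: when the chipset cannot honour a UC identity mapping but EFI reports WB for the pages, ioremap() now hands back a page-table (vmap) based mapping, and the new iounmap() tears that down while leaving identity mappings alone. The device base address, size and module names below are placeholders, not something defined by this patch:

/* Hypothetical consumer of the new mapping path; for illustration only. */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>

#define EXAMPLE_MMIO_BASE	0x90000000UL	/* placeholder physical address */
#define EXAMPLE_MMIO_SIZE	0x1000

static void __iomem *example_regs;

static int __init example_init(void)
{
	example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;
	(void) readl(example_regs);	/* access the device through the mapping */
	return 0;
}

static void __exit example_exit(void)
{
	/* Safe in both cases: only vmap-based mappings are actually unmapped. */
	iounmap(example_regs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");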
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 0e83f3b419b5..9f635896d252 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -659,8 +659,6 @@ pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
659 | return -EINVAL; | 659 | return -EINVAL; |
660 | prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, | 660 | prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, |
661 | vma->vm_page_prot); | 661 | vma->vm_page_prot); |
662 | if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot))) | ||
663 | return -EINVAL; | ||
664 | 662 | ||
665 | addr = pci_get_legacy_mem(bus); | 663 | addr = pci_get_legacy_mem(bus); |
666 | if (IS_ERR(addr)) | 664 | if (IS_ERR(addr)) |