Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  14
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c     |   4
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c      |   2
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c        |  13
-rw-r--r--  arch/ia64/ia32/ia32_entry.S          |   2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c            |  68
-rw-r--r--  arch/ia64/kernel/acpi.c              | 200
-rw-r--r--  arch/ia64/kernel/crash.c             |  20
-rw-r--r--  arch/ia64/kernel/crash_dump.c        |   3
-rw-r--r--  arch/ia64/kernel/efi.c               |   5
-rw-r--r--  arch/ia64/kernel/entry.S             |   2
-rw-r--r--  arch/ia64/kernel/iosapic.c           |   5
-rw-r--r--  arch/ia64/kernel/irq_ia64.c          |   2
-rw-r--r--  arch/ia64/kernel/machine_kexec.c     |  15
-rw-r--r--  arch/ia64/kernel/msi_ia64.c          |  19
-rw-r--r--  arch/ia64/kernel/perfmon.c           |  67
-rw-r--r--  arch/ia64/kernel/process.c           |  16
-rw-r--r--  arch/ia64/kernel/ptrace.c            |  14
-rw-r--r--  arch/ia64/kernel/sal.c               |   3
-rw-r--r--  arch/ia64/kernel/salinfo.c           |   4
-rw-r--r--  arch/ia64/kernel/setup.c             |  35
-rw-r--r--  arch/ia64/kernel/smp.c               |   4
-rw-r--r--  arch/ia64/kernel/traps.c             |  30
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S       |   3
-rw-r--r--  arch/ia64/mm/contig.c                |  80
-rw-r--r--  arch/ia64/mm/discontig.c             |  52
-rw-r--r--  arch/ia64/mm/init.c                  |  46
-rw-r--r--  arch/ia64/sn/kernel/huberror.c       |  16
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c   | 314
-rw-r--r--  arch/ia64/sn/kernel/io_common.c      |  90
-rw-r--r--  arch/ia64/sn/kernel/io_init.c        |  54
-rw-r--r--  arch/ia64/sn/kernel/iomv.c           |   5
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c         |  20
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c    |   2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c  |   2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c | 105
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       |  88
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c |   6
38 files changed, 862 insertions(+), 568 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe291b9b..d51f0f11f7f9 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
 
 config IA64
 	bool
+	select PCI if (!IA64_HP_SIM)
+	select ACPI if (!IA64_HP_SIM)
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -20,15 +22,19 @@ config IA64
 
 config 64BIT
 	bool
+	select ATA_NONSTANDARD if ATA
 	default y
 
+config ZONE_DMA
+	def_bool y
+	depends on !IA64_SGI_SN2
+
 config MMU
 	bool
 	default y
 
 config SWIOTLB
 	bool
-	default y
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
@@ -84,10 +90,9 @@ choice
 
 config IA64_GENERIC
 	bool "generic"
-	select ACPI
-	select PCI
 	select NUMA
 	select ACPI_NUMA
+	select SWIOTLB
 	help
 	  This selects the system type of your hardware. A "generic" kernel
 	  will run on any supported IA-64 system. However, if you configure
@@ -104,6 +109,7 @@ config IA64_GENERIC
 
 config IA64_DIG
 	bool "DIG-compliant"
+	select SWIOTLB
 
 config IA64_HP_ZX1
 	bool "HP-zx1/sx1000"
@@ -113,6 +119,7 @@ config IA64_HP_ZX1
 
 config IA64_HP_ZX1_SWIOTLB
 	bool "HP-zx1/sx1000 with software I/O TLB"
+	select SWIOTLB
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems even when they
 	  have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +138,7 @@ config IA64_SGI_SN2
 
 config IA64_HP_SIM
 	bool "Ski-simulator"
+	select SWIOTLB
 
 endchoice
 
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637507be..2153bcacbe6c 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
+EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_single_for_device);
+EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
+EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index ce49fe3a3b56..c1dca226b479 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1881,7 +1881,7 @@ ioc_open(struct inode *inode, struct file *file)
 	return seq_open(file, &ioc_seq_ops);
 }
 
-static struct file_operations ioc_fops = {
+static const struct file_operations ioc_fops = {
 	.open    = ioc_open,
 	.read    = seq_read,
 	.llseek  = seq_lseek,
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 578737ec7629..c05bda662364 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,9 +91,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
 	 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -117,9 +116,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * code is locked in specific gate page, which is pointed by pretcode
 	 * when setup_frame_ia32
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_GATE_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -142,9 +140,8 @@ ia64_elf32_init (struct pt_regs *regs)
 	 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
 	 * until a task modifies them via modify_ldt().
 	 */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
 		vma->vm_mm = current->mm;
 		vma->vm_start = IA32_LDT_OFFSET;
 		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
@@ -214,12 +211,10 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
 	bprm->loader += stack_base;
 	bprm->exec += stack_base;
 
-	mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!mpnt)
 		return -ENOMEM;
 
-	memset(mpnt, 0, sizeof(*mpnt));
-
 	down_write(&current->mm->mmap_sem);
 	{
 		mpnt->vm_mm = current->mm;
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a32cd59b81ed..687e5fdc9683 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -326,7 +326,7 @@ ia32_syscall_table:
 	data8 sys_ni_syscall
 	data8 compat_sys_wait4
 	data8 sys_swapoff	/* 115 */
-	data8 sys32_sysinfo
+	data8 compat_sys_sysinfo
 	data8 sys32_ipc
 	data8 sys_fsync
 	data8 sys32_sigreturn
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 957681c39ad9..d430d36ae49d 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2209,74 +2209,6 @@ sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 }
 
-struct sysinfo32 {
-	s32 uptime;
-	u32 loads[3];
-	u32 totalram;
-	u32 freeram;
-	u32 sharedram;
-	u32 bufferram;
-	u32 totalswap;
-	u32 freeswap;
-	u16 procs;
-	u16 pad;
-	u32 totalhigh;
-	u32 freehigh;
-	u32 mem_unit;
-	char _f[8];
-};
-
-asmlinkage long
-sys32_sysinfo (struct sysinfo32 __user *info)
-{
-	struct sysinfo s;
-	long ret, err;
-	int bitcount = 0;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	ret = sys_sysinfo((struct sysinfo __user *) &s);
-	set_fs(old_fs);
-	/* Check to see if any memory value is too large for 32-bit and
-	 * scale down if needed.
-	 */
-	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
-		while (s.mem_unit < PAGE_SIZE) {
-			s.mem_unit <<= 1;
-			bitcount++;
-		}
-		s.totalram >>= bitcount;
-		s.freeram >>= bitcount;
-		s.sharedram >>= bitcount;
-		s.bufferram >>= bitcount;
-		s.totalswap >>= bitcount;
-		s.freeswap >>= bitcount;
-		s.totalhigh >>= bitcount;
-		s.freehigh >>= bitcount;
-	}
-
-	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
-		return -EFAULT;
-
-	err = __put_user(s.uptime, &info->uptime);
-	err |= __put_user(s.loads[0], &info->loads[0]);
-	err |= __put_user(s.loads[1], &info->loads[1]);
-	err |= __put_user(s.loads[2], &info->loads[2]);
-	err |= __put_user(s.totalram, &info->totalram);
-	err |= __put_user(s.freeram, &info->freeram);
-	err |= __put_user(s.sharedram, &info->sharedram);
-	err |= __put_user(s.bufferram, &info->bufferram);
-	err |= __put_user(s.totalswap, &info->totalswap);
-	err |= __put_user(s.freeswap, &info->freeswap);
-	err |= __put_user(s.procs, &info->procs);
-	err |= __put_user (s.totalhigh, &info->totalhigh);
-	err |= __put_user (s.freehigh, &info->freehigh);
-	err |= __put_user (s.mem_unit, &info->mem_unit);
-	if (err)
-		return -EFAULT;
-	return ret;
-}
-
 asmlinkage long
 sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
 {
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 29f05d4b68cd..3549c94467b8 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
 
 #define BAD_MADT_ENTRY(entry, end) ( \
 	(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
-	((acpi_table_entry_header *)entry)->length < sizeof(*entry))
+	((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
 #define PREFIX			"ACPI: "
 
@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
-#define MAX_SAPICS 256
-u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
-
-EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
-
 const char *acpi_get_sysname(void)
 {
 #ifdef CONFIG_IA64_GENERIC
 	unsigned long rsdp_phys;
-	struct acpi20_table_rsdp *rsdp;
+	struct acpi_table_rsdp *rsdp;
 	struct acpi_table_xsdt *xsdt;
 	struct acpi_table_header *hdr;
 
@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
 		return "dig";
 	}
 
-	rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys);
-	if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) {
+	rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
+	if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
 		printk(KERN_ERR
 		       "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
 		return "dig";
 	}
 
-	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address);
+	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
 	hdr = &xsdt->header;
-	if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) {
+	if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
 		printk(KERN_ERR
 		       "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
 		return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
 static u8 has_8259;
 
 static int __init
-acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
+acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
 			  const unsigned long end)
 {
-	struct acpi_table_lapic_addr_ovr *lapic;
+	struct acpi_madt_local_apic_override *lapic;
 
-	lapic = (struct acpi_table_lapic_addr_ovr *)header;
+	lapic = (struct acpi_madt_local_apic_override *)header;
 
 	if (BAD_MADT_ENTRY(lapic, end))
 		return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
 }
 
 static int __init
-acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_lsapic *lsapic;
+	struct acpi_madt_local_sapic *lsapic;
 
-	lsapic = (struct acpi_table_lsapic *)header;
+	lsapic = (struct acpi_madt_local_sapic *)header;
 
-	if (BAD_MADT_ENTRY(lsapic, end))
-		return -EINVAL;
+	/*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
 
-	if (lsapic->flags.enabled) {
+	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
 #ifdef CONFIG_SMP
 		smp_boot_data.cpu_phys_id[available_cpus] =
 		    (lsapic->id << 8) | lsapic->eid;
 #endif
-		ia64_acpiid_to_sapicid[lsapic->acpi_id] =
-		    (lsapic->id << 8) | lsapic->eid;
 		++available_cpus;
 	}
 
@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
 }
 
 static int __init
-acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_lapic_nmi *lacpi_nmi;
+	struct acpi_madt_local_apic_nmi *lacpi_nmi;
 
-	lacpi_nmi = (struct acpi_table_lapic_nmi *)header;
+	lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
 
 	if (BAD_MADT_ENTRY(lacpi_nmi, end))
 		return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
 }
 
 static int __init
-acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_iosapic *iosapic;
+	struct acpi_madt_io_sapic *iosapic;
 
-	iosapic = (struct acpi_table_iosapic *)header;
+	iosapic = (struct acpi_madt_io_sapic *)header;
 
 	if (BAD_MADT_ENTRY(iosapic, end))
 		return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 static unsigned int __initdata acpi_madt_rev;
 
 static int __init
-acpi_parse_plat_int_src(acpi_table_entry_header * header,
+acpi_parse_plat_int_src(struct acpi_subtable_header * header,
 			const unsigned long end)
 {
-	struct acpi_table_plat_int_src *plintsrc;
+	struct acpi_madt_interrupt_source *plintsrc;
 	int vector;
 
-	plintsrc = (struct acpi_table_plat_int_src *)header;
+	plintsrc = (struct acpi_madt_interrupt_source *)header;
 
 	if (BAD_MADT_ENTRY(plintsrc, end))
 		return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
 	 */
 	vector = iosapic_register_platform_intr(plintsrc->type,
 						plintsrc->global_irq,
-						plintsrc->iosapic_vector,
+						plintsrc->io_sapic_vector,
 						plintsrc->eid,
 						plintsrc->id,
-						(plintsrc->flags.polarity ==
-						 1) ? IOSAPIC_POL_HIGH :
-						IOSAPIC_POL_LOW,
-						(plintsrc->flags.trigger ==
-						 1) ? IOSAPIC_EDGE :
-						IOSAPIC_LEVEL);
+						((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
+						 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
+						IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+						((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
+						 ACPI_MADT_TRIGGER_EDGE) ?
+						IOSAPIC_EDGE : IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
 	if (acpi_madt_rev > 1) {
-		acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag;
+		acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
 	}
 
 	/*
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
 }
 
 static int __init
-acpi_parse_int_src_ovr(acpi_table_entry_header * header,
+acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
 		       const unsigned long end)
 {
-	struct acpi_table_int_src_ovr *p;
+	struct acpi_madt_interrupt_override *p;
 
-	p = (struct acpi_table_int_src_ovr *)header;
+	p = (struct acpi_madt_interrupt_override *)header;
 
 	if (BAD_MADT_ENTRY(p, end))
 		return -EINVAL;
 
-	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
-				 (p->flags.polarity ==
-				  1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-				 (p->flags.trigger ==
-				  1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+	iosapic_override_isa_irq(p->source_irq, p->global_irq,
+				 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
+				  ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
+				 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
+				 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
+				  ACPI_MADT_TRIGGER_EDGE) ?
+				 IOSAPIC_EDGE : IOSAPIC_LEVEL);
 	return 0;
 }
 
 static int __init
-acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
+acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
 {
-	struct acpi_table_nmi_src *nmi_src;
+	struct acpi_madt_nmi_source *nmi_src;
 
-	nmi_src = (struct acpi_table_nmi_src *)header;
+	nmi_src = (struct acpi_madt_nmi_source *)header;
 
 	if (BAD_MADT_ENTRY(nmi_src, end))
 		return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	}
 }
 
-static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_madt(struct acpi_table_header *table)
 {
-	if (!phys_addr || !size)
+	if (!table)
 		return -EINVAL;
 
-	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
+	acpi_madt = (struct acpi_table_madt *)table;
 
 	acpi_madt_rev = acpi_madt->header.revision;
 
@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 #ifdef CONFIG_ITANIUM
 	has_8259 = 1; /* Firmware on old Itanium systems is broken */
 #else
-	has_8259 = acpi_madt->flags.pcat_compat;
+	has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
 #endif
 	iosapic_system_init(has_8259);
 
 	/* Get base address of IPI Message Block */
 
-	if (acpi_madt->lapic_address)
-		ipi_base_addr = ioremap(acpi_madt->lapic_address, 0);
+	if (acpi_madt->address)
+		ipi_base_addr = ioremap(acpi_madt->address, 0);
 
 	printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
 
@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 #define pxm_bit_test(bit)	(test_bit(bit,(void *)pxm_flag))
 static struct acpi_table_slit __initdata *slit_table;
 
-static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
+static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
 	int pxm;
 
-	pxm = pa->proximity_domain;
+	pxm = pa->proximity_domain_lo;
 	if (ia64_platform_is("sn2"))
-		pxm += pa->reserved[0] << 8;
+		pxm += pa->proximity_domain_hi[0] << 8;
 	return pxm;
 }
 
-static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
+static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
 {
 	int pxm;
 
 	pxm = ma->proximity_domain;
-	if (ia64_platform_is("sn2"))
-		pxm += ma->reserved1[0] << 8;
+	if (!ia64_platform_is("sn2"))
+		pxm &= 0xff;
+
 	return pxm;
 }
 
@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 	u32 len;
 
 	len = sizeof(struct acpi_table_header) + 8
-	    + slit->localities * slit->localities;
+	    + slit->locality_count * slit->locality_count;
 	if (slit->header.length != len) {
 		printk(KERN_ERR
 		       "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 }
 
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
 	int pxm;
 
-	if (!pa->flags.enabled)
+	if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
 		return;
 
 	pxm = get_processor_proximity_domain(pa);
@@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	pxm_bit_set(pxm);
 
 	node_cpuid[srat_num_cpus].phys_id =
-	    (pa->apic_id << 8) | (pa->lsapic_eid);
+	    (pa->apic_id << 8) | (pa->local_sapic_eid);
 	/* nid should be overridden as logical node id later */
 	node_cpuid[srat_num_cpus].nid = pxm;
 	srat_num_cpus++;
 }
 
 void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	unsigned long paddr, size;
 	int pxm;
@@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	pxm = get_memory_proximity_domain(ma);
 
 	/* fill node memory chunk structure */
-	paddr = ma->base_addr_hi;
-	paddr = (paddr << 32) | ma->base_addr_lo;
-	size = ma->length_hi;
-	size = (size << 32) | ma->length_lo;
+	paddr = ma->base_address;
+	size = ma->length;
 
 	/* Ignore disabled entries */
-	if (!ma->flags.enabled)
+	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
 		return;
 
 	/* record this node in proximity bitmap */
@@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
 	if (!slit_table)
 		return;
 	memset(numa_slit, -1, sizeof(numa_slit));
-	for (i = 0; i < slit_table->localities; i++) {
+	for (i = 0; i < slit_table->locality_count; i++) {
 		if (!pxm_bit_test(i))
 			continue;
 		node_from = pxm_to_node(i);
-		for (j = 0; j < slit_table->localities; j++) {
+		for (j = 0; j < slit_table->locality_count; j++) {
 			if (!pxm_bit_test(j))
 				continue;
 			node_to = pxm_to_node(j);
 			node_distance(node_from, node_to) =
-			    slit_table->entry[i * slit_table->localities + j];
+			    slit_table->entry[i * slit_table->locality_count + j];
 		}
 	}
 
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
 
 EXPORT_SYMBOL(acpi_unregister_gsi);
 
-static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
+static int __init acpi_parse_fadt(struct acpi_table_header *table)
 {
 	struct acpi_table_header *fadt_header;
-	struct fadt_descriptor *fadt;
+	struct acpi_table_fadt *fadt;
 
-	if (!phys_addr || !size)
+	if (!table)
 		return -EINVAL;
 
-	fadt_header = (struct acpi_table_header *)__va(phys_addr);
+	fadt_header = (struct acpi_table_header *)table;
 	if (fadt_header->revision != 3)
 		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */
 
-	fadt = (struct fadt_descriptor *)fadt_header;
+	fadt = (struct acpi_table_fadt *)fadt_header;
 
-	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
+	acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 	return 0;
 }
 
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
 	 * information -- the successor to MPS tables.
 	 */
 
-	if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) {
+	if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
 		printk(KERN_ERR PREFIX "Can't find MADT\n");
 		goto skip_madt;
 	}
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
 	/* Local APIC */
 
 	if (acpi_table_parse_madt
-	    (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0)
+	    (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing LAPIC address override entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
 	    < 1)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
 	    < 0)
 		printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
 
 	/* I/O APIC */
 
 	if (acpi_table_parse_madt
-	    (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
+	    (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no IOSAPIC entries\n");
 
 	/* System-Level Interrupt Routing */
 
 	if (acpi_table_parse_madt
-	    (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src,
+	    (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
 	     ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing platform interrupt source entry\n");
 
 	if (acpi_table_parse_madt
-	    (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0)
+	    (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
 		printk(KERN_ERR PREFIX
 		       "Error parsing interrupt source overrides entry\n");
 
-	if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0)
+	if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
 		printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
       skip_madt:
 
@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
 	 * gets interrupts such as power and sleep buttons. If it's not
 	 * on a Legacy interrupt, it needs to be setup.
 	 */
-	if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
+	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
 #ifdef CONFIG_SMP
@@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
-	struct acpi_table_lsapic *lsapic;
+	struct acpi_madt_local_sapic *lsapic;
 	cpumask_t tmp_map;
 	long physid;
 	int cpu;
@@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		return -EINVAL;
 
 	obj = buffer.pointer;
-	if (obj->type != ACPI_TYPE_BUFFER ||
-	    obj->buffer.length < sizeof(*lsapic)) {
+	if (obj->type != ACPI_TYPE_BUFFER)
+	{
 		kfree(buffer.pointer);
 		return -EINVAL;
 	}
 
-	lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer;
+	lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
 
-	if ((lsapic->header.type != ACPI_MADT_LSAPIC) ||
-	    (!lsapic->flags.enabled)) {
+	if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
+	    (!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
 		kfree(buffer.pointer);
 		return -EINVAL;
 	}
@@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 
 	cpu_set(cpu, cpu_present_map);
 	ia64_cpu_to_sapicid[cpu] = physid;
-	ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
 
 	*pcpu = cpu;
 	return (0);
@@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)
 {
-	int i;
-
-	for (i = 0; i < MAX_SAPICS; i++) {
-		if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
-			ia64_acpiid_to_sapicid[i] = -1;
-			break;
-		}
-	}
 	ia64_cpu_to_sapicid[cpu] = -1;
 	cpu_clear(cpu, cpu_present_map);
 
@@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
-	struct acpi_table_iosapic *iosapic;
+	struct acpi_madt_io_sapic *iosapic;
 	unsigned int gsi_base;
 	int pxm, node;
 
@@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 		return AE_OK;
 	}
 
-	iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer;
+	iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
 
-	if (iosapic->header.type != ACPI_MADT_IOSAPIC) {
+	if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
 		kfree(buffer.pointer);
 		return AE_OK;
 	}
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d72244..5cdd2f5fa064 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -52,7 +52,7 @@ extern void ia64_dump_cpu_regs(void *);
 static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
 
 void
-crash_save_this_cpu()
+crash_save_this_cpu(void)
 {
 	void *buf;
 	unsigned long cfm, sof, sol;
@@ -79,6 +79,7 @@ crash_save_this_cpu()
 	final_note(buf);
 }
 
+#ifdef CONFIG_SMP
 static int
 kdump_wait_cpu_freeze(void)
 {
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
 	}
 	return 1;
 }
+#endif
 
 void
 machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
+	if (!ia64_kimage) {
+		printk(KERN_NOTICE "machine_kdump_on_init(): "
+				"kdump not configured\n");
+		return;
+	}
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
 	atomic_inc(&kdump_cpu_freezed);
 	kdump_status[cpuid] = 1;
 	mb();
-	if (cpuid == 0) {
-		for (;;)
-			cpu_relax();
-	} else
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpuid != 0)
 		ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
+#endif
+	for (;;)
+		cpu_relax();
 }
 
 static int
@@ -214,7 +222,7 @@ machine_crash_setup(void)
 	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
 		return ret;
 #ifdef CONFIG_SYSCTL
-	register_sysctl_table(sys_table, 0);
+	register_sysctl_table(sys_table);
 #endif
 	return 0;
 }
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c1408..da60e90eeeb1 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
-#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
 
 /**
  * copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e4..772ba6fe110f 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
 #endif
 		return __va(md->phys_addr);
 	}
-	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found",
+	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
 	       __FUNCTION__);
 	return NULL;
 }
@@ -413,11 +413,10 @@ efi_init (void)
 	efi_char16_t *c16;
 	u64 efi_desc_size;
 	char *cp, vendor[100] = "unknown";
-	extern char saved_command_line[];
 	int i;
 
 	/* it's too early to be able to use the standard kernel command line support... */
-	for (cp = saved_command_line; *cp; ) {
+	for (cp = boot_command_line; *cp; ) {
 		if (memcmp(cp, "mem=", 4) == 0) {
 			mem_limit = memparse(cp + 4, &cp);
 		} else if (memcmp(cp, "max_addr=", 9) == 0) {
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed3a341..e7873eeae448 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
 	data8 sys_sync_file_range		// 1300
 	data8 sys_tee
 	data8 sys_vmsplice
+	data8 sys_ni_syscall			// reserved for move_pages
+	data8 sys_getcpu
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865cf..d6aab40c6416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
 	/* Clear the interrupt controller descriptor */
 	idesc->chip = &no_irq_type;
 
+#ifdef CONFIG_SMP
+	/* Clear affinity */
+	cpus_setall(idesc->affinity);
+#endif
+
 	/* Clear the interrupt information */
 	memset(&iosapic_intr_info[vector], 0,
 	       sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index ba3ba8bc50be..456f57b087ca 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -275,7 +275,7 @@ static struct irqaction ipi_irqaction = {
 
 static struct irqaction resched_irqaction = {
 	.handler =	dummy_handler,
-	.flags =	SA_INTERRUPT,
+	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
 #endif
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c5..4f0f3b8c1ee2 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
 #include <linux/kexec.h>
 #include <linux/cpu.h>
 #include <linux/irq.h>
+#include <linux/efi.h>
 #include <asm/mmu_context.h>
 #include <asm/setup.h>
 #include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
 {
 }
 
-void machine_shutdown(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu != smp_processor_id())
-			cpu_down(cpu);
-	}
-	kexec_disable_iosapic();
-}
-
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-extern void *efi_get_pal_addr(void);
 static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 {
 	struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	unsigned long vector;
 	int ii;
 
+	BUG_ON(!image);
 	if (image->type == KEXEC_TYPE_CRASH) {
 		crash_save_this_cpu();
 		current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 
 void machine_kexec(struct kimage *image)
 {
+	BUG_ON(!image);
 	unw_init_running(ia64_machine_kexec, image);
 	for(;;);
 }
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 822e59a1b822..0d05450c91c4 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -64,12 +64,17 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 }
 #endif /* CONFIG_SMP */
 
-int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
+int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 {
 	struct msi_msg	msg;
 	unsigned long	dest_phys_id;
-	unsigned int	vector;
+	unsigned int	irq, vector;
 
+	irq = create_irq();
+	if (irq < 0)
+		return irq;
+
+	set_irq_msi(irq, desc);
 	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
 	vector = irq;
 
@@ -89,12 +94,12 @@ int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
 	write_msi_msg(irq, &msg);
 	set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
 
-	return 0;
+	return irq;
 }
 
 void ia64_teardown_msi_irq(unsigned int irq)
 {
-	return;		/* no-op */
+	destroy_irq(irq);
 }
 
 static void ia64_ack_msi_irq(unsigned int irq)
@@ -126,12 +131,12 @@ static struct irq_chip ia64_msi_chip = {
 };
 
 
-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 {
 	if (platform_setup_msi_irq)
-		return platform_setup_msi_irq(irq, pdev);
+		return platform_setup_msi_irq(pdev, desc);
 
-	return ia64_setup_msi_irq(irq, pdev);
+	return ia64_setup_msi_irq(pdev, desc);
 }
 
 void arch_teardown_msi_irq(unsigned int irq)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index aa94f60fa8e7..2ecb20b551e1 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -521,19 +521,57 @@ pfm_sysctl_t pfm_sysctl;
 EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
-	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
-	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
-	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
-	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
-	{ 0, },
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "debug",
+		.data		= &pfm_sysctl.debug,
+		.maxlen		= sizeof(int),
+		.mode		= 0666,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "debug_ovfl",
+		.data		= &pfm_sysctl.debug_ovfl,
+		.maxlen		= sizeof(int),
+		.mode		= 0666,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "fastctxsw",
+		.data		= &pfm_sysctl.fastctxsw,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "expert_mode",
+		.data		= &pfm_sysctl.expert_mode,
+		.maxlen		= sizeof(int),
+		.mode		= 0600,
+		.proc_handler	= &proc_dointvec,
+	},
+	{}
 };
 static ctl_table pfm_sysctl_dir[] = {
-	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
-	{0,},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "perfmon",
+		.mode		= 0755,
+		.child		= pfm_ctl_table,
+	},
+	{}
 };
 static ctl_table pfm_sysctl_root[] = {
-	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
-	{0,},
+	{
+		.ctl_name	= CTL_KERN,
+		.procname	= "kernel",
+		.mode		= 0755,
+		.child		= pfm_sysctl_dir,
+	},
+	{}
 };
 static struct ctl_table_header *pfm_sysctl_header;
 
@@ -621,7 +659,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
 
 
 /* forward declaration */
-static struct file_operations pfm_file_ops;
+static const struct file_operations pfm_file_ops;
 
 /*
  * forward declarations
@@ -2126,7 +2164,7 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
 
 
 
-static struct file_operations pfm_file_ops = {
+static const struct file_operations pfm_file_ops = {
 	.llseek   = no_llseek,
 	.read     = pfm_read,
 	.write    = pfm_write,
@@ -2301,12 +2339,11 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 	DPRINT(("smpl_buf @%p\n", smpl_buf));
 
 	/* allocate vma */
-	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
-	memset(vma, 0, sizeof(*vma));
 
 	/*
 	 * partially initialize the vma for the sampling buffer
@@ -6597,7 +6634,7 @@ found:
 	return 0;
 }
 
-static struct file_operations pfm_proc_fops = {
+static const struct file_operations pfm_proc_fops = {
 	.open		= pfm_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -6689,7 +6726,7 @@ pfm_init(void)
 	/*
 	 * create /proc/sys/kernel/perfmon (for debugging purposes)
 	 */
-	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
+	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
 
 	/*
 	 * initialize all our spinlocks
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf496..ae96d4176995 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
 #include <asm/ia32.h>
 #include <asm/irq.h>
 #include <asm/kdebug.h>
+#include <asm/kexec.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
 	ia64_pal_halt(min_power_state);
 }
 
+void machine_shutdown(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id())
+			cpu_down(cpu);
+	}
+#endif
+#ifdef CONFIG_KEXEC
+	kexec_disable_iosapic();
+#endif
+}
+
 void
 machine_restart (char *restart_cmd)
 {
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b974..3f8918782e0c 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 	 */
 	list_for_each_safe(this, next, &current->children) {
 		p = list_entry(this, struct task_struct, sibling);
-		if (p->mm != mm)
+		if (p->tgid != child->tgid)
 			continue;
 		if (thread_matches(p, addr)) {
 			child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
 	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
 	/* make sure the single step/taken-branch trap bits are not set: */
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 	child_psr->ss = 0;
 	child_psr->tb = 0;
 }
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 		 * Make sure the single step/taken-branch trap bits
 		 * are not set:
 		 */
+		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 		ia64_psr(pt)->ss = 0;
 		ia64_psr(pt)->tb = 0;
 
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 			goto out_tsk;
 
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+		set_tsk_thread_flag(child, TIF_SINGLESTEP);
 		if (request == PTRACE_SINGLESTEP) {
 			ia64_psr(pt)->ss = 1;
 		} else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
 }
 
 
-void
+static void
 syscall_trace (void)
 {
-	if (!test_thread_flag(TIF_SYSCALL_TRACE))
-		return;
-	if (!(current->ptrace & PT_PTRACED))
-		return;
 	/*
 	 * The 0x80 provides a way for the tracing parent to
 	 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		audit_syscall_exit(success, result);
 	}
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE)
+	if ((test_thread_flag(TIF_SYSCALL_TRACE)
+	    || test_thread_flag(TIF_SINGLESTEP))
 	    && (current->ptrace & PT_PTRACED))
 		syscall_trace();
 }
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 20bad78b5073..37c876f95dba 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -194,9 +194,8 @@ static void __init
 chk_nointroute_opt(void)
 {
 	char *cp;
-	extern char saved_command_line[];
 
-	for (cp = saved_command_line; *cp; ) {
+	for (cp = boot_command_line; *cp; ) {
 		if (memcmp(cp, "nointroute", 10) == 0) {
 			no_int_routing = 1;
 			printk ("no_int_routing on\n");
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e375a2f0f2c3..af9f8754d847 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -352,7 +352,7 @@ retry:
 	return size;
 }
 
-static struct file_operations salinfo_event_fops = {
+static const struct file_operations salinfo_event_fops = {
 	.open  = salinfo_event_open,
 	.read  = salinfo_event_read,
 };
@@ -568,7 +568,7 @@ salinfo_log_write(struct file *file, const char __user *buffer, size_t count, lo
 	return count;
 }
 
-static struct file_operations salinfo_data_fops = {
+static const struct file_operations salinfo_data_fops = {
 	.open    = salinfo_log_open,
 	.release = salinfo_log_release,
 	.read    = salinfo_log_read,
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d432e..5fa09d141ab7 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -262,7 +262,7 @@ reserve_memory (void)
 	 * appropriate after a kernel panic.
 	 */
 	{
-		char *from = strstr(saved_command_line, "crashkernel=");
+		char *from = strstr(boot_command_line, "crashkernel=");
 		unsigned long base, size;
 		if (from) {
 			size = memparse(from + 12, &from);
@@ -463,7 +463,7 @@ setup_arch (char **cmdline_p)
 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 
 	*cmdline_p = __va(ia64_boot_param->command_line);
-	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 
 	efi_init();
 	io_port_init();
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
 		{ 1UL << 1, "spontaneous deferral"},
 		{ 1UL << 2, "16-byte atomic ops" }
 	};
-	char features[128], *cp, sep;
+	char features[128], *cp, *sep;
 	struct cpuinfo_ia64 *c = v;
 	unsigned long mask;
 	unsigned long proc_freq;
-	int i;
+	int i, size;
 
 	mask = c->features;
 
 	/* build the feature string: */
-	memcpy(features, " standard", 10);
+	memcpy(features, "standard", 9);
 	cp = features;
-	sep = 0;
-	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
+	size = sizeof(features);
+	sep = "";
+	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
 		if (mask & feature_bits[i].mask) {
-			if (sep)
-				*cp++ = sep;
-			sep = ',';
-			*cp++ = ' ';
-			strcpy(cp, feature_bits[i].feature_name);
-			cp += strlen(feature_bits[i].feature_name);
+			cp += snprintf(cp, size, "%s%s", sep,
+				       feature_bits[i].feature_name),
+			sep = ", ";
 			mask &= ~feature_bits[i].mask;
+			size = sizeof(features) - (cp - features);
 		}
 	}
-	if (mask) {
-		/* print unknown features as a hex value: */
-		if (sep)
-			*cp++ = sep;
-		sprintf(cp, " 0x%lx", mask);
+	if (mask && size > 1) {
+		/* print unknown features as a hex value */
+		snprintf(cp, size, "%s0x%lx", sep, mask);
 	}
 
 	proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
612 "model name : %s\n" 609 "model name : %s\n"
613 "revision : %u\n" 610 "revision : %u\n"
614 "archrev : %u\n" 611 "archrev : %u\n"
615 "features :%s\n" /* don't change this---it _is_ right! */ 612 "features : %s\n"
616 "cpu number : %lu\n" 613 "cpu number : %lu\n"
617 "cpu regs : %u\n" 614 "cpu regs : %u\n"
618 "cpu MHz : %lu.%06lu\n" 615 "cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f4c7f7769cf7..55ddd809b02d 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -221,13 +221,13 @@ send_IPI_self (int op)
221 221
222#ifdef CONFIG_KEXEC 222#ifdef CONFIG_KEXEC
223void 223void
224kdump_smp_send_stop() 224kdump_smp_send_stop(void)
225{ 225{
226 send_IPI_allbutself(IPI_KDUMP_CPU_STOP); 226 send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
227} 227}
228 228
229void 229void
230kdump_smp_send_init() 230kdump_smp_send_init(void)
231{ 231{
232 unsigned int cpu, self_cpu; 232 unsigned int cpu, self_cpu;
233 self_cpu = smp_processor_id(); 233 self_cpu = smp_processor_id();
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index ab684747036f..765cbe5ba6ae 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -24,8 +24,6 @@
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include <asm/kdebug.h> 25#include <asm/kdebug.h>
26 26
27extern spinlock_t timerlist_lock;
28
29fpswa_interface_t *fpswa_interface; 27fpswa_interface_t *fpswa_interface;
30EXPORT_SYMBOL(fpswa_interface); 28EXPORT_SYMBOL(fpswa_interface);
31 29
@@ -53,34 +51,6 @@ trap_init (void)
53 fpswa_interface = __va(ia64_boot_param->fpswa); 51 fpswa_interface = __va(ia64_boot_param->fpswa);
54} 52}
55 53
56/*
57 * Unlock any spinlocks which will prevent us from getting the message out (timerlist_lock
58 * is acquired through the console unblank code)
59 */
60void
61bust_spinlocks (int yes)
62{
63 int loglevel_save = console_loglevel;
64
65 if (yes) {
66 oops_in_progress = 1;
67 return;
68 }
69
70#ifdef CONFIG_VT
71 unblank_screen();
72#endif
73 oops_in_progress = 0;
74 /*
75 * OK, the message is on the console. Now we call printk() without
76 * oops_in_progress set so that printk will give klogd a poke. Hold onto
77 * your hats...
78 */
79 console_loglevel = 15; /* NMI oopser may have shut the console up */
80 printk(" ");
81 console_loglevel = loglevel_save;
82}
83
84void 54void
85die (const char *str, struct pt_regs *regs, long err) 55die (const char *str, struct pt_regs *regs, long err)
86{ 56{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0936f4..25dd55e4db24 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -111,12 +111,14 @@ SECTIONS
111 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) 111 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
112 { *(.init.data) } 112 { *(.init.data) }
113 113
114#ifdef CONFIG_BLK_DEV_INITRD
114 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) 115 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
115 { 116 {
116 __initramfs_start = .; 117 __initramfs_start = .;
117 *(.init.ramfs) 118 *(.init.ramfs)
118 __initramfs_end = .; 119 __initramfs_end = .;
119 } 120 }
121#endif
120 122
121 . = ALIGN(16); 123 . = ALIGN(16);
122 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) 124 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
@@ -157,6 +159,7 @@ SECTIONS
157 } 159 }
158#endif 160#endif
159 161
162 . = ALIGN(8);
160 __con_initcall_start = .; 163 __con_initcall_start = .;
161 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) 164 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
162 { *(.con_initcall.init) } 165 { *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..ca4d41e5f177 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
30#endif 30#endif
31 31
32/** 32/**
33 * show_mem - display a memory statistics summary 33 * show_mem - give short summary of memory stats
34 * 34 *
35 * Just walks the pages in the system and describes where they're allocated. 35 * Shows a simple page count of reserved and used pages in the system.
36 * For discontig machines, it does this on a per-pgdat basis.
36 */ 37 */
37void 38void show_mem(void)
38show_mem (void)
39{ 39{
40 int i, total = 0, reserved = 0; 40 int i, total_reserved = 0;
41 int shared = 0, cached = 0; 41 int total_shared = 0, total_cached = 0;
42 unsigned long total_present = 0;
43 pg_data_t *pgdat;
42 44
43 printk(KERN_INFO "Mem-info:\n"); 45 printk(KERN_INFO "Mem-info:\n");
44 show_free_areas(); 46 show_free_areas();
45
46 printk(KERN_INFO "Free swap: %6ldkB\n", 47 printk(KERN_INFO "Free swap: %6ldkB\n",
47 nr_swap_pages<<(PAGE_SHIFT-10)); 48 nr_swap_pages<<(PAGE_SHIFT-10));
48 i = max_mapnr; 49 printk(KERN_INFO "Node memory in pages:\n");
49 for (i = 0; i < max_mapnr; i++) { 50 for_each_online_pgdat(pgdat) {
50 if (!pfn_valid(i)) { 51 unsigned long present;
52 unsigned long flags;
53 int shared = 0, cached = 0, reserved = 0;
54
55 pgdat_resize_lock(pgdat, &flags);
56 present = pgdat->node_present_pages;
57 for(i = 0; i < pgdat->node_spanned_pages; i++) {
58 struct page *page;
59 if (pfn_valid(pgdat->node_start_pfn + i))
60 page = pfn_to_page(pgdat->node_start_pfn + i);
61 else {
51#ifdef CONFIG_VIRTUAL_MEM_MAP 62#ifdef CONFIG_VIRTUAL_MEM_MAP
52 if (max_gap < LARGE_GAP) 63 if (max_gap < LARGE_GAP)
53 continue; 64 continue;
54 i = vmemmap_find_next_valid_pfn(0, i) - 1;
55#endif 65#endif
56 continue; 66 i = vmemmap_find_next_valid_pfn(pgdat->node_id,
67 i) - 1;
68 continue;
69 }
70 if (PageReserved(page))
71 reserved++;
72 else if (PageSwapCache(page))
73 cached++;
74 else if (page_count(page))
75 shared += page_count(page)-1;
57 } 76 }
58 total++; 77 pgdat_resize_unlock(pgdat, &flags);
59 if (PageReserved(mem_map+i)) 78 total_present += present;
60 reserved++; 79 total_reserved += reserved;
61 else if (PageSwapCache(mem_map+i)) 80 total_cached += cached;
62 cached++; 81 total_shared += shared;
63 else if (page_count(mem_map + i)) 82 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
64 shared += page_count(mem_map + i) - 1; 83 "shrd: %10d, swpd: %10d\n", pgdat->node_id,
84 present, reserved, shared, cached);
65 } 85 }
66 printk(KERN_INFO "%d pages of RAM\n", total); 86 printk(KERN_INFO "%ld pages of RAM\n", total_present);
67 printk(KERN_INFO "%d reserved pages\n", reserved); 87 printk(KERN_INFO "%d reserved pages\n", total_reserved);
68 printk(KERN_INFO "%d pages shared\n", shared); 88 printk(KERN_INFO "%d pages shared\n", total_shared);
69 printk(KERN_INFO "%d pages swap cached\n", cached); 89 printk(KERN_INFO "%d pages swap cached\n", total_cached);
70 printk(KERN_INFO "%ld pages in page table cache\n", 90 printk(KERN_INFO "Total of %ld pages in page table cache\n",
71 pgtable_quicklist_total_size()); 91 pgtable_quicklist_total_size());
92 printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
72} 93}
73 94
95
74/* physical address where the bootmem map is located */ 96/* physical address where the bootmem map is located */
75unsigned long bootmap_start; 97unsigned long bootmap_start;
76 98
@@ -177,7 +199,7 @@ find_memory (void)
177 199
178#ifdef CONFIG_CRASH_DUMP 200#ifdef CONFIG_CRASH_DUMP
179 /* If we are doing a crash dump, we still need to know the real mem 201 /* If we are doing a crash dump, we still need to know the real mem
180 * size before original memory map is * reset. */ 202 * size before original memory map is reset. */
181 saved_max_pfn = max_pfn; 203 saved_max_pfn = max_pfn;
182#endif 204#endif
183} 205}
@@ -237,9 +259,11 @@ paging_init (void)
237 num_physpages = 0; 259 num_physpages = 0;
238 efi_memmap_walk(count_pages, &num_physpages); 260 efi_memmap_walk(count_pages, &num_physpages);
239 261
240 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
241 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 262 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
263#ifdef CONFIG_ZONE_DMA
264 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
242 max_zone_pfns[ZONE_DMA] = max_dma; 265 max_zone_pfns[ZONE_DMA] = max_dma;
266#endif
243 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 267 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
244 268
245#ifdef CONFIG_VIRTUAL_MEM_MAP 269#ifdef CONFIG_VIRTUAL_MEM_MAP
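
The new show_mem() walks every online pgdat under its resize lock and classifies each valid page as reserved, swap-cached, or shared before printing per-node totals. The following standalone sketch mimics only that classification loop; struct fake_page and its fields are made-up stand-ins for the kernel's struct page flag tests, not the real API:

#include <stdio.h>

/* hypothetical stand-in for the per-page state tested in show_mem() */
struct fake_page { int reserved; int swap_cache; int count; };

int main(void)
{
	struct fake_page node_pages[] = {
		{ 1, 0, 0 }, { 0, 1, 1 }, { 0, 0, 3 }, { 0, 0, 1 },
	};
	int i, reserved = 0, cached = 0, shared = 0;
	int present = sizeof(node_pages) / sizeof(node_pages[0]);

	for (i = 0; i < present; i++) {
		if (node_pages[i].reserved)
			reserved++;				/* PageReserved() */
		else if (node_pages[i].swap_cache)
			cached++;				/* PageSwapCache() */
		else if (node_pages[i].count)
			shared += node_pages[i].count - 1;	/* page_count() - 1 */
	}
	printf("Node %4d: RAM: %11d, rsvd: %8d, shrd: %10d, swpd: %10d\n",
	       0, present, reserved, shared, cached);
	return 0;
}
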
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..16835108bb5b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -37,7 +37,9 @@ struct early_node_data {
37 unsigned long pernode_size; 37 unsigned long pernode_size;
38 struct bootmem_data bootmem_data; 38 struct bootmem_data bootmem_data;
39 unsigned long num_physpages; 39 unsigned long num_physpages;
40#ifdef CONFIG_ZONE_DMA
40 unsigned long num_dma_physpages; 41 unsigned long num_dma_physpages;
42#endif
41 unsigned long min_pfn; 43 unsigned long min_pfn;
42 unsigned long max_pfn; 44 unsigned long max_pfn;
43}; 45};
@@ -412,37 +414,6 @@ static void __init memory_less_nodes(void)
412 return; 414 return;
413} 415}
414 416
415#ifdef CONFIG_SPARSEMEM
416/**
417 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
418 * @start: physical start of range
419 * @end: physical end of range
420 * @arg: unused
421 *
422 * Simply calls SPARSEMEM to register memory section(s).
423 */
424static int __init register_sparse_mem(unsigned long start, unsigned long end,
425 void *arg)
426{
427 int nid;
428
429 start = __pa(start) >> PAGE_SHIFT;
430 end = __pa(end) >> PAGE_SHIFT;
431 nid = early_pfn_to_nid(start);
432 memory_present(nid, start, end);
433
434 return 0;
435}
436
437static void __init arch_sparse_init(void)
438{
439 efi_memmap_walk(register_sparse_mem, NULL);
440 sparse_init();
441}
442#else
443#define arch_sparse_init() do {} while (0)
444#endif
445
446/** 417/**
447 * find_memory - walk the EFI memory map and setup the bootmem allocator 418 * find_memory - walk the EFI memory map and setup the bootmem allocator
448 * 419 *
@@ -473,6 +444,9 @@ void __init find_memory(void)
473 node_clear(node, memory_less_mask); 444 node_clear(node, memory_less_mask);
474 mem_data[node].min_pfn = ~0UL; 445 mem_data[node].min_pfn = ~0UL;
475 } 446 }
447
448 efi_memmap_walk(register_active_ranges, NULL);
449
476 /* 450 /*
477 * Initialize the boot memory maps in reverse order since that's 451 * Initialize the boot memory maps in reverse order since that's
478 * what the bootmem allocator expects 452 * what the bootmem allocator expects
@@ -506,6 +480,12 @@ void __init find_memory(void)
506 max_pfn = max_low_pfn; 480 max_pfn = max_low_pfn;
507 481
508 find_initrd(); 482 find_initrd();
483
484#ifdef CONFIG_CRASH_DUMP
485 /* If we are doing a crash dump, we still need to know the real mem
486 * size before original memory map is reset. */
487 saved_max_pfn = max_pfn;
488#endif
509} 489}
510 490
511#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
@@ -654,11 +634,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
654{ 634{
655 unsigned long end = start + len; 635 unsigned long end = start + len;
656 636
657 add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
658 mem_data[node].num_physpages += len >> PAGE_SHIFT; 637 mem_data[node].num_physpages += len >> PAGE_SHIFT;
638#ifdef CONFIG_ZONE_DMA
659 if (start <= __pa(MAX_DMA_ADDRESS)) 639 if (start <= __pa(MAX_DMA_ADDRESS))
660 mem_data[node].num_dma_physpages += 640 mem_data[node].num_dma_physpages +=
661 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; 641 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
642#endif
662 start = GRANULEROUNDDOWN(start); 643 start = GRANULEROUNDDOWN(start);
663 start = ORDERROUNDDOWN(start); 644 start = ORDERROUNDDOWN(start);
664 end = GRANULEROUNDUP(end); 645 end = GRANULEROUNDUP(end);
@@ -686,10 +667,11 @@ void __init paging_init(void)
686 667
687 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 668 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
688 669
689 arch_sparse_init();
690
691 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 670 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
692 671
672 sparse_memory_present_with_active_regions(MAX_NUMNODES);
673 sparse_init();
674
693#ifdef CONFIG_VIRTUAL_MEM_MAP 675#ifdef CONFIG_VIRTUAL_MEM_MAP
694 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 676 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
695 sizeof(struct page)); 677 sizeof(struct page));
@@ -710,7 +692,9 @@ void __init paging_init(void)
710 } 692 }
711 693
712 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 694 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
695#ifdef CONFIG_ZONE_DMA
713 max_zone_pfns[ZONE_DMA] = max_dma; 696 max_zone_pfns[ZONE_DMA] = max_dma;
697#endif
714 max_zone_pfns[ZONE_NORMAL] = max_pfn; 698 max_zone_pfns[ZONE_NORMAL] = max_pfn;
715 free_area_init_nodes(max_zone_pfns); 699 free_area_init_nodes(max_zone_pfns);
716 700
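
count_node_pages() above now guards its DMA accounting with CONFIG_ZONE_DMA, crediting to num_dma_physpages only the portion of each range that lies below MAX_DMA_ADDRESS. A small arithmetic sketch of that clipping, assuming ia64's default 16KB pages and an example range straddling the 4GB boundary:

#include <stdio.h>

#define PAGE_SHIFT	14			/* ia64 default: 16KB pages */
#define MAX_DMA_PHYS	0x100000000ULL		/* example 4GB ZONE_DMA boundary */

int main(void)
{
	/* an example memory range straddling the DMA boundary */
	unsigned long long start = 0xc0000000ULL, len = 0x80000000ULL;
	unsigned long long end = start + len;
	unsigned long long dma_end = end < MAX_DMA_PHYS ? end : MAX_DMA_PHYS;
	unsigned long long num_physpages = len >> PAGE_SHIFT;
	unsigned long long num_dma_physpages = 0;

	if (start <= MAX_DMA_PHYS)		/* only count the part below the limit */
		num_dma_physpages = (dma_end - start) >> PAGE_SHIFT;

	printf("total pages %llu, DMA-capable pages %llu\n",
	       num_physpages, num_dma_physpages);
	return 0;
}
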
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..f225dd72968b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/kexec.h>
22 23
23#include <asm/a.out.h> 24#include <asm/a.out.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
@@ -67,7 +68,7 @@ max_pgt_pages(void)
67#ifndef CONFIG_NUMA 68#ifndef CONFIG_NUMA
68 node_free_pages = nr_free_pages(); 69 node_free_pages = nr_free_pages();
69#else 70#else
70 node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id())); 71 node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
71#endif 72#endif
72 max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM; 73 max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
73 max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES); 74 max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
128 set_bit(PG_arch_1, &page->flags); /* mark page as clean */ 129 set_bit(PG_arch_1, &page->flags); /* mark page as clean */
129} 130}
130 131
132/*
133 * Since DMA is i-cache coherent, any (complete) pages that were written via
134 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
135 * flush them when they get mapped into an executable vm-area.
136 */
137void
138dma_mark_clean(void *addr, size_t size)
139{
140 unsigned long pg_addr, end;
141
142 pg_addr = PAGE_ALIGN((unsigned long) addr);
143 end = (unsigned long) addr + size;
144 while (pg_addr + PAGE_SIZE <= end) {
145 struct page *page = virt_to_page(pg_addr);
146 set_bit(PG_arch_1, &page->flags);
147 pg_addr += PAGE_SIZE;
148 }
149}
150
131inline void 151inline void
132ia64_set_rbs_bot (void) 152ia64_set_rbs_bot (void)
133{ 153{
@@ -156,9 +176,8 @@ ia64_init_addr_space (void)
156 * the problem. When the process attempts to write to the register backing store 176 * the problem. When the process attempts to write to the register backing store
157 * for the first time, it will get a SEGFAULT in this case. 177 * for the first time, it will get a SEGFAULT in this case.
158 */ 178 */
159 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 179 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
160 if (vma) { 180 if (vma) {
161 memset(vma, 0, sizeof(*vma));
162 vma->vm_mm = current->mm; 181 vma->vm_mm = current->mm;
163 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 182 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
164 vma->vm_end = vma->vm_start + PAGE_SIZE; 183 vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -175,9 +194,8 @@ ia64_init_addr_space (void)
175 194
176 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ 195 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
177 if (!(current->personality & MMAP_PAGE_ZERO)) { 196 if (!(current->personality & MMAP_PAGE_ZERO)) {
178 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 197 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
179 if (vma) { 198 if (vma) {
180 memset(vma, 0, sizeof(*vma));
181 vma->vm_mm = current->mm; 199 vma->vm_mm = current->mm;
182 vma->vm_end = PAGE_SIZE; 200 vma->vm_end = PAGE_SIZE;
183 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); 201 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -595,13 +613,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
595 return 0; 613 return 0;
596} 614}
597 615
616#endif /* CONFIG_VIRTUAL_MEM_MAP */
617
598int __init 618int __init
599register_active_ranges(u64 start, u64 end, void *arg) 619register_active_ranges(u64 start, u64 end, void *arg)
600{ 620{
601 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT); 621 int nid = paddr_to_nid(__pa(start));
622
623 if (nid < 0)
624 nid = 0;
625#ifdef CONFIG_KEXEC
626 if (start > crashk_res.start && start < crashk_res.end)
627 start = crashk_res.end;
628 if (end > crashk_res.start && end < crashk_res.end)
629 end = crashk_res.start;
630#endif
631
632 if (start < end)
633 add_active_range(nid, __pa(start) >> PAGE_SHIFT,
634 __pa(end) >> PAGE_SHIFT);
602 return 0; 635 return 0;
603} 636}
604#endif /* CONFIG_VIRTUAL_MEM_MAP */
605 637
606static int __init 638static int __init
607count_reserved_pages (u64 start, u64 end, void *arg) 639count_reserved_pages (u64 start, u64 end, void *arg)
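
register_active_ranges() above now trims any memory range that overlaps the crashkernel reservation before calling add_active_range(), and drops the range entirely if nothing is left. A standalone sketch of just that clamping step; the reserved window below is an invented stand-in for crashk_res:

#include <stdio.h>

/* hypothetical reserved window standing in for crashk_res */
static unsigned long res_start = 0x4000000, res_end = 0x8000000;

static void clamp_range(unsigned long start, unsigned long end)
{
	/* a range starting inside the reservation begins after it ... */
	if (start > res_start && start < res_end)
		start = res_end;
	/* ... and a range ending inside the reservation stops before it */
	if (end > res_start && end < res_end)
		end = res_start;

	if (start < end)
		printf("register [%#lx, %#lx)\n", start, end);
	else
		printf("range swallowed by reservation, skipped\n");
}

int main(void)
{
	clamp_range(0x1000000, 0x6000000);	/* tail overlaps -> trimmed */
	clamp_range(0x5000000, 0xa000000);	/* head overlaps -> trimmed */
	clamp_range(0x5000000, 0x7000000);	/* fully inside -> skipped */
	return 0;
}
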
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd7962f..fcf7f93c4b61 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
38 (u64) nasid, 0, 0, 0, 0, 0, 0); 38 (u64) nasid, 0, 0, 0, 0, 0, 0);
39 39
40 if ((int)ret_stuff.v0) 40 if ((int)ret_stuff.v0)
41 panic("hubii_eint_handler(): Fatal TIO Error"); 41 panic("%s: Fatal %s Error", __FUNCTION__,
42 ((nasid & 1) ? "TIO" : "HUBII"));
42 43
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 44 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
44 (void)hubiio_crb_error_handler(hubdev_info); 45 (void)hubiio_crb_error_handler(hubdev_info);
45 } else 46 } else
46 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 47 if (nasid & 1) { /* TIO errors */
48 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
49 (u64) nasid, 0, 0, 0, 0, 0, 0);
50
51 if ((int)ret_stuff.v0)
52 panic("%s: Fatal TIO Error", __FUNCTION__);
53 } else
54 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
47 55
48 return IRQ_HANDLED; 56 return IRQ_HANDLED;
49} 57}
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index cb96b4ea7df6..8c331ca6e5c9 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,6 +13,7 @@
13#include <asm/sn/sn_sal.h> 13#include <asm/sn/sn_sal.h>
14#include "xtalk/hubdev.h" 14#include "xtalk/hubdev.h"
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <acpi/acnamesp.h>
16 17
17 18
18/* 19/*
@@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
31 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 }, 32 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
32}; 33};
33 34
35struct sn_pcidev_match {
36 u8 bus;
37 unsigned int devfn;
38 acpi_handle handle;
39};
40
34/* 41/*
35 * Perform the early IO init in PROM. 42 * Perform the early IO init in PROM.
36 */ 43 */
@@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
119 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, 126 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
120 &sn_uuid, &buffer); 127 &sn_uuid, &buffer);
121 if (ACPI_FAILURE(status)) { 128 if (ACPI_FAILURE(status)) {
122 printk(KERN_ERR "get_acpi_pcibus_ptr: " 129 printk(KERN_ERR "%s: "
123 "get_acpi_bussoft_info() failed: %d\n", 130 "acpi_get_vendor_resource() failed (0x%x) for: ",
124 status); 131 __FUNCTION__, status);
132 acpi_ns_print_node_pathname(handle, NULL);
133 printk("\n");
125 return NULL; 134 return NULL;
126 } 135 }
127 resource = buffer.pointer; 136 resource = buffer.pointer;
@@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
130 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != 139 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
131 sizeof(struct pcibus_bussoft *)) { 140 sizeof(struct pcibus_bussoft *)) {
132 printk(KERN_ERR 141 printk(KERN_ERR
133 "get_acpi_bussoft_ptr: Invalid vendor data " 142 "%s: Invalid vendor data length %d\n",
134 "length %d\n", vendor->byte_length); 143 __FUNCTION__, vendor->byte_length);
135 kfree(buffer.pointer); 144 kfree(buffer.pointer);
136 return NULL; 145 return NULL;
137 } 146 }
@@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
143} 152}
144 153
145/* 154/*
146 * sn_acpi_bus_fixup 155 * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
156 * pointers from the vendor resource using the
157 * provided acpi handle, and copy the structures
158 * into the argument buffers.
147 */ 159 */
148void 160static int
149sn_acpi_bus_fixup(struct pci_bus *bus) 161sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
162 struct sn_irq_info **sn_irq_info)
150{ 163{
151 struct pci_dev *pci_dev = NULL; 164 u64 addr;
152 struct pcibus_bussoft *prom_bussoft_ptr; 165 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
153 extern void sn_common_bus_fixup(struct pci_bus *, 166 struct sn_irq_info *irq_info, *irq_info_prom;
154 struct pcibus_bussoft *); 167 struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
168 struct acpi_resource *resource;
169 int ret = 0;
170 acpi_status status;
171 struct acpi_resource_vendor_typed *vendor;
155 172
156 if (!bus->parent) { /* If root bus */ 173 /*
157 prom_bussoft_ptr = sn_get_bussoft_ptr(bus); 174 * The pointer to this device's pcidev_info structure in
158 if (prom_bussoft_ptr == NULL) { 175 * the PROM, is in the vendor resource.
176 */
177 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
178 &sn_uuid, &buffer);
179 if (ACPI_FAILURE(status)) {
180 printk(KERN_ERR
181 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
182 __FUNCTION__, status);
183 acpi_ns_print_node_pathname(handle, NULL);
184 printk("\n");
185 return 1;
186 }
187
188 resource = buffer.pointer;
189 vendor = &resource->data.vendor_typed;
190 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
191 sizeof(struct pci_devdev_info *)) {
192 printk(KERN_ERR
193 "%s: Invalid vendor data length: %d for: ",
194 __FUNCTION__, vendor->byte_length);
195 acpi_ns_print_node_pathname(handle, NULL);
196 printk("\n");
197 ret = 1;
198 goto exit;
199 }
200
201 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
202 if (!pcidev_ptr)
203 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
204
205 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
206 pcidev_prom_ptr = __va(addr);
207 memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
208
209 /* Get the IRQ info */
210 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
211 if (!irq_info)
212 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
213
214 if (pcidev_ptr->pdi_sn_irq_info) {
215 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
216 memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
217 }
218
219 *pcidev_info = pcidev_ptr;
220 *sn_irq_info = irq_info;
221
222exit:
223 kfree(buffer.pointer);
224 return ret;
225}
226
227static unsigned int
228get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
229{
230 unsigned long adr;
231 acpi_handle child;
232 unsigned int devfn;
233 int function;
234 acpi_handle parent;
235 int slot;
236 acpi_status status;
237
238 /*
239 * Do an upward search to find the root bus device, and
240 * obtain the host devfn from the previous child device.
241 */
242 child = device_handle;
243 while (child) {
244 status = acpi_get_parent(child, &parent);
245 if (ACPI_FAILURE(status)) {
246 printk(KERN_ERR "%s: acpi_get_parent() failed "
247 "(0x%x) for: ", __FUNCTION__, status);
248 acpi_ns_print_node_pathname(child, NULL);
249 printk("\n");
250 panic("%s: Unable to find host devfn\n", __FUNCTION__);
251 }
252 if (parent == rootbus_handle)
253 break;
254 child = parent;
255 }
256 if (!child) {
257 printk(KERN_ERR "%s: Unable to find root bus for: ",
258 __FUNCTION__);
259 acpi_ns_print_node_pathname(device_handle, NULL);
260 printk("\n");
261 BUG();
262 }
263
264 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
265 if (ACPI_FAILURE(status)) {
266 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
267 __FUNCTION__, status);
268 acpi_ns_print_node_pathname(child, NULL);
269 printk("\n");
270 panic("%s: Unable to find host devfn\n", __FUNCTION__);
271 }
272
273 slot = (adr >> 16) & 0xffff;
274 function = adr & 0xffff;
275 devfn = PCI_DEVFN(slot, function);
276 return devfn;
277}
278
279/*
280 * find_matching_device - Callback routine to find the ACPI device
281 * that matches up with our pci_dev device.
282 * Matching is done on bus number and devfn.
283 * To find the bus number for a particular
284 * ACPI device, we must look at the _BBN method
285 * of its parent.
286 */
287static acpi_status
288find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
289{
290 unsigned long bbn = -1;
291 unsigned long adr;
292 acpi_handle parent = NULL;
293 acpi_status status;
294 unsigned int devfn;
295 int function;
296 int slot;
297 struct sn_pcidev_match *info = context;
298
299 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
300 &adr);
301 if (ACPI_SUCCESS(status)) {
302 status = acpi_get_parent(handle, &parent);
303 if (ACPI_FAILURE(status)) {
159 printk(KERN_ERR 304 printk(KERN_ERR
160 "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to " 305 "%s: acpi_get_parent() failed (0x%x) for: ",
161 "obtain prom_bussoft_ptr\n", 306 __FUNCTION__, status);
162 pci_domain_nr(bus), bus->number); 307 acpi_ns_print_node_pathname(handle, NULL);
163 return; 308 printk("\n");
309 return AE_OK;
310 }
311 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
312 NULL, &bbn);
313 if (ACPI_FAILURE(status)) {
314 printk(KERN_ERR
315 "%s: Failed to find _BBN in parent of: ",
316 __FUNCTION__);
317 acpi_ns_print_node_pathname(handle, NULL);
318 printk("\n");
319 return AE_OK;
320 }
321
322 slot = (adr >> 16) & 0xffff;
323 function = adr & 0xffff;
324 devfn = PCI_DEVFN(slot, function);
325 if ((info->devfn == devfn) && (info->bus == bbn)) {
326 /* We have a match! */
327 info->handle = handle;
328 return 1;
164 } 329 }
165 sn_common_bus_fixup(bus, prom_bussoft_ptr);
166 } 330 }
167 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 331 return AE_OK;
168 sn_pci_fixup_slot(pci_dev); 332}
333
334/*
335 * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
336 * device matching the specified pci_dev,
337 * and return the pcidev info and irq info.
338 */
339int
340sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
341 struct sn_irq_info **sn_irq_info)
342{
343 unsigned int host_devfn;
344 struct sn_pcidev_match pcidev_match;
345 acpi_handle rootbus_handle;
346 unsigned long segment;
347 acpi_status status;
348
349 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
350 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
351 &segment);
352 if (ACPI_SUCCESS(status)) {
353 if (segment != pci_domain_nr(dev)) {
354 printk(KERN_ERR
355 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
356 __FUNCTION__, segment, pci_domain_nr(dev));
357 acpi_ns_print_node_pathname(rootbus_handle, NULL);
358 printk("\n");
359 return 1;
360 }
361 } else {
362 printk(KERN_ERR "%s: Unable to get __SEG from: ",
363 __FUNCTION__);
364 acpi_ns_print_node_pathname(rootbus_handle, NULL);
365 printk("\n");
366 return 1;
367 }
368
369 /*
370 * We want to search all devices in this segment/domain
371 * of the ACPI namespace for the matching ACPI device,
372 * which holds the pcidev_info pointer in its vendor resource.
373 */
374 pcidev_match.bus = dev->bus->number;
375 pcidev_match.devfn = dev->devfn;
376 pcidev_match.handle = NULL;
377
378 acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
379 find_matching_device, &pcidev_match, NULL);
380
381 if (!pcidev_match.handle) {
382 printk(KERN_ERR
383 "%s: Could not find matching ACPI device for %s.\n",
384 __FUNCTION__, pci_name(dev));
385 return 1;
169 } 386 }
387
388 if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
389 return 1;
390
391 /* Build up the pcidev_info.pdi_slot_host_handle */
392 host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
393 (*pcidev_info)->pdi_slot_host_handle =
394 ((unsigned long) pci_domain_nr(dev) << 40) |
395 /* bus == 0 */
396 host_devfn;
397 return 0;
170} 398}
171 399
172/* 400/*
173 * sn_acpi_slot_fixup - Perform any SN specific slot fixup. 401 * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
402 * Perform any SN specific slot fixup.
174 * At present there does not appear to be 403 * At present there does not appear to be
175 * any generic way to handle a ROM image 404 * any generic way to handle a ROM image
176 * that has been shadowed by the PROM, so 405 * that has been shadowed by the PROM, so
@@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
179 */ 408 */
180 409
181void 410void
182sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 411sn_acpi_slot_fixup(struct pci_dev *dev)
183{ 412{
184 void __iomem *addr; 413 void __iomem *addr;
414 struct pcidev_info *pcidev_info = NULL;
415 struct sn_irq_info *sn_irq_info = NULL;
185 size_t size; 416 size_t size;
186 417
418 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
419 panic("%s: Failure obtaining pcidev_info for %s\n",
420 __FUNCTION__, pci_name(dev));
421 }
422
187 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 423 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
188 /* 424 /*
189 * A valid ROM image exists and has been shadowed by the 425 * A valid ROM image exists and has been shadowed by the
@@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
200 (unsigned long) addr + size; 436 (unsigned long) addr + size;
201 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; 437 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
202 } 438 }
439 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
203} 440}
204 441
442EXPORT_SYMBOL(sn_acpi_slot_fixup);
443
205static struct acpi_driver acpi_sn_hubdev_driver = { 444static struct acpi_driver acpi_sn_hubdev_driver = {
206 .name = "SGI HUBDEV Driver", 445 .name = "SGI HUBDEV Driver",
207 .ids = "SGIHUB,SGITIO", 446 .ids = "SGIHUB,SGITIO",
@@ -212,6 +451,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
212 451
213 452
214/* 453/*
454 * sn_acpi_bus_fixup - Perform SN specific setup of software structs
455 * (pcibus_bussoft, pcidev_info) and hardware
456 * registers, for the specified bus and devices under it.
457 */
458void
459sn_acpi_bus_fixup(struct pci_bus *bus)
460{
461 struct pci_dev *pci_dev = NULL;
462 struct pcibus_bussoft *prom_bussoft_ptr;
463
464 if (!bus->parent) { /* If root bus */
465 prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
466 if (prom_bussoft_ptr == NULL) {
467 printk(KERN_ERR
468 "%s: 0x%04x:0x%02x Unable to "
469 "obtain prom_bussoft_ptr\n",
470 __FUNCTION__, pci_domain_nr(bus), bus->number);
471 return;
472 }
473 sn_common_bus_fixup(bus, prom_bussoft_ptr);
474 }
475 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
476 sn_acpi_slot_fixup(pci_dev);
477 }
478}
479
480/*
215 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the 481 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
216 * nodes and root buses in the DSDT. As a result, bus scanning 482 * nodes and root buses in the DSDT. As a result, bus scanning
217 * will be initiated by the Linux ACPI code. 483 * will be initiated by the Linux ACPI code.
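
Both get_host_devfn() and find_matching_device() above recover a PCI devfn from an ACPI _ADR value, whose upper 16 bits carry the slot and lower 16 bits the function. A quick standalone illustration of that decoding, with the kernel's PCI_DEVFN() packing reproduced inline and a made-up _ADR value:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	unsigned long adr = 0x00020001;	/* hypothetical _ADR: slot 2, function 1 */
	int slot = (adr >> 16) & 0xffff;
	int function = adr & 0xffff;
	unsigned int devfn = PCI_DEVFN(slot, function);

	printf("slot %d function %d -> devfn %#x\n", slot, function, devfn);
	return 0;
}
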
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index d4dd8f4b6b8d..d48bcd83253c 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,14 +26,10 @@
26#include <linux/acpi.h> 26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h> 27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h> 28#include <asm/sn/acpi.h>
29#include "acpi/acglobal.h"
29 30
30extern void sn_init_cpei_timer(void); 31extern void sn_init_cpei_timer(void);
31extern void register_sn_procfs(void); 32extern void register_sn_procfs(void);
32extern void sn_acpi_bus_fixup(struct pci_bus *);
33extern void sn_bus_fixup(struct pci_bus *);
34extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
35extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
36extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
37extern void sn_io_acpi_init(void); 33extern void sn_io_acpi_init(void);
38extern void sn_io_init(void); 34extern void sn_io_init(void);
39 35
@@ -48,6 +44,9 @@ struct sysdata_el {
48 44
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */ 45int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50 46
47int sn_acpi_rev; /* SN ACPI revision */
48EXPORT_SYMBOL_GPL(sn_acpi_rev);
49
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ 50struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52 51
53/* 52/*
@@ -99,25 +98,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
99} 98}
100 99
101/* 100/*
102 * Retrieve the pci device information given the bus and device|function number.
103 */
104static inline u64
105sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
106 u64 sn_irq_info)
107{
108 struct ia64_sal_retval ret_stuff;
109 ret_stuff.status = 0;
110 ret_stuff.v0 = 0;
111
112 SAL_CALL_NOLOCK(ret_stuff,
113 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
114 (u64) segment, (u64) bus_number, (u64) devfn,
115 (u64) pci_dev,
116 sn_irq_info, 0, 0);
117 return ret_stuff.v0;
118}
119
120/*
121 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified 101 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
122 * device. 102 * device.
123 */ 103 */
@@ -249,50 +229,25 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
249} 229}
250 230
251/* 231/*
252 * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent 232 * sn_pci_fixup_slot()
253 * with the Linux PCI abstraction layer. Resources
254 * acquired from our PCI provider include PIO maps
255 * to BAR space and interrupt objects.
256 */ 233 */
257void sn_pci_fixup_slot(struct pci_dev *dev) 234void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
235 struct sn_irq_info *sn_irq_info)
258{ 236{
259 int segment = pci_domain_nr(dev->bus); 237 int segment = pci_domain_nr(dev->bus);
260 int status = 0;
261 struct pcibus_bussoft *bs; 238 struct pcibus_bussoft *bs;
262 struct pci_bus *host_pci_bus; 239 struct pci_bus *host_pci_bus;
263 struct pci_dev *host_pci_dev; 240 struct pci_dev *host_pci_dev;
264 struct pcidev_info *pcidev_info; 241 unsigned int bus_no, devfn;
265 struct sn_irq_info *sn_irq_info;
266 unsigned int bus_no, devfn;
267 242
268 pci_dev_get(dev); /* for the sysdata pointer */ 243 pci_dev_get(dev); /* for the sysdata pointer */
269 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
270 if (!pcidev_info)
271 BUG(); /* Cannot afford to run out of memory */
272
273 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
274 if (!sn_irq_info)
275 BUG(); /* Cannot afford to run out of memory */
276
277 /* Call to retrieve pci device information needed by kernel. */
278 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
279 dev->devfn,
280 (u64) __pa(pcidev_info),
281 (u64) __pa(sn_irq_info));
282 if (status)
283 BUG(); /* Cannot get platform pci device information */
284 244
285 /* Add pcidev_info to list in pci_controller.platform_data */ 245 /* Add pcidev_info to list in pci_controller.platform_data */
286 list_add_tail(&pcidev_info->pdi_list, 246 list_add_tail(&pcidev_info->pdi_list,
287 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); 247 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
288
289 if (SN_ACPI_BASE_SUPPORT())
290 sn_acpi_slot_fixup(dev, pcidev_info);
291 else
292 sn_more_slot_fixup(dev, pcidev_info);
293 /* 248 /*
294 * Using the PROMs values for the PCI host bus, get the Linux 249 * Using the PROMs values for the PCI host bus, get the Linux
295 * PCI host_pci_dev struct and set up host bus linkages 250 * PCI host_pci_dev struct and set up host bus linkages
296 */ 251 */
297 252
298 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; 253 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
489 sprintf(address, "%s^%d", address, geo_slot(geoid)); 444 sprintf(address, "%s^%d", address, geo_slot(geoid));
490} 445}
491 446
492/*
493 * sn_pci_fixup_bus() - Perform SN specific setup of software structs
494 * (pcibus_bussoft, pcidev_info) and hardware
495 * registers, for the specified bus and devices under it.
496 */
497void __devinit 447void __devinit
498sn_pci_fixup_bus(struct pci_bus *bus) 448sn_pci_fixup_bus(struct pci_bus *bus)
499{ 449{
@@ -519,6 +469,15 @@ sn_io_early_init(void)
519 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) 469 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
520 return 0; 470 return 0;
521 471
472 /* we set the acpi revision to that of the DSDT table OEM rev. */
473 {
474 struct acpi_table_header *header = NULL;
475
476 acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
477 BUG_ON(header == NULL);
478 sn_acpi_rev = header->oem_revision;
479 }
480
522 /* 481 /*
523 * prime sn_pci_provider[]. Individial provider init routines will 482 * prime sn_pci_provider[]. Individial provider init routines will
524 * override their respective default entries. 483 * override their respective default entries.
@@ -544,8 +503,12 @@ sn_io_early_init(void)
544 register_sn_procfs(); 503 register_sn_procfs();
545#endif 504#endif
546 505
547 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", 506 {
548 acpi_gbl_DSDT->oem_revision); 507 struct acpi_table_header *header;
508 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
509 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
510 header->oem_revision);
511 }
549 if (SN_ACPI_BASE_SUPPORT()) 512 if (SN_ACPI_BASE_SUPPORT())
550 sn_io_acpi_init(); 513 sn_io_acpi_init();
551 else 514 else
@@ -605,7 +568,6 @@ sn_io_late_init(void)
605 568
606fs_initcall(sn_io_late_init); 569fs_initcall(sn_io_late_init);
607 570
608EXPORT_SYMBOL(sn_pci_fixup_slot);
609EXPORT_SYMBOL(sn_pci_unfixup_slot); 571EXPORT_SYMBOL(sn_pci_unfixup_slot);
610EXPORT_SYMBOL(sn_bus_store_sysdata); 572EXPORT_SYMBOL(sn_bus_store_sysdata);
611EXPORT_SYMBOL(sn_bus_free_sysdata); 573EXPORT_SYMBOL(sn_bus_free_sysdata);
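
sn_pci_fixup_slot() above now receives its pcidev_info from the caller and recovers the host bus number from pdi_slot_host_handle, which the ACPI path packs as segment << 40 with the host devfn in the low bits and the bus field (bits 32-39) left at zero. A small sketch of that packing and unpacking; the devfn mask is inferred as the natural complement of the bus extraction shown in the hunk, and the sample values are arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned long long segment = 0x1;	/* arbitrary example values */
	unsigned long long host_devfn = 0x20;	/* devfn of the host bridge slot */

	/* packing as in sn_acpi_get_pcidev_info(): bus is 0, so bits 32..39 stay clear */
	unsigned long long handle = (segment << 40) | host_devfn;

	/* unpacking as in sn_pci_fixup_slot() */
	unsigned int bus_no = (handle >> 32) & 0xff;
	unsigned int devfn  = (unsigned int)(handle & 0xffffffff);

	printf("handle %#llx -> bus %u, devfn %#x\n", handle, bus_no, devfn);
	return 0;
}
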
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 9ad843e0383b..600be3ebae05 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
56 return ret_stuff.v0; 56 return ret_stuff.v0;
57} 57}
58 58
59/*
60 * Retrieve the pci device information given the bus and device|function number.
61 */
62static inline u64
63sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
64 u64 sn_irq_info)
65{
66 struct ia64_sal_retval ret_stuff;
67 ret_stuff.status = 0;
68 ret_stuff.v0 = 0;
69
70 SAL_CALL_NOLOCK(ret_stuff,
71 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
72 (u64) segment, (u64) bus_number, (u64) devfn,
73 (u64) pci_dev,
74 sn_irq_info, 0, 0);
75 return ret_stuff.v0;
76}
77
59 78
60/* 79/*
61 * sn_fixup_ionodes() - This routine initializes the HUB data structure for 80 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
172} 191}
173 192
174/* 193/*
175 * sn_more_slot_fixup() - We are not running with an ACPI capable PROM, 194 * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
176 * and need to convert the pci_dev->resource 195 * and need to convert the pci_dev->resource
177 * 'start' and 'end' addresses to mapped addresses, 196 * 'start' and 'end' addresses to mapped addresses,
178 * and setup the pci_controller->window array entries. 197 * and setup the pci_controller->window array entries.
179 */ 198 */
180void 199void
181sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 200sn_io_slot_fixup(struct pci_dev *dev)
182{ 201{
183 unsigned int count = 0; 202 unsigned int count = 0;
184 int idx; 203 int idx;
185 s64 pci_addrs[PCI_ROM_RESOURCE + 1]; 204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
186 unsigned long addr, end, size, start; 205 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info;
208 int status;
209
210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
211 if (!pcidev_info)
212 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
213
214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
215 if (!sn_irq_info)
216 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
217
218 /* Call to retrieve pci device information needed by kernel. */
219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
220 (u64) dev->bus->number,
221 dev->devfn,
222 (u64) __pa(pcidev_info),
223 (u64) __pa(sn_irq_info));
224
225 if (status)
226 BUG(); /* Cannot get platform pci device information */
227
187 228
188 /* Copy over PIO Mapped Addresses */ 229 /* Copy over PIO Mapped Addresses */
189 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 230 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
219 */ 260 */
220 if (count > 0) 261 if (count > 0)
221 sn_pci_window_fixup(dev, count, pci_addrs); 262 sn_pci_window_fixup(dev, count, pci_addrs);
263
264 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
222} 265}
223 266
267EXPORT_SYMBOL(sn_io_slot_fixup);
268
224/* 269/*
225 * sn_pci_controller_fixup() - This routine sets up a bus's resources 270 * sn_pci_controller_fixup() - This routine sets up a bus's resources
226 * consistent with the Linux PCI abstraction layer. 271 * consistent with the Linux PCI abstraction layer.
@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
272{ 317{
273 struct pci_dev *pci_dev = NULL; 318 struct pci_dev *pci_dev = NULL;
274 struct pcibus_bussoft *prom_bussoft_ptr; 319 struct pcibus_bussoft *prom_bussoft_ptr;
275 extern void sn_common_bus_fixup(struct pci_bus *,
276 struct pcibus_bussoft *);
277
278 320
279 if (!bus->parent) { /* If root bus */ 321 if (!bus->parent) { /* If root bus */
280 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; 322 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
291 prom_bussoft_ptr->bs_legacy_mem); 333 prom_bussoft_ptr->bs_legacy_mem);
292 } 334 }
293 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 335 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
294 sn_pci_fixup_slot(pci_dev); 336 sn_io_slot_fixup(pci_dev);
295 } 337 }
296 338
297} 339}
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 4aa4f301d56d..ab7e2fd40798 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
@@ -26,9 +26,10 @@
26 * @port: port to convert 26 * @port: port to convert
27 * 27 *
28 * Legacy in/out instructions are converted to ld/st instructions 28 * Legacy in/out instructions are converted to ld/st instructions
29 * on IA64. This routine will convert a port number into a valid 29 * on IA64. This routine will convert a port number into a valid
30 * SN i/o address. Used by sn_in*() and sn_out*(). 30 * SN i/o address. Used by sn_in*() and sn_out*().
31 */ 31 */
32
32void *sn_io_addr(unsigned long port) 33void *sn_io_addr(unsigned long port)
33{ 34{
34 if (!IS_RUNNING_ON_SIMULATOR()) { 35 if (!IS_RUNNING_ON_SIMULATOR()) {
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index b3a435fd70fb..ea3dc38d73fd 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -59,13 +59,12 @@ void sn_teardown_msi_irq(unsigned int irq)
59 sn_intr_free(nasid, widget, sn_irq_info); 59 sn_intr_free(nasid, widget, sn_irq_info);
60 sn_msi_info[irq].sn_irq_info = NULL; 60 sn_msi_info[irq].sn_irq_info = NULL;
61 61
62 return; 62 destroy_irq(irq);
63} 63}
64 64
65int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 65int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
66{ 66{
67 struct msi_msg msg; 67 struct msi_msg msg;
68 struct msi_desc *entry;
69 int widget; 68 int widget;
70 int status; 69 int status;
71 nasid_t nasid; 70 nasid_t nasid;
@@ -73,8 +72,8 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
73 struct sn_irq_info *sn_irq_info; 72 struct sn_irq_info *sn_irq_info;
74 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); 73 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
75 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 74 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
75 int irq;
76 76
77 entry = get_irq_data(irq);
78 if (!entry->msi_attrib.is_64) 77 if (!entry->msi_attrib.is_64)
79 return -EINVAL; 78 return -EINVAL;
80 79
@@ -84,6 +83,11 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
84 if (provider == NULL || provider->dma_map_consistent == NULL) 83 if (provider == NULL || provider->dma_map_consistent == NULL)
85 return -EINVAL; 84 return -EINVAL;
86 85
86 irq = create_irq();
87 if (irq < 0)
88 return irq;
89
90 set_irq_msi(irq, entry);
87 /* 91 /*
88 * Set up the vector plumbing. Let the prom (via sn_intr_alloc) 92 * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
89 * decide which cpu to direct this msi at by default. 93 * decide which cpu to direct this msi at by default.
@@ -95,12 +99,15 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
95 SWIN_WIDGETNUM(bussoft->bs_base); 99 SWIN_WIDGETNUM(bussoft->bs_base);
96 100
97 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 101 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
98 if (! sn_irq_info) 102 if (! sn_irq_info) {
103 destroy_irq(irq);
99 return -ENOMEM; 104 return -ENOMEM;
105 }
100 106
101 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1); 107 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
102 if (status) { 108 if (status) {
103 kfree(sn_irq_info); 109 kfree(sn_irq_info);
110 destroy_irq(irq);
104 return -ENOMEM; 111 return -ENOMEM;
105 } 112 }
106 113
@@ -121,6 +128,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
121 if (! bus_addr) { 128 if (! bus_addr) {
122 sn_intr_free(nasid, widget, sn_irq_info); 129 sn_intr_free(nasid, widget, sn_irq_info);
123 kfree(sn_irq_info); 130 kfree(sn_irq_info);
131 destroy_irq(irq);
124 return -ENOMEM; 132 return -ENOMEM;
125 } 133 }
126 134
@@ -139,7 +147,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
139 write_msi_msg(irq, &msg); 147 write_msi_msg(irq, &msg);
140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 148 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
141 149
142 return 0; 150 return irq;
143} 151}
144 152
145#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index d9d306c79f2d..601747b1e22a 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -455,7 +455,7 @@ static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
455 return seq_open(file, &sn2_ptc_seq_ops); 455 return seq_open(file, &sn2_ptc_seq_ops);
456} 456}
457 457
458static struct file_operations proc_sn2_ptc_operations = { 458static const struct file_operations proc_sn2_ptc_operations = {
459 .open = sn2_ptc_proc_open, 459 .open = sn2_ptc_proc_open,
460 .read = seq_read, 460 .read = seq_read,
461 .llseek = seq_lseek, 461 .llseek = seq_lseek,
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 33367996d72d..6da9854751cd 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -865,7 +865,7 @@ error:
865 return r; 865 return r;
866} 866}
867 867
868static struct file_operations sn_hwperf_fops = { 868static const struct file_operations sn_hwperf_fops = {
869 .ioctl = sn_hwperf_ioctl, 869 .ioctl = sn_hwperf_ioctl,
870}; 870};
871 871
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index 43ddc2eccb96..62b3e9a496ac 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -89,61 +89,80 @@ static int coherence_id_open(struct inode *inode, struct file *file)
89 return single_open(file, coherence_id_show, NULL); 89 return single_open(file, coherence_id_show, NULL);
90} 90}
91 91
92static struct proc_dir_entry
93*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent,
94 int (*openfunc)(struct inode *, struct file *),
95 int (*releasefunc)(struct inode *, struct file *),
96 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *))
97{
98 struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
99
100 if (e) {
101 struct file_operations *f;
102
103 f = kzalloc(sizeof(*f), GFP_KERNEL);
104 if (f) {
105 f->open = openfunc;
106 f->read = seq_read;
107 f->llseek = seq_lseek;
108 f->release = releasefunc;
109 f->write = write;
110 e->proc_fops = f;
111 }
112 }
113
114 return e;
115}
116
117/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */ 92/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
118extern int sn_topology_open(struct inode *, struct file *); 93extern int sn_topology_open(struct inode *, struct file *);
119extern int sn_topology_release(struct inode *, struct file *); 94extern int sn_topology_release(struct inode *, struct file *);
120 95
96static const struct file_operations proc_partition_id_fops = {
97 .open = partition_id_open,
98 .read = seq_read,
99 .llseek = seq_lseek,
100 .release = single_release,
101};
102
103static const struct file_operations proc_system_sn_fops = {
104 .open = system_serial_number_open,
105 .read = seq_read,
106 .llseek = seq_lseek,
107 .release = single_release,
108};
109
110static const struct file_operations proc_license_id_fops = {
111 .open = licenseID_open,
112 .read = seq_read,
113 .llseek = seq_lseek,
114 .release = single_release,
115};
116
117static const struct file_operations proc_sn_force_intr_fops = {
118 .open = sn_force_interrupt_open,
119 .read = seq_read,
120 .write = sn_force_interrupt_write_proc,
121 .llseek = seq_lseek,
122 .release = single_release,
123};
124
125static const struct file_operations proc_coherence_id_fops = {
126 .open = coherence_id_open,
127 .read = seq_read,
128 .llseek = seq_lseek,
129 .release = single_release,
130};
131
132static const struct file_operations proc_sn_topo_fops = {
133 .open = sn_topology_open,
134 .read = seq_read,
135 .llseek = seq_lseek,
136 .release = sn_topology_release,
137};
138
121void register_sn_procfs(void) 139void register_sn_procfs(void)
122{ 140{
123 static struct proc_dir_entry *sgi_proc_dir = NULL; 141 static struct proc_dir_entry *sgi_proc_dir = NULL;
142 struct proc_dir_entry *pde;
124 143
125 BUG_ON(sgi_proc_dir != NULL); 144 BUG_ON(sgi_proc_dir != NULL);
126 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) 145 if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
127 return; 146 return;
128 147
129 sn_procfs_create_entry("partition_id", sgi_proc_dir, 148 pde = create_proc_entry("partition_id", 0444, sgi_proc_dir);
130 partition_id_open, single_release, NULL); 149 if (pde)
131 150 pde->proc_fops = &proc_partition_id_fops;
132 sn_procfs_create_entry("system_serial_number", sgi_proc_dir, 151 pde = create_proc_entry("system_serial_number", 0444, sgi_proc_dir);
133 system_serial_number_open, single_release, NULL); 152 if (pde)
134 153 pde->proc_fops = &proc_system_sn_fops;
135 sn_procfs_create_entry("licenseID", sgi_proc_dir, 154 pde = create_proc_entry("licenseID", 0444, sgi_proc_dir);
136 licenseID_open, single_release, NULL); 155 if (pde)
137 156 pde->proc_fops = &proc_license_id_fops;
138 sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, 157 pde = create_proc_entry("sn_force_interrupt", 0644, sgi_proc_dir);
139 sn_force_interrupt_open, single_release, 158 if (pde)
140 sn_force_interrupt_write_proc); 159 pde->proc_fops = &proc_sn_force_intr_fops;
141 160 pde = create_proc_entry("coherence_id", 0444, sgi_proc_dir);
142 sn_procfs_create_entry("coherence_id", sgi_proc_dir, 161 if (pde)
143 coherence_id_open, single_release, NULL); 162 pde->proc_fops = &proc_coherence_id_fops;
144 163 pde = create_proc_entry("sn_topology", 0444, sgi_proc_dir);
145 sn_procfs_create_entry("sn_topology", sgi_proc_dir, 164 if (pde)
146 sn_topology_open, sn_topology_release, NULL); 165 pde->proc_fops = &proc_sn_topo_fops;
147} 166}
148 167
149#endif /* CONFIG_PROC_FS */ 168#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 7a387d237363..68355ef6f841 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -101,67 +101,57 @@ static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
-		1,
-		"hb_interval",
-		&xpc_hb_interval,
-		sizeof(int),
-		0644,
-		NULL,
-		&proc_dointvec_minmax,
-		&sysctl_intvec,
-		NULL,
-		&xpc_hb_min_interval,
-		&xpc_hb_max_interval
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "hb_interval",
+		.data = &xpc_hb_interval,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+		.strategy = &sysctl_intvec,
+		.extra1 = &xpc_hb_min_interval,
+		.extra2 = &xpc_hb_max_interval
 	},
 	{
-		2,
-		"hb_check_interval",
-		&xpc_hb_check_interval,
-		sizeof(int),
-		0644,
-		NULL,
-		&proc_dointvec_minmax,
-		&sysctl_intvec,
-		NULL,
-		&xpc_hb_check_min_interval,
-		&xpc_hb_check_max_interval
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "hb_check_interval",
+		.data = &xpc_hb_check_interval,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+		.strategy = &sysctl_intvec,
+		.extra1 = &xpc_hb_check_min_interval,
+		.extra2 = &xpc_hb_check_max_interval
 	},
-	{0}
+	{}
 };
 static ctl_table xpc_sys_xpc_dir[] = {
 	{
-		1,
-		"hb",
-		NULL,
-		0,
-		0555,
-		xpc_sys_xpc_hb_dir
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "hb",
+		.mode = 0555,
+		.child = xpc_sys_xpc_hb_dir
 	},
 	{
-		2,
-		"disengage_request_timelimit",
-		&xpc_disengage_request_timelimit,
-		sizeof(int),
-		0644,
-		NULL,
-		&proc_dointvec_minmax,
-		&sysctl_intvec,
-		NULL,
-		&xpc_disengage_request_min_timelimit,
-		&xpc_disengage_request_max_timelimit
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "disengage_request_timelimit",
+		.data = &xpc_disengage_request_timelimit,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec_minmax,
+		.strategy = &sysctl_intvec,
+		.extra1 = &xpc_disengage_request_min_timelimit,
+		.extra2 = &xpc_disengage_request_max_timelimit
 	},
-	{0}
+	{}
 };
 static ctl_table xpc_sys_dir[] = {
 	{
-		1,
-		"xpc",
-		NULL,
-		0,
-		0555,
-		xpc_sys_xpc_dir
+		.ctl_name = CTL_UNNUMBERED,
+		.procname = "xpc",
+		.mode = 0555,
+		.child = xpc_sys_xpc_dir
 	},
-	{0}
+	{}
 };
 static struct ctl_table_header *xpc_sysctl;
 
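The hunk above converts positional initializers to C99 designated initializers: each entry names the struct ctl_table members it sets, so it no longer depends on member order, and every member that is not named (the old NULL placeholders) simply defaults to zero. A hedged, generic sketch of the resulting style follows; example_value, example and proc_dointvec usage here are illustrative, not part of this patch.

#include <linux/sysctl.h>

static int example_value;

/* Sketch: one designated-initializer ctl_table entry plus the empty
 * terminating entry ({} replaces the older {0} sentinel). */
static ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,	/* no fixed binary sysctl number */
		.procname	= "example_value",	/* visible under /proc/sys */
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{}
};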
@@ -1251,7 +1241,7 @@ xpc_init(void)
 	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
 	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
 
-	xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
+	xpc_sysctl = register_sysctl_table(xpc_sys_dir);
 
 	/*
 	 * The first few fields of each entry of xpc_partitions[] need to
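register_sysctl_table() now takes only the table pointer; the old second argument is gone, which is why the call site above drops the trailing 1. A minimal, hedged sketch of registration and teardown with the single-argument form is shown below; the example_sysctl_init/example_sysctl_exit wrappers and the -ENOMEM handling are illustrative and not part of this patch.

static struct ctl_table_header *example_sysctl_header;

static int __init example_sysctl_init(void)
{
	/* Single-argument form used by the call site above. */
	example_sysctl_header = register_sysctl_table(xpc_sys_dir);
	return example_sysctl_header ? 0 : -ENOMEM;
}

static void __exit example_sysctl_exit(void)
{
	if (example_sysctl_header)
		unregister_sysctl_table(example_sysctl_header);
}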
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 6846dc9b432d..04a8256017eb 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -20,7 +20,8 @@
 #include "xtalk/hubdev.h"
 
 int
-sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
+sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
+		      char **ssdt)
 {
 	struct ia64_sal_retval ret_stuff;
 	u64 busnum;
@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
 	segment = soft->pbi_buscommon.bs_persist_segment;
 	busnum = soft->pbi_buscommon.bs_persist_busnum;
 	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
-			busnum, (u64) device, (u64) resp, 0, 0, 0);
+			busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
+			0, 0);
 
 	return (int)ret_stuff.v0;
 }
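The new argument is passed to SAL through ia64_tpa(), i.e. as the physical address of the ssdt pointer, since SAL firmware works with physical rather than kernel virtual addresses. The following is only a hedged sketch of how a caller might use the updated helper; sn_slot_enable(), its error handling, and the reading that SAL returns an SSDT image through the pointer are assumptions, with only sal_pcibr_slot_enable() and its parameter order taken from the hunk above.

/* Sketch: invoking the updated sal_pcibr_slot_enable() with the new
 * ssdt argument. */
static int sn_slot_enable(struct pcibus_info *soft, int device, void *resp)
{
	char *ssdt = NULL;
	int status;

	status = sal_pcibr_slot_enable(soft, device, resp, &ssdt);
	if (status)
		printk(KERN_ERR "slot enable failed for device %d (%d)\n",
		       device, status);
	/* On success, ssdt is presumed to point at data returned by SAL
	 * (hedged reading of this interface change). */
	return status;
}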