Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/kernel/pci.c | 4
-rw-r--r-- arch/i386/defconfig | 3
-rw-r--r-- arch/i386/kernel/acpi/boot.c | 235
-rw-r--r-- arch/i386/kernel/acpi/earlyquirk.c | 4
-rw-r--r-- arch/i386/kernel/cpu/cpufreq/longhaul.c | 15
-rw-r--r-- arch/i386/kernel/io_apic.c | 17
-rw-r--r-- arch/i386/kernel/mpparse.c | 4
-rw-r--r-- arch/i386/kernel/srat.c | 84
-rw-r--r-- arch/i386/mach-es7000/es7000.h | 9
-rw-r--r-- arch/i386/mach-es7000/es7000plat.c | 53
-rw-r--r-- arch/i386/pci/mmconfig.c | 24
-rw-r--r-- arch/ia64/Kconfig | 9
-rw-r--r-- arch/ia64/hp/common/hwsw_iommu.c | 4
-rw-r--r-- arch/ia64/kernel/acpi.c | 200
-rw-r--r-- arch/ia64/kernel/crash.c | 16
-rw-r--r-- arch/ia64/kernel/crash_dump.c | 3
-rw-r--r-- arch/ia64/kernel/efi.c | 2
-rw-r--r-- arch/ia64/kernel/entry.S | 2
-rw-r--r-- arch/ia64/kernel/iosapic.c | 5
-rw-r--r-- arch/ia64/kernel/machine_kexec.c | 15
-rw-r--r-- arch/ia64/kernel/msi_ia64.c | 19
-rw-r--r-- arch/ia64/kernel/process.c | 16
-rw-r--r-- arch/ia64/kernel/ptrace.c | 14
-rw-r--r-- arch/ia64/kernel/setup.c | 31
-rw-r--r-- arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/ia64/mm/contig.c | 76
-rw-r--r-- arch/ia64/mm/discontig.c | 46
-rw-r--r-- arch/ia64/mm/init.c | 38
-rw-r--r-- arch/ia64/sn/kernel/huberror.c | 16
-rw-r--r-- arch/ia64/sn/kernel/io_acpi_init.c | 314
-rw-r--r-- arch/ia64/sn/kernel/io_common.c | 90
-rw-r--r-- arch/ia64/sn/kernel/io_init.c | 54
-rw-r--r-- arch/ia64/sn/kernel/iomv.c | 5
-rw-r--r-- arch/ia64/sn/kernel/msi_sn.c | 20
-rw-r--r-- arch/ia64/sn/pci/pcibr/pcibr_provider.c | 6
-rw-r--r-- arch/mips/Kconfig | 103
-rw-r--r-- arch/mips/Kconfig.debug | 8
-rw-r--r-- arch/mips/arc/identify.c | 2
-rw-r--r-- arch/mips/arc/memory.c | 18
-rw-r--r-- arch/mips/au1000/common/irq.c | 8
-rw-r--r-- arch/mips/au1000/common/pci.c | 18
-rw-r--r-- arch/mips/au1000/common/prom.c | 3
-rw-r--r-- arch/mips/au1000/common/setup.c | 19
-rw-r--r-- arch/mips/au1000/pb1100/board_setup.c | 93
-rw-r--r-- arch/mips/au1000/pb1200/irqmap.c | 32
-rw-r--r-- arch/mips/basler/excite/excite_irq.c | 6
-rw-r--r-- arch/mips/cobalt/irq.c | 2
-rw-r--r-- arch/mips/cobalt/setup.c | 3
-rw-r--r-- arch/mips/ddb5xxx/common/prom.c | 3
-rw-r--r-- arch/mips/ddb5xxx/ddb5477/irq.c | 9
-rw-r--r-- arch/mips/ddb5xxx/ddb5477/irq_5477.c | 2
-rw-r--r-- arch/mips/dec/ioasic-irq.c | 4
-rw-r--r-- arch/mips/dec/kn02-irq.c | 2
-rw-r--r-- arch/mips/dec/prom/memory.c | 17
-rw-r--r-- arch/mips/dec/setup.c | 12
-rw-r--r-- arch/mips/emma2rh/common/irq_emma2rh.c | 2
-rw-r--r-- arch/mips/emma2rh/markeins/irq.c | 2
-rw-r--r-- arch/mips/emma2rh/markeins/irq_markeins.c | 4
-rw-r--r-- arch/mips/gt64120/ev64120/irq.c | 2
-rw-r--r-- arch/mips/gt64120/ev64120/setup.c | 3
-rw-r--r-- arch/mips/gt64120/momenco_ocelot/dbg_io.c | 4
-rw-r--r-- arch/mips/gt64120/momenco_ocelot/irq.c | 4
-rw-r--r-- arch/mips/gt64120/momenco_ocelot/prom.c | 3
-rw-r--r-- arch/mips/gt64120/wrppmc/irq.c | 2
-rw-r--r-- arch/mips/gt64120/wrppmc/setup.c | 3
-rw-r--r-- arch/mips/jazz/irq.c | 2
-rw-r--r-- arch/mips/jmr3927/common/prom.c | 3
-rw-r--r-- arch/mips/jmr3927/rbhma3100/irq.c | 2
-rw-r--r-- arch/mips/jmr3927/rbhma3100/setup.c | 2
-rw-r--r-- arch/mips/kernel/asm-offsets.c | 4
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 2
-rw-r--r-- arch/mips/kernel/gdb-stub.c | 6
-rw-r--r-- arch/mips/kernel/head.S | 25
-rw-r--r-- arch/mips/kernel/i8259.c | 24
-rw-r--r-- arch/mips/kernel/irixelf.c | 331
-rw-r--r-- arch/mips/kernel/irq-msc01.c | 4
-rw-r--r-- arch/mips/kernel/irq-mv6434x.c | 14
-rw-r--r-- arch/mips/kernel/irq-rm7000.c | 13
-rw-r--r-- arch/mips/kernel/irq-rm9000.c | 24
-rw-r--r-- arch/mips/kernel/irq_cpu.c | 21
-rw-r--r-- arch/mips/kernel/linux32.c | 28
-rw-r--r-- arch/mips/kernel/mips-mt.c | 9
-rw-r--r-- arch/mips/kernel/proc.c | 8
-rw-r--r-- arch/mips/kernel/process.c | 6
-rw-r--r-- arch/mips/kernel/r4k_fpu.S | 19
-rw-r--r-- arch/mips/kernel/rtlx.c | 4
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 2
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 2
-rw-r--r-- arch/mips/kernel/setup.c | 47
-rw-r--r-- arch/mips/kernel/signal.c | 6
-rw-r--r-- arch/mips/kernel/signal_n32.c | 4
-rw-r--r-- arch/mips/kernel/smp-mt.c | 9
-rw-r--r-- arch/mips/kernel/smtc.c | 54
-rw-r--r-- arch/mips/kernel/sysirix.c | 4
-rw-r--r-- arch/mips/kernel/vpe.c | 36
-rw-r--r-- arch/mips/lasat/interrupt.c | 2
-rw-r--r-- arch/mips/lasat/prom.c | 3
-rw-r--r-- arch/mips/lib-32/Makefile | 2
-rw-r--r-- arch/mips/lib-64/Makefile | 2
-rw-r--r-- arch/mips/lib-64/memset.S | 142
-rw-r--r-- arch/mips/lib/Makefile | 2
-rw-r--r-- arch/mips/lib/memset.S (renamed from arch/mips/lib-32/memset.S) | 35
-rw-r--r-- arch/mips/lib/uncached.c | 4
-rw-r--r-- arch/mips/mips-boards/atlas/atlas_int.c | 9
-rw-r--r-- arch/mips/mips-boards/generic/memory.c | 18
-rw-r--r-- arch/mips/mips-boards/malta/malta_int.c | 7
-rw-r--r-- arch/mips/mips-boards/sead/sead_int.c | 2
-rw-r--r-- arch/mips/mips-boards/sim/sim_int.c | 6
-rw-r--r-- arch/mips/mips-boards/sim/sim_mem.c | 16
-rw-r--r-- arch/mips/mm/init.c | 50
-rw-r--r-- arch/mips/momentum/jaguar_atx/Makefile | 2
-rw-r--r-- arch/mips/momentum/jaguar_atx/irq.c | 4
-rw-r--r-- arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h | 6
-rw-r--r-- arch/mips/momentum/jaguar_atx/platform.c | 235
-rw-r--r-- arch/mips/momentum/jaguar_atx/prom.c | 58
-rw-r--r-- arch/mips/momentum/ocelot_3/irq.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_3/prom.c | 3
-rw-r--r-- arch/mips/momentum/ocelot_c/cpci-irq.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_c/dbg_io.c | 4
-rw-r--r-- arch/mips/momentum/ocelot_c/irq.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_c/prom.c | 3
-rw-r--r-- arch/mips/momentum/ocelot_c/uart-irq.c | 2
-rw-r--r-- arch/mips/momentum/ocelot_g/dbg_io.c | 4
-rw-r--r-- arch/mips/momentum/ocelot_g/irq.c | 4
-rw-r--r-- arch/mips/momentum/ocelot_g/prom.c | 3
-rw-r--r-- arch/mips/oprofile/Kconfig | 2
-rw-r--r-- arch/mips/pci/fixup-vr4133.c | 16
-rw-r--r-- arch/mips/philips/pnx8550/common/int.c | 2
-rw-r--r-- arch/mips/philips/pnx8550/common/prom.c | 3
-rw-r--r-- arch/mips/pmc-sierra/yosemite/dbg_io.c | 2
-rw-r--r-- arch/mips/pmc-sierra/yosemite/irq.c | 6
-rw-r--r-- arch/mips/pmc-sierra/yosemite/prom.c | 3
-rw-r--r-- arch/mips/pmc-sierra/yosemite/setup.c | 2
-rw-r--r-- arch/mips/qemu/q-mem.c | 3
-rw-r--r-- arch/mips/sgi-ip22/ip22-eisa.c | 4
-rw-r--r-- arch/mips/sgi-ip22/ip22-int.c | 13
-rw-r--r-- arch/mips/sgi-ip22/ip22-mc.c | 3
-rw-r--r-- arch/mips/sgi-ip27/ip27-irq.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 3
-rw-r--r-- arch/mips/sgi-ip27/ip27-timer.c | 2
-rw-r--r-- arch/mips/sgi-ip32/ip32-irq.c | 10
-rw-r--r-- arch/mips/sgi-ip32/ip32-memory.c | 3
-rw-r--r-- arch/mips/sibyte/bcm1480/irq.c | 2
-rw-r--r-- arch/mips/sibyte/cfe/setup.c | 3
-rw-r--r-- arch/mips/sibyte/sb1250/irq.c | 2
-rw-r--r-- arch/mips/sibyte/sb1250/prom.c | 3
-rw-r--r-- arch/mips/sni/irq.c | 2
-rw-r--r-- arch/mips/sni/sniprom.c | 3
-rw-r--r-- arch/mips/tx4927/common/tx4927_irq.c | 4
-rw-r--r-- arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c | 12
-rw-r--r-- arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c | 3
-rw-r--r-- arch/mips/tx4938/common/irq.c | 4
-rw-r--r-- arch/mips/tx4938/toshiba_rbtx4938/irq.c | 2
-rw-r--r-- arch/mips/tx4938/toshiba_rbtx4938/prom.c | 3
-rw-r--r-- arch/mips/vr41xx/common/icu.c | 31
-rw-r--r-- arch/mips/vr41xx/common/init.c | 3
-rw-r--r-- arch/mips/vr41xx/common/irq.c | 18
-rw-r--r-- arch/mips/vr41xx/nec-cmbvr4133/irq.c | 53
-rw-r--r-- arch/powerpc/Kconfig | 5
-rw-r--r-- arch/powerpc/kernel/pci_64.c | 2
-rw-r--r-- arch/s390/Kconfig | 29
-rw-r--r-- arch/s390/appldata/appldata_base.c | 2
-rw-r--r-- arch/s390/appldata/appldata_mem.c | 2
-rw-r--r-- arch/s390/appldata/appldata_net_sum.c | 2
-rw-r--r-- arch/s390/crypto/Kconfig | 60
-rw-r--r-- arch/s390/crypto/Makefile | 3
-rw-r--r-- arch/s390/crypto/aes_s390.c | 47
-rw-r--r-- arch/s390/crypto/crypt_s390.h | 281
-rw-r--r-- arch/s390/crypto/crypt_s390_query.c | 129
-rw-r--r-- arch/s390/crypto/des_check_key.c | 6
-rw-r--r-- arch/s390/crypto/des_s390.c | 8
-rw-r--r-- arch/s390/crypto/prng.c | 213
-rw-r--r-- arch/s390/crypto/sha1_s390.c | 83
-rw-r--r-- arch/s390/crypto/sha256_s390.c | 11
-rw-r--r-- arch/s390/defconfig | 12
-rw-r--r-- arch/s390/hypfs/Makefile | 2
-rw-r--r-- arch/s390/hypfs/hypfs.h | 9
-rw-r--r-- arch/s390/hypfs/hypfs_diag.h | 16
-rw-r--r-- arch/s390/hypfs/hypfs_vm.c | 231
-rw-r--r-- arch/s390/hypfs/inode.c | 31
-rw-r--r-- arch/s390/kernel/Makefile | 4
-rw-r--r-- arch/s390/kernel/base.S | 150
-rw-r--r-- arch/s390/kernel/binfmt_elf32.c | 2
-rw-r--r-- arch/s390/kernel/compat_exec_domain.c | 5
-rw-r--r-- arch/s390/kernel/compat_linux.c | 24
-rw-r--r-- arch/s390/kernel/compat_linux.h | 31
-rw-r--r-- arch/s390/kernel/compat_signal.c | 8
-rw-r--r-- arch/s390/kernel/cpcmd.c | 14
-rw-r--r-- arch/s390/kernel/crash.c | 1
-rw-r--r-- arch/s390/kernel/debug.c | 18
-rw-r--r-- arch/s390/kernel/early.c | 306
-rw-r--r-- arch/s390/kernel/ebcdic.c | 1
-rw-r--r-- arch/s390/kernel/head31.S | 194
-rw-r--r-- arch/s390/kernel/head64.S | 193
-rw-r--r-- arch/s390/kernel/ipl.c | 106
-rw-r--r-- arch/s390/kernel/irq.c | 15
-rw-r--r-- arch/s390/kernel/kprobes.c | 32
-rw-r--r-- arch/s390/kernel/machine_kexec.c | 1
-rw-r--r-- arch/s390/kernel/module.c | 5
-rw-r--r-- arch/s390/kernel/process.c | 4
-rw-r--r-- arch/s390/kernel/profile.c | 20
-rw-r--r-- arch/s390/kernel/ptrace.c | 46
-rw-r--r-- arch/s390/kernel/reset.S | 90
-rw-r--r-- arch/s390/kernel/s390_ext.c | 8
-rw-r--r-- arch/s390/kernel/setup.c | 155
-rw-r--r-- arch/s390/kernel/signal.c | 2
-rw-r--r-- arch/s390/kernel/smp.c | 32
-rw-r--r-- arch/s390/kernel/stacktrace.c | 10
-rw-r--r-- arch/s390/kernel/time.c | 1185
-rw-r--r-- arch/s390/kernel/traps.c | 24
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 13
-rw-r--r-- arch/s390/kernel/vtime.c | 10
-rw-r--r-- arch/s390/lib/Makefile | 2
-rw-r--r-- arch/s390/lib/delay.c | 48
-rw-r--r-- arch/s390/lib/qrnnd.S | 77
-rw-r--r-- arch/s390/lib/uaccess.h | 23
-rw-r--r-- arch/s390/lib/uaccess_mvcos.c | 78
-rw-r--r-- arch/s390/lib/uaccess_pt.c | 329
-rw-r--r-- arch/s390/lib/uaccess_std.c | 23
-rw-r--r-- arch/s390/math-emu/Makefile | 2
-rw-r--r-- arch/s390/math-emu/math.c | 2
-rw-r--r-- arch/s390/math-emu/qrnnd.S | 77
-rw-r--r-- arch/s390/math-emu/sfp-util.h | 66
-rw-r--r-- arch/s390/mm/cmm.c | 4
-rw-r--r-- arch/s390/mm/extmem.c | 66
-rw-r--r-- arch/s390/mm/fault.c | 93
-rw-r--r-- arch/s390/mm/init.c | 20
-rw-r--r-- arch/s390/mm/vmem.c | 14
-rw-r--r-- arch/x86_64/kernel/early-quirks.c | 4
-rw-r--r-- arch/x86_64/kernel/genapic.c | 4
-rw-r--r-- arch/x86_64/kernel/io_apic.c | 17
-rw-r--r-- arch/x86_64/kernel/mpparse.c | 2
-rw-r--r-- arch/x86_64/kernel/pci-swiotlb.c | 2
-rw-r--r-- arch/x86_64/kernel/time.c | 18
-rw-r--r-- arch/x86_64/mm/srat.c | 29
236 files changed, 5357 insertions, 3007 deletions
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 3c10b9a1ddf5..ab642a4f08de 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -575,3 +575,7 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
575 575
576EXPORT_SYMBOL(pci_iomap); 576EXPORT_SYMBOL(pci_iomap);
577EXPORT_SYMBOL(pci_iounmap); 577EXPORT_SYMBOL(pci_iounmap);
578
579/* FIXME: Some boxes have multiple ISA bridges! */
580struct pci_dev *isa_bridge;
581EXPORT_SYMBOL(isa_bridge);
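
The hunk above only adds and exports a global: alpha now publishes an isa_bridge pointer so that modular drivers can locate the ISA bridge without walking the PCI bus themselves. A minimal sketch of how a module might consume it; the helper name and the ALi vendor check are illustrative, not part of the patch:

#include <linux/pci.h>

extern struct pci_dev *isa_bridge;	/* exported by arch/alpha/kernel/pci.c above */

static int example_have_ali_isa_bridge(void)
{
	/* May still be NULL early in boot; per the FIXME, only one bridge is tracked. */
	return isa_bridge && isa_bridge->vendor == PCI_VENDOR_ID_AL;
}
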
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index 5d80edfc61b7..bb0c376b62b3 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -466,7 +466,8 @@ CONFIG_FW_LOADER=y
466# 466#
467# Plug and Play support 467# Plug and Play support
468# 468#
469# CONFIG_PNP is not set 469CONFIG_PNP=y
470CONFIG_PNPACPI=y
470 471
471# 472#
472# Block devices 473# Block devices
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index cbcb2c27f48b..e94aff6888ca 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -66,7 +66,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
66 66
67#define BAD_MADT_ENTRY(entry, end) ( \ 67#define BAD_MADT_ENTRY(entry, end) ( \
68 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 68 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
69 ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) 69 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
70 70
71#define PREFIX "ACPI: " 71#define PREFIX "ACPI: "
72 72
@@ -79,7 +79,7 @@ int acpi_ioapic;
79int acpi_strict; 79int acpi_strict;
80EXPORT_SYMBOL(acpi_strict); 80EXPORT_SYMBOL(acpi_strict);
81 81
82acpi_interrupt_flags acpi_sci_flags __initdata; 82u8 acpi_sci_flags __initdata;
83int acpi_sci_override_gsi __initdata; 83int acpi_sci_override_gsi __initdata;
84int acpi_skip_timer_override __initdata; 84int acpi_skip_timer_override __initdata;
85int acpi_use_timer_override __initdata; 85int acpi_use_timer_override __initdata;
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
92#warning ACPI uses CMPXCHG, i486 and later hardware 92#warning ACPI uses CMPXCHG, i486 and later hardware
93#endif 93#endif
94 94
95#define MAX_MADT_ENTRIES 256
96u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
97 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
98EXPORT_SYMBOL(x86_acpiid_to_apicid);
99
100/* -------------------------------------------------------------------------- 95/* --------------------------------------------------------------------------
101 Boot-time Configuration 96 Boot-time Configuration
102 -------------------------------------------------------------------------- */ 97 -------------------------------------------------------------------------- */
@@ -166,30 +161,26 @@ char *__acpi_map_table(unsigned long phys, unsigned long size)
166 161
167#ifdef CONFIG_PCI_MMCONFIG 162#ifdef CONFIG_PCI_MMCONFIG
168/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */ 163/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
169struct acpi_table_mcfg_config *pci_mmcfg_config; 164struct acpi_mcfg_allocation *pci_mmcfg_config;
170int pci_mmcfg_config_num; 165int pci_mmcfg_config_num;
171 166
172int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size) 167int __init acpi_parse_mcfg(struct acpi_table_header *header)
173{ 168{
174 struct acpi_table_mcfg *mcfg; 169 struct acpi_table_mcfg *mcfg;
175 unsigned long i; 170 unsigned long i;
176 int config_size; 171 int config_size;
177 172
178 if (!phys_addr || !size) 173 if (!header)
179 return -EINVAL; 174 return -EINVAL;
180 175
181 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size); 176 mcfg = (struct acpi_table_mcfg *)header;
182 if (!mcfg) {
183 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
184 return -ENODEV;
185 }
186 177
187 /* how many config structures do we have */ 178 /* how many config structures do we have */
188 pci_mmcfg_config_num = 0; 179 pci_mmcfg_config_num = 0;
189 i = size - sizeof(struct acpi_table_mcfg); 180 i = header->length - sizeof(struct acpi_table_mcfg);
190 while (i >= sizeof(struct acpi_table_mcfg_config)) { 181 while (i >= sizeof(struct acpi_mcfg_allocation)) {
191 ++pci_mmcfg_config_num; 182 ++pci_mmcfg_config_num;
192 i -= sizeof(struct acpi_table_mcfg_config); 183 i -= sizeof(struct acpi_mcfg_allocation);
193 }; 184 };
194 if (pci_mmcfg_config_num == 0) { 185 if (pci_mmcfg_config_num == 0) {
195 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n"); 186 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
@@ -204,9 +195,9 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
204 return -ENOMEM; 195 return -ENOMEM;
205 } 196 }
206 197
207 memcpy(pci_mmcfg_config, &mcfg->config, config_size); 198 memcpy(pci_mmcfg_config, &mcfg[1], config_size);
208 for (i = 0; i < pci_mmcfg_config_num; ++i) { 199 for (i = 0; i < pci_mmcfg_config_num; ++i) {
209 if (mcfg->config[i].base_reserved) { 200 if (pci_mmcfg_config[i].address > 0xFFFFFFFF) {
210 printk(KERN_ERR PREFIX 201 printk(KERN_ERR PREFIX
211 "MMCONFIG not in low 4GB of memory\n"); 202 "MMCONFIG not in low 4GB of memory\n");
212 kfree(pci_mmcfg_config); 203 kfree(pci_mmcfg_config);
@@ -220,24 +211,24 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
220#endif /* CONFIG_PCI_MMCONFIG */ 211#endif /* CONFIG_PCI_MMCONFIG */
221 212
222#ifdef CONFIG_X86_LOCAL_APIC 213#ifdef CONFIG_X86_LOCAL_APIC
223static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) 214static int __init acpi_parse_madt(struct acpi_table_header *table)
224{ 215{
225 struct acpi_table_madt *madt = NULL; 216 struct acpi_table_madt *madt = NULL;
226 217
227 if (!phys_addr || !size || !cpu_has_apic) 218 if (!cpu_has_apic)
228 return -EINVAL; 219 return -EINVAL;
229 220
230 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size); 221 madt = (struct acpi_table_madt *)table;
231 if (!madt) { 222 if (!madt) {
232 printk(KERN_WARNING PREFIX "Unable to map MADT\n"); 223 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
233 return -ENODEV; 224 return -ENODEV;
234 } 225 }
235 226
236 if (madt->lapic_address) { 227 if (madt->address) {
237 acpi_lapic_addr = (u64) madt->lapic_address; 228 acpi_lapic_addr = (u64) madt->address;
238 229
239 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n", 230 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
240 madt->lapic_address); 231 madt->address);
241 } 232 }
242 233
243 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id); 234 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
@@ -246,21 +237,17 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
246} 237}
247 238
248static int __init 239static int __init
249acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end) 240acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
250{ 241{
251 struct acpi_table_lapic *processor = NULL; 242 struct acpi_madt_local_apic *processor = NULL;
252 243
253 processor = (struct acpi_table_lapic *)header; 244 processor = (struct acpi_madt_local_apic *)header;
254 245
255 if (BAD_MADT_ENTRY(processor, end)) 246 if (BAD_MADT_ENTRY(processor, end))
256 return -EINVAL; 247 return -EINVAL;
257 248
258 acpi_table_print_madt_entry(header); 249 acpi_table_print_madt_entry(header);
259 250
260 /* Record local apic id only when enabled */
261 if (processor->flags.enabled)
262 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
263
264 /* 251 /*
265 * We need to register disabled CPU as well to permit 252 * We need to register disabled CPU as well to permit
266 * counting disabled CPUs. This allows us to size 253 * counting disabled CPUs. This allows us to size
@@ -269,18 +256,18 @@ acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
269 * when we use CPU hotplug. 256 * when we use CPU hotplug.
270 */ 257 */
271 mp_register_lapic(processor->id, /* APIC ID */ 258 mp_register_lapic(processor->id, /* APIC ID */
272 processor->flags.enabled); /* Enabled? */ 259 processor->lapic_flags & ACPI_MADT_ENABLED); /* Enabled? */
273 260
274 return 0; 261 return 0;
275} 262}
276 263
277static int __init 264static int __init
278acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, 265acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
279 const unsigned long end) 266 const unsigned long end)
280{ 267{
281 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL; 268 struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
282 269
283 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header; 270 lapic_addr_ovr = (struct acpi_madt_local_apic_override *)header;
284 271
285 if (BAD_MADT_ENTRY(lapic_addr_ovr, end)) 272 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
286 return -EINVAL; 273 return -EINVAL;
@@ -291,11 +278,11 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
291} 278}
292 279
293static int __init 280static int __init
294acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) 281acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
295{ 282{
296 struct acpi_table_lapic_nmi *lapic_nmi = NULL; 283 struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
297 284
298 lapic_nmi = (struct acpi_table_lapic_nmi *)header; 285 lapic_nmi = (struct acpi_madt_local_apic_nmi *)header;
299 286
300 if (BAD_MADT_ENTRY(lapic_nmi, end)) 287 if (BAD_MADT_ENTRY(lapic_nmi, end))
301 return -EINVAL; 288 return -EINVAL;
@@ -313,11 +300,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
313#ifdef CONFIG_X86_IO_APIC 300#ifdef CONFIG_X86_IO_APIC
314 301
315static int __init 302static int __init
316acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end) 303acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
317{ 304{
318 struct acpi_table_ioapic *ioapic = NULL; 305 struct acpi_madt_io_apic *ioapic = NULL;
319 306
320 ioapic = (struct acpi_table_ioapic *)header; 307 ioapic = (struct acpi_madt_io_apic *)header;
321 308
322 if (BAD_MADT_ENTRY(ioapic, end)) 309 if (BAD_MADT_ENTRY(ioapic, end))
323 return -EINVAL; 310 return -EINVAL;
@@ -342,11 +329,11 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
342 polarity = 3; 329 polarity = 3;
343 330
344 /* Command-line over-ride via acpi_sci= */ 331 /* Command-line over-ride via acpi_sci= */
345 if (acpi_sci_flags.trigger) 332 if (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)
346 trigger = acpi_sci_flags.trigger; 333 trigger = (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
347 334
348 if (acpi_sci_flags.polarity) 335 if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
349 polarity = acpi_sci_flags.polarity; 336 polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
350 337
351 /* 338 /*
352 * mp_config_acpi_legacy_irqs() already setup IRQs < 16 339 * mp_config_acpi_legacy_irqs() already setup IRQs < 16
@@ -357,51 +344,52 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
357 344
358 /* 345 /*
359 * stash over-ride to indicate we've been here 346 * stash over-ride to indicate we've been here
360 * and for later update of acpi_fadt 347 * and for later update of acpi_gbl_FADT
361 */ 348 */
362 acpi_sci_override_gsi = gsi; 349 acpi_sci_override_gsi = gsi;
363 return; 350 return;
364} 351}
365 352
366static int __init 353static int __init
367acpi_parse_int_src_ovr(acpi_table_entry_header * header, 354acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
368 const unsigned long end) 355 const unsigned long end)
369{ 356{
370 struct acpi_table_int_src_ovr *intsrc = NULL; 357 struct acpi_madt_interrupt_override *intsrc = NULL;
371 358
372 intsrc = (struct acpi_table_int_src_ovr *)header; 359 intsrc = (struct acpi_madt_interrupt_override *)header;
373 360
374 if (BAD_MADT_ENTRY(intsrc, end)) 361 if (BAD_MADT_ENTRY(intsrc, end))
375 return -EINVAL; 362 return -EINVAL;
376 363
377 acpi_table_print_madt_entry(header); 364 acpi_table_print_madt_entry(header);
378 365
379 if (intsrc->bus_irq == acpi_fadt.sci_int) { 366 if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
380 acpi_sci_ioapic_setup(intsrc->global_irq, 367 acpi_sci_ioapic_setup(intsrc->global_irq,
381 intsrc->flags.polarity, 368 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
382 intsrc->flags.trigger); 369 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
383 return 0; 370 return 0;
384 } 371 }
385 372
386 if (acpi_skip_timer_override && 373 if (acpi_skip_timer_override &&
387 intsrc->bus_irq == 0 && intsrc->global_irq == 2) { 374 intsrc->source_irq == 0 && intsrc->global_irq == 2) {
388 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); 375 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
389 return 0; 376 return 0;
390 } 377 }
391 378
392 mp_override_legacy_irq(intsrc->bus_irq, 379 mp_override_legacy_irq(intsrc->source_irq,
393 intsrc->flags.polarity, 380 intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
394 intsrc->flags.trigger, intsrc->global_irq); 381 (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
382 intsrc->global_irq);
395 383
396 return 0; 384 return 0;
397} 385}
398 386
399static int __init 387static int __init
400acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) 388acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
401{ 389{
402 struct acpi_table_nmi_src *nmi_src = NULL; 390 struct acpi_madt_nmi_source *nmi_src = NULL;
403 391
404 nmi_src = (struct acpi_table_nmi_src *)header; 392 nmi_src = (struct acpi_madt_nmi_source *)header;
405 393
406 if (BAD_MADT_ENTRY(nmi_src, end)) 394 if (BAD_MADT_ENTRY(nmi_src, end))
407 return -EINVAL; 395 return -EINVAL;
@@ -417,7 +405,7 @@ acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
417 405
418/* 406/*
419 * acpi_pic_sci_set_trigger() 407 * acpi_pic_sci_set_trigger()
420 * 408 *
421 * use ELCR to set PIC-mode trigger type for SCI 409 * use ELCR to set PIC-mode trigger type for SCI
422 * 410 *
423 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's 411 * If a PIC-mode SCI is not recognized or gives spurious IRQ7's
@@ -511,7 +499,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
511{ 499{
512 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 500 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
513 union acpi_object *obj; 501 union acpi_object *obj;
514 struct acpi_table_lapic *lapic; 502 struct acpi_madt_local_apic *lapic;
515 cpumask_t tmp_map, new_map; 503 cpumask_t tmp_map, new_map;
516 u8 physid; 504 u8 physid;
517 int cpu; 505 int cpu;
@@ -529,10 +517,10 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
529 return -EINVAL; 517 return -EINVAL;
530 } 518 }
531 519
532 lapic = (struct acpi_table_lapic *)obj->buffer.pointer; 520 lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
533 521
534 if ((lapic->header.type != ACPI_MADT_LAPIC) || 522 if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
535 (!lapic->flags.enabled)) { 523 !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
536 kfree(buffer.pointer); 524 kfree(buffer.pointer);
537 return -EINVAL; 525 return -EINVAL;
538 } 526 }
@@ -544,7 +532,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
544 buffer.pointer = NULL; 532 buffer.pointer = NULL;
545 533
546 tmp_map = cpu_present_map; 534 tmp_map = cpu_present_map;
547 mp_register_lapic(physid, lapic->flags.enabled); 535 mp_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
548 536
549 /* 537 /*
550 * If mp_register_lapic successfully generates a new logical cpu 538 * If mp_register_lapic successfully generates a new logical cpu
@@ -566,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
566 554
567int acpi_unmap_lsapic(int cpu) 555int acpi_unmap_lsapic(int cpu)
568{ 556{
569 int i;
570
571 for_each_possible_cpu(i) {
572 if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
573 x86_acpiid_to_apicid[i] = -1;
574 break;
575 }
576 }
577 x86_cpu_to_apicid[cpu] = -1; 557 x86_cpu_to_apicid[cpu] = -1;
578 cpu_clear(cpu, cpu_present_map); 558 cpu_clear(cpu, cpu_present_map);
579 num_processors--; 559 num_processors--;
@@ -619,42 +599,36 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
619 return 0; 599 return 0;
620} 600}
621 601
622static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size) 602static int __init acpi_parse_sbf(struct acpi_table_header *table)
623{ 603{
624 struct acpi_table_sbf *sb; 604 struct acpi_table_boot *sb;
625
626 if (!phys_addr || !size)
627 return -EINVAL;
628 605
629 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size); 606 sb = (struct acpi_table_boot *)table;
630 if (!sb) { 607 if (!sb) {
631 printk(KERN_WARNING PREFIX "Unable to map SBF\n"); 608 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
632 return -ENODEV; 609 return -ENODEV;
633 } 610 }
634 611
635 sbf_port = sb->sbf_cmos; /* Save CMOS port */ 612 sbf_port = sb->cmos_index; /* Save CMOS port */
636 613
637 return 0; 614 return 0;
638} 615}
639 616
640#ifdef CONFIG_HPET_TIMER 617#ifdef CONFIG_HPET_TIMER
641 618
642static int __init acpi_parse_hpet(unsigned long phys, unsigned long size) 619static int __init acpi_parse_hpet(struct acpi_table_header *table)
643{ 620{
644 struct acpi_table_hpet *hpet_tbl; 621 struct acpi_table_hpet *hpet_tbl;
645 struct resource *hpet_res; 622 struct resource *hpet_res;
646 resource_size_t res_start; 623 resource_size_t res_start;
647 624
648 if (!phys || !size) 625 hpet_tbl = (struct acpi_table_hpet *)table;
649 return -EINVAL;
650
651 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
652 if (!hpet_tbl) { 626 if (!hpet_tbl) {
653 printk(KERN_WARNING PREFIX "Unable to map HPET\n"); 627 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
654 return -ENODEV; 628 return -ENODEV;
655 } 629 }
656 630
657 if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) { 631 if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
658 printk(KERN_WARNING PREFIX "HPET timers must be located in " 632 printk(KERN_WARNING PREFIX "HPET timers must be located in "
659 "memory.\n"); 633 "memory.\n");
660 return -1; 634 return -1;
@@ -667,29 +641,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
667 hpet_res->name = (void *)&hpet_res[1]; 641 hpet_res->name = (void *)&hpet_res[1];
668 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 642 hpet_res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
669 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE, 643 snprintf((char *)hpet_res->name, HPET_RESOURCE_NAME_SIZE,
670 "HPET %u", hpet_tbl->number); 644 "HPET %u", hpet_tbl->sequence);
671 hpet_res->end = (1 * 1024) - 1; 645 hpet_res->end = (1 * 1024) - 1;
672 } 646 }
673 647
674#ifdef CONFIG_X86_64 648#ifdef CONFIG_X86_64
675 vxtime.hpet_address = hpet_tbl->addr.addrl | 649 vxtime.hpet_address = hpet_tbl->address.address;
676 ((long)hpet_tbl->addr.addrh << 32);
677 650
678 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 651 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
679 hpet_tbl->id, vxtime.hpet_address); 652 hpet_tbl->id, vxtime.hpet_address);
680 653
681 res_start = vxtime.hpet_address; 654 res_start = vxtime.hpet_address;
682#else /* X86 */ 655#else /* X86 */
683 { 656 {
684 extern unsigned long hpet_address; 657 extern unsigned long hpet_address;
685 658
686 hpet_address = hpet_tbl->addr.addrl; 659 hpet_address = hpet_tbl->address.address;
687 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n", 660 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
688 hpet_tbl->id, hpet_address); 661 hpet_tbl->id, hpet_address);
689 662
690 res_start = hpet_address; 663 res_start = hpet_address;
691 } 664 }
692#endif /* X86 */ 665#endif /* X86 */
693 666
694 if (hpet_res) { 667 if (hpet_res) {
695 hpet_res->start = res_start; 668 hpet_res->start = res_start;
@@ -707,42 +680,28 @@ static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
707extern u32 pmtmr_ioport; 680extern u32 pmtmr_ioport;
708#endif 681#endif
709 682
710static int __init acpi_parse_fadt(unsigned long phys, unsigned long size) 683static int __init acpi_parse_fadt(struct acpi_table_header *table)
711{ 684{
712 struct fadt_descriptor *fadt = NULL;
713
714 fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
715 if (!fadt) {
716 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
717 return 0;
718 }
719 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
720 acpi_fadt.sci_int = fadt->sci_int;
721
722 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
723 acpi_fadt.revision = fadt->revision;
724 acpi_fadt.force_apic_physical_destination_mode =
725 fadt->force_apic_physical_destination_mode;
726 685
727#ifdef CONFIG_X86_PM_TIMER 686#ifdef CONFIG_X86_PM_TIMER
728 /* detect the location of the ACPI PM Timer */ 687 /* detect the location of the ACPI PM Timer */
729 if (fadt->revision >= FADT2_REVISION_ID) { 688 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
730 /* FADT rev. 2 */ 689 /* FADT rev. 2 */
731 if (fadt->xpm_tmr_blk.address_space_id != 690 if (acpi_gbl_FADT.xpm_timer_block.space_id !=
732 ACPI_ADR_SPACE_SYSTEM_IO) 691 ACPI_ADR_SPACE_SYSTEM_IO)
733 return 0; 692 return 0;
734 693
735 pmtmr_ioport = fadt->xpm_tmr_blk.address; 694 pmtmr_ioport = acpi_gbl_FADT.xpm_timer_block.address;
736 /* 695 /*
737 * "X" fields are optional extensions to the original V1.0 696 * "X" fields are optional extensions to the original V1.0
738 * fields, so we must selectively expand V1.0 fields if the 697 * fields, so we must selectively expand V1.0 fields if the
739 * corresponding X field is zero. 698 * corresponding X field is zero.
740 */ 699 */
741 if (!pmtmr_ioport) 700 if (!pmtmr_ioport)
742 pmtmr_ioport = fadt->V1_pm_tmr_blk; 701 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
743 } else { 702 } else {
744 /* FADT rev. 1 */ 703 /* FADT rev. 1 */
745 pmtmr_ioport = fadt->V1_pm_tmr_blk; 704 pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
746 } 705 }
747 if (pmtmr_ioport) 706 if (pmtmr_ioport)
748 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", 707 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
@@ -784,13 +743,13 @@ static int __init acpi_parse_madt_lapic_entries(void)
784 if (!cpu_has_apic) 743 if (!cpu_has_apic)
785 return -ENODEV; 744 return -ENODEV;
786 745
787 /* 746 /*
788 * Note that the LAPIC address is obtained from the MADT (32-bit value) 747 * Note that the LAPIC address is obtained from the MADT (32-bit value)
789 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value). 748 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
790 */ 749 */
791 750
792 count = 751 count =
793 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, 752 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
794 acpi_parse_lapic_addr_ovr, 0); 753 acpi_parse_lapic_addr_ovr, 0);
795 if (count < 0) { 754 if (count < 0) {
796 printk(KERN_ERR PREFIX 755 printk(KERN_ERR PREFIX
@@ -800,7 +759,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
800 759
801 mp_register_lapic_address(acpi_lapic_addr); 760 mp_register_lapic_address(acpi_lapic_addr);
802 761
803 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic, 762 count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic,
804 MAX_APICS); 763 MAX_APICS);
805 if (!count) { 764 if (!count) {
806 printk(KERN_ERR PREFIX "No LAPIC entries present\n"); 765 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
@@ -813,7 +772,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
813 } 772 }
814 773
815 count = 774 count =
816 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0); 775 acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0);
817 if (count < 0) { 776 if (count < 0) {
818 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 777 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
819 /* TBD: Cleanup to allow fallback to MPS */ 778 /* TBD: Cleanup to allow fallback to MPS */
@@ -842,7 +801,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
842 return -ENODEV; 801 return -ENODEV;
843 } 802 }
844 803
845 if (!cpu_has_apic) 804 if (!cpu_has_apic)
846 return -ENODEV; 805 return -ENODEV;
847 806
848 /* 807 /*
@@ -855,7 +814,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
855 } 814 }
856 815
857 count = 816 count =
858 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, 817 acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
859 MAX_IO_APICS); 818 MAX_IO_APICS);
860 if (!count) { 819 if (!count) {
861 printk(KERN_ERR PREFIX "No IOAPIC entries present\n"); 820 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
@@ -866,7 +825,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
866 } 825 }
867 826
868 count = 827 count =
869 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 828 acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr,
870 NR_IRQ_VECTORS); 829 NR_IRQ_VECTORS);
871 if (count < 0) { 830 if (count < 0) {
872 printk(KERN_ERR PREFIX 831 printk(KERN_ERR PREFIX
@@ -880,13 +839,13 @@ static int __init acpi_parse_madt_ioapic_entries(void)
880 * pretend we got one so we can set the SCI flags. 839 * pretend we got one so we can set the SCI flags.
881 */ 840 */
882 if (!acpi_sci_override_gsi) 841 if (!acpi_sci_override_gsi)
883 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0); 842 acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
884 843
885 /* Fill in identity legacy mapings where no override */ 844 /* Fill in identity legacy mapings where no override */
886 mp_config_acpi_legacy_irqs(); 845 mp_config_acpi_legacy_irqs();
887 846
888 count = 847 count =
889 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 848 acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src,
890 NR_IRQ_VECTORS); 849 NR_IRQ_VECTORS);
891 if (count < 0) { 850 if (count < 0) {
892 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 851 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
@@ -908,7 +867,7 @@ static void __init acpi_process_madt(void)
908#ifdef CONFIG_X86_LOCAL_APIC 867#ifdef CONFIG_X86_LOCAL_APIC
909 int count, error; 868 int count, error;
910 869
911 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt); 870 count = acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt);
912 if (count >= 1) { 871 if (count >= 1) {
913 872
914 /* 873 /*
@@ -1195,7 +1154,7 @@ int __init acpi_boot_table_init(void)
1195 if (acpi_disabled && !acpi_ht) 1154 if (acpi_disabled && !acpi_ht)
1196 return 1; 1155 return 1;
1197 1156
1198 /* 1157 /*
1199 * Initialize the ACPI boot-time table parser. 1158 * Initialize the ACPI boot-time table parser.
1200 */ 1159 */
1201 error = acpi_table_init(); 1160 error = acpi_table_init();
@@ -1204,7 +1163,7 @@ int __init acpi_boot_table_init(void)
1204 return error; 1163 return error;
1205 } 1164 }
1206 1165
1207 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1166 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1208 1167
1209 /* 1168 /*
1210 * blacklist may disable ACPI entirely 1169 * blacklist may disable ACPI entirely
@@ -1232,19 +1191,19 @@ int __init acpi_boot_init(void)
1232 if (acpi_disabled && !acpi_ht) 1191 if (acpi_disabled && !acpi_ht)
1233 return 1; 1192 return 1;
1234 1193
1235 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); 1194 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
1236 1195
1237 /* 1196 /*
1238 * set sci_int and PM timer address 1197 * set sci_int and PM timer address
1239 */ 1198 */
1240 acpi_table_parse(ACPI_FADT, acpi_parse_fadt); 1199 acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt);
1241 1200
1242 /* 1201 /*
1243 * Process the Multiple APIC Description Table (MADT), if present 1202 * Process the Multiple APIC Description Table (MADT), if present
1244 */ 1203 */
1245 acpi_process_madt(); 1204 acpi_process_madt();
1246 1205
1247 acpi_table_parse(ACPI_HPET, acpi_parse_hpet); 1206 acpi_table_parse(ACPI_SIG_HPET, acpi_parse_hpet);
1248 1207
1249 return 0; 1208 return 0;
1250} 1209}
@@ -1315,13 +1274,17 @@ static int __init setup_acpi_sci(char *s)
1315 if (!s) 1274 if (!s)
1316 return -EINVAL; 1275 return -EINVAL;
1317 if (!strcmp(s, "edge")) 1276 if (!strcmp(s, "edge"))
1318 acpi_sci_flags.trigger = 1; 1277 acpi_sci_flags = ACPI_MADT_TRIGGER_EDGE |
1278 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1319 else if (!strcmp(s, "level")) 1279 else if (!strcmp(s, "level"))
1320 acpi_sci_flags.trigger = 3; 1280 acpi_sci_flags = ACPI_MADT_TRIGGER_LEVEL |
1281 (acpi_sci_flags & ~ACPI_MADT_TRIGGER_MASK);
1321 else if (!strcmp(s, "high")) 1282 else if (!strcmp(s, "high"))
1322 acpi_sci_flags.polarity = 1; 1283 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_HIGH |
1284 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1323 else if (!strcmp(s, "low")) 1285 else if (!strcmp(s, "low"))
1324 acpi_sci_flags.polarity = 3; 1286 acpi_sci_flags = ACPI_MADT_POLARITY_ACTIVE_LOW |
1287 (acpi_sci_flags & ~ACPI_MADT_POLARITY_MASK);
1325 else 1288 else
1326 return -EINVAL; 1289 return -EINVAL;
1327 return 0; 1290 return 0;
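
Most of the boot.c churn above is one mechanical conversion: ACPI table handlers move from the old (unsigned long phys, unsigned long size) form, where each handler mapped its own table, to the ACPICA-style (struct acpi_table_header *) form, where acpi_table_parse() hands over an already-mapped, length-checked header and callers identify tables by signature string (ACPI_SIG_MADT, ACPI_SIG_HPET, and so on). A minimal sketch of the new handler shape, with a made-up handler name:

static int __init example_parse_madt(struct acpi_table_header *table)
{
	struct acpi_table_madt *madt = (struct acpi_table_madt *)table;

	/* No __acpi_map_table()/size bookkeeping here any more; the core
	 * has already mapped the table before invoking the handler. */
	if (!madt)
		return -EINVAL;

	/* ... inspect madt->address, walk the subtables, etc. ... */
	return 0;
}

/* Registration now keys on the table signature string:
 *	acpi_table_parse(ACPI_SIG_MADT, example_parse_madt);
 */
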
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 4b60af7f91dd..bf86f7662d8b 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -16,7 +16,7 @@
16 16
17static int nvidia_hpet_detected __initdata; 17static int nvidia_hpet_detected __initdata;
18 18
19static int __init nvidia_hpet_check(unsigned long phys, unsigned long size) 19static int __init nvidia_hpet_check(struct acpi_table_header *header)
20{ 20{
21 nvidia_hpet_detected = 1; 21 nvidia_hpet_detected = 1;
22 return 0; 22 return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
30 is enabled. */ 30 is enabled. */
31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) { 31 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
32 nvidia_hpet_detected = 0; 32 nvidia_hpet_detected = 0;
33 acpi_table_parse(ACPI_HPET, nvidia_hpet_check); 33 acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
34 if (nvidia_hpet_detected == 0) { 34 if (nvidia_hpet_detected == 0) {
35 acpi_skip_timer_override = 1; 35 acpi_skip_timer_override = 1;
36 printk(KERN_INFO "Nvidia board " 36 printk(KERN_INFO "Nvidia board "
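
The quirk keeps its existing trick: it registers a do-nothing handler purely to learn whether an HPET table exists, and only adapts that probe to the new signature-string API. A sketch of the presence-probe idiom with illustrative names:

static int example_hpet_seen __initdata;

static int __init example_note_hpet(struct acpi_table_header *header)
{
	example_hpet_seen = 1;	/* being called at all is the signal */
	return 0;
}

/*	acpi_table_parse(ACPI_SIG_HPET, example_note_hpet);
 *	if (!example_hpet_seen)
 *		... apply the timer-override quirk ...
 */
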
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index e940e00b96c9..a3db9332d652 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
190 /* Invoke C3 */ 190 /* Invoke C3 */
191 inb(cx_address); 191 inb(cx_address);
192 /* Dummy op - must do something useless after P_LVL3 read */ 192 /* Dummy op - must do something useless after P_LVL3 read */
193 t = inl(acpi_fadt.xpm_tmr_blk.address); 193 t = inl(acpi_gbl_FADT.xpm_timer_block.address);
194 } 194 }
195 /* Disable bus ratio bit */ 195 /* Disable bus ratio bit */
196 local_irq_disable(); 196 local_irq_disable();
@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
250 outb(3, 0x22); 250 outb(3, 0x22);
251 } else if ((pr != NULL) && pr->flags.bm_control) { 251 } else if ((pr != NULL) && pr->flags.bm_control) {
252 /* Disable bus master arbitration */ 252 /* Disable bus master arbitration */
253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
254 ACPI_MTX_DO_NOT_LOCK);
255 } 254 }
256 switch (longhaul_version) { 255 switch (longhaul_version) {
257 256
@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
281 case TYPE_POWERSAVER: 280 case TYPE_POWERSAVER:
282 if (longhaul_flags & USE_ACPI_C3) { 281 if (longhaul_flags & USE_ACPI_C3) {
283 /* Don't allow wakeup */ 282 /* Don't allow wakeup */
284 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 283 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
285 ACPI_MTX_DO_NOT_LOCK);
286 do_powersaver(cx->address, clock_ratio_index); 284 do_powersaver(cx->address, clock_ratio_index);
287 } else { 285 } else {
288 do_powersaver(0, clock_ratio_index); 286 do_powersaver(0, clock_ratio_index);
@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
295 outb(0, 0x22); 293 outb(0, 0x22);
296 } else if ((pr != NULL) && pr->flags.bm_control) { 294 } else if ((pr != NULL) && pr->flags.bm_control) {
297 /* Enable bus master arbitration */ 295 /* Enable bus master arbitration */
298 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 296 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
299 ACPI_MTX_DO_NOT_LOCK);
300 } 297 }
301 outb(pic2_mask,0xA1); /* restore mask */ 298 outb(pic2_mask,0xA1); /* restore mask */
302 outb(pic1_mask,0x21); 299 outb(pic1_mask,0x21);
@@ -414,7 +411,7 @@ static int __init longhaul_get_ranges(void)
414 highest_speed = calc_speed(maxmult); 411 highest_speed = calc_speed(maxmult);
415 lowest_speed = calc_speed(minmult); 412 lowest_speed = calc_speed(minmult);
416 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, 413 dprintk ("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
417 print_speed(lowest_speed/1000), 414 print_speed(lowest_speed/1000),
418 print_speed(highest_speed/1000)); 415 print_speed(highest_speed/1000));
419 416
420 if (lowest_speed == highest_speed) { 417 if (lowest_speed == highest_speed) {
@@ -498,7 +495,7 @@ static void __init longhaul_setup_voltagescaling(void)
498 maxvid.mV/1000, maxvid.mV%1000, 495 maxvid.mV/1000, maxvid.mV%1000,
499 minvid.mV/1000, minvid.mV%1000, 496 minvid.mV/1000, minvid.mV%1000,
500 numvscales); 497 numvscales);
501 498
502 j = 0; 499 j = 0;
503 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { 500 while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
504 speed = longhaul_table[j].frequency; 501 speed = longhaul_table[j].frequency;
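
The longhaul hunks track two ACPICA interface changes: acpi_set_register() dropped its ACPI_MTX_* lock-flag argument, and the FADT is now reached through the acpi_gbl_FADT global (with renamed fields) instead of the per-arch acpi_fadt copy. A compressed sketch of the post-patch call shapes; the wrapper function and includes are illustrative:

#include <linux/acpi.h>		/* assumed to pull in acpi_set_register() and acpi_gbl_FADT */
#include <asm/io.h>

static void example_arb_disable_and_touch_pm_timer(void)
{
	u32 t;

	acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);	/* no third lock-flag argument */

	/* acpi_fadt.xpm_tmr_blk.address becomes acpi_gbl_FADT.xpm_timer_block.address */
	t = inl(acpi_gbl_FADT.xpm_timer_block.address);
	(void)t;	/* dummy read, as in do_powersaver() above */
}
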
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 6a3875f81a0a..5592fa6e1fa1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2606,25 +2606,32 @@ static struct irq_chip msi_chip = {
2606 .retrigger = ioapic_retrigger_irq, 2606 .retrigger = ioapic_retrigger_irq,
2607}; 2607};
2608 2608
2609int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev) 2609int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
2610{ 2610{
2611 struct msi_msg msg; 2611 struct msi_msg msg;
2612 int ret; 2612 int irq, ret;
2613 irq = create_irq();
2614 if (irq < 0)
2615 return irq;
2616
2617 set_irq_msi(irq, desc);
2613 ret = msi_compose_msg(dev, irq, &msg); 2618 ret = msi_compose_msg(dev, irq, &msg);
2614 if (ret < 0) 2619 if (ret < 0) {
2620 destroy_irq(irq);
2615 return ret; 2621 return ret;
2622 }
2616 2623
2617 write_msi_msg(irq, &msg); 2624 write_msi_msg(irq, &msg);
2618 2625
2619 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, 2626 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
2620 "edge"); 2627 "edge");
2621 2628
2622 return 0; 2629 return irq;
2623} 2630}
2624 2631
2625void arch_teardown_msi_irq(unsigned int irq) 2632void arch_teardown_msi_irq(unsigned int irq)
2626{ 2633{
2627 return; 2634 destroy_irq(irq);
2628} 2635}
2629 2636
2630#endif /* CONFIG_PCI_MSI */ 2637#endif /* CONFIG_PCI_MSI */
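
The io_apic.c hunk changes the contract of arch_setup_msi_irq(): instead of receiving a pre-allocated IRQ number, the arch code now allocates the vector itself with create_irq(), attaches the msi_desc, and returns the IRQ (destroying it again on failure), while arch_teardown_msi_irq() does the matching destroy_irq(). Because the numbered diff above is hard to read, here is the new code restated in plain form; names are as in the hunk, and helpers such as msi_compose_msg() and msi_chip live elsewhere in io_apic.c:

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int irq, ret;

	irq = create_irq();			/* arch now owns vector allocation */
	if (irq < 0)
		return irq;

	set_irq_msi(irq, desc);			/* bind the descriptor to the irq */
	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0) {
		destroy_irq(irq);		/* undo the allocation on failure */
		return ret;
	}

	write_msi_msg(irq, &msg);
	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	return irq;				/* success: report the irq to the MSI core */
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}
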
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 49bff3596bff..4f5983c98669 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1057 static int gsi_to_irq[MAX_GSI_NUM]; 1057 static int gsi_to_irq[MAX_GSI_NUM];
1058 1058
1059 /* Don't set up the ACPI SCI because it's already set up */ 1059 /* Don't set up the ACPI SCI because it's already set up */
1060 if (acpi_fadt.sci_int == gsi) 1060 if (acpi_gbl_FADT.sci_interrupt == gsi)
1061 return gsi; 1061 return gsi;
1062 1062
1063 ioapic = mp_find_ioapic(gsi); 1063 ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
1114 /* 1114 /*
1115 * Don't assign IRQ used by ACPI SCI 1115 * Don't assign IRQ used by ACPI SCI
1116 */ 1116 */
1117 if (gsi == acpi_fadt.sci_int) 1117 if (gsi == acpi_gbl_FADT.sci_interrupt)
1118 gsi = pci_irq++; 1118 gsi = pci_irq++;
1119 gsi_to_irq[irq] = gsi; 1119 gsi_to_irq[irq] = gsi;
1120 } else { 1120 } else {
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index f7e735c077c3..2a8713ec0f9a 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
62/* Identify CPU proximity domains */ 62/* Identify CPU proximity domains */
63static void __init parse_cpu_affinity_structure(char *p) 63static void __init parse_cpu_affinity_structure(char *p)
64{ 64{
65 struct acpi_table_processor_affinity *cpu_affinity = 65 struct acpi_srat_cpu_affinity *cpu_affinity =
66 (struct acpi_table_processor_affinity *) p; 66 (struct acpi_srat_cpu_affinity *) p;
67 67
68 if (!cpu_affinity->flags.enabled) 68 if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
69 return; /* empty entry */ 69 return; /* empty entry */
70 70
71 /* mark this node as "seen" in node bitmap */ 71 /* mark this node as "seen" in node bitmap */
72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain); 72 BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
73 73
74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain; 74 apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
75 75
76 printk("CPU 0x%02X in proximity domain 0x%02X\n", 76 printk("CPU 0x%02X in proximity domain 0x%02X\n",
77 cpu_affinity->apic_id, cpu_affinity->proximity_domain); 77 cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
78} 78}
79 79
80/* 80/*
@@ -84,28 +84,27 @@ static void __init parse_cpu_affinity_structure(char *p)
84static void __init parse_memory_affinity_structure (char *sratp) 84static void __init parse_memory_affinity_structure (char *sratp)
85{ 85{
86 unsigned long long paddr, size; 86 unsigned long long paddr, size;
87 unsigned long start_pfn, end_pfn; 87 unsigned long start_pfn, end_pfn;
88 u8 pxm; 88 u8 pxm;
89 struct node_memory_chunk_s *p, *q, *pend; 89 struct node_memory_chunk_s *p, *q, *pend;
90 struct acpi_table_memory_affinity *memory_affinity = 90 struct acpi_srat_mem_affinity *memory_affinity =
91 (struct acpi_table_memory_affinity *) sratp; 91 (struct acpi_srat_mem_affinity *) sratp;
92 92
93 if (!memory_affinity->flags.enabled) 93 if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
94 return; /* empty entry */ 94 return; /* empty entry */
95 95
96 pxm = memory_affinity->proximity_domain & 0xff;
97
96 /* mark this node as "seen" in node bitmap */ 98 /* mark this node as "seen" in node bitmap */
97 BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain); 99 BMAP_SET(pxm_bitmap, pxm);
98 100
99 /* calculate info for memory chunk structure */ 101 /* calculate info for memory chunk structure */
100 paddr = memory_affinity->base_addr_hi; 102 paddr = memory_affinity->base_address;
101 paddr = (paddr << 32) | memory_affinity->base_addr_lo; 103 size = memory_affinity->length;
102 size = memory_affinity->length_hi; 104
103 size = (size << 32) | memory_affinity->length_lo;
104
105 start_pfn = paddr >> PAGE_SHIFT; 105 start_pfn = paddr >> PAGE_SHIFT;
106 end_pfn = (paddr + size) >> PAGE_SHIFT; 106 end_pfn = (paddr + size) >> PAGE_SHIFT;
107 107
108 pxm = memory_affinity->proximity_domain;
109 108
110 if (num_memory_chunks >= MAXCHUNKS) { 109 if (num_memory_chunks >= MAXCHUNKS) {
111 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n", 110 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
132 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n", 131 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
133 start_pfn, end_pfn, 132 start_pfn, end_pfn,
134 memory_affinity->memory_type, 133 memory_affinity->memory_type,
135 memory_affinity->proximity_domain, 134 pxm,
136 (memory_affinity->flags.hot_pluggable ? 135 ((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
137 "enabled and removable" : "enabled" ) ); 136 "enabled and removable" : "enabled" ) );
138} 137}
139 138
@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
185 num_memory_chunks = 0; 184 num_memory_chunks = 0;
186 while (p < end) { 185 while (p < end) {
187 switch (*p) { 186 switch (*p) {
188 case ACPI_SRAT_PROCESSOR_AFFINITY: 187 case ACPI_SRAT_TYPE_CPU_AFFINITY:
189 parse_cpu_affinity_structure(p); 188 parse_cpu_affinity_structure(p);
190 break; 189 break;
191 case ACPI_SRAT_MEMORY_AFFINITY: 190 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
192 parse_memory_affinity_structure(p); 191 parse_memory_affinity_structure(p);
193 break; 192 break;
194 default: 193 default:
@@ -262,31 +261,30 @@ out_fail:
262 return 0; 261 return 0;
263} 262}
264 263
264struct acpi_static_rsdt {
265 struct acpi_table_rsdt table;
266 u32 padding[7]; /* Allow for 7 more table entries */
267};
268
265int __init get_memcfg_from_srat(void) 269int __init get_memcfg_from_srat(void)
266{ 270{
267 struct acpi_table_header *header = NULL; 271 struct acpi_table_header *header = NULL;
268 struct acpi_table_rsdp *rsdp = NULL; 272 struct acpi_table_rsdp *rsdp = NULL;
269 struct acpi_table_rsdt *rsdt = NULL; 273 struct acpi_table_rsdt *rsdt = NULL;
270 struct acpi_pointer *rsdp_address = NULL; 274 acpi_native_uint rsdp_address = 0;
271 struct acpi_table_rsdt saved_rsdt; 275 struct acpi_static_rsdt saved_rsdt;
272 int tables = 0; 276 int tables = 0;
273 int i = 0; 277 int i = 0;
274 278
275 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, 279 rsdp_address = acpi_find_rsdp();
276 rsdp_address))) { 280 if (!rsdp_address) {
277 printk("%s: System description tables not found\n", 281 printk("%s: System description tables not found\n",
278 __FUNCTION__); 282 __FUNCTION__);
279 goto out_err; 283 goto out_err;
280 } 284 }
281 285
282 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 286 printk("%s: assigning address to rsdp\n", __FUNCTION__);
283 printk("%s: assigning address to rsdp\n", __FUNCTION__); 287 rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
284 rsdp = (struct acpi_table_rsdp *)
285 (u32)rsdp_address->pointer.physical;
286 } else {
287 printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
288 goto out_err;
289 }
290 if (!rsdp) { 288 if (!rsdp) {
291 printk("%s: Didn't find ACPI root!\n", __FUNCTION__); 289 printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
292 goto out_err; 290 goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
295 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision, 293 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
296 rsdp->oem_id); 294 rsdp->oem_id);
297 295
298 if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) { 296 if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
299 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__); 297 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
300 goto out_err; 298 goto out_err;
301 } 299 }
302 300
303 rsdt = (struct acpi_table_rsdt *) 301 rsdt = (struct acpi_table_rsdt *)
304 boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt)); 302 boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
305 303
306 if (!rsdt) { 304 if (!rsdt) {
307 printk(KERN_WARNING 305 printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
310 goto out_err; 308 goto out_err;
311 } 309 }
312 310
313 header = & rsdt->header; 311 header = &rsdt->header;
314 312
315 if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) { 313 if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
316 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n"); 314 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
317 goto out_err; 315 goto out_err;
318 } 316 }
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
330 328
331 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt)); 329 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
332 330
333 if (saved_rsdt.header.length > sizeof(saved_rsdt)) { 331 if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
334 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n", 332 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
335 saved_rsdt.header.length); 333 saved_rsdt.table.header.length);
336 goto out_err; 334 goto out_err;
337 } 335 }
338 336
@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
341 for (i = 0; i < tables; i++) { 339 for (i = 0; i < tables; i++) {
342 /* Map in header, then map in full table length. */ 340 /* Map in header, then map in full table length. */
343 header = (struct acpi_table_header *) 341 header = (struct acpi_table_header *)
344 boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header)); 342 boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
345 if (!header) 343 if (!header)
346 break; 344 break;
347 header = (struct acpi_table_header *) 345 header = (struct acpi_table_header *)
348 boot_ioremap(saved_rsdt.entry[i], header->length); 346 boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
349 if (!header) 347 if (!header)
350 break; 348 break;
351 349
352 if (strncmp((char *) &header->signature, "SRAT", 4)) 350 if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
353 continue; 351 continue;
354 352
355 /* we've found the srat table. don't need to look at any more tables */ 353 /* we've found the srat table. don't need to look at any more tables */
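
The srat.c conversion swaps the old acpi_table_{processor,memory}_affinity structures for acpi_srat_cpu_affinity / acpi_srat_mem_affinity: enable and hot-plug state become flag bits, the CPU entry carries proximity_domain_lo, and the memory range arrives as full 64-bit base_address/length fields instead of hi/lo pairs that had to be stitched together. A minimal sketch of reading the new memory-affinity layout; the function name and include are illustrative:

#include <linux/acpi.h>		/* assumed to provide struct acpi_srat_mem_affinity */

static void __init example_read_mem_affinity(struct acpi_srat_mem_affinity *ma)
{
	u64 paddr, size;
	u8 pxm;

	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return;					/* disabled/empty entry */

	pxm   = ma->proximity_domain & 0xff;		/* low byte, as in the hunk */
	paddr = ma->base_address;			/* already 64-bit wide */
	size  = ma->length;				/* no hi/lo reassembly needed */

	(void)paddr; (void)size; (void)pxm;
}
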
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 80566ca4a80a..c8d5aa132fa0 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -84,15 +84,6 @@ struct es7000_oem_table {
84}; 84};
85 85
86#ifdef CONFIG_ACPI 86#ifdef CONFIG_ACPI
87struct acpi_table_sdt {
88 unsigned long pa;
89 unsigned long count;
90 struct {
91 unsigned long pa;
92 enum acpi_table_id id;
93 unsigned long size;
94 } entry[50];
95};
96 87
97struct oem_table { 88struct oem_table {
98 struct acpi_table_header Header; 89 struct acpi_table_header Header;
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index 3d0fc853516d..9be6ceabf042 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -160,51 +160,14 @@ parse_unisys_oem (char *oemptr)
160int __init 160int __init
161find_unisys_acpi_oem_table(unsigned long *oem_addr) 161find_unisys_acpi_oem_table(unsigned long *oem_addr)
162{ 162{
163 struct acpi_table_rsdp *rsdp = NULL; 163 struct acpi_table_header *header = NULL;
164 unsigned long rsdp_phys = 0; 164 int i = 0;
165 struct acpi_table_header *header = NULL; 165 while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
166 int i; 166 if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
167 struct acpi_table_sdt sdt; 167 struct oem_table *t = (struct oem_table *)header;
168 168 *oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
169 rsdp_phys = acpi_find_rsdp(); 169 t->OEMTableSize);
170 rsdp = __va(rsdp_phys); 170 return 0;
171 if (rsdp->rsdt_address) {
172 struct acpi_table_rsdt *mapped_rsdt = NULL;
173 sdt.pa = rsdp->rsdt_address;
174
175 header = (struct acpi_table_header *)
176 __acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
177 if (!header)
178 return -ENODEV;
179
180 sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
181 mapped_rsdt = (struct acpi_table_rsdt *)
182 __acpi_map_table(sdt.pa, header->length);
183 if (!mapped_rsdt)
184 return -ENODEV;
185
186 header = &mapped_rsdt->header;
187
188 for (i = 0; i < sdt.count; i++)
189 sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
190 };
191 for (i = 0; i < sdt.count; i++) {
192
193 header = (struct acpi_table_header *)
194 __acpi_map_table(sdt.entry[i].pa,
195 sizeof(struct acpi_table_header));
196 if (!header)
197 continue;
198 if (!strncmp((char *) &header->signature, "OEM1", 4)) {
199 if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
200 void *addr;
201 struct oem_table *t;
202 acpi_table_print(header, sdt.entry[i].pa);
203 t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
204 addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
205 *oem_addr = (unsigned long) addr;
206 return 0;
207 }
208 } 171 }
209 } 172 }
210 return -1; 173 return -1;
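
The es7000 rewrite drops the hand-rolled RSDP/RSDT walk in favour of ACPICA's table manager: acpi_get_table() enumerates each installed instance of a signature, so the caller only has to test the OEM id and map the vendor payload. The same loop as above, restated with the steps commented (a sketch, not new behaviour):

    struct acpi_table_header *header = NULL;
    int i = 0;

    /* Ask ACPICA for successive "OEM1" tables until none are left. */
    while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
        if (!memcmp((char *)&header->oem_id, "UNISYS", 6)) {
            struct oem_table *t = (struct oem_table *)header;

            /* Map the payload the OEM table points at and hand it back. */
            *oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
                                                        t->OEMTableSize);
            return 0;
        }
    }
    return -1;
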
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index e2616a266e13..5700220dcf5f 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn) 36static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
37{ 37{
38 int cfg_num = -1; 38 int cfg_num = -1;
39 struct acpi_table_mcfg_config *cfg; 39 struct acpi_mcfg_allocation *cfg;
40 40
41 if (seg == 0 && bus < MAX_CHECK_BUS && 41 if (seg == 0 && bus < MAX_CHECK_BUS &&
42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots)) 42 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
48 break; 48 break;
49 } 49 }
50 cfg = &pci_mmcfg_config[cfg_num]; 50 cfg = &pci_mmcfg_config[cfg_num];
51 if (cfg->pci_segment_group_number != seg) 51 if (cfg->pci_segment != seg)
52 continue; 52 continue;
53 if ((cfg->start_bus_number <= bus) && 53 if ((cfg->start_bus_number <= bus) &&
54 (cfg->end_bus_number >= bus)) 54 (cfg->end_bus_number >= bus))
55 return cfg->base_address; 55 return cfg->address;
56 } 56 }
57 57
58 /* Handle more broken MCFG tables on Asus etc. 58 /* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
60 this applies to all busses. */ 60 this applies to all busses. */
61 cfg = &pci_mmcfg_config[0]; 61 cfg = &pci_mmcfg_config[0];
62 if (pci_mmcfg_config_num == 1 && 62 if (pci_mmcfg_config_num == 1 &&
63 cfg->pci_segment_group_number == 0 && 63 cfg->pci_segment == 0 &&
64 (cfg->start_bus_number | cfg->end_bus_number) == 0) 64 (cfg->start_bus_number | cfg->end_bus_number) == 0)
65 return cfg->base_address; 65 return cfg->address;
66 66
67 /* Fall back to type 0 */ 67 /* Fall back to type 0 */
68 return 0; 68 return 0;
@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
125 unsigned long flags; 125 unsigned long flags;
126 u32 base; 126 u32 base;
127 127
128 if ((bus > 255) || (devfn > 255) || (reg > 4095)) 128 if ((bus > 255) || (devfn > 255) || (reg > 4095))
129 return -EINVAL; 129 return -EINVAL;
130 130
131 base = get_base_addr(seg, bus, devfn); 131 base = get_base_addr(seg, bus, devfn);
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
199 if ((pci_probe & PCI_PROBE_MMCONF) == 0) 199 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
200 return; 200 return;
201 201
202 acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg); 202 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
203 if ((pci_mmcfg_config_num == 0) || 203 if ((pci_mmcfg_config_num == 0) ||
204 (pci_mmcfg_config == NULL) || 204 (pci_mmcfg_config == NULL) ||
205 (pci_mmcfg_config[0].base_address == 0)) 205 (pci_mmcfg_config[0].address == 0))
206 return; 206 return;
207 207
208 /* Only do this check when type 1 works. If it doesn't work 208 /* Only do this check when type 1 works. If it doesn't work
209 assume we run on a Mac and always use MCFG */ 209 assume we run on a Mac and always use MCFG */
210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address, 210 if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
211 pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN, 211 pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
212 E820_RESERVED)) { 212 E820_RESERVED)) {
213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n", 213 printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
214 pci_mmcfg_config[0].base_address); 214 (unsigned long)pci_mmcfg_config[0].address);
215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); 215 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
216 return; 216 return;
217 } 217 }
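
get_base_addr() now returns acpi_mcfg_allocation.address, i.e. the ECAM window base for the matching segment/bus range; a config access is then formed by offsetting into that window. A standalone illustration of the standard ECAM offset arithmetic (ordinary userspace C, not this file's exact code):

    #include <stdint.h>
    #include <stdio.h>

    /* ECAM layout: 1 MiB per bus, 4 KiB per function, byte-addressed regs. */
    static uint64_t ecam_offset(unsigned bus, unsigned devfn, unsigned reg)
    {
        return ((uint64_t)bus << 20) | ((uint64_t)devfn << 12) | (reg & 0xfff);
    }

    int main(void)
    {
        /* Example: window base 0xe0000000, bus 0, dev 0x1f fn 0, reg 0x40. */
        uint64_t base = 0xe0000000ULL;
        unsigned devfn = (0x1f << 3) | 0;   /* PCI_DEVFN(0x1f, 0) */

        printf("config address: %#llx\n",
               (unsigned long long)(base + ecam_offset(0, devfn, 0x40)));
        return 0;
    }
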
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fcacfe291b9b..f1d2899e9a62 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -11,6 +11,8 @@ menu "Processor type and features"
11 11
12config IA64 12config IA64
13 bool 13 bool
14 select PCI if (!IA64_HP_SIM)
15 select ACPI if (!IA64_HP_SIM)
14 default y 16 default y
15 help 17 help
16 The Itanium Processor Family is Intel's 64-bit successor to 18 The Itanium Processor Family is Intel's 64-bit successor to
@@ -28,7 +30,6 @@ config MMU
28 30
29config SWIOTLB 31config SWIOTLB
30 bool 32 bool
31 default y
32 33
33config RWSEM_XCHGADD_ALGORITHM 34config RWSEM_XCHGADD_ALGORITHM
34 bool 35 bool
@@ -84,10 +85,9 @@ choice
84 85
85config IA64_GENERIC 86config IA64_GENERIC
86 bool "generic" 87 bool "generic"
87 select ACPI
88 select PCI
89 select NUMA 88 select NUMA
90 select ACPI_NUMA 89 select ACPI_NUMA
90 select SWIOTLB
91 help 91 help
92 This selects the system type of your hardware. A "generic" kernel 92 This selects the system type of your hardware. A "generic" kernel
93 will run on any supported IA-64 system. However, if you configure 93 will run on any supported IA-64 system. However, if you configure
@@ -104,6 +104,7 @@ config IA64_GENERIC
104 104
105config IA64_DIG 105config IA64_DIG
106 bool "DIG-compliant" 106 bool "DIG-compliant"
107 select SWIOTLB
107 108
108config IA64_HP_ZX1 109config IA64_HP_ZX1
109 bool "HP-zx1/sx1000" 110 bool "HP-zx1/sx1000"
@@ -113,6 +114,7 @@ config IA64_HP_ZX1
113 114
114config IA64_HP_ZX1_SWIOTLB 115config IA64_HP_ZX1_SWIOTLB
115 bool "HP-zx1/sx1000 with software I/O TLB" 116 bool "HP-zx1/sx1000 with software I/O TLB"
117 select SWIOTLB
116 help 118 help
117 Build a kernel that runs on HP zx1 and sx1000 systems even when they 119 Build a kernel that runs on HP zx1 and sx1000 systems even when they
118 have broken PCI devices which cannot DMA to full 32 bits. Apart 120 have broken PCI devices which cannot DMA to full 32 bits. Apart
@@ -131,6 +133,7 @@ config IA64_SGI_SN2
131 133
132config IA64_HP_SIM 134config IA64_HP_SIM
133 bool "Ski-simulator" 135 bool "Ski-simulator"
136 select SWIOTLB
134 137
135endchoice 138endchoice
136 139
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index a5a5637507be..2153bcacbe6c 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -192,3 +192,7 @@ EXPORT_SYMBOL(hwsw_unmap_sg);
192EXPORT_SYMBOL(hwsw_dma_supported); 192EXPORT_SYMBOL(hwsw_dma_supported);
193EXPORT_SYMBOL(hwsw_alloc_coherent); 193EXPORT_SYMBOL(hwsw_alloc_coherent);
194EXPORT_SYMBOL(hwsw_free_coherent); 194EXPORT_SYMBOL(hwsw_free_coherent);
195EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
196EXPORT_SYMBOL(hwsw_sync_single_for_device);
197EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
198EXPORT_SYMBOL(hwsw_sync_sg_for_device);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 29f05d4b68cd..9197d7b361b3 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
55 55
56#define BAD_MADT_ENTRY(entry, end) ( \ 56#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
58 ((acpi_table_entry_header *)entry)->length < sizeof(*entry)) 58 ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
59 59
60#define PREFIX "ACPI: " 60#define PREFIX "ACPI: "
61 61
@@ -67,16 +67,11 @@ EXPORT_SYMBOL(pm_power_off);
67unsigned int acpi_cpei_override; 67unsigned int acpi_cpei_override;
68unsigned int acpi_cpei_phys_cpuid; 68unsigned int acpi_cpei_phys_cpuid;
69 69
70#define MAX_SAPICS 256
71u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
72
73EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
74
75const char *acpi_get_sysname(void) 70const char *acpi_get_sysname(void)
76{ 71{
77#ifdef CONFIG_IA64_GENERIC 72#ifdef CONFIG_IA64_GENERIC
78 unsigned long rsdp_phys; 73 unsigned long rsdp_phys;
79 struct acpi20_table_rsdp *rsdp; 74 struct acpi_table_rsdp *rsdp;
80 struct acpi_table_xsdt *xsdt; 75 struct acpi_table_xsdt *xsdt;
81 struct acpi_table_header *hdr; 76 struct acpi_table_header *hdr;
82 77
@@ -87,16 +82,16 @@ const char *acpi_get_sysname(void)
87 return "dig"; 82 return "dig";
88 } 83 }
89 84
90 rsdp = (struct acpi20_table_rsdp *)__va(rsdp_phys); 85 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys);
91 if (strncmp(rsdp->signature, RSDP_SIG, sizeof(RSDP_SIG) - 1)) { 86 if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) {
92 printk(KERN_ERR 87 printk(KERN_ERR
93 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); 88 "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n");
94 return "dig"; 89 return "dig";
95 } 90 }
96 91
97 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_address); 92 xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
98 hdr = &xsdt->header; 93 hdr = &xsdt->header;
99 if (strncmp(hdr->signature, XSDT_SIG, sizeof(XSDT_SIG) - 1)) { 94 if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) {
100 printk(KERN_ERR 95 printk(KERN_ERR
101 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); 96 "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n");
102 return "dig"; 97 return "dig";
@@ -169,12 +164,12 @@ struct acpi_table_madt *acpi_madt __initdata;
169static u8 has_8259; 164static u8 has_8259;
170 165
171static int __init 166static int __init
172acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header, 167acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
173 const unsigned long end) 168 const unsigned long end)
174{ 169{
175 struct acpi_table_lapic_addr_ovr *lapic; 170 struct acpi_madt_local_apic_override *lapic;
176 171
177 lapic = (struct acpi_table_lapic_addr_ovr *)header; 172 lapic = (struct acpi_madt_local_apic_override *)header;
178 173
179 if (BAD_MADT_ENTRY(lapic, end)) 174 if (BAD_MADT_ENTRY(lapic, end))
180 return -EINVAL; 175 return -EINVAL;
@@ -187,22 +182,19 @@ acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
187} 182}
188 183
189static int __init 184static int __init
190acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end) 185acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
191{ 186{
192 struct acpi_table_lsapic *lsapic; 187 struct acpi_madt_local_sapic *lsapic;
193 188
194 lsapic = (struct acpi_table_lsapic *)header; 189 lsapic = (struct acpi_madt_local_sapic *)header;
195 190
196 if (BAD_MADT_ENTRY(lsapic, end)) 191 /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */
197 return -EINVAL;
198 192
199 if (lsapic->flags.enabled) { 193 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
200#ifdef CONFIG_SMP 194#ifdef CONFIG_SMP
201 smp_boot_data.cpu_phys_id[available_cpus] = 195 smp_boot_data.cpu_phys_id[available_cpus] =
202 (lsapic->id << 8) | lsapic->eid; 196 (lsapic->id << 8) | lsapic->eid;
203#endif 197#endif
204 ia64_acpiid_to_sapicid[lsapic->acpi_id] =
205 (lsapic->id << 8) | lsapic->eid;
206 ++available_cpus; 198 ++available_cpus;
207 } 199 }
208 200
@@ -211,11 +203,11 @@ acpi_parse_lsapic(acpi_table_entry_header * header, const unsigned long end)
211} 203}
212 204
213static int __init 205static int __init
214acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end) 206acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
215{ 207{
216 struct acpi_table_lapic_nmi *lacpi_nmi; 208 struct acpi_madt_local_apic_nmi *lacpi_nmi;
217 209
218 lacpi_nmi = (struct acpi_table_lapic_nmi *)header; 210 lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header;
219 211
220 if (BAD_MADT_ENTRY(lacpi_nmi, end)) 212 if (BAD_MADT_ENTRY(lacpi_nmi, end))
221 return -EINVAL; 213 return -EINVAL;
@@ -225,11 +217,11 @@ acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
225} 217}
226 218
227static int __init 219static int __init
228acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end) 220acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
229{ 221{
230 struct acpi_table_iosapic *iosapic; 222 struct acpi_madt_io_sapic *iosapic;
231 223
232 iosapic = (struct acpi_table_iosapic *)header; 224 iosapic = (struct acpi_madt_io_sapic *)header;
233 225
234 if (BAD_MADT_ENTRY(iosapic, end)) 226 if (BAD_MADT_ENTRY(iosapic, end))
235 return -EINVAL; 227 return -EINVAL;
@@ -240,13 +232,13 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
240static unsigned int __initdata acpi_madt_rev; 232static unsigned int __initdata acpi_madt_rev;
241 233
242static int __init 234static int __init
243acpi_parse_plat_int_src(acpi_table_entry_header * header, 235acpi_parse_plat_int_src(struct acpi_subtable_header * header,
244 const unsigned long end) 236 const unsigned long end)
245{ 237{
246 struct acpi_table_plat_int_src *plintsrc; 238 struct acpi_madt_interrupt_source *plintsrc;
247 int vector; 239 int vector;
248 240
249 plintsrc = (struct acpi_table_plat_int_src *)header; 241 plintsrc = (struct acpi_madt_interrupt_source *)header;
250 242
251 if (BAD_MADT_ENTRY(plintsrc, end)) 243 if (BAD_MADT_ENTRY(plintsrc, end))
252 return -EINVAL; 244 return -EINVAL;
@@ -257,19 +249,19 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
257 */ 249 */
258 vector = iosapic_register_platform_intr(plintsrc->type, 250 vector = iosapic_register_platform_intr(plintsrc->type,
259 plintsrc->global_irq, 251 plintsrc->global_irq,
260 plintsrc->iosapic_vector, 252 plintsrc->io_sapic_vector,
261 plintsrc->eid, 253 plintsrc->eid,
262 plintsrc->id, 254 plintsrc->id,
263 (plintsrc->flags.polarity == 255 ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) ==
264 1) ? IOSAPIC_POL_HIGH : 256 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
265 IOSAPIC_POL_LOW, 257 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
266 (plintsrc->flags.trigger == 258 ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
267 1) ? IOSAPIC_EDGE : 259 ACPI_MADT_TRIGGER_EDGE) ?
268 IOSAPIC_LEVEL); 260 IOSAPIC_EDGE : IOSAPIC_LEVEL);
269 261
270 platform_intr_list[plintsrc->type] = vector; 262 platform_intr_list[plintsrc->type] = vector;
271 if (acpi_madt_rev > 1) { 263 if (acpi_madt_rev > 1) {
272 acpi_cpei_override = plintsrc->plint_flags.cpei_override_flag; 264 acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE;
273 } 265 }
274 266
275 /* 267 /*
@@ -324,30 +316,32 @@ unsigned int get_cpei_target_cpu(void)
324} 316}
325 317
326static int __init 318static int __init
327acpi_parse_int_src_ovr(acpi_table_entry_header * header, 319acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
328 const unsigned long end) 320 const unsigned long end)
329{ 321{
330 struct acpi_table_int_src_ovr *p; 322 struct acpi_madt_interrupt_override *p;
331 323
332 p = (struct acpi_table_int_src_ovr *)header; 324 p = (struct acpi_madt_interrupt_override *)header;
333 325
334 if (BAD_MADT_ENTRY(p, end)) 326 if (BAD_MADT_ENTRY(p, end))
335 return -EINVAL; 327 return -EINVAL;
336 328
337 iosapic_override_isa_irq(p->bus_irq, p->global_irq, 329 iosapic_override_isa_irq(p->source_irq, p->global_irq,
338 (p->flags.polarity == 330 ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
339 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, 331 ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
340 (p->flags.trigger == 332 IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
341 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); 333 ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
334 ACPI_MADT_TRIGGER_EDGE) ?
335 IOSAPIC_EDGE : IOSAPIC_LEVEL);
342 return 0; 336 return 0;
343} 337}
344 338
345static int __init 339static int __init
346acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end) 340acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
347{ 341{
348 struct acpi_table_nmi_src *nmi_src; 342 struct acpi_madt_nmi_source *nmi_src;
349 343
350 nmi_src = (struct acpi_table_nmi_src *)header; 344 nmi_src = (struct acpi_madt_nmi_source *)header;
351 345
352 if (BAD_MADT_ENTRY(nmi_src, end)) 346 if (BAD_MADT_ENTRY(nmi_src, end))
353 return -EINVAL; 347 return -EINVAL;
@@ -371,12 +365,12 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
371 } 365 }
372} 366}
373 367
374static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size) 368static int __init acpi_parse_madt(struct acpi_table_header *table)
375{ 369{
376 if (!phys_addr || !size) 370 if (!table)
377 return -EINVAL; 371 return -EINVAL;
378 372
379 acpi_madt = (struct acpi_table_madt *)__va(phys_addr); 373 acpi_madt = (struct acpi_table_madt *)table;
380 374
381 acpi_madt_rev = acpi_madt->header.revision; 375 acpi_madt_rev = acpi_madt->header.revision;
382 376
@@ -384,14 +378,14 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
384#ifdef CONFIG_ITANIUM 378#ifdef CONFIG_ITANIUM
385 has_8259 = 1; /* Firmware on old Itanium systems is broken */ 379 has_8259 = 1; /* Firmware on old Itanium systems is broken */
386#else 380#else
387 has_8259 = acpi_madt->flags.pcat_compat; 381 has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT;
388#endif 382#endif
389 iosapic_system_init(has_8259); 383 iosapic_system_init(has_8259);
390 384
391 /* Get base address of IPI Message Block */ 385 /* Get base address of IPI Message Block */
392 386
393 if (acpi_madt->lapic_address) 387 if (acpi_madt->address)
394 ipi_base_addr = ioremap(acpi_madt->lapic_address, 0); 388 ipi_base_addr = ioremap(acpi_madt->address, 0);
395 389
396 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); 390 printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr);
397 391
@@ -413,23 +407,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
413#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) 407#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
414static struct acpi_table_slit __initdata *slit_table; 408static struct acpi_table_slit __initdata *slit_table;
415 409
416static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa) 410static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
417{ 411{
418 int pxm; 412 int pxm;
419 413
420 pxm = pa->proximity_domain; 414 pxm = pa->proximity_domain_lo;
421 if (ia64_platform_is("sn2")) 415 if (ia64_platform_is("sn2"))
422 pxm += pa->reserved[0] << 8; 416 pxm += pa->proximity_domain_hi[0] << 8;
423 return pxm; 417 return pxm;
424} 418}
425 419
426static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma) 420static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
427{ 421{
428 int pxm; 422 int pxm;
429 423
430 pxm = ma->proximity_domain; 424 pxm = ma->proximity_domain;
431 if (ia64_platform_is("sn2")) 425 if (!ia64_platform_is("sn2"))
432 pxm += ma->reserved1[0] << 8; 426 pxm &= 0xff;
427
433 return pxm; 428 return pxm;
434} 429}
435 430
@@ -442,7 +437,7 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
442 u32 len; 437 u32 len;
443 438
444 len = sizeof(struct acpi_table_header) + 8 439 len = sizeof(struct acpi_table_header) + 8
445 + slit->localities * slit->localities; 440 + slit->locality_count * slit->locality_count;
446 if (slit->header.length != len) { 441 if (slit->header.length != len) {
447 printk(KERN_ERR 442 printk(KERN_ERR
448 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", 443 "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
@@ -454,11 +449,11 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
454} 449}
455 450
456void __init 451void __init
457acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) 452acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
458{ 453{
459 int pxm; 454 int pxm;
460 455
461 if (!pa->flags.enabled) 456 if (!(pa->flags & ACPI_SRAT_CPU_ENABLED))
462 return; 457 return;
463 458
464 pxm = get_processor_proximity_domain(pa); 459 pxm = get_processor_proximity_domain(pa);
@@ -467,14 +462,14 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
467 pxm_bit_set(pxm); 462 pxm_bit_set(pxm);
468 463
469 node_cpuid[srat_num_cpus].phys_id = 464 node_cpuid[srat_num_cpus].phys_id =
470 (pa->apic_id << 8) | (pa->lsapic_eid); 465 (pa->apic_id << 8) | (pa->local_sapic_eid);
471 /* nid should be overridden as logical node id later */ 466 /* nid should be overridden as logical node id later */
472 node_cpuid[srat_num_cpus].nid = pxm; 467 node_cpuid[srat_num_cpus].nid = pxm;
473 srat_num_cpus++; 468 srat_num_cpus++;
474} 469}
475 470
476void __init 471void __init
477acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) 472acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
478{ 473{
479 unsigned long paddr, size; 474 unsigned long paddr, size;
480 int pxm; 475 int pxm;
@@ -483,13 +478,11 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
483 pxm = get_memory_proximity_domain(ma); 478 pxm = get_memory_proximity_domain(ma);
484 479
485 /* fill node memory chunk structure */ 480 /* fill node memory chunk structure */
486 paddr = ma->base_addr_hi; 481 paddr = ma->base_address;
487 paddr = (paddr << 32) | ma->base_addr_lo; 482 size = ma->length;
488 size = ma->length_hi;
489 size = (size << 32) | ma->length_lo;
490 483
491 /* Ignore disabled entries */ 484 /* Ignore disabled entries */
492 if (!ma->flags.enabled) 485 if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
493 return; 486 return;
494 487
495 /* record this node in proximity bitmap */ 488 /* record this node in proximity bitmap */
@@ -560,16 +553,16 @@ void __init acpi_numa_arch_fixup(void)
560 if (!slit_table) 553 if (!slit_table)
561 return; 554 return;
562 memset(numa_slit, -1, sizeof(numa_slit)); 555 memset(numa_slit, -1, sizeof(numa_slit));
563 for (i = 0; i < slit_table->localities; i++) { 556 for (i = 0; i < slit_table->locality_count; i++) {
564 if (!pxm_bit_test(i)) 557 if (!pxm_bit_test(i))
565 continue; 558 continue;
566 node_from = pxm_to_node(i); 559 node_from = pxm_to_node(i);
567 for (j = 0; j < slit_table->localities; j++) { 560 for (j = 0; j < slit_table->locality_count; j++) {
568 if (!pxm_bit_test(j)) 561 if (!pxm_bit_test(j))
569 continue; 562 continue;
570 node_to = pxm_to_node(j); 563 node_to = pxm_to_node(j);
571 node_distance(node_from, node_to) = 564 node_distance(node_from, node_to) =
572 slit_table->entry[i * slit_table->localities + j]; 565 slit_table->entry[i * slit_table->locality_count + j];
573 } 566 }
574 } 567 }
575 568
@@ -617,21 +610,21 @@ void acpi_unregister_gsi(u32 gsi)
617 610
618EXPORT_SYMBOL(acpi_unregister_gsi); 611EXPORT_SYMBOL(acpi_unregister_gsi);
619 612
620static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size) 613static int __init acpi_parse_fadt(struct acpi_table_header *table)
621{ 614{
622 struct acpi_table_header *fadt_header; 615 struct acpi_table_header *fadt_header;
623 struct fadt_descriptor *fadt; 616 struct acpi_table_fadt *fadt;
624 617
625 if (!phys_addr || !size) 618 if (!table)
626 return -EINVAL; 619 return -EINVAL;
627 620
628 fadt_header = (struct acpi_table_header *)__va(phys_addr); 621 fadt_header = (struct acpi_table_header *)table;
629 if (fadt_header->revision != 3) 622 if (fadt_header->revision != 3)
630 return -ENODEV; /* Only deal with ACPI 2.0 FADT */ 623 return -ENODEV; /* Only deal with ACPI 2.0 FADT */
631 624
632 fadt = (struct fadt_descriptor *)fadt_header; 625 fadt = (struct acpi_table_fadt *)fadt_header;
633 626
634 acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); 627 acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
635 return 0; 628 return 0;
636} 629}
637 630
@@ -658,7 +651,7 @@ int __init acpi_boot_init(void)
658 * information -- the successor to MPS tables. 651 * information -- the successor to MPS tables.
659 */ 652 */
660 653
661 if (acpi_table_parse(ACPI_APIC, acpi_parse_madt) < 1) { 654 if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt) < 1) {
662 printk(KERN_ERR PREFIX "Can't find MADT\n"); 655 printk(KERN_ERR PREFIX "Can't find MADT\n");
663 goto skip_madt; 656 goto skip_madt;
664 } 657 }
@@ -666,40 +659,40 @@ int __init acpi_boot_init(void)
666 /* Local APIC */ 659 /* Local APIC */
667 660
668 if (acpi_table_parse_madt 661 if (acpi_table_parse_madt
669 (ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0) < 0) 662 (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0)
670 printk(KERN_ERR PREFIX 663 printk(KERN_ERR PREFIX
671 "Error parsing LAPIC address override entry\n"); 664 "Error parsing LAPIC address override entry\n");
672 665
673 if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic, NR_CPUS) 666 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
674 < 1) 667 < 1)
675 printk(KERN_ERR PREFIX 668 printk(KERN_ERR PREFIX
676 "Error parsing MADT - no LAPIC entries\n"); 669 "Error parsing MADT - no LAPIC entries\n");
677 670
678 if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0) 671 if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
679 < 0) 672 < 0)
680 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); 673 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
681 674
682 /* I/O APIC */ 675 /* I/O APIC */
683 676
684 if (acpi_table_parse_madt 677 if (acpi_table_parse_madt
685 (ACPI_MADT_IOSAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) 678 (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1)
686 printk(KERN_ERR PREFIX 679 printk(KERN_ERR PREFIX
687 "Error parsing MADT - no IOSAPIC entries\n"); 680 "Error parsing MADT - no IOSAPIC entries\n");
688 681
689 /* System-Level Interrupt Routing */ 682 /* System-Level Interrupt Routing */
690 683
691 if (acpi_table_parse_madt 684 if (acpi_table_parse_madt
692 (ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src, 685 (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src,
693 ACPI_MAX_PLATFORM_INTERRUPTS) < 0) 686 ACPI_MAX_PLATFORM_INTERRUPTS) < 0)
694 printk(KERN_ERR PREFIX 687 printk(KERN_ERR PREFIX
695 "Error parsing platform interrupt source entry\n"); 688 "Error parsing platform interrupt source entry\n");
696 689
697 if (acpi_table_parse_madt 690 if (acpi_table_parse_madt
698 (ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, 0) < 0) 691 (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0)
699 printk(KERN_ERR PREFIX 692 printk(KERN_ERR PREFIX
700 "Error parsing interrupt source overrides entry\n"); 693 "Error parsing interrupt source overrides entry\n");
701 694
702 if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, 0) < 0) 695 if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0)
703 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); 696 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
704 skip_madt: 697 skip_madt:
705 698
@@ -709,7 +702,7 @@ int __init acpi_boot_init(void)
709 * gets interrupts such as power and sleep buttons. If it's not 702 * gets interrupts such as power and sleep buttons. If it's not
710 * on a Legacy interrupt, it needs to be setup. 703 * on a Legacy interrupt, it needs to be setup.
711 */ 704 */
712 if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1) 705 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt) < 1)
713 printk(KERN_ERR PREFIX "Can't find FADT\n"); 706 printk(KERN_ERR PREFIX "Can't find FADT\n");
714 707
715#ifdef CONFIG_SMP 708#ifdef CONFIG_SMP
@@ -842,7 +835,7 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
842{ 835{
843 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 836 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
844 union acpi_object *obj; 837 union acpi_object *obj;
845 struct acpi_table_lsapic *lsapic; 838 struct acpi_madt_local_sapic *lsapic;
846 cpumask_t tmp_map; 839 cpumask_t tmp_map;
847 long physid; 840 long physid;
848 int cpu; 841 int cpu;
@@ -854,16 +847,16 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
854 return -EINVAL; 847 return -EINVAL;
855 848
856 obj = buffer.pointer; 849 obj = buffer.pointer;
857 if (obj->type != ACPI_TYPE_BUFFER || 850 if (obj->type != ACPI_TYPE_BUFFER)
858 obj->buffer.length < sizeof(*lsapic)) { 851 {
859 kfree(buffer.pointer); 852 kfree(buffer.pointer);
860 return -EINVAL; 853 return -EINVAL;
861 } 854 }
862 855
863 lsapic = (struct acpi_table_lsapic *)obj->buffer.pointer; 856 lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
864 857
865 if ((lsapic->header.type != ACPI_MADT_LSAPIC) || 858 if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
866 (!lsapic->flags.enabled)) { 859 (!lsapic->lapic_flags & ACPI_MADT_ENABLED)) {
867 kfree(buffer.pointer); 860 kfree(buffer.pointer);
868 return -EINVAL; 861 return -EINVAL;
869 } 862 }
@@ -883,7 +876,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
883 876
884 cpu_set(cpu, cpu_present_map); 877 cpu_set(cpu, cpu_present_map);
885 ia64_cpu_to_sapicid[cpu] = physid; 878 ia64_cpu_to_sapicid[cpu] = physid;
886 ia64_acpiid_to_sapicid[lsapic->acpi_id] = ia64_cpu_to_sapicid[cpu];
887 879
888 *pcpu = cpu; 880 *pcpu = cpu;
889 return (0); 881 return (0);
@@ -893,14 +885,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
893 885
894int acpi_unmap_lsapic(int cpu) 886int acpi_unmap_lsapic(int cpu)
895{ 887{
896 int i;
897
898 for (i = 0; i < MAX_SAPICS; i++) {
899 if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
900 ia64_acpiid_to_sapicid[i] = -1;
901 break;
902 }
903 }
904 ia64_cpu_to_sapicid[cpu] = -1; 888 ia64_cpu_to_sapicid[cpu] = -1;
905 cpu_clear(cpu, cpu_present_map); 889 cpu_clear(cpu, cpu_present_map);
906 890
@@ -920,7 +904,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
920{ 904{
921 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 905 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
922 union acpi_object *obj; 906 union acpi_object *obj;
923 struct acpi_table_iosapic *iosapic; 907 struct acpi_madt_io_sapic *iosapic;
924 unsigned int gsi_base; 908 unsigned int gsi_base;
925 int pxm, node; 909 int pxm, node;
926 910
@@ -938,9 +922,9 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
938 return AE_OK; 922 return AE_OK;
939 } 923 }
940 924
941 iosapic = (struct acpi_table_iosapic *)obj->buffer.pointer; 925 iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer;
942 926
943 if (iosapic->header.type != ACPI_MADT_IOSAPIC) { 927 if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) {
944 kfree(buffer.pointer); 928 kfree(buffer.pointer);
945 return AE_OK; 929 return AE_OK;
946 } 930 }
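
Under the new ACPICA definitions every MADT handler takes a struct acpi_subtable_header and tests flags through explicit masks instead of C bitfields. An annotated restatement of acpi_parse_int_src_ovr() above, showing the shape all the converted handlers now follow (registration happens via acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, ..., 0), as in acpi_boot_init()):

    static int __init
    acpi_parse_int_src_ovr(struct acpi_subtable_header *header,
                           const unsigned long end)
    {
        struct acpi_madt_interrupt_override *p =
            (struct acpi_madt_interrupt_override *)header;
        int polarity, trigger;

        /* Bounds-check the subtable against the end of the MADT. */
        if (BAD_MADT_ENTRY(p, end))
            return -EINVAL;

        /* inti_flags is a plain bitmask now; compare against the MADT masks. */
        polarity = ((p->inti_flags & ACPI_MADT_POLARITY_MASK) ==
                    ACPI_MADT_POLARITY_ACTIVE_HIGH) ?
                        IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
        trigger  = ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
                    ACPI_MADT_TRIGGER_EDGE) ?
                        IOSAPIC_EDGE : IOSAPIC_LEVEL;

        iosapic_override_isa_irq(p->source_irq, p->global_irq,
                                 polarity, trigger);
        return 0;
    }
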
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index bc2f64d72244..9d92097ce96d 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -79,6 +79,7 @@ crash_save_this_cpu()
79 final_note(buf); 79 final_note(buf);
80} 80}
81 81
82#ifdef CONFIG_SMP
82static int 83static int
83kdump_wait_cpu_freeze(void) 84kdump_wait_cpu_freeze(void)
84{ 85{
@@ -91,6 +92,7 @@ kdump_wait_cpu_freeze(void)
91 } 92 }
92 return 1; 93 return 1;
93} 94}
95#endif
94 96
95void 97void
96machine_crash_shutdown(struct pt_regs *pt) 98machine_crash_shutdown(struct pt_regs *pt)
@@ -116,6 +118,11 @@ machine_crash_shutdown(struct pt_regs *pt)
116static void 118static void
117machine_kdump_on_init(void) 119machine_kdump_on_init(void)
118{ 120{
121 if (!ia64_kimage) {
122 printk(KERN_NOTICE "machine_kdump_on_init(): "
123 "kdump not configured\n");
124 return;
125 }
119 local_irq_disable(); 126 local_irq_disable();
120 kexec_disable_iosapic(); 127 kexec_disable_iosapic();
121 machine_kexec(ia64_kimage); 128 machine_kexec(ia64_kimage);
@@ -132,11 +139,12 @@ kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
132 atomic_inc(&kdump_cpu_freezed); 139 atomic_inc(&kdump_cpu_freezed);
133 kdump_status[cpuid] = 1; 140 kdump_status[cpuid] = 1;
134 mb(); 141 mb();
135 if (cpuid == 0) { 142#ifdef CONFIG_HOTPLUG_CPU
136 for (;;) 143 if (cpuid != 0)
137 cpu_relax();
138 } else
139 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]); 144 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
145#endif
146 for (;;)
147 cpu_relax();
140} 148}
141 149
142static int 150static int
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 83b8c91c1408..da60e90eeeb1 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -9,7 +9,8 @@
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h> 10#include <linux/types.h>
11 11
12#include <linux/uaccess.h> 12#include <asm/page.h>
13#include <asm/uaccess.h>
13 14
14/** 15/**
15 * copy_oldmem_page - copy one page from "oldmem" 16 * copy_oldmem_page - copy one page from "oldmem"
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0b25a7d4e1e4..6c03928544c2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -380,7 +380,7 @@ efi_get_pal_addr (void)
380#endif 380#endif
381 return __va(md->phys_addr); 381 return __va(md->phys_addr);
382 } 382 }
383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found", 383 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
384 __FUNCTION__); 384 __FUNCTION__);
385 return NULL; 385 return NULL;
386} 386}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 15234ed3a341..e7873eeae448 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1610,5 +1610,7 @@ sys_call_table:
1610 data8 sys_sync_file_range // 1300 1610 data8 sys_sync_file_range // 1300
1611 data8 sys_tee 1611 data8 sys_tee
1612 data8 sys_vmsplice 1612 data8 sys_vmsplice
1613 data8 sys_ni_syscall // reserved for move_pages
1614 data8 sys_getcpu
1613 1615
1614 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1616 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 0fc5fb7865cf..d6aab40c6416 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -925,6 +925,11 @@ iosapic_unregister_intr (unsigned int gsi)
925 /* Clear the interrupt controller descriptor */ 925 /* Clear the interrupt controller descriptor */
926 idesc->chip = &no_irq_type; 926 idesc->chip = &no_irq_type;
927 927
928#ifdef CONFIG_SMP
929 /* Clear affinity */
930 cpus_setall(idesc->affinity);
931#endif
932
928 /* Clear the interrupt information */ 933 /* Clear the interrupt information */
929 memset(&iosapic_intr_info[vector], 0, 934 memset(&iosapic_intr_info[vector], 0,
930 sizeof(struct iosapic_intr_info)); 935 sizeof(struct iosapic_intr_info));
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index e2ccc9f660c5..4f0f3b8c1ee2 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -14,6 +14,7 @@
14#include <linux/kexec.h> 14#include <linux/kexec.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/efi.h>
17#include <asm/mmu_context.h> 18#include <asm/mmu_context.h>
18#include <asm/setup.h> 19#include <asm/setup.h>
19#include <asm/delay.h> 20#include <asm/delay.h>
@@ -68,22 +69,10 @@ void machine_kexec_cleanup(struct kimage *image)
68{ 69{
69} 70}
70 71
71void machine_shutdown(void)
72{
73 int cpu;
74
75 for_each_online_cpu(cpu) {
76 if (cpu != smp_processor_id())
77 cpu_down(cpu);
78 }
79 kexec_disable_iosapic();
80}
81
82/* 72/*
83 * Do not allocate memory (or fail in any way) in machine_kexec(). 73 * Do not allocate memory (or fail in any way) in machine_kexec().
84 * We are past the point of no return, committed to rebooting now. 74 * We are past the point of no return, committed to rebooting now.
85 */ 75 */
86extern void *efi_get_pal_addr(void);
87static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) 76static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
88{ 77{
89 struct kimage *image = arg; 78 struct kimage *image = arg;
@@ -93,6 +82,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
93 unsigned long vector; 82 unsigned long vector;
94 int ii; 83 int ii;
95 84
85 BUG_ON(!image);
96 if (image->type == KEXEC_TYPE_CRASH) { 86 if (image->type == KEXEC_TYPE_CRASH) {
97 crash_save_this_cpu(); 87 crash_save_this_cpu();
98 current->thread.ksp = (__u64)info->sw - 16; 88 current->thread.ksp = (__u64)info->sw - 16;
@@ -131,6 +121,7 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
131 121
132void machine_kexec(struct kimage *image) 122void machine_kexec(struct kimage *image)
133{ 123{
124 BUG_ON(!image);
134 unw_init_running(ia64_machine_kexec, image); 125 unw_init_running(ia64_machine_kexec, image);
135 for(;;); 126 for(;;);
136} 127}
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 822e59a1b822..0d05450c91c4 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -64,12 +64,17 @@ static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
64} 64}
65#endif /* CONFIG_SMP */ 65#endif /* CONFIG_SMP */
66 66
67int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 67int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
68{ 68{
69 struct msi_msg msg; 69 struct msi_msg msg;
70 unsigned long dest_phys_id; 70 unsigned long dest_phys_id;
71 unsigned int vector; 71 unsigned int irq, vector;
72 72
73 irq = create_irq();
74 if (irq < 0)
75 return irq;
76
77 set_irq_msi(irq, desc);
73 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map)); 78 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
74 vector = irq; 79 vector = irq;
75 80
@@ -89,12 +94,12 @@ int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
89 write_msi_msg(irq, &msg); 94 write_msi_msg(irq, &msg);
90 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); 95 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
91 96
92 return 0; 97 return irq;
93} 98}
94 99
95void ia64_teardown_msi_irq(unsigned int irq) 100void ia64_teardown_msi_irq(unsigned int irq)
96{ 101{
97 return; /* no-op */ 102 destroy_irq(irq);
98} 103}
99 104
100static void ia64_ack_msi_irq(unsigned int irq) 105static void ia64_ack_msi_irq(unsigned int irq)
@@ -126,12 +131,12 @@ static struct irq_chip ia64_msi_chip = {
126}; 131};
127 132
128 133
129int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 134int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
130{ 135{
131 if (platform_setup_msi_irq) 136 if (platform_setup_msi_irq)
132 return platform_setup_msi_irq(irq, pdev); 137 return platform_setup_msi_irq(pdev, desc);
133 138
134 return ia64_setup_msi_irq(irq, pdev); 139 return ia64_setup_msi_irq(pdev, desc);
135} 140}
136 141
137void arch_teardown_msi_irq(unsigned int irq) 142void arch_teardown_msi_irq(unsigned int irq)
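
With this change the arch hook no longer receives a pre-allocated vector: arch_setup_msi_irq(pdev, desc) is expected to create the irq itself, attach the msi_desc, and return the irq number (or a negative error), with arch_teardown_msi_irq() undoing it via destroy_irq(). A condensed sketch merging arch_setup_msi_irq() and ia64_setup_msi_irq() above (the message fields are elided, this is not a drop-in implementation):

    int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
    {
        struct msi_msg msg;
        int irq;

        irq = create_irq();             /* allocate a free irq/vector */
        if (irq < 0)
            return irq;

        set_irq_msi(irq, desc);         /* bind the descriptor to the irq */

        /* ... fill msg.address_hi/address_lo/data for the target CPU ... */
        write_msi_msg(irq, &msg);
        set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);

        return irq;                     /* the MSI core uses this irq */
    }

    void arch_teardown_msi_irq(unsigned int irq)
    {
        destroy_irq(irq);               /* release what create_irq() gave us */
    }
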
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 17685abaf496..ae96d4176995 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -34,6 +34,7 @@
34#include <asm/ia32.h> 34#include <asm/ia32.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/kdebug.h> 36#include <asm/kdebug.h>
37#include <asm/kexec.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
38#include <asm/processor.h> 39#include <asm/processor.h>
39#include <asm/sal.h> 40#include <asm/sal.h>
@@ -803,6 +804,21 @@ cpu_halt (void)
803 ia64_pal_halt(min_power_state); 804 ia64_pal_halt(min_power_state);
804} 805}
805 806
807void machine_shutdown(void)
808{
809#ifdef CONFIG_HOTPLUG_CPU
810 int cpu;
811
812 for_each_online_cpu(cpu) {
813 if (cpu != smp_processor_id())
814 cpu_down(cpu);
815 }
816#endif
817#ifdef CONFIG_KEXEC
818 kexec_disable_iosapic();
819#endif
820}
821
806void 822void
807machine_restart (char *restart_cmd) 823machine_restart (char *restart_cmd)
808{ 824{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index aa705e46b974..3f8918782e0c 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -607,7 +607,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
607 */ 607 */
608 list_for_each_safe(this, next, &current->children) { 608 list_for_each_safe(this, next, &current->children) {
609 p = list_entry(this, struct task_struct, sibling); 609 p = list_entry(this, struct task_struct, sibling);
610 if (p->mm != mm) 610 if (p->tgid != child->tgid)
611 continue; 611 continue;
612 if (thread_matches(p, addr)) { 612 if (thread_matches(p, addr)) {
613 child = p; 613 child = p;
@@ -1405,6 +1405,7 @@ ptrace_disable (struct task_struct *child)
1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); 1405 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1406 1406
1407 /* make sure the single step/taken-branch trap bits are not set: */ 1407 /* make sure the single step/taken-branch trap bits are not set: */
1408 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1408 child_psr->ss = 0; 1409 child_psr->ss = 0;
1409 child_psr->tb = 0; 1410 child_psr->tb = 0;
1410} 1411}
@@ -1525,6 +1526,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1525 * Make sure the single step/taken-branch trap bits 1526 * Make sure the single step/taken-branch trap bits
1526 * are not set: 1527 * are not set:
1527 */ 1528 */
1529 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1528 ia64_psr(pt)->ss = 0; 1530 ia64_psr(pt)->ss = 0;
1529 ia64_psr(pt)->tb = 0; 1531 ia64_psr(pt)->tb = 0;
1530 1532
@@ -1556,6 +1558,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1556 goto out_tsk; 1558 goto out_tsk;
1557 1559
1558 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 1560 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1561 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1559 if (request == PTRACE_SINGLESTEP) { 1562 if (request == PTRACE_SINGLESTEP) {
1560 ia64_psr(pt)->ss = 1; 1563 ia64_psr(pt)->ss = 1;
1561 } else { 1564 } else {
@@ -1595,13 +1598,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
1595} 1598}
1596 1599
1597 1600
1598void 1601static void
1599syscall_trace (void) 1602syscall_trace (void)
1600{ 1603{
1601 if (!test_thread_flag(TIF_SYSCALL_TRACE))
1602 return;
1603 if (!(current->ptrace & PT_PTRACED))
1604 return;
1605 /* 1604 /*
1606 * The 0x80 provides a way for the tracing parent to 1605 * The 0x80 provides a way for the tracing parent to
1607 * distinguish between a syscall stop and SIGTRAP delivery. 1606 * distinguish between a syscall stop and SIGTRAP delivery.
@@ -1664,7 +1663,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1664 audit_syscall_exit(success, result); 1663 audit_syscall_exit(success, result);
1665 } 1664 }
1666 1665
1667 if (test_thread_flag(TIF_SYSCALL_TRACE) 1666 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1667 || test_thread_flag(TIF_SINGLESTEP))
1668 && (current->ptrace & PT_PTRACED)) 1668 && (current->ptrace & PT_PTRACED))
1669 syscall_trace(); 1669 syscall_trace();
1670} 1670}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index ad567b8d432e..83c2629e1c4c 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -569,34 +569,31 @@ show_cpuinfo (struct seq_file *m, void *v)
569 { 1UL << 1, "spontaneous deferral"}, 569 { 1UL << 1, "spontaneous deferral"},
570 { 1UL << 2, "16-byte atomic ops" } 570 { 1UL << 2, "16-byte atomic ops" }
571 }; 571 };
572 char features[128], *cp, sep; 572 char features[128], *cp, *sep;
573 struct cpuinfo_ia64 *c = v; 573 struct cpuinfo_ia64 *c = v;
574 unsigned long mask; 574 unsigned long mask;
575 unsigned long proc_freq; 575 unsigned long proc_freq;
576 int i; 576 int i, size;
577 577
578 mask = c->features; 578 mask = c->features;
579 579
580 /* build the feature string: */ 580 /* build the feature string: */
581 memcpy(features, " standard", 10); 581 memcpy(features, "standard", 9);
582 cp = features; 582 cp = features;
583 sep = 0; 583 size = sizeof(features);
584 for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) { 584 sep = "";
585 for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
585 if (mask & feature_bits[i].mask) { 586 if (mask & feature_bits[i].mask) {
586 if (sep) 587 cp += snprintf(cp, size, "%s%s", sep,
587 *cp++ = sep; 588 feature_bits[i].feature_name),
588 sep = ','; 589 sep = ", ";
589 *cp++ = ' ';
590 strcpy(cp, feature_bits[i].feature_name);
591 cp += strlen(feature_bits[i].feature_name);
592 mask &= ~feature_bits[i].mask; 590 mask &= ~feature_bits[i].mask;
591 size = sizeof(features) - (cp - features);
593 } 592 }
594 } 593 }
595 if (mask) { 594 if (mask && size > 1) {
596 /* print unknown features as a hex value: */ 595 /* print unknown features as a hex value */
597 if (sep) 596 snprintf(cp, size, "%s0x%lx", sep, mask);
598 *cp++ = sep;
599 sprintf(cp, " 0x%lx", mask);
600 } 597 }
601 598
602 proc_freq = cpufreq_quick_get(cpunum); 599 proc_freq = cpufreq_quick_get(cpunum);
@@ -612,7 +609,7 @@ show_cpuinfo (struct seq_file *m, void *v)
612 "model name : %s\n" 609 "model name : %s\n"
613 "revision : %u\n" 610 "revision : %u\n"
614 "archrev : %u\n" 611 "archrev : %u\n"
615 "features :%s\n" /* don't change this---it _is_ right! */ 612 "features : %s\n"
616 "cpu number : %lu\n" 613 "cpu number : %lu\n"
617 "cpu regs : %u\n" 614 "cpu regs : %u\n"
618 "cpu MHz : %lu.%06lu\n" 615 "cpu MHz : %lu.%06lu\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index d6083a0936f4..8f3d0066f446 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -157,6 +157,7 @@ SECTIONS
157 } 157 }
158#endif 158#endif
159 159
160 . = ALIGN(8);
160 __con_initcall_start = .; 161 __con_initcall_start = .;
161 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) 162 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
162 { *(.con_initcall.init) } 163 { *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b9..63e6d49c5813 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -30,47 +30,69 @@ static unsigned long max_gap;
30#endif 30#endif
31 31
32/** 32/**
33 * show_mem - display a memory statistics summary 33 * show_mem - give short summary of memory stats
34 * 34 *
35 * Just walks the pages in the system and describes where they're allocated. 35 * Shows a simple page count of reserved and used pages in the system.
36 * For discontig machines, it does this on a per-pgdat basis.
36 */ 37 */
37void 38void show_mem(void)
38show_mem (void)
39{ 39{
40 int i, total = 0, reserved = 0; 40 int i, total_reserved = 0;
41 int shared = 0, cached = 0; 41 int total_shared = 0, total_cached = 0;
42 unsigned long total_present = 0;
43 pg_data_t *pgdat;
42 44
43 printk(KERN_INFO "Mem-info:\n"); 45 printk(KERN_INFO "Mem-info:\n");
44 show_free_areas(); 46 show_free_areas();
45
46 printk(KERN_INFO "Free swap: %6ldkB\n", 47 printk(KERN_INFO "Free swap: %6ldkB\n",
47 nr_swap_pages<<(PAGE_SHIFT-10)); 48 nr_swap_pages<<(PAGE_SHIFT-10));
48 i = max_mapnr; 49 printk(KERN_INFO "Node memory in pages:\n");
49 for (i = 0; i < max_mapnr; i++) { 50 for_each_online_pgdat(pgdat) {
50 if (!pfn_valid(i)) { 51 unsigned long present;
52 unsigned long flags;
53 int shared = 0, cached = 0, reserved = 0;
54
55 pgdat_resize_lock(pgdat, &flags);
56 present = pgdat->node_present_pages;
57 for(i = 0; i < pgdat->node_spanned_pages; i++) {
58 struct page *page;
59 if (pfn_valid(pgdat->node_start_pfn + i))
60 page = pfn_to_page(pgdat->node_start_pfn + i);
61 else {
51#ifdef CONFIG_VIRTUAL_MEM_MAP 62#ifdef CONFIG_VIRTUAL_MEM_MAP
52 if (max_gap < LARGE_GAP) 63 if (max_gap < LARGE_GAP)
53 continue; 64 continue;
54 i = vmemmap_find_next_valid_pfn(0, i) - 1;
55#endif 65#endif
56 continue; 66 i = vmemmap_find_next_valid_pfn(pgdat->node_id,
67 i) - 1;
68 continue;
69 }
70 if (PageReserved(page))
71 reserved++;
72 else if (PageSwapCache(page))
73 cached++;
74 else if (page_count(page))
75 shared += page_count(page)-1;
57 } 76 }
58 total++; 77 pgdat_resize_unlock(pgdat, &flags);
59 if (PageReserved(mem_map+i)) 78 total_present += present;
60 reserved++; 79 total_reserved += reserved;
61 else if (PageSwapCache(mem_map+i)) 80 total_cached += cached;
62 cached++; 81 total_shared += shared;
63 else if (page_count(mem_map + i)) 82 printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
64 shared += page_count(mem_map + i) - 1; 83 "shrd: %10d, swpd: %10d\n", pgdat->node_id,
84 present, reserved, shared, cached);
65 } 85 }
66 printk(KERN_INFO "%d pages of RAM\n", total); 86 printk(KERN_INFO "%ld pages of RAM\n", total_present);
67 printk(KERN_INFO "%d reserved pages\n", reserved); 87 printk(KERN_INFO "%d reserved pages\n", total_reserved);
68 printk(KERN_INFO "%d pages shared\n", shared); 88 printk(KERN_INFO "%d pages shared\n", total_shared);
69 printk(KERN_INFO "%d pages swap cached\n", cached); 89 printk(KERN_INFO "%d pages swap cached\n", total_cached);
70 printk(KERN_INFO "%ld pages in page table cache\n", 90 printk(KERN_INFO "Total of %ld pages in page table cache\n",
71 pgtable_quicklist_total_size()); 91 pgtable_quicklist_total_size());
92 printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
72} 93}
73 94
95
74/* physical address where the bootmem map is located */ 96/* physical address where the bootmem map is located */
75unsigned long bootmap_start; 97unsigned long bootmap_start;
76 98
@@ -177,7 +199,7 @@ find_memory (void)
177 199
178#ifdef CONFIG_CRASH_DUMP 200#ifdef CONFIG_CRASH_DUMP
179 /* If we are doing a crash dump, we still need to know the real mem 201 /* If we are doing a crash dump, we still need to know the real mem
180 * size before original memory map is * reset. */ 202 * size before original memory map is reset. */
181 saved_max_pfn = max_pfn; 203 saved_max_pfn = max_pfn;
182#endif 204#endif
183} 205}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 96722cb1b49d..6eae596c509d 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -412,37 +412,6 @@ static void __init memory_less_nodes(void)
412 return; 412 return;
413} 413}
414 414
415#ifdef CONFIG_SPARSEMEM
416/**
417 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
418 * @start: physical start of range
419 * @end: physical end of range
420 * @arg: unused
421 *
422 * Simply calls SPARSEMEM to register memory section(s).
423 */
424static int __init register_sparse_mem(unsigned long start, unsigned long end,
425 void *arg)
426{
427 int nid;
428
429 start = __pa(start) >> PAGE_SHIFT;
430 end = __pa(end) >> PAGE_SHIFT;
431 nid = early_pfn_to_nid(start);
432 memory_present(nid, start, end);
433
434 return 0;
435}
436
437static void __init arch_sparse_init(void)
438{
439 efi_memmap_walk(register_sparse_mem, NULL);
440 sparse_init();
441}
442#else
443#define arch_sparse_init() do {} while (0)
444#endif
445
446/** 415/**
447 * find_memory - walk the EFI memory map and setup the bootmem allocator 416 * find_memory - walk the EFI memory map and setup the bootmem allocator
448 * 417 *
@@ -473,6 +442,9 @@ void __init find_memory(void)
473 node_clear(node, memory_less_mask); 442 node_clear(node, memory_less_mask);
474 mem_data[node].min_pfn = ~0UL; 443 mem_data[node].min_pfn = ~0UL;
475 } 444 }
445
446 efi_memmap_walk(register_active_ranges, NULL);
447
476 /* 448 /*
477 * Initialize the boot memory maps in reverse order since that's 449 * Initialize the boot memory maps in reverse order since that's
478 * what the bootmem allocator expects 450 * what the bootmem allocator expects
@@ -506,6 +478,12 @@ void __init find_memory(void)
506 max_pfn = max_low_pfn; 478 max_pfn = max_low_pfn;
507 479
508 find_initrd(); 480 find_initrd();
481
482#ifdef CONFIG_CRASH_DUMP
483 /* If we are doing a crash dump, we still need to know the real mem
484 * size before original memory map is reset. */
485 saved_max_pfn = max_pfn;
486#endif
509} 487}
510 488
511#ifdef CONFIG_SMP 489#ifdef CONFIG_SMP
@@ -654,7 +632,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
654{ 632{
655 unsigned long end = start + len; 633 unsigned long end = start + len;
656 634
657 add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
658 mem_data[node].num_physpages += len >> PAGE_SHIFT; 635 mem_data[node].num_physpages += len >> PAGE_SHIFT;
659 if (start <= __pa(MAX_DMA_ADDRESS)) 636 if (start <= __pa(MAX_DMA_ADDRESS))
660 mem_data[node].num_dma_physpages += 637 mem_data[node].num_dma_physpages +=
@@ -686,10 +663,11 @@ void __init paging_init(void)
686 663
687 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 664 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
688 665
689 arch_sparse_init();
690
691 efi_memmap_walk(filter_rsvd_memory, count_node_pages); 666 efi_memmap_walk(filter_rsvd_memory, count_node_pages);
692 667
668 sparse_memory_present_with_active_regions(MAX_NUMNODES);
669 sparse_init();
670
693#ifdef CONFIG_VIRTUAL_MEM_MAP 671#ifdef CONFIG_VIRTUAL_MEM_MAP
694 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * 672 vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
695 sizeof(struct page)); 673 sizeof(struct page));
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1373fae7657f..faaca21a3718 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -19,6 +19,7 @@
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/kexec.h>
22 23
23#include <asm/a.out.h> 24#include <asm/a.out.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
@@ -128,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
128 set_bit(PG_arch_1, &page->flags); /* mark page as clean */ 129 set_bit(PG_arch_1, &page->flags); /* mark page as clean */
129} 130}
130 131
132/*
133 * Since DMA is i-cache coherent, any (complete) pages that were written via
134 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
135 * flush them when they get mapped into an executable vm-area.
136 */
137void
138dma_mark_clean(void *addr, size_t size)
139{
140 unsigned long pg_addr, end;
141
142 pg_addr = PAGE_ALIGN((unsigned long) addr);
143 end = (unsigned long) addr + size;
144 while (pg_addr + PAGE_SIZE <= end) {
145 struct page *page = virt_to_page(pg_addr);
146 set_bit(PG_arch_1, &page->flags);
147 pg_addr += PAGE_SIZE;
148 }
149}
150
131inline void 151inline void
132ia64_set_rbs_bot (void) 152ia64_set_rbs_bot (void)
133{ 153{
@@ -595,13 +615,27 @@ find_largest_hole (u64 start, u64 end, void *arg)
595 return 0; 615 return 0;
596} 616}
597 617
618#endif /* CONFIG_VIRTUAL_MEM_MAP */
619
598int __init 620int __init
599register_active_ranges(u64 start, u64 end, void *arg) 621register_active_ranges(u64 start, u64 end, void *arg)
600{ 622{
601 add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT); 623 int nid = paddr_to_nid(__pa(start));
624
625 if (nid < 0)
626 nid = 0;
627#ifdef CONFIG_KEXEC
628 if (start > crashk_res.start && start < crashk_res.end)
629 start = crashk_res.end;
630 if (end > crashk_res.start && end < crashk_res.end)
631 end = crashk_res.start;
632#endif
633
634 if (start < end)
635 add_active_range(nid, __pa(start) >> PAGE_SHIFT,
636 __pa(end) >> PAGE_SHIFT);
602 return 0; 637 return 0;
603} 638}
604#endif /* CONFIG_VIRTUAL_MEM_MAP */
605 639
606static int __init 640static int __init
607count_reserved_pages (u64 start, u64 end, void *arg) 641count_reserved_pages (u64 start, u64 end, void *arg)
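
register_active_ranges() now resolves the node id via paddr_to_nid() and, when kexec is configured, trims the EFI range so the crash kernel's reservation is not handed to the active-range machinery. A standalone sketch of that trimming step, mirroring the hunk's comparisons on plain integers (crashk_res stands in for the real resource):

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* Trim [start, end) against a reserved window, as register_active_ranges()
     * does with crashk_res: pull the start up past the window, or the end down
     * to its start, and drop the range if nothing is left. */
    static int trim_range(unsigned long *start, unsigned long *end,
                          struct range res)
    {
        if (*start > res.start && *start < res.end)
            *start = res.end;
        if (*end > res.start && *end < res.end)
            *end = res.start;
        return *start < *end;           /* non-zero if something remains */
    }

    int main(void)
    {
        struct range crashk = { 0x4000000, 0x8000000 };
        unsigned long start = 0x5000000, end = 0xc000000;

        if (trim_range(&start, &end, crashk))
            printf("register %#lx-%#lx\n", start, end);
        else
            printf("range fully reserved, skipped\n");
        return 0;
    }
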
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index abca6bd7962f..fcf7f93c4b61 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,12 +38,20 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
38 (u64) nasid, 0, 0, 0, 0, 0, 0); 38 (u64) nasid, 0, 0, 0, 0, 0, 0);
39 39
40 if ((int)ret_stuff.v0) 40 if ((int)ret_stuff.v0)
41 panic("hubii_eint_handler(): Fatal TIO Error"); 41 panic("%s: Fatal %s Error", __FUNCTION__,
42 ((nasid & 1) ? "TIO" : "HUBII"));
42 43
43 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 44 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
44 (void)hubiio_crb_error_handler(hubdev_info); 45 (void)hubiio_crb_error_handler(hubdev_info);
45 } else 46 } else
46 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); 47 if (nasid & 1) { /* TIO errors */
48 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
49 (u64) nasid, 0, 0, 0, 0, 0, 0);
50
51 if ((int)ret_stuff.v0)
52 panic("%s: Fatal TIO Error", __FUNCTION__);
53 } else
54 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
47 55
48 return IRQ_HANDLED; 56 return IRQ_HANDLED;
49} 57}
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index cb96b4ea7df6..8c331ca6e5c9 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,6 +13,7 @@
13#include <asm/sn/sn_sal.h> 13#include <asm/sn/sn_sal.h>
14#include "xtalk/hubdev.h" 14#include "xtalk/hubdev.h"
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <acpi/acnamesp.h>
16 17
17 18
18/* 19/*
@@ -31,6 +32,12 @@ struct acpi_vendor_uuid sn_uuid = {
31 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 }, 32 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
32}; 33};
33 34
35struct sn_pcidev_match {
36 u8 bus;
37 unsigned int devfn;
38 acpi_handle handle;
39};
40
34/* 41/*
35 * Perform the early IO init in PROM. 42 * Perform the early IO init in PROM.
36 */ 43 */
@@ -119,9 +126,11 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
119 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS, 126 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
120 &sn_uuid, &buffer); 127 &sn_uuid, &buffer);
121 if (ACPI_FAILURE(status)) { 128 if (ACPI_FAILURE(status)) {
122 printk(KERN_ERR "get_acpi_pcibus_ptr: " 129 printk(KERN_ERR "%s: "
123 "get_acpi_bussoft_info() failed: %d\n", 130 "acpi_get_vendor_resource() failed (0x%x) for: ",
124 status); 131 __FUNCTION__, status);
132 acpi_ns_print_node_pathname(handle, NULL);
133 printk("\n");
125 return NULL; 134 return NULL;
126 } 135 }
127 resource = buffer.pointer; 136 resource = buffer.pointer;
@@ -130,8 +139,8 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
130 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) != 139 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
131 sizeof(struct pcibus_bussoft *)) { 140 sizeof(struct pcibus_bussoft *)) {
132 printk(KERN_ERR 141 printk(KERN_ERR
133 "get_acpi_bussoft_ptr: Invalid vendor data " 142 "%s: Invalid vendor data length %d\n",
134 "length %d\n", vendor->byte_length); 143 __FUNCTION__, vendor->byte_length);
135 kfree(buffer.pointer); 144 kfree(buffer.pointer);
136 return NULL; 145 return NULL;
137 } 146 }
@@ -143,34 +152,254 @@ sn_get_bussoft_ptr(struct pci_bus *bus)
143} 152}
144 153
145/* 154/*
146 * sn_acpi_bus_fixup 155 * sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
156 * pointers from the vendor resource using the
157 * provided acpi handle, and copy the structures
158 * into the argument buffers.
147 */ 159 */
148void 160static int
149sn_acpi_bus_fixup(struct pci_bus *bus) 161sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
162 struct sn_irq_info **sn_irq_info)
150{ 163{
151 struct pci_dev *pci_dev = NULL; 164 u64 addr;
152 struct pcibus_bussoft *prom_bussoft_ptr; 165 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
153 extern void sn_common_bus_fixup(struct pci_bus *, 166 struct sn_irq_info *irq_info, *irq_info_prom;
154 struct pcibus_bussoft *); 167 struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
168 struct acpi_resource *resource;
169 int ret = 0;
170 acpi_status status;
171 struct acpi_resource_vendor_typed *vendor;
155 172
156 if (!bus->parent) { /* If root bus */ 173 /*
157 prom_bussoft_ptr = sn_get_bussoft_ptr(bus); 174 * The pointer to this device's pcidev_info structure in
158 if (prom_bussoft_ptr == NULL) { 175 * the PROM, is in the vendor resource.
176 */
177 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
178 &sn_uuid, &buffer);
179 if (ACPI_FAILURE(status)) {
180 printk(KERN_ERR
181 "%s: acpi_get_vendor_resource() failed (0x%x) for: ",
182 __FUNCTION__, status);
183 acpi_ns_print_node_pathname(handle, NULL);
184 printk("\n");
185 return 1;
186 }
187
188 resource = buffer.pointer;
189 vendor = &resource->data.vendor_typed;
190 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
191 sizeof(struct pci_devdev_info *)) {
192 printk(KERN_ERR
193 "%s: Invalid vendor data length: %d for: ",
194 __FUNCTION__, vendor->byte_length);
195 acpi_ns_print_node_pathname(handle, NULL);
196 printk("\n");
197 ret = 1;
198 goto exit;
199 }
200
201 pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
202 if (!pcidev_ptr)
203 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
204
205 memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
206 pcidev_prom_ptr = __va(addr);
207 memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
208
209 /* Get the IRQ info */
210 irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
211 if (!irq_info)
212 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
213
214 if (pcidev_ptr->pdi_sn_irq_info) {
215 irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
216 memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
217 }
218
219 *pcidev_info = pcidev_ptr;
220 *sn_irq_info = irq_info;
221
222exit:
223 kfree(buffer.pointer);
224 return ret;
225}
226
227static unsigned int
228get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
229{
230 unsigned long adr;
231 acpi_handle child;
232 unsigned int devfn;
233 int function;
234 acpi_handle parent;
235 int slot;
236 acpi_status status;
237
238 /*
239 * Do an upward search to find the root bus device, and
240 * obtain the host devfn from the previous child device.
241 */
242 child = device_handle;
243 while (child) {
244 status = acpi_get_parent(child, &parent);
245 if (ACPI_FAILURE(status)) {
246 printk(KERN_ERR "%s: acpi_get_parent() failed "
247 "(0x%x) for: ", __FUNCTION__, status);
248 acpi_ns_print_node_pathname(child, NULL);
249 printk("\n");
250 panic("%s: Unable to find host devfn\n", __FUNCTION__);
251 }
252 if (parent == rootbus_handle)
253 break;
254 child = parent;
255 }
256 if (!child) {
257 printk(KERN_ERR "%s: Unable to find root bus for: ",
258 __FUNCTION__);
259 acpi_ns_print_node_pathname(device_handle, NULL);
260 printk("\n");
261 BUG();
262 }
263
264 status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
265 if (ACPI_FAILURE(status)) {
266 printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ",
267 __FUNCTION__, status);
268 acpi_ns_print_node_pathname(child, NULL);
269 printk("\n");
270 panic("%s: Unable to find host devfn\n", __FUNCTION__);
271 }
272
273 slot = (adr >> 16) & 0xffff;
274 function = adr & 0xffff;
275 devfn = PCI_DEVFN(slot, function);
276 return devfn;
277}
278
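
get_host_devfn() above ends by decoding the host bridge child's _ADR value: the upper 16 bits carry the device (slot) number, the lower 16 bits the function, and PCI_DEVFN() folds them into a devfn. A standalone sketch with a hypothetical _ADR value:

#include <stdio.h>

#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
        unsigned long adr = 0x00040001UL;   /* hypothetical _ADR: device 4, function 1 */
        int slot = (adr >> 16) & 0xffff;
        int function = adr & 0xffff;

        printf("slot %d function %d -> devfn 0x%02x\n",
               slot, function, PCI_DEVFN(slot, function));
        return 0;
}
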
279/*
280 * find_matching_device - Callback routine to find the ACPI device
281 * that matches up with our pci_dev device.
282 * Matching is done on bus number and devfn.
283 * To find the bus number for a particular
284 * ACPI device, we must look at the _BBN method
285 * of its parent.
286 */
287static acpi_status
288find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
289{
290 unsigned long bbn = -1;
291 unsigned long adr;
292 acpi_handle parent = NULL;
293 acpi_status status;
294 unsigned int devfn;
295 int function;
296 int slot;
297 struct sn_pcidev_match *info = context;
298
299 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
300 &adr);
301 if (ACPI_SUCCESS(status)) {
302 status = acpi_get_parent(handle, &parent);
303 if (ACPI_FAILURE(status)) {
159 printk(KERN_ERR 304 printk(KERN_ERR
160 "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to " 305 "%s: acpi_get_parent() failed (0x%x) for: ",
161 "obtain prom_bussoft_ptr\n", 306 __FUNCTION__, status);
162 pci_domain_nr(bus), bus->number); 307 acpi_ns_print_node_pathname(handle, NULL);
163 return; 308 printk("\n");
309 return AE_OK;
310 }
311 status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
312 NULL, &bbn);
313 if (ACPI_FAILURE(status)) {
314 printk(KERN_ERR
315 "%s: Failed to find _BBN in parent of: ",
316 __FUNCTION__);
317 acpi_ns_print_node_pathname(handle, NULL);
318 printk("\n");
319 return AE_OK;
320 }
321
322 slot = (adr >> 16) & 0xffff;
323 function = adr & 0xffff;
324 devfn = PCI_DEVFN(slot, function);
325 if ((info->devfn == devfn) && (info->bus == bbn)) {
326 /* We have a match! */
327 info->handle = handle;
328 return 1;
164 } 329 }
165 sn_common_bus_fixup(bus, prom_bussoft_ptr);
166 } 330 }
167 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 331 return AE_OK;
168 sn_pci_fixup_slot(pci_dev); 332}
333
334/*
335 * sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
336 * device matching the specified pci_dev,
337 * and return the pcidev info and irq info.
338 */
339int
340sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
341 struct sn_irq_info **sn_irq_info)
342{
343 unsigned int host_devfn;
344 struct sn_pcidev_match pcidev_match;
345 acpi_handle rootbus_handle;
346 unsigned long segment;
347 acpi_status status;
348
349 rootbus_handle = PCI_CONTROLLER(dev)->acpi_handle;
350 status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
351 &segment);
352 if (ACPI_SUCCESS(status)) {
353 if (segment != pci_domain_nr(dev)) {
354 printk(KERN_ERR
355 "%s: Segment number mismatch, 0x%lx vs 0x%x for: ",
356 __FUNCTION__, segment, pci_domain_nr(dev));
357 acpi_ns_print_node_pathname(rootbus_handle, NULL);
358 printk("\n");
359 return 1;
360 }
361 } else {
362 printk(KERN_ERR "%s: Unable to get __SEG from: ",
363 __FUNCTION__);
364 acpi_ns_print_node_pathname(rootbus_handle, NULL);
365 printk("\n");
366 return 1;
367 }
368
369 /*
370 * We want to search all devices in this segment/domain
371 * of the ACPI namespace for the matching ACPI device,
372 * which holds the pcidev_info pointer in its vendor resource.
373 */
374 pcidev_match.bus = dev->bus->number;
375 pcidev_match.devfn = dev->devfn;
376 pcidev_match.handle = NULL;
377
378 acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
379 find_matching_device, &pcidev_match, NULL);
380
381 if (!pcidev_match.handle) {
382 printk(KERN_ERR
383 "%s: Could not find matching ACPI device for %s.\n",
384 __FUNCTION__, pci_name(dev));
385 return 1;
169 } 386 }
387
388 if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
389 return 1;
390
391 /* Build up the pcidev_info.pdi_slot_host_handle */
392 host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
393 (*pcidev_info)->pdi_slot_host_handle =
394 ((unsigned long) pci_domain_nr(dev) << 40) |
395 /* bus == 0 */
396 host_devfn;
397 return 0;
170} 398}
171 399
172/* 400/*
173 * sn_acpi_slot_fixup - Perform any SN specific slot fixup. 401 * sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
402 * Perform any SN specific slot fixup.
174 * At present there does not appear to be 403 * At present there does not appear to be
175 * any generic way to handle a ROM image 404 * any generic way to handle a ROM image
176 * that has been shadowed by the PROM, so 405 * that has been shadowed by the PROM, so
@@ -179,11 +408,18 @@ sn_acpi_bus_fixup(struct pci_bus *bus)
179 */ 408 */
180 409
181void 410void
182sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 411sn_acpi_slot_fixup(struct pci_dev *dev)
183{ 412{
184 void __iomem *addr; 413 void __iomem *addr;
414 struct pcidev_info *pcidev_info = NULL;
415 struct sn_irq_info *sn_irq_info = NULL;
185 size_t size; 416 size_t size;
186 417
418 if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
419 panic("%s: Failure obtaining pcidev_info for %s\n",
420 __FUNCTION__, pci_name(dev));
421 }
422
187 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { 423 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
188 /* 424 /*
189 * A valid ROM image exists and has been shadowed by the 425 * A valid ROM image exists and has been shadowed by the
@@ -200,8 +436,11 @@ sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
200 (unsigned long) addr + size; 436 (unsigned long) addr + size;
201 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY; 437 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
202 } 438 }
439 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
203} 440}
204 441
442EXPORT_SYMBOL(sn_acpi_slot_fixup);
443
205static struct acpi_driver acpi_sn_hubdev_driver = { 444static struct acpi_driver acpi_sn_hubdev_driver = {
206 .name = "SGI HUBDEV Driver", 445 .name = "SGI HUBDEV Driver",
207 .ids = "SGIHUB,SGITIO", 446 .ids = "SGIHUB,SGITIO",
@@ -212,6 +451,33 @@ static struct acpi_driver acpi_sn_hubdev_driver = {
212 451
213 452
214/* 453/*
454 * sn_acpi_bus_fixup - Perform SN specific setup of software structs
455 * (pcibus_bussoft, pcidev_info) and hardware
456 * registers, for the specified bus and devices under it.
457 */
458void
459sn_acpi_bus_fixup(struct pci_bus *bus)
460{
461 struct pci_dev *pci_dev = NULL;
462 struct pcibus_bussoft *prom_bussoft_ptr;
463
464 if (!bus->parent) { /* If root bus */
465 prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
466 if (prom_bussoft_ptr == NULL) {
467 printk(KERN_ERR
468 "%s: 0x%04x:0x%02x Unable to "
469 "obtain prom_bussoft_ptr\n",
470 __FUNCTION__, pci_domain_nr(bus), bus->number);
471 return;
472 }
473 sn_common_bus_fixup(bus, prom_bussoft_ptr);
474 }
475 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
476 sn_acpi_slot_fixup(pci_dev);
477 }
478}
479
480/*
215 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the 481 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
216 * nodes and root buses in the DSDT. As a result, bus scanning 482 * nodes and root buses in the DSDT. As a result, bus scanning
217 * will be initiated by the Linux ACPI code. 483 * will be initiated by the Linux ACPI code.
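
sn_acpi_get_pcidev_info() above packs the PCI domain into bits 40 and up of pdi_slot_host_handle, leaves the bus field (bits 32-39) at zero, and keeps the host devfn in the low bits; sn_pci_fixup_slot() in io_common.c later reads the bus back with (handle >> 32) & 0xff. A sketch of that encoding with made-up values (the devfn mask on unpack is an assumption, not shown in this hunk):

#include <stdio.h>

int main(void)
{
        unsigned long long domain = 1, host_devfn = 0x21;  /* hypothetical values */

        /* pack: domain above bit 40, bus == 0 in bits 32-39, host devfn low */
        unsigned long long handle = (domain << 40) | host_devfn;

        /* unpack: bus the way sn_pci_fixup_slot() reads it; devfn mask assumed */
        unsigned int bus_no = (handle >> 32) & 0xff;
        unsigned int devfn  = handle & 0xffffffff;

        printf("handle 0x%llx -> domain %llu bus %u devfn 0x%x\n",
               handle, handle >> 40, bus_no, devfn);
        return 0;
}
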
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index d4dd8f4b6b8d..d48bcd83253c 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -26,14 +26,10 @@
26#include <linux/acpi.h> 26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h> 27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h> 28#include <asm/sn/acpi.h>
29#include "acpi/acglobal.h"
29 30
30extern void sn_init_cpei_timer(void); 31extern void sn_init_cpei_timer(void);
31extern void register_sn_procfs(void); 32extern void register_sn_procfs(void);
32extern void sn_acpi_bus_fixup(struct pci_bus *);
33extern void sn_bus_fixup(struct pci_bus *);
34extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
35extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
36extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
37extern void sn_io_acpi_init(void); 33extern void sn_io_acpi_init(void);
38extern void sn_io_init(void); 34extern void sn_io_init(void);
39 35
@@ -48,6 +44,9 @@ struct sysdata_el {
48 44
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */ 45int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50 46
47int sn_acpi_rev; /* SN ACPI revision */
48EXPORT_SYMBOL_GPL(sn_acpi_rev);
49
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ 50struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52 51
53/* 52/*
@@ -99,25 +98,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
99} 98}
100 99
101/* 100/*
102 * Retrieve the pci device information given the bus and device|function number.
103 */
104static inline u64
105sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
106 u64 sn_irq_info)
107{
108 struct ia64_sal_retval ret_stuff;
109 ret_stuff.status = 0;
110 ret_stuff.v0 = 0;
111
112 SAL_CALL_NOLOCK(ret_stuff,
113 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
114 (u64) segment, (u64) bus_number, (u64) devfn,
115 (u64) pci_dev,
116 sn_irq_info, 0, 0);
117 return ret_stuff.v0;
118}
119
120/*
121 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified 101 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
122 * device. 102 * device.
123 */ 103 */
@@ -249,50 +229,25 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
249} 229}
250 230
251/* 231/*
252 * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent 232 * sn_pci_fixup_slot()
253 * with the Linux PCI abstraction layer. Resources
254 * acquired from our PCI provider include PIO maps
255 * to BAR space and interrupt objects.
256 */ 233 */
257void sn_pci_fixup_slot(struct pci_dev *dev) 234void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
235 struct sn_irq_info *sn_irq_info)
258{ 236{
259 int segment = pci_domain_nr(dev->bus); 237 int segment = pci_domain_nr(dev->bus);
260 int status = 0;
261 struct pcibus_bussoft *bs; 238 struct pcibus_bussoft *bs;
262 struct pci_bus *host_pci_bus; 239 struct pci_bus *host_pci_bus;
263 struct pci_dev *host_pci_dev; 240 struct pci_dev *host_pci_dev;
264 struct pcidev_info *pcidev_info; 241 unsigned int bus_no, devfn;
265 struct sn_irq_info *sn_irq_info;
266 unsigned int bus_no, devfn;
267 242
268 pci_dev_get(dev); /* for the sysdata pointer */ 243 pci_dev_get(dev); /* for the sysdata pointer */
269 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
270 if (!pcidev_info)
271 BUG(); /* Cannot afford to run out of memory */
272
273 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
274 if (!sn_irq_info)
275 BUG(); /* Cannot afford to run out of memory */
276
277 /* Call to retrieve pci device information needed by kernel. */
278 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
279 dev->devfn,
280 (u64) __pa(pcidev_info),
281 (u64) __pa(sn_irq_info));
282 if (status)
283 BUG(); /* Cannot get platform pci device information */
284 244
285 /* Add pcidev_info to list in pci_controller.platform_data */ 245 /* Add pcidev_info to list in pci_controller.platform_data */
286 list_add_tail(&pcidev_info->pdi_list, 246 list_add_tail(&pcidev_info->pdi_list,
287 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info)); 247 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
288
289 if (SN_ACPI_BASE_SUPPORT())
290 sn_acpi_slot_fixup(dev, pcidev_info);
291 else
292 sn_more_slot_fixup(dev, pcidev_info);
293 /* 248 /*
294 * Using the PROMs values for the PCI host bus, get the Linux 249 * Using the PROMs values for the PCI host bus, get the Linux
295 * PCI host_pci_dev struct and set up host bus linkages 250 * PCI host_pci_dev struct and set up host bus linkages
296 */ 251 */
297 252
298 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; 253 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
489 sprintf(address, "%s^%d", address, geo_slot(geoid)); 444 sprintf(address, "%s^%d", address, geo_slot(geoid));
490} 445}
491 446
492/*
493 * sn_pci_fixup_bus() - Perform SN specific setup of software structs
494 * (pcibus_bussoft, pcidev_info) and hardware
495 * registers, for the specified bus and devices under it.
496 */
497void __devinit 447void __devinit
498sn_pci_fixup_bus(struct pci_bus *bus) 448sn_pci_fixup_bus(struct pci_bus *bus)
499{ 449{
@@ -519,6 +469,15 @@ sn_io_early_init(void)
519 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) 469 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
520 return 0; 470 return 0;
521 471
472 /* we set the acpi revision to that of the DSDT table OEM rev. */
473 {
474 struct acpi_table_header *header = NULL;
475
476 acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
477 BUG_ON(header == NULL);
478 sn_acpi_rev = header->oem_revision;
479 }
480
522 /* 481 /*
523 * prime sn_pci_provider[]. Individial provider init routines will 482 * prime sn_pci_provider[]. Individial provider init routines will
524 * override their respective default entries. 483 * override their respective default entries.
@@ -544,8 +503,12 @@ sn_io_early_init(void)
544 register_sn_procfs(); 503 register_sn_procfs();
545#endif 504#endif
546 505
547 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n", 506 {
548 acpi_gbl_DSDT->oem_revision); 507 struct acpi_table_header *header;
508 (void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
509 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
510 header->oem_revision);
511 }
549 if (SN_ACPI_BASE_SUPPORT()) 512 if (SN_ACPI_BASE_SUPPORT())
550 sn_io_acpi_init(); 513 sn_io_acpi_init();
551 else 514 else
@@ -605,7 +568,6 @@ sn_io_late_init(void)
605 568
606fs_initcall(sn_io_late_init); 569fs_initcall(sn_io_late_init);
607 570
608EXPORT_SYMBOL(sn_pci_fixup_slot);
609EXPORT_SYMBOL(sn_pci_unfixup_slot); 571EXPORT_SYMBOL(sn_pci_unfixup_slot);
610EXPORT_SYMBOL(sn_bus_store_sysdata); 572EXPORT_SYMBOL(sn_bus_store_sysdata);
611EXPORT_SYMBOL(sn_bus_free_sysdata); 573EXPORT_SYMBOL(sn_bus_free_sysdata);
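
sn_io_early_init() above now pulls the OEM revision out of the DSDT header via acpi_get_table_by_index() instead of the removed acpi_gbl_DSDT global. For reference, the OEM revision is the 32-bit field at byte offset 24 of the standard ACPI table header; a hedged userspace peek at it (sysfs path and root access assumed, error handling minimal):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* exported DSDT image; reading it usually needs root */
        FILE *f = fopen("/sys/firmware/acpi/tables/DSDT", "rb");
        unsigned char hdr[36];

        if (!f || fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
                perror("DSDT");
                return 1;
        }
        fclose(f);

        /* ACPI table header: OemRevision is the 32-bit field at offset 24 */
        uint32_t oem_rev = hdr[24] | hdr[25] << 8 | hdr[26] << 16
                         | (uint32_t)hdr[27] << 24;
        printf("ACPI DSDT OEM Rev 0x%x\n", oem_rev);
        return 0;
}
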
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 9ad843e0383b..600be3ebae05 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
56 return ret_stuff.v0; 56 return ret_stuff.v0;
57} 57}
58 58
59/*
60 * Retrieve the pci device information given the bus and device|function number.
61 */
62static inline u64
63sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
64 u64 sn_irq_info)
65{
66 struct ia64_sal_retval ret_stuff;
67 ret_stuff.status = 0;
68 ret_stuff.v0 = 0;
69
70 SAL_CALL_NOLOCK(ret_stuff,
71 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
72 (u64) segment, (u64) bus_number, (u64) devfn,
73 (u64) pci_dev,
74 sn_irq_info, 0, 0);
75 return ret_stuff.v0;
76}
77
59 78
60/* 79/*
61 * sn_fixup_ionodes() - This routine initializes the HUB data structure for 80 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
172} 191}
173 192
174/* 193/*
175 * sn_more_slot_fixup() - We are not running with an ACPI capable PROM, 194 * sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
176 * and need to convert the pci_dev->resource 195 * and need to convert the pci_dev->resource
177 * 'start' and 'end' addresses to mapped addresses, 196 * 'start' and 'end' addresses to mapped addresses,
178 * and setup the pci_controller->window array entries. 197 * and setup the pci_controller->window array entries.
179 */ 198 */
180void 199void
181sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info) 200sn_io_slot_fixup(struct pci_dev *dev)
182{ 201{
183 unsigned int count = 0; 202 unsigned int count = 0;
184 int idx; 203 int idx;
185 s64 pci_addrs[PCI_ROM_RESOURCE + 1]; 204 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
186 unsigned long addr, end, size, start; 205 unsigned long addr, end, size, start;
206 struct pcidev_info *pcidev_info;
207 struct sn_irq_info *sn_irq_info;
208 int status;
209
210 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
211 if (!pcidev_info)
212 panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
213
214 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
215 if (!sn_irq_info)
216 panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
217
218 /* Call to retrieve pci device information needed by kernel. */
219 status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
220 (u64) dev->bus->number,
221 dev->devfn,
222 (u64) __pa(pcidev_info),
223 (u64) __pa(sn_irq_info));
224
225 if (status)
226 BUG(); /* Cannot get platform pci device information */
227
187 228
188 /* Copy over PIO Mapped Addresses */ 229 /* Copy over PIO Mapped Addresses */
189 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 230 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
219 */ 260 */
220 if (count > 0) 261 if (count > 0)
221 sn_pci_window_fixup(dev, count, pci_addrs); 262 sn_pci_window_fixup(dev, count, pci_addrs);
263
264 sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
222} 265}
223 266
267EXPORT_SYMBOL(sn_io_slot_fixup);
268
224/* 269/*
225 * sn_pci_controller_fixup() - This routine sets up a bus's resources 270 * sn_pci_controller_fixup() - This routine sets up a bus's resources
226 * consistent with the Linux PCI abstraction layer. 271 * consistent with the Linux PCI abstraction layer.
@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
272{ 317{
273 struct pci_dev *pci_dev = NULL; 318 struct pci_dev *pci_dev = NULL;
274 struct pcibus_bussoft *prom_bussoft_ptr; 319 struct pcibus_bussoft *prom_bussoft_ptr;
275 extern void sn_common_bus_fixup(struct pci_bus *,
276 struct pcibus_bussoft *);
277
278 320
279 if (!bus->parent) { /* If root bus */ 321 if (!bus->parent) { /* If root bus */
280 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data; 322 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
291 prom_bussoft_ptr->bs_legacy_mem); 333 prom_bussoft_ptr->bs_legacy_mem);
292 } 334 }
293 list_for_each_entry(pci_dev, &bus->devices, bus_list) { 335 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
294 sn_pci_fixup_slot(pci_dev); 336 sn_io_slot_fixup(pci_dev);
295 } 337 }
296 338
297} 339}
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 4aa4f301d56d..ab7e2fd40798 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
@@ -26,9 +26,10 @@
26 * @port: port to convert 26 * @port: port to convert
27 * 27 *
28 * Legacy in/out instructions are converted to ld/st instructions 28 * Legacy in/out instructions are converted to ld/st instructions
29 * on IA64. This routine will convert a port number into a valid 29 * on IA64. This routine will convert a port number into a valid
30 * SN i/o address. Used by sn_in*() and sn_out*(). 30 * SN i/o address. Used by sn_in*() and sn_out*().
31 */ 31 */
32
32void *sn_io_addr(unsigned long port) 33void *sn_io_addr(unsigned long port)
33{ 34{
34 if (!IS_RUNNING_ON_SIMULATOR()) { 35 if (!IS_RUNNING_ON_SIMULATOR()) {
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index b3a435fd70fb..ea3dc38d73fd 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -59,13 +59,12 @@ void sn_teardown_msi_irq(unsigned int irq)
59 sn_intr_free(nasid, widget, sn_irq_info); 59 sn_intr_free(nasid, widget, sn_irq_info);
60 sn_msi_info[irq].sn_irq_info = NULL; 60 sn_msi_info[irq].sn_irq_info = NULL;
61 61
62 return; 62 destroy_irq(irq);
63} 63}
64 64
65int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev) 65int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
66{ 66{
67 struct msi_msg msg; 67 struct msi_msg msg;
68 struct msi_desc *entry;
69 int widget; 68 int widget;
70 int status; 69 int status;
71 nasid_t nasid; 70 nasid_t nasid;
@@ -73,8 +72,8 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
73 struct sn_irq_info *sn_irq_info; 72 struct sn_irq_info *sn_irq_info;
74 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); 73 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
75 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 74 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
75 int irq;
76 76
77 entry = get_irq_data(irq);
78 if (!entry->msi_attrib.is_64) 77 if (!entry->msi_attrib.is_64)
79 return -EINVAL; 78 return -EINVAL;
80 79
@@ -84,6 +83,11 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
84 if (provider == NULL || provider->dma_map_consistent == NULL) 83 if (provider == NULL || provider->dma_map_consistent == NULL)
85 return -EINVAL; 84 return -EINVAL;
86 85
86 irq = create_irq();
87 if (irq < 0)
88 return irq;
89
90 set_irq_msi(irq, entry);
87 /* 91 /*
88 * Set up the vector plumbing. Let the prom (via sn_intr_alloc) 92 * Set up the vector plumbing. Let the prom (via sn_intr_alloc)
89 * decide which cpu to direct this msi at by default. 93 * decide which cpu to direct this msi at by default.
@@ -95,12 +99,15 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
95 SWIN_WIDGETNUM(bussoft->bs_base); 99 SWIN_WIDGETNUM(bussoft->bs_base);
96 100
97 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); 101 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
98 if (! sn_irq_info) 102 if (! sn_irq_info) {
103 destroy_irq(irq);
99 return -ENOMEM; 104 return -ENOMEM;
105 }
100 106
101 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1); 107 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
102 if (status) { 108 if (status) {
103 kfree(sn_irq_info); 109 kfree(sn_irq_info);
110 destroy_irq(irq);
104 return -ENOMEM; 111 return -ENOMEM;
105 } 112 }
106 113
@@ -121,6 +128,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
121 if (! bus_addr) { 128 if (! bus_addr) {
122 sn_intr_free(nasid, widget, sn_irq_info); 129 sn_intr_free(nasid, widget, sn_irq_info);
123 kfree(sn_irq_info); 130 kfree(sn_irq_info);
131 destroy_irq(irq);
124 return -ENOMEM; 132 return -ENOMEM;
125 } 133 }
126 134
@@ -139,7 +147,7 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
139 write_msi_msg(irq, &msg); 147 write_msi_msg(irq, &msg);
140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 148 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
141 149
142 return 0; 150 return irq;
143} 151}
144 152
145#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
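
sn_setup_msi_irq() now owns its vector: it calls create_irq() itself, ties the msi_desc to it with set_irq_msi(), returns the irq number on success, and tears the irq down on every error path (and in sn_teardown_msi_irq()). A generic sketch of that unwind discipline, with stand-in resources and a fake failure step; the real code keeps sn_irq_info on success rather than freeing it:

#include <stdio.h>
#include <stdlib.h>

static int  create_irq_demo(void)      { return 42; }   /* pretend vector allocation */
static void destroy_irq_demo(int irq)  { printf("destroy irq %d\n", irq); }

static int setup_msi_demo(int fail_at_alloc)
{
        int irq = create_irq_demo();
        void *irq_info;

        if (irq < 0)
                return irq;

        irq_info = malloc(64);                  /* stands in for sn_irq_info */
        if (!irq_info)
                goto err_irq;

        if (fail_at_alloc)                      /* stands in for sn_intr_alloc() failing */
                goto err_info;

        printf("irq %d ready\n", irq);
        free(irq_info);                         /* demo only; real code keeps it */
        return irq;                             /* success returns the irq itself */

err_info:
        free(irq_info);
err_irq:
        destroy_irq_demo(irq);
        return -1;
}

int main(void)
{
        setup_msi_demo(0);
        setup_msi_demo(1);
        return 0;
}
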
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 6846dc9b432d..04a8256017eb 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -20,7 +20,8 @@
20#include "xtalk/hubdev.h" 20#include "xtalk/hubdev.h"
21 21
22int 22int
23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) 23sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
24 char **ssdt)
24{ 25{
25 struct ia64_sal_retval ret_stuff; 26 struct ia64_sal_retval ret_stuff;
26 u64 busnum; 27 u64 busnum;
@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
32 segment = soft->pbi_buscommon.bs_persist_segment; 33 segment = soft->pbi_buscommon.bs_persist_segment;
33 busnum = soft->pbi_buscommon.bs_persist_busnum; 34 busnum = soft->pbi_buscommon.bs_persist_busnum;
34 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment, 35 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
35 busnum, (u64) device, (u64) resp, 0, 0, 0); 36 busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
37 0, 0);
36 38
37 return (int)ret_stuff.v0; 39 return (int)ret_stuff.v0;
38} 40}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index bbd386f572d9..44a0224c32dd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -575,6 +575,7 @@ config SGI_IP27
575 select DMA_IP27 575 select DMA_IP27
576 select EARLY_PRINTK 576 select EARLY_PRINTK
577 select HW_HAS_PCI 577 select HW_HAS_PCI
578 select NR_CPUS_DEFAULT_64
578 select PCI_DOMAINS 579 select PCI_DOMAINS
579 select SYS_HAS_CPU_R10000 580 select SYS_HAS_CPU_R10000
580 select SYS_SUPPORTS_64BIT_KERNEL 581 select SYS_SUPPORTS_64BIT_KERNEL
@@ -612,6 +613,7 @@ config SIBYTE_BIGSUR
612 bool "Sibyte BCM91480B-BigSur" 613 bool "Sibyte BCM91480B-BigSur"
613 select BOOT_ELF32 614 select BOOT_ELF32
614 select DMA_COHERENT 615 select DMA_COHERENT
616 select NR_CPUS_DEFAULT_4
615 select PCI_DOMAINS 617 select PCI_DOMAINS
616 select SIBYTE_BCM1x80 618 select SIBYTE_BCM1x80
617 select SWAP_IO_SPACE 619 select SWAP_IO_SPACE
@@ -623,6 +625,7 @@ config SIBYTE_SWARM
623 bool "Sibyte BCM91250A-SWARM" 625 bool "Sibyte BCM91250A-SWARM"
624 select BOOT_ELF32 626 select BOOT_ELF32
625 select DMA_COHERENT 627 select DMA_COHERENT
628 select NR_CPUS_DEFAULT_2
626 select SIBYTE_SB1250 629 select SIBYTE_SB1250
627 select SWAP_IO_SPACE 630 select SWAP_IO_SPACE
628 select SYS_HAS_CPU_SB1 631 select SYS_HAS_CPU_SB1
@@ -635,6 +638,7 @@ config SIBYTE_SENTOSA
635 depends on EXPERIMENTAL 638 depends on EXPERIMENTAL
636 select BOOT_ELF32 639 select BOOT_ELF32
637 select DMA_COHERENT 640 select DMA_COHERENT
641 select NR_CPUS_DEFAULT_2
638 select SIBYTE_SB1250 642 select SIBYTE_SB1250
639 select SWAP_IO_SPACE 643 select SWAP_IO_SPACE
640 select SYS_HAS_CPU_SB1 644 select SYS_HAS_CPU_SB1
@@ -668,6 +672,7 @@ config SIBYTE_PTSWARM
668 depends on EXPERIMENTAL 672 depends on EXPERIMENTAL
669 select BOOT_ELF32 673 select BOOT_ELF32
670 select DMA_COHERENT 674 select DMA_COHERENT
675 select NR_CPUS_DEFAULT_2
671 select SIBYTE_SB1250 676 select SIBYTE_SB1250
672 select SWAP_IO_SPACE 677 select SWAP_IO_SPACE
673 select SYS_HAS_CPU_SB1 678 select SYS_HAS_CPU_SB1
@@ -680,6 +685,7 @@ config SIBYTE_LITTLESUR
680 depends on EXPERIMENTAL 685 depends on EXPERIMENTAL
681 select BOOT_ELF32 686 select BOOT_ELF32
682 select DMA_COHERENT 687 select DMA_COHERENT
688 select NR_CPUS_DEFAULT_2
683 select SIBYTE_SB1250 689 select SIBYTE_SB1250
684 select SWAP_IO_SPACE 690 select SWAP_IO_SPACE
685 select SYS_HAS_CPU_SB1 691 select SYS_HAS_CPU_SB1
@@ -790,23 +796,6 @@ config TOSHIBA_RBTX4938
790 796
791endchoice 797endchoice
792 798
793config KEXEC
794 bool "Kexec system call (EXPERIMENTAL)"
795 depends on EXPERIMENTAL
796 help
797 kexec is a system call that implements the ability to shutdown your
798 current kernel, and to start another kernel. It is like a reboot
799 but it is independent of the system firmware. And like a reboot
800 you can start any kernel with it, not just Linux.
801
802 The name comes from the similarity to the exec system call.
803
804 It is an ongoing process to be certain the hardware in a machine
805 is properly shutdown, so do not be surprised if this code does not
806 initially work for you. It may help to enable device hotplugging
807 support. As of this writing the exact hardware interface is
808 strongly in flux, so no good recommendation can be made.
809
810source "arch/mips/ddb5xxx/Kconfig" 799source "arch/mips/ddb5xxx/Kconfig"
811source "arch/mips/gt64120/ev64120/Kconfig" 800source "arch/mips/gt64120/ev64120/Kconfig"
812source "arch/mips/jazz/Kconfig" 801source "arch/mips/jazz/Kconfig"
@@ -1541,6 +1530,8 @@ config MIPS_MT_SMTC
1541 select CPU_MIPSR2_IRQ_VI 1530 select CPU_MIPSR2_IRQ_VI
1542 select CPU_MIPSR2_SRS 1531 select CPU_MIPSR2_SRS
1543 select MIPS_MT 1532 select MIPS_MT
1533 select NR_CPUS_DEFAULT_2
1534 select NR_CPUS_DEFAULT_8
1544 select SMP 1535 select SMP
1545 select SYS_SUPPORTS_SMP 1536 select SYS_SUPPORTS_SMP
1546 help 1537 help
@@ -1756,13 +1747,34 @@ config SMP
1756config SYS_SUPPORTS_SMP 1747config SYS_SUPPORTS_SMP
1757 bool 1748 bool
1758 1749
1750config NR_CPUS_DEFAULT_2
1751 bool
1752
1753config NR_CPUS_DEFAULT_4
1754 bool
1755
1756config NR_CPUS_DEFAULT_8
1757 bool
1758
1759config NR_CPUS_DEFAULT_16
1760 bool
1761
1762config NR_CPUS_DEFAULT_32
1763 bool
1764
1765config NR_CPUS_DEFAULT_64
1766 bool
1767
1759config NR_CPUS 1768config NR_CPUS
1760 int "Maximum number of CPUs (2-64)" 1769 int "Maximum number of CPUs (2-64)"
1761 range 2 64 1770 range 2 64
1762 depends on SMP 1771 depends on SMP
1763 default "64" if SGI_IP27 1772 default "2" if NR_CPUS_DEFAULT_2
1764 default "2" 1773 default "4" if NR_CPUS_DEFAULT_4
1765 default "8" if MIPS_MT_SMTC 1774 default "8" if NR_CPUS_DEFAULT_8
1775 default "16" if NR_CPUS_DEFAULT_16
1776 default "32" if NR_CPUS_DEFAULT_32
1777 default "64" if NR_CPUS_DEFAULT_64
1766 help 1778 help
1767 This allows you to specify the maximum number of CPUs which this 1779 This allows you to specify the maximum number of CPUs which this
1768 kernel will support. The maximum supported value is 32 for 32-bit 1780 kernel will support. The maximum supported value is 32 for 32-bit
@@ -1859,6 +1871,40 @@ config MIPS_INSANE_LARGE
1859 This will result in additional memory usage, so it is not 1871 This will result in additional memory usage, so it is not
1860 recommended for normal users. 1872 recommended for normal users.
1861 1873
1874config KEXEC
1875 bool "Kexec system call (EXPERIMENTAL)"
1876 depends on EXPERIMENTAL
1877 help
1878 kexec is a system call that implements the ability to shutdown your
1879 current kernel, and to start another kernel. It is like a reboot
1880 but it is independent of the system firmware. And like a reboot
1881 you can start any kernel with it, not just Linux.
1882
1883 The name comes from the similarity to the exec system call.
1884
1885 It is an ongoing process to be certain the hardware in a machine
1886 is properly shutdown, so do not be surprised if this code does not
1887 initially work for you. It may help to enable device hotplugging
1888 support. As of this writing the exact hardware interface is
1889 strongly in flux, so no good recommendation can be made.
1890
1891config SECCOMP
1892 bool "Enable seccomp to safely compute untrusted bytecode"
1893 depends on PROC_FS && BROKEN
1894 default y
1895 help
1896 This kernel feature is useful for number crunching applications
1897 that may need to compute untrusted bytecode during their
1898 execution. By using pipes or other transports made available to
1899 the process as file descriptors supporting the read/write
1900 syscalls, it's possible to isolate those applications in
1901 their own address space using seccomp. Once seccomp is
1902 enabled via /proc/<pid>/seccomp, it cannot be disabled
1903 and the task is only allowed to execute a few safe syscalls
1904 defined by each seccomp mode.
1905
1906 If unsure, say Y. Only embedded should say N here.
1907
1862endmenu 1908endmenu
1863 1909
1864config RWSEM_GENERIC_SPINLOCK 1910config RWSEM_GENERIC_SPINLOCK
@@ -2025,23 +2071,6 @@ config BINFMT_ELF32
2025 bool 2071 bool
2026 default y if MIPS32_O32 || MIPS32_N32 2072 default y if MIPS32_O32 || MIPS32_N32
2027 2073
2028config SECCOMP
2029 bool "Enable seccomp to safely compute untrusted bytecode"
2030 depends on PROC_FS && BROKEN
2031 default y
2032 help
2033 This kernel feature is useful for number crunching applications
2034 that may need to compute untrusted bytecode during their
2035 execution. By using pipes or other transports made available to
2036 the process as file descriptors supporting the read/write
2037 syscalls, it's possible to isolate those applications in
2038 their own address space using seccomp. Once seccomp is
2039 enabled via /proc/<pid>/seccomp, it cannot be disabled
2040 and the task is only allowed to execute a few safe syscalls
2041 defined by each seccomp mode.
2042
2043 If unsure, say Y. Only embedded should say N here.
2044
2045config PM 2074config PM
2046 bool "Power Management support (EXPERIMENTAL)" 2075 bool "Power Management support (EXPERIMENTAL)"
2047 depends on EXPERIMENTAL && SOC_AU1X00 2076 depends on EXPERIMENTAL && SOC_AU1X00
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 5d6afb52d904..9351f1c04a9d 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -22,10 +22,10 @@ config CMDLINE
22 string "Default kernel command string" 22 string "Default kernel command string"
23 default "" 23 default ""
24 help 24 help
25 On some platforms, there is currently no way for the boot loader to 25 On some platforms, there is currently no way for the boot loader to
26 pass arguments to the kernel. For these platforms, you can supply 26 pass arguments to the kernel. For these platforms, you can supply
27 some command-line options at build time by entering them here. In 27 some command-line options at build time by entering them here. In
28 other cases you can specify kernel args so that you don't have 28 other cases you can specify kernel args so that you don't have
29 to set them up in board prom initialization routines. 29 to set them up in board prom initialization routines.
30 30
31config DEBUG_STACK_USAGE 31config DEBUG_STACK_USAGE
diff --git a/arch/mips/arc/identify.c b/arch/mips/arc/identify.c
index 3ba7c47f9f23..4b907369b0f9 100644
--- a/arch/mips/arc/identify.c
+++ b/arch/mips/arc/identify.c
@@ -77,7 +77,7 @@ static struct smatch * __init string_to_mach(const char *s)
77{ 77{
78 int i; 78 int i;
79 79
80 for (i = 0; i < (sizeof(mach_table) / sizeof (mach_table[0])); i++) { 80 for (i = 0; i < ARRAY_SIZE(mach_table); i++) {
81 if (!strcmp(s, mach_table[i].arcname)) 81 if (!strcmp(s, mach_table[i].arcname))
82 return &mach_table[i]; 82 return &mach_table[i];
83 } 83 }
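
The identify.c change swaps the open-coded sizeof division for the kernel's ARRAY_SIZE() macro. A standalone illustration of the idiom (the table entries here are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *mach_table[] = { "SGI-IP22", "SGI-IP28", "SGI-IP32" };

int main(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(mach_table); i++)
                printf("%u: %s\n", i, mach_table[i]);
        return 0;
}
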
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 8a9ef58cc399..456cb81a32d9 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -141,30 +141,20 @@ void __init prom_meminit(void)
141 } 141 }
142} 142}
143 143
144unsigned long __init prom_free_prom_memory(void) 144void __init prom_free_prom_memory(void)
145{ 145{
146 unsigned long freed = 0;
147 unsigned long addr; 146 unsigned long addr;
148 int i; 147 int i;
149 148
150 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP) 149 if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
151 return 0; 150 return;
152 151
153 for (i = 0; i < boot_mem_map.nr_map; i++) { 152 for (i = 0; i < boot_mem_map.nr_map; i++) {
154 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 153 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
155 continue; 154 continue;
156 155
157 addr = boot_mem_map.map[i].addr; 156 addr = boot_mem_map.map[i].addr;
158 while (addr < boot_mem_map.map[i].addr 157 free_init_pages("prom memory",
159 + boot_mem_map.map[i].size) { 158 addr, addr + boot_mem_map.map[i].size);
160 ClearPageReserved(virt_to_page(__va(addr)));
161 init_page_count(virt_to_page(__va(addr)));
162 free_page((unsigned long)__va(addr));
163 addr += PAGE_SIZE;
164 freed += PAGE_SIZE;
165 }
166 } 159 }
167 printk(KERN_INFO "Freeing prom memory: %ldkb freed\n", freed >> 10);
168
169 return freed;
170} 160}
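
prom_free_prom_memory() now hands each BOOT_MEM_ROM_DATA region to free_init_pages() instead of open-coding the per-page free and the byte accounting, and its return value is dropped throughout the tree. A sketch of the selection logic with a made-up boot memory map:

#include <stdio.h>

enum { BOOT_MEM_RAM = 1, BOOT_MEM_ROM_DATA = 2 };

struct boot_mem_entry {
        unsigned long addr, size;
        int type;
};

static void free_init_pages_demo(const char *what,
                                 unsigned long begin, unsigned long end)
{
        printf("freeing %s: 0x%lx - 0x%lx\n", what, begin, end);
}

int main(void)
{
        /* hypothetical map: one RAM region, one PROM data region */
        struct boot_mem_entry map[] = {
                { 0x00000000UL, 0x08000000UL, BOOT_MEM_RAM },
                { 0x08000000UL, 0x00040000UL, BOOT_MEM_ROM_DATA },
        };
        unsigned int i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                if (map[i].type != BOOT_MEM_ROM_DATA)
                        continue;
                free_init_pages_demo("prom memory",
                                     map[i].addr, map[i].addr + map[i].size);
        }
        return 0;
}
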
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 9cf7b6715836..ea6e99fbe2f7 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -233,7 +233,7 @@ void restore_local_and_enable(int controller, unsigned long mask)
233 233
234 234
235static struct irq_chip rise_edge_irq_type = { 235static struct irq_chip rise_edge_irq_type = {
236 .typename = "Au1000 Rise Edge", 236 .name = "Au1000 Rise Edge",
237 .ack = mask_and_ack_rise_edge_irq, 237 .ack = mask_and_ack_rise_edge_irq,
238 .mask = local_disable_irq, 238 .mask = local_disable_irq,
239 .mask_ack = mask_and_ack_rise_edge_irq, 239 .mask_ack = mask_and_ack_rise_edge_irq,
@@ -242,7 +242,7 @@ static struct irq_chip rise_edge_irq_type = {
242}; 242};
243 243
244static struct irq_chip fall_edge_irq_type = { 244static struct irq_chip fall_edge_irq_type = {
245 .typename = "Au1000 Fall Edge", 245 .name = "Au1000 Fall Edge",
246 .ack = mask_and_ack_fall_edge_irq, 246 .ack = mask_and_ack_fall_edge_irq,
247 .mask = local_disable_irq, 247 .mask = local_disable_irq,
248 .mask_ack = mask_and_ack_fall_edge_irq, 248 .mask_ack = mask_and_ack_fall_edge_irq,
@@ -251,7 +251,7 @@ static struct irq_chip fall_edge_irq_type = {
251}; 251};
252 252
253static struct irq_chip either_edge_irq_type = { 253static struct irq_chip either_edge_irq_type = {
254 .typename = "Au1000 Rise or Fall Edge", 254 .name = "Au1000 Rise or Fall Edge",
255 .ack = mask_and_ack_either_edge_irq, 255 .ack = mask_and_ack_either_edge_irq,
256 .mask = local_disable_irq, 256 .mask = local_disable_irq,
257 .mask_ack = mask_and_ack_either_edge_irq, 257 .mask_ack = mask_and_ack_either_edge_irq,
@@ -260,7 +260,7 @@ static struct irq_chip either_edge_irq_type = {
260}; 260};
261 261
262static struct irq_chip level_irq_type = { 262static struct irq_chip level_irq_type = {
263 .typename = "Au1000 Level", 263 .name = "Au1000 Level",
264 .ack = mask_and_ack_level_irq, 264 .ack = mask_and_ack_level_irq,
265 .mask = local_disable_irq, 265 .mask = local_disable_irq,
266 .mask_ack = mask_and_ack_level_irq, 266 .mask_ack = mask_and_ack_level_irq,
diff --git a/arch/mips/au1000/common/pci.c b/arch/mips/au1000/common/pci.c
index 9f8ce08e173b..6c25e6c09f78 100644
--- a/arch/mips/au1000/common/pci.c
+++ b/arch/mips/au1000/common/pci.c
@@ -76,13 +76,17 @@ static int __init au1x_pci_setup(void)
76 } 76 }
77 77
78#ifdef CONFIG_DMA_NONCOHERENT 78#ifdef CONFIG_DMA_NONCOHERENT
79 /* 79 {
80 * Set the NC bit in controller for Au1500 pre-AC silicon 80 /*
81 */ 81 * Set the NC bit in controller for Au1500 pre-AC silicon
82 u32 prid = read_c0_prid(); 82 */
83 if ( (prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) { 83 u32 prid = read_c0_prid();
84 au_writel( 1<<16 | au_readl(Au1500_PCI_CFG), Au1500_PCI_CFG); 84
85 printk("Non-coherent PCI accesses enabled\n"); 85 if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) {
86 au_writel((1 << 16) | au_readl(Au1500_PCI_CFG),
87 Au1500_PCI_CFG);
88 printk("Non-coherent PCI accesses enabled\n");
89 }
86 } 90 }
87#endif 91#endif
88 92
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 6fce60af005d..a8637cdb5b4b 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -149,9 +149,8 @@ int get_ethernet_addr(char *ethernet_addr)
149 return 0; 149 return 0;
150} 150}
151 151
152unsigned long __init prom_free_prom_memory(void) 152void __init prom_free_prom_memory(void)
153{ 153{
154 return 0;
155} 154}
156 155
157EXPORT_SYMBOL(prom_getcmdline); 156EXPORT_SYMBOL(prom_getcmdline);
diff --git a/arch/mips/au1000/common/setup.c b/arch/mips/au1000/common/setup.c
index 919172db560c..13fe187f35d6 100644
--- a/arch/mips/au1000/common/setup.c
+++ b/arch/mips/au1000/common/setup.c
@@ -141,17 +141,20 @@ void __init plat_mem_setup(void)
141/* This routine should be valid for all Au1x based boards */ 141/* This routine should be valid for all Au1x based boards */
142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 142phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
143{ 143{
144 u32 start, end;
145
146 /* Don't fixup 36 bit addresses */ 144 /* Don't fixup 36 bit addresses */
147 if ((phys_addr >> 32) != 0) return phys_addr; 145 if ((phys_addr >> 32) != 0)
146 return phys_addr;
148 147
149#ifdef CONFIG_PCI 148#ifdef CONFIG_PCI
150 start = (u32)Au1500_PCI_MEM_START; 149 {
151 end = (u32)Au1500_PCI_MEM_END; 150 u32 start, end;
152 /* check for pci memory window */ 151
153 if ((phys_addr >= start) && ((phys_addr + size) < end)) { 152 start = (u32)Au1500_PCI_MEM_START;
154 return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START); 153 end = (u32)Au1500_PCI_MEM_END;
154 /* check for pci memory window */
155 if ((phys_addr >= start) && ((phys_addr + size) < end))
156 return (phys_t)
157 ((phys_addr - start) + Au1500_PCI_MEM_START);
155 } 158 }
156#endif 159#endif
157 160
diff --git a/arch/mips/au1000/pb1100/board_setup.c b/arch/mips/au1000/pb1100/board_setup.c
index 2d1533f116c0..6bc1f8e1b608 100644
--- a/arch/mips/au1000/pb1100/board_setup.c
+++ b/arch/mips/au1000/pb1100/board_setup.c
@@ -47,8 +47,7 @@ void board_reset (void)
47 47
48void __init board_setup(void) 48void __init board_setup(void)
49{ 49{
50 u32 pin_func; 50 volatile void __iomem * base = (volatile void __iomem *) 0xac000000UL;
51 u32 sys_freqctrl, sys_clksrc;
52 51
53 // set AUX clock to 12MHz * 8 = 96 MHz 52 // set AUX clock to 12MHz * 8 = 96 MHz
54 au_writel(8, SYS_AUXPLL); 53 au_writel(8, SYS_AUXPLL);
@@ -56,58 +55,62 @@ void __init board_setup(void)
56 udelay(100); 55 udelay(100);
57 56
58#ifdef CONFIG_USB_OHCI 57#ifdef CONFIG_USB_OHCI
59 // configure pins GPIO[14:9] as GPIO 58 {
60 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80); 59 u32 pin_func, sys_freqctrl, sys_clksrc;
61 60
62 /* zero and disable FREQ2 */ 61 // configure pins GPIO[14:9] as GPIO
63 sys_freqctrl = au_readl(SYS_FREQCTRL0); 62 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x80);
64 sys_freqctrl &= ~0xFFF00000; 63
65 au_writel(sys_freqctrl, SYS_FREQCTRL0); 64 /* zero and disable FREQ2 */
66 65 sys_freqctrl = au_readl(SYS_FREQCTRL0);
67 /* zero and disable USBH/USBD/IrDA clock */ 66 sys_freqctrl &= ~0xFFF00000;
68 sys_clksrc = au_readl(SYS_CLKSRC); 67 au_writel(sys_freqctrl, SYS_FREQCTRL0);
69 sys_clksrc &= ~0x0000001F; 68
70 au_writel(sys_clksrc, SYS_CLKSRC); 69 /* zero and disable USBH/USBD/IrDA clock */
71 70 sys_clksrc = au_readl(SYS_CLKSRC);
72 sys_freqctrl = au_readl(SYS_FREQCTRL0); 71 sys_clksrc &= ~0x0000001F;
73 sys_freqctrl &= ~0xFFF00000; 72 au_writel(sys_clksrc, SYS_CLKSRC);
74 73
75 sys_clksrc = au_readl(SYS_CLKSRC); 74 sys_freqctrl = au_readl(SYS_FREQCTRL0);
76 sys_clksrc &= ~0x0000001F; 75 sys_freqctrl &= ~0xFFF00000;
77 76
78 // FREQ2 = aux/2 = 48 MHz 77 sys_clksrc = au_readl(SYS_CLKSRC);
79 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20)); 78 sys_clksrc &= ~0x0000001F;
80 au_writel(sys_freqctrl, SYS_FREQCTRL0); 79
81 80 // FREQ2 = aux/2 = 48 MHz
82 /* 81 sys_freqctrl |= ((0<<22) | (1<<21) | (1<<20));
83 * Route 48MHz FREQ2 into USBH/USBD/IrDA 82 au_writel(sys_freqctrl, SYS_FREQCTRL0);
84 */ 83
85 sys_clksrc |= ((4<<2) | (0<<1) | 0 ); 84 /*
86 au_writel(sys_clksrc, SYS_CLKSRC); 85 * Route 48MHz FREQ2 into USBH/USBD/IrDA
87 86 */
88 /* setup the static bus controller */ 87 sys_clksrc |= ((4<<2) | (0<<1) | 0 );
89 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */ 88 au_writel(sys_clksrc, SYS_CLKSRC);
90 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */ 89
91 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */ 90 /* setup the static bus controller */
92 91 au_writel(0x00000002, MEM_STCFG3); /* type = PCMCIA */
93 // get USB Functionality pin state (device vs host drive pins) 92 au_writel(0x280E3D07, MEM_STTIME3); /* 250ns cycle time */
94 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000); 93 au_writel(0x10000000, MEM_STADDR3); /* any PCMCIA select */
95 // 2nd USB port is USB host 94
96 pin_func |= 0x8000; 95 // get USB Functionality pin state (device vs host drive pins)
97 au_writel(pin_func, SYS_PINFUNC); 96 pin_func = au_readl(SYS_PINFUNC) & (u32)(~0x8000);
97 // 2nd USB port is USB host
98 pin_func |= 0x8000;
99 au_writel(pin_func, SYS_PINFUNC);
100 }
98#endif // defined (CONFIG_USB_OHCI) 101#endif // defined (CONFIG_USB_OHCI)
99 102
100 /* Enable sys bus clock divider when IDLE state or no bus activity. */ 103 /* Enable sys bus clock divider when IDLE state or no bus activity. */
101 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); 104 au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
102 105
103 // Enable the RTC if not already enabled 106 // Enable the RTC if not already enabled
104 if (!(readb(0xac000028) & 0x20)) { 107 if (!(readb(base + 0x28) & 0x20)) {
105 writeb(readb(0xac000028) | 0x20, 0xac000028); 108 writeb(readb(base + 0x28) | 0x20, base + 0x28);
106 au_sync(); 109 au_sync();
107 } 110 }
108 // Put the clock in BCD mode 111 // Put the clock in BCD mode
109 if (readb(0xac00002C) & 0x4) { /* reg B */ 112 if (readb(base + 0x2C) & 0x4) { /* reg B */
110 writeb(readb(0xac00002c) & ~0x4, 0xac00002c); 113 writeb(readb(base + 0x2c) & ~0x4, base + 0x2c);
111 au_sync(); 114 au_sync();
112 } 115 }
113} 116}
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index 91983ba407c4..b73b2d18bf56 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -137,33 +137,20 @@ static void pb1200_shutdown_irq( unsigned int irq_nr )
137 return; 137 return;
138} 138}
139 139
140static inline void pb1200_mask_and_ack_irq(unsigned int irq_nr)
141{
142 pb1200_disable_irq( irq_nr );
143}
144
145static void pb1200_end_irq(unsigned int irq_nr)
146{
147 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
148 pb1200_enable_irq(irq_nr);
149 }
150}
151
152static struct irq_chip external_irq_type = 140static struct irq_chip external_irq_type =
153{ 141{
154#ifdef CONFIG_MIPS_PB1200 142#ifdef CONFIG_MIPS_PB1200
155 "Pb1200 Ext", 143 .name = "Pb1200 Ext",
156#endif 144#endif
157#ifdef CONFIG_MIPS_DB1200 145#ifdef CONFIG_MIPS_DB1200
158 "Db1200 Ext", 146 .name = "Db1200 Ext",
159#endif 147#endif
160 pb1200_startup_irq, 148 .startup = pb1200_startup_irq,
161 pb1200_shutdown_irq, 149 .shutdown = pb1200_shutdown_irq,
162 pb1200_enable_irq, 150 .ack = pb1200_disable_irq,
163 pb1200_disable_irq, 151 .mask = pb1200_disable_irq,
164 pb1200_mask_and_ack_irq, 152 .mask_ack = pb1200_disable_irq,
165 pb1200_end_irq, 153 .unmask = pb1200_enable_irq,
166 NULL
167}; 154};
168 155
169void _board_init_irq(void) 156void _board_init_irq(void)
@@ -172,7 +159,8 @@ void _board_init_irq(void)
172 159
173 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++) 160 for (irq_nr = PB1200_INT_BEGIN; irq_nr <= PB1200_INT_END; irq_nr++)
174 { 161 {
175 irq_desc[irq_nr].chip = &external_irq_type; 162 set_irq_chip_and_handler(irq_nr, &external_irq_type,
163 handle_level_irq);
176 pb1200_disable_irq(irq_nr); 164 pb1200_disable_irq(irq_nr);
177 } 165 }
178 166
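
The irqmap.c hunk above drops the hand-rolled mask_and_ack/end helpers and switches the irq_chip initializer from positional to designated form while registering the handler via set_irq_chip_and_handler(). The designated style is what makes field renames like .typename -> .name elsewhere in this series painless; a simplified stand-in structure shows the pattern:

#include <stdio.h>

struct irq_chip_demo {
        const char *name;
        void (*ack)(unsigned int irq);
        void (*mask)(unsigned int irq);
        void (*unmask)(unsigned int irq);
};

static void demo_ack(unsigned int irq)    { printf("ack %u\n", irq); }
static void demo_mask(unsigned int irq)   { printf("mask %u\n", irq); }
static void demo_unmask(unsigned int irq) { printf("unmask %u\n", irq); }

static struct irq_chip_demo chip = {
        .name   = "Demo Ext",
        .ack    = demo_ack,           /* ack and mask can share one helper */
        .mask   = demo_mask,
        .unmask = demo_unmask,
};

int main(void)
{
        printf("%s\n", chip.name);
        chip.ack(3);
        chip.mask(3);
        chip.unmask(3);
        return 0;
}
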
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
index 2e2061a286c5..1ecab6350421 100644
--- a/arch/mips/basler/excite/excite_irq.c
+++ b/arch/mips/basler/excite/excite_irq.c
@@ -47,9 +47,9 @@ extern asmlinkage void excite_handle_int(void);
47 */ 47 */
48void __init arch_init_irq(void) 48void __init arch_init_irq(void)
49{ 49{
50 mips_cpu_irq_init(0); 50 mips_cpu_irq_init();
51 rm7k_cpu_irq_init(8); 51 rm7k_cpu_irq_init();
52 rm9k_cpu_irq_init(12); 52 rm9k_cpu_irq_init();
53 53
54#ifdef CONFIG_KGDB 54#ifdef CONFIG_KGDB
55 excite_kgdb_init(); 55 excite_kgdb_init();
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index 4c46f0e73783..fe93b846923b 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -104,7 +104,7 @@ void __init arch_init_irq(void)
104 GT_WRITE(GT_INTRMASK_OFS, 0); 104 GT_WRITE(GT_INTRMASK_OFS, 0);
105 105
106 init_i8259_irqs(); /* 0 ... 15 */ 106 init_i8259_irqs(); /* 0 ... 15 */
107 mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */ 107 mips_cpu_irq_init(); /* 16 ... 23 */
108 108
109 /* 109 /*
110 * Mask all cpu interrupts 110 * Mask all cpu interrupts
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index e8f0f20b852d..a4b69b543bd9 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -204,8 +204,7 @@ void __init prom_init(void)
204 add_memory_region(0x0, memsz, BOOT_MEM_RAM); 204 add_memory_region(0x0, memsz, BOOT_MEM_RAM);
205} 205}
206 206
207unsigned long __init prom_free_prom_memory(void) 207void __init prom_free_prom_memory(void)
208{ 208{
209 /* Nothing to do! */ 209 /* Nothing to do! */
210 return 0;
211} 210}
diff --git a/arch/mips/ddb5xxx/common/prom.c b/arch/mips/ddb5xxx/common/prom.c
index efef0f57ce1e..54a857b5e3ba 100644
--- a/arch/mips/ddb5xxx/common/prom.c
+++ b/arch/mips/ddb5xxx/common/prom.c
@@ -59,9 +59,8 @@ void __init prom_init(void)
59#endif 59#endif
60} 60}
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67#if defined(CONFIG_DDB5477) 66#if defined(CONFIG_DDB5477)
diff --git a/arch/mips/ddb5xxx/ddb5477/irq.c b/arch/mips/ddb5xxx/ddb5477/irq.c
index a8bd2e66705c..2b23234a5b95 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq.c
@@ -17,6 +17,7 @@
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18 18
19#include <asm/i8259.h> 19#include <asm/i8259.h>
20#include <asm/irq_cpu.h>
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/mipsregs.h> 22#include <asm/mipsregs.h>
22#include <asm/debug.h> 23#include <asm/debug.h>
@@ -73,7 +74,6 @@ set_pci_int_attr(u32 pci, u32 intn, u32 active, u32 trigger)
73} 74}
74 75
75extern void vrc5477_irq_init(u32 base); 76extern void vrc5477_irq_init(u32 base);
76extern void mips_cpu_irq_init(u32 base);
77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL }; 77static struct irqaction irq_cascade = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL };
78 78
79void __init arch_init_irq(void) 79void __init arch_init_irq(void)
@@ -125,7 +125,7 @@ void __init arch_init_irq(void)
125 125
126 /* init all controllers */ 126 /* init all controllers */
127 init_i8259_irqs(); 127 init_i8259_irqs();
128 mips_cpu_irq_init(CPU_IRQ_BASE); 128 mips_cpu_irq_init();
129 vrc5477_irq_init(VRC5477_IRQ_BASE); 129 vrc5477_irq_init(VRC5477_IRQ_BASE);
130 130
131 131
@@ -146,8 +146,7 @@ u8 i8259_interrupt_ack(void)
146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE); 146 irq = *(volatile u8 *) KSEG1ADDR(DDB_PCI_IACK_BASE);
147 ddb_out32(DDB_PCIINIT10, reg); 147 ddb_out32(DDB_PCIINIT10, reg);
148 148
149 /* i8259.c set the base vector to be 0x0 */ 149 return irq;
150 return irq + I8259_IRQ_BASE;
151} 150}
152/* 151/*
153 * the first level int-handler will jump here if it is a vrc5477 irq 152 * the first level int-handler will jump here if it is a vrc5477 irq
@@ -177,7 +176,7 @@ static void vrc5477_irq_dispatch(void)
177 /* check for i8259 interrupts */ 176 /* check for i8259 interrupts */
178 if (intStatus & (1 << VRC5477_I8259_CASCADE)) { 177 if (intStatus & (1 << VRC5477_I8259_CASCADE)) {
179 int i8259_irq = i8259_interrupt_ack(); 178 int i8259_irq = i8259_interrupt_ack();
180 do_IRQ(I8259_IRQ_BASE + i8259_irq); 179 do_IRQ(i8259_irq);
181 return; 180 return;
182 } 181 }
183 } 182 }
diff --git a/arch/mips/ddb5xxx/ddb5477/irq_5477.c b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
index 96249aa5df5d..98c3b15eb369 100644
--- a/arch/mips/ddb5xxx/ddb5477/irq_5477.c
+++ b/arch/mips/ddb5xxx/ddb5477/irq_5477.c
@@ -82,7 +82,7 @@ vrc5477_irq_end(unsigned int irq)
82} 82}
83 83
84struct irq_chip vrc5477_irq_controller = { 84struct irq_chip vrc5477_irq_controller = {
85 .typename = "vrc5477_irq", 85 .name = "vrc5477_irq",
86 .ack = vrc5477_irq_ack, 86 .ack = vrc5477_irq_ack,
87 .mask = vrc5477_irq_disable, 87 .mask = vrc5477_irq_disable,
88 .mask_ack = vrc5477_irq_ack, 88 .mask_ack = vrc5477_irq_ack,
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 4c7cb4048d35..3acb133668dc 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -62,7 +62,7 @@ static inline void end_ioasic_irq(unsigned int irq)
62} 62}
63 63
64static struct irq_chip ioasic_irq_type = { 64static struct irq_chip ioasic_irq_type = {
65 .typename = "IO-ASIC", 65 .name = "IO-ASIC",
66 .ack = ack_ioasic_irq, 66 .ack = ack_ioasic_irq,
67 .mask = mask_ioasic_irq, 67 .mask = mask_ioasic_irq,
68 .mask_ack = ack_ioasic_irq, 68 .mask_ack = ack_ioasic_irq,
@@ -84,7 +84,7 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
84} 84}
85 85
86static struct irq_chip ioasic_dma_irq_type = { 86static struct irq_chip ioasic_dma_irq_type = {
87 .typename = "IO-ASIC-DMA", 87 .name = "IO-ASIC-DMA",
88 .ack = ack_ioasic_dma_irq, 88 .ack = ack_ioasic_dma_irq,
89 .mask = mask_ioasic_dma_irq, 89 .mask = mask_ioasic_dma_irq,
90 .mask_ack = ack_ioasic_dma_irq, 90 .mask_ack = ack_ioasic_dma_irq,
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 916e46b8ccd8..02439dc0ba83 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -58,7 +58,7 @@ static void ack_kn02_irq(unsigned int irq)
58} 58}
59 59
60static struct irq_chip kn02_irq_type = { 60static struct irq_chip kn02_irq_type = {
61 .typename = "KN02-CSR", 61 .name = "KN02-CSR",
62 .ack = ack_kn02_irq, 62 .ack = ack_kn02_irq,
63 .mask = mask_kn02_irq, 63 .mask = mask_kn02_irq,
64 .mask_ack = ack_kn02_irq, 64 .mask_ack = ack_kn02_irq,
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 3aa01d268f2d..5a557e268f78 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -92,9 +92,9 @@ void __init prom_meminit(u32 magic)
92 rex_setup_memory_region(); 92 rex_setup_memory_region();
93} 93}
94 94
95unsigned long __init prom_free_prom_memory(void) 95void __init prom_free_prom_memory(void)
96{ 96{
97 unsigned long addr, end; 97 unsigned long end;
98 98
99 /* 99 /*
100 * Free everything below the kernel itself but leave 100 * Free everything below the kernel itself but leave
@@ -114,16 +114,5 @@ unsigned long __init prom_free_prom_memory(void)
114#endif 114#endif
115 end = __pa(&_text); 115 end = __pa(&_text);
116 116
117 addr = PAGE_SIZE; 117 free_init_pages("unused PROM memory", PAGE_SIZE, end);
118 while (addr < end) {
119 ClearPageReserved(virt_to_page(__va(addr)));
120 init_page_count(virt_to_page(__va(addr)));
121 free_page((unsigned long)__va(addr));
122 addr += PAGE_SIZE;
123 }
124
125 printk("Freeing unused PROM memory: %ldkb freed\n",
126 (end - PAGE_SIZE) >> 10);
127
128 return end - PAGE_SIZE;
129} 118}
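
The hand-rolled ClearPageReserved()/init_page_count()/free_page() loop and its printk are folded into one free_init_pages() call here. The helper itself is not shown in this section; the sketch below is only a rough guess at what such a helper does, assuming it walks the range page by page, and the real arch/mips/mm implementation may differ in naming, address handling and message format.

#include <linux/mm.h>
#include <linux/pfn.h>

/* Hedged sketch only; not the actual arch/mips/mm implementation. */
static void free_init_pages_sketch(const char *what,
				   unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n",
	       what, (end - begin) >> 10);
}
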
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index d34032ac492a..1058e2f409bb 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -234,7 +234,7 @@ static void __init dec_init_kn01(void)
234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl, 234 memcpy(&cpu_mask_nr_tbl, &kn01_cpu_mask_nr_tbl,
235 sizeof(kn01_cpu_mask_nr_tbl)); 235 sizeof(kn01_cpu_mask_nr_tbl));
236 236
237 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 237 mips_cpu_irq_init();
238 238
239} /* dec_init_kn01 */ 239} /* dec_init_kn01 */
240 240
@@ -309,7 +309,7 @@ static void __init dec_init_kn230(void)
309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl, 309 memcpy(&cpu_mask_nr_tbl, &kn230_cpu_mask_nr_tbl,
310 sizeof(kn230_cpu_mask_nr_tbl)); 310 sizeof(kn230_cpu_mask_nr_tbl));
311 311
312 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 312 mips_cpu_irq_init();
313 313
314} /* dec_init_kn230 */ 314} /* dec_init_kn230 */
315 315
@@ -403,7 +403,7 @@ static void __init dec_init_kn02(void)
403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl, 403 memcpy(&asic_mask_nr_tbl, &kn02_asic_mask_nr_tbl,
404 sizeof(kn02_asic_mask_nr_tbl)); 404 sizeof(kn02_asic_mask_nr_tbl));
405 405
406 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 406 mips_cpu_irq_init();
407 init_kn02_irqs(KN02_IRQ_BASE); 407 init_kn02_irqs(KN02_IRQ_BASE);
408 408
409} /* dec_init_kn02 */ 409} /* dec_init_kn02 */
@@ -504,7 +504,7 @@ static void __init dec_init_kn02ba(void)
504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl, 504 memcpy(&asic_mask_nr_tbl, &kn02ba_asic_mask_nr_tbl,
505 sizeof(kn02ba_asic_mask_nr_tbl)); 505 sizeof(kn02ba_asic_mask_nr_tbl));
506 506
507 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 507 mips_cpu_irq_init();
508 init_ioasic_irqs(IO_IRQ_BASE); 508 init_ioasic_irqs(IO_IRQ_BASE);
509 509
510} /* dec_init_kn02ba */ 510} /* dec_init_kn02ba */
@@ -601,7 +601,7 @@ static void __init dec_init_kn02ca(void)
601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl, 601 memcpy(&asic_mask_nr_tbl, &kn02ca_asic_mask_nr_tbl,
602 sizeof(kn02ca_asic_mask_nr_tbl)); 602 sizeof(kn02ca_asic_mask_nr_tbl));
603 603
604 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 604 mips_cpu_irq_init();
605 init_ioasic_irqs(IO_IRQ_BASE); 605 init_ioasic_irqs(IO_IRQ_BASE);
606 606
607} /* dec_init_kn02ca */ 607} /* dec_init_kn02ca */
@@ -702,7 +702,7 @@ static void __init dec_init_kn03(void)
702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl, 702 memcpy(&asic_mask_nr_tbl, &kn03_asic_mask_nr_tbl,
703 sizeof(kn03_asic_mask_nr_tbl)); 703 sizeof(kn03_asic_mask_nr_tbl));
704 704
705 mips_cpu_irq_init(DEC_CPU_IRQ_BASE); 705 mips_cpu_irq_init();
706 init_ioasic_irqs(IO_IRQ_BASE); 706 init_ioasic_irqs(IO_IRQ_BASE);
707 707
708} /* dec_init_kn03 */ 708} /* dec_init_kn03 */
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 8d880f0b06ec..96df37b77759 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -57,7 +57,7 @@ static void emma2rh_irq_disable(unsigned int irq)
57} 57}
58 58
59struct irq_chip emma2rh_irq_controller = { 59struct irq_chip emma2rh_irq_controller = {
60 .typename = "emma2rh_irq", 60 .name = "emma2rh_irq",
61 .ack = emma2rh_irq_disable, 61 .ack = emma2rh_irq_disable,
62 .mask = emma2rh_irq_disable, 62 .mask = emma2rh_irq_disable,
63 .mask_ack = emma2rh_irq_disable, 63 .mask_ack = emma2rh_irq_disable,
diff --git a/arch/mips/emma2rh/markeins/irq.c b/arch/mips/emma2rh/markeins/irq.c
index c93369cb4115..3299b6dfe764 100644
--- a/arch/mips/emma2rh/markeins/irq.c
+++ b/arch/mips/emma2rh/markeins/irq.c
@@ -106,7 +106,7 @@ void __init arch_init_irq(void)
106 emma2rh_irq_init(EMMA2RH_IRQ_BASE); 106 emma2rh_irq_init(EMMA2RH_IRQ_BASE);
107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE); 107 emma2rh_sw_irq_init(EMMA2RH_SW_IRQ_BASE);
108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE); 108 emma2rh_gpio_irq_init(EMMA2RH_GPIO_IRQ_BASE);
109 mips_cpu_irq_init(CPU_IRQ_BASE); 109 mips_cpu_irq_init();
110 110
111 /* setup cascade interrupts */ 111 /* setup cascade interrupts */
112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade); 112 setup_irq(EMMA2RH_IRQ_BASE + EMMA2RH_SW_CASCADE, &irq_cascade);
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 2116d9be5fa9..fba5c156f472 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -49,7 +49,7 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
49} 49}
50 50
51struct irq_chip emma2rh_sw_irq_controller = { 51struct irq_chip emma2rh_sw_irq_controller = {
52 .typename = "emma2rh_sw_irq", 52 .name = "emma2rh_sw_irq",
53 .ack = emma2rh_sw_irq_disable, 53 .ack = emma2rh_sw_irq_disable,
54 .mask = emma2rh_sw_irq_disable, 54 .mask = emma2rh_sw_irq_disable,
55 .mask_ack = emma2rh_sw_irq_disable, 55 .mask_ack = emma2rh_sw_irq_disable,
@@ -115,7 +115,7 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
115} 115}
116 116
117struct irq_chip emma2rh_gpio_irq_controller = { 117struct irq_chip emma2rh_gpio_irq_controller = {
118 .typename = "emma2rh_gpio_irq", 118 .name = "emma2rh_gpio_irq",
119 .ack = emma2rh_gpio_irq_ack, 119 .ack = emma2rh_gpio_irq_ack,
120 .mask = emma2rh_gpio_irq_disable, 120 .mask = emma2rh_gpio_irq_disable,
121 .mask_ack = emma2rh_gpio_irq_ack, 121 .mask_ack = emma2rh_gpio_irq_ack,
diff --git a/arch/mips/gt64120/ev64120/irq.c b/arch/mips/gt64120/ev64120/irq.c
index b3e5796c81d7..04572b9c9642 100644
--- a/arch/mips/gt64120/ev64120/irq.c
+++ b/arch/mips/gt64120/ev64120/irq.c
@@ -88,7 +88,7 @@ static void end_ev64120_irq(unsigned int irq)
88} 88}
89 89
90static struct irq_chip ev64120_irq_type = { 90static struct irq_chip ev64120_irq_type = {
91 .typename = "EV64120", 91 .name = "EV64120",
92 .ack = disable_ev64120_irq, 92 .ack = disable_ev64120_irq,
93 .mask = disable_ev64120_irq, 93 .mask = disable_ev64120_irq,
94 .mask_ack = disable_ev64120_irq, 94 .mask_ack = disable_ev64120_irq,
diff --git a/arch/mips/gt64120/ev64120/setup.c b/arch/mips/gt64120/ev64120/setup.c
index 99c8d42212e2..477848c22a2c 100644
--- a/arch/mips/gt64120/ev64120/setup.c
+++ b/arch/mips/gt64120/ev64120/setup.c
@@ -59,9 +59,8 @@ extern void galileo_machine_power_off(void);
59 */ 59 */
60extern struct pci_ops galileo_pci_ops; 60extern struct pci_ops galileo_pci_ops;
61 61
62unsigned long __init prom_free_prom_memory(void) 62void __init prom_free_prom_memory(void)
63{ 63{
64 return 0;
65} 64}
66 65
67/* 66/*
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/gt64120/momenco_ocelot/irq.c b/arch/mips/gt64120/momenco_ocelot/irq.c
index d9294401ccb0..2585d9dbda33 100644
--- a/arch/mips/gt64120/momenco_ocelot/irq.c
+++ b/arch/mips/gt64120/momenco_ocelot/irq.c
@@ -90,6 +90,6 @@ void __init arch_init_irq(void)
90 clear_c0_status(ST0_IM); 90 clear_c0_status(ST0_IM);
91 local_irq_disable(); 91 local_irq_disable();
92 92
93 mips_cpu_irq_init(0); 93 mips_cpu_irq_init();
94 rm7k_cpu_irq_init(8); 94 rm7k_cpu_irq_init();
95} 95}
diff --git a/arch/mips/gt64120/momenco_ocelot/prom.c b/arch/mips/gt64120/momenco_ocelot/prom.c
index 8677b6d3ada7..78f393b2afd9 100644
--- a/arch/mips/gt64120/momenco_ocelot/prom.c
+++ b/arch/mips/gt64120/momenco_ocelot/prom.c
@@ -67,7 +67,6 @@ void __init prom_init(void)
67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM); 67 add_memory_region(0, 64 << 20, BOOT_MEM_RAM);
68} 68}
69 69
70unsigned long __init prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
diff --git a/arch/mips/gt64120/wrppmc/irq.c b/arch/mips/gt64120/wrppmc/irq.c
index eedfc24e1eae..d3d96591780e 100644
--- a/arch/mips/gt64120/wrppmc/irq.c
+++ b/arch/mips/gt64120/wrppmc/irq.c
@@ -63,7 +63,7 @@ void gt64120_init_pic(void)
63void __init arch_init_irq(void) 63void __init arch_init_irq(void)
64{ 64{
65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */ 65 /* IRQ 0 - 7 are for MIPS common irq_cpu controller */
66 mips_cpu_irq_init(0); 66 mips_cpu_irq_init();
67 67
68 gt64120_init_pic(); 68 gt64120_init_pic();
69} 69}
diff --git a/arch/mips/gt64120/wrppmc/setup.c b/arch/mips/gt64120/wrppmc/setup.c
index 429afc400cb4..121188d5ec4a 100644
--- a/arch/mips/gt64120/wrppmc/setup.c
+++ b/arch/mips/gt64120/wrppmc/setup.c
@@ -93,9 +93,8 @@ void __init wrppmc_early_printk(const char *fmt, ...)
93} 93}
94#endif /* WRPPMC_EARLY_DEBUG */ 94#endif /* WRPPMC_EARLY_DEBUG */
95 95
96unsigned long __init prom_free_prom_memory(void) 96void __init prom_free_prom_memory(void)
97{ 97{
98 return 0;
99} 98}
100 99
101#ifdef CONFIG_SERIAL_8250 100#ifdef CONFIG_SERIAL_8250
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f8d417b5c2bb..295892e4ce53 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -40,7 +40,7 @@ void disable_r4030_irq(unsigned int irq)
40} 40}
41 41
42static struct irq_chip r4030_irq_type = { 42static struct irq_chip r4030_irq_type = {
43 .typename = "R4030", 43 .name = "R4030",
44 .ack = disable_r4030_irq, 44 .ack = disable_r4030_irq,
45 .mask = disable_r4030_irq, 45 .mask = disable_r4030_irq,
46 .mask_ack = disable_r4030_irq, 46 .mask_ack = disable_r4030_irq,
diff --git a/arch/mips/jmr3927/common/prom.c b/arch/mips/jmr3927/common/prom.c
index 5d5838f41d23..aa481b774c42 100644
--- a/arch/mips/jmr3927/common/prom.c
+++ b/arch/mips/jmr3927/common/prom.c
@@ -75,7 +75,6 @@ void __init prom_init_cmdline(void)
75 *cp = '\0'; 75 *cp = '\0';
76} 76}
77 77
78unsigned long __init prom_free_prom_memory(void) 78void __init prom_free_prom_memory(void)
79{ 79{
80 return 0;
81} 80}
diff --git a/arch/mips/jmr3927/rbhma3100/irq.c b/arch/mips/jmr3927/rbhma3100/irq.c
index 3da49c5aaf49..7d2c203cb406 100644
--- a/arch/mips/jmr3927/rbhma3100/irq.c
+++ b/arch/mips/jmr3927/rbhma3100/irq.c
@@ -439,7 +439,7 @@ void __init arch_init_irq(void)
439} 439}
440 440
441static struct irq_chip jmr3927_irq_controller = { 441static struct irq_chip jmr3927_irq_controller = {
442 .typename = "jmr3927_irq", 442 .name = "jmr3927_irq",
443 .ack = jmr3927_irq_ack, 443 .ack = jmr3927_irq_ack,
444 .mask = jmr3927_irq_disable, 444 .mask = jmr3927_irq_disable,
445 .mask_ack = jmr3927_irq_ack, 445 .mask_ack = jmr3927_irq_ack,
diff --git a/arch/mips/jmr3927/rbhma3100/setup.c b/arch/mips/jmr3927/rbhma3100/setup.c
index 138f25efe38a..7ca3d6d07b34 100644
--- a/arch/mips/jmr3927/rbhma3100/setup.c
+++ b/arch/mips/jmr3927/rbhma3100/setup.c
@@ -434,7 +434,7 @@ void __init tx3927_setup(void)
434 434
435 /* DMA */ 435 /* DMA */
436 tx3927_dmaptr->mcr = 0; 436 tx3927_dmaptr->mcr = 0;
437 for (i = 0; i < sizeof(tx3927_dmaptr->ch) / sizeof(tx3927_dmaptr->ch[0]); i++) { 437 for (i = 0; i < ARRAY_SIZE(tx3927_dmaptr->ch); i++) {
438 /* reset channel */ 438 /* reset channel */
439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST; 439 tx3927_dmaptr->ch[i].ccr = TX3927_DMA_CCR_CHRST;
440 tx3927_dmaptr->ch[i].ccr = 0; 440 tx3927_dmaptr->ch[i].ccr = 0;
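
The tx3927 DMA loop swaps the hand-written sizeof division for the kernel's ARRAY_SIZE() macro from <linux/kernel.h>. The computation is the same; newer kernels also add a compile-time check that the argument really is an array. A tiny standalone illustration (macro renamed so it cannot clash with the real one):

#include <stddef.h>

#define ARRAY_SIZE_SKETCH(arr)	(sizeof(arr) / sizeof((arr)[0]))

static int channels[8];

static size_t nr_channels(void)
{
	return ARRAY_SIZE_SKETCH(channels);	/* 8 */
}
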
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ff88b06f89df..ea7df4b8da33 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -234,10 +234,6 @@ void output_mm_defines(void)
234 constant("#define _PMD_SHIFT ", PMD_SHIFT); 234 constant("#define _PMD_SHIFT ", PMD_SHIFT);
235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT); 235 constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
236 linefeed; 236 linefeed;
237 constant("#define _PGD_ORDER ", PGD_ORDER);
238 constant("#define _PMD_ORDER ", PMD_ORDER);
239 constant("#define _PTE_ORDER ", PTE_ORDER);
240 linefeed;
241 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD); 237 constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
242 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD); 238 constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
243 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE); 239 constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 442839e9578c..f59ef271d247 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -565,7 +565,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
565 if (config3 & MIPS_CONF3_VEIC) 565 if (config3 & MIPS_CONF3_VEIC)
566 c->options |= MIPS_CPU_VEIC; 566 c->options |= MIPS_CPU_VEIC;
567 if (config3 & MIPS_CONF3_MT) 567 if (config3 & MIPS_CONF3_MT)
568 c->ases |= MIPS_ASE_MIPSMT; 568 c->ases |= MIPS_ASE_MIPSMT;
569 569
570 return config3 & MIPS_CONF_M; 570 return config3 & MIPS_CONF_M;
571} 571}
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 719d26968cb2..7bc882049269 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -505,13 +505,13 @@ void show_gdbregs(struct gdb_regs * regs)
505 */ 505 */
506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 506 printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
507 regs->reg0, regs->reg1, regs->reg2, regs->reg3, 507 regs->reg0, regs->reg1, regs->reg2, regs->reg3,
508 regs->reg4, regs->reg5, regs->reg6, regs->reg7); 508 regs->reg4, regs->reg5, regs->reg6, regs->reg7);
509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 509 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
510 regs->reg8, regs->reg9, regs->reg10, regs->reg11, 510 regs->reg8, regs->reg9, regs->reg10, regs->reg11,
511 regs->reg12, regs->reg13, regs->reg14, regs->reg15); 511 regs->reg12, regs->reg13, regs->reg14, regs->reg15);
512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 512 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
513 regs->reg16, regs->reg17, regs->reg18, regs->reg19, 513 regs->reg16, regs->reg17, regs->reg18, regs->reg19,
514 regs->reg20, regs->reg21, regs->reg22, regs->reg23); 514 regs->reg20, regs->reg21, regs->reg22, regs->reg23);
515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 515 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
516 regs->reg24, regs->reg25, regs->reg26, regs->reg27, 516 regs->reg24, regs->reg25, regs->reg26, regs->reg27,
517 regs->reg28, regs->reg29, regs->reg30, regs->reg31); 517 regs->reg28, regs->reg29, regs->reg30, regs->reg31);
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 9a7811d13db2..6f57ca44291f 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -231,28 +231,3 @@ NESTED(smp_bootstrap, 16, sp)
231#endif /* CONFIG_SMP */ 231#endif /* CONFIG_SMP */
232 232
233 __FINIT 233 __FINIT
234
235 .comm kernelsp, NR_CPUS * 8, 8
236 .comm pgd_current, NR_CPUS * 8, 8
237
238 .comm fw_arg0, SZREG, SZREG # firmware arguments
239 .comm fw_arg1, SZREG, SZREG
240 .comm fw_arg2, SZREG, SZREG
241 .comm fw_arg3, SZREG, SZREG
242
243 .macro page name, order
244 .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
245 .endm
246
247 /*
248 * On 64-bit we've got three-level pagetables with a slightly
249 * different layout ...
250 */
251 page swapper_pg_dir, _PGD_ORDER
252#ifdef CONFIG_64BIT
253#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
254 page module_pg_dir, _PGD_ORDER
255#endif
256 page invalid_pmd_table, _PMD_ORDER
257#endif
258 page invalid_pte_table, _PTE_ORDER
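
The .comm blocks deleted from head.S (kernelsp, pgd_current, the fw_arg* slots, and the page-table pages sized with the _PGD_ORDER/_PMD_ORDER/_PTE_ORDER constants that the asm-offsets hunk above also drops) must now be defined somewhere in C; that destination is not visible in this section. Purely as an illustration, and only under that assumption, C-side equivalents would look roughly like:

#include <linux/threads.h>	/* NR_CPUS */
#include <asm/page.h>
#include <asm/pgtable.h>

/* Illustrative only; the actual definitions and alignment in this series may differ. */
unsigned long kernelsp[NR_CPUS];
unsigned long pgd_current[NR_CPUS];

unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;	/* firmware arguments */

pgd_t swapper_pg_dir[PTRS_PER_PGD]
	__attribute__((__aligned__(PAGE_SIZE)));
pte_t invalid_pte_table[PTRS_PER_PTE]
	__attribute__((__aligned__(PAGE_SIZE)));
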
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b59a676c6d0e..b33ba6cd7f5b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -54,9 +54,11 @@ static unsigned int cached_irq_mask = 0xffff;
54 54
55void disable_8259A_irq(unsigned int irq) 55void disable_8259A_irq(unsigned int irq)
56{ 56{
57 unsigned int mask = 1 << irq; 57 unsigned int mask;
58 unsigned long flags; 58 unsigned long flags;
59 59
60 irq -= I8259A_IRQ_BASE;
61 mask = 1 << irq;
60 spin_lock_irqsave(&i8259A_lock, flags); 62 spin_lock_irqsave(&i8259A_lock, flags);
61 cached_irq_mask |= mask; 63 cached_irq_mask |= mask;
62 if (irq & 8) 64 if (irq & 8)
@@ -68,9 +70,11 @@ void disable_8259A_irq(unsigned int irq)
68 70
69void enable_8259A_irq(unsigned int irq) 71void enable_8259A_irq(unsigned int irq)
70{ 72{
71 unsigned int mask = ~(1 << irq); 73 unsigned int mask;
72 unsigned long flags; 74 unsigned long flags;
73 75
76 irq -= I8259A_IRQ_BASE;
77 mask = ~(1 << irq);
74 spin_lock_irqsave(&i8259A_lock, flags); 78 spin_lock_irqsave(&i8259A_lock, flags);
75 cached_irq_mask &= mask; 79 cached_irq_mask &= mask;
76 if (irq & 8) 80 if (irq & 8)
@@ -82,10 +86,12 @@ void enable_8259A_irq(unsigned int irq)
82 86
83int i8259A_irq_pending(unsigned int irq) 87int i8259A_irq_pending(unsigned int irq)
84{ 88{
85 unsigned int mask = 1 << irq; 89 unsigned int mask;
86 unsigned long flags; 90 unsigned long flags;
87 int ret; 91 int ret;
88 92
93 irq -= I8259A_IRQ_BASE;
94 mask = 1 << irq;
89 spin_lock_irqsave(&i8259A_lock, flags); 95 spin_lock_irqsave(&i8259A_lock, flags);
90 if (irq < 8) 96 if (irq < 8)
91 ret = inb(PIC_MASTER_CMD) & mask; 97 ret = inb(PIC_MASTER_CMD) & mask;
@@ -134,9 +140,11 @@ static inline int i8259A_irq_real(unsigned int irq)
134 */ 140 */
135void mask_and_ack_8259A(unsigned int irq) 141void mask_and_ack_8259A(unsigned int irq)
136{ 142{
137 unsigned int irqmask = 1 << irq; 143 unsigned int irqmask;
138 unsigned long flags; 144 unsigned long flags;
139 145
146 irq -= I8259A_IRQ_BASE;
147 irqmask = 1 << irq;
140 spin_lock_irqsave(&i8259A_lock, flags); 148 spin_lock_irqsave(&i8259A_lock, flags);
141 /* 149 /*
142 * Lightweight spurious IRQ detection. We do not want 150 * Lightweight spurious IRQ detection. We do not want
@@ -169,8 +177,8 @@ handle_real_irq:
169 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ 177 outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */
170 } 178 }
171#ifdef CONFIG_MIPS_MT_SMTC 179#ifdef CONFIG_MIPS_MT_SMTC
172 if (irq_hwmask[irq] & ST0_IM) 180 if (irq_hwmask[irq] & ST0_IM)
173 set_c0_status(irq_hwmask[irq] & ST0_IM); 181 set_c0_status(irq_hwmask[irq] & ST0_IM);
174#endif /* CONFIG_MIPS_MT_SMTC */ 182#endif /* CONFIG_MIPS_MT_SMTC */
175 spin_unlock_irqrestore(&i8259A_lock, flags); 183 spin_unlock_irqrestore(&i8259A_lock, flags);
176 return; 184 return;
@@ -322,8 +330,8 @@ void __init init_i8259_irqs (void)
322 330
323 init_8259A(0); 331 init_8259A(0);
324 332
325 for (i = 0; i < 16; i++) 333 for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++)
326 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); 334 set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
327 335
328 setup_irq(PIC_CASCADE_IR, &irq2); 336 setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
329} 337}
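
With init_i8259_irqs() now registering the PIC at I8259A_IRQ_BASE..I8259A_IRQ_BASE+15, each routine first converts the global IRQ number back to the controller-local 0..15 index before building its bit mask. The helper below merely restates that conversion; its name is invented, and the real code stays inline in the functions above.

#include <asm/i8259.h>		/* assumed to provide I8259A_IRQ_BASE */

static inline unsigned int i8259_irq_to_mask(unsigned int irq)
{
	irq -= I8259A_IRQ_BASE;		/* global IRQ -> local 0..15 index */
	return 1U << irq;		/* bits 0..7: master PIC, 8..15: slave */
}
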
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 37cad5de515c..3cc25c05d367 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -10,6 +10,8 @@
10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com> 10 * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com> 11 * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
12 */ 12 */
13#undef DEBUG
14
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
15#include <linux/stat.h> 17#include <linux/stat.h>
@@ -40,8 +42,6 @@
40 42
41#include <linux/elf.h> 43#include <linux/elf.h>
42 44
43#undef DEBUG
44
45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs); 45static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
46static int load_irix_library(struct file *); 46static int load_irix_library(struct file *);
47static int irix_core_dump(long signr, struct pt_regs * regs, 47static int irix_core_dump(long signr, struct pt_regs * regs,
@@ -52,72 +52,102 @@ static struct linux_binfmt irix_format = {
52 irix_core_dump, PAGE_SIZE 52 irix_core_dump, PAGE_SIZE
53}; 53};
54 54
55#ifdef DEBUG
56/* Debugging routines. */ 55/* Debugging routines. */
57static char *get_elf_p_type(Elf32_Word p_type) 56static char *get_elf_p_type(Elf32_Word p_type)
58{ 57{
59 int i = (int) p_type; 58#ifdef DEBUG
60 59 switch (p_type) {
61 switch(i) { 60 case PT_NULL:
62 case PT_NULL: return("PT_NULL"); break; 61 return "PT_NULL";
63 case PT_LOAD: return("PT_LOAD"); break; 62 break;
64 case PT_DYNAMIC: return("PT_DYNAMIC"); break; 63
65 case PT_INTERP: return("PT_INTERP"); break; 64 case PT_LOAD:
66 case PT_NOTE: return("PT_NOTE"); break; 65 return "PT_LOAD";
67 case PT_SHLIB: return("PT_SHLIB"); break; 66 break;
68 case PT_PHDR: return("PT_PHDR"); break; 67
69 case PT_LOPROC: return("PT_LOPROC/REGINFO"); break; 68 case PT_DYNAMIC:
70 case PT_HIPROC: return("PT_HIPROC"); break; 69 return "PT_DYNAMIC";
71 default: return("PT_BOGUS"); break; 70 break;
71
72 case PT_INTERP:
73 return "PT_INTERP";
74 break;
75
76 case PT_NOTE:
77 return "PT_NOTE";
78 break;
79
80 case PT_SHLIB:
81 return "PT_SHLIB";
82 break;
83
84 case PT_PHDR:
85 return "PT_PHDR";
86 break;
87
88 case PT_LOPROC:
89 return "PT_LOPROC/REGINFO";
90 break;
91
92 case PT_HIPROC:
93 return "PT_HIPROC";
94 break;
95
96 default:
97 return "PT_BOGUS";
98 break;
72 } 99 }
100#endif
73} 101}
74 102
75static void print_elfhdr(struct elfhdr *ehp) 103static void print_elfhdr(struct elfhdr *ehp)
76{ 104{
77 int i; 105 int i;
78 106
79 printk("ELFHDR: e_ident<"); 107 pr_debug("ELFHDR: e_ident<");
80 for(i = 0; i < (EI_NIDENT - 1); i++) printk("%x ", ehp->e_ident[i]); 108 for (i = 0; i < (EI_NIDENT - 1); i++)
81 printk("%x>\n", ehp->e_ident[i]); 109 pr_debug("%x ", ehp->e_ident[i]);
82 printk(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n", 110 pr_debug("%x>\n", ehp->e_ident[i]);
83 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine, 111 pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
84 (unsigned long) ehp->e_version); 112 (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
85 printk(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] " 113 (unsigned long) ehp->e_version);
86 "e_flags[%08lx]\n", 114 pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
87 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff, 115 "e_flags[%08lx]\n",
88 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags); 116 (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
89 printk(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n", 117 (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
90 (unsigned short) ehp->e_ehsize, (unsigned short) ehp->e_phentsize, 118 pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
91 (unsigned short) ehp->e_phnum); 119 (unsigned short) ehp->e_ehsize,
92 printk(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n", 120 (unsigned short) ehp->e_phentsize,
93 (unsigned short) ehp->e_shentsize, (unsigned short) ehp->e_shnum, 121 (unsigned short) ehp->e_phnum);
94 (unsigned short) ehp->e_shstrndx); 122 pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
123 (unsigned short) ehp->e_shentsize,
124 (unsigned short) ehp->e_shnum,
125 (unsigned short) ehp->e_shstrndx);
95} 126}
96 127
97static void print_phdr(int i, struct elf_phdr *ep) 128static void print_phdr(int i, struct elf_phdr *ep)
98{ 129{
99 printk("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] " 130 pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
100 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type), 131 "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
101 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr, 132 (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
102 (unsigned long) ep->p_paddr); 133 (unsigned long) ep->p_paddr);
103 printk(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] " 134 pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
104 "p_align[%08lx]\n", (unsigned long) ep->p_filesz, 135 "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
105 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags, 136 (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
106 (unsigned long) ep->p_align); 137 (unsigned long) ep->p_align);
107} 138}
108 139
109static void dump_phdrs(struct elf_phdr *ep, int pnum) 140static void dump_phdrs(struct elf_phdr *ep, int pnum)
110{ 141{
111 int i; 142 int i;
112 143
113 for(i = 0; i < pnum; i++, ep++) { 144 for (i = 0; i < pnum; i++, ep++) {
114 if((ep->p_type == PT_LOAD) || 145 if ((ep->p_type == PT_LOAD) ||
115 (ep->p_type == PT_INTERP) || 146 (ep->p_type == PT_INTERP) ||
116 (ep->p_type == PT_PHDR)) 147 (ep->p_type == PT_PHDR))
117 print_phdr(i, ep); 148 print_phdr(i, ep);
118 } 149 }
119} 150}
120#endif /* DEBUG */
121 151
122static void set_brk(unsigned long start, unsigned long end) 152static void set_brk(unsigned long start, unsigned long end)
123{ 153{
@@ -156,11 +186,10 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
156 elf_addr_t *envp; 186 elf_addr_t *envp;
157 elf_addr_t *sp, *csp; 187 elf_addr_t *sp, *csp;
158 188
159#ifdef DEBUG 189 pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
160 printk("create_irix_tables: p[%p] argc[%d] envc[%d] " 190 "load_addr[%08x] interp_load_addr[%08x]\n",
161 "load_addr[%08x] interp_load_addr[%08x]\n", 191 p, argc, envc, load_addr, interp_load_addr);
162 p, argc, envc, load_addr, interp_load_addr); 192
163#endif
164 sp = (elf_addr_t *) (~15UL & (unsigned long) p); 193 sp = (elf_addr_t *) (~15UL & (unsigned long) p);
165 csp = sp; 194 csp = sp;
166 csp -= exec ? DLINFO_ITEMS*2 : 2; 195 csp -= exec ? DLINFO_ITEMS*2 : 2;
@@ -181,7 +210,7 @@ static unsigned long * create_irix_tables(char * p, int argc, int envc,
181 sp -= 2; 210 sp -= 2;
182 NEW_AUX_ENT(0, AT_NULL, 0); 211 NEW_AUX_ENT(0, AT_NULL, 0);
183 212
184 if(exec) { 213 if (exec) {
185 sp -= 11*2; 214 sp -= 11*2;
186 215
187 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff); 216 NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
@@ -245,9 +274,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
245 last_bss = 0; 274 last_bss = 0;
246 error = load_addr = 0; 275 error = load_addr = 0;
247 276
248#ifdef DEBUG
249 print_elfhdr(interp_elf_ex); 277 print_elfhdr(interp_elf_ex);
250#endif
251 278
252 /* First of all, some simple consistency checks */ 279 /* First of all, some simple consistency checks */
253 if ((interp_elf_ex->e_type != ET_EXEC && 280 if ((interp_elf_ex->e_type != ET_EXEC &&
@@ -258,7 +285,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
258 } 285 }
259 286
260 /* Now read in all of the header information */ 287 /* Now read in all of the header information */
261 if(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) { 288 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
262 printk("IRIX interp header bigger than a page (%d)\n", 289 printk("IRIX interp header bigger than a page (%d)\n",
263 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum)); 290 (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
264 return 0xffffffff; 291 return 0xffffffff;
@@ -267,15 +294,15 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
267 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 294 elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
268 GFP_KERNEL); 295 GFP_KERNEL);
269 296
270 if(!elf_phdata) { 297 if (!elf_phdata) {
271 printk("Cannot kmalloc phdata for IRIX interp.\n"); 298 printk("Cannot kmalloc phdata for IRIX interp.\n");
272 return 0xffffffff; 299 return 0xffffffff;
273 } 300 }
274 301
275 /* If the size of this structure has changed, then punt, since 302 /* If the size of this structure has changed, then punt, since
276 * we will be doing the wrong thing. 303 * we will be doing the wrong thing.
277 */ 304 */
278 if(interp_elf_ex->e_phentsize != 32) { 305 if (interp_elf_ex->e_phentsize != 32) {
279 printk("IRIX interp e_phentsize == %d != 32 ", 306 printk("IRIX interp e_phentsize == %d != 32 ",
280 interp_elf_ex->e_phentsize); 307 interp_elf_ex->e_phentsize);
281 kfree(elf_phdata); 308 kfree(elf_phdata);
@@ -286,61 +313,71 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
286 (char *) elf_phdata, 313 (char *) elf_phdata,
287 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum); 314 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
288 315
289#ifdef DEBUG
290 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum); 316 dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
291#endif
292 317
293 eppnt = elf_phdata; 318 eppnt = elf_phdata;
294 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) { 319 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
295 if(eppnt->p_type == PT_LOAD) { 320 if (eppnt->p_type == PT_LOAD) {
296 int elf_type = MAP_PRIVATE | MAP_DENYWRITE; 321 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
297 int elf_prot = 0; 322 int elf_prot = 0;
298 unsigned long vaddr = 0; 323 unsigned long vaddr = 0;
299 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; 324 if (eppnt->p_flags & PF_R)
300 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; 325 elf_prot = PROT_READ;
301 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; 326 if (eppnt->p_flags & PF_W)
302 elf_type |= MAP_FIXED; 327 elf_prot |= PROT_WRITE;
303 vaddr = eppnt->p_vaddr; 328 if (eppnt->p_flags & PF_X)
304 329 elf_prot |= PROT_EXEC;
305 pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ", 330 elf_type |= MAP_FIXED;
306 interpreter, vaddr, 331 vaddr = eppnt->p_vaddr;
307 (unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)), 332
308 (unsigned long) elf_prot, (unsigned long) elf_type, 333 pr_debug("INTERP do_mmap"
309 (unsigned long) (eppnt->p_offset & 0xfffff000)); 334 "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
310 down_write(&current->mm->mmap_sem); 335 interpreter, vaddr,
311 error = do_mmap(interpreter, vaddr, 336 (unsigned long)
312 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff), 337 (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
313 elf_prot, elf_type, 338 (unsigned long)
314 eppnt->p_offset & 0xfffff000); 339 elf_prot, (unsigned long) elf_type,
315 up_write(&current->mm->mmap_sem); 340 (unsigned long)
316 341 (eppnt->p_offset & 0xfffff000));
317 if(error < 0 && error > -1024) { 342
318 printk("Aieee IRIX interp mmap error=%d\n", error); 343 down_write(&current->mm->mmap_sem);
319 break; /* Real error */ 344 error = do_mmap(interpreter, vaddr,
320 } 345 eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
321 pr_debug("error=%08lx ", (unsigned long) error); 346 elf_prot, elf_type,
322 if(!load_addr && interp_elf_ex->e_type == ET_DYN) { 347 eppnt->p_offset & 0xfffff000);
323 load_addr = error; 348 up_write(&current->mm->mmap_sem);
324 pr_debug("load_addr = error "); 349
325 } 350 if (error < 0 && error > -1024) {
326 351 printk("Aieee IRIX interp mmap error=%d\n",
327 /* Find the end of the file mapping for this phdr, and keep 352 error);
328 * track of the largest address we see for this. 353 break; /* Real error */
329 */ 354 }
330 k = eppnt->p_vaddr + eppnt->p_filesz; 355 pr_debug("error=%08lx ", (unsigned long) error);
331 if(k > elf_bss) elf_bss = k; 356 if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
332 357 load_addr = error;
333 /* Do the same thing for the memory mapping - between 358 pr_debug("load_addr = error ");
334 * elf_bss and last_bss is the bss section. 359 }
335 */ 360
336 k = eppnt->p_memsz + eppnt->p_vaddr; 361 /*
337 if(k > last_bss) last_bss = k; 362 * Find the end of the file mapping for this phdr, and
338 pr_debug("\n"); 363 * keep track of the largest address we see for this.
339 } 364 */
365 k = eppnt->p_vaddr + eppnt->p_filesz;
366 if (k > elf_bss)
367 elf_bss = k;
368
369 /* Do the same thing for the memory mapping - between
370 * elf_bss and last_bss is the bss section.
371 */
372 k = eppnt->p_memsz + eppnt->p_vaddr;
373 if (k > last_bss)
374 last_bss = k;
375 pr_debug("\n");
376 }
340 } 377 }
341 378
342 /* Now use mmap to map the library into memory. */ 379 /* Now use mmap to map the library into memory. */
343 if(error < 0 && error > -1024) { 380 if (error < 0 && error > -1024) {
344 pr_debug("got error %d\n", error); 381 pr_debug("got error %d\n", error);
345 kfree(elf_phdata); 382 kfree(elf_phdata);
346 return 0xffffffff; 383 return 0xffffffff;
@@ -377,7 +414,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
377 return -ENOEXEC; 414 return -ENOEXEC;
378 415
379 /* First of all, some simple consistency checks */ 416 /* First of all, some simple consistency checks */
380 if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) || 417 if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
381 !bprm->file->f_op->mmap) { 418 !bprm->file->f_op->mmap) {
382 return -ENOEXEC; 419 return -ENOEXEC;
383 } 420 }
@@ -388,7 +425,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
388 * XXX all registers as 64bits on cpu's capable of this at 425 * XXX all registers as 64bits on cpu's capable of this at
389 * XXX exception time plus frob the XTLB exception vector. 426 * XXX exception time plus frob the XTLB exception vector.
390 */ 427 */
391 if((ehp->e_flags & EF_MIPS_ABI2)) 428 if ((ehp->e_flags & EF_MIPS_ABI2))
392 return -ENOEXEC; 429 return -ENOEXEC;
393 430
394 return 0; 431 return 0;
@@ -410,7 +447,7 @@ static inline int look_for_irix_interpreter(char **name,
410 struct file *file = NULL; 447 struct file *file = NULL;
411 448
412 *name = NULL; 449 *name = NULL;
413 for(i = 0; i < pnum; i++, epp++) { 450 for (i = 0; i < pnum; i++, epp++) {
414 if (epp->p_type != PT_INTERP) 451 if (epp->p_type != PT_INTERP)
415 continue; 452 continue;
416 453
@@ -467,8 +504,8 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
467 unsigned int tmp; 504 unsigned int tmp;
468 int i, prot; 505 int i, prot;
469 506
470 for(i = 0; i < pnum; i++, epp++) { 507 for (i = 0; i < pnum; i++, epp++) {
471 if(epp->p_type != PT_LOAD) 508 if (epp->p_type != PT_LOAD)
472 continue; 509 continue;
473 510
474 /* Map it. */ 511 /* Map it. */
@@ -483,23 +520,23 @@ static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnu
483 up_write(&current->mm->mmap_sem); 520 up_write(&current->mm->mmap_sem);
484 521
485 /* Fixup location tracking vars. */ 522 /* Fixup location tracking vars. */
486 if((epp->p_vaddr & 0xfffff000) < *estack) 523 if ((epp->p_vaddr & 0xfffff000) < *estack)
487 *estack = (epp->p_vaddr & 0xfffff000); 524 *estack = (epp->p_vaddr & 0xfffff000);
488 if(!*laddr) 525 if (!*laddr)
489 *laddr = epp->p_vaddr - epp->p_offset; 526 *laddr = epp->p_vaddr - epp->p_offset;
490 if(epp->p_vaddr < *scode) 527 if (epp->p_vaddr < *scode)
491 *scode = epp->p_vaddr; 528 *scode = epp->p_vaddr;
492 529
493 tmp = epp->p_vaddr + epp->p_filesz; 530 tmp = epp->p_vaddr + epp->p_filesz;
494 if(tmp > *ebss) 531 if (tmp > *ebss)
495 *ebss = tmp; 532 *ebss = tmp;
496 if((epp->p_flags & PF_X) && *ecode < tmp) 533 if ((epp->p_flags & PF_X) && *ecode < tmp)
497 *ecode = tmp; 534 *ecode = tmp;
498 if(*edata < tmp) 535 if (*edata < tmp)
499 *edata = tmp; 536 *edata = tmp;
500 537
501 tmp = epp->p_vaddr + epp->p_memsz; 538 tmp = epp->p_vaddr + epp->p_memsz;
502 if(tmp > *ebrk) 539 if (tmp > *ebrk)
503 *ebrk = tmp; 540 *ebrk = tmp;
504 } 541 }
505 542
@@ -513,12 +550,12 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
513 int i; 550 int i;
514 551
515 *eentry = 0xffffffff; 552 *eentry = 0xffffffff;
516 for(i = 0; i < pnum; i++, epp++) { 553 for (i = 0; i < pnum; i++, epp++) {
517 if(epp->p_type != PT_INTERP) 554 if (epp->p_type != PT_INTERP)
518 continue; 555 continue;
519 556
520 /* We should have fielded this error elsewhere... */ 557 /* We should have fielded this error elsewhere... */
521 if(*eentry != 0xffffffff) 558 if (*eentry != 0xffffffff)
522 return -1; 559 return -1;
523 560
524 set_fs(old_fs); 561 set_fs(old_fs);
@@ -604,9 +641,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
604 if (elf_ex.e_shnum > 20) 641 if (elf_ex.e_shnum > 20)
605 goto out; 642 goto out;
606 643
607#ifdef DEBUG
608 print_elfhdr(&elf_ex); 644 print_elfhdr(&elf_ex);
609#endif
610 645
611 /* Now read in all of the header information */ 646 /* Now read in all of the header information */
612 size = elf_ex.e_phentsize * elf_ex.e_phnum; 647 size = elf_ex.e_phentsize * elf_ex.e_phnum;
@@ -622,13 +657,11 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
622 if (retval < 0) 657 if (retval < 0)
623 goto out_free_ph; 658 goto out_free_ph;
624 659
625#ifdef DEBUG
626 dump_phdrs(elf_phdata, elf_ex.e_phnum); 660 dump_phdrs(elf_phdata, elf_ex.e_phnum);
627#endif
628 661
629 /* Set some things for later. */ 662 /* Set some things for later. */
630 for(i = 0; i < elf_ex.e_phnum; i++) { 663 for (i = 0; i < elf_ex.e_phnum; i++) {
631 switch(elf_phdata[i].p_type) { 664 switch (elf_phdata[i].p_type) {
632 case PT_INTERP: 665 case PT_INTERP:
633 has_interp = 1; 666 has_interp = 1;
634 elf_ihdr = &elf_phdata[i]; 667 elf_ihdr = &elf_phdata[i];
@@ -667,7 +700,7 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
667 700
668 if (elf_interpreter) { 701 if (elf_interpreter) {
669 retval = verify_irix_interpreter(&interp_elf_ex); 702 retval = verify_irix_interpreter(&interp_elf_ex);
670 if(retval) 703 if (retval)
671 goto out_free_interp; 704 goto out_free_interp;
672 } 705 }
673 706
@@ -706,12 +739,12 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
706 &load_addr, &start_code, &elf_bss, &end_code, 739 &load_addr, &start_code, &elf_bss, &end_code,
707 &end_data, &elf_brk); 740 &end_data, &elf_brk);
708 741
709 if(elf_interpreter) { 742 if (elf_interpreter) {
710 retval = map_interpreter(elf_phdata, &interp_elf_ex, 743 retval = map_interpreter(elf_phdata, &interp_elf_ex,
711 interpreter, &interp_load_addr, 744 interpreter, &interp_load_addr,
712 elf_ex.e_phnum, old_fs, &elf_entry); 745 elf_ex.e_phnum, old_fs, &elf_entry);
713 kfree(elf_interpreter); 746 kfree(elf_interpreter);
714 if(retval) { 747 if (retval) {
715 set_fs(old_fs); 748 set_fs(old_fs);
716 printk("Unable to load IRIX ELF interpreter\n"); 749 printk("Unable to load IRIX ELF interpreter\n");
717 send_sig(SIGSEGV, current, 0); 750 send_sig(SIGSEGV, current, 0);
@@ -809,12 +842,12 @@ static int load_irix_library(struct file *file)
809 return -ENOEXEC; 842 return -ENOEXEC;
810 843
811 /* First of all, some simple consistency checks. */ 844 /* First of all, some simple consistency checks. */
812 if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 845 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
813 !file->f_op->mmap) 846 !file->f_op->mmap)
814 return -ENOEXEC; 847 return -ENOEXEC;
815 848
816 /* Now read in all of the header information. */ 849 /* Now read in all of the header information. */
817 if(sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE) 850 if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
818 return -ENOEXEC; 851 return -ENOEXEC;
819 852
820 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL); 853 elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
@@ -825,15 +858,15 @@ static int load_irix_library(struct file *file)
825 sizeof(struct elf_phdr) * elf_ex.e_phnum); 858 sizeof(struct elf_phdr) * elf_ex.e_phnum);
826 859
827 j = 0; 860 j = 0;
828 for(i=0; i<elf_ex.e_phnum; i++) 861 for (i=0; i<elf_ex.e_phnum; i++)
829 if((elf_phdata + i)->p_type == PT_LOAD) j++; 862 if ((elf_phdata + i)->p_type == PT_LOAD) j++;
830 863
831 if(j != 1) { 864 if (j != 1) {
832 kfree(elf_phdata); 865 kfree(elf_phdata);
833 return -ENOEXEC; 866 return -ENOEXEC;
834 } 867 }
835 868
836 while(elf_phdata->p_type != PT_LOAD) elf_phdata++; 869 while (elf_phdata->p_type != PT_LOAD) elf_phdata++;
837 870
838 /* Now use mmap to map the library into memory. */ 871 /* Now use mmap to map the library into memory. */
839 down_write(&current->mm->mmap_sem); 872 down_write(&current->mm->mmap_sem);
@@ -889,9 +922,7 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
889 return -EFAULT; 922 return -EFAULT;
890 } 923 }
891 924
892#ifdef DEBUG
893 dump_phdrs(user_phdrp, cnt); 925 dump_phdrs(user_phdrp, cnt);
894#endif
895 926
896 for (i = 0; i < cnt; i++, hp++) { 927 for (i = 0; i < cnt; i++, hp++) {
897 if (__get_user(type, &hp->p_type)) 928 if (__get_user(type, &hp->p_type))
@@ -905,14 +936,14 @@ unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
905 filp = fget(fd); 936 filp = fget(fd);
906 if (!filp) 937 if (!filp)
907 return -EACCES; 938 return -EACCES;
908 if(!filp->f_op) { 939 if (!filp->f_op) {
909 printk("irix_mapelf: Bogon filp!\n"); 940 printk("irix_mapelf: Bogon filp!\n");
910 fput(filp); 941 fput(filp);
911 return -EACCES; 942 return -EACCES;
912 } 943 }
913 944
914 hp = user_phdrp; 945 hp = user_phdrp;
915 for(i = 0; i < cnt; i++, hp++) { 946 for (i = 0; i < cnt; i++, hp++) {
916 int prot; 947 int prot;
917 948
918 retval = __get_user(vaddr, &hp->p_vaddr); 949 retval = __get_user(vaddr, &hp->p_vaddr);
@@ -1015,8 +1046,6 @@ static int notesize(struct memelfnote *en)
1015 return sz; 1046 return sz;
1016} 1047}
1017 1048
1018/* #define DEBUG */
1019
1020#define DUMP_WRITE(addr, nr) \ 1049#define DUMP_WRITE(addr, nr) \
1021 if (!dump_write(file, (addr), (nr))) \ 1050 if (!dump_write(file, (addr), (nr))) \
1022 goto end_coredump; 1051 goto end_coredump;
@@ -1093,9 +1122,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1093 1122
1094 segs++; 1123 segs++;
1095 } 1124 }
1096#ifdef DEBUG 1125 pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1097 printk("irix_core_dump: %d segs taking %d bytes\n", segs, size);
1098#endif
1099 1126
1100 /* Set up header. */ 1127 /* Set up header. */
1101 memcpy(elf.e_ident, ELFMAG, SELFMAG); 1128 memcpy(elf.e_ident, ELFMAG, SELFMAG);
@@ -1221,7 +1248,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1221 struct elf_phdr phdr; 1248 struct elf_phdr phdr;
1222 int sz = 0; 1249 int sz = 0;
1223 1250
1224 for(i = 0; i < numnote; i++) 1251 for (i = 0; i < numnote; i++)
1225 sz += notesize(&notes[i]); 1252 sz += notesize(&notes[i]);
1226 1253
1227 phdr.p_type = PT_NOTE; 1254 phdr.p_type = PT_NOTE;
@@ -1241,7 +1268,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1241 dataoff = offset = roundup(offset, PAGE_SIZE); 1268 dataoff = offset = roundup(offset, PAGE_SIZE);
1242 1269
1243 /* Write program headers for segments dump. */ 1270 /* Write program headers for segments dump. */
1244 for(vma = current->mm->mmap, i = 0; 1271 for (vma = current->mm->mmap, i = 0;
1245 i < segs && vma != NULL; vma = vma->vm_next) { 1272 i < segs && vma != NULL; vma = vma->vm_next) {
1246 struct elf_phdr phdr; 1273 struct elf_phdr phdr;
1247 size_t sz; 1274 size_t sz;
@@ -1267,7 +1294,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1267 DUMP_WRITE(&phdr, sizeof(phdr)); 1294 DUMP_WRITE(&phdr, sizeof(phdr));
1268 } 1295 }
1269 1296
1270 for(i = 0; i < numnote; i++) 1297 for (i = 0; i < numnote; i++)
1271 if (!writenote(&notes[i], file)) 1298 if (!writenote(&notes[i], file))
1272 goto end_coredump; 1299 goto end_coredump;
1273 1300
@@ -1275,7 +1302,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1275 1302
1276 DUMP_SEEK(dataoff); 1303 DUMP_SEEK(dataoff);
1277 1304
1278 for(i = 0, vma = current->mm->mmap; 1305 for (i = 0, vma = current->mm->mmap;
1279 i < segs && vma != NULL; 1306 i < segs && vma != NULL;
1280 vma = vma->vm_next) { 1307 vma = vma->vm_next) {
1281 unsigned long addr = vma->vm_start; 1308 unsigned long addr = vma->vm_start;
@@ -1284,9 +1311,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1284 if (!maydump(vma)) 1311 if (!maydump(vma))
1285 continue; 1312 continue;
1286 i++; 1313 i++;
1287#ifdef DEBUG 1314 pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
1288 printk("elf_core_dump: writing %08lx %lx\n", addr, len);
1289#endif
1290 DUMP_WRITE((void __user *)addr, len); 1315 DUMP_WRITE((void __user *)addr, len);
1291 } 1316 }
1292 1317
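
The irixelf.c conversion replaces the per-call "#ifdef DEBUG ... printk()" blocks with pr_debug(), and hoists a single "#undef DEBUG" above the includes so the choice applies file-wide. A minimal sketch of how that switch behaves; the function and message below are invented:

#define DEBUG			/* or #undef DEBUG for a silent build, as above */
#include <linux/kernel.h>

static void dump_entry_sketch(unsigned long entry)
{
	/* printk(KERN_DEBUG ...) when DEBUG is defined, a no-op otherwise */
	pr_debug("entry point at %08lx\n", entry);
}
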
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bcaad6696082..2967537221e2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -112,7 +112,7 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
112} 112}
113 113
114struct irq_chip msc_levelirq_type = { 114struct irq_chip msc_levelirq_type = {
115 .typename = "SOC-it-Level", 115 .name = "SOC-it-Level",
116 .ack = level_mask_and_ack_msc_irq, 116 .ack = level_mask_and_ack_msc_irq,
117 .mask = mask_msc_irq, 117 .mask = mask_msc_irq,
118 .mask_ack = level_mask_and_ack_msc_irq, 118 .mask_ack = level_mask_and_ack_msc_irq,
@@ -122,7 +122,7 @@ struct irq_chip msc_levelirq_type = {
122}; 122};
123 123
124struct irq_chip msc_edgeirq_type = { 124struct irq_chip msc_edgeirq_type = {
125 .typename = "SOC-it-Edge", 125 .name = "SOC-it-Edge",
126 .ack = edge_mask_and_ack_msc_irq, 126 .ack = edge_mask_and_ack_msc_irq,
127 .mask = mask_msc_irq, 127 .mask = mask_msc_irq,
128 .mask_ack = edge_mask_and_ack_msc_irq, 128 .mask_ack = edge_mask_and_ack_msc_irq,
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index efbd219845b5..3dd561832e4c 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -23,13 +23,13 @@ static unsigned int irq_base;
23 23
24static inline int ls1bit32(unsigned int x) 24static inline int ls1bit32(unsigned int x)
25{ 25{
26 int b = 31, s; 26 int b = 31, s;
27 27
28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s; 28 s = 16; if (x << 16 == 0) s = 0; b -= s; x <<= s;
29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s; 29 s = 8; if (x << 8 == 0) s = 0; b -= s; x <<= s;
30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s; 30 s = 4; if (x << 4 == 0) s = 0; b -= s; x <<= s;
31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s; 31 s = 2; if (x << 2 == 0) s = 0; b -= s; x <<= s;
32 s = 1; if (x << 1 == 0) s = 0; b -= s; 32 s = 1; if (x << 1 == 0) s = 0; b -= s;
33 33
34 return b; 34 return b;
35} 35}
@@ -92,7 +92,7 @@ void ll_mv64340_irq(void)
92} 92}
93 93
94struct irq_chip mv64340_irq_type = { 94struct irq_chip mv64340_irq_type = {
95 .typename = "MV-64340", 95 .name = "MV-64340",
96 .ack = mask_mv64340_irq, 96 .ack = mask_mv64340_irq,
97 .mask = mask_mv64340_irq, 97 .mask = mask_mv64340_irq,
98 .mask_ack = mask_mv64340_irq, 98 .mask_ack = mask_mv64340_irq,
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 123324ba8c14..250732883488 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -17,28 +17,27 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/system.h> 18#include <asm/system.h>
19 19
20static int irq_base;
21
22static inline void unmask_rm7k_irq(unsigned int irq) 20static inline void unmask_rm7k_irq(unsigned int irq)
23{ 21{
24 set_c0_intcontrol(0x100 << (irq - irq_base)); 22 set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
25} 23}
26 24
27static inline void mask_rm7k_irq(unsigned int irq) 25static inline void mask_rm7k_irq(unsigned int irq)
28{ 26{
29 clear_c0_intcontrol(0x100 << (irq - irq_base)); 27 clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
30} 28}
31 29
32static struct irq_chip rm7k_irq_controller = { 30static struct irq_chip rm7k_irq_controller = {
33 .typename = "RM7000", 31 .name = "RM7000",
34 .ack = mask_rm7k_irq, 32 .ack = mask_rm7k_irq,
35 .mask = mask_rm7k_irq, 33 .mask = mask_rm7k_irq,
36 .mask_ack = mask_rm7k_irq, 34 .mask_ack = mask_rm7k_irq,
37 .unmask = unmask_rm7k_irq, 35 .unmask = unmask_rm7k_irq,
38}; 36};
39 37
40void __init rm7k_cpu_irq_init(int base) 38void __init rm7k_cpu_irq_init(void)
41{ 39{
40 int base = RM7K_CPU_IRQ_BASE;
42 int i; 41 int i;
43 42
44 clear_c0_intcontrol(0x00000f00); /* Mask all */ 43 clear_c0_intcontrol(0x00000f00); /* Mask all */
@@ -46,6 +45,4 @@ void __init rm7k_cpu_irq_init(int base)
46 for (i = base; i < base + 4; i++) 45 for (i = base; i < base + 4; i++)
47 set_irq_chip_and_handler(i, &rm7k_irq_controller, 46 set_irq_chip_and_handler(i, &rm7k_irq_controller,
48 handle_level_irq); 47 handle_level_irq);
49
50 irq_base = base;
51} 48}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 0e6f4c5349d2..ae83d2df6f31 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -18,16 +18,14 @@
18#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
19#include <asm/system.h> 19#include <asm/system.h>
20 20
21static int irq_base;
22
23static inline void unmask_rm9k_irq(unsigned int irq) 21static inline void unmask_rm9k_irq(unsigned int irq)
24{ 22{
25 set_c0_intcontrol(0x1000 << (irq - irq_base)); 23 set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
26} 24}
27 25
28static inline void mask_rm9k_irq(unsigned int irq) 26static inline void mask_rm9k_irq(unsigned int irq)
29{ 27{
30 clear_c0_intcontrol(0x1000 << (irq - irq_base)); 28 clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
31} 29}
32 30
33static inline void rm9k_cpu_irq_enable(unsigned int irq) 31static inline void rm9k_cpu_irq_enable(unsigned int irq)
@@ -39,15 +37,6 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
39 local_irq_restore(flags); 37 local_irq_restore(flags);
40} 38}
41 39
42static void rm9k_cpu_irq_disable(unsigned int irq)
43{
44 unsigned long flags;
45
46 local_irq_save(flags);
47 mask_rm9k_irq(irq);
48 local_irq_restore(flags);
49}
50
51/* 40/*
52 * Performance counter interrupts are global on all processors. 41 * Performance counter interrupts are global on all processors.
53 */ 42 */
@@ -81,7 +70,7 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
81} 70}
82 71
83static struct irq_chip rm9k_irq_controller = { 72static struct irq_chip rm9k_irq_controller = {
84 .typename = "RM9000", 73 .name = "RM9000",
85 .ack = mask_rm9k_irq, 74 .ack = mask_rm9k_irq,
86 .mask = mask_rm9k_irq, 75 .mask = mask_rm9k_irq,
87 .mask_ack = mask_rm9k_irq, 76 .mask_ack = mask_rm9k_irq,
@@ -89,7 +78,7 @@ static struct irq_chip rm9k_irq_controller = {
89}; 78};
90 79
91static struct irq_chip rm9k_perfcounter_irq = { 80static struct irq_chip rm9k_perfcounter_irq = {
92 .typename = "RM9000", 81 .name = "RM9000",
93 .startup = rm9k_perfcounter_irq_startup, 82 .startup = rm9k_perfcounter_irq_startup,
94 .shutdown = rm9k_perfcounter_irq_shutdown, 83 .shutdown = rm9k_perfcounter_irq_shutdown,
95 .ack = mask_rm9k_irq, 84 .ack = mask_rm9k_irq,
@@ -102,8 +91,9 @@ unsigned int rm9000_perfcount_irq;
102 91
103EXPORT_SYMBOL(rm9000_perfcount_irq); 92EXPORT_SYMBOL(rm9000_perfcount_irq);
104 93
105void __init rm9k_cpu_irq_init(int base) 94void __init rm9k_cpu_irq_init(void)
106{ 95{
96 int base = RM9K_CPU_IRQ_BASE;
107 int i; 97 int i;
108 98
109 clear_c0_intcontrol(0x0000f000); /* Mask all */ 99 clear_c0_intcontrol(0x0000f000); /* Mask all */
@@ -115,6 +105,4 @@ void __init rm9k_cpu_irq_init(int base)
115 rm9000_perfcount_irq = base + 1; 105 rm9000_perfcount_irq = base + 1;
116 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, 106 set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
117 handle_level_irq); 107 handle_level_irq);
118
119 irq_base = base;
120} 108}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index fcc86b96ccf6..7b66e03b5899 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -25,7 +25,7 @@
25 * Don't even think about using this on SMP. You have been warned. 25 * Don't even think about using this on SMP. You have been warned.
26 * 26 *
27 * This file exports one global function: 27 * This file exports one global function:
28 * void mips_cpu_irq_init(int irq_base); 28 * void mips_cpu_irq_init(void);
29 */ 29 */
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -36,22 +36,20 @@
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/system.h> 37#include <asm/system.h>
38 38
39static int mips_cpu_irq_base;
40
41static inline void unmask_mips_irq(unsigned int irq) 39static inline void unmask_mips_irq(unsigned int irq)
42{ 40{
43 set_c0_status(0x100 << (irq - mips_cpu_irq_base)); 41 set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
44 irq_enable_hazard(); 42 irq_enable_hazard();
45} 43}
46 44
47static inline void mask_mips_irq(unsigned int irq) 45static inline void mask_mips_irq(unsigned int irq)
48{ 46{
49 clear_c0_status(0x100 << (irq - mips_cpu_irq_base)); 47 clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
50 irq_disable_hazard(); 48 irq_disable_hazard();
51} 49}
52 50
53static struct irq_chip mips_cpu_irq_controller = { 51static struct irq_chip mips_cpu_irq_controller = {
54 .typename = "MIPS", 52 .name = "MIPS",
55 .ack = mask_mips_irq, 53 .ack = mask_mips_irq,
56 .mask = mask_mips_irq, 54 .mask = mask_mips_irq,
57 .mask_ack = mask_mips_irq, 55 .mask_ack = mask_mips_irq,
@@ -70,7 +68,7 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
70{ 68{
71 unsigned int vpflags = dvpe(); 69 unsigned int vpflags = dvpe();
72 70
73 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 71 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
74 evpe(vpflags); 72 evpe(vpflags);
75 unmask_mips_mt_irq(irq); 73 unmask_mips_mt_irq(irq);
76 74
@@ -84,13 +82,13 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
84static void mips_mt_cpu_irq_ack(unsigned int irq) 82static void mips_mt_cpu_irq_ack(unsigned int irq)
85{ 83{
86 unsigned int vpflags = dvpe(); 84 unsigned int vpflags = dvpe();
87 clear_c0_cause(0x100 << (irq - mips_cpu_irq_base)); 85 clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
88 evpe(vpflags); 86 evpe(vpflags);
89 mask_mips_mt_irq(irq); 87 mask_mips_mt_irq(irq);
90} 88}
91 89
92static struct irq_chip mips_mt_cpu_irq_controller = { 90static struct irq_chip mips_mt_cpu_irq_controller = {
93 .typename = "MIPS", 91 .name = "MIPS",
94 .startup = mips_mt_cpu_irq_startup, 92 .startup = mips_mt_cpu_irq_startup,
95 .ack = mips_mt_cpu_irq_ack, 93 .ack = mips_mt_cpu_irq_ack,
96 .mask = mask_mips_mt_irq, 94 .mask = mask_mips_mt_irq,
@@ -99,8 +97,9 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
99 .eoi = unmask_mips_mt_irq, 97 .eoi = unmask_mips_mt_irq,
100}; 98};
101 99
102void __init mips_cpu_irq_init(int irq_base) 100void __init mips_cpu_irq_init(void)
103{ 101{
102 int irq_base = MIPS_CPU_IRQ_BASE;
104 int i; 103 int i;
105 104
106 /* Mask interrupts. */ 105 /* Mask interrupts. */
@@ -118,6 +117,4 @@ void __init mips_cpu_irq_init(int irq_base)
118 for (i = irq_base + 2; i < irq_base + 8; i++) 117 for (i = irq_base + 2; i < irq_base + 8; i++)
119 set_irq_chip_and_handler(i, &mips_cpu_irq_controller, 118 set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
120 handle_level_irq); 119 handle_level_irq);
121
122 mips_cpu_irq_base = irq_base;
123} 120}
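Note: after this change the CPU interrupt controllers no longer take a runtime base; rm7k_cpu_irq_init(), rm9k_cpu_irq_init() and mips_cpu_irq_init() all read their base from per-platform constants (RM7K_CPU_IRQ_BASE, RM9K_CPU_IRQ_BASE, MIPS_CPU_IRQ_BASE). A minimal board-file sketch of the new calling convention follows; it is not part of the patch, and the handler and the "+ 7" IRQ offset are made-up examples.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>		/* mips_cpu_irq_init() */

/* Hypothetical handler, for illustration only. */
static irqreturn_t my_board_timer_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler = my_board_timer_interrupt,
	.name    = "timer",
};

void __init arch_init_irq(void)
{
	/* was: mips_cpu_irq_init(MIPSCPU_INT_BASE); */
	mips_cpu_irq_init();
	setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
}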
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index de3fae260ff8..0b8ce59429a8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -194,15 +194,15 @@ sysn32_waitid(int which, compat_pid_t pid,
194} 194}
195 195
196struct sysinfo32 { 196struct sysinfo32 {
197 s32 uptime; 197 s32 uptime;
198 u32 loads[3]; 198 u32 loads[3];
199 u32 totalram; 199 u32 totalram;
200 u32 freeram; 200 u32 freeram;
201 u32 sharedram; 201 u32 sharedram;
202 u32 bufferram; 202 u32 bufferram;
203 u32 totalswap; 203 u32 totalswap;
204 u32 freeswap; 204 u32 freeswap;
205 u16 procs; 205 u16 procs;
206 u32 totalhigh; 206 u32 totalhigh;
207 u32 freehigh; 207 u32 freehigh;
208 u32 mem_unit; 208 u32 mem_unit;
@@ -558,7 +558,7 @@ extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32) 558asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
559{ 559{
560 int err; 560 int err;
561 struct ustat tmp; 561 struct ustat tmp;
562 struct ustat32 tmp32; 562 struct ustat32 tmp32;
563 mm_segment_t old_fs = get_fs(); 563 mm_segment_t old_fs = get_fs();
564 564
@@ -569,11 +569,11 @@ asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
569 if (err) 569 if (err)
570 goto out; 570 goto out;
571 571
572 memset(&tmp32,0,sizeof(struct ustat32)); 572 memset(&tmp32,0,sizeof(struct ustat32));
573 tmp32.f_tfree = tmp.f_tfree; 573 tmp32.f_tfree = tmp.f_tfree;
574 tmp32.f_tinode = tmp.f_tinode; 574 tmp32.f_tinode = tmp.f_tinode;
575 575
576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0; 576 err = copy_to_user(ubuf32,&tmp32,sizeof(struct ustat32)) ? -EFAULT : 0;
577 577
578out: 578out:
579 return err; 579 return err;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c1373a6e668b..a32f6797353a 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -96,6 +96,10 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
96 goto out_unlock; 96 goto out_unlock;
97 } 97 }
98 98
99 retval = security_task_setscheduler(p, 0, NULL);
100 if (retval)
101 goto out_unlock;
102
99 /* Record new user-specified CPU set for future reference */ 103 /* Record new user-specified CPU set for future reference */
100 p->thread.user_cpus_allowed = new_mask; 104 p->thread.user_cpus_allowed = new_mask;
101 105
@@ -141,8 +145,9 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
141 p = find_process_by_pid(pid); 145 p = find_process_by_pid(pid);
142 if (!p) 146 if (!p)
143 goto out_unlock; 147 goto out_unlock;
144 148 retval = security_task_getscheduler(p);
145 retval = 0; 149 if (retval)
150 goto out_unlock;
146 151
147 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map); 152 cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
148 153
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4ed37ba19731..5ddc2e9deecf 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -31,13 +31,13 @@ static const char *cpu_name[] = {
31 [CPU_R4000PC] = "R4000PC", 31 [CPU_R4000PC] = "R4000PC",
32 [CPU_R4000SC] = "R4000SC", 32 [CPU_R4000SC] = "R4000SC",
33 [CPU_R4000MC] = "R4000MC", 33 [CPU_R4000MC] = "R4000MC",
34 [CPU_R4200] = "R4200", 34 [CPU_R4200] = "R4200",
35 [CPU_R4400PC] = "R4400PC", 35 [CPU_R4400PC] = "R4400PC",
36 [CPU_R4400SC] = "R4400SC", 36 [CPU_R4400SC] = "R4400SC",
37 [CPU_R4400MC] = "R4400MC", 37 [CPU_R4400MC] = "R4400MC",
38 [CPU_R4600] = "R4600", 38 [CPU_R4600] = "R4600",
39 [CPU_R6000] = "R6000", 39 [CPU_R6000] = "R6000",
40 [CPU_R6000A] = "R6000A", 40 [CPU_R6000A] = "R6000A",
41 [CPU_R8000] = "R8000", 41 [CPU_R8000] = "R8000",
42 [CPU_R10000] = "R10000", 42 [CPU_R10000] = "R10000",
43 [CPU_R12000] = "R12000", 43 [CPU_R12000] = "R12000",
@@ -46,14 +46,14 @@ static const char *cpu_name[] = {
46 [CPU_R4650] = "R4650", 46 [CPU_R4650] = "R4650",
47 [CPU_R4700] = "R4700", 47 [CPU_R4700] = "R4700",
48 [CPU_R5000] = "R5000", 48 [CPU_R5000] = "R5000",
49 [CPU_R5000A] = "R5000A", 49 [CPU_R5000A] = "R5000A",
50 [CPU_R4640] = "R4640", 50 [CPU_R4640] = "R4640",
51 [CPU_NEVADA] = "Nevada", 51 [CPU_NEVADA] = "Nevada",
52 [CPU_RM7000] = "RM7000", 52 [CPU_RM7000] = "RM7000",
53 [CPU_RM9000] = "RM9000", 53 [CPU_RM9000] = "RM9000",
54 [CPU_R5432] = "R5432", 54 [CPU_R5432] = "R5432",
55 [CPU_4KC] = "MIPS 4Kc", 55 [CPU_4KC] = "MIPS 4Kc",
56 [CPU_5KC] = "MIPS 5Kc", 56 [CPU_5KC] = "MIPS 5Kc",
57 [CPU_R4310] = "R4310", 57 [CPU_R4310] = "R4310",
58 [CPU_SB1] = "SiByte SB1", 58 [CPU_SB1] = "SiByte SB1",
59 [CPU_SB1A] = "SiByte SB1A", 59 [CPU_SB1A] = "SiByte SB1A",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ec8209f3a0c6..04e5b38d327d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,10 +41,6 @@
41#include <asm/isadep.h> 41#include <asm/isadep.h>
42#include <asm/inst.h> 42#include <asm/inst.h>
43#include <asm/stacktrace.h> 43#include <asm/stacktrace.h>
44#ifdef CONFIG_MIPS_MT_SMTC
45#include <asm/mipsmtregs.h>
46extern void smtc_idle_loop_hook(void);
47#endif /* CONFIG_MIPS_MT_SMTC */
48 44
49/* 45/*
50 * The idle thread. There's no useful work to be done, so just try to conserve 46 * The idle thread. There's no useful work to be done, so just try to conserve
@@ -57,6 +53,8 @@ ATTRIB_NORET void cpu_idle(void)
57 while (1) { 53 while (1) {
58 while (!need_resched()) { 54 while (!need_resched()) {
59#ifdef CONFIG_MIPS_MT_SMTC 55#ifdef CONFIG_MIPS_MT_SMTC
56 extern void smtc_idle_loop_hook(void);
57
60 smtc_idle_loop_hook(); 58 smtc_idle_loop_hook();
61#endif /* CONFIG_MIPS_MT_SMTC */ 59#endif /* CONFIG_MIPS_MT_SMTC */
62 if (cpu_wait) 60 if (cpu_wait)
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 880fa6e841ee..59c1577ecbb3 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -114,6 +114,14 @@ LEAF(_save_fp_context32)
114 */ 114 */
115LEAF(_restore_fp_context) 115LEAF(_restore_fp_context)
116 EX lw t0, SC_FPC_CSR(a0) 116 EX lw t0, SC_FPC_CSR(a0)
117
118 /* Fail if the CSR has exceptions pending */
119 srl t1, t0, 5
120 and t1, t0
121 andi t1, 0x1f << 7
122 bnez t1, fault
123 nop
124
117#ifdef CONFIG_64BIT 125#ifdef CONFIG_64BIT
118 EX ldc1 $f1, SC_FPREGS+8(a0) 126 EX ldc1 $f1, SC_FPREGS+8(a0)
119 EX ldc1 $f3, SC_FPREGS+24(a0) 127 EX ldc1 $f3, SC_FPREGS+24(a0)
@@ -157,6 +165,14 @@ LEAF(_restore_fp_context)
157LEAF(_restore_fp_context32) 165LEAF(_restore_fp_context32)
158 /* Restore an o32 sigcontext. */ 166 /* Restore an o32 sigcontext. */
159 EX lw t0, SC32_FPC_CSR(a0) 167 EX lw t0, SC32_FPC_CSR(a0)
168
169 /* Fail if the CSR has exceptions pending */
170 srl t1, t0, 5
171 and t1, t0
172 andi t1, 0x1f << 7
173 bnez t1, fault
174 nop
175
160 EX ldc1 $f0, SC32_FPREGS+0(a0) 176 EX ldc1 $f0, SC32_FPREGS+0(a0)
161 EX ldc1 $f2, SC32_FPREGS+16(a0) 177 EX ldc1 $f2, SC32_FPREGS+16(a0)
162 EX ldc1 $f4, SC32_FPREGS+32(a0) 178 EX ldc1 $f4, SC32_FPREGS+32(a0)
@@ -177,9 +193,10 @@ LEAF(_restore_fp_context32)
177 jr ra 193 jr ra
178 li v0, 0 # success 194 li v0, 0 # success
179 END(_restore_fp_context32) 195 END(_restore_fp_context32)
180 .set reorder
181#endif 196#endif
182 197
198 .set reorder
199
183 .type fault@function 200 .type fault@function
184 .ent fault 201 .ent fault
185fault: li v0, -EFAULT # failure 202fault: li v0, -EFAULT # failure
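The guard added to _restore_fp_context and _restore_fp_context32 rejects a saved FCSR in which an exception is both raised and enabled, since writing such a value back to the FPU would trap immediately. A rough C equivalent of the three-instruction test, for illustration only (FCSR enable bits sit at 7-11, cause bits at 12-16):

/* Not part of the patch: C restatement of the assembly check above. */
static int fcsr_would_trap(unsigned int csr)
{
	/* csr >> 5 lines the cause field up with the enable field; the  */
	/* AND keeps only exceptions that are raised *and* enabled, and  */
	/* the mask restricts the test to those five bits.               */
	return ((csr >> 5) & csr) & (0x1f << 7);
}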
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 5a99e3e0c96d..8610f4a925e9 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -63,7 +63,7 @@ extern void *vpe_get_shared(int index);
63 63
64static void rtlx_dispatch(void) 64static void rtlx_dispatch(void)
65{ 65{
66 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ); 66 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
67} 67}
68 68
69 69
@@ -491,7 +491,7 @@ static struct irqaction rtlx_irq = {
491 .name = "RTLX", 491 .name = "RTLX",
492}; 492};
493 493
494static int rtlx_irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; 494static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
495 495
496static char register_chrdev_failed[] __initdata = 496static char register_chrdev_failed[] __initdata =
497 KERN_ERR "rtlx_module_init: unable to register device\n"; 497 KERN_ERR "rtlx_module_init: unable to register device\n";
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index a7bff2a54723..39add2341aa2 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -384,7 +384,7 @@ EXPORT(sysn32_call_table)
384 PTR sys_readlinkat 384 PTR sys_readlinkat
385 PTR sys_fchmodat 385 PTR sys_fchmodat
386 PTR sys_faccessat 386 PTR sys_faccessat
387 PTR sys_pselect6 387 PTR compat_sys_pselect6
388 PTR sys_ppoll /* 6265 */ 388 PTR sys_ppoll /* 6265 */
389 PTR sys_unshare 389 PTR sys_unshare
390 PTR sys_splice 390 PTR sys_splice
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e91379c1be1d..c58b8e0105ea 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -506,7 +506,7 @@ sys_call_table:
506 PTR sys_readlinkat 506 PTR sys_readlinkat
507 PTR sys_fchmodat 507 PTR sys_fchmodat
508 PTR sys_faccessat /* 4300 */ 508 PTR sys_faccessat /* 4300 */
509 PTR sys_pselect6 509 PTR compat_sys_pselect6
510 PTR sys_ppoll 510 PTR sys_ppoll
511 PTR sys_unshare 511 PTR sys_unshare
512 PTR sys_splice 512 PTR sys_splice
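Both the n32 and o32 tables now route pselect6 through the compat entry point, because 32-bit callers pass 32-bit timespec and sigset layouts that must be widened before the 64-bit kernel can use them. A simplified, stand-alone sketch of that widening step (this is not the kernel's compat code; the struct and helper names are illustrative):

/* Illustrative only: widen a 32-bit timespec read from user space. */
struct compat_timespec32 {
	s32 tv_sec;
	s32 tv_nsec;
};

static int get_timespec32(struct timespec *ts,
			  const struct compat_timespec32 __user *cts)
{
	if (get_user(ts->tv_sec, &cts->tv_sec) ||
	    get_user(ts->tv_nsec, &cts->tv_nsec))
		return -EFAULT;
	return 0;
}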
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 89440a0d8528..d2e01e7167b8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -271,8 +271,7 @@ static void __init bootmem_init(void)
271static void __init bootmem_init(void) 271static void __init bootmem_init(void)
272{ 272{
273 unsigned long reserved_end; 273 unsigned long reserved_end;
274 unsigned long highest = 0; 274 unsigned long mapstart = ~0UL;
275 unsigned long mapstart = -1UL;
276 unsigned long bootmap_size; 275 unsigned long bootmap_size;
277 int i; 276 int i;
278 277
@@ -284,6 +283,13 @@ static void __init bootmem_init(void)
284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end))); 283 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
285 284
286 /* 285 /*
286 * max_low_pfn is not a number of pages. The number of pages
287 * of the system is given by 'max_low_pfn - min_low_pfn'.
288 */
289 min_low_pfn = ~0UL;
290 max_low_pfn = 0;
291
292 /*
287 * Find the highest page frame number we have available. 293 * Find the highest page frame number we have available.
288 */ 294 */
289 for (i = 0; i < boot_mem_map.nr_map; i++) { 295 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -296,8 +302,10 @@ static void __init bootmem_init(void)
296 end = PFN_DOWN(boot_mem_map.map[i].addr 302 end = PFN_DOWN(boot_mem_map.map[i].addr
297 + boot_mem_map.map[i].size); 303 + boot_mem_map.map[i].size);
298 304
299 if (end > highest) 305 if (end > max_low_pfn)
300 highest = end; 306 max_low_pfn = end;
307 if (start < min_low_pfn)
308 min_low_pfn = start;
301 if (end <= reserved_end) 309 if (end <= reserved_end)
302 continue; 310 continue;
303 if (start >= mapstart) 311 if (start >= mapstart)
@@ -305,22 +313,36 @@ static void __init bootmem_init(void)
305 mapstart = max(reserved_end, start); 313 mapstart = max(reserved_end, start);
306 } 314 }
307 315
316 if (min_low_pfn >= max_low_pfn)
317 panic("Incorrect memory mapping !!!");
318 if (min_low_pfn > ARCH_PFN_OFFSET) {
319 printk(KERN_INFO
320 "Wasting %lu bytes for tracking %lu unused pages\n",
321 (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
322 min_low_pfn - ARCH_PFN_OFFSET);
323 } else if (min_low_pfn < ARCH_PFN_OFFSET) {
324 printk(KERN_INFO
325 "%lu free pages won't be used\n",
326 ARCH_PFN_OFFSET - min_low_pfn);
327 }
328 min_low_pfn = ARCH_PFN_OFFSET;
329
308 /* 330 /*
309 * Determine low and high memory ranges 331 * Determine low and high memory ranges
310 */ 332 */
311 if (highest > PFN_DOWN(HIGHMEM_START)) { 333 if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
312#ifdef CONFIG_HIGHMEM 334#ifdef CONFIG_HIGHMEM
313 highstart_pfn = PFN_DOWN(HIGHMEM_START); 335 highstart_pfn = PFN_DOWN(HIGHMEM_START);
314 highend_pfn = highest; 336 highend_pfn = max_low_pfn;
315#endif 337#endif
316 highest = PFN_DOWN(HIGHMEM_START); 338 max_low_pfn = PFN_DOWN(HIGHMEM_START);
317 } 339 }
318 340
319 /* 341 /*
320 * Initialize the boot-time allocator with low memory only. 342 * Initialize the boot-time allocator with low memory only.
321 */ 343 */
322 bootmap_size = init_bootmem(mapstart, highest); 344 bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
323 345 min_low_pfn, max_low_pfn);
324 /* 346 /*
325 * Register fully available low RAM pages with the bootmem allocator. 347 * Register fully available low RAM pages with the bootmem allocator.
326 */ 348 */
@@ -507,9 +529,9 @@ void __init setup_arch(char **cmdline_p)
507 529
508#if defined(CONFIG_VT) 530#if defined(CONFIG_VT)
509#if defined(CONFIG_VGA_CONSOLE) 531#if defined(CONFIG_VGA_CONSOLE)
510 conswitchp = &vga_con; 532 conswitchp = &vga_con;
511#elif defined(CONFIG_DUMMY_CONSOLE) 533#elif defined(CONFIG_DUMMY_CONSOLE)
512 conswitchp = &dummy_con; 534 conswitchp = &dummy_con;
513#endif 535#endif
514#endif 536#endif
515 537
@@ -541,3 +563,6 @@ int __init dsp_disable(char *s)
541} 563}
542 564
543__setup("nodsp", dsp_disable); 565__setup("nodsp", dsp_disable);
566
567unsigned long kernelsp[NR_CPUS];
568unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
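bootmem_init() now records both ends of the physical memory map, because min_low_pfn is no longer assumed to be zero and the number of pages is max_low_pfn - min_low_pfn. A condensed sketch of the new scan (not a drop-in replacement; it omits the reserved_end/mapstart handling shown in the hunk):

/* Sketch of the pfn bookkeeping added above. */
unsigned long min_pfn = ~0UL, max_pfn = 0;
int i;

for (i = 0; i < boot_mem_map.nr_map; i++) {
	unsigned long start = PFN_UP(boot_mem_map.map[i].addr);
	unsigned long end = PFN_DOWN(boot_mem_map.map[i].addr
				     + boot_mem_map.map[i].size);

	if (start < min_pfn)
		min_pfn = start;
	if (end > max_pfn)
		max_pfn = end;
}
if (min_pfn >= max_pfn)
	panic("Incorrect memory mapping !!!");
/* If min_pfn ends up above ARCH_PFN_OFFSET, the gap only costs     */
/* unused struct page entries; if it is below, the pages under the  */
/* offset are reported as free pages that won't be used.            */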
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b9d358e05214..9a44053cd9f1 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -89,7 +89,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
89 spin_lock_irq(&current->sighand->siglock); 89 spin_lock_irq(&current->sighand->siglock);
90 current->saved_sigmask = current->blocked; 90 current->saved_sigmask = current->blocked;
91 current->blocked = newset; 91 current->blocked = newset;
92 recalc_sigpending(); 92 recalc_sigpending();
93 spin_unlock_irq(&current->sighand->siglock); 93 spin_unlock_irq(&current->sighand->siglock);
94 94
95 current->state = TASK_INTERRUPTIBLE; 95 current->state = TASK_INTERRUPTIBLE;
@@ -124,7 +124,7 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
124 124
125 if (!ret && oact) { 125 if (!ret && oact) {
126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact))) 126 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
127 return -EFAULT; 127 return -EFAULT;
128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 128 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); 129 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); 130 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
@@ -304,7 +304,7 @@ int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
304 current->comm, current->pid, 304 current->comm, current->pid,
305 frame, regs->cp0_epc, frame->regs[31]); 305 frame, regs->cp0_epc, frame->regs[31]);
306#endif 306#endif
307 return 0; 307 return 0;
308 308
309give_sigsegv: 309give_sigsegv:
310 force_sigsegv(signr, current); 310 force_sigsegv(signr, current);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index a67c18555ed3..b28646b3ceae 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -105,7 +105,7 @@ _sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
105 spin_lock_irq(&current->sighand->siglock); 105 spin_lock_irq(&current->sighand->siglock);
106 current->saved_sigmask = current->blocked; 106 current->saved_sigmask = current->blocked;
107 current->blocked = newset; 107 current->blocked = newset;
108 recalc_sigpending(); 108 recalc_sigpending();
109 spin_unlock_irq(&current->sighand->siglock); 109 spin_unlock_irq(&current->sighand->siglock);
110 110
111 current->state = TASK_INTERRUPTIBLE; 111 current->state = TASK_INTERRUPTIBLE;
@@ -184,7 +184,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
184 /* Create the ucontext. */ 184 /* Create the ucontext. */
185 err |= __put_user(0, &frame->rs_uc.uc_flags); 185 err |= __put_user(0, &frame->rs_uc.uc_flags);
186 err |= __put_user(0, &frame->rs_uc.uc_link); 186 err |= __put_user(0, &frame->rs_uc.uc_link);
187 sp = (int) (long) current->sas_ss_sp; 187 sp = (int) (long) current->sas_ss_sp;
188 err |= __put_user(sp, 188 err |= __put_user(sp,
189 &frame->rs_uc.uc_stack.ss_sp); 189 &frame->rs_uc.uc_stack.ss_sp);
190 err |= __put_user(sas_ss_flags(regs->regs[29]), 190 err |= __put_user(sas_ss_flags(regs->regs[29]),
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 1ee689c0e0c9..64b62bdfb4f6 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,7 +35,6 @@
35#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
36#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
37#include <asm/mips_mt.h> 37#include <asm/mips_mt.h>
38#include <asm/mips-boards/maltaint.h> /* This is f*cking wrong */
39 38
40#define MIPS_CPU_IPI_RESCHED_IRQ 0 39#define MIPS_CPU_IPI_RESCHED_IRQ 0
41#define MIPS_CPU_IPI_CALL_IRQ 1 40#define MIPS_CPU_IPI_CALL_IRQ 1
@@ -108,12 +107,12 @@ void __init sanitize_tlb_entries(void)
108 107
109static void ipi_resched_dispatch(void) 108static void ipi_resched_dispatch(void)
110{ 109{
111 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ); 110 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
112} 111}
113 112
114static void ipi_call_dispatch(void) 113static void ipi_call_dispatch(void)
115{ 114{
116 do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ); 115 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
117} 116}
118 117
119static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 118static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
@@ -270,8 +269,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
270 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); 269 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
271 } 270 }
272 271
273 cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ; 272 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
274 cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ; 273 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
275 274
276 setup_irq(cpu_ipi_resched_irq, &irq_resched); 275 setup_irq(cpu_ipi_resched_irq, &irq_resched);
277 setup_irq(cpu_ipi_call_irq, &irq_call); 276 setup_irq(cpu_ipi_call_irq, &irq_call);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 6a857bf030b0..9251ea824937 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -26,16 +26,6 @@
26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. 26 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
27 */ 27 */
28 28
29/*
30 * MIPSCPU_INT_BASE is identically defined in both
31 * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
32 * but as yet there's no properly organized include structure that
33 * will ensure that the right *int.h file will be included for a
34 * given platform build.
35 */
36
37#define MIPSCPU_INT_BASE 16
38
39#define MIPS_CPU_IPI_IRQ 1 29#define MIPS_CPU_IPI_IRQ 1
40 30
41#define LOCK_MT_PRA() \ 31#define LOCK_MT_PRA() \
@@ -77,15 +67,15 @@ unsigned int ipi_timer_latch[NR_CPUS];
77 67
78#define IPIBUF_PER_CPU 4 68#define IPIBUF_PER_CPU 4
79 69
80struct smtc_ipi_q IPIQ[NR_CPUS]; 70static struct smtc_ipi_q IPIQ[NR_CPUS];
81struct smtc_ipi_q freeIPIq; 71static struct smtc_ipi_q freeIPIq;
82 72
83 73
84/* Forward declarations */ 74/* Forward declarations */
85 75
86void ipi_decode(struct smtc_ipi *); 76void ipi_decode(struct smtc_ipi *);
87void post_direct_ipi(int cpu, struct smtc_ipi *pipi); 77static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
88void setup_cross_vpe_interrupts(void); 78static void setup_cross_vpe_interrupts(void);
89void init_smtc_stats(void); 79void init_smtc_stats(void);
90 80
91/* Global SMTC Status */ 81/* Global SMTC Status */
@@ -200,7 +190,7 @@ void __init sanitize_tlb_entries(void)
200 * Configure shared TLB - VPC configuration bit must be set by caller 190 * Configure shared TLB - VPC configuration bit must be set by caller
201 */ 191 */
202 192
203void smtc_configure_tlb(void) 193static void smtc_configure_tlb(void)
204{ 194{
205 int i,tlbsiz,vpes; 195 int i,tlbsiz,vpes;
206 unsigned long mvpconf0; 196 unsigned long mvpconf0;
@@ -648,7 +638,7 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
648 * the VPE. 638 * the VPE.
649 */ 639 */
650 640
651void smtc_ipi_qdump(void) 641static void smtc_ipi_qdump(void)
652{ 642{
653 int i; 643 int i;
654 644
@@ -686,28 +676,6 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
686 return result; 676 return result;
687} 677}
688 678
689/* No longer used in IPI dispatch, but retained for future recycling */
690
691static __inline__ int atomic_postclear(unsigned int *pv)
692{
693 unsigned long result;
694
695 unsigned long temp;
696
697 __asm__ __volatile__(
698 "1: ll %0, %2 \n"
699 " or %1, $0, $0 \n"
700 " sc %1, %2 \n"
701 " beqz %1, 1b \n"
702 " sync \n"
703 : "=&r" (result), "=&r" (temp), "=m" (*pv)
704 : "m" (*pv)
705 : "memory");
706
707 return result;
708}
709
710
711void smtc_send_ipi(int cpu, int type, unsigned int action) 679void smtc_send_ipi(int cpu, int type, unsigned int action)
712{ 680{
713 int tcstatus; 681 int tcstatus;
@@ -781,7 +749,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
781/* 749/*
782 * Send IPI message to Halted TC, TargTC/TargVPE already having been set 750 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
783 */ 751 */
784void post_direct_ipi(int cpu, struct smtc_ipi *pipi) 752static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
785{ 753{
786 struct pt_regs *kstack; 754 struct pt_regs *kstack;
787 unsigned long tcstatus; 755 unsigned long tcstatus;
@@ -921,7 +889,7 @@ void smtc_timer_broadcast(int vpe)
921 * interrupts. 889 * interrupts.
922 */ 890 */
923 891
924static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ; 892static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
925 893
926static irqreturn_t ipi_interrupt(int irq, void *dev_idm) 894static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
927{ 895{
@@ -1000,7 +968,7 @@ static void ipi_irq_dispatch(void)
1000 968
1001static struct irqaction irq_ipi; 969static struct irqaction irq_ipi;
1002 970
1003void setup_cross_vpe_interrupts(void) 971static void setup_cross_vpe_interrupts(void)
1004{ 972{
1005 if (!cpu_has_vint) 973 if (!cpu_has_vint)
1006 panic("SMTC Kernel requires Vectored Interupt support"); 974 panic("SMTC Kernel requires Vectored Interupt support");
@@ -1191,7 +1159,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1191 * It would be nice to be able to use a spinlock here, 1159 * It would be nice to be able to use a spinlock here,
1192 * but this is invoked from within TLB flush routines 1160 * but this is invoked from within TLB flush routines
1193 * that protect themselves with DVPE, so if a lock is 1161 * that protect themselves with DVPE, so if a lock is
1194 * held by another TC, it'll never be freed. 1162 * held by another TC, it'll never be freed.
1195 * 1163 *
1196 * DVPE/DMT must not be done with interrupts enabled, 1164 * DVPE/DMT must not be done with interrupts enabled,
1197 * so even so most callers will already have disabled 1165 * so even so most callers will already have disabled
@@ -1296,7 +1264,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1296 * Support for single-threading cache flush operations. 1264 * Support for single-threading cache flush operations.
1297 */ 1265 */
1298 1266
1299int halt_state_save[NR_CPUS]; 1267static int halt_state_save[NR_CPUS];
1300 1268
1301/* 1269/*
1302 * To really, really be sure that nothing is being done 1270 * To really, really be sure that nothing is being done
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 6c2406a93f2b..93a148486f88 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -669,7 +669,7 @@ asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
669 669
670struct irix_statfs { 670struct irix_statfs {
671 short f_type; 671 short f_type;
672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree; 672 long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
673 char f_fname[6], f_fpack[6]; 673 char f_fname[6], f_fpack[6];
674}; 674};
675 675
@@ -959,7 +959,7 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
959 959
960 fn = default_llseek; 960 fn = default_llseek;
961 if (file->f_op && file->f_op->llseek) 961 if (file->f_op && file->f_op->llseek)
962 fn = file->f_op->llseek; 962 fn = file->f_op->llseek;
963 lock_kernel(); 963 lock_kernel();
964 retval = fn(file, offset, origin); 964 retval = fn(file, offset, origin);
965 unlock_kernel(); 965 unlock_kernel();
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 458fccf87c54..459624969c99 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -522,7 +522,7 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
522}; 522};
523 523
524static char *rstrs[] = { 524static char *rstrs[] = {
525 [R_MIPS_NONE] = "MIPS_NONE", 525 [R_MIPS_NONE] = "MIPS_NONE",
526 [R_MIPS_32] = "MIPS_32", 526 [R_MIPS_32] = "MIPS_32",
527 [R_MIPS_26] = "MIPS_26", 527 [R_MIPS_26] = "MIPS_26",
528 [R_MIPS_HI16] = "MIPS_HI16", 528 [R_MIPS_HI16] = "MIPS_HI16",
@@ -695,7 +695,7 @@ static void dump_tclist(void)
695} 695}
696 696
697/* We are prepared so configure and start the VPE... */ 697/* We are prepared so configure and start the VPE... */
698int vpe_run(struct vpe * v) 698static int vpe_run(struct vpe * v)
699{ 699{
700 struct vpe_notifications *n; 700 struct vpe_notifications *n;
701 unsigned long val, dmt_flag; 701 unsigned long val, dmt_flag;
@@ -713,16 +713,16 @@ int vpe_run(struct vpe * v)
713 dvpe(); 713 dvpe();
714 714
715 if (!list_empty(&v->tc)) { 715 if (!list_empty(&v->tc)) {
716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { 716 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", 717 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
718 t->index); 718 t->index);
719 return -ENOEXEC; 719 return -ENOEXEC;
720 } 720 }
721 } else { 721 } else {
722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", 722 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
723 v->minor); 723 v->minor);
724 return -ENOEXEC; 724 return -ENOEXEC;
725 } 725 }
726 726
727 /* Put MVPE's into 'configuration state' */ 727 /* Put MVPE's into 'configuration state' */
728 set_c0_mvpcontrol(MVPCONTROL_VPC); 728 set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -775,14 +775,14 @@ int vpe_run(struct vpe * v)
775 775
776 back_to_back_c0_hazard(); 776 back_to_back_c0_hazard();
777 777
778 /* Set up the XTC bit in vpeconf0 to point at our tc */ 778 /* Set up the XTC bit in vpeconf0 to point at our tc */
779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) 779 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
780 | (t->index << VPECONF0_XTC_SHIFT)); 780 | (t->index << VPECONF0_XTC_SHIFT));
781 781
782 back_to_back_c0_hazard(); 782 back_to_back_c0_hazard();
783 783
784 /* enable this VPE */ 784 /* enable this VPE */
785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); 785 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
786 786
787 /* clear out any left overs from a previous program */ 787 /* clear out any left overs from a previous program */
788 write_vpe_c0_status(0); 788 write_vpe_c0_status(0);
@@ -832,7 +832,7 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
832 * contents of the program (p)buffer performing relocatations/etc, free's it 832 * contents of the program (p)buffer performing relocatations/etc, free's it
833 * when finished. 833 * when finished.
834 */ 834 */
835int vpe_elfload(struct vpe * v) 835static int vpe_elfload(struct vpe * v)
836{ 836{
837 Elf_Ehdr *hdr; 837 Elf_Ehdr *hdr;
838 Elf_Shdr *sechdrs; 838 Elf_Shdr *sechdrs;
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c
index 2affa5ff171c..9a622b9a1051 100644
--- a/arch/mips/lasat/interrupt.c
+++ b/arch/mips/lasat/interrupt.c
@@ -45,7 +45,7 @@ void enable_lasat_irq(unsigned int irq_nr)
45} 45}
46 46
47static struct irq_chip lasat_irq_type = { 47static struct irq_chip lasat_irq_type = {
48 .typename = "Lasat", 48 .name = "Lasat",
49 .ack = disable_lasat_irq, 49 .ack = disable_lasat_irq,
50 .mask = disable_lasat_irq, 50 .mask = disable_lasat_irq,
51 .mask_ack = disable_lasat_irq, 51 .mask_ack = disable_lasat_irq,
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 88c7ab871ec4..d47692f73a26 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM); 132 add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140const char *get_system_type(void) 139const char *get_system_type(void)
diff --git a/arch/mips/lib-32/Makefile b/arch/mips/lib-32/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-32/Makefile
+++ b/arch/mips/lib-32/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
index dcd4d2ed2ac4..2036cf5e6857 100644
--- a/arch/mips/lib-64/Makefile
+++ b/arch/mips/lib-64/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += memset.o watch.o 5lib-y += watch.o
6 6
7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o 7obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o 8obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
diff --git a/arch/mips/lib-64/memset.S b/arch/mips/lib-64/memset.S
deleted file mode 100644
index e2c42c85113b..000000000000
--- a/arch/mips/lib-64/memset.S
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#include <asm/asm.h>
10#include <asm/asm-offsets.h>
11#include <asm/regdef.h>
12
13#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \
15 .section __ex_table,"a"; \
16 PTR 9b, handler; \
17 .previous
18
19 .macro f_fill64 dst, offset, val, fixup
20 EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
21 EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
22 EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
23 EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
24 EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
28 .endm
29
30/*
31 * memset(void *s, int c, size_t n)
32 *
33 * a0: start of area to clear
34 * a1: char to fill with
35 * a2: size of area to clear
36 */
37 .set noreorder
38 .align 5
39LEAF(memset)
40 beqz a1, 1f
41 move v0, a0 /* result */
42
43 andi a1, 0xff /* spread fillword */
44 dsll t1, a1, 8
45 or a1, t1
46 dsll t1, a1, 16
47 or a1, t1
48 dsll t1, a1, 32
49 or a1, t1
501:
51
52FEXPORT(__bzero)
53 sltiu t0, a2, LONGSIZE /* very small region? */
54 bnez t0, small_memset
55 andi t0, a0, LONGMASK /* aligned? */
56
57 beqz t0, 1f
58 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
59
60#ifdef __MIPSEB__
61 EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
62#endif
63#ifdef __MIPSEL__
64 EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
65#endif
66 PTR_SUBU a0, t0 /* long align ptr */
67 PTR_ADDU a2, t0 /* correct size */
68
691: ori t1, a2, 0x3f /* # of full blocks */
70 xori t1, 0x3f
71 beqz t1, memset_partial /* no block to fill */
72 andi t0, a2, 0x38
73
74 PTR_ADDU t1, a0 /* end address */
75 .set reorder
761: PTR_ADDIU a0, 64
77 f_fill64 a0, -64, a1, fwd_fixup
78 bne t1, a0, 1b
79 .set noreorder
80
81memset_partial:
82 PTR_LA t1, 2f /* where to start */
83 .set noat
84 dsrl AT, t0, 1
85 PTR_SUBU t1, AT
86 .set noat
87 jr t1
88 PTR_ADDU a0, t0 /* dest ptr */
89
90 .set push
91 .set noreorder
92 .set nomacro
93 f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
942: .set pop
95 andi a2, LONGMASK /* At most one long to go */
96
97 beqz a2, 1f
98 PTR_ADDU a0, a2 /* What's left */
99#ifdef __MIPSEB__
100 EX(sdr, a1, -1(a0), last_fixup)
101#endif
102#ifdef __MIPSEL__
103 EX(sdl, a1, -1(a0), last_fixup)
104#endif
1051: jr ra
106 move a2, zero
107
108small_memset:
109 beqz a2, 2f
110 PTR_ADDU t1, a0, a2
111
1121: PTR_ADDIU a0, 1 /* fill bytewise */
113 bne t1, a0, 1b
114 sb a1, -1(a0)
115
1162: jr ra /* done */
117 move a2, zero
118 END(memset)
119
120first_fixup:
121 jr ra
122 nop
123
124fwd_fixup:
125 PTR_L t0, TI_TASK($28)
126 LONG_L t0, THREAD_BUADDR(t0)
127 andi a2, 0x3f
128 LONG_ADDU a2, t1
129 jr ra
130 LONG_SUBU a2, t0
131
132partial_fixup:
133 PTR_L t0, TI_TASK($28)
134 LONG_L t0, THREAD_BUADDR(t0)
135 andi a2, LONGMASK
136 LONG_ADDU a2, t1
137 jr ra
138 LONG_SUBU a2, t0
139
140last_fixup:
141 jr ra
142 andi v1, a2, LONGMASK
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 989c900b8b14..5ad501b30b43 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
2# Makefile for MIPS-specific library files.. 2# Makefile for MIPS-specific library files..
3# 3#
4 4
5lib-y += csum_partial.o memcpy.o promlib.o \ 5lib-y += csum_partial.o memcpy.o memset.o promlib.o \
6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o 6 strlen_user.o strncpy_user.o strnlen_user.o uncached.o
7 7
8obj-y += iomap.o 8obj-y += iomap.o
diff --git a/arch/mips/lib-32/memset.S b/arch/mips/lib/memset.S
index 1981485bd48b..3f8b8b3d0b23 100644
--- a/arch/mips/lib-32/memset.S
+++ b/arch/mips/lib/memset.S
@@ -10,6 +10,14 @@
10#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
11#include <asm/regdef.h> 11#include <asm/regdef.h>
12 12
13#if LONGSIZE == 4
14#define LONG_S_L swl
15#define LONG_S_R swr
16#else
17#define LONG_S_L sdl
18#define LONG_S_R sdr
19#endif
20
13#define EX(insn,reg,addr,handler) \ 21#define EX(insn,reg,addr,handler) \
149: insn reg, addr; \ 229: insn reg, addr; \
15 .section __ex_table,"a"; \ 23 .section __ex_table,"a"; \
@@ -25,6 +33,7 @@
25 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) 33 EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
26 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) 34 EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
27 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) 35 EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
36#if LONGSIZE == 4
28 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) 37 EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup)
29 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) 38 EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup)
30 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) 39 EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
@@ -33,6 +42,7 @@
33 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) 42 EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
34 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) 43 EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
35 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) 44 EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
45#endif
36 .endm 46 .endm
37 47
38/* 48/*
@@ -49,9 +59,13 @@ LEAF(memset)
49 move v0, a0 /* result */ 59 move v0, a0 /* result */
50 60
51 andi a1, 0xff /* spread fillword */ 61 andi a1, 0xff /* spread fillword */
52 sll t1, a1, 8 62 LONG_SLL t1, a1, 8
53 or a1, t1 63 or a1, t1
54 sll t1, a1, 16 64 LONG_SLL t1, a1, 16
65#if LONGSIZE == 8
66 or a1, t1
67 LONG_SLL t1, a1, 32
68#endif
55 or a1, t1 69 or a1, t1
561: 701:
57 71
@@ -64,10 +78,10 @@ FEXPORT(__bzero)
64 PTR_SUBU t0, LONGSIZE /* alignment in bytes */ 78 PTR_SUBU t0, LONGSIZE /* alignment in bytes */
65 79
66#ifdef __MIPSEB__ 80#ifdef __MIPSEB__
67 EX(swl, a1, (a0), first_fixup) /* make word aligned */ 81 EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */
68#endif 82#endif
69#ifdef __MIPSEL__ 83#ifdef __MIPSEL__
70 EX(swr, a1, (a0), first_fixup) /* make word aligned */ 84 EX(LONG_S_R, a1, (a0), first_fixup) /* make word/dword aligned */
71#endif 85#endif
72 PTR_SUBU a0, t0 /* long align ptr */ 86 PTR_SUBU a0, t0 /* long align ptr */
73 PTR_ADDU a2, t0 /* correct size */ 87 PTR_ADDU a2, t0 /* correct size */
@@ -75,7 +89,7 @@ FEXPORT(__bzero)
751: ori t1, a2, 0x3f /* # of full blocks */ 891: ori t1, a2, 0x3f /* # of full blocks */
76 xori t1, 0x3f 90 xori t1, 0x3f
77 beqz t1, memset_partial /* no block to fill */ 91 beqz t1, memset_partial /* no block to fill */
78 andi t0, a2, 0x3c 92 andi t0, a2, 0x40-LONGSIZE
79 93
80 PTR_ADDU t1, a0 /* end address */ 94 PTR_ADDU t1, a0 /* end address */
81 .set reorder 95 .set reorder
@@ -86,7 +100,14 @@ FEXPORT(__bzero)
86 100
87memset_partial: 101memset_partial:
88 PTR_LA t1, 2f /* where to start */ 102 PTR_LA t1, 2f /* where to start */
103#if LONGSIZE == 4
89 PTR_SUBU t1, t0 104 PTR_SUBU t1, t0
105#else
106 .set noat
107 LONG_SRL AT, t0, 1
108 PTR_SUBU t1, AT
109 .set noat
110#endif
90 jr t1 111 jr t1
91 PTR_ADDU a0, t0 /* dest ptr */ 112 PTR_ADDU a0, t0 /* dest ptr */
92 113
@@ -100,10 +121,10 @@ memset_partial:
100 beqz a2, 1f 121 beqz a2, 1f
101 PTR_ADDU a0, a2 /* What's left */ 122 PTR_ADDU a0, a2 /* What's left */
102#ifdef __MIPSEB__ 123#ifdef __MIPSEB__
103 EX(swr, a1, -1(a0), last_fixup) 124 EX(LONG_S_R, a1, -1(a0), last_fixup)
104#endif 125#endif
105#ifdef __MIPSEL__ 126#ifdef __MIPSEL__
106 EX(swl, a1, -1(a0), last_fixup) 127 EX(LONG_S_L, a1, -1(a0), last_fixup)
107#endif 128#endif
1081: jr ra 1291: jr ra
109 move a2, zero 130 move a2, zero
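The merged memset picks 32- or 64-bit stores at build time, so the "spread fillword" step has to replicate the fill byte across whichever long size was chosen. A plain C illustration of that replication, mirroring the #if LONGSIZE == 8 block in the assembly (not part of the patch):

/* Replicate a fill byte across one native long, e.g. 0xcc -> 0xcccccccc. */
static unsigned long spread_fill_byte(unsigned char c)
{
	unsigned long fill = c;

	fill |= fill << 8;
	fill |= fill << 16;
#if BITS_PER_LONG == 64
	fill |= fill << 32;	/* one extra doubling on 64-bit builds */
#endif
	return fill;
}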
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 98ce89f8068b..2388f7f3ffde 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -44,20 +44,24 @@ unsigned long __init run_uncached(void *func)
44 44
45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2) 45 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
46 usp = CKSEG1ADDR(sp); 46 usp = CKSEG1ADDR(sp);
47#ifdef CONFIG_64BIT
47 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 48 else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
48 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0)) 49 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
49 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 50 usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
50 XKPHYS_TO_PHYS((long long)sp)); 51 XKPHYS_TO_PHYS((long long)sp));
52#endif
51 else { 53 else {
52 BUG(); 54 BUG();
53 usp = sp; 55 usp = sp;
54 } 56 }
55 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2) 57 if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
56 ufunc = CKSEG1ADDR(lfunc); 58 ufunc = CKSEG1ADDR(lfunc);
59#ifdef CONFIG_64BIT
57 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) && 60 else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
58 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0)) 61 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
59 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED, 62 ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
60 XKPHYS_TO_PHYS((long long)lfunc)); 63 XKPHYS_TO_PHYS((long long)lfunc));
64#endif
61 else { 65 else {
62 BUG(); 66 BUG();
63 ufunc = lfunc; 67 ufunc = lfunc;
diff --git a/arch/mips/mips-boards/atlas/atlas_int.c b/arch/mips/mips-boards/atlas/atlas_int.c
index 43dba6ce6603..dfa0acbd7fc2 100644
--- a/arch/mips/mips-boards/atlas/atlas_int.c
+++ b/arch/mips/mips-boards/atlas/atlas_int.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
35#include <linux/kernel.h>
35 36
36#include <asm/gdb-stub.h> 37#include <asm/gdb-stub.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -69,7 +70,7 @@ static void end_atlas_irq(unsigned int irq)
69} 70}
70 71
71static struct irq_chip atlas_irq_type = { 72static struct irq_chip atlas_irq_type = {
72 .typename = "Atlas", 73 .name = "Atlas",
73 .ack = disable_atlas_irq, 74 .ack = disable_atlas_irq,
74 .mask = disable_atlas_irq, 75 .mask = disable_atlas_irq,
75 .mask_ack = disable_atlas_irq, 76 .mask_ack = disable_atlas_irq,
@@ -220,7 +221,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
220 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 221 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
221 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 222 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
222}; 223};
223int __initdata msc_nr_irqs = sizeof(msc_irqmap) / sizeof(*msc_irqmap); 224int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
224 225
225msc_irqmap_t __initdata msc_eicirqmap[] = { 226msc_irqmap_t __initdata msc_eicirqmap[] = {
226 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 227 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -231,14 +232,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
231 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 232 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
232 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 233 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
233}; 234};
234int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap) / sizeof(*msc_eicirqmap); 235int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
235 236
236void __init arch_init_irq(void) 237void __init arch_init_irq(void)
237{ 238{
238 init_atlas_irqs(ATLAS_INT_BASE); 239 init_atlas_irqs(ATLAS_INT_BASE);
239 240
240 if (!cpu_has_veic) 241 if (!cpu_has_veic)
241 mips_cpu_irq_init(MIPSCPU_INT_BASE); 242 mips_cpu_irq_init();
242 243
243 switch(mips_revision_corid) { 244 switch(mips_revision_corid) {
244 case MIPS_REVISION_CORID_CORE_MSC: 245 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c
index eeed944e0f83..ebf0e16c5a0d 100644
--- a/arch/mips/mips-boards/generic/memory.c
+++ b/arch/mips/mips-boards/generic/memory.c
@@ -166,9 +166,8 @@ void __init prom_meminit(void)
166 } 166 }
167} 167}
168 168
169unsigned long __init prom_free_prom_memory(void) 169void __init prom_free_prom_memory(void)
170{ 170{
171 unsigned long freed = 0;
172 unsigned long addr; 171 unsigned long addr;
173 int i; 172 int i;
174 173
@@ -176,17 +175,8 @@ unsigned long __init prom_free_prom_memory(void)
176 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA) 175 if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
177 continue; 176 continue;
178 177
179 addr = PAGE_ALIGN(boot_mem_map.map[i].addr); 178 addr = boot_mem_map.map[i].addr;
180 while (addr < boot_mem_map.map[i].addr 179 free_init_pages("prom memory",
181 + boot_mem_map.map[i].size) { 180 addr, addr + boot_mem_map.map[i].size);
182 ClearPageReserved(virt_to_page(__va(addr)));
183 init_page_count(virt_to_page(__va(addr)));
184 free_page((unsigned long)__va(addr));
185 addr += PAGE_SIZE;
186 freed += PAGE_SIZE;
187 }
188 } 181 }
189 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
190
191 return freed;
192} 182}
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 90ad5bf3e2f1..3c206bb17160 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
30#include <linux/kernel.h>
30#include <linux/random.h> 31#include <linux/random.h>
31 32
32#include <asm/i8259.h> 33#include <asm/i8259.h>
@@ -289,7 +290,7 @@ msc_irqmap_t __initdata msc_irqmap[] = {
289 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, 290 {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
290 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, 291 {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
291}; 292};
292int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t); 293int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
293 294
294msc_irqmap_t __initdata msc_eicirqmap[] = { 295msc_irqmap_t __initdata msc_eicirqmap[] = {
295 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, 296 {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
@@ -303,14 +304,14 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
303 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, 304 {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
304 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} 305 {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
305}; 306};
306int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t); 307int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
307 308
308void __init arch_init_irq(void) 309void __init arch_init_irq(void)
309{ 310{
310 init_i8259_irqs(); 311 init_i8259_irqs();
311 312
312 if (!cpu_has_veic) 313 if (!cpu_has_veic)
313 mips_cpu_irq_init (MIPSCPU_INT_BASE); 314 mips_cpu_irq_init();
314 315
315 switch(mips_revision_corid) { 316 switch(mips_revision_corid) {
316 case MIPS_REVISION_CORID_CORE_MSC: 317 case MIPS_REVISION_CORID_CORE_MSC:
diff --git a/arch/mips/mips-boards/sead/sead_int.c b/arch/mips/mips-boards/sead/sead_int.c
index 874ccb0066b8..c4b9de3a7f27 100644
--- a/arch/mips/mips-boards/sead/sead_int.c
+++ b/arch/mips/mips-boards/sead/sead_int.c
@@ -113,5 +113,5 @@ asmlinkage void plat_irq_dispatch(void)
113 113
114void __init arch_init_irq(void) 114void __init arch_init_irq(void)
115{ 115{
116 mips_cpu_irq_init(MIPSCPU_INT_BASE); 116 mips_cpu_irq_init();
117} 117}
diff --git a/arch/mips/mips-boards/sim/sim_int.c b/arch/mips/mips-boards/sim/sim_int.c
index 2ce449dce6f2..15ac0655c1ff 100644
--- a/arch/mips/mips-boards/sim/sim_int.c
+++ b/arch/mips/mips-boards/sim/sim_int.c
@@ -21,9 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel_stat.h> 22#include <linux/kernel_stat.h>
23#include <asm/mips-boards/simint.h> 23#include <asm/mips-boards/simint.h>
24 24#include <asm/irq_cpu.h>
25
26extern void mips_cpu_irq_init(int);
27 25
28static inline int clz(unsigned long x) 26static inline int clz(unsigned long x)
29{ 27{
@@ -86,5 +84,5 @@ asmlinkage void plat_irq_dispatch(void)
86 84
87void __init arch_init_irq(void) 85void __init arch_init_irq(void)
88{ 86{
89 mips_cpu_irq_init(MIPSCPU_INT_BASE); 87 mips_cpu_irq_init();
90} 88}
diff --git a/arch/mips/mips-boards/sim/sim_mem.c b/arch/mips/mips-boards/sim/sim_mem.c
index f7ce76983328..46bc16f8b15d 100644
--- a/arch/mips/mips-boards/sim/sim_mem.c
+++ b/arch/mips/mips-boards/sim/sim_mem.c
@@ -99,10 +99,9 @@ void __init prom_meminit(void)
99 } 99 }
100} 100}
101 101
102unsigned long __init prom_free_prom_memory(void) 102void __init prom_free_prom_memory(void)
103{ 103{
104 int i; 104 int i;
105 unsigned long freed = 0;
106 unsigned long addr; 105 unsigned long addr;
107 106
108 for (i = 0; i < boot_mem_map.nr_map; i++) { 107 for (i = 0; i < boot_mem_map.nr_map; i++) {
@@ -110,16 +109,7 @@ unsigned long __init prom_free_prom_memory(void)
110 continue; 109 continue;
111 110
112 addr = boot_mem_map.map[i].addr; 111 addr = boot_mem_map.map[i].addr;
113 while (addr < boot_mem_map.map[i].addr 112 free_init_pages("prom memory",
114 + boot_mem_map.map[i].size) { 113 addr, addr + boot_mem_map.map[i].size);
115 ClearPageReserved(virt_to_page(__va(addr)));
116 init_page_count(virt_to_page(__va(addr)));
117 free_page((unsigned long)__va(addr));
118 addr += PAGE_SIZE;
119 freed += PAGE_SIZE;
120 }
121 } 114 }
122 printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
123
124 return freed;
125} 115}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 49065c133ebf..125a4a85ec05 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -341,7 +341,6 @@ static int __init page_is_ram(unsigned long pagenr)
341void __init paging_init(void) 341void __init paging_init(void)
342{ 342{
343 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 343 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
344 unsigned long max_dma, low;
345#ifndef CONFIG_FLATMEM 344#ifndef CONFIG_FLATMEM
346 unsigned long zholes_size[MAX_NR_ZONES] = { 0, }; 345 unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
347 unsigned long i, j, pfn; 346 unsigned long i, j, pfn;
@@ -354,19 +353,19 @@ void __init paging_init(void)
354#endif 353#endif
355 kmap_coherent_init(); 354 kmap_coherent_init();
356 355
357 max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
358 low = max_low_pfn;
359
360#ifdef CONFIG_ISA 356#ifdef CONFIG_ISA
361 if (low < max_dma) 357 if (max_low_pfn >= MAX_DMA_PFN)
362 zones_size[ZONE_DMA] = low; 358 if (min_low_pfn >= MAX_DMA_PFN) {
363 else { 359 zones_size[ZONE_DMA] = 0;
364 zones_size[ZONE_DMA] = max_dma; 360 zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
365 zones_size[ZONE_NORMAL] = low - max_dma; 361 } else {
366 } 362 zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
367#else 363 zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
368 zones_size[ZONE_DMA] = low; 364 }
365 else
369#endif 366#endif
367 zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
368
370#ifdef CONFIG_HIGHMEM 369#ifdef CONFIG_HIGHMEM
371 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn; 370 zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
372 371
@@ -467,7 +466,7 @@ void __init mem_init(void)
467} 466}
468#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 467#endif /* !CONFIG_NEED_MULTIPLE_NODES */
469 468
470static void free_init_pages(char *what, unsigned long begin, unsigned long end) 469void free_init_pages(const char *what, unsigned long begin, unsigned long end)
471{ 470{
472 unsigned long pfn; 471 unsigned long pfn;
473 472
@@ -493,18 +492,25 @@ void free_initrd_mem(unsigned long start, unsigned long end)
493} 492}
494#endif 493#endif
495 494
496extern unsigned long prom_free_prom_memory(void);
497
498void free_initmem(void) 495void free_initmem(void)
499{ 496{
500 unsigned long freed; 497 prom_free_prom_memory();
501
502 freed = prom_free_prom_memory();
503 if (freed)
504 printk(KERN_INFO "Freeing firmware memory: %ldkb freed\n",
505 freed >> 10);
506
507 free_init_pages("unused kernel memory", 498 free_init_pages("unused kernel memory",
508 __pa_symbol(&__init_begin), 499 __pa_symbol(&__init_begin),
509 __pa_symbol(&__init_end)); 500 __pa_symbol(&__init_end));
510} 501}
502
503unsigned long pgd_current[NR_CPUS];
504/*
505 * On 64-bit we've got three-level pagetables with a slightly
506 * different layout ...
507 */
508#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
509pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
510#ifdef CONFIG_64BIT
511#ifdef MODULE_START
512pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
513#endif
514pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
515#endif
516pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
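With min_low_pfn no longer pinned to zero, paging_init() sizes the zones relative to it. The nested #ifdef CONFIG_ISA hunk above is equivalent to the flatter sketch below (MAX_DMA_PFN is the ISA 16 MB limit expressed as a page frame number; this restatement is for readability and is not the patch text):

/* Equivalent restatement of the ISA-case zone sizing. */
if (min_low_pfn >= MAX_DMA_PFN) {
	/* All low memory lies above the ISA DMA window. */
	zones_size[ZONE_DMA] = 0;
	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
} else if (max_low_pfn >= MAX_DMA_PFN) {
	/* Low memory straddles the 16 MB boundary. */
	zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
	zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
} else {
	/* Everything fits inside the DMA zone. */
	zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
}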
diff --git a/arch/mips/momentum/jaguar_atx/Makefile b/arch/mips/momentum/jaguar_atx/Makefile
index 67372f3f9654..2e8cebd49bc0 100644
--- a/arch/mips/momentum/jaguar_atx/Makefile
+++ b/arch/mips/momentum/jaguar_atx/Makefile
@@ -6,7 +6,7 @@
6# unless it's something special (ie not a .c file). 6# unless it's something special (ie not a .c file).
7# 7#
8 8
9obj-y += irq.o prom.o reset.o setup.o 9obj-y += irq.o platform.o prom.o reset.o setup.o
10 10
11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o 11obj-$(CONFIG_SERIAL_8250_CONSOLE) += ja-console.o
12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o 12obj-$(CONFIG_REMOTE_DEBUG) += dbg_io.o
diff --git a/arch/mips/momentum/jaguar_atx/irq.c b/arch/mips/momentum/jaguar_atx/irq.c
index 2efb25aa1aed..f2b432585df2 100644
--- a/arch/mips/momentum/jaguar_atx/irq.c
+++ b/arch/mips/momentum/jaguar_atx/irq.c
@@ -82,8 +82,8 @@ void __init arch_init_irq(void)
82 */ 82 */
83 clear_c0_status(ST0_IM); 83 clear_c0_status(ST0_IM);
84 84
85 mips_cpu_irq_init(0); 85 mips_cpu_irq_init();
86 rm7k_cpu_irq_init(8); 86 rm7k_cpu_irq_init();
87 87
88 /* set up the cascading interrupts */ 88 /* set up the cascading interrupts */
89 setup_irq(8, &cascade_mv64340); 89 setup_irq(8, &cascade_mv64340);
diff --git a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
index 6978654c712b..022f6974b76e 100644
--- a/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
+++ b/arch/mips/momentum/jaguar_atx/jaguar_atx_fpga.h
@@ -46,7 +46,9 @@
46 46
47extern unsigned long ja_fpga_base; 47extern unsigned long ja_fpga_base;
48 48
49#define JAGUAR_FPGA_WRITE(x,y) writeb(x, ja_fpga_base + JAGUAR_ATX_REG_##y) 49#define __FPGA_REG_TO_ADDR(reg) \
50#define JAGUAR_FPGA_READ(x) readb(ja_fpga_base + JAGUAR_ATX_REG_##x) 50 ((void *) ja_fpga_base + JAGUAR_ATX_REG_##reg)
51#define JAGUAR_FPGA_WRITE(x, reg) writeb(x, __FPGA_REG_TO_ADDR(reg))
52#define JAGUAR_FPGA_READ(reg) readb(__FPGA_REG_TO_ADDR(reg))
51 53
52#endif 54#endif
diff --git a/arch/mips/momentum/jaguar_atx/platform.c b/arch/mips/momentum/jaguar_atx/platform.c
new file mode 100644
index 000000000000..035ea5137c71
--- /dev/null
+++ b/arch/mips/momentum/jaguar_atx/platform.c
@@ -0,0 +1,235 @@
1#include <linux/delay.h>
2#include <linux/if_ether.h>
3#include <linux/ioport.h>
4#include <linux/mv643xx.h>
5#include <linux/platform_device.h>
6
7#include "jaguar_atx_fpga.h"
8
9#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
10
11static struct resource mv643xx_eth_shared_resources[] = {
12 [0] = {
13 .name = "ethernet shared base",
14 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
15 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
16 MV643XX_ETH_SHARED_REGS_SIZE - 1,
17 .flags = IORESOURCE_MEM,
18 },
19};
20
21static struct platform_device mv643xx_eth_shared_device = {
22 .name = MV643XX_ETH_SHARED_NAME,
23 .id = 0,
24 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
25 .resource = mv643xx_eth_shared_resources,
26};
27
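/*
 * Carve the 256 KiB SRAM window at MV_SRAM_BASE into per-port descriptor
 * ring areas: the first half for eth0, the second half for eth1, each
 * half split evenly between TX and RX rings.
 */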
28#define MV_SRAM_BASE 0xfe000000UL
29#define MV_SRAM_SIZE (256 * 1024)
30
31#define MV_SRAM_RXRING_SIZE (MV_SRAM_SIZE / 4)
32#define MV_SRAM_TXRING_SIZE (MV_SRAM_SIZE / 4)
33
34#define MV_SRAM_BASE_ETH0 MV_SRAM_BASE
35#define MV_SRAM_BASE_ETH1 (MV_SRAM_BASE + (MV_SRAM_SIZE / 2))
36
37#define MV64x60_IRQ_ETH_0 48
38#define MV64x60_IRQ_ETH_1 49
39#define MV64x60_IRQ_ETH_2 50
40
41#ifdef CONFIG_MV643XX_ETH_0
42
43static struct resource mv64x60_eth0_resources[] = {
44 [0] = {
45 .name = "eth0 irq",
46 .start = MV64x60_IRQ_ETH_0,
47 .end = MV64x60_IRQ_ETH_0,
48 .flags = IORESOURCE_IRQ,
49 },
50};
51
52static char eth0_mac_addr[ETH_ALEN];
53
54static struct mv643xx_eth_platform_data eth0_pd = {
55 .mac_addr = eth0_mac_addr,
56
57 .tx_sram_addr = MV_SRAM_BASE_ETH0,
58 .tx_sram_size = MV_SRAM_TXRING_SIZE,
59 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
60
61 .rx_sram_addr = MV_SRAM_BASE_ETH0 + MV_SRAM_TXRING_SIZE,
62 .rx_sram_size = MV_SRAM_RXRING_SIZE,
63 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
64};
65
66static struct platform_device eth0_device = {
67 .name = MV643XX_ETH_NAME,
68 .id = 0,
69 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
70 .resource = mv64x60_eth0_resources,
71 .dev = {
72 .platform_data = &eth0_pd,
73 },
74};
75#endif /* CONFIG_MV643XX_ETH_0 */
76
77#ifdef CONFIG_MV643XX_ETH_1
78
79static struct resource mv64x60_eth1_resources[] = {
80 [0] = {
81 .name = "eth1 irq",
82 .start = MV64x60_IRQ_ETH_1,
83 .end = MV64x60_IRQ_ETH_1,
84 .flags = IORESOURCE_IRQ,
85 },
86};
87
88static char eth1_mac_addr[ETH_ALEN];
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .mac_addr = eth1_mac_addr,
92
93 .tx_sram_addr = MV_SRAM_BASE_ETH1,
94 .tx_sram_size = MV_SRAM_TXRING_SIZE,
95 .tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
96
97 .rx_sram_addr = MV_SRAM_BASE_ETH1 + MV_SRAM_TXRING_SIZE,
98 .rx_sram_size = MV_SRAM_RXRING_SIZE,
99 .rx_queue_size = MV_SRAM_RXRING_SIZE / 16,
100};
101
102static struct platform_device eth1_device = {
103 .name = MV643XX_ETH_NAME,
104 .id = 1,
105 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
106 .resource = mv64x60_eth1_resources,
107 .dev = {
108 .platform_data = &eth1_pd,
109 },
110};
111#endif /* CONFIG_MV643XX_ETH_1 */
112
113#ifdef CONFIG_MV643XX_ETH_2
114
115static struct resource mv64x60_eth2_resources[] = {
116 [0] = {
117 .name = "eth2 irq",
118 .start = MV64x60_IRQ_ETH_2,
119 .end = MV64x60_IRQ_ETH_2,
120 .flags = IORESOURCE_IRQ,
121 },
122};
123
124static char eth2_mac_addr[ETH_ALEN];
125
126static struct mv643xx_eth_platform_data eth2_pd = {
127 .mac_addr = eth2_mac_addr,
128};
129
130static struct platform_device eth2_device = {
131 .name = MV643XX_ETH_NAME,
132 .id = 2,
133 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
134 .resource = mv64x60_eth2_resources,
135 .dev = {
136 .platform_data = &eth2_pd,
137 },
138};
139#endif /* CONFIG_MV643XX_ETH_2 */
140
141static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
142 &mv643xx_eth_shared_device,
143#ifdef CONFIG_MV643XX_ETH_0
144 &eth0_device,
145#endif
146#ifdef CONFIG_MV643XX_ETH_1
147 &eth1_device,
148#endif
149#ifdef CONFIG_MV643XX_ETH_2
150 &eth2_device,
151#endif
152};
153
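/*
 * exchange_bit() bit-bangs one bit to/from the serial EEPROM behind the
 * board FPGA via the EEPROM_MODE register: bit 0 is chip select, bit 1
 * the clock, bit 2 the data bit written, bit 3 the data bit read back
 * and bit 4 the read strobe.
 */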
154static u8 __init exchange_bit(u8 val, u8 cs)
155{
156 /* place the data */
157 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
158 udelay(1);
159
160 /* turn the clock on */
161 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
162 udelay(1);
163
164 /* turn the clock off and read-strobe */
165 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
166
167 /* return the data */
168 return (JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1;
169}
170
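/*
 * get_mac() clocks out the 12-bit read command in read_opcode[] and then
 * shifts in the 48-bit MAC address, most significant bit first, one bit
 * per exchange_bit() call.
 */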
171static void __init get_mac(char dest[6])
172{
173 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
174 int i, j;
175
176 for (i = 0; i < 12; i++)
177 exchange_bit(read_opcode[i], 1);
178
179 for (j = 0; j < 6; j++) {
180 dest[j] = 0;
181 for (i = 0; i < 8; i++) {
182 dest[j] <<= 1;
183 dest[j] |= exchange_bit(0, 1);
184 }
185 }
186
187 /* turn off CS */
188 exchange_bit(0, 0);
189}
190
191/*
192 * Copy and increment ethernet MAC address by a small value.
193 *
194 * This is useful for systems where only one MAC address is stored in
195 * non-volatile memory for multiple ports.
196 */
197static inline void eth_mac_add(unsigned char *dst, unsigned char *src,
198 unsigned int add)
199{
200 int i;
201
202 BUG_ON(add >= 256);
203
204 for (i = ETH_ALEN - 1; i >= 0; i--) {
205 dst[i] = src[i] + add;
206 add = dst[i] < src[i]; /* compute carry */
207 }
208
209 WARN_ON(add);
210}
211
212static int __init mv643xx_eth_add_pds(void)
213{
214 unsigned char mac[ETH_ALEN];
215 int ret;
216
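	/*
	 * Read the single EEPROM-stored base MAC address and derive one
	 * address per enabled port by adding the port index to it.
	 */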
217 get_mac(mac);
218#ifdef CONFIG_MV643XX_ETH_0
219 eth_mac_add(eth0_mac_addr, mac, 0);
220#endif
221#ifdef CONFIG_MV643XX_ETH_1
222 eth_mac_add(eth1_mac_addr, mac, 1);
223#endif
224#ifdef CONFIG_MV643XX_ETH_2
225 eth_mac_add(eth2_mac_addr, mac, 2);
226#endif
227 ret = platform_add_devices(mv643xx_eth_pd_devs,
228 ARRAY_SIZE(mv643xx_eth_pd_devs));
229
230 return ret;
231}
232
233device_initcall(mv643xx_eth_add_pds);
234
235#endif /* defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE) */
diff --git a/arch/mips/momentum/jaguar_atx/prom.c b/arch/mips/momentum/jaguar_atx/prom.c
index 3d2712929293..5dd154ee58f6 100644
--- a/arch/mips/momentum/jaguar_atx/prom.c
+++ b/arch/mips/momentum/jaguar_atx/prom.c
@@ -39,56 +39,6 @@ const char *get_system_type(void)
39 return "Momentum Jaguar-ATX"; 39 return "Momentum Jaguar-ATX";
40} 40}
41 41
42#ifdef CONFIG_MV643XX_ETH
43extern unsigned char prom_mac_addr_base[6];
44
45static void burn_clocks(void)
46{
47 int i;
48
49 /* this loop should burn at least 1us -- this should be plenty */
50 for (i = 0; i < 0x10000; i++)
51 ;
52}
53
54static u8 exchange_bit(u8 val, u8 cs)
55{
56 /* place the data */
57 JAGUAR_FPGA_WRITE((val << 2) | cs, EEPROM_MODE);
58 burn_clocks();
59
60 /* turn the clock on */
61 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x2, EEPROM_MODE);
62 burn_clocks();
63
64 /* turn the clock off and read-strobe */
65 JAGUAR_FPGA_WRITE((val << 2) | cs | 0x10, EEPROM_MODE);
66
67 /* return the data */
68 return ((JAGUAR_FPGA_READ(EEPROM_MODE) >> 3) & 0x1);
69}
70
71void get_mac(char dest[6])
72{
73 u8 read_opcode[12] = {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
74 int i,j;
75
76 for (i = 0; i < 12; i++)
77 exchange_bit(read_opcode[i], 1);
78
79 for (j = 0; j < 6; j++) {
80 dest[j] = 0;
81 for (i = 0; i < 8; i++) {
82 dest[j] <<= 1;
83 dest[j] |= exchange_bit(0, 1);
84 }
85 }
86
87 /* turn off CS */
88 exchange_bit(0,0);
89}
90#endif
91
92#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
93 43
94unsigned long signext(unsigned long addr) 44unsigned long signext(unsigned long addr)
@@ -228,16 +178,10 @@ void __init prom_init(void)
228#endif /* CONFIG_64BIT */ 178#endif /* CONFIG_64BIT */
229 mips_machgroup = MACH_GROUP_MOMENCO; 179 mips_machgroup = MACH_GROUP_MOMENCO;
230 mips_machtype = MACH_MOMENCO_JAGUAR_ATX; 180 mips_machtype = MACH_MOMENCO_JAGUAR_ATX;
231
232#ifdef CONFIG_MV643XX_ETH
233 /* get the base MAC address for on-board ethernet ports */
234 get_mac(prom_mac_addr_base);
235#endif
236} 181}
237 182
238unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
239{ 184{
240 return 0;
241} 185}
242 186
243void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_3/irq.c b/arch/mips/momentum/ocelot_3/irq.c
index cea0e5deb80e..3862d1d1add4 100644
--- a/arch/mips/momentum/ocelot_3/irq.c
+++ b/arch/mips/momentum/ocelot_3/irq.c
@@ -65,7 +65,7 @@ void __init arch_init_irq(void)
65 */ 65 */
66 clear_c0_status(ST0_IM | ST0_BEV); 66 clear_c0_status(ST0_IM | ST0_BEV);
67 67
68 rm7k_cpu_irq_init(8); 68 rm7k_cpu_irq_init();
69 69
70 /* set up the cascading interrupts */ 70 /* set up the cascading interrupts */
71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */ 71 setup_irq(8, &cascade_mv64340); /* unmask intControl IM8, IRQ 9 */
diff --git a/arch/mips/momentum/ocelot_3/prom.c b/arch/mips/momentum/ocelot_3/prom.c
index 6ce9b7fdb824..8e02df63578a 100644
--- a/arch/mips/momentum/ocelot_3/prom.c
+++ b/arch/mips/momentum/ocelot_3/prom.c
@@ -180,9 +180,8 @@ void __init prom_init(void)
180#endif 180#endif
181} 181}
182 182
183unsigned long __init prom_free_prom_memory(void) 183void __init prom_free_prom_memory(void)
184{ 184{
185 return 0;
186} 185}
187 186
188void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 187void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c
index bb11fef08472..186a140fd2a9 100644
--- a/arch/mips/momentum/ocelot_c/cpci-irq.c
+++ b/arch/mips/momentum/ocelot_c/cpci-irq.c
@@ -84,7 +84,7 @@ void ll_cpci_irq(void)
84} 84}
85 85
86struct irq_chip cpci_irq_type = { 86struct irq_chip cpci_irq_type = {
87 .typename = "CPCI/FPGA", 87 .name = "CPCI/FPGA",
88 .ack = mask_cpci_irq, 88 .ack = mask_cpci_irq,
89 .mask = mask_cpci_irq, 89 .mask = mask_cpci_irq,
90 .mask_ack = mask_cpci_irq, 90 .mask_ack = mask_cpci_irq,
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_c/irq.c b/arch/mips/momentum/ocelot_c/irq.c
index ea65223a6d2c..40472f7944d7 100644
--- a/arch/mips/momentum/ocelot_c/irq.c
+++ b/arch/mips/momentum/ocelot_c/irq.c
@@ -94,7 +94,7 @@ void __init arch_init_irq(void)
94 */ 94 */
95 clear_c0_status(ST0_IM); 95 clear_c0_status(ST0_IM);
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 98
99 /* set up the cascading interrupts */ 99 /* set up the cascading interrupts */
100 setup_irq(3, &cascade_fpga); 100 setup_irq(3, &cascade_fpga);
diff --git a/arch/mips/momentum/ocelot_c/prom.c b/arch/mips/momentum/ocelot_c/prom.c
index d0b77e101d74..b689ceea8cfb 100644
--- a/arch/mips/momentum/ocelot_c/prom.c
+++ b/arch/mips/momentum/ocelot_c/prom.c
@@ -178,7 +178,6 @@ void __init prom_init(void)
178#endif 178#endif
179} 179}
180 180
181unsigned long __init prom_free_prom_memory(void) 181void __init prom_free_prom_memory(void)
182{ 182{
183 return 0;
184} 183}
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c
index a7a80c0da569..de1a31ee52f3 100644
--- a/arch/mips/momentum/ocelot_c/uart-irq.c
+++ b/arch/mips/momentum/ocelot_c/uart-irq.c
@@ -77,7 +77,7 @@ void ll_uart_irq(void)
77} 77}
78 78
79struct irq_chip uart_irq_type = { 79struct irq_chip uart_irq_type = {
80 .typename = "UART/FPGA", 80 .name = "UART/FPGA",
81 .ack = mask_uart_irq, 81 .ack = mask_uart_irq,
82 .mask = mask_uart_irq, 82 .mask = mask_uart_irq,
83 .mask_ack = mask_uart_irq, 83 .mask_ack = mask_uart_irq,
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 2128684584f5..32d6fb4ee679 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -1,6 +1,4 @@
1 1
2#ifdef CONFIG_KGDB
3
4#include <asm/serial.h> /* For the serial port location and base baud */ 2#include <asm/serial.h> /* For the serial port location and base baud */
5 3
6/* --- CONFIG --- */ 4/* --- CONFIG --- */
@@ -121,5 +119,3 @@ int putDebugChar(uint8 byte)
121 UART16550_WRITE(OFS_SEND_BUFFER, byte); 119 UART16550_WRITE(OFS_SEND_BUFFER, byte);
122 return 1; 120 return 1;
123} 121}
124
125#endif
diff --git a/arch/mips/momentum/ocelot_g/irq.c b/arch/mips/momentum/ocelot_g/irq.c
index da46524e87cb..273541fe7087 100644
--- a/arch/mips/momentum/ocelot_g/irq.c
+++ b/arch/mips/momentum/ocelot_g/irq.c
@@ -94,8 +94,8 @@ void __init arch_init_irq(void)
94 clear_c0_status(ST0_IM); 94 clear_c0_status(ST0_IM);
95 local_irq_disable(); 95 local_irq_disable();
96 96
97 mips_cpu_irq_init(0); 97 mips_cpu_irq_init();
98 rm7k_cpu_irq_init(8); 98 rm7k_cpu_irq_init();
99 99
100 gt64240_irq_init(); 100 gt64240_irq_init();
101} 101}
diff --git a/arch/mips/momentum/ocelot_g/prom.c b/arch/mips/momentum/ocelot_g/prom.c
index 2f75c6b91ec5..836d0830720d 100644
--- a/arch/mips/momentum/ocelot_g/prom.c
+++ b/arch/mips/momentum/ocelot_g/prom.c
@@ -79,7 +79,6 @@ void __init prom_init(void)
79 } 79 }
80} 80}
81 81
82unsigned long __init prom_free_prom_memory(void) 82void __init prom_free_prom_memory(void)
83{ 83{
84 return 0;
85} 84}
diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig
index 55feaf798596..ca395ef06d4e 100644
--- a/arch/mips/oprofile/Kconfig
+++ b/arch/mips/oprofile/Kconfig
@@ -11,7 +11,7 @@ config PROFILING
11 11
12config OPROFILE 12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)" 13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING && EXPERIMENTAL 14 depends on PROFILING && !MIPS_MT_SMTC && EXPERIMENTAL
15 help 15 help
16 OProfile is a profiling system capable of profiling the 16 OProfile is a profiling system capable of profiling the
17 whole system, including the kernel, kernel modules, libraries, 17 whole system, including the kernel, kernel modules, libraries,
diff --git a/arch/mips/pci/fixup-vr4133.c b/arch/mips/pci/fixup-vr4133.c
index 597b89764ba1..a8d9d22b13df 100644
--- a/arch/mips/pci/fixup-vr4133.c
+++ b/arch/mips/pci/fixup-vr4133.c
@@ -17,8 +17,10 @@
17 */ 17 */
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/kernel.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/i8259.h>
22#include <asm/vr41xx/cmbvr4133.h> 24#include <asm/vr41xx/cmbvr4133.h>
23 25
24extern int vr4133_rockhopper; 26extern int vr4133_rockhopper;
@@ -142,7 +144,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
142 if (bus == NULL) 144 if (bus == NULL)
143 return -1; 145 return -1;
144 146
145 for (i = 0; i < sizeof (int_map) / sizeof (int_map[0]); i++) { 147 for (i = 0; i < ARRAY_SIZE(int_map); i++) {
146 if (int_map[i].bus == bus->number && int_map[i].slot == slot) { 148 if (int_map[i].bus == bus->number && int_map[i].slot == slot) {
147 int line; 149 int line;
148 for (line = 0; line < 4; line++) 150 for (line = 0; line < 4; line++)
@@ -160,17 +162,7 @@ int rockhopper_get_irq(struct pci_dev *dev, u8 pin, u8 slot)
160#ifdef CONFIG_ROCKHOPPER 162#ifdef CONFIG_ROCKHOPPER
161void i8259_init(void) 163void i8259_init(void)
162{ 164{
163 outb(0x11, 0x20); /* Master ICW1 */ 165 init_i8259_irqs();
164 outb(I8259_IRQ_BASE, 0x21); /* Master ICW2 */
165 outb(0x04, 0x21); /* Master ICW3 */
166 outb(0x01, 0x21); /* Master ICW4 */
167 outb(0xff, 0x21); /* Master IMW */
168
169 outb(0x11, 0xa0); /* Slave ICW1 */
170 outb(I8259_IRQ_BASE + 8, 0xa1); /* Slave ICW2 */
171 outb(0x02, 0xa1); /* Slave ICW3 */
172 outb(0x01, 0xa1); /* Slave ICW4 */
173 outb(0xff, 0xa1); /* Slave IMW */
174 166
175 outb(0x00, 0x4d0); 167 outb(0x00, 0x4d0);
176 outb(0x02, 0x4d1); /* USB IRQ9 is level */ 168 outb(0x02, 0x4d1); /* USB IRQ9 is level */
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c
index 2c36c108c4d6..d48665ebd33c 100644
--- a/arch/mips/philips/pnx8550/common/int.c
+++ b/arch/mips/philips/pnx8550/common/int.c
@@ -159,7 +159,7 @@ int pnx8550_set_gic_priority(int irq, int priority)
159} 159}
160 160
161static struct irq_chip level_irq_type = { 161static struct irq_chip level_irq_type = {
162 .typename = "PNX Level IRQ", 162 .name = "PNX Level IRQ",
163 .ack = mask_irq, 163 .ack = mask_irq,
164 .mask = mask_irq, 164 .mask = mask_irq,
165 .mask_ack = mask_irq, 165 .mask_ack = mask_irq,
diff --git a/arch/mips/philips/pnx8550/common/prom.c b/arch/mips/philips/pnx8550/common/prom.c
index eb6ec11fef07..8aeed6c2b8c3 100644
--- a/arch/mips/philips/pnx8550/common/prom.c
+++ b/arch/mips/philips/pnx8550/common/prom.c
@@ -106,9 +106,8 @@ int get_ethernet_addr(char *ethernet_addr)
106 return 0; 106 return 0;
107} 107}
108 108
109unsigned long __init prom_free_prom_memory(void) 109void __init prom_free_prom_memory(void)
110{ 110{
111 return 0;
112} 111}
113 112
114extern int pnx8550_console_port; 113extern int pnx8550_console_port;
diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c
index 0f659c9106ac..6362c702e389 100644
--- a/arch/mips/pmc-sierra/yosemite/dbg_io.c
+++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c
@@ -93,7 +93,7 @@
93 * Functions to READ and WRITE to serial port 1 93 * Functions to READ and WRITE to serial port 1
94 */ 94 */
95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ 95#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \
96 (TITAN_SERIAL_BASE_1 + ofs) 96 (TITAN_SERIAL_BASE_1 + ofs)))
97 97
98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ 98#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \
99 (TITAN_SERIAL_BASE_1 + ofs))) = val) 99 (TITAN_SERIAL_BASE_1 + ofs))) = val)
diff --git a/arch/mips/pmc-sierra/yosemite/irq.c b/arch/mips/pmc-sierra/yosemite/irq.c
index adb048527e76..428d1f45a287 100644
--- a/arch/mips/pmc-sierra/yosemite/irq.c
+++ b/arch/mips/pmc-sierra/yosemite/irq.c
@@ -148,9 +148,9 @@ void __init arch_init_irq(void)
148{ 148{
149 clear_c0_status(ST0_IM); 149 clear_c0_status(ST0_IM);
150 150
151 mips_cpu_irq_init(0); 151 mips_cpu_irq_init();
152 rm7k_cpu_irq_init(8); 152 rm7k_cpu_irq_init();
153 rm9k_cpu_irq_init(12); 153 rm9k_cpu_irq_init();
154 154
155#ifdef CONFIG_KGDB 155#ifdef CONFIG_KGDB
156 /* At this point, initialize the second serial port */ 156 /* At this point, initialize the second serial port */
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c
index 9fe4973377c3..1e1685e415a4 100644
--- a/arch/mips/pmc-sierra/yosemite/prom.c
+++ b/arch/mips/pmc-sierra/yosemite/prom.c
@@ -132,9 +132,8 @@ void __init prom_init(void)
132 prom_grab_secondary(); 132 prom_grab_secondary();
133} 133}
134 134
135unsigned long __init prom_free_prom_memory(void) 135void __init prom_free_prom_memory(void)
136{ 136{
137 return 0;
138} 137}
139 138
140void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 139void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 1b9b0d396d3e..6a6e15e40009 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -171,6 +171,7 @@ static void __init py_map_ocd(void)
171 171
172static void __init py_uart_setup(void) 172static void __init py_uart_setup(void)
173{ 173{
174#ifdef CONFIG_SERIAL_8250
174 struct uart_port up; 175 struct uart_port up;
175 176
176 /* 177 /*
@@ -188,6 +189,7 @@ static void __init py_uart_setup(void)
188 189
189 if (early_serial_setup(&up)) 190 if (early_serial_setup(&up))
190 printk(KERN_ERR "Early serial init of port 0 failed\n"); 191 printk(KERN_ERR "Early serial init of port 0 failed\n");
192#endif /* CONFIG_SERIAL_8250 */
191} 193}
192 194
193static void __init py_rtc_setup(void) 195static void __init py_rtc_setup(void)
diff --git a/arch/mips/qemu/q-mem.c b/arch/mips/qemu/q-mem.c
index d174fac43031..dae39b59de15 100644
--- a/arch/mips/qemu/q-mem.c
+++ b/arch/mips/qemu/q-mem.c
@@ -1,6 +1,5 @@
1#include <linux/init.h> 1#include <linux/init.h>
2 2
3unsigned long __init prom_free_prom_memory(void) 3void __init prom_free_prom_memory(void)
4{ 4{
5 return 0UL;
6} 5}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index a1a9af6da7bf..6b6e97b90c6e 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -139,7 +139,7 @@ static void end_eisa1_irq(unsigned int irq)
139} 139}
140 140
141static struct irq_chip ip22_eisa1_irq_type = { 141static struct irq_chip ip22_eisa1_irq_type = {
142 .typename = "IP22 EISA", 142 .name = "IP22 EISA",
143 .startup = startup_eisa1_irq, 143 .startup = startup_eisa1_irq,
144 .ack = mask_and_ack_eisa1_irq, 144 .ack = mask_and_ack_eisa1_irq,
145 .mask = disable_eisa1_irq, 145 .mask = disable_eisa1_irq,
@@ -194,7 +194,7 @@ static void end_eisa2_irq(unsigned int irq)
194} 194}
195 195
196static struct irq_chip ip22_eisa2_irq_type = { 196static struct irq_chip ip22_eisa2_irq_type = {
197 .typename = "IP22 EISA", 197 .name = "IP22 EISA",
198 .startup = startup_eisa2_irq, 198 .startup = startup_eisa2_irq,
199 .ack = mask_and_ack_eisa2_irq, 199 .ack = mask_and_ack_eisa2_irq,
200 .mask = disable_eisa2_irq, 200 .mask = disable_eisa2_irq,
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index c44f8be0644f..b454924aeb56 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -19,6 +19,7 @@
19 19
20#include <asm/mipsregs.h> 20#include <asm/mipsregs.h>
21#include <asm/addrspace.h> 21#include <asm/addrspace.h>
22#include <asm/irq_cpu.h>
22 23
23#include <asm/sgi/ioc.h> 24#include <asm/sgi/ioc.h>
24#include <asm/sgi/hpc3.h> 25#include <asm/sgi/hpc3.h>
@@ -52,7 +53,7 @@ static void disable_local0_irq(unsigned int irq)
52} 53}
53 54
54static struct irq_chip ip22_local0_irq_type = { 55static struct irq_chip ip22_local0_irq_type = {
55 .typename = "IP22 local 0", 56 .name = "IP22 local 0",
56 .ack = disable_local0_irq, 57 .ack = disable_local0_irq,
57 .mask = disable_local0_irq, 58 .mask = disable_local0_irq,
58 .mask_ack = disable_local0_irq, 59 .mask_ack = disable_local0_irq,
@@ -73,7 +74,7 @@ void disable_local1_irq(unsigned int irq)
73} 74}
74 75
75static struct irq_chip ip22_local1_irq_type = { 76static struct irq_chip ip22_local1_irq_type = {
76 .typename = "IP22 local 1", 77 .name = "IP22 local 1",
77 .ack = disable_local1_irq, 78 .ack = disable_local1_irq,
78 .mask = disable_local1_irq, 79 .mask = disable_local1_irq,
79 .mask_ack = disable_local1_irq, 80 .mask_ack = disable_local1_irq,
@@ -94,7 +95,7 @@ void disable_local2_irq(unsigned int irq)
94} 95}
95 96
96static struct irq_chip ip22_local2_irq_type = { 97static struct irq_chip ip22_local2_irq_type = {
97 .typename = "IP22 local 2", 98 .name = "IP22 local 2",
98 .ack = disable_local2_irq, 99 .ack = disable_local2_irq,
99 .mask = disable_local2_irq, 100 .mask = disable_local2_irq,
100 .mask_ack = disable_local2_irq, 101 .mask_ack = disable_local2_irq,
@@ -115,7 +116,7 @@ void disable_local3_irq(unsigned int irq)
115} 116}
116 117
117static struct irq_chip ip22_local3_irq_type = { 118static struct irq_chip ip22_local3_irq_type = {
118 .typename = "IP22 local 3", 119 .name = "IP22 local 3",
119 .ack = disable_local3_irq, 120 .ack = disable_local3_irq,
120 .mask = disable_local3_irq, 121 .mask = disable_local3_irq,
121 .mask_ack = disable_local3_irq, 122 .mask_ack = disable_local3_irq,
@@ -253,8 +254,6 @@ asmlinkage void plat_irq_dispatch(void)
253 indy_8254timer_irq(); 254 indy_8254timer_irq();
254} 255}
255 256
256extern void mips_cpu_irq_init(unsigned int irq_base);
257
258void __init arch_init_irq(void) 257void __init arch_init_irq(void)
259{ 258{
260 int i; 259 int i;
@@ -316,7 +315,7 @@ void __init arch_init_irq(void)
316 sgint->cmeimask1 = 0; 315 sgint->cmeimask1 = 0;
317 316
318 /* init CPU irqs */ 317 /* init CPU irqs */
319 mips_cpu_irq_init(SGINT_CPU); 318 mips_cpu_irq_init();
320 319
321 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) { 320 for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
322 struct irq_chip *handler; 321 struct irq_chip *handler;
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index b58bd522262b..ddb6506d8341 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -202,7 +202,6 @@ void __init sgimc_init(void)
202} 202}
203 203
204void __init prom_meminit(void) {} 204void __init prom_meminit(void) {}
205unsigned long __init prom_free_prom_memory(void) 205void __init prom_free_prom_memory(void)
206{ 206{
207 return 0;
208} 207}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 319f8803ef6f..60ade7690e09 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -333,7 +333,7 @@ static inline void disable_bridge_irq(unsigned int irq)
333} 333}
334 334
335static struct irq_chip bridge_irq_type = { 335static struct irq_chip bridge_irq_type = {
336 .typename = "bridge", 336 .name = "bridge",
337 .startup = startup_bridge_irq, 337 .startup = startup_bridge_irq,
338 .shutdown = shutdown_bridge_irq, 338 .shutdown = shutdown_bridge_irq,
339 .ack = disable_bridge_irq, 339 .ack = disable_bridge_irq,
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 16e5682b01f1..0e3d535e9f43 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -498,10 +498,9 @@ void __init prom_meminit(void)
498 } 498 }
499} 499}
500 500
501unsigned long __init prom_free_prom_memory(void) 501void __init prom_free_prom_memory(void)
502{ 502{
503 /* We got nothing to free here ... */ 503 /* We got nothing to free here ... */
504 return 0;
505} 504}
506 505
507extern void pagetable_init(void); 506extern void pagetable_init(void);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c20e9899b34b..9ce513629b14 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -181,7 +181,7 @@ static void disable_rt_irq(unsigned int irq)
181} 181}
182 182
183static struct irq_chip rt_irq_type = { 183static struct irq_chip rt_irq_type = {
184 .typename = "SN HUB RT timer", 184 .name = "SN HUB RT timer",
185 .ack = disable_rt_irq, 185 .ack = disable_rt_irq,
186 .mask = disable_rt_irq, 186 .mask = disable_rt_irq,
187 .mask_ack = disable_rt_irq, 187 .mask_ack = disable_rt_irq,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index ae063864c026..8c450d9e8696 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -144,7 +144,7 @@ static void end_cpu_irq(unsigned int irq)
144} 144}
145 145
146static struct irq_chip ip32_cpu_interrupt = { 146static struct irq_chip ip32_cpu_interrupt = {
147 .typename = "IP32 CPU", 147 .name = "IP32 CPU",
148 .ack = disable_cpu_irq, 148 .ack = disable_cpu_irq,
149 .mask = disable_cpu_irq, 149 .mask = disable_cpu_irq,
150 .mask_ack = disable_cpu_irq, 150 .mask_ack = disable_cpu_irq,
@@ -193,7 +193,7 @@ static void end_crime_irq(unsigned int irq)
193} 193}
194 194
195static struct irq_chip ip32_crime_interrupt = { 195static struct irq_chip ip32_crime_interrupt = {
196 .typename = "IP32 CRIME", 196 .name = "IP32 CRIME",
197 .ack = mask_and_ack_crime_irq, 197 .ack = mask_and_ack_crime_irq,
198 .mask = disable_crime_irq, 198 .mask = disable_crime_irq,
199 .mask_ack = mask_and_ack_crime_irq, 199 .mask_ack = mask_and_ack_crime_irq,
@@ -234,7 +234,7 @@ static void end_macepci_irq(unsigned int irq)
234} 234}
235 235
236static struct irq_chip ip32_macepci_interrupt = { 236static struct irq_chip ip32_macepci_interrupt = {
237 .typename = "IP32 MACE PCI", 237 .name = "IP32 MACE PCI",
238 .ack = disable_macepci_irq, 238 .ack = disable_macepci_irq,
239 .mask = disable_macepci_irq, 239 .mask = disable_macepci_irq,
240 .mask_ack = disable_macepci_irq, 240 .mask_ack = disable_macepci_irq,
@@ -347,7 +347,7 @@ static void end_maceisa_irq(unsigned irq)
347} 347}
348 348
349static struct irq_chip ip32_maceisa_interrupt = { 349static struct irq_chip ip32_maceisa_interrupt = {
350 .typename = "IP32 MACE ISA", 350 .name = "IP32 MACE ISA",
351 .ack = mask_and_ack_maceisa_irq, 351 .ack = mask_and_ack_maceisa_irq,
352 .mask = disable_maceisa_irq, 352 .mask = disable_maceisa_irq,
353 .mask_ack = mask_and_ack_maceisa_irq, 353 .mask_ack = mask_and_ack_maceisa_irq,
@@ -379,7 +379,7 @@ static void end_mace_irq(unsigned int irq)
379} 379}
380 380
381static struct irq_chip ip32_mace_interrupt = { 381static struct irq_chip ip32_mace_interrupt = {
382 .typename = "IP32 MACE", 382 .name = "IP32 MACE",
383 .ack = disable_mace_irq, 383 .ack = disable_mace_irq,
384 .mask = disable_mace_irq, 384 .mask = disable_mace_irq,
385 .mask_ack = disable_mace_irq, 385 .mask_ack = disable_mace_irq,
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index d37d40a3cdae..849d392a0013 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -43,7 +43,6 @@ void __init prom_meminit (void)
43} 43}
44 44
45 45
46unsigned long __init prom_free_prom_memory (void) 46void __init prom_free_prom_memory(void)
47{ 47{
48 return 0;
49} 48}
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 2e8f6b2e2420..1dc5d05d8962 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -82,7 +82,7 @@ extern char sb1250_duart_present[];
82#endif 82#endif
83 83
84static struct irq_chip bcm1480_irq_type = { 84static struct irq_chip bcm1480_irq_type = {
85 .typename = "BCM1480-IMR", 85 .name = "BCM1480-IMR",
86 .ack = ack_bcm1480_irq, 86 .ack = ack_bcm1480_irq,
87 .mask = disable_bcm1480_irq, 87 .mask = disable_bcm1480_irq,
88 .mask_ack = ack_bcm1480_irq, 88 .mask_ack = ack_bcm1480_irq,
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c
index 6e8952da6e2a..9e6099e69622 100644
--- a/arch/mips/sibyte/cfe/setup.c
+++ b/arch/mips/sibyte/cfe/setup.c
@@ -343,10 +343,9 @@ void __init prom_init(void)
343 prom_meminit(); 343 prom_meminit();
344} 344}
345 345
346unsigned long __init prom_free_prom_memory(void) 346void __init prom_free_prom_memory(void)
347{ 347{
348 /* Not sure what I'm supposed to do here. Nothing, I think */ 348 /* Not sure what I'm supposed to do here. Nothing, I think */
349 return 0;
350} 349}
351 350
352void prom_putchar(char c) 351void prom_putchar(char c)
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 82ce7533053f..148239446e6e 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -67,7 +67,7 @@ extern char sb1250_duart_present[];
67#endif 67#endif
68 68
69static struct irq_chip sb1250_irq_type = { 69static struct irq_chip sb1250_irq_type = {
70 .typename = "SB1250-IMR", 70 .name = "SB1250-IMR",
71 .ack = ack_sb1250_irq, 71 .ack = ack_sb1250_irq,
72 .mask = disable_sb1250_irq, 72 .mask = disable_sb1250_irq,
73 .mask_ack = ack_sb1250_irq, 73 .mask_ack = ack_sb1250_irq,
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c
index 3c33a4517bc3..257c4e674353 100644
--- a/arch/mips/sibyte/sb1250/prom.c
+++ b/arch/mips/sibyte/sb1250/prom.c
@@ -87,10 +87,9 @@ void __init prom_init(void)
87 prom_meminit(); 87 prom_meminit();
88} 88}
89 89
90unsigned long __init prom_free_prom_memory(void) 90void __init prom_free_prom_memory(void)
91{ 91{
92 /* Not sure what I'm supposed to do here. Nothing, I think */ 92 /* Not sure what I'm supposed to do here. Nothing, I think */
93 return 0;
94} 93}
95 94
96void prom_putchar(char c) 95void prom_putchar(char c)
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 8511bcc6d99d..039e8e540508 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -37,7 +37,7 @@ static void end_pciasic_irq(unsigned int irq)
37} 37}
38 38
39static struct irq_chip pciasic_irq_type = { 39static struct irq_chip pciasic_irq_type = {
40 .typename = "ASIC-PCI", 40 .name = "ASIC-PCI",
41 .ack = disable_pciasic_irq, 41 .ack = disable_pciasic_irq,
42 .mask = disable_pciasic_irq, 42 .mask = disable_pciasic_irq,
43 .mask_ack = disable_pciasic_irq, 43 .mask_ack = disable_pciasic_irq,
diff --git a/arch/mips/sni/sniprom.c b/arch/mips/sni/sniprom.c
index d1d0f1f493b4..1213d166f22e 100644
--- a/arch/mips/sni/sniprom.c
+++ b/arch/mips/sni/sniprom.c
@@ -67,9 +67,8 @@ void prom_printf(char *fmt, ...)
67 va_end(args); 67 va_end(args);
68} 68}
69 69
70unsigned long prom_free_prom_memory(void) 70void __init prom_free_prom_memory(void)
71{ 71{
72 return 0;
73} 72}
74 73
75/* 74/*
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c
index ed4a19adf361..e7f3e5b84dcf 100644
--- a/arch/mips/tx4927/common/tx4927_irq.c
+++ b/arch/mips/tx4927/common/tx4927_irq.c
@@ -120,7 +120,7 @@ static void tx4927_irq_pic_disable(unsigned int irq);
120 120
121#define TX4927_CP0_NAME "TX4927-CP0" 121#define TX4927_CP0_NAME "TX4927-CP0"
122static struct irq_chip tx4927_irq_cp0_type = { 122static struct irq_chip tx4927_irq_cp0_type = {
123 .typename = TX4927_CP0_NAME, 123 .name = TX4927_CP0_NAME,
124 .ack = tx4927_irq_cp0_disable, 124 .ack = tx4927_irq_cp0_disable,
125 .mask = tx4927_irq_cp0_disable, 125 .mask = tx4927_irq_cp0_disable,
126 .mask_ack = tx4927_irq_cp0_disable, 126 .mask_ack = tx4927_irq_cp0_disable,
@@ -129,7 +129,7 @@ static struct irq_chip tx4927_irq_cp0_type = {
129 129
130#define TX4927_PIC_NAME "TX4927-PIC" 130#define TX4927_PIC_NAME "TX4927-PIC"
131static struct irq_chip tx4927_irq_pic_type = { 131static struct irq_chip tx4927_irq_pic_type = {
132 .typename = TX4927_PIC_NAME, 132 .name = TX4927_PIC_NAME,
133 .ack = tx4927_irq_pic_disable, 133 .ack = tx4927_irq_pic_disable,
134 .mask = tx4927_irq_pic_disable, 134 .mask = tx4927_irq_pic_disable,
135 .mask_ack = tx4927_irq_pic_disable, 135 .mask_ack = tx4927_irq_pic_disable,
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
index b54b529a29f9..dcce88f403c9 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
@@ -228,7 +228,7 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
228 228
229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC" 229#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { 230static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
231 .typename = TOSHIBA_RBTX4927_IOC_NAME, 231 .name = TOSHIBA_RBTX4927_IOC_NAME,
232 .ack = toshiba_rbtx4927_irq_ioc_disable, 232 .ack = toshiba_rbtx4927_irq_ioc_disable,
233 .mask = toshiba_rbtx4927_irq_ioc_disable, 233 .mask = toshiba_rbtx4927_irq_ioc_disable,
234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable, 234 .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
@@ -241,7 +241,7 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
241#ifdef CONFIG_TOSHIBA_FPCIB0 241#ifdef CONFIG_TOSHIBA_FPCIB0
242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA" 242#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
243static struct irq_chip toshiba_rbtx4927_irq_isa_type = { 243static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
244 .typename = TOSHIBA_RBTX4927_ISA_NAME, 244 .name = TOSHIBA_RBTX4927_ISA_NAME,
245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 245 .ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
246 .mask = toshiba_rbtx4927_irq_isa_disable, 246 .mask = toshiba_rbtx4927_irq_isa_disable,
247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack, 247 .mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
@@ -490,13 +490,13 @@ void toshiba_rbtx4927_irq_dump(char *key)
490 { 490 {
491 u32 i, j = 0; 491 u32 i, j = 0;
492 for (i = 0; i < NR_IRQS; i++) { 492 for (i = 0; i < NR_IRQS; i++) {
493 if (strcmp(irq_desc[i].chip->typename, "none") 493 if (strcmp(irq_desc[i].chip->name, "none")
494 == 0) 494 == 0)
495 continue; 495 continue;
496 496
497 if ((i >= 1) 497 if ((i >= 1)
498 && (irq_desc[i - 1].chip->typename == 498 && (irq_desc[i - 1].chip->name ==
499 irq_desc[i].chip->typename)) { 499 irq_desc[i].chip->name)) {
500 j++; 500 j++;
501 } else { 501 } else {
502 j = 0; 502 j = 0;
@@ -510,7 +510,7 @@ void toshiba_rbtx4927_irq_dump(char *key)
510 (u32) (irq_desc[i].action ? irq_desc[i]. 510 (u32) (irq_desc[i].action ? irq_desc[i].
511 action->handler : 0), 511 action->handler : 0),
512 irq_desc[i].depth, 512 irq_desc[i].depth,
513 irq_desc[i].chip->typename, j); 513 irq_desc[i].chip->name, j);
514 } 514 }
515 } 515 }
516#endif 516#endif
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
index efe50562f0ce..9a3a5babd1fb 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_prom.c
@@ -80,9 +80,8 @@ void __init prom_init(void)
80 add_memory_region(0, msize << 20, BOOT_MEM_RAM); 80 add_memory_region(0, msize << 20, BOOT_MEM_RAM);
81} 81}
82 82
83unsigned long __init prom_free_prom_memory(void) 83void __init prom_free_prom_memory(void)
84{ 84{
85 return 0;
86} 85}
87 86
88const char *get_system_type(void) 87const char *get_system_type(void)
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c
index a347b424d91c..3a2dbfc25014 100644
--- a/arch/mips/tx4938/common/irq.c
+++ b/arch/mips/tx4938/common/irq.c
@@ -49,7 +49,7 @@ static void tx4938_irq_pic_disable(unsigned int irq);
49 49
50#define TX4938_CP0_NAME "TX4938-CP0" 50#define TX4938_CP0_NAME "TX4938-CP0"
51static struct irq_chip tx4938_irq_cp0_type = { 51static struct irq_chip tx4938_irq_cp0_type = {
52 .typename = TX4938_CP0_NAME, 52 .name = TX4938_CP0_NAME,
53 .ack = tx4938_irq_cp0_disable, 53 .ack = tx4938_irq_cp0_disable,
54 .mask = tx4938_irq_cp0_disable, 54 .mask = tx4938_irq_cp0_disable,
55 .mask_ack = tx4938_irq_cp0_disable, 55 .mask_ack = tx4938_irq_cp0_disable,
@@ -58,7 +58,7 @@ static struct irq_chip tx4938_irq_cp0_type = {
58 58
59#define TX4938_PIC_NAME "TX4938-PIC" 59#define TX4938_PIC_NAME "TX4938-PIC"
60static struct irq_chip tx4938_irq_pic_type = { 60static struct irq_chip tx4938_irq_pic_type = {
61 .typename = TX4938_PIC_NAME, 61 .name = TX4938_PIC_NAME,
62 .ack = tx4938_irq_pic_disable, 62 .ack = tx4938_irq_pic_disable,
63 .mask = tx4938_irq_pic_disable, 63 .mask = tx4938_irq_pic_disable,
64 .mask_ack = tx4938_irq_pic_disable, 64 .mask_ack = tx4938_irq_pic_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
index b6f363d08011..2e96dbb248b1 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c
@@ -92,7 +92,7 @@ static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
92 92
93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" 93#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { 94static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
95 .typename = TOSHIBA_RBTX4938_IOC_NAME, 95 .name = TOSHIBA_RBTX4938_IOC_NAME,
96 .ack = toshiba_rbtx4938_irq_ioc_disable, 96 .ack = toshiba_rbtx4938_irq_ioc_disable,
97 .mask = toshiba_rbtx4938_irq_ioc_disable, 97 .mask = toshiba_rbtx4938_irq_ioc_disable,
98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable, 98 .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/prom.c b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
index e44daf30a7c1..7dc6a0aae21c 100644
--- a/arch/mips/tx4938/toshiba_rbtx4938/prom.c
+++ b/arch/mips/tx4938/toshiba_rbtx4938/prom.c
@@ -56,9 +56,8 @@ void __init prom_init(void)
56 return; 56 return;
57} 57}
58 58
59unsigned long __init prom_free_prom_memory(void) 59void __init prom_free_prom_memory(void)
60{ 60{
61 return 0;
62} 61}
63 62
64void __init prom_fixup_mem_map(unsigned long start, unsigned long end) 63void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index c075261976c5..adabc6bad440 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2001-2002 MontaVista Software Inc. 4 * Copyright (C) 2001-2002 MontaVista Software Inc.
5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com> 5 * Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
6 * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> 6 * Copyright (C) 2003-2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@ static unsigned char sysint2_assign[16] = {
68#define MPIUINTREG 0x0e 68#define MPIUINTREG 0x0e
69#define MAIUINTREG 0x10 69#define MAIUINTREG 0x10
70#define MKIUINTREG 0x12 70#define MKIUINTREG 0x12
71#define MMACINTREG 0x12
71#define MGIUINTLREG 0x14 72#define MGIUINTLREG 0x14
72#define MDSIUINTREG 0x16 73#define MDSIUINTREG 0x16
73#define NMIREG 0x18 74#define NMIREG 0x18
@@ -241,6 +242,30 @@ void vr41xx_disable_kiuint(uint16_t mask)
241 242
242EXPORT_SYMBOL(vr41xx_disable_kiuint); 243EXPORT_SYMBOL(vr41xx_disable_kiuint);
243 244
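/*
 * Enable/disable Ethernet MAC interrupt sources. Both helpers take the
 * Ethernet IRQ descriptor lock while updating MMACINTREG.
 */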
245void vr41xx_enable_macint(uint16_t mask)
246{
247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
248 unsigned long flags;
249
250 spin_lock_irqsave(&desc->lock, flags);
251 icu1_set(MMACINTREG, mask);
252 spin_unlock_irqrestore(&desc->lock, flags);
253}
254
255EXPORT_SYMBOL(vr41xx_enable_macint);
256
257void vr41xx_disable_macint(uint16_t mask)
258{
259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
260 unsigned long flags;
261
262 spin_lock_irqsave(&desc->lock, flags);
263 icu1_clear(MMACINTREG, mask);
264 spin_unlock_irqrestore(&desc->lock, flags);
265}
266
267EXPORT_SYMBOL(vr41xx_disable_macint);
268
244void vr41xx_enable_dsiuint(uint16_t mask) 269void vr41xx_enable_dsiuint(uint16_t mask)
245{ 270{
246 struct irq_desc *desc = irq_desc + DSIU_IRQ; 271 struct irq_desc *desc = irq_desc + DSIU_IRQ;
@@ -428,7 +453,7 @@ static void enable_sysint1_irq(unsigned int irq)
428} 453}
429 454
430static struct irq_chip sysint1_irq_type = { 455static struct irq_chip sysint1_irq_type = {
431 .typename = "SYSINT1", 456 .name = "SYSINT1",
432 .ack = disable_sysint1_irq, 457 .ack = disable_sysint1_irq,
433 .mask = disable_sysint1_irq, 458 .mask = disable_sysint1_irq,
434 .mask_ack = disable_sysint1_irq, 459 .mask_ack = disable_sysint1_irq,
@@ -446,7 +471,7 @@ static void enable_sysint2_irq(unsigned int irq)
446} 471}
447 472
448static struct irq_chip sysint2_irq_type = { 473static struct irq_chip sysint2_irq_type = {
449 .typename = "SYSINT2", 474 .name = "SYSINT2",
450 .ack = disable_sysint2_irq, 475 .ack = disable_sysint2_irq,
451 .mask = disable_sysint2_irq, 476 .mask = disable_sysint2_irq,
452 .mask_ack = disable_sysint2_irq, 477 .mask_ack = disable_sysint2_irq,
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index a2e285c1d4d5..4f97e0ba9e24 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -81,7 +81,6 @@ void __init prom_init(void)
81 } 81 }
82} 82}
83 83
84unsigned long __init prom_free_prom_memory (void) 84void __init prom_free_prom_memory(void)
85{ 85{
86 return 0UL;
87} 86}
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 16decf4ac2f4..cba36a247e32 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -95,27 +95,27 @@ asmlinkage void plat_irq_dispatch(void)
95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 95 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
96 96
97 if (pending & CAUSEF_IP7) 97 if (pending & CAUSEF_IP7)
98 do_IRQ(7); 98 do_IRQ(TIMER_IRQ);
99 else if (pending & 0x7800) { 99 else if (pending & 0x7800) {
100 if (pending & CAUSEF_IP3) 100 if (pending & CAUSEF_IP3)
101 irq_dispatch(3); 101 irq_dispatch(INT1_IRQ);
102 else if (pending & CAUSEF_IP4) 102 else if (pending & CAUSEF_IP4)
103 irq_dispatch(4); 103 irq_dispatch(INT2_IRQ);
104 else if (pending & CAUSEF_IP5) 104 else if (pending & CAUSEF_IP5)
105 irq_dispatch(5); 105 irq_dispatch(INT3_IRQ);
106 else if (pending & CAUSEF_IP6) 106 else if (pending & CAUSEF_IP6)
107 irq_dispatch(6); 107 irq_dispatch(INT4_IRQ);
108 } else if (pending & CAUSEF_IP2) 108 } else if (pending & CAUSEF_IP2)
109 irq_dispatch(2); 109 irq_dispatch(INT0_IRQ);
110 else if (pending & CAUSEF_IP0) 110 else if (pending & CAUSEF_IP0)
111 do_IRQ(0); 111 do_IRQ(MIPS_SOFTINT0_IRQ);
112 else if (pending & CAUSEF_IP1) 112 else if (pending & CAUSEF_IP1)
113 do_IRQ(1); 113 do_IRQ(MIPS_SOFTINT1_IRQ);
114 else 114 else
115 spurious_interrupt(); 115 spurious_interrupt();
116} 116}
117 117
118void __init arch_init_irq(void) 118void __init arch_init_irq(void)
119{ 119{
120 mips_cpu_irq_init(MIPS_CPU_IRQ_BASE); 120 mips_cpu_irq_init();
121} 121}
diff --git a/arch/mips/vr41xx/nec-cmbvr4133/irq.c b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
index 128ed8d6f111..7d2d076b0f54 100644
--- a/arch/mips/vr41xx/nec-cmbvr4133/irq.c
+++ b/arch/mips/vr41xx/nec-cmbvr4133/irq.c
@@ -21,60 +21,16 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22 22
23#include <asm/io.h> 23#include <asm/io.h>
24#include <asm/i8259.h>
24#include <asm/vr41xx/cmbvr4133.h> 25#include <asm/vr41xx/cmbvr4133.h>
25 26
26extern void enable_8259A_irq(unsigned int irq);
27extern void disable_8259A_irq(unsigned int irq);
28extern void mask_and_ack_8259A(unsigned int irq);
29extern void init_8259A(int hoge);
30
31extern int vr4133_rockhopper; 27extern int vr4133_rockhopper;
32 28
33static void enable_i8259_irq(unsigned int irq)
34{
35 enable_8259A_irq(irq - I8259_IRQ_BASE);
36}
37
38static void disable_i8259_irq(unsigned int irq)
39{
40 disable_8259A_irq(irq - I8259_IRQ_BASE);
41}
42
43static void ack_i8259_irq(unsigned int irq)
44{
45 mask_and_ack_8259A(irq - I8259_IRQ_BASE);
46}
47
48static struct irq_chip i8259_irq_type = {
49 .typename = "XT-PIC",
50 .ack = ack_i8259_irq,
51 .mask = disable_i8259_irq,
52 .mask_ack = ack_i8259_irq,
53 .unmask = enable_i8259_irq,
54};
55
56static int i8259_get_irq_number(int irq) 29static int i8259_get_irq_number(int irq)
57{ 30{
58 unsigned long isr; 31 return i8259_irq();
59
60 isr = inb(0x20);
61 irq = ffz(~isr);
62 if (irq == 2) {
63 isr = inb(0xa0);
64 irq = 8 + ffz(~isr);
65 }
66
67 if (irq < 0 || irq > 15)
68 return -EINVAL;
69
70 return I8259_IRQ_BASE + irq;
71} 32}
72 33
73static struct irqaction i8259_slave_cascade = {
74 .handler = &no_action,
75 .name = "cascade",
76};
77
78void __init rockhopper_init_irq(void) 34void __init rockhopper_init_irq(void)
79{ 35{
80 int i; 36 int i;
@@ -84,11 +40,6 @@ void __init rockhopper_init_irq(void)
84 return; 40 return;
85 } 41 }
86 42
87 for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
88 set_irq_chip_and_handler(i, &i8259_irq_type, handle_level_irq);
89
90 setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);
91
92 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH); 43 vr41xx_set_irq_trigger(CMBVR41XX_INTC_PIN, TRIGGER_LEVEL, SIGNAL_THROUGH);
93 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH); 44 vr41xx_set_irq_level(CMBVR41XX_INTC_PIN, LEVEL_HIGH);
94 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number); 45 vr41xx_cascade_irq(CMBVR41XX_INTC_IRQ, i8259_get_irq_number);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index abc6bd2f858e..f08e80a0bf0a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -552,6 +552,11 @@ config PPC_PS3
552 bool "Sony PS3 (incomplete)" 552 bool "Sony PS3 (incomplete)"
553 depends on PPC_MULTIPLATFORM && PPC64 553 depends on PPC_MULTIPLATFORM && PPC64
554 select PPC_CELL 554 select PPC_CELL
555 select USB_ARCH_HAS_OHCI
556 select USB_OHCI_LITTLE_ENDIAN
557 select USB_OHCI_BIG_ENDIAN_MMIO
558 select USB_ARCH_HAS_EHCI
559 select USB_EHCI_BIG_ENDIAN_MMIO
555 help 560 help
556 This option enables support for the Sony PS3 game console 561 This option enables support for the Sony PS3 game console
557 and other platforms using the PS3 hypervisor. 562 and other platforms using the PS3 hypervisor.
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 6828df4afd99..7e97d71a5f8f 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -381,8 +381,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
381 381
382 pci_device_add(dev, bus); 382 pci_device_add(dev, bus);
383 383
384 /* XXX pci_scan_msi_device(dev); */
385
386 return dev; 384 return dev;
387} 385}
388EXPORT_SYMBOL(of_create_pci_dev); 386EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..eaed402ad346 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT
34 bool 34 bool
35 default y 35 default y
36 36
37config GENERIC_CALIBRATE_DELAY
38 bool
39 default y
40
41config GENERIC_TIME 37config GENERIC_TIME
42 def_bool y 38 def_bool y
43 39
@@ -134,6 +130,31 @@ config AUDIT_ARCH
134 bool 130 bool
135 default y 131 default y
136 132
133config S390_SWITCH_AMODE
134 bool "Switch kernel/user addressing modes"
135 help
136 This option allows you to switch the addressing modes of kernel and user
137 space. The kernel parameter switch_amode=on will enable this feature;
138 it is disabled by default. Enabling this (via kernel parameter) on machines
139 earlier than IBM System z9-109 EC/BC will reduce system performance.
140
141 Note that this option will also be selected by selecting the execute
142 protection option below. Enabling the execute protection via the
143 noexec kernel parameter will also switch the addressing modes,
144 independent of the switch_amode kernel parameter.
145
146
147config S390_EXEC_PROTECT
148 bool "Data execute protection"
149 select S390_SWITCH_AMODE
150 help
151 This option allows you to enable buffer overflow protection for user
152 space programs and it also selects the addressing mode option above.
153 The kernel parameter noexec=on will enable this feature and also
154 switch the addressing modes; it is disabled by default. Enabling this (via
155 kernel parameter) on machines earlier than IBM System z9-109 EC/BC
156 will reduce system performance.
157
137comment "Code generation options" 158comment "Code generation options"
138 159
139choice 160choice
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b8c237290263..c9da7d16145e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = {
81/* 81/*
82 * Timer 82 * Timer
83 */ 83 */
84DEFINE_PER_CPU(struct vtimer_list, appldata_timer); 84static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
85static atomic_t appldata_expire_count = ATOMIC_INIT(0); 85static atomic_t appldata_expire_count = ATOMIC_INIT(0);
86 86
87static DEFINE_SPINLOCK(appldata_timer_lock); 87static DEFINE_SPINLOCK(appldata_timer_lock);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 8aea3698a77b..4ca615788702 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -36,7 +36,7 @@
36 * book: 36 * book:
37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 37 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
38 */ 38 */
39struct appldata_mem_data { 39static struct appldata_mem_data {
40 u64 timestamp; 40 u64 timestamp;
41 u32 sync_count_1; /* after VM collected the record data, */ 41 u32 sync_count_1; /* after VM collected the record data, */
42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 42 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 075e619bf37d..f64b8c867ae2 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -34,7 +34,7 @@
34 * book: 34 * book:
35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml 35 * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
36 */ 36 */
37struct appldata_net_sum_data { 37static struct appldata_net_sum_data {
38 u64 timestamp; 38 u64 timestamp;
39 u32 sync_count_1; /* after VM collected the record data, */ 39 u32 sync_count_1; /* after VM collected the record data, */
40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the 40 u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..99ff9f08e4d7
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,60 @@
1config CRYPTO_SHA1_S390
2 tristate "SHA1 digest algorithm"
3 depends on S390
4 select CRYPTO_ALGAPI
5 help
6 This is the s390 hardware accelerated implementation of the
7 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
8
9config CRYPTO_SHA256_S390
10 tristate "SHA256 digest algorithm"
11 depends on S390
12 select CRYPTO_ALGAPI
13 help
14 This is the s390 hardware accelerated implementation of the
15 SHA256 secure hash standard (DFIPS 180-2).
16
17 This version of SHA implements a 256 bit hash with 128 bits of
18 security against collision attacks.
19
20config CRYPTO_DES_S390
21 tristate "DES and Triple DES cipher algorithms"
22 depends on S390
23 select CRYPTO_ALGAPI
24 select CRYPTO_BLKCIPHER
25 help
26 This is the s390 hardware accelerated implementation of the
27 DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
28
29config CRYPTO_AES_S390
30 tristate "AES cipher algorithms"
31 depends on S390
32 select CRYPTO_ALGAPI
33 select CRYPTO_BLKCIPHER
34 help
35 This is the s390 hardware accelerated implementation of the
36 AES cipher algorithms (FIPS-197). AES uses the Rijndael
37 algorithm.
38
39 Rijndael appears to be consistently a very good performer in
40 both hardware and software across a wide range of computing
41 environments regardless of its use in feedback or non-feedback
42 modes. Its key setup time is excellent, and its key agility is
43 good. Rijndael's very low memory requirements make it very well
44 suited for restricted-space environments, in which it also
45 demonstrates excellent performance. Rijndael's operations are
46 among the easiest to defend against power and timing attacks.
47
48 On s390 the System z9-109 currently only supports the key size
49 of 128 bits.
50
51config S390_PRNG
52 tristate "Pseudo random number generator device driver"
53 depends on S390
54 default "m"
55 help
56 Select this option if you want to use the s390 pseudo random number
57 generator. The PRNG is part of the cryptographic processor functions
58 and uses triple-DES to generate secure random numbers like the
59 ANSI X9.17 standard. The PRNG is usable via the char device
60 /dev/prandom.
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index bfe2541dc5cf..14e552c5cc43 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o 6obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o 7obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 8obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
9 9obj-$(CONFIG_S390_PRNG) += prng.o
10obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 15c9eec02928..91636353f6f0 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the AES Cipher Algorithm. 4 * s390 implementation of the AES Cipher Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/aes.c" 10 * Derived from "crypto/aes.c"
@@ -27,9 +27,11 @@
27/* data block size for all key lengths */ 27/* data block size for all key lengths */
28#define AES_BLOCK_SIZE 16 28#define AES_BLOCK_SIZE 16
29 29
30int has_aes_128 = 0; 30#define AES_KEYLEN_128 1
31int has_aes_192 = 0; 31#define AES_KEYLEN_192 2
32int has_aes_256 = 0; 32#define AES_KEYLEN_256 4
33
34static char keylen_flag = 0;
33 35
34struct s390_aes_ctx { 36struct s390_aes_ctx {
35 u8 iv[AES_BLOCK_SIZE]; 37 u8 iv[AES_BLOCK_SIZE];
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
47 49
48 switch (key_len) { 50 switch (key_len) {
49 case 16: 51 case 16:
50 if (!has_aes_128) 52 if (!(keylen_flag & AES_KEYLEN_128))
51 goto fail; 53 goto fail;
52 break; 54 break;
53 case 24: 55 case 24:
54 if (!has_aes_192) 56 if (!(keylen_flag & AES_KEYLEN_192))
55 goto fail; 57 goto fail;
56 58
57 break; 59 break;
58 case 32: 60 case 32:
59 if (!has_aes_256) 61 if (!(keylen_flag & AES_KEYLEN_256))
60 goto fail; 62 goto fail;
61 break; 63 break;
62 default: 64 default:
63 /* invalid key length */
64 goto fail; 65 goto fail;
65 break; 66 break;
66 } 67 }
@@ -322,34 +323,32 @@ static int __init aes_init(void)
322 int ret; 323 int ret;
323 324
324 if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) 325 if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
325 has_aes_128 = 1; 326 keylen_flag |= AES_KEYLEN_128;
326 if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) 327 if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
327 has_aes_192 = 1; 328 keylen_flag |= AES_KEYLEN_192;
328 if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) 329 if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
329 has_aes_256 = 1; 330 keylen_flag |= AES_KEYLEN_256;
331
332 if (!keylen_flag)
333 return -EOPNOTSUPP;
330 334
331 if (!has_aes_128 && !has_aes_192 && !has_aes_256) 335 /* z9 109 and z9 BC/EC only support 128 bit key length */
332 return -ENOSYS; 336 if (keylen_flag == AES_KEYLEN_128)
337 printk(KERN_INFO
338 "aes_s390: hardware acceleration only available for"
339 "128 bit keys\n");
333 340
334 ret = crypto_register_alg(&aes_alg); 341 ret = crypto_register_alg(&aes_alg);
335 if (ret != 0) { 342 if (ret)
336 printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
337 goto aes_err; 343 goto aes_err;
338 }
339 344
340 ret = crypto_register_alg(&ecb_aes_alg); 345 ret = crypto_register_alg(&ecb_aes_alg);
341 if (ret != 0) { 346 if (ret)
342 printk(KERN_INFO
343 "crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
344 goto ecb_aes_err; 347 goto ecb_aes_err;
345 }
346 348
347 ret = crypto_register_alg(&cbc_aes_alg); 349 ret = crypto_register_alg(&cbc_aes_alg);
348 if (ret != 0) { 350 if (ret)
349 printk(KERN_INFO
350 "crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
351 goto cbc_aes_err; 351 goto cbc_aes_err;
352 }
353 352
354out: 353out:
355 return ret; 354 return ret;
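The aes_init()/aes_set_key() hunks above fold the three has_aes_* variables into a single bit mask. Stripped of the s390 specifics, the pattern looks like the sketch below; have_km_facility() is a made-up stand-in for crypt_s390_func_available(), used only so the snippet compiles, and here it pretends that only AES-128 is available.

#include <stdio.h>

#define AES_KEYLEN_128 1
#define AES_KEYLEN_192 2
#define AES_KEYLEN_256 4

static char keylen_flag;

/* stand-in for crypt_s390_func_available(); pretend only AES-128 is there */
static int have_km_facility(int keybits)
{
    return keybits == 128;
}

static void probe(void)
{
    if (have_km_facility(128))
        keylen_flag |= AES_KEYLEN_128;         /* one bit per supported key size */
    if (have_km_facility(192))
        keylen_flag |= AES_KEYLEN_192;
    if (have_km_facility(256))
        keylen_flag |= AES_KEYLEN_256;
}

static int key_len_supported(int key_len)
{
    switch (key_len) {
    case 16: return keylen_flag & AES_KEYLEN_128;  /* replaces has_aes_128 */
    case 24: return keylen_flag & AES_KEYLEN_192;
    case 32: return keylen_flag & AES_KEYLEN_256;
    default: return 0;
    }
}

int main(void)
{
    probe();
    printf("16: %d, 24: %d, 32: %d\n",
           key_len_supported(16), key_len_supported(24), key_len_supported(32));
    return 0;
}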
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 2b137089f625..2775d2618332 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,8 +3,9 @@
3 * 3 *
4 * Support for s390 cryptographic instructions. 4 * Support for s390 cryptographic instructions.
5 * 5 *
6 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * Jan Glauber (jan.glauber@de.ibm.com)
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free 11 * under the terms of the GNU General Public License as published by the Free
@@ -32,7 +33,8 @@ enum crypt_s390_operations {
32 CRYPT_S390_KMAC = 0x0500 33 CRYPT_S390_KMAC = 0x0500
33}; 34};
34 35
35/* function codes for KM (CIPHER MESSAGE) instruction 36/*
37 * function codes for KM (CIPHER MESSAGE) instruction
36 * 0x80 is the decipher modifier bit 38 * 0x80 is the decipher modifier bit
37 */ 39 */
38enum crypt_s390_km_func { 40enum crypt_s390_km_func {
@@ -51,7 +53,8 @@ enum crypt_s390_km_func {
51 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, 53 KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
52}; 54};
53 55
54/* function codes for KMC (CIPHER MESSAGE WITH CHAINING) 56/*
57 * function codes for KMC (CIPHER MESSAGE WITH CHAINING)
55 * instruction 58 * instruction
56 */ 59 */
57enum crypt_s390_kmc_func { 60enum crypt_s390_kmc_func {
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func {
68 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, 71 KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
69 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, 72 KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
70 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, 73 KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
74 KMC_PRNG = CRYPT_S390_KMC | 0x43,
71}; 75};
72 76
73/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) 77/*
78 * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
74 * instruction 79 * instruction
75 */ 80 */
76enum crypt_s390_kimd_func { 81enum crypt_s390_kimd_func {
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func {
79 KIMD_SHA_256 = CRYPT_S390_KIMD | 2, 84 KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
80}; 85};
81 86
82/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) 87/*
88 * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
83 * instruction 89 * instruction
84 */ 90 */
85enum crypt_s390_klmd_func { 91enum crypt_s390_klmd_func {
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func {
88 KLMD_SHA_256 = CRYPT_S390_KLMD | 2, 94 KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
89}; 95};
90 96
91/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) 97/*
98 * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
92 * instruction 99 * instruction
93 */ 100 */
94enum crypt_s390_kmac_func { 101enum crypt_s390_kmac_func {
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func {
98 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 105 KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
99}; 106};
100 107
101/* status word for s390 crypto instructions' QUERY functions */ 108/**
102struct crypt_s390_query_status { 109 * crypt_s390_km:
103 u64 high; 110 * @func: the function code passed to KM; see crypt_s390_km_func
104 u64 low; 111 * @param: address of parameter block; see POP for details on each func
105}; 112 * @dest: address of destination memory area
106 113 * @src: address of source memory area
107/* 114 * @src_len: length of src operand in bytes
115 *
108 * Executes the KM (CIPHER MESSAGE) operation of the CPU. 116 * Executes the KM (CIPHER MESSAGE) operation of the CPU.
109 * @param func: the function code passed to KM; see crypt_s390_km_func 117 *
110 * @param param: address of parameter block; see POP for details on each func 118 * Returns -1 for failure, 0 for the query func, number of processed
111 * @param dest: address of destination memory area 119 * bytes for encryption/decryption funcs
112 * @param src: address of source memory area
113 * @param src_len: length of src operand in bytes
114 * @returns < zero for failure, 0 for the query func, number of processed bytes
115 * for encryption/decryption funcs
116 */ 120 */
117static inline int 121static inline int crypt_s390_km(long func, void *param,
118crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) 122 u8 *dest, const u8 *src, long src_len)
119{ 123{
120 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 124 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
121 register void* __param asm("1") = param; 125 register void *__param asm("1") = param;
122 register const u8* __src asm("2") = src; 126 register const u8 *__src asm("2") = src;
123 register long __src_len asm("3") = src_len; 127 register long __src_len asm("3") = src_len;
124 register u8* __dest asm("4") = dest; 128 register u8 *__dest asm("4") = dest;
125 int ret; 129 int ret;
126 130
127 asm volatile( 131 asm volatile(
128 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ 132 "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
129 "1: brc 1,0b \n" /* handle partial completion */ 133 "1: brc 1,0b \n" /* handle partial completion */
130 " ahi %0,%h7\n" 134 " la %0,0\n"
131 "2: ahi %0,%h8\n" 135 "2:\n"
132 "3:\n" 136 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
133 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
134 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 137 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
135 : "d" (__func), "a" (__param), "0" (-EFAULT), 138 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
136 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
137 if (ret < 0) 139 if (ret < 0)
138 return ret; 140 return ret;
139 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 141 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
140} 142}
141 143
142/* 144/**
145 * crypt_s390_kmc:
146 * @func: the function code passed to KM; see crypt_s390_kmc_func
147 * @param: address of parameter block; see POP for details on each func
148 * @dest: address of destination memory area
149 * @src: address of source memory area
150 * @src_len: length of src operand in bytes
151 *
143 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. 152 * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
144 * @param func: the function code passed to KM; see crypt_s390_kmc_func 153 *
145 * @param param: address of parameter block; see POP for details on each func 154 * Returns -1 for failure, 0 for the query func, number of processed
146 * @param dest: address of destination memory area 155 * bytes for encryption/decryption funcs
147 * @param src: address of source memory area
148 * @param src_len: length of src operand in bytes
149 * @returns < zero for failure, 0 for the query func, number of processed bytes
150 * for encryption/decryption funcs
151 */ 156 */
152static inline int 157static inline int crypt_s390_kmc(long func, void *param,
153crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) 158 u8 *dest, const u8 *src, long src_len)
154{ 159{
155 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 160 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
156 register void* __param asm("1") = param; 161 register void *__param asm("1") = param;
157 register const u8* __src asm("2") = src; 162 register const u8 *__src asm("2") = src;
158 register long __src_len asm("3") = src_len; 163 register long __src_len asm("3") = src_len;
159 register u8* __dest asm("4") = dest; 164 register u8 *__dest asm("4") = dest;
160 int ret; 165 int ret;
161 166
162 asm volatile( 167 asm volatile(
163 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ 168 "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
164 "1: brc 1,0b \n" /* handle partial completion */ 169 "1: brc 1,0b \n" /* handle partial completion */
165 " ahi %0,%h7\n" 170 " la %0,0\n"
166 "2: ahi %0,%h8\n" 171 "2:\n"
167 "3:\n" 172 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
168 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
169 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) 173 : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
170 : "d" (__func), "a" (__param), "0" (-EFAULT), 174 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
171 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
172 if (ret < 0) 175 if (ret < 0)
173 return ret; 176 return ret;
174 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 177 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
175} 178}
176 179
177/* 180/**
181 * crypt_s390_kimd:
182 * @func: the function code passed to KM; see crypt_s390_kimd_func
183 * @param: address of parameter block; see POP for details on each func
184 * @src: address of source memory area
185 * @src_len: length of src operand in bytes
186 *
178 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation 187 * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
179 * of the CPU. 188 * of the CPU.
180 * @param func: the function code passed to KM; see crypt_s390_kimd_func 189 *
181 * @param param: address of parameter block; see POP for details on each func 190 * Returns -1 for failure, 0 for the query func, number of processed
182 * @param src: address of source memory area 191 * bytes for digest funcs
183 * @param src_len: length of src operand in bytes
184 * @returns < zero for failure, 0 for the query func, number of processed bytes
185 * for digest funcs
186 */ 192 */
187static inline int 193static inline int crypt_s390_kimd(long func, void *param,
188crypt_s390_kimd(long func, void* param, const u8* src, long src_len) 194 const u8 *src, long src_len)
189{ 195{
190 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 196 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
191 register void* __param asm("1") = param; 197 register void *__param asm("1") = param;
192 register const u8* __src asm("2") = src; 198 register const u8 *__src asm("2") = src;
193 register long __src_len asm("3") = src_len; 199 register long __src_len asm("3") = src_len;
194 int ret; 200 int ret;
195 201
196 asm volatile( 202 asm volatile(
197 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ 203 "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
198 "1: brc 1,0b \n" /* handle partial completion */ 204 "1: brc 1,0b \n" /* handle partial completion */
199 " ahi %0,%h6\n" 205 " la %0,0\n"
200 "2: ahi %0,%h7\n" 206 "2:\n"
201 "3:\n" 207 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
202 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
203 : "=d" (ret), "+a" (__src), "+d" (__src_len) 208 : "=d" (ret), "+a" (__src), "+d" (__src_len)
204 : "d" (__func), "a" (__param), "0" (-EFAULT), 209 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
205 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
206 if (ret < 0) 210 if (ret < 0)
207 return ret; 211 return ret;
208 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 212 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
209} 213}
210 214
211/* 215/**
216 * crypt_s390_klmd:
217 * @func: the function code passed to KM; see crypt_s390_klmd_func
218 * @param: address of parameter block; see POP for details on each func
219 * @src: address of source memory area
220 * @src_len: length of src operand in bytes
221 *
212 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. 222 * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
213 * @param func: the function code passed to KM; see crypt_s390_klmd_func 223 *
214 * @param param: address of parameter block; see POP for details on each func 224 * Returns -1 for failure, 0 for the query func, number of processed
215 * @param src: address of source memory area 225 * bytes for digest funcs
216 * @param src_len: length of src operand in bytes
217 * @returns < zero for failure, 0 for the query func, number of processed bytes
218 * for digest funcs
219 */ 226 */
220static inline int 227static inline int crypt_s390_klmd(long func, void *param,
221crypt_s390_klmd(long func, void* param, const u8* src, long src_len) 228 const u8 *src, long src_len)
222{ 229{
223 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 230 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
224 register void* __param asm("1") = param; 231 register void *__param asm("1") = param;
225 register const u8* __src asm("2") = src; 232 register const u8 *__src asm("2") = src;
226 register long __src_len asm("3") = src_len; 233 register long __src_len asm("3") = src_len;
227 int ret; 234 int ret;
228 235
229 asm volatile( 236 asm volatile(
230 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ 237 "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
231 "1: brc 1,0b \n" /* handle partial completion */ 238 "1: brc 1,0b \n" /* handle partial completion */
232 " ahi %0,%h6\n" 239 " la %0,0\n"
233 "2: ahi %0,%h7\n" 240 "2:\n"
234 "3:\n" 241 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
235 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
236 : "=d" (ret), "+a" (__src), "+d" (__src_len) 242 : "=d" (ret), "+a" (__src), "+d" (__src_len)
237 : "d" (__func), "a" (__param), "0" (-EFAULT), 243 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
238 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
239 if (ret < 0) 244 if (ret < 0)
240 return ret; 245 return ret;
241 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 246 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
242} 247}
243 248
244/* 249/**
250 * crypt_s390_kmac:
251 * @func: the function code passed to KM; see crypt_s390_klmd_func
252 * @param: address of parameter block; see POP for details on each func
253 * @src: address of source memory area
254 * @src_len: length of src operand in bytes
255 *
245 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation 256 * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
246 * of the CPU. 257 * of the CPU.
247 * @param func: the function code passed to KM; see crypt_s390_klmd_func 258 *
248 * @param param: address of parameter block; see POP for details on each func 259 * Returns -1 for failure, 0 for the query func, number of processed
249 * @param src: address of source memory area 260 * bytes for digest funcs
250 * @param src_len: length of src operand in bytes
251 * @returns < zero for failure, 0 for the query func, number of processed bytes
252 * for digest funcs
253 */ 261 */
254static inline int 262static inline int crypt_s390_kmac(long func, void *param,
255crypt_s390_kmac(long func, void* param, const u8* src, long src_len) 263 const u8 *src, long src_len)
256{ 264{
257 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; 265 register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
258 register void* __param asm("1") = param; 266 register void *__param asm("1") = param;
259 register const u8* __src asm("2") = src; 267 register const u8 *__src asm("2") = src;
260 register long __src_len asm("3") = src_len; 268 register long __src_len asm("3") = src_len;
261 int ret; 269 int ret;
262 270
263 asm volatile( 271 asm volatile(
264 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ 272 "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
265 "1: brc 1,0b \n" /* handle partial completion */ 273 "1: brc 1,0b \n" /* handle partial completion */
266 " ahi %0,%h6\n" 274 " la %0,0\n"
267 "2: ahi %0,%h7\n" 275 "2:\n"
268 "3:\n" 276 EX_TABLE(0b,2b) EX_TABLE(1b,2b)
269 EX_TABLE(0b,3b) EX_TABLE(1b,2b)
270 : "=d" (ret), "+a" (__src), "+d" (__src_len) 277 : "=d" (ret), "+a" (__src), "+d" (__src_len)
271 : "d" (__func), "a" (__param), "0" (-EFAULT), 278 : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
272 "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory");
273 if (ret < 0) 279 if (ret < 0)
274 return ret; 280 return ret;
275 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; 281 return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len;
276} 282}
277 283
278/** 284/**
285 * crypt_s390_func_available:
286 * @func: the function code of the specific function; 0 if op in general
287 *
279 * Tests if a specific crypto function is implemented on the machine. 288 * Tests if a specific crypto function is implemented on the machine.
280 * @param func: the function code of the specific function; 0 if op in general 289 *
281 * @return 1 if func available; 0 if func or op in general not available 290 * Returns 1 if func available; 0 if func or op in general not available
282 */ 291 */
283static inline int 292static inline int crypt_s390_func_available(int func)
284crypt_s390_func_available(int func)
285{ 293{
294 unsigned char status[16];
286 int ret; 295 int ret;
287 296
288 struct crypt_s390_query_status status = { 297 switch (func & CRYPT_S390_OP_MASK) {
289 .high = 0, 298 case CRYPT_S390_KM:
290 .low = 0 299 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
291 }; 300 break;
292 switch (func & CRYPT_S390_OP_MASK){ 301 case CRYPT_S390_KMC:
293 case CRYPT_S390_KM: 302 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
294 ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); 303 break;
295 break; 304 case CRYPT_S390_KIMD:
296 case CRYPT_S390_KMC: 305 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
297 ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); 306 break;
298 break; 307 case CRYPT_S390_KLMD:
299 case CRYPT_S390_KIMD: 308 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
300 ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); 309 break;
301 break; 310 case CRYPT_S390_KMAC:
302 case CRYPT_S390_KLMD: 311 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
303 ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); 312 break;
304 break; 313 default:
305 case CRYPT_S390_KMAC: 314 return 0;
306 ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
307 break;
308 default:
309 ret = 0;
310 return ret;
311 }
312 if (ret >= 0){
313 func &= CRYPT_S390_FUNC_MASK;
314 func &= 0x7f; //mask modifier bit
315 if (func < 64){
316 ret = (status.high >> (64 - func - 1)) & 0x1;
317 } else {
318 ret = (status.low >> (128 - func - 1)) & 0x1;
319 }
320 } else {
321 ret = 0;
322 } 315 }
323 return ret; 316 if (ret < 0)
317 return 0;
318 func &= CRYPT_S390_FUNC_MASK;
319 func &= 0x7f; /* mask modifier bit */
320 return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
324} 321}
325 322
326#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H 323#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
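The rewritten crypt_s390_func_available() above indexes the 16-byte QUERY status block directly instead of splitting it into two u64 halves with shift arithmetic. The bit test can be exercised in isolation as below; the status bytes are invented for illustration, and only the indexing expression is taken from the patch.

#include <stdio.h>

/* Invented 16-byte status block such as a QUERY call would return; byte 0
 * covers function codes 0..7 (code 0 in the most significant bit), byte 1
 * covers codes 8..15, and so on. */
static const unsigned char status[16] = { 0xf0, 0x70, 0x38 };

static int func_available(int func)
{
    func &= 0x7f;                              /* strip the 0x80 decipher modifier */
    return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}

int main(void)
{
    /* code 0x12 and its decipher variant 0x92 hit the same bit:
     * byte index 0x12 >> 3 = 2, mask 0x80 >> (0x12 & 7) = 0x20 */
    printf("0x12: %d, 0x92: %d, 0x30: %d\n",
           func_available(0x12), func_available(0x92), func_available(0x30));
    return 0;
}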
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
deleted file mode 100644
index 54fb11d7fadd..000000000000
--- a/arch/s390/crypto/crypt_s390_query.c
+++ /dev/null
@@ -1,129 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for s390 cryptographic instructions.
5 * Testing module for querying processor crypto capabilities.
6 *
7 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Thomas Spatzier (tspat@de.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <asm/errno.h>
20#include "crypt_s390.h"
21
22static void query_available_functions(void)
23{
24 printk(KERN_INFO "#####################\n");
25
26 /* query available KM functions */
27 printk(KERN_INFO "KM_QUERY: %d\n",
28 crypt_s390_func_available(KM_QUERY));
29 printk(KERN_INFO "KM_DEA: %d\n",
30 crypt_s390_func_available(KM_DEA_ENCRYPT));
31 printk(KERN_INFO "KM_TDEA_128: %d\n",
32 crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
33 printk(KERN_INFO "KM_TDEA_192: %d\n",
34 crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
35 printk(KERN_INFO "KM_AES_128: %d\n",
36 crypt_s390_func_available(KM_AES_128_ENCRYPT));
37 printk(KERN_INFO "KM_AES_192: %d\n",
38 crypt_s390_func_available(KM_AES_192_ENCRYPT));
39 printk(KERN_INFO "KM_AES_256: %d\n",
40 crypt_s390_func_available(KM_AES_256_ENCRYPT));
41
42 /* query available KMC functions */
43 printk(KERN_INFO "KMC_QUERY: %d\n",
44 crypt_s390_func_available(KMC_QUERY));
45 printk(KERN_INFO "KMC_DEA: %d\n",
46 crypt_s390_func_available(KMC_DEA_ENCRYPT));
47 printk(KERN_INFO "KMC_TDEA_128: %d\n",
48 crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
49 printk(KERN_INFO "KMC_TDEA_192: %d\n",
50 crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
51 printk(KERN_INFO "KMC_AES_128: %d\n",
52 crypt_s390_func_available(KMC_AES_128_ENCRYPT));
53 printk(KERN_INFO "KMC_AES_192: %d\n",
54 crypt_s390_func_available(KMC_AES_192_ENCRYPT));
55 printk(KERN_INFO "KMC_AES_256: %d\n",
56 crypt_s390_func_available(KMC_AES_256_ENCRYPT));
57
58 /* query available KIMD functions */
59 printk(KERN_INFO "KIMD_QUERY: %d\n",
60 crypt_s390_func_available(KIMD_QUERY));
61 printk(KERN_INFO "KIMD_SHA_1: %d\n",
62 crypt_s390_func_available(KIMD_SHA_1));
63 printk(KERN_INFO "KIMD_SHA_256: %d\n",
64 crypt_s390_func_available(KIMD_SHA_256));
65
66 /* query available KLMD functions */
67 printk(KERN_INFO "KLMD_QUERY: %d\n",
68 crypt_s390_func_available(KLMD_QUERY));
69 printk(KERN_INFO "KLMD_SHA_1: %d\n",
70 crypt_s390_func_available(KLMD_SHA_1));
71 printk(KERN_INFO "KLMD_SHA_256: %d\n",
72 crypt_s390_func_available(KLMD_SHA_256));
73
74 /* query available KMAC functions */
75 printk(KERN_INFO "KMAC_QUERY: %d\n",
76 crypt_s390_func_available(KMAC_QUERY));
77 printk(KERN_INFO "KMAC_DEA: %d\n",
78 crypt_s390_func_available(KMAC_DEA));
79 printk(KERN_INFO "KMAC_TDEA_128: %d\n",
80 crypt_s390_func_available(KMAC_TDEA_128));
81 printk(KERN_INFO "KMAC_TDEA_192: %d\n",
82 crypt_s390_func_available(KMAC_TDEA_192));
83}
84
85static int init(void)
86{
87 struct crypt_s390_query_status status = {
88 .high = 0,
89 .low = 0
90 };
91
92 printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
93 crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
94 printk(KERN_INFO "KM:\t%016llx %016llx\n",
95 (unsigned long long) status.high,
96 (unsigned long long) status.low);
97 status.high = status.low = 0;
98 crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
99 printk(KERN_INFO "KMC:\t%016llx %016llx\n",
100 (unsigned long long) status.high,
101 (unsigned long long) status.low);
102 status.high = status.low = 0;
103 crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
104 printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
105 (unsigned long long) status.high,
106 (unsigned long long) status.low);
107 status.high = status.low = 0;
108 crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
109 printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
110 (unsigned long long) status.high,
111 (unsigned long long) status.low);
112 status.high = status.low = 0;
113 crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
114 printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
115 (unsigned long long) status.high,
116 (unsigned long long) status.low);
117
118 query_available_functions();
119 return -ECANCELED;
120}
121
122static void __exit cleanup(void)
123{
124}
125
126module_init(init);
127module_exit(cleanup);
128
129MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
index e3f5c5f238fe..5706af266442 100644
--- a/arch/s390/crypto/des_check_key.c
+++ b/arch/s390/crypto/des_check_key.c
@@ -10,8 +10,9 @@
10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. 10 * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL.
11 * 11 *
12 * s390 Version: 12 * s390 Version:
13 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 13 * Copyright IBM Corp. 2003
14 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 14 * Author(s): Thomas Spatzier
15 * Jan Glauber (jan.glauber@de.ibm.com)
15 * 16 *
16 * Derived from "crypto/des.c" 17 * Derived from "crypto/des.c"
17 * Copyright (c) 1992 Dana L. How. 18 * Copyright (c) 1992 Dana L. How.
@@ -30,6 +31,7 @@
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
32#include <linux/crypto.h> 33#include <linux/crypto.h>
34#include "crypto_des.h"
33 35
34#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) 36#define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o))
35 37
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2aba04852fe3..ea22707f435f 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,9 +3,9 @@
3 * 3 *
4 * s390 implementation of the DES Cipher Algorithm. 4 * s390 implementation of the DES Cipher Algorithm.
5 * 5 *
6 * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2003,2007
7 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 7 * Author(s): Thomas Spatzier
8 * 8 * Jan Glauber (jan.glauber@de.ibm.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -557,7 +557,7 @@ static int init(void)
557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || 557 if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || 558 !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) 559 !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
560 return -ENOSYS; 560 return -EOPNOTSUPP;
561 561
562 ret = crypto_register_alg(&des_alg); 562 ret = crypto_register_alg(&des_alg);
563 if (ret) 563 if (ret)
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000000..8eb3a1aedc22
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,213 @@
1/*
2 * Copyright IBM Corp. 2006,2007
3 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
4 * Driver for the s390 pseudo random number generator
5 */
6#include <linux/fs.h>
7#include <linux/init.h>
8#include <linux/kernel.h>
9#include <linux/miscdevice.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <asm/debug.h>
14#include <asm/uaccess.h>
15
16#include "crypt_s390.h"
17
18MODULE_LICENSE("GPL");
19MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
20MODULE_DESCRIPTION("s390 PRNG interface");
21
22static int prng_chunk_size = 256;
23module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
24MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
25
26static int prng_entropy_limit = 4096;
27module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
28MODULE_PARM_DESC(prng_entropy_limit,
29 "PRNG add entropy after that much bytes were produced");
30
31/*
32 * Any one who considers arithmetical methods of producing random digits is,
33 * of course, in a state of sin. -- John von Neumann
34 */
35
36struct s390_prng_data {
37 unsigned long count; /* how many bytes were produced */
38 char *buf;
39};
40
41static struct s390_prng_data *p;
42
43/* copied from libica, use a non-zero initial parameter block */
44static unsigned char parm_block[32] = {
450x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
460x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
47};
48
49static int prng_open(struct inode *inode, struct file *file)
50{
51 return nonseekable_open(inode, file);
52}
53
54static void prng_add_entropy(void)
55{
56 __u64 entropy[4];
57 unsigned int i;
58 int ret;
59
60 for (i = 0; i < 16; i++) {
61 ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
62 (char *)entropy, sizeof(entropy));
63 BUG_ON(ret < 0 || ret != sizeof(entropy));
64 memcpy(parm_block, entropy, sizeof(entropy));
65 }
66}
67
68static void prng_seed(int nbytes)
69{
70 char buf[16];
71 int i = 0;
72
73 BUG_ON(nbytes > 16);
74 get_random_bytes(buf, nbytes);
75
76 /* Add the entropy */
77 while (nbytes >= 8) {
78 *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8);
79 prng_add_entropy();
80 i += 8;
81 nbytes -= 8;
82 }
83 prng_add_entropy();
84}
85
86static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
87 loff_t *ppos)
88{
89 int chunk, n;
90 int ret = 0;
91 int tmp;
92
93 /* nbytes can be arbitrarily long, we split it into chunks */
94 while (nbytes) {
95 /* same as in extract_entropy_user in random.c */
96 if (need_resched()) {
97 if (signal_pending(current)) {
98 if (ret == 0)
99 ret = -ERESTARTSYS;
100 break;
101 }
102 schedule();
103 }
104
105 /*
106 * we lose some random bytes if an attacker issues
107 * reads < 8 bytes, but we don't care
108 */
109 chunk = min_t(int, nbytes, prng_chunk_size);
110
111 /* PRNG only likes multiples of 8 bytes */
112 n = (chunk + 7) & -8;
113
114 if (p->count > prng_entropy_limit)
115 prng_seed(8);
116
117 /* if the CPU supports PRNG stckf is present too */
118 asm volatile(".insn s,0xb27c0000,%0"
119 : "=m" (*((unsigned long long *)p->buf)) : : "cc");
120
121 /*
122 * Besides the STCKF the input for the TDES-EDE is the output
123 * of the last operation. We differ here from X9.17 since we
124 * only store one timestamp into the buffer. Padding the whole
125 * buffer with timestamps does not improve security, since
126 * successive stckf have nearly constant offsets.
127 * If an attacker knows the first timestamp it would be
128 * trivial to guess the additional values. One timestamp
129 * is therefore enough and still guarantees unique input values.
130 *
131 * Note: you can still get strict X9.17 conformity by setting
132 * prng_chunk_size to 8 bytes.
133 */
134 tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
135 BUG_ON((tmp < 0) || (tmp != n));
136
137 p->count += n;
138
139 if (copy_to_user(ubuf, p->buf, chunk))
140 return -EFAULT;
141
142 nbytes -= chunk;
143 ret += chunk;
144 ubuf += chunk;
145 }
146 return ret;
147}
148
149static struct file_operations prng_fops = {
150 .owner = THIS_MODULE,
151 .open = &prng_open,
152 .release = NULL,
153 .read = &prng_read,
154};
155
156static struct miscdevice prng_dev = {
157 .name = "prandom",
158 .minor = MISC_DYNAMIC_MINOR,
159 .fops = &prng_fops,
160};
161
162static int __init prng_init(void)
163{
164 int ret;
165
166 /* check if the CPU has a PRNG */
167 if (!crypt_s390_func_available(KMC_PRNG))
168 return -EOPNOTSUPP;
169
170 if (prng_chunk_size < 8)
171 return -EINVAL;
172
173 p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
174 if (!p)
175 return -ENOMEM;
176 p->count = 0;
177
178 p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
179 if (!p->buf) {
180 ret = -ENOMEM;
181 goto out_free;
182 }
183
184 /* initialize the PRNG, add 128 bits of entropy */
185 prng_seed(16);
186
187 ret = misc_register(&prng_dev);
188 if (ret) {
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf;
192 }
193 return 0;
194
195out_buf:
196 kfree(p->buf);
197out_free:
198 kfree(p);
199 return ret;
200}
201
202static void __exit prng_exit(void)
203{
204 /* wipe me */
205 memset(p->buf, 0, prng_chunk_size);
206 kfree(p->buf);
207 kfree(p);
208
209 misc_deregister(&prng_dev);
210}
211
212module_init(prng_init);
213module_exit(prng_exit);
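prng_read() above rounds each chunk up to a multiple of 8 before handing it to KMC, because the TDES-based generator only produces 8-byte blocks; the surplus bytes simply never reach copy_to_user(). The rounding expression on its own, as a small illustration:

#include <stdio.h>

int main(void)
{
    /* same expression as "n = (chunk + 7) & -8" in prng_read() */
    int sizes[] = { 1, 7, 8, 13, 256 };
    int i;

    for (i = 0; i < 5; i++) {
        int chunk = sizes[i];
        int n = (chunk + 7) & -8;              /* round up to a multiple of 8 */
        printf("chunk=%3d -> n=%3d\n", chunk, n);
    }
    return 0;
}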
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 49ca8690ee39..969639f31977 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,8 +8,9 @@
8 * implementation written by Steve Reid. 8 * implementation written by Steve Reid.
9 * 9 *
10 * s390 Version: 10 * s390 Version:
11 * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation 11 * Copyright IBM Corp. 2003,2007
12 * Author(s): Thomas Spatzier (tspat@de.ibm.com) 12 * Author(s): Thomas Spatzier
13 * Jan Glauber (jan.glauber@de.ibm.com)
13 * 14 *
14 * Derived from "crypto/sha1.c" 15 * Derived from "crypto/sha1.c"
15 * Copyright (c) Alan Smithee. 16 * Copyright (c) Alan Smithee.
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx {
43static void sha1_init(struct crypto_tfm *tfm) 44static void sha1_init(struct crypto_tfm *tfm)
44{ 45{
45 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); 46 struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm);
46 static const u32 initstate[5] = { 47
47 0x67452301, 48 ctx->state[0] = 0x67452301;
48 0xEFCDAB89, 49 ctx->state[1] = 0xEFCDAB89;
49 0x98BADCFE, 50 ctx->state[2] = 0x98BADCFE;
50 0x10325476, 51 ctx->state[3] = 0x10325476;
51 0xC3D2E1F0 52 ctx->state[4] = 0xC3D2E1F0;
52 };
53 53
54 ctx->count = 0; 54 ctx->count = 0;
55 memcpy(ctx->state, &initstate, sizeof(initstate));
56 ctx->buf_len = 0; 55 ctx->buf_len = 0;
57} 56}
58 57
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
63 long imd_len; 62 long imd_len;
64 63
65 sctx = crypto_tfm_ctx(tfm); 64 sctx = crypto_tfm_ctx(tfm);
66 sctx->count += len * 8; //message bit length 65 sctx->count += len * 8; /* message bit length */
67 66
68 //anything in buffer yet? -> must be completed 67 /* anything in buffer yet? -> must be completed */
69 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { 68 if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) {
70 //complete full block and hash 69 /* complete full block and hash */
71 memcpy(sctx->buffer + sctx->buf_len, data, 70 memcpy(sctx->buffer + sctx->buf_len, data,
72 SHA1_BLOCK_SIZE - sctx->buf_len); 71 SHA1_BLOCK_SIZE - sctx->buf_len);
73 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, 72 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
74 SHA1_BLOCK_SIZE); 73 SHA1_BLOCK_SIZE);
75 data += SHA1_BLOCK_SIZE - sctx->buf_len; 74 data += SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data,
77 sctx->buf_len = 0; 76 sctx->buf_len = 0;
78 } 77 }
79 78
80 //rest of data contains full blocks? 79 /* rest of data contains full blocks? */
81 imd_len = len & ~0x3ful; 80 imd_len = len & ~0x3ful;
82 if (imd_len){ 81 if (imd_len) {
83 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); 82 crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
84 data += imd_len; 83 data += imd_len;
85 len -= imd_len; 84 len -= imd_len;
86 } 85 }
87 //anything left? store in buffer 86 /* anything left? store in buffer */
88 if (len){ 87 if (len) {
89 memcpy(sctx->buffer + sctx->buf_len , data, len); 88 memcpy(sctx->buffer + sctx->buf_len , data, len);
90 sctx->buf_len += len; 89 sctx->buf_len += len;
91 } 90 }
92} 91}
93 92
94 93
95static void 94static void pad_message(struct crypt_s390_sha1_ctx* sctx)
96pad_message(struct crypt_s390_sha1_ctx* sctx)
97{ 95{
98 int index; 96 int index;
99 97
100 index = sctx->buf_len; 98 index = sctx->buf_len;
101 sctx->buf_len = (sctx->buf_len < 56)? 99 sctx->buf_len = (sctx->buf_len < 56) ?
102 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; 100 SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE;
103 //start pad with 1 101 /* start pad with 1 */
104 sctx->buffer[index] = 0x80; 102 sctx->buffer[index] = 0x80;
105 //pad with zeros 103 /* pad with zeros */
106 index++; 104 index++;
107 memset(sctx->buffer + index, 0x00, sctx->buf_len - index); 105 memset(sctx->buffer + index, 0x00, sctx->buf_len - index);
108 //append length 106 /* append length */
109 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, 107 memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count,
110 sizeof sctx->count); 108 sizeof sctx->count);
111} 109}
112 110
113/* Add padding and return the message digest. */ 111/* Add padding and return the message digest. */
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
115{ 113{
116 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 114 struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm);
117 115
118 //must perform manual padding 116 /* must perform manual padding */
119 pad_message(sctx); 117 pad_message(sctx);
120 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); 118 crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
121 //copy digest to out 119 /* copy digest to out */
122 memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 120 memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
123 /* Wipe context */ 121 /* wipe context */
124 memset(sctx, 0, sizeof *sctx); 122 memset(sctx, 0, sizeof *sctx);
125} 123}
126 124
127static struct crypto_alg alg = { 125static struct crypto_alg alg = {
128 .cra_name = "sha1", 126 .cra_name = "sha1",
129 .cra_driver_name = "sha1-s390", 127 .cra_driver_name= "sha1-s390",
130 .cra_priority = CRYPT_S390_PRIORITY, 128 .cra_priority = CRYPT_S390_PRIORITY,
131 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 129 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
132 .cra_blocksize = SHA1_BLOCK_SIZE, 130 .cra_blocksize = SHA1_BLOCK_SIZE,
133 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), 131 .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
134 .cra_module = THIS_MODULE, 132 .cra_module = THIS_MODULE,
135 .cra_list = LIST_HEAD_INIT(alg.cra_list), 133 .cra_list = LIST_HEAD_INIT(alg.cra_list),
136 .cra_u = { .digest = { 134 .cra_u = { .digest = {
137 .dia_digestsize = SHA1_DIGEST_SIZE, 135 .dia_digestsize = SHA1_DIGEST_SIZE,
138 .dia_init = sha1_init, 136 .dia_init = sha1_init,
139 .dia_update = sha1_update, 137 .dia_update = sha1_update,
140 .dia_final = sha1_final } } 138 .dia_final = sha1_final } }
141}; 139};
142 140
143static int 141static int __init init(void)
144init(void)
145{ 142{
146 int ret = -ENOSYS; 143 if (!crypt_s390_func_available(KIMD_SHA_1))
144 return -EOPNOTSUPP;
147 145
148 if (crypt_s390_func_available(KIMD_SHA_1)){ 146 return crypto_register_alg(&alg);
149 ret = crypto_register_alg(&alg);
150 if (ret == 0){
151 printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
152 }
153 }
154 return ret;
155} 147}
156 148
157static void __exit 149static void __exit fini(void)
158fini(void)
159{ 150{
160 crypto_unregister_alg(&alg); 151 crypto_unregister_alg(&alg);
161} 152}
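pad_message() above needs one trailing block when the 0x80 marker and the 8-byte bit count still fit behind the residual data (fewer than 56 bytes buffered), and two blocks otherwise. The length calculation, taken out of the driver purely for illustration:

#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

/* mirrors the buf_len update in pad_message() */
static int padded_len(int buf_len)
{
    return (buf_len < 56) ? SHA1_BLOCK_SIZE : 2 * SHA1_BLOCK_SIZE;
}

int main(void)
{
    printf("3 residual bytes  -> pad to %d\n", padded_len(3));    /* 64 */
    printf("55 residual bytes -> pad to %d\n", padded_len(55));   /* 64 */
    printf("56 residual bytes -> pad to %d\n", padded_len(56));   /* 128 */
    return 0;
}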
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 8e4e67503fe7..78436c696d37 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
4 * s390 implementation of the SHA256 Secure Hash Algorithm. 4 * s390 implementation of the SHA256 Secure Hash Algorithm.
5 * 5 *
6 * s390 Version: 6 * s390 Version:
7 * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation 7 * Copyright IBM Corp. 2005,2007
8 * Author(s): Jan Glauber (jang@de.ibm.com) 8 * Author(s): Jan Glauber (jang@de.ibm.com)
9 * 9 *
10 * Derived from "crypto/sha256.c" 10 * Derived from "crypto/sha256.c"
@@ -143,15 +143,10 @@ static struct crypto_alg alg = {
143 143
144static int init(void) 144static int init(void)
145{ 145{
146 int ret;
147
148 if (!crypt_s390_func_available(KIMD_SHA_256)) 146 if (!crypt_s390_func_available(KIMD_SHA_256))
149 return -ENOSYS; 147 return -EOPNOTSUPP;
150 148
151 ret = crypto_register_alg(&alg); 149 return crypto_register_alg(&alg);
152 if (ret != 0)
153 printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
154 return ret;
155} 150}
156 151
157static void __exit fini(void) 152static void __exit fini(void)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 5368cf4a350e..7c621b8ef683 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000
108CONFIG_COMPAT=y 108CONFIG_COMPAT=y
109CONFIG_SYSVIPC_COMPAT=y 109CONFIG_SYSVIPC_COMPAT=y
110CONFIG_AUDIT_ARCH=y 110CONFIG_AUDIT_ARCH=y
111CONFIG_S390_SWITCH_AMODE=y
112CONFIG_S390_EXEC_PROTECT=y
111 113
112# 114#
113# Code generation options 115# Code generation options
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y
431CONFIG_TN3215=y 433CONFIG_TN3215=y
432CONFIG_TN3215_CONSOLE=y 434CONFIG_TN3215_CONSOLE=y
433CONFIG_CCW_CONSOLE=y 435CONFIG_CCW_CONSOLE=y
434CONFIG_SCLP=y
435CONFIG_SCLP_TTY=y 436CONFIG_SCLP_TTY=y
436CONFIG_SCLP_CONSOLE=y 437CONFIG_SCLP_CONSOLE=y
437CONFIG_SCLP_VT220_TTY=y 438CONFIG_SCLP_VT220_TTY=y
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y
724# CONFIG_CRYPTO_MD4 is not set 725# CONFIG_CRYPTO_MD4 is not set
725# CONFIG_CRYPTO_MD5 is not set 726# CONFIG_CRYPTO_MD5 is not set
726# CONFIG_CRYPTO_SHA1 is not set 727# CONFIG_CRYPTO_SHA1 is not set
727# CONFIG_CRYPTO_SHA1_S390 is not set
728# CONFIG_CRYPTO_SHA256 is not set 728# CONFIG_CRYPTO_SHA256 is not set
729# CONFIG_CRYPTO_SHA256_S390 is not set
730# CONFIG_CRYPTO_SHA512 is not set 729# CONFIG_CRYPTO_SHA512 is not set
731# CONFIG_CRYPTO_WP512 is not set 730# CONFIG_CRYPTO_WP512 is not set
732# CONFIG_CRYPTO_TGR192 is not set 731# CONFIG_CRYPTO_TGR192 is not set
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m
735CONFIG_CRYPTO_CBC=y 734CONFIG_CRYPTO_CBC=y
736# CONFIG_CRYPTO_LRW is not set 735# CONFIG_CRYPTO_LRW is not set
737# CONFIG_CRYPTO_DES is not set 736# CONFIG_CRYPTO_DES is not set
738# CONFIG_CRYPTO_DES_S390 is not set
739# CONFIG_CRYPTO_BLOWFISH is not set 737# CONFIG_CRYPTO_BLOWFISH is not set
740# CONFIG_CRYPTO_TWOFISH is not set 738# CONFIG_CRYPTO_TWOFISH is not set
741# CONFIG_CRYPTO_SERPENT is not set 739# CONFIG_CRYPTO_SERPENT is not set
742# CONFIG_CRYPTO_AES is not set 740# CONFIG_CRYPTO_AES is not set
743# CONFIG_CRYPTO_AES_S390 is not set
744# CONFIG_CRYPTO_CAST5 is not set 741# CONFIG_CRYPTO_CAST5 is not set
745# CONFIG_CRYPTO_CAST6 is not set 742# CONFIG_CRYPTO_CAST6 is not set
746# CONFIG_CRYPTO_TEA is not set 743# CONFIG_CRYPTO_TEA is not set
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y
755# 752#
756# Hardware crypto devices 753# Hardware crypto devices
757# 754#
755# CONFIG_CRYPTO_SHA1_S390 is not set
756# CONFIG_CRYPTO_SHA256_S390 is not set
757# CONFIG_CRYPTO_DES_S390 is not set
758# CONFIG_CRYPTO_AES_S390 is not set
759CONFIG_S390_PRNG=m
758 760
759# 761#
760# Library routines 762# Library routines
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index f4b00cd81f7c..b08d2abf6178 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o 5obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
6 6
7s390_hypfs-objs := inode.o hypfs_diag.o 7s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f3dbd91965c6..aea572009d60 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb,
27 struct dentry *dir, const char *name, 27 struct dentry *dir, const char *name,
28 char *string); 28 char *string);
29 29
30/* LPAR Hypervisor */
31extern int hypfs_diag_init(void);
32extern void hypfs_diag_exit(void);
33extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
34
35/* VM Hypervisor */
36extern int hypfs_vm_init(void);
37extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
38
30#endif /* _HYPFS_H_ */ 39#endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
deleted file mode 100644
index 256b384aebe1..000000000000
--- a/arch/s390/hypfs/hypfs_diag.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * arch/s390/hypfs_diag.h
3 * Hypervisor filesystem for Linux on s390.
4 *
5 * Copyright (C) IBM Corp. 2006
6 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#ifndef _HYPFS_DIAG_H_
10#define _HYPFS_DIAG_H_
11
12extern int hypfs_diag_init(void);
13extern void hypfs_diag_exit(void);
14extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
15
16#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
new file mode 100644
index 000000000000..d01fc8f799f0
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -0,0 +1,231 @@
1/*
2 * Hypervisor filesystem for Linux on s390. z/VM implementation.
3 *
4 * Copyright (C) IBM Corp. 2006
5 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <linux/vmalloc.h>
12#include <asm/ebcdic.h>
13#include "hypfs.h"
14
15#define NAME_LEN 8
16
17static char local_guest[] = " ";
18static char all_guests[] = "* ";
19static char *guest_query;
20
21struct diag2fc_data {
22 __u32 version;
23 __u32 flags;
24 __u64 used_cpu;
25 __u64 el_time;
26 __u64 mem_min_kb;
27 __u64 mem_max_kb;
28 __u64 mem_share_kb;
29 __u64 mem_used_kb;
30 __u32 pcpus;
31 __u32 lcpus;
32 __u32 vcpus;
33 __u32 cpu_min;
34 __u32 cpu_max;
35 __u32 cpu_shares;
36 __u32 cpu_use_samp;
37 __u32 cpu_delay_samp;
38 __u32 page_wait_samp;
39 __u32 idle_samp;
40 __u32 other_samp;
41 __u32 total_samp;
42 char guest_name[NAME_LEN];
43};
44
45struct diag2fc_parm_list {
46 char userid[NAME_LEN];
47 char aci_grp[NAME_LEN];
48 __u64 addr;
49 __u32 size;
50 __u32 fmt;
51};
52
53static int diag2fc(int size, char* query, void *addr)
54{
55 unsigned long residual_cnt;
56 unsigned long rc;
57 struct diag2fc_parm_list parm_list;
58
59 memcpy(parm_list.userid, query, NAME_LEN);
60 ASCEBC(parm_list.userid, NAME_LEN);
61 parm_list.addr = (unsigned long) addr ;
62 parm_list.size = size;
63 parm_list.fmt = 0x02;
64 memset(parm_list.aci_grp, 0x40, NAME_LEN);
65 rc = -1;
66
67 asm volatile(
68 " diag %0,%1,0x2fc\n"
69 "0:\n"
70 EX_TABLE(0b,0b)
71 : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory");
72
73 if ((rc != 0 ) && (rc != -2))
74 return rc;
75 else
76 return -residual_cnt;
77}
78
79static struct diag2fc_data *diag2fc_store(char *query, int *count)
80{
81 int size;
82 struct diag2fc_data *data;
83
84 do {
85 size = diag2fc(0, query, NULL);
86 if (size < 0)
87 return ERR_PTR(-EACCES);
88 data = vmalloc(size);
89 if (!data)
90 return ERR_PTR(-ENOMEM);
91 if (diag2fc(size, query, data) == 0)
92 break;
93 vfree(data);
94 } while (1);
95 *count = (size / sizeof(*data));
96
97 return data;
98}
99
100static void diag2fc_free(void *data)
101{
102 vfree(data);
103}
104
105#define ATTRIBUTE(sb, dir, name, member) \
106do { \
107 void *rc; \
108 rc = hypfs_create_u64(sb, dir, name, member); \
109 if (IS_ERR(rc)) \
110 return PTR_ERR(rc); \
111} while(0)
112
113static int hpyfs_vm_create_guest(struct super_block *sb,
114 struct dentry *systems_dir,
115 struct diag2fc_data *data)
116{
117 char guest_name[NAME_LEN + 1] = {};
118 struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
119 int dedicated_flag, capped_value;
120
121 capped_value = (data->flags & 0x00000006) >> 1;
122 dedicated_flag = (data->flags & 0x00000008) >> 3;
123
124 /* guest dir */
125 memcpy(guest_name, data->guest_name, NAME_LEN);
126 EBCASC(guest_name, NAME_LEN);
127 strstrip(guest_name);
128 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
129 if (IS_ERR(guest_dir))
130 return PTR_ERR(guest_dir);
131 ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
132
133 /* logical cpu information */
134 cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
135 if (IS_ERR(cpus_dir))
136 return PTR_ERR(cpus_dir);
137 ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
138 ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
139 ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
140 ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
141 ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
142 ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
143 ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
144
145 /* memory information */
146 mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
147 if (IS_ERR(mem_dir))
148 return PTR_ERR(mem_dir);
149 ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
150 ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
151 ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
152 ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
153
154 /* samples */
155 samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
156 if (IS_ERR(samples_dir))
157 return PTR_ERR(samples_dir);
158 ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
159 ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
160 ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
161 ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
162 ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
163 ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
164 return 0;
165}
166
167int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
168{
169 struct dentry *dir, *file;
170 struct diag2fc_data *data;
171 int rc, i, count = 0;
172
173 data = diag2fc_store(guest_query, &count);
174 if (IS_ERR(data))
175 return PTR_ERR(data);
176
177 /* Hypervisor Info */
178 dir = hypfs_mkdir(sb, root, "hyp");
179 if (IS_ERR(dir)) {
180 rc = PTR_ERR(dir);
181 goto failed;
182 }
183 file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
184 if (IS_ERR(file)) {
185 rc = PTR_ERR(file);
186 goto failed;
187 }
188
189 /* physical cpus */
190 dir = hypfs_mkdir(sb, root, "cpus");
191 if (IS_ERR(dir)) {
192 rc = PTR_ERR(dir);
193 goto failed;
194 }
195 file = hypfs_create_u64(sb, dir, "count", data->lcpus);
196 if (IS_ERR(file)) {
197 rc = PTR_ERR(file);
198 goto failed;
199 }
200
201 /* guests */
202 dir = hypfs_mkdir(sb, root, "systems");
203 if (IS_ERR(dir)) {
204 rc = PTR_ERR(dir);
205 goto failed;
206 }
207
208 for (i = 0; i < count; i++) {
209 rc = hpyfs_vm_create_guest(sb, dir, &(data[i]));
210 if (rc)
211 goto failed;
212 }
213 diag2fc_free(data);
214 return 0;
215
216failed:
217 diag2fc_free(data);
218 return rc;
219}
220
221int hypfs_vm_init(void)
222{
223 if (diag2fc(0, all_guests, NULL) > 0)
224 guest_query = all_guests;
225 else if (diag2fc(0, local_guest, NULL) > 0)
226 guest_query = local_guest;
227 else
228 return -EACCES;
229
230 return 0;
231}
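diag2fc_store() above uses a common query-then-allocate pattern: ask the hypervisor for the current size, allocate, and retry if the data grew before the second call. Below is a generic user-space sketch of the same loop; query_source() and its return convention are invented for illustration, and only the loop structure mirrors the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented data source: with buf == NULL it reports how many bytes are
 * currently needed, otherwise it fills buf and returns 0, or -1 if buf
 * turned out to be too small in the meantime. */
static size_t needed = 64;

static int query_source(void *buf, size_t size)
{
    if (!buf)
        return (int)needed;
    if (size < needed)
        return -1;
    memset(buf, 0xaa, needed);
    return 0;
}

/* query-then-allocate retry loop, structured like diag2fc_store() */
static void *fetch(size_t *out_size)
{
    for (;;) {
        int size = query_source(NULL, 0);      /* how much space is needed now? */
        if (size < 0)
            return NULL;
        void *buf = malloc(size);
        if (!buf)
            return NULL;
        if (query_source(buf, size) == 0) {    /* may have grown in between */
            *out_size = (size_t)size;
            return buf;
        }
        free(buf);                             /* too small by now: try again */
    }
}

int main(void)
{
    size_t n = 0;
    void *data = fetch(&n);

    if (data)
        printf("fetched %zu bytes\n", n);
    free(data);
    return 0;
}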
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index b6716c4b9934..a4fda7b53640 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <asm/ebcdic.h> 20#include <asm/ebcdic.h>
21#include "hypfs.h" 21#include "hypfs.h"
22#include "hypfs_diag.h"
23 22
24#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ 23#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
25#define TMP_SIZE 64 /* size of temporary buffers */ 24#define TMP_SIZE 64 /* size of temporary buffers */
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
192 goto out; 191 goto out;
193 } 192 }
194 hypfs_delete_tree(sb->s_root); 193 hypfs_delete_tree(sb->s_root);
195 rc = hypfs_diag_create_files(sb, sb->s_root); 194 if (MACHINE_IS_VM)
195 rc = hypfs_vm_create_files(sb, sb->s_root);
196 else
197 rc = hypfs_diag_create_files(sb, sb->s_root);
196 if (rc) { 198 if (rc) {
197 printk(KERN_ERR "hypfs: Update failed\n"); 199 printk(KERN_ERR "hypfs: Update failed\n");
198 hypfs_delete_tree(sb->s_root); 200 hypfs_delete_tree(sb->s_root);
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
289 rc = -ENOMEM; 291 rc = -ENOMEM;
290 goto err_alloc; 292 goto err_alloc;
291 } 293 }
292 rc = hypfs_diag_create_files(sb, root_dentry); 294 if (MACHINE_IS_VM)
295 rc = hypfs_vm_create_files(sb, root_dentry);
296 else
297 rc = hypfs_diag_create_files(sb, root_dentry);
293 if (rc) 298 if (rc)
294 goto err_tree; 299 goto err_tree;
295 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 300 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
@@ -462,11 +467,15 @@ static int __init hypfs_init(void)
462{ 467{
463 int rc; 468 int rc;
464 469
465 if (MACHINE_IS_VM) 470 if (MACHINE_IS_VM) {
466 return -ENODATA; 471 if (hypfs_vm_init())
467 if (hypfs_diag_init()) { 472 /* no diag 2fc, just exit */
468 rc = -ENODATA; 473 return -ENODATA;
469 goto fail_diag; 474 } else {
475 if (hypfs_diag_init()) {
476 rc = -ENODATA;
477 goto fail_diag;
478 }
470 } 479 }
471 kset_set_kset_s(&s390_subsys, hypervisor_subsys); 480 kset_set_kset_s(&s390_subsys, hypervisor_subsys);
472 rc = subsystem_register(&s390_subsys); 481 rc = subsystem_register(&s390_subsys);
@@ -480,7 +489,8 @@ static int __init hypfs_init(void)
480fail_filesystem: 489fail_filesystem:
481 subsystem_unregister(&s390_subsys); 490 subsystem_unregister(&s390_subsys);
482fail_sysfs: 491fail_sysfs:
483 hypfs_diag_exit(); 492 if (!MACHINE_IS_VM)
493 hypfs_diag_exit();
484fail_diag: 494fail_diag:
485 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); 495 printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
486 return rc; 496 return rc;
@@ -488,7 +498,8 @@ fail_diag:
488 498
489static void __exit hypfs_exit(void) 499static void __exit hypfs_exit(void)
490{ 500{
491 hypfs_diag_exit(); 501 if (!MACHINE_IS_VM)
502 hypfs_diag_exit();
492 unregister_filesystem(&hypfs_type); 503 unregister_filesystem(&hypfs_type);
493 subsystem_unregister(&s390_subsys); 504 subsystem_unregister(&s390_subsys);
494} 505}
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a81881c9b297..5492d25d7d69 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,9 +4,9 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7obj-y := bitmap.o traps.o time.o process.o reset.o \ 7obj-y := bitmap.o traps.o time.o process.o base.o early.o \
8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 8 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
9 semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o 9 semaphore.o s390_ext.o debug.o irq.o ipl.o
10 10
11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 11obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 12obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
new file mode 100644
index 000000000000..dc7e5259770f
--- /dev/null
+++ b/arch/s390/kernel/base.S
@@ -0,0 +1,150 @@
1/*
2 * arch/s390/kernel/base.S
3 *
4 * Copyright IBM Corp. 2006,2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com>
7 */
8
9#include <asm/ptrace.h>
10#include <asm/lowcore.h>
11
12#ifdef CONFIG_64BIT
13
14 .globl s390_base_mcck_handler
15s390_base_mcck_handler:
16 basr %r13,0
170: lg %r15,__LC_PANIC_STACK # load panic stack
18 aghi %r15,-STACK_FRAME_OVERHEAD
19 larl %r1,s390_base_mcck_handler_fn
20 lg %r1,0(%r1)
21 ltgr %r1,%r1
22 jz 1f
23 basr %r14,%r1
241: la %r1,4095
25 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
26 lpswe __LC_MCK_OLD_PSW
27
28 .section .bss
29 .globl s390_base_mcck_handler_fn
30s390_base_mcck_handler_fn:
31 .quad 0
32 .previous
33
34 .globl s390_base_ext_handler
35s390_base_ext_handler:
36 stmg %r0,%r15,__LC_SAVE_AREA
37 basr %r13,0
380: aghi %r15,-STACK_FRAME_OVERHEAD
39 larl %r1,s390_base_ext_handler_fn
40 lg %r1,0(%r1)
41 ltgr %r1,%r1
42 jz 1f
43 basr %r14,%r1
441: lmg %r0,%r15,__LC_SAVE_AREA
45 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
46 lpswe __LC_EXT_OLD_PSW
47
48 .section .bss
49 .globl s390_base_ext_handler_fn
50s390_base_ext_handler_fn:
51 .quad 0
52 .previous
53
54 .globl s390_base_pgm_handler
55s390_base_pgm_handler:
56 stmg %r0,%r15,__LC_SAVE_AREA
57 basr %r13,0
580: aghi %r15,-STACK_FRAME_OVERHEAD
59 larl %r1,s390_base_pgm_handler_fn
60 lg %r1,0(%r1)
61 ltgr %r1,%r1
62 jz 1f
63 basr %r14,%r1
64 lmg %r0,%r15,__LC_SAVE_AREA
65 lpswe __LC_PGM_OLD_PSW
661: lpswe disabled_wait_psw-0b(%r13)
67
68 .align 8
69disabled_wait_psw:
70 .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
71
72 .section .bss
73 .globl s390_base_pgm_handler_fn
74s390_base_pgm_handler_fn:
75 .quad 0
76 .previous
77
78#else /* CONFIG_64BIT */
79
80 .globl s390_base_mcck_handler
81s390_base_mcck_handler:
82 basr %r13,0
830: l %r15,__LC_PANIC_STACK # load panic stack
84 ahi %r15,-STACK_FRAME_OVERHEAD
85 l %r1,2f-0b(%r13)
86 l %r1,0(%r1)
87 ltr %r1,%r1
88 jz 1f
89 basr %r14,%r1
901: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
91 lpsw __LC_MCK_OLD_PSW
92
932: .long s390_base_mcck_handler_fn
94
95 .section .bss
96 .globl s390_base_mcck_handler_fn
97s390_base_mcck_handler_fn:
98 .long 0
99 .previous
100
101 .globl s390_base_ext_handler
102s390_base_ext_handler:
103 stm %r0,%r15,__LC_SAVE_AREA
104 basr %r13,0
1050: ahi %r15,-STACK_FRAME_OVERHEAD
106 l %r1,2f-0b(%r13)
107 l %r1,0(%r1)
108 ltr %r1,%r1
109 jz 1f
110 basr %r14,%r1
1111: lm %r0,%r15,__LC_SAVE_AREA
112 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
113 lpsw __LC_EXT_OLD_PSW
114
1152: .long s390_base_ext_handler_fn
116
117 .section .bss
118 .globl s390_base_ext_handler_fn
119s390_base_ext_handler_fn:
120 .long 0
121 .previous
122
123 .globl s390_base_pgm_handler
124s390_base_pgm_handler:
125 stm %r0,%r15,__LC_SAVE_AREA
126 basr %r13,0
1270: ahi %r15,-STACK_FRAME_OVERHEAD
128 l %r1,2f-0b(%r13)
129 l %r1,0(%r1)
130 ltr %r1,%r1
131 jz 1f
132 basr %r14,%r1
133 lm %r0,%r15,__LC_SAVE_AREA
134 lpsw __LC_PGM_OLD_PSW
135
1361: lpsw disabled_wait_psw-0b(%r13)
137
1382: .long s390_base_pgm_handler_fn
139
140disabled_wait_psw:
141 .align 8
142 .long 0x000a0000,0x00000000 + s390_base_pgm_handler
143
144 .section .bss
145 .globl s390_base_pgm_handler_fn
146s390_base_pgm_handler_fn:
147 .long 0
148 .previous
149
150#endif /* CONFIG_64BIT */
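
A user-space sketch of the pattern base.S implements: each assembler stub loads one of the *_handler_fn pointers from .bss and calls it only if it is non-NULL (the ltgr/jz pair), which lets early C code such as setup_lowcore_early() install a handler later. The names below are stand-ins, not the kernel symbols.

/* Stand-alone sketch (not kernel code). */
#include <stdio.h>

static void (*base_pgm_handler_fn)(void);	/* starts out NULL, like .bss */

static void base_pgm_handler(void)
{
	/* mirror of the ltgr/jz check in base.S: skip a NULL handler */
	if (base_pgm_handler_fn)
		base_pgm_handler_fn();
}

static void early_pgm_check(void)
{
	puts("early program check handled");
}

int main(void)
{
	base_pgm_handler();			/* no handler installed yet */
	base_pgm_handler_fn = early_pgm_check;	/* what setup_lowcore_early() does */
	base_pgm_handler();
	return 0;
}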
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 5c46054195cb..f1e40ca00d8d 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
192 192
193#undef cputime_to_timeval 193#undef cputime_to_timeval
194#define cputime_to_timeval cputime_to_compat_timeval 194#define cputime_to_timeval cputime_to_compat_timeval
195static __inline__ void 195static inline void
196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) 196cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
197{ 197{
198 value->tv_usec = cputime % 1000000; 198 value->tv_usec = cputime % 1000000;
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 71d27c493568..914d49444f92 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -12,10 +12,9 @@
12#include <linux/personality.h> 12#include <linux/personality.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15struct exec_domain s390_exec_domain; 15static struct exec_domain s390_exec_domain;
16 16
17static int __init 17static int __init s390_init (void)
18s390_init (void)
19{ 18{
20 s390_exec_domain.name = "Linux/s390"; 19 s390_exec_domain.name = "Linux/s390";
21 s390_exec_domain.handler = NULL; 20 s390_exec_domain.handler = NULL;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 5b33f823863a..666bb6daa148 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
69 69
70#include "compat_linux.h" 70#include "compat_linux.h"
71 71
72long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
73 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
74 PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
75long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
76 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
77 PSW32_MASK_PSTATE);
72 78
73/* For this source file, we want overflow handling. */ 79/* For this source file, we want overflow handling. */
74 80
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
416 mm_segment_t old_fs = get_fs (); 422 mm_segment_t old_fs = get_fs ();
417 423
418 set_fs (KERNEL_DS); 424 set_fs (KERNEL_DS);
419 ret = sys_sysinfo((struct sysinfo __user *) &s); 425 ret = sys_sysinfo((struct sysinfo __force __user *) &s);
420 set_fs (old_fs); 426 set_fs (old_fs);
421 err = put_user (s.uptime, &info->uptime); 427 err = put_user (s.uptime, &info->uptime);
422 err |= __put_user (s.loads[0], &info->loads[0]); 428 err |= __put_user (s.loads[0], &info->loads[0]);
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
445 mm_segment_t old_fs = get_fs (); 451 mm_segment_t old_fs = get_fs ();
446 452
447 set_fs (KERNEL_DS); 453 set_fs (KERNEL_DS);
448 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); 454 ret = sys_sched_rr_get_interval(pid,
455 (struct timespec __force __user *) &t);
449 set_fs (old_fs); 456 set_fs (old_fs);
450 if (put_compat_timespec(&t, interval)) 457 if (put_compat_timespec(&t, interval))
451 return -EFAULT; 458 return -EFAULT;
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
472 } 479 }
473 set_fs (KERNEL_DS); 480 set_fs (KERNEL_DS);
474 ret = sys_rt_sigprocmask(how, 481 ret = sys_rt_sigprocmask(how,
475 set ? (sigset_t __user *) &s : NULL, 482 set ? (sigset_t __force __user *) &s : NULL,
476 oset ? (sigset_t __user *) &s : NULL, 483 oset ? (sigset_t __force __user *) &s : NULL,
477 sigsetsize); 484 sigsetsize);
478 set_fs (old_fs); 485 set_fs (old_fs);
479 if (ret) return ret; 486 if (ret) return ret;
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
499 mm_segment_t old_fs = get_fs(); 506 mm_segment_t old_fs = get_fs();
500 507
501 set_fs (KERNEL_DS); 508 set_fs (KERNEL_DS);
502 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 509 ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
503 set_fs (old_fs); 510 set_fs (old_fs);
504 if (!ret) { 511 if (!ret) {
505 switch (_NSIG_WORDS) { 512 switch (_NSIG_WORDS) {
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
524 if (copy_siginfo_from_user32(&info, uinfo)) 531 if (copy_siginfo_from_user32(&info, uinfo))
525 return -EFAULT; 532 return -EFAULT;
526 set_fs (KERNEL_DS); 533 set_fs (KERNEL_DS);
527 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); 534 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
528 set_fs (old_fs); 535 set_fs (old_fs);
529 return ret; 536 return ret;
530} 537}
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse
682 689
683 set_fs(KERNEL_DS); 690 set_fs(KERNEL_DS);
684 ret = sys_sendfile(out_fd, in_fd, 691 ret = sys_sendfile(out_fd, in_fd,
685 offset ? (off_t __user *) &of : NULL, count); 692 offset ? (off_t __force __user *) &of : NULL, count);
686 set_fs(old_fs); 693 set_fs(old_fs);
687 694
688 if (offset && put_user(of, offset)) 695 if (offset && put_user(of, offset))
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
703 710
704 set_fs(KERNEL_DS); 711 set_fs(KERNEL_DS);
705 ret = sys_sendfile64(out_fd, in_fd, 712 ret = sys_sendfile64(out_fd, in_fd,
706 offset ? (loff_t __user *) &lof : NULL, count); 713 offset ? (loff_t __force __user *) &lof : NULL,
714 count);
707 set_fs(old_fs); 715 set_fs(old_fs);
708 716
709 if (offset && put_user(lof, offset)) 717 if (offset && put_user(lof, offset))
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
115 __u32 addr; 115 __u32 addr;
116} _psw_t32 __attribute__ ((aligned(8))); 116} _psw_t32 __attribute__ ((aligned(8)));
117 117
118#define PSW32_MASK_PER 0x40000000UL
119#define PSW32_MASK_DAT 0x04000000UL
120#define PSW32_MASK_IO 0x02000000UL
121#define PSW32_MASK_EXT 0x01000000UL
122#define PSW32_MASK_KEY 0x00F00000UL
123#define PSW32_MASK_MCHECK 0x00040000UL
124#define PSW32_MASK_WAIT 0x00020000UL
125#define PSW32_MASK_PSTATE 0x00010000UL
126#define PSW32_MASK_ASC 0x0000C000UL
127#define PSW32_MASK_CC 0x00003000UL
128#define PSW32_MASK_PM 0x00000f00UL
129
130#define PSW32_ADDR_AMODE31 0x80000000UL
131#define PSW32_ADDR_INSN 0x7FFFFFFFUL
132
133#define PSW32_BASE_BITS 0x00080000UL
134
135#define PSW32_ASC_PRIMARY 0x00000000UL
136#define PSW32_ASC_ACCREG 0x00004000UL
137#define PSW32_ASC_SECONDARY 0x00008000UL
138#define PSW32_ASC_HOME 0x0000C000UL
139
140#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
141 PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
142 PSW32_MASK_PSTATE)
143
144#define PSW32_MASK_MERGE(CURRENT,NEW) \
145 (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
146 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
147
148
149typedef struct 118typedef struct
150{ 119{
151 _psw_t32 psw; 120 _psw_t32 psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 861888ab8c13..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
275 } 275 }
276 276
277 set_fs (KERNEL_DS); 277 set_fs (KERNEL_DS);
278 ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), 278 ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
279 (stack_t __user *) (uoss ? &koss : NULL), 279 (stack_t __force __user *) (uoss ? &koss : NULL),
280 regs->gprs[15]); 280 regs->gprs[15]);
281 set_fs (old_fs); 281 set_fs (old_fs);
282 282
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
298 _s390_regs_common32 regs32; 298 _s390_regs_common32 regs32;
299 int err, i; 299 int err, i;
300 300
301 regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, 301 regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
302 (__u32)(regs->psw.mask >> 32)); 302 (__u32)(regs->psw.mask >> 32));
303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; 303 regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
304 for (i = 0; i < NUM_GPRS; i++) 304 for (i = 0; i < NUM_GPRS; i++)
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
401 goto badframe; 401 goto badframe;
402 402
403 set_fs (KERNEL_DS); 403 set_fs (KERNEL_DS);
404 do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); 404 do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
405 set_fs (old_fs); 405 set_fs (old_fs);
406 406
407 return regs->gprs[2]; 407 return regs->gprs[2];
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index a5972f1541fe..6c89f30c8e31 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,6 +16,7 @@
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17#include <asm/cpcmd.h> 17#include <asm/cpcmd.h>
18#include <asm/system.h> 18#include <asm/system.h>
19#include <asm/io.h>
19 20
20static DEFINE_SPINLOCK(cpcmd_lock); 21static DEFINE_SPINLOCK(cpcmd_lock);
21static char cpcmd_buf[241]; 22static char cpcmd_buf[241];
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
88 int len; 89 int len;
89 unsigned long flags; 90 unsigned long flags;
90 91
91 if ((rlen == 0) || (response == NULL) 92 if ((virt_to_phys(response) != (unsigned long) response) ||
92 || !((unsigned long)response >> 31)) { 93 (((unsigned long)response + rlen) >> 31)) {
93 spin_lock_irqsave(&cpcmd_lock, flags);
94 len = __cpcmd(cmd, response, rlen, response_code);
95 spin_unlock_irqrestore(&cpcmd_lock, flags);
96 }
97 else {
98 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); 94 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
99 if (!lowbuf) { 95 if (!lowbuf) {
100 printk(KERN_WARNING 96 printk(KERN_WARNING
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
106 spin_unlock_irqrestore(&cpcmd_lock, flags); 102 spin_unlock_irqrestore(&cpcmd_lock, flags);
107 memcpy(response, lowbuf, rlen); 103 memcpy(response, lowbuf, rlen);
108 kfree(lowbuf); 104 kfree(lowbuf);
105 } else {
106 spin_lock_irqsave(&cpcmd_lock, flags);
107 len = __cpcmd(cmd, response, rlen, response_code);
108 spin_unlock_irqrestore(&cpcmd_lock, flags);
109 } 109 }
110 return len; 110 return len;
111} 111}
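
A stand-alone sketch of the check the reworked cpcmd() now uses to decide whether the caller's response buffer has to be bounced through a kmalloc'd GFP_DMA buffer: the buffer must be identity-mapped and must end below the 2GB line. virt_to_phys() is stubbed as an identity mapping here purely for illustration.

/* Stand-alone sketch (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

static unsigned long virt_to_phys_stub(void *addr)
{
	return (unsigned long)addr;	/* assumption: identity-mapped */
}

static bool needs_low_buffer(void *response, int rlen)
{
	/* bounce if the buffer is not identity-mapped or if any byte of it
	 * lies at or above the 2GB line (bit 31 set) */
	return virt_to_phys_stub(response) != (unsigned long)response ||
	       (((unsigned long)response + rlen) >> 31) != 0;
}

int main(void)
{
	printf("%d\n", needs_low_buffer((void *)0x7ff00000UL, 240));	/* 0 */
	printf("%d\n", needs_low_buffer((void *)0x7fffffffUL, 240));	/* 1 */
	return 0;
}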
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 926cceeae0fa..8cc7c9fa64f5 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/threads.h> 10#include <linux/threads.h>
11#include <linux/kexec.h> 11#include <linux/kexec.h>
12#include <linux/reboot.h>
12 13
13void machine_crash_shutdown(struct pt_regs *regs) 14void machine_crash_shutdown(struct pt_regs *regs)
14{ 15{
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb57bc0e3fc8..f4b62df02aa2 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = {
120 NULL 120 NULL
121}; 121};
122 122
123struct debug_view debug_level_view = { 123static struct debug_view debug_level_view = {
124 "level", 124 "level",
125 &debug_prolog_level_fn, 125 &debug_prolog_level_fn,
126 NULL, 126 NULL,
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = {
129 NULL 129 NULL
130}; 130};
131 131
132struct debug_view debug_pages_view = { 132static struct debug_view debug_pages_view = {
133 "pages", 133 "pages",
134 &debug_prolog_pages_fn, 134 &debug_prolog_pages_fn,
135 NULL, 135 NULL,
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = {
138 NULL 138 NULL
139}; 139};
140 140
141struct debug_view debug_flush_view = { 141static struct debug_view debug_flush_view = {
142 "flush", 142 "flush",
143 NULL, 143 NULL,
144 NULL, 144 NULL,
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = {
156 NULL 156 NULL
157}; 157};
158 158
159 159/* used by dump analysis tools to determine version of debug feature */
160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; 160unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
161 161
162/* static globals */ 162/* static globals */
163 163
164static debug_info_t *debug_area_first = NULL; 164static debug_info_t *debug_area_first = NULL;
165static debug_info_t *debug_area_last = NULL; 165static debug_info_t *debug_area_last = NULL;
166DECLARE_MUTEX(debug_lock); 166static DECLARE_MUTEX(debug_lock);
167 167
168static int initialized; 168static int initialized;
169 169
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = {
905 { .ctl_name = 0 } 905 { .ctl_name = 0 }
906}; 906};
907 907
908struct ctl_table_header *s390dbf_sysctl_header; 908static struct ctl_table_header *s390dbf_sysctl_header;
909 909
910void 910void
911debug_stop_all(void) 911debug_stop_all(void)
@@ -1300,8 +1300,7 @@ out:
1300 * flushes debug areas 1300 * flushes debug areas
1301 */ 1301 */
1302 1302
1303void 1303static void debug_flush(debug_info_t* id, int area)
1304debug_flush(debug_info_t* id, int area)
1305{ 1304{
1306 unsigned long flags; 1305 unsigned long flags;
1307 int i,j; 1306 int i,j;
@@ -1511,8 +1510,7 @@ out:
1511/* 1510/*
1512 * clean up module 1511 * clean up module
1513 */ 1512 */
1514void 1513static void __exit debug_exit(void)
1515__exit debug_exit(void)
1516{ 1514{
1517 debugfs_remove(debug_debugfs_root_entry); 1515 debugfs_remove(debug_debugfs_root_entry);
1518 unregister_sysctl_table(s390dbf_sysctl_header); 1516 unregister_sysctl_table(s390dbf_sysctl_header);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
new file mode 100644
index 000000000000..e518dd53eff5
--- /dev/null
+++ b/arch/s390/kernel/early.c
@@ -0,0 +1,306 @@
1/*
2 * arch/s390/kernel/early.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/lockdep.h>
14#include <linux/module.h>
15#include <linux/pfn.h>
16#include <linux/uaccess.h>
17#include <asm/lowcore.h>
18#include <asm/processor.h>
19#include <asm/sections.h>
20#include <asm/setup.h>
21#include <asm/cpcmd.h>
22#include <asm/sclp.h>
23
24/*
25 * Create a Kernel NSS if the SAVESYS= parameter is defined
26 */
27#define DEFSYS_CMD_SIZE 96
28#define SAVESYS_CMD_SIZE 32
29
30char kernel_nss_name[NSS_NAME_SIZE + 1];
31
32#ifdef CONFIG_SHARED_KERNEL
33static noinline __init void create_kernel_nss(void)
34{
35 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
36#ifdef CONFIG_BLK_DEV_INITRD
37 unsigned int sinitrd_pfn, einitrd_pfn;
38#endif
39 int response;
40 char *savesys_ptr;
41 char upper_command_line[COMMAND_LINE_SIZE];
42 char defsys_cmd[DEFSYS_CMD_SIZE];
43 char savesys_cmd[SAVESYS_CMD_SIZE];
44
45 /* Do nothing if we are not running under VM */
46 if (!MACHINE_IS_VM)
47 return;
48
49 /* Convert COMMAND_LINE to upper case */
50 for (i = 0; i < strlen(COMMAND_LINE); i++)
51 upper_command_line[i] = toupper(COMMAND_LINE[i]);
52
53 savesys_ptr = strstr(upper_command_line, "SAVESYS=");
54
55 if (!savesys_ptr)
56 return;
57
58 savesys_ptr += 8; /* Point to the beginning of the NSS name */
59 for (i = 0; i < NSS_NAME_SIZE; i++) {
60 if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
61 break;
62 kernel_nss_name[i] = savesys_ptr[i];
63 }
64
65 stext_pfn = PFN_DOWN(__pa(&_stext));
66 eshared_pfn = PFN_DOWN(__pa(&_eshared));
67 end_pfn = PFN_UP(__pa(&_end));
68 min_size = end_pfn << 2;
69
70 sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
71 kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
72 eshared_pfn, end_pfn);
73
74#ifdef CONFIG_BLK_DEV_INITRD
75 if (INITRD_START && INITRD_SIZE) {
76 sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
77 einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
78 min_size = einitrd_pfn << 2;
79 sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
80 sinitrd_pfn, einitrd_pfn);
81 }
82#endif
83
84 sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
85 sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
86 kernel_nss_name, kernel_nss_name);
87
88 __cpcmd(defsys_cmd, NULL, 0, &response);
89
90 if (response != 0)
91 return;
92
93 __cpcmd(savesys_cmd, NULL, 0, &response);
94
95 if (response != strlen(savesys_cmd))
96 return;
97
98 ipl_flags = IPL_NSS_VALID;
99}
100
101#else /* CONFIG_SHARED_KERNEL */
102
103static inline void create_kernel_nss(void) { }
104
105#endif /* CONFIG_SHARED_KERNEL */
106
107/*
108 * Clear bss memory
109 */
110static noinline __init void clear_bss_section(void)
111{
112 memset(__bss_start, 0, _end - __bss_start);
113}
114
115/*
116 * Initialize storage key for kernel pages
117 */
118static noinline __init void init_kernel_storage_key(void)
119{
120 unsigned long end_pfn, init_pfn;
121
122 end_pfn = PFN_UP(__pa(&_end));
123
124 for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
125 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
126}
127
128static noinline __init void detect_machine_type(void)
129{
130 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
131
132 asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));
133
134 /* Running under z/VM ? */
135 if (cpuinfo->cpu_id.version == 0xff)
136 machine_flags |= 1;
137
138 /* Running on a P/390 ? */
139 if (cpuinfo->cpu_id.machine == 0x7490)
140 machine_flags |= 4;
141}
142
143static noinline __init int memory_fast_detect(void)
144{
145
146 unsigned long val0 = 0;
147 unsigned long val1 = 0xc;
148 int ret = -ENOSYS;
149
150 if (ipl_flags & IPL_NSS_VALID)
151 return -ENOSYS;
152
153 asm volatile(
154 " diag %1,%2,0x260\n"
155 "0: lhi %0,0\n"
156 "1:\n"
157 EX_TABLE(0b,1b)
158 : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
159
160 if (ret || val0 != val1)
161 return -ENOSYS;
162
163 memory_chunk[0].size = val0;
164 return 0;
165}
166
167#define ADDR2G (1UL << 31)
168
169static noinline __init unsigned long sclp_memory_detect(void)
170{
171 struct sclp_readinfo_sccb *sccb;
172 unsigned long long memsize;
173
174 sccb = &s390_readinfo_sccb;
175
176 if (sccb->header.response_code != 0x10)
177 return 0;
178
179 if (sccb->rnsize)
180 memsize = sccb->rnsize << 20;
181 else
182 memsize = sccb->rnsize2 << 20;
183 if (sccb->rnmax)
184 memsize *= sccb->rnmax;
185 else
186 memsize *= sccb->rnmax2;
187#ifndef CONFIG_64BIT
188 /*
189 * Can't deal with more than 2G in 31 bit addressing mode, so
190 * limit the value in order to avoid strange side effects.
191 */
192 if (memsize > ADDR2G)
193 memsize = ADDR2G;
194#endif
195 return (unsigned long) memsize;
196}
197
198static inline __init unsigned long __tprot(unsigned long addr)
199{
200 int cc = -1;
201
202 asm volatile(
203 " tprot 0(%1),0\n"
204 "0: ipm %0\n"
205 " srl %0,28\n"
206 "1:\n"
207 EX_TABLE(0b,1b)
208 : "+d" (cc) : "a" (addr) : "cc");
209 return (unsigned long)cc;
210}
211
212/* Checking memory in 128KB increments. */
213#define CHUNK_INCR (1UL << 17)
214
215static noinline __init void find_memory_chunks(unsigned long memsize)
216{
217 unsigned long addr = 0, old_addr = 0;
218 unsigned long old_cc = CHUNK_READ_WRITE;
219 unsigned long cc;
220 int chunk = 0;
221
222 while (chunk < MEMORY_CHUNKS) {
223 cc = __tprot(addr);
224 while (cc == old_cc) {
225 addr += CHUNK_INCR;
226 cc = __tprot(addr);
227#ifndef CONFIG_64BIT
228 if (addr == ADDR2G)
229 break;
230#endif
231 }
232
233 if (old_addr != addr &&
234 (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
235 memory_chunk[chunk].addr = old_addr;
236 memory_chunk[chunk].size = addr - old_addr;
237 memory_chunk[chunk].type = old_cc;
238 chunk++;
239 }
240
241 old_addr = addr;
242 old_cc = cc;
243
244#ifndef CONFIG_64BIT
245 if (addr == ADDR2G)
246 break;
247#endif
248 /*
249 * Finish memory detection at the first hole, unless
250 * - we reached the hsa -> skip it.
251 * - we know there must be more.
252 */
253 if (cc == -1UL && !memsize && old_addr != ADDR2G)
254 break;
255 if (memsize && addr >= memsize)
256 break;
257 }
258}
259
260static __init void early_pgm_check_handler(void)
261{
262 unsigned long addr;
263 const struct exception_table_entry *fixup;
264
265 addr = S390_lowcore.program_old_psw.addr;
266 fixup = search_exception_tables(addr & PSW_ADDR_INSN);
267 if (!fixup)
268 disabled_wait(0);
269 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
270}
271
272static noinline __init void setup_lowcore_early(void)
273{
274 psw_t psw;
275
276 psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
277 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
278 S390_lowcore.external_new_psw = psw;
279 psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
280 S390_lowcore.program_new_psw = psw;
281 s390_base_pgm_handler_fn = early_pgm_check_handler;
282}
283
284/*
285 * Save ipl parameters, clear bss memory, initialize storage keys
286 * and create a kernel NSS at startup if the SAVESYS= parm is defined
287 */
288void __init startup_init(void)
289{
290 unsigned long memsize;
291
292 ipl_save_parameters();
293 clear_bss_section();
294 init_kernel_storage_key();
295 lockdep_init();
296 lockdep_off();
297 detect_machine_type();
298 create_kernel_nss();
299 sort_main_extable();
300 setup_lowcore_early();
301 sclp_readinfo_early();
302 memsize = sclp_memory_detect();
303 if (memory_fast_detect() < 0)
304 find_memory_chunks(memsize);
305 lockdep_on();
306}
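
A stand-alone sketch of the sclp_memory_detect() arithmetic added above: increment size (in MB) times number of increments, with the result capped at 2GB when running in 31-bit mode. The struct is a simplified stand-in for s390_readinfo_sccb, not the real layout.

/* Stand-alone sketch (not kernel code). */
#include <stdio.h>

struct readinfo_sccb_sketch {
	unsigned short rnsize;		/* increment size in MB, 0 if rnsize2 is used */
	unsigned int rnsize2;
	unsigned short rnmax;		/* number of increments, 0 if rnmax2 is used */
	unsigned long long rnmax2;
};

static unsigned long long detect_memsize(const struct readinfo_sccb_sketch *sccb,
					 int is_31bit)
{
	unsigned long long memsize;

	/* increment size is reported in MB -> shift by 20 to get bytes */
	memsize = (unsigned long long)(sccb->rnsize ? sccb->rnsize : sccb->rnsize2) << 20;
	memsize *= sccb->rnmax ? sccb->rnmax : sccb->rnmax2;

	/* 31-bit addressing cannot go beyond 2GB */
	if (is_31bit && memsize > (1ULL << 31))
		memsize = 1ULL << 31;
	return memsize;
}

int main(void)
{
	struct readinfo_sccb_sketch sccb = { .rnsize = 64, .rnmax = 128 };

	printf("detected %llu MB\n", detect_memsize(&sccb, 1) >> 20);	/* 2048 */
	return 0;
}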
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index bb0f973137f0..cc0dc609d738 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/types.h> 13#include <asm/types.h>
14#include <asm/ebcdic.h>
14 15
15/* 16/*
16 * ASCII (IBM PC 437) -> EBCDIC 037 17 * ASCII (IBM PC 437) -> EBCDIC 037
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index eca507050e47..453fd3b4edea 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -51,176 +51,15 @@ startup_continue:
51 st %r15,__LC_KERNEL_STACK # set end of kernel stack 51 st %r15,__LC_KERNEL_STACK # set end of kernel stack
52 ahi %r15,-96 52 ahi %r15,-96
53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 53 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
54
55 l %r14,.Lipl_save_parameters-.LPG1(%r13)
56 basr %r14,%r14
57# 54#
58# clear bss memory 55# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
56# and create a kernel NSS if the SAVESYS= parm is defined
59# 57#
60 l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss 58 l %r14,.Lstartup_init-.LPG1(%r13)
61 l %r3,.Lbss_end-.LPG1(%r13) # end of bss 59 basr %r14,%r14
62 sr %r3,%r2 # length of bss
63 sr %r4,%r4
64 sr %r5,%r5 # set src,length and pad to zero
65 sr %r0,%r0
66 mvcle %r2,%r4,0 # clear mem
67 jo .-4 # branch back, if not finish
68
69 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
70.Lservicecall:
71 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
72
73 stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
74 la %r1,0x200 # set bit 22
75 o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
76 st %r1,.Lcr-.LPG1(%r13)
77 lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
78
79 mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
80 la %r1, .Lsclph-.LPG1(%r13)
81 a %r1,__LC_EXT_NEW_PSW+4 # set handler
82 st %r1,__LC_EXT_NEW_PSW+4
83
84 l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
85 lr %r1,%r4 # our sccb
86 .insn rre,0xb2200000,%r2,%r1 # service call
87 ipm %r1
88 srl %r1,28 # get cc code
89 xr %r3, %r3
90 chi %r1,3
91 be .Lfchunk-.LPG1(%r13) # leave
92 chi %r1,2
93 be .Lservicecall-.LPG1(%r13)
94 lpsw .Lwaitsclp-.LPG1(%r13)
95.Lsclph:
96 lh %r1,.Lsccbr-.Lsccb(%r4)
 97 chi %r1,0x10 # 0x0010 is the success code
98 je .Lprocsccb # let's process the sccb
99 chi %r1,0x1f0
100 bne .Lfchunk-.LPG1(%r13) # unhandled error code
101 c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
102 bne .Lfchunk-.LPG1(%r13) # if no, give up
103 l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
104 b .Lservicecall-.LPG1(%r13)
105.Lprocsccb:
106 lhi %r1,0
107 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
108 jnz .Lscnd
109 lhi %r1,0x800 # otherwise report 2GB
110.Lscnd:
111 lhi %r3,0x800 # limit reported memory size to 2GB
112 cr %r1,%r3
113 jl .Lno2gb
114 lr %r1,%r3
115.Lno2gb:
116 xr %r3,%r3 # same logic
117 ic %r3,.Lscpa1-.Lsccb(%r4)
118 chi %r3,0x00
119 jne .Lcompmem
120 l %r3,.Lscpa2-.Lsccb(%r4)
121.Lcompmem:
122 mr %r2,%r1 # mem in MB on 128-bit
123 l %r1,.Lonemb-.LPG1(%r13)
124 mr %r2,%r1 # mem size in bytes in %r3
125 b .Lfchunk-.LPG1(%r13)
126
127 .align 4
128.Lipl_save_parameters:
129 .long ipl_save_parameters
130.Linittu:
131 .long init_thread_union
132.Lpmask:
133 .byte 0
134 .align 8
135.Lpcext:.long 0x00080000,0x80000000
136.Lcr:
137 .long 0x00 # place holder for cr0
138 .align 8
139.Lwaitsclp:
140 .long 0x010a0000,0x80000000 + .Lsclph
141.Lrcp:
142 .int 0x00120001 # Read SCP forced code
143.Lrcp2:
144 .int 0x00020001 # Read SCP code
145.Lonemb:
146 .int 0x100000
147.Lfchunk:
148 60
149#
150# find memory chunks.
151#
152 lr %r9,%r3 # end of mem
153 mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
154 la %r1,1 # test in increments of 128KB
155 sll %r1,17
156 l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
157 slr %r4,%r4 # set start of chunk to zero
158 slr %r5,%r5 # set end of chunk to zero
159 slr %r6,%r6 # set access code to zero
160 la %r10,MEMORY_CHUNKS # number of chunks
161.Lloop:
162 tprot 0(%r5),0 # test protection of first byte
163 ipm %r7
164 srl %r7,28
165 clr %r6,%r7 # compare cc with last access code
166 be .Lsame-.LPG1(%r13)
167 lhi %r8,0 # no program checks
168 b .Lsavchk-.LPG1(%r13)
169.Lsame:
170 ar %r5,%r1 # add 128KB to end of chunk
171 bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
172.Lchkmem: # > 2GB or tprot got a program check
173 lhi %r8,1 # set program check flag
174.Lsavchk:
175 clr %r4,%r5 # chunk size > 0?
176 be .Lchkloop-.LPG1(%r13)
177 st %r4,0(%r3) # store start address of chunk
178 lr %r0,%r5
179 slr %r0,%r4
180 st %r0,4(%r3) # store size of chunk
181 st %r6,8(%r3) # store type of chunk
182 la %r3,12(%r3)
183 ahi %r10,-1 # update chunk number
184.Lchkloop:
185 lr %r6,%r7 # set access code to last cc
186 # we got an exception or we're starting a new
187 # chunk , we must check if we should
188 # still try to find valid memory (if we detected
189 # the amount of available storage), and if we
190 # have chunks left
191 xr %r0,%r0
192 clr %r0,%r9 # did we detect memory?
193 je .Ldonemem # if not, leave
194 chi %r10,0 # do we have chunks left?
195 je .Ldonemem
196 chi %r8,1 # program check ?
197 je .Lpgmchk
198 lr %r4,%r5 # potential new chunk
199 alr %r5,%r1 # add 128KB to end of chunk
200 j .Llpcnt
201.Lpgmchk:
202 alr %r5,%r1 # add 128KB to end of chunk
203 lr %r4,%r5 # potential new chunk
204.Llpcnt:
205 clr %r5,%r9 # should we go on?
206 jl .Lloop
207.Ldonemem:
208 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags 61 l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
209# 62#
210# find out if we are running under VM
211#
212 stidp __LC_CPUID # store cpuid
213 tm __LC_CPUID,0xff # running under VM ?
214 bno .Lnovm-.LPG1(%r13)
215 oi 3(%r12),1 # set VM flag
216.Lnovm:
217 lh %r0,__LC_CPUID+4 # get cpu version
218 chi %r0,0x7490 # running on a P/390 ?
219 bne .Lnop390-.LPG1(%r13)
220 oi 3(%r12),4 # set P/390 flag
221.Lnop390:
222
223#
224# find out if we have an IEEE fpu 63# find out if we have an IEEE fpu
225# 64#
226 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) 65 mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
@@ -295,7 +134,6 @@ startup_continue:
295 .long 0 # cr15: linkage stack operations 134 .long 0 # cr15: linkage stack operations
296.Lduct: .long 0,0,0,0,0,0,0,0 135.Lduct: .long 0,0,0,0,0,0,0,0
297 .long 0,0,0,0,0,0,0,0 136 .long 0,0,0,0,0,0,0,0
298.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
299.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu 137.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
300.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp 138.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
301.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg 139.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -306,7 +144,9 @@ startup_continue:
306.Lbss_bgn: .long __bss_start 144.Lbss_bgn: .long __bss_start
307.Lbss_end: .long _end 145.Lbss_end: .long _end
308.Lparmaddr: .long PARMAREA 146.Lparmaddr: .long PARMAREA
309.Lsccbaddr: .long .Lsccb 147.Linittu: .long init_thread_union
148.Lstartup_init:
149 .long startup_init
310 150
311 .globl ipl_schib 151 .globl ipl_schib
312ipl_schib: 152ipl_schib:
@@ -322,26 +162,6 @@ ipl_devno:
322 .word 0 162 .word 0
323 163
324 .org 0x12000 164 .org 0x12000
325.globl s390_readinfo_sccb
326s390_readinfo_sccb:
327.Lsccb:
328 .hword 0x1000 # length, one page
329 .byte 0x00,0x00,0x00
330 .byte 0x80 # variable response bit set
331.Lsccbr:
332 .hword 0x00 # response code
333.Lscpincr1:
334 .hword 0x00
335.Lscpa1:
336 .byte 0x00
337 .fill 89,1,0
338.Lscpa2:
339 .int 0x00
340.Lscpincr2:
341 .quad 0x00
342 .fill 3984,1,0
343 .org 0x13000
344
345#ifdef CONFIG_SHARED_KERNEL 165#ifdef CONFIG_SHARED_KERNEL
346 .org 0x100000 166 .org 0x100000
347#endif 167#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6ba3f4512dd1..b8fec4e5c5d4 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -58,183 +58,15 @@ startup_continue:
58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack 58 stg %r15,__LC_KERNEL_STACK # set end of kernel stack
59 aghi %r15,-160 59 aghi %r15,-160
60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain 60 xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
61
62 brasl %r14,ipl_save_parameters
63# 61#
64# clear bss memory 62# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
63# and create a kernel NSS if the SAVESYS= parm is defined
65# 64#
66 larl %r2,__bss_start # start of bss segment 65 brasl %r14,startup_init
67 larl %r3,_end # end of bss segment
68 sgr %r3,%r2 # length of bss
69 sgr %r4,%r4 #
70 sgr %r5,%r5 # set src,length and pad to zero
71 mvcle %r2,%r4,0 # clear mem
72 jo .-4 # branch back, if not finish
73 # set program check new psw mask 66 # set program check new psw mask
74 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) 67 mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
75 larl %r1,.Lslowmemdetect # set program check address
76 stg %r1,__LC_PGM_NEW_PSW+8
77 lghi %r1,0xc
78 diag %r0,%r1,0x260 # get memory size of virtual machine
79 cgr %r0,%r1 # different? -> old detection routine
80 jne .Lslowmemdetect
81 aghi %r1,1 # size is one more than end
82 larl %r2,memory_chunk
83 stg %r1,8(%r2) # store size of chunk
84 j .Ldonemem
85
86.Lslowmemdetect:
87 l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
88.Lservicecall:
89 stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
90
91 stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
92 la %r1,0x200 # set bit 22
93 og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
94 stg %r1,.Lcr-.LPG1(%r13)
95 lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
96
97 mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
98 larl %r1,.Lsclph
99 stg %r1,__LC_EXT_NEW_PSW+8 # set handler
100
101 larl %r4,.Lsccb # %r4 is our index for sccb stuff
102 lgr %r1,%r4 # our sccb
103 .insn rre,0xb2200000,%r2,%r1 # service call
104 ipm %r1
105 srl %r1,28 # get cc code
106 xr %r3,%r3
107 chi %r1,3
108 be .Lfchunk-.LPG1(%r13) # leave
109 chi %r1,2
110 be .Lservicecall-.LPG1(%r13)
111 lpswe .Lwaitsclp-.LPG1(%r13)
112.Lsclph:
113 lh %r1,.Lsccbr-.Lsccb(%r4)
 114 chi %r1,0x10 # 0x0010 is the success code
115 je .Lprocsccb # let's process the sccb
116 chi %r1,0x1f0
117 bne .Lfchunk-.LPG1(%r13) # unhandled error code
118 c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
119 bne .Lfchunk-.LPG1(%r13) # if no, give up
120 l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
121 b .Lservicecall-.LPG1(%r13)
122.Lprocsccb:
123 lghi %r1,0
124 icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
125 jnz .Lscnd
126 lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
127.Lscnd:
128 xr %r3,%r3 # same logic
129 ic %r3,.Lscpa1-.Lsccb(%r4)
130 chi %r3,0x00
131 jne .Lcompmem
132 l %r3,.Lscpa2-.Lsccb(%r4)
133.Lcompmem:
134 mlgr %r2,%r1 # mem in MB on 128-bit
135 l %r1,.Lonemb-.LPG1(%r13)
136 mlgr %r2,%r1 # mem size in bytes in %r3
137 b .Lfchunk-.LPG1(%r13)
138
139 .align 4
140.Lpmask:
141 .byte 0
142 .align 8
143.Lcr:
144 .quad 0x00 # place holder for cr0
145.Lwaitsclp:
146 .quad 0x0102000180000000,.Lsclph
147.Lrcp:
148 .int 0x00120001 # Read SCP forced code
149.Lrcp2:
150 .int 0x00020001 # Read SCP code
151.Lonemb:
152 .int 0x100000
153
154.Lfchunk:
155
156#
157# find memory chunks.
158#
159 lgr %r9,%r3 # end of mem
160 larl %r1,.Lchkmem # set program check address
161 stg %r1,__LC_PGM_NEW_PSW+8
162 la %r1,1 # test in increments of 128KB
163 sllg %r1,%r1,17
164 larl %r3,memory_chunk
165 slgr %r4,%r4 # set start of chunk to zero
166 slgr %r5,%r5 # set end of chunk to zero
167 slr %r6,%r6 # set access code to zero
168 la %r10,MEMORY_CHUNKS # number of chunks
169.Lloop:
170 tprot 0(%r5),0 # test protection of first byte
171 ipm %r7
172 srl %r7,28
173 clr %r6,%r7 # compare cc with last access code
174 je .Lsame
175 lghi %r8,0 # no program checks
176 j .Lsavchk
177.Lsame:
178 algr %r5,%r1 # add 128KB to end of chunk
179 # no need to check here,
180 brc 12,.Lloop # this is the same chunk
181.Lchkmem: # > 16EB or tprot got a program check
182 lghi %r8,1 # set program check flag
183.Lsavchk:
184 clgr %r4,%r5 # chunk size > 0?
185 je .Lchkloop
186 stg %r4,0(%r3) # store start address of chunk
187 lgr %r0,%r5
188 slgr %r0,%r4
189 stg %r0,8(%r3) # store size of chunk
190 st %r6,20(%r3) # store type of chunk
191 la %r3,24(%r3)
192 ahi %r10,-1 # update chunk number
193.Lchkloop:
194 lr %r6,%r7 # set access code to last cc
195 # we got an exception or we're starting a new
196 # chunk , we must check if we should
197 # still try to find valid memory (if we detected
198 # the amount of available storage), and if we
199 # have chunks left
200 lghi %r4,1
201 sllg %r4,%r4,31
202 clgr %r5,%r4
203 je .Lhsaskip
204 xr %r0, %r0
205 clgr %r0, %r9 # did we detect memory?
206 je .Ldonemem # if not, leave
207 chi %r10, 0 # do we have chunks left?
208 je .Ldonemem
209.Lhsaskip:
210 chi %r8,1 # program check ?
211 je .Lpgmchk
212 lgr %r4,%r5 # potential new chunk
213 algr %r5,%r1 # add 128KB to end of chunk
214 j .Llpcnt
215.Lpgmchk:
216 algr %r5,%r1 # add 128KB to end of chunk
217 lgr %r4,%r5 # potential new chunk
218.Llpcnt:
219 clgr %r5,%r9 # should we go on?
220 jl .Lloop
221.Ldonemem:
222
223 larl %r12,machine_flags 68 larl %r12,machine_flags
224# 69#
225# find out if we are running under VM
226#
227 stidp __LC_CPUID # store cpuid
228 tm __LC_CPUID,0xff # running under VM ?
229 bno 0f-.LPG1(%r13)
230 oi 7(%r12),1 # set VM flag
2310: lh %r0,__LC_CPUID+4 # get cpu version
232 chi %r0,0x7490 # running on a P/390 ?
233 bne 1f-.LPG1(%r13)
234 oi 7(%r12),4 # set P/390 flag
2351:
236
237#
238# find out if we have the MVPG instruction 70# find out if we have the MVPG instruction
239# 71#
240 la %r1,0f-.LPG1(%r13) # set program check address 72 la %r1,0f-.LPG1(%r13) # set program check address
@@ -336,25 +168,6 @@ ipl_devno:
336 .word 0 168 .word 0
337 169
338 .org 0x12000 170 .org 0x12000
339.globl s390_readinfo_sccb
340s390_readinfo_sccb:
341.Lsccb:
342 .hword 0x1000 # length, one page
343 .byte 0x00,0x00,0x00
344 .byte 0x80 # variable response bit set
345.Lsccbr:
346 .hword 0x00 # response code
347.Lscpincr1:
348 .hword 0x00
349.Lscpa1:
350 .byte 0x00
351 .fill 89,1,0
352.Lscpa2:
353 .int 0x00
354.Lscpincr2:
355 .quad 0x00
356 .fill 3984,1,0
357 .org 0x13000
358 171
359#ifdef CONFIG_SHARED_KERNEL 172#ifdef CONFIG_SHARED_KERNEL
360 .org 0x100000 173 .org 0x100000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..052259530651 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,26 +20,27 @@
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/reset.h> 22#include <asm/reset.h>
23#include <asm/sclp.h>
23 24
24#define IPL_PARM_BLOCK_VERSION 0 25#define IPL_PARM_BLOCK_VERSION 0
25#define LOADPARM_LEN 8
26 26
27extern char s390_readinfo_sccb[]; 27#define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10)
28#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) 28#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
29#define SCCB_LOADPARM (&s390_readinfo_sccb[24]) 29#define SCCB_FLAG (s390_readinfo_sccb.flags)
30#define SCCB_FLAG (s390_readinfo_sccb[91])
31 30
32enum ipl_type { 31enum ipl_type {
33 IPL_TYPE_NONE = 1, 32 IPL_TYPE_NONE = 1,
34 IPL_TYPE_UNKNOWN = 2, 33 IPL_TYPE_UNKNOWN = 2,
35 IPL_TYPE_CCW = 4, 34 IPL_TYPE_CCW = 4,
36 IPL_TYPE_FCP = 8, 35 IPL_TYPE_FCP = 8,
36 IPL_TYPE_NSS = 16,
37}; 37};
38 38
39#define IPL_NONE_STR "none" 39#define IPL_NONE_STR "none"
40#define IPL_UNKNOWN_STR "unknown" 40#define IPL_UNKNOWN_STR "unknown"
41#define IPL_CCW_STR "ccw" 41#define IPL_CCW_STR "ccw"
42#define IPL_FCP_STR "fcp" 42#define IPL_FCP_STR "fcp"
43#define IPL_NSS_STR "nss"
43 44
44static char *ipl_type_str(enum ipl_type type) 45static char *ipl_type_str(enum ipl_type type)
45{ 46{
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type)
50 return IPL_CCW_STR; 51 return IPL_CCW_STR;
51 case IPL_TYPE_FCP: 52 case IPL_TYPE_FCP:
52 return IPL_FCP_STR; 53 return IPL_FCP_STR;
54 case IPL_TYPE_NSS:
55 return IPL_NSS_STR;
53 case IPL_TYPE_UNKNOWN: 56 case IPL_TYPE_UNKNOWN:
54 default: 57 default:
55 return IPL_UNKNOWN_STR; 58 return IPL_UNKNOWN_STR;
@@ -64,6 +67,7 @@ enum ipl_method {
64 IPL_METHOD_FCP_RO_DIAG, 67 IPL_METHOD_FCP_RO_DIAG,
65 IPL_METHOD_FCP_RW_DIAG, 68 IPL_METHOD_FCP_RW_DIAG,
66 IPL_METHOD_FCP_RO_VM, 69 IPL_METHOD_FCP_RO_VM,
70 IPL_METHOD_NSS,
67}; 71};
68 72
69enum shutdown_action { 73enum shutdown_action {
@@ -114,11 +118,14 @@ enum diag308_rc {
114static int diag308_set_works = 0; 118static int diag308_set_works = 0;
115 119
116static int reipl_capabilities = IPL_TYPE_UNKNOWN; 120static int reipl_capabilities = IPL_TYPE_UNKNOWN;
121
117static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 122static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
118static enum ipl_method reipl_method = IPL_METHOD_NONE; 123static enum ipl_method reipl_method = IPL_METHOD_NONE;
119static struct ipl_parameter_block *reipl_block_fcp; 124static struct ipl_parameter_block *reipl_block_fcp;
120static struct ipl_parameter_block *reipl_block_ccw; 125static struct ipl_parameter_block *reipl_block_ccw;
121 126
127static char reipl_nss_name[NSS_NAME_SIZE + 1];
128
122static int dump_capabilities = IPL_TYPE_NONE; 129static int dump_capabilities = IPL_TYPE_NONE;
123static enum ipl_type dump_type = IPL_TYPE_NONE; 130static enum ipl_type dump_type = IPL_TYPE_NONE;
124static enum ipl_method dump_method = IPL_METHOD_NONE; 131static enum ipl_method dump_method = IPL_METHOD_NONE;
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
173 sys_##_prefix##_##_name##_show, \ 180 sys_##_prefix##_##_name##_show, \
174 sys_##_prefix##_##_name##_store); 181 sys_##_prefix##_##_name##_store);
175 182
183#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
184static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
185 char *page) \
186{ \
187 return sprintf(page, _fmt_out, _value); \
188} \
189static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
190 const char *buf, size_t len) \
191{ \
192 if (sscanf(buf, _fmt_in, _value) != 1) \
193 return -EINVAL; \
194 return len; \
195} \
196static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
197 __ATTR(_name,(S_IRUGO | S_IWUSR), \
198 sys_##_prefix##_##_name##_show, \
199 sys_##_prefix##_##_name##_store);
200
176static void make_attrs_ro(struct attribute **attrs) 201static void make_attrs_ro(struct attribute **attrs)
177{ 202{
178 while (*attrs) { 203 while (*attrs) {
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void)
189{ 214{
190 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; 215 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
191 216
217 if (ipl_flags & IPL_NSS_VALID)
218 return IPL_TYPE_NSS;
192 if (!(ipl_flags & IPL_DEVNO_VALID)) 219 if (!(ipl_flags & IPL_DEVNO_VALID))
193 return IPL_TYPE_UNKNOWN; 220 return IPL_TYPE_UNKNOWN;
194 if (!(ipl_flags & IPL_PARMBLOCK_VALID)) 221 if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = {
324 .attrs = ipl_ccw_attrs, 351 .attrs = ipl_ccw_attrs,
325}; 352};
326 353
354/* NSS ipl device attributes */
355
356DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
357
358static struct attribute *ipl_nss_attrs[] = {
359 &sys_ipl_type_attr.attr,
360 &sys_ipl_nss_name_attr.attr,
361 NULL,
362};
363
364static struct attribute_group ipl_nss_attr_group = {
365 .attrs = ipl_nss_attrs,
366};
367
327/* UNKNOWN ipl device attributes */ 368/* UNKNOWN ipl device attributes */
328 369
329static struct attribute *ipl_unknown_attrs[] = { 370static struct attribute *ipl_unknown_attrs[] = {
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = {
432 .attrs = reipl_ccw_attrs, 473 .attrs = reipl_ccw_attrs,
433}; 474};
434 475
476
477/* NSS reipl device attributes */
478
479DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
480
481static struct attribute *reipl_nss_attrs[] = {
482 &sys_reipl_nss_name_attr.attr,
483 NULL,
484};
485
486static struct attribute_group reipl_nss_attr_group = {
487 .name = IPL_NSS_STR,
488 .attrs = reipl_nss_attrs,
489};
490
435/* reipl type */ 491/* reipl type */
436 492
437static int reipl_set_type(enum ipl_type type) 493static int reipl_set_type(enum ipl_type type)
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type)
454 else 510 else
455 reipl_method = IPL_METHOD_FCP_RO_DIAG; 511 reipl_method = IPL_METHOD_FCP_RO_DIAG;
456 break; 512 break;
513 case IPL_TYPE_NSS:
514 reipl_method = IPL_METHOD_NSS;
515 break;
457 default: 516 default:
458 reipl_method = IPL_METHOD_NONE; 517 reipl_method = IPL_METHOD_NONE;
459 } 518 }
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
475 rc = reipl_set_type(IPL_TYPE_CCW); 534 rc = reipl_set_type(IPL_TYPE_CCW);
476 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) 535 else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
477 rc = reipl_set_type(IPL_TYPE_FCP); 536 rc = reipl_set_type(IPL_TYPE_FCP);
537 else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
538 rc = reipl_set_type(IPL_TYPE_NSS);
478 return (rc != 0) ? rc : len; 539 return (rc != 0) ? rc : len;
479} 540}
480 541
@@ -647,6 +708,10 @@ void do_reipl(void)
647 case IPL_METHOD_FCP_RO_VM: 708 case IPL_METHOD_FCP_RO_VM:
648 __cpcmd("IPL", NULL, 0, NULL); 709 __cpcmd("IPL", NULL, 0, NULL);
649 break; 710 break;
711 case IPL_METHOD_NSS:
712 sprintf(buf, "IPL %s", reipl_nss_name);
713 __cpcmd(buf, NULL, 0, NULL);
714 break;
650 case IPL_METHOD_NONE: 715 case IPL_METHOD_NONE:
651 default: 716 default:
652 if (MACHINE_IS_VM) 717 if (MACHINE_IS_VM)
@@ -733,6 +798,10 @@ static int __init ipl_init(void)
733 case IPL_TYPE_FCP: 798 case IPL_TYPE_FCP:
734 rc = ipl_register_fcp_files(); 799 rc = ipl_register_fcp_files();
735 break; 800 break;
801 case IPL_TYPE_NSS:
802 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
803 &ipl_nss_attr_group);
804 break;
736 default: 805 default:
737 rc = sysfs_create_group(&ipl_subsys.kset.kobj, 806 rc = sysfs_create_group(&ipl_subsys.kset.kobj,
738 &ipl_unknown_attr_group); 807 &ipl_unknown_attr_group);
@@ -755,6 +824,20 @@ static void __init reipl_probe(void)
755 free_page((unsigned long)buffer); 824 free_page((unsigned long)buffer);
756} 825}
757 826
827static int __init reipl_nss_init(void)
828{
829 int rc;
830
831 if (!MACHINE_IS_VM)
832 return 0;
833 rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group);
834 if (rc)
835 return rc;
836 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
837 reipl_capabilities |= IPL_TYPE_NSS;
838 return 0;
839}
840
758static int __init reipl_ccw_init(void) 841static int __init reipl_ccw_init(void)
759{ 842{
760 int rc; 843 int rc;
@@ -837,6 +920,9 @@ static int __init reipl_init(void)
837 rc = reipl_fcp_init(); 920 rc = reipl_fcp_init();
838 if (rc) 921 if (rc)
839 return rc; 922 return rc;
923 rc = reipl_nss_init();
924 if (rc)
925 return rc;
840 rc = reipl_set_type(ipl_get_type()); 926 rc = reipl_set_type(ipl_get_type());
841 if (rc) 927 if (rc)
842 return rc; 928 return rc;
@@ -993,8 +1079,6 @@ static void do_reset_calls(void)
993 reset->fn(); 1079 reset->fn();
994} 1080}
995 1081
996extern void reset_mcck_handler(void);
997extern void reset_pgm_handler(void);
998extern __u32 dump_prefix_page; 1082extern __u32 dump_prefix_page;
999 1083
1000void s390_reset_system(void) 1084void s390_reset_system(void)
@@ -1016,14 +1100,14 @@ void s390_reset_system(void)
1016 __ctl_clear_bit(0,28); 1100 __ctl_clear_bit(0,28);
1017 1101
1018 /* Set new machine check handler */ 1102 /* Set new machine check handler */
1019 S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1103 S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1020 S390_lowcore.mcck_new_psw.addr = 1104 S390_lowcore.mcck_new_psw.addr =
1021 PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; 1105 PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
1022 1106
1023 /* Set new program check handler */ 1107 /* Set new program check handler */
1024 S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; 1108 S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
1025 S390_lowcore.program_new_psw.addr = 1109 S390_lowcore.program_new_psw.addr =
1026 PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; 1110 PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
1027 1111
1028 do_reset_calls(); 1112 do_reset_calls();
1029} 1113}
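
A user-space sketch of what the new DEFINE_IPL_ATTR_STR_RW macro boils down to for the reipl nss/name attribute: sprintf for the show side, sscanf for the store side. Buffer sizes and names here are illustrative, not the kernel definitions.

/* Stand-alone sketch (not kernel code). */
#include <stdio.h>

#define NSS_NAME_SIZE 8			/* assumption: illustrative size */

static char reipl_nss_name[NSS_NAME_SIZE + 1] = "LINUX";

static int nss_name_show(char *page)
{
	return sprintf(page, "%s\n", reipl_nss_name);
}

static int nss_name_store(const char *buf)
{
	if (sscanf(buf, "%8s", reipl_nss_name) != 1)
		return -1;		/* kernel version returns -EINVAL */
	return 0;
}

int main(void)
{
	char page[64];

	nss_name_store("MYNSS\n");
	nss_name_show(page);
	fputs(page, stdout);		/* prints "MYNSS" */
	return 0;
}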
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1eef50918615..8f0cbca31203 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * arch/s390/kernel/irq.c 2 * arch/s390/kernel/irq.c
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 2004,2007
5 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Thomas Spatzier (tspat@de.ibm.com)
7 * 7 *
8 * This file contains interrupt related functions. 8 * This file contains interrupt related functions.
9 */ 9 */
@@ -14,6 +14,8 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/proc_fs.h>
18#include <linux/profile.h>
17 19
18/* 20/*
19 * show_interrupts is needed by /proc/interrupts. 21 * show_interrupts is needed by /proc/interrupts.
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void)
93 95
94 local_irq_restore(flags); 96 local_irq_restore(flags);
95} 97}
96
97EXPORT_SYMBOL(do_softirq); 98EXPORT_SYMBOL(do_softirq);
99
100void init_irq_proc(void)
101{
102 struct proc_dir_entry *root_irq_dir;
103
104 root_irq_dir = proc_mkdir("irq", NULL);
105 create_prof_cpu_mask(root_irq_dir);
106}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 576368c4f605..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
155static int __kprobes swap_instruction(void *aref) 155static int __kprobes swap_instruction(void *aref)
156{ 156{
157 struct ins_replace_args *args = aref; 157 struct ins_replace_args *args = aref;
158 u32 *addr;
159 u32 instr;
158 int err = -EFAULT; 160 int err = -EFAULT;
159 161
162 /*
163 * Text segment is read-only, hence we use stura to bypass dynamic
164 * address translation to exchange the instruction. Since stura
165 * always operates on four bytes, but we only want to exchange two
166 * bytes do some calculations to get things right. In addition we
167 * shall not cross any page boundaries (vmalloc area!) when writing
168 * the new instruction.
169 */
170 addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
171 if ((unsigned long)args->ptr & 2)
172 instr = ((*addr) & 0xffff0000) | args->new;
173 else
174 instr = ((*addr) & 0x0000ffff) | args->new << 16;
175
160 asm volatile( 176 asm volatile(
161 "0: mvc 0(2,%2),0(%3)\n" 177 " lra %1,0(%1)\n"
162 "1: la %0,0\n" 178 "0: stura %2,%1\n"
179 "1: la %0,0\n"
163 "2:\n" 180 "2:\n"
164 EX_TABLE(0b,2b) 181 EX_TABLE(0b,2b)
165 : "+d" (err), "=m" (*args->ptr) 182 : "+d" (err)
166 : "a" (args->ptr), "a" (&args->new), "m" (args->new)); 183 : "a" (addr), "d" (instr)
184 : "memory", "cc");
185
167 return err; 186 return err;
168} 187}
169 188
@@ -356,7 +375,7 @@ no_kprobe:
356 * - When the probed function returns, this probe 375 * - When the probed function returns, this probe
357 * causes the handlers to fire 376 * causes the handlers to fire
358 */ 377 */
359void __kprobes kretprobe_trampoline_holder(void) 378void kretprobe_trampoline_holder(void)
360{ 379{
361 asm volatile(".global kretprobe_trampoline\n" 380 asm volatile(".global kretprobe_trampoline\n"
362 "kretprobe_trampoline: bcr 0,0\n"); 381 "kretprobe_trampoline: bcr 0,0\n");
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void)
365/* 384/*
366 * Called when the probe at kretprobe trampoline is hit 385 * Called when the probe at kretprobe trampoline is hit
367 */ 386 */
368int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 387static int __kprobes trampoline_probe_handler(struct kprobe *p,
388 struct pt_regs *regs)
369{ 389{
370 struct kretprobe_instance *ri = NULL; 390 struct kretprobe_instance *ri = NULL;
371 struct hlist_head *head, empty_rp; 391 struct hlist_head *head, empty_rp;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f6d9bcc0f75b..52f57af252b4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/kexec.h> 12#include <linux/kexec.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/reboot.h>
14#include <asm/cio.h> 15#include <asm/cio.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d989ed45a7aa..39d1dd752529 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,6 +30,7 @@
30#include <linux/fs.h> 30#include <linux/fs.h>
31#include <linux/string.h> 31#include <linux/string.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/moduleloader.h>
33 34
34#if 0 35#if 0
35#define DEBUGP printk 36#define DEBUGP printk
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region)
58 table entries. */ 59 table entries. */
59} 60}
60 61
61static inline void 62static void
62check_rela(Elf_Rela *rela, struct module *me) 63check_rela(Elf_Rela *rela, struct module *me)
63{ 64{
64 struct mod_arch_syminfo *info; 65 struct mod_arch_syminfo *info;
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
181 return -ENOEXEC; 182 return -ENOEXEC;
182} 183}
183 184
184static inline int 185static int
185apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, 186apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
186 struct module *me) 187 struct module *me)
187{ 188{
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
144 144
145 trace_hardirqs_on(); 145 trace_hardirqs_on();
146 /* Wait for external, I/O or machine check interrupt. */ 146 /* Wait for external, I/O or machine check interrupt. */
147 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 147 __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
148 PSW_MASK_IO | PSW_MASK_EXT); 148 PSW_MASK_IO | PSW_MASK_EXT);
149} 149}
150 150
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
190 struct pt_regs regs; 190 struct pt_regs regs;
191 191
192 memset(&regs, 0, sizeof(regs)); 192 memset(&regs, 0, sizeof(regs));
193 regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; 193 regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; 194 regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
195 regs.gprs[9] = (unsigned long) fn; 195 regs.gprs[9] = (unsigned long) fn;
196 regs.gprs[10] = (unsigned long) arg; 196 regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
deleted file mode 100644
index b81aa1f569ca..000000000000
--- a/arch/s390/kernel/profile.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/s390/kernel/profile.c
- *
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Thomas Spatzier (tspat@de.ibm.com)
- *
- */
-#include <linux/proc_fs.h>
-#include <linux/profile.h>
-
-static struct proc_dir_entry * root_irq_dir;
-
-void init_irq_proc(void)
-{
-	/* create /proc/irq */
-	root_irq_dir = proc_mkdir("irq", NULL);
-
-	/* create /proc/irq/prof_cpu_mask */
-	create_prof_cpu_mask(root_irq_dir);
-}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8f36504075ed..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task)
 	per_info->control_regs.bits.storage_alt_space_ctl = 0;
 }
 
-void
-set_single_step(struct task_struct *task)
+static void set_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 1;
 	FixPerRegisters(task);
 }
 
-void
-clear_single_step(struct task_struct *task)
+static void clear_single_step(struct task_struct *task)
 {
 	task->thread.per_info.single_step = 0;
 	FixPerRegisters(task);
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
 	 */
 	if (addr == (addr_t) &dummy->regs.psw.mask &&
 #ifdef CONFIG_COMPAT
-	    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+	    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
 #endif
-	    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+	    data != PSW_MASK_MERGE(psw_user_bits, data))
 		/* Invalid psw mask. */
 		return -EINVAL;
 #ifndef CONFIG_64BIT
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 		if (copied != sizeof(tmp))
 			return -EIO;
-		return put_user(tmp, (unsigned long __user *) data);
+		return put_user(tmp, (unsigned long __force __user *) data);
 
 	case PTRACE_PEEKUSR:
 		/* read the word at location addr in the USER area. */
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 
 	case PTRACE_PEEKUSR_AREA:
 	case PTRACE_POKEUSR_AREA:
-		if (copy_from_user(&parea, (void __user *) addr,
+		if (copy_from_user(&parea, (void __force __user *) addr,
 							sizeof(parea)))
 			return -EFAULT;
 		addr = parea.kernel_addr;
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
 		if (request == PTRACE_PEEKUSR_AREA)
 			ret = peek_user(child, addr, data);
 		else {
-			addr_t tmp;
-			if (get_user (tmp, (addr_t __user *) data))
+			addr_t utmp;
+			if (get_user(utmp,
+				     (addr_t __force __user *) data))
 				return -EFAULT;
-			ret = poke_user(child, addr, tmp);
+			ret = poke_user(child, addr, utmp);
 		}
 		if (ret)
 			return ret;
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 	if (addr == (addr_t) &dummy32->regs.psw.mask) {
 		/* Fake a 31 bit psw mask. */
 		tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
-		tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
+		tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
 	} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 		/* Fake a 31 bit psw address. */
 		tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
 	 */
 	if (addr == (addr_t) &dummy32->regs.psw.mask) {
 		/* Build a 64 bit psw mask from 31 bit mask. */
-		if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
+		if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
 			/* Invalid psw mask. */
 			return -EINVAL;
 		task_pt_regs(child)->psw.mask =
-			PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
+			PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
 	} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
 		/* Build a 64 bit psw address from 31 bit address. */
 		task_pt_regs(child)->psw.addr =
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 		if (copied != sizeof(tmp))
 			return -EIO;
-		return put_user(tmp, (unsigned int __user *) data);
+		return put_user(tmp, (unsigned int __force __user *) data);
 
 	case PTRACE_PEEKUSR:
 		/* read the word at location addr in the USER area. */
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 
 	case PTRACE_PEEKUSR_AREA:
 	case PTRACE_POKEUSR_AREA:
-		if (copy_from_user(&parea, (void __user *) addr,
+		if (copy_from_user(&parea, (void __force __user *) addr,
 							sizeof(parea)))
 			return -EFAULT;
 		addr = parea.kernel_addr;
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 		if (request == PTRACE_PEEKUSR_AREA)
 			ret = peek_user_emu31(child, addr, data);
 		else {
-			__u32 tmp;
-			if (get_user (tmp, (__u32 __user *) data))
+			__u32 utmp;
+			if (get_user(utmp,
+				     (__u32 __force __user *) data))
 				return -EFAULT;
-			ret = poke_user_emu31(child, addr, tmp);
+			ret = poke_user_emu31(child, addr, utmp);
 		}
 		if (ret)
 			return ret;
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 		return 0;
 	case PTRACE_GETEVENTMSG:
 		return put_user((__u32) child->ptrace_message,
-				(unsigned int __user *) data);
+				(unsigned int __force __user *) data);
 	case PTRACE_GETSIGINFO:
 		if (child->last_siginfo == NULL)
 			return -EINVAL;
-		return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
+		return copy_siginfo_to_user32((compat_siginfo_t
+					       __force __user *) data,
 					      child->last_siginfo);
 	case PTRACE_SETSIGINFO:
 		if (child->last_siginfo == NULL)
 			return -EINVAL;
 		return copy_siginfo_from_user32(child->last_siginfo,
-				(compat_siginfo_t __user *) data);
+				(compat_siginfo_t
+				 __force __user *) data);
 	}
 	return ptrace_request(child, request, addr, data);
 }
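A note on the recurring __force __user casts above: the ptrace entry points receive user-space addresses as plain long values, so converting them to user pointers trips sparse's address-space checking unless the cast is marked as intentional. A minimal standalone sketch of the annotation pattern (the stubbed macros and the helper name are mine, not part of the patch):

/* Outside sparse the address-space markers expand to nothing; these
 * stubs only exist so the sketch compiles standalone. */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* "data" arrives as a plain long through the ptrace ABI; the __force
 * cast tells sparse that the conversion to a user pointer is intended. */
static int __user *user_ptr_from_abi(long data)
{
	return (int __force __user *) data;
}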
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
deleted file mode 100644
index 8a87355161fa..000000000000
--- a/arch/s390/kernel/reset.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * arch/s390/kernel/reset.S
- *
- * Copyright (C) IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- *	      Michael Holzheu <holzheu@de.ibm.com>
- */
-
-#include <asm/ptrace.h>
-#include <asm/lowcore.h>
-
-#ifdef CONFIG_64BIT
-
-	.globl	reset_mcck_handler
-reset_mcck_handler:
-	basr	%r13,0
-0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
-	aghi	%r15,-STACK_FRAME_OVERHEAD
-	lg	%r1,s390_reset_mcck_handler-0b(%r13)
-	ltgr	%r1,%r1
-	jz	1f
-	basr	%r14,%r1
-1:	la	%r1,4095
-	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
-	lpswe	__LC_MCK_OLD_PSW
-
-	.globl	s390_reset_mcck_handler
-s390_reset_mcck_handler:
-	.quad	0
-
-	.globl	reset_pgm_handler
-reset_pgm_handler:
-	stmg	%r0,%r15,__LC_SAVE_AREA
-	basr	%r13,0
-0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
-	aghi	%r15,-STACK_FRAME_OVERHEAD
-	lg	%r1,s390_reset_pgm_handler-0b(%r13)
-	ltgr	%r1,%r1
-	jz	1f
-	basr	%r14,%r1
-	lmg	%r0,%r15,__LC_SAVE_AREA
-	lpswe	__LC_PGM_OLD_PSW
-1:	lpswe	disabled_wait_psw-0b(%r13)
-	.globl	s390_reset_pgm_handler
-s390_reset_pgm_handler:
-	.quad	0
-	.align	8
-disabled_wait_psw:
-	.quad	0x0002000180000000,0x0000000000000000 + reset_pgm_handler
-
-#else /* CONFIG_64BIT */
-
-	.globl	reset_mcck_handler
-reset_mcck_handler:
-	basr	%r13,0
-0:	l	%r15,__LC_PANIC_STACK	# load panic stack
-	ahi	%r15,-STACK_FRAME_OVERHEAD
-	l	%r1,s390_reset_mcck_handler-0b(%r13)
-	ltr	%r1,%r1
-	jz	1f
-	basr	%r14,%r1
-1:	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA
-	lpsw	__LC_MCK_OLD_PSW
-
-	.globl	s390_reset_mcck_handler
-s390_reset_mcck_handler:
-	.long	0
-
-	.globl	reset_pgm_handler
-reset_pgm_handler:
-	stm	%r0,%r15,__LC_SAVE_AREA
-	basr	%r13,0
-0:	l	%r15,__LC_PANIC_STACK	# load panic stack
-	ahi	%r15,-STACK_FRAME_OVERHEAD
-	l	%r1,s390_reset_pgm_handler-0b(%r13)
-	ltr	%r1,%r1
-	jz	1f
-	basr	%r14,%r1
-	lm	%r0,%r15,__LC_SAVE_AREA
-	lpsw	__LC_PGM_OLD_PSW
-
-1:	lpsw	disabled_wait_psw-0b(%r13)
-	.globl	s390_reset_pgm_handler
-s390_reset_pgm_handler:
-	.long	0
-disabled_wait_psw:
-	.align	8
-	.long	0x000a0000,0x00000000 + reset_pgm_handler
-
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bc5beaa8f98e..acf93dba7727 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code)
 	 * Make sure that the i/o interrupt did not "overtake"
 	 * the last HZ timer interrupt.
 	 */
-	account_ticks();
+	account_ticks(S390_lowcore.int_clock);
 	kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
 	index = ext_hash(code);
 	for (p = ext_int_hash[index]; p; p = p->next) {
-		if (likely(p->code == code)) {
-			if (likely(p->handler))
-				p->handler(code);
-		}
+		if (likely(p->code == code))
+			p->handler(code);
 	}
 	irq_exit();
 	set_irq_regs(old_regs);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5d8ee3baac14..03739813d3bf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -38,6 +38,8 @@
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pfn.h>
+#include <linux/ctype.h>
+#include <linux/reboot.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -49,6 +51,14 @@
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
+#include <asm/ebcdic.h>
+#include <asm/compat.h>
+
+long psw_kernel_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
+			   PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
+long psw_user_bits	= (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+			   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+			   PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
 
 /*
  * User copy operations.
@@ -117,9 +127,9 @@ void __devinit cpu_init (void)
  */
 char vmhalt_cmd[128] = "";
 char vmpoff_cmd[128] = "";
-char vmpanic_cmd[128] = "";
+static char vmpanic_cmd[128] = "";
 
-static inline void strncpy_skip_quote(char *dst, char *src, int n)
+static void strncpy_skip_quote(char *dst, char *src, int n)
 {
 	int sx, dx;
 
@@ -275,10 +285,6 @@ static void __init conmode_default(void)
 }
 
 #ifdef CONFIG_SMP
-extern void machine_restart_smp(char *);
-extern void machine_halt_smp(void);
-extern void machine_power_off_smp(void);
-
 void (*_machine_restart)(char *command) = machine_restart_smp;
 void (*_machine_halt)(void) = machine_halt_smp;
 void (*_machine_power_off)(void) = machine_power_off_smp;
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p)
 }
 early_param("ipldelay", early_parse_ipldelay);
 
+#ifdef CONFIG_S390_SWITCH_AMODE
+unsigned int switch_amode = 0;
+EXPORT_SYMBOL_GPL(switch_amode);
+
+static void set_amode_and_uaccess(unsigned long user_amode,
+				  unsigned long user32_amode)
+{
+	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
+		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+		PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+#ifdef CONFIG_COMPAT
+	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
+		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+		PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
+		PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+		PSW32_MASK_PSTATE;
+#endif
+	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+		PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
+
+	if (MACHINE_HAS_MVCOS) {
+		printk("mvcos available.\n");
+		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
+	} else {
+		printk("mvcos not available.\n");
+		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
+	}
+}
+
+/*
+ * Switch kernel/user addressing modes?
+ */
+static int __init early_parse_switch_amode(char *p)
+{
+	switch_amode = 1;
+	return 0;
+}
+early_param("switch_amode", early_parse_switch_amode);
+
+#else /* CONFIG_S390_SWITCH_AMODE */
+static inline void set_amode_and_uaccess(unsigned long user_amode,
+					 unsigned long user32_amode)
+{
+}
+#endif /* CONFIG_S390_SWITCH_AMODE */
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+unsigned int s390_noexec = 0;
+EXPORT_SYMBOL_GPL(s390_noexec);
+
+/*
+ * Enable execute protection?
+ */
+static int __init early_parse_noexec(char *p)
+{
+	if (!strncmp(p, "off", 3))
+		return 0;
+	switch_amode = 1;
+	s390_noexec = 1;
+	return 0;
+}
+early_param("noexec", early_parse_noexec);
+#endif /* CONFIG_S390_EXEC_PROTECT */
+
+static void setup_addressing_mode(void)
+{
+	if (s390_noexec) {
+		printk("S390 execute protection active, ");
+		set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
+		return;
+	}
+	if (switch_amode) {
+		printk("S390 address spaces switched, ");
+		set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
+	}
+}
+
 static void __init
 setup_lowcore(void)
 {
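The net effect of the block above: "noexec" (unless given as "off") forces switched address spaces and runs user space in the secondary space, plain "switch_amode" keeps user space in the primary space, and the kernel itself moves to the home space (psw_kernel_bits gains PSW_ASC_HOME). A compressed sketch of that decision, with illustrative names only:

enum user_amode { USER_PRIMARY, USER_SECONDARY, USER_UNCHANGED };

/* Mirrors setup_addressing_mode(): noexec implies switched address
 * spaces with user space in the secondary space, switch_amode alone
 * selects the primary space, otherwise nothing changes. */
static enum user_amode pick_user_amode(int noexec, int amode_switched)
{
	if (noexec)
		return USER_SECONDARY;
	if (amode_switched)
		return USER_PRIMARY;
	return USER_UNCHANGED;
}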
@@ -402,19 +486,21 @@ setup_lowcore(void)
 	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 	lc->restart_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-	lc->external_new_psw.mask = PSW_KERNEL_BITS;
+	if (switch_amode)
+		lc->restart_psw.mask |= PSW_ASC_HOME;
+	lc->external_new_psw.mask = psw_kernel_bits;
 	lc->external_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
 	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-	lc->program_new_psw.mask = PSW_KERNEL_BITS;
+	lc->program_new_psw.mask = psw_kernel_bits;
 	lc->program_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
 	lc->mcck_new_psw.mask =
-		PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
+		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
 	lc->mcck_new_psw.addr =
 		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-	lc->io_new_psw.mask = PSW_KERNEL_BITS;
+	lc->io_new_psw.mask = psw_kernel_bits;
 	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 	lc->ipl_device = S390_lowcore.ipl_device;
 	lc->jiffy_timer = -1LL;
@@ -439,7 +525,7 @@ setup_lowcore(void)
 static void __init
 setup_resources(void)
 {
-	struct resource *res;
+	struct resource *res, *sub_res;
 	int i;
 
 	code_resource.start = (unsigned long) &_text;
@@ -464,8 +550,38 @@ setup_resources(void)
 		res->start = memory_chunk[i].addr;
 		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
 		request_resource(&iomem_resource, res);
-		request_resource(res, &code_resource);
-		request_resource(res, &data_resource);
+
+		if (code_resource.start >= res->start &&
+		    code_resource.start <= res->end &&
+		    code_resource.end > res->end) {
+			sub_res = alloc_bootmem_low(sizeof(struct resource));
+			memcpy(sub_res, &code_resource,
+				sizeof(struct resource));
+			sub_res->end = res->end;
+			code_resource.start = res->end + 1;
+			request_resource(res, sub_res);
+		}
+
+		if (code_resource.start >= res->start &&
+		    code_resource.start <= res->end &&
+		    code_resource.end <= res->end)
+			request_resource(res, &code_resource);
+
+		if (data_resource.start >= res->start &&
+		    data_resource.start <= res->end &&
+		    data_resource.end > res->end) {
+			sub_res = alloc_bootmem_low(sizeof(struct resource));
+			memcpy(sub_res, &data_resource,
+				sizeof(struct resource));
+			sub_res->end = res->end;
+			data_resource.start = res->end + 1;
+			request_resource(res, sub_res);
+		}
+
+		if (data_resource.start >= res->start &&
+		    data_resource.start <= res->end &&
+		    data_resource.end <= res->end)
+			request_resource(res, &data_resource);
 	}
 }
 
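The new setup_resources() logic above clips the kernel code/data resources against each memory chunk: a resource that starts inside a chunk but ends beyond it gets a bootmem-allocated copy capped at the chunk end, and the original is advanced to the next chunk. A small userspace sketch of just that clipping rule (types and names are illustrative):

#include <stdint.h>

struct range { uint64_t start, end; };

/* Returns 1 if a sub-range (*sub) must be registered inside "chunk"
 * and "kernel" is advanced past the chunk; returns 0 if "kernel"
 * either fits entirely (register it as-is) or does not start in this
 * chunk at all -- the same rule the patched setup_resources() applies. */
static int clip_to_chunk(struct range *kernel, const struct range *chunk,
			 struct range *sub)
{
	if (kernel->start < chunk->start || kernel->start > chunk->end)
		return 0;		/* does not start in this chunk */
	if (kernel->end <= chunk->end)
		return 0;		/* fits: caller registers "kernel" itself */
	*sub = *kernel;
	sub->end = chunk->end;		/* sub-resource covering this chunk only */
	kernel->start = chunk->end + 1;	/* rest is handled with the next chunk */
	return 1;
}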
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void)
 	}
 	if (!memory_end)
 		memory_end = memory_size;
-	if (real_size > memory_end)
-		printk("More memory detected than supported. Unused: %luk\n",
-		       (real_size - memory_end) >> 10);
 }
 
 static void __init
 setup_memory(void)
 {
 	unsigned long bootmap_size;
-	unsigned long start_pfn, end_pfn, init_pfn;
+	unsigned long start_pfn, end_pfn;
 	int i;
 
 	/*
@@ -514,10 +627,6 @@ setup_memory(void)
 	start_pfn = PFN_UP(__pa(&_end));
 	end_pfn = max_pfn = PFN_DOWN(memory_end);
 
-	/* Initialize storage key for kernel pages */
-	for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
-		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
-
 #ifdef CONFIG_BLK_DEV_INITRD
 	/*
 	 * Move the initrd in case the bitmap of the bootmem allocater
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p)
 	parse_early_param();
 
 	setup_memory_end();
+	setup_addressing_mode();
 	setup_memory();
 	setup_resources();
 	setup_lowcore();
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	struct cpuinfo_S390 *cpuinfo;
 	unsigned long n = (unsigned long) v - 1;
 
+	s390_adjust_jiffies();
 	preempt_disable();
 	if (!n) {
 		seq_printf(m, "vendor_id : IBM/S390\n"
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 
 	/* Copy a 'clean' PSW mask to the user to avoid leaking
 	   information about whether PER is currently on. */
-	user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
+	user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
 	user_sregs.regs.psw.addr = regs->psw.addr;
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c0cd255fddbd..65b52320d145 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -22,23 +22,23 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-
 #include <linux/mm.h>
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
 #include <linux/smp_lock.h>
-
 #include <linux/delay.h>
 #include <linux/cache.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
-
+#include <linux/timex.h>
+#include <asm/setup.h>
 #include <asm/sigp.h>
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/s390_ext.h>
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
+#include <asm/timer.h>
 
 extern volatile int __cpu_logical_map[];
 
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
 
 static struct task_struct *current_set[NR_CPUS];
 
-/*
- * Reboot, halt and power_off routines for SMP.
- */
-extern char vmhalt_cmd[];
-extern char vmpoff_cmd[];
-
 static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_on);
 
-static inline void do_send_stop(void)
+static void do_send_stop(void)
 {
 	int cpu, rc;
 
@@ -214,7 +208,7 @@ static inline void do_send_stop(void)
 	}
 }
 
-static inline void do_store_status(void)
+static void do_store_status(void)
 {
 	int cpu, rc;
 
@@ -230,7 +224,7 @@ static inline void do_store_status(void)
 	}
 }
 
-static inline void do_wait_for_stop(void)
+static void do_wait_for_stop(void)
 {
 	int cpu;
 
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void)
 void smp_send_stop(void)
 {
 	/* Disable all interrupts/machine checks */
-	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 
 	/* write magic number to zero page (absolute 0) */
 	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
@@ -298,7 +292,7 @@ void machine_power_off_smp(void)
  * cpus are handled.
  */
 
-void do_ext_call_interrupt(__u16 code)
+static void do_ext_call_interrupt(__u16 code)
 {
 	unsigned long bits;
 
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms {
 /*
  * callback for setting/clearing control bits
  */
-void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info) {
 	struct ec_creg_mask_parms *pp = info;
 	unsigned long cregs[16];
 	int i;
@@ -458,17 +452,15 @@ __init smp_count_cpus(void)
 /*
  * Activate a secondary processor.
  */
-extern void init_cpu_timer(void);
-extern void init_cpu_vtimer(void);
-
 int __devinit start_secondary(void *cpuvoid)
 {
 	/* Setup the cpu */
 	cpu_init();
 	preempt_disable();
-	/* init per CPU timer */
+	/* Enable TOD clock interrupts on the secondary cpu. */
 	init_cpu_timer();
 #ifdef CONFIG_VIRT_TIMER
+	/* Enable cpu timer interrupts on the secondary cpu. */
 	init_cpu_vtimer();
 #endif
 	/* Enable pfault pseudo page faults on this cpu. */
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu)
 	spin_unlock_irqrestore(&smp_reserve_lock, flags);
 }
 
-static inline int
+static int
 cpu_stopped(int cpu)
 {
 	__u32 status;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0d14a4789bf2..2e5c65a1863e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,11 +11,11 @@
 #include <linux/stacktrace.h>
 #include <linux/kallsyms.h>
 
-static inline unsigned long save_context_stack(struct stack_trace *trace,
-					       unsigned int *skip,
-					       unsigned long sp,
-					       unsigned long low,
-					       unsigned long high)
+static unsigned long save_context_stack(struct stack_trace *trace,
+					unsigned int *skip,
+					unsigned long sp,
+					unsigned long low,
+					unsigned long high)
 {
 	struct stack_frame *sf;
 	struct pt_regs *regs;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6cceed4df73e..3b91f27ab202 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -37,11 +37,15 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/timer.h>
+#include <asm/etr.h>
 
 /* change this if you have some constant time drift */
 #define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
 #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
 
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
 /*
  * Create a small time difference between the timer interrupts
  * on the different cpus to avoid lock contention.
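TOD_UNIX_EPOCH, added above, is the TOD clock value at 1970-01-01: the s390 TOD clock counts 2^-12 microsecond units starting at 1900-01-01, so the constant should equal 2,208,988,800 seconds worth of such units. A quick standalone check of that arithmetic (my own verification, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Seconds from 1900-01-01 to 1970-01-01 (includes 17 leap days). */
	uint64_t secs = 2208988800ULL;
	/* One microsecond is 4096 (2^12) TOD clock units. */
	uint64_t tod = secs * 1000000ULL * 4096ULL;

	printf("computed: %#llx\n", (unsigned long long) tod);
	printf("patch:    %#llx\n", 0x7d91048bca000000ULL);
	return 0;
}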
@@ -51,6 +55,7 @@
 #define TICK_SIZE tick
 
 static ext_int_info_t ext_int_info_cc;
+static ext_int_info_t ext_int_etr_cc;
 static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
 static u64 xtime_cc;
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
 #define s390_do_profile()	do { ; } while(0)
 #endif /* CONFIG_PROFILING */
 
-
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * Advance the per cpu tick counter up to the time given with the
+ * "time" argument. The per cpu update consists of accounting
+ * the virtual cpu time, calling update_process_times and calling
+ * the profiling hook. If xtime is before time it is advanced as well.
  */
-void account_ticks(void)
+void account_ticks(u64 time)
 {
-	__u64 tmp;
 	__u32 ticks;
+	__u64 tmp;
 
 	/* Calculate how many ticks have passed. */
-	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
-		/*
-		 * We have to program the clock comparator even if
-		 * no tick has passed. That happens if e.g. an i/o
-		 * interrupt wakes up an idle processor that has
-		 * switched off its hz timer.
-		 */
-		tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
-		asm volatile ("SCKC %0" : : "m" (tmp));
+	if (time < S390_lowcore.jiffy_timer)
 		return;
-	}
-	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
+	tmp = time - S390_lowcore.jiffy_timer;
 	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
 		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
 		S390_lowcore.jiffy_timer +=
@@ -124,10 +121,6 @@ void account_ticks(void)
 		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
 	}
 
-	/* set clock comparator for next tick */
-	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
-	asm volatile ("SCKC %0" : : "m" (tmp));
-
 #ifdef CONFIG_SMP
 	/*
 	 * Do not rely on the boot cpu to do the calls to do_timer.
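The reworked account_ticks() above no longer reprograms the clock comparator itself; it only derives how many whole ticks lie between the per-cpu jiffy timer and the TOD value passed in, and advances the timer by that amount. For the cases visible in the hunks the branch structure collapses into one expression; a standalone sketch with illustrative names (the jiffy timer is passed by pointer instead of living in the lowcore, HZ is assumed):

#include <stdint.h>

#define HZ			100ULL		/* assumed for the sketch */
#define CLK_TICKS_PER_JIFFY	((1000000ULL / HZ) << 12)

/* At least one tick is accounted, and the per cpu jiffy timer advances
 * by exactly the number of ticks returned. */
static unsigned int ticks_elapsed(uint64_t time, uint64_t *jiffy_timer)
{
	uint64_t delta;
	unsigned int ticks;

	if (time < *jiffy_timer)	/* interrupt arrived early, nothing to do */
		return 0;
	delta = time - *jiffy_timer;
	ticks = (unsigned int) (delta / CLK_TICKS_PER_JIFFY) + 1;
	*jiffy_timer += (uint64_t) ticks * CLK_TICKS_PER_JIFFY;
	return ticks;
}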
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1;
  * Stop the HZ tick on the current CPU.
  * Only cpu_idle may call this function.
  */
-static inline void stop_hz_timer(void)
+static void stop_hz_timer(void)
 {
 	unsigned long flags;
 	unsigned long seq, next;
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void)
 		if (timer >= jiffies_timer_cc)
 			todval = timer;
 	}
-	asm volatile ("SCKC %0" : : "m" (todval));
+	set_clock_comparator(todval);
 }
 
 /*
  * Start the HZ tick on the current CPU.
  * Only cpu_idle may call this function.
  */
-static inline void start_hz_timer(void)
+static void start_hz_timer(void)
 {
 	BUG_ON(!in_interrupt());
 
 	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
 		return;
-	account_ticks();
+	account_ticks(get_clock());
+	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
 	cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
 
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = {
 	.notifier_call = nohz_idle_notify,
 };
 
-void __init nohz_init(void)
+static void __init nohz_init(void)
 {
 	if (register_idle_notifier(&nohz_idle_nb))
 		panic("Couldn't register idle notifier");
@@ -254,24 +248,57 @@ void __init nohz_init(void)
 #endif
 
 /*
- * Start the clock comparator on the current CPU.
+ * Set up per cpu jiffy timer and set the clock comparator.
+ */
+static void setup_jiffy_timer(void)
+{
+	/* Set up clock comparator to next jiffy. */
+	S390_lowcore.jiffy_timer =
+		jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
+	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
+}
+
+/*
+ * Set up lowcore and control register of the current cpu to
+ * enable TOD clock and clock comparator interrupts.
  */
 void init_cpu_timer(void)
 {
-	unsigned long cr0;
-	__u64 timer;
+	setup_jiffy_timer();
 
-	timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
-	S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
-	timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
-	asm volatile ("SCKC %0" : : "m" (timer));
-	/* allow clock comparator timer interrupt */
-	__ctl_store(cr0, 0, 0);
-	cr0 |= 0x800;
-	__ctl_load(cr0, 0, 0);
+	/* Enable clock comparator timer interrupt. */
+	__ctl_set_bit(0,11);
+
+	/* Always allow ETR external interrupts, even without an ETR. */
+	__ctl_set_bit(0, 4);
 }
 
-extern void vtime_init(void);
+static void clock_comparator_interrupt(__u16 code)
+{
+	/* set clock comparator for next tick */
+	set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
+}
+
+static void etr_reset(void);
+static void etr_init(void);
+static void etr_ext_handler(__u16);
+
+/*
+ * Get the TOD clock running.
+ */
+static u64 __init reset_tod_clock(void)
+{
+	u64 time;
+
+	etr_reset();
+	if (store_clock(&time) == 0)
+		return time;
+	/* TOD clock not running. Set the clock to Unix Epoch. */
+	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+		panic("TOD clock not operational.");
+
+	return TOD_UNIX_EPOCH;
+}
 
 static cycle_t read_tod_clock(void)
 {
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = {
  */
 void __init time_init(void)
 {
-	__u64 set_time_cc;
-	int cc;
-
-	/* kick the TOD clock */
-	asm volatile(
-		"	stck	0(%2)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (cc), "=m" (init_timer_cc)
-		: "a" (&init_timer_cc) : "cc");
-	switch (cc) {
-	case 0: /* clock in set state: all is fine */
-		break;
-	case 1: /* clock in non-set state: FIXME */
-		printk("time_init: TOD clock in non-set state\n");
-		break;
-	case 2: /* clock in error state: FIXME */
-		printk("time_init: TOD clock in error state\n");
-		break;
-	case 3: /* clock in stopped or not-operational state: FIXME */
-		printk("time_init: TOD clock stopped/non-operational\n");
-		break;
-	}
+	init_timer_cc = reset_tod_clock();
+	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
 	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
 
 	/* set xtime */
-	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
-	set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
-		      (0x3c26700LL*1000000*4096);
-	tod_to_timeval(set_time_cc, &xtime);
+	tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
 	set_normalized_timespec(&wall_to_monotonic,
 				-xtime.tv_sec, -xtime.tv_nsec);
 
 	/* request the clock comparator external interrupt */
-	if (register_early_external_interrupt(0x1004, NULL,
+	if (register_early_external_interrupt(0x1004,
+					      clock_comparator_interrupt,
 					      &ext_int_info_cc) != 0)
 		panic("Couldn't request external interrupt 0x1004");
 
 	if (clocksource_register(&clocksource_tod) != 0)
 		panic("Could not register TOD clock source");
 
-	init_cpu_timer();
+	/* request the etr external interrupt */
+	if (register_early_external_interrupt(0x1406, etr_ext_handler,
+					      &ext_int_etr_cc) != 0)
+		panic("Couldn't request external interrupt 0x1406");
+
+	/* Enable TOD clock interrupts on the boot cpu. */
+	init_cpu_timer();
 
 #ifdef CONFIG_NO_IDLE_HZ
 	nohz_init();
@@ -345,5 +355,1048 @@ void __init time_init(void)
345#ifdef CONFIG_VIRT_TIMER 355#ifdef CONFIG_VIRT_TIMER
346 vtime_init(); 356 vtime_init();
347#endif 357#endif
358 etr_init();
359}
360
361/*
362 * External Time Reference (ETR) code.
363 */
364static int etr_port0_online;
365static int etr_port1_online;
366
367static int __init early_parse_etr(char *p)
368{
369 if (strncmp(p, "off", 3) == 0)
370 etr_port0_online = etr_port1_online = 0;
371 else if (strncmp(p, "port0", 5) == 0)
372 etr_port0_online = 1;
373 else if (strncmp(p, "port1", 5) == 0)
374 etr_port1_online = 1;
375 else if (strncmp(p, "on", 2) == 0)
376 etr_port0_online = etr_port1_online = 1;
377 return 0;
378}
379early_param("etr", early_parse_etr);
380
381enum etr_event {
382 ETR_EVENT_PORT0_CHANGE,
383 ETR_EVENT_PORT1_CHANGE,
384 ETR_EVENT_PORT_ALERT,
385 ETR_EVENT_SYNC_CHECK,
386 ETR_EVENT_SWITCH_LOCAL,
387 ETR_EVENT_UPDATE,
388};
389
390enum etr_flags {
391 ETR_FLAG_ENOSYS,
392 ETR_FLAG_EACCES,
393 ETR_FLAG_STEAI,
394};
395
396/*
397 * Valid bit combinations of the eacr register are (x = don't care):
398 * e0 e1 dp p0 p1 ea es sl
399 * 0 0 x 0 0 0 0 0 initial, disabled state
400 * 0 0 x 0 1 1 0 0 port 1 online
401 * 0 0 x 1 0 1 0 0 port 0 online
402 * 0 0 x 1 1 1 0 0 both ports online
403 * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
404 * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
405 * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
406 * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
407 * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
408 * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
409 * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
410 * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
411 * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
412 * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
413 * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
414 * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
415 * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
416 * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
417 * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync
418 * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
419 */
420static struct etr_eacr etr_eacr;
421static u64 etr_tolec; /* time of last eacr update */
422static unsigned long etr_flags;
423static struct etr_aib etr_port0;
424static int etr_port0_uptodate;
425static struct etr_aib etr_port1;
426static int etr_port1_uptodate;
427static unsigned long etr_events;
428static struct timer_list etr_timer;
429static struct tasklet_struct etr_tasklet;
430static DEFINE_PER_CPU(atomic_t, etr_sync_word);
431
432static void etr_timeout(unsigned long dummy);
433static void etr_tasklet_fn(unsigned long dummy);
434
435/*
436 * The etr get_clock function. It will write the current clock value
437 * to the clock pointer and return 0 if the clock is in sync with the
438 * external time source. If the clock mode is local it will return
439 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
440 * reference. This function is what ETR is all about..
441 */
442int get_sync_clock(unsigned long long *clock)
443{
444 atomic_t *sw_ptr;
445 unsigned int sw0, sw1;
446
447 sw_ptr = &get_cpu_var(etr_sync_word);
448 sw0 = atomic_read(sw_ptr);
449 *clock = get_clock();
450 sw1 = atomic_read(sw_ptr);
451 put_cpu_var(etr_sync_word);
452 if (sw0 == sw1 && (sw0 & 0x80000000U))
453 /* Success: time is in sync. */
454 return 0;
455 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
456 return -ENOSYS;
457 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
458 return -EACCES;
459 return -EAGAIN;
460}
461EXPORT_SYMBOL(get_sync_clock);
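get_sync_clock() above is a lock-free read: the per-cpu sync word is sampled before and after reading the clock, and the value only counts as synchronized if both samples are equal and the in-sync bit 2^31 is set (etr_disable_sync_clock clears the bit and bumps the counter so a concurrent reader notices). A userspace sketch of the same check using C11 atomics (the names and the dummy clock source are mine):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t sync_word;	/* stands in for the per-cpu etr_sync_word */

static uint64_t read_clock(void)	/* dummy clock source for the sketch */
{
	static uint64_t fake_tod;
	return ++fake_tod;
}

/* Returns 0 and stores the clock value only if the in-sync bit stayed
 * set and the word did not change while the clock was being read. */
static int read_synced_clock(uint64_t *clock)
{
	uint32_t before, after;

	before = atomic_load(&sync_word);
	*clock = read_clock();
	after = atomic_load(&sync_word);
	if (before == after && (before & 0x80000000u))
		return 0;
	return -1;	/* out of sync, or a sync event raced with the read */
}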
462
463/*
464 * Make get_sync_clock return -EAGAIN.
465 */
466static void etr_disable_sync_clock(void *dummy)
467{
468 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
469 /*
470 * Clear the in-sync bit 2^31. All get_sync_clock calls will
471 * fail until the sync bit is turned back on. In addition
472 * increase the "sequence" counter to avoid the race of an
473 * etr event and the complete recovery against get_sync_clock.
474 */
475 atomic_clear_mask(0x80000000, sw_ptr);
476 atomic_inc(sw_ptr);
477}
478
479/*
480 * Make get_sync_clock return 0 again.
481 * Needs to be called from a context disabled for preemption.
482 */
483static void etr_enable_sync_clock(void)
484{
485 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
486 atomic_set_mask(0x80000000, sw_ptr);
487}
488
489/*
490 * Reset ETR attachment.
491 */
492static void etr_reset(void)
493{
494 etr_eacr = (struct etr_eacr) {
495 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
496 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
497 .es = 0, .sl = 0 };
498 if (etr_setr(&etr_eacr) == 0)
499 etr_tolec = get_clock();
500 else {
501 set_bit(ETR_FLAG_ENOSYS, &etr_flags);
502 if (etr_port0_online || etr_port1_online) {
503 printk(KERN_WARNING "Running on non ETR capable "
504 "machine, only local mode available.\n");
505 etr_port0_online = etr_port1_online = 0;
506 }
507 }
508}
509
510static void etr_init(void)
511{
512 struct etr_aib aib;
513
514 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
515 return;
516 /* Check if this machine has the steai instruction. */
517 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
518 set_bit(ETR_FLAG_STEAI, &etr_flags);
519 setup_timer(&etr_timer, etr_timeout, 0UL);
520 tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
521 if (!etr_port0_online && !etr_port1_online)
522 set_bit(ETR_FLAG_EACCES, &etr_flags);
523 if (etr_port0_online) {
524 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
525 tasklet_hi_schedule(&etr_tasklet);
526 }
527 if (etr_port1_online) {
528 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
529 tasklet_hi_schedule(&etr_tasklet);
530 }
531}
532
533/*
534 * Two sorts of ETR machine checks. The architecture reads:
535 * "When a machine-check niterruption occurs and if a switch-to-local or
536 * ETR-sync-check interrupt request is pending but disabled, this pending
537 * disabled interruption request is indicated and is cleared".
538 * Which means that we can get etr_switch_to_local events from the machine
539 * check handler although the interruption condition is disabled. Lovely..
540 */
541
542/*
543 * Switch to local machine check. This is called when the last usable
544 * ETR port goes inactive. After switch to local the clock is not in sync.
545 */
546void etr_switch_to_local(void)
547{
548 if (!etr_eacr.sl)
549 return;
550 etr_disable_sync_clock(NULL);
551 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
552 tasklet_hi_schedule(&etr_tasklet);
553}
554
555/*
556 * ETR sync check machine check. This is called when the ETR OTE and the
557 * local clock OTE are farther apart than the ETR sync check tolerance.
558 * After a ETR sync check the clock is not in sync. The machine check
559 * is broadcasted to all cpus at the same time.
560 */
561void etr_sync_check(void)
562{
563 if (!etr_eacr.es)
564 return;
565 etr_disable_sync_clock(NULL);
566 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
567 tasklet_hi_schedule(&etr_tasklet);
568}
569
570/*
571 * ETR external interrupt. There are two causes:
572 * 1) port state change, check the usability of the port
573 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
574 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
575 * or ETR-data word 4 (edf4) has changed.
576 */
577static void etr_ext_handler(__u16 code)
578{
579 struct etr_interruption_parameter *intparm =
580 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
581
582 if (intparm->pc0)
583 /* ETR port 0 state change. */
584 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
585 if (intparm->pc1)
586 /* ETR port 1 state change. */
587 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
588 if (intparm->eai)
589 /*
590 * ETR port alert on either port 0, 1 or both.
591 * Both ports are not up-to-date now.
592 */
593 set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
594 tasklet_hi_schedule(&etr_tasklet);
595}
596
597static void etr_timeout(unsigned long dummy)
598{
599 set_bit(ETR_EVENT_UPDATE, &etr_events);
600 tasklet_hi_schedule(&etr_tasklet);
601}
602
603/*
604 * Check if the etr mode is pps.
605 */
606static inline int etr_mode_is_pps(struct etr_eacr eacr)
607{
608 return eacr.es && !eacr.sl;
609}
610
611/*
612 * Check if the etr mode is etr.
613 */
614static inline int etr_mode_is_etr(struct etr_eacr eacr)
615{
616 return eacr.es && eacr.sl;
617}
618
619/*
620 * Check if the port can be used for TOD synchronization.
621 * For PPS mode the port has to receive OTEs. For ETR mode
622 * the port has to receive OTEs, the ETR stepping bit has to
623 * be zero and the validity bits for data frame 1, 2, and 3
624 * have to be 1.
625 */
626static int etr_port_valid(struct etr_aib *aib, int port)
627{
628 unsigned int psc;
629
630 /* Check that this port is receiving OTEs. */
631 if (aib->tsp == 0)
632 return 0;
633
634 psc = port ? aib->esw.psc1 : aib->esw.psc0;
635 if (psc == etr_lpsc_pps_mode)
636 return 1;
637 if (psc == etr_lpsc_operational_step)
638 return !aib->esw.y && aib->slsw.v1 &&
639 aib->slsw.v2 && aib->slsw.v3;
640 return 0;
641}
642
643/*
644 * Check if two ports are on the same network.
645 */
646static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
647{
648 // FIXME: any other fields we have to compare?
649 return aib1->edf1.net_id == aib2->edf1.net_id;
650}
651
652/*
653 * Wrapper for etr_stei that converts physical port states
654 * to logical port states to be consistent with the output
655 * of stetr (see etr_psc vs. etr_lpsc).
656 */
657static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
658{
659 BUG_ON(etr_steai(aib, func) != 0);
660 /* Convert port state to logical port state. */
661 if (aib->esw.psc0 == 1)
662 aib->esw.psc0 = 2;
663 else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
664 aib->esw.psc0 = 1;
665 if (aib->esw.psc1 == 1)
666 aib->esw.psc1 = 2;
667 else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
668 aib->esw.psc1 = 1;
669}
670
671/*
672 * Check if the aib a2 is still connected to the same attachment as
673 * aib a1, the etv values differ by one and a2 is valid.
674 */
675static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
676{
677 int state_a1, state_a2;
678
679 /* Paranoia check: e0/e1 should better be the same. */
680 if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
681 a1->esw.eacr.e1 != a2->esw.eacr.e1)
682 return 0;
683
684 /* Still connected to the same etr ? */
685 state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
686 state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
687 if (state_a1 == etr_lpsc_operational_step) {
688 if (state_a2 != etr_lpsc_operational_step ||
689 a1->edf1.net_id != a2->edf1.net_id ||
690 a1->edf1.etr_id != a2->edf1.etr_id ||
691 a1->edf1.etr_pn != a2->edf1.etr_pn)
692 return 0;
693 } else if (state_a2 != etr_lpsc_pps_mode)
694 return 0;
695
696 /* The ETV value of a2 needs to be ETV of a1 + 1. */
697 if (a1->edf2.etv + 1 != a2->edf2.etv)
698 return 0;
699
700 if (!etr_port_valid(a2, p))
701 return 0;
702
703 return 1;
704}
705
706/*
707 * The time is "clock". xtime is what we think the time is.
708 * Adjust the value by a multiple of jiffies and add the delta to ntp.
709 * "delay" is an approximation how long the synchronization took. If
710 * the time correction is positive, then "delay" is subtracted from
711 * the time difference and only the remaining part is passed to ntp.
712 */
713static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
714{
715 unsigned long long delta, ticks;
716 struct timex adjust;
717
718 /*
719 * We don't have to take the xtime lock because the cpu
720 * executing etr_adjust_time is running disabled in
721 * tasklet context and all other cpus are looping in
722 * etr_sync_cpu_start.
723 */
724 if (clock > xtime_cc) {
725 /* It is later than we thought. */
726 delta = ticks = clock - xtime_cc;
727 delta = ticks = (delta < delay) ? 0 : delta - delay;
728 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
729 init_timer_cc = init_timer_cc + delta;
730 jiffies_timer_cc = jiffies_timer_cc + delta;
731 xtime_cc = xtime_cc + delta;
732 adjust.offset = ticks * (1000000 / HZ);
733 } else {
734 /* It is earlier than we thought. */
735 delta = ticks = xtime_cc - clock;
736 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
737 init_timer_cc = init_timer_cc - delta;
738 jiffies_timer_cc = jiffies_timer_cc - delta;
739 xtime_cc = xtime_cc - delta;
740 adjust.offset = -ticks * (1000000 / HZ);
741 }
742 if (adjust.offset != 0) {
743 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
744 adjust.offset);
745 adjust.modes = ADJ_OFFSET_SINGLESHOT;
746 do_adjtimex(&adjust);
747 }
748}
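etr_adjust_time() above applies only a whole multiple of CLK_TICKS_PER_JIFFY to init_timer_cc/jiffies_timer_cc/xtime_cc and reports that same amount, expressed in microseconds, to do_adjtimex(); the sub-jiffy remainder of the difference (after the synchronization delay has been subtracted) is dropped. A sketch of that split for the "clock is ahead" case (identifiers are mine):

#include <stdint.h>

#define HZ			100ULL		/* assumed for the sketch */
#define CLK_TICKS_PER_JIFFY	((1000000ULL / HZ) << 12)

/* Split a positive clock difference (in TOD units) the way
 * etr_adjust_time() does: a jiffy-aligned part for the internal
 * timestamps plus the same amount in microseconds for adjtimex. */
static void split_adjustment(uint64_t diff, uint64_t *delta_clk, long *offset_us)
{
	uint64_t whole_jiffies = diff / CLK_TICKS_PER_JIFFY;

	*delta_clk = whole_jiffies * CLK_TICKS_PER_JIFFY;
	*offset_us = (long) (whole_jiffies * (1000000 / HZ));
}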
749
750static void etr_sync_cpu_start(void *dummy)
751{
752 int *in_sync = dummy;
753
754 etr_enable_sync_clock();
755 /*
756 * This looks like a busy wait loop but it isn't. etr_sync_cpus
757 * is called on all other cpus while the TOD clocks is stopped.
758 * __udelay will stop the cpu on an enabled wait psw until the
759 * TOD is running again.
760 */
761 while (*in_sync == 0)
762 __udelay(1);
763 if (*in_sync != 1)
764 /* Didn't work. Clear per-cpu in sync bit again. */
765 etr_disable_sync_clock(NULL);
766 /*
767 * This round of TOD syncing is done. Set the clock comparator
768 * to the next tick and let the processor continue.
769 */
770 setup_jiffy_timer();
771}
772
773static void etr_sync_cpu_end(void *dummy)
774{
775}
776
777/*
778 * Sync the TOD clock using the port referred to by aibp. This port
779 * has to be enabled and the other port has to be disabled. The
780 * last eacr update has to be more than 1.6 seconds in the past.
781 */
782static int etr_sync_clock(struct etr_aib *aib, int port)
783{
784 struct etr_aib *sync_port;
785 unsigned long long clock, delay;
786 int in_sync, follows;
787 int rc;
788
789 /* Check if the current aib is adjacent to the sync port aib. */
790 sync_port = (port == 0) ? &etr_port0 : &etr_port1;
791 follows = etr_aib_follows(sync_port, aib, port);
792 memcpy(sync_port, aib, sizeof(*aib));
793 if (!follows)
794 return -EAGAIN;
795
796 /*
797 * Catch all other cpus and make them wait until we have
798 * successfully synced the clock. smp_call_function will
799 * return after all other cpus are in etr_sync_cpu_start.
800 */
801 in_sync = 0;
802 preempt_disable();
803 smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
804 local_irq_disable();
805 etr_enable_sync_clock();
806
807 /* Set clock to next OTE. */
808 __ctl_set_bit(14, 21);
809 __ctl_set_bit(0, 29);
810 clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
811 if (set_clock(clock) == 0) {
812 __udelay(1); /* Wait for the clock to start. */
813 __ctl_clear_bit(0, 29);
814 __ctl_clear_bit(14, 21);
815 etr_stetr(aib);
816 /* Adjust Linux timing variables. */
817 delay = (unsigned long long)
818 (aib->edf2.etv - sync_port->edf2.etv) << 32;
819 etr_adjust_time(clock, delay);
820 setup_jiffy_timer();
821 /* Verify that the clock is properly set. */
822 if (!etr_aib_follows(sync_port, aib, port)) {
823 /* Didn't work. */
824 etr_disable_sync_clock(NULL);
825 in_sync = -EAGAIN;
826 rc = -EAGAIN;
827 } else {
828 in_sync = 1;
829 rc = 0;
830 }
831 } else {
832 /* Could not set the clock ?!? */
833 __ctl_clear_bit(0, 29);
834 __ctl_clear_bit(14, 21);
835 etr_disable_sync_clock(NULL);
836 in_sync = -EAGAIN;
837 rc = -EAGAIN;
838 }
839 local_irq_enable();
840 smp_call_function(etr_sync_cpu_end,NULL,0,0);
841 preempt_enable();
842 return rc;
843}
844
845/*
846 * Handle the immediate effects of the different events.
847 * The port change event is used for online/offline changes.
848 */
849static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
850{
851 if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
852 eacr.es = 0;
853 if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
854 eacr.es = eacr.sl = 0;
855 if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
856 etr_port0_uptodate = etr_port1_uptodate = 0;
857
858 if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
859 if (eacr.e0)
860 /*
861 * Port change of an enabled port. We have to
862 * assume that this can have caused a stepping
863 * port switch.
864 */
865 etr_tolec = get_clock();
866 eacr.p0 = etr_port0_online;
867 if (!eacr.p0)
868 eacr.e0 = 0;
869 etr_port0_uptodate = 0;
870 }
871 if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
872 if (eacr.e1)
873 /*
874 * Port change of an enabled port. We have to
875 * assume that this can have caused a stepping
876 * port switch.
877 */
878 etr_tolec = get_clock();
879 eacr.p1 = etr_port1_online;
880 if (!eacr.p1)
881 eacr.e1 = 0;
882 etr_port1_uptodate = 0;
883 }
884 clear_bit(ETR_EVENT_UPDATE, &etr_events);
885 return eacr;
886}
887
888/*
889 * Set up a timer that expires after the etr_tolec + 1.6 seconds if
890 * one of the ports needs an update.
891 */
892static void etr_set_tolec_timeout(unsigned long long now)
893{
894 unsigned long micros;
895
896 if ((!etr_eacr.p0 || etr_port0_uptodate) &&
897 (!etr_eacr.p1 || etr_port1_uptodate))
898 return;
899 micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
900 micros = (micros > 1600000) ? 0 : 1600000 - micros;
901 mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
902}
903
904/*
905 * Set up a time that expires after 1/2 second.
906 */
907static void etr_set_sync_timeout(void)
908{
909 mod_timer(&etr_timer, jiffies + HZ/2);
910}
911
912/*
913 * Update the aib information for one or both ports.
914 */
915static struct etr_eacr etr_handle_update(struct etr_aib *aib,
916 struct etr_eacr eacr)
917{
918 /* With both ports disabled the aib information is useless. */
919 if (!eacr.e0 && !eacr.e1)
920 return eacr;
921
922 /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
923 if (aib->esw.q == 0) {
924 /* Information for port 0 stored. */
925 if (eacr.p0 && !etr_port0_uptodate) {
926 etr_port0 = *aib;
927 if (etr_port0_online)
928 etr_port0_uptodate = 1;
929 }
930 } else {
931 /* Information for port 1 stored. */
932 if (eacr.p1 && !etr_port1_uptodate) {
933 etr_port1 = *aib;
934			if (etr_port1_online)
935 etr_port1_uptodate = 1;
936 }
937 }
938
939 /*
940 * Do not try to get the alternate port aib if the clock
941 * is not in sync yet.
942 */
943 if (!eacr.es)
944 return eacr;
945
946 /*
947 * If steai is available we can get the information about
948 * the other port immediately. If only stetr is available the
949 * data-port bit toggle has to be used.
950 */
951 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
952 if (eacr.p0 && !etr_port0_uptodate) {
953 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
954 etr_port0_uptodate = 1;
955 }
956 if (eacr.p1 && !etr_port1_uptodate) {
957 etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
958 etr_port1_uptodate = 1;
959 }
960 } else {
961 /*
962		 * One port was updated above; if the other
963		 * port is not uptodate, toggle the dp bit.
964 */
965 if ((eacr.p0 && !etr_port0_uptodate) ||
966 (eacr.p1 && !etr_port1_uptodate))
967 eacr.dp ^= 1;
968 else
969 eacr.dp = 0;
970 }
971 return eacr;
972}
973
974/*
975 * Write new etr control register if it differs from the current one.
976 * Update etr_tolec as well if the change may have switched the data port.
977 */
978static void etr_update_eacr(struct etr_eacr eacr)
979{
980 int dp_changed;
981
982 if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
983 /* No change, return. */
984 return;
985 /*
986	 * The disable of an active port or the change of the data port
987 * bit can/will cause a change in the data port.
988 */
989 dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
990 (etr_eacr.dp ^ eacr.dp) != 0;
991 etr_eacr = eacr;
992 etr_setr(&etr_eacr);
993 if (dp_changed)
994 etr_tolec = get_clock();
995}
996
997/*
998 * ETR tasklet. This function contains the main logic. In
999 * particular it is the only function that calls etr_update_eacr()
1000 * and thus "controls" the etr control register.
1001 */
1002static void etr_tasklet_fn(unsigned long dummy)
1003{
1004 unsigned long long now;
1005 struct etr_eacr eacr;
1006 struct etr_aib aib;
1007 int sync_port;
1008
1009 /* Create working copy of etr_eacr. */
1010 eacr = etr_eacr;
1011
1012 /* Check for the different events and their immediate effects. */
1013 eacr = etr_handle_events(eacr);
1014
1015 /* Check if ETR is supposed to be active. */
1016 eacr.ea = eacr.p0 || eacr.p1;
1017 if (!eacr.ea) {
1018 /* Both ports offline. Reset everything. */
1019 eacr.dp = eacr.es = eacr.sl = 0;
1020 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
1021 del_timer_sync(&etr_timer);
1022 etr_update_eacr(eacr);
1023 set_bit(ETR_FLAG_EACCES, &etr_flags);
1024 return;
1025 }
1026
1027 /* Store aib to get the current ETR status word. */
1028 BUG_ON(etr_stetr(&aib) != 0);
1029 etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
1030 now = get_clock();
1031
1032 /*
1033 * Update the port information if the last stepping port change
1034 * or data port change is older than 1.6 seconds.
1035 */
1036 if (now >= etr_tolec + (1600000 << 12))
1037 eacr = etr_handle_update(&aib, eacr);
1038
1039 /*
1040	 * Select ports to enable. The preferred synchronization mode is PPS.
1041	 * Whether a port can be enabled depends on a number of things:
1042 * 1) The port needs to be online and uptodate. A port is not
1043 * disabled just because it is not uptodate, but it is only
1044 * enabled if it is uptodate.
1045 * 2) The port needs to have the same mode (pps / etr).
1046 * 3) The port needs to be usable -> etr_port_valid() == 1
1047 * 4) To enable the second port the clock needs to be in sync.
1048	 *    5) If both ports are usable and are ETR ports, the network id
1049 * has to be the same.
1050 * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1051 */
1052 if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1053 eacr.sl = 0;
1054 eacr.e0 = 1;
1055 if (!etr_mode_is_pps(etr_eacr))
1056 eacr.es = 0;
1057 if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1058 eacr.e1 = 0;
1059		/* FIXME: uptodate checks? */
1060 else if (etr_port0_uptodate && etr_port1_uptodate)
1061 eacr.e1 = 1;
1062 sync_port = (etr_port0_uptodate &&
1063 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1064 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1065 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1066 eacr.sl = 0;
1067 eacr.e0 = 0;
1068 eacr.e1 = 1;
1069 if (!etr_mode_is_pps(etr_eacr))
1070 eacr.es = 0;
1071 sync_port = (etr_port1_uptodate &&
1072 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1073 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1074 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1075 eacr.sl = 1;
1076 eacr.e0 = 1;
1077 if (!etr_mode_is_etr(etr_eacr))
1078 eacr.es = 0;
1079 if (!eacr.es || !eacr.p1 ||
1080 aib.esw.psc1 != etr_lpsc_operational_alt)
1081 eacr.e1 = 0;
1082 else if (etr_port0_uptodate && etr_port1_uptodate &&
1083 etr_compare_network(&etr_port0, &etr_port1))
1084 eacr.e1 = 1;
1085 sync_port = (etr_port0_uptodate &&
1086 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1087 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1088 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1089 eacr.sl = 1;
1090 eacr.e0 = 0;
1091 eacr.e1 = 1;
1092 if (!etr_mode_is_etr(etr_eacr))
1093 eacr.es = 0;
1094 sync_port = (etr_port1_uptodate &&
1095 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1096 clear_bit(ETR_FLAG_EACCES, &etr_flags);
1097 } else {
1098 /* Both ports not usable. */
1099 eacr.es = eacr.sl = 0;
1100 sync_port = -1;
1101 set_bit(ETR_FLAG_EACCES, &etr_flags);
1102 }
1103
1104 /*
1105 * If the clock is in sync just update the eacr and return.
1106 * If there is no valid sync port wait for a port update.
1107 */
1108 if (eacr.es || sync_port < 0) {
1109 etr_update_eacr(eacr);
1110 etr_set_tolec_timeout(now);
1111 return;
1112 }
1113
1114 /*
1115	 * Prepare control register for clock syncing
1116	 * (reset data port bit, set sync check control).
1117 */
1118 eacr.dp = 0;
1119 eacr.es = 1;
1120
1121 /*
1122 * Update eacr and try to synchronize the clock. If the update
1123 * of eacr caused a stepping port switch (or if we have to
1124	 * assume that a stepping port switch has occurred) or the
1125	 * clock syncing failed, reset the sync check control bit
1126	 * and set up a timer to try again after 0.5 seconds.
1127 */
1128 etr_update_eacr(eacr);
1129 if (now < etr_tolec + (1600000 << 12) ||
1130 etr_sync_clock(&aib, sync_port) != 0) {
1131 /* Sync failed. Try again in 1/2 second. */
1132 eacr.es = 0;
1133 etr_update_eacr(eacr);
1134 etr_set_sync_timeout();
1135 } else
1136 etr_set_tolec_timeout(now);
1137}
1138
1139/*
1140 * Sysfs interface functions
1141 */
1142static struct sysdev_class etr_sysclass = {
1143 set_kset_name("etr")
1144};
1145
1146static struct sys_device etr_port0_dev = {
1147 .id = 0,
1148 .cls = &etr_sysclass,
1149};
1150
1151static struct sys_device etr_port1_dev = {
1152 .id = 1,
1153 .cls = &etr_sysclass,
1154};
1155
1156/*
1157 * ETR class attributes
1158 */
1159static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
1160{
1161 return sprintf(buf, "%i\n", etr_port0.esw.p);
1162}
1163
1164static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1165
1166static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
1167{
1168 char *mode_str;
1169
1170 if (etr_mode_is_pps(etr_eacr))
1171 mode_str = "pps";
1172 else if (etr_mode_is_etr(etr_eacr))
1173 mode_str = "etr";
1174 else
1175 mode_str = "local";
1176 return sprintf(buf, "%s\n", mode_str);
1177}
1178
1179static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1180
1181/*
1182 * ETR port attributes
1183 */
1184static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1185{
1186 if (dev == &etr_port0_dev)
1187 return etr_port0_online ? &etr_port0 : NULL;
1188 else
1189 return etr_port1_online ? &etr_port1 : NULL;
1190}
1191
1192static ssize_t etr_online_show(struct sys_device *dev, char *buf)
1193{
1194 unsigned int online;
1195
1196 online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1197 return sprintf(buf, "%i\n", online);
1198}
1199
1200static ssize_t etr_online_store(struct sys_device *dev,
1201 const char *buf, size_t count)
1202{
1203 unsigned int value;
1204
1205 value = simple_strtoul(buf, NULL, 0);
1206 if (value != 0 && value != 1)
1207 return -EINVAL;
1208 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
1209 return -ENOSYS;
1210 if (dev == &etr_port0_dev) {
1211 if (etr_port0_online == value)
1212 return count; /* Nothing to do. */
1213 etr_port0_online = value;
1214 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1215 tasklet_hi_schedule(&etr_tasklet);
1216 } else {
1217 if (etr_port1_online == value)
1218 return count; /* Nothing to do. */
1219 etr_port1_online = value;
1220 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1221 tasklet_hi_schedule(&etr_tasklet);
1222 }
1223 return count;
1224}
1225
1226static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1227
1228static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
1229{
1230 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1231 etr_eacr.e0 : etr_eacr.e1);
1232}
1233
1234static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1235
1236static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
1237{
1238 if (!etr_port0_online && !etr_port1_online)
1239 /* Status word is not uptodate if both ports are offline. */
1240 return -ENODATA;
1241 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1242 etr_port0.esw.psc0 : etr_port0.esw.psc1);
1243}
1244
1245static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1246
1247static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
1248{
1249 struct etr_aib *aib = etr_aib_from_dev(dev);
1250
1251 if (!aib || !aib->slsw.v1)
1252 return -ENODATA;
1253 return sprintf(buf, "%i\n", aib->edf1.u);
1254}
1255
1256static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1257
1258static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
1259{
1260 struct etr_aib *aib = etr_aib_from_dev(dev);
1261
1262 if (!aib || !aib->slsw.v1)
1263 return -ENODATA;
1264 return sprintf(buf, "%i\n", aib->edf1.net_id);
1265}
1266
1267static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1268
1269static ssize_t etr_id_show(struct sys_device *dev, char *buf)
1270{
1271 struct etr_aib *aib = etr_aib_from_dev(dev);
1272
1273 if (!aib || !aib->slsw.v1)
1274 return -ENODATA;
1275 return sprintf(buf, "%i\n", aib->edf1.etr_id);
1276}
1277
1278static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1279
1280static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
1281{
1282 struct etr_aib *aib = etr_aib_from_dev(dev);
1283
1284 if (!aib || !aib->slsw.v1)
1285 return -ENODATA;
1286 return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1287}
1288
1289static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1290
1291static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
1292{
1293 struct etr_aib *aib = etr_aib_from_dev(dev);
1294
1295 if (!aib || !aib->slsw.v3)
1296 return -ENODATA;
1297 return sprintf(buf, "%i\n", aib->edf3.c);
1298}
1299
1300static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1301
1302static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
1303{
1304 struct etr_aib *aib = etr_aib_from_dev(dev);
1305
1306 if (!aib || !aib->slsw.v3)
1307 return -ENODATA;
1308 return sprintf(buf, "%i\n", aib->edf3.blto);
1309}
1310
1311static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1312
1313static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
1314{
1315 struct etr_aib *aib = etr_aib_from_dev(dev);
1316
1317 if (!aib || !aib->slsw.v3)
1318 return -ENODATA;
1319 return sprintf(buf, "%i\n", aib->edf3.buo);
1320}
1321
1322static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1323
1324static struct sysdev_attribute *etr_port_attributes[] = {
1325 &attr_online,
1326 &attr_stepping_control,
1327 &attr_state_code,
1328 &attr_untuned,
1329 &attr_network,
1330 &attr_id,
1331 &attr_port,
1332 &attr_coupled,
1333 &attr_local_time,
1334 &attr_utc_offset,
1335 NULL
1336};
1337
1338static int __init etr_register_port(struct sys_device *dev)
1339{
1340 struct sysdev_attribute **attr;
1341 int rc;
1342
1343 rc = sysdev_register(dev);
1344 if (rc)
1345 goto out;
1346 for (attr = etr_port_attributes; *attr; attr++) {
1347 rc = sysdev_create_file(dev, *attr);
1348 if (rc)
1349 goto out_unreg;
1350 }
1351 return 0;
1352out_unreg:
1353 for (; attr >= etr_port_attributes; attr--)
1354 sysdev_remove_file(dev, *attr);
1355 sysdev_unregister(dev);
1356out:
1357 return rc;
1358}
1359
1360static void __init etr_unregister_port(struct sys_device *dev)
1361{
1362 struct sysdev_attribute **attr;
1363
1364 for (attr = etr_port_attributes; *attr; attr++)
1365 sysdev_remove_file(dev, *attr);
1366 sysdev_unregister(dev);
1367}
1368
1369static int __init etr_init_sysfs(void)
1370{
1371 int rc;
1372
1373 rc = sysdev_class_register(&etr_sysclass);
1374 if (rc)
1375 goto out;
1376 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1377 if (rc)
1378 goto out_unreg_class;
1379 rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1380 if (rc)
1381 goto out_remove_stepping_port;
1382 rc = etr_register_port(&etr_port0_dev);
1383 if (rc)
1384 goto out_remove_stepping_mode;
1385 rc = etr_register_port(&etr_port1_dev);
1386 if (rc)
1387 goto out_remove_port0;
1388 return 0;
1389
1390out_remove_port0:
1391 etr_unregister_port(&etr_port0_dev);
1392out_remove_stepping_mode:
1393 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1394out_remove_stepping_port:
1395 sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1396out_unreg_class:
1397 sysdev_class_unregister(&etr_sysclass);
1398out:
1399 return rc;
348} 1400}
349 1401
1402device_initcall(etr_init_sysfs);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3cbb0dcf1f1d..f0e5a320e2ec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
283 return buffer; 283 return buffer;
284} 284}
285 285
286DEFINE_SPINLOCK(die_lock); 286static DEFINE_SPINLOCK(die_lock);
287 287
288void die(const char * str, struct pt_regs * regs, long err) 288void die(const char * str, struct pt_regs * regs, long err)
289{ 289{
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
364 force_sig(SIGTRAP, current); 364 force_sig(SIGTRAP, current);
365} 365}
366 366
367asmlinkage void 367static void default_trap_handler(struct pt_regs * regs, long interruption_code)
368default_trap_handler(struct pt_regs * regs, long interruption_code)
369{ 368{
370 if (regs->psw.mask & PSW_MASK_PSTATE) { 369 if (regs->psw.mask & PSW_MASK_PSTATE) {
371 local_irq_enable(); 370 local_irq_enable();
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code)
376} 375}
377 376
378#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ 377#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
379asmlinkage void name(struct pt_regs * regs, long interruption_code) \ 378static void name(struct pt_regs * regs, long interruption_code) \
380{ \ 379{ \
381 siginfo_t info; \ 380 siginfo_t info; \
382 info.si_signo = signr; \ 381 info.si_signo = signr; \
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
442 "floating point exception", regs, &si); 441 "floating point exception", regs, &si);
443} 442}
444 443
445asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) 444static void illegal_op(struct pt_regs * regs, long interruption_code)
446{ 445{
447 siginfo_t info; 446 siginfo_t info;
448 __u8 opcode[6]; 447 __u8 opcode[6];
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
491#endif 490#endif
492 } else 491 } else
493 signal = SIGILL; 492 signal = SIGILL;
494 } else 493 } else {
495 signal = SIGILL; 494 /*
495 * If we get an illegal op in kernel mode, send it through the
496	 * kprobes notifier. If kprobes doesn't pick it up, send SIGILL.
497 */
498 if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
499 3, SIGTRAP) != NOTIFY_STOP)
500 signal = SIGILL;
501 }
496 502
497#ifdef CONFIG_MATHEMU 503#ifdef CONFIG_MATHEMU
498 if (signal == SIGFPE) 504 if (signal == SIGFPE)
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
585 ILL_ILLOPN, get_check_address(regs)); 591 ILL_ILLOPN, get_check_address(regs));
586#endif 592#endif
587 593
588asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) 594static void data_exception(struct pt_regs * regs, long interruption_code)
589{ 595{
590 __u16 __user *location; 596 __u16 __user *location;
591 int signal = 0; 597 int signal = 0;
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
675 } 681 }
676} 682}
677 683
678asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) 684static void space_switch_exception(struct pt_regs * regs, long int_code)
679{ 685{
680 siginfo_t info; 686 siginfo_t info;
681 687
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index fe0f2e97ba7b..a48907392522 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,18 +31,19 @@ SECTIONS
31 31
32 _etext = .; /* End of text section */ 32 _etext = .; /* End of text section */
33 33
34 . = ALIGN(16); /* Exception table */
35 __start___ex_table = .;
36 __ex_table : { *(__ex_table) }
37 __stop___ex_table = .;
38
39 RODATA 34 RODATA
40 35
41#ifdef CONFIG_SHARED_KERNEL 36#ifdef CONFIG_SHARED_KERNEL
42 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ 37 . = ALIGN(1048576); /* VM shared segments are 1MB aligned */
38#endif
43 39
40 . = ALIGN(4096);
44 _eshared = .; /* End of shareable data */ 41 _eshared = .; /* End of shareable data */
45#endif 42
43 . = ALIGN(16); /* Exception table */
44 __start___ex_table = .;
45 __ex_table : { *(__ex_table) }
46 __stop___ex_table = .;
46 47
47 .data : { /* Data */ 48 .data : { /* Data */
48 *(.data) 49 *(.data)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 21baaf5496d6..9d5b02801b46 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
25#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
26 26
27static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
29 29
30#ifdef CONFIG_VIRT_CPU_ACCOUNTING 30#ifdef CONFIG_VIRT_CPU_ACCOUNTING
31/* 31/*
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer);
524void init_cpu_vtimer(void) 524void init_cpu_vtimer(void)
525{ 525{
526 struct vtimer_queue *vt_list; 526 struct vtimer_queue *vt_list;
527 unsigned long cr0;
528 527
529 /* kick the virtual timer */ 528 /* kick the virtual timer */
530 S390_lowcore.exit_timer = VTIMER_MAX_SLICE; 529 S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
531 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; 530 S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
532 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); 531 asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
533 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); 532 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
534 __ctl_store(cr0, 0, 0); 533
535 cr0 |= 0x400; 534 /* enable cpu timer interrupts */
536 __ctl_load(cr0, 0, 0); 535 __ctl_set_bit(0,10);
537 536
538 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); 537 vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
539 INIT_LIST_HEAD(&vt_list->list); 538 INIT_LIST_HEAD(&vt_list->list);
@@ -572,6 +571,7 @@ void __init vtime_init(void)
572 if (register_idle_notifier(&vtimer_idle_nb)) 571 if (register_idle_notifier(&vtimer_idle_nb))
573 panic("Couldn't register idle notifier"); 572 panic("Couldn't register idle notifier");
574 573
574 /* Enable cpu timer interrupts on the boot cpu. */
575 init_cpu_vtimer(); 575 init_cpu_vtimer();
576} 576}
577 577
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b5f94cf3bde8..7a44fed21b35 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,7 +4,7 @@
4 4
5EXTRA_AFLAGS := -traditional 5EXTRA_AFLAGS := -traditional
6 6
7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o 7lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o
8lib-$(CONFIG_32BIT) += div64.o 8lib-$(CONFIG_32BIT) += div64.o
9lib-$(CONFIG_64BIT) += uaccess_mvcos.o 9lib-$(CONFIG_64BIT) += uaccess_mvcos.o
10lib-$(CONFIG_SMP) += spinlock.o 10lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 027c4742a001..02854449b74b 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/s390/kernel/delay.c 2 * arch/s390/lib/delay.c
3 * Precise Delay Loops for S390 3 * Precise Delay Loops for S390
4 * 4 *
5 * S390 version 5 * S390 version
@@ -13,10 +13,8 @@
13 13
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16 16#include <linux/timex.h>
17#ifdef CONFIG_SMP 17#include <linux/irqflags.h>
18#include <asm/smp.h>
19#endif
20 18
21void __delay(unsigned long loops) 19void __delay(unsigned long loops)
22{ 20{
@@ -31,17 +29,39 @@ void __delay(unsigned long loops)
31} 29}
32 30
33/* 31/*
34 * Waits for 'usecs' microseconds using the tod clock, giving up the time slice 32 * Waits for 'usecs' microseconds using the TOD clock comparator.
35 * of the virtual PU inbetween to avoid congestion.
36 */ 33 */
37void __udelay(unsigned long usecs) 34void __udelay(unsigned long usecs)
38{ 35{
39 uint64_t start_cc; 36 u64 end, time, jiffy_timer = 0;
37 unsigned long flags, cr0, mask, dummy;
38
39 local_irq_save(flags);
40 if (raw_irqs_disabled_flags(flags)) {
41 jiffy_timer = S390_lowcore.jiffy_timer;
42 S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
43 __ctl_store(cr0, 0, 0);
44 dummy = (cr0 & 0xffff00e0) | 0x00000800;
45 __ctl_load(dummy , 0, 0);
46 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
47 } else
48 mask = psw_kernel_bits | PSW_MASK_WAIT |
49 PSW_MASK_EXT | PSW_MASK_IO;
50
51 end = get_clock() + ((u64) usecs << 12);
52 do {
53 time = end < S390_lowcore.jiffy_timer ?
54 end : S390_lowcore.jiffy_timer;
55 set_clock_comparator(time);
56 trace_hardirqs_on();
57 __load_psw_mask(mask);
58 local_irq_disable();
59 } while (get_clock() < end);
40 60
41 if (usecs == 0) 61 if (raw_irqs_disabled_flags(flags)) {
42 return; 62 __ctl_load(cr0, 0, 0);
43 start_cc = get_clock(); 63 S390_lowcore.jiffy_timer = jiffy_timer;
44 do { 64 }
45 cpu_relax(); 65 set_clock_comparator(S390_lowcore.jiffy_timer);
46 } while (((get_clock() - start_cc)/4096) < usecs); 66 local_irq_restore(flags);
47} 67}
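/*
 * Illustrative sketch (not from the patch): the new __udelay() converts
 * microseconds to TOD units (usecs << 12), arms the clock comparator with
 * the earlier of the delay end and the next jiffy timer expiry, and enters
 * an enabled wait.  Because the wakeup may have been the jiffy timer rather
 * than the end of the delay, the deadline is re-checked in a loop.  The
 * same control flow with a simulated clock (all names hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tod;				/* simulated TOD clock */
static uint64_t fake_jiffy_timer = 5ULL << 12;		/* next tick at 5us   */

static void wait_until_model(uint64_t tod)
{
	if (tod > fake_tod)
		fake_tod = tod;				/* "enabled wait" ends */
	if (fake_tod >= fake_jiffy_timer)		/* "timer interrupt"   */
		fake_jiffy_timer += 1000ULL << 12;
}

static void udelay_model(uint64_t usecs)
{
	uint64_t end = fake_tod + (usecs << 12);

	do {
		/* Never sleep past the next pending timer tick ... */
		wait_until_model(end < fake_jiffy_timer ?
				 end : fake_jiffy_timer);
		/* ... so the deadline must be re-checked after each wakeup. */
	} while (fake_tod < end);
}

int main(void)
{
	udelay_model(10);
	printf("elapsed: %llu us\n", (unsigned long long) (fake_tod >> 12));
	return 0;
}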
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
new file mode 100644
index 000000000000..eb1df632e749
--- /dev/null
+++ b/arch/s390/lib/qrnnd.S
@@ -0,0 +1,77 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
7# the remainder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
13	st	%r2,24(%r15)	# store pointer to remainder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
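/*
 * Illustrative sketch (not from the patch): the assembler routine above is
 * the soft-fp helper __udiv_qrnnd() -- divide the 64-bit value n1:n0 by the
 * 32-bit divisor d, store the remainder through the first argument and
 * return the quotient (see the udiv_qrnnd() wrapper in sfp-util.h).
 * Reference semantics in portable C, using fixed-width types:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t udiv_qrnnd_ref(uint32_t *r, uint32_t n1, uint32_t n0,
			       uint32_t d)
{
	uint64_t n = ((uint64_t) n1 << 32) | n0;

	*r = (uint32_t) (n % d);
	return (uint32_t) (n / d);	/* quotient is expected to fit in 32 bits */
}

int main(void)
{
	uint32_t r, q;

	q = udiv_qrnnd_ref(&r, 0x1, 0x00000005, 0x10);	/* (2^32 + 5) / 16 */
	printf("q=%#x r=%#x\n", q, r);			/* q=0x10000000 r=5 */
	return 0;
}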
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
new file mode 100644
index 000000000000..126011df14f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.h
@@ -0,0 +1,23 @@
1/*
2 * arch/s390/uaccess.h
3 *
4 * Copyright IBM Corp. 2007
5 *
6 */
7
8#ifndef __ARCH_S390_LIB_UACCESS_H
9#define __ARCH_S390_LIB_UACCESS_H
10
11extern size_t copy_from_user_std(size_t, const void __user *, void *);
12extern size_t copy_to_user_std(size_t, void __user *, const void *);
13extern size_t strnlen_user_std(size_t, const char __user *);
14extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
15extern int futex_atomic_cmpxchg_std(int __user *, int, int);
16extern int futex_atomic_op_std(int, int __user *, int, int *);
17
18extern size_t copy_from_user_pt(size_t, const void __user *, void *);
19extern size_t copy_to_user_pt(size_t, void __user *, const void *);
20extern int futex_atomic_op_pt(int, int __user *, int, int *);
21extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
22
23#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index f9a23d57eb79..6d8772339d76 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -12,6 +12,7 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h"
15 16
16#ifndef __s390x__ 17#ifndef __s390x__
17#define AHI "ahi" 18#define AHI "ahi"
@@ -27,10 +28,7 @@
27#define SLR "slgr" 28#define SLR "slgr"
28#endif 29#endif
29 30
30extern size_t copy_from_user_std(size_t, const void __user *, void *); 31static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
31extern size_t copy_to_user_std(size_t, void __user *, const void *);
32
33size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
34{ 32{
35 register unsigned long reg0 asm("0") = 0x81UL; 33 register unsigned long reg0 asm("0") = 0x81UL;
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
69 return size; 67 return size;
70} 68}
71 69
72size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) 70static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
73{ 71{
74 if (size <= 256) 72 if (size <= 256)
75 return copy_from_user_std(size, ptr, x); 73 return copy_from_user_std(size, ptr, x);
76 return copy_from_user_mvcos(size, ptr, x); 74 return copy_from_user_mvcos(size, ptr, x);
77} 75}
78 76
79size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) 77static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
80{ 78{
81 register unsigned long reg0 asm("0") = 0x810000UL; 79 register unsigned long reg0 asm("0") = 0x810000UL;
82 unsigned long tmp1, tmp2; 80 unsigned long tmp1, tmp2;
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
105 return size; 103 return size;
106} 104}
107 105
108size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) 106static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
107 const void *x)
109{ 108{
110 if (size <= 256) 109 if (size <= 256)
111 return copy_to_user_std(size, ptr, x); 110 return copy_to_user_std(size, ptr, x);
112 return copy_to_user_mvcos(size, ptr, x); 111 return copy_to_user_mvcos(size, ptr, x);
113} 112}
114 113
115size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) 114static size_t copy_in_user_mvcos(size_t size, void __user *to,
115 const void __user *from)
116{ 116{
117 register unsigned long reg0 asm("0") = 0x810081UL; 117 register unsigned long reg0 asm("0") = 0x810081UL;
118 unsigned long tmp1, tmp2; 118 unsigned long tmp1, tmp2;
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
134 return size; 134 return size;
135} 135}
136 136
137size_t clear_user_mvcos(size_t size, void __user *to) 137static size_t clear_user_mvcos(size_t size, void __user *to)
138{ 138{
139 register unsigned long reg0 asm("0") = 0x810000UL; 139 register unsigned long reg0 asm("0") = 0x810000UL;
140 unsigned long tmp1, tmp2; 140 unsigned long tmp1, tmp2;
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to)
162 return size; 162 return size;
163} 163}
164 164
165extern size_t strnlen_user_std(size_t, const char __user *); 165static size_t strnlen_user_mvcos(size_t count, const char __user *src)
166extern size_t strncpy_from_user_std(size_t, const char __user *, char *); 166{
167extern int futex_atomic_op(int, int __user *, int, int *); 167 char buf[256];
168extern int futex_atomic_cmpxchg(int __user *, int, int); 168 int rc;
169 size_t done, len, len_str;
170
171 done = 0;
172 do {
173 len = min(count - done, (size_t) 256);
174 rc = uaccess.copy_from_user(len, src + done, buf);
175 if (unlikely(rc == len))
176 return 0;
177 len -= rc;
178 len_str = strnlen(buf, len);
179 done += len_str;
180 } while ((len_str == len) && (done < count));
181 return done + 1;
182}
183
184static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
185 char *dst)
186{
187 int rc;
188 size_t done, len, len_str;
189
190 done = 0;
191 do {
192 len = min(count - done, (size_t) 4096);
193 rc = uaccess.copy_from_user(len, src + done, dst);
194 if (unlikely(rc == len))
195 return -EFAULT;
196 len -= rc;
197 len_str = strnlen(dst, len);
198 done += len_str;
199 } while ((len_str == len) && (done < count));
200 return done;
201}
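/*
 * Illustrative sketch (not from the patch): strnlen_user_mvcos() above scans
 * the user string in bounded chunks -- copy up to 256 bytes, drop the "rc"
 * bytes that could not be copied, strnlen() what did arrive, and continue
 * only while a whole chunk was terminator-free.  The same control flow over
 * an ordinary in-memory buffer (fake_copy() is a hypothetical stand-in for
 * uaccess.copy_from_user() and returns the number of bytes NOT copied):
 */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static size_t fake_copy(size_t len, const char *src, char *dst)
{
	memcpy(dst, src, len);
	return 0;
}

static size_t strnlen_chunked(size_t count, const char *src)
{
	char buf[256];
	size_t done = 0, len, len_str, rc;

	do {
		len = count - done < sizeof(buf) ? count - done : sizeof(buf);
		rc = fake_copy(len, src + done, buf);
		if (rc == len)
			return 0;		/* nothing copied: fault */
		len -= rc;
		len_str = strnlen(buf, len);
		done += len_str;
	} while (len_str == len && done < count);
	return done + 1;			/* length including terminator */
}

int main(void)
{
	static const char text[512] = "hello";	/* zero padded to 512 bytes */

	printf("%zu\n", strnlen_chunked(sizeof(text), text));	/* prints 6 */
	return 0;
}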
169 202
170struct uaccess_ops uaccess_mvcos = { 203struct uaccess_ops uaccess_mvcos = {
171 .copy_from_user = copy_from_user_mvcos_check, 204 .copy_from_user = copy_from_user_mvcos_check,
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = {
176 .clear_user = clear_user_mvcos, 209 .clear_user = clear_user_mvcos,
177 .strnlen_user = strnlen_user_std, 210 .strnlen_user = strnlen_user_std,
178 .strncpy_from_user = strncpy_from_user_std, 211 .strncpy_from_user = strncpy_from_user_std,
179 .futex_atomic_op = futex_atomic_op, 212 .futex_atomic_op = futex_atomic_op_std,
180 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 213 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
214};
215
216#ifdef CONFIG_S390_SWITCH_AMODE
217struct uaccess_ops uaccess_mvcos_switch = {
218 .copy_from_user = copy_from_user_mvcos,
219 .copy_from_user_small = copy_from_user_mvcos,
220 .copy_to_user = copy_to_user_mvcos,
221 .copy_to_user_small = copy_to_user_mvcos,
222 .copy_in_user = copy_in_user_mvcos,
223 .clear_user = clear_user_mvcos,
224 .strnlen_user = strnlen_user_mvcos,
225 .strncpy_from_user = strncpy_from_user_mvcos,
226 .futex_atomic_op = futex_atomic_op_pt,
227 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
181}; 228};
229#endif
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 49c3e46b4065..63181671e3e3 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * arch/s390/lib/uaccess_pt.c 2 * arch/s390/lib/uaccess_pt.c
3 * 3 *
4 * User access functions based on page table walks. 4 * User access functions based on page table walks for enhanced
5 * system layout without hardware support.
5 * 6 *
6 * Copyright IBM Corp. 2006 7 * Copyright IBM Corp. 2006
7 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) 8 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
@@ -12,9 +13,10 @@
12#include <linux/mm.h> 13#include <linux/mm.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
15 17
16static inline int __handle_fault(struct mm_struct *mm, unsigned long address, 18static int __handle_fault(struct mm_struct *mm, unsigned long address,
17 int write_access) 19 int write_access)
18{ 20{
19 struct vm_area_struct *vma; 21 struct vm_area_struct *vma;
20 int ret = -EFAULT; 22 int ret = -EFAULT;
@@ -79,8 +81,8 @@ out_sigbus:
79 return ret; 81 return ret;
80} 82}
81 83
82static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, 84static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
83 size_t n, int write_user) 85 size_t n, int write_user)
84{ 86{
85 struct mm_struct *mm = current->mm; 87 struct mm_struct *mm = current->mm;
86 unsigned long offset, pfn, done, size; 88 unsigned long offset, pfn, done, size;
@@ -133,6 +135,49 @@ fault:
133 goto retry; 135 goto retry;
134} 136}
135 137
138/*
139 * Do DAT for user address by page table walk, return kernel address.
140 * This function needs to be called with current->mm->page_table_lock held.
141 */
142static unsigned long __dat_user_addr(unsigned long uaddr)
143{
144 struct mm_struct *mm = current->mm;
145 unsigned long pfn, ret;
146 pgd_t *pgd;
147 pmd_t *pmd;
148 pte_t *pte;
149 int rc;
150
151 ret = 0;
152retry:
153 pgd = pgd_offset(mm, uaddr);
154 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
155 goto fault;
156
157 pmd = pmd_offset(pgd, uaddr);
158 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
159 goto fault;
160
161 pte = pte_offset_map(pmd, uaddr);
162 if (!pte || !pte_present(*pte))
163 goto fault;
164
165 pfn = pte_pfn(*pte);
166 if (!pfn_valid(pfn))
167 goto out;
168
169 ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
170out:
171 return ret;
172fault:
173 spin_unlock(&mm->page_table_lock);
174 rc = __handle_fault(mm, uaddr, 0);
175 spin_lock(&mm->page_table_lock);
176 if (rc)
177 goto out;
178 goto retry;
179}
180
136size_t copy_from_user_pt(size_t n, const void __user *from, void *to) 181size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
137{ 182{
138 size_t rc; 183 size_t rc;
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
155 } 200 }
156 return __user_copy_pt((unsigned long) to, (void *) from, n, 1); 201 return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
157} 202}
203
204static size_t clear_user_pt(size_t n, void __user *to)
205{
206 long done, size, ret;
207
208 if (segment_eq(get_fs(), KERNEL_DS)) {
209 memset((void __kernel __force *) to, 0, n);
210 return 0;
211 }
212 done = 0;
213 do {
214 if (n - done > PAGE_SIZE)
215 size = PAGE_SIZE;
216 else
217 size = n - done;
218 ret = __user_copy_pt((unsigned long) to + done,
219 &empty_zero_page, size, 1);
220 done += size;
221 if (ret)
222 return ret + n - done;
223 } while (done < n);
224 return 0;
225}
226
227static size_t strnlen_user_pt(size_t count, const char __user *src)
228{
229 char *addr;
230 unsigned long uaddr = (unsigned long) src;
231 struct mm_struct *mm = current->mm;
232 unsigned long offset, pfn, done, len;
233 pgd_t *pgd;
234 pmd_t *pmd;
235 pte_t *pte;
236 size_t len_str;
237
238 if (segment_eq(get_fs(), KERNEL_DS))
239 return strnlen((const char __kernel __force *) src, count) + 1;
240 done = 0;
241retry:
242 spin_lock(&mm->page_table_lock);
243 do {
244 pgd = pgd_offset(mm, uaddr);
245 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
246 goto fault;
247
248 pmd = pmd_offset(pgd, uaddr);
249 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
250 goto fault;
251
252 pte = pte_offset_map(pmd, uaddr);
253 if (!pte || !pte_present(*pte))
254 goto fault;
255
256 pfn = pte_pfn(*pte);
257 if (!pfn_valid(pfn)) {
258 done = -1;
259 goto out;
260 }
261
262 offset = uaddr & (PAGE_SIZE-1);
263 addr = (char *)(pfn << PAGE_SHIFT) + offset;
264 len = min(count - done, PAGE_SIZE - offset);
265 len_str = strnlen(addr, len);
266 done += len_str;
267 uaddr += len_str;
268 } while ((len_str == len) && (done < count));
269out:
270 spin_unlock(&mm->page_table_lock);
271 return done + 1;
272fault:
273 spin_unlock(&mm->page_table_lock);
274 if (__handle_fault(mm, uaddr, 0)) {
275 return 0;
276 }
277 goto retry;
278}
279
280static size_t strncpy_from_user_pt(size_t count, const char __user *src,
281 char *dst)
282{
283 size_t n = strnlen_user_pt(count, src);
284
285 if (!n)
286 return -EFAULT;
287 if (n > count)
288 n = count;
289 if (segment_eq(get_fs(), KERNEL_DS)) {
290 memcpy(dst, (const char __kernel __force *) src, n);
291 if (dst[n-1] == '\0')
292 return n-1;
293 else
294 return n;
295 }
296 if (__user_copy_pt((unsigned long) src, dst, n, 0))
297 return -EFAULT;
298 if (dst[n-1] == '\0')
299 return n-1;
300 else
301 return n;
302}
303
304static size_t copy_in_user_pt(size_t n, void __user *to,
305 const void __user *from)
306{
307 struct mm_struct *mm = current->mm;
308 unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
309 uaddr, done, size;
310 unsigned long uaddr_from = (unsigned long) from;
311 unsigned long uaddr_to = (unsigned long) to;
312 pgd_t *pgd_from, *pgd_to;
313 pmd_t *pmd_from, *pmd_to;
314 pte_t *pte_from, *pte_to;
315 int write_user;
316
317 done = 0;
318retry:
319 spin_lock(&mm->page_table_lock);
320 do {
321 pgd_from = pgd_offset(mm, uaddr_from);
322 if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
323 uaddr = uaddr_from;
324 write_user = 0;
325 goto fault;
326 }
327 pgd_to = pgd_offset(mm, uaddr_to);
328 if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
329 uaddr = uaddr_to;
330 write_user = 1;
331 goto fault;
332 }
333
334 pmd_from = pmd_offset(pgd_from, uaddr_from);
335 if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
336 uaddr = uaddr_from;
337 write_user = 0;
338 goto fault;
339 }
340 pmd_to = pmd_offset(pgd_to, uaddr_to);
341 if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
342 uaddr = uaddr_to;
343 write_user = 1;
344 goto fault;
345 }
346
347 pte_from = pte_offset_map(pmd_from, uaddr_from);
348 if (!pte_from || !pte_present(*pte_from)) {
349 uaddr = uaddr_from;
350 write_user = 0;
351 goto fault;
352 }
353 pte_to = pte_offset_map(pmd_to, uaddr_to);
354 if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
355 uaddr = uaddr_to;
356 write_user = 1;
357 goto fault;
358 }
359
360 pfn_from = pte_pfn(*pte_from);
361 if (!pfn_valid(pfn_from))
362 goto out;
363 pfn_to = pte_pfn(*pte_to);
364 if (!pfn_valid(pfn_to))
365 goto out;
366
367 offset_from = uaddr_from & (PAGE_SIZE-1);
368		offset_to = uaddr_to & (PAGE_SIZE-1);
369 offset_max = max(offset_from, offset_to);
370 size = min(n - done, PAGE_SIZE - offset_max);
371
372 memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
373 (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
374 done += size;
375 uaddr_from += size;
376 uaddr_to += size;
377 } while (done < n);
378out:
379 spin_unlock(&mm->page_table_lock);
380 return n - done;
381fault:
382 spin_unlock(&mm->page_table_lock);
383 if (__handle_fault(mm, uaddr, write_user))
384 return n - done;
385 goto retry;
386}
387
388#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
389 asm volatile("0: l %1,0(%6)\n" \
390 "1: " insn \
391 "2: cs %1,%2,0(%6)\n" \
392 "3: jl 1b\n" \
393 " lhi %0,0\n" \
394 "4:\n" \
395 EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
396 : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
397 "=m" (*uaddr) \
398 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
399 "m" (*uaddr) : "cc" );
400
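/*
 * Illustrative sketch (not from the patch): the __futex_atomic_op() macro
 * above is a standard compare-and-swap retry loop -- load the old value,
 * derive the new value from the futex operation, and retry "cs" until no
 * other cpu modified the word in between.  The same logic expressed with
 * GCC's __atomic builtins in user space (only FUTEX_OP_ADD is shown; the
 * names below are hypothetical):
 */
#include <stdio.h>

static int futex_op_sketch(int op_add, int *uaddr, int oparg, int *old)
{
	int oldval, newval;

	oldval = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
	do {
		/* FUTEX_OP_ADD shown; other ops only change this line. */
		newval = op_add ? oldval + oparg : oparg;
	} while (!__atomic_compare_exchange_n(uaddr, &oldval, newval, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	*old = oldval;
	return 0;
}

int main(void)
{
	int word = 5, old;

	futex_op_sketch(1, &word, 3, &old);
	printf("old=%d new=%d\n", old, word);	/* old=5 new=8 */
	return 0;
}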
401int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
402{
403 int oldval = 0, newval, ret;
404
405 spin_lock(&current->mm->page_table_lock);
406 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
407 if (!uaddr) {
408 spin_unlock(&current->mm->page_table_lock);
409 return -EFAULT;
410 }
411 get_page(virt_to_page(uaddr));
412 spin_unlock(&current->mm->page_table_lock);
413 switch (op) {
414 case FUTEX_OP_SET:
415 __futex_atomic_op("lr %2,%5\n",
416 ret, oldval, newval, uaddr, oparg);
417 break;
418 case FUTEX_OP_ADD:
419 __futex_atomic_op("lr %2,%1\nar %2,%5\n",
420 ret, oldval, newval, uaddr, oparg);
421 break;
422 case FUTEX_OP_OR:
423 __futex_atomic_op("lr %2,%1\nor %2,%5\n",
424 ret, oldval, newval, uaddr, oparg);
425 break;
426 case FUTEX_OP_ANDN:
427 __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
428 ret, oldval, newval, uaddr, oparg);
429 break;
430 case FUTEX_OP_XOR:
431 __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
432 ret, oldval, newval, uaddr, oparg);
433 break;
434 default:
435 ret = -ENOSYS;
436 }
437 put_page(virt_to_page(uaddr));
438 *old = oldval;
439 return ret;
440}
441
442int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
443{
444 int ret;
445
446 spin_lock(&current->mm->page_table_lock);
447 uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
448 if (!uaddr) {
449 spin_unlock(&current->mm->page_table_lock);
450 return -EFAULT;
451 }
452 get_page(virt_to_page(uaddr));
453 spin_unlock(&current->mm->page_table_lock);
454 asm volatile(" cs %1,%4,0(%5)\n"
455 "0: lr %0,%1\n"
456 "1:\n"
457 EX_TABLE(0b,1b)
458 : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
459 : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
460 : "cc", "memory" );
461 put_page(virt_to_page(uaddr));
462 return ret;
463}
464
465struct uaccess_ops uaccess_pt = {
466 .copy_from_user = copy_from_user_pt,
467 .copy_from_user_small = copy_from_user_pt,
468 .copy_to_user = copy_to_user_pt,
469 .copy_to_user_small = copy_to_user_pt,
470 .copy_in_user = copy_in_user_pt,
471 .clear_user = clear_user_pt,
472 .strnlen_user = strnlen_user_pt,
473 .strncpy_from_user = strncpy_from_user_pt,
474 .futex_atomic_op = futex_atomic_op_pt,
475 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
476};
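/*
 * Illustrative sketch (not from the patch): the *_pt, *_std and *_mvcos
 * helpers in this directory are alternative implementations behind one
 * table of function pointers (struct uaccess_ops); a table is selected at
 * boot and the arch's copy_from_user()/copy_to_user() wrappers dispatch
 * through it (see the uaccess.copy_from_user() call in uaccess_mvcos.c
 * above).  A minimal user-space model of that dispatch pattern, with
 * hypothetical names:
 */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct uaccess_ops_model {
	/* returns the number of bytes that could NOT be copied */
	size_t (*copy_from_user)(size_t n, const void *from, void *to);
};

static size_t copy_from_user_memcpy(size_t n, const void *from, void *to)
{
	memcpy(to, from, n);
	return 0;
}

static struct uaccess_ops_model uaccess_model = {
	.copy_from_user = copy_from_user_memcpy,
};

int main(void)
{
	char src[8] = "abc", dst[8];
	size_t left = uaccess_model.copy_from_user(sizeof(src), src, dst);

	printf("left=%zu dst=%s\n", left, dst);
	return 0;
}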
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 56a0214e9928..28c4500a58d0 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -13,6 +13,7 @@
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h"
16 17
17#ifndef __s390x__ 18#ifndef __s390x__
18#define AHI "ahi" 19#define AHI "ahi"
@@ -28,9 +29,6 @@
28#define SLR "slgr" 29#define SLR "slgr"
29#endif 30#endif
30 31
31extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to);
32extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from);
33
34size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) 32size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
35{ 33{
36 unsigned long tmp1, tmp2; 34 unsigned long tmp1, tmp2;
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
72 return size; 70 return size;
73} 71}
74 72
75size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) 73static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
74 void *x)
76{ 75{
77 if (size <= 1024) 76 if (size <= 1024)
78 return copy_from_user_std(size, ptr, x); 77 return copy_from_user_std(size, ptr, x);
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
110 return size; 109 return size;
111} 110}
112 111
113size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) 112static size_t copy_to_user_std_check(size_t size, void __user *ptr,
113 const void *x)
114{ 114{
115 if (size <= 1024) 115 if (size <= 1024)
116 return copy_to_user_std(size, ptr, x); 116 return copy_to_user_std(size, ptr, x);
117 return copy_to_user_pt(size, ptr, x); 117 return copy_to_user_pt(size, ptr, x);
118} 118}
119 119
120size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) 120static size_t copy_in_user_std(size_t size, void __user *to,
121 const void __user *from)
121{ 122{
122 unsigned long tmp1; 123 unsigned long tmp1;
123 124
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
148 return size; 149 return size;
149} 150}
150 151
151size_t clear_user_std(size_t size, void __user *to) 152static size_t clear_user_std(size_t size, void __user *to)
152{ 153{
153 unsigned long tmp1, tmp2; 154 unsigned long tmp1, tmp2;
154 155
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
254 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ 255 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
255 "m" (*uaddr) : "cc"); 256 "m" (*uaddr) : "cc");
256 257
257int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) 258int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
258{ 259{
259 int oldval = 0, newval, ret; 260 int oldval = 0, newval, ret;
260 261
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
286 return ret; 287 return ret;
287} 288}
288 289
289int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) 290int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
290{ 291{
291 int ret; 292 int ret;
292 293
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = {
311 .clear_user = clear_user_std, 312 .clear_user = clear_user_std,
312 .strnlen_user = strnlen_user_std, 313 .strnlen_user = strnlen_user_std,
313 .strncpy_from_user = strncpy_from_user_std, 314 .strncpy_from_user = strncpy_from_user_std,
314 .futex_atomic_op = futex_atomic_op, 315 .futex_atomic_op = futex_atomic_op_std,
315 .futex_atomic_cmpxchg = futex_atomic_cmpxchg, 316 .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
316}; 317};
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile
index c10df144f2ab..73b3e72efc46 100644
--- a/arch/s390/math-emu/Makefile
+++ b/arch/s390/math-emu/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the FPU instruction emulation. 2# Makefile for the FPU instruction emulation.
3# 3#
4 4
5obj-$(CONFIG_MATHEMU) := math.o qrnnd.o 5obj-$(CONFIG_MATHEMU) := math.o
6 6
7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w 7EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
8EXTRA_AFLAGS := -traditional 8EXTRA_AFLAGS := -traditional
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index 6b9aec5a2c18..3ee78ccb617d 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -15,7 +15,7 @@
15#include <asm/uaccess.h> 15#include <asm/uaccess.h>
16#include <asm/lowcore.h> 16#include <asm/lowcore.h>
17 17
18#include "sfp-util.h" 18#include <asm/sfp-util.h>
19#include <math-emu/soft-fp.h> 19#include <math-emu/soft-fp.h>
20#include <math-emu/single.h> 20#include <math-emu/single.h>
21#include <math-emu/double.h> 21#include <math-emu/double.h>
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S
deleted file mode 100644
index b01c2b648e22..000000000000
--- a/arch/s390/math-emu/qrnnd.S
+++ /dev/null
@@ -1,77 +0,0 @@
1# S/390 __udiv_qrnnd
2
3# r2 : &__r
4# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n
6# r5 : divisor d
7# the reminder r of the division is to be stored to &__r and
8# the quotient q is to be returned
9
10 .text
11 .globl __udiv_qrnnd
12__udiv_qrnnd:
13 st %r2,24(%r15) # store pointer to reminder for later
14 lr %r0,%r3 # reload n
15 lr %r1,%r4
16 ltr %r2,%r5 # reload and test divisor
17 jp 5f
18 # divisor >= 0x80000000
19 srdl %r0,2 # n/4
20 srl %r2,1 # d/2
21 slr %r1,%r2 # special case if last bit of d is set
22 brc 3,0f # (n/4) div (n/2) can overflow by 1
23 ahi %r0,-1 # trick: subtract n/2, then divide
240: dr %r0,%r2 # signed division
25 ahi %r1,1 # trick part 2: add 1 to the quotient
26 # now (n >> 2) = (d >> 1) * %r1 + %r0
27 lhi %r3,1
28 nr %r3,%r1 # test last bit of q
29 jz 1f
30 alr %r0,%r2 # add (d>>1) to r
311: srl %r1,1 # q >>= 1
32 # now (n >> 2) = (d&-2) * %r1 + %r0
33 lhi %r3,1
34 nr %r3,%r5 # test last bit of d
35 jz 2f
36 slr %r0,%r1 # r -= q
37 brc 3,2f # borrow ?
38 alr %r0,%r5 # r += d
39 ahi %r1,-1
402: # now (n >> 2) = d * %r1 + %r0
41 alr %r1,%r1 # q <<= 1
42 alr %r0,%r0 # r <<= 1
43 brc 12,3f # overflow on r ?
44 slr %r0,%r5 # r -= d
45 ahi %r1,1 # q += 1
463: lhi %r3,2
47 nr %r3,%r4 # test next to last bit of n
48 jz 4f
49 ahi %r0,1 # r += 1
504: clr %r0,%r5 # r >= d ?
51 jl 6f
52 slr %r0,%r5 # r -= d
53 ahi %r1,1 # q += 1
54 # now (n >> 1) = d * %r1 + %r0
55 j 6f
565: # divisor < 0x80000000
57 srdl %r0,1
58 dr %r0,%r2 # signed division
59 # now (n >> 1) = d * %r1 + %r0
606: alr %r1,%r1 # q <<= 1
61 alr %r0,%r0 # r <<= 1
62 brc 12,7f # overflow on r ?
63 slr %r0,%r5 # r -= d
64 ahi %r1,1 # q += 1
657: lhi %r3,1
66 nr %r3,%r4 # isolate last bit of n
67 alr %r0,%r3 # r += (n & 1)
68 clr %r0,%r5 # r >= d ?
69 jl 8f
70 slr %r0,%r5 # r -= d
71 ahi %r1,1 # q += 1
728: # now n = d * %r1 + %r0
73 l %r2,24(%r15)
74 st %r0,0(%r2)
75 lr %r2,%r1
76 br %r14
77 .end __udiv_qrnnd
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
deleted file mode 100644
index 5b6ca4570ea4..000000000000
--- a/arch/s390/math-emu/sfp-util.h
+++ /dev/null
@@ -1,66 +0,0 @@
1#include <linux/kernel.h>
2#include <linux/sched.h>
3#include <linux/types.h>
4#include <asm/byteorder.h>
5
6#define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \
7 unsigned int __sh = (ah); \
8 unsigned int __sl = (al); \
9 asm volatile( \
10 " alr %1,%3\n" \
11 " brc 12,0f\n" \
12 " ahi %0,1\n" \
13 "0: alr %0,%2" \
14 : "+&d" (__sh), "+d" (__sl) \
15 : "d" (bh), "d" (bl) : "cc"); \
16 (sh) = __sh; \
17 (sl) = __sl; \
18})
19
20#define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \
21 unsigned int __sh = (ah); \
22 unsigned int __sl = (al); \
23 asm volatile( \
24 " slr %1,%3\n" \
25 " brc 3,0f\n" \
26 " ahi %0,-1\n" \
27 "0: slr %0,%2" \
28 : "+&d" (__sh), "+d" (__sl) \
29 : "d" (bh), "d" (bl) : "cc"); \
30 (sh) = __sh; \
31 (sl) = __sl; \
32})
33
34/* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */
35#define umul_ppmm(wh, wl, u, v) ({ \
36 unsigned int __wh = u; \
37 unsigned int __wl = v; \
38 asm volatile( \
39 " ltr 1,%0\n" \
40 " mr 0,%1\n" \
41 " jnm 0f\n" \
42 " alr 0,%1\n" \
43 "0: ltr %1,%1\n" \
44 " jnm 1f\n" \
45 " alr 0,%0\n" \
46 "1: lr %0,0\n" \
47 " lr %1,1\n" \
48 : "+d" (__wh), "+d" (__wl) \
49 : : "0", "1", "cc"); \
50 wh = __wh; \
51 wl = __wl; \
52})
53
54#define udiv_qrnnd(q, r, n1, n0, d) \
55 do { unsigned long __r; \
56 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
57 (r) = __r; \
58 } while (0)
59extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long,
60 unsigned long , unsigned long);
61
62#define UDIV_NEEDS_NORMALIZATION 0
63
64#define abort() return 0
65
66#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 607f50ead1fd..f93a056869bc 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds)
245 cmm_set_timer(); 245 cmm_set_timer();
246} 246}
247 247
248static inline int 248static int
249cmm_skip_blanks(char *cp, char **endp) 249cmm_skip_blanks(char *cp, char **endp)
250{ 250{
251 char *str; 251 char *str;
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg)
414} 414}
415#endif 415#endif
416 416
417struct ctl_table_header *cmm_sysctl_header; 417static struct ctl_table_header *cmm_sysctl_header;
418 418
419static int 419static int
420cmm_init (void) 420cmm_init (void)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 775bf19e742b..394980b05e6f 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -15,6 +15,7 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/bootmem.h> 16#include <linux/bootmem.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/ioport.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/pgtable.h> 20#include <asm/pgtable.h>
20#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
@@ -70,6 +71,7 @@ struct qin64 {
70struct dcss_segment { 71struct dcss_segment {
71 struct list_head list; 72 struct list_head list;
72 char dcss_name[8]; 73 char dcss_name[8];
74 char res_name[15];
73 unsigned long start_addr; 75 unsigned long start_addr;
74 unsigned long end; 76 unsigned long end;
75 atomic_t ref_count; 77 atomic_t ref_count;
@@ -77,6 +79,7 @@ struct dcss_segment {
77 unsigned int vm_segtype; 79 unsigned int vm_segtype;
78 struct qrange range[6]; 80 struct qrange range[6];
79 int segcnt; 81 int segcnt;
82 struct resource *res;
80}; 83};
81 84
82static DEFINE_MUTEX(dcss_lock); 85static DEFINE_MUTEX(dcss_lock);
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
88 * Create the 8 bytes, ebcdic VM segment name from 91 * Create the 8 bytes, ebcdic VM segment name from
89 * an ascii name. 92 * an ascii name.
90 */ 93 */
91static void inline 94static void
 dcss_mkname(char *name, char *dcss_name)
 {
 	int i;
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 		goto out_free;
 	}
 
+	seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	if (seg->res == NULL) {
+		rc = -ENOMEM;
+		goto out_shared;
+	}
+	seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+	seg->res->start = seg->start_addr;
+	seg->res->end = seg->end;
+	memcpy(&seg->res_name, seg->dcss_name, 8);
+	EBCASC(seg->res_name, 8);
+	seg->res_name[8] = '\0';
+	strncat(seg->res_name, " (DCSS)", 7);
+	seg->res->name = seg->res_name;
+	rc = seg->vm_segtype;
+	if (rc == SEG_TYPE_SC ||
+	    ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
+		seg->res->flags |= IORESOURCE_READONLY;
+	if (request_resource(&iomem_resource, seg->res)) {
+		rc = -EBUSY;
+		kfree(seg->res);
+		goto out_shared;
+	}
+
 	if (do_nonshared)
 		dcss_command = DCSS_LOADNSR;
 	else
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 		rc = dcss_diag_translate_rc (seg->end);
 		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
 			  &seg->start_addr, &seg->end);
-		goto out_shared;
+		goto out_resource;
 	}
 	seg->do_nonshared = do_nonshared;
 	atomic_set(&seg->ref_count, 1);
 	list_add(&seg->list, &dcss_list);
-	rc = seg->vm_segtype;
 	*addr = seg->start_addr;
 	*end = seg->end;
 	if (do_nonshared)
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 			    "type %s in non-shared mode\n", name,
 			    (void*)seg->start_addr, (void*)seg->end,
 			    segtype_string[seg->vm_segtype]);
-	else
+	else {
 		PRINT_INFO ("segment_load: loaded segment %s range %p .. %p "
 			    "type %s in shared mode\n", name,
 			    (void*)seg->start_addr, (void*)seg->end,
 			    segtype_string[seg->vm_segtype]);
+	}
 	goto out;
+ out_resource:
+	release_resource(seg->res);
+	kfree(seg->res);
  out_shared:
 	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
  * -ENOENT  : no such segment (segment gone!)
  * -EAGAIN  : segment is in use by other exploiters, try later
  * -EINVAL  : no segment with the given name is currently loaded - name invalid
+ * -EBUSY   : segment can temporarily not be used (overlaps with dcss)
  * 0        : operation succeeded
  */
 int
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared)
 		rc = -EAGAIN;
 		goto out_unlock;
 	}
-	dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-		  &dummy, &dummy);
-	if (do_nonshared)
-		dcss_command = DCSS_LOADNSR;
-	else
-		dcss_command = DCSS_LOADNOLY;
+	release_resource(seg->res);
+	if (do_nonshared) {
+		dcss_command = DCSS_LOADNSR;
+		seg->res->flags &= ~IORESOURCE_READONLY;
+	} else {
+		dcss_command = DCSS_LOADNOLY;
+		if (seg->vm_segtype == SEG_TYPE_SR ||
+		    seg->vm_segtype == SEG_TYPE_ER)
+			seg->res->flags |= IORESOURCE_READONLY;
+	}
+	if (request_resource(&iomem_resource, seg->res)) {
+		PRINT_WARN("segment_modify_shared: could not reload segment %s"
+			   " - overlapping resources\n", name);
+		rc = -EBUSY;
+		kfree(seg->res);
+		goto out_del;
+	}
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	diag_cc = dcss_diag(dcss_command, seg->dcss_name,
 			    &seg->start_addr, &seg->end);
 	if (diag_cc > 1) {
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared)
 	rc = 0;
 	goto out_unlock;
  out_del:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
-	dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-		  &dummy, &dummy);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
  out_unlock:
 	mutex_unlock(&dcss_lock);
@@ -478,6 +520,8 @@ segment_unload(char *name)
 	}
 	if (atomic_dec_return(&seg->ref_count) != 0)
 		goto out_unlock;
+	release_resource(seg->res);
+	kfree(seg->res);
 	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index cd85e34d8703..9ff143e87746 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug;
 extern void die(const char *,struct pt_regs *,long);
 
 #ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 int register_page_fault_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code)
 
 /*
  * Check which address space the address belongs to.
- * Returns 1 for user space and 0 for kernel space.
+ * May return 1 or 2 for user space and 0 for kernel space.
+ * Returns 2 for user space in primary addressing mode with
+ * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on.
  */
 static inline int check_user_space(struct pt_regs *regs, int error_code)
 {
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code)
 		return __check_access_register(regs, error_code);
 	if (descriptor == 2)
 		return current->thread.mm_segment.ar4;
-	return descriptor != 0;
+	return ((descriptor != 0) ^ (switch_amode)) << s390_noexec;
 }
 
 /*
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
+#ifdef CONFIG_S390_EXEC_PROTECT
+extern long sys_sigreturn(struct pt_regs *regs);
+extern long sys_rt_sigreturn(struct pt_regs *regs);
+extern long sys32_sigreturn(struct pt_regs *regs);
+extern long sys32_rt_sigreturn(struct pt_regs *regs);
+
+static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs,
+				int rt)
+{
+	up_read(&mm->mmap_sem);
+	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(current, TIF_31BIT)) {
+		if (rt)
+			sys32_rt_sigreturn(regs);
+		else
+			sys32_sigreturn(regs);
+		return;
+	}
+#endif /* CONFIG_COMPAT */
+	if (rt)
+		sys_rt_sigreturn(regs);
+	else
+		sys_sigreturn(regs);
+	return;
+}
+
+static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
+			 unsigned long address, unsigned long error_code)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	u16 *instruction;
+	unsigned long pfn, uaddr = regs->psw.addr;
+
+	spin_lock(&mm->page_table_lock);
+	pgd = pgd_offset(mm, uaddr);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out_fault;
+	pmd = pmd_offset(pgd, uaddr);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out_fault;
+	pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr);
+	if (!pte || !pte_present(*pte))
+		goto out_fault;
+	pfn = pte_pfn(*pte);
+	if (!pfn_valid(pfn))
+		goto out_fault;
+	spin_unlock(&mm->page_table_lock);
+
+	instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1)));
+	if (*instruction == 0x0a77)
+		do_sigreturn(mm, regs, 0);
+	else if (*instruction == 0x0aad)
+		do_sigreturn(mm, regs, 1);
+	else {
+		printk("- XXX - do_exception: task = %s, primary, NO EXEC "
+		       "-> SIGSEGV\n", current->comm);
+		up_read(&mm->mmap_sem);
+		current->thread.prot_addr = address;
+		current->thread.trap_no = error_code;
+		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+	}
+	return 0;
+out_fault:
+	spin_unlock(&mm->page_table_lock);
+	return -EFAULT;
+}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+	if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC)))
+		if (!signal_return(mm, regs, address, error_code))
+			/*
+			 * signal_return() has done an up_read(&mm->mmap_sem)
+			 * if it returns 0.
+			 */
+			return;
+#endif
+
 	if (vma->vm_start <= address)
 		goto good_area;
 	if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -452,8 +536,7 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-asmlinkage void
-pfault_interrupt(__u16 error_code)
+static void pfault_interrupt(__u16 error_code)
 {
 	struct task_struct *tsk;
 	__u16 subcode;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 4bb21be3b007..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -25,7 +25,7 @@
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
-
+#include <linux/initrd.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void)
 	pte_t new_pte;
 	unsigned long address, end;
 
-	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
-	end = PFN_ALIGN((unsigned long)&__end_rodata);
+	address = ((unsigned long)&_stext) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&_eshared);
 
 	for (; address < end; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
 		pmd = pmd_offset(pgd, address);
 		pte = pte_offset_kernel(pmd, address);
 		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
-		set_pte(pte, new_pte);
+		*pte = new_pte;
 	}
 }
 
-extern void vmem_map_init(void);
-
 /*
  * paging_init() sets up the page tables
  */
@@ -125,11 +123,11 @@ void __init paging_init(void)
 #ifdef CONFIG_64BIT
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pgd_clear(pg_dir + i);
+		pgd_clear_kernel(pg_dir + i);
 #else
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *)(pg_dir + i));
+		pmd_clear_kernel((pmd_t *)(pg_dir + i));
 #endif
 	vmem_map_init();
 	setup_ro_region();
@@ -174,10 +172,8 @@ void __init mem_init(void)
 	       datasize >>10,
 	       initsize >> 10);
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
-	       (unsigned long)&__start_rodata,
-	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
-	printk("Virtual memmap size: %ldk\n",
-	       (max_pfn * sizeof(struct page)) >> 10);
+	       (unsigned long)&_stext,
+	       PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index cd3d93e8c211..92a565190028 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	if (!pmd)
 		return NULL;
 	for (i = 0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
+		pmd_clear_kernel(pmd + i);
 	return pmd;
 }
 
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void)
 		return NULL;
 	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(pte + i, empty_pte);
+		pte[i] = empty_pte;
 	return pte;
 }
 
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pm_dir);
+			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
 		}
 
 		pm_dir = pmd_offset(pg_dir, address);
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
-		set_pte(pt_dir, pte);
+		*pt_dir = pte;
 	}
 	ret = 0;
 out:
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		if (pmd_none(*pm_dir))
 			continue;
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		set_pte(pt_dir, pte);
+		*pt_dir = pte;
 	}
 	flush_tlb_kernel_range(start, start + size);
 }
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 			pm_dir = vmem_pmd_alloc();
 			if (!pm_dir)
 				goto out;
-			pgd_populate(&init_mm, pg_dir, pm_dir);
+			pgd_populate_kernel(&init_mm, pg_dir, pm_dir);
 		}
 
 		pm_dir = pmd_offset(pg_dir, address);
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 			if (!new_page)
 				goto out;
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
-			set_pte(pt_dir, pte);
+			*pt_dir = pte;
 		}
 	}
 	ret = 0;
diff --git a/arch/x86_64/kernel/early-quirks.c b/arch/x86_64/kernel/early-quirks.c
index 49802f1bee94..bd30d138113f 100644
--- a/arch/x86_64/kernel/early-quirks.c
+++ b/arch/x86_64/kernel/early-quirks.c
@@ -32,7 +32,7 @@ static void via_bugs(void)
 
 static int nvidia_hpet_detected __initdata;
 
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 	nvidia_hpet_detected = 1;
 	return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
 		return;
 
 	nvidia_hpet_detected = 0;
-	acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+	acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
 	if (nvidia_hpet_detected == 0) {
 		acpi_skip_timer_override = 1;
 		printk(KERN_INFO "Nvidia board "
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index b007433f96bb..0b3603adf56d 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
 	 * Some x86_64 machines use physical APIC mode regardless of how many
 	 * procs/clusters are present (x86_64 ES7000 is an example).
 	 */
-	if (acpi_fadt.revision > FADT2_REVISION_ID)
-		if (acpi_fadt.force_apic_physical_destination_mode) {
+	if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+		if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
 			genapic = &apic_cluster;
 			goto print;
 		}
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d7bad90a5ad8..6be6730acb5c 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -1956,24 +1956,31 @@ static struct irq_chip msi_chip = {
 	.retrigger = ioapic_retrigger_irq,
 };
 
-int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
 	struct msi_msg msg;
-	int ret;
+	int irq, ret;
+	irq = create_irq();
+	if (irq < 0)
+		return irq;
+
+	set_irq_msi(irq, desc);
 	ret = msi_compose_msg(dev, irq, &msg);
-	if (ret < 0)
+	if (ret < 0) {
+		destroy_irq(irq);
 		return ret;
+	}
 
 	write_msi_msg(irq, &msg);
 
 	set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
 
-	return 0;
+	return irq;
 }
 
 void arch_teardown_msi_irq(unsigned int irq)
 {
-	return;
+	destroy_irq(irq);
 }
 
 #endif /* CONFIG_PCI_MSI */
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 08072568847d..50dd8bef850e 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 		return gsi;
 
 	/* Don't set up the ACPI SCI because it's already set up */
-	if (acpi_fadt.sci_int == gsi)
+	if (acpi_gbl_FADT.sci_interrupt == gsi)
 		return gsi;
 
 	ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
index 697f0aa794b9..eb18be5a6569 100644
--- a/arch/x86_64/kernel/pci-swiotlb.c
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -29,7 +29,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = NULL,
 };
 
-void pci_swiotlb_init(void)
+void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 	if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 5cc76d0d331f..335cc91c49b7 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
 {
 	unsigned int year, mon, day, hour, min, sec;
 	unsigned long flags;
-	unsigned extyear = 0;
+	unsigned century = 0;
 
 	spin_lock_irqsave(&rtc_lock, flags);
 
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
 		mon = CMOS_READ(RTC_MONTH);
 		year = CMOS_READ(RTC_YEAR);
 #ifdef CONFIG_ACPI
-		if (acpi_fadt.revision >= FADT2_REVISION_ID &&
-		    acpi_fadt.century)
-			extyear = CMOS_READ(acpi_fadt.century);
+		if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+		    acpi_gbl_FADT.century)
+			century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
 	} while (sec != CMOS_READ(RTC_SECONDS));
 
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
 		BCD_TO_BIN(mon);
 		BCD_TO_BIN(year);
 
-	if (extyear) {
-		BCD_TO_BIN(extyear);
-		year += extyear;
-		printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+	if (century) {
+		BCD_TO_BIN(century);
+		year += century * 100;
+		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
 	} else {
 		/*
 		 * x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 #ifdef CONFIG_ACPI
 		/* But TSC doesn't tick in C3 so don't use it there */
-		if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
+		if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
 			return 1;
 #endif
 		return 0;
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 1087e150a218..2efe215fc76a 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
 static __init int slit_valid(struct acpi_table_slit *slit)
 {
 	int i, j;
-	int d = slit->localities;
+	int d = slit->locality_count;
 	for (i = 0; i < d; i++) {
 		for (j = 0; j < d; j++)  {
 			u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 
 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
 		bad_srat();
 		return;
 	}
-	if (pa->flags.enabled == 0)
+	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 		return;
-	pxm = pa->proximity_domain;
+	pxm = pa->proximity_domain_lo;
 	node = setup_node(pxm);
 	if (node < 0) {
 		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -254,21 +254,21 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 	/* Looks good */
 
 	if (nd->start == nd->end) {
 		nd->start = start;
 		nd->end = end;
 		changed = 1;
 	} else {
 		if (nd->start == end) {
 			nd->start = start;
 			changed = 1;
 		}
 		if (nd->end == start) {
 			nd->end = end;
 			changed = 1;
 		}
 		if (!changed)
 			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
 	}
 
 	ret = update_end_of_memory(nd->end);
 
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 	struct bootnode *nd, oldnode;
 	unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 
 	if (srat_disabled())
 		return;
-	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+	if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
 		bad_srat();
 		return;
 	}
-	if (ma->flags.enabled == 0)
+	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 		return;
-	if (ma->flags.hot_pluggable && !save_add_info())
+
+	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
 		return;
-	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
-	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+	start = ma->base_address;
+	end = start + ma->length;
 	pxm = ma->proximity_domain;
 	node = setup_node(pxm);
 	if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
 			nd->end >> PAGE_SHIFT);
 
-	if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+	if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+	    (reserve_hotadd(node, start, end) < 0)) {
 		/* Ignore hotadd region. Undo damage */
 		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
 		*nd = oldnode;
@@ -394,7 +396,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 
 	/* First clean up the node list */
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cutoff_node(i, start, end);
 		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
 			unparse_node(i);
 			node_set_offline(i);
@@ -426,7 +428,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
 		if (!node_online(i))
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 
 	for (i = 0; i < NR_CPUS; i++) {
 		if (cpu_to_node[i] == NUMA_NO_NODE)
 			continue;
 		if (!node_isset(cpu_to_node[i], nodes_parsed))
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
 
 	if (!acpi_slit)
 		return a == b ? 10 : 20;
-	index = acpi_slit->localities * node_to_pxm(a);
+	index = acpi_slit->locality_count * node_to_pxm(a);
 	return acpi_slit->entry[index + node_to_pxm(b)];
 }
 
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index f8b6b2800a62..faabb6e87f12 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -1,6 +1,6 @@
 /*
  * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
  *
  * This is an 64bit optimized version that always keeps the full mmconfig
  * space mapped. This allows lockless config space operation.
  */
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
 
 /* Static virtual mapping of the MMCONFIG aperture */
 struct mmcfg_virt {
-	struct acpi_table_mcfg_config *cfg;
+	struct acpi_mcfg_allocation *cfg;
 	char __iomem *virt;
 };
 static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
 static char __iomem *get_virt(unsigned int seg, unsigned bus)
 {
 	int cfg_num = -1;
-	struct acpi_table_mcfg_config *cfg;
+	struct acpi_mcfg_allocation *cfg;
 
 	while (1) {
 		++cfg_num;
 		if (cfg_num >= pci_mmcfg_config_num)
 			break;
 		cfg = pci_mmcfg_virt[cfg_num].cfg;
-		if (cfg->pci_segment_group_number != seg)
+		if (cfg->pci_segment != seg)
 			continue;
 		if ((cfg->start_bus_number <= bus) &&
 		    (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 	   this applies to all busses. */
 	cfg = &pci_mmcfg_config[0];
 	if (pci_mmcfg_config_num == 1 &&
-	    cfg->pci_segment_group_number == 0 &&
+	    cfg->pci_segment == 0 &&
 	    (cfg->start_bus_number | cfg->end_bus_number) == 0)
 		return pci_mmcfg_virt[0].virt;
 
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
 	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
 		return;
 
-	acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+	acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
 	if ((pci_mmcfg_config_num == 0) ||
 	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].base_address == 0))
+	    (pci_mmcfg_config[0].address == 0))
 		return;
 
 	/* Only do this check when type 1 works. If it doesn't work
 	   assume we run on a Mac and always use MCFG */
-	if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-		pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+	if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+		pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
 		E820_RESERVED)) {
-		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-			pci_mmcfg_config[0].base_address);
+		printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+			(unsigned long)pci_mmcfg_config[0].address);
 		printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
 		return;
 	}
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
 	}
 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
 		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+		pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
							 MMCONFIG_APER_MAX);
 		if (!pci_mmcfg_virt[i].virt) {
 			printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
 			       "segment %d\n",
-			       pci_mmcfg_config[i].pci_segment_group_number);
+			       pci_mmcfg_config[i].pci_segment);
 			return;
 		}
-		printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+		printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+		       (unsigned long)pci_mmcfg_config[i].address);
 	}
 
 	unreachable_devices();