Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 41
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 219
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 18
-rw-r--r--  arch/ia64/hp/sim/Kconfig | 4
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c | 2
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 7
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 8
-rw-r--r--  arch/ia64/ia32/ia32_support.c | 2
-rw-r--r--  arch/ia64/ia32/ia32priv.h | 2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 6
-rw-r--r--  arch/ia64/kernel/Makefile | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 9
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 11
-rw-r--r--  arch/ia64/kernel/crash.c | 245
-rw-r--r--  arch/ia64/kernel/efi.c | 71
-rw-r--r--  arch/ia64/kernel/entry.S | 2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 1
-rw-r--r--  arch/ia64/kernel/iosapic.c | 27
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 24
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c | 2
-rw-r--r--  arch/ia64/kernel/kprobes.c | 4
-rw-r--r--  arch/ia64/kernel/machine_kexec.c | 133
-rw-r--r--  arch/ia64/kernel/mca.c | 13
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 95
-rw-r--r--  arch/ia64/kernel/pal.S | 58
-rw-r--r--  arch/ia64/kernel/palinfo.c | 24
-rw-r--r--  arch/ia64/kernel/perfmon.c | 18
-rw-r--r--  arch/ia64/kernel/perfmon_montecito.h | 12
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 334
-rw-r--r--  arch/ia64/kernel/sal.c | 11
-rw-r--r--  arch/ia64/kernel/salinfo.c | 8
-rw-r--r--  arch/ia64/kernel/setup.c | 40
-rw-r--r--  arch/ia64/kernel/smp.c | 40
-rw-r--r--  arch/ia64/kernel/smpboot.c | 12
-rw-r--r--  arch/ia64/kernel/time.c | 6
-rw-r--r--  arch/ia64/kernel/topology.c | 8
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 8
-rw-r--r--  arch/ia64/lib/checksum.c | 38
-rw-r--r--  arch/ia64/lib/csum_partial_copy.c | 31
-rw-r--r--  arch/ia64/lib/ip_fast_csum.S | 58
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 9
-rw-r--r--  arch/ia64/mm/init.c | 4
-rw-r--r--  arch/ia64/pci/Makefile | 2
-rw-r--r--  arch/ia64/pci/fixup.c | 69
-rw-r--r--  arch/ia64/pci/pci.c | 82
-rw-r--r--  arch/ia64/sn/kernel/Makefile | 5
-rw-r--r--  arch/ia64/sn/kernel/bte.c | 9
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c | 231
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 613
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 633
-rw-r--r--  arch/ia64/sn/kernel/iomv.c | 11
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 20
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 4
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 38
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c | 2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c | 17
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c | 18
58 files changed, 2505 insertions, 919 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 70f7eb9fed35..fcacfe291b9b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -34,6 +34,14 @@ config RWSEM_XCHGADD_ALGORITHM
34 bool 34 bool
35 default y 35 default y
36 36
37config ARCH_HAS_ILOG2_U32
38 bool
39 default n
40
41config ARCH_HAS_ILOG2_U64
42 bool
43 default n
44
37config GENERIC_FIND_NEXT_BIT 45config GENERIC_FIND_NEXT_BIT
38 bool 46 bool
39 default y 47 default y
@@ -341,6 +349,7 @@ config NUMA
341 bool "NUMA support" 349 bool "NUMA support"
342 depends on !IA64_HP_SIM && !FLATMEM 350 depends on !IA64_HP_SIM && !FLATMEM
343 default y if IA64_SGI_SN2 351 default y if IA64_SGI_SN2
352 select ACPI_NUMA if ACPI
344 help 353 help
345 Say Y to compile the kernel to support NUMA (Non-Uniform Memory 354 Say Y to compile the kernel to support NUMA (Non-Uniform Memory
346 Access). This option is for configuring high-end multiprocessor 355 Access). This option is for configuring high-end multiprocessor
@@ -433,6 +442,29 @@ config IA64_ESI
433 442
434source "drivers/sn/Kconfig" 443source "drivers/sn/Kconfig"
435 444
445config KEXEC
446 bool "kexec system call (EXPERIMENTAL)"
447 depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
448 help
449 kexec is a system call that implements the ability to shutdown your
450 current kernel, and to start another kernel. It is like a reboot
451 but it is indepedent of the system firmware. And like a reboot
452 you can start any kernel with it, not just Linux.
453
454 The name comes from the similiarity to the exec system call.
455
456 It is an ongoing process to be certain the hardware in a machine
457 is properly shutdown, so do not be surprised if this code does not
458 initially work for you. It may help to enable device hotplugging
459 support. As of this writing the exact hardware interface is
460 strongly in flux, so no good recommendation can be made.
461
462config CRASH_DUMP
463 bool "kernel crash dumps (EXPERIMENTAL)"
464 depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
465 help
466 Generate crash dump after being started by kexec.
467
436source "drivers/firmware/Kconfig" 468source "drivers/firmware/Kconfig"
437 469
438source "fs/Kconfig.binfmt" 470source "fs/Kconfig.binfmt"
@@ -483,6 +515,15 @@ source "net/Kconfig"
483 515
484source "drivers/Kconfig" 516source "drivers/Kconfig"
485 517
518config MSPEC
519 tristate "Memory special operations driver"
520 depends on IA64
521 select IA64_UNCACHED_ALLOCATOR
522 help
523 If you have an ia64 and you want to enable memory special
524 operations support (formerly known as fetchop), say Y here,
525 otherwise say N.
526
486source "fs/Kconfig" 527source "fs/Kconfig"
487 528
488source "lib/Kconfig" 529source "lib/Kconfig"
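
The KEXEC option added above wires up the kexec_load system call on ia64 (the corresponding sys_call_table slot is switched from sys_ni_syscall to sys_kexec_load in the entry.S hunk later in this diff). For orientation only, here is a minimal userspace sketch of the call shape; real loading is done by the kexec-tools package, which builds purgatory code and many segments, and the load address and entry point below are purely hypothetical.

/* Sketch of the kexec_load(2) call shape only -- not a working loader.
 * The physical load address and entry point are hypothetical. */
#include <linux/kexec.h>        /* struct kexec_segment, KEXEC_ARCH_DEFAULT */
#include <sys/syscall.h>
#include <unistd.h>

static long load_image_sketch(void *image, size_t len)
{
	struct kexec_segment seg = {
		.buf   = image,                     /* image bytes in user memory */
		.bufsz = len,
		.mem   = (void *)0x4000000,         /* hypothetical physical destination */
		.memsz = (len + 0xfff) & ~0xfffUL,  /* page-aligned destination size */
	};

	/* one segment, entry point at the (hypothetical) load address */
	return syscall(SYS_kexec_load, 0x4000000UL, 1UL, &seg, KEXEC_ARCH_DEFAULT);
}
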
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 0f14a82b856e..64e951de4e57 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -1,8 +1,9 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc3 3# Linux kernel version: 2.6.19-rc1
4# Thu Apr 27 11:48:23 2006 4# Mon Oct 9 10:53:59 2006
5# 5#
6CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
6 7
7# 8#
8# Code maturity level options 9# Code maturity level options
@@ -18,16 +19,22 @@ CONFIG_LOCALVERSION=""
18# CONFIG_LOCALVERSION_AUTO is not set 19# CONFIG_LOCALVERSION_AUTO is not set
19CONFIG_SWAP=y 20CONFIG_SWAP=y
20CONFIG_SYSVIPC=y 21CONFIG_SYSVIPC=y
22# CONFIG_IPC_NS is not set
21CONFIG_POSIX_MQUEUE=y 23CONFIG_POSIX_MQUEUE=y
22# CONFIG_BSD_PROCESS_ACCT is not set 24# CONFIG_BSD_PROCESS_ACCT is not set
23CONFIG_SYSCTL=y 25CONFIG_TASKSTATS=y
26# CONFIG_TASK_DELAY_ACCT is not set
27# CONFIG_UTS_NS is not set
24# CONFIG_AUDIT is not set 28# CONFIG_AUDIT is not set
25# CONFIG_IKCONFIG is not set 29# CONFIG_IKCONFIG is not set
26CONFIG_CPUSETS=y 30CONFIG_CPUSETS=y
27CONFIG_RELAY=y 31CONFIG_RELAY=y
28CONFIG_INITRAMFS_SOURCE="" 32CONFIG_INITRAMFS_SOURCE=""
29CONFIG_CC_OPTIMIZE_FOR_SIZE=y 33CONFIG_CC_OPTIMIZE_FOR_SIZE=y
34CONFIG_TASK_XACCT=y
35CONFIG_SYSCTL=y
30# CONFIG_EMBEDDED is not set 36# CONFIG_EMBEDDED is not set
37# CONFIG_SYSCTL_SYSCALL is not set
31CONFIG_KALLSYMS=y 38CONFIG_KALLSYMS=y
32CONFIG_KALLSYMS_ALL=y 39CONFIG_KALLSYMS_ALL=y
33# CONFIG_KALLSYMS_EXTRA_PASS is not set 40# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -40,6 +47,8 @@ CONFIG_FUTEX=y
40CONFIG_EPOLL=y 47CONFIG_EPOLL=y
41CONFIG_SHMEM=y 48CONFIG_SHMEM=y
42CONFIG_SLAB=y 49CONFIG_SLAB=y
50CONFIG_VM_EVENT_COUNTERS=y
51CONFIG_RT_MUTEXES=y
43# CONFIG_TINY_SHMEM is not set 52# CONFIG_TINY_SHMEM is not set
44CONFIG_BASE_SMALL=0 53CONFIG_BASE_SMALL=0
45# CONFIG_SLOB is not set 54# CONFIG_SLOB is not set
@@ -58,6 +67,7 @@ CONFIG_STOP_MACHINE=y
58# 67#
59# Block layer 68# Block layer
60# 69#
70CONFIG_BLOCK=y
61# CONFIG_BLK_DEV_IO_TRACE is not set 71# CONFIG_BLK_DEV_IO_TRACE is not set
62 72
63# 73#
@@ -89,7 +99,7 @@ CONFIG_EFI=y
89CONFIG_GENERIC_IOMAP=y 99CONFIG_GENERIC_IOMAP=y
90CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 100CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
91CONFIG_IA64_UNCACHED_ALLOCATOR=y 101CONFIG_IA64_UNCACHED_ALLOCATOR=y
92CONFIG_DMA_IS_DMA32=y 102CONFIG_AUDIT_ARCH=y
93# CONFIG_IA64_GENERIC is not set 103# CONFIG_IA64_GENERIC is not set
94# CONFIG_IA64_DIG is not set 104# CONFIG_IA64_DIG is not set
95# CONFIG_IA64_HP_ZX1 is not set 105# CONFIG_IA64_HP_ZX1 is not set
@@ -116,6 +126,7 @@ CONFIG_FORCE_MAX_ZONEORDER=17
116CONFIG_SMP=y 126CONFIG_SMP=y
117CONFIG_NR_CPUS=1024 127CONFIG_NR_CPUS=1024
118# CONFIG_HOTPLUG_CPU is not set 128# CONFIG_HOTPLUG_CPU is not set
129CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
119CONFIG_SCHED_SMT=y 130CONFIG_SCHED_SMT=y
120CONFIG_PREEMPT=y 131CONFIG_PREEMPT=y
121CONFIG_SELECT_MEMORY_MODEL=y 132CONFIG_SELECT_MEMORY_MODEL=y
@@ -128,6 +139,7 @@ CONFIG_NEED_MULTIPLE_NODES=y
128# CONFIG_SPARSEMEM_STATIC is not set 139# CONFIG_SPARSEMEM_STATIC is not set
129CONFIG_SPLIT_PTLOCK_CPUS=4 140CONFIG_SPLIT_PTLOCK_CPUS=4
130CONFIG_MIGRATION=y 141CONFIG_MIGRATION=y
142CONFIG_RESOURCES_64BIT=y
131CONFIG_ARCH_SELECT_MEMORY_MODEL=y 143CONFIG_ARCH_SELECT_MEMORY_MODEL=y
132CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 144CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
133CONFIG_ARCH_FLATMEM_ENABLE=y 145CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -135,15 +147,24 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
135CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y 147CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
136CONFIG_NUMA=y 148CONFIG_NUMA=y
137CONFIG_NODES_SHIFT=10 149CONFIG_NODES_SHIFT=10
150CONFIG_ARCH_POPULATES_NODE_MAP=y
138CONFIG_VIRTUAL_MEM_MAP=y 151CONFIG_VIRTUAL_MEM_MAP=y
139CONFIG_HOLES_IN_ZONE=y 152CONFIG_HOLES_IN_ZONE=y
140CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y 153CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
154CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
141CONFIG_IA32_SUPPORT=y 155CONFIG_IA32_SUPPORT=y
142CONFIG_COMPAT=y 156CONFIG_COMPAT=y
143CONFIG_IA64_MCA_RECOVERY=y 157CONFIG_IA64_MCA_RECOVERY=y
144CONFIG_PERFMON=y 158CONFIG_PERFMON=y
145CONFIG_IA64_PALINFO=y 159CONFIG_IA64_PALINFO=y
146CONFIG_SGI_SN=y 160CONFIG_SGI_SN=y
161# CONFIG_IA64_ESI is not set
162
163#
164# SN Devices
165#
166CONFIG_SGI_IOC4=y
167CONFIG_SGI_IOC3=y
147 168
148# 169#
149# Firmware Drivers 170# Firmware Drivers
@@ -159,6 +180,7 @@ CONFIG_BINFMT_ELF=y
159CONFIG_PM=y 180CONFIG_PM=y
160# CONFIG_PM_LEGACY is not set 181# CONFIG_PM_LEGACY is not set
161# CONFIG_PM_DEBUG is not set 182# CONFIG_PM_DEBUG is not set
183# CONFIG_PM_SYSFS_DEPRECATED is not set
162 184
163# 185#
164# ACPI (Advanced Configuration and Power Interface) Support 186# ACPI (Advanced Configuration and Power Interface) Support
@@ -166,6 +188,7 @@ CONFIG_PM=y
166CONFIG_ACPI=y 188CONFIG_ACPI=y
167# CONFIG_ACPI_BUTTON is not set 189# CONFIG_ACPI_BUTTON is not set
168# CONFIG_ACPI_FAN is not set 190# CONFIG_ACPI_FAN is not set
191# CONFIG_ACPI_DOCK is not set
169# CONFIG_ACPI_PROCESSOR is not set 192# CONFIG_ACPI_PROCESSOR is not set
170CONFIG_ACPI_NUMA=y 193CONFIG_ACPI_NUMA=y
171CONFIG_ACPI_BLACKLIST_YEAR=0 194CONFIG_ACPI_BLACKLIST_YEAR=0
@@ -185,7 +208,12 @@ CONFIG_ACPI_SYSTEM=y
185# 208#
186CONFIG_PCI=y 209CONFIG_PCI=y
187CONFIG_PCI_DOMAINS=y 210CONFIG_PCI_DOMAINS=y
211CONFIG_PCIEPORTBUS=y
212CONFIG_HOTPLUG_PCI_PCIE=y
213# CONFIG_HOTPLUG_PCI_PCIE_POLL_EVENT_MODE is not set
214CONFIG_PCIEAER=y
188# CONFIG_PCI_MSI is not set 215# CONFIG_PCI_MSI is not set
216# CONFIG_PCI_MULTITHREAD_PROBE is not set
189# CONFIG_PCI_DEBUG is not set 217# CONFIG_PCI_DEBUG is not set
190 218
191# 219#
@@ -215,6 +243,9 @@ CONFIG_NET=y
215CONFIG_PACKET=y 243CONFIG_PACKET=y
216CONFIG_PACKET_MMAP=y 244CONFIG_PACKET_MMAP=y
217CONFIG_UNIX=y 245CONFIG_UNIX=y
246CONFIG_XFRM=y
247# CONFIG_XFRM_USER is not set
248# CONFIG_XFRM_SUB_POLICY is not set
218# CONFIG_NET_KEY is not set 249# CONFIG_NET_KEY is not set
219CONFIG_INET=y 250CONFIG_INET=y
220CONFIG_IP_MULTICAST=y 251CONFIG_IP_MULTICAST=y
@@ -231,19 +262,31 @@ CONFIG_SYN_COOKIES=y
231# CONFIG_INET_IPCOMP is not set 262# CONFIG_INET_IPCOMP is not set
232# CONFIG_INET_XFRM_TUNNEL is not set 263# CONFIG_INET_XFRM_TUNNEL is not set
233# CONFIG_INET_TUNNEL is not set 264# CONFIG_INET_TUNNEL is not set
265CONFIG_INET_XFRM_MODE_TRANSPORT=y
266CONFIG_INET_XFRM_MODE_TUNNEL=y
267CONFIG_INET_XFRM_MODE_BEET=y
234CONFIG_INET_DIAG=m 268CONFIG_INET_DIAG=m
235CONFIG_INET_TCP_DIAG=m 269CONFIG_INET_TCP_DIAG=m
236# CONFIG_TCP_CONG_ADVANCED is not set 270# CONFIG_TCP_CONG_ADVANCED is not set
237CONFIG_TCP_CONG_BIC=y 271CONFIG_TCP_CONG_CUBIC=y
272CONFIG_DEFAULT_TCP_CONG="cubic"
238CONFIG_IPV6=m 273CONFIG_IPV6=m
239# CONFIG_IPV6_PRIVACY is not set 274# CONFIG_IPV6_PRIVACY is not set
240# CONFIG_IPV6_ROUTER_PREF is not set 275# CONFIG_IPV6_ROUTER_PREF is not set
241# CONFIG_INET6_AH is not set 276# CONFIG_INET6_AH is not set
242# CONFIG_INET6_ESP is not set 277# CONFIG_INET6_ESP is not set
243# CONFIG_INET6_IPCOMP is not set 278# CONFIG_INET6_IPCOMP is not set
279# CONFIG_IPV6_MIP6 is not set
244# CONFIG_INET6_XFRM_TUNNEL is not set 280# CONFIG_INET6_XFRM_TUNNEL is not set
245# CONFIG_INET6_TUNNEL is not set 281# CONFIG_INET6_TUNNEL is not set
282CONFIG_INET6_XFRM_MODE_TRANSPORT=m
283CONFIG_INET6_XFRM_MODE_TUNNEL=m
284CONFIG_INET6_XFRM_MODE_BEET=m
285# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
246# CONFIG_IPV6_TUNNEL is not set 286# CONFIG_IPV6_TUNNEL is not set
287# CONFIG_IPV6_SUBTREES is not set
288# CONFIG_IPV6_MULTIPLE_TABLES is not set
289# CONFIG_NETWORK_SECMARK is not set
247# CONFIG_NETFILTER is not set 290# CONFIG_NETFILTER is not set
248 291
249# 292#
@@ -269,7 +312,6 @@ CONFIG_IPV6=m
269# CONFIG_ATALK is not set 312# CONFIG_ATALK is not set
270# CONFIG_X25 is not set 313# CONFIG_X25 is not set
271# CONFIG_LAPB is not set 314# CONFIG_LAPB is not set
272# CONFIG_NET_DIVERT is not set
273# CONFIG_ECONET is not set 315# CONFIG_ECONET is not set
274# CONFIG_WAN_ROUTER is not set 316# CONFIG_WAN_ROUTER is not set
275 317
@@ -298,6 +340,7 @@ CONFIG_STANDALONE=y
298CONFIG_PREVENT_FIRMWARE_BUILD=y 340CONFIG_PREVENT_FIRMWARE_BUILD=y
299CONFIG_FW_LOADER=y 341CONFIG_FW_LOADER=y
300# CONFIG_DEBUG_DRIVER is not set 342# CONFIG_DEBUG_DRIVER is not set
343# CONFIG_SYS_HYPERVISOR is not set
301 344
302# 345#
303# Connector - unified userspace <-> kernelspace linker 346# Connector - unified userspace <-> kernelspace linker
@@ -335,6 +378,7 @@ CONFIG_BLK_DEV_NBD=m
335CONFIG_BLK_DEV_RAM=y 378CONFIG_BLK_DEV_RAM=y
336CONFIG_BLK_DEV_RAM_COUNT=16 379CONFIG_BLK_DEV_RAM_COUNT=16
337CONFIG_BLK_DEV_RAM_SIZE=4096 380CONFIG_BLK_DEV_RAM_SIZE=4096
381CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
338CONFIG_BLK_DEV_INITRD=y 382CONFIG_BLK_DEV_INITRD=y
339# CONFIG_CDROM_PKTCDVD is not set 383# CONFIG_CDROM_PKTCDVD is not set
340CONFIG_ATA_OVER_ETH=m 384CONFIG_ATA_OVER_ETH=m
@@ -381,6 +425,7 @@ CONFIG_IDEDMA_PCI_AUTO=y
381# CONFIG_BLK_DEV_CS5530 is not set 425# CONFIG_BLK_DEV_CS5530 is not set
382# CONFIG_BLK_DEV_HPT34X is not set 426# CONFIG_BLK_DEV_HPT34X is not set
383# CONFIG_BLK_DEV_HPT366 is not set 427# CONFIG_BLK_DEV_HPT366 is not set
428# CONFIG_BLK_DEV_JMICRON is not set
384# CONFIG_BLK_DEV_SC1200 is not set 429# CONFIG_BLK_DEV_SC1200 is not set
385# CONFIG_BLK_DEV_PIIX is not set 430# CONFIG_BLK_DEV_PIIX is not set
386# CONFIG_BLK_DEV_IT821X is not set 431# CONFIG_BLK_DEV_IT821X is not set
@@ -404,6 +449,7 @@ CONFIG_IDEDMA_AUTO=y
404# 449#
405# CONFIG_RAID_ATTRS is not set 450# CONFIG_RAID_ATTRS is not set
406CONFIG_SCSI=y 451CONFIG_SCSI=y
452CONFIG_SCSI_NETLINK=y
407CONFIG_SCSI_PROC_FS=y 453CONFIG_SCSI_PROC_FS=y
408 454
409# 455#
@@ -425,12 +471,14 @@ CONFIG_SCSI_CONSTANTS=y
425# CONFIG_SCSI_LOGGING is not set 471# CONFIG_SCSI_LOGGING is not set
426 472
427# 473#
428# SCSI Transport Attributes 474# SCSI Transports
429# 475#
430CONFIG_SCSI_SPI_ATTRS=y 476CONFIG_SCSI_SPI_ATTRS=y
431CONFIG_SCSI_FC_ATTRS=y 477CONFIG_SCSI_FC_ATTRS=y
432CONFIG_SCSI_ISCSI_ATTRS=m 478CONFIG_SCSI_ISCSI_ATTRS=m
433CONFIG_SCSI_SAS_ATTRS=y 479CONFIG_SCSI_SAS_ATTRS=y
480CONFIG_SCSI_SAS_LIBSAS=y
481# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
434 482
435# 483#
436# SCSI low-level drivers 484# SCSI low-level drivers
@@ -443,46 +491,82 @@ CONFIG_ISCSI_TCP=m
443# CONFIG_SCSI_AIC7XXX is not set 491# CONFIG_SCSI_AIC7XXX is not set
444# CONFIG_SCSI_AIC7XXX_OLD is not set 492# CONFIG_SCSI_AIC7XXX_OLD is not set
445# CONFIG_SCSI_AIC79XX is not set 493# CONFIG_SCSI_AIC79XX is not set
494# CONFIG_SCSI_AIC94XX is not set
495# CONFIG_SCSI_ARCMSR is not set
446# CONFIG_MEGARAID_NEWGEN is not set 496# CONFIG_MEGARAID_NEWGEN is not set
447# CONFIG_MEGARAID_LEGACY is not set 497# CONFIG_MEGARAID_LEGACY is not set
448# CONFIG_MEGARAID_SAS is not set 498# CONFIG_MEGARAID_SAS is not set
449CONFIG_SCSI_SATA=y 499# CONFIG_SCSI_HPTIOP is not set
450# CONFIG_SCSI_SATA_AHCI is not set
451# CONFIG_SCSI_SATA_SVW is not set
452# CONFIG_SCSI_ATA_PIIX is not set
453# CONFIG_SCSI_SATA_MV is not set
454# CONFIG_SCSI_SATA_NV is not set
455# CONFIG_SCSI_PDC_ADMA is not set
456# CONFIG_SCSI_SATA_QSTOR is not set
457# CONFIG_SCSI_SATA_PROMISE is not set
458# CONFIG_SCSI_SATA_SX4 is not set
459# CONFIG_SCSI_SATA_SIL is not set
460# CONFIG_SCSI_SATA_SIL24 is not set
461# CONFIG_SCSI_SATA_SIS is not set
462# CONFIG_SCSI_SATA_ULI is not set
463# CONFIG_SCSI_SATA_VIA is not set
464CONFIG_SCSI_SATA_VITESSE=y
465# CONFIG_SCSI_DMX3191D is not set 500# CONFIG_SCSI_DMX3191D is not set
466# CONFIG_SCSI_FUTURE_DOMAIN is not set 501# CONFIG_SCSI_FUTURE_DOMAIN is not set
467# CONFIG_SCSI_IPS is not set 502# CONFIG_SCSI_IPS is not set
468# CONFIG_SCSI_INITIO is not set 503# CONFIG_SCSI_INITIO is not set
469# CONFIG_SCSI_INIA100 is not set 504# CONFIG_SCSI_INIA100 is not set
505# CONFIG_SCSI_STEX is not set
470# CONFIG_SCSI_SYM53C8XX_2 is not set 506# CONFIG_SCSI_SYM53C8XX_2 is not set
471# CONFIG_SCSI_IPR is not set 507# CONFIG_SCSI_IPR is not set
472CONFIG_SCSI_QLOGIC_1280=y 508CONFIG_SCSI_QLOGIC_1280=y
473CONFIG_SCSI_QLA_FC=y 509CONFIG_SCSI_QLA_FC=y
474CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE=y 510# CONFIG_SCSI_QLA_ISCSI is not set
475# CONFIG_SCSI_QLA21XX is not set
476CONFIG_SCSI_QLA22XX=y
477CONFIG_SCSI_QLA2300=y
478CONFIG_SCSI_QLA2322=y
479# CONFIG_SCSI_QLA24XX is not set
480# CONFIG_SCSI_LPFC is not set 511# CONFIG_SCSI_LPFC is not set
481# CONFIG_SCSI_DC395x is not set 512# CONFIG_SCSI_DC395x is not set
482# CONFIG_SCSI_DC390T is not set 513# CONFIG_SCSI_DC390T is not set
483# CONFIG_SCSI_DEBUG is not set 514# CONFIG_SCSI_DEBUG is not set
484 515
485# 516#
517# Serial ATA (prod) and Parallel ATA (experimental) drivers
518#
519CONFIG_ATA=y
520# CONFIG_SATA_AHCI is not set
521# CONFIG_SATA_SVW is not set
522# CONFIG_ATA_PIIX is not set
523# CONFIG_SATA_MV is not set
524# CONFIG_SATA_NV is not set
525# CONFIG_PDC_ADMA is not set
526# CONFIG_SATA_QSTOR is not set
527# CONFIG_SATA_PROMISE is not set
528# CONFIG_SATA_SX4 is not set
529# CONFIG_SATA_SIL is not set
530# CONFIG_SATA_SIL24 is not set
531# CONFIG_SATA_SIS is not set
532# CONFIG_SATA_ULI is not set
533# CONFIG_SATA_VIA is not set
534CONFIG_SATA_VITESSE=y
535# CONFIG_PATA_ALI is not set
536# CONFIG_PATA_AMD is not set
537# CONFIG_PATA_ARTOP is not set
538# CONFIG_PATA_ATIIXP is not set
539# CONFIG_PATA_CMD64X is not set
540# CONFIG_PATA_CS5520 is not set
541# CONFIG_PATA_CS5530 is not set
542# CONFIG_PATA_CYPRESS is not set
543# CONFIG_PATA_EFAR is not set
544# CONFIG_ATA_GENERIC is not set
545# CONFIG_PATA_HPT366 is not set
546# CONFIG_PATA_HPT37X is not set
547# CONFIG_PATA_HPT3X2N is not set
548# CONFIG_PATA_HPT3X3 is not set
549# CONFIG_PATA_IT821X is not set
550# CONFIG_PATA_JMICRON is not set
551# CONFIG_PATA_TRIFLEX is not set
552# CONFIG_PATA_MPIIX is not set
553# CONFIG_PATA_OLDPIIX is not set
554# CONFIG_PATA_NETCELL is not set
555# CONFIG_PATA_NS87410 is not set
556# CONFIG_PATA_OPTI is not set
557# CONFIG_PATA_OPTIDMA is not set
558# CONFIG_PATA_PDC_OLD is not set
559# CONFIG_PATA_RADISYS is not set
560# CONFIG_PATA_RZ1000 is not set
561# CONFIG_PATA_SC1200 is not set
562# CONFIG_PATA_SERVERWORKS is not set
563# CONFIG_PATA_PDC2027X is not set
564# CONFIG_PATA_SIL680 is not set
565# CONFIG_PATA_SIS is not set
566# CONFIG_PATA_VIA is not set
567# CONFIG_PATA_WINBOND is not set
568
569#
486# Multi-device support (RAID and LVM) 570# Multi-device support (RAID and LVM)
487# 571#
488CONFIG_MD=y 572CONFIG_MD=y
@@ -491,12 +575,12 @@ CONFIG_MD_LINEAR=y
491CONFIG_MD_RAID0=y 575CONFIG_MD_RAID0=y
492CONFIG_MD_RAID1=y 576CONFIG_MD_RAID1=y
493# CONFIG_MD_RAID10 is not set 577# CONFIG_MD_RAID10 is not set
494CONFIG_MD_RAID5=y 578CONFIG_MD_RAID456=y
495# CONFIG_MD_RAID5_RESHAPE is not set 579# CONFIG_MD_RAID5_RESHAPE is not set
496# CONFIG_MD_RAID6 is not set
497CONFIG_MD_MULTIPATH=y 580CONFIG_MD_MULTIPATH=y
498# CONFIG_MD_FAULTY is not set 581# CONFIG_MD_FAULTY is not set
499CONFIG_BLK_DEV_DM=y 582CONFIG_BLK_DEV_DM=y
583# CONFIG_DM_DEBUG is not set
500CONFIG_DM_CRYPT=m 584CONFIG_DM_CRYPT=m
501CONFIG_DM_SNAPSHOT=m 585CONFIG_DM_SNAPSHOT=m
502CONFIG_DM_MIRROR=m 586CONFIG_DM_MIRROR=m
@@ -563,6 +647,7 @@ CONFIG_NETDEVICES=y
563# CONFIG_SK98LIN is not set 647# CONFIG_SK98LIN is not set
564CONFIG_TIGON3=y 648CONFIG_TIGON3=y
565# CONFIG_BNX2 is not set 649# CONFIG_BNX2 is not set
650# CONFIG_QLA3XXX is not set
566 651
567# 652#
568# Ethernet (10000 Mbit) 653# Ethernet (10000 Mbit)
@@ -571,6 +656,7 @@ CONFIG_CHELSIO_T1=m
571# CONFIG_IXGB is not set 656# CONFIG_IXGB is not set
572CONFIG_S2IO=m 657CONFIG_S2IO=m
573# CONFIG_S2IO_NAPI is not set 658# CONFIG_S2IO_NAPI is not set
659# CONFIG_MYRI10GE is not set
574 660
575# 661#
576# Token Ring devices 662# Token Ring devices
@@ -612,6 +698,7 @@ CONFIG_NET_POLL_CONTROLLER=y
612# Input device support 698# Input device support
613# 699#
614CONFIG_INPUT=y 700CONFIG_INPUT=y
701# CONFIG_INPUT_FF_MEMLESS is not set
615 702
616# 703#
617# Userland interfaces 704# Userland interfaces
@@ -646,6 +733,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
646CONFIG_VT=y 733CONFIG_VT=y
647CONFIG_VT_CONSOLE=y 734CONFIG_VT_CONSOLE=y
648CONFIG_HW_CONSOLE=y 735CONFIG_HW_CONSOLE=y
736# CONFIG_VT_HW_CONSOLE_BINDING is not set
649CONFIG_SERIAL_NONSTANDARD=y 737CONFIG_SERIAL_NONSTANDARD=y
650# CONFIG_COMPUTONE is not set 738# CONFIG_COMPUTONE is not set
651# CONFIG_ROCKETPORT is not set 739# CONFIG_ROCKETPORT is not set
@@ -659,10 +747,12 @@ CONFIG_SERIAL_NONSTANDARD=y
659# CONFIG_N_HDLC is not set 747# CONFIG_N_HDLC is not set
660# CONFIG_SPECIALIX is not set 748# CONFIG_SPECIALIX is not set
661# CONFIG_SX is not set 749# CONFIG_SX is not set
750# CONFIG_RIO is not set
662# CONFIG_STALDRV is not set 751# CONFIG_STALDRV is not set
663CONFIG_SGI_SNSC=y 752CONFIG_SGI_SNSC=y
664CONFIG_SGI_TIOCX=y 753CONFIG_SGI_TIOCX=y
665CONFIG_SGI_MBCS=m 754CONFIG_SGI_MBCS=m
755CONFIG_MSPEC=y
666 756
667# 757#
668# Serial drivers 758# Serial drivers
@@ -701,6 +791,7 @@ CONFIG_EFI_RTC=y
701# Ftape, the floppy tape device driver 791# Ftape, the floppy tape device driver
702# 792#
703CONFIG_AGP=y 793CONFIG_AGP=y
794# CONFIG_AGP_SIS is not set
704# CONFIG_AGP_VIA is not set 795# CONFIG_AGP_VIA is not set
705CONFIG_AGP_SGI_TIOCA=y 796CONFIG_AGP_SGI_TIOCA=y
706# CONFIG_DRM is not set 797# CONFIG_DRM is not set
@@ -730,7 +821,6 @@ CONFIG_MMTIMER=y
730# 821#
731# Dallas's 1-wire bus 822# Dallas's 1-wire bus
732# 823#
733# CONFIG_W1 is not set
734 824
735# 825#
736# Hardware Monitoring support 826# Hardware Monitoring support
@@ -741,6 +831,7 @@ CONFIG_MMTIMER=y
741# 831#
742# Misc devices 832# Misc devices
743# 833#
834# CONFIG_TIFM_CORE is not set
744 835
745# 836#
746# Multimedia devices 837# Multimedia devices
@@ -756,6 +847,7 @@ CONFIG_MMTIMER=y
756# 847#
757# Graphics support 848# Graphics support
758# 849#
850CONFIG_FIRMWARE_EDID=y
759# CONFIG_FB is not set 851# CONFIG_FB is not set
760 852
761# 853#
@@ -764,6 +856,7 @@ CONFIG_MMTIMER=y
764CONFIG_VGA_CONSOLE=y 856CONFIG_VGA_CONSOLE=y
765# CONFIG_VGACON_SOFT_SCROLLBACK is not set 857# CONFIG_VGACON_SOFT_SCROLLBACK is not set
766CONFIG_DUMMY_CONSOLE=y 858CONFIG_DUMMY_CONSOLE=y
859# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
767 860
768# 861#
769# Sound 862# Sound
@@ -794,6 +887,7 @@ CONFIG_USB=m
794CONFIG_USB_EHCI_HCD=m 887CONFIG_USB_EHCI_HCD=m
795# CONFIG_USB_EHCI_SPLIT_ISO is not set 888# CONFIG_USB_EHCI_SPLIT_ISO is not set
796# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 889# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
890# CONFIG_USB_EHCI_TT_NEWSCHED is not set
797# CONFIG_USB_ISP116X_HCD is not set 891# CONFIG_USB_ISP116X_HCD is not set
798CONFIG_USB_OHCI_HCD=m 892CONFIG_USB_OHCI_HCD=m
799# CONFIG_USB_OHCI_BIG_ENDIAN is not set 893# CONFIG_USB_OHCI_BIG_ENDIAN is not set
@@ -843,6 +937,7 @@ CONFIG_USB_HIDINPUT=y
843# CONFIG_USB_ATI_REMOTE2 is not set 937# CONFIG_USB_ATI_REMOTE2 is not set
844# CONFIG_USB_KEYSPAN_REMOTE is not set 938# CONFIG_USB_KEYSPAN_REMOTE is not set
845# CONFIG_USB_APPLETOUCH is not set 939# CONFIG_USB_APPLETOUCH is not set
940# CONFIG_USB_TRANCEVIBRATOR is not set
846 941
847# 942#
848# USB Imaging devices 943# USB Imaging devices
@@ -874,15 +969,18 @@ CONFIG_USB_MON=y
874# 969#
875# CONFIG_USB_EMI62 is not set 970# CONFIG_USB_EMI62 is not set
876# CONFIG_USB_EMI26 is not set 971# CONFIG_USB_EMI26 is not set
972# CONFIG_USB_ADUTUX is not set
877# CONFIG_USB_AUERSWALD is not set 973# CONFIG_USB_AUERSWALD is not set
878# CONFIG_USB_RIO500 is not set 974# CONFIG_USB_RIO500 is not set
879# CONFIG_USB_LEGOTOWER is not set 975# CONFIG_USB_LEGOTOWER is not set
880# CONFIG_USB_LCD is not set 976# CONFIG_USB_LCD is not set
881# CONFIG_USB_LED is not set 977# CONFIG_USB_LED is not set
978# CONFIG_USB_CYPRESS_CY7C63 is not set
882# CONFIG_USB_CYTHERM is not set 979# CONFIG_USB_CYTHERM is not set
883# CONFIG_USB_PHIDGETKIT is not set 980# CONFIG_USB_PHIDGET is not set
884# CONFIG_USB_PHIDGETSERVO is not set
885# CONFIG_USB_IDMOUSE is not set 981# CONFIG_USB_IDMOUSE is not set
982# CONFIG_USB_FTDI_ELAN is not set
983# CONFIG_USB_APPLEDISPLAY is not set
886# CONFIG_USB_SISUSBVGA is not set 984# CONFIG_USB_SISUSBVGA is not set
887# CONFIG_USB_LD is not set 985# CONFIG_USB_LD is not set
888 986
@@ -919,18 +1017,15 @@ CONFIG_USB_MON=y
919CONFIG_INFINIBAND=m 1017CONFIG_INFINIBAND=m
920# CONFIG_INFINIBAND_USER_MAD is not set 1018# CONFIG_INFINIBAND_USER_MAD is not set
921CONFIG_INFINIBAND_USER_ACCESS=m 1019CONFIG_INFINIBAND_USER_ACCESS=m
1020CONFIG_INFINIBAND_ADDR_TRANS=y
922CONFIG_INFINIBAND_MTHCA=m 1021CONFIG_INFINIBAND_MTHCA=m
923CONFIG_INFINIBAND_MTHCA_DEBUG=y 1022CONFIG_INFINIBAND_MTHCA_DEBUG=y
1023# CONFIG_INFINIBAND_AMSO1100 is not set
924CONFIG_INFINIBAND_IPOIB=m 1024CONFIG_INFINIBAND_IPOIB=m
925CONFIG_INFINIBAND_IPOIB_DEBUG=y 1025CONFIG_INFINIBAND_IPOIB_DEBUG=y
926# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set 1026# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
927CONFIG_INFINIBAND_SRP=m 1027CONFIG_INFINIBAND_SRP=m
928 1028# CONFIG_INFINIBAND_ISER is not set
929#
930# SN Devices
931#
932CONFIG_SGI_IOC4=y
933CONFIG_SGI_IOC3=y
934 1029
935# 1030#
936# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) 1031# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
@@ -942,6 +1037,19 @@ CONFIG_SGI_IOC3=y
942# CONFIG_RTC_CLASS is not set 1037# CONFIG_RTC_CLASS is not set
943 1038
944# 1039#
1040# DMA Engine support
1041#
1042# CONFIG_DMA_ENGINE is not set
1043
1044#
1045# DMA Clients
1046#
1047
1048#
1049# DMA Devices
1050#
1051
1052#
945# File systems 1053# File systems
946# 1054#
947CONFIG_EXT2_FS=y 1055CONFIG_EXT2_FS=y
@@ -965,15 +1073,16 @@ CONFIG_REISERFS_FS_SECURITY=y
965# CONFIG_JFS_FS is not set 1073# CONFIG_JFS_FS is not set
966CONFIG_FS_POSIX_ACL=y 1074CONFIG_FS_POSIX_ACL=y
967CONFIG_XFS_FS=y 1075CONFIG_XFS_FS=y
968CONFIG_XFS_EXPORT=y
969CONFIG_XFS_QUOTA=y 1076CONFIG_XFS_QUOTA=y
970# CONFIG_XFS_SECURITY is not set 1077# CONFIG_XFS_SECURITY is not set
971CONFIG_XFS_POSIX_ACL=y 1078CONFIG_XFS_POSIX_ACL=y
972CONFIG_XFS_RT=y 1079CONFIG_XFS_RT=y
1080# CONFIG_GFS2_FS is not set
973# CONFIG_OCFS2_FS is not set 1081# CONFIG_OCFS2_FS is not set
974# CONFIG_MINIX_FS is not set 1082# CONFIG_MINIX_FS is not set
975# CONFIG_ROMFS_FS is not set 1083# CONFIG_ROMFS_FS is not set
976CONFIG_INOTIFY=y 1084CONFIG_INOTIFY=y
1085CONFIG_INOTIFY_USER=y
977CONFIG_QUOTA=y 1086CONFIG_QUOTA=y
978# CONFIG_QFMT_V1 is not set 1087# CONFIG_QFMT_V1 is not set
979# CONFIG_QFMT_V2 is not set 1088# CONFIG_QFMT_V2 is not set
@@ -1007,8 +1116,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
1007# 1116#
1008CONFIG_PROC_FS=y 1117CONFIG_PROC_FS=y
1009CONFIG_PROC_KCORE=y 1118CONFIG_PROC_KCORE=y
1119CONFIG_PROC_SYSCTL=y
1010CONFIG_SYSFS=y 1120CONFIG_SYSFS=y
1011CONFIG_TMPFS=y 1121CONFIG_TMPFS=y
1122# CONFIG_TMPFS_POSIX_ACL is not set
1012CONFIG_HUGETLBFS=y 1123CONFIG_HUGETLBFS=y
1013CONFIG_HUGETLB_PAGE=y 1124CONFIG_HUGETLB_PAGE=y
1014CONFIG_RAMFS=y 1125CONFIG_RAMFS=y
@@ -1046,7 +1157,7 @@ CONFIG_NFSD_V4=y
1046CONFIG_NFSD_TCP=y 1157CONFIG_NFSD_TCP=y
1047CONFIG_LOCKD=m 1158CONFIG_LOCKD=m
1048CONFIG_LOCKD_V4=y 1159CONFIG_LOCKD_V4=y
1049CONFIG_EXPORTFS=y 1160CONFIG_EXPORTFS=m
1050CONFIG_NFS_COMMON=y 1161CONFIG_NFS_COMMON=y
1051CONFIG_SUNRPC=m 1162CONFIG_SUNRPC=m
1052CONFIG_SUNRPC_GSS=m 1163CONFIG_SUNRPC_GSS=m
@@ -1056,7 +1167,9 @@ CONFIG_SMB_FS=m
1056# CONFIG_SMB_NLS_DEFAULT is not set 1167# CONFIG_SMB_NLS_DEFAULT is not set
1057CONFIG_CIFS=m 1168CONFIG_CIFS=m
1058# CONFIG_CIFS_STATS is not set 1169# CONFIG_CIFS_STATS is not set
1170# CONFIG_CIFS_WEAK_PW_HASH is not set
1059# CONFIG_CIFS_XATTR is not set 1171# CONFIG_CIFS_XATTR is not set
1172# CONFIG_CIFS_DEBUG2 is not set
1060# CONFIG_CIFS_EXPERIMENTAL is not set 1173# CONFIG_CIFS_EXPERIMENTAL is not set
1061# CONFIG_NCP_FS is not set 1174# CONFIG_NCP_FS is not set
1062# CONFIG_CODA_FS is not set 1175# CONFIG_CODA_FS is not set
@@ -1129,6 +1242,10 @@ CONFIG_NLS_ISO8859_1=y
1129CONFIG_NLS_UTF8=y 1242CONFIG_NLS_UTF8=y
1130 1243
1131# 1244#
1245# Distributed Lock Manager
1246#
1247
1248#
1132# Library routines 1249# Library routines
1133# 1250#
1134# CONFIG_CRC_CCITT is not set 1251# CONFIG_CRC_CCITT is not set
@@ -1138,9 +1255,11 @@ CONFIG_LIBCRC32C=m
1138CONFIG_ZLIB_INFLATE=m 1255CONFIG_ZLIB_INFLATE=m
1139CONFIG_ZLIB_DEFLATE=m 1256CONFIG_ZLIB_DEFLATE=m
1140CONFIG_GENERIC_ALLOCATOR=y 1257CONFIG_GENERIC_ALLOCATOR=y
1258CONFIG_PLIST=y
1141CONFIG_GENERIC_HARDIRQS=y 1259CONFIG_GENERIC_HARDIRQS=y
1142CONFIG_GENERIC_IRQ_PROBE=y 1260CONFIG_GENERIC_IRQ_PROBE=y
1143CONFIG_GENERIC_PENDING_IRQ=y 1261CONFIG_GENERIC_PENDING_IRQ=y
1262CONFIG_IRQ_PER_CPU=y
1144 1263
1145# 1264#
1146# Instrumentation Support 1265# Instrumentation Support
@@ -1152,20 +1271,26 @@ CONFIG_GENERIC_PENDING_IRQ=y
1152# Kernel hacking 1271# Kernel hacking
1153# 1272#
1154# CONFIG_PRINTK_TIME is not set 1273# CONFIG_PRINTK_TIME is not set
1274CONFIG_ENABLE_MUST_CHECK=y
1155CONFIG_MAGIC_SYSRQ=y 1275CONFIG_MAGIC_SYSRQ=y
1276# CONFIG_UNUSED_SYMBOLS is not set
1156CONFIG_DEBUG_KERNEL=y 1277CONFIG_DEBUG_KERNEL=y
1157CONFIG_LOG_BUF_SHIFT=20 1278CONFIG_LOG_BUF_SHIFT=20
1158CONFIG_DETECT_SOFTLOCKUP=y 1279CONFIG_DETECT_SOFTLOCKUP=y
1159# CONFIG_SCHEDSTATS is not set 1280# CONFIG_SCHEDSTATS is not set
1160# CONFIG_DEBUG_SLAB is not set 1281# CONFIG_DEBUG_SLAB is not set
1161CONFIG_DEBUG_PREEMPT=y 1282# CONFIG_DEBUG_RT_MUTEXES is not set
1162# CONFIG_DEBUG_MUTEXES is not set 1283# CONFIG_RT_MUTEX_TESTER is not set
1163# CONFIG_DEBUG_SPINLOCK is not set 1284# CONFIG_DEBUG_SPINLOCK is not set
1285# CONFIG_DEBUG_MUTEXES is not set
1286# CONFIG_DEBUG_RWSEMS is not set
1164# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1287# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1288# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1165# CONFIG_DEBUG_KOBJECT is not set 1289# CONFIG_DEBUG_KOBJECT is not set
1166CONFIG_DEBUG_INFO=y 1290CONFIG_DEBUG_INFO=y
1167# CONFIG_DEBUG_FS is not set 1291# CONFIG_DEBUG_FS is not set
1168# CONFIG_DEBUG_VM is not set 1292# CONFIG_DEBUG_VM is not set
1293# CONFIG_DEBUG_LIST is not set
1169CONFIG_FORCED_INLINING=y 1294CONFIG_FORCED_INLINING=y
1170# CONFIG_RCU_TORTURE_TEST is not set 1295# CONFIG_RCU_TORTURE_TEST is not set
1171CONFIG_IA64_GRANULE_16MB=y 1296CONFIG_IA64_GRANULE_16MB=y
@@ -1186,6 +1311,10 @@ CONFIG_SYSVIPC_COMPAT=y
1186# Cryptographic options 1311# Cryptographic options
1187# 1312#
1188CONFIG_CRYPTO=y 1313CONFIG_CRYPTO=y
1314CONFIG_CRYPTO_ALGAPI=y
1315CONFIG_CRYPTO_BLKCIPHER=m
1316CONFIG_CRYPTO_HASH=y
1317CONFIG_CRYPTO_MANAGER=m
1189CONFIG_CRYPTO_HMAC=y 1318CONFIG_CRYPTO_HMAC=y
1190# CONFIG_CRYPTO_NULL is not set 1319# CONFIG_CRYPTO_NULL is not set
1191# CONFIG_CRYPTO_MD4 is not set 1320# CONFIG_CRYPTO_MD4 is not set
@@ -1195,6 +1324,8 @@ CONFIG_CRYPTO_SHA1=m
1195# CONFIG_CRYPTO_SHA512 is not set 1324# CONFIG_CRYPTO_SHA512 is not set
1196# CONFIG_CRYPTO_WP512 is not set 1325# CONFIG_CRYPTO_WP512 is not set
1197# CONFIG_CRYPTO_TGR192 is not set 1326# CONFIG_CRYPTO_TGR192 is not set
1327CONFIG_CRYPTO_ECB=m
1328CONFIG_CRYPTO_CBC=m
1198CONFIG_CRYPTO_DES=m 1329CONFIG_CRYPTO_DES=m
1199# CONFIG_CRYPTO_BLOWFISH is not set 1330# CONFIG_CRYPTO_BLOWFISH is not set
1200# CONFIG_CRYPTO_TWOFISH is not set 1331# CONFIG_CRYPTO_TWOFISH is not set
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index db8e1fcfa047..ce49fe3a3b56 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -75,7 +75,7 @@
75** If a device prefetches beyond the end of a valid pdir entry, it will cause 75** If a device prefetches beyond the end of a valid pdir entry, it will cause
76** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should 76** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
77** disconnect on 4k boundaries and prevent such issues. If the device is 77** disconnect on 4k boundaries and prevent such issues. If the device is
78** particularly agressive, this option will keep the entire pdir valid such 78** particularly aggressive, this option will keep the entire pdir valid such
79** that prefetching will hit a valid address. This could severely impact 79** that prefetching will hit a valid address. This could severely impact
80** error containment, and is therefore off by default. The page that is 80** error containment, and is therefore off by default. The page that is
81** used for spill-over is poisoned, so that should help debugging somewhat. 81** used for spill-over is poisoned, so that should help debugging somewhat.
@@ -258,10 +258,10 @@ static u64 prefetch_spill_page;
258 258
259/* 259/*
260** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up 260** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
261** (or rather not merge) DMA's into managable chunks. 261** (or rather not merge) DMAs into manageable chunks.
262** On parisc, this is more of the software/tuning constraint 262** On parisc, this is more of the software/tuning constraint
263** rather than the HW. I/O MMU allocation alogorithms can be 263** rather than the HW. I/O MMU allocation algorithms can be
264** faster with smaller size is (to some degree). 264** faster with smaller sizes (to some degree).
265*/ 265*/
266#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size) 266#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
267 267
@@ -1672,15 +1672,13 @@ ioc_sac_init(struct ioc *ioc)
1672 * SAC (single address cycle) addressable, so allocate a 1672 * SAC (single address cycle) addressable, so allocate a
1673 * pseudo-device to enforce that. 1673 * pseudo-device to enforce that.
1674 */ 1674 */
1675 sac = kmalloc(sizeof(*sac), GFP_KERNEL); 1675 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1676 if (!sac) 1676 if (!sac)
1677 panic(PFX "Couldn't allocate struct pci_dev"); 1677 panic(PFX "Couldn't allocate struct pci_dev");
1678 memset(sac, 0, sizeof(*sac));
1679 1678
1680 controller = kmalloc(sizeof(*controller), GFP_KERNEL); 1679 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1681 if (!controller) 1680 if (!controller)
1682 panic(PFX "Couldn't allocate struct pci_controller"); 1681 panic(PFX "Couldn't allocate struct pci_controller");
1683 memset(controller, 0, sizeof(*controller));
1684 1682
1685 controller->iommu = ioc; 1683 controller->iommu = ioc;
1686 sac->sysdata = controller; 1684 sac->sysdata = controller;
@@ -1737,12 +1735,10 @@ ioc_init(u64 hpa, void *handle)
1737 struct ioc *ioc; 1735 struct ioc *ioc;
1738 struct ioc_iommu *info; 1736 struct ioc_iommu *info;
1739 1737
1740 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 1738 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1741 if (!ioc) 1739 if (!ioc)
1742 return NULL; 1740 return NULL;
1743 1741
1744 memset(ioc, 0, sizeof(*ioc));
1745
1746 ioc->next = ioc_list; 1742 ioc->next = ioc_list;
1747 ioc_list = ioc; 1743 ioc_list = ioc;
1748 1744
diff --git a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig
index 18ccb1266e18..f92306bbedb8 100644
--- a/arch/ia64/hp/sim/Kconfig
+++ b/arch/ia64/hp/sim/Kconfig
@@ -13,8 +13,8 @@ config HP_SIMSERIAL_CONSOLE
13 depends on HP_SIMSERIAL 13 depends on HP_SIMSERIAL
14 14
15config HP_SIMSCSI 15config HP_SIMSCSI
16 tristate "Simulated SCSI disk" 16 bool "Simulated SCSI disk"
17 depends on SCSI 17 depends on SCSI=y
18 18
19endmenu 19endmenu
20 20
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index 8145547bb52d..c2f58ff364e7 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -27,7 +27,7 @@ hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
27} 27}
28 28
29static struct hw_interrupt_type irq_type_hp_sim = { 29static struct hw_interrupt_type irq_type_hp_sim = {
30 .typename = "hpsim", 30 .name = "hpsim",
31 .startup = hpsim_irq_startup, 31 .startup = hpsim_irq_startup,
32 .shutdown = hpsim_irq_noop, 32 .shutdown = hpsim_irq_noop,
33 .enable = hpsim_irq_noop, 33 .enable = hpsim_irq_noop,
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..1f16ebb9a800 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
209} 209}
210#endif 210#endif
211 211
212static void do_softint(void *private_) 212static void do_softint(struct work_struct *private_)
213{ 213{
214 printk(KERN_ERR "simserial: do_softint called\n"); 214 printk(KERN_ERR "simserial: do_softint called\n");
215} 215}
@@ -684,12 +684,11 @@ static int get_async_struct(int line, struct async_struct **ret_info)
684 *ret_info = sstate->info; 684 *ret_info = sstate->info;
685 return 0; 685 return 0;
686 } 686 }
687 info = kmalloc(sizeof(struct async_struct), GFP_KERNEL); 687 info = kzalloc(sizeof(struct async_struct), GFP_KERNEL);
688 if (!info) { 688 if (!info) {
689 sstate->count--; 689 sstate->count--;
690 return -ENOMEM; 690 return -ENOMEM;
691 } 691 }
692 memset(info, 0, sizeof(struct async_struct));
693 init_waitqueue_head(&info->open_wait); 692 init_waitqueue_head(&info->open_wait);
694 init_waitqueue_head(&info->close_wait); 693 init_waitqueue_head(&info->close_wait);
695 init_waitqueue_head(&info->delta_msr_wait); 694 init_waitqueue_head(&info->delta_msr_wait);
@@ -698,7 +697,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
698 info->flags = sstate->flags; 697 info->flags = sstate->flags;
699 info->xmit_fifo_size = sstate->xmit_fifo_size; 698 info->xmit_fifo_size = sstate->xmit_fifo_size;
700 info->line = line; 699 info->line = line;
701 INIT_WORK(&info->work, do_softint, info); 700 INIT_WORK(&info->work, do_softint);
702 info->state = sstate; 701 info->state = sstate;
703 if (sstate->info) { 702 if (sstate->info) {
704 kfree(info); 703 kfree(info);
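
The simserial change above follows the workqueue API conversion in which work handlers receive the struct work_struct pointer itself rather than a private void * argument, so INIT_WORK() drops its data parameter. Handlers that need per-instance state now embed the work_struct and recover it with container_of(). A minimal sketch of the pattern, with hypothetical names (sim_port is not a structure from this driver):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical driver state embedding its work item. */
struct sim_port {
	int events;
	struct work_struct work;
};

/* New-style handler: takes the work_struct and recovers the
 * containing structure instead of receiving a void * context. */
static void sim_port_softint(struct work_struct *work)
{
	struct sim_port *port = container_of(work, struct sim_port, work);

	port->events++;
}

static void sim_port_setup(struct sim_port *port)
{
	/* The old third "data" argument of INIT_WORK() is gone. */
	INIT_WORK(&port->work, sim_port_softint);
}
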
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index daa6b91bc921..578737ec7629 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
91 * it with privilege level 3 because the IVE uses non-privileged accesses to these 91 * it with privilege level 3 because the IVE uses non-privileged accesses to these
92 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. 92 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
93 */ 93 */
94 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 94 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
95 if (vma) { 95 if (vma) {
96 memset(vma, 0, sizeof(*vma)); 96 memset(vma, 0, sizeof(*vma));
97 vma->vm_mm = current->mm; 97 vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
117 * code is locked in specific gate page, which is pointed by pretcode 117 * code is locked in specific gate page, which is pointed by pretcode
118 * when setup_frame_ia32 118 * when setup_frame_ia32
119 */ 119 */
120 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 120 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
121 if (vma) { 121 if (vma) {
122 memset(vma, 0, sizeof(*vma)); 122 memset(vma, 0, sizeof(*vma));
123 vma->vm_mm = current->mm; 123 vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
142 * Install LDT as anonymous memory. This gives us all-zero segment descriptors 142 * Install LDT as anonymous memory. This gives us all-zero segment descriptors
143 * until a task modifies them via modify_ldt(). 143 * until a task modifies them via modify_ldt().
144 */ 144 */
145 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 145 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
146 if (vma) { 146 if (vma) {
147 memset(vma, 0, sizeof(*vma)); 147 memset(vma, 0, sizeof(*vma));
148 vma->vm_mm = current->mm; 148 vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
214 bprm->loader += stack_base; 214 bprm->loader += stack_base;
215 bprm->exec += stack_base; 215 bprm->exec += stack_base;
216 216
217 mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 217 mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
218 if (!mpnt) 218 if (!mpnt)
219 return -ENOMEM; 219 return -ENOMEM;
220 220
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index c187743965a0..6af400a12ca1 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -249,7 +249,7 @@ ia32_init (void)
249 249
250#if PAGE_SHIFT > IA32_PAGE_SHIFT 250#if PAGE_SHIFT > IA32_PAGE_SHIFT
251 { 251 {
252 extern kmem_cache_t *partial_page_cachep; 252 extern struct kmem_cache *partial_page_cachep;
253 253
254 partial_page_cachep = kmem_cache_create("partial_page_cache", 254 partial_page_cachep = kmem_cache_create("partial_page_cache",
255 sizeof(struct partial_page), 0, 0, 255 sizeof(struct partial_page), 0, 0,
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 703a67c934f8..cfa0bc0026b5 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
330void ia64_elf32_init(struct pt_regs *regs); 330void ia64_elf32_init(struct pt_regs *regs);
331#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) 331#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
332 332
333#define elf_addr_t u32
334
335/* This macro yields a bitmask that programs can use to figure out 333/* This macro yields a bitmask that programs can use to figure out
336 what instruction set this CPU supports. */ 334 what instruction set this CPU supports. */
337#define ELF_HWCAP 0 335#define ELF_HWCAP 0
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 9d6a3f210148..957681c39ad9 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -235,7 +235,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
235 235
236 if (!(flags & MAP_ANONYMOUS)) { 236 if (!(flags & MAP_ANONYMOUS)) {
237 /* read the file contents */ 237 /* read the file contents */
238 inode = file->f_dentry->d_inode; 238 inode = file->f_path.dentry->d_inode;
239 if (!inode->i_fop || !file->f_op->read 239 if (!inode->i_fop || !file->f_op->read
240 || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0)) 240 || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
241 { 241 {
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
254} 254}
255 255
256/* SLAB cache for partial_page structures */ 256/* SLAB cache for partial_page structures */
257kmem_cache_t *partial_page_cachep; 257struct kmem_cache *partial_page_cachep;
258 258
259/* 259/*
260 * init partial_page_list. 260 * init partial_page_list.
@@ -837,7 +837,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
837 837
838 if (!is_congruent) { 838 if (!is_congruent) {
839 /* read the file contents */ 839 /* read the file contents */
840 inode = file->f_dentry->d_inode; 840 inode = file->f_path.dentry->d_inode;
841 if (!inode->i_fop || !file->f_op->read 841 if (!inode->i_fop || !file->f_op->read
842 || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff) 842 || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
843 < 0)) 843 < 0))
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index cfa099b04cda..8ae384eb5357 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
28obj-$(CONFIG_CPU_FREQ) += cpufreq/ 28obj-$(CONFIG_CPU_FREQ) += cpufreq/
29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 29obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
31obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
31obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 32obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
32obj-$(CONFIG_AUDIT) += audit.o 33obj-$(CONFIG_AUDIT) += audit.o
33obj-$(CONFIG_PCI_MSI) += msi_ia64.o 34obj-$(CONFIG_PCI_MSI) += msi_ia64.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 32c3abededc6..73ef4a85b861 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -64,9 +64,6 @@ EXPORT_SYMBOL(pm_idle);
64void (*pm_power_off) (void); 64void (*pm_power_off) (void);
65EXPORT_SYMBOL(pm_power_off); 65EXPORT_SYMBOL(pm_power_off);
66 66
67unsigned char acpi_kbd_controller_present = 1;
68unsigned char acpi_legacy_devices;
69
70unsigned int acpi_cpei_override; 67unsigned int acpi_cpei_override;
71unsigned int acpi_cpei_phys_cpuid; 68unsigned int acpi_cpei_phys_cpuid;
72 69
@@ -628,12 +625,6 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
628 625
629 fadt = (struct fadt_descriptor *)fadt_header; 626 fadt = (struct fadt_descriptor *)fadt_header;
630 627
631 if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
632 acpi_kbd_controller_present = 0;
633
634 if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
635 acpi_legacy_devices = 1;
636
637 acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); 628 acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
638 return 0; 629 return 0;
639} 630}
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 86faf221a070..088f130197ae 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ processor_get_pstate (
68 68
69 dprintk("processor_get_pstate\n"); 69 dprintk("processor_get_pstate\n");
70 70
71 retval = ia64_pal_get_pstate(&pstate_index); 71 retval = ia64_pal_get_pstate(&pstate_index,
72 PAL_GET_PSTATE_TYPE_INSTANT);
72 *value = (u32) pstate_index; 73 *value = (u32) pstate_index;
73 74
74 if (retval) 75 if (retval)
@@ -91,7 +92,7 @@ extract_clock (
91 dprintk("extract_clock\n"); 92 dprintk("extract_clock\n");
92 93
93 for (i = 0; i < data->acpi_data.state_count; i++) { 94 for (i = 0; i < data->acpi_data.state_count; i++) {
94 if (value >= data->acpi_data.states[i].control) 95 if (value == data->acpi_data.states[i].status)
95 return data->acpi_data.states[i].core_frequency; 96 return data->acpi_data.states[i].core_frequency;
96 } 97 }
97 return data->acpi_data.states[i-1].core_frequency; 98 return data->acpi_data.states[i-1].core_frequency;
@@ -117,11 +118,7 @@ processor_get_freq (
117 goto migrate_end; 118 goto migrate_end;
118 } 119 }
119 120
120 /* 121 /* processor_get_pstate gets the instantaneous frequency */
121 * processor_get_pstate gets the average frequency since the
122 * last get. So, do two PAL_get_freq()...
123 */
124 ret = processor_get_pstate(&value);
125 ret = processor_get_pstate(&value); 122 ret = processor_get_pstate(&value);
126 123
127 if (ret) { 124 if (ret) {
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
new file mode 100644
index 000000000000..0aabedf95dad
--- /dev/null
+++ b/arch/ia64/kernel/crash.c
@@ -0,0 +1,245 @@
1/*
2 * arch/ia64/kernel/crash.c
3 *
4 * Architecture specific (ia64) functions for kexec based crash dumps.
5 *
6 * Created by: Khalid Aziz <khalid.aziz@hp.com>
7 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
8 * Copyright (C) 2005 Intel Corp Zou Nan hai <nanhai.zou@intel.com>
9 *
10 */
11#include <linux/smp.h>
12#include <linux/delay.h>
13#include <linux/crash_dump.h>
14#include <linux/bootmem.h>
15#include <linux/kexec.h>
16#include <linux/elfcore.h>
17#include <linux/sysctl.h>
18#include <linux/init.h>
19
20#include <asm/kdebug.h>
21#include <asm/mca.h>
22#include <asm/uaccess.h>
23
24int kdump_status[NR_CPUS];
25atomic_t kdump_cpu_freezed;
26atomic_t kdump_in_progress;
27int kdump_on_init = 1;
28ssize_t
29copy_oldmem_page(unsigned long pfn, char *buf,
30 size_t csize, unsigned long offset, int userbuf)
31{
32 void *vaddr;
33
34 if (!csize)
35 return 0;
36 vaddr = __va(pfn<<PAGE_SHIFT);
37 if (userbuf) {
38 if (copy_to_user(buf, (vaddr + offset), csize)) {
39 return -EFAULT;
40 }
41 } else
42 memcpy(buf, (vaddr + offset), csize);
43 return csize;
44}
45
46static inline Elf64_Word
47*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
48 size_t data_len)
49{
50 struct elf_note *note = (struct elf_note *)buf;
51 note->n_namesz = strlen(name) + 1;
52 note->n_descsz = data_len;
53 note->n_type = type;
54 buf += (sizeof(*note) + 3)/4;
55 memcpy(buf, name, note->n_namesz);
56 buf += (note->n_namesz + 3)/4;
57 memcpy(buf, data, data_len);
58 buf += (data_len + 3)/4;
59 return buf;
60}
61
62static void
63final_note(void *buf)
64{
65 memset(buf, 0, sizeof(struct elf_note));
66}
67
68extern void ia64_dump_cpu_regs(void *);
69
70static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
71
72void
73crash_save_this_cpu()
74{
75 void *buf;
76 unsigned long cfm, sof, sol;
77
78 int cpu = smp_processor_id();
79 struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
80
81 elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
82 memset(prstatus, 0, sizeof(*prstatus));
83 prstatus->pr_pid = current->pid;
84
85 ia64_dump_cpu_regs(dst);
86 cfm = dst[43];
87 sol = (cfm >> 7) & 0x7f;
88 sof = cfm & 0x7f;
89 dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
90 sof - sol);
91
92 buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
93 if (!buf)
94 return;
95 buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus,
96 sizeof(*prstatus));
97 final_note(buf);
98}
99
100static int
101kdump_wait_cpu_freeze(void)
102{
103 int cpu_num = num_online_cpus() - 1;
104 int timeout = 1000;
105 while(timeout-- > 0) {
106 if (atomic_read(&kdump_cpu_freezed) == cpu_num)
107 return 0;
108 udelay(1000);
109 }
110 return 1;
111}
112
113void
114machine_crash_shutdown(struct pt_regs *pt)
115{
116 /* This function is only called after the system
117 * has paniced or is otherwise in a critical state.
118 * The minimum amount of code to allow a kexec'd kernel
119 * to run successfully needs to happen here.
120 *
121 * In practice this means shooting down the other cpus in
122 * an SMP system.
123 */
124 kexec_disable_iosapic();
125#ifdef CONFIG_SMP
126 kdump_smp_send_stop();
127 if (kdump_wait_cpu_freeze() && kdump_on_init) {
128 //not all cpu response to IPI, send INIT to freeze them
129 kdump_smp_send_init();
130 }
131#endif
132}
133
134static void
135machine_kdump_on_init(void)
136{
137 local_irq_disable();
138 kexec_disable_iosapic();
139 machine_kexec(ia64_kimage);
140}
141
142void
143kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
144{
145 int cpuid;
146 local_irq_disable();
147 cpuid = smp_processor_id();
148 crash_save_this_cpu();
149 current->thread.ksp = (__u64)info->sw - 16;
150 atomic_inc(&kdump_cpu_freezed);
151 kdump_status[cpuid] = 1;
152 mb();
153 if (cpuid == 0) {
154 for (;;)
155 cpu_relax();
156 } else
157 ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]);
158}
159
160static int
161kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
162{
163 struct ia64_mca_notify_die *nd;
164 struct die_args *args = data;
165
166 if (!kdump_on_init)
167 return NOTIFY_DONE;
168
169 if (val != DIE_INIT_MONARCH_ENTER &&
170 val != DIE_INIT_SLAVE_ENTER &&
171 val != DIE_MCA_RENDZVOUS_LEAVE &&
172 val != DIE_MCA_MONARCH_LEAVE)
173 return NOTIFY_DONE;
174
175 nd = (struct ia64_mca_notify_die *)args->err;
176 /* Reason code 1 means machine check rendezous*/
177 if ((val == DIE_INIT_MONARCH_ENTER || DIE_INIT_SLAVE_ENTER) &&
178 nd->sos->rv_rc == 1)
179 return NOTIFY_DONE;
180
181 switch (val) {
182 case DIE_INIT_MONARCH_ENTER:
183 machine_kdump_on_init();
184 break;
185 case DIE_INIT_SLAVE_ENTER:
186 unw_init_running(kdump_cpu_freeze, NULL);
187 break;
188 case DIE_MCA_RENDZVOUS_LEAVE:
189 if (atomic_read(&kdump_in_progress))
190 unw_init_running(kdump_cpu_freeze, NULL);
191 break;
192 case DIE_MCA_MONARCH_LEAVE:
193 /* die_register->signr indicate if MCA is recoverable */
194 if (!args->signr)
195 machine_kdump_on_init();
196 break;
197 }
198 return NOTIFY_DONE;
199}
200
201#ifdef CONFIG_SYSCTL
202static ctl_table kdump_on_init_table[] = {
203 {
204 .ctl_name = CTL_UNNUMBERED,
205 .procname = "kdump_on_init",
206 .data = &kdump_on_init,
207 .maxlen = sizeof(int),
208 .mode = 0644,
209 .proc_handler = &proc_dointvec,
210 },
211 { .ctl_name = 0 }
212};
213
214static ctl_table sys_table[] = {
215 {
216 .ctl_name = CTL_KERN,
217 .procname = "kernel",
218 .mode = 0555,
219 .child = kdump_on_init_table,
220 },
221 { .ctl_name = 0 }
222};
223#endif
224
225static int
226machine_crash_setup(void)
227{
228 char *from = strstr(saved_command_line, "elfcorehdr=");
229 static struct notifier_block kdump_init_notifier_nb = {
230 .notifier_call = kdump_init_notifier,
231 };
232 int ret;
233 if (from)
234 elfcorehdr_addr = memparse(from+11, &from);
235 saved_max_pfn = (unsigned long)-1;
236 if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
237 return ret;
238#ifdef CONFIG_SYSCTL
239 register_sysctl_table(sys_table, 0);
240#endif
241 return 0;
242}
243
244__initcall(machine_crash_setup);
245
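crash_save_this_cpu() above serializes the register state into an ELF note via append_elf_note() and final_note(), padding both the note name and the descriptor to 4-byte boundaries (that is what the (len + 3)/4 word arithmetic does). A standalone byte-level sketch of the same layout, for illustration only (pack_note is a hypothetical helper, not kernel code):

#include <elf.h>
#include <string.h>

/* Pack one ELF note: header, NUL-terminated name padded to 4 bytes,
 * then the descriptor data, also padded to 4 bytes.  Returns the
 * number of bytes written to 'out'. */
static size_t pack_note(void *out, const char *name, unsigned int type,
			const void *desc, size_t desclen)
{
	Elf64_Nhdr *hdr = out;
	char *p = (char *)(hdr + 1);

	hdr->n_namesz = strlen(name) + 1;
	hdr->n_descsz = desclen;
	hdr->n_type   = type;

	memcpy(p, name, hdr->n_namesz);
	p += (hdr->n_namesz + 3) & ~3UL;        /* pad name to a 4-byte boundary */
	memcpy(p, desc, desclen);
	p += (desclen + 3) & ~3UL;              /* pad descriptor likewise */

	return (size_t)(p - (char *)out);
}
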
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index bb8770a177b5..0b25a7d4e1e4 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/time.h> 27#include <linux/time.h>
28#include <linux/efi.h> 28#include <linux/efi.h>
29#include <linux/kexec.h>
29 30
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/kregs.h> 32#include <asm/kregs.h>
@@ -41,7 +42,7 @@ extern efi_status_t efi_call_phys (void *, ...);
41struct efi efi; 42struct efi efi;
42EXPORT_SYMBOL(efi); 43EXPORT_SYMBOL(efi);
43static efi_runtime_services_t *runtime; 44static efi_runtime_services_t *runtime;
44static unsigned long mem_limit = ~0UL, max_addr = ~0UL; 45static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
45 46
46#define efi_call_virt(f, args...) (*(f))(args) 47#define efi_call_virt(f, args...) (*(f))(args)
47 48
@@ -224,7 +225,7 @@ efi_gettimeofday (struct timespec *ts)
224} 225}
225 226
226static int 227static int
227is_available_memory (efi_memory_desc_t *md) 228is_memory_available (efi_memory_desc_t *md)
228{ 229{
229 if (!(md->attribute & EFI_MEMORY_WB)) 230 if (!(md->attribute & EFI_MEMORY_WB))
230 return 0; 231 return 0;
@@ -421,6 +422,8 @@ efi_init (void)
421 mem_limit = memparse(cp + 4, &cp); 422 mem_limit = memparse(cp + 4, &cp);
422 } else if (memcmp(cp, "max_addr=", 9) == 0) { 423 } else if (memcmp(cp, "max_addr=", 9) == 0) {
423 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 424 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
425 } else if (memcmp(cp, "min_addr=", 9) == 0) {
426 min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
424 } else { 427 } else {
425 while (*cp != ' ' && *cp) 428 while (*cp != ' ' && *cp)
426 ++cp; 429 ++cp;
@@ -428,6 +431,8 @@ efi_init (void)
428 ++cp; 431 ++cp;
429 } 432 }
430 } 433 }
434 if (min_addr != 0UL)
435 printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
431 if (max_addr != ~0UL) 436 if (max_addr != ~0UL)
432 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); 437 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
433 438
@@ -887,14 +892,15 @@ find_memmap_space (void)
887 } 892 }
888 contig_high = GRANULEROUNDDOWN(contig_high); 893 contig_high = GRANULEROUNDDOWN(contig_high);
889 } 894 }
890 if (!is_available_memory(md) || md->type == EFI_LOADER_DATA) 895 if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
891 continue; 896 continue;
892 897
893 /* Round ends inward to granule boundaries */ 898 /* Round ends inward to granule boundaries */
894 as = max(contig_low, md->phys_addr); 899 as = max(contig_low, md->phys_addr);
895 ae = min(contig_high, efi_md_end(md)); 900 ae = min(contig_high, efi_md_end(md));
896 901
897 /* keep within max_addr= command line arg */ 902 /* keep within max_addr= and min_addr= command line arg */
903 as = max(as, min_addr);
898 ae = min(ae, max_addr); 904 ae = min(ae, max_addr);
899 if (ae <= as) 905 if (ae <= as)
900 continue; 906 continue;
@@ -962,7 +968,7 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
962 } 968 }
963 contig_high = GRANULEROUNDDOWN(contig_high); 969 contig_high = GRANULEROUNDDOWN(contig_high);
964 } 970 }
965 if (!is_available_memory(md)) 971 if (!is_memory_available(md))
966 continue; 972 continue;
967 973
968 /* 974 /*
@@ -1004,7 +1010,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
1004 } else 1010 } else
1005 ae = efi_md_end(md); 1011 ae = efi_md_end(md);
1006 1012
1007 /* keep within max_addr= command line arg */ 1013 /* keep within max_addr= and min_addr= command line args */
1014 as = max(as, min_addr);
1008 ae = min(ae, max_addr); 1015 ae = min(ae, max_addr);
1009 if (ae <= as) 1016 if (ae <= as)
1010 continue; 1017 continue;
@@ -1116,6 +1123,58 @@ efi_initialize_iomem_resources(struct resource *code_resource,
1116 */ 1123 */
1117 insert_resource(res, code_resource); 1124 insert_resource(res, code_resource);
1118 insert_resource(res, data_resource); 1125 insert_resource(res, data_resource);
1126#ifdef CONFIG_KEXEC
1127 insert_resource(res, &efi_memmap_res);
1128 insert_resource(res, &boot_param_res);
1129 if (crashk_res.end > crashk_res.start)
1130 insert_resource(res, &crashk_res);
1131#endif
1119 } 1132 }
1120 } 1133 }
1121} 1134}
1135
1136#ifdef CONFIG_KEXEC
1137/* Find a block of memory aligned to 64M, excluding the reserved regions;
1138 rsvd_regions are sorted.
1139 */
1140unsigned long
1141kdump_find_rsvd_region (unsigned long size,
1142 struct rsvd_region *r, int n)
1143{
1144 int i;
1145 u64 start, end;
1146 u64 alignment = 1UL << _PAGE_SIZE_64M;
1147 void *efi_map_start, *efi_map_end, *p;
1148 efi_memory_desc_t *md;
1149 u64 efi_desc_size;
1150
1151 efi_map_start = __va(ia64_boot_param->efi_memmap);
1152 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1153 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1154
1155 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1156 md = p;
1157 if (!efi_wb(md))
1158 continue;
1159 start = ALIGN(md->phys_addr, alignment);
1160 end = efi_md_end(md);
1161 for (i = 0; i < n; i++) {
1162 if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
1163 if (__pa(r[i].start) > start + size)
1164 return start;
1165 start = ALIGN(__pa(r[i].end), alignment);
1166 if (i < n-1 && __pa(r[i+1].start) < start + size)
1167 continue;
1168 else
1169 break;
1170 }
1171 }
1172 if (end > start + size)
1173 return start;
1174 }
1175
1176 printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",
1177 size);
1178 return ~0UL;
1179}
1180#endif
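
kdump_find_rsvd_region() above scans the EFI memory map for a 64M-aligned window large enough for the crash kernel while stepping over the already-sorted reserved regions. The userspace sketch below mirrors that scan on a toy memory map; struct range, ALIGN_UP and find_window() are inventions of the sketch, not kernel interfaces, and the inner-loop bookkeeping is simplified relative to the code above.

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

struct range { uint64_t start, end; };          /* [start, end) */

/* Toy stand-ins for the EFI memory descriptors and rsvd_region[] */
static const struct range memmap[] = { { 0x00000000, 0x40000000 } };
static const struct range rsvd[]   = { { 0x00100000, 0x00800000 },   /* kernel image */
                                       { 0x02000000, 0x02100000 } }; /* boot params  */

static uint64_t find_window(uint64_t size, uint64_t align,
                            const struct range *mem, int nmem,
                            const struct range *r, int nr)
{
        for (int m = 0; m < nmem; m++) {
                uint64_t start = ALIGN_UP(mem[m].start, align);
                uint64_t end = mem[m].end;

                for (int i = 0; i < nr; i++) {
                        if (r[i].start < start || r[i].end > end)
                                continue;       /* reservation not in the remaining window */
                        if (r[i].start >= start + size)
                                return start;   /* gap before this reservation is big enough */
                        start = ALIGN_UP(r[i].end, align);      /* skip past it */
                }
                if (end >= start + size)
                        return start;
        }
        return ~0ULL;                           /* mirrors the ~0UL failure value above */
}

int main(void)
{
        uint64_t base = find_window(64ULL << 20, 64ULL << 20,
                                    memmap, 1, rsvd, 2);

        printf("crash window at 0x%llx\n", (unsigned long long)base);
        return 0;
}

With the toy data it reports 0x4000000, i.e. the first 64M-aligned gap after the reserved kernel image.
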
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3390b7c5a63f..15234ed3a341 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1575,7 +1575,7 @@ sys_call_table:
1575 data8 sys_mq_timedreceive // 1265 1575 data8 sys_mq_timedreceive // 1265
1576 data8 sys_mq_notify 1576 data8 sys_mq_notify
1577 data8 sys_mq_getsetattr 1577 data8 sys_mq_getsetattr
1578 data8 sys_ni_syscall // reserved for kexec_load 1578 data8 sys_kexec_load
1579 data8 sys_ni_syscall // reserved for vserver 1579 data8 sys_ni_syscall // reserved for vserver
1580 data8 sys_waitid // 1270 1580 data8 sys_waitid // 1270
1581 data8 sys_add_key 1581 data8 sys_add_key
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 879c1817bd1c..bd17190bebb6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -14,6 +14,7 @@ EXPORT_SYMBOL(strlen);
14 14
15#include <asm/checksum.h> 15#include <asm/checksum.h>
16EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 16EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
17EXPORT_SYMBOL(csum_ipv6_magic);
17 18
18#include <asm/semaphore.h> 19#include <asm/semaphore.h>
19EXPORT_SYMBOL(__down); 20EXPORT_SYMBOL(__down);
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 9bf15fefa7e4..0fc5fb7865cf 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -288,6 +288,27 @@ nop (unsigned int irq)
288 /* do nothing... */ 288 /* do nothing... */
289} 289}
290 290
291
292#ifdef CONFIG_KEXEC
293void
294kexec_disable_iosapic(void)
295{
296 struct iosapic_intr_info *info;
297 struct iosapic_rte_info *rte;
298 u8 vec = 0;
299 for (info = iosapic_intr_info; info <
300 iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
301 list_for_each_entry(rte, &info->rtes,
302 rte_list) {
303 iosapic_write(rte->addr,
304 IOSAPIC_RTE_LOW(rte->rte_index),
305 IOSAPIC_MASK|vec);
306 iosapic_eoi(rte->addr, vec);
307 }
308 }
309}
310#endif
311
291static void 312static void
292mask_irq (unsigned int irq) 313mask_irq (unsigned int irq)
293{ 314{
@@ -426,7 +447,7 @@ iosapic_end_level_irq (unsigned int irq)
426#define iosapic_ack_level_irq nop 447#define iosapic_ack_level_irq nop
427 448
428struct hw_interrupt_type irq_type_iosapic_level = { 449struct hw_interrupt_type irq_type_iosapic_level = {
429 .typename = "IO-SAPIC-level", 450 .name = "IO-SAPIC-level",
430 .startup = iosapic_startup_level_irq, 451 .startup = iosapic_startup_level_irq,
431 .shutdown = iosapic_shutdown_level_irq, 452 .shutdown = iosapic_shutdown_level_irq,
432 .enable = iosapic_enable_level_irq, 453 .enable = iosapic_enable_level_irq,
@@ -473,7 +494,7 @@ iosapic_ack_edge_irq (unsigned int irq)
473#define iosapic_end_edge_irq nop 494#define iosapic_end_edge_irq nop
474 495
475struct hw_interrupt_type irq_type_iosapic_edge = { 496struct hw_interrupt_type irq_type_iosapic_edge = {
476 .typename = "IO-SAPIC-edge", 497 .name = "IO-SAPIC-edge",
477 .startup = iosapic_startup_edge_irq, 498 .startup = iosapic_startup_edge_irq,
478 .shutdown = iosapic_disable_edge_irq, 499 .shutdown = iosapic_disable_edge_irq,
479 .enable = iosapic_enable_edge_irq, 500 .enable = iosapic_enable_edge_irq,
@@ -664,7 +685,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
664 printk(KERN_WARNING 685 printk(KERN_WARNING
665 "%s: changing vector %d from %s to %s\n", 686 "%s: changing vector %d from %s to %s\n",
666 __FUNCTION__, vector, 687 __FUNCTION__, vector,
667 idesc->chip->typename, irq_type->typename); 688 idesc->chip->name, irq_type->name);
668 idesc->chip = irq_type; 689 idesc->chip = irq_type;
669 } 690 }
670 return 0; 691 return 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f07c0864b0b4..54d55e4d64f7 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
76 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 76 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
77 } 77 }
78#endif 78#endif
79 seq_printf(p, " %14s", irq_desc[i].chip->typename); 79 seq_printf(p, " %14s", irq_desc[i].chip->name);
80 seq_printf(p, " %s", action->name); 80 seq_printf(p, " %s", action->name);
81 81
82 for (action=action->next; action; action = action->next) 82 for (action=action->next; action; action = action->next)
@@ -197,7 +197,7 @@ void fixup_irqs(void)
197 struct pt_regs *old_regs = set_irq_regs(NULL); 197 struct pt_regs *old_regs = set_irq_regs(NULL);
198 198
199 vectors_in_migration[irq]=0; 199 vectors_in_migration[irq]=0;
200 __do_IRQ(irq); 200 generic_handle_irq(irq);
201 set_irq_regs(old_regs); 201 set_irq_regs(old_regs);
202 } 202 }
203 } 203 }
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 68339dd0c9e2..ba3ba8bc50be 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -180,11 +180,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
180 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); 180 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
181 ia64_srlz_d(); 181 ia64_srlz_d();
182 while (vector != IA64_SPURIOUS_INT_VECTOR) { 182 while (vector != IA64_SPURIOUS_INT_VECTOR) {
183 if (!IS_RESCHEDULE(vector)) { 183 if (unlikely(IS_RESCHEDULE(vector)))
184 kstat_this_cpu.irqs[vector]++;
185 else {
184 ia64_setreg(_IA64_REG_CR_TPR, vector); 186 ia64_setreg(_IA64_REG_CR_TPR, vector);
185 ia64_srlz_d(); 187 ia64_srlz_d();
186 188
187 __do_IRQ(local_vector_to_irq(vector)); 189 generic_handle_irq(local_vector_to_irq(vector));
188 190
189 /* 191 /*
190 * Disable interrupts and send EOI: 192 * Disable interrupts and send EOI:
@@ -225,7 +227,9 @@ void ia64_process_pending_intr(void)
225 * Perform normal interrupt style processing 227 * Perform normal interrupt style processing
226 */ 228 */
227 while (vector != IA64_SPURIOUS_INT_VECTOR) { 229 while (vector != IA64_SPURIOUS_INT_VECTOR) {
228 if (!IS_RESCHEDULE(vector)) { 230 if (unlikely(IS_RESCHEDULE(vector)))
231 kstat_this_cpu.irqs[vector]++;
232 else {
229 struct pt_regs *old_regs = set_irq_regs(NULL); 233 struct pt_regs *old_regs = set_irq_regs(NULL);
230 234
231 ia64_setreg(_IA64_REG_CR_TPR, vector); 235 ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -238,7 +242,7 @@ void ia64_process_pending_intr(void)
238 * Probably could shared code. 242 * Probably could shared code.
239 */ 243 */
240 vectors_in_migration[local_vector_to_irq(vector)]=0; 244 vectors_in_migration[local_vector_to_irq(vector)]=0;
241 __do_IRQ(local_vector_to_irq(vector)); 245 generic_handle_irq(local_vector_to_irq(vector));
242 set_irq_regs(old_regs); 246 set_irq_regs(old_regs);
243 247
244 /* 248 /*
@@ -258,11 +262,22 @@ void ia64_process_pending_intr(void)
258#ifdef CONFIG_SMP 262#ifdef CONFIG_SMP
259extern irqreturn_t handle_IPI (int irq, void *dev_id); 263extern irqreturn_t handle_IPI (int irq, void *dev_id);
260 264
265static irqreturn_t dummy_handler (int irq, void *dev_id)
266{
267 BUG();
268}
269
261static struct irqaction ipi_irqaction = { 270static struct irqaction ipi_irqaction = {
262 .handler = handle_IPI, 271 .handler = handle_IPI,
263 .flags = IRQF_DISABLED, 272 .flags = IRQF_DISABLED,
264 .name = "IPI" 273 .name = "IPI"
265}; 274};
275
276static struct irqaction resched_irqaction = {
277 .handler = dummy_handler,
278 .flags = SA_INTERRUPT,
279 .name = "resched"
280};
266#endif 281#endif
267 282
268void 283void
@@ -287,6 +302,7 @@ init_IRQ (void)
287 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 302 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
288#ifdef CONFIG_SMP 303#ifdef CONFIG_SMP
289 register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction); 304 register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
305 register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
290#endif 306#endif
291#ifdef CONFIG_PERFMON 307#ifdef CONFIG_PERFMON
292 pfm_init_percpu(); 308 pfm_init_percpu();
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index 1ab58b09f3d7..c2f07beb1759 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -34,7 +34,7 @@ static int lsapic_retrigger(unsigned int irq)
34} 34}
35 35
36struct hw_interrupt_type irq_type_ia64_lsapic = { 36struct hw_interrupt_type irq_type_ia64_lsapic = {
37 .typename = "LSAPIC", 37 .name = "LSAPIC",
38 .startup = lsapic_noop_startup, 38 .startup = lsapic_noop_startup,
39 .shutdown = lsapic_noop, 39 .shutdown = lsapic_noop,
40 .enable = lsapic_noop, 40 .enable = lsapic_noop,
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 51217d63285e..76e778951e20 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
481void __kprobes arch_remove_kprobe(struct kprobe *p) 481void __kprobes arch_remove_kprobe(struct kprobe *p)
482{ 482{
483 mutex_lock(&kprobe_mutex); 483 mutex_lock(&kprobe_mutex);
484 free_insn_slot(p->ainsn.insn); 484 free_insn_slot(p->ainsn.insn, 0);
485 mutex_unlock(&kprobe_mutex); 485 mutex_unlock(&kprobe_mutex);
486} 486}
487/* 487/*
@@ -851,7 +851,7 @@ static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
851 return; 851 return;
852 } 852 }
853 } while (unw_unwind(info) >= 0); 853 } while (unw_unwind(info) >= 0);
854 lp->bsp = 0; 854 lp->bsp = NULL;
855 lp->cfm = 0; 855 lp->cfm = 0;
856 return; 856 return;
857} 857}
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
new file mode 100644
index 000000000000..468233fa2cee
--- /dev/null
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -0,0 +1,133 @@
1/*
2 * arch/ia64/kernel/machine_kexec.c
3 *
4 * Handle transition of Linux booting another kernel
5 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
6 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
7 * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
8 *
9 * This source code is licensed under the GNU General Public License,
10 * Version 2. See the file COPYING for more details.
11 */
12
13#include <linux/mm.h>
14#include <linux/kexec.h>
15#include <linux/cpu.h>
16#include <linux/irq.h>
17#include <asm/mmu_context.h>
18#include <asm/setup.h>
19#include <asm/delay.h>
20#include <asm/meminit.h>
21
22typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
23 struct ia64_boot_param *, unsigned long);
24
25struct kimage *ia64_kimage;
26
27struct resource efi_memmap_res = {
28 .name = "EFI Memory Map",
29 .start = 0,
30 .end = 0,
31 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
32};
33
34struct resource boot_param_res = {
35 .name = "Boot parameter",
36 .start = 0,
37 .end = 0,
38 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
39};
40
41
42/*
43 * Do whatever setup is needed on the image and the
44 * reboot code buffer to allow us to avoid allocations
45 * later.
46 */
47int machine_kexec_prepare(struct kimage *image)
48{
49 void *control_code_buffer;
50 const unsigned long *func;
51
52 func = (unsigned long *)&relocate_new_kernel;
53 /* Pre-load control code buffer to minimize work in kexec path */
54 control_code_buffer = page_address(image->control_code_page);
55 memcpy((void *)control_code_buffer, (const void *)func[0],
56 relocate_new_kernel_size);
57 flush_icache_range((unsigned long)control_code_buffer,
58 (unsigned long)control_code_buffer + relocate_new_kernel_size);
59 ia64_kimage = image;
60
61 return 0;
62}
63
64void machine_kexec_cleanup(struct kimage *image)
65{
66}
67
68void machine_shutdown(void)
69{
70 int cpu;
71
72 for_each_online_cpu(cpu) {
73 if (cpu != smp_processor_id())
74 cpu_down(cpu);
75 }
76 kexec_disable_iosapic();
77}
78
79/*
80 * Do not allocate memory (or fail in any way) in machine_kexec().
81 * We are past the point of no return, committed to rebooting now.
82 */
83extern void *efi_get_pal_addr(void);
84static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
85{
86 struct kimage *image = arg;
87 relocate_new_kernel_t rnk;
88 void *pal_addr = efi_get_pal_addr();
89 unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
90 unsigned long vector;
91 int ii;
92
93 if (image->type == KEXEC_TYPE_CRASH) {
94 crash_save_this_cpu();
95 current->thread.ksp = (__u64)info->sw - 16;
96 }
97
98 /* Interrupts aren't acceptable while we reboot */
99 local_irq_disable();
100
101 /* Mask CMC and Performance Monitor interrupts */
102 ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
103 ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
104
105 /* Mask ITV and Local Redirect Registers */
106 ia64_set_itv(1 << 16);
107 ia64_set_lrr0(1 << 16);
108 ia64_set_lrr1(1 << 16);
109
110 /* terminate possible nested in-service interrupts */
111 for (ii = 0; ii < 16; ii++)
112 ia64_eoi();
113
114 /* unmask TPR and clear any pending interrupts */
115 ia64_setreg(_IA64_REG_CR_TPR, 0);
116 ia64_srlz_d();
117 vector = ia64_get_ivr();
118 while (vector != IA64_SPURIOUS_INT_VECTOR) {
119 ia64_eoi();
120 vector = ia64_get_ivr();
121 }
122 platform_kernel_launch_event();
123 rnk = (relocate_new_kernel_t)&code_addr;
124 (*rnk)(image->head, image->start, ia64_boot_param,
125 GRANULEROUNDDOWN((unsigned long) pal_addr));
126 BUG();
127}
128
129void machine_kexec(struct kimage *image)
130{
131 unw_init_running(ia64_machine_kexec, image);
132 for(;;);
133}
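
Two casts in machine_kexec.c are easy to misread without knowing that an IA-64 function pointer refers to a two-word function descriptor rather than to code: func[0] in machine_kexec_prepare() is the entry address of relocate_new_kernel, and rnk = (relocate_new_kernel_t)&code_addr builds a makeshift descriptor whose first word is the copied code's address. The sketch below spells out that layout; struct fdesc, hello() and show-style printing are names invented here (the patch itself relies on ia64_fptr_t with fp/gp fields in mca_drv.c), and the output is only meaningful when the program is built for IA-64.

#include <stdio.h>

/* A function descriptor as used by the IA-64 ABI: the pointer obtained
 * from &some_function points at this pair, not at the instructions.
 */
struct fdesc {
        unsigned long ip;       /* entry point of the code         */
        unsigned long gp;       /* global pointer the code expects */
};

static void hello(void)
{
        printf("hello\n");
}

int main(void)
{
        const struct fdesc *fd = (const struct fdesc *)&hello;

        /* On ia64, fd->ip is what func[0] reads in machine_kexec_prepare();
         * on other architectures this merely prints whatever bytes sit at
         * the function's address.
         */
        printf("entry=0x%lx gp=0x%lx\n", fd->ip, fd->gp);
        hello();
        return 0;
}

This is also why the flush_icache_range() call follows the memcpy() above: the bytes copied out of the descriptor's entry point are instructions that will be executed at their new address.
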
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..87c1c4f42872 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -82,6 +82,7 @@
82#include <asm/system.h> 82#include <asm/system.h>
83#include <asm/sal.h> 83#include <asm/sal.h>
84#include <asm/mca.h> 84#include <asm/mca.h>
85#include <asm/kexec.h>
85 86
86#include <asm/irq.h> 87#include <asm/irq.h>
87#include <asm/hw_irq.h> 88#include <asm/hw_irq.h>
@@ -678,7 +679,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
678 * disable the cmc interrupt vector. 679 * disable the cmc interrupt vector.
679 */ 680 */
680static void 681static void
681ia64_mca_cmc_vector_disable_keventd(void *unused) 682ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
682{ 683{
683 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); 684 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
684} 685}
@@ -690,7 +691,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
690 * enable the cmc interrupt vector. 691 * enable the cmc interrupt vector.
691 */ 692 */
692static void 693static void
693ia64_mca_cmc_vector_enable_keventd(void *unused) 694ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
694{ 695{
695 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); 696 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
696} 697}
@@ -1238,6 +1239,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1238 } else { 1239 } else {
1239 /* Dump buffered message to console */ 1240 /* Dump buffered message to console */
1240 ia64_mlogbuf_finish(1); 1241 ia64_mlogbuf_finish(1);
1242#ifdef CONFIG_CRASH_DUMP
1243 atomic_set(&kdump_in_progress, 1);
1244 monarch_cpu = -1;
1245#endif
1241 } 1246 }
1242 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1247 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
1243 == NOTIFY_STOP) 1248 == NOTIFY_STOP)
@@ -1247,8 +1252,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
1247 monarch_cpu = -1; 1252 monarch_cpu = -1;
1248} 1253}
1249 1254
1250static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); 1255static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
1251static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL); 1256static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
1252 1257
1253/* 1258/*
1254 * ia64_mca_cmc_int_handler 1259 * ia64_mca_cmc_int_handler
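
The keventd changes above (handlers now taking struct work_struct * and DECLARE_WORK losing its data argument) track the 2.6.20 workqueue rework: the work item itself is passed to the handler, and any per-instance data is reached by embedding the work_struct and using container_of(), as the smpboot.c hunk further down does with struct create_idle. The userspace mock below shows just that pattern; work_struct, container_of and create_ctx here are local re-creations for the sketch, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Mock of the 2.6.20 workqueue conversion pattern: the handler receives
 * the work item and digs out its private data with container_of()
 * instead of getting a void * argument.
 */
struct work_struct {
        void (*func)(struct work_struct *work);
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct create_ctx {
        struct work_struct work;        /* must be embedded, as in struct create_idle */
        int cpu;
};

static void do_work(struct work_struct *work)
{
        struct create_ctx *c = container_of(work, struct create_ctx, work);

        printf("work for cpu %d\n", c->cpu);
}

int main(void)
{
        struct create_ctx c = { .work = { .func = do_work }, .cpu = 3 };

        c.work.func(&c.work);           /* what queue_work() plus a worker would do */
        return 0;
}
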
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index a45009d2bc90..afc1403799c9 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -435,6 +435,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
435} 435}
436 436
437/** 437/**
438 * get_target_identifier - Get the valid Cache or Bus check target identifier.
439 * @peidx: pointer of index of processor error section
440 *
441 * Return value:
442 * target address on Success / 0 on Failure
443 */
444static u64
445get_target_identifier(peidx_table_t *peidx)
446{
447 u64 target_address = 0;
448 sal_log_mod_error_info_t *smei;
449 pal_cache_check_info_t *pcci;
450 int i, level = 9;
451
452 /*
453 * Look through the cache checks for a valid target identifier
454 * If more than one valid target identifier, return the one
455 * with the lowest cache level.
456 */
457 for (i = 0; i < peidx_cache_check_num(peidx); i++) {
458 smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
459 if (smei->valid.target_identifier && smei->target_identifier) {
460 pcci = (pal_cache_check_info_t *)&(smei->check_info);
461 if (!target_address || (pcci->level < level)) {
462 target_address = smei->target_identifier;
463 level = pcci->level;
464 continue;
465 }
466 }
467 }
468 if (target_address)
469 return target_address;
470
471 /*
472 * Look at the bus check for a valid target identifier
473 */
474 smei = peidx_bus_check(peidx, 0);
475 if (smei && smei->valid.target_identifier)
476 return smei->target_identifier;
477
478 return 0;
479}
480
481/**
438 * recover_from_read_error - Try to recover the errors which type are "read"s. 482 * recover_from_read_error - Try to recover the errors which type are "read"s.
439 * @slidx: pointer of index of SAL error record 483 * @slidx: pointer of index of SAL error record
440 * @peidx: pointer of index of processor error section 484 * @peidx: pointer of index of processor error section
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
450 peidx_table_t *peidx, pal_bus_check_info_t *pbci, 494 peidx_table_t *peidx, pal_bus_check_info_t *pbci,
451 struct ia64_sal_os_state *sos) 495 struct ia64_sal_os_state *sos)
452{ 496{
453 sal_log_mod_error_info_t *smei; 497 u64 target_identifier;
454 pal_min_state_area_t *pmsa; 498 pal_min_state_area_t *pmsa;
455 struct ia64_psr *psr1, *psr2; 499 struct ia64_psr *psr1, *psr2;
456 ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook; 500 ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
457 501
458 /* Is target address valid? */ 502 /* Is target address valid? */
459 if (!pbci->tv) 503 target_identifier = get_target_identifier(peidx);
504 if (!target_identifier)
460 return fatal_mca("target address not valid"); 505 return fatal_mca("target address not valid");
461 506
462 /* 507 /*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
487 pmsa = sos->pal_min_state; 532 pmsa = sos->pal_min_state;
488 if (psr1->cpl != 0 || 533 if (psr1->cpl != 0 ||
489 ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) { 534 ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
490 smei = peidx_bus_check(peidx, 0); 535 /*
491 if (smei->valid.target_identifier) { 536 * setup for resume to bottom half of MCA,
492 /* 537 * "mca_handler_bhhook"
493 * setup for resume to bottom half of MCA, 538 */
494 * "mca_handler_bhhook" 539 /* pass to bhhook as argument (gr8, ...) */
495 */ 540 pmsa->pmsa_gr[8-1] = target_identifier;
496 /* pass to bhhook as argument (gr8, ...) */ 541 pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
497 pmsa->pmsa_gr[8-1] = smei->target_identifier; 542 pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
498 pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip; 543 /* set interrupted return address (but no use) */
499 pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr; 544 pmsa->pmsa_br0 = pmsa->pmsa_iip;
500 /* set interrupted return address (but no use) */ 545 /* change resume address to bottom half */
501 pmsa->pmsa_br0 = pmsa->pmsa_iip; 546 pmsa->pmsa_iip = mca_hdlr_bh->fp;
502 /* change resume address to bottom half */ 547 pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
503 pmsa->pmsa_iip = mca_hdlr_bh->fp; 548 /* set cpl with kernel mode */
504 pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp; 549 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
505 /* set cpl with kernel mode */ 550 psr2->cpl = 0;
506 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; 551 psr2->ri = 0;
507 psr2->cpl = 0; 552 psr2->bn = 1;
508 psr2->ri = 0; 553 psr2->i = 0;
509 psr2->bn = 1; 554
510 psr2->i = 0; 555 return mca_recovered("user memory corruption. "
511
512 return mca_recovered("user memory corruption. "
513 "kill affected process - recovered."); 556 "kill affected process - recovered.");
514 }
515
516 } 557 }
517 558
518 return fatal_mca("kernel context not recovered, iip 0x%lx\n", 559 return fatal_mca("kernel context not recovered, iip 0x%lx\n",
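
get_target_identifier() above changes the recovery path to prefer a target address taken from the cache-check sections, with the lowest cache level winning, and to fall back to the bus check only when no cache check carries a usable identifier. The standalone sketch below reproduces that selection rule on invented data; struct check and pick_target() exist only in the sketch.

#include <stdio.h>
#include <stdint.h>

struct check {
        int      valid;
        int      level;         /* cache level, lower = closer to the core */
        uint64_t target;        /* faulting physical address               */
};

static uint64_t pick_target(const struct check *cache, int n,
                            const struct check *bus)
{
        uint64_t addr = 0;
        int best = 9;           /* same sentinel as the kernel code above */

        for (int i = 0; i < n; i++) {
                if (cache[i].valid && cache[i].target &&
                    (!addr || cache[i].level < best)) {
                        addr = cache[i].target;
                        best = cache[i].level;
                }
        }
        if (addr)
                return addr;
        return (bus && bus->valid) ? bus->target : 0;
}

int main(void)
{
        struct check cc[] = { { 1, 2, 0x2000 }, { 1, 1, 0x1000 } };
        struct check bc   = { 1, 0, 0x3000 };

        /* prints 0x1000: the level-1 cache check wins over level 2 */
        printf("target 0x%llx\n", (unsigned long long)pick_target(cc, 2, &bc));
        return 0;
}
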
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index ebaf1e685f5e..0b533441c3c9 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -21,11 +21,12 @@ pal_entry_point:
21 .text 21 .text
22 22
23/* 23/*
24 * Set the PAL entry point address. This could be written in C code, but we do it here 24 * Set the PAL entry point address. This could be written in C code, but we
25 * to keep it all in one module (besides, it's so trivial that it's 25 * do it here to keep it all in one module (besides, it's so trivial that it's
26 * not a big deal). 26 * not a big deal).
27 * 27 *
28 * in0 Address of the PAL entry point (text address, NOT a function descriptor). 28 * in0 Address of the PAL entry point (text address, NOT a function
29 * descriptor).
29 */ 30 */
30GLOBAL_ENTRY(ia64_pal_handler_init) 31GLOBAL_ENTRY(ia64_pal_handler_init)
31 alloc r3=ar.pfs,1,0,0,0 32 alloc r3=ar.pfs,1,0,0,0
@@ -36,9 +37,9 @@ GLOBAL_ENTRY(ia64_pal_handler_init)
36END(ia64_pal_handler_init) 37END(ia64_pal_handler_init)
37 38
38/* 39/*
39 * Default PAL call handler. This needs to be coded in assembly because it uses 40 * Default PAL call handler. This needs to be coded in assembly because it
40 * the static calling convention, i.e., the RSE may not be used and calls are 41 * uses the static calling convention, i.e., the RSE may not be used and
41 * done via "br.cond" (not "br.call"). 42 * calls are done via "br.cond" (not "br.call").
42 */ 43 */
43GLOBAL_ENTRY(ia64_pal_default_handler) 44GLOBAL_ENTRY(ia64_pal_default_handler)
44 mov r8=-1 45 mov r8=-1
@@ -50,12 +51,10 @@ END(ia64_pal_default_handler)
50 * 51 *
51 * in0 Index of PAL service 52 * in0 Index of PAL service
52 * in1 - in3 Remaining PAL arguments 53 * in1 - in3 Remaining PAL arguments
53 * in4 1 ==> clear psr.ic, 0 ==> don't clear psr.ic
54 *
55 */ 54 */
56GLOBAL_ENTRY(ia64_pal_call_static) 55GLOBAL_ENTRY(ia64_pal_call_static)
57 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) 56 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
58 alloc loc1 = ar.pfs,5,5,0,0 57 alloc loc1 = ar.pfs,4,5,0,0
59 movl loc2 = pal_entry_point 58 movl loc2 = pal_entry_point
601: { 591: {
61 mov r28 = in0 60 mov r28 = in0
@@ -64,7 +63,6 @@ GLOBAL_ENTRY(ia64_pal_call_static)
64 } 63 }
65 ;; 64 ;;
66 ld8 loc2 = [loc2] // loc2 <- entry point 65 ld8 loc2 = [loc2] // loc2 <- entry point
67 tbit.nz p6,p7 = in4, 0
68 adds r8 = 1f-1b,r8 66 adds r8 = 1f-1b,r8
69 mov loc4=ar.rsc // save RSE configuration 67 mov loc4=ar.rsc // save RSE configuration
70 ;; 68 ;;
@@ -74,13 +72,11 @@ GLOBAL_ENTRY(ia64_pal_call_static)
74 .body 72 .body
75 mov r30 = in2 73 mov r30 = in2
76 74
77(p6) rsm psr.i | psr.ic
78 mov r31 = in3 75 mov r31 = in3
79 mov b7 = loc2 76 mov b7 = loc2
80 77
81(p7) rsm psr.i 78 rsm psr.i
82 ;; 79 ;;
83(p6) srlz.i
84 mov rp = r8 80 mov rp = r8
85 br.cond.sptk.many b7 81 br.cond.sptk.many b7
861: mov psr.l = loc3 821: mov psr.l = loc3
@@ -96,8 +92,8 @@ END(ia64_pal_call_static)
96 * Make a PAL call using the stacked registers calling convention. 92 * Make a PAL call using the stacked registers calling convention.
97 * 93 *
98 * Inputs: 94 * Inputs:
99 * in0 Index of PAL service 95 * in0 Index of PAL service
100 * in2 - in3 Remaning PAL arguments 96 * in2 - in3 Remaining PAL arguments
101 */ 97 */
102GLOBAL_ENTRY(ia64_pal_call_stacked) 98GLOBAL_ENTRY(ia64_pal_call_stacked)
103 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4) 99 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
@@ -131,18 +127,18 @@ END(ia64_pal_call_stacked)
131 * Make a physical mode PAL call using the static registers calling convention. 127 * Make a physical mode PAL call using the static registers calling convention.
132 * 128 *
133 * Inputs: 129 * Inputs:
134 * in0 Index of PAL service 130 * in0 Index of PAL service
135 * in2 - in3 Remaning PAL arguments 131 * in2 - in3 Remaining PAL arguments
136 * 132 *
137 * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel. 133 * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
138 * So we don't need to clear them. 134 * So we don't need to clear them.
139 */ 135 */
140#define PAL_PSR_BITS_TO_CLEAR \ 136#define PAL_PSR_BITS_TO_CLEAR \
141 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \ 137 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\
142 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \ 138 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
143 IA64_PSR_DFL | IA64_PSR_DFH) 139 IA64_PSR_DFL | IA64_PSR_DFH)
144 140
145#define PAL_PSR_BITS_TO_SET \ 141#define PAL_PSR_BITS_TO_SET \
146 (IA64_PSR_BN) 142 (IA64_PSR_BN)
147 143
148 144
@@ -178,7 +174,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
178 ;; 174 ;;
179 andcm r16=loc3,r16 // removes bits to clear from psr 175 andcm r16=loc3,r16 // removes bits to clear from psr
180 br.call.sptk.many rp=ia64_switch_mode_phys 176 br.call.sptk.many rp=ia64_switch_mode_phys
181.ret1: mov rp = r8 // install return address (physical) 177 mov rp = r8 // install return address (physical)
182 mov loc5 = r19 178 mov loc5 = r19
183 mov loc6 = r20 179 mov loc6 = r20
184 br.cond.sptk.many b7 180 br.cond.sptk.many b7
@@ -188,7 +184,6 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
188 mov r19=loc5 184 mov r19=loc5
189 mov r20=loc6 185 mov r20=loc6
190 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode 186 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
191.ret2:
192 mov psr.l = loc3 // restore init PSR 187 mov psr.l = loc3 // restore init PSR
193 188
194 mov ar.pfs = loc1 189 mov ar.pfs = loc1
@@ -203,8 +198,8 @@ END(ia64_pal_call_phys_static)
203 * Make a PAL call using the stacked registers in physical mode. 198 * Make a PAL call using the stacked registers in physical mode.
204 * 199 *
205 * Inputs: 200 * Inputs:
206 * in0 Index of PAL service 201 * in0 Index of PAL service
207 * in2 - in3 Remaning PAL arguments 202 * in2 - in3 Remaining PAL arguments
208 */ 203 */
209GLOBAL_ENTRY(ia64_pal_call_phys_stacked) 204GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
210 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5) 205 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
@@ -212,7 +207,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
212 movl loc2 = pal_entry_point 207 movl loc2 = pal_entry_point
2131: { 2081: {
214 mov r28 = in0 // copy procedure index 209 mov r28 = in0 // copy procedure index
215 mov loc0 = rp // save rp 210 mov loc0 = rp // save rp
216 } 211 }
217 .body 212 .body
218 ;; 213 ;;
@@ -245,7 +240,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
245 mov r16=loc3 // r16= original psr 240 mov r16=loc3 // r16= original psr
246 mov r19=loc5 241 mov r19=loc5
247 mov r20=loc6 242 mov r20=loc6
248 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode 243 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
249 244
250 mov psr.l = loc3 // restore init PSR 245 mov psr.l = loc3 // restore init PSR
251 mov ar.pfs = loc1 246 mov ar.pfs = loc1
@@ -257,10 +252,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
257END(ia64_pal_call_phys_stacked) 252END(ia64_pal_call_phys_stacked)
258 253
259/* 254/*
260 * Save scratch fp scratch regs which aren't saved in pt_regs already (fp10-fp15). 255 * Save scratch fp scratch regs which aren't saved in pt_regs already
256 * (fp10-fp15).
261 * 257 *
262 * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch 258 * NOTE: We need to do this since firmware (SAL and PAL) may use any of the
263 * regs fp-low partition. 259 * scratch regs fp-low partition.
264 * 260 *
265 * Inputs: 261 * Inputs:
266 * in0 Address of stack storage for fp regs 262 * in0 Address of stack storage for fp regs
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 0b546e2b36ac..a71df9ae0397 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -16,6 +16,7 @@
16 * 02/05/2001 S.Eranian fixed module support 16 * 02/05/2001 S.Eranian fixed module support
17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes 17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug 18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
19 * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
19 */ 20 */
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
@@ -314,13 +315,20 @@ vm_info(char *page)
314 "Protection Key Registers(PKR) : %d\n" 315 "Protection Key Registers(PKR) : %d\n"
315 "Implemented bits in PKR.key : %d\n" 316 "Implemented bits in PKR.key : %d\n"
316 "Hash Tag ID : 0x%x\n" 317 "Hash Tag ID : 0x%x\n"
317 "Size of RR.rid : %d\n", 318 "Size of RR.rid : %d\n"
319 "Max Purges : ",
318 vm_info_1.pal_vm_info_1_s.phys_add_size, 320 vm_info_1.pal_vm_info_1_s.phys_add_size,
319 vm_info_2.pal_vm_info_2_s.impl_va_msb+1, 321 vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
320 vm_info_1.pal_vm_info_1_s.max_pkr+1, 322 vm_info_1.pal_vm_info_1_s.max_pkr+1,
321 vm_info_1.pal_vm_info_1_s.key_size, 323 vm_info_1.pal_vm_info_1_s.key_size,
322 vm_info_1.pal_vm_info_1_s.hash_tag_id, 324 vm_info_1.pal_vm_info_1_s.hash_tag_id,
323 vm_info_2.pal_vm_info_2_s.rid_size); 325 vm_info_2.pal_vm_info_2_s.rid_size);
326 if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
327 p += sprintf(p, "unlimited\n");
328 else
329 p += sprintf(p, "%d\n",
330 vm_info_2.pal_vm_info_2_s.max_purges ?
331 vm_info_2.pal_vm_info_2_s.max_purges : 1);
324 } 332 }
325 333
326 if (ia64_pal_mem_attrib(&attrib) == 0) { 334 if (ia64_pal_mem_attrib(&attrib) == 0) {
@@ -467,7 +475,11 @@ static const char *proc_features[]={
467 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, 475 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
468 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, 476 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
469 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, 477 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
470 NULL,NULL,NULL,NULL,NULL, 478 "Unimplemented instruction address fault",
479 "INIT, PMI, and LINT pins",
480 "Simple unimplemented instr addresses",
481 "Variable P-state performance",
482 "Virtual machine features implemented",
471 "XIP,XPSR,XFS implemented", 483 "XIP,XPSR,XFS implemented",
472 "XR1-XR3 implemented", 484 "XR1-XR3 implemented",
473 "Disable dynamic predicate prediction", 485 "Disable dynamic predicate prediction",
@@ -475,7 +487,11 @@ static const char *proc_features[]={
475 "Disable dynamic data cache prefetch", 487 "Disable dynamic data cache prefetch",
476 "Disable dynamic inst cache prefetch", 488 "Disable dynamic inst cache prefetch",
477 "Disable dynamic branch prediction", 489 "Disable dynamic branch prediction",
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 490 NULL, NULL, NULL, NULL,
491 "Disable P-states",
492 "Enable MCA on Data Poisoning",
493 "Enable vmsw instruction",
494 "Enable extern environmental notification",
479 "Disable BINIT on processor time-out", 495 "Disable BINIT on processor time-out",
480 "Disable dynamic power management (DPM)", 496 "Disable dynamic power management (DPM)",
481 "Disable coherency", 497 "Disable coherency",
@@ -952,7 +968,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
952 } 968 }
953} 969}
954 970
955#ifdef CONFIG_HOTPLUG_CPU
956static int palinfo_cpu_callback(struct notifier_block *nfb, 971static int palinfo_cpu_callback(struct notifier_block *nfb,
957 unsigned long action, void *hcpu) 972 unsigned long action, void *hcpu)
958{ 973{
@@ -974,7 +989,6 @@ static struct notifier_block palinfo_cpu_notifier =
974 .notifier_call = palinfo_cpu_callback, 989 .notifier_call = palinfo_cpu_callback,
975 .priority = 0, 990 .priority = 0,
976}; 991};
977#endif
978 992
979static int __init 993static int __init
980palinfo_init(void) 994palinfo_init(void)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 281004ff7b00..aa94f60fa8e7 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -853,9 +853,8 @@ pfm_context_alloc(void)
853 * allocate context descriptor 853 * allocate context descriptor
854 * must be able to free with interrupts disabled 854 * must be able to free with interrupts disabled
855 */ 855 */
856 ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL); 856 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
857 if (ctx) { 857 if (ctx) {
858 memset(ctx, 0, sizeof(pfm_context_t));
859 DPRINT(("alloc ctx @%p\n", ctx)); 858 DPRINT(("alloc ctx @%p\n", ctx));
860 } 859 }
861 return ctx; 860 return ctx;
@@ -2189,13 +2188,13 @@ pfm_alloc_fd(struct file **cfile)
2189 /* 2188 /*
2190 * allocate a new dcache entry 2189 * allocate a new dcache entry
2191 */ 2190 */
2192 file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); 2191 file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2193 if (!file->f_dentry) goto out; 2192 if (!file->f_path.dentry) goto out;
2194 2193
2195 file->f_dentry->d_op = &pfmfs_dentry_operations; 2194 file->f_path.dentry->d_op = &pfmfs_dentry_operations;
2196 2195
2197 d_add(file->f_dentry, inode); 2196 d_add(file->f_path.dentry, inode);
2198 file->f_vfsmnt = mntget(pfmfs_mnt); 2197 file->f_path.mnt = mntget(pfmfs_mnt);
2199 file->f_mapping = inode->i_mapping; 2198 file->f_mapping = inode->i_mapping;
2200 2199
2201 file->f_op = &pfm_file_ops; 2200 file->f_op = &pfm_file_ops;
@@ -2302,7 +2301,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
2302 DPRINT(("smpl_buf @%p\n", smpl_buf)); 2301 DPRINT(("smpl_buf @%p\n", smpl_buf));
2303 2302
2304 /* allocate vma */ 2303 /* allocate vma */
2305 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 2304 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2306 if (!vma) { 2305 if (!vma) {
2307 DPRINT(("Cannot allocate vma\n")); 2306 DPRINT(("Cannot allocate vma\n"));
2308 goto error_kmem; 2307 goto error_kmem;
@@ -5558,12 +5557,13 @@ report_spurious2:
5558} 5557}
5559 5558
5560static irqreturn_t 5559static irqreturn_t
5561pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) 5560pfm_interrupt_handler(int irq, void *arg)
5562{ 5561{
5563 unsigned long start_cycles, total_cycles; 5562 unsigned long start_cycles, total_cycles;
5564 unsigned long min, max; 5563 unsigned long min, max;
5565 int this_cpu; 5564 int this_cpu;
5566 int ret; 5565 int ret;
5566 struct pt_regs *regs = get_irq_regs();
5567 5567
5568 this_cpu = get_cpu(); 5568 this_cpu = get_cpu();
5569 if (likely(!pfm_alt_intr_handler)) { 5569 if (likely(!pfm_alt_intr_handler)) {
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
index cd06ac6a686c..7f8da4c7ca67 100644
--- a/arch/ia64/kernel/perfmon_montecito.h
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -45,16 +45,16 @@ static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={
45/* pmc29 */ { PFM_REG_NOTIMPL, }, 45/* pmc29 */ { PFM_REG_NOTIMPL, },
46/* pmc30 */ { PFM_REG_NOTIMPL, }, 46/* pmc30 */ { PFM_REG_NOTIMPL, },
47/* pmc31 */ { PFM_REG_NOTIMPL, }, 47/* pmc31 */ { PFM_REG_NOTIMPL, },
48/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 48/* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
49/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 49/* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
50/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 50/* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
51/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 51/* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
52/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 52/* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
53/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}}, 53/* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}},
54/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 54/* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
55/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 55/* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
56/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}}, 56/* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}},
57/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 57/* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}},
58/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 58/* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}},
59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */ 59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */
60}; 60};
@@ -185,7 +185,7 @@ pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cn
185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded)); 185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded));
186 186
187 if (cnum == 41 && is_loaded 187 if (cnum == 41 && is_loaded
188 && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) { 188 && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) {
189 189
190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval)); 190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval));
191 191
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..ae473e3f2a0d
--- /dev/null
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -0,0 +1,334 @@
1/*
2 * arch/ia64/kernel/relocate_kernel.S
3 *
4 * Relocate kexec'able kernel and start it
5 *
6 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
7 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
8 * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
9 *
10 * This source code is licensed under the GNU General Public License,
11 * Version 2. See the file COPYING for more details.
12 */
13#include <asm/asmmacro.h>
14#include <asm/kregs.h>
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/mca_asm.h>
18
19 /* Must be relocatable PIC code callable as a C function
20 */
21GLOBAL_ENTRY(relocate_new_kernel)
22 .prologue
23 alloc r31=ar.pfs,4,0,0,0
24 .body
25.reloc_entry:
26{
27 rsm psr.i| psr.ic
28 mov r2=ip
29}
30 ;;
31{
32 flushrs // must be first insn in group
33 srlz.i
34}
35 ;;
36 dep r2=0,r2,61,3 //to physical address
37 ;;
38 //first switch to physical mode
39 add r3=1f-.reloc_entry, r2
40 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
41 mov ar.rsc=0 // put RSE in enforced lazy mode
42 ;;
43 add sp=(memory_stack_end - 16 - .reloc_entry),r2
44 add r8=(register_stack - .reloc_entry),r2
45 ;;
46 mov r18=ar.rnat
47 mov ar.bspstore=r8
48 ;;
49 mov cr.ipsr=r16
50 mov cr.iip=r3
51 mov cr.ifs=r0
52 srlz.i
53 ;;
54 mov ar.rnat=r18
55 rfi
56 ;;
571:
58 //physical mode code begin
59 mov b6=in1
60 dep r28=0,in2,61,3 //to physical address
61
62 // purge all TC entries
63#define O(member) IA64_CPUINFO_##member##_OFFSET
64 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
65 ;;
66 addl r17=O(PTCE_STRIDE),r2
67 addl r2=O(PTCE_BASE),r2
68 ;;
69 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
70 ld4 r19=[r2],4 // r19=ptce_count[0]
71 ld4 r21=[r17],4 // r21=ptce_stride[0]
72 ;;
73 ld4 r20=[r2] // r20=ptce_count[1]
74 ld4 r22=[r17] // r22=ptce_stride[1]
75 mov r24=r0
76 ;;
77 adds r20=-1,r20
78 ;;
79#undef O
802:
81 cmp.ltu p6,p7=r24,r19
82(p7) br.cond.dpnt.few 4f
83 mov ar.lc=r20
843:
85 ptc.e r18
86 ;;
87 add r18=r22,r18
88 br.cloop.sptk.few 3b
89 ;;
90 add r18=r21,r18
91 add r24=1,r24
92 ;;
93 br.sptk.few 2b
944:
95 srlz.i
96 ;;
97 //purge TR entry for kernel text and data
98 movl r16=KERNEL_START
99 mov r18=KERNEL_TR_PAGE_SHIFT<<2
100 ;;
101 ptr.i r16, r18
102 ptr.d r16, r18
103 ;;
104 srlz.i
105 ;;
106
107 // purge TR entry for percpu data
108 movl r16=PERCPU_ADDR
109 mov r18=PERCPU_PAGE_SHIFT<<2
110 ;;
111 ptr.d r16,r18
112 ;;
113 srlz.d
114 ;;
115
116 // purge TR entry for pal code
117 mov r16=in3
118 mov r18=IA64_GRANULE_SHIFT<<2
119 ;;
120 ptr.i r16,r18
121 ;;
122 srlz.i
123 ;;
124
125 // purge TR entry for stack
126 mov r16=IA64_KR(CURRENT_STACK)
127 ;;
128 shl r16=r16,IA64_GRANULE_SHIFT
129 movl r19=PAGE_OFFSET
130 ;;
131 add r16=r19,r16
132 mov r18=IA64_GRANULE_SHIFT<<2
133 ;;
134 ptr.d r16,r18
135 ;;
136 srlz.i
137 ;;
138
139 //copy segments
140 movl r16=PAGE_MASK
141 mov r30=in0 // in0 is page_list
142 br.sptk.few .dest_page
143 ;;
144.loop:
145 ld8 r30=[in0], 8;;
146.dest_page:
147 tbit.z p0, p6=r30, 0;; // 0x1 dest page
148(p6) and r17=r30, r16
149(p6) br.cond.sptk.few .loop;;
150
151 tbit.z p0, p6=r30, 1;; // 0x2 indirect page
152(p6) and in0=r30, r16
153(p6) br.cond.sptk.few .loop;;
154
155 tbit.z p0, p6=r30, 2;; // 0x4 end flag
156(p6) br.cond.sptk.few .end_loop;;
157
158 tbit.z p6, p0=r30, 3;; // 0x8 source page
159(p6) br.cond.sptk.few .loop
160
161 and r18=r30, r16
162
163 // simple copy page, may optimize later
164 movl r14=PAGE_SIZE/8 - 1;;
165 mov ar.lc=r14;;
1661:
167 ld8 r14=[r18], 8;;
168 st8 [r17]=r14;;
169 fc.i r17
170 add r17=8, r17
171 br.ctop.sptk.few 1b
172 br.sptk.few .loop
173 ;;
174
175.end_loop:
176 sync.i // for fc.i
177 ;;
178 srlz.i
179 ;;
180 srlz.d
181 ;;
182 br.call.sptk.many b0=b6;;
183
184.align 32
185memory_stack:
186 .fill 8192, 1, 0
187memory_stack_end:
188register_stack:
189 .fill 8192, 1, 0
190register_stack_end:
191relocate_new_kernel_end:
192END(relocate_new_kernel)
193
194.global relocate_new_kernel_size
195relocate_new_kernel_size:
196 data8 relocate_new_kernel_end - relocate_new_kernel
197
198GLOBAL_ENTRY(ia64_dump_cpu_regs)
199 .prologue
200 alloc loc0=ar.pfs,1,2,0,0
201 .body
202 mov ar.rsc=0 // put RSE in enforced lazy mode
203 add loc1=4*8, in0 // save r4 and r5 first
204 ;;
205{
206 flushrs // flush dirty regs to backing store
207 srlz.i
208}
209 st8 [loc1]=r4, 8
210 ;;
211 st8 [loc1]=r5, 8
212 ;;
213 add loc1=32*8, in0
214 mov r4=ar.rnat
215 ;;
216 st8 [in0]=r0, 8 // r0
217 st8 [loc1]=r4, 8 // rnat
218 mov r5=pr
219 ;;
220 st8 [in0]=r1, 8 // r1
221 st8 [loc1]=r5, 8 // pr
222 mov r4=b0
223 ;;
224 st8 [in0]=r2, 8 // r2
225 st8 [loc1]=r4, 8 // b0
226 mov r5=b1;
227 ;;
228 st8 [in0]=r3, 24 // r3
229 st8 [loc1]=r5, 8 // b1
230 mov r4=b2
231 ;;
232 st8 [in0]=r6, 8 // r6
233 st8 [loc1]=r4, 8 // b2
234 mov r5=b3
235 ;;
236 st8 [in0]=r7, 8 // r7
237 st8 [loc1]=r5, 8 // b3
238 mov r4=b4
239 ;;
240 st8 [in0]=r8, 8 // r8
241 st8 [loc1]=r4, 8 // b4
242 mov r5=b5
243 ;;
244 st8 [in0]=r9, 8 // r9
245 st8 [loc1]=r5, 8 // b5
246 mov r4=b6
247 ;;
248 st8 [in0]=r10, 8 // r10
249 st8 [loc1]=r4, 8 // b6
250 mov r5=b7
251 ;;
252 st8 [in0]=r11, 8 // r11
253 st8 [loc1]=r5, 8 // b7
254 mov r4=b0
255 ;;
256 st8 [in0]=r12, 8 // r12
257 st8 [loc1]=r4, 8 // ip
258 mov r5=loc0
259 ;;
260 st8 [in0]=r13, 8 // r13
261 extr.u r5=r5, 0, 38 // ar.pfs.pfm
262 mov r4=r0 // user mask
263 ;;
264 st8 [in0]=r14, 8 // r14
265 st8 [loc1]=r5, 8 // cfm
266 ;;
267 st8 [in0]=r15, 8 // r15
268 st8 [loc1]=r4, 8 // user mask
269 mov r5=ar.rsc
270 ;;
271 st8 [in0]=r16, 8 // r16
272 st8 [loc1]=r5, 8 // ar.rsc
273 mov r4=ar.bsp
274 ;;
275 st8 [in0]=r17, 8 // r17
276 st8 [loc1]=r4, 8 // ar.bsp
277 mov r5=ar.bspstore
278 ;;
279 st8 [in0]=r18, 8 // r18
280 st8 [loc1]=r5, 8 // ar.bspstore
281 mov r4=ar.rnat
282 ;;
283 st8 [in0]=r19, 8 // r19
284 st8 [loc1]=r4, 8 // ar.rnat
285 mov r5=ar.ccv
286 ;;
287 st8 [in0]=r20, 8 // r20
288 st8 [loc1]=r5, 8 // ar.ccv
289 mov r4=ar.unat
290 ;;
291 st8 [in0]=r21, 8 // r21
292 st8 [loc1]=r4, 8 // ar.unat
293 mov r5 = ar.fpsr
294 ;;
295 st8 [in0]=r22, 8 // r22
296 st8 [loc1]=r5, 8 // ar.fpsr
297 mov r4 = ar.unat
298 ;;
299 st8 [in0]=r23, 8 // r23
300 st8 [loc1]=r4, 8 // unat
301 mov r5 = ar.fpsr
302 ;;
303 st8 [in0]=r24, 8 // r24
304 st8 [loc1]=r5, 8 // fpsr
305 mov r4 = ar.pfs
306 ;;
307 st8 [in0]=r25, 8 // r25
308 st8 [loc1]=r4, 8 // ar.pfs
309 mov r5 = ar.lc
310 ;;
311 st8 [in0]=r26, 8 // r26
312 st8 [loc1]=r5, 8 // ar.lc
313 mov r4 = ar.ec
314 ;;
315 st8 [in0]=r27, 8 // r27
316 st8 [loc1]=r4, 8 // ar.ec
317 mov r5 = ar.csd
318 ;;
319 st8 [in0]=r28, 8 // r28
320 st8 [loc1]=r5, 8 // ar.csd
321 mov r4 = ar.ssd
322 ;;
323 st8 [in0]=r29, 8 // r29
324 st8 [loc1]=r4, 8 // ar.ssd
325 ;;
326 st8 [in0]=r30, 8 // r30
327 ;;
328 st8 [in0]=r31, 8 // r31
329 mov ar.pfs=loc0
330 ;;
331 br.ret.sptk.many rp
332END(ia64_dump_cpu_regs)
333
334
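
The copy loop in relocate_new_kernel above walks the page_list that kexec_load() built: bit 0 of an entry marks a new destination page, bit 1 an indirection page to continue reading from, bit 2 the end of the list, and bit 3 a source page to copy into the current destination. The C model below runs the same state machine in userspace; PAGE_SIZE, the flag macros and walk() are local to the sketch rather than kernel symbols, and error handling is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define SK_PAGE_SIZE    4096UL
#define SK_DESTINATION  0x1UL   /* entry names a new destination page     */
#define SK_INDIRECTION  0x2UL   /* entry names the next list page to read */
#define SK_DONE         0x4UL   /* end of the list                        */
#define SK_SOURCE       0x8UL   /* entry names a page to copy             */
#define SK_ADDR(e)      ((e) & ~(SK_PAGE_SIZE - 1))

static void walk(const uint64_t *entry)
{
        char *dest = NULL;

        for (;;) {
                uint64_t e = *entry++;

                if (e & SK_DESTINATION) {
                        dest = (char *)(uintptr_t)SK_ADDR(e);
                } else if (e & SK_INDIRECTION) {
                        entry = (const uint64_t *)(uintptr_t)SK_ADDR(e);
                } else if (e & SK_DONE) {
                        return;
                } else if (e & SK_SOURCE) {
                        memcpy(dest, (const void *)(uintptr_t)SK_ADDR(e),
                               SK_PAGE_SIZE);
                        dest += SK_PAGE_SIZE;
                }
        }
}

int main(void)
{
        char *src = aligned_alloc(SK_PAGE_SIZE, SK_PAGE_SIZE);
        char *dst = aligned_alloc(SK_PAGE_SIZE, SK_PAGE_SIZE);
        uint64_t list[3];

        strcpy(src, "copied via the page_list");
        list[0] = (uint64_t)(uintptr_t)dst | SK_DESTINATION;
        list[1] = (uint64_t)(uintptr_t)src | SK_SOURCE;
        list[2] = SK_DONE;

        walk(list);
        printf("%s\n", dst);
        return 0;
}

The low-bit encoding is exactly what the tbit.z tests in the assembly above check, one flag at a time, before masking the entry with PAGE_MASK to recover the page address.
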
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 642fdc7b969d..20bad78b5073 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
223 */ 223 */
224static int sal_cache_flush_drops_interrupts; 224static int sal_cache_flush_drops_interrupts;
225 225
226static void __init 226void __init
227check_sal_cache_flush (void) 227check_sal_cache_flush (void)
228{ 228{
229 unsigned long flags; 229 unsigned long flags;
230 int cpu; 230 int cpu;
231 u64 vector; 231 u64 vector, cache_type = 3;
232 struct ia64_sal_retval isrv;
232 233
233 cpu = get_cpu(); 234 cpu = get_cpu();
234 local_irq_save(flags); 235 local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
243 while (!ia64_get_irr(IA64_TIMER_VECTOR)) 244 while (!ia64_get_irr(IA64_TIMER_VECTOR))
244 cpu_relax(); 245 cpu_relax();
245 246
246 ia64_sal_cache_flush(3); 247 SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
248
249 if (isrv.status)
250 printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n", isrv.status);
247 251
248 if (ia64_get_irr(IA64_TIMER_VECTOR)) { 252 if (ia64_get_irr(IA64_TIMER_VECTOR)) {
249 vector = ia64_get_ivr(); 253 vector = ia64_get_ivr();
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
331 p += SAL_DESC_SIZE(*p); 335 p += SAL_DESC_SIZE(*p);
332 } 336 }
333 337
334 check_sal_cache_flush();
335} 338}
336 339
337int 340int
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index e63b8ca5344a..e375a2f0f2c3 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -302,7 +302,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
302static ssize_t 302static ssize_t
303salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 303salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
304{ 304{
305 struct inode *inode = file->f_dentry->d_inode; 305 struct inode *inode = file->f_path.dentry->d_inode;
306 struct proc_dir_entry *entry = PDE(inode); 306 struct proc_dir_entry *entry = PDE(inode);
307 struct salinfo_data *data = entry->data; 307 struct salinfo_data *data = entry->data;
308 char cmd[32]; 308 char cmd[32];
@@ -464,7 +464,7 @@ retry:
464static ssize_t 464static ssize_t
465salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) 465salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
466{ 466{
467 struct inode *inode = file->f_dentry->d_inode; 467 struct inode *inode = file->f_path.dentry->d_inode;
468 struct proc_dir_entry *entry = PDE(inode); 468 struct proc_dir_entry *entry = PDE(inode);
469 struct salinfo_data *data = entry->data; 469 struct salinfo_data *data = entry->data;
470 u8 *buf; 470 u8 *buf;
@@ -525,7 +525,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
525static ssize_t 525static ssize_t
526salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) 526salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
527{ 527{
528 struct inode *inode = file->f_dentry->d_inode; 528 struct inode *inode = file->f_path.dentry->d_inode;
529 struct proc_dir_entry *entry = PDE(inode); 529 struct proc_dir_entry *entry = PDE(inode);
530 struct salinfo_data *data = entry->data; 530 struct salinfo_data *data = entry->data;
531 char cmd[32]; 531 char cmd[32];
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
575 .write = salinfo_log_write, 575 .write = salinfo_log_write,
576}; 576};
577 577
578#ifdef CONFIG_HOTPLUG_CPU
579static int __devinit 578static int __devinit
580salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 579salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
581{ 580{
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
620 .notifier_call = salinfo_cpu_callback, 619 .notifier_call = salinfo_cpu_callback,
621 .priority = 0, 620 .priority = 0,
622}; 621};
623#endif /* CONFIG_HOTPLUG_CPU */
624 622
625static int __init 623static int __init
626salinfo_init(void) 624salinfo_init(void)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c4caa8003492..14e1200376a9 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -43,6 +43,8 @@
43#include <linux/initrd.h> 43#include <linux/initrd.h>
44#include <linux/pm.h> 44#include <linux/pm.h>
45#include <linux/cpufreq.h> 45#include <linux/cpufreq.h>
46#include <linux/kexec.h>
47#include <linux/crash_dump.h>
46 48
47#include <asm/ia32.h> 49#include <asm/ia32.h>
48#include <asm/machvec.h> 50#include <asm/machvec.h>
@@ -252,6 +254,41 @@ reserve_memory (void)
252 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 254 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
253 n++; 255 n++;
254 256
257#ifdef CONFIG_KEXEC
258 /* crashkernel=size@offset specifies the size to reserve for a crash
259 * kernel. (The offset is ignored, to keep compatibility with other archs.)
260 * By reserving this memory we guarantee that Linux never sets it
261 * up as a DMA target. Useful for holding code to do something
262 * appropriate after a kernel panic.
263 */
264 {
265 char *from = strstr(saved_command_line, "crashkernel=");
266 unsigned long base, size;
267 if (from) {
268 size = memparse(from + 12, &from);
269 if (size) {
270 sort_regions(rsvd_region, n);
271 base = kdump_find_rsvd_region(size,
272 rsvd_region, n);
273 if (base != ~0UL) {
274 rsvd_region[n].start =
275 (unsigned long)__va(base);
276 rsvd_region[n].end =
277 (unsigned long)__va(base + size);
278 n++;
279 crashk_res.start = base;
280 crashk_res.end = base + size - 1;
281 }
282 }
283 }
284 efi_memmap_res.start = ia64_boot_param->efi_memmap;
285 efi_memmap_res.end = efi_memmap_res.start +
286 ia64_boot_param->efi_memmap_size;
287 boot_param_res.start = __pa(ia64_boot_param);
288 boot_param_res.end = boot_param_res.start +
289 sizeof(*ia64_boot_param);
290 }
291#endif
255 /* end of memory marker */ 292 /* end of memory marker */
256 rsvd_region[n].start = ~0UL; 293 rsvd_region[n].start = ~0UL;
257 rsvd_region[n].end = ~0UL; 294 rsvd_region[n].end = ~0UL;
@@ -263,6 +300,7 @@ reserve_memory (void)
263 sort_regions(rsvd_region, num_rsvd_regions); 300 sort_regions(rsvd_region, num_rsvd_regions);
264} 301}
265 302
303
266/** 304/**
267 * find_initrd - get initrd parameters from the boot parameter structure 305 * find_initrd - get initrd parameters from the boot parameter structure
268 * 306 *
@@ -457,6 +495,8 @@ setup_arch (char **cmdline_p)
457 cpu_init(); /* initialize the bootstrap CPU */ 495 cpu_init(); /* initialize the bootstrap CPU */
458 mmu_context_init(); /* initialize context_id bitmap */ 496 mmu_context_init(); /* initialize context_id bitmap */
459 497
498 check_sal_cache_flush();
499
460#ifdef CONFIG_ACPI 500#ifdef CONFIG_ACPI
461 acpi_boot_init(); 501 acpi_boot_init();
462#endif 502#endif
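
reserve_memory() now accepts crashkernel=size@offset, handing the size part to memparse() and, on ia64, ignoring the offset because the actual window is picked by kdump_find_rsvd_region(). The userspace sketch below shows how such an option is typically pulled apart; parse_size() is a local stand-in that mimics memparse()'s K/M/G suffix handling, and the command line is made up.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Local stand-in for the kernel's memparse(): parse a number and scale it
 * by an optional K/M/G suffix, advancing *retptr past what was consumed.
 */
static unsigned long parse_size(const char *s, char **retptr)
{
        unsigned long val = strtoul(s, retptr, 0);

        switch (**retptr) {
        case 'G': case 'g': val <<= 10;         /* fall through */
        case 'M': case 'm': val <<= 10;         /* fall through */
        case 'K': case 'k': val <<= 10; (*retptr)++;
        }
        return val;
}

int main(void)
{
        char cmdline[] = "root=/dev/sda2 crashkernel=256M@64M quiet";
        char *p = strstr(cmdline, "crashkernel=");

        if (p) {
                unsigned long size, offset = 0;

                size = parse_size(p + strlen("crashkernel="), &p);
                if (*p == '@')                  /* parsed but ignored on ia64 */
                        offset = parse_size(p + 1, &p);
                printf("size=%lu offset=%lu\n", size, offset);
        }
        return 0;
}

For 256M@64M this prints size=268435456 offset=67108864; the code above keeps only the size and lets the EFI scan choose the base address.
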
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 657ac99a451c..b1b9aa4364b9 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -30,6 +30,7 @@
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/efi.h> 31#include <linux/efi.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/kexec.h>
33 34
34#include <asm/atomic.h> 35#include <asm/atomic.h>
35#include <asm/current.h> 36#include <asm/current.h>
@@ -66,6 +67,7 @@ static volatile struct call_data_struct *call_data;
66 67
67#define IPI_CALL_FUNC 0 68#define IPI_CALL_FUNC 0
68#define IPI_CPU_STOP 1 69#define IPI_CPU_STOP 1
70#define IPI_KDUMP_CPU_STOP 3
69 71
70/* This needs to be cacheline aligned because it is written to by *other* CPUs. */ 72/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
71static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned; 73static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
@@ -108,7 +110,7 @@ cpu_die(void)
108} 110}
109 111
110irqreturn_t 112irqreturn_t
111handle_IPI (int irq, void *dev_id, struct pt_regs *regs) 113handle_IPI (int irq, void *dev_id)
112{ 114{
113 int this_cpu = get_cpu(); 115 int this_cpu = get_cpu();
114 unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); 116 unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -155,7 +157,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
155 case IPI_CPU_STOP: 157 case IPI_CPU_STOP:
156 stop_this_cpu(); 158 stop_this_cpu();
157 break; 159 break;
158 160#ifdef CONFIG_CRASH_DUMP
161 case IPI_KDUMP_CPU_STOP:
162 unw_init_running(kdump_cpu_freeze, NULL);
163 break;
164#endif
159 default: 165 default:
160 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); 166 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
161 break; 167 break;
@@ -213,6 +219,26 @@ send_IPI_self (int op)
213 send_IPI_single(smp_processor_id(), op); 219 send_IPI_single(smp_processor_id(), op);
214} 220}
215 221
222#ifdef CONFIG_CRASH_DUMP
223void
224kdump_smp_send_stop()
225{
226 send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
227}
228
229void
230kdump_smp_send_init()
231{
232 unsigned int cpu, self_cpu;
233 self_cpu = smp_processor_id();
234 for_each_online_cpu(cpu) {
235 if (cpu != self_cpu) {
236 if(kdump_status[cpu] == 0)
237 platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
238 }
239 }
240}
241#endif
216/* 242/*
 217 * Called with preemption disabled. 243 * Called with preemption disabled.
218 */ 244 */
@@ -328,10 +354,14 @@ int
328smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) 354smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
329{ 355{
330 struct call_data_struct data; 356 struct call_data_struct data;
331 int cpus = num_online_cpus()-1; 357 int cpus;
332 358
333 if (!cpus) 359 spin_lock(&call_lock);
360 cpus = num_online_cpus() - 1;
361 if (!cpus) {
362 spin_unlock(&call_lock);
334 return 0; 363 return 0;
364 }
335 365
336 /* Can deadlock when called with interrupts disabled */ 366 /* Can deadlock when called with interrupts disabled */
337 WARN_ON(irqs_disabled()); 367 WARN_ON(irqs_disabled());
@@ -343,8 +373,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
343 if (wait) 373 if (wait)
344 atomic_set(&data.finished, 0); 374 atomic_set(&data.finished, 0);
345 375
346 spin_lock(&call_lock);
347
348 call_data = &data; 376 call_data = &data;
349 mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */ 377 mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
350 send_IPI_allbutself(IPI_CALL_FUNC); 378 send_IPI_allbutself(IPI_CALL_FUNC);
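
The smp_call_function() change above takes call_lock before sampling the number of online CPUs, so the count used for the "nothing to do" shortcut (and later for waiting on acknowledgements) cannot go stale while the IPIs are being sent. A user-space analogue of that ordering, with a pthread mutex standing in for call_lock; all names and values below are illustrative:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
	static int online_cpus = 4;	/* shared state another thread (hotplug in the kernel) could change */

	/* Sample the CPU count only after taking the lock that serializes
	 * changes to it, and keep holding the lock across the work that
	 * depends on that count. */
	static int call_function_sketch(void)
	{
		int cpus;

		pthread_mutex_lock(&call_lock);
		cpus = online_cpus - 1;
		if (!cpus) {
			pthread_mutex_unlock(&call_lock);
			return 0;
		}
		/* ... fill in call_data and "send IPIs" while still holding the lock ... */
		pthread_mutex_unlock(&call_lock);
		return cpus;
	}

	int main(void)
	{
		printf("would signal %d other cpus\n", call_function_sketch());
		return 0;
	}
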
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
463} 463}
464 464
465struct create_idle { 465struct create_idle {
466 struct work_struct work;
466 struct task_struct *idle; 467 struct task_struct *idle;
467 struct completion done; 468 struct completion done;
468 int cpu; 469 int cpu;
469}; 470};
470 471
471void 472void
472do_fork_idle(void *_c_idle) 473do_fork_idle(struct work_struct *work)
473{ 474{
474 struct create_idle *c_idle = _c_idle; 475 struct create_idle *c_idle =
476 container_of(work, struct create_idle, work);
475 477
476 c_idle->idle = fork_idle(c_idle->cpu); 478 c_idle->idle = fork_idle(c_idle->cpu);
477 complete(&c_idle->done); 479 complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
482{ 484{
483 int timeout; 485 int timeout;
484 struct create_idle c_idle = { 486 struct create_idle c_idle = {
487 .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
485 .cpu = cpu, 488 .cpu = cpu,
486 .done = COMPLETION_INITIALIZER(c_idle.done), 489 .done = COMPLETION_INITIALIZER(c_idle.done),
487 }; 490 };
488 DECLARE_WORK(work, do_fork_idle, &c_idle);
489 491
490 c_idle.idle = get_idle_for_cpu(cpu); 492 c_idle.idle = get_idle_for_cpu(cpu);
491 if (c_idle.idle) { 493 if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
497 * We can't use kernel_thread since we must avoid to reschedule the child. 499 * We can't use kernel_thread since we must avoid to reschedule the child.
498 */ 500 */
499 if (!keventd_up() || current_is_keventd()) 501 if (!keventd_up() || current_is_keventd())
500 work.func(work.data); 502 c_idle.work.func(&c_idle.work);
501 else { 503 else {
502 schedule_work(&work); 504 schedule_work(&c_idle.work);
503 wait_for_completion(&c_idle.done); 505 wait_for_completion(&c_idle.done);
504 } 506 }
505 507
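
The smpboot.c hunk tracks the workqueue API change of this era: the work function now receives the work_struct itself and recovers its container with container_of(), instead of being handed an opaque data pointer. A self-contained sketch of that embed-and-recover pattern; the container_of() below is a user-space stand-in, and the struct names only mirror the code above for readability:

	#include <stddef.h>
	#include <stdio.h>

	/* User-space stand-in for the kernel's container_of() helper. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct {
		void (*func)(struct work_struct *work);
	};

	struct create_idle {
		struct work_struct work;	/* embedded work item; need not be the first member */
		int cpu;
	};

	static void do_fork_idle(struct work_struct *work)
	{
		/* Recover the enclosing object from the embedded member. */
		struct create_idle *c_idle = container_of(work, struct create_idle, work);

		printf("forking idle task for cpu %d\n", c_idle->cpu);
	}

	int main(void)
	{
		struct create_idle c_idle = { .work = { .func = do_fork_idle }, .cpu = 3 };

		c_idle.work.func(&c_idle.work);	/* what schedule_work() would eventually invoke */
		return 0;
	}
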
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 41169a9bc301..39e0cd3a0884 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -84,6 +84,12 @@ timer_interrupt (int irq, void *dev_id)
84 84
85 if (time_after(new_itm, ia64_get_itc())) 85 if (time_after(new_itm, ia64_get_itc()))
86 break; 86 break;
87
88 /*
89 * Allow IPIs to interrupt the timer loop.
90 */
91 local_irq_enable();
92 local_irq_disable();
87 } 93 }
88 94
89 do { 95 do {
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 5629b45e89c6..687500ddb4b8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
31{ 31{
32#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU) 32#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
33 /* 33 /*
34 * If CPEI cannot be re-targetted, and this is 34 * If CPEI can be re-targetted or if this is not
35 * CPEI target, then dont create the control file 35 * CPEI target, then it is hotpluggable
36 */ 36 */
37 if (!can_cpei_retarget() && is_cpu_cpei_target(num)) 37 if (can_cpei_retarget() || !is_cpu_cpei_target(num))
38 sysfs_cpus[num].cpu.no_control = 1; 38 sysfs_cpus[num].cpu.hotpluggable = 1;
39 map_cpu_to_node(num, node_cpuid[num].nid); 39 map_cpu_to_node(num, node_cpuid[num].nid);
40#endif 40#endif
41 41
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index b3b2e389d6b2..d6083a0936f4 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -128,13 +128,7 @@ SECTIONS
128 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) 128 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
129 { 129 {
130 __initcall_start = .; 130 __initcall_start = .;
131 *(.initcall1.init) 131 INITCALLS
132 *(.initcall2.init)
133 *(.initcall3.init)
134 *(.initcall4.init)
135 *(.initcall5.init)
136 *(.initcall6.init)
137 *(.initcall7.init)
138 __initcall_end = .; 132 __initcall_end = .;
139 } 133 }
140 134
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index beb11721d9f5..4411d9baeb21 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -33,32 +33,32 @@ from64to16 (unsigned long x)
33 * computes the checksum of the TCP/UDP pseudo-header 33 * computes the checksum of the TCP/UDP pseudo-header
34 * returns a 16-bit checksum, already complemented. 34 * returns a 16-bit checksum, already complemented.
35 */ 35 */
36unsigned short int 36__sum16
37csum_tcpudp_magic (unsigned long saddr, unsigned long daddr, unsigned short len, 37csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len,
38 unsigned short proto, unsigned int sum) 38 unsigned short proto, __wsum sum)
39{ 39{
40 return ~from64to16(saddr + daddr + sum + ((unsigned long) ntohs(len) << 16) + 40 return (__force __sum16)~from64to16(
41 ((unsigned long) proto << 8)); 41 (__force u64)saddr + (__force u64)daddr +
42 (__force u64)sum + ((len + proto) << 8));
42} 43}
43 44
44EXPORT_SYMBOL(csum_tcpudp_magic); 45EXPORT_SYMBOL(csum_tcpudp_magic);
45 46
46unsigned int 47__wsum
47csum_tcpudp_nofold (unsigned long saddr, unsigned long daddr, unsigned short len, 48csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
48 unsigned short proto, unsigned int sum) 49 unsigned short proto, __wsum sum)
49{ 50{
50 unsigned long result; 51 unsigned long result;
51 52
52 result = (saddr + daddr + sum + 53 result = (__force u64)saddr + (__force u64)daddr +
53 ((unsigned long) ntohs(len) << 16) + 54 (__force u64)sum + ((len + proto) << 8);
54 ((unsigned long) proto << 8));
55 55
56 /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */ 56 /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */
57 /* 64 to 33 */ 57 /* 64 to 33 */
58 result = (result & 0xffffffff) + (result >> 32); 58 result = (result & 0xffffffff) + (result >> 32);
59 /* 33 to 32 */ 59 /* 33 to 32 */
60 result = (result & 0xffffffff) + (result >> 32); 60 result = (result & 0xffffffff) + (result >> 32);
61 return result; 61 return (__force __wsum)result;
62} 62}
63 63
64extern unsigned long do_csum (const unsigned char *, long); 64extern unsigned long do_csum (const unsigned char *, long);
@@ -75,16 +75,15 @@ extern unsigned long do_csum (const unsigned char *, long);
75 * 75 *
76 * it's best to have buff aligned on a 32-bit boundary 76 * it's best to have buff aligned on a 32-bit boundary
77 */ 77 */
78unsigned int 78__wsum csum_partial(const void *buff, int len, __wsum sum)
79csum_partial (const unsigned char * buff, int len, unsigned int sum)
80{ 79{
81 unsigned long result = do_csum(buff, len); 80 u64 result = do_csum(buff, len);
82 81
83 /* add in old sum, and carry.. */ 82 /* add in old sum, and carry.. */
84 result += sum; 83 result += (__force u32)sum;
85 /* 32+c bits -> 32 bits */ 84 /* 32+c bits -> 32 bits */
86 result = (result & 0xffffffff) + (result >> 32); 85 result = (result & 0xffffffff) + (result >> 32);
87 return result; 86 return (__force __wsum)result;
88} 87}
89 88
90EXPORT_SYMBOL(csum_partial); 89EXPORT_SYMBOL(csum_partial);
@@ -93,10 +92,9 @@ EXPORT_SYMBOL(csum_partial);
93 * this routine is used for miscellaneous IP-like checksums, mainly 92 * this routine is used for miscellaneous IP-like checksums, mainly
94 * in icmp.c 93 * in icmp.c
95 */ 94 */
96unsigned short 95__sum16 ip_compute_csum (const void *buff, int len)
97ip_compute_csum (unsigned char * buff, int len)
98{ 96{
99 return ~do_csum(buff,len); 97 return (__force __sum16)~do_csum(buff,len);
100} 98}
101 99
102EXPORT_SYMBOL(ip_compute_csum); 100EXPORT_SYMBOL(ip_compute_csum);
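
The checksum.c rework keeps the long-standing fold-with-carry scheme while switching the interfaces to the __wsum/__sum16 annotations. A standalone illustration of how a wide one's-complement accumulator is folded down to the final 16-bit value; this is equivalent in spirit to the from64to16() helper used above, not a copy of it, and the test value is made up:

	#include <stdio.h>
	#include <stdint.h>

	/* Fold a 64-bit one's-complement accumulator down to 16 bits,
	 * feeding the carries back in at each step. */
	static uint16_t fold64(uint64_t x)
	{
		x = (x & 0xffffffffULL) + (x >> 32);	/* 64 -> 33 bits */
		x = (x & 0xffff) + (x >> 16);		/* 33 -> 17+carry bits */
		x = (x & 0xffff) + (x >> 16);		/* -> 16+carry bits */
		x = (x & 0xffff) + (x >> 16);		/* absorb the last carry */
		return (uint16_t)x;
	}

	int main(void)
	{
		uint64_t acc = 0x00000001ffffffffULL;	/* accumulator with carries still pending */
		uint16_t folded = fold64(acc);

		printf("folded: 0x%04x  complemented (as returned to callers): 0x%04x\n",
		       folded, (uint16_t)~folded);
		return 0;
	}
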
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index 36866e8a5d2b..503dfe6d1450 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -104,9 +104,9 @@ out:
104 */ 104 */
105extern unsigned long do_csum(const unsigned char *, long); 105extern unsigned long do_csum(const unsigned char *, long);
106 106
107static unsigned int 107__wsum
108do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst, 108csum_partial_copy_from_user(const void __user *src, void *dst,
109 int len, unsigned int psum, int *errp) 109 int len, __wsum psum, int *errp)
110{ 110{
111 unsigned long result; 111 unsigned long result;
112 112
@@ -122,30 +122,17 @@ do_csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *
122 result = do_csum(dst, len); 122 result = do_csum(dst, len);
123 123
124 /* add in old sum, and carry.. */ 124 /* add in old sum, and carry.. */
125 result += psum; 125 result += (__force u32)psum;
126 /* 32+c bits -> 32 bits */ 126 /* 32+c bits -> 32 bits */
127 result = (result & 0xffffffff) + (result >> 32); 127 result = (result & 0xffffffff) + (result >> 32);
128 return result; 128 return (__force __wsum)result;
129}
130
131unsigned int
132csum_partial_copy_from_user (const unsigned char __user *src, unsigned char *dst,
133 int len, unsigned int sum, int *errp)
134{
135 if (!access_ok(VERIFY_READ, src, len)) {
136 *errp = -EFAULT;
137 memset(dst, 0, len);
138 return sum;
139 }
140
141 return do_csum_partial_copy_from_user(src, dst, len, sum, errp);
142} 129}
143 130
144unsigned int 131__wsum
145csum_partial_copy_nocheck(const unsigned char __user *src, unsigned char *dst, 132csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
146 int len, unsigned int sum)
147{ 133{
148 return do_csum_partial_copy_from_user(src, dst, len, sum, NULL); 134 return csum_partial_copy_from_user((__force const void __user *)src,
135 dst, len, sum, NULL);
149} 136}
150 137
151EXPORT_SYMBOL(csum_partial_copy_nocheck); 138EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 19674ca2acfc..1f86aeb2c948 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -8,8 +8,8 @@
8 * in0: address of buffer to checksum (char *) 8 * in0: address of buffer to checksum (char *)
9 * in1: length of the buffer (int) 9 * in1: length of the buffer (int)
10 * 10 *
11 * Copyright (C) 2002 Intel Corp. 11 * Copyright (C) 2002, 2006 Intel Corp.
12 * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> 12 * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com>
13 */ 13 */
14 14
15#include <asm/asmmacro.h> 15#include <asm/asmmacro.h>
@@ -25,6 +25,9 @@
25 25
26#define in0 r32 26#define in0 r32
27#define in1 r33 27#define in1 r33
28#define in2 r34
29#define in3 r35
30#define in4 r36
28#define ret0 r8 31#define ret0 r8
29 32
30GLOBAL_ENTRY(ip_fast_csum) 33GLOBAL_ENTRY(ip_fast_csum)
@@ -65,8 +68,9 @@ GLOBAL_ENTRY(ip_fast_csum)
65 zxt2 r20=r20 68 zxt2 r20=r20
66 ;; 69 ;;
67 add r20=ret0,r20 70 add r20=ret0,r20
71 mov r9=0xffff
68 ;; 72 ;;
69 andcm ret0=-1,r20 73 andcm ret0=r9,r20
70 .restore sp // reset frame state 74 .restore sp // reset frame state
71 br.ret.sptk.many b0 75 br.ret.sptk.many b0
72 ;; 76 ;;
@@ -88,3 +92,51 @@ GLOBAL_ENTRY(ip_fast_csum)
88 mov b0=r34 92 mov b0=r34
89 br.ret.sptk.many b0 93 br.ret.sptk.many b0
90END(ip_fast_csum) 94END(ip_fast_csum)
95
96GLOBAL_ENTRY(csum_ipv6_magic)
97 ld4 r20=[in0],4
98 ld4 r21=[in1],4
99 dep r15=in3,in2,32,16
100 ;;
101 ld4 r22=[in0],4
102 ld4 r23=[in1],4
103 mux1 r15=r15,@rev
104 ;;
105 ld4 r24=[in0],4
106 ld4 r25=[in1],4
107 shr.u r15=r15,16
108 add r16=r20,r21
109 add r17=r22,r23
110 ;;
111 ld4 r26=[in0],4
112 ld4 r27=[in1],4
113 add r18=r24,r25
114 add r8=r16,r17
115 ;;
116 add r19=r26,r27
117 add r8=r8,r18
118 ;;
119 add r8=r8,r19
120 add r15=r15,in4
121 ;;
122 add r8=r8,r15
123 ;;
124 shr.u r10=r8,32 // now fold sum into short
125 zxt4 r11=r8
126 ;;
127 add r8=r10,r11
128 ;;
129 shr.u r10=r8,16 // yeah, keep it rolling
130 zxt2 r11=r8
131 ;;
132 add r8=r10,r11
133 ;;
134 shr.u r10=r8,16 // three times lucky
135 zxt2 r11=r8
136 ;;
137 add r8=r10,r11
138 mov r9=0xffff
139 ;;
140 andcm r8=r9,r8
141 br.ret.sptk.many b0
142END(csum_ipv6_magic)
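
The new csum_ipv6_magic() sums the two 16-byte addresses, the payload length, the next-header value and the incoming partial checksum, then folds the total down to 16 bits much like ip_fast_csum() above. For readers who prefer C to ia64 assembly, here is a plain sketch of the pseudo-header part of that computation; it omits the incoming partial checksum, and the addresses and lengths are made up:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Generic 16-bit one's-complement checksum over a network-byte-order buffer. */
	static uint16_t csum16(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)buf[i] << 8 | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* 40-byte IPv6 pseudo-header: src(16) dst(16) length(4) zero(3) nexthdr(1) */
		uint8_t ph[40] = { 0 };
		uint8_t src[16] = { 0x20, 0x01, 0x0d, 0xb8 };	/* 2001:db8::, rest zero */
		uint8_t dst[16] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
				    0, 0, 0, 0, 0, 0, 0, 1 };	/* 2001:db8::1 */
		uint32_t payload_len = 8;			/* e.g. a bare UDP header */

		memcpy(ph, src, 16);
		memcpy(ph + 16, dst, 16);
		ph[32] = payload_len >> 24; ph[33] = payload_len >> 16;
		ph[34] = payload_len >> 8;  ph[35] = payload_len;
		ph[39] = 17;					/* next header: UDP */

		printf("pseudo-header checksum: 0x%04x\n", csum16(ph, sizeof(ph)));
		return 0;
	}
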
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index eee5c1cfbe32..0c7e94edc20e 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -64,14 +64,21 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
64 return pte; 64 return pte;
65} 65}
66 66
67int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
68{
69 return 0;
70}
71
67#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; } 72#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
68 73
69/* 74/*
70 * Don't actually need to do any preparation, but need to make sure 75 * Don't actually need to do any preparation, but need to make sure
71 * the address is in the right region. 76 * the address is in the right region.
72 */ 77 */
73int prepare_hugepage_range(unsigned long addr, unsigned long len) 78int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
74{ 79{
80 if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
81 return -EINVAL;
75 if (len & ~HPAGE_MASK) 82 if (len & ~HPAGE_MASK)
76 return -EINVAL; 83 return -EINVAL;
77 if (addr & ~HPAGE_MASK) 84 if (addr & ~HPAGE_MASK)
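
prepare_hugepage_range() now also rejects file offsets that are not huge-page aligned: pgoff is counted in base pages, so its low (HPAGE_SHIFT - PAGE_SHIFT) bits must be clear. A quick standalone check of that mask arithmetic; the page and huge-page sizes below are illustrative, not necessarily the kernel's configuration:

	#include <stdio.h>

	#define PAGE_SHIFT	14			/* illustrative: 16 KB base pages */
	#define HPAGE_SHIFT	28			/* illustrative: 256 MB huge pages */
	#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

	int main(void)
	{
		unsigned long pgoff_ok  = 1UL << (HPAGE_SHIFT - PAGE_SHIFT);	/* exactly one huge page in */
		unsigned long pgoff_bad = pgoff_ok + 1;				/* off by one base page */

		/* Same test the patch adds: any low bits set means a misaligned offset. */
		printf("pgoff_ok  misaligned? %s\n",
		       (pgoff_ok & (~HPAGE_MASK >> PAGE_SHIFT)) ? "yes" : "no");
		printf("pgoff_bad misaligned? %s\n",
		       (pgoff_bad & (~HPAGE_MASK >> PAGE_SHIFT)) ? "yes" : "no");
		return 0;
	}
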
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba399..56dc2024220e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
156 * the problem. When the process attempts to write to the register backing store 156 * the problem. When the process attempts to write to the register backing store
157 * for the first time, it will get a SEGFAULT in this case. 157 * for the first time, it will get a SEGFAULT in this case.
158 */ 158 */
159 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 159 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
160 if (vma) { 160 if (vma) {
161 memset(vma, 0, sizeof(*vma)); 161 memset(vma, 0, sizeof(*vma));
162 vma->vm_mm = current->mm; 162 vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
175 175
176 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ 176 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
177 if (!(current->personality & MMAP_PAGE_ZERO)) { 177 if (!(current->personality & MMAP_PAGE_ZERO)) {
178 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 178 vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
179 if (vma) { 179 if (vma) {
180 memset(vma, 0, sizeof(*vma)); 180 memset(vma, 0, sizeof(*vma));
181 vma->vm_mm = current->mm; 181 vma->vm_mm = current->mm;
diff --git a/arch/ia64/pci/Makefile b/arch/ia64/pci/Makefile
index e66889e6922a..fb14dc520d2d 100644
--- a/arch/ia64/pci/Makefile
+++ b/arch/ia64/pci/Makefile
@@ -1,4 +1,4 @@
1# 1#
2# Makefile for the ia64-specific parts of the pci bus 2# Makefile for the ia64-specific parts of the pci bus
3# 3#
4obj-y := pci.o 4obj-y := pci.o fixup.o
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
new file mode 100644
index 000000000000..245dc1fedc24
--- /dev/null
+++ b/arch/ia64/pci/fixup.c
@@ -0,0 +1,69 @@
1/*
2 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
3 * Derived from fixup.c of i386 tree.
4 */
5
6#include <linux/pci.h>
7#include <linux/init.h>
8
9#include <asm/machvec.h>
10
11/*
12 * Fixup to mark boot BIOS video selected by BIOS before it changes
13 *
14 * From information provided by "Jon Smirl" <jonsmirl@gmail.com>
15 *
16 * The standard boot ROM sequence for an x86 machine uses the BIOS
17 * to select an initial video card for boot display. This boot video
18 * card will have its BIOS copied to C0000 in system RAM.
19 * IORESOURCE_ROM_SHADOW is used to associate the boot video
20 * card with this copy. On laptops this copy has to be used since
21 * the main ROM may be compressed or combined with another image.
22 * See pci_map_rom() for use of this flag. IORESOURCE_ROM_SHADOW
23 * is marked here since the boot video device will be the only enabled
24 * video device at this point.
25 */
26
27static void __devinit pci_fixup_video(struct pci_dev *pdev)
28{
29 struct pci_dev *bridge;
30 struct pci_bus *bus;
31 u16 config;
32
33 if ((strcmp(platform_name, "dig") != 0)
34 && (strcmp(platform_name, "hpzx1") != 0))
35 return;
36 /* Maybe, this machine supports legacy memory map. */
37
38 if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
39 return;
40
41 /* Is VGA routed to us? */
42 bus = pdev->bus;
43 while (bus) {
44 bridge = bus->self;
45
46 /*
47 * From information provided by
48 * "David Miller" <davem@davemloft.net>
49 * The bridge control register is valid for PCI header
50 * type BRIDGE, or CARDBUS. Host to PCI controllers use
51 * PCI header type NORMAL.
52 */
53 if (bridge
54 &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE)
55 ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) {
56 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
57 &config);
58 if (!(config & PCI_BRIDGE_CTL_VGA))
59 return;
60 }
61 bus = bus->parent;
62 }
63 pci_read_config_word(pdev, PCI_COMMAND, &config);
64 if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
65 pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
66 printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev));
67 }
68}
69DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index b30be7c48ba8..474d179966dc 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -125,11 +125,10 @@ alloc_pci_controller (int seg)
125{ 125{
126 struct pci_controller *controller; 126 struct pci_controller *controller;
127 127
128 controller = kmalloc(sizeof(*controller), GFP_KERNEL); 128 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
129 if (!controller) 129 if (!controller)
130 return NULL; 130 return NULL;
131 131
132 memset(controller, 0, sizeof(*controller));
133 controller->segment = seg; 132 controller->segment = seg;
134 controller->node = -1; 133 controller->node = -1;
135 return controller; 134 return controller;
@@ -469,10 +468,11 @@ pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
469 } 468 }
470} 469}
471 470
472static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) 471void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
473{ 472{
474 pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES); 473 pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
475} 474}
475EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
476 476
477static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev) 477static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
478{ 478{
@@ -493,6 +493,7 @@ pcibios_fixup_bus (struct pci_bus *b)
493 } 493 }
494 list_for_each_entry(dev, &b->devices, bus_list) 494 list_for_each_entry(dev, &b->devices, bus_list)
495 pcibios_fixup_device_resources(dev); 495 pcibios_fixup_device_resources(dev);
496 platform_pci_fixup_bus(b);
496 497
497 return; 498 return;
498} 499}
@@ -562,8 +563,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
562void 563void
563pcibios_disable_device (struct pci_dev *dev) 564pcibios_disable_device (struct pci_dev *dev)
564{ 565{
565 if (dev->is_enabled) 566 BUG_ON(atomic_read(&dev->enable_cnt));
566 acpi_pci_irq_disable(dev); 567 acpi_pci_irq_disable(dev);
567} 568}
568 569
569void 570void
@@ -738,75 +739,44 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
738 return ret; 739 return ret;
739} 740}
740 741
742/* It's defined in drivers/pci/pci.c */
743extern u8 pci_cache_line_size;
744
741/** 745/**
742 * pci_cacheline_size - determine cacheline size for PCI devices 746 * set_pci_cacheline_size - determine cacheline size for PCI devices
743 * @dev: void
744 * 747 *
745 * We want to use the line-size of the outer-most cache. We assume 748 * We want to use the line-size of the outer-most cache. We assume
746 * that this line-size is the same for all CPUs. 749 * that this line-size is the same for all CPUs.
747 * 750 *
748 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). 751 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
749 *
750 * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
751 */ 752 */
752static unsigned long 753static void __init set_pci_cacheline_size(void)
753pci_cacheline_size (void)
754{ 754{
755 u64 levels, unique_caches; 755 u64 levels, unique_caches;
756 s64 status; 756 s64 status;
757 pal_cache_config_info_t cci; 757 pal_cache_config_info_t cci;
758 static u8 cacheline_size;
759
760 if (cacheline_size)
761 return cacheline_size;
762 758
763 status = ia64_pal_cache_summary(&levels, &unique_caches); 759 status = ia64_pal_cache_summary(&levels, &unique_caches);
764 if (status != 0) { 760 if (status != 0) {
765 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", 761 printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
766 __FUNCTION__, status); 762 "(status=%ld)\n", __FUNCTION__, status);
767 return SMP_CACHE_BYTES; 763 return;
768 } 764 }
769 765
770 status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2, 766 status = ia64_pal_cache_config_info(levels - 1,
771 &cci); 767 /* cache_type (data_or_unified)= */ 2, &cci);
772 if (status != 0) { 768 if (status != 0) {
773 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n", 769 printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
774 __FUNCTION__, status); 770 "(status=%ld)\n", __FUNCTION__, status);
775 return SMP_CACHE_BYTES; 771 return;
776 } 772 }
777 cacheline_size = 1 << cci.pcci_line_size; 773 pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
778 return cacheline_size;
779} 774}
780 775
781/** 776static int __init pcibios_init(void)
782 * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi() 777{
783 * @dev: the PCI device for which MWI is enabled 778 set_pci_cacheline_size();
784 * 779 return 0;
785 * For ia64, we can get the cacheline sizes from PAL.
786 *
787 * RETURNS: An appropriate -ERRNO error value on eror, or zero for success.
788 */
789int
790pcibios_prep_mwi (struct pci_dev *dev)
791{
792 unsigned long desired_linesize, current_linesize;
793 int rc = 0;
794 u8 pci_linesize;
795
796 desired_linesize = pci_cacheline_size();
797
798 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
799 current_linesize = 4 * pci_linesize;
800 if (desired_linesize != current_linesize) {
801 printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
802 pci_name(dev), current_linesize);
803 if (current_linesize > desired_linesize) {
804 printk(" expected %lu bytes instead\n", desired_linesize);
805 rc = -EINVAL;
806 } else {
807 printk(" correcting to %lu\n", desired_linesize);
808 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
809 }
810 }
811 return rc;
812} 780}
781
782subsys_initcall(pcibios_init);
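
The rewritten cacheline handling above programs the generic pci_cache_line_size once at boot instead of checking every device in pcibios_prep_mwi(). The division by 4 is there because the PCI_CACHE_LINE_SIZE config register counts 32-bit dwords rather than bytes. A tiny sketch of that conversion; the line size used is just an example of what PAL might report:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pcci_line_size = 7;		/* PAL reports log2(line bytes); 2^7 = 128 is an example */
		unsigned int line_bytes = 1u << pcci_line_size;
		unsigned int reg_dwords = line_bytes / 4;	/* PCI_CACHE_LINE_SIZE is in 32-bit units */

		printf("outermost cache line: %u bytes -> config register value %u\n",
		       line_bytes, reg_dwords);
		return 0;
	}
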
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 2d78f34dd763..0a59371d3475 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,13 +4,14 @@
4# License. See the file "COPYING" in the main directory of this archive 4# License. See the file "COPYING" in the main directory of this archive
5# for more details. 5# for more details.
6# 6#
7# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved. 7# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved.
8# 8#
9 9
10CPPFLAGS += -I$(srctree)/arch/ia64/sn/include 10CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
11 11
12obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ 12obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
13 huberror.o io_init.o iomv.o klconflib.o pio_phys.o \ 13 huberror.o io_acpi_init.o io_common.o \
14 io_init.o iomv.o klconflib.o pio_phys.o \
14 sn2/ 15 sn2/
15obj-$(CONFIG_IA64_GENERIC) += machvec.o 16obj-$(CONFIG_IA64_GENERIC) += machvec.o
16obj-$(CONFIG_SGI_TIOCX) += tiocx.o 17obj-$(CONFIG_SGI_TIOCX) += tiocx.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 7f73ad4408aa..ff1c55601178 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -381,14 +381,13 @@ bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
381 * bcopy to the destination. 381 * bcopy to the destination.
382 */ 382 */
383 383
384 /* Add the leader from source */
385 headBteLen = len + (src & L1_CACHE_MASK);
386 /* Add the trailing bytes from footer. */
387 headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
388 headBteSource = src & ~L1_CACHE_MASK;
389 headBcopySrcOffset = src & L1_CACHE_MASK; 384 headBcopySrcOffset = src & L1_CACHE_MASK;
390 headBcopyDest = dest; 385 headBcopyDest = dest;
391 headBcopyLen = len; 386 headBcopyLen = len;
387
388 headBteSource = src - headBcopySrcOffset;
389 /* Add the leading and trailing bytes from source */
390 headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);
392 } 391 }
393 392
394 if (headBcopyLen > 0) { 393 if (headBcopyLen > 0) {
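
The bte.c fix recomputes the head transfer so the block-transfer engine reads whole cache lines while the follow-up bcopy still starts at the caller's exact source offset. A standalone sketch of the arithmetic with a local L1_CACHE_ALIGN(); the cache-line size and addresses are made up for illustration:

	#include <stdio.h>
	#include <stdint.h>

	#define L1_CACHE_BYTES	128ULL			/* illustrative line size */
	#define L1_CACHE_MASK	(L1_CACHE_BYTES - 1)
	#define L1_CACHE_ALIGN(x)	(((x) + L1_CACHE_MASK) & ~L1_CACHE_MASK)

	int main(void)
	{
		uint64_t src = 0x1000000000ULL + 40;	/* unaligned source address */
		uint64_t len = 300;			/* unaligned length */

		uint64_t headBcopySrcOffset = src & L1_CACHE_MASK;	/* 40 bytes into a line */
		uint64_t headBteSource = src - headBcopySrcOffset;	/* round src down to a line */
		uint64_t headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);	/* cover the tail too */

		printf("BTE copies %llu bytes from %#llx; bcopy then takes %llu bytes starting %llu bytes in\n",
		       (unsigned long long)headBteLen, (unsigned long long)headBteSource,
		       (unsigned long long)len, (unsigned long long)headBcopySrcOffset);
		return 0;
	}
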
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
new file mode 100644
index 000000000000..99d7f278612a
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -0,0 +1,231 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <asm/sn/types.h>
10#include <asm/sn/addrs.h>
11#include <asm/sn/pcidev.h>
12#include <asm/sn/pcibus_provider_defs.h>
13#include <asm/sn/sn_sal.h>
14#include "xtalk/hubdev.h"
15#include <linux/acpi.h>
16
17
18/*
19 * The code in this file will only be executed when running with
20 * a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
21 */
22
23
24/*
25 * This value must match the UUID the PROM uses
26 * (io/acpi/defblk.c) when building a vendor descriptor.
27 */
28struct acpi_vendor_uuid sn_uuid = {
29 .subtype = 0,
30 .data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
31 0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
32};
33
34/*
35 * Perform the early IO init in PROM.
36 */
37static s64
38sal_ioif_init(u64 *result)
39{
40 struct ia64_sal_retval isrv = {0,0,0,0};
41
42 SAL_CALL_NOLOCK(isrv,
43 SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
44 *result = isrv.v0;
45 return isrv.status;
46}
47
48/*
49 * sn_hubdev_add - The 'add' function of the acpi_sn_hubdev_driver.
50 * Called for every "SGIHUB" or "SGITIO" device defined
51 * in the ACPI namespace.
52 */
53static int __init
54sn_hubdev_add(struct acpi_device *device)
55{
56 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
57 u64 addr;
58 struct hubdev_info *hubdev;
59 struct hubdev_info *hubdev_ptr;
60 int i;
61 u64 nasid;
62 struct acpi_resource *resource;
63 int ret = 0;
64 acpi_status status;
65 struct acpi_resource_vendor_typed *vendor;
66 extern void sn_common_hubdev_init(struct hubdev_info *);
67
68 status = acpi_get_vendor_resource(device->handle, METHOD_NAME__CRS,
69 &sn_uuid, &buffer);
70 if (ACPI_FAILURE(status)) {
71 printk(KERN_ERR
72 "sn_hubdev_add: acpi_get_vendor_resource() failed: %d\n",
73 status);
74 return 1;
75 }
76
77 resource = buffer.pointer;
78 vendor = &resource->data.vendor_typed;
79 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
80 sizeof(struct hubdev_info *)) {
81 printk(KERN_ERR
82 "sn_hubdev_add: Invalid vendor data length: %d\n",
83 vendor->byte_length);
84 ret = 1;
85 goto exit;
86 }
87
88 memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
89 hubdev_ptr = __va((struct hubdev_info *) addr);
90
91 nasid = hubdev_ptr->hdi_nasid;
92 i = nasid_to_cnodeid(nasid);
93 hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
94 *hubdev = *hubdev_ptr;
95 sn_common_hubdev_init(hubdev);
96
97exit:
98 kfree(buffer.pointer);
99 return ret;
100}
101
102/*
103 * sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
104 * the ACPI Vendor resource for this bus.
105 */
106static struct pcibus_bussoft *
107sn_get_bussoft_ptr(struct pci_bus *bus)
108{
109 u64 addr;
110 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
111 acpi_handle handle;
112 struct pcibus_bussoft *prom_bussoft_ptr;
113 struct acpi_resource *resource;
114 acpi_status status;
115 struct acpi_resource_vendor_typed *vendor;
116
117
118 handle = PCI_CONTROLLER(bus)->acpi_handle;
119 status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
120 &sn_uuid, &buffer);
121 if (ACPI_FAILURE(status)) {
122 printk(KERN_ERR "get_acpi_pcibus_ptr: "
123 "get_acpi_bussoft_info() failed: %d\n",
124 status);
125 return NULL;
126 }
127 resource = buffer.pointer;
128 vendor = &resource->data.vendor_typed;
129
130 if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
131 sizeof(struct pcibus_bussoft *)) {
132 printk(KERN_ERR
133 "get_acpi_bussoft_ptr: Invalid vendor data "
134 "length %d\n", vendor->byte_length);
135 kfree(buffer.pointer);
136 return NULL;
137 }
138 memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
139 prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
140 kfree(buffer.pointer);
141
142 return prom_bussoft_ptr;
143}
144
145/*
146 * sn_acpi_bus_fixup
147 */
148void
149sn_acpi_bus_fixup(struct pci_bus *bus)
150{
151 struct pci_dev *pci_dev = NULL;
152 struct pcibus_bussoft *prom_bussoft_ptr;
153 extern void sn_common_bus_fixup(struct pci_bus *,
154 struct pcibus_bussoft *);
155
156 if (!bus->parent) { /* If root bus */
157 prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
158 if (prom_bussoft_ptr == NULL) {
159 printk(KERN_ERR
160 "sn_pci_fixup_bus: 0x%04x:0x%02x Unable to "
161 "obtain prom_bussoft_ptr\n",
162 pci_domain_nr(bus), bus->number);
163 return;
164 }
165 sn_common_bus_fixup(bus, prom_bussoft_ptr);
166 }
167 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
168 sn_pci_fixup_slot(pci_dev);
169 }
170}
171
172/*
173 * sn_acpi_slot_fixup - Perform any SN specific slot fixup.
174 * At present there does not appear to be
175 * any generic way to handle a ROM image
176 * that has been shadowed by the PROM, so
177 * we pass a pointer to it within the
178 * pcidev_info structure.
179 */
180
181void
182sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
183{
184 void __iomem *addr;
185 size_t size;
186
187 if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
188 /*
189 * A valid ROM image exists and has been shadowed by the
190 * PROM. Setup the pci_dev ROM resource to point to
191 * the shadowed copy.
192 */
193 size = dev->resource[PCI_ROM_RESOURCE].end -
194 dev->resource[PCI_ROM_RESOURCE].start;
195 addr =
196 ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
197 size);
198 dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
199 dev->resource[PCI_ROM_RESOURCE].end =
200 (unsigned long) addr + size;
201 dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
202 }
203}
204
205static struct acpi_driver acpi_sn_hubdev_driver = {
206 .name = "SGI HUBDEV Driver",
207 .ids = "SGIHUB,SGITIO",
208 .ops = {
209 .add = sn_hubdev_add,
210 },
211};
212
213
214/*
215 * sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
216 * nodes and root buses in the DSDT. As a result, bus scanning
217 * will be initiated by the Linux ACPI code.
218 */
219
220void __init
221sn_io_acpi_init(void)
222{
223 u64 result;
224 s64 status;
225
226 acpi_bus_register_driver(&acpi_sn_hubdev_driver);
227 status = sal_ioif_init(&result);
228 if (status || result)
229 panic("sal_ioif_init failed: [%lx] %s\n",
230 status, ia64_sal_strerror(status));
231}
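
Both sn_hubdev_add() and sn_get_bussoft_ptr() above recover a PROM-side pointer that was packed byte-for-byte into the ACPI vendor resource right after the UUID, and length-check it against the size of a pointer before using it. A minimal sketch of that pack-and-recover convention outside ACPI; the struct and values are made up, and the real code additionally passes the value through __va() because the PROM supplies a physical address:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	struct fake_hubdev {			/* stands in for the PROM-built structure */
		int nasid;
	};

	int main(void)
	{
		struct fake_hubdev hub = { .nasid = 42 };
		uint8_t byte_data[sizeof(uintptr_t)];	/* stands in for the vendor resource's byte_data */
		uintptr_t packed, recovered;

		/* Producer side: drop the raw pointer value into the byte stream. */
		packed = (uintptr_t)&hub;
		memcpy(byte_data, &packed, sizeof(packed));

		/* Consumer side: copy the value back out and turn it into a pointer again. */
		memcpy(&recovered, byte_data, sizeof(recovered));
		printf("recovered nasid: %d\n", ((struct fake_hubdev *)recovered)->nasid);
		return 0;
	}
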
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
new file mode 100644
index 000000000000..d4dd8f4b6b8d
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -0,0 +1,613 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/bootmem.h>
10#include <asm/sn/types.h>
11#include <asm/sn/addrs.h>
12#include <asm/sn/sn_feature_sets.h>
13#include <asm/sn/geo.h>
14#include <asm/sn/io.h>
15#include <asm/sn/l1.h>
16#include <asm/sn/module.h>
17#include <asm/sn/pcibr_provider.h>
18#include <asm/sn/pcibus_provider_defs.h>
19#include <asm/sn/pcidev.h>
20#include <asm/sn/simulator.h>
21#include <asm/sn/sn_sal.h>
22#include <asm/sn/tioca_provider.h>
23#include <asm/sn/tioce_provider.h>
24#include "xtalk/hubdev.h"
25#include "xtalk/xwidgetdev.h"
26#include <linux/acpi.h>
27#include <asm/sn/sn2/sn_hwperf.h>
28#include <asm/sn/acpi.h>
29
30extern void sn_init_cpei_timer(void);
31extern void register_sn_procfs(void);
32extern void sn_acpi_bus_fixup(struct pci_bus *);
33extern void sn_bus_fixup(struct pci_bus *);
34extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
35extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
36extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
37extern void sn_io_acpi_init(void);
38extern void sn_io_init(void);
39
40
41static struct list_head sn_sysdata_list;
42
43/* sysdata list struct */
44struct sysdata_el {
45 struct list_head entry;
46 void *sysdata;
47};
48
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52
53/*
54 * Hooks and struct for unsupported pci providers
55 */
56
57static dma_addr_t
58sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
59{
60 return 0;
61}
62
63static void
64sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
65{
66 return;
67}
68
69static void *
70sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
71{
72 return NULL;
73}
74
75static struct sn_pcibus_provider sn_pci_default_provider = {
76 .dma_map = sn_default_pci_map,
77 .dma_map_consistent = sn_default_pci_map,
78 .dma_unmap = sn_default_pci_unmap,
79 .bus_fixup = sn_default_pci_bus_fixup,
80};
81
82/*
83 * Retrieve the DMA Flush List given nasid, widget, and device.
84 * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
85 */
86static inline u64
87sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
88 u64 address)
89{
90 struct ia64_sal_retval ret_stuff;
91 ret_stuff.status = 0;
92 ret_stuff.v0 = 0;
93
94 SAL_CALL_NOLOCK(ret_stuff,
95 (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
96 (u64) nasid, (u64) widget_num,
97 (u64) device_num, (u64) address, 0, 0, 0);
98 return ret_stuff.status;
99}
100
101/*
102 * Retrieve the pci device information given the bus and device|function number.
103 */
104static inline u64
105sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
106 u64 sn_irq_info)
107{
108 struct ia64_sal_retval ret_stuff;
109 ret_stuff.status = 0;
110 ret_stuff.v0 = 0;
111
112 SAL_CALL_NOLOCK(ret_stuff,
113 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
114 (u64) segment, (u64) bus_number, (u64) devfn,
115 (u64) pci_dev,
116 sn_irq_info, 0, 0);
117 return ret_stuff.v0;
118}
119
120/*
121 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
122 * device.
123 */
124inline struct pcidev_info *
125sn_pcidev_info_get(struct pci_dev *dev)
126{
127 struct pcidev_info *pcidev;
128
129 list_for_each_entry(pcidev,
130 &(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
131 if (pcidev->pdi_linux_pcidev == dev)
132 return pcidev;
133 }
134 return NULL;
135}
136
137/* Older PROM flush WAR
138 *
139 * 01/16/06 -- This war will be in place until a new official PROM is released.
140 * Additionally note that the struct sn_flush_device_war also has to be
141 * removed from arch/ia64/sn/include/xtalk/hubdev.h
142 */
143static u8 war_implemented = 0;
144
145static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
146 struct sn_flush_device_common *common)
147{
148 struct sn_flush_device_war *war_list;
149 struct sn_flush_device_war *dev_entry;
150 struct ia64_sal_retval isrv = {0,0,0,0};
151
152 if (!war_implemented) {
153 printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
154 "PROM flush WAR\n");
155 war_implemented = 1;
156 }
157
158 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
159 if (!war_list)
160 BUG();
161
162 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
163 nasid, widget, __pa(war_list), 0, 0, 0 ,0);
164 if (isrv.status)
165 panic("sn_device_fixup_war failed: %s\n",
166 ia64_sal_strerror(isrv.status));
167
168 dev_entry = war_list + device;
169 memcpy(common,dev_entry, sizeof(*common));
170 kfree(war_list);
171
172 return isrv.status;
173}
174
175/*
176 * sn_common_hubdev_init() - This routine is called to initialize the HUB data
177 * structure for each node in the system.
178 */
179void __init
180sn_common_hubdev_init(struct hubdev_info *hubdev)
181{
182
183 struct sn_flush_device_kernel *sn_flush_device_kernel;
184 struct sn_flush_device_kernel *dev_entry;
185 s64 status;
186 int widget, device, size;
187
188 /* Attach the error interrupt handlers */
189 if (hubdev->hdi_nasid & 1) /* If TIO */
190 ice_error_init(hubdev);
191 else
192 hub_error_init(hubdev);
193
194 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
195 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
196
197 if (!hubdev->hdi_flush_nasid_list.widget_p)
198 return;
199
200 size = (HUB_WIDGET_ID_MAX + 1) *
201 sizeof(struct sn_flush_device_kernel *);
202 hubdev->hdi_flush_nasid_list.widget_p =
203 kzalloc(size, GFP_KERNEL);
204 if (!hubdev->hdi_flush_nasid_list.widget_p)
205 BUG();
206
207 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
208 size = DEV_PER_WIDGET *
209 sizeof(struct sn_flush_device_kernel);
210 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
211 if (!sn_flush_device_kernel)
212 BUG();
213
214 dev_entry = sn_flush_device_kernel;
215 for (device = 0; device < DEV_PER_WIDGET;
216 device++, dev_entry++) {
217 size = sizeof(struct sn_flush_device_common);
218 dev_entry->common = kzalloc(size, GFP_KERNEL);
219 if (!dev_entry->common)
220 BUG();
221 if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
222 status = sal_get_device_dmaflush_list(
223 hubdev->hdi_nasid, widget, device,
224 (u64)(dev_entry->common));
225 else
226 status = sn_device_fixup_war(hubdev->hdi_nasid,
227 widget, device,
228 dev_entry->common);
229 if (status != SALRET_OK)
230 panic("SAL call failed: %s\n",
231 ia64_sal_strerror(status));
232
233 spin_lock_init(&dev_entry->sfdl_flush_lock);
234 }
235
236 if (sn_flush_device_kernel)
237 hubdev->hdi_flush_nasid_list.widget_p[widget] =
238 sn_flush_device_kernel;
239 }
240}
241
242void sn_pci_unfixup_slot(struct pci_dev *dev)
243{
244 struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
245
246 sn_irq_unfixup(dev);
247 pci_dev_put(host_pci_dev);
248 pci_dev_put(dev);
249}
250
251/*
252 * sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
253 * with the Linux PCI abstraction layer. Resources
254 * acquired from our PCI provider include PIO maps
255 * to BAR space and interrupt objects.
256 */
257void sn_pci_fixup_slot(struct pci_dev *dev)
258{
259 int segment = pci_domain_nr(dev->bus);
260 int status = 0;
261 struct pcibus_bussoft *bs;
262 struct pci_bus *host_pci_bus;
263 struct pci_dev *host_pci_dev;
264 struct pcidev_info *pcidev_info;
265 struct sn_irq_info *sn_irq_info;
266 unsigned int bus_no, devfn;
267
268 pci_dev_get(dev); /* for the sysdata pointer */
269 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
270 if (!pcidev_info)
271 BUG(); /* Cannot afford to run out of memory */
272
273 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
274 if (!sn_irq_info)
275 BUG(); /* Cannot afford to run out of memory */
276
277 /* Call to retrieve pci device information needed by kernel. */
278 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
279 dev->devfn,
280 (u64) __pa(pcidev_info),
281 (u64) __pa(sn_irq_info));
282 if (status)
283 BUG(); /* Cannot get platform pci device information */
284
285 /* Add pcidev_info to list in pci_controller.platform_data */
286 list_add_tail(&pcidev_info->pdi_list,
287 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
288
289 if (SN_ACPI_BASE_SUPPORT())
290 sn_acpi_slot_fixup(dev, pcidev_info);
291 else
292 sn_more_slot_fixup(dev, pcidev_info);
293 /*
294 * Using the PROMs values for the PCI host bus, get the Linux
295 * PCI host_pci_dev struct and set up host bus linkages
296 */
297
298 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
299 devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
300 host_pci_bus = pci_find_bus(segment, bus_no);
301 host_pci_dev = pci_get_slot(host_pci_bus, devfn);
302
303 pcidev_info->host_pci_dev = host_pci_dev;
304 pcidev_info->pdi_linux_pcidev = dev;
305 pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
306 bs = SN_PCIBUS_BUSSOFT(dev->bus);
307 pcidev_info->pdi_pcibus_info = bs;
308
309 if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
310 SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
311 } else {
312 SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
313 }
314
315 /* Only set up IRQ stuff if this device has a host bus context */
316 if (bs && sn_irq_info->irq_irq) {
317 pcidev_info->pdi_sn_irq_info = sn_irq_info;
318 dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
319 sn_irq_fixup(dev, sn_irq_info);
320 } else {
321 pcidev_info->pdi_sn_irq_info = NULL;
322 kfree(sn_irq_info);
323 }
324}
325
326/*
327 * sn_common_bus_fixup - Perform platform specific bus fixup.
328 * Execute the ASIC specific fixup routine
329 * for this bus.
330 */
331void
332sn_common_bus_fixup(struct pci_bus *bus,
333 struct pcibus_bussoft *prom_bussoft_ptr)
334{
335 int cnode;
336 struct pci_controller *controller;
337 struct hubdev_info *hubdev_info;
338 int nasid;
339 void *provider_soft;
340 struct sn_pcibus_provider *provider;
341 struct sn_platform_data *sn_platform_data;
342
343 controller = PCI_CONTROLLER(bus);
344 /*
345 * Per-provider fixup. Copies the bus soft structure from prom
346 * to local area and links SN_PCIBUS_BUSSOFT().
347 */
348
349 if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
350 printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
351 prom_bussoft_ptr->bs_asic_type);
352 return;
353 }
354
355 if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
356 return; /* no further fixup necessary */
357
358 provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
359 if (provider == NULL)
360 panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
361 prom_bussoft_ptr->bs_asic_type);
362
363 if (provider->bus_fixup)
364 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
365 controller);
366 else
367 provider_soft = NULL;
368
369 /*
370 * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
371 * after this point.
372 */
373 controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
374 GFP_KERNEL);
375 if (controller->platform_data == NULL)
376 BUG();
377 sn_platform_data =
378 (struct sn_platform_data *) controller->platform_data;
379 sn_platform_data->provider_soft = provider_soft;
380 INIT_LIST_HEAD(&((struct sn_platform_data *)
381 controller->platform_data)->pcidev_info);
382 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
383 cnode = nasid_to_cnodeid(nasid);
384 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
385 SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
386 &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
387
388 /*
389 * If the node information we obtained during the fixup phase is
390 * invalid then set controller->node to -1 (undetermined)
391 */
392 if (controller->node >= num_online_nodes()) {
393 struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
394
395 printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
396	       " L_IO=%lx L_MEM=%lx BASE=%lx\n",
397 b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
398 b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
399 printk(KERN_WARNING "on node %d but only %d nodes online."
400	       " Association set to undetermined.\n",
401 controller->node, num_online_nodes());
402 controller->node = -1;
403 }
404}
405
406void sn_bus_store_sysdata(struct pci_dev *dev)
407{
408 struct sysdata_el *element;
409
410 element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
411 if (!element) {
412 dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
413 return;
414 }
415 element->sysdata = SN_PCIDEV_INFO(dev);
416 list_add(&element->entry, &sn_sysdata_list);
417}
418
419void sn_bus_free_sysdata(void)
420{
421 struct sysdata_el *element;
422 struct list_head *list, *safe;
423
424 list_for_each_safe(list, safe, &sn_sysdata_list) {
425 element = list_entry(list, struct sysdata_el, entry);
426 list_del(&element->entry);
427 list_del(&(((struct pcidev_info *)
428 (element->sysdata))->pdi_list));
429 kfree(element->sysdata);
430 kfree(element);
431 }
432 return;
433}
434
435/*
436 * hubdev_init_node() - Creates the HUB data structure and links it to its
437 * own NODE-specific data area.
438 */
439void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
440{
441 struct hubdev_info *hubdev_info;
442 int size;
443 pg_data_t *pg;
444
445 size = sizeof(struct hubdev_info);
446
447 if (node >= num_online_nodes()) /* Headless/memless IO nodes */
448 pg = NODE_DATA(0);
449 else
450 pg = NODE_DATA(node);
451
452 hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
453
454 npda->pdinfo = (void *)hubdev_info;
455}
456
457geoid_t
458cnodeid_get_geoid(cnodeid_t cnode)
459{
460 struct hubdev_info *hubdev;
461
462 hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
463 return hubdev->hdi_geoid;
464}
465
466void sn_generate_path(struct pci_bus *pci_bus, char *address)
467{
468 nasid_t nasid;
469 cnodeid_t cnode;
470 geoid_t geoid;
471 moduleid_t moduleid;
472 u16 bricktype;
473
474 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
475 cnode = nasid_to_cnodeid(nasid);
476 geoid = cnodeid_get_geoid(cnode);
477 moduleid = geo_module(geoid);
478
479 sprintf(address, "module_%c%c%c%c%.2d",
480 '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
481 '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
482 '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
483 MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
484
485 /* Tollhouse requires slot id to be displayed */
486 bricktype = MODULE_GET_BTYPE(moduleid);
487 if ((bricktype == L1_BRICKTYPE_191010) ||
488 (bricktype == L1_BRICKTYPE_1932))
489 sprintf(address, "%s^%d", address, geo_slot(geoid));
490}
491
492/*
493 * sn_pci_fixup_bus() - Perform SN specific setup of software structs
494 * (pcibus_bussoft, pcidev_info) and hardware
495 * registers, for the specified bus and devices under it.
496 */
497void __devinit
498sn_pci_fixup_bus(struct pci_bus *bus)
499{
500
501 if (SN_ACPI_BASE_SUPPORT())
502 sn_acpi_bus_fixup(bus);
503 else
504 sn_bus_fixup(bus);
505}
506
507/*
508 * sn_io_early_init - Perform early IO (and some non-IO) initialization.
509 * In particular, setup the sn_pci_provider[] array.
510 * This needs to be done prior to any bus scanning
511 * (acpi_scan_init()) in the ACPI case, as the SN
512 * bus fixup code will reference the array.
513 */
514static int __init
515sn_io_early_init(void)
516{
517 int i;
518
519 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
520 return 0;
521
522 /*
523	 * prime sn_pci_provider[]. Individual provider init routines will
524 * override their respective default entries.
525 */
526
527 for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
528 sn_pci_provider[i] = &sn_pci_default_provider;
529
530 pcibr_init_provider();
531 tioca_init_provider();
532 tioce_init_provider();
533
534 /*
535 * This is needed to avoid bounce limit checks in the blk layer
536 */
537 ia64_max_iommu_merge_mask = ~PAGE_MASK;
538
539 sn_irq_lh_init();
540 INIT_LIST_HEAD(&sn_sysdata_list);
541 sn_init_cpei_timer();
542
543#ifdef CONFIG_PROC_FS
544 register_sn_procfs();
545#endif
546
547 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
548 acpi_gbl_DSDT->oem_revision);
549 if (SN_ACPI_BASE_SUPPORT())
550 sn_io_acpi_init();
551 else
552 sn_io_init();
553 return 0;
554}
555
556arch_initcall(sn_io_early_init);
557
558/*
559 * sn_io_late_init() - Perform any final platform specific IO initialization.
560 */
561
562int __init
563sn_io_late_init(void)
564{
565 struct pci_bus *bus;
566 struct pcibus_bussoft *bussoft;
567 cnodeid_t cnode;
568 nasid_t nasid;
569 cnodeid_t near_cnode;
570
571 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
572 return 0;
573
574 /*
575 * Setup closest node in pci_controller->node for
576 * PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
577 * info from the PROM).
578 */
579 bus = NULL;
580 while ((bus = pci_find_next_bus(bus)) != NULL) {
581 bussoft = SN_PCIBUS_BUSSOFT(bus);
582 nasid = NASID_GET(bussoft->bs_base);
583 cnode = nasid_to_cnodeid(nasid);
584 if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
585 (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE)) {
586 /* TIO PCI Bridge: find nearest node with CPUs */
587 int e = sn_hwperf_get_nearest_node(cnode, NULL,
588 &near_cnode);
589 if (e < 0) {
590 near_cnode = (cnodeid_t)-1; /* use any node */
591 printk(KERN_WARNING "pcibr_bus_fixup: failed "
592 "to find near node with CPUs to TIO "
593 "node %d, err=%d\n", cnode, e);
594 }
595 PCI_CONTROLLER(bus)->node = near_cnode;
596 } else if (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC) {
597 PCI_CONTROLLER(bus)->node = cnode;
598 }
599 }
600
601 sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */
602
603 return 0;
604}
605
606fs_initcall(sn_io_late_init);
607
608EXPORT_SYMBOL(sn_pci_fixup_slot);
609EXPORT_SYMBOL(sn_pci_unfixup_slot);
610EXPORT_SYMBOL(sn_bus_store_sysdata);
611EXPORT_SYMBOL(sn_bus_free_sysdata);
612EXPORT_SYMBOL(sn_generate_path);
613
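
In sn_pci_fixup_slot() above, the PROM describes the host bridge with a single 64-bit pdi_slot_host_handle: the bus number sits in the upper 32 bits and the devfn in the lower 32, which is exactly what the shift and mask in the code unpack. A small sketch of packing and unpacking such a handle; the helper name and example values are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* Pack a (bus, devfn) pair into one 64-bit handle, bus number on top. */
	static uint64_t pack_host_handle(unsigned int bus_no, unsigned int devfn)
	{
		return ((uint64_t)bus_no << 32) | devfn;
	}

	int main(void)
	{
		uint64_t handle = pack_host_handle(0x02, 0x18);		/* bus 2, device 3, function 0 */

		unsigned int bus_no = (handle >> 32) & 0xff;		/* upper word: bus number */
		unsigned int devfn  = handle & 0xffffffff;		/* lower word: devfn */

		printf("host bridge at bus %#x, devfn %#x (dev %u fn %u)\n",
		       bus_no, devfn, devfn >> 3, devfn & 7);
		return 0;
	}
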
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index dc09a6a28a37..9ad843e0383b 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -3,103 +3,28 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/bootmem.h>
10#include <linux/nodemask.h>
11#include <asm/sn/types.h> 9#include <asm/sn/types.h>
12#include <asm/sn/addrs.h> 10#include <asm/sn/addrs.h>
13#include <asm/sn/sn_feature_sets.h>
14#include <asm/sn/geo.h>
15#include <asm/sn/io.h> 11#include <asm/sn/io.h>
16#include <asm/sn/l1.h>
17#include <asm/sn/module.h> 12#include <asm/sn/module.h>
18#include <asm/sn/pcibr_provider.h> 13#include <asm/sn/intr.h>
19#include <asm/sn/pcibus_provider_defs.h> 14#include <asm/sn/pcibus_provider_defs.h>
20#include <asm/sn/pcidev.h> 15#include <asm/sn/pcidev.h>
21#include <asm/sn/simulator.h>
22#include <asm/sn/sn_sal.h> 16#include <asm/sn/sn_sal.h>
23#include <asm/sn/tioca_provider.h>
24#include <asm/sn/tioce_provider.h>
25#include "xtalk/hubdev.h" 17#include "xtalk/hubdev.h"
26#include "xtalk/xwidgetdev.h"
27
28
29extern void sn_init_cpei_timer(void);
30extern void register_sn_procfs(void);
31
32static struct list_head sn_sysdata_list;
33
34/* sysdata list struct */
35struct sysdata_el {
36 struct list_head entry;
37 void *sysdata;
38};
39
40struct slab_info {
41 struct hubdev_info hubdev;
42};
43
44struct brick {
45 moduleid_t id; /* Module ID of this module */
46 struct slab_info slab_info[MAX_SLABS + 1];
47};
48
49int sn_ioif_inited; /* SN I/O infrastructure initialized? */
50
51struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
52
53static int max_segment_number; /* Default highest segment number */
54static int max_pcibus_number = 255; /* Default highest pci bus number */
55 18
56/* 19/*
57 * Hooks and struct for unsupported pci providers 20 * The code in this file will only be executed when running with
21 * a PROM that does _not_ have base ACPI IO support.
22 * (i.e., SN_ACPI_BASE_SUPPORT() == 0)
58 */ 23 */
59 24
60static dma_addr_t 25static int max_segment_number; /* Default highest segment number */
61sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type) 26static int max_pcibus_number = 255; /* Default highest pci bus number */
62{
63 return 0;
64}
65
66static void
67sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
68{
69 return;
70}
71
72static void *
73sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
74{
75 return NULL;
76}
77
78static struct sn_pcibus_provider sn_pci_default_provider = {
79 .dma_map = sn_default_pci_map,
80 .dma_map_consistent = sn_default_pci_map,
81 .dma_unmap = sn_default_pci_unmap,
82 .bus_fixup = sn_default_pci_bus_fixup,
83};
84
85/*
86 * Retrieve the DMA Flush List given nasid, widget, and device.
87 * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
88 */
89static inline u64
90sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
91 u64 address)
92{
93 struct ia64_sal_retval ret_stuff;
94 ret_stuff.status = 0;
95 ret_stuff.v0 = 0;
96 27
97 SAL_CALL_NOLOCK(ret_stuff,
98 (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
99 (u64) nasid, (u64) widget_num,
100 (u64) device_num, (u64) address, 0, 0, 0);
101 return ret_stuff.status;
102}
103 28
104/* 29/*
105 * Retrieve the hub device info structure for the given nasid. 30 * Retrieve the hub device info structure for the given nasid.
@@ -131,93 +56,20 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
131 return ret_stuff.v0; 56 return ret_stuff.v0;
132} 57}
133 58
134/*
135 * Retrieve the pci device information given the bus and device|function number.
136 */
137static inline u64
138sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
139 u64 sn_irq_info)
140{
141 struct ia64_sal_retval ret_stuff;
142 ret_stuff.status = 0;
143 ret_stuff.v0 = 0;
144
145 SAL_CALL_NOLOCK(ret_stuff,
146 (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
147 (u64) segment, (u64) bus_number, (u64) devfn,
148 (u64) pci_dev,
149 sn_irq_info, 0, 0);
150 return ret_stuff.v0;
151}
152
153/*
154 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
155 * device.
156 */
157inline struct pcidev_info *
158sn_pcidev_info_get(struct pci_dev *dev)
159{
160 struct pcidev_info *pcidev;
161
162 list_for_each_entry(pcidev,
163 &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
164 if (pcidev->pdi_linux_pcidev == dev) {
165 return pcidev;
166 }
167 }
168 return NULL;
169}
170
171/* Older PROM flush WAR
172 *
173 * 01/16/06 -- This war will be in place until a new official PROM is released.
174 * Additionally note that the struct sn_flush_device_war also has to be
175 * removed from arch/ia64/sn/include/xtalk/hubdev.h
176 */
177static u8 war_implemented = 0;
178
179static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
180 struct sn_flush_device_common *common)
181{
182 struct sn_flush_device_war *war_list;
183 struct sn_flush_device_war *dev_entry;
184 struct ia64_sal_retval isrv = {0,0,0,0};
185
186 if (!war_implemented) {
187 printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
188 "PROM flush WAR\n");
189 war_implemented = 1;
190 }
191
192 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
193 if (!war_list)
194 BUG();
195
196 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
197 nasid, widget, __pa(war_list), 0, 0, 0 ,0);
198 if (isrv.status)
199 panic("sn_device_fixup_war failed: %s\n",
200 ia64_sal_strerror(isrv.status));
201
202 dev_entry = war_list + device;
203 memcpy(common,dev_entry, sizeof(*common));
204 kfree(war_list);
205
206 return isrv.status;
207}
208 59
209/* 60/*
210 * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for 61 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
211 * each node in the system. 62 * each node in the system. This function is only
63 * executed when running with a non-ACPI capable PROM.
212 */ 64 */
213static void __init sn_fixup_ionodes(void) 65static void __init sn_fixup_ionodes(void)
214{ 66{
215 struct sn_flush_device_kernel *sn_flush_device_kernel; 67
216 struct sn_flush_device_kernel *dev_entry;
217 struct hubdev_info *hubdev; 68 struct hubdev_info *hubdev;
218 u64 status; 69 u64 status;
219 u64 nasid; 70 u64 nasid;
220 int i, widget, device, size; 71 int i;
72 extern void sn_common_hubdev_init(struct hubdev_info *);
221 73
222 /* 74 /*
223 * Get SGI Specific HUB chipset information. 75 * Get SGI Specific HUB chipset information.
@@ -240,70 +92,47 @@ static void __init sn_fixup_ionodes(void)
240 max_segment_number = hubdev->max_segment_number; 92 max_segment_number = hubdev->max_segment_number;
241 max_pcibus_number = hubdev->max_pcibus_number; 93 max_pcibus_number = hubdev->max_pcibus_number;
242 } 94 }
95 sn_common_hubdev_init(hubdev);
96 }
97}
243 98
244 /* Attach the error interrupt handlers */ 99/*
245 if (nasid & 1) 100 * sn_legacy_pci_window_fixup - Create PCI controller windows for
246 ice_error_init(hubdev); 101 * legacy IO and MEM space. This needs to
247 else 102 * be done here, as the PROM does not have
248 hub_error_init(hubdev); 103 * ACPI support defining the root buses
249 104 * and their resources (_CRS).
250 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) 105 */
251 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; 106static void
252 107sn_legacy_pci_window_fixup(struct pci_controller *controller,
253 if (!hubdev->hdi_flush_nasid_list.widget_p) 108 u64 legacy_io, u64 legacy_mem)
254 continue; 109{
255 110 controller->window = kcalloc(2, sizeof(struct pci_window),
256 size = (HUB_WIDGET_ID_MAX + 1) * 111 GFP_KERNEL);
257 sizeof(struct sn_flush_device_kernel *); 112 if (controller->window == NULL)
258 hubdev->hdi_flush_nasid_list.widget_p =
259 kzalloc(size, GFP_KERNEL);
260 if (!hubdev->hdi_flush_nasid_list.widget_p)
261 BUG(); 113 BUG();
262 114 controller->window[0].offset = legacy_io;
263 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { 115 controller->window[0].resource.name = "legacy_io";
264 size = DEV_PER_WIDGET * 116 controller->window[0].resource.flags = IORESOURCE_IO;
265 sizeof(struct sn_flush_device_kernel); 117 controller->window[0].resource.start = legacy_io;
266 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); 118 controller->window[0].resource.end =
267 if (!sn_flush_device_kernel) 119 controller->window[0].resource.start + 0xffff;
268 BUG(); 120 controller->window[0].resource.parent = &ioport_resource;
269 121 controller->window[1].offset = legacy_mem;
270 dev_entry = sn_flush_device_kernel; 122 controller->window[1].resource.name = "legacy_mem";
271 for (device = 0; device < DEV_PER_WIDGET; 123 controller->window[1].resource.flags = IORESOURCE_MEM;
272 device++,dev_entry++) { 124 controller->window[1].resource.start = legacy_mem;
273 size = sizeof(struct sn_flush_device_common); 125 controller->window[1].resource.end =
274 dev_entry->common = kzalloc(size, GFP_KERNEL); 126 controller->window[1].resource.start + (1024 * 1024) - 1;
275 if (!dev_entry->common) 127 controller->window[1].resource.parent = &iomem_resource;
276 BUG(); 128 controller->windows = 2;
277
278 if (sn_prom_feature_available(
279 PRF_DEVICE_FLUSH_LIST))
280 status = sal_get_device_dmaflush_list(
281 nasid, widget, device,
282 (u64)(dev_entry->common));
283 else
284 status = sn_device_fixup_war(nasid,
285 widget, device,
286 dev_entry->common);
287 if (status != SALRET_OK)
288 panic("SAL call failed: %s\n",
289 ia64_sal_strerror(status));
290
291 spin_lock_init(&dev_entry->sfdl_flush_lock);
292 }
293
294 if (sn_flush_device_kernel)
295 hubdev->hdi_flush_nasid_list.widget_p[widget] =
296 sn_flush_device_kernel;
297 }
298 }
299} 129}
300 130
301/* 131/*
302 * sn_pci_window_fixup() - Create a pci_window for each device resource. 132 * sn_pci_window_fixup() - Create a pci_window for each device resource.
303 * Until ACPI support is added, we need this code 133 * It will setup pci_windows for use by
304 * to setup pci_windows for use by 134 * pcibios_bus_to_resource(), pcibios_resource_to_bus(),
305 * pcibios_bus_to_resource(), 135 * etc.
306 * pcibios_resource_to_bus(), etc.
307 */ 136 */
308static void 137static void
309sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, 138sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
@@ -342,60 +171,22 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
342 controller->window = new_window; 171 controller->window = new_window;
343} 172}
344 173
345void sn_pci_unfixup_slot(struct pci_dev *dev)
346{
347 struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
348
349 sn_irq_unfixup(dev);
350 pci_dev_put(host_pci_dev);
351 pci_dev_put(dev);
352}
353
354/* 174/*
355 * sn_pci_fixup_slot() - This routine sets up a slot's resources 175 * sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
356 * consistent with the Linux PCI abstraction layer. Resources acquired 176 * and need to convert the pci_dev->resource
357 * from our PCI provider include PIO maps to BAR space and interrupt 177 * 'start' and 'end' addresses to mapped addresses,
358 objects. 178 * and set up the pci_controller->window array entries.
359 */ 179 */
360void sn_pci_fixup_slot(struct pci_dev *dev) 180void
181sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
361{ 182{
362 unsigned int count = 0; 183 unsigned int count = 0;
363 int idx; 184 int idx;
364 int segment = pci_domain_nr(dev->bus);
365 int status = 0;
366 struct pcibus_bussoft *bs;
367 struct pci_bus *host_pci_bus;
368 struct pci_dev *host_pci_dev;
369 struct pcidev_info *pcidev_info;
370 s64 pci_addrs[PCI_ROM_RESOURCE + 1]; 185 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
371 struct sn_irq_info *sn_irq_info; 186 unsigned long addr, end, size, start;
372 unsigned long size;
373 unsigned int bus_no, devfn;
374
375 pci_dev_get(dev); /* for the sysdata pointer */
376 pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
377 if (!pcidev_info)
378 BUG(); /* Cannot afford to run out of memory */
379
380 sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
381 if (!sn_irq_info)
382 BUG(); /* Cannot afford to run out of memory */
383
384 /* Call to retrieve pci device information needed by kernel. */
385 status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
386 dev->devfn,
387 (u64) __pa(pcidev_info),
388 (u64) __pa(sn_irq_info));
389 if (status)
390 BUG(); /* Cannot get platform pci device information */
391
392 /* Add pcidev_info to list in sn_pci_controller struct */
393 list_add_tail(&pcidev_info->pdi_list,
394 &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
395 187
396 /* Copy over PIO Mapped Addresses */ 188 /* Copy over PIO Mapped Addresses */
397 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { 189 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
398 unsigned long start, end, addr;
399 190
400 if (!pcidev_info->pdi_pio_mapped_addr[idx]) { 191 if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
401 pci_addrs[idx] = -1; 192 pci_addrs[idx] = -1;
@@ -419,60 +210,28 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
419 dev->resource[idx].parent = &ioport_resource; 210 dev->resource[idx].parent = &ioport_resource;
420 else 211 else
421 dev->resource[idx].parent = &iomem_resource; 212 dev->resource[idx].parent = &iomem_resource;
213 /* If ROM, mark as shadowed in PROM */
214 if (idx == PCI_ROM_RESOURCE)
215 dev->resource[idx].flags |= IORESOURCE_ROM_BIOS_COPY;
422 } 216 }
423 /* Create a pci_window in the pci_controller struct for 217 /* Create a pci_window in the pci_controller struct for
424 * each device resource. 218 * each device resource.
425 */ 219 */
426 if (count > 0) 220 if (count > 0)
427 sn_pci_window_fixup(dev, count, pci_addrs); 221 sn_pci_window_fixup(dev, count, pci_addrs);
428
429 /*
430 * Using the PROMs values for the PCI host bus, get the Linux
431 * PCI host_pci_dev struct and set up host bus linkages
432 */
433
434 bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
435 devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
436 host_pci_bus = pci_find_bus(segment, bus_no);
437 host_pci_dev = pci_get_slot(host_pci_bus, devfn);
438
439 pcidev_info->host_pci_dev = host_pci_dev;
440 pcidev_info->pdi_linux_pcidev = dev;
441 pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
442 bs = SN_PCIBUS_BUSSOFT(dev->bus);
443 pcidev_info->pdi_pcibus_info = bs;
444
445 if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
446 SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
447 } else {
448 SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
449 }
450
451 /* Only set up IRQ stuff if this device has a host bus context */
452 if (bs && sn_irq_info->irq_irq) {
453 pcidev_info->pdi_sn_irq_info = sn_irq_info;
454 dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
455 sn_irq_fixup(dev, sn_irq_info);
456 } else {
457 pcidev_info->pdi_sn_irq_info = NULL;
458 kfree(sn_irq_info);
459 }
460} 222}
461 223
462/* 224/*
463 * sn_pci_controller_fixup() - This routine sets up a bus's resources 225 * sn_pci_controller_fixup() - This routine sets up a bus's resources
464 * consistent with the Linux PCI abstraction layer. 226 * consistent with the Linux PCI abstraction layer.
465 */ 227 */
466void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) 228static void
229sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
467{ 230{
468 int status; 231 s64 status = 0;
469 int nasid, cnode;
470 struct pci_controller *controller; 232 struct pci_controller *controller;
471 struct sn_pci_controller *sn_controller;
472 struct pcibus_bussoft *prom_bussoft_ptr; 233 struct pcibus_bussoft *prom_bussoft_ptr;
473 struct hubdev_info *hubdev_info; 234
474 void *provider_soft;
475 struct sn_pcibus_provider *provider;
476 235
477 status = sal_get_pcibus_info((u64) segment, (u64) busnum, 236 status = sal_get_pcibus_info((u64) segment, (u64) busnum,
478 (u64) ia64_tpa(&prom_bussoft_ptr)); 237 (u64) ia64_tpa(&prom_bussoft_ptr));
@@ -480,261 +239,77 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
480 return; /*bus # does not exist */ 239 return; /*bus # does not exist */
481 prom_bussoft_ptr = __va(prom_bussoft_ptr); 240 prom_bussoft_ptr = __va(prom_bussoft_ptr);
482 241
483 /* Allocate a sn_pci_controller, which has a pci_controller struct 242 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
484 * as the first member. 243 if (!controller)
485 */
486 sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
487 if (!sn_controller)
488 BUG(); 244 BUG();
489 INIT_LIST_HEAD(&sn_controller->pcidev_info);
490 controller = &sn_controller->pci_controller;
491 controller->segment = segment; 245 controller->segment = segment;
492 246
493 if (bus == NULL) {
494 bus = pci_scan_bus(busnum, &pci_root_ops, controller);
495 if (bus == NULL)
496 goto error_return; /* error, or bus already scanned */
497 bus->sysdata = NULL;
498 }
499
500 if (bus->sysdata)
501 goto error_return; /* sysdata already alloc'd */
502
503 /* 247 /*
504 * Per-provider fixup. Copies the contents from prom to local 248 * Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
505 * area and links SN_PCIBUS_BUSSOFT(). 249 * (platform_data will be overwritten later in sn_common_bus_fixup())
506 */ 250 */
251 controller->platform_data = prom_bussoft_ptr;
507 252
508 if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) 253 bus = pci_scan_bus(busnum, &pci_root_ops, controller);
509 goto error_return; /* unsupported asic type */ 254 if (bus == NULL)
510 255 goto error_return; /* error, or bus already scanned */
511 if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
512 goto error_return; /* no further fixup necessary */
513
514 provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
515 if (provider == NULL)
516 goto error_return; /* no provider registerd for this asic */
517 256
518 bus->sysdata = controller; 257 bus->sysdata = controller;
519 if (provider->bus_fixup)
520 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
521 else
522 provider_soft = NULL;
523
524 if (provider_soft == NULL) {
525 /* fixup failed or not applicable */
526 bus->sysdata = NULL;
527 goto error_return;
528 }
529
530 /*
531 * Setup pci_windows for legacy IO and MEM space.
532 * (Temporary until ACPI support is in place.)
533 */
534 controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
535 if (controller->window == NULL)
536 BUG();
537 controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
538 controller->window[0].resource.name = "legacy_io";
539 controller->window[0].resource.flags = IORESOURCE_IO;
540 controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
541 controller->window[0].resource.end =
542 controller->window[0].resource.start + 0xffff;
543 controller->window[0].resource.parent = &ioport_resource;
544 controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
545 controller->window[1].resource.name = "legacy_mem";
546 controller->window[1].resource.flags = IORESOURCE_MEM;
547 controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
548 controller->window[1].resource.end =
549 controller->window[1].resource.start + (1024 * 1024) - 1;
550 controller->window[1].resource.parent = &iomem_resource;
551 controller->windows = 2;
552
553 /*
554 * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
555 * after this point.
556 */
557
558 PCI_CONTROLLER(bus)->platform_data = provider_soft;
559 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
560 cnode = nasid_to_cnodeid(nasid);
561 hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
562 SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
563 &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
564 258
565 /*
566 * If the node information we obtained during the fixup phase is invalid
567 * then set controller->node to -1 (undetermined)
568 */
569 if (controller->node >= num_online_nodes()) {
570 struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
571
572 printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
573 "L_IO=%lx L_MEM=%lx BASE=%lx\n",
574 b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
575 b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
576 printk(KERN_WARNING "on node %d but only %d nodes online."
577 "Association set to undetermined.\n",
578 controller->node, num_online_nodes());
579 controller->node = -1;
580 }
581 return; 259 return;
582 260
583error_return: 261error_return:
584 262
585 kfree(sn_controller); 263 kfree(controller);
586 return; 264 return;
587} 265}
588 266
589void sn_bus_store_sysdata(struct pci_dev *dev) 267/*
268 * sn_bus_fixup - Perform SN-specific fixups on a bus and each of its devices
269 */
270void
271sn_bus_fixup(struct pci_bus *bus)
590{ 272{
591 struct sysdata_el *element; 273 struct pci_dev *pci_dev = NULL;
592 274 struct pcibus_bussoft *prom_bussoft_ptr;
593 element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); 275 extern void sn_common_bus_fixup(struct pci_bus *,
594 if (!element) { 276 struct pcibus_bussoft *);
595 dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); 277
596 return; 278
597 } 279 if (!bus->parent) { /* If root bus */
598 element->sysdata = SN_PCIDEV_INFO(dev); 280 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
599 list_add(&element->entry, &sn_sysdata_list); 281 if (prom_bussoft_ptr == NULL) {
600} 282 printk(KERN_ERR
283 "sn_bus_fixup: 0x%04x:0x%02x Unable to "
284 "obtain prom_bussoft_ptr\n",
285 pci_domain_nr(bus), bus->number);
286 return;
287 }
288 sn_common_bus_fixup(bus, prom_bussoft_ptr);
289 sn_legacy_pci_window_fixup(PCI_CONTROLLER(bus),
290 prom_bussoft_ptr->bs_legacy_io,
291 prom_bussoft_ptr->bs_legacy_mem);
292 }
293 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
294 sn_pci_fixup_slot(pci_dev);
295 }
601 296
602void sn_bus_free_sysdata(void)
603{
604 struct sysdata_el *element;
605 struct list_head *list, *safe;
606
607 list_for_each_safe(list, safe, &sn_sysdata_list) {
608 element = list_entry(list, struct sysdata_el, entry);
609 list_del(&element->entry);
610 list_del(&(((struct pcidev_info *)
611 (element->sysdata))->pdi_list));
612 kfree(element->sysdata);
613 kfree(element);
614 }
615 return;
616} 297}
617 298
618/* 299/*
619 * Ugly hack to get PCI setup until we have a proper ACPI namespace. 300 * sn_io_init - PROM does not have ACPI support to define nodes or root buses,
301 * so we need to do things the hard way, including initiating the
302 * bus scanning ourselves.
620 */ 303 */
621 304
622#define PCI_BUSES_TO_SCAN 256 305void __init sn_io_init(void)
623
624static int __init sn_pci_init(void)
625{ 306{
626 int i, j; 307 int i, j;
627 struct pci_dev *pci_dev = NULL;
628
629 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
630 return 0;
631
632 /*
633 * prime sn_pci_provider[]. Individial provider init routines will
634 * override their respective default entries.
635 */
636
637 for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
638 sn_pci_provider[i] = &sn_pci_default_provider;
639 308
640 pcibr_init_provider();
641 tioca_init_provider();
642 tioce_init_provider();
643
644 /*
645 * This is needed to avoid bounce limit checks in the blk layer
646 */
647 ia64_max_iommu_merge_mask = ~PAGE_MASK;
648 sn_fixup_ionodes(); 309 sn_fixup_ionodes();
649 sn_irq_lh_init();
650 INIT_LIST_HEAD(&sn_sysdata_list);
651 sn_init_cpei_timer();
652
653#ifdef CONFIG_PROC_FS
654 register_sn_procfs();
655#endif
656 310
657 /* busses are not known yet ... */ 311 /* busses are not known yet ... */
658 for (i = 0; i <= max_segment_number; i++) 312 for (i = 0; i <= max_segment_number; i++)
659 for (j = 0; j <= max_pcibus_number; j++) 313 for (j = 0; j <= max_pcibus_number; j++)
660 sn_pci_controller_fixup(i, j, NULL); 314 sn_pci_controller_fixup(i, j, NULL);
661
662 /*
663 * Generic Linux PCI Layer has created the pci_bus and pci_dev
664 * structures - time for us to add our SN PLatform specific
665 * information.
666 */
667
668 while ((pci_dev =
669 pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
670 sn_pci_fixup_slot(pci_dev);
671
672 sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
673
674 return 0;
675}
676
677/*
678 * hubdev_init_node() - Creates the HUB data structure and link them to it's
679 * own NODE specific data area.
680 */
681void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
682{
683 struct hubdev_info *hubdev_info;
684 int size;
685 pg_data_t *pg;
686
687 size = sizeof(struct hubdev_info);
688
689 if (node >= num_online_nodes()) /* Headless/memless IO nodes */
690 pg = NODE_DATA(0);
691 else
692 pg = NODE_DATA(node);
693
694 hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
695
696 npda->pdinfo = (void *)hubdev_info;
697} 315}
698
699geoid_t
700cnodeid_get_geoid(cnodeid_t cnode)
701{
702 struct hubdev_info *hubdev;
703
704 hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
705 return hubdev->hdi_geoid;
706}
707
708void sn_generate_path(struct pci_bus *pci_bus, char *address)
709{
710 nasid_t nasid;
711 cnodeid_t cnode;
712 geoid_t geoid;
713 moduleid_t moduleid;
714 u16 bricktype;
715
716 nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
717 cnode = nasid_to_cnodeid(nasid);
718 geoid = cnodeid_get_geoid(cnode);
719 moduleid = geo_module(geoid);
720
721 sprintf(address, "module_%c%c%c%c%.2d",
722 '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
723 '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
724 '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
725 MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
726
727 /* Tollhouse requires slot id to be displayed */
728 bricktype = MODULE_GET_BTYPE(moduleid);
729 if ((bricktype == L1_BRICKTYPE_191010) ||
730 (bricktype == L1_BRICKTYPE_1932))
731 sprintf(address, "%s^%d", address, geo_slot(geoid));
732}
733
734subsys_initcall(sn_pci_init);
735EXPORT_SYMBOL(sn_pci_fixup_slot);
736EXPORT_SYMBOL(sn_pci_unfixup_slot);
737EXPORT_SYMBOL(sn_pci_controller_fixup);
738EXPORT_SYMBOL(sn_bus_store_sysdata);
739EXPORT_SYMBOL(sn_bus_free_sysdata);
740EXPORT_SYMBOL(sn_generate_path);
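
The io_init.c changes above leave the legacy path with no PROM-provided bus description, so root buses are found by brute force: every (segment, bus) pair up to the SAL-reported maxima is probed, the PROM bussoft pointer is parked in platform_data, and sn_bus_fixup() later completes the per-bus and per-slot setup. A minimal userspace sketch of that control flow follows; every name in it is a simplified stand-in for the real kernel structures and SAL calls, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct controller {
	int segment;
	void *platform_data;		/* temporarily holds the PROM bussoft pointer */
};

/* stand-in for sal_get_pcibus_info(): non-NULL means "this bus exists" */
static void *probe_bus(int segment, int busnum)
{
	return (segment == 0 && busnum < 2) ? (void *)0x1 : NULL;
}

/* stand-in for sn_pci_controller_fixup(): allocate a controller only for real buses */
static void controller_fixup(int segment, int busnum)
{
	void *prom_bussoft = probe_bus(segment, busnum);
	struct controller *c;

	if (!prom_bussoft)
		return;				/* bus number does not exist */

	c = calloc(1, sizeof(*c));
	if (!c)
		abort();
	c->segment = segment;
	c->platform_data = prom_bussoft;	/* consumed later by the bus fixup pass */
	printf("would scan %04x:%02x\n", segment, busnum);
	free(c);				/* the kernel keeps it, of course */
}

int main(void)
{
	int max_segment_number = 0, max_pcibus_number = 255;
	int i, j;

	/* buses are not known yet: probe the whole space, as sn_io_init() does */
	for (i = 0; i <= max_segment_number; i++)
		for (j = 0; j <= max_pcibus_number; j++)
			controller_fixup(i, j);
	return 0;
}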
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index 7ce3cdad627b..4aa4f301d56d 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -3,10 +3,11 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/acpi.h>
10#include <asm/io.h> 11#include <asm/io.h>
11#include <asm/delay.h> 12#include <asm/delay.h>
12#include <asm/vga.h> 13#include <asm/vga.h>
@@ -15,6 +16,7 @@
15#include <asm/sn/pda.h> 16#include <asm/sn/pda.h>
16#include <asm/sn/sn_cpuid.h> 17#include <asm/sn/sn_cpuid.h>
17#include <asm/sn/shub_mmr.h> 18#include <asm/sn/shub_mmr.h>
19#include <asm/sn/acpi.h>
18 20
19#define IS_LEGACY_VGA_IOPORT(p) \ 21#define IS_LEGACY_VGA_IOPORT(p) \
20 (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df)) 22 (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
@@ -31,11 +33,14 @@ void *sn_io_addr(unsigned long port)
31{ 33{
32 if (!IS_RUNNING_ON_SIMULATOR()) { 34 if (!IS_RUNNING_ON_SIMULATOR()) {
33 if (IS_LEGACY_VGA_IOPORT(port)) 35 if (IS_LEGACY_VGA_IOPORT(port))
34 port += vga_console_iobase; 36 return (__ia64_mk_io_addr(port));
35 /* On sn2, legacy I/O ports don't point at anything */ 37 /* On sn2, legacy I/O ports don't point at anything */
36 if (port < (64 * 1024)) 38 if (port < (64 * 1024))
37 return NULL; 39 return NULL;
38 return ((void *)(port | __IA64_UNCACHED_OFFSET)); 40 if (SN_ACPI_BASE_SUPPORT())
41 return (__ia64_mk_io_addr(port));
42 else
43 return ((void *)(port | __IA64_UNCACHED_OFFSET));
39 } else { 44 } else {
40 /* but the simulator uses them... */ 45 /* but the simulator uses them... */
41 unsigned long addr; 46 unsigned long addr;
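
The iomv.c hunk changes how an I/O port is turned into an address: legacy VGA ports now go through the normal ia64 io_space translation, other sub-64K ports still map to nothing on sn2, and only on a PROM without base ACPI support does the raw uncached-offset form survive. A compilable sketch of that decision ladder follows, using stand-in helpers and constants rather than the real macros.

#include <stdint.h>
#include <stdio.h>

#define FAKE_UNCACHED_OFFSET 0xc000000000000000UL	/* stand-in for __IA64_UNCACHED_OFFSET */

static int acpi_base_support = 1;	/* stand-in for SN_ACPI_BASE_SUPPORT() */

static int is_legacy_vga_ioport(unsigned long p)
{
	return (p >= 0x3b0 && p <= 0x3bb) || (p >= 0x3c0 && p <= 0x3df);
}

/* stand-in for __ia64_mk_io_addr(): the io_space[]-based translation in the kernel */
static uint64_t mk_io_addr(unsigned long port)
{
	return 0x1000000000000000UL + port;
}

static uint64_t sn_io_addr_sketch(unsigned long port)
{
	if (is_legacy_vga_ioport(port))
		return mk_io_addr(port);	/* VGA now resolves through io_space */
	if (port < 64 * 1024)
		return 0;			/* other legacy ports map to nothing on sn2 */
	if (acpi_base_support)
		return mk_io_addr(port);	/* ACPI PROM: normal ia64 IO translation */
	return port | FAKE_UNCACHED_OFFSET;	/* old PROM: raw uncached address */
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)sn_io_addr_sketch(0x3c0));
	printf("0x%llx\n", (unsigned long long)sn_io_addr_sketch(0x100000));
	return 0;
}

This pairs with the setup.c change later in the patch, where vga_console_iobase seeds io_space[0] so the legacy VGA range resolves through that same translation.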
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 7bb6ad188ba3..8c5bee01eaa2 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -117,7 +117,10 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
117 nasid_t nasid, int slice) 117 nasid_t nasid, int slice)
118{ 118{
119 int vector; 119 int vector;
120 int cpuid;
121#ifdef CONFIG_SMP
120 int cpuphys; 122 int cpuphys;
123#endif
121 int64_t bridge; 124 int64_t bridge;
122 int local_widget, status; 125 int local_widget, status;
123 nasid_t local_nasid; 126 nasid_t local_nasid;
@@ -146,7 +149,6 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
146 vector = sn_irq_info->irq_irq; 149 vector = sn_irq_info->irq_irq;
147 /* Free the old PROM new_irq_info structure */ 150 /* Free the old PROM new_irq_info structure */
148 sn_intr_free(local_nasid, local_widget, new_irq_info); 151 sn_intr_free(local_nasid, local_widget, new_irq_info);
149 /* Update kernels new_irq_info with new target info */
150 unregister_intr_pda(new_irq_info); 152 unregister_intr_pda(new_irq_info);
151 153
152 /* allocate a new PROM new_irq_info struct */ 154 /* allocate a new PROM new_irq_info struct */
@@ -160,8 +162,10 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
160 return NULL; 162 return NULL;
161 } 163 }
162 164
163 cpuphys = nasid_slice_to_cpuid(nasid, slice); 165 /* Update kernels new_irq_info with new target info */
164 new_irq_info->irq_cpuid = cpuphys; 166 cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
167 new_irq_info->irq_slice);
168 new_irq_info->irq_cpuid = cpuid;
165 register_intr_pda(new_irq_info); 169 register_intr_pda(new_irq_info);
166 170
167 pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; 171 pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
@@ -180,6 +184,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
180 call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 184 call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
181 185
182#ifdef CONFIG_SMP 186#ifdef CONFIG_SMP
187 cpuphys = cpu_physical_id(cpuid);
183 set_irq_affinity_info((vector & 0xff), cpuphys, 0); 188 set_irq_affinity_info((vector & 0xff), cpuphys, 0);
184#endif 189#endif
185 190
@@ -201,7 +206,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
201} 206}
202 207
203struct hw_interrupt_type irq_type_sn = { 208struct hw_interrupt_type irq_type_sn = {
204 .typename = "SN hub", 209 .name = "SN hub",
205 .startup = sn_startup_irq, 210 .startup = sn_startup_irq,
206 .shutdown = sn_shutdown_irq, 211 .shutdown = sn_shutdown_irq,
207 .enable = sn_enable_irq, 212 .enable = sn_enable_irq,
@@ -299,6 +304,9 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
299 nasid_t nasid = sn_irq_info->irq_nasid; 304 nasid_t nasid = sn_irq_info->irq_nasid;
300 int slice = sn_irq_info->irq_slice; 305 int slice = sn_irq_info->irq_slice;
301 int cpu = nasid_slice_to_cpuid(nasid, slice); 306 int cpu = nasid_slice_to_cpuid(nasid, slice);
307#ifdef CONFIG_SMP
308 int cpuphys;
309#endif
302 310
303 pci_dev_get(pci_dev); 311 pci_dev_get(pci_dev);
304 sn_irq_info->irq_cpuid = cpu; 312 sn_irq_info->irq_cpuid = cpu;
@@ -311,6 +319,10 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
311 spin_unlock(&sn_irq_info_lock); 319 spin_unlock(&sn_irq_info_lock);
312 320
313 register_intr_pda(sn_irq_info); 321 register_intr_pda(sn_irq_info);
322#ifdef CONFIG_SMP
323 cpuphys = cpu_physical_id(cpu);
324 set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
325#endif
314} 326}
315 327
316void sn_irq_unfixup(struct pci_dev *pci_dev) 328void sn_irq_unfixup(struct pci_dev *pci_dev)
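
The irq.c hunks make sn_retarget_vector() compute the kernel-side CPU from the nasid/slice the PROM actually granted in the new irq_info, rather than from the requested target, and convert that logical CPU to a physical id only where the SMP affinity bookkeeping needs it. A small stand-alone sketch of that ordering follows; the struct and helpers are stand-ins for the SN/ia64 ones.

#include <stdio.h>

struct irq_info {
	int irq_nasid;
	int irq_slice;
	int irq_cpuid;
};

/* stand-in for nasid_slice_to_cpuid(): returns a logical cpu number */
static int nasid_slice_to_cpu(int nasid, int slice)
{
	return nasid * 4 + slice;
}

/* stand-in for cpu_physical_id() */
static int physical_id(int cpu)
{
	return 0x100 + cpu;
}

static void retarget(struct irq_info *granted, int vector)
{
	/* take the target from what the PROM granted, not what was asked for */
	int cpu = nasid_slice_to_cpu(granted->irq_nasid, granted->irq_slice);

	granted->irq_cpuid = cpu;		/* kernel-side (logical) target */
	printf("irq %d -> cpu %d (phys 0x%x)\n",
	       vector, cpu, physical_id(cpu));	/* affinity info wants the physical id */
}

int main(void)
{
	struct irq_info granted = { .irq_nasid = 1, .irq_slice = 2 };

	retarget(&granted, 55);
	return 0;
}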
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index 6ffd1f850d41..b3a435fd70fb 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -136,10 +136,6 @@ int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
136 */ 136 */
137 msg.data = 0x100 + irq; 137 msg.data = 0x100 + irq;
138 138
139#ifdef CONFIG_SMP
140 set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0);
141#endif
142
143 write_msi_msg(irq, &msg); 139 write_msi_msg(irq, &msg);
144 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
145 141
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 5f2dcba7fa8d..a934ad069425 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -65,7 +65,6 @@ extern void sn_timer_init(void);
65extern unsigned long last_time_offset; 65extern unsigned long last_time_offset;
66extern void (*ia64_mark_idle) (int); 66extern void (*ia64_mark_idle) (int);
67extern void snidle(int); 67extern void snidle(int);
68extern unsigned char acpi_kbd_controller_present;
69extern unsigned long long (*ia64_printk_clock)(void); 68extern unsigned long long (*ia64_printk_clock)(void);
70 69
71unsigned long sn_rtc_cycles_per_second; 70unsigned long sn_rtc_cycles_per_second;
@@ -389,6 +388,14 @@ void __init sn_setup(char **cmdline_p)
389 ia64_sn_plat_set_error_handling_features(); // obsolete 388 ia64_sn_plat_set_error_handling_features(); // obsolete
390 ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV); 389 ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
391 ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES); 390 ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
391 /*
392 * Note: The calls to notify the PROM of ACPI and PCI Segment
393 * support must be done prior to acpi_load_tables(), as
394 * an ACPI capable PROM will rebuild the DSDT as a result
395 * of the call.
396 */
397 ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
398 ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
392 399
393 400
394#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) 401#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
@@ -414,6 +421,16 @@ void __init sn_setup(char **cmdline_p)
414 if (! vga_console_membase) 421 if (! vga_console_membase)
415 sn_scan_pcdp(); 422 sn_scan_pcdp();
416 423
424 /*
425 * Setup legacy IO space.
426 * vga_console_iobase maps to PCI IO Space address 0 on the
427 * bus containing the VGA console.
428 */
429 if (vga_console_iobase) {
430 io_space[0].mmio_base = vga_console_iobase;
431 io_space[0].sparse = 0;
432 }
433
417 if (vga_console_membase) { 434 if (vga_console_membase) {
418 /* usable vga ... make tty0 the preferred default console */ 435 /* usable vga ... make tty0 the preferred default console */
419 if (!strstr(*cmdline_p, "console=")) 436 if (!strstr(*cmdline_p, "console="))
@@ -452,17 +469,6 @@ void __init sn_setup(char **cmdline_p)
452 469
453 ia64_printk_clock = ia64_sn2_printk_clock; 470 ia64_printk_clock = ia64_sn2_printk_clock;
454 471
455 /*
456 * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
457 * support here so we don't have to listen to failed keyboard probe
458 * messages.
459 */
460 if (is_shub1() && version <= 0x0209 && acpi_kbd_controller_present) {
461 printk(KERN_INFO "Disabling legacy keyboard support as prom "
462 "is too old and doesn't provide FADT\n");
463 acpi_kbd_controller_present = 0;
464 }
465
466 printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); 472 printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
467 473
468 /* 474 /*
@@ -763,5 +769,13 @@ int sn_prom_feature_available(int id)
763 return 0; 769 return 0;
764 return test_bit(id, sn_prom_features); 770 return test_bit(id, sn_prom_features);
765} 771}
772
773void
774sn_kernel_launch_event(void)
775{
776 /* ignore status until we understand possible failure, if any */
777 if (ia64_sn_kernel_launch_event())
778 printk(KERN_ERR "KEXEC is not supported in this PROM. Please update the PROM.\n");
779}
766EXPORT_SYMBOL(sn_prom_feature_available); 780EXPORT_SYMBOL(sn_prom_feature_available);
767 781
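
The setup.c hunk adds an ordering constraint spelled out in its comment: the OSF_PCISEGMENT_ENABLE and OSF_ACPI_ENABLE notifications must reach the PROM before acpi_load_tables(), because an ACPI capable PROM rebuilds the DSDT in response. A toy sketch of that ordering follows, with stand-in functions in place of the real ia64_sn_set_os_feature() and ACPI table-loading calls.

#include <stdio.h>

static int dsdt_generation;		/* bumped whenever the "PROM" rebuilds its DSDT */

/* stand-in for ia64_sn_set_os_feature() */
static void set_os_feature(const char *feature)
{
	dsdt_generation++;		/* the PROM reacts to the notification */
	printf("feature %s announced (DSDT generation %d)\n",
	       feature, dsdt_generation);
}

/* stand-in for acpi_load_tables() */
static void load_tables(void)
{
	printf("parsing DSDT generation %d\n", dsdt_generation);
}

int main(void)
{
	/* correct order: announce first, then parse the tables the PROM rebuilt */
	set_os_feature("PCISEGMENT_ENABLE");
	set_os_feature("ACPI_ENABLE");
	load_tables();
	return 0;
}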
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index feaf1a6e8101..493380b2c05f 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -552,7 +552,7 @@ static void __exit tiocx_exit(void)
552 bus_unregister(&tiocx_bus_type); 552 bus_unregister(&tiocx_bus_type);
553} 553}
554 554
555subsys_initcall(tiocx_init); 555fs_initcall(tiocx_init);
556module_exit(tiocx_exit); 556module_exit(tiocx_exit);
557 557
558/************************************************************************ 558/************************************************************************
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 27dd7df0f446..6846dc9b432d 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
@@ -109,7 +109,6 @@ void *
109pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) 109pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
110{ 110{
111 int nasid, cnode, j; 111 int nasid, cnode, j;
112 cnodeid_t near_cnode;
113 struct hubdev_info *hubdev_info; 112 struct hubdev_info *hubdev_info;
114 struct pcibus_info *soft; 113 struct pcibus_info *soft;
115 struct sn_flush_device_kernel *sn_flush_device_kernel; 114 struct sn_flush_device_kernel *sn_flush_device_kernel;
@@ -186,20 +185,6 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
186 return NULL; 185 return NULL;
187 } 186 }
188 187
189 if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
190 /* TIO PCI Bridge: find nearest node with CPUs */
191 int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
192
193 if (e < 0) {
194 near_cnode = (cnodeid_t)-1; /* use any node */
195 printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
196 "near node with CPUs to TIO node %d, err=%d\n",
197 cnode, e);
198 }
199 controller->node = near_cnode;
200 }
201 else
202 controller->node = cnode;
203 return soft; 188 return soft;
204} 189}
205 190
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 46e16dcf5971..35f854fb6120 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -15,7 +15,6 @@
15#include <asm/sn/pcidev.h> 15#include <asm/sn/pcidev.h>
16#include <asm/sn/pcibus_provider_defs.h> 16#include <asm/sn/pcibus_provider_defs.h>
17#include <asm/sn/tioce_provider.h> 17#include <asm/sn/tioce_provider.h>
18#include <asm/sn/sn2/sn_hwperf.h>
19 18
20/* 19/*
21 * 1/26/2006 20 * 1/26/2006
@@ -990,8 +989,6 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
990static void * 989static void *
991tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller) 990tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
992{ 991{
993 int my_nasid;
994 cnodeid_t my_cnode, mem_cnode;
995 struct tioce_common *tioce_common; 992 struct tioce_common *tioce_common;
996 struct tioce_kernel *tioce_kern; 993 struct tioce_kernel *tioce_kern;
997 struct tioce __iomem *tioce_mmr; 994 struct tioce __iomem *tioce_mmr;
@@ -1035,21 +1032,6 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
1035 tioce_common->ce_pcibus.bs_persist_segment, 1032 tioce_common->ce_pcibus.bs_persist_segment,
1036 tioce_common->ce_pcibus.bs_persist_busnum); 1033 tioce_common->ce_pcibus.bs_persist_busnum);
1037 1034
1038 /*
1039 * identify closest nasid for memory allocations
1040 */
1041
1042 my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
1043 my_cnode = nasid_to_cnodeid(my_nasid);
1044
1045 if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
1046 printk(KERN_WARNING "tioce_bus_fixup: failed to find "
1047 "closest node with MEM to TIO node %d\n", my_cnode);
1048 mem_cnode = (cnodeid_t)-1; /* use any node */
1049 }
1050
1051 controller->node = mem_cnode;
1052
1053 return tioce_common; 1035 return tioce_common;
1054} 1036}
1055 1037