Diffstat:
-rw-r--r--  arch/ia64/Kconfig                    |   19
-rw-r--r--  arch/ia64/configs/tiger_defconfig    |   96
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c      |   34
-rw-r--r--  arch/ia64/kernel/acpi.c              |   23
-rw-r--r--  arch/ia64/kernel/entry.S             |    2
-rw-r--r--  arch/ia64/kernel/fsys.S              |    4
-rw-r--r--  arch/ia64/kernel/mca_drv.c           |    4
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S       |   18
-rw-r--r--  arch/ia64/kernel/perfmon.c           |   43
-rw-r--r--  arch/ia64/kernel/process.c           |   55
-rw-r--r--  arch/ia64/kernel/signal.c            |    3
-rw-r--r--  arch/ia64/lib/flush.S                |    6
-rw-r--r--  arch/ia64/lib/memcpy_mck.S           |    2
-rw-r--r--  arch/ia64/lib/memset.S               |    2
-rw-r--r--  arch/ia64/sn/kernel/Makefile         |    7
-rw-r--r--  arch/ia64/sn/kernel/io_init.c        |   10
-rw-r--r--  arch/ia64/sn/kernel/mca.c            |   34
-rw-r--r--  arch/ia64/sn/kernel/setup.c          |   40
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c          |   60
-rw-r--r--  arch/ia64/sn/kernel/xp_main.c        |  289
-rw-r--r--  arch/ia64/sn/kernel/xp_nofault.S     |   31
-rw-r--r--  arch/ia64/sn/kernel/xpc.h            |  991
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c    | 2297
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c       | 1064
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c  |  984
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c          |  715
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c   |    2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c    |    2
-rw-r--r--  drivers/char/Kconfig                 |    2
-rw-r--r--  include/asm-ia64/sn/addrs.h          |    8
-rw-r--r--  include/asm-ia64/sn/arch.h           |   17
-rw-r--r--  include/asm-ia64/sn/fetchop.h        |   85
-rw-r--r--  include/asm-ia64/sn/l1.h             |    3
-rw-r--r--  include/asm-ia64/sn/nodepda.h        |   15
-rw-r--r--  include/asm-ia64/sn/pda.h            |    9
-rw-r--r--  include/asm-ia64/sn/shub_mmr.h       |   24
-rw-r--r--  include/asm-ia64/sn/shubio.h         | 3116
-rw-r--r--  include/asm-ia64/sn/sn_cpuid.h       |   25
-rw-r--r--  include/asm-ia64/sn/sn_fru.h         |   44
-rw-r--r--  include/asm-ia64/sn/sn_sal.h         |   65
-rw-r--r--  include/asm-ia64/sn/sndrv.h          |   47
-rw-r--r--  include/asm-ia64/sn/xp.h             |  436
-rw-r--r--  kernel/exit.c                        |    2
-rw-r--r--  mm/page_alloc.c                      |    2
44 files changed, 8697 insertions, 2040 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 468dbe8a6b9c..ce13ad689d19 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
217 If you are compiling a kernel that will run under SGI's IA-64 217 If you are compiling a kernel that will run under SGI's IA-64
218 simulator (Medusa) then say Y, otherwise say N. 218 simulator (Medusa) then say Y, otherwise say N.
219 219
220config IA64_SGI_SN_XP
221 tristate "Support communication between SGI SSIs"
222 depends on MSPEC
223 help
224 An SGI machine can be divided into multiple Single System
225 Images which act independently of each other and have
226 hardware based memory protection from the others. Enabling
227 this feature will allow for direct communication between SSIs
228 based on a network adapter and DMA messaging.
229
220config FORCE_MAX_ZONEORDER 230config FORCE_MAX_ZONEORDER
221 int 231 int
222 default "18" 232 default "18"
@@ -261,6 +271,15 @@ config HOTPLUG_CPU
261 can be controlled through /sys/devices/system/cpu/cpu#. 271 can be controlled through /sys/devices/system/cpu/cpu#.
262 Say N if you want to disable CPU hotplug. 272 Say N if you want to disable CPU hotplug.
263 273
274config SCHED_SMT
275 bool "SMT scheduler support"
276 depends on SMP
277 default off
278 help
279 Improves the CPU scheduler's decision making when dealing with
280 Intel IA64 chips with MultiThreading at a cost of slightly increased
281 overhead in some places. If unsure say N here.
282
264config PREEMPT 283config PREEMPT
265 bool "Preemptible Kernel" 284 bool "Preemptible Kernel"
266 help 285 help
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 99830e8fc9ba..9086b789f6ac 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.11-rc2 3# Linux kernel version: 2.6.12-rc3
4# Sat Jan 22 11:17:02 2005 4# Tue May 3 15:55:04 2005
5# 5#
6 6
7# 7#
@@ -10,6 +10,7 @@
10CONFIG_EXPERIMENTAL=y 10CONFIG_EXPERIMENTAL=y
11CONFIG_CLEAN_COMPILE=y 11CONFIG_CLEAN_COMPILE=y
12CONFIG_LOCK_KERNEL=y 12CONFIG_LOCK_KERNEL=y
13CONFIG_INIT_ENV_ARG_LIMIT=32
13 14
14# 15#
15# General setup 16# General setup
@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y
21# CONFIG_BSD_PROCESS_ACCT is not set 22# CONFIG_BSD_PROCESS_ACCT is not set
22CONFIG_SYSCTL=y 23CONFIG_SYSCTL=y
23# CONFIG_AUDIT is not set 24# CONFIG_AUDIT is not set
24CONFIG_LOG_BUF_SHIFT=20
25CONFIG_HOTPLUG=y 25CONFIG_HOTPLUG=y
26CONFIG_KOBJECT_UEVENT=y 26CONFIG_KOBJECT_UEVENT=y
27CONFIG_IKCONFIG=y 27CONFIG_IKCONFIG=y
28CONFIG_IKCONFIG_PROC=y 28CONFIG_IKCONFIG_PROC=y
29# CONFIG_CPUSETS is not set
29# CONFIG_EMBEDDED is not set 30# CONFIG_EMBEDDED is not set
30CONFIG_KALLSYMS=y 31CONFIG_KALLSYMS=y
31CONFIG_KALLSYMS_ALL=y 32CONFIG_KALLSYMS_ALL=y
32# CONFIG_KALLSYMS_EXTRA_PASS is not set 33# CONFIG_KALLSYMS_EXTRA_PASS is not set
34CONFIG_PRINTK=y
35CONFIG_BUG=y
36CONFIG_BASE_FULL=y
33CONFIG_FUTEX=y 37CONFIG_FUTEX=y
34CONFIG_EPOLL=y 38CONFIG_EPOLL=y
35# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
36CONFIG_SHMEM=y 39CONFIG_SHMEM=y
37CONFIG_CC_ALIGN_FUNCTIONS=0 40CONFIG_CC_ALIGN_FUNCTIONS=0
38CONFIG_CC_ALIGN_LABELS=0 41CONFIG_CC_ALIGN_LABELS=0
39CONFIG_CC_ALIGN_LOOPS=0 42CONFIG_CC_ALIGN_LOOPS=0
40CONFIG_CC_ALIGN_JUMPS=0 43CONFIG_CC_ALIGN_JUMPS=0
41# CONFIG_TINY_SHMEM is not set 44# CONFIG_TINY_SHMEM is not set
45CONFIG_BASE_SMALL=0
42 46
43# 47#
44# Loadable module support 48# Loadable module support
@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18
85CONFIG_SMP=y 89CONFIG_SMP=y
86CONFIG_NR_CPUS=4 90CONFIG_NR_CPUS=4
87CONFIG_HOTPLUG_CPU=y 91CONFIG_HOTPLUG_CPU=y
92# CONFIG_SCHED_SMT is not set
88# CONFIG_PREEMPT is not set 93# CONFIG_PREEMPT is not set
89CONFIG_HAVE_DEC_LOCK=y 94CONFIG_HAVE_DEC_LOCK=y
90CONFIG_IA32_SUPPORT=y 95CONFIG_IA32_SUPPORT=y
@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y
135# CONFIG_PCI_MSI is not set 140# CONFIG_PCI_MSI is not set
136CONFIG_PCI_LEGACY_PROC=y 141CONFIG_PCI_LEGACY_PROC=y
137CONFIG_PCI_NAMES=y 142CONFIG_PCI_NAMES=y
143# CONFIG_PCI_DEBUG is not set
138 144
139# 145#
140# PCI Hotplug Support 146# PCI Hotplug Support
@@ -152,10 +158,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m
152# CONFIG_PCCARD is not set 158# CONFIG_PCCARD is not set
153 159
154# 160#
155# PC-card bridges
156#
157
158#
159# Device Drivers 161# Device Drivers
160# 162#
161 163
@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
195CONFIG_BLK_DEV_NBD=m 197CONFIG_BLK_DEV_NBD=m
196# CONFIG_BLK_DEV_SX8 is not set 198# CONFIG_BLK_DEV_SX8 is not set
197# CONFIG_BLK_DEV_UB is not set 199# CONFIG_BLK_DEV_UB is not set
198CONFIG_BLK_DEV_RAM=m 200CONFIG_BLK_DEV_RAM=y
199CONFIG_BLK_DEV_RAM_COUNT=16 201CONFIG_BLK_DEV_RAM_COUNT=16
200CONFIG_BLK_DEV_RAM_SIZE=4096 202CONFIG_BLK_DEV_RAM_SIZE=4096
203CONFIG_BLK_DEV_INITRD=y
201CONFIG_INITRAMFS_SOURCE="" 204CONFIG_INITRAMFS_SOURCE=""
202# CONFIG_CDROM_PKTCDVD is not set 205# CONFIG_CDROM_PKTCDVD is not set
203 206
@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y
313# CONFIG_SCSI_BUSLOGIC is not set 316# CONFIG_SCSI_BUSLOGIC is not set
314# CONFIG_SCSI_DMX3191D is not set 317# CONFIG_SCSI_DMX3191D is not set
315# CONFIG_SCSI_EATA is not set 318# CONFIG_SCSI_EATA is not set
316# CONFIG_SCSI_EATA_PIO is not set
317# CONFIG_SCSI_FUTURE_DOMAIN is not set 319# CONFIG_SCSI_FUTURE_DOMAIN is not set
318# CONFIG_SCSI_GDTH is not set 320# CONFIG_SCSI_GDTH is not set
319# CONFIG_SCSI_IPS is not set 321# CONFIG_SCSI_IPS is not set
@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
325CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 327CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
326# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set 328# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
327# CONFIG_SCSI_IPR is not set 329# CONFIG_SCSI_IPR is not set
328# CONFIG_SCSI_QLOGIC_ISP is not set
329CONFIG_SCSI_QLOGIC_FC=y 330CONFIG_SCSI_QLOGIC_FC=y
330# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set 331# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set
331CONFIG_SCSI_QLOGIC_1280=y 332CONFIG_SCSI_QLOGIC_1280=y
@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m
336CONFIG_SCSI_QLA2300=m 337CONFIG_SCSI_QLA2300=m
337CONFIG_SCSI_QLA2322=m 338CONFIG_SCSI_QLA2322=m
338# CONFIG_SCSI_QLA6312 is not set 339# CONFIG_SCSI_QLA6312 is not set
340# CONFIG_SCSI_LPFC is not set
339# CONFIG_SCSI_DC395x is not set 341# CONFIG_SCSI_DC395x is not set
340# CONFIG_SCSI_DC390T is not set 342# CONFIG_SCSI_DC390T is not set
341# CONFIG_SCSI_DEBUG is not set 343# CONFIG_SCSI_DEBUG is not set
@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m
358CONFIG_DM_SNAPSHOT=m 360CONFIG_DM_SNAPSHOT=m
359CONFIG_DM_MIRROR=m 361CONFIG_DM_MIRROR=m
360CONFIG_DM_ZERO=m 362CONFIG_DM_ZERO=m
363# CONFIG_DM_MULTIPATH is not set
361 364
362# 365#
363# Fusion MPT device support 366# Fusion MPT device support
@@ -386,7 +389,6 @@ CONFIG_NET=y
386# 389#
387CONFIG_PACKET=y 390CONFIG_PACKET=y
388# CONFIG_PACKET_MMAP is not set 391# CONFIG_PACKET_MMAP is not set
389CONFIG_NETLINK_DEV=y
390CONFIG_UNIX=y 392CONFIG_UNIX=y
391# CONFIG_NET_KEY is not set 393# CONFIG_NET_KEY is not set
392CONFIG_INET=y 394CONFIG_INET=y
@@ -446,7 +448,6 @@ CONFIG_DUMMY=m
446# CONFIG_BONDING is not set 448# CONFIG_BONDING is not set
447# CONFIG_EQUALIZER is not set 449# CONFIG_EQUALIZER is not set
448# CONFIG_TUN is not set 450# CONFIG_TUN is not set
449# CONFIG_ETHERTAP is not set
450 451
451# 452#
452# ARCnet devices 453# ARCnet devices
@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y
484# CONFIG_DGRS is not set 485# CONFIG_DGRS is not set
485CONFIG_EEPRO100=m 486CONFIG_EEPRO100=m
486CONFIG_E100=m 487CONFIG_E100=m
487# CONFIG_E100_NAPI is not set
488# CONFIG_FEALNX is not set 488# CONFIG_FEALNX is not set
489# CONFIG_NATSEMI is not set 489# CONFIG_NATSEMI is not set
490# CONFIG_NE2K_PCI is not set 490# CONFIG_NE2K_PCI is not set
@@ -566,25 +566,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
566# CONFIG_INPUT_EVBUG is not set 566# CONFIG_INPUT_EVBUG is not set
567 567
568# 568#
569# Input I/O drivers
570#
571CONFIG_GAMEPORT=m
572CONFIG_SOUND_GAMEPORT=m
573# CONFIG_GAMEPORT_NS558 is not set
574# CONFIG_GAMEPORT_L4 is not set
575# CONFIG_GAMEPORT_EMU10K1 is not set
576# CONFIG_GAMEPORT_VORTEX is not set
577# CONFIG_GAMEPORT_FM801 is not set
578# CONFIG_GAMEPORT_CS461X is not set
579CONFIG_SERIO=y
580CONFIG_SERIO_I8042=y
581# CONFIG_SERIO_SERPORT is not set
582# CONFIG_SERIO_CT82C710 is not set
583# CONFIG_SERIO_PCIPS2 is not set
584CONFIG_SERIO_LIBPS2=y
585# CONFIG_SERIO_RAW is not set
586
587#
588# Input Device Drivers 569# Input Device Drivers
589# 570#
590CONFIG_INPUT_KEYBOARD=y 571CONFIG_INPUT_KEYBOARD=y
@@ -602,6 +583,24 @@ CONFIG_MOUSE_PS2=y
602# CONFIG_INPUT_MISC is not set 583# CONFIG_INPUT_MISC is not set
603 584
604# 585#
586# Hardware I/O ports
587#
588CONFIG_SERIO=y
589CONFIG_SERIO_I8042=y
590# CONFIG_SERIO_SERPORT is not set
591# CONFIG_SERIO_PCIPS2 is not set
592CONFIG_SERIO_LIBPS2=y
593# CONFIG_SERIO_RAW is not set
594CONFIG_GAMEPORT=m
595# CONFIG_GAMEPORT_NS558 is not set
596# CONFIG_GAMEPORT_L4 is not set
597# CONFIG_GAMEPORT_EMU10K1 is not set
598# CONFIG_GAMEPORT_VORTEX is not set
599# CONFIG_GAMEPORT_FM801 is not set
600# CONFIG_GAMEPORT_CS461X is not set
601CONFIG_SOUND_GAMEPORT=m
602
603#
605# Character devices 604# Character devices
606# 605#
607CONFIG_VT=y 606CONFIG_VT=y
@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y
615# CONFIG_SYNCLINK is not set 614# CONFIG_SYNCLINK is not set
616# CONFIG_SYNCLINKMP is not set 615# CONFIG_SYNCLINKMP is not set
617# CONFIG_N_HDLC is not set 616# CONFIG_N_HDLC is not set
617# CONFIG_SPECIALIX is not set
618# CONFIG_SX is not set
618# CONFIG_STALDRV is not set 619# CONFIG_STALDRV is not set
619 620
620# 621#
@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
635# 636#
636CONFIG_SERIAL_CORE=y 637CONFIG_SERIAL_CORE=y
637CONFIG_SERIAL_CORE_CONSOLE=y 638CONFIG_SERIAL_CORE_CONSOLE=y
639# CONFIG_SERIAL_JSM is not set
638CONFIG_UNIX98_PTYS=y 640CONFIG_UNIX98_PTYS=y
639CONFIG_LEGACY_PTYS=y 641CONFIG_LEGACY_PTYS=y
640CONFIG_LEGACY_PTY_COUNT=256 642CONFIG_LEGACY_PTY_COUNT=256
@@ -670,6 +672,12 @@ CONFIG_HPET=y
670# CONFIG_HPET_RTC_IRQ is not set 672# CONFIG_HPET_RTC_IRQ is not set
671CONFIG_HPET_MMAP=y 673CONFIG_HPET_MMAP=y
672CONFIG_MAX_RAW_DEVS=256 674CONFIG_MAX_RAW_DEVS=256
675# CONFIG_HANGCHECK_TIMER is not set
676
677#
678# TPM devices
679#
680# CONFIG_TCG_TPM is not set
673 681
674# 682#
675# I2C support 683# I2C support
@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256
705# 713#
706CONFIG_VGA_CONSOLE=y 714CONFIG_VGA_CONSOLE=y
707CONFIG_DUMMY_CONSOLE=y 715CONFIG_DUMMY_CONSOLE=y
708# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
709 716
710# 717#
711# Sound 718# Sound
@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y
715# 722#
716# USB support 723# USB support
717# 724#
725CONFIG_USB_ARCH_HAS_HCD=y
726CONFIG_USB_ARCH_HAS_OHCI=y
718CONFIG_USB=y 727CONFIG_USB=y
719# CONFIG_USB_DEBUG is not set 728# CONFIG_USB_DEBUG is not set
720 729
@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y
726# CONFIG_USB_DYNAMIC_MINORS is not set 735# CONFIG_USB_DYNAMIC_MINORS is not set
727# CONFIG_USB_SUSPEND is not set 736# CONFIG_USB_SUSPEND is not set
728# CONFIG_USB_OTG is not set 737# CONFIG_USB_OTG is not set
729CONFIG_USB_ARCH_HAS_HCD=y
730CONFIG_USB_ARCH_HAS_OHCI=y
731 738
732# 739#
733# USB Host Controller Drivers 740# USB Host Controller Drivers
@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m
736# CONFIG_USB_EHCI_SPLIT_ISO is not set 743# CONFIG_USB_EHCI_SPLIT_ISO is not set
737# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 744# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
738CONFIG_USB_OHCI_HCD=m 745CONFIG_USB_OHCI_HCD=m
746# CONFIG_USB_OHCI_BIG_ENDIAN is not set
747CONFIG_USB_OHCI_LITTLE_ENDIAN=y
739CONFIG_USB_UHCI_HCD=y 748CONFIG_USB_UHCI_HCD=y
740# CONFIG_USB_SL811_HCD is not set 749# CONFIG_USB_SL811_HCD is not set
741 750
@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y
751# 760#
752CONFIG_USB_STORAGE=m 761CONFIG_USB_STORAGE=m
753# CONFIG_USB_STORAGE_DEBUG is not set 762# CONFIG_USB_STORAGE_DEBUG is not set
754# CONFIG_USB_STORAGE_RW_DETECT is not set
755# CONFIG_USB_STORAGE_DATAFAB is not set 763# CONFIG_USB_STORAGE_DATAFAB is not set
756# CONFIG_USB_STORAGE_FREECOM is not set 764# CONFIG_USB_STORAGE_FREECOM is not set
757# CONFIG_USB_STORAGE_ISD200 is not set 765# CONFIG_USB_STORAGE_ISD200 is not set
758# CONFIG_USB_STORAGE_DPCM is not set 766# CONFIG_USB_STORAGE_DPCM is not set
759# CONFIG_USB_STORAGE_HP8200e is not set 767# CONFIG_USB_STORAGE_USBAT is not set
760# CONFIG_USB_STORAGE_SDDR09 is not set 768# CONFIG_USB_STORAGE_SDDR09 is not set
761# CONFIG_USB_STORAGE_SDDR55 is not set 769# CONFIG_USB_STORAGE_SDDR55 is not set
762# CONFIG_USB_STORAGE_JUMPSHOT is not set 770# CONFIG_USB_STORAGE_JUMPSHOT is not set
@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y
800# CONFIG_USB_PEGASUS is not set 808# CONFIG_USB_PEGASUS is not set
801# CONFIG_USB_RTL8150 is not set 809# CONFIG_USB_RTL8150 is not set
802# CONFIG_USB_USBNET is not set 810# CONFIG_USB_USBNET is not set
811# CONFIG_USB_MON is not set
803 812
804# 813#
805# USB port drivers 814# USB port drivers
@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y
824# CONFIG_USB_PHIDGETKIT is not set 833# CONFIG_USB_PHIDGETKIT is not set
825# CONFIG_USB_PHIDGETSERVO is not set 834# CONFIG_USB_PHIDGETSERVO is not set
826# CONFIG_USB_IDMOUSE is not set 835# CONFIG_USB_IDMOUSE is not set
836# CONFIG_USB_SISUSBVGA is not set
827# CONFIG_USB_TEST is not set 837# CONFIG_USB_TEST is not set
828 838
829# 839#
@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
867CONFIG_REISERFS_FS_SECURITY=y 877CONFIG_REISERFS_FS_SECURITY=y
868# CONFIG_JFS_FS is not set 878# CONFIG_JFS_FS is not set
869CONFIG_FS_POSIX_ACL=y 879CONFIG_FS_POSIX_ACL=y
880
881#
882# XFS support
883#
870CONFIG_XFS_FS=y 884CONFIG_XFS_FS=y
885CONFIG_XFS_EXPORT=y
871# CONFIG_XFS_RT is not set 886# CONFIG_XFS_RT is not set
872# CONFIG_XFS_QUOTA is not set 887# CONFIG_XFS_QUOTA is not set
873# CONFIG_XFS_SECURITY is not set 888# CONFIG_XFS_SECURITY is not set
@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y
945CONFIG_NFSD_TCP=y 960CONFIG_NFSD_TCP=y
946CONFIG_LOCKD=m 961CONFIG_LOCKD=m
947CONFIG_LOCKD_V4=y 962CONFIG_LOCKD_V4=y
948CONFIG_EXPORTFS=m 963CONFIG_EXPORTFS=y
949CONFIG_SUNRPC=m 964CONFIG_SUNRPC=m
950CONFIG_SUNRPC_GSS=m 965CONFIG_SUNRPC_GSS=m
951CONFIG_RPCSEC_GSS_KRB5=m 966CONFIG_RPCSEC_GSS_KRB5=m
@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y
1042# 1057#
1043# Kernel hacking 1058# Kernel hacking
1044# 1059#
1060# CONFIG_PRINTK_TIME is not set
1045CONFIG_DEBUG_KERNEL=y 1061CONFIG_DEBUG_KERNEL=y
1046CONFIG_MAGIC_SYSRQ=y 1062CONFIG_MAGIC_SYSRQ=y
1063CONFIG_LOG_BUF_SHIFT=20
1047# CONFIG_SCHEDSTATS is not set 1064# CONFIG_SCHEDSTATS is not set
1048# CONFIG_DEBUG_SLAB is not set 1065# CONFIG_DEBUG_SLAB is not set
1049# CONFIG_DEBUG_SPINLOCK is not set 1066# CONFIG_DEBUG_SPINLOCK is not set
@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m
1077# CONFIG_CRYPTO_SHA256 is not set 1094# CONFIG_CRYPTO_SHA256 is not set
1078# CONFIG_CRYPTO_SHA512 is not set 1095# CONFIG_CRYPTO_SHA512 is not set
1079# CONFIG_CRYPTO_WP512 is not set 1096# CONFIG_CRYPTO_WP512 is not set
1097# CONFIG_CRYPTO_TGR192 is not set
1080CONFIG_CRYPTO_DES=m 1098CONFIG_CRYPTO_DES=m
1081# CONFIG_CRYPTO_BLOWFISH is not set 1099# CONFIG_CRYPTO_BLOWFISH is not set
1082# CONFIG_CRYPTO_TWOFISH is not set 1100# CONFIG_CRYPTO_TWOFISH is not set
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6a8fcba7a853..b8db6e3e5e81 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1944,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus)
1944static void __init 1944static void __init
1945sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) 1945sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
1946{ 1946{
1947 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
1948 union acpi_object *obj;
1949 acpi_handle phandle;
1950 unsigned int node; 1947 unsigned int node;
1948 int pxm;
1951 1949
1952 ioc->node = MAX_NUMNODES; 1950 ioc->node = MAX_NUMNODES;
1953 1951
1954 /* 1952 pxm = acpi_get_pxm(handle);
1955 * Check for a _PXM on this node first. We don't typically see
1956 * one here, so we'll end up getting it from the parent.
1957 */
1958 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) {
1959 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
1960 return;
1961
1962 /* Reset the acpi buffer */
1963 buffer.length = ACPI_ALLOCATE_BUFFER;
1964 buffer.pointer = NULL;
1965
1966 if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL,
1967 &buffer)))
1968 return;
1969 }
1970 1953
1971 if (!buffer.length || !buffer.pointer) 1954 if (pxm < 0)
1972 return; 1955 return;
1973 1956
1974 obj = buffer.pointer; 1957 node = pxm_to_nid_map[pxm];
1975
1976 if (obj->type != ACPI_TYPE_INTEGER ||
1977 obj->integer.value >= MAX_PXM_DOMAINS) {
1978 acpi_os_free(buffer.pointer);
1979 return;
1980 }
1981
1982 node = pxm_to_nid_map[obj->integer.value];
1983 acpi_os_free(buffer.pointer);
1984 1958
1985 if (node >= MAX_NUMNODES || !node_online(node)) 1959 if (node >= MAX_NUMNODES || !node_online(node))
1986 return; 1960 return;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a8e99c56a768..72dfd9e7de0f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
779 union acpi_object *obj; 779 union acpi_object *obj;
780 struct acpi_table_iosapic *iosapic; 780 struct acpi_table_iosapic *iosapic;
781 unsigned int gsi_base; 781 unsigned int gsi_base;
782 int node; 782 int pxm, node;
783 783
784 /* Only care about objects w/ a method that returns the MADT */ 784 /* Only care about objects w/ a method that returns the MADT */
785 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) 785 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret)
805 gsi_base = iosapic->global_irq_base; 805 gsi_base = iosapic->global_irq_base;
806 806
807 acpi_os_free(buffer.pointer); 807 acpi_os_free(buffer.pointer);
808 buffer.length = ACPI_ALLOCATE_BUFFER;
809 buffer.pointer = NULL;
810 808
811 /* 809 /*
812 * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell 810 * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell
813 * us which node to associate this with. 811 * us which node to associate this with.
814 */ 812 */
815 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) 813 pxm = acpi_get_pxm(handle);
816 return AE_OK; 814 if (pxm < 0)
817
818 if (!buffer.length || !buffer.pointer)
819 return AE_OK;
820
821 obj = buffer.pointer;
822
823 if (obj->type != ACPI_TYPE_INTEGER ||
824 obj->integer.value >= MAX_PXM_DOMAINS) {
825 acpi_os_free(buffer.pointer);
826 return AE_OK; 815 return AE_OK;
827 }
828 816
829 node = pxm_to_nid_map[obj->integer.value]; 817 node = pxm_to_nid_map[pxm];
830 acpi_os_free(buffer.pointer);
831 818
832 if (node >= MAX_NUMNODES || !node_online(node) || 819 if (node >= MAX_NUMNODES || !node_online(node) ||
833 cpus_empty(node_to_cpumask(node))) 820 cpus_empty(node_to_cpumask(node)))
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d3f093820bc7..81c45d447394 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
782 st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit 782 st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
783 .mem.offset 8,0 783 .mem.offset 8,0
784 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit 784 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
785END(ia64_ret_from_ia32_execve_syscall) 785END(ia64_ret_from_ia32_execve)
786 // fall through 786 // fall through
787#endif /* CONFIG_IA32_SUPPORT */ 787#endif /* CONFIG_IA32_SUPPORT */
788GLOBAL_ENTRY(ia64_leave_kernel) 788GLOBAL_ENTRY(ia64_leave_kernel)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 0d8650f7fce7..4f3cdef75797 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down)
611 movl r2=ia64_ret_from_syscall 611 movl r2=ia64_ret_from_syscall
612 ;; 612 ;;
613 mov rp=r2 // set the real return addr 613 mov rp=r2 // set the real return addr
614 tbit.z p8,p0=r3,TIF_SYSCALL_TRACE 614 and r3=_TIF_SYSCALL_TRACEAUDIT,r3
615 ;; 615 ;;
616 cmp.eq p8,p0=r3,r0
617
616(p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 618(p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8
617(p8) br.call.sptk.many b6=b6 // ignore this return addr 619(p8) br.call.sptk.many b6=b6 // ignore this return addr
618 br.cond.sptk ia64_trace_syscall 620 br.cond.sptk ia64_trace_syscall
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index ab478172c349..abc0113a821d 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr)
132 spin_unlock(&mca_bh_lock); 132 spin_unlock(&mca_bh_lock);
133 133
134 /* This process is about to be killed itself */ 134 /* This process is about to be killed itself */
135 force_sig(SIGKILL, current); 135 do_exit(SIGKILL);
136 schedule();
137} 136}
138 137
139/** 138/**
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
439 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; 438 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
440 psr2->cpl = 0; 439 psr2->cpl = 0;
441 psr2->ri = 0; 440 psr2->ri = 0;
441 psr2->i = 0;
442 442
443 return 1; 443 return 1;
444 } 444 }
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index bcfa05acc561..2d7e0217638d 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -10,6 +10,7 @@
10 10
11#include <asm/asmmacro.h> 11#include <asm/asmmacro.h>
12#include <asm/processor.h> 12#include <asm/processor.h>
13#include <asm/ptrace.h>
13 14
14GLOBAL_ENTRY(mca_handler_bhhook) 15GLOBAL_ENTRY(mca_handler_bhhook)
15 invala // clear RSE ? 16 invala // clear RSE ?
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook)
20 ;; 21 ;;
21 alloc r16=ar.pfs,0,2,1,0 // make a new frame 22 alloc r16=ar.pfs,0,2,1,0 // make a new frame
22 ;; 23 ;;
24 mov ar.rsc=0
25 ;;
23 mov r13=IA64_KR(CURRENT) // current task pointer 26 mov r13=IA64_KR(CURRENT) // current task pointer
24 ;; 27 ;;
25 adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 28 mov r2=r13
29 ;;
30 addl r22=IA64_RBS_OFFSET,r2
31 ;;
32 mov ar.bspstore=r22
26 ;; 33 ;;
27 ld8 r12=[r12] // stack pointer 34 addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
28 ;; 35 ;;
36 adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
37 ;;
38 st1 [r2]=r0 // clear current->thread.on_ustack flag
29 mov loc0=r16 39 mov loc0=r16
30 movl loc1=mca_handler_bh // recovery C function 40 movl loc1=mca_handler_bh // recovery C function
31 ;; 41 ;;
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook)
34 ;; 44 ;;
35 mov loc1=rp 45 mov loc1=rp
36 ;; 46 ;;
37 br.call.sptk.many rp=b6 // not return ... 47 ssm psr.i
48 ;;
49 br.call.sptk.many rp=b6 // does not return ...
38 ;; 50 ;;
39 mov ar.pfs=loc0 51 mov ar.pfs=loc0
40 mov rp=loc1 52 mov rp=loc1
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 376fcbc3f8da..71c101601e3e 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1265,6 +1265,8 @@ out:
1265} 1265}
1266EXPORT_SYMBOL(pfm_unregister_buffer_fmt); 1266EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1267 1267
1268extern void update_pal_halt_status(int);
1269
1268static int 1270static int
1269pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) 1271pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1270{ 1272{
@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1311 is_syswide, 1313 is_syswide,
1312 cpu)); 1314 cpu));
1313 1315
1316 /*
1317 * disable default_idle() to go to PAL_HALT
1318 */
1319 update_pal_halt_status(0);
1320
1314 UNLOCK_PFS(flags); 1321 UNLOCK_PFS(flags);
1315 1322
1316 return 0; 1323 return 0;
@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1366 is_syswide, 1373 is_syswide,
1367 cpu)); 1374 cpu));
1368 1375
1376 /*
1377 * if possible, enable default_idle() to go into PAL_HALT
1378 */
1379 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1380 update_pal_halt_status(1);
1381
1369 UNLOCK_PFS(flags); 1382 UNLOCK_PFS(flags);
1370 1383
1371 return 0; 1384 return 0;
@@ -4202,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4202 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", 4215 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4203 req->load_pid, 4216 req->load_pid,
4204 ctx->ctx_state)); 4217 ctx->ctx_state));
4205 return -EINVAL; 4218 return -EBUSY;
4206 } 4219 }
4207 4220
4208 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); 4221 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
@@ -4704,16 +4717,26 @@ recheck:
4704 if (task == current || ctx->ctx_fl_system) return 0; 4717 if (task == current || ctx->ctx_fl_system) return 0;
4705 4718
4706 /* 4719 /*
4707 * if context is UNLOADED we are safe to go 4720 * we are monitoring another thread
4708 */
4709 if (state == PFM_CTX_UNLOADED) return 0;
4710
4711 /*
4712 * no command can operate on a zombie context
4713 */ 4721 */
4714 if (state == PFM_CTX_ZOMBIE) { 4722 switch(state) {
4715 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); 4723 case PFM_CTX_UNLOADED:
4716 return -EINVAL; 4724 /*
4725 * if context is UNLOADED we are safe to go
4726 */
4727 return 0;
4728 case PFM_CTX_ZOMBIE:
4729 /*
4730 * no command can operate on a zombie context
4731 */
4732 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4733 return -EINVAL;
4734 case PFM_CTX_MASKED:
4735 /*
4736 * PMU state has been saved to software even though
4737 * the thread may still be running.
4738 */
4739 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4717 } 4740 }
4718 4741
4719 /* 4742 /*
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7c43aea5f7f7..ebb71f3d6d19 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -50,7 +50,7 @@
50#include "sigframe.h" 50#include "sigframe.h"
51 51
52void (*ia64_mark_idle)(int); 52void (*ia64_mark_idle)(int);
53static cpumask_t cpu_idle_map; 53static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
54 54
55unsigned long boot_option_idle_override = 0; 55unsigned long boot_option_idle_override = 0;
56EXPORT_SYMBOL(boot_option_idle_override); 56EXPORT_SYMBOL(boot_option_idle_override);
@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
173 ia64_do_signal(oldset, scr, in_syscall); 173 ia64_do_signal(oldset, scr, in_syscall);
174} 174}
175 175
176static int pal_halt = 1; 176static int pal_halt = 1;
177static int can_do_pal_halt = 1;
178
177static int __init nohalt_setup(char * str) 179static int __init nohalt_setup(char * str)
178{ 180{
179 pal_halt = 0; 181 pal_halt = 0;
@@ -181,16 +183,20 @@ static int __init nohalt_setup(char * str)
181} 183}
182__setup("nohalt", nohalt_setup); 184__setup("nohalt", nohalt_setup);
183 185
186void
187update_pal_halt_status(int status)
188{
189 can_do_pal_halt = pal_halt && status;
190}
191
184/* 192/*
185 * We use this if we don't have any better idle routine.. 193 * We use this if we don't have any better idle routine..
186 */ 194 */
187void 195void
188default_idle (void) 196default_idle (void)
189{ 197{
190 unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
191
192 while (!need_resched()) 198 while (!need_resched())
193 if (pal_halt && !pmu_active) 199 if (can_do_pal_halt)
194 safe_halt(); 200 safe_halt();
195 else 201 else
196 cpu_relax(); 202 cpu_relax();
@@ -223,20 +229,31 @@ static inline void play_dead(void)
223} 229}
224#endif /* CONFIG_HOTPLUG_CPU */ 230#endif /* CONFIG_HOTPLUG_CPU */
225 231
226
227void cpu_idle_wait(void) 232void cpu_idle_wait(void)
228{ 233{
229 int cpu; 234 unsigned int cpu, this_cpu = get_cpu();
230 cpumask_t map; 235 cpumask_t map;
231 236
232 for_each_online_cpu(cpu) 237 set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
233 cpu_set(cpu, cpu_idle_map); 238 put_cpu();
234 239
235 wmb(); 240 cpus_clear(map);
236 do { 241 for_each_online_cpu(cpu) {
237 ssleep(1); 242 per_cpu(cpu_idle_state, cpu) = 1;
238 cpus_and(map, cpu_idle_map, cpu_online_map); 243 cpu_set(cpu, map);
239 } while (!cpus_empty(map)); 244 }
245
246 __get_cpu_var(cpu_idle_state) = 0;
247
248 wmb();
249 do {
250 ssleep(1);
251 for_each_online_cpu(cpu) {
252 if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
253 cpu_clear(cpu, map);
254 }
255 cpus_and(map, map, cpu_online_map);
256 } while (!cpus_empty(map));
240} 257}
241EXPORT_SYMBOL_GPL(cpu_idle_wait); 258EXPORT_SYMBOL_GPL(cpu_idle_wait);
242 259
@@ -244,7 +261,6 @@ void __attribute__((noreturn))
244cpu_idle (void) 261cpu_idle (void)
245{ 262{
246 void (*mark_idle)(int) = ia64_mark_idle; 263 void (*mark_idle)(int) = ia64_mark_idle;
247 int cpu = smp_processor_id();
248 264
249 /* endless idle loop with no priority at all */ 265 /* endless idle loop with no priority at all */
250 while (1) { 266 while (1) {
@@ -255,12 +271,13 @@ cpu_idle (void)
255 while (!need_resched()) { 271 while (!need_resched()) {
256 void (*idle)(void); 272 void (*idle)(void);
257 273
274 if (__get_cpu_var(cpu_idle_state))
275 __get_cpu_var(cpu_idle_state) = 0;
276
277 rmb();
258 if (mark_idle) 278 if (mark_idle)
259 (*mark_idle)(1); 279 (*mark_idle)(1);
260 280
261 if (cpu_isset(cpu, cpu_idle_map))
262 cpu_clear(cpu, cpu_idle_map);
263 rmb();
264 idle = pm_idle; 281 idle = pm_idle;
265 if (!idle) 282 if (!idle)
266 idle = default_idle; 283 idle = default_idle;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 6891d86937d9..499b7e5317cf 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
224 * could be corrupted. 224 * could be corrupted.
225 */ 225 */
226 retval = (long) &ia64_leave_kernel; 226 retval = (long) &ia64_leave_kernel;
227 if (test_thread_flag(TIF_SYSCALL_TRACE)) 227 if (test_thread_flag(TIF_SYSCALL_TRACE)
228 || test_thread_flag(TIF_SYSCALL_AUDIT))
228 /* 229 /*
229 * strace expects to be notified after sigreturn returns even though the 230 * strace expects to be notified after sigreturn returns even though the
230 * context to which we return may not be in the middle of a syscall. 231 * context to which we return may not be in the middle of a syscall.
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 29c802b19669..a1af9146cfdb 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -1,8 +1,8 @@
1/* 1/*
2 * Cache flushing routines. 2 * Cache flushing routines.
3 * 3 *
4 * Copyright (C) 1999-2001 Hewlett-Packard Co 4 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
5 * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 */ 6 */
7#include <asm/asmmacro.h> 7#include <asm/asmmacro.h>
8#include <asm/page.h> 8#include <asm/page.h>
@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range)
26 26
27 mov ar.lc=r8 27 mov ar.lc=r8
28 ;; 28 ;;
29.Loop: fc in0 // issuable on M0 only 29.Loop: fc.i in0 // issuable on M2 only
30 add in0=32,in0 30 add in0=32,in0
31 br.cloop.sptk.few .Loop 31 br.cloop.sptk.few .Loop
32 ;; 32 ;;
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 3c2cd2f04db9..6f308e62c137 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
75 mov f6=f0 75 mov f6=f0
76 br.cond.sptk .common_code 76 br.cond.sptk .common_code
77 ;; 77 ;;
78END(memcpy)
78GLOBAL_ENTRY(__copy_user) 79GLOBAL_ENTRY(__copy_user)
79 .prologue 80 .prologue
80// check dest alignment 81// check dest alignment
@@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
524#undef B 525#undef B
525#undef C 526#undef C
526#undef D 527#undef D
527END(memcpy)
528 528
529/* 529/*
530 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which 530 * Due to lack of local tag support in gcc 2.x assembler, it is not clear which
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S
index bd8cf907fe22..f26c16aefb1c 100644
--- a/arch/ia64/lib/memset.S
+++ b/arch/ia64/lib/memset.S
@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset)
57{ .mmi 57{ .mmi
58 .prologue 58 .prologue
59 alloc tmp = ar.pfs, 3, 0, 0, 0 59 alloc tmp = ar.pfs, 3, 0, 0, 0
60 .body
61 lfetch.nt1 [dest] // 60 lfetch.nt1 [dest] //
62 .save ar.lc, save_lc 61 .save ar.lc, save_lc
63 mov.i save_lc = ar.lc 62 mov.i save_lc = ar.lc
63 .body
64} { .mmi 64} { .mmi
65 mov ret0 = dest // return value 65 mov ret0 = dest // return value
66 cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero 66 cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 4f381fb25049..4351c4ff9845 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -4,10 +4,15 @@
4# License. See the file "COPYING" in the main directory of this archive 4# License. See the file "COPYING" in the main directory of this archive
5# for more details. 5# for more details.
6# 6#
7# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. 7# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
8# 8#
9 9
10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ 10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
11 huberror.o io_init.o iomv.o klconflib.o sn2/ 11 huberror.o io_init.o iomv.o klconflib.o sn2/
12obj-$(CONFIG_IA64_GENERIC) += machvec.o 12obj-$(CONFIG_IA64_GENERIC) += machvec.o
13obj-$(CONFIG_SGI_TIOCX) += tiocx.o 13obj-$(CONFIG_SGI_TIOCX) += tiocx.o
14obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o
15xp-y := xp_main.o xp_nofault.o
16obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
17xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
18obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 18160a06a8c9..9e07f5463f21 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -174,6 +174,12 @@ static void sn_fixup_ionodes(void)
174 if (status) 174 if (status)
175 continue; 175 continue;
176 176
177 /* Attach the error interrupt handlers */
178 if (nasid & 1)
179 ice_error_init(hubdev);
180 else
181 hub_error_init(hubdev);
182
177 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) 183 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
178 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; 184 hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
179 185
@@ -211,10 +217,6 @@ static void sn_fixup_ionodes(void)
211 sn_flush_device_list; 217 sn_flush_device_list;
212 } 218 }
213 219
214 if (!(i & 1))
215 hub_error_init(hubdev);
216 else
217 ice_error_init(hubdev);
218 } 220 }
219 221
220} 222}
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 857774bb2c9a..6546db6abdba 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
37 * This function is the callback routine that SAL calls to log error 37 * This function is the callback routine that SAL calls to log error
38 * info for platform errors. buf is appended to sn_oemdata, resizing as 38 * info for platform errors. buf is appended to sn_oemdata, resizing as
39 * required. 39 * required.
40 * Note: this is a SAL to OS callback, running under the same rules as the SAL
41 * code. SAL calls are run with preempt disabled so this routine must not
42 * sleep. vmalloc can sleep so print_hook cannot resize the output buffer
43 * itself, instead it must set the required size and return to let the caller
44 * resize the buffer then redrive the SAL call.
40 */ 45 */
41static int print_hook(const char *fmt, ...) 46static int print_hook(const char *fmt, ...)
42{ 47{
@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...)
47 vsnprintf(buf, sizeof(buf), fmt, args); 52 vsnprintf(buf, sizeof(buf), fmt, args);
48 va_end(args); 53 va_end(args);
49 len = strlen(buf); 54 len = strlen(buf);
50 while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { 55 if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
51 u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); 56 memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
52 if (!newbuf) {
53 printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
54 __FUNCTION__);
55 return 0;
56 }
57 memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
58 vfree(*sn_oemdata);
59 *sn_oemdata = newbuf;
60 }
61 memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
62 *sn_oemdata_size += len; 57 *sn_oemdata_size += len;
63 return 0; 58 return 0;
64} 59}
@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
98 sn_oemdata = oemdata; 93 sn_oemdata = oemdata;
99 sn_oemdata_size = oemdata_size; 94 sn_oemdata_size = oemdata_size;
100 sn_oemdata_bufsize = 0; 95 sn_oemdata_bufsize = 0;
101 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); 96 *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */
97 while (*sn_oemdata_size > sn_oemdata_bufsize) {
98 u8 *newbuf = vmalloc(*sn_oemdata_size);
99 if (!newbuf) {
100 printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
101 __FUNCTION__);
102 return 1;
103 }
104 vfree(*sn_oemdata);
105 *sn_oemdata = newbuf;
106 sn_oemdata_bufsize = *sn_oemdata_size;
107 *sn_oemdata_size = 0;
108 ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
109 }
102 up(&sn_oemdata_mutex); 110 up(&sn_oemdata_mutex);
103 return 0; 111 return 0;
104} 112}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index d35f2a6f9c94..4fb44984afe6 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/config.h> 9#include <linux/config.h>
@@ -73,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
73DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); 73DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
74EXPORT_PER_CPU_SYMBOL(__sn_hub_info); 74EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
75 75
76DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
77EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
78
79DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
80EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
81
76partid_t sn_partid = -1; 82partid_t sn_partid = -1;
77EXPORT_SYMBOL(sn_partid); 83EXPORT_SYMBOL(sn_partid);
78char sn_system_serial_number_string[128]; 84char sn_system_serial_number_string[128];
@@ -373,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p)
373{ 379{
374 cnodeid_t cnode; 380 cnodeid_t cnode;
375 381
376 memset(pda->cnodeid_to_nasid_table, -1, 382 memset(sn_cnodeid_to_nasid, -1,
377 sizeof(pda->cnodeid_to_nasid_table)); 383 sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
378 for_each_online_node(cnode) 384 for_each_online_node(cnode)
379 pda->cnodeid_to_nasid_table[cnode] = 385 sn_cnodeid_to_nasid[cnode] =
380 pxm_to_nasid(nid_to_pxm_map[cnode]); 386 pxm_to_nasid(nid_to_pxm_map[cnode]);
381 387
382 numionodes = num_online_nodes(); 388 numionodes = num_online_nodes();
383 scan_for_ionodes(); 389 scan_for_ionodes();
@@ -477,7 +483,8 @@ void __init sn_cpu_init(void)
477 483
478 cnode = nasid_to_cnodeid(nasid); 484 cnode = nasid_to_cnodeid(nasid);
479 485
480 pda->p_nodepda = nodepdaindr[cnode]; 486 sn_nodepda = nodepdaindr[cnode];
487
481 pda->led_address = 488 pda->led_address =
482 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); 489 (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
483 pda->led_state = LED_ALWAYS_SET; 490 pda->led_state = LED_ALWAYS_SET;
@@ -486,15 +493,18 @@ void __init sn_cpu_init(void)
486 pda->idle_flag = 0; 493 pda->idle_flag = 0;
487 494
488 if (cpuid != 0) { 495 if (cpuid != 0) {
489 memcpy(pda->cnodeid_to_nasid_table, 496 /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
490 pdacpu(0)->cnodeid_to_nasid_table, 497 memcpy(sn_cnodeid_to_nasid,
491 sizeof(pda->cnodeid_to_nasid_table)); 498 (&per_cpu(__sn_cnodeid_to_nasid, 0)),
499 sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
492 } 500 }
493 501
494 /* 502 /*
495 * Check for WARs. 503 * Check for WARs.
496 * Only needs to be done once, on BSP. 504 * Only needs to be done once, on BSP.
497 * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. 505 * Has to be done after loop above, because it uses this cpu's
506 * sn_cnodeid_to_nasid table which was just initialized if this
507 * isn't cpu 0.
498 * Has to be done before assignment below. 508 * Has to be done before assignment below.
499 */ 509 */
500 if (!wars_have_been_checked) { 510 if (!wars_have_been_checked) {
@@ -580,8 +590,7 @@ static void __init scan_for_ionodes(void)
580 brd = find_lboard_any(brd, KLTYPE_SNIA); 590 brd = find_lboard_any(brd, KLTYPE_SNIA);
581 591
582 while (brd) { 592 while (brd) {
583 pda->cnodeid_to_nasid_table[numionodes] = 593 sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
584 brd->brd_nasid;
585 physical_node_map[brd->brd_nasid] = numionodes; 594 physical_node_map[brd->brd_nasid] = numionodes;
586 root_lboard[numionodes] = brd; 595 root_lboard[numionodes] = brd;
587 numionodes++; 596 numionodes++;
@@ -602,8 +611,7 @@ static void __init scan_for_ionodes(void)
602 root_lboard[nasid_to_cnodeid(nasid)], 611 root_lboard[nasid_to_cnodeid(nasid)],
603 KLTYPE_TIO); 612 KLTYPE_TIO);
604 while (brd) { 613 while (brd) {
605 pda->cnodeid_to_nasid_table[numionodes] = 614 sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid;
606 brd->brd_nasid;
607 physical_node_map[brd->brd_nasid] = numionodes; 615 physical_node_map[brd->brd_nasid] = numionodes;
608 root_lboard[numionodes] = brd; 616 root_lboard[numionodes] = brd;
609 numionodes++; 617 numionodes++;
@@ -614,7 +622,6 @@ static void __init scan_for_ionodes(void)
614 brd = find_lboard_any(brd, KLTYPE_TIO); 622 brd = find_lboard_any(brd, KLTYPE_TIO);
615 } 623 }
616 } 624 }
617
618} 625}
619 626
620int 627int
@@ -623,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice)
623 long cpu; 630 long cpu;
624 631
625 for (cpu=0; cpu < NR_CPUS; cpu++) 632 for (cpu=0; cpu < NR_CPUS; cpu++)
626 if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) 633 if (cpuid_to_nasid(cpu) == nasid &&
634 cpuid_to_slice(cpu) == slice)
627 return cpu; 635 return cpu;
628 636
629 return -1; 637 return -1;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 66190d7e492d..ab9b5f35c2a7 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -21,6 +21,8 @@
21#include <asm/sn/types.h> 21#include <asm/sn/types.h>
22#include <asm/sn/shubio.h> 22#include <asm/sn/shubio.h>
23#include <asm/sn/tiocx.h> 23#include <asm/sn/tiocx.h>
24#include <asm/sn/l1.h>
25#include <asm/sn/module.h>
24#include "tio.h" 26#include "tio.h"
25#include "xtalk/xwidgetdev.h" 27#include "xtalk/xwidgetdev.h"
26#include "xtalk/hubdev.h" 28#include "xtalk/hubdev.h"
@@ -308,14 +310,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
308 } 310 }
309} 311}
310 312
311uint64_t 313uint64_t tiocx_dma_addr(uint64_t addr)
312tiocx_dma_addr(uint64_t addr)
313{ 314{
314 return PHYS_TO_TIODMA(addr); 315 return PHYS_TO_TIODMA(addr);
315} 316}
316 317
317uint64_t 318uint64_t tiocx_swin_base(int nasid)
318tiocx_swin_base(int nasid)
319{ 319{
320 return TIO_SWIN_BASE(nasid, TIOCX_CORELET); 320 return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
321} 321}
@@ -330,19 +330,6 @@ EXPORT_SYMBOL(tiocx_bus_type);
330EXPORT_SYMBOL(tiocx_dma_addr); 330EXPORT_SYMBOL(tiocx_dma_addr);
331EXPORT_SYMBOL(tiocx_swin_base); 331EXPORT_SYMBOL(tiocx_swin_base);
332 332
333static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
334{
335
336 struct ia64_sal_retval ret_stuff;
337 ret_stuff.status = 0;
338 ret_stuff.v0 = 0;
339
340 ia64_sal_oemcall_nolock(&ret_stuff,
341 SN_SAL_IOIF_GET_HUBDEV_INFO,
342 handle, address, 0, 0, 0, 0, 0);
343 return ret_stuff.v0;
344}
345
346static void tio_conveyor_set(nasid_t nasid, int enable_flag) 333static void tio_conveyor_set(nasid_t nasid, int enable_flag)
347{ 334{
348 uint64_t ice_frz; 335 uint64_t ice_frz;
@@ -379,7 +366,29 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
379 udelay(2000); 366 udelay(2000);
380} 367}
381 368
382static int fpga_attached(nasid_t nasid) 369static int tiocx_btchar_get(int nasid)
370{
371 moduleid_t module_id;
372 geoid_t geoid;
373 int cnodeid;
374
375 cnodeid = nasid_to_cnodeid(nasid);
376 geoid = cnodeid_get_geoid(cnodeid);
377 module_id = geo_module(geoid);
378 return MODULE_GET_BTCHAR(module_id);
379}
380
381static int is_fpga_brick(int nasid)
382{
383 switch (tiocx_btchar_get(nasid)) {
384 case L1_BRICKTYPE_SA:
385 case L1_BRICKTYPE_ATHENA:
386 return 1;
387 }
388 return 0;
389}
390
391static int bitstream_loaded(nasid_t nasid)
383{ 392{
384 uint64_t cx_credits; 393 uint64_t cx_credits;
385 394
@@ -396,7 +405,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
396 int mfg_num = CX_DEV_NONE; 405 int mfg_num = CX_DEV_NONE;
397 nasid_t nasid = cx_dev->cx_id.nasid; 406 nasid_t nasid = cx_dev->cx_id.nasid;
398 407
399 if (fpga_attached(nasid)) { 408 if (bitstream_loaded(nasid)) {
400 uint64_t cx_id; 409 uint64_t cx_id;
401 410
402 cx_id = 411 cx_id =
@@ -427,9 +436,10 @@ static ssize_t show_cxdev_control(struct device *dev, char *buf)
427{ 436{
428 struct cx_dev *cx_dev = to_cx_dev(dev); 437 struct cx_dev *cx_dev = to_cx_dev(dev);
429 438
430 return sprintf(buf, "0x%x 0x%x 0x%x\n", 439 return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
431 cx_dev->cx_id.nasid, 440 cx_dev->cx_id.nasid,
432 cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num); 441 cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
442 tiocx_btchar_get(cx_dev->cx_id.nasid));
433} 443}
434 444
435static ssize_t store_cxdev_control(struct device *dev, const char *buf, 445static ssize_t store_cxdev_control(struct device *dev, const char *buf,
@@ -475,20 +485,14 @@ static int __init tiocx_init(void)
475 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) 485 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
476 break; /* No more nasids .. bail out of loop */ 486 break; /* No more nasids .. bail out of loop */
477 487
478 if (nasid & 0x1) { /* TIO's are always odd */ 488 if ((nasid & 0x1) && is_fpga_brick(nasid)) {
479 struct hubdev_info *hubdev; 489 struct hubdev_info *hubdev;
480 uint64_t status;
481 struct xwidget_info *widgetp; 490 struct xwidget_info *widgetp;
482 491
483 DBG("Found TIO at nasid 0x%x\n", nasid); 492 DBG("Found TIO at nasid 0x%x\n", nasid);
484 493
485 hubdev = 494 hubdev =
486 (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); 495 (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
487 status =
488 tiocx_get_hubdev_info(nasid,
489 (uint64_t) __pa(hubdev));
490 if (status)
491 continue;
492 496
493 widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; 497 widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
494 498
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
new file mode 100644
index 000000000000..3be52a34c80f
--- /dev/null
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -0,0 +1,289 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition (XP) base.
12 *
13 * XP provides a base from which its users can interact
14 * with XPC, yet not be dependent on XPC.
15 *
16 */
17
18
19#include <linux/kernel.h>
20#include <linux/interrupt.h>
21#include <linux/module.h>
22#include <asm/sn/intr.h>
23#include <asm/sn/sn_sal.h>
24#include <asm/sn/xp.h>
25
26
27/*
28 * Target of nofault PIO read.
29 */
30u64 xp_nofault_PIOR_target;
31
32
33/*
34 * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
35 * users of XPC.
36 */
37struct xpc_registration xpc_registrations[XPC_NCHANNELS];
38
39
40/*
41 * Initialize the XPC interface to indicate that XPC isn't loaded.
42 */
43static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
44
45struct xpc_interface xpc_interface = {
46 (void (*)(int)) xpc_notloaded,
47 (void (*)(int)) xpc_notloaded,
48 (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
49 (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
50 (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
51 xpc_notloaded,
52 (void (*)(partid_t, int, void *)) xpc_notloaded,
53 (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
54};
55
56
57/*
58 * XPC calls this when it (the XPC module) has been loaded.
59 */
60void
61xpc_set_interface(void (*connect)(int),
62 void (*disconnect)(int),
63 enum xpc_retval (*allocate)(partid_t, int, u32, void **),
64 enum xpc_retval (*send)(partid_t, int, void *),
65 enum xpc_retval (*send_notify)(partid_t, int, void *,
66 xpc_notify_func, void *),
67 void (*received)(partid_t, int, void *),
68 enum xpc_retval (*partid_to_nasids)(partid_t, void *))
69{
70 xpc_interface.connect = connect;
71 xpc_interface.disconnect = disconnect;
72 xpc_interface.allocate = allocate;
73 xpc_interface.send = send;
74 xpc_interface.send_notify = send_notify;
75 xpc_interface.received = received;
76 xpc_interface.partid_to_nasids = partid_to_nasids;
77}
78
79
80/*
81 * XPC calls this when it (the XPC module) is being unloaded.
82 */
83void
84xpc_clear_interface(void)
85{
86 xpc_interface.connect = (void (*)(int)) xpc_notloaded;
87 xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
88 xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
89 void **)) xpc_notloaded;
90 xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
91 xpc_notloaded;
92 xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
93 xpc_notify_func, void *)) xpc_notloaded;
94 xpc_interface.received = (void (*)(partid_t, int, void *))
95 xpc_notloaded;
96 xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
97 xpc_notloaded;
98}
99
100
101/*
102 * Register for automatic establishment of a channel connection whenever
103 * a partition comes up.
104 *
105 * Arguments:
106 *
107 * ch_number - channel # to register for connection.
108 * func - function to call for asynchronous notification of channel
109 * state changes (i.e., connection, disconnection, error) and
110 * the arrival of incoming messages.
111 * key - pointer to optional user-defined value that gets passed back
112 * to the user on any callouts made to func.
113 * payload_size - size in bytes of the XPC message's payload area which
114 * contains a user-defined message. The user should make
115 * this large enough to hold their largest message.
116 * nentries - max #of XPC message entries a message queue can contain.
117 * The actual number, which is determined when a connection
118 * is established and may be less then requested, will be
119 * passed to the user via the xpcConnected callout.
120 * assigned_limit - max number of kthreads allowed to be processing
121 * messages (per connection) at any given instant.
122 * idle_limit - max number of kthreads allowed to be idle at any given
123 * instant.
124 */
125enum xpc_retval
126xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
127 u16 nentries, u32 assigned_limit, u32 idle_limit)
128{
129 struct xpc_registration *registration;
130
131
132 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
133 DBUG_ON(payload_size == 0 || nentries == 0);
134 DBUG_ON(func == NULL);
135 DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
136
137 registration = &xpc_registrations[ch_number];
138
139 if (down_interruptible(&registration->sema) != 0) {
140 return xpcInterrupted;
141 }
142
143 /* if XPC_CHANNEL_REGISTERED(ch_number) */
144 if (registration->func != NULL) {
145 up(&registration->sema);
146 return xpcAlreadyRegistered;
147 }
148
149 /* register the channel for connection */
150 registration->msg_size = XPC_MSG_SIZE(payload_size);
151 registration->nentries = nentries;
152 registration->assigned_limit = assigned_limit;
153 registration->idle_limit = idle_limit;
154 registration->key = key;
155 registration->func = func;
156
157 up(&registration->sema);
158
159 xpc_interface.connect(ch_number);
160
161 return xpcSuccess;
162}
163
164
165/*
166 * Remove the registration for automatic connection of the specified channel
167 * when a partition comes up.
168 *
169 * Before returning this xpc_disconnect() will wait for all connections on the
170 * specified channel have been closed/torndown. So the caller can be assured
171 * that they will not be receiving any more callouts from XPC to their
172 * function registered via xpc_connect().
173 *
174 * Arguments:
175 *
176 * ch_number - channel # to unregister.
177 */
178void
179xpc_disconnect(int ch_number)
180{
181 struct xpc_registration *registration;
182
183
184 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
185
186 registration = &xpc_registrations[ch_number];
187
188 /*
189 * We've decided not to make this a down_interruptible(), since we
190 * figured XPC's users will just turn around and call xpc_disconnect()
191 * again anyways, so we might as well wait, if need be.
192 */
193 down(&registration->sema);
194
195 /* if !XPC_CHANNEL_REGISTERED(ch_number) */
196 if (registration->func == NULL) {
197 up(&registration->sema);
198 return;
199 }
200
201 /* remove the connection registration for the specified channel */
202 registration->func = NULL;
203 registration->key = NULL;
204 registration->nentries = 0;
205 registration->msg_size = 0;
206 registration->assigned_limit = 0;
207 registration->idle_limit = 0;
208
209 xpc_interface.disconnect(ch_number);
210
211 up(&registration->sema);
212
213 return;
214}
215
216
217int __init
218xp_init(void)
219{
220 int ret, ch_number;
221 u64 func_addr = *(u64 *) xp_nofault_PIOR;
222 u64 err_func_addr = *(u64 *) xp_error_PIOR;
223
224
225 if (!ia64_platform_is("sn2")) {
226 return -ENODEV;
227 }
228
229 /*
230 * Register a nofault code region which performs a cross-partition
231 * PIO read. If the PIO read times out, the MCA handler will consume
232 * the error and return to a kernel-provided instruction to indicate
233 * an error. This PIO read exists because it is guaranteed to time out
234 * if the destination is down (AMO operations do not time out on at
235 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
236 * work around).
237 */
238 if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
239 err_func_addr, 1, 1)) != 0) {
240 printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
241 ret);
242 }
243 /*
244 * Set up the nofault PIO read target. (There is no special reason why
245 * SH_IPI_ACCESS was selected.)
246 */
247 if (is_shub2()) {
248 xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
249 } else {
250 xp_nofault_PIOR_target = SH1_IPI_ACCESS;
251 }
252
253 /* initialize the connection registration semaphores */
254 for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
255 sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
256 }
257
258 return 0;
259}
260module_init(xp_init);
261
262
263void __exit
264xp_exit(void)
265{
266 u64 func_addr = *(u64 *) xp_nofault_PIOR;
267 u64 err_func_addr = *(u64 *) xp_error_PIOR;
268
269
270 /* unregister the PIO read nofault code region */
271 (void) sn_register_nofault_code(func_addr, err_func_addr,
272 err_func_addr, 1, 0);
273}
274module_exit(xp_exit);
275
276
277MODULE_AUTHOR("Silicon Graphics, Inc.");
278MODULE_DESCRIPTION("Cross Partition (XP) base");
279MODULE_LICENSE("GPL");
280
281EXPORT_SYMBOL(xp_nofault_PIOR);
282EXPORT_SYMBOL(xp_nofault_PIOR_target);
283EXPORT_SYMBOL(xpc_registrations);
284EXPORT_SYMBOL(xpc_interface);
285EXPORT_SYMBOL(xpc_clear_interface);
286EXPORT_SYMBOL(xpc_set_interface);
287EXPORT_SYMBOL(xpc_connect);
288EXPORT_SYMBOL(xpc_disconnect);
289
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S
new file mode 100644
index 000000000000..b772543053c9
--- /dev/null
+++ b/arch/ia64/sn/kernel/xp_nofault.S
@@ -0,0 +1,31 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * The xp_nofault_PIOR function takes a pointer to a remote PIO register
12 * and attempts to load and consume a value from it. This function
13 * will be registered as a nofault code block. In the event that the
14 * PIO read fails, the MCA handler will force the error to look
15 * corrected and vector to the xp_error_PIOR which will return an error.
16 *
17 * extern int xp_nofault_PIOR(void *remote_register);
18 */
19
20 .global xp_nofault_PIOR
21xp_nofault_PIOR:
22 mov r8=r0 // Stage a success return value
23 ld8.acq r9=[r32];; // PIO Read the specified register
24 adds r9=1,r9 // Add to force a consume
25 br.ret.sptk.many b0;; // Return success
26
27 .global xp_error_PIOR
28xp_error_PIOR:
29 mov r8=1 // Return value of 1
30 br.ret.sptk.many b0;; // Return failure
31
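
From the C side the nofault read is just a call that returns nonzero when the MCA error path fired; a hedged usage sketch follows (the helper name and the register pointer are placeholders, not part of this patch).

/* illustrative caller of xp_nofault_PIOR() -- helper name is a placeholder */
static int
my_probe_remote(volatile u64 *remote_mmr)
{
	if (xp_nofault_PIOR((void *) remote_mmr) != 0) {
		return -EIO;	/* PIO read failed; remote presumed unreachable */
	}
	return 0;	/* read completed; value was consumed and discarded */
}
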
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
new file mode 100644
index 000000000000..1a0aed8490d1
--- /dev/null
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -0,0 +1,991 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) structures and macros.
12 */
13
14#ifndef _IA64_SN_KERNEL_XPC_H
15#define _IA64_SN_KERNEL_XPC_H
16
17
18#include <linux/config.h>
19#include <linux/interrupt.h>
20#include <linux/sysctl.h>
21#include <linux/device.h>
22#include <asm/pgtable.h>
23#include <asm/processor.h>
24#include <asm/sn/bte.h>
25#include <asm/sn/clksupport.h>
26#include <asm/sn/addrs.h>
27#include <asm/sn/mspec.h>
28#include <asm/sn/shub_mmr.h>
29#include <asm/sn/xp.h>
30
31
32/*
33 * XPC Version numbers consist of a major and minor number. XPC can always
34 * talk to versions with same major #, and never talk to versions with a
35 * different major #.
36 */
37#define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf))
38#define XPC_VERSION_MAJOR(_v) ((_v) >> 4)
39#define XPC_VERSION_MINOR(_v) ((_v) & 0xf)
40
41
42/*
43 * The next macros define word or bit representations for given
44 * C-brick nasid in either the SAL provided bit array representing
45 * nasids in the partition/machine or the AMO_t array used for
46 * inter-partition initiation communications.
47 *
48 * For SN2 machines, C-Bricks are always even-numbered NASIDs. As
49 * such, some space will be saved by insisting that nasid information
50 * passed from SAL always be packed for C-Bricks and that the
51 * cross-partition interrupts use the same packing scheme.
52 */
53#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2)
54#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1))
55#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
56 (1UL << XPC_NASID_B_INDEX(_n)))
57#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
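
To make the packing concrete: for an even (C-brick) nasid such as 130, XPC_NASID_W_INDEX() yields word 1, XPC_NASID_B_INDEX() yields bit 1, and XPC_NASID_FROM_W_B(1, 1) maps back to 130. A hedged round-trip helper, not part of this patch:

/* illustrative round trip through the nasid packing macros above */
static inline int
my_nasid_is_in_mask(int nasid, u64 *packed_mask)
{
	int w = XPC_NASID_W_INDEX(nasid);	/* e.g. nasid 130 -> word 1 */
	int b = XPC_NASID_B_INDEX(nasid);	/* e.g. nasid 130 -> bit 1 */

	BUG_ON(XPC_NASID_FROM_W_B(w, b) != nasid);	/* holds for even nasids only */

	/* nonzero iff the nasid's bit is set in the packed (SAL-style) mask */
	return (XPC_NASID_IN_ARRAY(nasid, packed_mask) != 0);
}
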
58
59#define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */
60#define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */
61
62/* define the process name of HB checker and the CPU it is pinned to */
63#define XPC_HB_CHECK_THREAD_NAME "xpc_hb"
64#define XPC_HB_CHECK_CPU 0
65
66/* define the process name of the discovery thread */
67#define XPC_DISCOVERY_THREAD_NAME "xpc_discovery"
68
69
70#define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p)))
71#define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p))
72#define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p)))
73
74
75/*
76 * Reserved Page provided by SAL.
77 *
78 * SAL provides one page per partition of reserved memory. When SAL
79 * initialization is complete, SAL_signature, SAL_version, partid,
80 * part_nasids, and mach_nasids are set.
81 *
82 * Note: Until vars_pa is set, the partition XPC code has not been initialized.
83 */
84struct xpc_rsvd_page {
85 u64 SAL_signature; /* SAL unique signature */
86 u64 SAL_version; /* SAL specified version */
87 u8 partid; /* partition ID from SAL */
88 u8 version;
89 u8 pad[6]; /* pad to u64 align */
90 u64 vars_pa;
91 u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
92 u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
93};
94#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
95
96#define XPC_RSVD_PAGE_ALIGNED_SIZE \
97 (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
98
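
Per the note above, a remote partition's XPC readiness can be judged from vars_pa alone; a small hedged check (the helper name is a placeholder):

/* illustrative check of the note above -- helper name is a placeholder */
static inline int
my_remote_xpc_initialized(struct xpc_rsvd_page *remote_rp)
{
	/* vars_pa is only filled in once that partition's XPC has initialized */
	return (remote_rp->vars_pa != 0);
}
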
99
100/*
101 * Define the structures by which XPC variables can be exported to other
102 * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
103 */
104
105/*
106 * The following structure describes the partition generic variables
107 * needed by other partitions in order to properly initialize.
108 *
109 * struct xpc_vars version number also applies to struct xpc_vars_part.
110 * Changes to either structure and/or related functionality should be
111 * reflected by incrementing either the major or minor version numbers
112 * of struct xpc_vars.
113 */
114struct xpc_vars {
115 u8 version;
116 u64 heartbeat;
117 u64 heartbeating_to_mask;
118 u64 kdb_status; /* 0 = machine running */
119 int act_nasid;
120 int act_phys_cpuid;
121 u64 vars_part_pa;
122 u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */
123 AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */
124 AMO_t *act_amos; /* pointer to the first activation AMO */
125};
126#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
127
128#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
129
130/*
131 * The following structure describes the per partition specific variables.
132 *
133 * An array of these structures, one per partition, will be defined. As a
134 * partition becomes active XPC will copy the array entry corresponding to
135 * itself from that partition. It is desirable that the size of this
136 * structure evenly divide into a cacheline, such that none of the entries
137 * in this array crosses a cacheline boundary. As it is now, each entry
138 * occupies half a cacheline.
139 */
140struct xpc_vars_part {
141 u64 magic;
142
143 u64 openclose_args_pa; /* physical address of open and close args */
144 u64 GPs_pa; /* physical address of Get/Put values */
145
146 u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */
147 int IPI_nasid; /* nasid of where to send IPIs */
148 int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */
149
150 u8 nchannels; /* #of defined channels supported */
151
152 u8 reserved[23]; /* pad to a full 64 bytes */
153};
154
155/*
156 * The vars_part MAGIC numbers play a part in the first contact protocol.
157 *
158 * MAGIC1 indicates that the per partition specific variables for a remote
159 * partition have been initialized by this partition.
160 *
161 * MAGIC2 indicates that this partition has pulled the remote partition's
162 * per partition variables that pertain to this partition.
163 */
164#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */
165#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */
166
167
168
169/*
170 * Functions registered by add_timer() or called by kernel_thread() only
171 * allow for a single 64-bit argument. The following macros can be used to
172 * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
173 * the passed argument.
174 */
175#define XPC_PACK_ARGS(_arg1, _arg2) \
176 ((((u64) _arg1) & 0xffffffff) | \
177 ((((u64) _arg2) & 0xffffffff) << 32))
178
179#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
180#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
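
A hedged sketch of the intended use of these macros: pack a partition ID and channel number into the single argument that kernel_thread() carries, then unpack them in the new thread. The function names are placeholders, not part of this patch.

/* illustrative use of XPC_PACK_ARGS()/XPC_UNPACK_ARG*() -- names are placeholders */
static int
my_channel_kthread(void *arg)
{
	partid_t partid = (partid_t) XPC_UNPACK_ARG1(arg);
	int ch_number = (int) XPC_UNPACK_ARG2(arg);

	/* ... work on the (partid, ch_number) channel ... */
	return 0;
}

static void
my_spawn_channel_kthread(partid_t partid, int ch_number)
{
	u64 args = XPC_PACK_ARGS(partid, ch_number);

	kernel_thread(my_channel_kthread, (void *) args, 0);
}
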
181
182
183
184/*
185 * Define a Get/Put value pair (pointers) used with a message queue.
186 */
187struct xpc_gp {
188 s64 get; /* Get value */
189 s64 put; /* Put value */
190};
191
192#define XPC_GP_SIZE \
193 L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
194
195
196
197/*
198 * Define a structure that contains arguments associated with opening and
199 * closing a channel.
200 */
201struct xpc_openclose_args {
202 u16 reason; /* reason why channel is closing */
203 u16 msg_size; /* sizeof each message entry */
204 u16 remote_nentries; /* #of message entries in remote msg queue */
205 u16 local_nentries; /* #of message entries in local msg queue */
206 u64 local_msgqueue_pa; /* physical address of local message queue */
207};
208
209#define XPC_OPENCLOSE_ARGS_SIZE \
210 L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
211
212
213
214/* struct xpc_msg flags */
215
216#define XPC_M_DONE 0x01 /* msg has been received/consumed */
217#define XPC_M_READY 0x02 /* msg is ready to be sent */
218#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */
219
220
221#define XPC_MSG_ADDRESS(_payload) \
222 ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
223
224
225
226/*
227 * Defines notify entry.
228 *
229 * This is used to notify a message's sender that their message was received
230 * and consumed by the intended recipient.
231 */
232struct xpc_notify {
233 struct semaphore sema; /* notify semaphore */
234 u8 type; /* type of notification */
235
236 /* the following two fields are only used if type == XPC_N_CALL */
237 xpc_notify_func func; /* user's notify function */
238 void *key; /* pointer to user's key */
239};
240
241/* struct xpc_notify type of notification */
242
243#define XPC_N_CALL 0x01 /* notify function provided by user */
244
245
246
247/*
248 * Define the structure that manages all the stuff required by a channel. In
249 * particular, they are used to manage the messages sent across the channel.
250 *
251 * This structure is private to a partition, and is NOT shared across the
252 * partition boundary.
253 *
254 * There is an array of these structures for each remote partition. It is
255 * allocated at the time a partition becomes active. The array contains one
256 * of these structures for each potential channel connection to that partition.
257 *
258 * Each of these structures manages two message queues (circular buffers).
259 * They are allocated at the time a channel connection is made. One of
260 * these message queues (local_msgqueue) holds the locally created messages
261 * that are destined for the remote partition. The other of these message
262 * queues (remote_msgqueue) is a locally cached copy of the remote partition's
263 * own local_msgqueue.
264 *
265 * The following is a description of the Get/Put pointers used to manage these
266 * two message queues. Consider the local_msgqueue to be on one partition
267 * and the remote_msgqueue to be its cached copy on another partition. A
268 * description of what each of the lettered areas contains is included.
269 *
270 *
271 * local_msgqueue remote_msgqueue
272 *
273 * |/////////| |/////////|
274 * w_remote_GP.get --> +---------+ |/////////|
275 * | F | |/////////|
276 * remote_GP.get --> +---------+ +---------+ <-- local_GP->get
277 * | | | |
278 * | | | E |
279 * | | | |
280 * | | +---------+ <-- w_local_GP.get
281 * | B | |/////////|
282 * | | |////D////|
283 * | | |/////////|
284 * | | +---------+ <-- w_remote_GP.put
285 * | | |////C////|
286 * local_GP->put --> +---------+ +---------+ <-- remote_GP.put
287 * | | |/////////|
288 * | A | |/////////|
289 * | | |/////////|
290 * w_local_GP.put --> +---------+ |/////////|
291 * |/////////| |/////////|
292 *
293 *
294 * ( remote_GP.[get|put] are cached copies of the remote
295 * partition's local_GP->[get|put], and thus their values can
296 * lag behind their counterparts on the remote partition. )
297 *
298 *
299 * A - Messages that have been allocated, but have not yet been sent to the
300 * remote partition.
301 *
302 * B - Messages that have been sent, but have not yet been acknowledged by the
303 * remote partition as having been received.
304 *
305 * C - Area that needs to be prepared for the copying of sent messages, by
306 * the clearing of the message flags of any previously received messages.
307 *
308 * D - Area into which sent messages are to be copied from the remote
309 * partition's local_msgqueue and then delivered to their intended
310 * recipients. [ To allow for a multi-message copy, another pointer
311 * (next_msg_to_pull) has been added to keep track of the next message
312 * number needing to be copied (pulled). It chases after w_remote_GP.put.
313 * Any messages lying between w_local_GP.get and next_msg_to_pull have
314 * been copied and are ready to be delivered. ]
315 *
316 * E - Messages that have been copied and delivered, but have not yet been
317 * acknowledged by the recipient as having been received.
318 *
319 * F - Messages that have been acknowledged, but XPC has not yet notified the
320 * sender that the message was received by its intended recipient.
321 * This is also an area that needs to be prepared for the allocating of
322 * new messages, by the clearing of the message flags of the acknowledged
323 * messages.
324 */
325struct xpc_channel {
326 partid_t partid; /* ID of remote partition connected */
327 spinlock_t lock; /* lock for updating this structure */
328 u32 flags; /* general flags */
329
330 enum xpc_retval reason; /* reason why channel is disconnect'g */
331 int reason_line; /* line# disconnect initiated from */
332
333 u16 number; /* channel # */
334
335 u16 msg_size; /* sizeof each msg entry */
336 u16 local_nentries; /* #of msg entries in local msg queue */
337 u16 remote_nentries; /* #of msg entries in remote msg queue*/
338
339 void *local_msgqueue_base; /* base address of kmalloc'd space */
340 struct xpc_msg *local_msgqueue; /* local message queue */
341 void *remote_msgqueue_base; /* base address of kmalloc'd space */
342 struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
343 /* local message queue */
344 u64 remote_msgqueue_pa; /* phys addr of remote partition's */
345 /* local message queue */
346
347 atomic_t references; /* #of external references to queues */
348
349 atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */
350 wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
351
352 /* queue of msg senders who want to be notified when msg received */
353
354 atomic_t n_to_notify; /* #of msg senders to notify */
355 struct xpc_notify *notify_queue;/* notify queue for messages sent */
356
357 xpc_channel_func func; /* user's channel function */
358 void *key; /* pointer to user's key */
359
360 struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
361 struct semaphore teardown_sema; /* wait for teardown completion */
362
363 struct xpc_openclose_args *local_openclose_args; /* args passed on */
364 /* opening or closing of channel */
365
366 /* various flavors of local and remote Get/Put values */
367
368 struct xpc_gp *local_GP; /* local Get/Put values */
369 struct xpc_gp remote_GP; /* remote Get/Put values */
370 struct xpc_gp w_local_GP; /* working local Get/Put values */
371 struct xpc_gp w_remote_GP; /* working remote Get/Put values */
372 s64 next_msg_to_pull; /* Put value of next msg to pull */
373
374 /* kthread management related fields */
375
376// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
377// >>> allow the assigned limit to be unbounded and let the idle limit be dynamic
378// >>> dependent on activity over the last interval of time
379 atomic_t kthreads_assigned; /* #of kthreads assigned to channel */
380 u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */
381 atomic_t kthreads_idle; /* #of kthreads idle waiting for work */
382 u32 kthreads_idle_limit; /* limit on #of kthreads idle */
383 atomic_t kthreads_active; /* #of kthreads actively working */
384 // >>> following field is temporary
385 u32 kthreads_created; /* total #of kthreads created */
386
387 wait_queue_head_t idle_wq; /* idle kthread wait queue */
388
389} ____cacheline_aligned;
390
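
Read against the diagram above, each lettered region is simply the difference of two Get/Put values (they behave as free-running s64 message counters; a queue slot is the counter modulo the queue's nentries). A hedged sketch, with helper names that are not part of this patch:

/* illustrative counts for some of the lettered regions above */
static inline s64
my_nmsgs_allocated_unsent(struct xpc_channel *ch)	/* area A */
{
	return ch->w_local_GP.put - ch->local_GP->put;
}

static inline s64
my_nmsgs_sent_unacked(struct xpc_channel *ch)		/* area B */
{
	return ch->local_GP->put - ch->remote_GP.get;
}

static inline s64
my_nmsgs_awaiting_delivery(struct xpc_channel *ch)	/* area D (chased by next_msg_to_pull) */
{
	return ch->w_remote_GP.put - ch->w_local_GP.get;
}
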
391
392/* struct xpc_channel flags */
393
394#define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */
395
396#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */
397#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */
398#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */
399#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */
400
401#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */
402#define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */
403#define XPC_C_CONNECTED 0x00000080 /* local channel is connected */
404#define XPC_C_CONNECTING 0x00000100 /* channel is being connected */
405
406#define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */
407#define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */
408#define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */
409#define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */
410
411#define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */
412#define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */
413
414
415
416/*
417 * Manages channels on a partition basis. There is one of these structures
418 * for each partition (a partition will never utilize the structure that
419 * represents itself).
420 */
421struct xpc_partition {
422
423 /* XPC HB infrastructure */
424
425 u64 remote_rp_pa; /* phys addr of partition's rsvd pg */
426 u64 remote_vars_pa; /* phys addr of partition's vars */
427 u64 remote_vars_part_pa; /* phys addr of partition's vars part */
428 u64 last_heartbeat; /* HB at last read */
429 u64 remote_amos_page_pa; /* phys addr of partition's amos page */
430 int remote_act_nasid; /* active part's act/deact nasid */
431 int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
432 u32 act_IRQ_rcvd; /* IRQs since activation */
433 spinlock_t act_lock; /* protect updating of act_state */
434 u8 act_state; /* from XPC HB viewpoint */
435 enum xpc_retval reason; /* reason partition is deactivating */
436 int reason_line; /* line# deactivation initiated from */
437 int reactivate_nasid; /* nasid in partition to reactivate */
438
439
440 /* XPC infrastructure referencing and teardown control */
441
442 u8 setup_state; /* infrastructure setup state */
443 wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */
444 atomic_t references; /* #of references to infrastructure */
445
446
447 /*
448 * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
449 * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
450 * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
451 * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
452 */
453
454
455 u8 nchannels; /* #of defined channels supported */
456 atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
457 struct xpc_channel *channels;/* array of channel structures */
458
459 void *local_GPs_base; /* base address of kmalloc'd space */
460 struct xpc_gp *local_GPs; /* local Get/Put values */
461 void *remote_GPs_base; /* base address of kmalloc'd space */
462 struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
463 /* values */
464 u64 remote_GPs_pa; /* phys address of remote partition's local */
465 /* Get/Put values */
466
467
468 /* fields used to pass args when opening or closing a channel */
469
470 void *local_openclose_args_base; /* base address of kmalloc'd space */
471 struct xpc_openclose_args *local_openclose_args; /* local's args */
472 void *remote_openclose_args_base; /* base address of kmalloc'd space */
473 struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
474 /* args */
475 u64 remote_openclose_args_pa; /* phys addr of remote's args */
476
477
478 /* IPI sending, receiving and handling related fields */
479
480 int remote_IPI_nasid; /* nasid of where to send IPIs */
481 int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */
482 AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */
483
484 AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */
485 u64 local_IPI_amo; /* IPI amo flags yet to be handled */
486 char IPI_owner[8]; /* IPI owner's name */
487 struct timer_list dropped_IPI_timer; /* dropped IPI timer */
488
489 spinlock_t IPI_lock; /* IPI handler lock */
490
491
492 /* channel manager related fields */
493
494 atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */
495 wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
496
497} ____cacheline_aligned;
498
499
500/* struct xpc_partition act_state values (for XPC HB) */
501
502#define XPC_P_INACTIVE 0x00 /* partition is not active */
503#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
504#define XPC_P_ACTIVATING 0x02 /* activation thread started */
505#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
506#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
507
508
509#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
510 xpc_deactivate_partition(__LINE__, (_p), (_reason))
511
512
513/* struct xpc_partition setup_state values */
514
515#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
516#define XPC_P_SETUP 0x01 /* infrastructure is setup */
517#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
518#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
519
520
521/*
522 * struct xpc_partition dropped_IPI_timer: #of jiffies to wait before checking for
523 * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
524 * after the IPI was received.
525 */
526#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
527
528
529#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
530
531
532
533/* found in xp_main.c */
534extern struct xpc_registration xpc_registrations[];
535
536
537/* >>> found in xpc_main.c only */
538extern struct device *xpc_part;
539extern struct device *xpc_chan;
540extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
541extern void xpc_dropped_IPI_check(struct xpc_partition *);
542extern void xpc_activate_kthreads(struct xpc_channel *, int);
543extern void xpc_create_kthreads(struct xpc_channel *, int);
544extern void xpc_disconnect_wait(int);
545
546
547/* found in xpc_main.c and efi-xpc.c */
548extern void xpc_activate_partition(struct xpc_partition *);
549
550
551/* found in xpc_partition.c */
552extern int xpc_exiting;
553extern int xpc_hb_interval;
554extern int xpc_hb_check_interval;
555extern struct xpc_vars *xpc_vars;
556extern struct xpc_rsvd_page *xpc_rsvd_page;
557extern struct xpc_vars_part *xpc_vars_part;
558extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
559extern char xpc_remote_copy_buffer[];
560extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
561extern void xpc_allow_IPI_ops(void);
562extern void xpc_restrict_IPI_ops(void);
563extern int xpc_identify_act_IRQ_sender(void);
564extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
565extern void xpc_mark_partition_inactive(struct xpc_partition *);
566extern void xpc_discovery(void);
567extern void xpc_check_remote_hb(void);
568extern void xpc_deactivate_partition(const int, struct xpc_partition *,
569 enum xpc_retval);
570extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
571
572
573/* found in xpc_channel.c */
574extern void xpc_initiate_connect(int);
575extern void xpc_initiate_disconnect(int);
576extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
577extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
578extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
579 xpc_notify_func, void *);
580extern void xpc_initiate_received(partid_t, int, void *);
581extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
582extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
583extern void xpc_process_channel_activity(struct xpc_partition *);
584extern void xpc_connected_callout(struct xpc_channel *);
585extern void xpc_deliver_msg(struct xpc_channel *);
586extern void xpc_disconnect_channel(const int, struct xpc_channel *,
587 enum xpc_retval, unsigned long *);
588extern void xpc_disconnected_callout(struct xpc_channel *);
589extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
590extern void xpc_teardown_infrastructure(struct xpc_partition *);
591
592
593
594static inline void
595xpc_wakeup_channel_mgr(struct xpc_partition *part)
596{
597 if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
598 wake_up(&part->channel_mgr_wq);
599 }
600}
601
602
603
604/*
605 * These next two inlines are used to keep us from tearing down a channel's
606 * msg queues while a thread may be referencing them.
607 */
608static inline void
609xpc_msgqueue_ref(struct xpc_channel *ch)
610{
611 atomic_inc(&ch->references);
612}
613
614static inline void
615xpc_msgqueue_deref(struct xpc_channel *ch)
616{
617 s32 refs = atomic_dec_return(&ch->references);
618
619 DBUG_ON(refs < 0);
620 if (refs == 0) {
621 xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
622 }
623}
624
625
626
627#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
628 xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
629
630
631/*
632 * These two inlines are used to keep us from tearing down a partition's
633 * setup infrastructure while a thread may be referencing it.
634 */
635static inline void
636xpc_part_deref(struct xpc_partition *part)
637{
638 s32 refs = atomic_dec_return(&part->references);
639
640
641 DBUG_ON(refs < 0);
642 if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
643 wake_up(&part->teardown_wq);
644 }
645}
646
647static inline int
648xpc_part_ref(struct xpc_partition *part)
649{
650 int setup;
651
652
653 atomic_inc(&part->references);
654 setup = (part->setup_state == XPC_P_SETUP);
655 if (!setup) {
656 xpc_part_deref(part);
657 }
658 return setup;
659}
660
661
662
663/*
664 * The following macro is to be used for the setting of the reason and
665 * reason_line fields in both the struct xpc_channel and struct xpc_partition
666 * structures.
667 */
668#define XPC_SET_REASON(_p, _reason, _line) \
669 { \
670 (_p)->reason = _reason; \
671 (_p)->reason_line = _line; \
672 }
673
674
675
676/*
677 * The following set of macros and inlines are used for the sending and
678 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
679 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
680 * the other that is associated with channel activity (SGI_XPC_NOTIFY).
681 */
682
683static inline u64
684xpc_IPI_receive(AMO_t *amo)
685{
686 return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
687}
688
689
690static inline enum xpc_retval
691xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
692{
693 int ret = 0;
694 unsigned long irq_flags;
695
696
697 local_irq_save(irq_flags);
698
699 FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
700 sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
701
702 /*
703 * We must always use the nofault function regardless of whether we
704 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
705 * didn't, we'd never know that the other partition is down and would
706 * keep sending IPIs and AMOs to it until the heartbeat times out.
707 */
708 ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
709 xp_nofault_PIOR_target));
710
711 local_irq_restore(irq_flags);
712
713 return ((ret == 0) ? xpcSuccess : xpcPioReadError);
714}
715
716
717/*
718 * IPIs associated with SGI_XPC_ACTIVATE IRQ.
719 */
720
721/*
722 * Flag the appropriate AMO variable and send an IPI to the specified node.
723 */
724static inline void
725xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
726 int to_phys_cpuid)
727{
728 int w_index = XPC_NASID_W_INDEX(from_nasid);
729 int b_index = XPC_NASID_B_INDEX(from_nasid);
730 AMO_t *amos = (AMO_t *) __va(amos_page +
731 (XP_MAX_PARTITIONS * sizeof(AMO_t)));
732
733
734 (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
735 to_phys_cpuid, SGI_XPC_ACTIVATE);
736}
737
738static inline void
739xpc_IPI_send_activate(struct xpc_vars *vars)
740{
741 xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
742 vars->act_nasid, vars->act_phys_cpuid);
743}
744
745static inline void
746xpc_IPI_send_activated(struct xpc_partition *part)
747{
748 xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
749 part->remote_act_nasid, part->remote_act_phys_cpuid);
750}
751
752static inline void
753xpc_IPI_send_reactivate(struct xpc_partition *part)
754{
755 xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
756 xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
757}
758
759
760/*
761 * IPIs associated with SGI_XPC_NOTIFY IRQ.
762 */
763
764/*
765 * Send an IPI to the remote partition that is associated with the
766 * specified channel.
767 */
768#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
769 xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
770
771static inline void
772xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
773 unsigned long *irq_flags)
774{
775 struct xpc_partition *part = &xpc_partitions[ch->partid];
776 enum xpc_retval ret;
777
778
779 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
780 ret = xpc_IPI_send(part->remote_IPI_amo_va,
781 (u64) ipi_flag << (ch->number * 8),
782 part->remote_IPI_nasid,
783 part->remote_IPI_phys_cpuid,
784 SGI_XPC_NOTIFY);
785 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
786 ipi_flag_string, ch->partid, ch->number, ret);
787 if (unlikely(ret != xpcSuccess)) {
788 if (irq_flags != NULL) {
789 spin_unlock_irqrestore(&ch->lock, *irq_flags);
790 }
791 XPC_DEACTIVATE_PARTITION(part, ret);
792 if (irq_flags != NULL) {
793 spin_lock_irqsave(&ch->lock, *irq_flags);
794 }
795 }
796 }
797}
798
799
800/*
801 * Make it look like the remote partition, which is associated with the
802 * specified channel, sent us an IPI. This faked IPI will be handled
803 * by xpc_dropped_IPI_check().
804 */
805#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
806 xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
807
808static inline void
809xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
810 char *ipi_flag_string)
811{
812 struct xpc_partition *part = &xpc_partitions[ch->partid];
813
814
815 FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
816 FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
817 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
818 ipi_flag_string, ch->partid, ch->number);
819}
820
821
822/*
823 * The sending and receiving of IPIs includes the setting of an AMO variable
824 * to indicate the reason the IPI was sent. The 64-bit variable is divided
825 * up into eight bytes, ordered from right to left. Byte zero pertains to
826 * channel 0, byte one to channel 1, and so on. Each byte is described by
827 * the following IPI flags.
828 */
829
830#define XPC_IPI_CLOSEREQUEST 0x01
831#define XPC_IPI_CLOSEREPLY 0x02
832#define XPC_IPI_OPENREQUEST 0x04
833#define XPC_IPI_OPENREPLY 0x08
834#define XPC_IPI_MSGREQUEST 0x10
835
836
837/* given an AMO variable and a channel#, get its associated IPI flags */
838#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
839
840#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
841#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010)
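
A hedged sketch of how the per-channel flag bytes of an AMO value would be walked; the dispatch bodies are placeholders (the real processing is done by xpc_process_channel_activity() in xpc_channel.c, which is more involved):

/* illustrative walk of the per-channel IPI flag bytes -- dispatch is a placeholder */
static inline void
my_dispatch_IPI_flags(struct xpc_partition *part, u64 IPI_amo)
{
	int ch_number;
	u8 IPI_flags;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);

		if (IPI_flags & XPC_IPI_MSGREQUEST) {
			/* messages are waiting on this channel */
		}
		if (IPI_flags & (XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY |
				XPC_IPI_CLOSEREQUEST | XPC_IPI_CLOSEREPLY)) {
			/* the channel's open/close state machine needs attention */
		}
	}
}
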
842
843
844static inline void
845xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
846{
847 struct xpc_openclose_args *args = ch->local_openclose_args;
848
849
850 args->reason = ch->reason;
851
852 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
853}
854
855static inline void
856xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
857{
858 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
859}
860
861static inline void
862xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
863{
864 struct xpc_openclose_args *args = ch->local_openclose_args;
865
866
867 args->msg_size = ch->msg_size;
868 args->local_nentries = ch->local_nentries;
869
870 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
871}
872
873static inline void
874xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
875{
876 struct xpc_openclose_args *args = ch->local_openclose_args;
877
878
879 args->remote_nentries = ch->remote_nentries;
880 args->local_nentries = ch->local_nentries;
881 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
882
883 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
884}
885
886static inline void
887xpc_IPI_send_msgrequest(struct xpc_channel *ch)
888{
889 XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
890}
891
892static inline void
893xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
894{
895 XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
896}
897
898
899/*
900 * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
901 * pages are located in the lowest granule. The lowest granule uses 4k pages
902 * for cached references and an alternate TLB handler to never provide a
903 * cacheable mapping for the entire region. This will prevent speculative
904 * reading of cached copies of our lines from being issued, which would cause
905 * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
906 * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
907 * and an additional 16 AMO variables for partition activation (xpc_hb.c).
908 */
909static inline AMO_t *
910xpc_IPI_init(partid_t partid)
911{
912 AMO_t *part_amo = xpc_vars->amos_page + partid;
913
914
915 xpc_IPI_receive(part_amo);
916 return part_amo;
917}
918
919
920
921static inline enum xpc_retval
922xpc_map_bte_errors(bte_result_t error)
923{
924 switch (error) {
925 case BTE_SUCCESS: return xpcSuccess;
926 case BTEFAIL_DIR: return xpcBteDirectoryError;
927 case BTEFAIL_POISON: return xpcBtePoisonError;
928 case BTEFAIL_WERR: return xpcBteWriteError;
929 case BTEFAIL_ACCESS: return xpcBteAccessError;
930 case BTEFAIL_PWERR: return xpcBtePWriteError;
931 case BTEFAIL_PRERR: return xpcBtePReadError;
932 case BTEFAIL_TOUT: return xpcBteTimeOutError;
933 case BTEFAIL_XTERR: return xpcBteXtalkError;
934 case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable;
935 default: return xpcBteUnmappedError;
936 }
937}
938
939
940
941static inline void *
942xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base)
943{
944	/* see if kmalloc will give us cacheline-aligned memory by default */
945 *base = kmalloc(size, flags);
946 if (*base == NULL) {
947 return NULL;
948 }
949 if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
950 return *base;
951 }
952 kfree(*base);
953
954 /* nope, we'll have to do it ourselves */
955 *base = kmalloc(size + L1_CACHE_BYTES, flags);
956 if (*base == NULL) {
957 return NULL;
958 }
959 return (void *) L1_CACHE_ALIGN((u64) *base);
960}
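
One caller-side detail worth spelling out: the pointer handed back through *base is the one to kfree(), never the aligned pointer returned by the function. A hedged usage sketch (names are placeholders):

/* illustrative caller of xpc_kmalloc_cacheline_aligned() */
static void *my_buf_base;

static void *
my_alloc_aligned(size_t nbytes)
{
	/* the return value is cacheline aligned; my_buf_base is what gets freed */
	return xpc_kmalloc_cacheline_aligned(nbytes, GFP_KERNEL, &my_buf_base);
}

static void
my_free_aligned(void)
{
	kfree(my_buf_base);	/* never kfree() the aligned pointer itself */
	my_buf_base = NULL;
}
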
961
962
963/*
964 * Check to see if there is any channel activity to/from the specified
965 * partition.
966 */
967static inline void
968xpc_check_for_channel_activity(struct xpc_partition *part)
969{
970 u64 IPI_amo;
971 unsigned long irq_flags;
972
973
974 IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
975 if (IPI_amo == 0) {
976 return;
977 }
978
979 spin_lock_irqsave(&part->IPI_lock, irq_flags);
980 part->local_IPI_amo |= IPI_amo;
981 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
982
983 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
984 XPC_PARTID(part), IPI_amo);
985
986 xpc_wakeup_channel_mgr(part);
987}
988
989
990#endif /* _IA64_SN_KERNEL_XPC_H */
991
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
new file mode 100644
index 000000000000..0bf6fbcc46d2
--- /dev/null
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -0,0 +1,2297 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) channel support.
12 *
13 * This is the part of XPC that manages the channels and
14 * sends/receives messages across them to/from other partitions.
15 *
16 */
17
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/cache.h>
23#include <linux/interrupt.h>
24#include <linux/slab.h>
25#include <asm/sn/bte.h>
26#include <asm/sn/sn_sal.h>
27#include "xpc.h"
28
29
30/*
31 * Set up the initial values for the XPartition Communication channels.
32 */
33static void
34xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
35{
36 int ch_number;
37 struct xpc_channel *ch;
38
39
40 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
41 ch = &part->channels[ch_number];
42
43 ch->partid = partid;
44 ch->number = ch_number;
45 ch->flags = XPC_C_DISCONNECTED;
46
47 ch->local_GP = &part->local_GPs[ch_number];
48 ch->local_openclose_args =
49 &part->local_openclose_args[ch_number];
50
51 atomic_set(&ch->kthreads_assigned, 0);
52 atomic_set(&ch->kthreads_idle, 0);
53 atomic_set(&ch->kthreads_active, 0);
54
55 atomic_set(&ch->references, 0);
56 atomic_set(&ch->n_to_notify, 0);
57
58 spin_lock_init(&ch->lock);
59 sema_init(&ch->msg_to_pull_sema, 1); /* mutex */
60
61 atomic_set(&ch->n_on_msg_allocate_wq, 0);
62 init_waitqueue_head(&ch->msg_allocate_wq);
63 init_waitqueue_head(&ch->idle_wq);
64 }
65}
66
67
68/*
69 * Set up the infrastructure necessary to support XPartition Communication
70 * between the specified remote partition and the local one.
71 */
72enum xpc_retval
73xpc_setup_infrastructure(struct xpc_partition *part)
74{
75 int ret;
76 struct timer_list *timer;
77 partid_t partid = XPC_PARTID(part);
78
79
80 /*
81 * Zero out MOST of the entry for this partition. Only the fields
82 * starting with `nchannels' will be zeroed. The preceding fields must
83 * remain `viable' across partition ups and downs, since they may be
84 * referenced during this memset() operation.
85 */
86 memset(&part->nchannels, 0, sizeof(struct xpc_partition) -
87 offsetof(struct xpc_partition, nchannels));
88
89 /*
90 * Allocate all of the channel structures as a contiguous chunk of
91 * memory.
92 */
93 part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS,
94 GFP_KERNEL);
95 if (part->channels == NULL) {
96 dev_err(xpc_chan, "can't get memory for channels\n");
97 return xpcNoMemory;
98 }
99 memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS);
100
101 part->nchannels = XPC_NCHANNELS;
102
103
104 /* allocate all the required GET/PUT values */
105
106 part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
107 GFP_KERNEL, &part->local_GPs_base);
108 if (part->local_GPs == NULL) {
109 kfree(part->channels);
110 part->channels = NULL;
111 dev_err(xpc_chan, "can't get memory for local get/put "
112 "values\n");
113 return xpcNoMemory;
114 }
115 memset(part->local_GPs, 0, XPC_GP_SIZE);
116
117 part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
118 GFP_KERNEL, &part->remote_GPs_base);
119 if (part->remote_GPs == NULL) {
120 kfree(part->channels);
121 part->channels = NULL;
122 kfree(part->local_GPs_base);
123 part->local_GPs = NULL;
124 dev_err(xpc_chan, "can't get memory for remote get/put "
125 "values\n");
126 return xpcNoMemory;
127 }
128 memset(part->remote_GPs, 0, XPC_GP_SIZE);
129
130
131 /* allocate all the required open and close args */
132
133 part->local_openclose_args = xpc_kmalloc_cacheline_aligned(
134 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
135 &part->local_openclose_args_base);
136 if (part->local_openclose_args == NULL) {
137 kfree(part->channels);
138 part->channels = NULL;
139 kfree(part->local_GPs_base);
140 part->local_GPs = NULL;
141 kfree(part->remote_GPs_base);
142 part->remote_GPs = NULL;
143 dev_err(xpc_chan, "can't get memory for local connect args\n");
144 return xpcNoMemory;
145 }
146 memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
147
148 part->remote_openclose_args = xpc_kmalloc_cacheline_aligned(
149 XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
150 &part->remote_openclose_args_base);
151 if (part->remote_openclose_args == NULL) {
152 kfree(part->channels);
153 part->channels = NULL;
154 kfree(part->local_GPs_base);
155 part->local_GPs = NULL;
156 kfree(part->remote_GPs_base);
157 part->remote_GPs = NULL;
158 kfree(part->local_openclose_args_base);
159 part->local_openclose_args = NULL;
160 dev_err(xpc_chan, "can't get memory for remote connect args\n");
161 return xpcNoMemory;
162 }
163 memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE);
164
165
166 xpc_initialize_channels(part, partid);
167
168 atomic_set(&part->nchannels_active, 0);
169
170
171 /* local_IPI_amo were set to 0 by an earlier memset() */
172
173	/* Initialize this partition's AMO_t structure */
174 part->local_IPI_amo_va = xpc_IPI_init(partid);
175
176 spin_lock_init(&part->IPI_lock);
177
178 atomic_set(&part->channel_mgr_requests, 1);
179 init_waitqueue_head(&part->channel_mgr_wq);
180
181 sprintf(part->IPI_owner, "xpc%02d", partid);
182 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
183 part->IPI_owner, (void *) (u64) partid);
184 if (ret != 0) {
185 kfree(part->channels);
186 part->channels = NULL;
187 kfree(part->local_GPs_base);
188 part->local_GPs = NULL;
189 kfree(part->remote_GPs_base);
190 part->remote_GPs = NULL;
191 kfree(part->local_openclose_args_base);
192 part->local_openclose_args = NULL;
193 kfree(part->remote_openclose_args_base);
194 part->remote_openclose_args = NULL;
195 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
196 "errno=%d\n", -ret);
197 return xpcLackOfResources;
198 }
199
200	/* Set up a timer to check for dropped IPIs */
201 timer = &part->dropped_IPI_timer;
202 init_timer(timer);
203 timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check;
204 timer->data = (unsigned long) part;
205 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT;
206 add_timer(timer);
207
208 /*
209 * With the setting of the partition setup_state to XPC_P_SETUP, we're
210 * declaring that this partition is ready to go.
211 */
212 (volatile u8) part->setup_state = XPC_P_SETUP;
213
214
215 /*
216	 * Set up the per partition specific variables required by the
217 * remote partition to establish channel connections with us.
218 *
219 * The setting of the magic # indicates that these per partition
220 * specific variables are ready to be used.
221 */
222 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs);
223 xpc_vars_part[partid].openclose_args_pa =
224 __pa(part->local_openclose_args);
225 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va);
226 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id());
227 xpc_vars_part[partid].IPI_phys_cpuid =
228 cpu_physical_id(smp_processor_id());
229 xpc_vars_part[partid].nchannels = part->nchannels;
230 (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
231
232 return xpcSuccess;
233}
234
235
236/*
237 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
238 * (or multiple cachelines) from a remote partition.
239 *
240 * src must be a cacheline aligned physical address on the remote partition.
241 * dst must be a cacheline aligned virtual address on this partition.
242 * cnt must be a multiple of the cacheline size.
243 */
244static enum xpc_retval
245xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
246 const void *src, size_t cnt)
247{
248 bte_result_t bte_ret;
249
250
251 DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src));
252 DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst));
253 DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
254
255 if (part->act_state == XPC_P_DEACTIVATING) {
256 return part->reason;
257 }
258
259 bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst),
260 (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL);
261 if (bte_ret == BTE_SUCCESS) {
262 return xpcSuccess;
263 }
264
265 dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
266 XPC_PARTID(part), bte_ret);
267
268 return xpc_map_bte_errors(bte_ret);
269}
270
271
272/*
273 * Pull the remote per partition specific variables from the specified
274 * partition.
275 */
276enum xpc_retval
277xpc_pull_remote_vars_part(struct xpc_partition *part)
278{
279 u8 buffer[L1_CACHE_BYTES * 2];
280 struct xpc_vars_part *pulled_entry_cacheline =
281 (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer);
282 struct xpc_vars_part *pulled_entry;
283 u64 remote_entry_cacheline_pa, remote_entry_pa;
284 partid_t partid = XPC_PARTID(part);
285 enum xpc_retval ret;
286
287
288 /* pull the cacheline that contains the variables we're interested in */
289
290 DBUG_ON(part->remote_vars_part_pa !=
291 L1_CACHE_ALIGN(part->remote_vars_part_pa));
292 DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2);
293
294 remote_entry_pa = part->remote_vars_part_pa +
295 sn_partition_id * sizeof(struct xpc_vars_part);
296
297 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
298
299 pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline +
300 (remote_entry_pa & (L1_CACHE_BYTES - 1)));
301
302 ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
303 (void *) remote_entry_cacheline_pa,
304 L1_CACHE_BYTES);
305 if (ret != xpcSuccess) {
306 dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
307 "partition %d, ret=%d\n", partid, ret);
308 return ret;
309 }
310
311
312 /* see if they've been set up yet */
313
314 if (pulled_entry->magic != XPC_VP_MAGIC1 &&
315 pulled_entry->magic != XPC_VP_MAGIC2) {
316
317 if (pulled_entry->magic != 0) {
318 dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
319 "partition %d has bad magic value (=0x%lx)\n",
320 partid, sn_partition_id, pulled_entry->magic);
321 return xpcBadMagic;
322 }
323
324 /* they've not been initialized yet */
325 return xpcRetry;
326 }
327
328 if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
329
330 /* validate the variables */
331
332 if (pulled_entry->GPs_pa == 0 ||
333 pulled_entry->openclose_args_pa == 0 ||
334 pulled_entry->IPI_amo_pa == 0) {
335
336 dev_err(xpc_chan, "partition %d's XPC vars_part for "
337 "partition %d are not valid\n", partid,
338 sn_partition_id);
339 return xpcInvalidAddress;
340 }
341
342 /* the variables we imported look to be valid */
343
344 part->remote_GPs_pa = pulled_entry->GPs_pa;
345 part->remote_openclose_args_pa =
346 pulled_entry->openclose_args_pa;
347 part->remote_IPI_amo_va =
348 (AMO_t *) __va(pulled_entry->IPI_amo_pa);
349 part->remote_IPI_nasid = pulled_entry->IPI_nasid;
350 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
351
352 if (part->nchannels > pulled_entry->nchannels) {
353 part->nchannels = pulled_entry->nchannels;
354 }
355
356 /* let the other side know that we've pulled their variables */
357
358 (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
359 }
360
361 if (pulled_entry->magic == XPC_VP_MAGIC1) {
362 return xpcRetry;
363 }
364
365 return xpcSuccess;
366}
367
368
369/*
370 * Get the IPI flags and pull the openclose args and/or remote GPs as needed.
371 */
372static u64
373xpc_get_IPI_flags(struct xpc_partition *part)
374{
375 unsigned long irq_flags;
376 u64 IPI_amo;
377 enum xpc_retval ret;
378
379
380 /*
381 * See if there are any IPI flags to be handled.
382 */
383
384 spin_lock_irqsave(&part->IPI_lock, irq_flags);
385 if ((IPI_amo = part->local_IPI_amo) != 0) {
386 part->local_IPI_amo = 0;
387 }
388 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
389
390
391 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
392 ret = xpc_pull_remote_cachelines(part,
393 part->remote_openclose_args,
394 (void *) part->remote_openclose_args_pa,
395 XPC_OPENCLOSE_ARGS_SIZE);
396 if (ret != xpcSuccess) {
397 XPC_DEACTIVATE_PARTITION(part, ret);
398
399 dev_dbg(xpc_chan, "failed to pull openclose args from "
400 "partition %d, ret=%d\n", XPC_PARTID(part),
401 ret);
402
403 /* don't bother processing IPIs anymore */
404 IPI_amo = 0;
405 }
406 }
407
408 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
409 ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
410 (void *) part->remote_GPs_pa,
411 XPC_GP_SIZE);
412 if (ret != xpcSuccess) {
413 XPC_DEACTIVATE_PARTITION(part, ret);
414
415 dev_dbg(xpc_chan, "failed to pull GPs from partition "
416 "%d, ret=%d\n", XPC_PARTID(part), ret);
417
418 /* don't bother processing IPIs anymore */
419 IPI_amo = 0;
420 }
421 }
422
423 return IPI_amo;
424}
425
426
427/*
428 * Allocate the local message queue and the notify queue.
429 */
430static enum xpc_retval
431xpc_allocate_local_msgqueue(struct xpc_channel *ch)
432{
433 unsigned long irq_flags;
434 int nentries;
435 size_t nbytes;
436
437
438 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
439 // >>> iterations of the for-loop, bail if set?
440
441	// >>> should we impose a minimum #of entries? like 4 or 8?
442 for (nentries = ch->local_nentries; nentries > 0; nentries--) {
443
444 nbytes = nentries * ch->msg_size;
445 ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
446 (GFP_KERNEL | GFP_DMA),
447 &ch->local_msgqueue_base);
448 if (ch->local_msgqueue == NULL) {
449 continue;
450 }
451 memset(ch->local_msgqueue, 0, nbytes);
452
453 nbytes = nentries * sizeof(struct xpc_notify);
454 ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA));
455 if (ch->notify_queue == NULL) {
456 kfree(ch->local_msgqueue_base);
457 ch->local_msgqueue = NULL;
458 continue;
459 }
460 memset(ch->notify_queue, 0, nbytes);
461
462 spin_lock_irqsave(&ch->lock, irq_flags);
463 if (nentries < ch->local_nentries) {
464 dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
465 "partid=%d, channel=%d\n", nentries,
466 ch->local_nentries, ch->partid, ch->number);
467
468 ch->local_nentries = nentries;
469 }
470 spin_unlock_irqrestore(&ch->lock, irq_flags);
471 return xpcSuccess;
472 }
473
474 dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
475 "queue, partid=%d, channel=%d\n", ch->partid, ch->number);
476 return xpcNoMemory;
477}
478
479
480/*
481 * Allocate the cached remote message queue.
482 */
483static enum xpc_retval
484xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
485{
486 unsigned long irq_flags;
487 int nentries;
488 size_t nbytes;
489
490
491 DBUG_ON(ch->remote_nentries <= 0);
492
493 // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between
494 // >>> iterations of the for-loop, bail if set?
495
496	// >>> should we impose a minimum #of entries? like 4 or 8?
497 for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
498
499 nbytes = nentries * ch->msg_size;
500 ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes,
501 (GFP_KERNEL | GFP_DMA),
502 &ch->remote_msgqueue_base);
503 if (ch->remote_msgqueue == NULL) {
504 continue;
505 }
506 memset(ch->remote_msgqueue, 0, nbytes);
507
508 spin_lock_irqsave(&ch->lock, irq_flags);
509 if (nentries < ch->remote_nentries) {
510 dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
511 "partid=%d, channel=%d\n", nentries,
512 ch->remote_nentries, ch->partid, ch->number);
513
514 ch->remote_nentries = nentries;
515 }
516 spin_unlock_irqrestore(&ch->lock, irq_flags);
517 return xpcSuccess;
518 }
519
520 dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
521 "partid=%d, channel=%d\n", ch->partid, ch->number);
522 return xpcNoMemory;
523}
524
525
526/*
527 * Allocate message queues and other stuff associated with a channel.
528 *
529 * Note: Assumes all of the channel sizes are filled in.
530 */
531static enum xpc_retval
532xpc_allocate_msgqueues(struct xpc_channel *ch)
533{
534 unsigned long irq_flags;
535 int i;
536 enum xpc_retval ret;
537
538
539 DBUG_ON(ch->flags & XPC_C_SETUP);
540
541 if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) {
542 return ret;
543 }
544
545 if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) {
546 kfree(ch->local_msgqueue_base);
547 ch->local_msgqueue = NULL;
548 kfree(ch->notify_queue);
549 ch->notify_queue = NULL;
550 return ret;
551 }
552
553 for (i = 0; i < ch->local_nentries; i++) {
554 /* use a semaphore as an event wait queue */
555 sema_init(&ch->notify_queue[i].sema, 0);
556 }
557
558 sema_init(&ch->teardown_sema, 0); /* event wait */
559
560 spin_lock_irqsave(&ch->lock, irq_flags);
561 ch->flags |= XPC_C_SETUP;
562 spin_unlock_irqrestore(&ch->lock, irq_flags);
563
564 return xpcSuccess;
565}
566
567
568/*
569 * Process a connect message from a remote partition.
570 *
571 * Note: xpc_process_connect() is expecting to be called with the
572 * spin_lock_irqsave held and will leave it locked upon return.
573 */
574static void
575xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
576{
577 enum xpc_retval ret;
578
579
580 DBUG_ON(!spin_is_locked(&ch->lock));
581
582 if (!(ch->flags & XPC_C_OPENREQUEST) ||
583 !(ch->flags & XPC_C_ROPENREQUEST)) {
584 /* nothing more to do for now */
585 return;
586 }
587 DBUG_ON(!(ch->flags & XPC_C_CONNECTING));
588
589 if (!(ch->flags & XPC_C_SETUP)) {
590 spin_unlock_irqrestore(&ch->lock, *irq_flags);
591 ret = xpc_allocate_msgqueues(ch);
592 spin_lock_irqsave(&ch->lock, *irq_flags);
593
594 if (ret != xpcSuccess) {
595 XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
596 }
597 if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) {
598 return;
599 }
600
601 DBUG_ON(!(ch->flags & XPC_C_SETUP));
602 DBUG_ON(ch->local_msgqueue == NULL);
603 DBUG_ON(ch->remote_msgqueue == NULL);
604 }
605
606 if (!(ch->flags & XPC_C_OPENREPLY)) {
607 ch->flags |= XPC_C_OPENREPLY;
608 xpc_IPI_send_openreply(ch, irq_flags);
609 }
610
611 if (!(ch->flags & XPC_C_ROPENREPLY)) {
612 return;
613 }
614
615 DBUG_ON(ch->remote_msgqueue_pa == 0);
616
617 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */
618
619 dev_info(xpc_chan, "channel %d to partition %d connected\n",
620 ch->number, ch->partid);
621
622 spin_unlock_irqrestore(&ch->lock, *irq_flags);
623 xpc_create_kthreads(ch, 1);
624 spin_lock_irqsave(&ch->lock, *irq_flags);
625}
626
627
628/*
629 * Free up message queues and other stuff that were allocated for the specified
630 * channel.
631 *
632 * Note: ch->reason and ch->reason_line are left set for debugging purposes;
633 * they're cleared when XPC_C_DISCONNECTED is cleared.
634 */
635static void
636xpc_free_msgqueues(struct xpc_channel *ch)
637{
638 DBUG_ON(!spin_is_locked(&ch->lock));
639 DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
640
641 ch->remote_msgqueue_pa = 0;
642 ch->func = NULL;
643 ch->key = NULL;
644 ch->msg_size = 0;
645 ch->local_nentries = 0;
646 ch->remote_nentries = 0;
647 ch->kthreads_assigned_limit = 0;
648 ch->kthreads_idle_limit = 0;
649
650 ch->local_GP->get = 0;
651 ch->local_GP->put = 0;
652 ch->remote_GP.get = 0;
653 ch->remote_GP.put = 0;
654 ch->w_local_GP.get = 0;
655 ch->w_local_GP.put = 0;
656 ch->w_remote_GP.get = 0;
657 ch->w_remote_GP.put = 0;
658 ch->next_msg_to_pull = 0;
659
660 if (ch->flags & XPC_C_SETUP) {
661 ch->flags &= ~XPC_C_SETUP;
662
663 dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
664 ch->flags, ch->partid, ch->number);
665
666 kfree(ch->local_msgqueue_base);
667 ch->local_msgqueue = NULL;
668 kfree(ch->remote_msgqueue_base);
669 ch->remote_msgqueue = NULL;
670 kfree(ch->notify_queue);
671 ch->notify_queue = NULL;
672
673 /* in case someone is waiting for the teardown to complete */
674 up(&ch->teardown_sema);
675 }
676}
677
678
679/*
680 * spin_lock_irqsave() is expected to be held on entry.
681 */
682static void
683xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
684{
685 struct xpc_partition *part = &xpc_partitions[ch->partid];
686 u32 ch_flags = ch->flags;
687
688
689 DBUG_ON(!spin_is_locked(&ch->lock));
690
691 if (!(ch->flags & XPC_C_DISCONNECTING)) {
692 return;
693 }
694
695 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
696
697 /* make sure all activity has settled down first */
698
699 if (atomic_read(&ch->references) > 0) {
700 return;
701 }
702 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
703
704 /* it's now safe to free the channel's message queues */
705
706 xpc_free_msgqueues(ch);
707 DBUG_ON(ch->flags & XPC_C_SETUP);
708
709 if (part->act_state != XPC_P_DEACTIVATING) {
710
711 /* as long as the other side is up do the full protocol */
712
713 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
714 return;
715 }
716
717 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
718 ch->flags |= XPC_C_CLOSEREPLY;
719 xpc_IPI_send_closereply(ch, irq_flags);
720 }
721
722 if (!(ch->flags & XPC_C_RCLOSEREPLY)) {
723 return;
724 }
725 }
726
727 /* both sides are disconnected now */
728
729 ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */
730
731 atomic_dec(&part->nchannels_active);
732
733 if (ch_flags & XPC_C_WASCONNECTED) {
734 dev_info(xpc_chan, "channel %d to partition %d disconnected, "
735 "reason=%d\n", ch->number, ch->partid, ch->reason);
736 }
737}
738
739
740/*
741 * Process a change in the channel's remote connection state.
742 */
743static void
744xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
745 u8 IPI_flags)
746{
747 unsigned long irq_flags;
748 struct xpc_openclose_args *args =
749 &part->remote_openclose_args[ch_number];
750 struct xpc_channel *ch = &part->channels[ch_number];
751 enum xpc_retval reason;
752
753
754
755 spin_lock_irqsave(&ch->lock, irq_flags);
756
757
758 if (IPI_flags & XPC_IPI_CLOSEREQUEST) {
759
760 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received "
761 "from partid=%d, channel=%d\n", args->reason,
762 ch->partid, ch->number);
763
764 /*
765 * If RCLOSEREQUEST is set, we're probably waiting for
766 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
767	 * with this RCLOSEREQUEST in the IPI_flags.
768 */
769
770 if (ch->flags & XPC_C_RCLOSEREQUEST) {
771 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
772 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
773 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
774 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
775
776 DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY));
777 IPI_flags &= ~XPC_IPI_CLOSEREPLY;
778 ch->flags |= XPC_C_RCLOSEREPLY;
779
780 /* both sides have finished disconnecting */
781 xpc_process_disconnect(ch, &irq_flags);
782 }
783
784 if (ch->flags & XPC_C_DISCONNECTED) {
785 // >>> explain this section
786
787 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) {
788 DBUG_ON(part->act_state !=
789 XPC_P_DEACTIVATING);
790 spin_unlock_irqrestore(&ch->lock, irq_flags);
791 return;
792 }
793
794 XPC_SET_REASON(ch, 0, 0);
795 ch->flags &= ~XPC_C_DISCONNECTED;
796
797 atomic_inc(&part->nchannels_active);
798 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
799 }
800
801 IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY);
802
803 /*
804 * The meaningful CLOSEREQUEST connection state fields are:
805 * reason = reason connection is to be closed
806 */
807
808 ch->flags |= XPC_C_RCLOSEREQUEST;
809
810 if (!(ch->flags & XPC_C_DISCONNECTING)) {
811 reason = args->reason;
812 if (reason <= xpcSuccess || reason > xpcUnknownReason) {
813 reason = xpcUnknownReason;
814 } else if (reason == xpcUnregistering) {
815 reason = xpcOtherUnregistering;
816 }
817
818 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
819 } else {
820 xpc_process_disconnect(ch, &irq_flags);
821 }
822 }
823
824
825 if (IPI_flags & XPC_IPI_CLOSEREPLY) {
826
827 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d,"
828 " channel=%d\n", ch->partid, ch->number);
829
830 if (ch->flags & XPC_C_DISCONNECTED) {
831 DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
832 spin_unlock_irqrestore(&ch->lock, irq_flags);
833 return;
834 }
835
836 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
837 DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST));
838
839 ch->flags |= XPC_C_RCLOSEREPLY;
840
841 if (ch->flags & XPC_C_CLOSEREPLY) {
842 /* both sides have finished disconnecting */
843 xpc_process_disconnect(ch, &irq_flags);
844 }
845 }
846
847
848 if (IPI_flags & XPC_IPI_OPENREQUEST) {
849
850 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, "
851 "local_nentries=%d) received from partid=%d, "
852 "channel=%d\n", args->msg_size, args->local_nentries,
853 ch->partid, ch->number);
854
855 if ((ch->flags & XPC_C_DISCONNECTING) ||
856 part->act_state == XPC_P_DEACTIVATING) {
857 spin_unlock_irqrestore(&ch->lock, irq_flags);
858 return;
859 }
860 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
861 XPC_C_OPENREQUEST)));
862 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
863 XPC_C_OPENREPLY | XPC_C_CONNECTED));
864
865 /*
866 * The meaningful OPENREQUEST connection state fields are:
867 * msg_size = size of channel's messages in bytes
868 * local_nentries = remote partition's local_nentries
869 */
870 DBUG_ON(args->msg_size == 0);
871 DBUG_ON(args->local_nentries == 0);
872
873 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
874 ch->remote_nentries = args->local_nentries;
875
876
877 if (ch->flags & XPC_C_OPENREQUEST) {
878 if (args->msg_size != ch->msg_size) {
879 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
880 &irq_flags);
881 spin_unlock_irqrestore(&ch->lock, irq_flags);
882 return;
883 }
884 } else {
885 ch->msg_size = args->msg_size;
886
887 XPC_SET_REASON(ch, 0, 0);
888 ch->flags &= ~XPC_C_DISCONNECTED;
889
890 atomic_inc(&part->nchannels_active);
891 }
892
893 xpc_process_connect(ch, &irq_flags);
894 }
895
896
897 if (IPI_flags & XPC_IPI_OPENREPLY) {
898
899 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, "
900 "local_nentries=%d, remote_nentries=%d) received from "
901 "partid=%d, channel=%d\n", args->local_msgqueue_pa,
902 args->local_nentries, args->remote_nentries,
903 ch->partid, ch->number);
904
905 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
906 spin_unlock_irqrestore(&ch->lock, irq_flags);
907 return;
908 }
909 DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST));
910 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
911 DBUG_ON(ch->flags & XPC_C_CONNECTED);
912
913 /*
914 * The meaningful OPENREPLY connection state fields are:
915 * local_msgqueue_pa = physical address of remote
916 * partition's local_msgqueue
917 * local_nentries = remote partition's local_nentries
918 * remote_nentries = remote partition's remote_nentries
919 */
920 DBUG_ON(args->local_msgqueue_pa == 0);
921 DBUG_ON(args->local_nentries == 0);
922 DBUG_ON(args->remote_nentries == 0);
923
924 ch->flags |= XPC_C_ROPENREPLY;
925 ch->remote_msgqueue_pa = args->local_msgqueue_pa;
926
927 if (args->local_nentries < ch->remote_nentries) {
928 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
929 "remote_nentries=%d, old remote_nentries=%d, "
930 "partid=%d, channel=%d\n",
931 args->local_nentries, ch->remote_nentries,
932 ch->partid, ch->number);
933
934 ch->remote_nentries = args->local_nentries;
935 }
936 if (args->remote_nentries < ch->local_nentries) {
937 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new "
938 "local_nentries=%d, old local_nentries=%d, "
939 "partid=%d, channel=%d\n",
940 args->remote_nentries, ch->local_nentries,
941 ch->partid, ch->number);
942
943 ch->local_nentries = args->remote_nentries;
944 }
945
946 xpc_process_connect(ch, &irq_flags);
947 }
948
949 spin_unlock_irqrestore(&ch->lock, irq_flags);
950}
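The clamping done for XPC_IPI_OPENREPLY above means that, for each message queue, the depth actually used ends up being the smaller of what the queue's owner allocated and what the peer managed to cache. A minimal helper capturing just that rule, for illustration only; the function and parameter names are invented and are not part of the patch:

/* illustrative only: the effective depth of one queue once the
 * OPENREQUEST/OPENREPLY exchange has settled */
static inline int
xpc_effective_nentries(int owner_nentries, int peer_cached_nentries)
{
	return (owner_nentries < peer_cached_nentries) ?
			owner_nentries : peer_cached_nentries;
}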
951
952
953/*
954 * Attempt to establish a channel connection to a remote partition.
955 */
956static enum xpc_retval
957xpc_connect_channel(struct xpc_channel *ch)
958{
959 unsigned long irq_flags;
960 struct xpc_registration *registration = &xpc_registrations[ch->number];
961
962
963 if (down_interruptible(&registration->sema) != 0) {
964 return xpcInterrupted;
965 }
966
967 if (!XPC_CHANNEL_REGISTERED(ch->number)) {
968 up(&registration->sema);
969 return xpcUnregistered;
970 }
971
972 spin_lock_irqsave(&ch->lock, irq_flags);
973
974 DBUG_ON(ch->flags & XPC_C_CONNECTED);
975 DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
976
977 if (ch->flags & XPC_C_DISCONNECTING) {
978 spin_unlock_irqrestore(&ch->lock, irq_flags);
979 up(&registration->sema);
980 return ch->reason;
981 }
982
983
984 /* add info from the channel connect registration to the channel */
985
986 ch->kthreads_assigned_limit = registration->assigned_limit;
987 ch->kthreads_idle_limit = registration->idle_limit;
988 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
989 DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
990 DBUG_ON(atomic_read(&ch->kthreads_active) != 0);
991
992 ch->func = registration->func;
993 DBUG_ON(registration->func == NULL);
994 ch->key = registration->key;
995
996 ch->local_nentries = registration->nentries;
997
998 if (ch->flags & XPC_C_ROPENREQUEST) {
999 if (registration->msg_size != ch->msg_size) {
1000 /* the local and remote sides aren't the same */
1001
1002 /*
1003 * Because XPC_DISCONNECT_CHANNEL() can block we're
1004 * forced to up the registration sema before we unlock
1005 * the channel lock. But that's okay here because we're
1006 * done with the part that required the registration
1007 * sema. XPC_DISCONNECT_CHANNEL() requires that the
1008 * channel lock be locked and will unlock and relock
1009 * the channel lock as needed.
1010 */
1011 up(&registration->sema);
1012 XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
1013 &irq_flags);
1014 spin_unlock_irqrestore(&ch->lock, irq_flags);
1015 return xpcUnequalMsgSizes;
1016 }
1017 } else {
1018 ch->msg_size = registration->msg_size;
1019
1020 XPC_SET_REASON(ch, 0, 0);
1021 ch->flags &= ~XPC_C_DISCONNECTED;
1022
1023 atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
1024 }
1025
1026 up(&registration->sema);
1027
1028
1029 /* initiate the connection */
1030
1031 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
1032 xpc_IPI_send_openrequest(ch, &irq_flags);
1033
1034 xpc_process_connect(ch, &irq_flags);
1035
1036 spin_unlock_irqrestore(&ch->lock, irq_flags);
1037
1038 return xpcSuccess;
1039}
1040
1041
1042/*
1043 * Notify those who wanted to be notified upon delivery of their message.
1044 */
1045static void
1046xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
1047{
1048 struct xpc_notify *notify;
1049 u8 notify_type;
1050 s64 get = ch->w_remote_GP.get - 1;
1051
1052
1053 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
1054
1055 notify = &ch->notify_queue[get % ch->local_nentries];
1056
1057 /*
1058 * See if the notify entry indicates it was associated with
1059		 * a message whose sender wants to be notified. It is possible
1060 * that it is, but someone else is doing or has done the
1061 * notification.
1062 */
1063 notify_type = notify->type;
1064 if (notify_type == 0 ||
1065 cmpxchg(&notify->type, notify_type, 0) !=
1066 notify_type) {
1067 continue;
1068 }
1069
1070 DBUG_ON(notify_type != XPC_N_CALL);
1071
1072 atomic_dec(&ch->n_to_notify);
1073
1074 if (notify->func != NULL) {
1075 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
1076 "msg_number=%ld, partid=%d, channel=%d\n",
1077 (void *) notify, get, ch->partid, ch->number);
1078
1079 notify->func(reason, ch->partid, ch->number,
1080 notify->key);
1081
1082 dev_dbg(xpc_chan, "notify->func() returned, "
1083 "notify=0x%p, msg_number=%ld, partid=%d, "
1084 "channel=%d\n", (void *) notify, get,
1085 ch->partid, ch->number);
1086 }
1087 }
1088}
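The cmpxchg() on notify->type above is what guarantees each notify entry is claimed exactly once, no matter whether this path, the disconnect path, or the error path in xpc_send_msg() gets there first. A compilable userspace analogue of that claim-exactly-once step, with GCC atomic builtins standing in for the kernel's cmpxchg and all names invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* whoever swaps the type field to 0 first owns the right to run the callback */
static int claim_notify(uint8_t *type)
{
	uint8_t expected = __atomic_load_n(type, __ATOMIC_RELAXED);

	if (expected == 0)
		return 0;			/* already claimed elsewhere */
	return __atomic_compare_exchange_n(type, &expected, 0, 0,
					   __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
}

int main(void)
{
	uint8_t type = 1;			/* a nonzero type, like XPC_N_CALL */

	printf("first claim:  %d\n", claim_notify(&type));	/* 1: we own it */
	printf("second claim: %d\n", claim_notify(&type));	/* 0: already done */
	return 0;
}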
1089
1090
1091/*
1092 * Clear some of the msg flags in the local message queue.
1093 */
1094static inline void
1095xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
1096{
1097 struct xpc_msg *msg;
1098 s64 get;
1099
1100
1101 get = ch->w_remote_GP.get;
1102 do {
1103 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1104 (get % ch->local_nentries) * ch->msg_size);
1105 msg->flags = 0;
1106 } while (++get < (volatile s64) ch->remote_GP.get);
1107}
1108
1109
1110/*
1111 * Clear some of the msg flags in the remote message queue.
1112 */
1113static inline void
1114xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
1115{
1116 struct xpc_msg *msg;
1117 s64 put;
1118
1119
1120 put = ch->w_remote_GP.put;
1121 do {
1122 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
1123 (put % ch->remote_nentries) * ch->msg_size);
1124 msg->flags = 0;
1125 } while (++put < (volatile s64) ch->remote_GP.put);
1126}
1127
1128
1129static void
1130xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
1131{
1132 struct xpc_channel *ch = &part->channels[ch_number];
1133 int nmsgs_sent;
1134
1135
1136 ch->remote_GP = part->remote_GPs[ch_number];
1137
1138
1139 /* See what, if anything, has changed for each connected channel */
1140
1141 xpc_msgqueue_ref(ch);
1142
1143 if (ch->w_remote_GP.get == ch->remote_GP.get &&
1144 ch->w_remote_GP.put == ch->remote_GP.put) {
1145 /* nothing changed since GPs were last pulled */
1146 xpc_msgqueue_deref(ch);
1147 return;
1148 }
1149
1150 if (!(ch->flags & XPC_C_CONNECTED)){
1151 xpc_msgqueue_deref(ch);
1152 return;
1153 }
1154
1155
1156 /*
1157 * First check to see if messages recently sent by us have been
1158 * received by the other side. (The remote GET value will have
1159 * changed since we last looked at it.)
1160 */
1161
1162 if (ch->w_remote_GP.get != ch->remote_GP.get) {
1163
1164 /*
1165 * We need to notify any senders that want to be notified
1166 * that their sent messages have been received by their
1167 * intended recipients. We need to do this before updating
1168 * w_remote_GP.get so that we don't allocate the same message
1169 * queue entries prematurely (see xpc_allocate_msg()).
1170 */
1171 if (atomic_read(&ch->n_to_notify) > 0) {
1172 /*
1173 * Notify senders that messages sent have been
1174 * received and delivered by the other side.
1175 */
1176 xpc_notify_senders(ch, xpcMsgDelivered,
1177 ch->remote_GP.get);
1178 }
1179
1180 /*
1181 * Clear msg->flags in previously sent messages, so that
1182 * they're ready for xpc_allocate_msg().
1183 */
1184 xpc_clear_local_msgqueue_flags(ch);
1185
1186 (volatile s64) ch->w_remote_GP.get = ch->remote_GP.get;
1187
1188 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1189 "channel=%d\n", ch->w_remote_GP.get, ch->partid,
1190 ch->number);
1191
1192 /*
1193 * If anyone was waiting for message queue entries to become
1194 * available, wake them up.
1195 */
1196 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
1197 wake_up(&ch->msg_allocate_wq);
1198 }
1199 }
1200
1201
1202 /*
1203 * Now check for newly sent messages by the other side. (The remote
1204 * PUT value will have changed since we last looked at it.)
1205 */
1206
1207 if (ch->w_remote_GP.put != ch->remote_GP.put) {
1208 /*
1209 * Clear msg->flags in previously received messages, so that
1210 * they're ready for xpc_get_deliverable_msg().
1211 */
1212 xpc_clear_remote_msgqueue_flags(ch);
1213
1214 (volatile s64) ch->w_remote_GP.put = ch->remote_GP.put;
1215
1216 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1217 "channel=%d\n", ch->w_remote_GP.put, ch->partid,
1218 ch->number);
1219
1220 nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
1221 if (nmsgs_sent > 0) {
1222 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1223 "delivered=%d, partid=%d, channel=%d\n",
1224 nmsgs_sent, ch->partid, ch->number);
1225
1226 if (ch->flags & XPC_C_CONNECTCALLOUT) {
1227 xpc_activate_kthreads(ch, nmsgs_sent);
1228 }
1229 }
1230 }
1231
1232 xpc_msgqueue_deref(ch);
1233}
1234
1235
1236void
1237xpc_process_channel_activity(struct xpc_partition *part)
1238{
1239 unsigned long irq_flags;
1240 u64 IPI_amo, IPI_flags;
1241 struct xpc_channel *ch;
1242 int ch_number;
1243
1244
1245 IPI_amo = xpc_get_IPI_flags(part);
1246
1247 /*
1248 * Initiate channel connections for registered channels.
1249 *
1250 * For each connected channel that has pending messages activate idle
1251 * kthreads and/or create new kthreads as needed.
1252 */
1253
1254 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1255 ch = &part->channels[ch_number];
1256
1257
1258 /*
1259 * Process any open or close related IPI flags, and then deal
1260 * with connecting or disconnecting the channel as required.
1261 */
1262
1263 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
1264
1265 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) {
1266 xpc_process_openclose_IPI(part, ch_number, IPI_flags);
1267 }
1268
1269
1270 if (ch->flags & XPC_C_DISCONNECTING) {
1271 spin_lock_irqsave(&ch->lock, irq_flags);
1272 xpc_process_disconnect(ch, &irq_flags);
1273 spin_unlock_irqrestore(&ch->lock, irq_flags);
1274 continue;
1275 }
1276
1277 if (part->act_state == XPC_P_DEACTIVATING) {
1278 continue;
1279 }
1280
1281 if (!(ch->flags & XPC_C_CONNECTED)) {
1282 if (!(ch->flags & XPC_C_OPENREQUEST)) {
1283 DBUG_ON(ch->flags & XPC_C_SETUP);
1284 (void) xpc_connect_channel(ch);
1285 } else {
1286 spin_lock_irqsave(&ch->lock, irq_flags);
1287 xpc_process_connect(ch, &irq_flags);
1288 spin_unlock_irqrestore(&ch->lock, irq_flags);
1289 }
1290 continue;
1291 }
1292
1293
1294 /*
1295 * Process any message related IPI flags, this may involve the
1296 * activation of kthreads to deliver any pending messages sent
1297 * from the other partition.
1298 */
1299
1300 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) {
1301 xpc_process_msg_IPI(part, ch_number);
1302 }
1303 }
1304}
1305
1306
1307/*
1308 * XPC's heartbeat code calls this function to inform XPC that a partition has
1309 * gone down. XPC responds by tearing down the XPartition Communication
1310 * infrastructure used for the just downed partition.
1311 *
1312 * XPC's heartbeat code will never call this function and xpc_partition_up()
1313 * at the same time. Nor will it ever make multiple calls to either function
1314 * at the same time.
1315 */
1316void
1317xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
1318{
1319 unsigned long irq_flags;
1320 int ch_number;
1321 struct xpc_channel *ch;
1322
1323
1324 dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
1325 XPC_PARTID(part), reason);
1326
1327 if (!xpc_part_ref(part)) {
1328 /* infrastructure for this partition isn't currently set up */
1329 return;
1330 }
1331
1332
1333 /* disconnect all channels associated with the downed partition */
1334
1335 for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
1336 ch = &part->channels[ch_number];
1337
1338
1339 xpc_msgqueue_ref(ch);
1340 spin_lock_irqsave(&ch->lock, irq_flags);
1341
1342 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
1343
1344 spin_unlock_irqrestore(&ch->lock, irq_flags);
1345 xpc_msgqueue_deref(ch);
1346 }
1347
1348 xpc_wakeup_channel_mgr(part);
1349
1350 xpc_part_deref(part);
1351}
1352
1353
1354/*
1355 * Teardown the infrastructure necessary to support XPartition Communication
1356 * between the specified remote partition and the local one.
1357 */
1358void
1359xpc_teardown_infrastructure(struct xpc_partition *part)
1360{
1361 partid_t partid = XPC_PARTID(part);
1362
1363
1364 /*
1365 * We start off by making this partition inaccessible to local
1366 * processes by marking it as no longer set up. Then we make it
1367 * inaccessible to remote processes by clearing the XPC per partition
1368 * specific variable's magic # (which indicates that these variables
1369 * are no longer valid) and by ignoring all XPC notify IPIs sent to
1370 * this partition.
1371 */
1372
1373 DBUG_ON(atomic_read(&part->nchannels_active) != 0);
1374 DBUG_ON(part->setup_state != XPC_P_SETUP);
1375 part->setup_state = XPC_P_WTEARDOWN;
1376
1377 xpc_vars_part[partid].magic = 0;
1378
1379
1380 free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid);
1381
1382
1383 /*
1384 * Before proceeding with the teardown we have to wait until all
1385 * existing references cease.
1386 */
1387 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));
1388
1389
1390 /* now we can begin tearing down the infrastructure */
1391
1392 part->setup_state = XPC_P_TORNDOWN;
1393
1394 /* in case we've still got outstanding timers registered... */
1395 del_timer_sync(&part->dropped_IPI_timer);
1396
1397 kfree(part->remote_openclose_args_base);
1398 part->remote_openclose_args = NULL;
1399 kfree(part->local_openclose_args_base);
1400 part->local_openclose_args = NULL;
1401 kfree(part->remote_GPs_base);
1402 part->remote_GPs = NULL;
1403 kfree(part->local_GPs_base);
1404 part->local_GPs = NULL;
1405 kfree(part->channels);
1406 part->channels = NULL;
1407 part->local_IPI_amo_va = NULL;
1408}
1409
1410
1411/*
1412 * Called by XP at the time of channel connection registration to cause
1413 * XPC to establish connections to all currently active partitions.
1414 */
1415void
1416xpc_initiate_connect(int ch_number)
1417{
1418 partid_t partid;
1419 struct xpc_partition *part;
1420 struct xpc_channel *ch;
1421
1422
1423 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1424
1425 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1426 part = &xpc_partitions[partid];
1427
1428 if (xpc_part_ref(part)) {
1429 ch = &part->channels[ch_number];
1430
1431 if (!(ch->flags & XPC_C_DISCONNECTING)) {
1432 DBUG_ON(ch->flags & XPC_C_OPENREQUEST);
1433 DBUG_ON(ch->flags & XPC_C_CONNECTED);
1434 DBUG_ON(ch->flags & XPC_C_SETUP);
1435
1436 /*
1437 * Initiate the establishment of a connection
1438 * on the newly registered channel to the
1439 * remote partition.
1440 */
1441 xpc_wakeup_channel_mgr(part);
1442 }
1443
1444 xpc_part_deref(part);
1445 }
1446 }
1447}
1448
1449
1450void
1451xpc_connected_callout(struct xpc_channel *ch)
1452{
1453 unsigned long irq_flags;
1454
1455
1456 /* let the registerer know that a connection has been established */
1457
1458 if (ch->func != NULL) {
1459 dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
1460 "partid=%d, channel=%d\n", ch->partid, ch->number);
1461
1462 ch->func(xpcConnected, ch->partid, ch->number,
1463 (void *) (u64) ch->local_nentries, ch->key);
1464
1465 dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
1466 "partid=%d, channel=%d\n", ch->partid, ch->number);
1467 }
1468
1469 spin_lock_irqsave(&ch->lock, irq_flags);
1470 ch->flags |= XPC_C_CONNECTCALLOUT;
1471 spin_unlock_irqrestore(&ch->lock, irq_flags);
1472}
1473
1474
1475/*
1476 * Called by XP at the time of channel connection unregistration to cause
1477 * XPC to teardown all current connections for the specified channel.
1478 *
1479 * Before returning xpc_initiate_disconnect() will wait until all connections
1480 * on the specified channel have been closed/torn down. So the caller can be
1481 * assured that they will not be receiving any more callouts from XPC to the
1482 * function they registered via xpc_connect().
1483 *
1484 * Arguments:
1485 *
1486 * ch_number - channel # to unregister.
1487 */
1488void
1489xpc_initiate_disconnect(int ch_number)
1490{
1491 unsigned long irq_flags;
1492 partid_t partid;
1493 struct xpc_partition *part;
1494 struct xpc_channel *ch;
1495
1496
1497 DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
1498
1499 /* initiate the channel disconnect for every active partition */
1500 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
1501 part = &xpc_partitions[partid];
1502
1503 if (xpc_part_ref(part)) {
1504 ch = &part->channels[ch_number];
1505 xpc_msgqueue_ref(ch);
1506
1507 spin_lock_irqsave(&ch->lock, irq_flags);
1508
1509 XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
1510 &irq_flags);
1511
1512 spin_unlock_irqrestore(&ch->lock, irq_flags);
1513
1514 xpc_msgqueue_deref(ch);
1515 xpc_part_deref(part);
1516 }
1517 }
1518
1519 xpc_disconnect_wait(ch_number);
1520}
1521
1522
1523/*
1524 * To disconnect a channel and reflect it back to all who may be waiting.
1525 *
1526 * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
1527 * >>> xpc_free_msgqueues().
1528 *
1529 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
1530 */
1531void
1532xpc_disconnect_channel(const int line, struct xpc_channel *ch,
1533 enum xpc_retval reason, unsigned long *irq_flags)
1534{
1535 u32 flags;
1536
1537
1538 DBUG_ON(!spin_is_locked(&ch->lock));
1539
1540 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
1541 return;
1542 }
1543 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));
1544
1545 dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
1546 reason, line, ch->partid, ch->number);
1547
1548 XPC_SET_REASON(ch, reason, line);
1549
1550 flags = ch->flags;
1551 /* some of these may not have been set */
1552 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
1553 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
1554 XPC_C_CONNECTING | XPC_C_CONNECTED);
1555
1556 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
1557 xpc_IPI_send_closerequest(ch, irq_flags);
1558
1559 if (flags & XPC_C_CONNECTED) {
1560 ch->flags |= XPC_C_WASCONNECTED;
1561 }
1562
1563 if (atomic_read(&ch->kthreads_idle) > 0) {
1564 /* wake all idle kthreads so they can exit */
1565 wake_up_all(&ch->idle_wq);
1566 }
1567
1568 spin_unlock_irqrestore(&ch->lock, *irq_flags);
1569
1570
1571 /* wake those waiting to allocate an entry from the local msg queue */
1572
1573 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
1574 wake_up(&ch->msg_allocate_wq);
1575 }
1576
1577 /* wake those waiting for notify completion */
1578
1579 if (atomic_read(&ch->n_to_notify) > 0) {
1580 xpc_notify_senders(ch, reason, ch->w_local_GP.put);
1581 }
1582
1583 spin_lock_irqsave(&ch->lock, *irq_flags);
1584}
1585
1586
1587void
1588xpc_disconnected_callout(struct xpc_channel *ch)
1589{
1590 /*
1591 * Let the channel's registerer know that the channel is now
1592 * disconnected. We don't want to do this if the registerer was never
1593 * informed of a connection being made, unless the disconnect was for
1594 * abnormal reasons.
1595 */
1596
1597 if (ch->func != NULL) {
1598 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
1599 "channel=%d\n", ch->reason, ch->partid, ch->number);
1600
1601 ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);
1602
1603 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
1604 "channel=%d\n", ch->reason, ch->partid, ch->number);
1605 }
1606}
1607
1608
1609/*
1610 * Wait for a message entry to become available for the specified channel,
1611 * but don't wait any longer than 1 jiffy.
1612 */
1613static enum xpc_retval
1614xpc_allocate_msg_wait(struct xpc_channel *ch)
1615{
1616 enum xpc_retval ret;
1617
1618
1619 if (ch->flags & XPC_C_DISCONNECTING) {
1620 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
1621 return ch->reason;
1622 }
1623
1624 atomic_inc(&ch->n_on_msg_allocate_wq);
1625 ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
1626 atomic_dec(&ch->n_on_msg_allocate_wq);
1627
1628 if (ch->flags & XPC_C_DISCONNECTING) {
1629 ret = ch->reason;
1630 DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true?
1631 } else if (ret == 0) {
1632 ret = xpcTimeout;
1633 } else {
1634 ret = xpcInterrupted;
1635 }
1636
1637 return ret;
1638}
1639
1640
1641/*
1642 * Allocate an entry for a message from the message queue associated with the
1643 * specified channel.
1644 */
1645static enum xpc_retval
1646xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
1647 struct xpc_msg **address_of_msg)
1648{
1649 struct xpc_msg *msg;
1650 enum xpc_retval ret;
1651 s64 put;
1652
1653
1654 /* this reference will be dropped in xpc_send_msg() */
1655 xpc_msgqueue_ref(ch);
1656
1657 if (ch->flags & XPC_C_DISCONNECTING) {
1658 xpc_msgqueue_deref(ch);
1659 return ch->reason;
1660 }
1661 if (!(ch->flags & XPC_C_CONNECTED)) {
1662 xpc_msgqueue_deref(ch);
1663 return xpcNotConnected;
1664 }
1665
1666
1667 /*
1668 * Get the next available message entry from the local message queue.
1669 * If none are available, we'll make sure that we grab the latest
1670 * GP values.
1671 */
1672 ret = xpcTimeout;
1673
1674 while (1) {
1675
1676 put = (volatile s64) ch->w_local_GP.put;
1677 if (put - (volatile s64) ch->w_remote_GP.get <
1678 ch->local_nentries) {
1679
1680 /* There are available message entries. We need to try
1681 * to secure one for ourselves. We'll do this by trying
1682 * to increment w_local_GP.put as long as someone else
1683 * doesn't beat us to it. If they do, we'll have to
1684 * try again.
1685 */
1686 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) ==
1687 put) {
1688 /* we got the entry referenced by put */
1689 break;
1690 }
1691 continue; /* try again */
1692 }
1693
1694
1695 /*
1696 * There aren't any available msg entries at this time.
1697 *
1698 * In waiting for a message entry to become available,
1699 * we set a timeout in case the other side is not
1700 * sending completion IPIs. This lets us fake an IPI
1701 * that will cause the IPI handler to fetch the latest
1702 * GP values as if an IPI was sent by the other side.
1703 */
1704 if (ret == xpcTimeout) {
1705 xpc_IPI_send_local_msgrequest(ch);
1706 }
1707
1708 if (flags & XPC_NOWAIT) {
1709 xpc_msgqueue_deref(ch);
1710 return xpcNoWait;
1711 }
1712
1713 ret = xpc_allocate_msg_wait(ch);
1714 if (ret != xpcInterrupted && ret != xpcTimeout) {
1715 xpc_msgqueue_deref(ch);
1716 return ret;
1717 }
1718 }
1719
1720
1721 /* get the message's address and initialize it */
1722 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1723 (put % ch->local_nentries) * ch->msg_size);
1724
1725
1726 DBUG_ON(msg->flags != 0);
1727 msg->number = put;
1728
1729 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
1730 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
1731 (void *) msg, msg->number, ch->partid, ch->number);
1732
1733 *address_of_msg = msg;
1734
1735 return xpcSuccess;
1736}
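The loop above reserves a send slot by racing on w_local_GP.put with cmpxchg(): whoever advances the counter owns the entry it previously referenced. A compilable userspace analogue of just that reservation step, with GCC builtins in place of the kernel cmpxchg and all names made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8	/* stands in for ch->local_nentries */

static int64_t w_put;	/* next slot to hand out (like ch->w_local_GP.put) */
static int64_t w_get;	/* peer's consumption point (like ch->w_remote_GP.get) */

/* returns the reserved slot index, or -1 if the ring is currently full */
static int64_t reserve_slot(void)
{
	for (;;) {
		int64_t put = __atomic_load_n(&w_put, __ATOMIC_ACQUIRE);

		if (put - __atomic_load_n(&w_get, __ATOMIC_ACQUIRE) >= NENTRIES)
			return -1;	/* no free entries; caller must wait */

		/* claim entry 'put' by advancing the counter past it */
		if (__atomic_compare_exchange_n(&w_put, &put, put + 1, 0,
						__ATOMIC_ACQ_REL,
						__ATOMIC_RELAXED))
			return put % NENTRIES;
		/* somebody beat us to it; retry with the new value */
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("reservation %d -> slot %lld\n", i,
		       (long long) reserve_slot());
	return 0;
}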
1737
1738
1739/*
1740 * Allocate an entry for a message from the message queue associated with the
1741 * specified channel. NOTE that this routine can sleep waiting for a message
1742 * entry to become available. To not sleep, pass in the XPC_NOWAIT flag.
1743 *
1744 * Arguments:
1745 *
1746 * partid - ID of partition to which the channel is connected.
1747 * ch_number - channel #.
1748 * flags - see xpc.h for valid flags.
1749 * payload - address of the allocated payload area pointer (filled in on
1750 * return) in which the user-defined message is constructed.
1751 */
1752enum xpc_retval
1753xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
1754{
1755 struct xpc_partition *part = &xpc_partitions[partid];
1756 enum xpc_retval ret = xpcUnknownReason;
1757	struct xpc_msg *msg = NULL;
1758
1759
1760 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1761 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1762
1763 *payload = NULL;
1764
1765 if (xpc_part_ref(part)) {
1766 ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg);
1767 xpc_part_deref(part);
1768
1769 if (msg != NULL) {
1770 *payload = &msg->payload;
1771 }
1772 }
1773
1774 return ret;
1775}
1776
1777
1778/*
1779 * Now we actually send the messages that are ready to be sent by advancing
1780 * the local message queue's Put value and then send an IPI to the recipient
1781 * partition.
1782 */
1783static void
1784xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
1785{
1786 struct xpc_msg *msg;
1787 s64 put = initial_put + 1;
1788 int send_IPI = 0;
1789
1790
1791 while (1) {
1792
1793 while (1) {
1794 if (put == (volatile s64) ch->w_local_GP.put) {
1795 break;
1796 }
1797
1798 msg = (struct xpc_msg *) ((u64) ch->local_msgqueue +
1799 (put % ch->local_nentries) * ch->msg_size);
1800
1801 if (!(msg->flags & XPC_M_READY)) {
1802 break;
1803 }
1804
1805 put++;
1806 }
1807
1808 if (put == initial_put) {
1809 /* nothing's changed */
1810 break;
1811 }
1812
1813 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) !=
1814 initial_put) {
1815 /* someone else beat us to it */
1816 DBUG_ON((volatile s64) ch->local_GP->put < initial_put);
1817 break;
1818 }
1819
1820 /* we just set the new value of local_GP->put */
1821
1822 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
1823 "channel=%d\n", put, ch->partid, ch->number);
1824
1825 send_IPI = 1;
1826
1827 /*
1828 * We need to ensure that the message referenced by
1829 * local_GP->put is not XPC_M_READY or that local_GP->put
1830 * equals w_local_GP.put, so we'll go have a look.
1831 */
1832 initial_put = put;
1833 }
1834
1835 if (send_IPI) {
1836 xpc_IPI_send_msgrequest(ch);
1837 }
1838}
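xpc_send_msgs() only moves local_GP->put forward across an unbroken run of XPC_M_READY entries, so a message that is completed out of order simply waits until everything before it is ready. A small standalone model of that scan; the names are invented and the atomics/IPI details are deliberately left out:

#include <stdio.h>

#define NENTRIES 8
#define M_READY  0x01			/* stands in for XPC_M_READY */

static unsigned char flags[NENTRIES];	/* per-slot msg->flags */

/* advance 'put' over consecutive ready slots; returns the new value */
static long advance_put(long put, long limit)
{
	while (put < limit && (flags[put % NENTRIES] & M_READY))
		put++;
	return put;
}

int main(void)
{
	long put = 0, w_put = 3;	/* three messages allocated so far */

	flags[0] = M_READY;
	flags[2] = M_READY;		/* msg 1 not marked ready yet */

	put = advance_put(put, w_put);
	printf("put=%ld (stops at the gap left by msg 1)\n", put);	/* 1 */

	flags[1] = M_READY;		/* the straggler finishes */
	put = advance_put(put, w_put);
	printf("put=%ld (now covers all three)\n", put);		/* 3 */
	return 0;
}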
1839
1840
1841/*
1842 * Common code that does the actual sending of the message by advancing the
1843 * local message queue's Put value and sends an IPI to the partition the
1844 * message is being sent to.
1845 */
1846static enum xpc_retval
1847xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
1848 xpc_notify_func func, void *key)
1849{
1850 enum xpc_retval ret = xpcSuccess;
1851 struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!!
1852 s64 put, msg_number = msg->number;
1853
1854
1855 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
1856 DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) !=
1857 msg_number % ch->local_nentries);
1858 DBUG_ON(msg->flags & XPC_M_READY);
1859
1860 if (ch->flags & XPC_C_DISCONNECTING) {
1861 /* drop the reference grabbed in xpc_allocate_msg() */
1862 xpc_msgqueue_deref(ch);
1863 return ch->reason;
1864 }
1865
1866 if (notify_type != 0) {
1867 /*
1868 * Tell the remote side to send an ACK interrupt when the
1869 * message has been delivered.
1870 */
1871 msg->flags |= XPC_M_INTERRUPT;
1872
1873 atomic_inc(&ch->n_to_notify);
1874
1875 notify = &ch->notify_queue[msg_number % ch->local_nentries];
1876 notify->func = func;
1877 notify->key = key;
1878 (volatile u8) notify->type = notify_type;
1879
1880 // >>> is a mb() needed here?
1881
1882 if (ch->flags & XPC_C_DISCONNECTING) {
1883 /*
1884 * An error occurred between our last error check and
1885 * this one. We will try to clear the type field from
1886 * the notify entry. If we succeed then
1887 * xpc_disconnect_channel() didn't already process
1888 * the notify entry.
1889 */
1890 if (cmpxchg(&notify->type, notify_type, 0) ==
1891 notify_type) {
1892 atomic_dec(&ch->n_to_notify);
1893 ret = ch->reason;
1894 }
1895
1896 /* drop the reference grabbed in xpc_allocate_msg() */
1897 xpc_msgqueue_deref(ch);
1898 return ret;
1899 }
1900 }
1901
1902 msg->flags |= XPC_M_READY;
1903
1904 /*
1905 * The preceding store of msg->flags must occur before the following
1906 * load of ch->local_GP->put.
1907 */
1908 mb();
1909
1910 /* see if the message is next in line to be sent, if so send it */
1911
1912 put = ch->local_GP->put;
1913 if (put == msg_number) {
1914 xpc_send_msgs(ch, put);
1915 }
1916
1917 /* drop the reference grabbed in xpc_allocate_msg() */
1918 xpc_msgqueue_deref(ch);
1919 return ret;
1920}
1921
1922
1923/*
1924 * Send a message previously allocated using xpc_initiate_allocate() on the
1925 * specified channel connected to the specified partition.
1926 *
1927 * This routine will not wait for the message to be received, nor will
1928 * notification be given when it does happen. Once this routine has returned
1929 * the message entry allocated via xpc_initiate_allocate() is no longer
1930 * accessible to the caller.
1931 *
1932 * This routine, although called by users, does not call xpc_part_ref() to
1933 * ensure that the partition infrastructure is in place. It relies on the
1934 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
1935 *
1936 * Arguments:
1937 *
1938 * partid - ID of partition to which the channel is connected.
1939 * ch_number - channel # to send message on.
1940 * payload - pointer to the payload area allocated via
1941 * xpc_initiate_allocate().
1942 */
1943enum xpc_retval
1944xpc_initiate_send(partid_t partid, int ch_number, void *payload)
1945{
1946 struct xpc_partition *part = &xpc_partitions[partid];
1947 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
1948 enum xpc_retval ret;
1949
1950
1951 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
1952 partid, ch_number);
1953
1954 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
1955 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
1956 DBUG_ON(msg == NULL);
1957
1958 ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL);
1959
1960 return ret;
1961}
1962
1963
1964/*
1965 * Send a message previously allocated using xpc_initiate_allocate on the
1966 * specified channel connected to the specified partition.
1967 *
1968 * This routine will not wait for the message to be sent. Once this routine
1969 * has returned the message entry allocated via xpc_initiate_allocate() is no
1970 * longer accessible to the caller.
1971 *
1972 * Once the remote end of the channel has received the message, the function
1973 * passed as an argument to xpc_initiate_send_notify() will be called. This
1974 * allows the sender to free up or re-use any buffers referenced by the
1975 * message, but does NOT mean the message has been processed at the remote
1976 * end by a receiver.
1977 *
1978 * If this routine returns an error, the caller's function will NOT be called.
1979 *
1980 * This routine, although called by users, does not call xpc_part_ref() to
1981 * ensure that the partition infrastructure is in place. It relies on the
1982 * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg().
1983 *
1984 * Arguments:
1985 *
1986 * partid - ID of partition to which the channel is connected.
1987 * ch_number - channel # to send message on.
1988 * payload - pointer to the payload area allocated via
1989 * xpc_initiate_allocate().
1990 * func - function to call with asynchronous notification of message
1991 * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
1992 * key - user-defined key to be passed to the function when it's called.
1993 */
1994enum xpc_retval
1995xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
1996 xpc_notify_func func, void *key)
1997{
1998 struct xpc_partition *part = &xpc_partitions[partid];
1999 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2000 enum xpc_retval ret;
2001
2002
2003 dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg,
2004 partid, ch_number);
2005
2006 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2007 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2008 DBUG_ON(msg == NULL);
2009 DBUG_ON(func == NULL);
2010
2011 ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL,
2012 func, key);
2013 return ret;
2014}
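Taken together, xpc_initiate_allocate() and xpc_initiate_send_notify() give senders the pattern sketched below. This fragment is only illustrative: it follows the signatures defined in this file and assumes the usual XPC declarations are in scope, but the channel number, payload contents and my_done_callback() are hypothetical, and callers would normally reach these routines through the wrapper layer in xp rather than directly.

/* hypothetical sender fragment (not part of the patch) */
static void
my_done_callback(enum xpc_retval reason, partid_t partid, int ch_number,
		 void *key)
{
	/* the remote side now has the message; buffers tied to it (key) can
	 * be reclaimed, but keep this non-blocking as documented above */
}

static enum xpc_retval
my_send_hello(partid_t partid, int ch_number)
{
	void *payload;
	enum xpc_retval ret;

	/* flags == 0 waits for a free entry; XPC_NOWAIT would return instead */
	ret = xpc_initiate_allocate(partid, ch_number, 0, &payload);
	if (ret != xpcSuccess)
		return ret;

	/* construct the caller-defined message in the payload area */
	strcpy((char *) payload, "hello");

	/* after this call the payload is no longer ours to touch */
	return xpc_initiate_send_notify(partid, ch_number, payload,
					my_done_callback, NULL);
}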
2015
2016
2017static struct xpc_msg *
2018xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
2019{
2020 struct xpc_partition *part = &xpc_partitions[ch->partid];
2021 struct xpc_msg *remote_msg, *msg;
2022 u32 msg_index, nmsgs;
2023 u64 msg_offset;
2024 enum xpc_retval ret;
2025
2026
2027 if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
2028 /* we were interrupted by a signal */
2029 return NULL;
2030 }
2031
2032 while (get >= ch->next_msg_to_pull) {
2033
2034 /* pull as many messages as are ready and able to be pulled */
2035
2036 msg_index = ch->next_msg_to_pull % ch->remote_nentries;
2037
2038 DBUG_ON(ch->next_msg_to_pull >=
2039 (volatile s64) ch->w_remote_GP.put);
2040 nmsgs = (volatile s64) ch->w_remote_GP.put -
2041 ch->next_msg_to_pull;
2042 if (msg_index + nmsgs > ch->remote_nentries) {
2043 /* ignore the ones that wrap the msg queue for now */
2044 nmsgs = ch->remote_nentries - msg_index;
2045 }
2046
2047 msg_offset = msg_index * ch->msg_size;
2048 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
2049 msg_offset);
2050 remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa +
2051 msg_offset);
2052
2053 if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
2054 nmsgs * ch->msg_size)) != xpcSuccess) {
2055
2056 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
2057 " msg %ld from partition %d, channel=%d, "
2058 "ret=%d\n", nmsgs, ch->next_msg_to_pull,
2059 ch->partid, ch->number, ret);
2060
2061 XPC_DEACTIVATE_PARTITION(part, ret);
2062
2063 up(&ch->msg_to_pull_sema);
2064 return NULL;
2065 }
2066
2067 mb(); /* >>> this may not be needed, we're not sure */
2068
2069 ch->next_msg_to_pull += nmsgs;
2070 }
2071
2072 up(&ch->msg_to_pull_sema);
2073
2074 /* return the message we were looking for */
2075 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2076 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset);
2077
2078 return msg;
2079}
2080
2081
2082/*
2083 * Get a message to be delivered.
2084 */
2085static struct xpc_msg *
2086xpc_get_deliverable_msg(struct xpc_channel *ch)
2087{
2088 struct xpc_msg *msg = NULL;
2089 s64 get;
2090
2091
2092 do {
2093 if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) {
2094 break;
2095 }
2096
2097 get = (volatile s64) ch->w_local_GP.get;
2098 if (get == (volatile s64) ch->w_remote_GP.put) {
2099 break;
2100 }
2101
2102 /* There are messages waiting to be pulled and delivered.
2103 * We need to try to secure one for ourselves. We'll do this
2104 * by trying to increment w_local_GP.get and hope that no one
2105		 * else beats us to it. If they do, we'll simply have
2106 * to try again for the next one.
2107 */
2108
2109 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) {
2110 /* we got the entry referenced by get */
2111
2112 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
2113 "partid=%d, channel=%d\n", get + 1,
2114 ch->partid, ch->number);
2115
2116 /* pull the message from the remote partition */
2117
2118 msg = xpc_pull_remote_msg(ch, get);
2119
2120 DBUG_ON(msg != NULL && msg->number != get);
2121 DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
2122 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
2123
2124 break;
2125 }
2126
2127 } while (1);
2128
2129 return msg;
2130}
2131
2132
2133/*
2134 * Deliver a message to its intended recipient.
2135 */
2136void
2137xpc_deliver_msg(struct xpc_channel *ch)
2138{
2139 struct xpc_msg *msg;
2140
2141
2142 if ((msg = xpc_get_deliverable_msg(ch)) != NULL) {
2143
2144 /*
2145 * This ref is taken to protect the payload itself from being
2146 * freed before the user is finished with it, which the user
2147 * indicates by calling xpc_initiate_received().
2148 */
2149 xpc_msgqueue_ref(ch);
2150
2151 atomic_inc(&ch->kthreads_active);
2152
2153 if (ch->func != NULL) {
2154 dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, "
2155 "msg_number=%ld, partid=%d, channel=%d\n",
2156 (void *) msg, msg->number, ch->partid,
2157 ch->number);
2158
2159 /* deliver the message to its intended recipient */
2160 ch->func(xpcMsgReceived, ch->partid, ch->number,
2161 &msg->payload, ch->key);
2162
2163 dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
2164 "msg_number=%ld, partid=%d, channel=%d\n",
2165 (void *) msg, msg->number, ch->partid,
2166 ch->number);
2167 }
2168
2169 atomic_dec(&ch->kthreads_active);
2170 }
2171}
2172
2173
2174/*
2175 * Now we actually acknowledge the messages that have been delivered and ack'd
2176 * by advancing the cached remote message queue's Get value and, if requested,
2177 * send an IPI to the message sender's partition.
2178 */
2179static void
2180xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2181{
2182 struct xpc_msg *msg;
2183 s64 get = initial_get + 1;
2184 int send_IPI = 0;
2185
2186
2187 while (1) {
2188
2189 while (1) {
2190 if (get == (volatile s64) ch->w_local_GP.get) {
2191 break;
2192 }
2193
2194 msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue +
2195 (get % ch->remote_nentries) * ch->msg_size);
2196
2197 if (!(msg->flags & XPC_M_DONE)) {
2198 break;
2199 }
2200
2201 msg_flags |= msg->flags;
2202 get++;
2203 }
2204
2205 if (get == initial_get) {
2206 /* nothing's changed */
2207 break;
2208 }
2209
2210 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) !=
2211 initial_get) {
2212 /* someone else beat us to it */
2213 DBUG_ON((volatile s64) ch->local_GP->get <=
2214 initial_get);
2215 break;
2216 }
2217
2218 /* we just set the new value of local_GP->get */
2219
2220 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2221 "channel=%d\n", get, ch->partid, ch->number);
2222
2223 send_IPI = (msg_flags & XPC_M_INTERRUPT);
2224
2225 /*
2226 * We need to ensure that the message referenced by
2227 * local_GP->get is not XPC_M_DONE or that local_GP->get
2228 * equals w_local_GP.get, so we'll go have a look.
2229 */
2230 initial_get = get;
2231 }
2232
2233 if (send_IPI) {
2234 xpc_IPI_send_msgrequest(ch);
2235 }
2236}
2237
2238
2239/*
2240 * Acknowledge receipt of a delivered message.
2241 *
2242 * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition
2243 * that sent the message.
2244 *
2245 * This function, although called by users, does not call xpc_part_ref() to
2246 * ensure that the partition infrastructure is in place. It relies on the
2247 * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg().
2248 *
2249 * Arguments:
2250 *
2251 * partid - ID of partition to which the channel is connected.
2252 * ch_number - channel # message received on.
2253 * payload - pointer to the payload area allocated via
2254 * xpc_initiate_allocate().
2255 */
2256void
2257xpc_initiate_received(partid_t partid, int ch_number, void *payload)
2258{
2259 struct xpc_partition *part = &xpc_partitions[partid];
2260 struct xpc_channel *ch;
2261 struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
2262 s64 get, msg_number = msg->number;
2263
2264
2265 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
2266 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
2267
2268 ch = &part->channels[ch_number];
2269
2270 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2271 (void *) msg, msg_number, ch->partid, ch->number);
2272
2273 DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) !=
2274 msg_number % ch->remote_nentries);
2275 DBUG_ON(msg->flags & XPC_M_DONE);
2276
2277 msg->flags |= XPC_M_DONE;
2278
2279 /*
2280 * The preceding store of msg->flags must occur before the following
2281 * load of ch->local_GP->get.
2282 */
2283 mb();
2284
2285 /*
2286 * See if this message is next in line to be acknowledged as having
2287 * been delivered.
2288 */
2289 get = ch->local_GP->get;
2290 if (get == msg_number) {
2291 xpc_acknowledge_msgs(ch, get, msg->flags);
2292 }
2293
2294 /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */
2295 xpc_msgqueue_deref(ch);
2296}
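On the receive side the flow mirrors this: xpc_deliver_msg() calls the function registered for the channel with reason xpcMsgReceived, and the payload remains valid until it is handed back through xpc_initiate_received(). A hedged sketch of such a callback follows; the function name and payload interpretation are invented, while the parameter list matches the ch->func() calls made in this file.

/* hypothetical registered channel function (not part of the patch) */
static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
		void *data, void *key)
{
	if (reason == xpcMsgReceived) {
		char *payload = (char *) data;

		/* consume the caller-defined payload... */
		printk(KERN_DEBUG "xpc example: got \"%s\" from partition "
		       "%d\n", payload, partid);

		/* ...then give the message queue entry back to XPC */
		xpc_initiate_received(partid, ch_number, payload);
		return;
	}

	/* everything else is a connection state callout (xpcConnected,
	 * disconnect reasons, etc.) */
}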
2297
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
new file mode 100644
index 000000000000..177ddb748ebe
--- /dev/null
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -0,0 +1,1064 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) support - standard version.
12 *
13 * XPC provides a message passing capability that crosses partition
14 * boundaries. This module is made up of two parts:
15 *
16 * partition This part detects the presence/absence of other
17 * partitions. It provides a heartbeat and monitors
18 * the heartbeats of other partitions.
19 *
20 * channel This part manages the channels and sends/receives
21 * messages across them to/from other partitions.
22 *
23 * There are a couple of additional functions residing in XP, which
24 * provide an interface to XPC for its users.
25 *
26 *
27 * Caveats:
28 *
29 * . We currently have no way to determine which nasid an IPI came
30 * from. Thus, xpc_IPI_send() does a remote AMO write followed by
31 * an IPI. The AMO indicates where data is to be pulled from, so
32 * after the IPI arrives, the remote partition checks the AMO word.
33 * The IPI can actually arrive before the AMO however, so other code
34 * must periodically check for this case. Also, remote AMO operations
35 * do not reliably time out. Thus we do a remote PIO read solely to
36 * know whether the remote partition is down and whether we should
37 * stop sending IPIs to it. This remote PIO read operation is set up
38 * in a special nofault region so SAL knows to ignore (and cleanup)
39 * any errors due to the remote AMO write, PIO read, and/or PIO
40 * write operations.
41 *
42 * If/when new hardware solves this IPI problem, we should abandon
43 * the current approach.
44 *
45 */
46
47
48#include <linux/kernel.h>
49#include <linux/module.h>
50#include <linux/init.h>
51#include <linux/sched.h>
52#include <linux/syscalls.h>
53#include <linux/cache.h>
54#include <linux/interrupt.h>
55#include <linux/slab.h>
56#include <asm/sn/intr.h>
57#include <asm/sn/sn_sal.h>
58#include <asm/uaccess.h>
59#include "xpc.h"
60
61
62/* define two XPC debug device structures to be used with dev_dbg() et al */
63
64struct device_driver xpc_dbg_name = {
65 .name = "xpc"
66};
67
68struct device xpc_part_dbg_subname = {
69 .bus_id = {0}, /* set to "part" at xpc_init() time */
70 .driver = &xpc_dbg_name
71};
72
73struct device xpc_chan_dbg_subname = {
74 .bus_id = {0}, /* set to "chan" at xpc_init() time */
75 .driver = &xpc_dbg_name
76};
77
78struct device *xpc_part = &xpc_part_dbg_subname;
79struct device *xpc_chan = &xpc_chan_dbg_subname;
80
81
82/* sysctl tunables exposed via the /proc/sys directories */
83
84static int xpc_hb_min = 1;
85static int xpc_hb_max = 10;
86
87static int xpc_hb_check_min = 10;
88static int xpc_hb_check_max = 120;
89
90static ctl_table xpc_sys_xpc_hb_dir[] = {
91 {
92 1,
93 "hb_interval",
94 &xpc_hb_interval,
95 sizeof(int),
96 0644,
97 NULL,
98 &proc_dointvec_minmax,
99 &sysctl_intvec,
100 NULL,
101 &xpc_hb_min, &xpc_hb_max
102 },
103 {
104 2,
105 "hb_check_interval",
106 &xpc_hb_check_interval,
107 sizeof(int),
108 0644,
109 NULL,
110 &proc_dointvec_minmax,
111 &sysctl_intvec,
112 NULL,
113 &xpc_hb_check_min, &xpc_hb_check_max
114 },
115 {0}
116};
117static ctl_table xpc_sys_xpc_dir[] = {
118 {
119 1,
120 "hb",
121 NULL,
122 0,
123 0555,
124 xpc_sys_xpc_hb_dir
125 },
126 {0}
127};
128static ctl_table xpc_sys_dir[] = {
129 {
130 1,
131 "xpc",
132 NULL,
133 0,
134 0555,
135 xpc_sys_xpc_dir
136 },
137 {0}
138};
139static struct ctl_table_header *xpc_sysctl;
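Once registered, the nested ctl_table entries above surface the two heartbeat tunables under /proc/sys. Assuming the resulting paths are /proc/sys/xpc/hb/hb_interval and /proc/sys/xpc/hb/hb_check_interval, which is what the directory names used here suggest, reading one from userspace is just a file read:

/* userspace probe; the path is assumed from the ctl_table nesting above */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/xpc/hb/hb_interval", "r");
	int val;

	if (f == NULL) {
		perror("/proc/sys/xpc/hb/hb_interval");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("XPC heartbeat interval: %d seconds\n", val);
	fclose(f);
	return 0;
}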
140
141
142/* #of IRQs received */
143static atomic_t xpc_act_IRQ_rcvd;
144
145/* IRQ handler notifies this wait queue on receipt of an IRQ */
146static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
147
148static unsigned long xpc_hb_check_timeout;
149
150/* xpc_hb_checker thread exited notification */
151static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
152
153/* xpc_discovery thread exited notification */
154static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
155
156
157static struct timer_list xpc_hb_timer;
158
159
160static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
161
162
163/*
164 * Notify the heartbeat check thread that an IRQ has been received.
165 */
166static irqreturn_t
167xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
168{
169 atomic_inc(&xpc_act_IRQ_rcvd);
170 wake_up_interruptible(&xpc_act_IRQ_wq);
171 return IRQ_HANDLED;
172}
173
174
175/*
176 * Timer to produce the heartbeat. The timer structure's function is
177 * already set when this is initially called. A tunable is used to
178 * specify when the next timeout should occur.
179 */
180static void
181xpc_hb_beater(unsigned long dummy)
182{
183 xpc_vars->heartbeat++;
184
185 if (jiffies >= xpc_hb_check_timeout) {
186 wake_up_interruptible(&xpc_act_IRQ_wq);
187 }
188
189 xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
190 add_timer(&xpc_hb_timer);
191}
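Because xpc_hb_beater() re-arms itself on every pass, the timer only has to be primed once with the 2.6-era timer API. The patch does this during initialization, outside this hunk; a hedged sketch of the equivalent priming step, with the helper name invented here:

/* hedged sketch of priming the heartbeat timer; the patch's own
 * initialization code does the equivalent of this */
static void
xpc_start_hb_beater_sketch(void)
{
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
	/* from here on, xpc_hb_beater() keeps re-arming the timer itself */
}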
192
193
194/*
195 * This thread is responsible for nearly all of the partition
196 * activation/deactivation.
197 */
198static int
199xpc_hb_checker(void *ignore)
200{
201 int last_IRQ_count = 0;
202 int new_IRQ_count;
203 int force_IRQ=0;
204
205
206 /* this thread was marked active by xpc_hb_init() */
207
208 daemonize(XPC_HB_CHECK_THREAD_NAME);
209
210 set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
211
212 xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
213
214 while (!(volatile int) xpc_exiting) {
215
216 /* wait for IRQ or timeout */
217 (void) wait_event_interruptible(xpc_act_IRQ_wq,
218 (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
219 jiffies >= xpc_hb_check_timeout ||
220 (volatile int) xpc_exiting));
221
222 dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
223 "been received\n",
224 (int) (xpc_hb_check_timeout - jiffies),
225 atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
226
227
228 /* checking of remote heartbeats is skewed by IRQ handling */
229 if (jiffies >= xpc_hb_check_timeout) {
230 dev_dbg(xpc_part, "checking remote heartbeats\n");
231 xpc_check_remote_hb();
232
233 /*
234 * We need to periodically recheck to ensure no
235 * IPI/AMO pairs have been missed. That check
236 * must always reset xpc_hb_check_timeout.
237 */
238 force_IRQ = 1;
239 }
240
241
242 new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
243 if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
244 force_IRQ = 0;
245
246 dev_dbg(xpc_part, "found an IRQ to process; will be "
247 "resetting xpc_hb_check_timeout\n");
248
249 last_IRQ_count += xpc_identify_act_IRQ_sender();
250 if (last_IRQ_count < new_IRQ_count) {
251 /* retry once to help avoid missing AMO */
252 (void) xpc_identify_act_IRQ_sender();
253 }
254 last_IRQ_count = new_IRQ_count;
255
256 xpc_hb_check_timeout = jiffies +
257 (xpc_hb_check_interval * HZ);
258 }
259 }
260
261 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
262
263
264 /* mark this thread as inactive */
265 up(&xpc_hb_checker_exited);
266 return 0;
267}
268
269
270/*
271 * This thread will attempt to discover other partitions to activate
272 * based on info provided by SAL. This new thread is short lived and
273 * will exit once discovery is complete.
274 */
275static int
276xpc_initiate_discovery(void *ignore)
277{
278 daemonize(XPC_DISCOVERY_THREAD_NAME);
279
280 xpc_discovery();
281
282 dev_dbg(xpc_part, "discovery thread is exiting\n");
283
284 /* mark this thread as inactive */
285 up(&xpc_discovery_exited);
286 return 0;
287}
288
289
290/*
291 * Establish first contact with the remote partition. This involves pulling
292 * the XPC per partition variables from the remote partition and waiting for
293 * the remote partition to pull ours.
294 */
295static enum xpc_retval
296xpc_make_first_contact(struct xpc_partition *part)
297{
298 enum xpc_retval ret;
299
300
301 while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
302 if (ret != xpcRetry) {
303 XPC_DEACTIVATE_PARTITION(part, ret);
304 return ret;
305 }
306
307 dev_dbg(xpc_chan, "waiting to make first contact with "
308 "partition %d\n", XPC_PARTID(part));
309
310		/* wait 1/4 of a second or so */
311 set_current_state(TASK_INTERRUPTIBLE);
312 (void) schedule_timeout(0.25 * HZ);
313
314 if (part->act_state == XPC_P_DEACTIVATING) {
315 return part->reason;
316 }
317 }
318
319 return xpc_mark_partition_active(part);
320}
321
322
323/*
324 * The first kthread assigned to a newly activated partition is the one
325 * created by XPC HB to call xpc_partition_up(). XPC hangs on to
326 * that kthread until the partition is brought down, at which time that kthread
327 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
328 * that XPC has dismantled all communication infrastructure for the associated
329 * partition.) This kthread becomes the channel manager for that partition.
330 *
331 * Each active partition has a channel manager, who, besides connecting and
332 * disconnecting channels, will ensure that each of the partition's connected
333 * channels has the required number of assigned kthreads to get the work done.
334 */
335static void
336xpc_channel_mgr(struct xpc_partition *part)
337{
338 while (part->act_state != XPC_P_DEACTIVATING ||
339 atomic_read(&part->nchannels_active) > 0) {
340
341 xpc_process_channel_activity(part);
342
343
344 /*
345 * Wait until we've been requested to activate kthreads or
346 * all of the channel's message queues have been torn down or
347 * a signal is pending.
348 *
349			 * The channel_mgr_requests count is set to 1 after the channel mgr is
350			 * awakened. This is done to prevent the channel mgr from making one pass
351 * through the loop for each request, since he will
352 * be servicing all the requests in one pass. The reason it's
353 * set to 1 instead of 0 is so that other kthreads will know
354 * that the channel mgr is running and won't bother trying to
355 * wake him up.
356 */
357 atomic_dec(&part->channel_mgr_requests);
358 (void) wait_event_interruptible(part->channel_mgr_wq,
359 (atomic_read(&part->channel_mgr_requests) > 0 ||
360 (volatile u64) part->local_IPI_amo != 0 ||
361 ((volatile u8) part->act_state ==
362 XPC_P_DEACTIVATING &&
363 atomic_read(&part->nchannels_active) == 0)));
364 atomic_set(&part->channel_mgr_requests, 1);
365
366 // >>> Does it need to wakeup periodically as well? In case we
367 // >>> miscalculated the #of kthreads to wakeup or create?
368 }
369}
370
371
372/*
373 * When XPC HB determines that a partition has come up, it will create a new
374 * kthread and that kthread will call this function to attempt to set up the
375 * basic infrastructure used for Cross Partition Communication with the newly
376 * upped partition.
377 *
378 * The kthread that was created by XPC HB and which set up the XPC
379 * infrastructure will remain assigned to the partition until the partition
380 * goes down, at which time the kthread will tear down the XPC infrastructure
381 * and then exit.
382 *
383 * XPC HB will put the physical address of the remote partition's XPC
384 * per-partition variables into xpc_partitions[partid].remote_vars_part_pa prior to
385 * calling xpc_partition_up().
386 */
387static void
388xpc_partition_up(struct xpc_partition *part)
389{
390 DBUG_ON(part->channels != NULL);
391
392 dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));
393
394 if (xpc_setup_infrastructure(part) != xpcSuccess) {
395 return;
396 }
397
398 /*
399 * The kthread that XPC HB called us with will become the
400 * channel manager for this partition. It will not return
401 * back to XPC HB until the partition's XPC infrastructure
402 * has been dismantled.
403 */
404
405 (void) xpc_part_ref(part); /* this will always succeed */
406
407 if (xpc_make_first_contact(part) == xpcSuccess) {
408 xpc_channel_mgr(part);
409 }
410
411 xpc_part_deref(part);
412
413 xpc_teardown_infrastructure(part);
414}
415
416
417static int
418xpc_activating(void *__partid)
419{
420 partid_t partid = (u64) __partid;
421 struct xpc_partition *part = &xpc_partitions[partid];
422 unsigned long irq_flags;
423 struct sched_param param = { sched_priority: MAX_USER_RT_PRIO - 1 };
424 int ret;
425
426
427 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
428
429 spin_lock_irqsave(&part->act_lock, irq_flags);
430
431 if (part->act_state == XPC_P_DEACTIVATING) {
432 part->act_state = XPC_P_INACTIVE;
433 spin_unlock_irqrestore(&part->act_lock, irq_flags);
434 part->remote_rp_pa = 0;
435 return 0;
436 }
437
438 /* indicate the thread is activating */
439 DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
440 part->act_state = XPC_P_ACTIVATING;
441
442 XPC_SET_REASON(part, 0, 0);
443 spin_unlock_irqrestore(&part->act_lock, irq_flags);
444
445 dev_dbg(xpc_part, "bringing partition %d up\n", partid);
446
447 daemonize("xpc%02d", partid);
448
449 /*
450 * This thread needs to run at a realtime priority to prevent a
451 * significant performance degradation.
452 */
453 ret = sched_setscheduler(current, SCHED_FIFO, &param);
454 if (ret != 0) {
455 dev_warn(xpc_part, "unable to set pid %d to a realtime "
456 "priority, ret=%d\n", current->pid, ret);
457 }
458
459 /* allow this thread and its children to run on any CPU */
460 set_cpus_allowed(current, CPU_MASK_ALL);
461
462 /*
463 * Register the remote partition's AMOs with SAL so it can handle
464 * and cleanup errors within that address range should the remote
465 * partition go down. We don't unregister this range because it is
466 * difficult to tell when outstanding writes to the remote partition
467 * are finished and thus when it is safe to unregister. This should
468 * not result in wasted space in the SAL xp_addr_region table because
469 * we should get the same page for remote_amos_page_pa after module
470 * reloads and system reboots.
471 */
472 if (sn_register_xp_addr_region(part->remote_amos_page_pa,
473 PAGE_SIZE, 1) < 0) {
474 dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
475 "xp_addr region\n", partid);
476
477 spin_lock_irqsave(&part->act_lock, irq_flags);
478 part->act_state = XPC_P_INACTIVE;
479 XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__);
480 spin_unlock_irqrestore(&part->act_lock, irq_flags);
481 part->remote_rp_pa = 0;
482 return 0;
483 }
484
485 XPC_ALLOW_HB(partid, xpc_vars);
486 xpc_IPI_send_activated(part);
487
488
489 /*
490 * xpc_partition_up() holds this thread and marks this partition as
491 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
492 */
493 (void) xpc_partition_up(part);
494
495 xpc_mark_partition_inactive(part);
496
497 if (part->reason == xpcReactivating) {
498 /* interrupting ourselves results in activating partition */
499 xpc_IPI_send_reactivate(part);
500 }
501
502 return 0;
503}
504
505
506void
507xpc_activate_partition(struct xpc_partition *part)
508{
509 partid_t partid = XPC_PARTID(part);
510 unsigned long irq_flags;
511 pid_t pid;
512
513
514 spin_lock_irqsave(&part->act_lock, irq_flags);
515
516 pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
517
518 DBUG_ON(part->act_state != XPC_P_INACTIVE);
519
520 if (pid > 0) {
521 part->act_state = XPC_P_ACTIVATION_REQ;
522 XPC_SET_REASON(part, xpcCloneKThread, __LINE__);
523 } else {
524 XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__);
525 }
526
527 spin_unlock_irqrestore(&part->act_lock, irq_flags);
528}
529
530
531/*
532 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
533 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
534 * than one partition, we use an AMO_t structure per partition to indicate
535 * whether a partition has sent an IPI or not. >>> If it has, then wake up the
536 * associated kthread to handle it.
537 *
538 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
539 * running on other partitions.
540 *
541 * Noteworthy Arguments:
542 *
543 * irq - Interrupt ReQuest number. NOT USED.
544 *
545 * dev_id - partid of IPI's potential sender.
546 *
547 * regs - processor's context before the processor entered
548 * interrupt code. NOT USED.
549 */
550irqreturn_t
551xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs)
552{
553 partid_t partid = (partid_t) (u64) dev_id;
554 struct xpc_partition *part = &xpc_partitions[partid];
555
556
557 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
558
559 if (xpc_part_ref(part)) {
560 xpc_check_for_channel_activity(part);
561
562 xpc_part_deref(part);
563 }
564 return IRQ_HANDLED;
565}
566
567
568/*
569 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
570 * because the write to their associated IPI amo completed after the IRQ/IPI
571 * was received.
572 */
573void
574xpc_dropped_IPI_check(struct xpc_partition *part)
575{
576 if (xpc_part_ref(part)) {
577 xpc_check_for_channel_activity(part);
578
579 part->dropped_IPI_timer.expires = jiffies +
580 XPC_P_DROPPED_IPI_WAIT;
581 add_timer(&part->dropped_IPI_timer);
582 xpc_part_deref(part);
583 }
584}
585
586
587void
588xpc_activate_kthreads(struct xpc_channel *ch, int needed)
589{
590 int idle = atomic_read(&ch->kthreads_idle);
591 int assigned = atomic_read(&ch->kthreads_assigned);
592 int wakeup;
593
594
595 DBUG_ON(needed <= 0);
596
597 if (idle > 0) {
598 wakeup = (needed > idle) ? idle : needed;
599 needed -= wakeup;
600
601 dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
602 "channel=%d\n", wakeup, ch->partid, ch->number);
603
604 /* only wakeup the requested number of kthreads */
605 wake_up_nr(&ch->idle_wq, wakeup);
606 }
607
608 if (needed <= 0) {
609 return;
610 }
611
612 if (needed + assigned > ch->kthreads_assigned_limit) {
613 needed = ch->kthreads_assigned_limit - assigned;
614 // >>>should never be less than 0
615 if (needed <= 0) {
616 return;
617 }
618 }
619
620 dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
621 needed, ch->partid, ch->number);
622
623 xpc_create_kthreads(ch, needed);
624}
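Editor's note: xpc_activate_kthreads() first wakes idle kthreads and only creates new ones for whatever demand remains, capped by kthreads_assigned_limit. A minimal userspace sketch of that arithmetic (all names local to the sketch):

	/* Sketch of the wake-vs-create arithmetic used by xpc_activate_kthreads(). */
	#include <stdio.h>

	struct kthread_plan {
		int wakeup;	/* idle kthreads to wake */
		int create;	/* new kthreads to create */
	};

	static struct kthread_plan
	plan_kthreads(int needed, int idle, int assigned, int assigned_limit)
	{
		struct kthread_plan p = { 0, 0 };

		if (idle > 0) {
			p.wakeup = (needed > idle) ? idle : needed;
			needed -= p.wakeup;
		}
		if (needed > 0 && needed + assigned > assigned_limit)
			needed = assigned_limit - assigned;	/* never exceed the limit */
		p.create = (needed > 0) ? needed : 0;
		return p;
	}

	int main(void)
	{
		struct kthread_plan p = plan_kthreads(5, 2, 6, 8);

		printf("wakeup=%d create=%d\n", p.wakeup, p.create);	/* wakeup=2 create=2 */
		return 0;
	}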
625
626
627/*
628 * This function is where XPC's kthreads wait for messages to deliver.
629 */
630static void
631xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
632{
633 do {
634 /* deliver messages to their intended recipients */
635
636 while ((volatile s64) ch->w_local_GP.get <
637 (volatile s64) ch->w_remote_GP.put &&
638 !((volatile u32) ch->flags &
639 XPC_C_DISCONNECTING)) {
640 xpc_deliver_msg(ch);
641 }
642
643 if (atomic_inc_return(&ch->kthreads_idle) >
644 ch->kthreads_idle_limit) {
645 /* too many idle kthreads on this channel */
646 atomic_dec(&ch->kthreads_idle);
647 break;
648 }
649
650 dev_dbg(xpc_chan, "idle kthread calling "
651 "wait_event_interruptible_exclusive()\n");
652
653 (void) wait_event_interruptible_exclusive(ch->idle_wq,
654 ((volatile s64) ch->w_local_GP.get <
655 (volatile s64) ch->w_remote_GP.put ||
656 ((volatile u32) ch->flags &
657 XPC_C_DISCONNECTING)));
658
659 atomic_dec(&ch->kthreads_idle);
660
661 } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
662}
663
664
665static int
666xpc_daemonize_kthread(void *args)
667{
668 partid_t partid = XPC_UNPACK_ARG1(args);
669 u16 ch_number = XPC_UNPACK_ARG2(args);
670 struct xpc_partition *part = &xpc_partitions[partid];
671 struct xpc_channel *ch;
672 int n_needed;
673
674
675 daemonize("xpc%02dc%d", partid, ch_number);
676
677 dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
678 partid, ch_number);
679
680 ch = &part->channels[ch_number];
681
682 if (!(ch->flags & XPC_C_DISCONNECTING)) {
683 DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
684
685 /* let registerer know that connection has been established */
686
687 if (atomic_read(&ch->kthreads_assigned) == 1) {
688 xpc_connected_callout(ch);
689
690 /*
691			 * It is possible that, while the callout was being
692			 * made, the remote partition sent some messages.
693 * If that is the case, we may need to activate
694 * additional kthreads to help deliver them. We only
695 * need one less than total #of messages to deliver.
696 */
697 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
698 if (n_needed > 0 &&
699 !(ch->flags & XPC_C_DISCONNECTING)) {
700 xpc_activate_kthreads(ch, n_needed);
701 }
702 }
703
704 xpc_kthread_waitmsgs(part, ch);
705 }
706
707 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
708 ((ch->flags & XPC_C_CONNECTCALLOUT) ||
709 (ch->reason != xpcUnregistering &&
710 ch->reason != xpcOtherUnregistering))) {
711 xpc_disconnected_callout(ch);
712 }
713
714
715 xpc_msgqueue_deref(ch);
716
717 dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
718 partid, ch_number);
719
720 xpc_part_deref(part);
721 return 0;
722}
723
724
725/*
726 * For each partition that XPC has established communications with, there is
727 * a minimum of one kernel thread assigned to perform any operation that
728 * may potentially sleep or block (basically the callouts to the asynchronous
729 * functions registered via xpc_connect()).
730 *
731 * Additional kthreads are created and destroyed by XPC as the workload
732 * demands.
733 *
734 * A kthread is assigned to one of the active channels that exists for a given
735 * partition.
736 */
737void
738xpc_create_kthreads(struct xpc_channel *ch, int needed)
739{
740 unsigned long irq_flags;
741 pid_t pid;
742 u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
743
744
745 while (needed-- > 0) {
746 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
747 if (pid < 0) {
748 /* the fork failed */
749
750 if (atomic_read(&ch->kthreads_assigned) <
751 ch->kthreads_idle_limit) {
752 /*
753 * Flag this as an error only if we have an
754 * insufficient #of kthreads for the channel
755 * to function.
756 *
757 * No xpc_msgqueue_ref() is needed here since
758 * the channel mgr is doing this.
759 */
760 spin_lock_irqsave(&ch->lock, irq_flags);
761 XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
762 &irq_flags);
763 spin_unlock_irqrestore(&ch->lock, irq_flags);
764 }
765 break;
766 }
767
768 /*
769 * The following is done on behalf of the newly created
770 * kthread. That kthread is responsible for doing the
771 * counterpart to the following before it exits.
772 */
773 (void) xpc_part_ref(&xpc_partitions[ch->partid]);
774 xpc_msgqueue_ref(ch);
775 atomic_inc(&ch->kthreads_assigned);
776 ch->kthreads_created++; // >>> temporary debug only!!!
777 }
778}
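Editor's note: xpc_create_kthreads() squeezes the partid and channel number into a single u64 with XPC_PACK_ARGS() so they can be handed to kernel_thread() as one argument, and xpc_daemonize_kthread() recovers them with XPC_UNPACK_ARG1/2(). The real macros live in xpc.h (not shown here); the sketch below assumes one plausible layout — low 8 bits for the partid, next 16 bits for the channel — purely for illustration.

	/* Illustrative pack/unpack of (partid, ch_number) into one u64; the real
	 * XPC_PACK_ARGS()/XPC_UNPACK_ARG*() layout is defined in xpc.h. */
	#include <stdio.h>
	#include <stdint.h>
	#include <assert.h>

	#define PACK_ARGS(partid, ch)	((((uint64_t)(ch)) << 8) | ((uint64_t)(partid) & 0xff))
	#define UNPACK_ARG1(args)	((int)((args) & 0xff))		/* partid */
	#define UNPACK_ARG2(args)	((int)(((args) >> 8) & 0xffff))	/* channel number */

	int main(void)
	{
		uint64_t args = PACK_ARGS(3, 7);	/* partition 3, channel 7 */

		assert(UNPACK_ARG1(args) == 3);
		assert(UNPACK_ARG2(args) == 7);
		printf("args=0x%llx partid=%d channel=%d\n",
		       (unsigned long long)args, UNPACK_ARG1(args), UNPACK_ARG2(args));
		return 0;
	}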
779
780
781void
782xpc_disconnect_wait(int ch_number)
783{
784 partid_t partid;
785 struct xpc_partition *part;
786 struct xpc_channel *ch;
787
788
789 /* now wait for all callouts to the caller's function to cease */
790 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
791 part = &xpc_partitions[partid];
792
793 if (xpc_part_ref(part)) {
794 ch = &part->channels[ch_number];
795
796// >>> how do we keep from falling into the window between our check and going
797// >>> down and coming back up where sema is re-inited?
798 if (ch->flags & XPC_C_SETUP) {
799 (void) down(&ch->teardown_sema);
800 }
801
802 xpc_part_deref(part);
803 }
804 }
805}
806
807
808static void
809xpc_do_exit(void)
810{
811 partid_t partid;
812 int active_part_count;
813 struct xpc_partition *part;
814
815
816 /* now it's time to eliminate our heartbeat */
817 del_timer_sync(&xpc_hb_timer);
818 xpc_vars->heartbeating_to_mask = 0;
819
820 /* indicate to others that our reserved page is uninitialized */
821 xpc_rsvd_page->vars_pa = 0;
822
823 /*
824	 * Ignore all incoming interrupts. Without interrupts the heartbeat
825 * checker won't activate any new partitions that may come up.
826 */
827 free_irq(SGI_XPC_ACTIVATE, NULL);
828
829 /*
830 * Cause the heartbeat checker and the discovery threads to exit.
831 * We don't want them attempting to activate new partitions as we
832 * try to deactivate the existing ones.
833 */
834 xpc_exiting = 1;
835 wake_up_interruptible(&xpc_act_IRQ_wq);
836
837 /* wait for the heartbeat checker thread to mark itself inactive */
838 down(&xpc_hb_checker_exited);
839
840 /* wait for the discovery thread to mark itself inactive */
841 down(&xpc_discovery_exited);
842
843
844 set_current_state(TASK_INTERRUPTIBLE);
845 schedule_timeout(0.3 * HZ);
846 set_current_state(TASK_RUNNING);
847
848
849 /* wait for all partitions to become inactive */
850
851 do {
852 active_part_count = 0;
853
854 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
855 part = &xpc_partitions[partid];
856 if (part->act_state != XPC_P_INACTIVE) {
857 active_part_count++;
858
859 XPC_DEACTIVATE_PARTITION(part, xpcUnloading);
860 }
861 }
862
863 if (active_part_count) {
864 set_current_state(TASK_INTERRUPTIBLE);
865 schedule_timeout(0.3 * HZ);
866 set_current_state(TASK_RUNNING);
867 }
868
869 } while (active_part_count > 0);
870
871
872 /* close down protections for IPI operations */
873 xpc_restrict_IPI_ops();
874
875
876 /* clear the interface to XPC's functions */
877 xpc_clear_interface();
878
879 if (xpc_sysctl) {
880 unregister_sysctl_table(xpc_sysctl);
881 }
882}
883
884
885int __init
886xpc_init(void)
887{
888 int ret;
889 partid_t partid;
890 struct xpc_partition *part;
891 pid_t pid;
892
893
894 /*
895 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
896 * both a partition's reserved page and its XPC variables. Its size was
897 * based on the size of a reserved page. So we need to ensure that the
898 * XPC variables will fit as well.
899 */
900 if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) {
901 dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
902 return -EPERM;
903 }
904 DBUG_ON((u64) xpc_remote_copy_buffer !=
905 L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer));
906
907 snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
908 snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");
909
910 xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1);
911
912 /*
913 * The first few fields of each entry of xpc_partitions[] need to
914 * be initialized now so that calls to xpc_connect() and
915 * xpc_disconnect() can be made prior to the activation of any remote
916 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
917 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
918 * PARTITION HAS BEEN ACTIVATED.
919 */
920 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
921 part = &xpc_partitions[partid];
922
923 DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
924
925 part->act_IRQ_rcvd = 0;
926 spin_lock_init(&part->act_lock);
927 part->act_state = XPC_P_INACTIVE;
928 XPC_SET_REASON(part, 0, 0);
929 part->setup_state = XPC_P_UNSET;
930 init_waitqueue_head(&part->teardown_wq);
931 atomic_set(&part->references, 0);
932 }
933
934 /*
935 * Open up protections for IPI operations (and AMO operations on
936 * Shub 1.1 systems).
937 */
938 xpc_allow_IPI_ops();
939
940 /*
941 * Interrupts being processed will increment this atomic variable and
942 * awaken the heartbeat thread which will process the interrupts.
943 */
944 atomic_set(&xpc_act_IRQ_rcvd, 0);
945
946 /*
947 * This is safe to do before the xpc_hb_checker thread has started
948 * because the handler releases a wait queue. If an interrupt is
949 * received before the thread is waiting, it will not go to sleep,
950 * but rather immediately process the interrupt.
951 */
952 ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
953 "xpc hb", NULL);
954 if (ret != 0) {
955 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
956 "errno=%d\n", -ret);
957
958 xpc_restrict_IPI_ops();
959
960 if (xpc_sysctl) {
961 unregister_sysctl_table(xpc_sysctl);
962 }
963 return -EBUSY;
964 }
965
966 /*
967 * Fill the partition reserved page with the information needed by
968 * other partitions to discover we are alive and establish initial
969 * communications.
970 */
971 xpc_rsvd_page = xpc_rsvd_page_init();
972 if (xpc_rsvd_page == NULL) {
973 dev_err(xpc_part, "could not setup our reserved page\n");
974
975 free_irq(SGI_XPC_ACTIVATE, NULL);
976 xpc_restrict_IPI_ops();
977
978 if (xpc_sysctl) {
979 unregister_sysctl_table(xpc_sysctl);
980 }
981 return -EBUSY;
982 }
983
984
985 /*
986 * Set the beating to other partitions into motion. This is
987 * the last requirement for other partitions' discovery to
988 * initiate communications with us.
989 */
990 init_timer(&xpc_hb_timer);
991 xpc_hb_timer.function = xpc_hb_beater;
992 xpc_hb_beater(0);
993
994
995 /*
996 * The real work-horse behind xpc. This processes incoming
997 * interrupts and monitors remote heartbeats.
998 */
999 pid = kernel_thread(xpc_hb_checker, NULL, 0);
1000 if (pid < 0) {
1001 dev_err(xpc_part, "failed while forking hb check thread\n");
1002
1003 /* indicate to others that our reserved page is uninitialized */
1004 xpc_rsvd_page->vars_pa = 0;
1005
1006 del_timer_sync(&xpc_hb_timer);
1007 free_irq(SGI_XPC_ACTIVATE, NULL);
1008 xpc_restrict_IPI_ops();
1009
1010 if (xpc_sysctl) {
1011 unregister_sysctl_table(xpc_sysctl);
1012 }
1013 return -EBUSY;
1014 }
1015
1016
1017 /*
1018 * Startup a thread that will attempt to discover other partitions to
1019 * activate based on info provided by SAL. This new thread is short
1020 * lived and will exit once discovery is complete.
1021 */
1022 pid = kernel_thread(xpc_initiate_discovery, NULL, 0);
1023 if (pid < 0) {
1024 dev_err(xpc_part, "failed while forking discovery thread\n");
1025
1026 /* mark this new thread as a non-starter */
1027 up(&xpc_discovery_exited);
1028
1029 xpc_do_exit();
1030 return -EBUSY;
1031 }
1032
1033
1034 /* set the interface to point at XPC's functions */
1035 xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
1036 xpc_initiate_allocate, xpc_initiate_send,
1037 xpc_initiate_send_notify, xpc_initiate_received,
1038 xpc_initiate_partid_to_nasids);
1039
1040 return 0;
1041}
1042module_init(xpc_init);
1043
1044
1045void __exit
1046xpc_exit(void)
1047{
1048 xpc_do_exit();
1049}
1050module_exit(xpc_exit);
1051
1052
1053MODULE_AUTHOR("Silicon Graphics, Inc.");
1054MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
1055MODULE_LICENSE("GPL");
1056
1057module_param(xpc_hb_interval, int, 0);
1058MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
1059 "heartbeat increments.");
1060
1061module_param(xpc_hb_check_interval, int, 0);
1062MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1063 "heartbeat checks.");
1064
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
new file mode 100644
index 000000000000..2c3c4a8af553
--- /dev/null
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -0,0 +1,984 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9
10/*
11 * Cross Partition Communication (XPC) partition support.
12 *
13 * This is the part of XPC that detects the presence/absence of
14 * other partitions. It provides a heartbeat and monitors the
15 * heartbeats of other partitions.
16 *
17 */
18
19
20#include <linux/kernel.h>
21#include <linux/sysctl.h>
22#include <linux/cache.h>
23#include <linux/mmzone.h>
24#include <linux/nodemask.h>
25#include <asm/sn/bte.h>
26#include <asm/sn/intr.h>
27#include <asm/sn/sn_sal.h>
28#include <asm/sn/nodepda.h>
29#include <asm/sn/addrs.h>
30#include "xpc.h"
31
32
33/* XPC is exiting flag */
34int xpc_exiting;
35
36
37/* SH_IPI_ACCESS shub register value on startup */
38static u64 xpc_sh1_IPI_access;
39static u64 xpc_sh2_IPI_access0;
40static u64 xpc_sh2_IPI_access1;
41static u64 xpc_sh2_IPI_access2;
42static u64 xpc_sh2_IPI_access3;
43
44
45/* original protection values for each node */
46u64 xpc_prot_vec[MAX_COMPACT_NODES];
47
48
49/* this partition's reserved page */
50struct xpc_rsvd_page *xpc_rsvd_page;
51
52/* this partition's XPC variables (within the reserved page) */
53struct xpc_vars *xpc_vars;
54struct xpc_vars_part *xpc_vars_part;
55
56
57/*
58 * For performance reasons, each entry of xpc_partitions[] is cacheline
59 * aligned. And xpc_partitions[] is padded with an additional entry at the
60 * end so that the last legitimate entry doesn't share its cacheline with
61 * another variable.
62 */
63struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
64
65
66/*
67 * Generic buffer used to store a local copy of the remote partition's
68 * reserved page or XPC variables.
69 *
70 * xpc_discovery runs only once and is a separate thread that is
71 * very likely going to be processing in parallel with receiving
72 * interrupts.
73 */
74char ____cacheline_aligned
75 xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE];
76
77
78/* systune related variables */
79int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
80int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT;
81
82
83/*
84 * Given a nasid, get the physical address of the partition's reserved page
85 * for that nasid. This function returns 0 on any error.
86 */
87static u64
88xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
89{
90 bte_result_t bte_res;
91 s64 status;
92 u64 cookie = 0;
93 u64 rp_pa = nasid; /* seed with nasid */
94 u64 len = 0;
95
96
97 while (1) {
98
99 status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
100 &len);
101
102 dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
103 "0x%016lx, address=0x%016lx, len=0x%016lx\n",
104 status, cookie, rp_pa, len);
105
106 if (status != SALRET_MORE_PASSES) {
107 break;
108 }
109
110 if (len > buf_size) {
111 dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
112 status = SALRET_ERROR;
113 break;
114 }
115
116 bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
117 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
118 if (bte_res != BTE_SUCCESS) {
119 dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
120 status = SALRET_ERROR;
121 break;
122 }
123 }
124
125 if (status != SALRET_OK) {
126 rp_pa = 0;
127 }
128 dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
129 return rp_pa;
130}
131
132
133/*
134 * Fill the partition reserved page with the information needed by
135 * other partitions to discover we are alive and establish initial
136 * communications.
137 */
138struct xpc_rsvd_page *
139xpc_rsvd_page_init(void)
140{
141 struct xpc_rsvd_page *rp;
142 AMO_t *amos_page;
143 u64 rp_pa, next_cl, nasid_array = 0;
144 int i, ret;
145
146
147 /* get the local reserved page's address */
148
149 rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
150 (u64) xpc_remote_copy_buffer,
151 XPC_RSVD_PAGE_ALIGNED_SIZE);
152 if (rp_pa == 0) {
153 dev_err(xpc_part, "SAL failed to locate the reserved page\n");
154 return NULL;
155 }
156 rp = (struct xpc_rsvd_page *) __va(rp_pa);
157
158 if (rp->partid != sn_partition_id) {
159 dev_err(xpc_part, "the reserved page's partid of %d should be "
160 "%d\n", rp->partid, sn_partition_id);
161 return NULL;
162 }
163
164 rp->version = XPC_RP_VERSION;
165
166 /*
167 * Place the XPC variables on the cache line following the
168 * reserved page structure.
169 */
170 next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
171 xpc_vars = (struct xpc_vars *) next_cl;
172
173 /*
174 * Before clearing xpc_vars, see if a page of AMOs had been previously
175	 * allocated. If not, we'll need to allocate one and set permissions
176 * so that cross-partition AMOs are allowed.
177 *
178 * The allocated AMO page needs MCA reporting to remain disabled after
179 * XPC has unloaded. To make this work, we keep a copy of the pointer
180 * to this page (i.e., amos_page) in the struct xpc_vars structure,
181 * which is pointed to by the reserved page, and re-use that saved copy
182 * on subsequent loads of XPC. This AMO page is never freed, and its
183 * memory protections are never restricted.
184 */
185 if ((amos_page = xpc_vars->amos_page) == NULL) {
186 amos_page = (AMO_t *) mspec_kalloc_page(0);
187 if (amos_page == NULL) {
188 dev_err(xpc_part, "can't allocate page of AMOs\n");
189 return NULL;
190 }
191
192 /*
193 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
194 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
195 */
196 if (!enable_shub_wars_1_1()) {
197 ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
198 PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
199 &nasid_array);
200 if (ret != 0) {
201 dev_err(xpc_part, "can't change memory "
202 "protections\n");
203 mspec_kfree_page((unsigned long) amos_page);
204 return NULL;
205 }
206 }
207 } else if (!IS_AMO_ADDRESS((u64) amos_page)) {
208 /*
209 * EFI's XPBOOT can also set amos_page in the reserved page,
210 * but it happens to leave it as an uncached physical address
211		 * and we need it to be an uncached virtual address, so we'll have to
212 * convert it.
213 */
214 if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
215 dev_err(xpc_part, "previously used amos_page address "
216 "is bad = 0x%p\n", (void *) amos_page);
217 return NULL;
218 }
219 amos_page = (AMO_t *) TO_AMO((u64) amos_page);
220 }
221
222 memset(xpc_vars, 0, sizeof(struct xpc_vars));
223
224 /*
225 * Place the XPC per partition specific variables on the cache line
226 * following the XPC variables structure.
227 */
228 next_cl += XPC_VARS_ALIGNED_SIZE;
229 memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
230 XP_MAX_PARTITIONS);
231 xpc_vars_part = (struct xpc_vars_part *) next_cl;
232 xpc_vars->vars_part_pa = __pa(next_cl);
233
234 xpc_vars->version = XPC_V_VERSION;
235 xpc_vars->act_nasid = cpuid_to_nasid(0);
236 xpc_vars->act_phys_cpuid = cpu_physical_id(0);
237 xpc_vars->amos_page = amos_page; /* save for next load of XPC */
238
239
240 /*
241 * Initialize the activation related AMO variables.
242 */
243 xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
244 for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
245 xpc_IPI_init(i + XP_MAX_PARTITIONS);
246 }
247 /* export AMO page's physical address to other partitions */
248 xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);
249
250 /*
251 * This signifies to the remote partition that our reserved
252 * page is initialized.
253 */
254 (volatile u64) rp->vars_pa = __pa(xpc_vars);
255
256 return rp;
257}
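Editor's note: xpc_rsvd_page_init() lays three structures out back to back — the reserved page itself, then (on the next cacheline boundary) struct xpc_vars, then the per-partition xpc_vars_part array. The sketch below just reproduces that offset arithmetic with a stand-in cacheline-align macro; the sizes are placeholders, not the real XPC_RSVD_PAGE_ALIGNED_SIZE/XPC_VARS_ALIGNED_SIZE.

	/* Offset arithmetic mirroring xpc_rsvd_page_init()'s layout; sizes assumed. */
	#include <stdio.h>
	#include <stdint.h>

	#define L1_CACHE_BYTES	128UL
	#define CACHE_ALIGN(x)	(((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

	#define RSVD_PAGE_SIZE	200UL	/* assumed sizeof(struct xpc_rsvd_page) */
	#define VARS_SIZE	90UL	/* assumed sizeof(struct xpc_vars) */

	int main(void)
	{
		uint64_t rp = 0x1000;				/* pretend address of the reserved page */
		uint64_t vars = rp + CACHE_ALIGN(RSVD_PAGE_SIZE);	/* struct xpc_vars */
		uint64_t vars_part = vars + CACHE_ALIGN(VARS_SIZE);	/* struct xpc_vars_part[] */

		printf("rp=0x%llx vars=0x%llx vars_part=0x%llx\n",
		       (unsigned long long)rp, (unsigned long long)vars,
		       (unsigned long long)vars_part);
		return 0;
	}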
258
259
260/*
261 * Change protections to allow IPI operations (and AMO operations on
262 * Shub 1.1 systems).
263 */
264void
265xpc_allow_IPI_ops(void)
266{
267 int node;
268 int nasid;
269
270
271 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
272
273 if (is_shub2()) {
274 xpc_sh2_IPI_access0 =
275 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
276 xpc_sh2_IPI_access1 =
277 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
278 xpc_sh2_IPI_access2 =
279 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
280 xpc_sh2_IPI_access3 =
281 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
282
283 for_each_online_node(node) {
284 nasid = cnodeid_to_nasid(node);
285 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
286 -1UL);
287 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
288 -1UL);
289 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
290 -1UL);
291 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
292 -1UL);
293 }
294
295 } else {
296 xpc_sh1_IPI_access =
297 (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
298
299 for_each_online_node(node) {
300 nasid = cnodeid_to_nasid(node);
301 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
302 -1UL);
303
304 /*
305 * Since the BIST collides with memory operations on
306			 * SHUB 1.1, sn_change_memprotect() cannot be used.
307 */
308 if (enable_shub_wars_1_1()) {
309 /* open up everything */
310 xpc_prot_vec[node] = (u64) HUB_L((u64 *)
311 GLOBAL_MMR_ADDR(nasid,
312 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
313 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
314 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
315 -1UL);
316 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
317 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
318 -1UL);
319 }
320 }
321 }
322}
323
324
325/*
326 * Restrict protections to disallow IPI operations (and AMO operations on
327 * Shub 1.1 systems).
328 */
329void
330xpc_restrict_IPI_ops(void)
331{
332 int node;
333 int nasid;
334
335
336 // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.
337
338 if (is_shub2()) {
339
340 for_each_online_node(node) {
341 nasid = cnodeid_to_nasid(node);
342 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
343 xpc_sh2_IPI_access0);
344 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
345 xpc_sh2_IPI_access1);
346 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
347 xpc_sh2_IPI_access2);
348 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
349 xpc_sh2_IPI_access3);
350 }
351
352 } else {
353
354 for_each_online_node(node) {
355 nasid = cnodeid_to_nasid(node);
356 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
357 xpc_sh1_IPI_access);
358
359 if (enable_shub_wars_1_1()) {
360 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
361 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
362 xpc_prot_vec[node]);
363 HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
364 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
365 xpc_prot_vec[node]);
366 }
367 }
368 }
369}
370
371
372/*
373 * At periodic intervals, scan through all active partitions and ensure
374 * their heartbeat is still active. If not, the partition is deactivated.
375 */
376void
377xpc_check_remote_hb(void)
378{
379 struct xpc_vars *remote_vars;
380 struct xpc_partition *part;
381 partid_t partid;
382 bte_result_t bres;
383
384
385 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
386
387 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
388 if (partid == sn_partition_id) {
389 continue;
390 }
391
392 part = &xpc_partitions[partid];
393
394 if (part->act_state == XPC_P_INACTIVE ||
395 part->act_state == XPC_P_DEACTIVATING) {
396 continue;
397 }
398
399 /* pull the remote_hb cache line */
400 bres = xp_bte_copy(part->remote_vars_pa,
401 ia64_tpa((u64) remote_vars),
402 XPC_VARS_ALIGNED_SIZE,
403 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
404 if (bres != BTE_SUCCESS) {
405 XPC_DEACTIVATE_PARTITION(part,
406 xpc_map_bte_errors(bres));
407 continue;
408 }
409
410 dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
411 " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
412 remote_vars->heartbeat, part->last_heartbeat,
413 remote_vars->kdb_status,
414 remote_vars->heartbeating_to_mask);
415
416 if (((remote_vars->heartbeat == part->last_heartbeat) &&
417 (remote_vars->kdb_status == 0)) ||
418 !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
419
420 XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
421 continue;
422 }
423
424 part->last_heartbeat = remote_vars->heartbeat;
425 }
426}
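Editor's note: the deactivation test in xpc_check_remote_hb() boils down to a small predicate — a partition is treated as dead if its heartbeat counter has not advanced since the last scan (and it is not sitting in the kernel debugger), or if it is no longer heartbeating to us. A stand-alone restatement, with the bit test standing in for XPC_HB_ALLOWED() (whose real definition is in xpc.h):

	/* Restatement of the liveness test in xpc_check_remote_hb(). */
	#include <stdio.h>
	#include <stdint.h>

	static int
	remote_heartbeat_stalled(uint64_t heartbeat, uint64_t last_heartbeat,
				 uint64_t kdb_status, uint64_t heartbeating_to_mask,
				 int my_partid)
	{
		int hb_allowed = (int)((heartbeating_to_mask >> my_partid) & 1);

		return (heartbeat == last_heartbeat && kdb_status == 0) || !hb_allowed;
	}

	int main(void)
	{
		/* heartbeat advanced and our bit is set -> alive */
		printf("%d\n", remote_heartbeat_stalled(42, 41, 0, 1UL << 2, 2));	/* 0 */
		/* heartbeat did not advance -> stalled */
		printf("%d\n", remote_heartbeat_stalled(41, 41, 0, 1UL << 2, 2));	/* 1 */
		return 0;
	}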
427
428
429/*
430 * Get a copy of the remote partition's rsvd page.
431 *
432 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
433 * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE.
434 */
435static enum xpc_retval
436xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
437 struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa)
438{
439 int bres, i;
440
441
442 /* get the reserved page's physical address */
443
444 *remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp,
445 XPC_RSVD_PAGE_ALIGNED_SIZE);
446 if (*remote_rsvd_page_pa == 0) {
447 return xpcNoRsvdPageAddr;
448 }
449
450
451 /* pull over the reserved page structure */
452
453 bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp),
454 XPC_RSVD_PAGE_ALIGNED_SIZE,
455 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
456 if (bres != BTE_SUCCESS) {
457 return xpc_map_bte_errors(bres);
458 }
459
460
461 if (discovered_nasids != NULL) {
462 for (i = 0; i < XP_NASID_MASK_WORDS; i++) {
463 discovered_nasids[i] |= remote_rp->part_nasids[i];
464 }
465 }
466
467
468 /* check that the partid is for another partition */
469
470 if (remote_rp->partid < 1 ||
471 remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
472 return xpcInvalidPartid;
473 }
474
475 if (remote_rp->partid == sn_partition_id) {
476 return xpcLocalPartid;
477 }
478
479
480 if (XPC_VERSION_MAJOR(remote_rp->version) !=
481 XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
482 return xpcBadVersion;
483 }
484
485 return xpcSuccess;
486}
487
488
489/*
490 * Get a copy of the remote partition's XPC variables.
491 *
492 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
493 * assumed to be of size XPC_VARS_ALIGNED_SIZE.
494 */
495static enum xpc_retval
496xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
497{
498 int bres;
499
500
501 if (remote_vars_pa == 0) {
502 return xpcVarsNotSet;
503 }
504
505
506 /* pull over the cross partition variables */
507
508 bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
509 XPC_VARS_ALIGNED_SIZE,
510 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
511 if (bres != BTE_SUCCESS) {
512 return xpc_map_bte_errors(bres);
513 }
514
515 if (XPC_VERSION_MAJOR(remote_vars->version) !=
516 XPC_VERSION_MAJOR(XPC_V_VERSION)) {
517 return xpcBadVersion;
518 }
519
520 return xpcSuccess;
521}
522
523
524/*
525 * Prior code has determined the nasid which generated an IPI. Inspect
526 * that nasid to determine if its partition needs to be activated or
527 * deactivated.
528 *
529 * A partition is considered "awaiting activation" if our partition
530 * flags indicate it is not active and it has a heartbeat. A
531 * partition is considered "awaiting deactivation" if our partition
532 * flags indicate it is active but it has no heartbeat or it is not
533 * sending its heartbeat to us.
534 *
535 * To determine the heartbeat, the remote nasid must have a properly
536 * initialized reserved page.
537 */
538static void
539xpc_identify_act_IRQ_req(int nasid)
540{
541 struct xpc_rsvd_page *remote_rp;
542 struct xpc_vars *remote_vars;
543 u64 remote_rsvd_page_pa;
544 u64 remote_vars_pa;
545 partid_t partid;
546 struct xpc_partition *part;
547 enum xpc_retval ret;
548
549
550 /* pull over the reserved page structure */
551
552 remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;
553
554 ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa);
555 if (ret != xpcSuccess) {
556 dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
557 "which sent interrupt, reason=%d\n", nasid, ret);
558 return;
559 }
560
561 remote_vars_pa = remote_rp->vars_pa;
562 partid = remote_rp->partid;
563 part = &xpc_partitions[partid];
564
565
566 /* pull over the cross partition variables */
567
568 remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;
569
570 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
571 if (ret != xpcSuccess) {
572
573 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
574 "which sent interrupt, reason=%d\n", nasid, ret);
575
576 XPC_DEACTIVATE_PARTITION(part, ret);
577 return;
578 }
579
580
581 part->act_IRQ_rcvd++;
582
583 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
584 "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
585 remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
586
587
588 if (part->act_state == XPC_P_INACTIVE) {
589
590 part->remote_rp_pa = remote_rsvd_page_pa;
591 dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n",
592 part->remote_rp_pa);
593
594 part->remote_vars_pa = remote_vars_pa;
595 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
596 part->remote_vars_pa);
597
598 part->last_heartbeat = remote_vars->heartbeat;
599 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
600 part->last_heartbeat);
601
602 part->remote_vars_part_pa = remote_vars->vars_part_pa;
603 dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
604 part->remote_vars_part_pa);
605
606 part->remote_act_nasid = remote_vars->act_nasid;
607 dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n",
608 part->remote_act_nasid);
609
610 part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
611 dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n",
612 part->remote_act_phys_cpuid);
613
614 part->remote_amos_page_pa = remote_vars->amos_page_pa;
615 dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
616 part->remote_amos_page_pa);
617
618 xpc_activate_partition(part);
619
620 } else if (part->remote_amos_page_pa != remote_vars->amos_page_pa ||
621 !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) {
622
623 part->reactivate_nasid = nasid;
624 XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
625 }
626}
627
628
629/*
630 * Loop through the activation AMO variables and process any bits
631 * which are set. Each bit indicates a nasid sending a partition
632 * activation or deactivation request.
633 *
634 * Return #of IRQs detected.
635 */
636int
637xpc_identify_act_IRQ_sender(void)
638{
639 int word, bit;
640 u64 nasid_mask;
641 u64 nasid; /* remote nasid */
642 int n_IRQs_detected = 0;
643 AMO_t *act_amos;
644 struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
645
646
647 act_amos = xpc_vars->act_amos;
648
649
650 /* scan through act AMO variable looking for non-zero entries */
651 for (word = 0; word < XP_NASID_MASK_WORDS; word++) {
652
653 nasid_mask = xpc_IPI_receive(&act_amos[word]);
654 if (nasid_mask == 0) {
655 /* no IRQs from nasids in this variable */
656 continue;
657 }
658
659 dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
660 nasid_mask);
661
662
663 /*
664 * If this nasid has been added to the machine since
665 * our partition was reset, this will retain the
666		 * remote nasid in our reserved page's machine mask.
667 * This is used in the event of module reload.
668 */
669 rp->mach_nasids[word] |= nasid_mask;
670
671
672 /* locate the nasid(s) which sent interrupts */
673
674 for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
675 if (nasid_mask & (1UL << bit)) {
676 n_IRQs_detected++;
677 nasid = XPC_NASID_FROM_W_B(word, bit);
678 dev_dbg(xpc_part, "interrupt from nasid %ld\n",
679 nasid);
680 xpc_identify_act_IRQ_req(nasid);
681 }
682 }
683 }
684 return n_IRQs_detected;
685}
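Editor's note: the scan above walks each activation AMO word and turns every set bit back into a nasid via XPC_NASID_FROM_W_B(). That macro is defined in a header not shown here; since the masks carry one bit per even nasid, a plausible mapping is nasid = (word * 64 + bit) * 2, which the sketch below assumes.

	/* Sketch of turning AMO bit positions back into nasids; (word*64 + bit)*2 is
	 * assumed, the real mapping is XPC_NASID_FROM_W_B() in a header not shown. */
	#include <stdio.h>
	#include <stdint.h>

	#define NASID_MASK_WORDS 4

	static int scan_act_amos(const uint64_t nasid_mask[NASID_MASK_WORDS])
	{
		int word, bit, n_irqs = 0;

		for (word = 0; word < NASID_MASK_WORDS; word++) {
			if (nasid_mask[word] == 0)
				continue;	/* no IRQs from nasids in this word */
			for (bit = 0; bit < 64; bit++) {
				if (nasid_mask[word] & ((uint64_t)1 << bit)) {
					n_irqs++;
					printf("interrupt from nasid %d\n",
					       (word * 64 + bit) * 2);
				}
			}
		}
		return n_irqs;
	}

	int main(void)
	{
		uint64_t masks[NASID_MASK_WORDS] = { 0x5, 0, (uint64_t)1 << 3, 0 };

		printf("%d IRQs detected\n", scan_act_amos(masks));	/* nasids 0, 4, 262 */
		return 0;
	}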
686
687
688/*
689 * Mark specified partition as active.
690 */
691enum xpc_retval
692xpc_mark_partition_active(struct xpc_partition *part)
693{
694 unsigned long irq_flags;
695 enum xpc_retval ret;
696
697
698 dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
699
700 spin_lock_irqsave(&part->act_lock, irq_flags);
701 if (part->act_state == XPC_P_ACTIVATING) {
702 part->act_state = XPC_P_ACTIVE;
703 ret = xpcSuccess;
704 } else {
705 DBUG_ON(part->reason == xpcSuccess);
706 ret = part->reason;
707 }
708 spin_unlock_irqrestore(&part->act_lock, irq_flags);
709
710 return ret;
711}
712
713
714/*
715 * Notify XPC that the partition is down.
716 */
717void
718xpc_deactivate_partition(const int line, struct xpc_partition *part,
719 enum xpc_retval reason)
720{
721 unsigned long irq_flags;
722 partid_t partid = XPC_PARTID(part);
723
724
725 spin_lock_irqsave(&part->act_lock, irq_flags);
726
727 if (part->act_state == XPC_P_INACTIVE) {
728 XPC_SET_REASON(part, reason, line);
729 spin_unlock_irqrestore(&part->act_lock, irq_flags);
730 if (reason == xpcReactivating) {
731 /* we interrupt ourselves to reactivate partition */
732 xpc_IPI_send_reactivate(part);
733 }
734 return;
735 }
736 if (part->act_state == XPC_P_DEACTIVATING) {
737 if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
738 reason == xpcReactivating) {
739 XPC_SET_REASON(part, reason, line);
740 }
741 spin_unlock_irqrestore(&part->act_lock, irq_flags);
742 return;
743 }
744
745 part->act_state = XPC_P_DEACTIVATING;
746 XPC_SET_REASON(part, reason, line);
747
748 spin_unlock_irqrestore(&part->act_lock, irq_flags);
749
750 XPC_DISALLOW_HB(partid, xpc_vars);
751
752 dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid,
753 reason);
754
755 xpc_partition_down(part, reason);
756}
757
758
759/*
760 * Mark specified partition as inactive.
761 */
762void
763xpc_mark_partition_inactive(struct xpc_partition *part)
764{
765 unsigned long irq_flags;
766
767
768 dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
769 XPC_PARTID(part));
770
771 spin_lock_irqsave(&part->act_lock, irq_flags);
772 part->act_state = XPC_P_INACTIVE;
773 spin_unlock_irqrestore(&part->act_lock, irq_flags);
774 part->remote_rp_pa = 0;
775}
776
777
778/*
779 * SAL has provided a partition and machine mask. The partition mask
780 * contains a bit for each even nasid in our partition. The machine
781 * mask contains a bit for each even nasid in the entire machine.
782 *
783 * Using those two bit arrays, we can determine which nasids are
784 * known in the machine. Each should also have a reserved page
785 * initialized if it is available for partitioning.
786 */
787void
788xpc_discovery(void)
789{
790 void *remote_rp_base;
791 struct xpc_rsvd_page *remote_rp;
792 struct xpc_vars *remote_vars;
793 u64 remote_rsvd_page_pa;
794 u64 remote_vars_pa;
795 int region;
796 int max_regions;
797 int nasid;
798 struct xpc_rsvd_page *rp;
799 partid_t partid;
800 struct xpc_partition *part;
801 u64 *discovered_nasids;
802 enum xpc_retval ret;
803
804
805 remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE,
806 GFP_KERNEL, &remote_rp_base);
807 if (remote_rp == NULL) {
808 return;
809 }
810 remote_vars = (struct xpc_vars *) remote_rp;
811
812
813 discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS,
814 GFP_KERNEL);
815 if (discovered_nasids == NULL) {
816 kfree(remote_rp_base);
817 return;
818 }
819 memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS);
820
821 rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
822
823 /*
824 * The term 'region' in this context refers to the minimum number of
825 * nodes that can comprise an access protection grouping. The access
826 * protection is in regards to memory, IOI and IPI.
827 */
828//>>> move the next two #defines into either include/asm-ia64/sn/arch.h or
829//>>> include/asm-ia64/sn/addrs.h
830#define SH1_MAX_REGIONS 64
831#define SH2_MAX_REGIONS 256
832 max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS;
833
834 for (region = 0; region < max_regions; region++) {
835
836 if ((volatile int) xpc_exiting) {
837 break;
838 }
839
840 dev_dbg(xpc_part, "searching region %d\n", region);
841
842 for (nasid = (region * sn_region_size * 2);
843 nasid < ((region + 1) * sn_region_size * 2);
844 nasid += 2) {
845
846 if ((volatile int) xpc_exiting) {
847 break;
848 }
849
850 dev_dbg(xpc_part, "checking nasid %d\n", nasid);
851
852
853 if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) {
854 dev_dbg(xpc_part, "PROM indicates Nasid %d is "
855 "part of the local partition; skipping "
856 "region\n", nasid);
857 break;
858 }
859
860 if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) {
861 dev_dbg(xpc_part, "PROM indicates Nasid %d was "
862 "not on Numa-Link network at reset\n",
863 nasid);
864 continue;
865 }
866
867 if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
868 dev_dbg(xpc_part, "Nasid %d is part of a "
869 "partition which was previously "
870 "discovered\n", nasid);
871 continue;
872 }
873
874
875 /* pull over the reserved page structure */
876
877 ret = xpc_get_remote_rp(nasid, discovered_nasids,
878 remote_rp, &remote_rsvd_page_pa);
879 if (ret != xpcSuccess) {
880 dev_dbg(xpc_part, "unable to get reserved page "
881 "from nasid %d, reason=%d\n", nasid,
882 ret);
883
884 if (ret == xpcLocalPartid) {
885 break;
886 }
887 continue;
888 }
889
890 remote_vars_pa = remote_rp->vars_pa;
891
892 partid = remote_rp->partid;
893 part = &xpc_partitions[partid];
894
895
896 /* pull over the cross partition variables */
897
898 ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
899 if (ret != xpcSuccess) {
900 dev_dbg(xpc_part, "unable to get XPC variables "
901 "from nasid %d, reason=%d\n", nasid,
902 ret);
903
904 XPC_DEACTIVATE_PARTITION(part, ret);
905 continue;
906 }
907
908 if (part->act_state != XPC_P_INACTIVE) {
909 dev_dbg(xpc_part, "partition %d on nasid %d is "
910 "already activating\n", partid, nasid);
911 break;
912 }
913
914 /*
915 * Register the remote partition's AMOs with SAL so it
916 * can handle and cleanup errors within that address
917 * range should the remote partition go down. We don't
918 * unregister this range because it is difficult to
919 * tell when outstanding writes to the remote partition
920			 * are finished and thus when it is safe to
921 * unregister. This should not result in wasted space
922 * in the SAL xp_addr_region table because we should
923 * get the same page for remote_act_amos_pa after
924 * module reloads and system reboots.
925 */
926 if (sn_register_xp_addr_region(
927 remote_vars->amos_page_pa,
928 PAGE_SIZE, 1) < 0) {
929 dev_dbg(xpc_part, "partition %d failed to "
930 "register xp_addr region 0x%016lx\n",
931 partid, remote_vars->amos_page_pa);
932
933 XPC_SET_REASON(part, xpcPhysAddrRegFailed,
934 __LINE__);
935 break;
936 }
937
938 /*
939 * The remote nasid is valid and available.
940 * Send an interrupt to that nasid to notify
941 * it that we are ready to begin activation.
942 */
943 dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
944 "nasid %d, phys_cpuid 0x%x\n",
945 remote_vars->amos_page_pa,
946 remote_vars->act_nasid,
947 remote_vars->act_phys_cpuid);
948
949 xpc_IPI_send_activate(remote_vars);
950 }
951 }
952
953 kfree(discovered_nasids);
954 kfree(remote_rp_base);
955}
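Editor's note: xpc_discovery() enumerates candidate nasids region by region — within region r it probes the even nasids from r*sn_region_size*2 up to (r+1)*sn_region_size*2. The loop below reproduces just that enumeration; sn_region_size and the region count are placeholders (on real hardware they come from SAL and from the SH1/SH2 defines above).

	/* Enumeration of candidate nasids as in xpc_discovery(); values assumed. */
	#include <stdio.h>

	int main(void)
	{
		int sn_region_size = 2;	/* assumed; provided by SAL on real hardware */
		int max_regions = 4;	/* SH1_MAX_REGIONS/SH2_MAX_REGIONS in the real code */
		int region, nasid;

		for (region = 0; region < max_regions; region++) {
			printf("searching region %d:", region);
			for (nasid = region * sn_region_size * 2;
			     nasid < (region + 1) * sn_region_size * 2;
			     nasid += 2) {
				printf(" %d", nasid);	/* only even nasids are probed */
			}
			printf("\n");
		}
		return 0;
	}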
956
957
958/*
959 * Given a partid, get the nasids owned by that partition from the
960 * remote partition's reserved page.
961 */
962enum xpc_retval
963xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
964{
965 struct xpc_partition *part;
966 u64 part_nasid_pa;
967 int bte_res;
968
969
970 part = &xpc_partitions[partid];
971 if (part->remote_rp_pa == 0) {
972 return xpcPartitionDown;
973 }
974
975 part_nasid_pa = part->remote_rp_pa +
976 (u64) &((struct xpc_rsvd_page *) 0)->part_nasids;
977
978 bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
979 L1_CACHE_ALIGN(XP_NASID_MASK_BYTES),
980 (BTE_NOTIFY | BTE_WACQUIRE), NULL);
981
982 return xpc_map_bte_errors(bte_res);
983}
984
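Editor's note: the `(u64) &((struct xpc_rsvd_page *) 0)->part_nasids` expression in xpc_initiate_partid_to_nasids() is the classic hand-rolled offsetof: it computes how far part_nasids sits from the start of the reserved page so the BTE can pull just that member. A small sketch with a stand-in structure (the real struct xpc_rsvd_page is in xpc.h):

	/* The hand-rolled offsetof idiom, shown with a stand-in structure. */
	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct rsvd_page_sketch {
		uint64_t vars_pa;
		int partid;
		uint64_t part_nasids[4];
	};

	int main(void)
	{
		unsigned long hand_rolled =
			(unsigned long) &((struct rsvd_page_sketch *) 0)->part_nasids;

		printf("hand-rolled=%lu offsetof=%zu\n",
		       hand_rolled, offsetof(struct rsvd_page_sketch, part_nasids));
		return 0;
	}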
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
new file mode 100644
index 000000000000..78c13d676fa6
--- /dev/null
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -0,0 +1,715 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9
10/*
11 * Cross Partition Network Interface (XPNET) support
12 *
13 * XPNET provides a virtual network layered on top of the Cross
14 * Partition communication layer.
15 *
16 * XPNET provides direct point-to-point and broadcast-like support
17 * for an ethernet-like device. The ethernet broadcast medium is
18 * replaced with a point-to-point message structure which passes
19 * pointers to a DMA-capable block that a remote partition should
20 * retrieve and pass to the upper level networking layer.
21 *
22 */
23
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/ioport.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/delay.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/smp.h>
37#include <linux/string.h>
38#include <asm/sn/bte.h>
39#include <asm/sn/io.h>
40#include <asm/sn/sn_sal.h>
41#include <asm/types.h>
42#include <asm/atomic.h>
43#include <asm/sn/xp.h>
44
45
46/*
47 * The message payload transferred by XPC.
48 *
49 * buf_pa is the physical address where the DMA should pull from.
50 *
51 * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a
52 * cacheline boundary. To accomplish this, we record the number of
53 * bytes from the beginning of the first cacheline to the first useful
54 * byte of the skb (leadin_ignore) and the number of bytes from the
55 * last useful byte of the skb to the end of the last cacheline
56 * (tailout_ignore).
57 *
58 * size is the number of bytes to transfer, which includes the skb->len
59 * (useful bytes of the sender's skb) plus the leadin and tailout
60 */
61struct xpnet_message {
62 u16 version; /* Version for this message */
63 u16 embedded_bytes; /* #of bytes embedded in XPC message */
64 u32 magic; /* Special number indicating this is xpnet */
65 u64 buf_pa; /* phys address of buffer to retrieve */
66 u32 size; /* #of bytes in buffer */
67 u8 leadin_ignore; /* #of bytes to ignore at the beginning */
68 u8 tailout_ignore; /* #of bytes to ignore at the end */
69 unsigned char data; /* body of small packets */
70};
71
72/*
73 * Determine the size of our message, the cacheline aligned size,
74 * and then the number of messages we will request from XPC.
75 *
76 * XPC expects each message to exist in an individual cacheline.
77 */
78#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET)
79#define XPNET_MSG_DATA_MAX \
80 (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data))
81#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE))
82#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE)
83
84
85#define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1)
86#define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1)
87
88/*
89 * Version number of XPNET implementation. XPNET can always talk to versions
90 * with the same major #, and never to versions with a different major #.
91 */
92#define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor))
93#define XPNET_VERSION_MAJOR(_v) ((_v) >> 4)
94#define XPNET_VERSION_MINOR(_v) ((_v) & 0xf)
95
96#define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */
97#define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */
98#define XPNET_MAGIC 0x88786984 /* "XNET" */
99
100#define XPNET_VALID_MSG(_m) \
101 ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \
102 && (msg->magic == XPNET_MAGIC))
103
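Editor's note: the version scheme above packs the major number in the upper nibble and the minor in the lower nibble of a u8, and XPNET_VALID_MSG() accepts a message only when the major numbers match (plus the magic). A quick userspace check of those macros:

	/* Quick check of the XPNET version packing and the major-number rule. */
	#include <stdio.h>
	#include <assert.h>

	#define _XPNET_VERSION(_major, _minor)	(((_major) << 4) | (_minor))
	#define XPNET_VERSION_MAJOR(_v)		((_v) >> 4)
	#define XPNET_VERSION_MINOR(_v)		((_v) & 0xf)

	#define XPNET_VERSION		_XPNET_VERSION(1, 0)
	#define XPNET_VERSION_EMBED	_XPNET_VERSION(1, 1)

	int main(void)
	{
		assert(XPNET_VERSION_MAJOR(XPNET_VERSION_EMBED) == 1);
		assert(XPNET_VERSION_MINOR(XPNET_VERSION_EMBED) == 1);

		/* 1.0 and 1.1 share a major number, so they can interoperate */
		assert(XPNET_VERSION_MAJOR(XPNET_VERSION) ==
		       XPNET_VERSION_MAJOR(XPNET_VERSION_EMBED));

		printf("version 1.1 packs to 0x%02x\n", XPNET_VERSION_EMBED);	/* 0x11 */
		return 0;
	}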
104#define XPNET_DEVICE_NAME "xp0"
105
106
107/*
108 * When messages are queued with xpc_send_notify, a kmalloc'd buffer
109 * of the following type is passed as a notification cookie. When the
110 * notification function is called, we use the cookie to decide
111 * whether all outstanding message sends have completed. The skb can
112 * then be released.
113 */
114struct xpnet_pending_msg {
115 struct list_head free_list;
116 struct sk_buff *skb;
117 atomic_t use_count;
118};
119
120/* driver specific structure pointed to by the device structure */
121struct xpnet_dev_private {
122 struct net_device_stats stats;
123};
124
125struct net_device *xpnet_device;
126
127/*
128 * When we are notified of other partitions activating, we add them to
129 * our bitmask of partitions to which we broadcast.
130 */
131static u64 xpnet_broadcast_partitions;
132/* protect above */
133static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED;
134
135/*
136 * Since the Block Transfer Engine (BTE) is being used for the transfer
137 * and it relies upon cache-line size transfers, we need to reserve at
138 * least one cache-line for head and tail alignment. The BTE is
139 * limited to 8MB transfers.
140 *
141 * Testing has shown that changing MTU to greater than 64KB has no effect
142 * on TCP as the two sides negotiate a Max Segment Size that is limited
143 * to 64K. Other protocols may use packets greater than this, but for
144 * now, the default is 32KB.
145 */
146#define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES)
147/* 32KB has been determined to be the ideal */
148#define XPNET_DEF_MTU (0x8000UL)
149
150
151/*
152 * The partition id is encapsulated in the MAC address. The following
153 * define locates the octet the partid is in.
154 */
155#define XPNET_PARTID_OCTET 1
156#define XPNET_LICENSE_OCTET 2
157
158
159/*
160 * Define the XPNET debug device structure that is to be used with dev_dbg(),
161 * dev_err(), dev_warn(), and dev_info().
162 */
163struct device_driver xpnet_dbg_name = {
164 .name = "xpnet"
165};
166
167struct device xpnet_dbg_subname = {
168 .bus_id = {0}, /* set to "" */
169 .driver = &xpnet_dbg_name
170};
171
172struct device *xpnet = &xpnet_dbg_subname;
173
174/*
175 * Packet was received by XPC and forwarded to us.
176 */
177static void
178xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg)
179{
180 struct sk_buff *skb;
181 bte_result_t bret;
182 struct xpnet_dev_private *priv =
183 (struct xpnet_dev_private *) xpnet_device->priv;
184
185
186 if (!XPNET_VALID_MSG(msg)) {
187 /*
188 * Packet with a different XPC version. Ignore.
189 */
190 xpc_received(partid, channel, (void *) msg);
191
192 priv->stats.rx_errors++;
193
194 return;
195 }
196 dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
197 msg->leadin_ignore, msg->tailout_ignore);
198
199
200 /* reserve an extra cache line */
201 skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
202 if (!skb) {
203 dev_err(xpnet, "failed on dev_alloc_skb(%d)\n",
204 msg->size + L1_CACHE_BYTES);
205
206 xpc_received(partid, channel, (void *) msg);
207
208 priv->stats.rx_errors++;
209
210 return;
211 }
212
213 /*
214 * The allocated skb has some reserved space.
215 * In order to use bte_copy, we need to get the
216 * skb->data pointer moved forward.
217 */
218 skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data &
219 (L1_CACHE_BYTES - 1)) +
220 msg->leadin_ignore));
221
222 /*
223 * Update the tail pointer to indicate data actually
224 * transferred.
225 */
226 skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore));
227
228 /*
229	 * Move the data over from the other side.
230 */
231 if ((XPNET_VERSION_MINOR(msg->version) == 1) &&
232 (msg->embedded_bytes != 0)) {
233 dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, "
234 "%lu)\n", skb->data, &msg->data,
235 (size_t) msg->embedded_bytes);
236
237 memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes);
238 } else {
239 dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
240 "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa,
241 (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
242 msg->size);
243
244 bret = bte_copy(msg->buf_pa,
245 __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
246 msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
247
248 if (bret != BTE_SUCCESS) {
249 // >>> Need better way of cleaning skb. Currently skb
250 // >>> appears in_use and we can't just call
251 // >>> dev_kfree_skb.
252 dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned "
253 "error=0x%x\n", (void *)msg->buf_pa,
254 (void *)__pa((u64)skb->data &
255 ~(L1_CACHE_BYTES - 1)),
256 msg->size, bret);
257
258 xpc_received(partid, channel, (void *) msg);
259
260 priv->stats.rx_errors++;
261
262 return;
263 }
264 }
265
266 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
267 "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
268 (void *) skb->data, (void *) skb->tail, (void *) skb->end,
269 skb->len);
270
271 skb->dev = xpnet_device;
272 skb->protocol = eth_type_trans(skb, xpnet_device);
273 skb->ip_summed = CHECKSUM_UNNECESSARY;
274
275 dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p "
276 "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n",
277 (void *) skb->head, (void *) skb->data, (void *) skb->tail,
278 (void *) skb->end, skb->len);
279
280
281 xpnet_device->last_rx = jiffies;
282 priv->stats.rx_packets++;
283 priv->stats.rx_bytes += skb->len + ETH_HLEN;
284
285 netif_rx_ni(skb);
286 xpc_received(partid, channel, (void *) msg);
287}
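
(The receive path above pulls whole cache lines across with bte_copy() and then
discards msg->leadin_ignore bytes at the front and msg->tailout_ignore bytes at
the back. A standalone sketch of that arithmetic, assuming a 128-byte cache
line and made-up skb addresses:)

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES	128UL	/* illustrative cache-line size */
#define CACHE_ALIGN(a)	(((a) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	/* hypothetical skb data span */
	uint64_t data = 0x1000 + 40;	/* skb->data */
	uint64_t tail = data + 1500;	/* skb->tail */

	uint64_t start_addr = data & ~(L1_CACHE_BYTES - 1);
	uint64_t end_addr   = CACHE_ALIGN(tail);

	uint64_t size           = end_addr - start_addr;	/* whole cache lines moved */
	uint64_t leadin_ignore  = data - start_addr;		/* junk before skb->data */
	uint64_t tailout_ignore = end_addr - tail;		/* junk after skb->tail */

	printf("copy %lu bytes, keep %lu\n",
	       (unsigned long)size,
	       (unsigned long)(size - leadin_ignore - tailout_ignore));
	return 0;
}
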
288
289
290/*
291 * This is the handler which XPC calls during any sort of change in
292 * state or message reception on a connection.
293 */
294static void
295xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel,
296 void *data, void *key)
297{
298 long bp;
299
300
301 DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
302 DBUG_ON(channel != XPC_NET_CHANNEL);
303
304 switch(reason) {
305 case xpcMsgReceived: /* message received */
306 DBUG_ON(data == NULL);
307
308 xpnet_receive(partid, channel, (struct xpnet_message *) data);
309 break;
310
311 case xpcConnected: /* connection completed to a partition */
312 spin_lock_bh(&xpnet_broadcast_lock);
 313 xpnet_broadcast_partitions |= 1UL << (partid - 1);
314 bp = xpnet_broadcast_partitions;
315 spin_unlock_bh(&xpnet_broadcast_lock);
316
317 netif_carrier_on(xpnet_device);
318
319 dev_dbg(xpnet, "%s connection created to partition %d; "
320 "xpnet_broadcast_partitions=0x%lx\n",
321 xpnet_device->name, partid, bp);
322 break;
323
324 default:
325 spin_lock_bh(&xpnet_broadcast_lock);
 326 xpnet_broadcast_partitions &= ~(1UL << (partid - 1));
327 bp = xpnet_broadcast_partitions;
328 spin_unlock_bh(&xpnet_broadcast_lock);
329
330 if (bp == 0) {
331 netif_carrier_off(xpnet_device);
332 }
333
334 dev_dbg(xpnet, "%s disconnected from partition %d; "
335 "xpnet_broadcast_partitions=0x%lx\n",
336 xpnet_device->name, partid, bp);
337 break;
338
339 }
340}
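
(xpnet_connection_activity() tracks connected partitions as single bits in
xpnet_broadcast_partitions and drops the carrier once the mask goes empty. A
plain C sketch of that bookkeeping, with made-up partition numbers and printf()
standing in for netif_carrier_off():)

#include <stdint.h>
#include <stdio.h>

static uint64_t broadcast_partitions;	/* bit (partid - 1) set while connected */

static void partition_up(int partid)
{
	broadcast_partitions |= 1UL << (partid - 1);
}

static void partition_down(int partid)
{
	broadcast_partitions &= ~(1UL << (partid - 1));
	if (broadcast_partitions == 0)
		printf("carrier off\n");	/* stands in for netif_carrier_off() */
}

int main(void)
{
	partition_up(2);
	partition_up(5);
	printf("mask=0x%lx\n", (unsigned long)broadcast_partitions);	/* 0x12 */
	partition_down(2);
	partition_down(5);	/* last one out prints "carrier off" */
	return 0;
}
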
341
342
343static int
344xpnet_dev_open(struct net_device *dev)
345{
346 enum xpc_retval ret;
347
348
349 dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, "
350 "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity,
351 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS,
352 XPNET_MAX_IDLE_KTHREADS);
353
354 ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL,
355 XPNET_MSG_SIZE, XPNET_MSG_NENTRIES,
356 XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS);
357 if (ret != xpcSuccess) {
358 dev_err(xpnet, "ifconfig up of %s failed on XPC connect, "
359 "ret=%d\n", dev->name, ret);
360
361 return -ENOMEM;
362 }
363
364 dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name);
365
366 return 0;
367}
368
369
370static int
371xpnet_dev_stop(struct net_device *dev)
372{
373 xpc_disconnect(XPC_NET_CHANNEL);
374
375 dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name);
376
377 return 0;
378}
379
380
381static int
382xpnet_dev_change_mtu(struct net_device *dev, int new_mtu)
383{
384 /* 68 comes from min TCP+IP+MAC header */
385 if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) {
386 dev_err(xpnet, "ifconfig %s mtu %d failed; value must be "
387 "between 68 and %ld\n", dev->name, new_mtu,
388 XPNET_MAX_MTU);
389 return -EINVAL;
390 }
391
392 dev->mtu = new_mtu;
393 dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu);
394 return 0;
395}
396
397
398/*
399 * Required for the net_device structure.
400 */
401static int
402xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map)
403{
404 return 0;
405}
406
407
408/*
409 * Return statistics to the caller.
410 */
411static struct net_device_stats *
412xpnet_dev_get_stats(struct net_device *dev)
413{
414 struct xpnet_dev_private *priv;
415
416
417 priv = (struct xpnet_dev_private *) dev->priv;
418
419 return &priv->stats;
420}
421
422
423/*
424 * Notification that the other end has received the message and
425 * DMA'd the skb information. At this point, they are done with
426 * our side. When all recipients are done processing, we
427 * release the skb and then release our pending message structure.
428 */
429static void
430xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel,
431 void *__qm)
432{
433 struct xpnet_pending_msg *queued_msg =
434 (struct xpnet_pending_msg *) __qm;
435
436
437 DBUG_ON(queued_msg == NULL);
438
439 dev_dbg(xpnet, "message to %d notified with reason %d\n",
440 partid, reason);
441
442 if (atomic_dec_return(&queued_msg->use_count) == 0) {
 443 dev_dbg(xpnet, "all acks for skb->head=0x%p\n",
444 (void *) queued_msg->skb->head);
445
446 dev_kfree_skb_any(queued_msg->skb);
447 kfree(queued_msg);
448 }
449}
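
(xpnet_send_completed() and xpnet_dev_hard_start_xmit() share the skb through a
plain reference count: the transmit path holds one reference, every outstanding
xpc_send_notify() holds another, and whichever side drops the count to zero
frees the skb and the pending-message structure. A single-threaded sketch of
that pattern using C11 atomics; names here are illustrative:)

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pending {
	atomic_int use_count;
};

static void put_pending(struct pending *p)
{
	/* previous value 1 means this drop took the count to zero */
	if (atomic_fetch_sub(&p->use_count, 1) == 1) {
		printf("freeing skb and pending message\n");
		free(p);
	}
}

int main(void)
{
	struct pending *p = malloc(sizeof(*p));

	atomic_init(&p->use_count, 1);		/* transmit path's own reference */
	atomic_fetch_add(&p->use_count, 1);	/* one outstanding xpc_send_notify */

	put_pending(p);	/* notify callback fires: count 2 -> 1, nothing freed */
	put_pending(p);	/* transmit path drops its reference: 1 -> 0, freed here */
	return 0;
}
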
450
451
452/*
453 * Network layer has formatted a packet (skb) and is ready to place it
454 * "on the wire". Prepare and send an xpnet_message to all partitions
455 * which have connected with us and are targets of this packet.
456 *
457 * MAC-NOTE: For the XPNET driver, the MAC address contains the
 458 * destination partition_id. If the destination partition id octet
 459 * is 0xff, this packet is to be broadcast to all partitions.
460 */
461static int
462xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
463{
464 struct xpnet_pending_msg *queued_msg;
465 enum xpc_retval ret;
466 struct xpnet_message *msg;
467 u64 start_addr, end_addr;
468 long dp;
469 u8 second_mac_octet;
470 partid_t dest_partid;
471 struct xpnet_dev_private *priv;
472 u16 embedded_bytes;
473
474
475 priv = (struct xpnet_dev_private *) dev->priv;
476
477
478 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p "
479 "skb->end=0x%p skb->len=%d\n", (void *) skb->head,
480 (void *) skb->data, (void *) skb->tail, (void *) skb->end,
481 skb->len);
482
483
484 /*
485 * The xpnet_pending_msg tracks how many outstanding
486 * xpc_send_notifies are relying on this skb. When none
487 * remain, release the skb.
488 */
489 queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC);
490 if (queued_msg == NULL) {
491 dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping "
492 "packet\n", sizeof(struct xpnet_pending_msg));
493
494 priv->stats.tx_errors++;
495
496 return -ENOMEM;
497 }
498
499
500 /* get the beginning of the first cacheline and end of last */
501 start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1));
502 end_addr = L1_CACHE_ALIGN((u64) skb->tail);
503
504 /* calculate how many bytes to embed in the XPC message */
505 embedded_bytes = 0;
506 if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) {
507 /* skb->data does fit so embed */
508 embedded_bytes = skb->len;
509 }
510
511
512 /*
513 * Since the send occurs asynchronously, we set the count to one
514 * and begin sending. Any sends that happen to complete before
515 * we are done sending will not free the skb. We will be left
516 * with that task during exit. This also handles the case of
517 * a packet destined for a partition which is no longer up.
518 */
519 atomic_set(&queued_msg->use_count, 1);
520 queued_msg->skb = skb;
521
522
523 second_mac_octet = skb->data[XPNET_PARTID_OCTET];
524 if (second_mac_octet == 0xff) {
525 /* we are being asked to broadcast to all partitions */
526 dp = xpnet_broadcast_partitions;
527 } else if (second_mac_octet != 0) {
528 dp = xpnet_broadcast_partitions &
529 (1UL << (second_mac_octet - 1));
530 } else {
531 /* 0 is an invalid partid. Ignore */
532 dp = 0;
533 }
534 dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp);
535
536 /*
 537 * If we wanted to allow promiscuous mode to work like an
538 * unswitched network, this would be a good point to OR in a
539 * mask of partitions which should be receiving all packets.
540 */
541
542 /*
543 * Main send loop.
544 */
545 for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS;
546 dest_partid++) {
547
548
549 if (!(dp & (1UL << (dest_partid - 1)))) {
550 /* not destined for this partition */
551 continue;
552 }
553
554 /* remove this partition from the destinations mask */
555 dp &= ~(1UL << (dest_partid - 1));
556
557
558 /* found a partition to send to */
559
560 ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL,
561 XPC_NOWAIT, (void **)&msg);
562 if (unlikely(ret != xpcSuccess)) {
563 continue;
564 }
565
566 msg->embedded_bytes = embedded_bytes;
567 if (unlikely(embedded_bytes != 0)) {
568 msg->version = XPNET_VERSION_EMBED;
569 dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n",
570 &msg->data, skb->data, (size_t) embedded_bytes);
571 memcpy(&msg->data, skb->data, (size_t) embedded_bytes);
572 } else {
573 msg->version = XPNET_VERSION;
574 }
575 msg->magic = XPNET_MAGIC;
576 msg->size = end_addr - start_addr;
577 msg->leadin_ignore = (u64) skb->data - start_addr;
578 msg->tailout_ignore = end_addr - (u64) skb->tail;
579 msg->buf_pa = __pa(start_addr);
580
581 dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
582 "0x%lx, msg->size=%u, msg->leadin_ignore=%u, "
583 "msg->tailout_ignore=%u\n", dest_partid,
584 XPC_NET_CHANNEL, msg->buf_pa, msg->size,
585 msg->leadin_ignore, msg->tailout_ignore);
586
587
588 atomic_inc(&queued_msg->use_count);
589
590 ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg,
591 xpnet_send_completed, queued_msg);
592 if (unlikely(ret != xpcSuccess)) {
593 atomic_dec(&queued_msg->use_count);
594 continue;
595 }
596
597 }
598
599 if (atomic_dec_return(&queued_msg->use_count) == 0) {
600 dev_dbg(xpnet, "no partitions to receive packet destined for "
601 "%d\n", dest_partid);
602
603
604 dev_kfree_skb(skb);
605 kfree(queued_msg);
606 }
607
608 priv->stats.tx_packets++;
609 priv->stats.tx_bytes += skb->len;
610
611 return 0;
612}
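
(The transmit path above embeds payloads that fit inside the XPC message itself
and otherwise passes the receiver a physical address to bte_copy() from. A toy
sketch of that decision; XPNET_MSG_DATA_MAX is given an arbitrary value here
since the real one derives from the message layout:)

#include <stdio.h>

#define XPNET_MSG_DATA_MAX 1300U	/* illustrative only */

int main(void)
{
	unsigned int lens[] = { 60, 1300, 9000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		if (lens[i] <= XPNET_MSG_DATA_MAX)
			printf("%u bytes: embed in the XPC message\n", lens[i]);
		else
			printf("%u bytes: send buf_pa, receiver bte_copy()s it\n",
			       lens[i]);
	}
	return 0;
}
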
613
614
615/*
616 * Deal with transmit timeouts coming from the network layer.
617 */
618static void
619xpnet_dev_tx_timeout (struct net_device *dev)
620{
621 struct xpnet_dev_private *priv;
622
623
624 priv = (struct xpnet_dev_private *) dev->priv;
625
626 priv->stats.tx_errors++;
627 return;
628}
629
630
631static int __init
632xpnet_init(void)
633{
634 int i;
635 u32 license_num;
636 int result = -ENOMEM;
637
638
639 dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME);
640
641 /*
642 * use ether_setup() to init the majority of our device
643 * structure and then override the necessary pieces.
644 */
645 xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private),
646 XPNET_DEVICE_NAME, ether_setup);
647 if (xpnet_device == NULL) {
648 return -ENOMEM;
649 }
650
651 netif_carrier_off(xpnet_device);
652
653 xpnet_device->mtu = XPNET_DEF_MTU;
654 xpnet_device->change_mtu = xpnet_dev_change_mtu;
655 xpnet_device->open = xpnet_dev_open;
656 xpnet_device->get_stats = xpnet_dev_get_stats;
657 xpnet_device->stop = xpnet_dev_stop;
658 xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit;
659 xpnet_device->tx_timeout = xpnet_dev_tx_timeout;
660 xpnet_device->set_config = xpnet_dev_set_config;
661
662 /*
663 * Multicast assumes the LSB of the first octet is set for multicast
664 * MAC addresses. We chose the first octet of the MAC to be unlikely
665 * to collide with any vendor's officially issued MAC.
666 */
667 xpnet_device->dev_addr[0] = 0xfe;
668 xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id;
669 license_num = sn_partition_serial_number_val();
670 for (i = 3; i >= 0; i--) {
671 xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] =
672 license_num & 0xff;
673 license_num = license_num >> 8;
674 }
675
676 /*
677 * ether_setup() sets this to a multicast device. We are
678 * really not supporting multicast at this time.
679 */
680 xpnet_device->flags &= ~IFF_MULTICAST;
681
682 /*
683 * No need to checksum as it is a DMA transfer. The BTE will
684 * report an error if the data is not retrievable and the
685 * packet will be dropped.
686 */
687 xpnet_device->features = NETIF_F_NO_CSUM;
688
689 result = register_netdev(xpnet_device);
690 if (result != 0) {
691 free_netdev(xpnet_device);
692 }
693
694 return result;
695}
696module_init(xpnet_init);
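
(A userspace sketch of the station-address packing done in xpnet_init() above:
0xfe, then the partition id, then the four octets of the license/serial number.
The partition id and serial value below are made up:)

#include <stdint.h>
#include <stdio.h>

#define XPNET_PARTID_OCTET	1
#define XPNET_LICENSE_OCTET	2

int main(void)
{
	uint8_t  dev_addr[6] = { 0 };
	uint8_t  partition_id = 3;		/* illustrative sn_partition_id */
	uint32_t license_num = 0xdeadbeef;	/* illustrative serial number */
	int i;

	dev_addr[0] = 0xfe;			/* avoid vendor-issued OUIs */
	dev_addr[XPNET_PARTID_OCTET] = partition_id;
	for (i = 3; i >= 0; i--) {
		dev_addr[XPNET_LICENSE_OCTET + i] = license_num & 0xff;
		license_num >>= 8;
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}
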
697
698
699static void __exit
700xpnet_exit(void)
701{
702 dev_info(xpnet, "unregistering network device %s\n",
703 xpnet_device[0].name);
704
705 unregister_netdev(xpnet_device);
706
707 free_netdev(xpnet_device);
708}
709module_exit(xpnet_exit);
710
711
712MODULE_AUTHOR("Silicon Graphics, Inc.");
713MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)");
714MODULE_LICENSE("GPL");
715
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index c90685985d81..64af2b2c1787 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr)
301 spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> 301 spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
302 sfdl_flush_lock, flags); 302 sfdl_flush_lock, flags);
303 303
304 p->sfdl_flush_value = 0; 304 *p->sfdl_flush_addr = 0;
305 305
306 /* force an interrupt. */ 306 /* force an interrupt. */
307 *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; 307 *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 54a0dd447e76..8dae9eb45456 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -431,7 +431,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
431 ca_dmamap->cad_dma_addr = bus_addr; 431 ca_dmamap->cad_dma_addr = bus_addr;
432 ca_dmamap->cad_gart_size = entries; 432 ca_dmamap->cad_gart_size = entries;
433 ca_dmamap->cad_gart_entry = entry; 433 ca_dmamap->cad_gart_entry = entry;
434 list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list); 434 list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
435 435
436 if (xio_addr % ps) { 436 if (xio_addr % ps) {
437 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); 437 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2d5a19f6378d..5ed6515ae01f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -408,7 +408,7 @@ config SGI_TIOCX
408 408
409config SGI_MBCS 409config SGI_MBCS
410 tristate "SGI FPGA Core Services driver support" 410 tristate "SGI FPGA Core Services driver support"
411 depends on (IA64_SGI_SN2 || IA64_GENERIC) 411 depends on SGI_TIOCX
412 help 412 help
413 If you have an SGI Altix with an attached SABrick 413 If you have an SGI Altix with an attached SABrick
414 say Y or M here, otherwise say N. 414 say Y or M here, otherwise say N.
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index 960d626ee589..1bfdfb4d7b01 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -136,6 +136,7 @@
136 */ 136 */
137#define CAC_BASE (CACHED | AS_CAC_SPACE) 137#define CAC_BASE (CACHED | AS_CAC_SPACE)
138#define AMO_BASE (UNCACHED | AS_AMO_SPACE) 138#define AMO_BASE (UNCACHED | AS_AMO_SPACE)
139#define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE)
139#define GET_BASE (CACHED | AS_GET_SPACE) 140#define GET_BASE (CACHED | AS_GET_SPACE)
140 141
141/* 142/*
@@ -161,6 +162,13 @@
161 162
162 163
163/* 164/*
165 * Macros to test for address type.
166 */
167#define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
168#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
169
170
171/*
164 * The following definitions pertain to the IO special address 172 * The following definitions pertain to the IO special address
165 * space. They define the location of the big and little windows 173 * space. They define the location of the big and little windows
166 * of any given node. 174 * of any given node.
diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h
index 7c349f07916a..635fdce854a8 100644
--- a/include/asm-ia64/sn/arch.h
+++ b/include/asm-ia64/sn/arch.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI specific setup. 6 * SGI specific setup.
7 * 7 *
8 * Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
10 */ 10 */
11#ifndef _ASM_IA64_SN_ARCH_H 11#ifndef _ASM_IA64_SN_ARCH_H
@@ -47,6 +47,21 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
47#define MAX_COMPACT_NODES 2048 47#define MAX_COMPACT_NODES 2048
48#define CPUS_PER_NODE 4 48#define CPUS_PER_NODE 4
49 49
50
51/*
52 * Compact node ID to nasid mappings kept in the per-cpu data areas of each
53 * cpu.
54 */
55DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]);
56#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
57
58
59
60extern u8 sn_partition_id;
61extern u8 sn_system_size;
62extern u8 sn_sharing_domain_size;
63extern u8 sn_region_size;
64
50extern void sn_flush_all_caches(long addr, long bytes); 65extern void sn_flush_all_caches(long addr, long bytes);
51 66
52#endif /* _ASM_IA64_SN_ARCH_H */ 67#endif /* _ASM_IA64_SN_ARCH_H */
diff --git a/include/asm-ia64/sn/fetchop.h b/include/asm-ia64/sn/fetchop.h
deleted file mode 100644
index 5f4ad8f4b5d2..000000000000
--- a/include/asm-ia64/sn/fetchop.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
8 */
9
10#ifndef _ASM_IA64_SN_FETCHOP_H
11#define _ASM_IA64_SN_FETCHOP_H
12
13#include <linux/config.h>
14
15#define FETCHOP_BASENAME "sgi_fetchop"
16#define FETCHOP_FULLNAME "/dev/sgi_fetchop"
17
18
19
20#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */
21
22#define FETCHOP_LOAD 0
23#define FETCHOP_INCREMENT 8
24#define FETCHOP_DECREMENT 16
25#define FETCHOP_CLEAR 24
26
27#define FETCHOP_STORE 0
28#define FETCHOP_AND 24
29#define FETCHOP_OR 32
30
31#define FETCHOP_CLEAR_CACHE 56
32
33#define FETCHOP_LOAD_OP(addr, op) ( \
34 *(volatile long *)((char*) (addr) + (op)))
35
36#define FETCHOP_STORE_OP(addr, op, x) ( \
37 *(volatile long *)((char*) (addr) + (op)) = (long) (x))
38
39#ifdef __KERNEL__
40
41/*
42 * Convert a region 6 (kaddr) address to the address of the fetchop variable
43 */
44#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr)
45
46
47/*
48 * Each Atomic Memory Operation (AMO formerly known as fetchop)
49 * variable is 64 bytes long. The first 8 bytes are used. The
50 * remaining 56 bytes are unaddressable due to the operation taking
51 * that portion of the address.
52 *
53 * NOTE: The AMO_t _MUST_ be placed in either the first or second half
54 * of the cache line. The cache line _MUST NOT_ be used for anything
55 * other than additional AMO_t entries. This is because there are two
56 * addresses which reference the same physical cache line. One will
57 * be a cached entry with the memory type bits all set. This address
58 * may be loaded into processor cache. The AMO_t will be referenced
59 * uncached via the memory special memory type. If any portion of the
60 * cached cache-line is modified, when that line is flushed, it will
61 * overwrite the uncached value in physical memory and lead to
62 * inconsistency.
63 */
64typedef struct {
65 u64 variable;
66 u64 unused[7];
67} AMO_t;
68
69
70/*
71 * The following APIs are externalized to the kernel to allocate/free pages of
72 * fetchop variables.
73 * fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the
74 * specified cnode.
75 * fetchop_kfree_page - Free a previously allocated fetchop page
76 */
77
78unsigned long fetchop_kalloc_page(int nid);
79void fetchop_kfree_page(unsigned long maddr);
80
81
82#endif /* __KERNEL__ */
83
84#endif /* _ASM_IA64_SN_FETCHOP_H */
85
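
(For reference on what this removal drops: the FETCHOP_*_OP macros encoded the
atomic operation in the low address bits of the 64-byte AMO variable. The
sketch below reproduces only that addressing, against ordinary memory, so only
the plain load is meaningful; the real atomic behaviour came from the MSPEC
hardware:)

#include <stdio.h>

/* Offsets from the removed header: the operation selects an alias of the
 * same 8-byte counter within the 64-byte fetchop variable. */
#define FETCHOP_LOAD		0
#define FETCHOP_INCREMENT	8
#define FETCHOP_CLEAR		24

#define FETCHOP_LOAD_OP(addr, op) \
	(*(volatile long *)((char *)(addr) + (op)))

int main(void)
{
	/* plain memory standing in for an uncached AMO variable */
	long amo[8] = { 42 };

	printf("value = %ld\n", FETCHOP_LOAD_OP(amo, FETCHOP_LOAD));
	return 0;
}
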
diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h
index d5dbd55e44b5..08050d37b662 100644
--- a/include/asm-ia64/sn/l1.h
+++ b/include/asm-ia64/sn/l1.h
@@ -29,8 +29,9 @@
29#define L1_BRICKTYPE_CHI_CG 0x76 /* v */ 29#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
30#define L1_BRICKTYPE_X 0x78 /* x */ 30#define L1_BRICKTYPE_X 0x78 /* x */
31#define L1_BRICKTYPE_X2 0x79 /* y */ 31#define L1_BRICKTYPE_X2 0x79 /* y */
32#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */ 32#define L1_BRICKTYPE_SA 0x5e /* ^ */
33#define L1_BRICKTYPE_PA 0x6a /* j */ 33#define L1_BRICKTYPE_PA 0x6a /* j */
34#define L1_BRICKTYPE_IA 0x6b /* k */ 34#define L1_BRICKTYPE_IA 0x6b /* k */
35#define L1_BRICKTYPE_ATHENA 0x2b /* + */
35 36
36#endif /* _ASM_IA64_SN_L1_H */ 37#endif /* _ASM_IA64_SN_L1_H */
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
index 13cc1002b294..7138b1eafd6b 100644
--- a/include/asm-ia64/sn/nodepda.h
+++ b/include/asm-ia64/sn/nodepda.h
@@ -13,7 +13,6 @@
13#include <asm/irq.h> 13#include <asm/irq.h>
14#include <asm/sn/arch.h> 14#include <asm/sn/arch.h>
15#include <asm/sn/intr.h> 15#include <asm/sn/intr.h>
16#include <asm/sn/pda.h>
17#include <asm/sn/bte.h> 16#include <asm/sn/bte.h>
18 17
19/* 18/*
@@ -67,20 +66,18 @@ typedef struct nodepda_s nodepda_t;
67 * The next set of definitions provides this. 66 * The next set of definitions provides this.
68 * Routines are expected to use 67 * Routines are expected to use
69 * 68 *
70 * nodepda -> to access node PDA for the node on which code is running 69 * sn_nodepda - to access node PDA for the node on which code is running
71 * subnodepda -> to access subnode PDA for the subnode on which code is running 70 * NODEPDA(cnodeid) - to access node PDA for cnodeid
72 *
73 * NODEPDA(cnode) -> to access node PDA for cnodeid
74 * SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode
75 */ 71 */
76 72
77#define nodepda pda->p_nodepda /* Ptr to this node's PDA */ 73DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
78#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode]) 74#define sn_nodepda (__get_cpu_var(__sn_nodepda))
75#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
79 76
80/* 77/*
81 * Check if given a compact node id the corresponding node has all the 78 * Check if given a compact node id the corresponding node has all the
82 * cpus disabled. 79 * cpus disabled.
83 */ 80 */
84#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0) 81#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
85 82
86#endif /* _ASM_IA64_SN_NODEPDA_H */ 83#endif /* _ASM_IA64_SN_NODEPDA_H */
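
(A small mock of the nodepda convention this hunk introduces: sn_nodepda
reaches the local node's PDA directly, and NODEPDA(cnodeid) goes through its
pernode_pdaindr table to any other node's PDA. Sizes and names below are
illustrative:)

#include <stdio.h>

#define MAX_NODES 4

struct nodepda_s {
	int cnodeid;
	struct nodepda_s *pernode_pdaindr[MAX_NODES];
};

static struct nodepda_s pdas[MAX_NODES];
static struct nodepda_s *sn_nodepda = &pdas[0];	/* "this cpu's" node PDA */

#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])

int main(void)
{
	int i, j;

	for (i = 0; i < MAX_NODES; i++) {
		pdas[i].cnodeid = i;
		for (j = 0; j < MAX_NODES; j++)
			pdas[i].pernode_pdaindr[j] = &pdas[j];
	}

	printf("local node %d, node 2's PDA id %d\n",
	       sn_nodepda->cnodeid, NODEPDA(2)->cnodeid);
	return 0;
}
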
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
index cd19f17bf91a..ea5590c76ca4 100644
--- a/include/asm-ia64/sn/pda.h
+++ b/include/asm-ia64/sn/pda.h
@@ -24,14 +24,6 @@
24 24
25typedef struct pda_s { 25typedef struct pda_s {
26 26
27 /* Having a pointer in the begining of PDA tends to increase
28 * the chance of having this pointer in cache. (Yes something
29 * else gets pushed out). Doing this reduces the number of memory
30 * access to all nodepda variables to be one
31 */
32 struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */
33 struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */
34
35 /* 27 /*
36 * Support for SN LEDs 28 * Support for SN LEDs
37 */ 29 */
@@ -49,7 +41,6 @@ typedef struct pda_s {
49 41
50 unsigned long sn_soft_irr[4]; 42 unsigned long sn_soft_irr[4];
51 unsigned long sn_in_service_ivecs[4]; 43 unsigned long sn_in_service_ivecs[4];
52 short cnodeid_to_nasid_table[MAX_NUMNODES];
53 int sn_lb_int_war_ticks; 44 int sn_lb_int_war_ticks;
54 int sn_last_irq; 45 int sn_last_irq;
55 int sn_first_irq; 46 int sn_first_irq;
diff --git a/include/asm-ia64/sn/shub_mmr.h b/include/asm-ia64/sn/shub_mmr.h
index 2f885088e095..323fa0cd8d83 100644
--- a/include/asm-ia64/sn/shub_mmr.h
+++ b/include/asm-ia64/sn/shub_mmr.h
@@ -385,6 +385,17 @@
385#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000 385#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000
386 386
387/* ==================================================================== */ 387/* ==================================================================== */
388/* Register "SH_IPI_ACCESS" */
389/* CPU interrupt Access Permission Bits */
390/* ==================================================================== */
391
392#define SH1_IPI_ACCESS 0x0000000110060480
393#define SH2_IPI_ACCESS0 0x0000000010060c00
394#define SH2_IPI_ACCESS1 0x0000000010060c80
395#define SH2_IPI_ACCESS2 0x0000000010060d00
396#define SH2_IPI_ACCESS3 0x0000000010060d80
397
398/* ==================================================================== */
388/* Register "SH_INT_CMPB" */ 399/* Register "SH_INT_CMPB" */
389/* RTC Compare Value for Processor B */ 400/* RTC Compare Value for Processor B */
390/* ==================================================================== */ 401/* ==================================================================== */
@@ -429,6 +440,19 @@
429#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 440#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
430#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff 441#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff
431 442
443/* ==================================================================== */
444/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
445/* privilege vector for acc=0 */
446/* ==================================================================== */
447
448#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 0x0000000100030300
449
450/* ==================================================================== */
451/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
452/* privilege vector for acc=0 */
453/* ==================================================================== */
454
455#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 0x0000000100050300
432 456
433/* ==================================================================== */ 457/* ==================================================================== */
434/* Some MMRs are functionally identical (or close enough) on both SHUB1 */ 458/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
diff --git a/include/asm-ia64/sn/shubio.h b/include/asm-ia64/sn/shubio.h
index fbd880e6bb96..831b72111fdc 100644
--- a/include/asm-ia64/sn/shubio.h
+++ b/include/asm-ia64/sn/shubio.h
@@ -3,292 +3,287 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#ifndef _ASM_IA64_SN_SHUBIO_H 9#ifndef _ASM_IA64_SN_SHUBIO_H
10#define _ASM_IA64_SN_SHUBIO_H 10#define _ASM_IA64_SN_SHUBIO_H
11 11
12#define HUB_WIDGET_ID_MAX 0xf 12#define HUB_WIDGET_ID_MAX 0xf
13#define IIO_NUM_ITTES 7 13#define IIO_NUM_ITTES 7
14#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 14#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
15 15
16#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ 16#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */
17 /* This register is also accessible from 17 /* This register is also accessible from
18 * Crosstalk at address 0x0. */ 18 * Crosstalk at address 0x0. */
19#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ 19#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */
20#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ 20#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */
21#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ 21#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */
22#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ 22#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */
23#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ 23#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */
24#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ 24#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */
25#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ 25#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */
26#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ 26#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */
27#define IIO_ILLR 0x00400130 /* IO LLP Log Register */ 27#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
28#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ 28#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */
29 29
30#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ 30#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */
31#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ 31#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */
32 32
33#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ 33#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */
34#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ 34#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */
35 35
36#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ 36#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */
37#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ 37#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */
38#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ 38#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */
39#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ 39#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */
40#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ 40#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */
41#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ 41#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */
42#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ 42#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */
43 43
44#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ 44#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
45#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ 45#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
46#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ 46#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
47#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ 47#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
48#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ 48#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
49#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ 49#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
50#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ 50#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
51#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ 51#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
52#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ 52#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
53 53
54#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ 54#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */
55#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ 55#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */
56#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ 56#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */
57#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ 57#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */
58#define IIO_IBCR 0x00400200 /* IO BTE Control Register */ 58#define IIO_IBCR 0x00400200 /* IO BTE Control Register */
59 59
60#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ 60#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */
61#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ 61#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */
62 62
63#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ 63#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
64 64
65#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ 65#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
66#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ 66#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
67 67
68 68#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */
69#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ 69#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
70#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ 70
71 71#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */
72#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ 72#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */
73#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ 73#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */
74#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ 74#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */
75#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ 75#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */
76#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ 76
77 77#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */
78#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ 78
79 79#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */
80#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ 80#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */
81#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ 81#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */
82#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ 82#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */
83#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ 83#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */
84#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ 84#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */
85#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ 85#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */
86#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */ 86#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */
87#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ 87
88 88#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */
89#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ 89#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */
90#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ 90#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */
91#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ 91#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */
92#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ 92#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */
93#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ 93#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */
94#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ 94#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */
95#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ 95#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */
96#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ 96
97 97#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */
98#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ 98#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */
99#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ 99#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */
100#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ 100#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */
101#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ 101#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */
102#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ 102#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */
103#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ 103#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
104#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ 104#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */
105#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ 105
106 106#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
107#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ 107#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
108#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ 108#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
109#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ 109#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
110#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ 110#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */
111#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ 111
112 112#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */
113#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ 113#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */
114#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ 114#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */
115#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ 115#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */
116#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ 116#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */
117#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ 117
118 118#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */
119#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ 119#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */
120#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ 120#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */
121#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ 121#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */
122#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ 122#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */
123#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ 123
124 124#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */
125#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ 125#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */
126#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ 126#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */
127#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ 127#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */
128#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ 128#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */
129#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ 129
130 130#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */
131#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ 131#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */
132#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ 132#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */
133#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ 133#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */
134#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ 134#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */
135#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ 135
136 136#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */
137#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ 137#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */
138#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ 138#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */
139#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ 139#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */
140#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ 140#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */
141#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ 141
142 142#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */
143#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ 143#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */
144#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ 144#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */
145#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ 145#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */
146#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ 146#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */
147#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ 147
148 148#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */
149#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ 149#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */
150#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ 150#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */
151#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ 151#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */
152#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ 152#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */
153#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ 153
154 154#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */
155#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ 155#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */
156#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ 156#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */
157#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ 157#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */
158#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ 158#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */
159#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ 159
160 160#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */
161#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ 161#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */
162#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ 162#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */
163#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ 163#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */
164#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ 164#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */
165#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ 165
166 166#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */
167#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ 167#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */
168#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ 168#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */
169#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ 169#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */
170#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ 170#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */
171#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ 171
172 172#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */
173#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ 173#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */
174#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ 174#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */
175#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ 175#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */
176#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ 176#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */
177#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ 177
178 178#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */
179#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ 179#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */
180#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ 180#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */
181#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ 181#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */
182#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ 182#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */
183#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ 183
184 184#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */
185#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ 185#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */
186#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ 186#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */
187#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ 187#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */
188#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ 188#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */
189#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ 189
190 190#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */
191#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ 191#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */
192#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ 192#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */
193#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ 193#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */
194#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ 194#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */
195#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ 195
196 196#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */
197#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ 197#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */
198#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ 198#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */
199#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ 199
200 200#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */
201#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ 201
202 202#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */
203#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ 203#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */
204#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ 204#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */
205#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ 205#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */
206#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ 206#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */
207#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ 207#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */
208#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ 208#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */
209#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ 209#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */
210#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ 210#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */
211#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ 211#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */
212#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ 212#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */
213#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ 213#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */
214#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ 214
215 215#define IIO_IPCR 0x00430000 /* IO Performance Control */
216#define IIO_IPCR 0x00430000 /* IO Performance Control */ 216#define IIO_IPPR 0x00430008 /* IO Performance Profiling */
217#define IIO_IPPR 0x00430008 /* IO Performance Profiling */ 217
218 218/************************************************************************
219 219 * *
220/************************************************************************
221 * *
222 * Description: This register echoes some information from the * 220 * Description: This register echoes some information from the *
223 * LB_REV_ID register. It is available through Crosstalk as described * 221 * LB_REV_ID register. It is available through Crosstalk as described *
224 * above. The REV_NUM and MFG_NUM fields receive their values from * 222 * above. The REV_NUM and MFG_NUM fields receive their values from *
225 * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * 223 * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
226 * The PART_NUM field's value is the Crosstalk device ID number that * 224 * The PART_NUM field's value is the Crosstalk device ID number that *
227 * Steve Miller assigned to the SHub chip. * 225 * Steve Miller assigned to the SHub chip. *
228 * * 226 * *
229 ************************************************************************/ 227 ************************************************************************/
230 228
231typedef union ii_wid_u { 229typedef union ii_wid_u {
232 uint64_t ii_wid_regval; 230 uint64_t ii_wid_regval;
233 struct { 231 struct {
234 uint64_t w_rsvd_1 : 1; 232 uint64_t w_rsvd_1:1;
235 uint64_t w_mfg_num : 11; 233 uint64_t w_mfg_num:11;
236 uint64_t w_part_num : 16; 234 uint64_t w_part_num:16;
237 uint64_t w_rev_num : 4; 235 uint64_t w_rev_num:4;
238 uint64_t w_rsvd : 32; 236 uint64_t w_rsvd:32;
239 } ii_wid_fld_s; 237 } ii_wid_fld_s;
240} ii_wid_u_t; 238} ii_wid_u_t;
241 239
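
(A standalone copy of the ii_wid layout above, showing how a raw MMR value
would be decoded through the bitfield union; the register value is made up:)

#include <stdint.h>
#include <stdio.h>

typedef union ii_wid_u {
	uint64_t ii_wid_regval;
	struct {
		uint64_t w_rsvd_1:1;
		uint64_t w_mfg_num:11;
		uint64_t w_part_num:16;
		uint64_t w_rev_num:4;
		uint64_t w_rsvd:32;
	} ii_wid_fld_s;
} ii_wid_u_t;

int main(void)
{
	ii_wid_u_t wid;

	wid.ii_wid_regval = 0x12c04e24ULL;	/* illustrative register value */
	printf("mfg %#lx part %#lx rev %#lx\n",
	       (unsigned long)wid.ii_wid_fld_s.w_mfg_num,
	       (unsigned long)wid.ii_wid_fld_s.w_part_num,
	       (unsigned long)wid.ii_wid_fld_s.w_rev_num);
	return 0;
}
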
242
243/************************************************************************ 240/************************************************************************
244 * * 241 * *
245 * The fields in this register are set upon detection of an error * 242 * The fields in this register are set upon detection of an error *
246 * and cleared by various mechanisms, as explained in the * 243 * and cleared by various mechanisms, as explained in the *
247 * description. * 244 * description. *
248 * * 245 * *
249 ************************************************************************/ 246 ************************************************************************/
250 247
251typedef union ii_wstat_u { 248typedef union ii_wstat_u {
252 uint64_t ii_wstat_regval; 249 uint64_t ii_wstat_regval;
253 struct { 250 struct {
254 uint64_t w_pending : 4; 251 uint64_t w_pending:4;
255 uint64_t w_xt_crd_to : 1; 252 uint64_t w_xt_crd_to:1;
256 uint64_t w_xt_tail_to : 1; 253 uint64_t w_xt_tail_to:1;
257 uint64_t w_rsvd_3 : 3; 254 uint64_t w_rsvd_3:3;
258 uint64_t w_tx_mx_rty : 1; 255 uint64_t w_tx_mx_rty:1;
259 uint64_t w_rsvd_2 : 6; 256 uint64_t w_rsvd_2:6;
260 uint64_t w_llp_tx_cnt : 8; 257 uint64_t w_llp_tx_cnt:8;
261 uint64_t w_rsvd_1 : 8; 258 uint64_t w_rsvd_1:8;
262 uint64_t w_crazy : 1; 259 uint64_t w_crazy:1;
263 uint64_t w_rsvd : 31; 260 uint64_t w_rsvd:31;
264 } ii_wstat_fld_s; 261 } ii_wstat_fld_s;
265} ii_wstat_u_t; 262} ii_wstat_u_t;
266 263
267
268/************************************************************************ 264/************************************************************************
269 * * 265 * *
270 * Description: This is a read-write enabled register. It controls * 266 * Description: This is a read-write enabled register. It controls *
271 * various aspects of the Crosstalk flow control. * 267 * various aspects of the Crosstalk flow control. *
272 * * 268 * *
273 ************************************************************************/ 269 ************************************************************************/
274 270
275typedef union ii_wcr_u { 271typedef union ii_wcr_u {
276 uint64_t ii_wcr_regval; 272 uint64_t ii_wcr_regval;
277 struct { 273 struct {
278 uint64_t w_wid : 4; 274 uint64_t w_wid:4;
279 uint64_t w_tag : 1; 275 uint64_t w_tag:1;
280 uint64_t w_rsvd_1 : 8; 276 uint64_t w_rsvd_1:8;
281 uint64_t w_dst_crd : 3; 277 uint64_t w_dst_crd:3;
282 uint64_t w_f_bad_pkt : 1; 278 uint64_t w_f_bad_pkt:1;
283 uint64_t w_dir_con : 1; 279 uint64_t w_dir_con:1;
284 uint64_t w_e_thresh : 5; 280 uint64_t w_e_thresh:5;
285 uint64_t w_rsvd : 41; 281 uint64_t w_rsvd:41;
286 } ii_wcr_fld_s; 282 } ii_wcr_fld_s;
287} ii_wcr_u_t; 283} ii_wcr_u_t;
288 284
289
290/************************************************************************ 285/************************************************************************
291 * * 286 * *
292 * Description: This register's value is a bit vector that guards * 287 * Description: This register's value is a bit vector that guards *
293 * access to local registers within the II as well as to external * 288 * access to local registers within the II as well as to external *
294 * Crosstalk widgets. Each bit in the register corresponds to a * 289 * Crosstalk widgets. Each bit in the register corresponds to a *
@@ -311,21 +306,18 @@ typedef union ii_wcr_u {
311 * region ID bits are enabled in this same register. It can also be * 306 * region ID bits are enabled in this same register. It can also be *
312 * accessed through the IAlias space by the local processors. * 307 * accessed through the IAlias space by the local processors. *
313 * The reset value of this register allows access by all nodes. * 308 * The reset value of this register allows access by all nodes. *
314 * * 309 * *
315 ************************************************************************/ 310 ************************************************************************/
316 311
317typedef union ii_ilapr_u { 312typedef union ii_ilapr_u {
318 uint64_t ii_ilapr_regval; 313 uint64_t ii_ilapr_regval;
319 struct { 314 struct {
320 uint64_t i_region : 64; 315 uint64_t i_region:64;
321 } ii_ilapr_fld_s; 316 } ii_ilapr_fld_s;
322} ii_ilapr_u_t; 317} ii_ilapr_u_t;
323 318
324
325
326
327/************************************************************************ 319/************************************************************************
328 * * 320 * *
329 * Description: A write to this register of the 64-bit value * 321 * Description: A write to this register of the 64-bit value *
330 * "SGIrules" in ASCII, will cause the bit in the ILAPR register * 322 * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
331 * corresponding to the region of the requestor to be set (allow * 323 * corresponding to the region of the requestor to be set (allow *
@@ -334,59 +326,54 @@ typedef union ii_ilapr_u {
334 * This register can also be accessed through the IAlias space. * 326 * This register can also be accessed through the IAlias space. *
335 * However, this access will not change the access permissions in the * 327 * However, this access will not change the access permissions in the *
336 * ILAPR. * 328 * ILAPR. *
337 * * 329 * *
338 ************************************************************************/ 330 ************************************************************************/
339 331
340typedef union ii_ilapo_u { 332typedef union ii_ilapo_u {
341 uint64_t ii_ilapo_regval; 333 uint64_t ii_ilapo_regval;
342 struct { 334 struct {
343 uint64_t i_io_ovrride : 64; 335 uint64_t i_io_ovrride:64;
344 } ii_ilapo_fld_s; 336 } ii_ilapo_fld_s;
345} ii_ilapo_u_t; 337} ii_ilapo_u_t;
346 338
347
348
349/************************************************************************ 339/************************************************************************
350 * * 340 * *
351 * This register qualifies all the PIO and Graphics writes launched * 341 * This register qualifies all the PIO and Graphics writes launched *
352 * from the SHUB towards a widget. * 342 * from the SHUB towards a widget. *
353 * * 343 * *
354 ************************************************************************/ 344 ************************************************************************/
355 345
356typedef union ii_iowa_u { 346typedef union ii_iowa_u {
357 uint64_t ii_iowa_regval; 347 uint64_t ii_iowa_regval;
358 struct { 348 struct {
359 uint64_t i_w0_oac : 1; 349 uint64_t i_w0_oac:1;
360 uint64_t i_rsvd_1 : 7; 350 uint64_t i_rsvd_1:7;
361 uint64_t i_wx_oac : 8; 351 uint64_t i_wx_oac:8;
362 uint64_t i_rsvd : 48; 352 uint64_t i_rsvd:48;
363 } ii_iowa_fld_s; 353 } ii_iowa_fld_s;
364} ii_iowa_u_t; 354} ii_iowa_u_t;
365 355
366
367/************************************************************************ 356/************************************************************************
368 * * 357 * *
369 * Description: This register qualifies all the requests launched * 358 * Description: This register qualifies all the requests launched *
370 * from a widget towards the Shub. This register is intended to be * 359 * from a widget towards the Shub. This register is intended to be *
371 * used by software in case of misbehaving widgets. * 360 * used by software in case of misbehaving widgets. *
372 * * 361 * *
373 * * 362 * *
374 ************************************************************************/ 363 ************************************************************************/
375 364
376typedef union ii_iiwa_u { 365typedef union ii_iiwa_u {
377 uint64_t ii_iiwa_regval; 366 uint64_t ii_iiwa_regval;
378 struct { 367 struct {
379 uint64_t i_w0_iac : 1; 368 uint64_t i_w0_iac:1;
380 uint64_t i_rsvd_1 : 7; 369 uint64_t i_rsvd_1:7;
381 uint64_t i_wx_iac : 8; 370 uint64_t i_wx_iac:8;
382 uint64_t i_rsvd : 48; 371 uint64_t i_rsvd:48;
383 } ii_iiwa_fld_s; 372 } ii_iiwa_fld_s;
384} ii_iiwa_u_t; 373} ii_iiwa_u_t;
385 374
386
387
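
As a sketch of how the inbound access-control field above might be manipulated: the helper below only computes a new register value, and the assumption that bit 0 of WX_IAC corresponds to widget 8 is mine, not the header's.

/* Sketch: compute an IIWA value that additionally grants inbound
 * access to widget 'w' (0x8..0xF).  Bit 0 of i_wx_iac is assumed
 * here to correspond to widget 8. */
static inline uint64_t ii_iiwa_allow_widget(uint64_t regval, unsigned int w)
{
	ii_iiwa_u_t iiwa;

	iiwa.ii_iiwa_regval = regval;
	iiwa.ii_iiwa_fld_s.i_wx_iac |= 1ULL << (w - 8);
	return iiwa.ii_iiwa_regval;
}
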
388/************************************************************************ 375/************************************************************************
389 * * 376 * *
390 * Description: This register qualifies all the operations launched * 377 * Description: This register qualifies all the operations launched *
391 * from a widget towards the SHub. It allows individual access * 378 * from a widget towards the SHub. It allows individual access *
392 * control for up to 8 devices per widget. A device refers to * 379 * control for up to 8 devices per widget. A device refers to *
@@ -401,72 +388,69 @@ typedef union ii_iiwa_u {
401 * The bits in this field are set by writing a 1 to them. Incoming * 388 * The bits in this field are set by writing a 1 to them. Incoming *
402 * replies from Crosstalk are not subject to this access control * 389 * replies from Crosstalk are not subject to this access control *
403 * mechanism. * 390 * mechanism. *
404 * * 391 * *
405 ************************************************************************/ 392 ************************************************************************/
406 393
407typedef union ii_iidem_u { 394typedef union ii_iidem_u {
408 uint64_t ii_iidem_regval; 395 uint64_t ii_iidem_regval;
409 struct { 396 struct {
410 uint64_t i_w8_dxs : 8; 397 uint64_t i_w8_dxs:8;
411 uint64_t i_w9_dxs : 8; 398 uint64_t i_w9_dxs:8;
412 uint64_t i_wa_dxs : 8; 399 uint64_t i_wa_dxs:8;
413 uint64_t i_wb_dxs : 8; 400 uint64_t i_wb_dxs:8;
414 uint64_t i_wc_dxs : 8; 401 uint64_t i_wc_dxs:8;
415 uint64_t i_wd_dxs : 8; 402 uint64_t i_wd_dxs:8;
416 uint64_t i_we_dxs : 8; 403 uint64_t i_we_dxs:8;
417 uint64_t i_wf_dxs : 8; 404 uint64_t i_wf_dxs:8;
418 } ii_iidem_fld_s; 405 } ii_iidem_fld_s;
419} ii_iidem_u_t; 406} ii_iidem_u_t;
420 407
421
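
The per-device enables above amount to eight 8-bit lanes, one per widget 8..F. A sketch, assuming the obvious bit-to-device mapping within a lane (not stated in the header) and leaving the register write itself out:

/* Sketch: enable device 'dev' (0..7) behind widget 0xA in an IIDEM value. */
static inline uint64_t ii_iidem_enable_wa_device(uint64_t regval, unsigned int dev)
{
	ii_iidem_u_t iidem;

	iidem.ii_iidem_regval = regval;
	iidem.ii_iidem_fld_s.i_wa_dxs |= 1ULL << dev;
	return iidem.ii_iidem_regval;
}
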
422/************************************************************************ 408/************************************************************************
423 * * 409 * *
424 * This register contains the various programmable fields necessary * 410 * This register contains the various programmable fields necessary *
425 * for controlling and observing the LLP signals. * 411 * for controlling and observing the LLP signals. *
426 * * 412 * *
427 ************************************************************************/ 413 ************************************************************************/
428 414
429typedef union ii_ilcsr_u { 415typedef union ii_ilcsr_u {
430 uint64_t ii_ilcsr_regval; 416 uint64_t ii_ilcsr_regval;
431 struct { 417 struct {
432 uint64_t i_nullto : 6; 418 uint64_t i_nullto:6;
433 uint64_t i_rsvd_4 : 2; 419 uint64_t i_rsvd_4:2;
434 uint64_t i_wrmrst : 1; 420 uint64_t i_wrmrst:1;
435 uint64_t i_rsvd_3 : 1; 421 uint64_t i_rsvd_3:1;
436 uint64_t i_llp_en : 1; 422 uint64_t i_llp_en:1;
437 uint64_t i_bm8 : 1; 423 uint64_t i_bm8:1;
438 uint64_t i_llp_stat : 2; 424 uint64_t i_llp_stat:2;
439 uint64_t i_remote_power : 1; 425 uint64_t i_remote_power:1;
440 uint64_t i_rsvd_2 : 1; 426 uint64_t i_rsvd_2:1;
441 uint64_t i_maxrtry : 10; 427 uint64_t i_maxrtry:10;
442 uint64_t i_d_avail_sel : 2; 428 uint64_t i_d_avail_sel:2;
443 uint64_t i_rsvd_1 : 4; 429 uint64_t i_rsvd_1:4;
444 uint64_t i_maxbrst : 10; 430 uint64_t i_maxbrst:10;
445 uint64_t i_rsvd : 22; 431 uint64_t i_rsvd:22;
446 432
447 } ii_ilcsr_fld_s; 433 } ii_ilcsr_fld_s;
448} ii_ilcsr_u_t; 434} ii_ilcsr_u_t;
449 435
450
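
A small decoding sketch for the control/status fields above; the encoding of LLP_STAT is not spelled out here, so the helper only extracts the raw fields from a register value read elsewhere.

/* Sketch: pull the LLP enable bit and raw status code out of an ILCSR value. */
static inline void ii_ilcsr_decode(uint64_t regval, int *llp_en, unsigned int *llp_stat)
{
	ii_ilcsr_u_t ilcsr;

	ilcsr.ii_ilcsr_regval = regval;
	*llp_en = ilcsr.ii_ilcsr_fld_s.i_llp_en;
	*llp_stat = ilcsr.ii_ilcsr_fld_s.i_llp_stat;
}
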
451/************************************************************************ 436/************************************************************************
452 * * 437 * *
 453 * This is simply a status register that monitors the LLP error * 438 * This is simply a status register that monitors the LLP error *
454 * rate. * 439 * rate. *
455 * * 440 * *
456 ************************************************************************/ 441 ************************************************************************/
457 442
458typedef union ii_illr_u { 443typedef union ii_illr_u {
459 uint64_t ii_illr_regval; 444 uint64_t ii_illr_regval;
460 struct { 445 struct {
461 uint64_t i_sn_cnt : 16; 446 uint64_t i_sn_cnt:16;
462 uint64_t i_cb_cnt : 16; 447 uint64_t i_cb_cnt:16;
463 uint64_t i_rsvd : 32; 448 uint64_t i_rsvd:32;
464 } ii_illr_fld_s; 449 } ii_illr_fld_s;
465} ii_illr_u_t; 450} ii_illr_u_t;
466 451
467
468/************************************************************************ 452/************************************************************************
469 * * 453 * *
470 * Description: All II-detected non-BTE error interrupts are * 454 * Description: All II-detected non-BTE error interrupts are *
471 * specified via this register. * 455 * specified via this register. *
472 * NOTE: The PI interrupt register address is hardcoded in the II. If * 456 * NOTE: The PI interrupt register address is hardcoded in the II. If *
@@ -476,107 +460,100 @@ typedef union ii_illr_u {
476 * PI_ID==1, then the II sends the interrupt request to address * 460 * PI_ID==1, then the II sends the interrupt request to address *
477 * offset 0x01A0_0090 within the local register address space of PI1 * 461 * offset 0x01A0_0090 within the local register address space of PI1 *
478 * on the node specified by the NODE field. * 462 * on the node specified by the NODE field. *
479 * * 463 * *
480 ************************************************************************/ 464 ************************************************************************/
481 465
482typedef union ii_iidsr_u { 466typedef union ii_iidsr_u {
483 uint64_t ii_iidsr_regval; 467 uint64_t ii_iidsr_regval;
484 struct { 468 struct {
485 uint64_t i_level : 8; 469 uint64_t i_level:8;
486 uint64_t i_pi_id : 1; 470 uint64_t i_pi_id:1;
487 uint64_t i_node : 11; 471 uint64_t i_node:11;
488 uint64_t i_rsvd_3 : 4; 472 uint64_t i_rsvd_3:4;
489 uint64_t i_enable : 1; 473 uint64_t i_enable:1;
490 uint64_t i_rsvd_2 : 3; 474 uint64_t i_rsvd_2:3;
491 uint64_t i_int_sent : 2; 475 uint64_t i_int_sent:2;
492 uint64_t i_rsvd_1 : 2; 476 uint64_t i_rsvd_1:2;
493 uint64_t i_pi0_forward_int : 1; 477 uint64_t i_pi0_forward_int:1;
494 uint64_t i_pi1_forward_int : 1; 478 uint64_t i_pi1_forward_int:1;
495 uint64_t i_rsvd : 30; 479 uint64_t i_rsvd:30;
496 } ii_iidsr_fld_s; 480 } ii_iidsr_fld_s;
497} ii_iidsr_u_t; 481} ii_iidsr_u_t;
498 482
499
500
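
A sketch of filling in the interrupt-destination fields described above; the level, PI and node arguments are placeholders, and writing the result back to the IIDSR is not shown.

/* Sketch: build an IIDSR value that targets interrupt 'level' at
 * processor interface 'pi_id' (0 or 1) on node 'node', with the
 * interrupt enabled. */
static inline uint64_t ii_iidsr_build(unsigned int level, unsigned int pi_id,
				      unsigned int node)
{
	ii_iidsr_u_t iidsr;

	iidsr.ii_iidsr_regval = 0;
	iidsr.ii_iidsr_fld_s.i_level = level;
	iidsr.ii_iidsr_fld_s.i_pi_id = pi_id;
	iidsr.ii_iidsr_fld_s.i_node = node;
	iidsr.ii_iidsr_fld_s.i_enable = 1;
	return iidsr.ii_iidsr_regval;
}
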
501/************************************************************************ 483/************************************************************************
502 * * 484 * *
503 * There are two instances of this register. This register is used * 485 * There are two instances of this register. This register is used *
504 * for matching up the incoming responses from the graphics widget to * 486 * for matching up the incoming responses from the graphics widget to *
505 * the processor that initiated the graphics operation. The * 487 * the processor that initiated the graphics operation. The *
506 * write-responses are converted to graphics credits and returned to * 488 * write-responses are converted to graphics credits and returned to *
507 * the processor so that the processor interface can manage the flow * 489 * the processor so that the processor interface can manage the flow *
508 * control. * 490 * control. *
509 * * 491 * *
510 ************************************************************************/ 492 ************************************************************************/
511 493
512typedef union ii_igfx0_u { 494typedef union ii_igfx0_u {
513 uint64_t ii_igfx0_regval; 495 uint64_t ii_igfx0_regval;
514 struct { 496 struct {
515 uint64_t i_w_num : 4; 497 uint64_t i_w_num:4;
516 uint64_t i_pi_id : 1; 498 uint64_t i_pi_id:1;
517 uint64_t i_n_num : 12; 499 uint64_t i_n_num:12;
518 uint64_t i_p_num : 1; 500 uint64_t i_p_num:1;
519 uint64_t i_rsvd : 46; 501 uint64_t i_rsvd:46;
520 } ii_igfx0_fld_s; 502 } ii_igfx0_fld_s;
521} ii_igfx0_u_t; 503} ii_igfx0_u_t;
522 504
523
524/************************************************************************ 505/************************************************************************
525 * * 506 * *
526 * There are two instances of this register. This register is used * 507 * There are two instances of this register. This register is used *
527 * for matching up the incoming responses from the graphics widget to * 508 * for matching up the incoming responses from the graphics widget to *
528 * the processor that initiated the graphics operation. The * 509 * the processor that initiated the graphics operation. The *
529 * write-responses are converted to graphics credits and returned to * 510 * write-responses are converted to graphics credits and returned to *
530 * the processor so that the processor interface can manage the flow * 511 * the processor so that the processor interface can manage the flow *
531 * control. * 512 * control. *
532 * * 513 * *
533 ************************************************************************/ 514 ************************************************************************/
534 515
535typedef union ii_igfx1_u { 516typedef union ii_igfx1_u {
536 uint64_t ii_igfx1_regval; 517 uint64_t ii_igfx1_regval;
537 struct { 518 struct {
538 uint64_t i_w_num : 4; 519 uint64_t i_w_num:4;
539 uint64_t i_pi_id : 1; 520 uint64_t i_pi_id:1;
540 uint64_t i_n_num : 12; 521 uint64_t i_n_num:12;
541 uint64_t i_p_num : 1; 522 uint64_t i_p_num:1;
542 uint64_t i_rsvd : 46; 523 uint64_t i_rsvd:46;
543 } ii_igfx1_fld_s; 524 } ii_igfx1_fld_s;
544} ii_igfx1_u_t; 525} ii_igfx1_u_t;
545 526
546
547/************************************************************************ 527/************************************************************************
548 * * 528 * *
 549 * There are two instances of this register. These registers are * 529 * There are two instances of this register. These registers are *
550 * used as scratch registers for software use. * 530 * used as scratch registers for software use. *
551 * * 531 * *
552 ************************************************************************/ 532 ************************************************************************/
553 533
554typedef union ii_iscr0_u { 534typedef union ii_iscr0_u {
555 uint64_t ii_iscr0_regval; 535 uint64_t ii_iscr0_regval;
556 struct { 536 struct {
557 uint64_t i_scratch : 64; 537 uint64_t i_scratch:64;
558 } ii_iscr0_fld_s; 538 } ii_iscr0_fld_s;
559} ii_iscr0_u_t; 539} ii_iscr0_u_t;
560 540
561
562
563/************************************************************************ 541/************************************************************************
564 * * 542 * *
 565 * There are two instances of this register. These registers are * 543 * There are two instances of this register. These registers are *
566 * used as scratch registers for software use. * 544 * used as scratch registers for software use. *
567 * * 545 * *
568 ************************************************************************/ 546 ************************************************************************/
569 547
570typedef union ii_iscr1_u { 548typedef union ii_iscr1_u {
571 uint64_t ii_iscr1_regval; 549 uint64_t ii_iscr1_regval;
572 struct { 550 struct {
573 uint64_t i_scratch : 64; 551 uint64_t i_scratch:64;
574 } ii_iscr1_fld_s; 552 } ii_iscr1_fld_s;
575} ii_iscr1_u_t; 553} ii_iscr1_u_t;
576 554
577
578/************************************************************************ 555/************************************************************************
579 * * 556 * *
580 * Description: There are seven instances of translation table entry * 557 * Description: There are seven instances of translation table entry *
581 * registers. Each register maps a Shub Big Window to a 48-bit * 558 * registers. Each register maps a Shub Big Window to a 48-bit *
582 * address on Crosstalk. * 559 * address on Crosstalk. *
@@ -599,23 +576,22 @@ typedef union ii_iscr1_u {
599 * Crosstalk space addressable by the Shub is thus the lower * 576 * Crosstalk space addressable by the Shub is thus the lower *
 600 * 8-GBytes per widget (N-mode), only 7/32nds * 577 * 8-GBytes per widget (N-mode), only 7/32nds *
601 * of this space can be accessed. * 578 * of this space can be accessed. *
602 * * 579 * *
603 ************************************************************************/ 580 ************************************************************************/
604 581
605typedef union ii_itte1_u { 582typedef union ii_itte1_u {
606 uint64_t ii_itte1_regval; 583 uint64_t ii_itte1_regval;
607 struct { 584 struct {
608 uint64_t i_offset : 5; 585 uint64_t i_offset:5;
609 uint64_t i_rsvd_1 : 3; 586 uint64_t i_rsvd_1:3;
610 uint64_t i_w_num : 4; 587 uint64_t i_w_num:4;
611 uint64_t i_iosp : 1; 588 uint64_t i_iosp:1;
612 uint64_t i_rsvd : 51; 589 uint64_t i_rsvd:51;
613 } ii_itte1_fld_s; 590 } ii_itte1_fld_s;
614} ii_itte1_u_t; 591} ii_itte1_u_t;
615 592
616
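
A sketch of packing one Big Window translation entry of the kind described above; how the OFFSET field is derived from a physical address, and which ITTE instance to program, are left to the caller.

/* Sketch: pack a Big Window translation entry.  'offset' supplies the
 * OFFSET field (upper Crosstalk address bits), 'widget' the target
 * widget number, and 'iosp' selects I/O space. */
static inline uint64_t ii_itte_build(unsigned int offset, unsigned int widget,
				     unsigned int iosp)
{
	ii_itte1_u_t itte;

	itte.ii_itte1_regval = 0;
	itte.ii_itte1_fld_s.i_offset = offset;
	itte.ii_itte1_fld_s.i_w_num = widget;
	itte.ii_itte1_fld_s.i_iosp = iosp;
	return itte.ii_itte1_regval;
}
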
617/************************************************************************ 593/************************************************************************
618 * * 594 * *
619 * Description: There are seven instances of translation table entry * 595 * Description: There are seven instances of translation table entry *
620 * registers. Each register maps a Shub Big Window to a 48-bit * 596 * registers. Each register maps a Shub Big Window to a 48-bit *
621 * address on Crosstalk. * 597 * address on Crosstalk. *
@@ -638,23 +614,22 @@ typedef union ii_itte1_u {
638 * Crosstalk space addressable by the Shub is thus the lower * 614 * Crosstalk space addressable by the Shub is thus the lower *
 639 * 8-GBytes per widget (N-mode), only 7/32nds * 615 * 8-GBytes per widget (N-mode), only 7/32nds *
640 * of this space can be accessed. * 616 * of this space can be accessed. *
641 * * 617 * *
642 ************************************************************************/ 618 ************************************************************************/
643 619
644typedef union ii_itte2_u { 620typedef union ii_itte2_u {
645 uint64_t ii_itte2_regval; 621 uint64_t ii_itte2_regval;
646 struct { 622 struct {
647 uint64_t i_offset : 5; 623 uint64_t i_offset:5;
648 uint64_t i_rsvd_1 : 3; 624 uint64_t i_rsvd_1:3;
649 uint64_t i_w_num : 4; 625 uint64_t i_w_num:4;
650 uint64_t i_iosp : 1; 626 uint64_t i_iosp:1;
651 uint64_t i_rsvd : 51; 627 uint64_t i_rsvd:51;
652 } ii_itte2_fld_s; 628 } ii_itte2_fld_s;
653} ii_itte2_u_t; 629} ii_itte2_u_t;
654 630
655
656/************************************************************************ 631/************************************************************************
657 * * 632 * *
658 * Description: There are seven instances of translation table entry * 633 * Description: There are seven instances of translation table entry *
659 * registers. Each register maps a Shub Big Window to a 48-bit * 634 * registers. Each register maps a Shub Big Window to a 48-bit *
660 * address on Crosstalk. * 635 * address on Crosstalk. *
@@ -677,23 +652,22 @@ typedef union ii_itte2_u {
677 * Crosstalk space addressable by the SHub is thus the lower * 652 * Crosstalk space addressable by the SHub is thus the lower *
 678 * 8-GBytes per widget (N-mode), only 7/32nds * 653 * 8-GBytes per widget (N-mode), only 7/32nds *
679 * of this space can be accessed. * 654 * of this space can be accessed. *
680 * * 655 * *
681 ************************************************************************/ 656 ************************************************************************/
682 657
683typedef union ii_itte3_u { 658typedef union ii_itte3_u {
684 uint64_t ii_itte3_regval; 659 uint64_t ii_itte3_regval;
685 struct { 660 struct {
686 uint64_t i_offset : 5; 661 uint64_t i_offset:5;
687 uint64_t i_rsvd_1 : 3; 662 uint64_t i_rsvd_1:3;
688 uint64_t i_w_num : 4; 663 uint64_t i_w_num:4;
689 uint64_t i_iosp : 1; 664 uint64_t i_iosp:1;
690 uint64_t i_rsvd : 51; 665 uint64_t i_rsvd:51;
691 } ii_itte3_fld_s; 666 } ii_itte3_fld_s;
692} ii_itte3_u_t; 667} ii_itte3_u_t;
693 668
694
695/************************************************************************ 669/************************************************************************
696 * * 670 * *
697 * Description: There are seven instances of translation table entry * 671 * Description: There are seven instances of translation table entry *
698 * registers. Each register maps a SHub Big Window to a 48-bit * 672 * registers. Each register maps a SHub Big Window to a 48-bit *
699 * address on Crosstalk. * 673 * address on Crosstalk. *
@@ -716,23 +690,22 @@ typedef union ii_itte3_u {
716 * Crosstalk space addressable by the SHub is thus the lower * 690 * Crosstalk space addressable by the SHub is thus the lower *
 717 * 8-GBytes per widget (N-mode), only 7/32nds * 691 * 8-GBytes per widget (N-mode), only 7/32nds *
718 * of this space can be accessed. * 692 * of this space can be accessed. *
719 * * 693 * *
720 ************************************************************************/ 694 ************************************************************************/
721 695
722typedef union ii_itte4_u { 696typedef union ii_itte4_u {
723 uint64_t ii_itte4_regval; 697 uint64_t ii_itte4_regval;
724 struct { 698 struct {
725 uint64_t i_offset : 5; 699 uint64_t i_offset:5;
726 uint64_t i_rsvd_1 : 3; 700 uint64_t i_rsvd_1:3;
727 uint64_t i_w_num : 4; 701 uint64_t i_w_num:4;
728 uint64_t i_iosp : 1; 702 uint64_t i_iosp:1;
729 uint64_t i_rsvd : 51; 703 uint64_t i_rsvd:51;
730 } ii_itte4_fld_s; 704 } ii_itte4_fld_s;
731} ii_itte4_u_t; 705} ii_itte4_u_t;
732 706
733
734/************************************************************************ 707/************************************************************************
735 * * 708 * *
736 * Description: There are seven instances of translation table entry * 709 * Description: There are seven instances of translation table entry *
737 * registers. Each register maps a SHub Big Window to a 48-bit * 710 * registers. Each register maps a SHub Big Window to a 48-bit *
738 * address on Crosstalk. * 711 * address on Crosstalk. *
@@ -755,23 +728,22 @@ typedef union ii_itte4_u {
755 * Crosstalk space addressable by the Shub is thus the lower * 728 * Crosstalk space addressable by the Shub is thus the lower *
 756 * 8-GBytes per widget (N-mode), only 7/32nds * 729 * 8-GBytes per widget (N-mode), only 7/32nds *
757 * of this space can be accessed. * 730 * of this space can be accessed. *
758 * * 731 * *
759 ************************************************************************/ 732 ************************************************************************/
760 733
761typedef union ii_itte5_u { 734typedef union ii_itte5_u {
762 uint64_t ii_itte5_regval; 735 uint64_t ii_itte5_regval;
763 struct { 736 struct {
764 uint64_t i_offset : 5; 737 uint64_t i_offset:5;
765 uint64_t i_rsvd_1 : 3; 738 uint64_t i_rsvd_1:3;
766 uint64_t i_w_num : 4; 739 uint64_t i_w_num:4;
767 uint64_t i_iosp : 1; 740 uint64_t i_iosp:1;
768 uint64_t i_rsvd : 51; 741 uint64_t i_rsvd:51;
769 } ii_itte5_fld_s; 742 } ii_itte5_fld_s;
770} ii_itte5_u_t; 743} ii_itte5_u_t;
771 744
772
773/************************************************************************ 745/************************************************************************
774 * * 746 * *
775 * Description: There are seven instances of translation table entry * 747 * Description: There are seven instances of translation table entry *
776 * registers. Each register maps a Shub Big Window to a 48-bit * 748 * registers. Each register maps a Shub Big Window to a 48-bit *
777 * address on Crosstalk. * 749 * address on Crosstalk. *
@@ -794,23 +766,22 @@ typedef union ii_itte5_u {
794 * Crosstalk space addressable by the Shub is thus the lower * 766 * Crosstalk space addressable by the Shub is thus the lower *
 795 * 8-GBytes per widget (N-mode), only 7/32nds * 767 * 8-GBytes per widget (N-mode), only 7/32nds *
796 * of this space can be accessed. * 768 * of this space can be accessed. *
797 * * 769 * *
798 ************************************************************************/ 770 ************************************************************************/
799 771
800typedef union ii_itte6_u { 772typedef union ii_itte6_u {
801 uint64_t ii_itte6_regval; 773 uint64_t ii_itte6_regval;
802 struct { 774 struct {
803 uint64_t i_offset : 5; 775 uint64_t i_offset:5;
804 uint64_t i_rsvd_1 : 3; 776 uint64_t i_rsvd_1:3;
805 uint64_t i_w_num : 4; 777 uint64_t i_w_num:4;
806 uint64_t i_iosp : 1; 778 uint64_t i_iosp:1;
807 uint64_t i_rsvd : 51; 779 uint64_t i_rsvd:51;
808 } ii_itte6_fld_s; 780 } ii_itte6_fld_s;
809} ii_itte6_u_t; 781} ii_itte6_u_t;
810 782
811
812/************************************************************************ 783/************************************************************************
813 * * 784 * *
814 * Description: There are seven instances of translation table entry * 785 * Description: There are seven instances of translation table entry *
815 * registers. Each register maps a Shub Big Window to a 48-bit * 786 * registers. Each register maps a Shub Big Window to a 48-bit *
816 * address on Crosstalk. * 787 * address on Crosstalk. *
@@ -833,23 +804,22 @@ typedef union ii_itte6_u {
833 * Crosstalk space addressable by the SHub is thus the lower * 804 * Crosstalk space addressable by the SHub is thus the lower *
 834 * 8-GBytes per widget (N-mode), only 7/32nds * 805 * 8-GBytes per widget (N-mode), only 7/32nds *
835 * of this space can be accessed. * 806 * of this space can be accessed. *
836 * * 807 * *
837 ************************************************************************/ 808 ************************************************************************/
838 809
839typedef union ii_itte7_u { 810typedef union ii_itte7_u {
840 uint64_t ii_itte7_regval; 811 uint64_t ii_itte7_regval;
841 struct { 812 struct {
842 uint64_t i_offset : 5; 813 uint64_t i_offset:5;
843 uint64_t i_rsvd_1 : 3; 814 uint64_t i_rsvd_1:3;
844 uint64_t i_w_num : 4; 815 uint64_t i_w_num:4;
845 uint64_t i_iosp : 1; 816 uint64_t i_iosp:1;
846 uint64_t i_rsvd : 51; 817 uint64_t i_rsvd:51;
847 } ii_itte7_fld_s; 818 } ii_itte7_fld_s;
848} ii_itte7_u_t; 819} ii_itte7_u_t;
849 820
850
851/************************************************************************ 821/************************************************************************
852 * * 822 * *
853 * Description: There are 9 instances of this register, one per * 823 * Description: There are 9 instances of this register, one per *
854 * actual widget in this implementation of SHub and Crossbow. * 824 * actual widget in this implementation of SHub and Crossbow. *
855 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 825 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -868,33 +838,32 @@ typedef union ii_itte7_u {
868 * register; the write will correct the C field and capture its new * 838 * register; the write will correct the C field and capture its new *
869 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 839 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
870 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 840 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
871 * . * 841 * . *
872 * * 842 * *
873 ************************************************************************/ 843 ************************************************************************/
874 844
875typedef union ii_iprb0_u { 845typedef union ii_iprb0_u {
876 uint64_t ii_iprb0_regval; 846 uint64_t ii_iprb0_regval;
877 struct { 847 struct {
878 uint64_t i_c : 8; 848 uint64_t i_c:8;
879 uint64_t i_na : 14; 849 uint64_t i_na:14;
880 uint64_t i_rsvd_2 : 2; 850 uint64_t i_rsvd_2:2;
881 uint64_t i_nb : 14; 851 uint64_t i_nb:14;
882 uint64_t i_rsvd_1 : 2; 852 uint64_t i_rsvd_1:2;
883 uint64_t i_m : 2; 853 uint64_t i_m:2;
884 uint64_t i_f : 1; 854 uint64_t i_f:1;
885 uint64_t i_of_cnt : 5; 855 uint64_t i_of_cnt:5;
886 uint64_t i_error : 1; 856 uint64_t i_error:1;
887 uint64_t i_rd_to : 1; 857 uint64_t i_rd_to:1;
888 uint64_t i_spur_wr : 1; 858 uint64_t i_spur_wr:1;
889 uint64_t i_spur_rd : 1; 859 uint64_t i_spur_rd:1;
890 uint64_t i_rsvd : 11; 860 uint64_t i_rsvd:11;
891 uint64_t i_mult_err : 1; 861 uint64_t i_mult_err:1;
892 } ii_iprb0_fld_s; 862 } ii_iprb0_fld_s;
893} ii_iprb0_u_t; 863} ii_iprb0_u_t;
894 864
895
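
For the PRB registers above, a small decoding sketch that checks the error-related bits software usually cares about; it only inspects a raw register value obtained elsewhere.

/* Sketch: report whether an IPRB value has any of its error indications
 * (ERROR, RD_TO, SPUR_WR, SPUR_RD or MULT_ERR) set. */
static inline int ii_iprb_has_error(uint64_t regval)
{
	ii_iprb0_u_t iprb;

	iprb.ii_iprb0_regval = regval;
	return iprb.ii_iprb0_fld_s.i_error ||
	       iprb.ii_iprb0_fld_s.i_rd_to ||
	       iprb.ii_iprb0_fld_s.i_spur_wr ||
	       iprb.ii_iprb0_fld_s.i_spur_rd ||
	       iprb.ii_iprb0_fld_s.i_mult_err;
}
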
896/************************************************************************ 865/************************************************************************
897 * * 866 * *
898 * Description: There are 9 instances of this register, one per * 867 * Description: There are 9 instances of this register, one per *
899 * actual widget in this implementation of SHub and Crossbow. * 868 * actual widget in this implementation of SHub and Crossbow. *
900 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 869 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -913,33 +882,32 @@ typedef union ii_iprb0_u {
913 * register; the write will correct the C field and capture its new * 882 * register; the write will correct the C field and capture its new *
914 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 883 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
915 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 884 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
916 * . * 885 * . *
917 * * 886 * *
918 ************************************************************************/ 887 ************************************************************************/
919 888
920typedef union ii_iprb8_u { 889typedef union ii_iprb8_u {
921 uint64_t ii_iprb8_regval; 890 uint64_t ii_iprb8_regval;
922 struct { 891 struct {
923 uint64_t i_c : 8; 892 uint64_t i_c:8;
924 uint64_t i_na : 14; 893 uint64_t i_na:14;
925 uint64_t i_rsvd_2 : 2; 894 uint64_t i_rsvd_2:2;
926 uint64_t i_nb : 14; 895 uint64_t i_nb:14;
927 uint64_t i_rsvd_1 : 2; 896 uint64_t i_rsvd_1:2;
928 uint64_t i_m : 2; 897 uint64_t i_m:2;
929 uint64_t i_f : 1; 898 uint64_t i_f:1;
930 uint64_t i_of_cnt : 5; 899 uint64_t i_of_cnt:5;
931 uint64_t i_error : 1; 900 uint64_t i_error:1;
932 uint64_t i_rd_to : 1; 901 uint64_t i_rd_to:1;
933 uint64_t i_spur_wr : 1; 902 uint64_t i_spur_wr:1;
934 uint64_t i_spur_rd : 1; 903 uint64_t i_spur_rd:1;
935 uint64_t i_rsvd : 11; 904 uint64_t i_rsvd:11;
936 uint64_t i_mult_err : 1; 905 uint64_t i_mult_err:1;
937 } ii_iprb8_fld_s; 906 } ii_iprb8_fld_s;
938} ii_iprb8_u_t; 907} ii_iprb8_u_t;
939 908
940
941/************************************************************************ 909/************************************************************************
942 * * 910 * *
943 * Description: There are 9 instances of this register, one per * 911 * Description: There are 9 instances of this register, one per *
944 * actual widget in this implementation of SHub and Crossbow. * 912 * actual widget in this implementation of SHub and Crossbow. *
945 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 913 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -958,33 +926,32 @@ typedef union ii_iprb8_u {
958 * register; the write will correct the C field and capture its new * 926 * register; the write will correct the C field and capture its new *
959 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 927 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
960 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 928 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
961 * . * 929 * . *
962 * * 930 * *
963 ************************************************************************/ 931 ************************************************************************/
964 932
965typedef union ii_iprb9_u { 933typedef union ii_iprb9_u {
966 uint64_t ii_iprb9_regval; 934 uint64_t ii_iprb9_regval;
967 struct { 935 struct {
968 uint64_t i_c : 8; 936 uint64_t i_c:8;
969 uint64_t i_na : 14; 937 uint64_t i_na:14;
970 uint64_t i_rsvd_2 : 2; 938 uint64_t i_rsvd_2:2;
971 uint64_t i_nb : 14; 939 uint64_t i_nb:14;
972 uint64_t i_rsvd_1 : 2; 940 uint64_t i_rsvd_1:2;
973 uint64_t i_m : 2; 941 uint64_t i_m:2;
974 uint64_t i_f : 1; 942 uint64_t i_f:1;
975 uint64_t i_of_cnt : 5; 943 uint64_t i_of_cnt:5;
976 uint64_t i_error : 1; 944 uint64_t i_error:1;
977 uint64_t i_rd_to : 1; 945 uint64_t i_rd_to:1;
978 uint64_t i_spur_wr : 1; 946 uint64_t i_spur_wr:1;
979 uint64_t i_spur_rd : 1; 947 uint64_t i_spur_rd:1;
980 uint64_t i_rsvd : 11; 948 uint64_t i_rsvd:11;
981 uint64_t i_mult_err : 1; 949 uint64_t i_mult_err:1;
982 } ii_iprb9_fld_s; 950 } ii_iprb9_fld_s;
983} ii_iprb9_u_t; 951} ii_iprb9_u_t;
984 952
985
986/************************************************************************ 953/************************************************************************
987 * * 954 * *
988 * Description: There are 9 instances of this register, one per * 955 * Description: There are 9 instances of this register, one per *
989 * actual widget in this implementation of SHub and Crossbow. * 956 * actual widget in this implementation of SHub and Crossbow. *
990 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 957 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1003,33 +970,32 @@ typedef union ii_iprb9_u {
1003 * register; the write will correct the C field and capture its new * 970 * register; the write will correct the C field and capture its new *
1004 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 971 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1005 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 972 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1006 * * 973 * *
1007 * * 974 * *
1008 ************************************************************************/ 975 ************************************************************************/
1009 976
1010typedef union ii_iprba_u { 977typedef union ii_iprba_u {
1011 uint64_t ii_iprba_regval; 978 uint64_t ii_iprba_regval;
1012 struct { 979 struct {
1013 uint64_t i_c : 8; 980 uint64_t i_c:8;
1014 uint64_t i_na : 14; 981 uint64_t i_na:14;
1015 uint64_t i_rsvd_2 : 2; 982 uint64_t i_rsvd_2:2;
1016 uint64_t i_nb : 14; 983 uint64_t i_nb:14;
1017 uint64_t i_rsvd_1 : 2; 984 uint64_t i_rsvd_1:2;
1018 uint64_t i_m : 2; 985 uint64_t i_m:2;
1019 uint64_t i_f : 1; 986 uint64_t i_f:1;
1020 uint64_t i_of_cnt : 5; 987 uint64_t i_of_cnt:5;
1021 uint64_t i_error : 1; 988 uint64_t i_error:1;
1022 uint64_t i_rd_to : 1; 989 uint64_t i_rd_to:1;
1023 uint64_t i_spur_wr : 1; 990 uint64_t i_spur_wr:1;
1024 uint64_t i_spur_rd : 1; 991 uint64_t i_spur_rd:1;
1025 uint64_t i_rsvd : 11; 992 uint64_t i_rsvd:11;
1026 uint64_t i_mult_err : 1; 993 uint64_t i_mult_err:1;
1027 } ii_iprba_fld_s; 994 } ii_iprba_fld_s;
1028} ii_iprba_u_t; 995} ii_iprba_u_t;
1029 996
1030
1031/************************************************************************ 997/************************************************************************
1032 * * 998 * *
1033 * Description: There are 9 instances of this register, one per * 999 * Description: There are 9 instances of this register, one per *
1034 * actual widget in this implementation of SHub and Crossbow. * 1000 * actual widget in this implementation of SHub and Crossbow. *
1035 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 1001 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1048,33 +1014,32 @@ typedef union ii_iprba_u {
1048 * register; the write will correct the C field and capture its new * 1014 * register; the write will correct the C field and capture its new *
1049 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1015 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1050 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1016 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1051 * . * 1017 * . *
1052 * * 1018 * *
1053 ************************************************************************/ 1019 ************************************************************************/
1054 1020
1055typedef union ii_iprbb_u { 1021typedef union ii_iprbb_u {
1056 uint64_t ii_iprbb_regval; 1022 uint64_t ii_iprbb_regval;
1057 struct { 1023 struct {
1058 uint64_t i_c : 8; 1024 uint64_t i_c:8;
1059 uint64_t i_na : 14; 1025 uint64_t i_na:14;
1060 uint64_t i_rsvd_2 : 2; 1026 uint64_t i_rsvd_2:2;
1061 uint64_t i_nb : 14; 1027 uint64_t i_nb:14;
1062 uint64_t i_rsvd_1 : 2; 1028 uint64_t i_rsvd_1:2;
1063 uint64_t i_m : 2; 1029 uint64_t i_m:2;
1064 uint64_t i_f : 1; 1030 uint64_t i_f:1;
1065 uint64_t i_of_cnt : 5; 1031 uint64_t i_of_cnt:5;
1066 uint64_t i_error : 1; 1032 uint64_t i_error:1;
1067 uint64_t i_rd_to : 1; 1033 uint64_t i_rd_to:1;
1068 uint64_t i_spur_wr : 1; 1034 uint64_t i_spur_wr:1;
1069 uint64_t i_spur_rd : 1; 1035 uint64_t i_spur_rd:1;
1070 uint64_t i_rsvd : 11; 1036 uint64_t i_rsvd:11;
1071 uint64_t i_mult_err : 1; 1037 uint64_t i_mult_err:1;
1072 } ii_iprbb_fld_s; 1038 } ii_iprbb_fld_s;
1073} ii_iprbb_u_t; 1039} ii_iprbb_u_t;
1074 1040
1075
1076/************************************************************************ 1041/************************************************************************
1077 * * 1042 * *
1078 * Description: There are 9 instances of this register, one per * 1043 * Description: There are 9 instances of this register, one per *
1079 * actual widget in this implementation of SHub and Crossbow. * 1044 * actual widget in this implementation of SHub and Crossbow. *
1080 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 1045 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1093,33 +1058,32 @@ typedef union ii_iprbb_u {
1093 * register; the write will correct the C field and capture its new * 1058 * register; the write will correct the C field and capture its new *
1094 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1059 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1095 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1060 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1096 * . * 1061 * . *
1097 * * 1062 * *
1098 ************************************************************************/ 1063 ************************************************************************/
1099 1064
1100typedef union ii_iprbc_u { 1065typedef union ii_iprbc_u {
1101 uint64_t ii_iprbc_regval; 1066 uint64_t ii_iprbc_regval;
1102 struct { 1067 struct {
1103 uint64_t i_c : 8; 1068 uint64_t i_c:8;
1104 uint64_t i_na : 14; 1069 uint64_t i_na:14;
1105 uint64_t i_rsvd_2 : 2; 1070 uint64_t i_rsvd_2:2;
1106 uint64_t i_nb : 14; 1071 uint64_t i_nb:14;
1107 uint64_t i_rsvd_1 : 2; 1072 uint64_t i_rsvd_1:2;
1108 uint64_t i_m : 2; 1073 uint64_t i_m:2;
1109 uint64_t i_f : 1; 1074 uint64_t i_f:1;
1110 uint64_t i_of_cnt : 5; 1075 uint64_t i_of_cnt:5;
1111 uint64_t i_error : 1; 1076 uint64_t i_error:1;
1112 uint64_t i_rd_to : 1; 1077 uint64_t i_rd_to:1;
1113 uint64_t i_spur_wr : 1; 1078 uint64_t i_spur_wr:1;
1114 uint64_t i_spur_rd : 1; 1079 uint64_t i_spur_rd:1;
1115 uint64_t i_rsvd : 11; 1080 uint64_t i_rsvd:11;
1116 uint64_t i_mult_err : 1; 1081 uint64_t i_mult_err:1;
1117 } ii_iprbc_fld_s; 1082 } ii_iprbc_fld_s;
1118} ii_iprbc_u_t; 1083} ii_iprbc_u_t;
1119 1084
1120
1121/************************************************************************ 1085/************************************************************************
1122 * * 1086 * *
1123 * Description: There are 9 instances of this register, one per * 1087 * Description: There are 9 instances of this register, one per *
1124 * actual widget in this implementation of SHub and Crossbow. * 1088 * actual widget in this implementation of SHub and Crossbow. *
1125 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 1089 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1138,33 +1102,32 @@ typedef union ii_iprbc_u {
1138 * register; the write will correct the C field and capture its new * 1102 * register; the write will correct the C field and capture its new *
1139 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1103 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1140 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1104 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1141 * . * 1105 * . *
1142 * * 1106 * *
1143 ************************************************************************/ 1107 ************************************************************************/
1144 1108
1145typedef union ii_iprbd_u { 1109typedef union ii_iprbd_u {
1146 uint64_t ii_iprbd_regval; 1110 uint64_t ii_iprbd_regval;
1147 struct { 1111 struct {
1148 uint64_t i_c : 8; 1112 uint64_t i_c:8;
1149 uint64_t i_na : 14; 1113 uint64_t i_na:14;
1150 uint64_t i_rsvd_2 : 2; 1114 uint64_t i_rsvd_2:2;
1151 uint64_t i_nb : 14; 1115 uint64_t i_nb:14;
1152 uint64_t i_rsvd_1 : 2; 1116 uint64_t i_rsvd_1:2;
1153 uint64_t i_m : 2; 1117 uint64_t i_m:2;
1154 uint64_t i_f : 1; 1118 uint64_t i_f:1;
1155 uint64_t i_of_cnt : 5; 1119 uint64_t i_of_cnt:5;
1156 uint64_t i_error : 1; 1120 uint64_t i_error:1;
1157 uint64_t i_rd_to : 1; 1121 uint64_t i_rd_to:1;
1158 uint64_t i_spur_wr : 1; 1122 uint64_t i_spur_wr:1;
1159 uint64_t i_spur_rd : 1; 1123 uint64_t i_spur_rd:1;
1160 uint64_t i_rsvd : 11; 1124 uint64_t i_rsvd:11;
1161 uint64_t i_mult_err : 1; 1125 uint64_t i_mult_err:1;
1162 } ii_iprbd_fld_s; 1126 } ii_iprbd_fld_s;
1163} ii_iprbd_u_t; 1127} ii_iprbd_u_t;
1164 1128
1165
1166/************************************************************************ 1129/************************************************************************
1167 * * 1130 * *
1168 * Description: There are 9 instances of this register, one per * 1131 * Description: There are 9 instances of this register, one per *
1169 * actual widget in this implementation of SHub and Crossbow. * 1132 * actual widget in this implementation of SHub and Crossbow. *
1170 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 1133 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1183,33 +1146,32 @@ typedef union ii_iprbd_u {
1183 * register; the write will correct the C field and capture its new * 1146 * register; the write will correct the C field and capture its new *
1184 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1147 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1185 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1148 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1186 * . * 1149 * . *
1187 * * 1150 * *
1188 ************************************************************************/ 1151 ************************************************************************/
1189 1152
1190typedef union ii_iprbe_u { 1153typedef union ii_iprbe_u {
1191 uint64_t ii_iprbe_regval; 1154 uint64_t ii_iprbe_regval;
1192 struct { 1155 struct {
1193 uint64_t i_c : 8; 1156 uint64_t i_c:8;
1194 uint64_t i_na : 14; 1157 uint64_t i_na:14;
1195 uint64_t i_rsvd_2 : 2; 1158 uint64_t i_rsvd_2:2;
1196 uint64_t i_nb : 14; 1159 uint64_t i_nb:14;
1197 uint64_t i_rsvd_1 : 2; 1160 uint64_t i_rsvd_1:2;
1198 uint64_t i_m : 2; 1161 uint64_t i_m:2;
1199 uint64_t i_f : 1; 1162 uint64_t i_f:1;
1200 uint64_t i_of_cnt : 5; 1163 uint64_t i_of_cnt:5;
1201 uint64_t i_error : 1; 1164 uint64_t i_error:1;
1202 uint64_t i_rd_to : 1; 1165 uint64_t i_rd_to:1;
1203 uint64_t i_spur_wr : 1; 1166 uint64_t i_spur_wr:1;
1204 uint64_t i_spur_rd : 1; 1167 uint64_t i_spur_rd:1;
1205 uint64_t i_rsvd : 11; 1168 uint64_t i_rsvd:11;
1206 uint64_t i_mult_err : 1; 1169 uint64_t i_mult_err:1;
1207 } ii_iprbe_fld_s; 1170 } ii_iprbe_fld_s;
1208} ii_iprbe_u_t; 1171} ii_iprbe_u_t;
1209 1172
1210
1211/************************************************************************ 1173/************************************************************************
1212 * * 1174 * *
1213 * Description: There are 9 instances of this register, one per * 1175 * Description: There are 9 instances of this register, one per *
1214 * actual widget in this implementation of Shub and Crossbow. * 1176 * actual widget in this implementation of Shub and Crossbow. *
1215 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 * 1177 * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
@@ -1228,33 +1190,32 @@ typedef union ii_iprbe_u {
1228 * register; the write will correct the C field and capture its new * 1190 * register; the write will correct the C field and capture its new *
1229 * value in the internal register. Even if IECLR[E_PRB_x] is set, the * 1191 * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
1230 * SPUR_WR bit will persist if IPRBx hasn't yet been written. * 1192 * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
1231 * . * 1193 * . *
1232 * * 1194 * *
1233 ************************************************************************/ 1195 ************************************************************************/
1234 1196
1235typedef union ii_iprbf_u { 1197typedef union ii_iprbf_u {
1236 uint64_t ii_iprbf_regval; 1198 uint64_t ii_iprbf_regval;
1237 struct { 1199 struct {
1238 uint64_t i_c : 8; 1200 uint64_t i_c:8;
1239 uint64_t i_na : 14; 1201 uint64_t i_na:14;
1240 uint64_t i_rsvd_2 : 2; 1202 uint64_t i_rsvd_2:2;
1241 uint64_t i_nb : 14; 1203 uint64_t i_nb:14;
1242 uint64_t i_rsvd_1 : 2; 1204 uint64_t i_rsvd_1:2;
1243 uint64_t i_m : 2; 1205 uint64_t i_m:2;
1244 uint64_t i_f : 1; 1206 uint64_t i_f:1;
1245 uint64_t i_of_cnt : 5; 1207 uint64_t i_of_cnt:5;
1246 uint64_t i_error : 1; 1208 uint64_t i_error:1;
1247 uint64_t i_rd_to : 1; 1209 uint64_t i_rd_to:1;
1248 uint64_t i_spur_wr : 1; 1210 uint64_t i_spur_wr:1;
1249 uint64_t i_spur_rd : 1; 1211 uint64_t i_spur_rd:1;
1250 uint64_t i_rsvd : 11; 1212 uint64_t i_rsvd:11;
1251 uint64_t i_mult_err : 1; 1213 uint64_t i_mult_err:1;
 1252 } ii_iprbf_fld_s; 1214 } ii_iprbf_fld_s;
1253} ii_iprbf_u_t; 1215} ii_iprbf_u_t;
1254 1216
1255
1256/************************************************************************ 1217/************************************************************************
1257 * * 1218 * *
1258 * This register specifies the timeout value to use for monitoring * 1219 * This register specifies the timeout value to use for monitoring *
1259 * Crosstalk credits which are used outbound to Crosstalk. An * 1220 * Crosstalk credits which are used outbound to Crosstalk. An *
1260 * internal counter called the Crosstalk Credit Timeout Counter * 1221 * internal counter called the Crosstalk Credit Timeout Counter *
@@ -1267,20 +1228,19 @@ typedef union ii_iprbf_u {
1267 * Crosstalk Credit Timeout has occurred. The internal counter is not * 1228 * Crosstalk Credit Timeout has occurred. The internal counter is not *
1268 * readable from software, and stops counting at its maximum value, * 1229 * readable from software, and stops counting at its maximum value, *
1269 * so it cannot cause more than one interrupt. * 1230 * so it cannot cause more than one interrupt. *
1270 * * 1231 * *
1271 ************************************************************************/ 1232 ************************************************************************/
1272 1233
1273typedef union ii_ixcc_u { 1234typedef union ii_ixcc_u {
1274 uint64_t ii_ixcc_regval; 1235 uint64_t ii_ixcc_regval;
1275 struct { 1236 struct {
1276 uint64_t i_time_out : 26; 1237 uint64_t i_time_out:26;
1277 uint64_t i_rsvd : 38; 1238 uint64_t i_rsvd:38;
1278 } ii_ixcc_fld_s; 1239 } ii_ixcc_fld_s;
1279} ii_ixcc_u_t; 1240} ii_ixcc_u_t;
1280 1241
1281
1282/************************************************************************ 1242/************************************************************************
1283 * * 1243 * *
1284 * Description: This register qualifies all the PIO and DMA * 1244 * Description: This register qualifies all the PIO and DMA *
1285 * operations launched from widget 0 towards the SHub. In * 1245 * operations launched from widget 0 towards the SHub. In *
1286 * addition, it also qualifies accesses by the BTE streams. * 1246 * addition, it also qualifies accesses by the BTE streams. *
@@ -1292,27 +1252,25 @@ typedef union ii_ixcc_u {
1292 * the Wx_IAC field. The bits in this field are set by writing a 1 to * 1252 * the Wx_IAC field. The bits in this field are set by writing a 1 to *
1293 * them. Incoming replies from Crosstalk are not subject to this * 1253 * them. Incoming replies from Crosstalk are not subject to this *
1294 * access control mechanism. * 1254 * access control mechanism. *
1295 * * 1255 * *
1296 ************************************************************************/ 1256 ************************************************************************/
1297 1257
1298typedef union ii_imem_u { 1258typedef union ii_imem_u {
1299 uint64_t ii_imem_regval; 1259 uint64_t ii_imem_regval;
1300 struct { 1260 struct {
1301 uint64_t i_w0_esd : 1; 1261 uint64_t i_w0_esd:1;
1302 uint64_t i_rsvd_3 : 3; 1262 uint64_t i_rsvd_3:3;
1303 uint64_t i_b0_esd : 1; 1263 uint64_t i_b0_esd:1;
1304 uint64_t i_rsvd_2 : 3; 1264 uint64_t i_rsvd_2:3;
1305 uint64_t i_b1_esd : 1; 1265 uint64_t i_b1_esd:1;
1306 uint64_t i_rsvd_1 : 3; 1266 uint64_t i_rsvd_1:3;
1307 uint64_t i_clr_precise : 1; 1267 uint64_t i_clr_precise:1;
1308 uint64_t i_rsvd : 51; 1268 uint64_t i_rsvd:51;
1309 } ii_imem_fld_s; 1269 } ii_imem_fld_s;
1310} ii_imem_u_t; 1270} ii_imem_u_t;
1311 1271
1312
1313
1314/************************************************************************ 1272/************************************************************************
1315 * * 1273 * *
1316 * Description: This register specifies the timeout value to use for * 1274 * Description: This register specifies the timeout value to use for *
1317 * monitoring Crosstalk tail flits coming into the Shub in the * 1275 * monitoring Crosstalk tail flits coming into the Shub in the *
1318 * TAIL_TO field. An internal counter associated with this register * 1276 * TAIL_TO field. An internal counter associated with this register *
@@ -1332,90 +1290,87 @@ typedef union ii_imem_u {
1332 * the value in the RRSP_TO field, a Read Response Timeout has * 1290 * the value in the RRSP_TO field, a Read Response Timeout has *
1333 * occurred, and error handling occurs as described in the Error * 1291 * occurred, and error handling occurs as described in the Error *
1334 * Handling section of this document. * 1292 * Handling section of this document. *
1335 * * 1293 * *
1336 ************************************************************************/ 1294 ************************************************************************/
1337 1295
1338typedef union ii_ixtt_u { 1296typedef union ii_ixtt_u {
1339 uint64_t ii_ixtt_regval; 1297 uint64_t ii_ixtt_regval;
1340 struct { 1298 struct {
1341 uint64_t i_tail_to : 26; 1299 uint64_t i_tail_to:26;
1342 uint64_t i_rsvd_1 : 6; 1300 uint64_t i_rsvd_1:6;
1343 uint64_t i_rrsp_ps : 23; 1301 uint64_t i_rrsp_ps:23;
1344 uint64_t i_rrsp_to : 5; 1302 uint64_t i_rrsp_to:5;
1345 uint64_t i_rsvd : 4; 1303 uint64_t i_rsvd:4;
1346 } ii_ixtt_fld_s; 1304 } ii_ixtt_fld_s;
1347} ii_ixtt_u_t; 1305} ii_ixtt_u_t;
1348 1306
1349
1350/************************************************************************ 1307/************************************************************************
1351 * * 1308 * *
1352 * Writing a 1 to the fields of this register clears the appropriate * 1309 * Writing a 1 to the fields of this register clears the appropriate *
1353 * error bits in other areas of SHub. Note that when the * 1310 * error bits in other areas of SHub. Note that when the *
1354 * E_PRB_x bits are used to clear error bits in PRB registers, * 1311 * E_PRB_x bits are used to clear error bits in PRB registers, *
1355 * SPUR_RD and SPUR_WR may persist, because they require additional * 1312 * SPUR_RD and SPUR_WR may persist, because they require additional *
1356 * action to clear them. See the IPRBx and IXSS Register * 1313 * action to clear them. See the IPRBx and IXSS Register *
1357 * specifications. * 1314 * specifications. *
1358 * * 1315 * *
1359 ************************************************************************/ 1316 ************************************************************************/
1360 1317
1361typedef union ii_ieclr_u { 1318typedef union ii_ieclr_u {
1362 uint64_t ii_ieclr_regval; 1319 uint64_t ii_ieclr_regval;
1363 struct { 1320 struct {
1364 uint64_t i_e_prb_0 : 1; 1321 uint64_t i_e_prb_0:1;
1365 uint64_t i_rsvd : 7; 1322 uint64_t i_rsvd:7;
1366 uint64_t i_e_prb_8 : 1; 1323 uint64_t i_e_prb_8:1;
1367 uint64_t i_e_prb_9 : 1; 1324 uint64_t i_e_prb_9:1;
1368 uint64_t i_e_prb_a : 1; 1325 uint64_t i_e_prb_a:1;
1369 uint64_t i_e_prb_b : 1; 1326 uint64_t i_e_prb_b:1;
1370 uint64_t i_e_prb_c : 1; 1327 uint64_t i_e_prb_c:1;
1371 uint64_t i_e_prb_d : 1; 1328 uint64_t i_e_prb_d:1;
1372 uint64_t i_e_prb_e : 1; 1329 uint64_t i_e_prb_e:1;
1373 uint64_t i_e_prb_f : 1; 1330 uint64_t i_e_prb_f:1;
1374 uint64_t i_e_crazy : 1; 1331 uint64_t i_e_crazy:1;
1375 uint64_t i_e_bte_0 : 1; 1332 uint64_t i_e_bte_0:1;
1376 uint64_t i_e_bte_1 : 1; 1333 uint64_t i_e_bte_1:1;
1377 uint64_t i_reserved_1 : 10; 1334 uint64_t i_reserved_1:10;
1378 uint64_t i_spur_rd_hdr : 1; 1335 uint64_t i_spur_rd_hdr:1;
1379 uint64_t i_cam_intr_to : 1; 1336 uint64_t i_cam_intr_to:1;
1380 uint64_t i_cam_overflow : 1; 1337 uint64_t i_cam_overflow:1;
1381 uint64_t i_cam_read_miss : 1; 1338 uint64_t i_cam_read_miss:1;
1382 uint64_t i_ioq_rep_underflow : 1; 1339 uint64_t i_ioq_rep_underflow:1;
1383 uint64_t i_ioq_req_underflow : 1; 1340 uint64_t i_ioq_req_underflow:1;
1384 uint64_t i_ioq_rep_overflow : 1; 1341 uint64_t i_ioq_rep_overflow:1;
1385 uint64_t i_ioq_req_overflow : 1; 1342 uint64_t i_ioq_req_overflow:1;
1386 uint64_t i_iiq_rep_overflow : 1; 1343 uint64_t i_iiq_rep_overflow:1;
1387 uint64_t i_iiq_req_overflow : 1; 1344 uint64_t i_iiq_req_overflow:1;
1388 uint64_t i_ii_xn_rep_cred_overflow : 1; 1345 uint64_t i_ii_xn_rep_cred_overflow:1;
1389 uint64_t i_ii_xn_req_cred_overflow : 1; 1346 uint64_t i_ii_xn_req_cred_overflow:1;
1390 uint64_t i_ii_xn_invalid_cmd : 1; 1347 uint64_t i_ii_xn_invalid_cmd:1;
1391 uint64_t i_xn_ii_invalid_cmd : 1; 1348 uint64_t i_xn_ii_invalid_cmd:1;
1392 uint64_t i_reserved_2 : 21; 1349 uint64_t i_reserved_2:21;
1393 } ii_ieclr_fld_s; 1350 } ii_ieclr_fld_s;
1394} ii_ieclr_u_t; 1351} ii_ieclr_u_t;
1395 1352
1396
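
Since the IECLR bits are write-one-to-clear, a sketch of building a clear mask for a single PRB error; actually issuing the store is not shown.

/* Sketch: build an IECLR value that clears the error state latched for
 * PRB 8.  Other fields stay zero so they are unaffected by the write. */
static inline uint64_t ii_ieclr_prb8_mask(void)
{
	ii_ieclr_u_t ieclr;

	ieclr.ii_ieclr_regval = 0;
	ieclr.ii_ieclr_fld_s.i_e_prb_8 = 1;
	return ieclr.ii_ieclr_regval;
}
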
1397/************************************************************************ 1353/************************************************************************
1398 * * 1354 * *
1399 * This register controls both BTEs. SOFT_RESET is intended for * 1355 * This register controls both BTEs. SOFT_RESET is intended for *
1400 * recovery after an error. COUNT controls the total number of CRBs * 1356 * recovery after an error. COUNT controls the total number of CRBs *
1401 * that both BTEs (combined) can use, which affects total BTE * 1357 * that both BTEs (combined) can use, which affects total BTE *
1402 * bandwidth. * 1358 * bandwidth. *
1403 * * 1359 * *
1404 ************************************************************************/ 1360 ************************************************************************/
1405 1361
1406typedef union ii_ibcr_u { 1362typedef union ii_ibcr_u {
1407 uint64_t ii_ibcr_regval; 1363 uint64_t ii_ibcr_regval;
1408 struct { 1364 struct {
1409 uint64_t i_count : 4; 1365 uint64_t i_count:4;
1410 uint64_t i_rsvd_1 : 4; 1366 uint64_t i_rsvd_1:4;
1411 uint64_t i_soft_reset : 1; 1367 uint64_t i_soft_reset:1;
1412 uint64_t i_rsvd : 55; 1368 uint64_t i_rsvd:55;
1413 } ii_ibcr_fld_s; 1369 } ii_ibcr_fld_s;
1414} ii_ibcr_u_t; 1370} ii_ibcr_u_t;
1415 1371
1416
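
A sketch of the two IBCR knobs described above; the legal COUNT range and how long SOFT_RESET must be held are not specified here.

/* Sketch: build an IBCR value with the given CRB count and, optionally,
 * the BTE soft-reset bit asserted. */
static inline uint64_t ii_ibcr_build(unsigned int count, int soft_reset)
{
	ii_ibcr_u_t ibcr;

	ibcr.ii_ibcr_regval = 0;
	ibcr.ii_ibcr_fld_s.i_count = count;
	ibcr.ii_ibcr_fld_s.i_soft_reset = soft_reset ? 1 : 0;
	return ibcr.ii_ibcr_regval;
}
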
1417/************************************************************************ 1372/************************************************************************
1418 * * 1373 * *
1419 * This register contains the header of a spurious read response * 1374 * This register contains the header of a spurious read response *
1420 * received from Crosstalk. A spurious read response is defined as a * 1375 * received from Crosstalk. A spurious read response is defined as a *
1421 * read response received by II from a widget for which (1) the SIDN * 1376 * read response received by II from a widget for which (1) the SIDN *
@@ -1440,49 +1395,47 @@ typedef union ii_ibcr_u {
1440 * will be set. Any SPUR_RD bits in any other PRB registers indicate * 1395 * will be set. Any SPUR_RD bits in any other PRB registers indicate *
 1441 * spurious messages from other widgets which were detected after the * 1396 * spurious messages from other widgets which were detected after the *
 1442 * header was captured. * 1397 * header was captured. *
1443 * * 1398 * *
1444 ************************************************************************/ 1399 ************************************************************************/
1445 1400
1446typedef union ii_ixsm_u { 1401typedef union ii_ixsm_u {
1447 uint64_t ii_ixsm_regval; 1402 uint64_t ii_ixsm_regval;
1448 struct { 1403 struct {
1449 uint64_t i_byte_en : 32; 1404 uint64_t i_byte_en:32;
1450 uint64_t i_reserved : 1; 1405 uint64_t i_reserved:1;
1451 uint64_t i_tag : 3; 1406 uint64_t i_tag:3;
1452 uint64_t i_alt_pactyp : 4; 1407 uint64_t i_alt_pactyp:4;
1453 uint64_t i_bo : 1; 1408 uint64_t i_bo:1;
1454 uint64_t i_error : 1; 1409 uint64_t i_error:1;
1455 uint64_t i_vbpm : 1; 1410 uint64_t i_vbpm:1;
1456 uint64_t i_gbr : 1; 1411 uint64_t i_gbr:1;
1457 uint64_t i_ds : 2; 1412 uint64_t i_ds:2;
1458 uint64_t i_ct : 1; 1413 uint64_t i_ct:1;
1459 uint64_t i_tnum : 5; 1414 uint64_t i_tnum:5;
1460 uint64_t i_pactyp : 4; 1415 uint64_t i_pactyp:4;
1461 uint64_t i_sidn : 4; 1416 uint64_t i_sidn:4;
1462 uint64_t i_didn : 4; 1417 uint64_t i_didn:4;
1463 } ii_ixsm_fld_s; 1418 } ii_ixsm_fld_s;
1464} ii_ixsm_u_t; 1419} ii_ixsm_u_t;
1465 1420
1466
1467/************************************************************************ 1421/************************************************************************
1468 * * 1422 * *
1469 * This register contains the sideband bits of a spurious read * 1423 * This register contains the sideband bits of a spurious read *
1470 * response received from Crosstalk. * 1424 * response received from Crosstalk. *
1471 * * 1425 * *
1472 ************************************************************************/ 1426 ************************************************************************/
1473 1427
1474typedef union ii_ixss_u { 1428typedef union ii_ixss_u {
1475 uint64_t ii_ixss_regval; 1429 uint64_t ii_ixss_regval;
1476 struct { 1430 struct {
1477 uint64_t i_sideband : 8; 1431 uint64_t i_sideband:8;
1478 uint64_t i_rsvd : 55; 1432 uint64_t i_rsvd:55;
1479 uint64_t i_valid : 1; 1433 uint64_t i_valid:1;
1480 } ii_ixss_fld_s; 1434 } ii_ixss_fld_s;
1481} ii_ixss_u_t; 1435} ii_ixss_u_t;
1482 1436
1483
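
The VALID bit gates whether the captured sideband is meaningful; a decoding sketch over a raw register value:

/* Sketch: return the captured sideband bits from an IXSS value, or -1
 * if the VALID bit indicates nothing has been captured. */
static inline int ii_ixss_sideband(uint64_t regval)
{
	ii_ixss_u_t ixss;

	ixss.ii_ixss_regval = regval;
	return ixss.ii_ixss_fld_s.i_valid ? (int)ixss.ii_ixss_fld_s.i_sideband : -1;
}
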
1484/************************************************************************ 1437/************************************************************************
1485 * * 1438 * *
1486 * This register enables software to access the II LLP's test port. * 1439 * This register enables software to access the II LLP's test port. *
1487 * Refer to the LLP 2.5 documentation for an explanation of the test * 1440 * Refer to the LLP 2.5 documentation for an explanation of the test *
1488 * port. Software can write to this register to program the values * 1441 * port. Software can write to this register to program the values *
@@ -1490,27 +1443,26 @@ typedef union ii_ixss_u {
1490 * TestMask and TestSeed). Similarly, software can read from this * 1443 * TestMask and TestSeed). Similarly, software can read from this *
1491 * register to obtain the values of the test port's status outputs * 1444 * register to obtain the values of the test port's status outputs *
1492 * (TestCBerr, TestValid and TestData). * 1445 * (TestCBerr, TestValid and TestData). *
1493 * * 1446 * *
1494 ************************************************************************/ 1447 ************************************************************************/
1495 1448
1496typedef union ii_ilct_u { 1449typedef union ii_ilct_u {
1497 uint64_t ii_ilct_regval; 1450 uint64_t ii_ilct_regval;
1498 struct { 1451 struct {
1499 uint64_t i_test_seed : 20; 1452 uint64_t i_test_seed:20;
1500 uint64_t i_test_mask : 8; 1453 uint64_t i_test_mask:8;
1501 uint64_t i_test_data : 20; 1454 uint64_t i_test_data:20;
1502 uint64_t i_test_valid : 1; 1455 uint64_t i_test_valid:1;
1503 uint64_t i_test_cberr : 1; 1456 uint64_t i_test_cberr:1;
1504 uint64_t i_test_flit : 3; 1457 uint64_t i_test_flit:3;
1505 uint64_t i_test_clear : 1; 1458 uint64_t i_test_clear:1;
1506 uint64_t i_test_err_capture : 1; 1459 uint64_t i_test_err_capture:1;
1507 uint64_t i_rsvd : 9; 1460 uint64_t i_rsvd:9;
1508 } ii_ilct_fld_s; 1461 } ii_ilct_fld_s;
1509} ii_ilct_u_t; 1462} ii_ilct_u_t;
1510 1463
1511
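A minimal sketch of composing an ILCT image that programs the LLP test port inputs (TestSeed and TestMask). Setting TEST_CLEAR before a run is an assumption here, ilct_build is a hypothetical helper, and writing the result to the ILCT MMR is left to the caller:

#include <asm/sn/shubio.h>

static uint64_t ilct_build(uint32_t seed, uint8_t mask)
{
	ii_ilct_u_t ilct;

	ilct.ii_ilct_regval = 0;
	ilct.ii_ilct_fld_s.i_test_seed = seed;	/* 20-bit TestSeed input */
	ilct.ii_ilct_fld_s.i_test_mask = mask;	/* 8-bit TestMask input */
	ilct.ii_ilct_fld_s.i_test_clear = 1;	/* assumption: clear state first */
	return ilct.ii_ilct_regval;		/* caller writes this to ILCT */
}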
1512/************************************************************************ 1464/************************************************************************
1513 * * 1465 * *
1514 * If the II detects an illegal incoming Duplonet packet (request or * 1466 * If the II detects an illegal incoming Duplonet packet (request or *
1515 * reply) when VALID==0 in the IIEPH1 register, then it saves the * 1467 * reply) when VALID==0 in the IIEPH1 register, then it saves the *
1516 * contents of the packet's header flit in the IIEPH1 and IIEPH2 * 1468 * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
@@ -1526,575 +1478,549 @@ typedef union ii_ilct_u {
1526 * packet when VALID==1 in the IIEPH1 register, then it merely sets * 1478 * packet when VALID==1 in the IIEPH1 register, then it merely sets *
1527 * the OVERRUN bit to indicate that a subsequent error has happened, * 1479 * the OVERRUN bit to indicate that a subsequent error has happened, *
1528 * and does nothing further. * 1480 * and does nothing further. *
1529 * * 1481 * *
1530 ************************************************************************/ 1482 ************************************************************************/
1531 1483
1532typedef union ii_iieph1_u { 1484typedef union ii_iieph1_u {
1533 uint64_t ii_iieph1_regval; 1485 uint64_t ii_iieph1_regval;
1534 struct { 1486 struct {
1535 uint64_t i_command : 7; 1487 uint64_t i_command:7;
1536 uint64_t i_rsvd_5 : 1; 1488 uint64_t i_rsvd_5:1;
1537 uint64_t i_suppl : 14; 1489 uint64_t i_suppl:14;
1538 uint64_t i_rsvd_4 : 1; 1490 uint64_t i_rsvd_4:1;
1539 uint64_t i_source : 14; 1491 uint64_t i_source:14;
1540 uint64_t i_rsvd_3 : 1; 1492 uint64_t i_rsvd_3:1;
1541 uint64_t i_err_type : 4; 1493 uint64_t i_err_type:4;
1542 uint64_t i_rsvd_2 : 4; 1494 uint64_t i_rsvd_2:4;
1543 uint64_t i_overrun : 1; 1495 uint64_t i_overrun:1;
1544 uint64_t i_rsvd_1 : 3; 1496 uint64_t i_rsvd_1:3;
1545 uint64_t i_valid : 1; 1497 uint64_t i_valid:1;
1546 uint64_t i_rsvd : 13; 1498 uint64_t i_rsvd:13;
1547 } ii_iieph1_fld_s; 1499 } ii_iieph1_fld_s;
1548} ii_iieph1_u_t; 1500} ii_iieph1_u_t;
1549 1501
1550
1551/************************************************************************ 1502/************************************************************************
1552 * * 1503 * *
1553 * This register holds the Address field from the header flit of an * 1504 * This register holds the Address field from the header flit of an *
1554 * incoming erroneous Duplonet packet, along with the tail bit which * 1505 * incoming erroneous Duplonet packet, along with the tail bit which *
1555 * accompanied this header flit. This register is essentially an * 1506 * accompanied this header flit. This register is essentially an *
1556 * extension of IIEPH1. Two registers were necessary because the 64 * 1507 * extension of IIEPH1. Two registers were necessary because the 64 *
1557 * bits available in only a single register were insufficient to * 1508 * bits available in only a single register were insufficient to *
1558 * capture the entire header flit of an erroneous packet. * 1509 * capture the entire header flit of an erroneous packet. *
1559 * * 1510 * *
1560 ************************************************************************/ 1511 ************************************************************************/
1561 1512
1562typedef union ii_iieph2_u { 1513typedef union ii_iieph2_u {
1563 uint64_t ii_iieph2_regval; 1514 uint64_t ii_iieph2_regval;
1564 struct { 1515 struct {
1565 uint64_t i_rsvd_0 : 3; 1516 uint64_t i_rsvd_0:3;
1566 uint64_t i_address : 47; 1517 uint64_t i_address:47;
1567 uint64_t i_rsvd_1 : 10; 1518 uint64_t i_rsvd_1:10;
1568 uint64_t i_tail : 1; 1519 uint64_t i_tail:1;
1569 uint64_t i_rsvd : 3; 1520 uint64_t i_rsvd:3;
1570 } ii_iieph2_fld_s; 1521 } ii_iieph2_fld_s;
1571} ii_iieph2_u_t; 1522} ii_iieph2_u_t;
1572 1523
1573
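Because IIEPH2 is an extension of IIEPH1, recovering the captured header of an erroneous packet means decoding both images together. A hedged sketch, assuming both raw values were read elsewhere; dump_iieph is a hypothetical helper:

#include <linux/kernel.h>
#include <asm/sn/shubio.h>

static void dump_iieph(uint64_t eph1_raw, uint64_t eph2_raw)
{
	ii_iieph1_u_t eph1;
	ii_iieph2_u_t eph2;

	eph1.ii_iieph1_regval = eph1_raw;
	eph2.ii_iieph2_regval = eph2_raw;

	if (!eph1.ii_iieph1_fld_s.i_valid)
		return;				/* nothing has been captured */

	printk("II bad pkt: cmd 0x%x src 0x%x err_type %u addr 0x%llx tail %u%s\n",
	       (unsigned int)eph1.ii_iieph1_fld_s.i_command,
	       (unsigned int)eph1.ii_iieph1_fld_s.i_source,
	       (unsigned int)eph1.ii_iieph1_fld_s.i_err_type,
	       (unsigned long long)eph2.ii_iieph2_fld_s.i_address,
	       (unsigned int)eph2.ii_iieph2_fld_s.i_tail,
	       eph1.ii_iieph1_fld_s.i_overrun ? " (overrun)" : "");
}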
1574/******************************/ 1524/******************************/
1575 1525
1576
1577
1578/************************************************************************ 1526/************************************************************************
1579 * * 1527 * *
1580 * This register's value is a bit vector that guards access from SXBs * 1528 * This register's value is a bit vector that guards access from SXBs *
1581 * to local registers within the II as well as to external Crosstalk * 1529 * to local registers within the II as well as to external Crosstalk *
1582 * widgets * 1530 * widgets *
1583 * * 1531 * *
1584 ************************************************************************/ 1532 ************************************************************************/
1585 1533
1586typedef union ii_islapr_u { 1534typedef union ii_islapr_u {
1587 uint64_t ii_islapr_regval; 1535 uint64_t ii_islapr_regval;
1588 struct { 1536 struct {
1589 uint64_t i_region : 64; 1537 uint64_t i_region:64;
1590 } ii_islapr_fld_s; 1538 } ii_islapr_fld_s;
1591} ii_islapr_u_t; 1539} ii_islapr_u_t;
1592 1540
1593
1594/************************************************************************ 1541/************************************************************************
1595 * * 1542 * *
1596 * A write to this register of the 56-bit value "Pup+Bun" will cause * 1543 * A write to this register of the 56-bit value "Pup+Bun" will cause *
1597 * the bit in the ISLAPR register corresponding to the region of the * 1544 * the bit in the ISLAPR register corresponding to the region of the *
1598 * requestor to be set (access allowed). * 1545 * requestor to be set (access allowed). *
1599 * * 1546 * *
1600 ************************************************************************/ 1547 ************************************************************************/
1601 1548
1602typedef union ii_islapo_u { 1549typedef union ii_islapo_u {
1603 uint64_t ii_islapo_regval; 1550 uint64_t ii_islapo_regval;
1604 struct { 1551 struct {
1605 uint64_t i_io_sbx_ovrride : 56; 1552 uint64_t i_io_sbx_ovrride:56;
1606 uint64_t i_rsvd : 8; 1553 uint64_t i_rsvd:8;
1607 } ii_islapo_fld_s; 1554 } ii_islapo_fld_s;
1608} ii_islapo_u_t; 1555} ii_islapo_u_t;
1609 1556
1610/************************************************************************ 1557/************************************************************************
1611 * * 1558 * *
1612 * Determines how long the wrapper will wait after an interrupt is * 1559 * Determines how long the wrapper will wait after an interrupt is *
1613 * initially issued from the II before it times out the outstanding * 1560 * initially issued from the II before it times out the outstanding *
1614 * interrupt and drops it from the interrupt queue. * 1561 * interrupt and drops it from the interrupt queue. *
1615 * * 1562 * *
1616 ************************************************************************/ 1563 ************************************************************************/
1617 1564
1618typedef union ii_iwi_u { 1565typedef union ii_iwi_u {
1619 uint64_t ii_iwi_regval; 1566 uint64_t ii_iwi_regval;
1620 struct { 1567 struct {
1621 uint64_t i_prescale : 24; 1568 uint64_t i_prescale:24;
1622 uint64_t i_rsvd : 8; 1569 uint64_t i_rsvd:8;
1623 uint64_t i_timeout : 8; 1570 uint64_t i_timeout:8;
1624 uint64_t i_rsvd1 : 8; 1571 uint64_t i_rsvd1:8;
1625 uint64_t i_intrpt_retry_period : 8; 1572 uint64_t i_intrpt_retry_period:8;
1626 uint64_t i_rsvd2 : 8; 1573 uint64_t i_rsvd2:8;
1627 } ii_iwi_fld_s; 1574 } ii_iwi_fld_s;
1628} ii_iwi_u_t; 1575} ii_iwi_u_t;
1629 1576
1630/************************************************************************ 1577/************************************************************************
1631 * * 1578 * *
1632 * Log errors which have occurred in the II wrapper. The errors are * 1579 * Log errors which have occurred in the II wrapper. The errors are *
1633 * cleared by writing to the IECLR register. * 1580 * cleared by writing to the IECLR register. *
1634 * * 1581 * *
1635 ************************************************************************/ 1582 ************************************************************************/
1636 1583
1637typedef union ii_iwel_u { 1584typedef union ii_iwel_u {
1638 uint64_t ii_iwel_regval; 1585 uint64_t ii_iwel_regval;
1639 struct { 1586 struct {
1640 uint64_t i_intr_timed_out : 1; 1587 uint64_t i_intr_timed_out:1;
1641 uint64_t i_rsvd : 7; 1588 uint64_t i_rsvd:7;
1642 uint64_t i_cam_overflow : 1; 1589 uint64_t i_cam_overflow:1;
1643 uint64_t i_cam_read_miss : 1; 1590 uint64_t i_cam_read_miss:1;
1644 uint64_t i_rsvd1 : 2; 1591 uint64_t i_rsvd1:2;
1645 uint64_t i_ioq_rep_underflow : 1; 1592 uint64_t i_ioq_rep_underflow:1;
1646 uint64_t i_ioq_req_underflow : 1; 1593 uint64_t i_ioq_req_underflow:1;
1647 uint64_t i_ioq_rep_overflow : 1; 1594 uint64_t i_ioq_rep_overflow:1;
1648 uint64_t i_ioq_req_overflow : 1; 1595 uint64_t i_ioq_req_overflow:1;
1649 uint64_t i_iiq_rep_overflow : 1; 1596 uint64_t i_iiq_rep_overflow:1;
1650 uint64_t i_iiq_req_overflow : 1; 1597 uint64_t i_iiq_req_overflow:1;
1651 uint64_t i_rsvd2 : 6; 1598 uint64_t i_rsvd2:6;
1652 uint64_t i_ii_xn_rep_cred_over_under: 1; 1599 uint64_t i_ii_xn_rep_cred_over_under:1;
1653 uint64_t i_ii_xn_req_cred_over_under: 1; 1600 uint64_t i_ii_xn_req_cred_over_under:1;
1654 uint64_t i_rsvd3 : 6; 1601 uint64_t i_rsvd3:6;
1655 uint64_t i_ii_xn_invalid_cmd : 1; 1602 uint64_t i_ii_xn_invalid_cmd:1;
1656 uint64_t i_xn_ii_invalid_cmd : 1; 1603 uint64_t i_xn_ii_invalid_cmd:1;
1657 uint64_t i_rsvd4 : 30; 1604 uint64_t i_rsvd4:30;
1658 } ii_iwel_fld_s; 1605 } ii_iwel_fld_s;
1659} ii_iwel_u_t; 1606} ii_iwel_u_t;
1660 1607
1661/************************************************************************ 1608/************************************************************************
1662 * * 1609 * *
1663 * Controls the II wrapper. * 1610 * Controls the II wrapper. *
1664 * * 1611 * *
1665 ************************************************************************/ 1612 ************************************************************************/
1666 1613
1667typedef union ii_iwc_u { 1614typedef union ii_iwc_u {
1668 uint64_t ii_iwc_regval; 1615 uint64_t ii_iwc_regval;
1669 struct { 1616 struct {
1670 uint64_t i_dma_byte_swap : 1; 1617 uint64_t i_dma_byte_swap:1;
1671 uint64_t i_rsvd : 3; 1618 uint64_t i_rsvd:3;
1672 uint64_t i_cam_read_lines_reset : 1; 1619 uint64_t i_cam_read_lines_reset:1;
1673 uint64_t i_rsvd1 : 3; 1620 uint64_t i_rsvd1:3;
1674 uint64_t i_ii_xn_cred_over_under_log: 1; 1621 uint64_t i_ii_xn_cred_over_under_log:1;
1675 uint64_t i_rsvd2 : 19; 1622 uint64_t i_rsvd2:19;
1676 uint64_t i_xn_rep_iq_depth : 5; 1623 uint64_t i_xn_rep_iq_depth:5;
1677 uint64_t i_rsvd3 : 3; 1624 uint64_t i_rsvd3:3;
1678 uint64_t i_xn_req_iq_depth : 5; 1625 uint64_t i_xn_req_iq_depth:5;
1679 uint64_t i_rsvd4 : 3; 1626 uint64_t i_rsvd4:3;
1680 uint64_t i_iiq_depth : 6; 1627 uint64_t i_iiq_depth:6;
1681 uint64_t i_rsvd5 : 12; 1628 uint64_t i_rsvd5:12;
1682 uint64_t i_force_rep_cred : 1; 1629 uint64_t i_force_rep_cred:1;
1683 uint64_t i_force_req_cred : 1; 1630 uint64_t i_force_req_cred:1;
1684 } ii_iwc_fld_s; 1631 } ii_iwc_fld_s;
1685} ii_iwc_u_t; 1632} ii_iwc_u_t;
1686 1633
1687/************************************************************************ 1634/************************************************************************
1688 * * 1635 * *
1689 * Status in the II wrapper. * 1636 * Status in the II wrapper. *
1690 * * 1637 * *
1691 ************************************************************************/ 1638 ************************************************************************/
1692 1639
1693typedef union ii_iws_u { 1640typedef union ii_iws_u {
1694 uint64_t ii_iws_regval; 1641 uint64_t ii_iws_regval;
1695 struct { 1642 struct {
1696 uint64_t i_xn_rep_iq_credits : 5; 1643 uint64_t i_xn_rep_iq_credits:5;
1697 uint64_t i_rsvd : 3; 1644 uint64_t i_rsvd:3;
1698 uint64_t i_xn_req_iq_credits : 5; 1645 uint64_t i_xn_req_iq_credits:5;
1699 uint64_t i_rsvd1 : 51; 1646 uint64_t i_rsvd1:51;
1700 } ii_iws_fld_s; 1647 } ii_iws_fld_s;
1701} ii_iws_u_t; 1648} ii_iws_u_t;
1702 1649
1703/************************************************************************ 1650/************************************************************************
1704 * * 1651 * *
1705 * Masks errors in the IWEL register. * 1652 * Masks errors in the IWEL register. *
1706 * * 1653 * *
1707 ************************************************************************/ 1654 ************************************************************************/
1708 1655
1709typedef union ii_iweim_u { 1656typedef union ii_iweim_u {
1710 uint64_t ii_iweim_regval; 1657 uint64_t ii_iweim_regval;
1711 struct { 1658 struct {
1712 uint64_t i_intr_timed_out : 1; 1659 uint64_t i_intr_timed_out:1;
1713 uint64_t i_rsvd : 7; 1660 uint64_t i_rsvd:7;
1714 uint64_t i_cam_overflow : 1; 1661 uint64_t i_cam_overflow:1;
1715 uint64_t i_cam_read_miss : 1; 1662 uint64_t i_cam_read_miss:1;
1716 uint64_t i_rsvd1 : 2; 1663 uint64_t i_rsvd1:2;
1717 uint64_t i_ioq_rep_underflow : 1; 1664 uint64_t i_ioq_rep_underflow:1;
1718 uint64_t i_ioq_req_underflow : 1; 1665 uint64_t i_ioq_req_underflow:1;
1719 uint64_t i_ioq_rep_overflow : 1; 1666 uint64_t i_ioq_rep_overflow:1;
1720 uint64_t i_ioq_req_overflow : 1; 1667 uint64_t i_ioq_req_overflow:1;
1721 uint64_t i_iiq_rep_overflow : 1; 1668 uint64_t i_iiq_rep_overflow:1;
1722 uint64_t i_iiq_req_overflow : 1; 1669 uint64_t i_iiq_req_overflow:1;
1723 uint64_t i_rsvd2 : 6; 1670 uint64_t i_rsvd2:6;
1724 uint64_t i_ii_xn_rep_cred_overflow : 1; 1671 uint64_t i_ii_xn_rep_cred_overflow:1;
1725 uint64_t i_ii_xn_req_cred_overflow : 1; 1672 uint64_t i_ii_xn_req_cred_overflow:1;
1726 uint64_t i_rsvd3 : 6; 1673 uint64_t i_rsvd3:6;
1727 uint64_t i_ii_xn_invalid_cmd : 1; 1674 uint64_t i_ii_xn_invalid_cmd:1;
1728 uint64_t i_xn_ii_invalid_cmd : 1; 1675 uint64_t i_xn_ii_invalid_cmd:1;
1729 uint64_t i_rsvd4 : 30; 1676 uint64_t i_rsvd4:30;
1730 } ii_iweim_fld_s; 1677 } ii_iweim_fld_s;
1731} ii_iweim_u_t; 1678} ii_iweim_u_t;
1732 1679
1733
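IWEIM mirrors the IWEL layout bit for bit, so masked-error checks can be done on the raw images. The polarity assumed below (a set IWEIM bit suppresses the matching IWEL bit, and reserved IWEL bits read as zero) is an assumption, not something stated by the header; both helpers are hypothetical:

#include <asm/sn/shubio.h>

/* Return nonzero if IWEL reports any error that IWEIM does not mask. */
static int iwel_unmasked_errors(uint64_t iwel_raw, uint64_t iweim_raw)
{
	/* assumption: a 1 in IWEIM suppresses the matching IWEL bit */
	return (iwel_raw & ~iweim_raw) != 0;
}

/* Field-level check using the documented layout. */
static int iwel_intr_timed_out(uint64_t iwel_raw)
{
	ii_iwel_u_t iwel;

	iwel.ii_iwel_regval = iwel_raw;
	return iwel.ii_iwel_fld_s.i_intr_timed_out;
}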
1734/************************************************************************ 1680/************************************************************************
1735 * * 1681 * *
1736 * A write to this register causes a particular field in the * 1682 * A write to this register causes a particular field in the *
1737 * corresponding widget's PRB entry to be adjusted up or down by 1. * 1683 * corresponding widget's PRB entry to be adjusted up or down by 1. *
1738 * This counter should be used when recovering from error and reset * 1684 * This counter should be used when recovering from error and reset *
1739 * conditions. Note that software would be capable of causing * 1685 * conditions. Note that software would be capable of causing *
1740 * inadvertent overflow or underflow of these counters. * 1686 * inadvertent overflow or underflow of these counters. *
1741 * * 1687 * *
1742 ************************************************************************/ 1688 ************************************************************************/
1743 1689
1744typedef union ii_ipca_u { 1690typedef union ii_ipca_u {
1745 uint64_t ii_ipca_regval; 1691 uint64_t ii_ipca_regval;
1746 struct { 1692 struct {
1747 uint64_t i_wid : 4; 1693 uint64_t i_wid:4;
1748 uint64_t i_adjust : 1; 1694 uint64_t i_adjust:1;
1749 uint64_t i_rsvd_1 : 3; 1695 uint64_t i_rsvd_1:3;
1750 uint64_t i_field : 2; 1696 uint64_t i_field:2;
1751 uint64_t i_rsvd : 54; 1697 uint64_t i_rsvd:54;
1752 } ii_ipca_fld_s; 1698 } ii_ipca_fld_s;
1753} ii_ipca_u_t; 1699} ii_ipca_u_t;
1754 1700
1755
1756/************************************************************************ 1701/************************************************************************
1757 * * 1702 * *
1758 * There are 8 instances of this register. This register contains * 1703 * There are 8 instances of this register. This register contains *
1759 * the information that the II has to remember once it has launched a * 1704 * the information that the II has to remember once it has launched a *
1760 * PIO Read operation. The contents are used to form the correct * 1705 * PIO Read operation. The contents are used to form the correct *
1761 * Router Network packet and direct the Crosstalk reply to the * 1706 * Router Network packet and direct the Crosstalk reply to the *
1762 * appropriate processor. * 1707 * appropriate processor. *
1763 * * 1708 * *
1764 ************************************************************************/ 1709 ************************************************************************/
1765 1710
1766
1767typedef union ii_iprte0a_u { 1711typedef union ii_iprte0a_u {
1768 uint64_t ii_iprte0a_regval; 1712 uint64_t ii_iprte0a_regval;
1769 struct { 1713 struct {
1770 uint64_t i_rsvd_1 : 54; 1714 uint64_t i_rsvd_1:54;
1771 uint64_t i_widget : 4; 1715 uint64_t i_widget:4;
1772 uint64_t i_to_cnt : 5; 1716 uint64_t i_to_cnt:5;
1773 uint64_t i_vld : 1; 1717 uint64_t i_vld:1;
1774 } ii_iprte0a_fld_s; 1718 } ii_iprte0a_fld_s;
1775} ii_iprte0a_u_t; 1719} ii_iprte0a_u_t;
1776 1720
1777
1778/************************************************************************ 1721/************************************************************************
1779 * * 1722 * *
1780 * There are 8 instances of this register. This register contains * 1723 * There are 8 instances of this register. This register contains *
1781 * the information that the II has to remember once it has launched a * 1724 * the information that the II has to remember once it has launched a *
1782 * PIO Read operation. The contents are used to form the correct * 1725 * PIO Read operation. The contents are used to form the correct *
1783 * Router Network packet and direct the Crosstalk reply to the * 1726 * Router Network packet and direct the Crosstalk reply to the *
1784 * appropriate processor. * 1727 * appropriate processor. *
1785 * * 1728 * *
1786 ************************************************************************/ 1729 ************************************************************************/
1787 1730
1788typedef union ii_iprte1a_u { 1731typedef union ii_iprte1a_u {
1789 uint64_t ii_iprte1a_regval; 1732 uint64_t ii_iprte1a_regval;
1790 struct { 1733 struct {
1791 uint64_t i_rsvd_1 : 54; 1734 uint64_t i_rsvd_1:54;
1792 uint64_t i_widget : 4; 1735 uint64_t i_widget:4;
1793 uint64_t i_to_cnt : 5; 1736 uint64_t i_to_cnt:5;
1794 uint64_t i_vld : 1; 1737 uint64_t i_vld:1;
1795 } ii_iprte1a_fld_s; 1738 } ii_iprte1a_fld_s;
1796} ii_iprte1a_u_t; 1739} ii_iprte1a_u_t;
1797 1740
1798
1799/************************************************************************ 1741/************************************************************************
1800 * * 1742 * *
1801 * There are 8 instances of this register. This register contains * 1743 * There are 8 instances of this register. This register contains *
1802 * the information that the II has to remember once it has launched a * 1744 * the information that the II has to remember once it has launched a *
1803 * PIO Read operation. The contents are used to form the correct * 1745 * PIO Read operation. The contents are used to form the correct *
1804 * Router Network packet and direct the Crosstalk reply to the * 1746 * Router Network packet and direct the Crosstalk reply to the *
1805 * appropriate processor. * 1747 * appropriate processor. *
1806 * * 1748 * *
1807 ************************************************************************/ 1749 ************************************************************************/
1808 1750
1809typedef union ii_iprte2a_u { 1751typedef union ii_iprte2a_u {
1810 uint64_t ii_iprte2a_regval; 1752 uint64_t ii_iprte2a_regval;
1811 struct { 1753 struct {
1812 uint64_t i_rsvd_1 : 54; 1754 uint64_t i_rsvd_1:54;
1813 uint64_t i_widget : 4; 1755 uint64_t i_widget:4;
1814 uint64_t i_to_cnt : 5; 1756 uint64_t i_to_cnt:5;
1815 uint64_t i_vld : 1; 1757 uint64_t i_vld:1;
1816 } ii_iprte2a_fld_s; 1758 } ii_iprte2a_fld_s;
1817} ii_iprte2a_u_t; 1759} ii_iprte2a_u_t;
1818 1760
1819
1820/************************************************************************ 1761/************************************************************************
1821 * * 1762 * *
1822 * There are 8 instances of this register. This register contains * 1763 * There are 8 instances of this register. This register contains *
1823 * the information that the II has to remember once it has launched a * 1764 * the information that the II has to remember once it has launched a *
1824 * PIO Read operation. The contents are used to form the correct * 1765 * PIO Read operation. The contents are used to form the correct *
1825 * Router Network packet and direct the Crosstalk reply to the * 1766 * Router Network packet and direct the Crosstalk reply to the *
1826 * appropriate processor. * 1767 * appropriate processor. *
1827 * * 1768 * *
1828 ************************************************************************/ 1769 ************************************************************************/
1829 1770
1830typedef union ii_iprte3a_u { 1771typedef union ii_iprte3a_u {
1831 uint64_t ii_iprte3a_regval; 1772 uint64_t ii_iprte3a_regval;
1832 struct { 1773 struct {
1833 uint64_t i_rsvd_1 : 54; 1774 uint64_t i_rsvd_1:54;
1834 uint64_t i_widget : 4; 1775 uint64_t i_widget:4;
1835 uint64_t i_to_cnt : 5; 1776 uint64_t i_to_cnt:5;
1836 uint64_t i_vld : 1; 1777 uint64_t i_vld:1;
1837 } ii_iprte3a_fld_s; 1778 } ii_iprte3a_fld_s;
1838} ii_iprte3a_u_t; 1779} ii_iprte3a_u_t;
1839 1780
1840
1841/************************************************************************ 1781/************************************************************************
1842 * * 1782 * *
1843 * There are 8 instances of this register. This register contains * 1783 * There are 8 instances of this register. This register contains *
1844 * the information that the II has to remember once it has launched a * 1784 * the information that the II has to remember once it has launched a *
1845 * PIO Read operation. The contents are used to form the correct * 1785 * PIO Read operation. The contents are used to form the correct *
1846 * Router Network packet and direct the Crosstalk reply to the * 1786 * Router Network packet and direct the Crosstalk reply to the *
1847 * appropriate processor. * 1787 * appropriate processor. *
1848 * * 1788 * *
1849 ************************************************************************/ 1789 ************************************************************************/
1850 1790
1851typedef union ii_iprte4a_u { 1791typedef union ii_iprte4a_u {
1852 uint64_t ii_iprte4a_regval; 1792 uint64_t ii_iprte4a_regval;
1853 struct { 1793 struct {
1854 uint64_t i_rsvd_1 : 54; 1794 uint64_t i_rsvd_1:54;
1855 uint64_t i_widget : 4; 1795 uint64_t i_widget:4;
1856 uint64_t i_to_cnt : 5; 1796 uint64_t i_to_cnt:5;
1857 uint64_t i_vld : 1; 1797 uint64_t i_vld:1;
1858 } ii_iprte4a_fld_s; 1798 } ii_iprte4a_fld_s;
1859} ii_iprte4a_u_t; 1799} ii_iprte4a_u_t;
1860 1800
1861
1862/************************************************************************ 1801/************************************************************************
1863 * * 1802 * *
1864 * There are 8 instances of this register. This register contains * 1803 * There are 8 instances of this register. This register contains *
1865 * the information that the II has to remember once it has launched a * 1804 * the information that the II has to remember once it has launched a *
1866 * PIO Read operation. The contents are used to form the correct * 1805 * PIO Read operation. The contents are used to form the correct *
1867 * Router Network packet and direct the Crosstalk reply to the * 1806 * Router Network packet and direct the Crosstalk reply to the *
1868 * appropriate processor. * 1807 * appropriate processor. *
1869 * * 1808 * *
1870 ************************************************************************/ 1809 ************************************************************************/
1871 1810
1872typedef union ii_iprte5a_u { 1811typedef union ii_iprte5a_u {
1873 uint64_t ii_iprte5a_regval; 1812 uint64_t ii_iprte5a_regval;
1874 struct { 1813 struct {
1875 uint64_t i_rsvd_1 : 54; 1814 uint64_t i_rsvd_1:54;
1876 uint64_t i_widget : 4; 1815 uint64_t i_widget:4;
1877 uint64_t i_to_cnt : 5; 1816 uint64_t i_to_cnt:5;
1878 uint64_t i_vld : 1; 1817 uint64_t i_vld:1;
1879 } ii_iprte5a_fld_s; 1818 } ii_iprte5a_fld_s;
1880} ii_iprte5a_u_t; 1819} ii_iprte5a_u_t;
1881 1820
1882
1883/************************************************************************ 1821/************************************************************************
1884 * * 1822 * *
1885 * There are 8 instances of this register. This register contains * 1823 * There are 8 instances of this register. This register contains *
1886 * the information that the II has to remember once it has launched a * 1824 * the information that the II has to remember once it has launched a *
1887 * PIO Read operation. The contents are used to form the correct * 1825 * PIO Read operation. The contents are used to form the correct *
1888 * Router Network packet and direct the Crosstalk reply to the * 1826 * Router Network packet and direct the Crosstalk reply to the *
1889 * appropriate processor. * 1827 * appropriate processor. *
1890 * * 1828 * *
1891 ************************************************************************/ 1829 ************************************************************************/
1892 1830
1893typedef union ii_iprte6a_u { 1831typedef union ii_iprte6a_u {
1894 uint64_t ii_iprte6a_regval; 1832 uint64_t ii_iprte6a_regval;
1895 struct { 1833 struct {
1896 uint64_t i_rsvd_1 : 54; 1834 uint64_t i_rsvd_1:54;
1897 uint64_t i_widget : 4; 1835 uint64_t i_widget:4;
1898 uint64_t i_to_cnt : 5; 1836 uint64_t i_to_cnt:5;
1899 uint64_t i_vld : 1; 1837 uint64_t i_vld:1;
1900 } ii_iprte6a_fld_s; 1838 } ii_iprte6a_fld_s;
1901} ii_iprte6a_u_t; 1839} ii_iprte6a_u_t;
1902 1840
1903
1904/************************************************************************ 1841/************************************************************************
1905 * * 1842 * *
1906 * There are 8 instances of this register. This register contains * 1843 * There are 8 instances of this register. This register contains *
1907 * the information that the II has to remember once it has launched a * 1844 * the information that the II has to remember once it has launched a *
1908 * PIO Read operation. The contents are used to form the correct * 1845 * PIO Read operation. The contents are used to form the correct *
1909 * Router Network packet and direct the Crosstalk reply to the * 1846 * Router Network packet and direct the Crosstalk reply to the *
1910 * appropriate processor. * 1847 * appropriate processor. *
1911 * * 1848 * *
1912 ************************************************************************/ 1849 ************************************************************************/
1913 1850
1914typedef union ii_iprte7a_u { 1851typedef union ii_iprte7a_u {
1915 uint64_t ii_iprte7a_regval; 1852 uint64_t ii_iprte7a_regval;
1916 struct { 1853 struct {
1917 uint64_t i_rsvd_1 : 54; 1854 uint64_t i_rsvd_1:54;
1918 uint64_t i_widget : 4; 1855 uint64_t i_widget:4;
1919 uint64_t i_to_cnt : 5; 1856 uint64_t i_to_cnt:5;
1920 uint64_t i_vld : 1; 1857 uint64_t i_vld:1;
1921 } ii_iprtea7_fld_s; 1858 } ii_iprtea7_fld_s;
1922} ii_iprte7a_u_t; 1859} ii_iprte7a_u_t;
1923 1860
1924
1925
1926/************************************************************************ 1861/************************************************************************
1927 * * 1862 * *
1928 * There are 8 instances of this register. This register contains * 1863 * There are 8 instances of this register. This register contains *
1929 * the information that the II has to remember once it has launched a * 1864 * the information that the II has to remember once it has launched a *
1930 * PIO Read operation. The contents are used to form the correct * 1865 * PIO Read operation. The contents are used to form the correct *
1931 * Router Network packet and direct the Crosstalk reply to the * 1866 * Router Network packet and direct the Crosstalk reply to the *
1932 * appropriate processor. * 1867 * appropriate processor. *
1933 * * 1868 * *
1934 ************************************************************************/ 1869 ************************************************************************/
1935 1870
1936
1937typedef union ii_iprte0b_u { 1871typedef union ii_iprte0b_u {
1938 uint64_t ii_iprte0b_regval; 1872 uint64_t ii_iprte0b_regval;
1939 struct { 1873 struct {
1940 uint64_t i_rsvd_1 : 3; 1874 uint64_t i_rsvd_1:3;
1941 uint64_t i_address : 47; 1875 uint64_t i_address:47;
1942 uint64_t i_init : 3; 1876 uint64_t i_init:3;
1943 uint64_t i_source : 11; 1877 uint64_t i_source:11;
1944 } ii_iprte0b_fld_s; 1878 } ii_iprte0b_fld_s;
1945} ii_iprte0b_u_t; 1879} ii_iprte0b_u_t;
1946 1880
1947
1948/************************************************************************ 1881/************************************************************************
1949 * * 1882 * *
1950 * There are 8 instances of this register. This register contains * 1883 * There are 8 instances of this register. This register contains *
1951 * the information that the II has to remember once it has launched a * 1884 * the information that the II has to remember once it has launched a *
1952 * PIO Read operation. The contents are used to form the correct * 1885 * PIO Read operation. The contents are used to form the correct *
1953 * Router Network packet and direct the Crosstalk reply to the * 1886 * Router Network packet and direct the Crosstalk reply to the *
1954 * appropriate processor. * 1887 * appropriate processor. *
1955 * * 1888 * *
1956 ************************************************************************/ 1889 ************************************************************************/
1957 1890
1958typedef union ii_iprte1b_u { 1891typedef union ii_iprte1b_u {
1959 uint64_t ii_iprte1b_regval; 1892 uint64_t ii_iprte1b_regval;
1960 struct { 1893 struct {
1961 uint64_t i_rsvd_1 : 3; 1894 uint64_t i_rsvd_1:3;
1962 uint64_t i_address : 47; 1895 uint64_t i_address:47;
1963 uint64_t i_init : 3; 1896 uint64_t i_init:3;
1964 uint64_t i_source : 11; 1897 uint64_t i_source:11;
1965 } ii_iprte1b_fld_s; 1898 } ii_iprte1b_fld_s;
1966} ii_iprte1b_u_t; 1899} ii_iprte1b_u_t;
1967 1900
1968
1969/************************************************************************ 1901/************************************************************************
1970 * * 1902 * *
1971 * There are 8 instances of this register. This register contains * 1903 * There are 8 instances of this register. This register contains *
1972 * the information that the II has to remember once it has launched a * 1904 * the information that the II has to remember once it has launched a *
1973 * PIO Read operation. The contents are used to form the correct * 1905 * PIO Read operation. The contents are used to form the correct *
1974 * Router Network packet and direct the Crosstalk reply to the * 1906 * Router Network packet and direct the Crosstalk reply to the *
1975 * appropriate processor. * 1907 * appropriate processor. *
1976 * * 1908 * *
1977 ************************************************************************/ 1909 ************************************************************************/
1978 1910
1979typedef union ii_iprte2b_u { 1911typedef union ii_iprte2b_u {
1980 uint64_t ii_iprte2b_regval; 1912 uint64_t ii_iprte2b_regval;
1981 struct { 1913 struct {
1982 uint64_t i_rsvd_1 : 3; 1914 uint64_t i_rsvd_1:3;
1983 uint64_t i_address : 47; 1915 uint64_t i_address:47;
1984 uint64_t i_init : 3; 1916 uint64_t i_init:3;
1985 uint64_t i_source : 11; 1917 uint64_t i_source:11;
1986 } ii_iprte2b_fld_s; 1918 } ii_iprte2b_fld_s;
1987} ii_iprte2b_u_t; 1919} ii_iprte2b_u_t;
1988 1920
1989
1990/************************************************************************ 1921/************************************************************************
1991 * * 1922 * *
1992 * There are 8 instances of this register. This register contains * 1923 * There are 8 instances of this register. This register contains *
1993 * the information that the II has to remember once it has launched a * 1924 * the information that the II has to remember once it has launched a *
1994 * PIO Read operation. The contents are used to form the correct * 1925 * PIO Read operation. The contents are used to form the correct *
1995 * Router Network packet and direct the Crosstalk reply to the * 1926 * Router Network packet and direct the Crosstalk reply to the *
1996 * appropriate processor. * 1927 * appropriate processor. *
1997 * * 1928 * *
1998 ************************************************************************/ 1929 ************************************************************************/
1999 1930
2000typedef union ii_iprte3b_u { 1931typedef union ii_iprte3b_u {
2001 uint64_t ii_iprte3b_regval; 1932 uint64_t ii_iprte3b_regval;
2002 struct { 1933 struct {
2003 uint64_t i_rsvd_1 : 3; 1934 uint64_t i_rsvd_1:3;
2004 uint64_t i_address : 47; 1935 uint64_t i_address:47;
2005 uint64_t i_init : 3; 1936 uint64_t i_init:3;
2006 uint64_t i_source : 11; 1937 uint64_t i_source:11;
2007 } ii_iprte3b_fld_s; 1938 } ii_iprte3b_fld_s;
2008} ii_iprte3b_u_t; 1939} ii_iprte3b_u_t;
2009 1940
2010
2011/************************************************************************ 1941/************************************************************************
2012 * * 1942 * *
2013 * There are 8 instances of this register. This register contains * 1943 * There are 8 instances of this register. This register contains *
2014 * the information that the II has to remember once it has launched a * 1944 * the information that the II has to remember once it has launched a *
2015 * PIO Read operation. The contents are used to form the correct * 1945 * PIO Read operation. The contents are used to form the correct *
2016 * Router Network packet and direct the Crosstalk reply to the * 1946 * Router Network packet and direct the Crosstalk reply to the *
2017 * appropriate processor. * 1947 * appropriate processor. *
2018 * * 1948 * *
2019 ************************************************************************/ 1949 ************************************************************************/
2020 1950
2021typedef union ii_iprte4b_u { 1951typedef union ii_iprte4b_u {
2022 uint64_t ii_iprte4b_regval; 1952 uint64_t ii_iprte4b_regval;
2023 struct { 1953 struct {
2024 uint64_t i_rsvd_1 : 3; 1954 uint64_t i_rsvd_1:3;
2025 uint64_t i_address : 47; 1955 uint64_t i_address:47;
2026 uint64_t i_init : 3; 1956 uint64_t i_init:3;
2027 uint64_t i_source : 11; 1957 uint64_t i_source:11;
2028 } ii_iprte4b_fld_s; 1958 } ii_iprte4b_fld_s;
2029} ii_iprte4b_u_t; 1959} ii_iprte4b_u_t;
2030 1960
2031
2032/************************************************************************ 1961/************************************************************************
2033 * * 1962 * *
2034 * There are 8 instances of this register. This register contains * 1963 * There are 8 instances of this register. This register contains *
2035 * the information that the II has to remember once it has launched a * 1964 * the information that the II has to remember once it has launched a *
2036 * PIO Read operation. The contents are used to form the correct * 1965 * PIO Read operation. The contents are used to form the correct *
2037 * Router Network packet and direct the Crosstalk reply to the * 1966 * Router Network packet and direct the Crosstalk reply to the *
2038 * appropriate processor. * 1967 * appropriate processor. *
2039 * * 1968 * *
2040 ************************************************************************/ 1969 ************************************************************************/
2041 1970
2042typedef union ii_iprte5b_u { 1971typedef union ii_iprte5b_u {
2043 uint64_t ii_iprte5b_regval; 1972 uint64_t ii_iprte5b_regval;
2044 struct { 1973 struct {
2045 uint64_t i_rsvd_1 : 3; 1974 uint64_t i_rsvd_1:3;
2046 uint64_t i_address : 47; 1975 uint64_t i_address:47;
2047 uint64_t i_init : 3; 1976 uint64_t i_init:3;
2048 uint64_t i_source : 11; 1977 uint64_t i_source:11;
2049 } ii_iprte5b_fld_s; 1978 } ii_iprte5b_fld_s;
2050} ii_iprte5b_u_t; 1979} ii_iprte5b_u_t;
2051 1980
2052
2053/************************************************************************ 1981/************************************************************************
2054 * * 1982 * *
2055 * There are 8 instances of this register. This register contains * 1983 * There are 8 instances of this register. This register contains *
2056 * the information that the II has to remember once it has launched a * 1984 * the information that the II has to remember once it has launched a *
2057 * PIO Read operation. The contents are used to form the correct * 1985 * PIO Read operation. The contents are used to form the correct *
2058 * Router Network packet and direct the Crosstalk reply to the * 1986 * Router Network packet and direct the Crosstalk reply to the *
2059 * appropriate processor. * 1987 * appropriate processor. *
2060 * * 1988 * *
2061 ************************************************************************/ 1989 ************************************************************************/
2062 1990
2063typedef union ii_iprte6b_u { 1991typedef union ii_iprte6b_u {
2064 uint64_t ii_iprte6b_regval; 1992 uint64_t ii_iprte6b_regval;
2065 struct { 1993 struct {
2066 uint64_t i_rsvd_1 : 3; 1994 uint64_t i_rsvd_1:3;
2067 uint64_t i_address : 47; 1995 uint64_t i_address:47;
2068 uint64_t i_init : 3; 1996 uint64_t i_init:3;
2069 uint64_t i_source : 11; 1997 uint64_t i_source:11;
2070 1998
2071 } ii_iprte6b_fld_s; 1999 } ii_iprte6b_fld_s;
2072} ii_iprte6b_u_t; 2000} ii_iprte6b_u_t;
2073 2001
2074
2075/************************************************************************ 2002/************************************************************************
2076 * * 2003 * *
2077 * There are 8 instances of this register. This register contains * 2004 * There are 8 instances of this register. This register contains *
2078 * the information that the II has to remember once it has launched a * 2005 * the information that the II has to remember once it has launched a *
2079 * PIO Read operation. The contents are used to form the correct * 2006 * PIO Read operation. The contents are used to form the correct *
2080 * Router Network packet and direct the Crosstalk reply to the * 2007 * Router Network packet and direct the Crosstalk reply to the *
2081 * appropriate processor. * 2008 * appropriate processor. *
2082 * * 2009 * *
2083 ************************************************************************/ 2010 ************************************************************************/
2084 2011
2085typedef union ii_iprte7b_u { 2012typedef union ii_iprte7b_u {
2086 uint64_t ii_iprte7b_regval; 2013 uint64_t ii_iprte7b_regval;
2087 struct { 2014 struct {
2088 uint64_t i_rsvd_1 : 3; 2015 uint64_t i_rsvd_1:3;
2089 uint64_t i_address : 47; 2016 uint64_t i_address:47;
2090 uint64_t i_init : 3; 2017 uint64_t i_init:3;
2091 uint64_t i_source : 11; 2018 uint64_t i_source:11;
2092 } ii_iprte7b_fld_s; 2019 } ii_iprte7b_fld_s;
2093} ii_iprte7b_u_t; 2020} ii_iprte7b_u_t;
2094 2021
2095
2096/************************************************************************ 2022/************************************************************************
2097 * * 2023 * *
2098 * Description: SHub II contains a feature which did not exist in * 2024 * Description: SHub II contains a feature which did not exist in *
2099 * the Hub which automatically cleans up after a Read Response * 2025 * the Hub which automatically cleans up after a Read Response *
2100 * timeout, including deallocation of the IPRTE and recovery of IBuf * 2026 * timeout, including deallocation of the IPRTE and recovery of IBuf *
@@ -2108,23 +2034,22 @@ typedef union ii_iprte7b_u {
2108 * Note that this register does not affect the contents of the IPRTE * 2034 * Note that this register does not affect the contents of the IPRTE *
2109 * registers. The Valid bits in those registers have to be * 2035 * registers. The Valid bits in those registers have to be *
2110 * specifically turned off by software. * 2036 * specifically turned off by software. *
2111 * * 2037 * *
2112 ************************************************************************/ 2038 ************************************************************************/
2113 2039
2114typedef union ii_ipdr_u { 2040typedef union ii_ipdr_u {
2115 uint64_t ii_ipdr_regval; 2041 uint64_t ii_ipdr_regval;
2116 struct { 2042 struct {
2117 uint64_t i_te : 3; 2043 uint64_t i_te:3;
2118 uint64_t i_rsvd_1 : 1; 2044 uint64_t i_rsvd_1:1;
2119 uint64_t i_pnd : 1; 2045 uint64_t i_pnd:1;
2120 uint64_t i_init_rpcnt : 1; 2046 uint64_t i_init_rpcnt:1;
2121 uint64_t i_rsvd : 58; 2047 uint64_t i_rsvd:58;
2122 } ii_ipdr_fld_s; 2048 } ii_ipdr_fld_s;
2123} ii_ipdr_u_t; 2049} ii_ipdr_u_t;
2124 2050
2125
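As noted above, IPDR leaves the IPRTE entries themselves untouched, so software must drop the VALID bit of a recovered entry explicitly. A sketch using the shared "A" layout (entries 0 through 7 are identical); iprte_clear_valid is a hypothetical helper and the write-back to the IPRTEnA MMR is left to the caller:

#include <asm/sn/shubio.h>

static uint64_t iprte_clear_valid(uint64_t prtea_raw)
{
	ii_iprte0a_u_t prte;			/* same layout for entries 0..7 */

	prte.ii_iprte0a_regval = prtea_raw;
	prte.ii_iprte0a_fld_s.i_vld = 0;	/* software turns VALID off itself */
	return prte.ii_iprte0a_regval;		/* caller writes this back */
}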
2126/************************************************************************ 2051/************************************************************************
2127 * * 2052 * *
2128 * A write to this register causes a CRB entry to be returned to the * 2053 * A write to this register causes a CRB entry to be returned to the *
2129 * queue of free CRBs. The entry should have previously been cleared * 2054 * queue of free CRBs. The entry should have previously been cleared *
2130 * (mark bit) via backdoor access to the pertinent CRB entry. This * 2055 * (mark bit) via backdoor access to the pertinent CRB entry. This *
@@ -2137,21 +2062,20 @@ typedef union ii_ipdr_u {
2137 * software clears the mark bit, and finally 4) software writes to * 2062 * software clears the mark bit, and finally 4) software writes to *
2138 * the ICDR register to return the CRB entry to the list of free CRB * 2063 * the ICDR register to return the CRB entry to the list of free CRB *
2139 * entries. * 2064 * entries. *
2140 * * 2065 * *
2141 ************************************************************************/ 2066 ************************************************************************/
2142 2067
2143typedef union ii_icdr_u { 2068typedef union ii_icdr_u {
2144 uint64_t ii_icdr_regval; 2069 uint64_t ii_icdr_regval;
2145 struct { 2070 struct {
2146 uint64_t i_crb_num : 4; 2071 uint64_t i_crb_num:4;
2147 uint64_t i_pnd : 1; 2072 uint64_t i_pnd:1;
2148 uint64_t i_rsvd : 59; 2073 uint64_t i_rsvd:59;
2149 } ii_icdr_fld_s; 2074 } ii_icdr_fld_s;
2150} ii_icdr_u_t; 2075} ii_icdr_u_t;
2151 2076
2152
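A sketch of the last step of the CRB recovery sequence described above: composing the ICDR image that returns a cleaned entry to the free list. Polling the PND bit after the write is an assumption about how completion is observed; icdr_free_crb is a hypothetical helper:

#include <asm/sn/shubio.h>

static uint64_t icdr_free_crb(unsigned int crb_num)
{
	ii_icdr_u_t icdr;

	icdr.ii_icdr_regval = 0;
	icdr.ii_icdr_fld_s.i_crb_num = crb_num;	/* entry to hand back (0..0xE) */
	return icdr.ii_icdr_regval;		/* caller writes this to ICDR and
						 * may then poll i_pnd (assumption) */
}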
2153/************************************************************************ 2077/************************************************************************
2154 * * 2078 * *
2155 * This register provides debug access to two FIFOs inside of II. * 2079 * This register provides debug access to two FIFOs inside of II. *
2156 * Both IOQ_MAX* fields of this register contain the instantaneous * 2080 * Both IOQ_MAX* fields of this register contain the instantaneous *
2157 * depth (in units of the number of available entries) of the * 2081 * depth (in units of the number of available entries) of the *
@@ -2164,130 +2088,124 @@ typedef union ii_icdr_u {
2164 * this register is written. If there are any active entries in any * 2088 * this register is written. If there are any active entries in any *
2165 * of these FIFOs when this register is written, the results are * 2089 * of these FIFOs when this register is written, the results are *
2166 * undefined. * 2090 * undefined. *
2167 * * 2091 * *
2168 ************************************************************************/ 2092 ************************************************************************/
2169 2093
2170typedef union ii_ifdr_u { 2094typedef union ii_ifdr_u {
2171 uint64_t ii_ifdr_regval; 2095 uint64_t ii_ifdr_regval;
2172 struct { 2096 struct {
2173 uint64_t i_ioq_max_rq : 7; 2097 uint64_t i_ioq_max_rq:7;
2174 uint64_t i_set_ioq_rq : 1; 2098 uint64_t i_set_ioq_rq:1;
2175 uint64_t i_ioq_max_rp : 7; 2099 uint64_t i_ioq_max_rp:7;
2176 uint64_t i_set_ioq_rp : 1; 2100 uint64_t i_set_ioq_rp:1;
2177 uint64_t i_rsvd : 48; 2101 uint64_t i_rsvd:48;
2178 } ii_ifdr_fld_s; 2102 } ii_ifdr_fld_s;
2179} ii_ifdr_u_t; 2103} ii_ifdr_u_t;
2180 2104
2181
2182/************************************************************************ 2105/************************************************************************
2183 * * 2106 * *
2184 * This register allows the II to become sluggish in removing * 2107 * This register allows the II to become sluggish in removing *
2185 * messages from its inbound queue (IIQ). This will cause messages to * 2108 * messages from its inbound queue (IIQ). This will cause messages to *
2186 * back up in either virtual channel. Disabling the "molasses" mode * 2109 * back up in either virtual channel. Disabling the "molasses" mode *
2187 * subsequently allows the II to be tested under stress. In the * 2110 * subsequently allows the II to be tested under stress. In the *
2188 * sluggish ("Molasses") mode, the localized effects of congestion * 2111 * sluggish ("Molasses") mode, the localized effects of congestion *
2189 * can be observed. * 2112 * can be observed. *
2190 * * 2113 * *
2191 ************************************************************************/ 2114 ************************************************************************/
2192 2115
2193typedef union ii_iiap_u { 2116typedef union ii_iiap_u {
2194 uint64_t ii_iiap_regval; 2117 uint64_t ii_iiap_regval;
2195 struct { 2118 struct {
2196 uint64_t i_rq_mls : 6; 2119 uint64_t i_rq_mls:6;
2197 uint64_t i_rsvd_1 : 2; 2120 uint64_t i_rsvd_1:2;
2198 uint64_t i_rp_mls : 6; 2121 uint64_t i_rp_mls:6;
2199 uint64_t i_rsvd : 50; 2122 uint64_t i_rsvd:50;
2200 } ii_iiap_fld_s; 2123 } ii_iiap_fld_s;
2201} ii_iiap_u_t; 2124} ii_iiap_u_t;
2202 2125
2203
2204/************************************************************************ 2126/************************************************************************
2205 * * 2127 * *
2206 * This register allows several parameters of CRB operation to be * 2128 * This register allows several parameters of CRB operation to be *
2207 * set. Note that writing to this register can have catastrophic side * 2129 * set. Note that writing to this register can have catastrophic side *
2208 * effects, if the CRB is not quiescent, i.e. if the CRB is * 2130 * effects, if the CRB is not quiescent, i.e. if the CRB is *
2209 * processing protocol messages when the write occurs. * 2131 * processing protocol messages when the write occurs. *
2210 * * 2132 * *
2211 ************************************************************************/ 2133 ************************************************************************/
2212 2134
2213typedef union ii_icmr_u { 2135typedef union ii_icmr_u {
2214 uint64_t ii_icmr_regval; 2136 uint64_t ii_icmr_regval;
2215 struct { 2137 struct {
2216 uint64_t i_sp_msg : 1; 2138 uint64_t i_sp_msg:1;
2217 uint64_t i_rd_hdr : 1; 2139 uint64_t i_rd_hdr:1;
2218 uint64_t i_rsvd_4 : 2; 2140 uint64_t i_rsvd_4:2;
2219 uint64_t i_c_cnt : 4; 2141 uint64_t i_c_cnt:4;
2220 uint64_t i_rsvd_3 : 4; 2142 uint64_t i_rsvd_3:4;
2221 uint64_t i_clr_rqpd : 1; 2143 uint64_t i_clr_rqpd:1;
2222 uint64_t i_clr_rppd : 1; 2144 uint64_t i_clr_rppd:1;
2223 uint64_t i_rsvd_2 : 2; 2145 uint64_t i_rsvd_2:2;
2224 uint64_t i_fc_cnt : 4; 2146 uint64_t i_fc_cnt:4;
2225 uint64_t i_crb_vld : 15; 2147 uint64_t i_crb_vld:15;
2226 uint64_t i_crb_mark : 15; 2148 uint64_t i_crb_mark:15;
2227 uint64_t i_rsvd_1 : 2; 2149 uint64_t i_rsvd_1:2;
2228 uint64_t i_precise : 1; 2150 uint64_t i_precise:1;
2229 uint64_t i_rsvd : 11; 2151 uint64_t i_rsvd:11;
2230 } ii_icmr_fld_s; 2152 } ii_icmr_fld_s;
2231} ii_icmr_u_t; 2153} ii_icmr_u_t;
2232 2154
2233
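The CRB_VLD field is a 15-bit vector with one bit per CRB entry, so a population count of it gives the number of entries currently in use. A small sketch, assuming the raw ICMR image was read elsewhere; icmr_valid_crbs is a hypothetical helper:

#include <asm/sn/shubio.h>

static unsigned int icmr_valid_crbs(uint64_t icmr_raw)
{
	ii_icmr_u_t icmr;
	unsigned int vld, count = 0;

	icmr.ii_icmr_regval = icmr_raw;
	vld = icmr.ii_icmr_fld_s.i_crb_vld;	/* one bit per CRB entry (15 total) */

	while (vld) {				/* population count */
		count += vld & 1;
		vld >>= 1;
	}
	return count;
}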
2234/************************************************************************ 2155/************************************************************************
2235 * * 2156 * *
2236 * This register allows control of the table portion of the CRB * 2157 * This register allows control of the table portion of the CRB *
2237 * logic via software. Control operations from this register have * 2158 * logic via software. Control operations from this register have *
2238 * priority over all incoming Crosstalk or BTE requests. * 2159 * priority over all incoming Crosstalk or BTE requests. *
2239 * * 2160 * *
2240 ************************************************************************/ 2161 ************************************************************************/
2241 2162
2242typedef union ii_iccr_u { 2163typedef union ii_iccr_u {
2243 uint64_t ii_iccr_regval; 2164 uint64_t ii_iccr_regval;
2244 struct { 2165 struct {
2245 uint64_t i_crb_num : 4; 2166 uint64_t i_crb_num:4;
2246 uint64_t i_rsvd_1 : 4; 2167 uint64_t i_rsvd_1:4;
2247 uint64_t i_cmd : 8; 2168 uint64_t i_cmd:8;
2248 uint64_t i_pending : 1; 2169 uint64_t i_pending:1;
2249 uint64_t i_rsvd : 47; 2170 uint64_t i_rsvd:47;
2250 } ii_iccr_fld_s; 2171 } ii_iccr_fld_s;
2251} ii_iccr_u_t; 2172} ii_iccr_u_t;
2252 2173
2253
2254/************************************************************************ 2174/************************************************************************
2255 * * 2175 * *
2256 * This register allows the maximum timeout value to be programmed. * 2176 * This register allows the maximum timeout value to be programmed. *
2257 * * 2177 * *
2258 ************************************************************************/ 2178 ************************************************************************/
2259 2179
2260typedef union ii_icto_u { 2180typedef union ii_icto_u {
2261 uint64_t ii_icto_regval; 2181 uint64_t ii_icto_regval;
2262 struct { 2182 struct {
2263 uint64_t i_timeout : 8; 2183 uint64_t i_timeout:8;
2264 uint64_t i_rsvd : 56; 2184 uint64_t i_rsvd:56;
2265 } ii_icto_fld_s; 2185 } ii_icto_fld_s;
2266} ii_icto_u_t; 2186} ii_icto_u_t;
2267 2187
2268
2269/************************************************************************ 2188/************************************************************************
2270 * * 2189 * *
2271 * This register allows the timeout prescaler to be programmed. An * 2190 * This register allows the timeout prescaler to be programmed. An *
2272 * internal counter is associated with this register. When the * 2191 * internal counter is associated with this register. When the *
2273 * internal counter reaches the value of the PRESCALE field, the * 2192 * internal counter reaches the value of the PRESCALE field, the *
2274 * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] * 2193 * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
2275 * field). The internal counter resets to zero, and then continues * 2194 * field). The internal counter resets to zero, and then continues *
2276 * counting. * 2195 * counting. *
2277 * * 2196 * *
2278 ************************************************************************/ 2197 ************************************************************************/
2279 2198
2280typedef union ii_ictp_u { 2199typedef union ii_ictp_u {
2281 uint64_t ii_ictp_regval; 2200 uint64_t ii_ictp_regval;
2282 struct { 2201 struct {
2283 uint64_t i_prescale : 24; 2202 uint64_t i_prescale:24;
2284 uint64_t i_rsvd : 40; 2203 uint64_t i_rsvd:40;
2285 } ii_ictp_fld_s; 2204 } ii_ictp_fld_s;
2286} ii_ictp_u_t; 2205} ii_ictp_u_t;
2287 2206
2288
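Taken together, ICTP and ICTO bound how long a CRB may remain outstanding: each valid CRB's TIMEOUT field advances once every PRESCALE internal clocks, and the entry is assumed here to expire once that field reaches ICTO[TIMEOUT]. A sketch of the resulting tick count under that assumption (crb_timeout_ticks is a hypothetical helper):

#include <asm/sn/shubio.h>

/* Approximate CRB lifetime in internal clock ticks (see assumption above). */
static uint64_t crb_timeout_ticks(uint64_t icto_raw, uint64_t ictp_raw)
{
	ii_icto_u_t icto;
	ii_ictp_u_t ictp;

	icto.ii_icto_regval = icto_raw;
	ictp.ii_ictp_regval = ictp_raw;

	return (uint64_t)icto.ii_icto_fld_s.i_timeout *
	       ictp.ii_ictp_fld_s.i_prescale;
}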
2289/************************************************************************ 2207/************************************************************************
2290 * * 2208 * *
2291 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2209 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2292 * used for Crosstalk operations (both cacheline and partial * 2210 * used for Crosstalk operations (both cacheline and partial *
2293 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2211 * operations) or BTE/IO. Because the CRB entries are very wide, five *
@@ -2306,243 +2224,234 @@ typedef union ii_ictp_u {
2306 * recovering any potential error state from before the reset). * 2224 * recovering any potential error state from before the reset). *
2307 * The following four tables summarize the format for the four * 2225 * The following four tables summarize the format for the four *
2308 * registers that are used for each ICRB# Entry. * 2226 * registers that are used for each ICRB# Entry. *
2309 * * 2227 * *
2310 ************************************************************************/ 2228 ************************************************************************/
2311 2229
2312typedef union ii_icrb0_a_u { 2230typedef union ii_icrb0_a_u {
2313 uint64_t ii_icrb0_a_regval; 2231 uint64_t ii_icrb0_a_regval;
2314 struct { 2232 struct {
2315 uint64_t ia_iow : 1; 2233 uint64_t ia_iow:1;
2316 uint64_t ia_vld : 1; 2234 uint64_t ia_vld:1;
2317 uint64_t ia_addr : 47; 2235 uint64_t ia_addr:47;
2318 uint64_t ia_tnum : 5; 2236 uint64_t ia_tnum:5;
2319 uint64_t ia_sidn : 4; 2237 uint64_t ia_sidn:4;
2320 uint64_t ia_rsvd : 6; 2238 uint64_t ia_rsvd:6;
2321 } ii_icrb0_a_fld_s; 2239 } ii_icrb0_a_fld_s;
2322} ii_icrb0_a_u_t; 2240} ii_icrb0_a_u_t;
2323 2241
2324
2325/************************************************************************ 2242/************************************************************************
2326 * * 2243 * *
2327 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2244 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2328 * used for Crosstalk operations (both cacheline and partial * 2245 * used for Crosstalk operations (both cacheline and partial *
2329 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2246 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2330 * registers (_A to _E) are required to read and write each entry. * 2247 * registers (_A to _E) are required to read and write each entry. *
2331 * * 2248 * *
2332 ************************************************************************/ 2249 ************************************************************************/
2333 2250
2334typedef union ii_icrb0_b_u { 2251typedef union ii_icrb0_b_u {
2335 uint64_t ii_icrb0_b_regval; 2252 uint64_t ii_icrb0_b_regval;
2336 struct { 2253 struct {
2337 uint64_t ib_xt_err : 1; 2254 uint64_t ib_xt_err:1;
2338 uint64_t ib_mark : 1; 2255 uint64_t ib_mark:1;
2339 uint64_t ib_ln_uce : 1; 2256 uint64_t ib_ln_uce:1;
2340 uint64_t ib_errcode : 3; 2257 uint64_t ib_errcode:3;
2341 uint64_t ib_error : 1; 2258 uint64_t ib_error:1;
2342 uint64_t ib_stall__bte_1 : 1; 2259 uint64_t ib_stall__bte_1:1;
2343 uint64_t ib_stall__bte_0 : 1; 2260 uint64_t ib_stall__bte_0:1;
2344 uint64_t ib_stall__intr : 1; 2261 uint64_t ib_stall__intr:1;
2345 uint64_t ib_stall_ib : 1; 2262 uint64_t ib_stall_ib:1;
2346 uint64_t ib_intvn : 1; 2263 uint64_t ib_intvn:1;
2347 uint64_t ib_wb : 1; 2264 uint64_t ib_wb:1;
2348 uint64_t ib_hold : 1; 2265 uint64_t ib_hold:1;
2349 uint64_t ib_ack : 1; 2266 uint64_t ib_ack:1;
2350 uint64_t ib_resp : 1; 2267 uint64_t ib_resp:1;
2351 uint64_t ib_ack_cnt : 11; 2268 uint64_t ib_ack_cnt:11;
2352 uint64_t ib_rsvd : 7; 2269 uint64_t ib_rsvd:7;
2353 uint64_t ib_exc : 5; 2270 uint64_t ib_exc:5;
2354 uint64_t ib_init : 3; 2271 uint64_t ib_init:3;
2355 uint64_t ib_imsg : 8; 2272 uint64_t ib_imsg:8;
2356 uint64_t ib_imsgtype : 2; 2273 uint64_t ib_imsgtype:2;
2357 uint64_t ib_use_old : 1; 2274 uint64_t ib_use_old:1;
2358 uint64_t ib_rsvd_1 : 11; 2275 uint64_t ib_rsvd_1:11;
2359 } ii_icrb0_b_fld_s; 2276 } ii_icrb0_b_fld_s;
2360} ii_icrb0_b_u_t; 2277} ii_icrb0_b_u_t;
2361 2278
2362
2363/************************************************************************ 2279/************************************************************************
2364 * * 2280 * *
2365 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2281 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2366 * used for Crosstalk operations (both cacheline and partial * 2282 * used for Crosstalk operations (both cacheline and partial *
2367 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2283 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2368 * registers (_A to _E) are required to read and write each entry. * 2284 * registers (_A to _E) are required to read and write each entry. *
2369 * * 2285 * *
2370 ************************************************************************/ 2286 ************************************************************************/
2371 2287
2372typedef union ii_icrb0_c_u { 2288typedef union ii_icrb0_c_u {
2373 uint64_t ii_icrb0_c_regval; 2289 uint64_t ii_icrb0_c_regval;
2374 struct { 2290 struct {
2375 uint64_t ic_source : 15; 2291 uint64_t ic_source:15;
2376 uint64_t ic_size : 2; 2292 uint64_t ic_size:2;
2377 uint64_t ic_ct : 1; 2293 uint64_t ic_ct:1;
2378 uint64_t ic_bte_num : 1; 2294 uint64_t ic_bte_num:1;
2379 uint64_t ic_gbr : 1; 2295 uint64_t ic_gbr:1;
2380 uint64_t ic_resprqd : 1; 2296 uint64_t ic_resprqd:1;
2381 uint64_t ic_bo : 1; 2297 uint64_t ic_bo:1;
2382 uint64_t ic_suppl : 15; 2298 uint64_t ic_suppl:15;
2383 uint64_t ic_rsvd : 27; 2299 uint64_t ic_rsvd:27;
2384 } ii_icrb0_c_fld_s; 2300 } ii_icrb0_c_fld_s;
2385} ii_icrb0_c_u_t; 2301} ii_icrb0_c_u_t;
2386 2302
2387
2388/************************************************************************ 2303/************************************************************************
2389 * * 2304 * *
2390 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2305 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2391 * used for Crosstalk operations (both cacheline and partial * 2306 * used for Crosstalk operations (both cacheline and partial *
2392 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2307 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2393 * registers (_A to _E) are required to read and write each entry. * 2308 * registers (_A to _E) are required to read and write each entry. *
2394 * * 2309 * *
2395 ************************************************************************/ 2310 ************************************************************************/
2396 2311
2397typedef union ii_icrb0_d_u { 2312typedef union ii_icrb0_d_u {
2398 uint64_t ii_icrb0_d_regval; 2313 uint64_t ii_icrb0_d_regval;
2399 struct { 2314 struct {
2400 uint64_t id_pa_be : 43; 2315 uint64_t id_pa_be:43;
2401 uint64_t id_bte_op : 1; 2316 uint64_t id_bte_op:1;
2402 uint64_t id_pr_psc : 4; 2317 uint64_t id_pr_psc:4;
2403 uint64_t id_pr_cnt : 4; 2318 uint64_t id_pr_cnt:4;
2404 uint64_t id_sleep : 1; 2319 uint64_t id_sleep:1;
2405 uint64_t id_rsvd : 11; 2320 uint64_t id_rsvd:11;
2406 } ii_icrb0_d_fld_s; 2321 } ii_icrb0_d_fld_s;
2407} ii_icrb0_d_u_t; 2322} ii_icrb0_d_u_t;
2408 2323
2409
2410/************************************************************************ 2324/************************************************************************
2411 * * 2325 * *
2412 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * 2326 * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
2413 * used for Crosstalk operations (both cacheline and partial * 2327 * used for Crosstalk operations (both cacheline and partial *
2414 * operations) or BTE/IO. Because the CRB entries are very wide, five * 2328 * operations) or BTE/IO. Because the CRB entries are very wide, five *
2415 * registers (_A to _E) are required to read and write each entry. * 2329 * registers (_A to _E) are required to read and write each entry. *
2416 * * 2330 * *
2417 ************************************************************************/ 2331 ************************************************************************/
2418 2332
2419typedef union ii_icrb0_e_u { 2333typedef union ii_icrb0_e_u {
2420 uint64_t ii_icrb0_e_regval; 2334 uint64_t ii_icrb0_e_regval;
2421 struct { 2335 struct {
2422 uint64_t ie_timeout : 8; 2336 uint64_t ie_timeout:8;
2423 uint64_t ie_context : 15; 2337 uint64_t ie_context:15;
2424 uint64_t ie_rsvd : 1; 2338 uint64_t ie_rsvd:1;
2425 uint64_t ie_tvld : 1; 2339 uint64_t ie_tvld:1;
2426 uint64_t ie_cvld : 1; 2340 uint64_t ie_cvld:1;
2427 uint64_t ie_rsvd_0 : 38; 2341 uint64_t ie_rsvd_0:38;
2428 } ii_icrb0_e_fld_s; 2342 } ii_icrb0_e_fld_s;
2429} ii_icrb0_e_u_t; 2343} ii_icrb0_e_u_t;
2430 2344
2431
2432/************************************************************************ 2345/************************************************************************
2433 * * 2346 * *
2434 * This register contains the lower 64 bits of the header of the * 2347 * This register contains the lower 64 bits of the header of the *
2435 * spurious message captured by II. Valid when the SP_MSG bit in ICMR * 2348 * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
2436 * register is set. * 2349 * register is set. *
2437 * * 2350 * *
2438 ************************************************************************/ 2351 ************************************************************************/
2439 2352
2440typedef union ii_icsml_u { 2353typedef union ii_icsml_u {
2441 uint64_t ii_icsml_regval; 2354 uint64_t ii_icsml_regval;
2442 struct { 2355 struct {
2443 uint64_t i_tt_addr : 47; 2356 uint64_t i_tt_addr:47;
2444 uint64_t i_newsuppl_ex : 14; 2357 uint64_t i_newsuppl_ex:14;
2445 uint64_t i_reserved : 2; 2358 uint64_t i_reserved:2;
2446 uint64_t i_overflow : 1; 2359 uint64_t i_overflow:1;
2447 } ii_icsml_fld_s; 2360 } ii_icsml_fld_s;
2448} ii_icsml_u_t; 2361} ii_icsml_u_t;
2449 2362
2450
2451/************************************************************************ 2363/************************************************************************
2452 * * 2364 * *
2453 * This register contains the middle 64 bits of the header of the * 2365 * This register contains the middle 64 bits of the header of the *
2454 * spurious message captured by II. Valid when the SP_MSG bit in ICMR * 2366 * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
2455 * register is set. * 2367 * register is set. *
2456 * * 2368 * *
2457 ************************************************************************/ 2369 ************************************************************************/
2458 2370
2459typedef union ii_icsmm_u { 2371typedef union ii_icsmm_u {
2460 uint64_t ii_icsmm_regval; 2372 uint64_t ii_icsmm_regval;
2461 struct { 2373 struct {
2462 uint64_t i_tt_ack_cnt : 11; 2374 uint64_t i_tt_ack_cnt:11;
2463 uint64_t i_reserved : 53; 2375 uint64_t i_reserved:53;
2464 } ii_icsmm_fld_s; 2376 } ii_icsmm_fld_s;
2465} ii_icsmm_u_t; 2377} ii_icsmm_u_t;
2466 2378
2467
2468/************************************************************************ 2379/************************************************************************
2469 * * 2380 * *
2470 * This register contains the microscopic state, all the inputs to * 2381 * This register contains the microscopic state, all the inputs to *
2471 * the protocol table, captured with the spurious message. Valid when * 2382 * the protocol table, captured with the spurious message. Valid when *
2472 * the SP_MSG bit in the ICMR register is set. * 2383 * the SP_MSG bit in the ICMR register is set. *
2473 * * 2384 * *
2474 ************************************************************************/ 2385 ************************************************************************/
2475 2386
2476typedef union ii_icsmh_u { 2387typedef union ii_icsmh_u {
2477 uint64_t ii_icsmh_regval; 2388 uint64_t ii_icsmh_regval;
2478 struct { 2389 struct {
2479 uint64_t i_tt_vld : 1; 2390 uint64_t i_tt_vld:1;
2480 uint64_t i_xerr : 1; 2391 uint64_t i_xerr:1;
2481 uint64_t i_ft_cwact_o : 1; 2392 uint64_t i_ft_cwact_o:1;
2482 uint64_t i_ft_wact_o : 1; 2393 uint64_t i_ft_wact_o:1;
2483 uint64_t i_ft_active_o : 1; 2394 uint64_t i_ft_active_o:1;
2484 uint64_t i_sync : 1; 2395 uint64_t i_sync:1;
2485 uint64_t i_mnusg : 1; 2396 uint64_t i_mnusg:1;
2486 uint64_t i_mnusz : 1; 2397 uint64_t i_mnusz:1;
2487 uint64_t i_plusz : 1; 2398 uint64_t i_plusz:1;
2488 uint64_t i_plusg : 1; 2399 uint64_t i_plusg:1;
2489 uint64_t i_tt_exc : 5; 2400 uint64_t i_tt_exc:5;
2490 uint64_t i_tt_wb : 1; 2401 uint64_t i_tt_wb:1;
2491 uint64_t i_tt_hold : 1; 2402 uint64_t i_tt_hold:1;
2492 uint64_t i_tt_ack : 1; 2403 uint64_t i_tt_ack:1;
2493 uint64_t i_tt_resp : 1; 2404 uint64_t i_tt_resp:1;
2494 uint64_t i_tt_intvn : 1; 2405 uint64_t i_tt_intvn:1;
2495 uint64_t i_g_stall_bte1 : 1; 2406 uint64_t i_g_stall_bte1:1;
2496 uint64_t i_g_stall_bte0 : 1; 2407 uint64_t i_g_stall_bte0:1;
2497 uint64_t i_g_stall_il : 1; 2408 uint64_t i_g_stall_il:1;
2498 uint64_t i_g_stall_ib : 1; 2409 uint64_t i_g_stall_ib:1;
2499 uint64_t i_tt_imsg : 8; 2410 uint64_t i_tt_imsg:8;
2500 uint64_t i_tt_imsgtype : 2; 2411 uint64_t i_tt_imsgtype:2;
2501 uint64_t i_tt_use_old : 1; 2412 uint64_t i_tt_use_old:1;
2502 uint64_t i_tt_respreqd : 1; 2413 uint64_t i_tt_respreqd:1;
2503 uint64_t i_tt_bte_num : 1; 2414 uint64_t i_tt_bte_num:1;
2504 uint64_t i_cbn : 1; 2415 uint64_t i_cbn:1;
2505 uint64_t i_match : 1; 2416 uint64_t i_match:1;
2506 uint64_t i_rpcnt_lt_34 : 1; 2417 uint64_t i_rpcnt_lt_34:1;
2507 uint64_t i_rpcnt_ge_34 : 1; 2418 uint64_t i_rpcnt_ge_34:1;
2508 uint64_t i_rpcnt_lt_18 : 1; 2419 uint64_t i_rpcnt_lt_18:1;
2509 uint64_t i_rpcnt_ge_18 : 1; 2420 uint64_t i_rpcnt_ge_18:1;
2510 uint64_t i_rpcnt_lt_2 : 1; 2421 uint64_t i_rpcnt_lt_2:1;
2511 uint64_t i_rpcnt_ge_2 : 1; 2422 uint64_t i_rpcnt_ge_2:1;
2512 uint64_t i_rqcnt_lt_18 : 1; 2423 uint64_t i_rqcnt_lt_18:1;
2513 uint64_t i_rqcnt_ge_18 : 1; 2424 uint64_t i_rqcnt_ge_18:1;
2514 uint64_t i_rqcnt_lt_2 : 1; 2425 uint64_t i_rqcnt_lt_2:1;
2515 uint64_t i_rqcnt_ge_2 : 1; 2426 uint64_t i_rqcnt_ge_2:1;
2516 uint64_t i_tt_device : 7; 2427 uint64_t i_tt_device:7;
2517 uint64_t i_tt_init : 3; 2428 uint64_t i_tt_init:3;
2518 uint64_t i_reserved : 5; 2429 uint64_t i_reserved:5;
2519 } ii_icsmh_fld_s; 2430 } ii_icsmh_fld_s;
2520} ii_icsmh_u_t; 2431} ii_icsmh_u_t;
2521 2432
2522
2523/************************************************************************ 2433/************************************************************************
2524 * * 2434 * *
2525 * The Shub DEBUG unit provides a 3-bit selection signal to the * 2435 * The Shub DEBUG unit provides a 3-bit selection signal to the *
2526 * II core and a 3-bit selection signal to the fsbclk domain in the II * 2436 * II core and a 3-bit selection signal to the fsbclk domain in the II *
2527 * wrapper. * 2437 * wrapper. *
2528 * * 2438 * *
2529 ************************************************************************/ 2439 ************************************************************************/
2530 2440
2531typedef union ii_idbss_u { 2441typedef union ii_idbss_u {
2532 uint64_t ii_idbss_regval; 2442 uint64_t ii_idbss_regval;
2533 struct { 2443 struct {
2534 uint64_t i_iioclk_core_submenu : 3; 2444 uint64_t i_iioclk_core_submenu:3;
2535 uint64_t i_rsvd : 5; 2445 uint64_t i_rsvd:5;
2536 uint64_t i_fsbclk_wrapper_submenu : 3; 2446 uint64_t i_fsbclk_wrapper_submenu:3;
2537 uint64_t i_rsvd_1 : 5; 2447 uint64_t i_rsvd_1:5;
2538 uint64_t i_iioclk_menu : 5; 2448 uint64_t i_iioclk_menu:5;
2539 uint64_t i_rsvd_2 : 43; 2449 uint64_t i_rsvd_2:43;
2540 } ii_idbss_fld_s; 2450 } ii_idbss_fld_s;
2541} ii_idbss_u_t; 2451} ii_idbss_u_t;
2542 2452
2543
2544/************************************************************************ 2453/************************************************************************
2545 * * 2454 * *
2546 * Description: This register is used to set up the length for a * 2455 * Description: This register is used to set up the length for a *
2547 * transfer and then to monitor the progress of that transfer. This * 2456 * transfer and then to monitor the progress of that transfer. This *
2548 * register needs to be initialized before a transfer is started. A * 2457 * register needs to be initialized before a transfer is started. A *
@@ -2553,63 +2462,60 @@ typedef union ii_idbss_u {
2553 * transfer completes, hardware will clear the Busy bit. The length * 2462 * transfer completes, hardware will clear the Busy bit. The length *
2554 * field will also contain the number of cache lines left to be * 2463 * field will also contain the number of cache lines left to be *
2555 * transferred. * 2464 * transferred. *
2556 * * 2465 * *
2557 ************************************************************************/ 2466 ************************************************************************/
2558 2467
2559typedef union ii_ibls0_u { 2468typedef union ii_ibls0_u {
2560 uint64_t ii_ibls0_regval; 2469 uint64_t ii_ibls0_regval;
2561 struct { 2470 struct {
2562 uint64_t i_length : 16; 2471 uint64_t i_length:16;
2563 uint64_t i_error : 1; 2472 uint64_t i_error:1;
2564 uint64_t i_rsvd_1 : 3; 2473 uint64_t i_rsvd_1:3;
2565 uint64_t i_busy : 1; 2474 uint64_t i_busy:1;
2566 uint64_t i_rsvd : 43; 2475 uint64_t i_rsvd:43;
2567 } ii_ibls0_fld_s; 2476 } ii_ibls0_fld_s;
2568} ii_ibls0_u_t; 2477} ii_ibls0_u_t;
2569 2478
2570
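A minimal sketch of how this length/status layout is typically consumed; it is not part of the patch. The ii_read_reg() accessor is a hypothetical placeholder for whatever MMIO read routine the platform provides, and IIO_IBLS_0 is the register offset this header defines elsewhere.

/* Illustrative only: poll BTE 0 length/status through ii_ibls0_u_t. */
extern uint64_t ii_read_reg(uint64_t offset);	/* hypothetical MMIO accessor */

static inline int bte0_transfer_done(uint64_t *lines_left)
{
	ii_ibls0_u_t ls;

	ls.ii_ibls0_regval = ii_read_reg(IIO_IBLS_0);
	if (ls.ii_ibls0_fld_s.i_busy)
		return 0;				/* still transferring */
	if (lines_left)
		*lines_left = ls.ii_ibls0_fld_s.i_length; /* cache lines not yet moved */
	return ls.ii_ibls0_fld_s.i_error ? -1 : 1;	/* done, with or without error */
}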
2571/************************************************************************ 2479/************************************************************************
2572 * * 2480 * *
2573 * This register should be loaded before a transfer is started. The * 2481 * This register should be loaded before a transfer is started. The *
2574 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2482 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2575 * address as described in Section 1.3, Figure2 and Figure3. Since * 2483 * address as described in Section 1.3, Figure2 and Figure3. Since *
2576 * the bottom 7 bits of the address are always taken to be zero, BTE * 2484 * the bottom 7 bits of the address are always taken to be zero, BTE *
2577 * transfers are always cacheline-aligned. * 2485 * transfers are always cacheline-aligned. *
2578 * * 2486 * *
2579 ************************************************************************/ 2487 ************************************************************************/
2580 2488
2581typedef union ii_ibsa0_u { 2489typedef union ii_ibsa0_u {
2582 uint64_t ii_ibsa0_regval; 2490 uint64_t ii_ibsa0_regval;
2583 struct { 2491 struct {
2584 uint64_t i_rsvd_1 : 7; 2492 uint64_t i_rsvd_1:7;
2585 uint64_t i_addr : 42; 2493 uint64_t i_addr:42;
2586 uint64_t i_rsvd : 15; 2494 uint64_t i_rsvd:15;
2587 } ii_ibsa0_fld_s; 2495 } ii_ibsa0_fld_s;
2588} ii_ibsa0_u_t; 2496} ii_ibsa0_u_t;
2589 2497
2590
2591/************************************************************************ 2498/************************************************************************
2592 * * 2499 * *
2593 * This register should be loaded before a transfer is started. The * 2500 * This register should be loaded before a transfer is started. The *
2594 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2501 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2595 * address as described in Section 1.3, Figure2 and Figure3. Since * 2502 * address as described in Section 1.3, Figure2 and Figure3. Since *
2596 * the bottom 7 bits of the address are always taken to be zero, BTE * 2503 * the bottom 7 bits of the address are always taken to be zero, BTE *
2597 * transfers are always cacheline-aligned. * 2504 * transfers are always cacheline-aligned. *
2598 * * 2505 * *
2599 ************************************************************************/ 2506 ************************************************************************/
2600 2507
2601typedef union ii_ibda0_u { 2508typedef union ii_ibda0_u {
2602 uint64_t ii_ibda0_regval; 2509 uint64_t ii_ibda0_regval;
2603 struct { 2510 struct {
2604 uint64_t i_rsvd_1 : 7; 2511 uint64_t i_rsvd_1:7;
2605 uint64_t i_addr : 42; 2512 uint64_t i_addr:42;
2606 uint64_t i_rsvd : 15; 2513 uint64_t i_rsvd:15;
2607 } ii_ibda0_fld_s; 2514 } ii_ibda0_fld_s;
2608} ii_ibda0_u_t; 2515} ii_ibda0_u_t;
2609 2516
2610
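Since bits 6:0 of both address registers are reserved, a cacheline-aligned (128-byte) physical address can be written into the register value directly. The sketch below, not part of the patch, shows one way to program both addresses; ii_write_reg() is a hypothetical stand-in for the platform's MMIO write routine.

/* Illustrative only: program cacheline-aligned BTE 0 source/destination addresses. */
extern void ii_write_reg(uint64_t offset, uint64_t value);	/* hypothetical accessor */

static inline void bte0_set_addrs(uint64_t src_pa, uint64_t dst_pa)
{
	/* Bits 6:0 are reserved, so both addresses must be 128-byte aligned. */
	ii_write_reg(IIO_IBSA_0, src_pa & ~0x7fUL);
	ii_write_reg(IIO_IBDA_0, dst_pa & ~0x7fUL);
}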
2611/************************************************************************ 2517/************************************************************************
2612 * * 2518 * *
2613 * Writing to this register sets up the attributes of the transfer * 2519 * Writing to this register sets up the attributes of the transfer *
2614 * and initiates the transfer operation. Reading this register has * 2520 * and initiates the transfer operation. Reading this register has *
2615 * the side effect of terminating any transfer in progress. Note: * 2521 * the side effect of terminating any transfer in progress. Note: *
@@ -2617,61 +2523,58 @@ typedef union ii_ibda0_u {
2617 * other BTE. If a BTE stream has to be stopped (due to error * 2523 * other BTE. If a BTE stream has to be stopped (due to error *
2618 * handling for example), both BTE streams should be stopped and * 2524 * handling for example), both BTE streams should be stopped and *
2619 * their transfers discarded. * 2525 * their transfers discarded. *
2620 * * 2526 * *
2621 ************************************************************************/ 2527 ************************************************************************/
2622 2528
2623typedef union ii_ibct0_u { 2529typedef union ii_ibct0_u {
2624 uint64_t ii_ibct0_regval; 2530 uint64_t ii_ibct0_regval;
2625 struct { 2531 struct {
2626 uint64_t i_zerofill : 1; 2532 uint64_t i_zerofill:1;
2627 uint64_t i_rsvd_2 : 3; 2533 uint64_t i_rsvd_2:3;
2628 uint64_t i_notify : 1; 2534 uint64_t i_notify:1;
2629 uint64_t i_rsvd_1 : 3; 2535 uint64_t i_rsvd_1:3;
2630 uint64_t i_poison : 1; 2536 uint64_t i_poison:1;
2631 uint64_t i_rsvd : 55; 2537 uint64_t i_rsvd:55;
2632 } ii_ibct0_fld_s; 2538 } ii_ibct0_fld_s;
2633} ii_ibct0_u_t; 2539} ii_ibct0_u_t;
2634 2540
2635
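A hedged sketch of composing the control/terminate word described above; it is not part of the patch and only sets the two attribute bits shown in the field layout.

/* Illustrative only: compose the BTE 0 control/terminate word. */
static inline uint64_t bte0_ctrl_word(int zerofill, int notify)
{
	ii_ibct0_u_t ct;

	ct.ii_ibct0_regval = 0;
	ct.ii_ibct0_fld_s.i_zerofill = zerofill ? 1 : 0;
	ct.ii_ibct0_fld_s.i_notify = notify ? 1 : 0;
	/* Per the description above, writing this value initiates the transfer,
	 * while reading the register terminates any transfer in progress. */
	return ct.ii_ibct0_regval;
}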
2636/************************************************************************ 2541/************************************************************************
2637 * * 2542 * *
2638 * This register contains the address to which the WINV is sent. * 2543 * This register contains the address to which the WINV is sent. *
2639 * This address has to be cache line aligned. * 2544 * This address has to be cache line aligned. *
2640 * * 2545 * *
2641 ************************************************************************/ 2546 ************************************************************************/
2642 2547
2643typedef union ii_ibna0_u { 2548typedef union ii_ibna0_u {
2644 uint64_t ii_ibna0_regval; 2549 uint64_t ii_ibna0_regval;
2645 struct { 2550 struct {
2646 uint64_t i_rsvd_1 : 7; 2551 uint64_t i_rsvd_1:7;
2647 uint64_t i_addr : 42; 2552 uint64_t i_addr:42;
2648 uint64_t i_rsvd : 15; 2553 uint64_t i_rsvd:15;
2649 } ii_ibna0_fld_s; 2554 } ii_ibna0_fld_s;
2650} ii_ibna0_u_t; 2555} ii_ibna0_u_t;
2651 2556
2652
2653/************************************************************************ 2557/************************************************************************
2654 * * 2558 * *
2655 * This register contains the programmable level as well as the node * 2559 * This register contains the programmable level as well as the node *
2656 * ID and PI unit of the processor to which the interrupt will be * 2560 * ID and PI unit of the processor to which the interrupt will be *
2657 * sent. * 2561 * sent. *
2658 * * 2562 * *
2659 ************************************************************************/ 2563 ************************************************************************/
2660 2564
2661typedef union ii_ibia0_u { 2565typedef union ii_ibia0_u {
2662 uint64_t ii_ibia0_regval; 2566 uint64_t ii_ibia0_regval;
2663 struct { 2567 struct {
2664 uint64_t i_rsvd_2 : 1; 2568 uint64_t i_rsvd_2:1;
2665 uint64_t i_node_id : 11; 2569 uint64_t i_node_id:11;
2666 uint64_t i_rsvd_1 : 4; 2570 uint64_t i_rsvd_1:4;
2667 uint64_t i_level : 7; 2571 uint64_t i_level:7;
2668 uint64_t i_rsvd : 41; 2572 uint64_t i_rsvd:41;
2669 } ii_ibia0_fld_s; 2573 } ii_ibia0_fld_s;
2670} ii_ibia0_u_t; 2574} ii_ibia0_u_t;
2671 2575
2672
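The interrupt address word can be assembled through the union in the same way; the sketch below is illustrative only and assumes the caller supplies values that fit the 11-bit node ID and 7-bit level fields.

/* Illustrative only: target the BTE 0 completion interrupt at a node/level. */
static inline uint64_t bte0_intr_word(uint64_t node_id, uint64_t level)
{
	ii_ibia0_u_t ia;

	ia.ii_ibia0_regval = 0;
	ia.ii_ibia0_fld_s.i_node_id = node_id;	/* destination node ID */
	ia.ii_ibia0_fld_s.i_level = level;	/* programmable interrupt level */
	return ia.ii_ibia0_regval;
}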
2673/************************************************************************ 2576/************************************************************************
2674 * * 2577 * *
2675 * Description: This register is used to set up the length for a * 2578 * Description: This register is used to set up the length for a *
2676 * transfer and then to monitor the progress of that transfer. This * 2579 * transfer and then to monitor the progress of that transfer. This *
2677 * register needs to be initialized before a transfer is started. A * 2580 * register needs to be initialized before a transfer is started. A *
@@ -2682,63 +2585,60 @@ typedef union ii_ibia0_u {
2682 * transfer completes, hardware will clear the Busy bit. The length * 2585 * transfer completes, hardware will clear the Busy bit. The length *
2683 * field will also contain the number of cache lines left to be * 2586 * field will also contain the number of cache lines left to be *
2684 * transferred. * 2587 * transferred. *
2685 * * 2588 * *
2686 ************************************************************************/ 2589 ************************************************************************/
2687 2590
2688typedef union ii_ibls1_u { 2591typedef union ii_ibls1_u {
2689 uint64_t ii_ibls1_regval; 2592 uint64_t ii_ibls1_regval;
2690 struct { 2593 struct {
2691 uint64_t i_length : 16; 2594 uint64_t i_length:16;
2692 uint64_t i_error : 1; 2595 uint64_t i_error:1;
2693 uint64_t i_rsvd_1 : 3; 2596 uint64_t i_rsvd_1:3;
2694 uint64_t i_busy : 1; 2597 uint64_t i_busy:1;
2695 uint64_t i_rsvd : 43; 2598 uint64_t i_rsvd:43;
2696 } ii_ibls1_fld_s; 2599 } ii_ibls1_fld_s;
2697} ii_ibls1_u_t; 2600} ii_ibls1_u_t;
2698 2601
2699
2700/************************************************************************ 2602/************************************************************************
2701 * * 2603 * *
2702 * This register should be loaded before a transfer is started. The * 2604 * This register should be loaded before a transfer is started. The *
2703 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2605 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2704 * address as described in Section 1.3, Figure2 and Figure3. Since * 2606 * address as described in Section 1.3, Figure2 and Figure3. Since *
2705 * the bottom 7 bits of the address are always taken to be zero, BTE * 2607 * the bottom 7 bits of the address are always taken to be zero, BTE *
2706 * transfers are always cacheline-aligned. * 2608 * transfers are always cacheline-aligned. *
2707 * * 2609 * *
2708 ************************************************************************/ 2610 ************************************************************************/
2709 2611
2710typedef union ii_ibsa1_u { 2612typedef union ii_ibsa1_u {
2711 uint64_t ii_ibsa1_regval; 2613 uint64_t ii_ibsa1_regval;
2712 struct { 2614 struct {
2713 uint64_t i_rsvd_1 : 7; 2615 uint64_t i_rsvd_1:7;
2714 uint64_t i_addr : 33; 2616 uint64_t i_addr:33;
2715 uint64_t i_rsvd : 24; 2617 uint64_t i_rsvd:24;
2716 } ii_ibsa1_fld_s; 2618 } ii_ibsa1_fld_s;
2717} ii_ibsa1_u_t; 2619} ii_ibsa1_u_t;
2718 2620
2719
2720/************************************************************************ 2621/************************************************************************
2721 * * 2622 * *
2722 * This register should be loaded before a transfer is started. The * 2623 * This register should be loaded before a transfer is started. The *
2723 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * 2624 * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
2724 * address as described in Section 1.3, Figure2 and Figure3. Since * 2625 * address as described in Section 1.3, Figure2 and Figure3. Since *
2725 * the bottom 7 bits of the address are always taken to be zero, BTE * 2626 * the bottom 7 bits of the address are always taken to be zero, BTE *
2726 * transfers are always cacheline-aligned. * 2627 * transfers are always cacheline-aligned. *
2727 * * 2628 * *
2728 ************************************************************************/ 2629 ************************************************************************/
2729 2630
2730typedef union ii_ibda1_u { 2631typedef union ii_ibda1_u {
2731 uint64_t ii_ibda1_regval; 2632 uint64_t ii_ibda1_regval;
2732 struct { 2633 struct {
2733 uint64_t i_rsvd_1 : 7; 2634 uint64_t i_rsvd_1:7;
2734 uint64_t i_addr : 33; 2635 uint64_t i_addr:33;
2735 uint64_t i_rsvd : 24; 2636 uint64_t i_rsvd:24;
2736 } ii_ibda1_fld_s; 2637 } ii_ibda1_fld_s;
2737} ii_ibda1_u_t; 2638} ii_ibda1_u_t;
2738 2639
2739
2740/************************************************************************ 2640/************************************************************************
2741 * * 2641 * *
2742 * Writing to this register sets up the attributes of the transfer * 2642 * Writing to this register sets up the attributes of the transfer *
2743 * and initiates the transfer operation. Reading this register has * 2643 * and initiates the transfer operation. Reading this register has *
2744 * the side effect of terminating any transfer in progress. Note: * 2644 * the side effect of terminating any transfer in progress. Note: *
@@ -2746,61 +2646,58 @@ typedef union ii_ibda1_u {
2746 * other BTE. If a BTE stream has to be stopped (due to error * 2646 * other BTE. If a BTE stream has to be stopped (due to error *
2747 * handling for example), both BTE streams should be stopped and * 2647 * handling for example), both BTE streams should be stopped and *
2748 * their transfers discarded. * 2648 * their transfers discarded. *
2749 * * 2649 * *
2750 ************************************************************************/ 2650 ************************************************************************/
2751 2651
2752typedef union ii_ibct1_u { 2652typedef union ii_ibct1_u {
2753 uint64_t ii_ibct1_regval; 2653 uint64_t ii_ibct1_regval;
2754 struct { 2654 struct {
2755 uint64_t i_zerofill : 1; 2655 uint64_t i_zerofill:1;
2756 uint64_t i_rsvd_2 : 3; 2656 uint64_t i_rsvd_2:3;
2757 uint64_t i_notify : 1; 2657 uint64_t i_notify:1;
2758 uint64_t i_rsvd_1 : 3; 2658 uint64_t i_rsvd_1:3;
2759 uint64_t i_poison : 1; 2659 uint64_t i_poison:1;
2760 uint64_t i_rsvd : 55; 2660 uint64_t i_rsvd:55;
2761 } ii_ibct1_fld_s; 2661 } ii_ibct1_fld_s;
2762} ii_ibct1_u_t; 2662} ii_ibct1_u_t;
2763 2663
2764
2765/************************************************************************ 2664/************************************************************************
2766 * * 2665 * *
2767 * This register contains the address to which the WINV is sent. * 2666 * This register contains the address to which the WINV is sent. *
2768 * This address has to be cache line aligned. * 2667 * This address has to be cache line aligned. *
2769 * * 2668 * *
2770 ************************************************************************/ 2669 ************************************************************************/
2771 2670
2772typedef union ii_ibna1_u { 2671typedef union ii_ibna1_u {
2773 uint64_t ii_ibna1_regval; 2672 uint64_t ii_ibna1_regval;
2774 struct { 2673 struct {
2775 uint64_t i_rsvd_1 : 7; 2674 uint64_t i_rsvd_1:7;
2776 uint64_t i_addr : 33; 2675 uint64_t i_addr:33;
2777 uint64_t i_rsvd : 24; 2676 uint64_t i_rsvd:24;
2778 } ii_ibna1_fld_s; 2677 } ii_ibna1_fld_s;
2779} ii_ibna1_u_t; 2678} ii_ibna1_u_t;
2780 2679
2781
2782/************************************************************************ 2680/************************************************************************
2783 * * 2681 * *
2784 * This register contains the programmable level as well as the node * 2682 * This register contains the programmable level as well as the node *
2785 * ID and PI unit of the processor to which the interrupt will be * 2683 * ID and PI unit of the processor to which the interrupt will be *
2786 * sent. * 2684 * sent. *
2787 * * 2685 * *
2788 ************************************************************************/ 2686 ************************************************************************/
2789 2687
2790typedef union ii_ibia1_u { 2688typedef union ii_ibia1_u {
2791 uint64_t ii_ibia1_regval; 2689 uint64_t ii_ibia1_regval;
2792 struct { 2690 struct {
2793 uint64_t i_pi_id : 1; 2691 uint64_t i_pi_id:1;
2794 uint64_t i_node_id : 8; 2692 uint64_t i_node_id:8;
2795 uint64_t i_rsvd_1 : 7; 2693 uint64_t i_rsvd_1:7;
2796 uint64_t i_level : 7; 2694 uint64_t i_level:7;
2797 uint64_t i_rsvd : 41; 2695 uint64_t i_rsvd:41;
2798 } ii_ibia1_fld_s; 2696 } ii_ibia1_fld_s;
2799} ii_ibia1_u_t; 2697} ii_ibia1_u_t;
2800 2698
2801
2802/************************************************************************ 2699/************************************************************************
2803 * * 2700 * *
2804 * This register defines the resources that feed information into * 2701 * This register defines the resources that feed information into *
2805 * the two performance counters located in the IO Performance * 2702 * the two performance counters located in the IO Performance *
2806 * Profiling Register. There are 17 different quantities that can be * 2703 * Profiling Register. There are 17 different quantities that can be *
@@ -2811,133 +2708,129 @@ typedef union ii_ibia1_u {
2811 * other is available from the other performance counter. Hence, the * 2708 * other is available from the other performance counter. Hence, the *
2812 * II supports all 17*16=272 possible combinations of quantities to * 2709 * II supports all 17*16=272 possible combinations of quantities to *
2813 * measure. * 2710 * measure. *
2814 * * 2711 * *
2815 ************************************************************************/ 2712 ************************************************************************/
2816 2713
2817typedef union ii_ipcr_u { 2714typedef union ii_ipcr_u {
2818 uint64_t ii_ipcr_regval; 2715 uint64_t ii_ipcr_regval;
2819 struct { 2716 struct {
2820 uint64_t i_ippr0_c : 4; 2717 uint64_t i_ippr0_c:4;
2821 uint64_t i_ippr1_c : 4; 2718 uint64_t i_ippr1_c:4;
2822 uint64_t i_icct : 8; 2719 uint64_t i_icct:8;
2823 uint64_t i_rsvd : 48; 2720 uint64_t i_rsvd:48;
2824 } ii_ipcr_fld_s; 2721 } ii_ipcr_fld_s;
2825} ii_ipcr_u_t; 2722} ii_ipcr_u_t;
2826 2723
2827
2828/************************************************************************ 2724/************************************************************************
2829 * * 2725 * *
2830 * * 2726 * *
2831 * * 2727 * *
2832 ************************************************************************/ 2728 ************************************************************************/
2833 2729
2834typedef union ii_ippr_u { 2730typedef union ii_ippr_u {
2835 uint64_t ii_ippr_regval; 2731 uint64_t ii_ippr_regval;
2836 struct { 2732 struct {
2837 uint64_t i_ippr0 : 32; 2733 uint64_t i_ippr0:32;
2838 uint64_t i_ippr1 : 32; 2734 uint64_t i_ippr1:32;
2839 } ii_ippr_fld_s; 2735 } ii_ippr_fld_s;
2840} ii_ippr_u_t; 2736} ii_ippr_u_t;
2841 2737
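As a sketch of the event-selection scheme described above (one quantity per counter, both 32-bit counts read back from IPPR), the helpers below compose an IPCR value and decode an IPPR value. They are not part of the patch; writing and reading the actual registers is left to whatever accessor the platform provides.

/* Illustrative only: select II perf events and split the two 32-bit counters. */
static inline uint64_t ii_perf_select_word(uint64_t event0, uint64_t event1)
{
	ii_ipcr_u_t pcr;

	pcr.ii_ipcr_regval = 0;
	pcr.ii_ipcr_fld_s.i_ippr0_c = event0;	/* quantity fed to counter 0 */
	pcr.ii_ipcr_fld_s.i_ippr1_c = event1;	/* quantity fed to counter 1 */
	return pcr.ii_ipcr_regval;
}

static inline void ii_perf_decode(uint64_t ippr, uint32_t *c0, uint32_t *c1)
{
	ii_ippr_u_t p;

	p.ii_ippr_regval = ippr;
	*c0 = p.ii_ippr_fld_s.i_ippr0;
	*c1 = p.ii_ippr_fld_s.i_ippr1;
}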
2842 2738/************************************************************************
2843 2739 * *
2844/************************************************************************** 2740 * The following defines which were not formed into structures are *
2845 * * 2741 * probably identical to another register, and the name of the *
2846 * The following defines which were not formed into structures are * 2742 * register is provided against each of these registers. This *
2847 * probably identical to another register, and the name of the * 2743 * information needs to be checked carefully *
2848 * register is provided against each of these registers. This * 2744 * *
2849 * information needs to be checked carefully * 2745 * IIO_ICRB1_A IIO_ICRB0_A *
2850 * * 2746 * IIO_ICRB1_B IIO_ICRB0_B *
2851 * IIO_ICRB1_A IIO_ICRB0_A * 2747 * IIO_ICRB1_C IIO_ICRB0_C *
2852 * IIO_ICRB1_B IIO_ICRB0_B * 2748 * IIO_ICRB1_D IIO_ICRB0_D *
2853 * IIO_ICRB1_C IIO_ICRB0_C * 2749 * IIO_ICRB1_E IIO_ICRB0_E *
2854 * IIO_ICRB1_D IIO_ICRB0_D * 2750 * IIO_ICRB2_A IIO_ICRB0_A *
2855 * IIO_ICRB1_E IIO_ICRB0_E * 2751 * IIO_ICRB2_B IIO_ICRB0_B *
2856 * IIO_ICRB2_A IIO_ICRB0_A * 2752 * IIO_ICRB2_C IIO_ICRB0_C *
2857 * IIO_ICRB2_B IIO_ICRB0_B * 2753 * IIO_ICRB2_D IIO_ICRB0_D *
2858 * IIO_ICRB2_C IIO_ICRB0_C * 2754 * IIO_ICRB2_E IIO_ICRB0_E *
2859 * IIO_ICRB2_D IIO_ICRB0_D * 2755 * IIO_ICRB3_A IIO_ICRB0_A *
2860 * IIO_ICRB2_E IIO_ICRB0_E * 2756 * IIO_ICRB3_B IIO_ICRB0_B *
2861 * IIO_ICRB3_A IIO_ICRB0_A * 2757 * IIO_ICRB3_C IIO_ICRB0_C *
2862 * IIO_ICRB3_B IIO_ICRB0_B * 2758 * IIO_ICRB3_D IIO_ICRB0_D *
2863 * IIO_ICRB3_C IIO_ICRB0_C * 2759 * IIO_ICRB3_E IIO_ICRB0_E *
2864 * IIO_ICRB3_D IIO_ICRB0_D * 2760 * IIO_ICRB4_A IIO_ICRB0_A *
2865 * IIO_ICRB3_E IIO_ICRB0_E * 2761 * IIO_ICRB4_B IIO_ICRB0_B *
2866 * IIO_ICRB4_A IIO_ICRB0_A * 2762 * IIO_ICRB4_C IIO_ICRB0_C *
2867 * IIO_ICRB4_B IIO_ICRB0_B * 2763 * IIO_ICRB4_D IIO_ICRB0_D *
2868 * IIO_ICRB4_C IIO_ICRB0_C * 2764 * IIO_ICRB4_E IIO_ICRB0_E *
2869 * IIO_ICRB4_D IIO_ICRB0_D * 2765 * IIO_ICRB5_A IIO_ICRB0_A *
2870 * IIO_ICRB4_E IIO_ICRB0_E * 2766 * IIO_ICRB5_B IIO_ICRB0_B *
2871 * IIO_ICRB5_A IIO_ICRB0_A * 2767 * IIO_ICRB5_C IIO_ICRB0_C *
2872 * IIO_ICRB5_B IIO_ICRB0_B * 2768 * IIO_ICRB5_D IIO_ICRB0_D *
2873 * IIO_ICRB5_C IIO_ICRB0_C * 2769 * IIO_ICRB5_E IIO_ICRB0_E *
2874 * IIO_ICRB5_D IIO_ICRB0_D * 2770 * IIO_ICRB6_A IIO_ICRB0_A *
2875 * IIO_ICRB5_E IIO_ICRB0_E * 2771 * IIO_ICRB6_B IIO_ICRB0_B *
2876 * IIO_ICRB6_A IIO_ICRB0_A * 2772 * IIO_ICRB6_C IIO_ICRB0_C *
2877 * IIO_ICRB6_B IIO_ICRB0_B * 2773 * IIO_ICRB6_D IIO_ICRB0_D *
2878 * IIO_ICRB6_C IIO_ICRB0_C * 2774 * IIO_ICRB6_E IIO_ICRB0_E *
2879 * IIO_ICRB6_D IIO_ICRB0_D * 2775 * IIO_ICRB7_A IIO_ICRB0_A *
2880 * IIO_ICRB6_E IIO_ICRB0_E * 2776 * IIO_ICRB7_B IIO_ICRB0_B *
2881 * IIO_ICRB7_A IIO_ICRB0_A * 2777 * IIO_ICRB7_C IIO_ICRB0_C *
2882 * IIO_ICRB7_B IIO_ICRB0_B * 2778 * IIO_ICRB7_D IIO_ICRB0_D *
2883 * IIO_ICRB7_C IIO_ICRB0_C * 2779 * IIO_ICRB7_E IIO_ICRB0_E *
2884 * IIO_ICRB7_D IIO_ICRB0_D * 2780 * IIO_ICRB8_A IIO_ICRB0_A *
2885 * IIO_ICRB7_E IIO_ICRB0_E * 2781 * IIO_ICRB8_B IIO_ICRB0_B *
2886 * IIO_ICRB8_A IIO_ICRB0_A * 2782 * IIO_ICRB8_C IIO_ICRB0_C *
2887 * IIO_ICRB8_B IIO_ICRB0_B * 2783 * IIO_ICRB8_D IIO_ICRB0_D *
2888 * IIO_ICRB8_C IIO_ICRB0_C * 2784 * IIO_ICRB8_E IIO_ICRB0_E *
2889 * IIO_ICRB8_D IIO_ICRB0_D * 2785 * IIO_ICRB9_A IIO_ICRB0_A *
2890 * IIO_ICRB8_E IIO_ICRB0_E * 2786 * IIO_ICRB9_B IIO_ICRB0_B *
2891 * IIO_ICRB9_A IIO_ICRB0_A * 2787 * IIO_ICRB9_C IIO_ICRB0_C *
2892 * IIO_ICRB9_B IIO_ICRB0_B * 2788 * IIO_ICRB9_D IIO_ICRB0_D *
2893 * IIO_ICRB9_C IIO_ICRB0_C * 2789 * IIO_ICRB9_E IIO_ICRB0_E *
2894 * IIO_ICRB9_D IIO_ICRB0_D * 2790 * IIO_ICRBA_A IIO_ICRB0_A *
2895 * IIO_ICRB9_E IIO_ICRB0_E * 2791 * IIO_ICRBA_B IIO_ICRB0_B *
2896 * IIO_ICRBA_A IIO_ICRB0_A * 2792 * IIO_ICRBA_C IIO_ICRB0_C *
2897 * IIO_ICRBA_B IIO_ICRB0_B * 2793 * IIO_ICRBA_D IIO_ICRB0_D *
2898 * IIO_ICRBA_C IIO_ICRB0_C * 2794 * IIO_ICRBA_E IIO_ICRB0_E *
2899 * IIO_ICRBA_D IIO_ICRB0_D * 2795 * IIO_ICRBB_A IIO_ICRB0_A *
2900 * IIO_ICRBA_E IIO_ICRB0_E * 2796 * IIO_ICRBB_B IIO_ICRB0_B *
2901 * IIO_ICRBB_A IIO_ICRB0_A * 2797 * IIO_ICRBB_C IIO_ICRB0_C *
2902 * IIO_ICRBB_B IIO_ICRB0_B * 2798 * IIO_ICRBB_D IIO_ICRB0_D *
2903 * IIO_ICRBB_C IIO_ICRB0_C * 2799 * IIO_ICRBB_E IIO_ICRB0_E *
2904 * IIO_ICRBB_D IIO_ICRB0_D * 2800 * IIO_ICRBC_A IIO_ICRB0_A *
2905 * IIO_ICRBB_E IIO_ICRB0_E * 2801 * IIO_ICRBC_B IIO_ICRB0_B *
2906 * IIO_ICRBC_A IIO_ICRB0_A * 2802 * IIO_ICRBC_C IIO_ICRB0_C *
2907 * IIO_ICRBC_B IIO_ICRB0_B * 2803 * IIO_ICRBC_D IIO_ICRB0_D *
2908 * IIO_ICRBC_C IIO_ICRB0_C * 2804 * IIO_ICRBC_E IIO_ICRB0_E *
2909 * IIO_ICRBC_D IIO_ICRB0_D * 2805 * IIO_ICRBD_A IIO_ICRB0_A *
2910 * IIO_ICRBC_E IIO_ICRB0_E * 2806 * IIO_ICRBD_B IIO_ICRB0_B *
2911 * IIO_ICRBD_A IIO_ICRB0_A * 2807 * IIO_ICRBD_C IIO_ICRB0_C *
2912 * IIO_ICRBD_B IIO_ICRB0_B * 2808 * IIO_ICRBD_D IIO_ICRB0_D *
2913 * IIO_ICRBD_C IIO_ICRB0_C * 2809 * IIO_ICRBD_E IIO_ICRB0_E *
2914 * IIO_ICRBD_D IIO_ICRB0_D * 2810 * IIO_ICRBE_A IIO_ICRB0_A *
2915 * IIO_ICRBD_E IIO_ICRB0_E * 2811 * IIO_ICRBE_B IIO_ICRB0_B *
2916 * IIO_ICRBE_A IIO_ICRB0_A * 2812 * IIO_ICRBE_C IIO_ICRB0_C *
2917 * IIO_ICRBE_B IIO_ICRB0_B * 2813 * IIO_ICRBE_D IIO_ICRB0_D *
2918 * IIO_ICRBE_C IIO_ICRB0_C * 2814 * IIO_ICRBE_E IIO_ICRB0_E *
2919 * IIO_ICRBE_D IIO_ICRB0_D * 2815 * *
2920 * IIO_ICRBE_E IIO_ICRB0_E * 2816 ************************************************************************/
2921 * *
2922 **************************************************************************/
2923
2924 2817
2925/* 2818/*
2926 * Slightly friendlier names for some common registers. 2819 * Slightly friendlier names for some common registers.
2927 */ 2820 */
2928#define IIO_WIDGET IIO_WID /* Widget identification */ 2821#define IIO_WIDGET IIO_WID /* Widget identification */
2929#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ 2822#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
2930#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ 2823#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
2931#define IIO_PROTECT IIO_ILAPR /* IO interface protection */ 2824#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
2932#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ 2825#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
2933#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ 2826#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
2934#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ 2827#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
2935#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ 2828#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
2936#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ 2829#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
2937#define IIO_LLP_LOG IIO_ILLR /* LLP log */ 2830#define IIO_LLP_LOG IIO_ILLR /* LLP log */
2938#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/ 2831#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout */
2939#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ 2832#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
2940#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ 2833#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
2941#define IIO_IGFX_0 IIO_IGFX0 2834#define IIO_IGFX_0 IIO_IGFX0
2942#define IIO_IGFX_1 IIO_IGFX1 2835#define IIO_IGFX_1 IIO_IGFX1
2943#define IIO_IBCT_0 IIO_IBCT0 2836#define IIO_IBCT_0 IIO_IBCT0
@@ -2957,12 +2850,12 @@ typedef union ii_ippr_u {
2957#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x))) 2850#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x)))
2958#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x))) 2851#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x)))
2959#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */ 2852#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
2960#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ 2853#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */
2961#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ 2854#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */
2962 2855
2963#define IIO_NUM_IPRBS (9) 2856#define IIO_NUM_IPRBS 9
2964 2857
2965#define IIO_LLP_CSR_IS_UP 0x00002000 2858#define IIO_LLP_CSR_IS_UP 0x00002000
2966#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000 2859#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
2967#define IIO_LLP_CSR_LLP_STAT_SHFT 12 2860#define IIO_LLP_CSR_LLP_STAT_SHFT 12
2968 2861
@@ -2970,30 +2863,29 @@ typedef union ii_ippr_u {
2970#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */ 2863#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */
2971 2864
2972/* key to IIO_PROTECT_OVRRD */ 2865/* key to IIO_PROTECT_OVRRD */
2973#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ 2866#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
2974 2867
2975/* BTE register names */ 2868/* BTE register names */
2976#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ 2869#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
2977#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ 2870#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
2978#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */ 2871#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
2979#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ 2872#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
2980#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ 2873#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
2981#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ 2874#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
2982#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ 2875#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
2983#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ 2876#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
2984 2877
2985/* BTE register offsets from base */ 2878/* BTE register offsets from base */
2986#define BTEOFF_STAT 0 2879#define BTEOFF_STAT 0
2987#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) 2880#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
2988#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) 2881#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
2989#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) 2882#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
2990#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) 2883#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
2991#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) 2884#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
2992
2993 2885
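A small sketch, not part of the patch, of how these per-engine offsets are usually combined: take the BTE's status register as its base (IIO_BTE_OFF_1 is the distance between the two engines' register blocks) and add the BTEOFF_* offset of the register wanted. This assumes both engines' registers are spaced identically, which is what the offset scheme above implies.

/* Illustrative only: locate a register of BTE engine 0 or 1. */
static inline uint64_t bte_reg(int bte, uint64_t off)
{
	uint64_t base = IIO_IBLS_0 + (bte ? IIO_BTE_OFF_1 : IIO_BTE_OFF_0);

	return base + off;	/* e.g. off = BTEOFF_SRC, BTEOFF_DEST, BTEOFF_CTRL */
}

/* Usage: bte_reg(1, BTEOFF_CTRL) yields BTE 1's control/terminate register. */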
2994/* names used in shub diags */ 2886/* names used in shub diags */
2995#define IIO_BASE_BTE0 IIO_IBLS_0 2887#define IIO_BASE_BTE0 IIO_IBLS_0
2996#define IIO_BASE_BTE1 IIO_IBLS_1 2888#define IIO_BASE_BTE1 IIO_IBLS_1
2997 2889
2998/* 2890/*
2999 * Macro which takes the widget number, and returns the 2891 * Macro which takes the widget number, and returns the
@@ -3001,10 +2893,9 @@ typedef union ii_ippr_u {
3001 * value _x is expected to be a widget number in the range 2893 * value _x is expected to be a widget number in the range
3002 * 0, 8 - 0xF 2894 * 0, 8 - 0xF
3003 */ 2895 */
3004#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \ 2896#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
3005 (_x) : \ 2897 (_x) : \
3006 (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) 2898 (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
3007
3008 2899
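A quick illustration of the mapping this macro implements, not part of the patch: widget 0 lands in slot 0, and widgets 0x8 through 0xF collapse onto slots 1 through 8, each slot 8 bytes wide.

/* Illustrative only: which 8-byte PRB slot a widget number selects. */
static inline int ioprb_slot(unsigned int widget)
{
	/* Widget 0 -> slot 0; widgets 0x8..0xF -> slots 1..8. */
	return (int)((IIO_IOPRB(widget) - IIO_IOPRB_0) >> 3);
}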
3009/* GFX Flow Control Node/Widget Register */ 2900/* GFX Flow Control Node/Widget Register */
3010#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */ 2901#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
@@ -3025,7 +2916,6 @@ typedef union ii_ippr_u {
3025 (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \ 2916 (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
3026 (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT)) 2917 (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
3027 2918
3028
3029/* Scratch registers (all bits available) */ 2919/* Scratch registers (all bits available) */
3030#define IIO_SCRATCH_REG0 IIO_ISCR0 2920#define IIO_SCRATCH_REG0 IIO_ISCR0
3031#define IIO_SCRATCH_REG1 IIO_ISCR1 2921#define IIO_SCRATCH_REG1 IIO_ISCR1
@@ -3046,21 +2936,21 @@ typedef union ii_ippr_u {
3046#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL 2936#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL
3047#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL 2937#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL
3048/* IO Translation Table Entries */ 2938/* IO Translation Table Entries */
3049#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ 2939#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
3050 /* Hw manuals number them 1..7! */ 2940 /* Hw manuals number them 1..7! */
3051/* 2941/*
3052 * IIO_IMEM Register fields. 2942 * IIO_IMEM Register fields.
3053 */ 2943 */
3054#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ 2944#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */
3055#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ 2945#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */
3056#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ 2946#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */
3057 2947
3058/* 2948/*
3059 * As a permanent workaround for a bug in the PI side of the shub, we've 2949 * As a permanent workaround for a bug in the PI side of the shub, we've
3060 * redefined big window 7 as small window 0. 2950 * redefined big window 7 as small window 0.
3061 XXX does this still apply for SN1?? 2951 XXX does this still apply for SN1??
3062 */ 2952 */
3063#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) 2953#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
3064 2954
3065/* 2955/*
3066 * Use the top big window as a surrogate for the first small window 2956 * Use the top big window as a surrogate for the first small window
@@ -3071,11 +2961,11 @@ typedef union ii_ippr_u {
3071 2961
3072/* 2962/*
3073 * CRB manipulation macros 2963 * CRB manipulation macros
3074 * The CRB macros are slightly complicated, since there are up to 2964 * The CRB macros are slightly complicated, since there are up to
3075 * four registers associated with each CRB entry. 2965 * four registers associated with each CRB entry.
3076 */ 2966 */
3077#define IIO_NUM_CRBS 15 /* Number of CRBs */ 2967#define IIO_NUM_CRBS 15 /* Number of CRBs */
3078#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */ 2968#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
3079#define IIO_ICRB_OFFSET 8 2969#define IIO_ICRB_OFFSET 8
3080#define IIO_ICRB_0 IIO_ICRB0_A 2970#define IIO_ICRB_0 IIO_ICRB0_A
3081#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */ 2971#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
@@ -3083,43 +2973,43 @@ typedef union ii_ippr_u {
3083 #define IIO_FIRST_PC_ENTRY 12 2973 #define IIO_FIRST_PC_ENTRY 12
3084 */ 2974 */
3085 2975
3086#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x)))) 2976#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
3087#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)) 2977#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
3088#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)) 2978#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
3089#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)) 2979#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
3090#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET)) 2980#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
3091 2981
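A sketch of walking the CRB table with these macros; it is not part of the patch. The ii_read_reg() accessor is again a hypothetical placeholder for the platform's MMIO read routine, and only the _A register's valid bit is examined here.

/* Illustrative only: count the CRB entries whose _A register reports valid. */
extern uint64_t ii_read_reg(uint64_t offset);	/* hypothetical MMIO accessor */

static inline int crb_count_valid(void)
{
	ii_icrb0_a_u_t crb_a;
	int i, valid = 0;

	for (i = 0; i < IIO_NUM_CRBS; i++) {
		crb_a.ii_icrb0_a_regval = ii_read_reg(IIO_ICRB_A(i));
		if (crb_a.ii_icrb0_a_fld_s.ia_vld)
			valid++;
	}
	return valid;
}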
3092#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7) 2982#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
3093 2983
3094/* 2984/*
3095 * values for "ecode" field 2985 * values for "ecode" field
3096 */ 2986 */
3097#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */ 2987#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
3098#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */ 2988#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
3099#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access 2989#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
3100 * e.g. WINV to a Read only line. */ 2990 * e.g. WINV to a Read only line. */
3101#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */ 2991#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
3102#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */ 2992#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
3103#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */ 2993#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
3104#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */ 2994#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
3105#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */ 2995#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
3106 2996
3107/* 2997/*
3108 * Values for field imsgtype 2998 * Values for field imsgtype
3109 */ 2999 */
3110#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */ 3000#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
3111#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ 3001#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
3112#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */ 3002#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
3113#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ 3003#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
3114 3004
3115/* 3005/*
3116 * values for field initiator. 3006 * values for field initiator.
3117 */ 3007 */
3118#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */ 3008#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
3119#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */ 3009#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
3120#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */ 3010#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */
3121#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */ 3011#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
3122#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */ 3012#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
3123 3013
3124/* 3014/*
3125 * Number of credits Hub widget has while sending req/response to 3015 * Number of credits Hub widget has while sending req/response to
@@ -3127,8 +3017,8 @@ typedef union ii_ippr_u {
3127 * Value of 3 is required by Xbow 1.1 3017 * Value of 3 is required by Xbow 1.1
3128 * We may be able to increase this to 4 with Xbow 1.2. 3018 * We may be able to increase this to 4 with Xbow 1.2.
3129 */ 3019 */
3130#define HUBII_XBOW_CREDIT 3 3020#define HUBII_XBOW_CREDIT 3
3131#define HUBII_XBOW_REV2_CREDIT 4 3021#define HUBII_XBOW_REV2_CREDIT 4
3132 3022
3133/* 3023/*
3134 * Number of credits that xtalk devices should use when communicating 3024 * Number of credits that xtalk devices should use when communicating
@@ -3159,28 +3049,28 @@ typedef union ii_ippr_u {
3159 */ 3049 */
3160 3050
3161#define IIO_ICMR_CRB_VLD_SHFT 20 3051#define IIO_ICMR_CRB_VLD_SHFT 20
3162#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT) 3052#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
3163 3053
3164#define IIO_ICMR_FC_CNT_SHFT 16 3054#define IIO_ICMR_FC_CNT_SHFT 16
3165#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT) 3055#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
3166 3056
3167#define IIO_ICMR_C_CNT_SHFT 4 3057#define IIO_ICMR_C_CNT_SHFT 4
3168#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT) 3058#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
3169 3059
3170#define IIO_ICMR_PRECISE (1UL << 52) 3060#define IIO_ICMR_PRECISE (1UL << 52)
3171#define IIO_ICMR_CLR_RPPD (1UL << 13) 3061#define IIO_ICMR_CLR_RPPD (1UL << 13)
3172#define IIO_ICMR_CLR_RQPD (1UL << 12) 3062#define IIO_ICMR_CLR_RQPD (1UL << 12)
3173 3063
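The shift/mask pairs above are used the usual way; the sketch below, not part of the patch, pulls the three fields out of a raw ICMR value. The 0x7fff width of the valid mask matches one bit per CRB entry.

/* Illustrative only: split an ICMR value into its documented fields. */
static inline void icmr_decode(uint64_t icmr, uint64_t *crb_vld,
			       unsigned int *fc_cnt, unsigned int *c_cnt)
{
	*crb_vld = (icmr & IIO_ICMR_CRB_VLD_MASK) >> IIO_ICMR_CRB_VLD_SHFT;
	*fc_cnt = (unsigned int)((icmr & IIO_ICMR_FC_CNT_MASK) >> IIO_ICMR_FC_CNT_SHFT);
	*c_cnt = (unsigned int)((icmr & IIO_ICMR_C_CNT_MASK) >> IIO_ICMR_C_CNT_SHFT);
}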
3174/* 3064/*
3175 * IIO PIO Deallocation register field masks : (IIO_IPDR) 3065 * IIO PIO Deallocation register field masks : (IIO_IPDR)
3176 XXX present but not needed in bedrock? See the manual. 3066 XXX present but not needed in bedrock? See the manual.
3177 */ 3067 */
3178#define IIO_IPDR_PND (1 << 4) 3068#define IIO_IPDR_PND (1 << 4)
3179 3069
3180/* 3070/*
3181 * IIO CRB deallocation register field masks: (IIO_ICDR) 3071 * IIO CRB deallocation register field masks: (IIO_ICDR)
3182 */ 3072 */
3183#define IIO_ICDR_PND (1 << 4) 3073#define IIO_ICDR_PND (1 << 4)
3184 3074
3185/* 3075/*
3186 * IO BTE Length/Status (IIO_IBLS) register bit field definitions 3076 * IO BTE Length/Status (IIO_IBLS) register bit field definitions
@@ -3223,35 +3113,35 @@ typedef union ii_ippr_u {
3223/* 3113/*
3224 * IO Error Clear register bit field definitions 3114 * IO Error Clear register bit field definitions
3225 */ 3115 */
3226#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ 3116#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */
3227#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ 3117#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */
3228#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ 3118#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */
3229#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ 3119#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */
3230#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ 3120#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */
3231#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ 3121#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */
3232#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ 3122#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */
3233#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ 3123#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */
3234#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ 3124#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */
3235#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ 3125#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */
3236#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ 3126#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */
3237#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ 3127#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */
3238#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ 3128#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */
3239#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ 3129#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */
3240#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ 3130#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */
3241 3131
3242/* 3132/*
3243 * IIO CRB control register Fields: IIO_ICCR 3133 * IIO CRB control register Fields: IIO_ICCR
3244 */ 3134 */
3245#define IIO_ICCR_PENDING (0x10000) 3135#define IIO_ICCR_PENDING 0x10000
3246#define IIO_ICCR_CMD_MASK (0xFF) 3136#define IIO_ICCR_CMD_MASK 0xFF
3247#define IIO_ICCR_CMD_SHFT (7) 3137#define IIO_ICCR_CMD_SHFT 7
3248#define IIO_ICCR_CMD_NOP (0x0) /* No Op */ 3138#define IIO_ICCR_CMD_NOP 0x0 /* No Op */
3249#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */ 3139#define IIO_ICCR_CMD_WAKE 0x100 /* Reactivate CRB entry and process */
3250#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */ 3140#define IIO_ICCR_CMD_TIMEOUT 0x200 /* Make CRB timeout & mark invalid */
3251#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory 3141#define IIO_ICCR_CMD_EJECT 0x400 /* Contents of entry written to memory
3252 * via a WB 3142 * via a WB
3253 */ 3143 */
3254#define IIO_ICCR_CMD_FLUSH (0x800) 3144#define IIO_ICCR_CMD_FLUSH 0x800
3255 3145
3256/* 3146/*
3257 * 3147 *
@@ -3283,8 +3173,8 @@ typedef union ii_ippr_u {
3283 * Easy access macros for CRBs, all 5 registers (A-E) 3173 * Easy access macros for CRBs, all 5 registers (A-E)
3284 */ 3174 */
3285typedef ii_icrb0_a_u_t icrba_t; 3175typedef ii_icrb0_a_u_t icrba_t;
3286#define a_sidn ii_icrb0_a_fld_s.ia_sidn 3176#define a_sidn ii_icrb0_a_fld_s.ia_sidn
3287#define a_tnum ii_icrb0_a_fld_s.ia_tnum 3177#define a_tnum ii_icrb0_a_fld_s.ia_tnum
3288#define a_addr ii_icrb0_a_fld_s.ia_addr 3178#define a_addr ii_icrb0_a_fld_s.ia_addr
3289#define a_valid ii_icrb0_a_fld_s.ia_vld 3179#define a_valid ii_icrb0_a_fld_s.ia_vld
3290#define a_iow ii_icrb0_a_fld_s.ia_iow 3180#define a_iow ii_icrb0_a_fld_s.ia_iow
@@ -3324,14 +3214,13 @@ typedef ii_icrb0_c_u_t icrbc_t;
3324#define c_source ii_icrb0_c_fld_s.ic_source 3214#define c_source ii_icrb0_c_fld_s.ic_source
3325#define c_regvalue ii_icrb0_c_regval 3215#define c_regvalue ii_icrb0_c_regval
3326 3216
3327
3328typedef ii_icrb0_d_u_t icrbd_t; 3217typedef ii_icrb0_d_u_t icrbd_t;
3329#define d_sleep ii_icrb0_d_fld_s.id_sleep 3218#define d_sleep ii_icrb0_d_fld_s.id_sleep
3330#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt 3219#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt
3331#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc 3220#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc
3332#define d_bteop ii_icrb0_d_fld_s.id_bte_op 3221#define d_bteop ii_icrb0_d_fld_s.id_bte_op
3333#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ 3222#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
3334#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ 3223#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names */
3335#define d_regvalue ii_icrb0_d_regval 3224#define d_regvalue ii_icrb0_d_regval
3336 3225
3337typedef ii_icrb0_e_u_t icrbe_t; 3226typedef ii_icrb0_e_u_t icrbe_t;
@@ -3341,7 +3230,6 @@ typedef ii_icrb0_e_u_t icrbe_t;
3341#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout 3230#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout
3342#define e_regvalue ii_icrb0_e_regval 3231#define e_regvalue ii_icrb0_e_regval
3343 3232
3344
3345/* Number of widgets supported by shub */ 3233/* Number of widgets supported by shub */
3346#define HUB_NUM_WIDGET 9 3234#define HUB_NUM_WIDGET 9
3347#define HUB_WIDGET_ID_MIN 0x8 3235#define HUB_WIDGET_ID_MIN 0x8
@@ -3367,27 +3255,27 @@ typedef ii_icrb0_e_u_t icrbe_t;
3367 3255
3368#define LNK_STAT_WORKING 0x2 /* LLP is working */ 3256#define LNK_STAT_WORKING 0x2 /* LLP is working */
3369 3257
3370#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */ 3258#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
3371#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */ 3259#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
3372#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */ 3260#define IIO_WSTAT_TXRETRY_MASK 0x7F /* should be 0xFF?? */
3373#define IIO_WSTAT_TXRETRY_SHFT (16) 3261#define IIO_WSTAT_TXRETRY_SHFT 16
3374#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \ 3262#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
3375 IIO_WSTAT_TXRETRY_MASK) 3263 IIO_WSTAT_TXRETRY_MASK)
3376 3264
3377/* Number of II perf. counters we can multiplex at once */ 3265/* Number of II perf. counters we can multiplex at once */
3378 3266
3379#define IO_PERF_SETS 32 3267#define IO_PERF_SETS 32
3380 3268
3381/* Bit for the widget in inbound access register */ 3269/* Bit for the widget in inbound access register */
3382#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3270#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
3383/* Bit for the widget in outbound access register */ 3271/* Bit for the widget in outbound access register */
3384#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w)) 3272#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
3385 3273
3386/* NOTE: The following define assumes that we are going to get 3274/* NOTE: The following define assumes that we are going to get
3387 * widget numbers from 8 thru F and the device numbers within 3275 * widget numbers from 8 thru F and the device numbers within
3388 * widget from 0 thru 7. 3276 * widget from 0 thru 7.
3389 */ 3277 */
3390#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d)))) 3278#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))
3391 3279
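As a quick, self-contained sanity check of the widget/device bit encoding described in the note above (an editorial sketch, not part of the patch; the macro is repeated locally so the snippet builds standalone in userspace):

#include <assert.h>
#include <stdint.h>

/* same encoding as IIO_IIDEM_WIDGETDEV_MASK above: widget w's device d
 * occupies bit 8 * (w - 8) + d, for widgets 0x8..0xF and devices 0..7 */
#define WIDGETDEV_MASK(w, d)    ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))

int main(void)
{
        assert(WIDGETDEV_MASK(0x8, 0) == 1ULL);            /* lowest bit */
        assert(WIDGETDEV_MASK(0xC, 3) == (1ULL << 35));    /* 8 * 4 + 3 = 35 */
        assert(WIDGETDEV_MASK(0xF, 7) == (1ULL << 63));    /* highest bit */
        return 0;
}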
3392/* IO Interrupt Destination Register */ 3280/* IO Interrupt Destination Register */
3393#define IIO_IIDSR_SENT_SHIFT 28 3281#define IIO_IIDSR_SENT_SHIFT 28
@@ -3402,11 +3290,11 @@ typedef ii_icrb0_e_u_t icrbe_t;
3402#define IIO_IIDSR_LVL_MASK 0x000000ff 3290#define IIO_IIDSR_LVL_MASK 0x000000ff
3403 3291
3404/* Xtalk timeout threshold register (IIO_IXTT) */ 3292/* Xtalk timeout threshold register (IIO_IXTT) */
3405#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ 3293#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
3406#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) 3294#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
3407#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */ 3295#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */
3408#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT) 3296#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
3409#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */ 3297#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
3410#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT) 3298#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
3411 3299
3412/* 3300/*
@@ -3414,17 +3302,17 @@ typedef ii_icrb0_e_u_t icrbe_t;
3414 */ 3302 */
3415 3303
3416typedef union hubii_wcr_u { 3304typedef union hubii_wcr_u {
3417 uint64_t wcr_reg_value; 3305 uint64_t wcr_reg_value;
3418 struct { 3306 struct {
3419 uint64_t wcr_widget_id: 4, /* LLP crossbar credit */ 3307 uint64_t wcr_widget_id:4, /* LLP crossbar credit */
3420 wcr_tag_mode: 1, /* Tag mode */ 3308 wcr_tag_mode:1, /* Tag mode */
3421 wcr_rsvd1: 8, /* Reserved */ 3309 wcr_rsvd1:8, /* Reserved */
3422 wcr_xbar_crd: 3, /* LLP crossbar credit */ 3310 wcr_xbar_crd:3, /* LLP crossbar credit */
3423 wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */ 3311 wcr_f_bad_pkt:1, /* Force bad llp pkt enable */
3424 wcr_dir_con: 1, /* widget direct connect */ 3312 wcr_dir_con:1, /* widget direct connect */
3425 wcr_e_thresh: 5, /* elasticity threshold */ 3313 wcr_e_thresh:5, /* elasticity threshold */
3426 wcr_rsvd: 41; /* unused */ 3314 wcr_rsvd:41; /* unused */
3427 } wcr_fields_s; 3315 } wcr_fields_s;
3428} hubii_wcr_t; 3316} hubii_wcr_t;
3429 3317
3430#define iwcr_dir_con wcr_fields_s.wcr_dir_con 3318#define iwcr_dir_con wcr_fields_s.wcr_dir_con
@@ -3436,41 +3324,35 @@ performance registers */
3436 performed */ 3324 performed */
3437 3325
3438typedef union io_perf_sel { 3326typedef union io_perf_sel {
3439 uint64_t perf_sel_reg; 3327 uint64_t perf_sel_reg;
3440 struct { 3328 struct {
3441 uint64_t perf_ippr0 : 4, 3329 uint64_t perf_ippr0:4, perf_ippr1:4, perf_icct:8, perf_rsvd:48;
3442 perf_ippr1 : 4, 3330 } perf_sel_bits;
3443 perf_icct : 8,
3444 perf_rsvd : 48;
3445 } perf_sel_bits;
3446} io_perf_sel_t; 3331} io_perf_sel_t;
3447 3332
3448/* io_perf_cnt is to extract the count from the shub registers. Due to 3333/* io_perf_cnt is to extract the count from the shub registers. Due to
3449 hardware problems there is only one counter, not two. */ 3334 hardware problems there is only one counter, not two. */
3450 3335
3451typedef union io_perf_cnt { 3336typedef union io_perf_cnt {
3452 uint64_t perf_cnt; 3337 uint64_t perf_cnt;
3453 struct { 3338 struct {
3454 uint64_t perf_cnt : 20, 3339 uint64_t perf_cnt:20, perf_rsvd2:12, perf_rsvd1:32;
3455 perf_rsvd2 : 12, 3340 } perf_cnt_bits;
3456 perf_rsvd1 : 32;
3457 } perf_cnt_bits;
3458 3341
3459} io_perf_cnt_t; 3342} io_perf_cnt_t;
3460 3343
3461typedef union iprte_a { 3344typedef union iprte_a {
3462 uint64_t entry; 3345 uint64_t entry;
3463 struct { 3346 struct {
3464 uint64_t i_rsvd_1 : 3; 3347 uint64_t i_rsvd_1:3;
3465 uint64_t i_addr : 38; 3348 uint64_t i_addr:38;
3466 uint64_t i_init : 3; 3349 uint64_t i_init:3;
3467 uint64_t i_source : 8; 3350 uint64_t i_source:8;
3468 uint64_t i_rsvd : 2; 3351 uint64_t i_rsvd:2;
3469 uint64_t i_widget : 4; 3352 uint64_t i_widget:4;
3470 uint64_t i_to_cnt : 5; 3353 uint64_t i_to_cnt:5;
3471 uint64_t i_vld : 1; 3354 uint64_t i_vld:1;
3472 } iprte_fields; 3355 } iprte_fields;
3473} iprte_a_t; 3356} iprte_a_t;
3474 3357
3475#endif /* _ASM_IA64_SN_SHUBIO_H */ 3358#endif /* _ASM_IA64_SN_SHUBIO_H */
3476
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
index 685435af170d..20b300187669 100644
--- a/include/asm-ia64/sn/sn_cpuid.h
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -4,7 +4,7 @@
4 * License. See the file "COPYING" in the main directory of this archive 4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details. 5 * for more details.
6 * 6 *
7 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. 7 * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
8 */ 8 */
9 9
10 10
@@ -92,24 +92,24 @@
92 * NOTE: on non-MP systems, only cpuid 0 exists 92 * NOTE: on non-MP systems, only cpuid 0 exists
93 */ 93 */
94 94
95extern short physical_node_map[]; /* indexed by nasid to get cnode */ 95extern short physical_node_map[]; /* indexed by nasid to get cnode */
96 96
97/* 97/*
98 * Macros for retrieving info about current cpu 98 * Macros for retrieving info about current cpu
99 */ 99 */
100#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid) 100#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
101#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode) 101#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
102#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice) 102#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
103#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode) 103#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
104#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) 104#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
105 105
106/* 106/*
107 * Macros for retrieving info about an arbitrary cpu 107 * Macros for retrieving info about an arbitrary cpu
108 * cpuid - logical cpu id 108 * cpuid - logical cpu id
109 */ 109 */
110#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid) 110#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
111#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode) 111#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
112#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice) 112#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
113#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)]) 113#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)])
114 114
115 115
@@ -123,11 +123,8 @@ extern int nasid_slice_to_cpuid(int, int);
123 123
124/* 124/*
125 * cnodeid_to_nasid - convert a cnodeid to a NASID 125 * cnodeid_to_nasid - convert a cnodeid to a NASID
126 * Macro relies on pg_data for a node being on the node itself.
127 * Just extract the NASID from the pointer.
128 *
129 */ 126 */
130#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid] 127#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
131 128
132/* 129/*
133 * nasid_to_cnodeid - convert a NASID to a cnodeid 130 * nasid_to_cnodeid - convert a NASID to a cnodeid
diff --git a/include/asm-ia64/sn/sn_fru.h b/include/asm-ia64/sn/sn_fru.h
deleted file mode 100644
index 8c21ac3f0156..000000000000
--- a/include/asm-ia64/sn/sn_fru.h
+++ /dev/null
@@ -1,44 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved.
7 */
8#ifndef _ASM_IA64_SN_SN_FRU_H
9#define _ASM_IA64_SN_SN_FRU_H
10
11#define MAX_DIMMS 8 /* max # of dimm banks */
12#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */
13
14typedef unsigned char confidence_t;
15
16typedef struct kf_mem_s {
17 confidence_t km_confidence; /* confidence level that the memory is bad
18 * is this necessary ?
19 */
20 confidence_t km_dimm[MAX_DIMMS];
21 /* confidence level that dimm[i] is bad
22 *I think this is the right number
23 */
24
25} kf_mem_t;
26
27typedef struct kf_cpu_s {
28 confidence_t kc_confidence; /* confidence level that cpu is bad */
29 confidence_t kc_icache; /* confidence level that instr. cache is bad */
30 confidence_t kc_dcache; /* confidence level that data cache is bad */
31 confidence_t kc_scache; /* confidence level that sec. cache is bad */
32 confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
33} kf_cpu_t;
34
35
36typedef struct kf_pci_bus_s {
37 confidence_t kpb_belief; /* confidence level that the pci bus is bad */
38 confidence_t kpb_pcidev_belief[MAX_PCIDEV];
39 /* confidence level that the pci dev is bad */
40} kf_pci_bus_t;
41
42
43#endif /* _ASM_IA64_SN_SN_FRU_H */
44
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index f914f6da077c..56d74ca76b5d 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -557,7 +557,8 @@ static inline u64
557ia64_sn_partition_serial_get(void) 557ia64_sn_partition_serial_get(void)
558{ 558{
559 struct ia64_sal_retval ret_stuff; 559 struct ia64_sal_retval ret_stuff;
560 SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0); 560 ia64_sal_oemcall_reentrant(&ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0,
561 0, 0, 0, 0, 0, 0);
561 if (ret_stuff.status != 0) 562 if (ret_stuff.status != 0)
562 return 0; 563 return 0;
563 return ret_stuff.v0; 564 return ret_stuff.v0;
@@ -565,11 +566,10 @@ ia64_sn_partition_serial_get(void)
565 566
566static inline u64 567static inline u64
567sn_partition_serial_number_val(void) { 568sn_partition_serial_number_val(void) {
568 if (sn_partition_serial_number) { 569 if (unlikely(sn_partition_serial_number == 0)) {
569 return(sn_partition_serial_number); 570 sn_partition_serial_number = ia64_sn_partition_serial_get();
570 } else {
571 return(sn_partition_serial_number = ia64_sn_partition_serial_get());
572 } 571 }
572 return sn_partition_serial_number;
573} 573}
574 574
575/* 575/*
@@ -580,8 +580,8 @@ static inline partid_t
580ia64_sn_sysctl_partition_get(nasid_t nasid) 580ia64_sn_sysctl_partition_get(nasid_t nasid)
581{ 581{
582 struct ia64_sal_retval ret_stuff; 582 struct ia64_sal_retval ret_stuff;
583 SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, 583 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
584 0, 0, 0, 0, 0, 0); 584 0, 0, 0, 0, 0, 0);
585 if (ret_stuff.status != 0) 585 if (ret_stuff.status != 0)
586 return INVALID_PARTID; 586 return INVALID_PARTID;
587 return ((partid_t)ret_stuff.v0); 587 return ((partid_t)ret_stuff.v0);
@@ -595,11 +595,38 @@ extern partid_t sn_partid;
595 595
596static inline partid_t 596static inline partid_t
597sn_local_partid(void) { 597sn_local_partid(void) {
598 if (sn_partid < 0) { 598 if (unlikely(sn_partid < 0)) {
599 return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()))); 599 sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()));
600 } else {
601 return sn_partid;
602 } 600 }
601 return sn_partid;
602}
603
604/*
605 * Returns the physical address of the partition's reserved page through
606 * an iterative number of calls.
607 *
608 * On first call, 'cookie' and 'len' should be set to 0, and 'addr'
609 * set to the nasid of the partition whose reserved page's address is
610 * being sought.
 611 * On subsequent calls, pass the values that were passed back on the
612 * previous call.
613 *
614 * While the return status equals SALRET_MORE_PASSES, keep calling
615 * this function after first copying 'len' bytes starting at 'addr'
616 * into 'buf'. Once the return status equals SALRET_OK, 'addr' will
617 * be the physical address of the partition's reserved page. If the
 618 * return status equals neither of these, an error has occurred.
619 */
620static inline s64
621sn_partition_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
622{
623 struct ia64_sal_retval rv;
624 ia64_sal_oemcall_reentrant(&rv, SN_SAL_GET_PARTITION_ADDR, *cookie,
625 *addr, buf, *len, 0, 0, 0);
626 *cookie = rv.v0;
627 *addr = rv.v1;
628 *len = rv.v2;
629 return rv.status;
603} 630}
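To make the iterative calling convention above concrete, here is a minimal caller sketch added for this review (not part of the patch). It follows the comment literally; the use of xp_bte_copy() from <asm/sn/xp.h> for the copy step, the BTE_NORMAL mode, the cacheline rounding of the copy length, and the assumption that 'buf' is the physical address of a sufficiently large local buffer are all editorial guesses at how a real caller (e.g. the XPC partition code) would handle it.

/* sketch only -- see the note above for what is assumed */
static s64
fetch_rsvd_page_pa(nasid_t nasid, u64 buf, u64 buf_len, u64 *rp_pa)
{
        u64 cookie = 0;
        u64 addr = (u64)nasid;  /* first call: nasid of the target partition */
        u64 len = 0;
        s64 status;

        while ((status = sn_partition_reserved_page_pa(buf, &cookie, &addr,
                                        &len)) == SALRET_MORE_PASSES) {
                if (L1_CACHE_ALIGN(len) > buf_len)
                        return -1;      /* local buffer too small for this pass */

                /* copy 'len' bytes starting at 'addr' into 'buf', then call again */
                if (xp_bte_copy(addr, buf, L1_CACHE_ALIGN(len), BTE_NORMAL,
                                NULL) != BTE_SUCCESS)
                        return -1;
        }

        if (status == SALRET_OK)
                *rp_pa = addr;  /* physical address of the reserved page */
        return status;
}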
604 631
605/* 632/*
@@ -621,8 +648,8 @@ static inline int
621sn_register_xp_addr_region(u64 paddr, u64 len, int operation) 648sn_register_xp_addr_region(u64 paddr, u64 len, int operation)
622{ 649{
623 struct ia64_sal_retval ret_stuff; 650 struct ia64_sal_retval ret_stuff;
624 SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation, 651 ia64_sal_oemcall(&ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len,
625 0, 0, 0, 0); 652 (u64)operation, 0, 0, 0, 0);
626 return ret_stuff.status; 653 return ret_stuff.status;
627} 654}
628 655
@@ -646,8 +673,8 @@ sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr,
646 } else { 673 } else {
647 call = SN_SAL_NO_FAULT_ZONE_PHYSICAL; 674 call = SN_SAL_NO_FAULT_ZONE_PHYSICAL;
648 } 675 }
649 SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1, 676 ia64_sal_oemcall(&ret_stuff, call, start_addr, end_addr, return_addr,
650 0, 0, 0); 677 (u64)1, 0, 0, 0);
651 return ret_stuff.status; 678 return ret_stuff.status;
652} 679}
653 680
@@ -668,8 +695,8 @@ static inline int
668sn_change_coherence(u64 *new_domain, u64 *old_domain) 695sn_change_coherence(u64 *new_domain, u64 *old_domain)
669{ 696{
670 struct ia64_sal_retval ret_stuff; 697 struct ia64_sal_retval ret_stuff;
671 SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0, 698 ia64_sal_oemcall(&ret_stuff, SN_SAL_COHERENCE, (u64)new_domain,
672 0, 0, 0); 699 (u64)old_domain, 0, 0, 0, 0, 0);
673 return ret_stuff.status; 700 return ret_stuff.status;
674} 701}
675 702
@@ -688,8 +715,8 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
688 cnodeid = nasid_to_cnodeid(get_node_number(paddr)); 715 cnodeid = nasid_to_cnodeid(get_node_number(paddr));
689 // spin_lock(&NODEPDA(cnodeid)->bist_lock); 716 // spin_lock(&NODEPDA(cnodeid)->bist_lock);
690 local_irq_save(irq_flags); 717 local_irq_save(irq_flags);
691 SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array, 718 ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_MEMPROTECT, paddr, len,
692 perms, 0, 0, 0); 719 (u64)nasid_array, perms, 0, 0, 0);
693 local_irq_restore(irq_flags); 720 local_irq_restore(irq_flags);
694 // spin_unlock(&NODEPDA(cnodeid)->bist_lock); 721 // spin_unlock(&NODEPDA(cnodeid)->bist_lock);
695 return ret_stuff.status; 722 return ret_stuff.status;
diff --git a/include/asm-ia64/sn/sndrv.h b/include/asm-ia64/sn/sndrv.h
deleted file mode 100644
index aa00d42cde32..000000000000
--- a/include/asm-ia64/sn/sndrv.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#ifndef _ASM_IA64_SN_SNDRV_H
10#define _ASM_IA64_SN_SNDRV_H
11
12/* ioctl commands */
13#define SNDRV_GET_ROUTERINFO 1
14#define SNDRV_GET_INFOSIZE 2
15#define SNDRV_GET_HUBINFO 3
16#define SNDRV_GET_FLASHLOGSIZE 4
17#define SNDRV_SET_FLASHSYNC 5
18#define SNDRV_GET_FLASHLOGDATA 6
19#define SNDRV_GET_FLASHLOGALL 7
20
21#define SNDRV_SET_HISTOGRAM_TYPE 14
22
23#define SNDRV_ELSC_COMMAND 19
24#define SNDRV_CLEAR_LOG 20
25#define SNDRV_INIT_LOG 21
26#define SNDRV_GET_PIMM_PSC 22
27#define SNDRV_SET_PARTITION 23
28#define SNDRV_GET_PARTITION 24
29
30/* see synergy_perf_ioctl() */
31#define SNDRV_GET_SYNERGY_VERSION 30
32#define SNDRV_GET_SYNERGY_STATUS 31
33#define SNDRV_GET_SYNERGYINFO 32
34#define SNDRV_SYNERGY_APPEND 33
35#define SNDRV_SYNERGY_ENABLE 34
36#define SNDRV_SYNERGY_FREQ 35
37
38/* Devices */
39#define SNDRV_UKNOWN_DEVICE -1
40#define SNDRV_ROUTER_DEVICE 1
41#define SNDRV_HUB_DEVICE 2
42#define SNDRV_ELSC_NVRAM_DEVICE 3
43#define SNDRV_ELSC_CONTROLLER_DEVICE 4
44#define SNDRV_SYSCTL_SUBCH 5
45#define SNDRV_SYNERGY_DEVICE 6
46
47#endif /* _ASM_IA64_SN_SNDRV_H */
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
new file mode 100644
index 000000000000..9902185c0288
--- /dev/null
+++ b/include/asm-ia64/sn/xp.h
@@ -0,0 +1,436 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9
10/*
11 * External Cross Partition (XP) structures and defines.
12 */
13
14
15#ifndef _ASM_IA64_SN_XP_H
16#define _ASM_IA64_SN_XP_H
17
18
19#include <linux/version.h>
20#include <linux/cache.h>
21#include <linux/hardirq.h>
22#include <asm/sn/types.h>
23#include <asm/sn/bte.h>
24
25
26#ifdef USE_DBUG_ON
27#define DBUG_ON(condition) BUG_ON(condition)
28#else
29#define DBUG_ON(condition)
30#endif
31
32
33/*
34 * Define the maximum number of logically defined partitions the system
35 * can support. It is constrained by the maximum number of hardware
36 * partitionable regions. The term 'region' in this context refers to the
37 * minimum number of nodes that can comprise an access protection grouping.
38 * The access protection is in regards to memory, IPI and IOI.
39 *
40 * The maximum number of hardware partitionable regions is equal to the
41 * maximum number of nodes in the entire system divided by the minimum number
42 * of nodes that comprise an access protection grouping.
43 */
44#define XP_MAX_PARTITIONS 64
45
46
47/*
48 * Define the number of u64s required to represent all the C-brick nasids
49 * as a bitmap. The cross-partition kernel modules deal only with
50 * C-brick nasids, thus the need for bitmaps which don't account for
51 * odd-numbered (non C-brick) nasids.
52 */
53#define XP_MAX_PHYSNODE_ID (MAX_PHYSNODE_ID / 2)
54#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8)
55#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64)
56
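For illustration, a couple of hypothetical helpers (added for this review, not part of the patch) showing how such a mask would typically be manipulated; the nasid/2 bit placement follows from the comment's statement that odd-numbered (non C-brick) nasids are excluded:

/* hypothetical helpers: set/test a C-brick nasid in an XP nasid mask */
static inline void
xp_nasid_mask_set(u64 *mask, int nasid)
{
        int bit = nasid / 2;            /* C-brick nasids are even-numbered */

        mask[bit / 64] |= 1UL << (bit % 64);
}

static inline int
xp_nasid_mask_test(const u64 *mask, int nasid)
{
        int bit = nasid / 2;

        return (mask[bit / 64] >> (bit % 64)) & 1;
}

/* usage: u64 mask[XP_NASID_MASK_WORDS] = { 0 }; xp_nasid_mask_set(mask, 6); */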
57
58/*
 59 * Wrapper for bte_copy() that, should it return a failure status, will retry
60 * the bte_copy() once in the hope that the failure was due to a temporary
61 * aberration (i.e., the link going down temporarily).
62 *
63 * See bte_copy for definition of the input parameters.
64 *
65 * Note: xp_bte_copy() should never be called while holding a spinlock.
66 */
67static inline bte_result_t
68xp_bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
69{
70 bte_result_t ret;
71
72
73 ret = bte_copy(src, dest, len, mode, notification);
74
75 if (ret != BTE_SUCCESS) {
76 if (!in_interrupt()) {
77 cond_resched();
78 }
79 ret = bte_copy(src, dest, len, mode, notification);
80 }
81
82 return ret;
83}
84
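A hedged usage sketch (editorial, not part of the patch): pulling one cacheline from another partition. The BTE_NORMAL mode and the expectation that both addresses are physical and cacheline aligned come from bte.h and are assumptions here, not something this header spells out.

/* sketch: both addresses physical, length one L1 cacheline */
static bte_result_t
pull_remote_cacheline(u64 src_pa, u64 dst_pa)
{
        return xp_bte_copy(src_pa, dst_pa, L1_CACHE_BYTES, BTE_NORMAL, NULL);
}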
85
86/*
87 * XPC establishes channel connections between the local partition and any
88 * other partition that is currently up. Over these channels, kernel-level
89 * `users' can communicate with their counterparts on the other partitions.
90 *
 91 * The maximum number of channels is limited to eight. For performance reasons,
92 * the internal cross partition structures require sixteen bytes per channel,
93 * and eight allows all of this interface-shared info to fit in one cache line.
94 *
95 * XPC_NCHANNELS reflects the total number of channels currently defined.
96 * If the need for additional channels arises, one can simply increase
97 * XPC_NCHANNELS accordingly. If the day should come where that number
98 * exceeds the MAXIMUM number of channels allowed (eight), then one will need
99 * to make changes to the XPC code to allow for this.
100 */
101#define XPC_MEM_CHANNEL 0 /* memory channel number */
102#define XPC_NET_CHANNEL 1 /* network channel number */
103
104#define XPC_NCHANNELS 2 /* #of defined channels */
105#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */
106
107#if XPC_NCHANNELS > XPC_MAX_NCHANNELS
108#error XPC_NCHANNELS exceeds MAXIMUM allowed.
109#endif
110
111
112/*
113 * The format of an XPC message is as follows:
114 *
115 * +-------+--------------------------------+
116 * | flags |////////////////////////////////|
117 * +-------+--------------------------------+
118 * | message # |
119 * +----------------------------------------+
120 * | payload (user-defined message) |
121 * | |
122 * :
123 * | |
124 * +----------------------------------------+
125 *
126 * The size of the payload is defined by the user via xpc_connect(). A user-
127 * defined message resides in the payload area.
128 *
129 * The user should have no dealings with the message header, but only the
130 * message's payload. When a message entry is allocated (via xpc_allocate())
131 * a pointer to the payload area is returned and not the actual beginning of
132 * the XPC message. The user then constructs a message in the payload area
133 * and passes that pointer as an argument on xpc_send() or xpc_send_notify().
134 *
135 * The size of a message entry (within a message queue) must be a cacheline
136 * sized multiple in order to facilitate the BTE transfer of messages from one
137 * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user
138 * that wants to fit as many msg entries as possible in a given memory size
139 * (e.g. a memory page).
140 */
141struct xpc_msg {
142 u8 flags; /* FOR XPC INTERNAL USE ONLY */
143 u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */
144 s64 number; /* FOR XPC INTERNAL USE ONLY */
145
146 u64 payload; /* user defined portion of message */
147};
148
149
150#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload)
151#define XPC_MSG_SIZE(_payload_size) \
152 L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size))
153
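As a worked example of the sizing macro (editorial sketch, not part of the patch; 'struct my_payload' and the derived constants are hypothetical), a user who wants to pack as many message entries as possible into one page might write:

/* hypothetical user payload carried in the message's payload area */
struct my_payload {
        u64 opcode;
        u64 args[4];
};

/* cacheline-aligned size of one message entry carrying that payload */
#define MY_MSG_SIZE     XPC_MSG_SIZE(sizeof(struct my_payload))

/* #of such entries that fit in one page of local message queue */
#define MY_NENTRIES     ((u16)(PAGE_SIZE / MY_MSG_SIZE))

MY_NENTRIES is the sort of value one would presumably hand to xpc_connect() as the local message queue size (see the registration structure further down).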
154
155/*
156 * Define the return values and values passed to user's callout functions.
157 * (It is important to add new value codes at the end just preceding
158 * xpcUnknownReason, which must have the highest numerical value.)
159 */
160enum xpc_retval {
161 xpcSuccess = 0,
162
163 xpcNotConnected, /* 1: channel is not connected */
164 xpcConnected, /* 2: channel connected (opened) */
165 xpcRETIRED1, /* 3: (formerly xpcDisconnected) */
166
167 xpcMsgReceived, /* 4: message received */
168 xpcMsgDelivered, /* 5: message delivered and acknowledged */
169
170 xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */
171
172 xpcNoWait, /* 7: operation would require wait */
173 xpcRetry, /* 8: retry operation */
174 xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */
175 xpcInterrupted, /* 10: interrupted wait */
176
177 xpcUnequalMsgSizes, /* 11: message size disparity between sides */
178 xpcInvalidAddress, /* 12: invalid address */
179
180 xpcNoMemory, /* 13: no memory available for XPC structures */
181 xpcLackOfResources, /* 14: insufficient resources for operation */
182 xpcUnregistered, /* 15: channel is not registered */
183 xpcAlreadyRegistered, /* 16: channel is already registered */
184
185 xpcPartitionDown, /* 17: remote partition is down */
186 xpcNotLoaded, /* 18: XPC module is not loaded */
187 xpcUnloading, /* 19: this side is unloading XPC module */
188
189 xpcBadMagic, /* 20: XPC MAGIC string not found */
190
191 xpcReactivating, /* 21: remote partition was reactivated */
192
193 xpcUnregistering, /* 22: this side is unregistering channel */
194 xpcOtherUnregistering, /* 23: other side is unregistering channel */
195
196 xpcCloneKThread, /* 24: cloning kernel thread */
197 xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */
198
199 xpcNoHeartbeat, /* 26: remote partition has no heartbeat */
200
201 xpcPioReadError, /* 27: PIO read error */
202 xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */
203
204 xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */
205 xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */
206 xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */
207 xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */
208 xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */
209 xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */
210 xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */
211 xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */
212 xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */
213 xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */
214
215 xpcBadVersion, /* 39: bad version number */
216 xpcVarsNotSet, /* 40: the XPC variables are not set up */
217 xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */
218 xpcInvalidPartid, /* 42: invalid partition ID */
219 xpcLocalPartid, /* 43: local partition ID */
220
221 xpcUnknownReason /* 44: unknown reason -- must be last in list */
222};
223
224
225/*
226 * Define the callout function types used by XPC to update the user on
227 * connection activity and state changes (via the user function registered by
228 * xpc_connect()) and to notify them of messages received and delivered (via
229 * the user function registered by xpc_send_notify()).
230 *
231 * The two function types are xpc_channel_func and xpc_notify_func and
232 * both share the following arguments, with the exception of "data", which
233 * only xpc_channel_func has.
234 *
235 * Arguments:
236 *
237 * reason - reason code. (See following table.)
238 * partid - partition ID associated with condition.
239 * ch_number - channel # associated with condition.
240 * data - pointer to optional data. (See following table.)
241 * key - pointer to optional user-defined value provided as the "key"
242 * argument to xpc_connect() or xpc_send_notify().
243 *
244 * In the following table the "Optional Data" column applies to callouts made
245 * to functions registered by xpc_connect(). A "NA" in that column indicates
246 * that this reason code can be passed to functions registered by
247 * xpc_send_notify() (i.e. they don't have data arguments).
248 *
249 * Also, the first three reason codes in the following table indicate
250 * success, whereas the others indicate failure. When a failure reason code
251 * is received, one can assume that the channel is not connected.
252 *
253 *
254 * Reason Code | Cause | Optional Data
255 * =====================+================================+=====================
256 * xpcConnected | connection has been established| max #of entries
257 * | to the specified partition on | allowed in message
258 * | the specified channel | queue
259 * ---------------------+--------------------------------+---------------------
260 * xpcMsgReceived | an XPC message arrived from | address of payload
261 * | the specified partition on the |
262 * | specified channel | [the user must call
263 * | | xpc_received() when
264 * | | finished with the
265 * | | payload]
266 * ---------------------+--------------------------------+---------------------
267 * xpcMsgDelivered | notification that the message | NA
268 * | was delivered to the intended |
269 * | recipient and that they have |
270 * | acknowledged its receipt by |
271 * | calling xpc_received() |
272 * =====================+================================+=====================
273 * xpcUnequalMsgSizes | can't connect to the specified | NULL
274 * | partition on the specified |
275 * | channel because of mismatched |
276 * | message sizes |
277 * ---------------------+--------------------------------+---------------------
 278 * xpcNoMemory | insufficient memory available | NULL
279 * | to allocate message queue |
280 * ---------------------+--------------------------------+---------------------
281 * xpcLackOfResources | lack of resources to create | NULL
282 * | the necessary kthreads to |
283 * | support the channel |
284 * ---------------------+--------------------------------+---------------------
285 * xpcUnregistering | this side's user has | NULL or NA
286 * | unregistered by calling |
287 * | xpc_disconnect() |
288 * ---------------------+--------------------------------+---------------------
289 * xpcOtherUnregistering| the other side's user has | NULL or NA
290 * | unregistered by calling |
291 * | xpc_disconnect() |
292 * ---------------------+--------------------------------+---------------------
293 * xpcNoHeartbeat | the other side's XPC is no | NULL or NA
294 * | longer heartbeating |
295 * | |
296 * ---------------------+--------------------------------+---------------------
297 * xpcUnloading | this side's XPC module is | NULL or NA
298 * | being unloaded |
299 * | |
300 * ---------------------+--------------------------------+---------------------
301 * xpcOtherUnloading | the other side's XPC module is | NULL or NA
 302 * | being unloaded |
303 * | |
304 * ---------------------+--------------------------------+---------------------
305 * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA
306 * | error while sending an IPI |
307 * | |
308 * ---------------------+--------------------------------+---------------------
309 * xpcInvalidAddress | the address either received or | NULL or NA
310 * | sent by the specified partition|
311 * | is invalid |
312 * ---------------------+--------------------------------+---------------------
313 * xpcBteNotAvailable | attempt to pull data from the | NULL or NA
314 * xpcBtePoisonError | specified partition over the |
315 * xpcBteWriteError | specified channel via a |
316 * xpcBteAccessError | bte_copy() failed |
317 * xpcBteTimeOutError | |
318 * xpcBteXtalkError | |
319 * xpcBteDirectoryError | |
320 * xpcBteGenericError | |
321 * xpcBteUnmappedError | |
322 * ---------------------+--------------------------------+---------------------
323 * xpcUnknownReason | the specified channel to the | NULL or NA
324 * | specified partition was |
325 * | unavailable for unknown reasons|
326 * =====================+================================+=====================
327 */
328
329typedef void (*xpc_channel_func)(enum xpc_retval reason, partid_t partid,
330 int ch_number, void *data, void *key);
331
332typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
333 int ch_number, void *key);
334
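A skeleton of such a callout (editorial sketch, not part of the patch; my_channel_func is hypothetical), driven directly by the reason-code table above:

static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
                void *data, void *key)
{
        switch (reason) {
        case xpcConnected:
                /* 'data' holds the max #of entries allowed in the msg queue */
                break;
        case xpcMsgReceived:
                /* 'data' is the address of the payload; acknowledge it once
                 * we are done with it so XPC can reuse the message entry */
                xpc_received(partid, ch_number, data);
                break;
        default:
                /* per the table, any other reason code means the channel
                 * can be assumed to be no longer connected */
                break;
        }
}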
335
336/*
337 * The following is a registration entry. There is a global array of these,
338 * one per channel. It is used to record the connection registration made
339 * by the users of XPC. As long as a registration entry exists, for any
340 * partition that comes up, XPC will attempt to establish a connection on
341 * that channel. Notification that a connection has been made will occur via
342 * the xpc_channel_func function.
343 *
 344 * The 'func' field points to the function to call when asynchronous
 345 * notification is required for such events as: a connection established/lost,
 346 * an incoming message received, or an error condition encountered. A
347 * non-NULL 'func' field indicates that there is an active registration for
348 * the channel.
349 */
350struct xpc_registration {
351 struct semaphore sema;
352 xpc_channel_func func; /* function to call */
353 void *key; /* pointer to user's key */
354 u16 nentries; /* #of msg entries in local msg queue */
355 u16 msg_size; /* message queue's message size */
356 u32 assigned_limit; /* limit on #of assigned kthreads */
357 u32 idle_limit; /* limit on #of idle kthreads */
358} ____cacheline_aligned;
359
360
361#define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL)
362
363
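Registration itself would then look something like the sketch below (editorial, not part of the patch). The mapping of xpc_connect()'s u16/u32 arguments to payload size, queue entries and kthread limits is inferred from the registration structure above and the xpc_connect() prototype further down, so treat it as an assumption; my_channel_func, struct my_payload and MY_NENTRIES are the hypothetical names from the earlier sketches, and XPC_NET_CHANNEL is reused purely for illustration.

        enum xpc_retval ret;

        ret = xpc_connect(XPC_NET_CHANNEL, my_channel_func, NULL /* key */,
                          sizeof(struct my_payload), MY_NENTRIES,
                          8 /* assigned kthread limit */,
                          2 /* idle kthread limit */);
        if (ret != xpcSuccess)
                printk(KERN_WARNING "xpc_connect() failed, reason=%d\n", ret);

        /* ... and on teardown ... */
        xpc_disconnect(XPC_NET_CHANNEL);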
364/* the following are valid xpc_allocate() flags */
365#define XPC_WAIT 0 /* wait flag */
366#define XPC_NOWAIT 1 /* no wait flag */
367
368
369struct xpc_interface {
370 void (*connect)(int);
371 void (*disconnect)(int);
372 enum xpc_retval (*allocate)(partid_t, int, u32, void **);
373 enum xpc_retval (*send)(partid_t, int, void *);
374 enum xpc_retval (*send_notify)(partid_t, int, void *,
375 xpc_notify_func, void *);
376 void (*received)(partid_t, int, void *);
377 enum xpc_retval (*partid_to_nasids)(partid_t, void *);
378};
379
380
381extern struct xpc_interface xpc_interface;
382
383extern void xpc_set_interface(void (*)(int),
384 void (*)(int),
385 enum xpc_retval (*)(partid_t, int, u32, void **),
386 enum xpc_retval (*)(partid_t, int, void *),
387 enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func,
388 void *),
389 void (*)(partid_t, int, void *),
390 enum xpc_retval (*)(partid_t, void *));
391extern void xpc_clear_interface(void);
392
393
394extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16,
395 u16, u32, u32);
396extern void xpc_disconnect(int);
397
398static inline enum xpc_retval
399xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
400{
401 return xpc_interface.allocate(partid, ch_number, flags, payload);
402}
403
404static inline enum xpc_retval
405xpc_send(partid_t partid, int ch_number, void *payload)
406{
407 return xpc_interface.send(partid, ch_number, payload);
408}
409
410static inline enum xpc_retval
411xpc_send_notify(partid_t partid, int ch_number, void *payload,
412 xpc_notify_func func, void *key)
413{
414 return xpc_interface.send_notify(partid, ch_number, payload, func, key);
415}
416
417static inline void
418xpc_received(partid_t partid, int ch_number, void *payload)
419{
420 return xpc_interface.received(partid, ch_number, payload);
421}
422
423static inline enum xpc_retval
424xpc_partid_to_nasids(partid_t partid, void *nasids)
425{
426 return xpc_interface.partid_to_nasids(partid, nasids);
427}
428
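Putting the wrappers together, a send path might look like this (editorial sketch, not part of the patch; send_request and struct my_payload are hypothetical, and XPC_NET_CHANNEL is again used only for illustration):

static enum xpc_retval
send_request(partid_t partid, u64 opcode)
{
        struct my_payload *p;
        enum xpc_retval ret;

        /* get the payload area of a free message entry; XPC_WAIT may sleep */
        ret = xpc_allocate(partid, XPC_NET_CHANNEL, XPC_WAIT, (void **)&p);
        if (ret != xpcSuccess)
                return ret;

        /* construct the user-defined message in the payload area ... */
        p->opcode = opcode;

        /* ... and hand it back to XPC for delivery to the other partition */
        return xpc_send(partid, XPC_NET_CHANNEL, p);
}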
429
430extern u64 xp_nofault_PIOR_target;
431extern int xp_nofault_PIOR(void *);
432extern int xp_error_PIOR(void);
433
434
435#endif /* _ASM_IA64_SN_XP_H */
436
diff --git a/kernel/exit.c b/kernel/exit.c
index 7be283d98983..edaa50b5bbfa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -846,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code)
846 for (;;) ; 846 for (;;) ;
847} 847}
848 848
849EXPORT_SYMBOL_GPL(do_exit);
850
849NORET_TYPE void complete_and_exit(struct completion *comp, long code) 851NORET_TYPE void complete_and_exit(struct completion *comp, long code)
850{ 852{
851 if (comp) 853 if (comp)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc1b1064c505..b1061b1962f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,7 +43,9 @@
43 * initializer cleaner 43 * initializer cleaner
44 */ 44 */
45nodemask_t node_online_map = { { [0] = 1UL } }; 45nodemask_t node_online_map = { { [0] = 1UL } };
46EXPORT_SYMBOL(node_online_map);
46nodemask_t node_possible_map = NODE_MASK_ALL; 47nodemask_t node_possible_map = NODE_MASK_ALL;
48EXPORT_SYMBOL(node_possible_map);
47struct pglist_data *pgdat_list; 49struct pglist_data *pgdat_list;
48unsigned long totalram_pages; 50unsigned long totalram_pages;
49unsigned long totalhigh_pages; 51unsigned long totalhigh_pages;