-rw-r--r--	arch/ia64/Kconfig	2
-rw-r--r--	arch/ia64/configs/sn2_defconfig	2
-rw-r--r--	arch/ia64/hp/common/sba_iommu.c	96
-rw-r--r--	arch/ia64/kernel/entry.S	14
-rw-r--r--	arch/ia64/kernel/iosapic.c	358
-rw-r--r--	arch/ia64/kernel/irq_ia64.c	16
-rw-r--r--	arch/ia64/kernel/perfmon.c	59
-rw-r--r--	arch/ia64/kernel/perfmon_default_smpl.c	13
-rw-r--r--	arch/ia64/kernel/setup.c	69
-rw-r--r--	arch/ia64/kernel/smpboot.c	217
-rw-r--r--	arch/ia64/kernel/unwind.c	27
-rw-r--r--	arch/ia64/lib/memcpy_mck.S	2
-rw-r--r--	arch/ia64/mm/contig.c	3
-rw-r--r--	arch/ia64/mm/discontig.c	3
-rw-r--r--	arch/ia64/mm/fault.c	9
-rw-r--r--	arch/ia64/mm/init.c	74
-rw-r--r--	arch/ia64/sn/include/pci/pcibr_provider.h	6
-rw-r--r--	arch/ia64/sn/kernel/Makefile	1
-rw-r--r--	arch/ia64/sn/kernel/bte.c	20
-rw-r--r--	arch/ia64/sn/kernel/bte_error.c	76
-rw-r--r--	arch/ia64/sn/kernel/huberror.c	9
-rw-r--r--	arch/ia64/sn/kernel/io_init.c	78
-rw-r--r--	arch/ia64/sn/kernel/irq.c	19
-rw-r--r--	arch/ia64/sn/kernel/setup.c	9
-rw-r--r--	arch/ia64/sn/kernel/sn2/sn_hwperf.c	112
-rw-r--r--	arch/ia64/sn/kernel/tiocx.c	548
-rw-r--r--	arch/ia64/sn/pci/Makefile	2
-rw-r--r--	arch/ia64/sn/pci/pci_dma.c	39
-rw-r--r--	arch/ia64/sn/pci/pcibr/pcibr_ate.c	4
-rw-r--r--	arch/ia64/sn/pci/pcibr/pcibr_dma.c	107
-rw-r--r--	arch/ia64/sn/pci/pcibr/pcibr_provider.c	24
-rw-r--r--	arch/ia64/sn/pci/pcibr/pcibr_reg.c	4
-rw-r--r--	arch/ia64/sn/pci/tioca_provider.c	668
-rw-r--r--	arch/ppc64/kernel/vdso32/cacheflush.S	2
-rw-r--r--	arch/ppc64/kernel/vdso32/gettimeofday.S	1
-rw-r--r--	arch/ppc64/kernel/vdso64/cacheflush.S	2
-rw-r--r--	drivers/char/Kconfig	14
-rw-r--r--	drivers/char/Makefile	3
-rw-r--r--	drivers/char/mbcs.c	849
-rw-r--r--	drivers/char/mbcs.h	553
-rw-r--r--	drivers/char/snsc.c	8
-rw-r--r--	drivers/char/snsc.h	40
-rw-r--r--	drivers/char/snsc_event.c	304
-rw-r--r--	drivers/net/tg3.c	12
-rw-r--r--	fs/binfmt_elf.c	2
-rw-r--r--	fs/compat.c	6
-rw-r--r--	include/asm-ia64/hw_irq.h	1
-rw-r--r--	include/asm-ia64/pal.h	68
-rw-r--r--	include/asm-ia64/perfmon.h	12
-rw-r--r--	include/asm-ia64/pgalloc.h	148
-rw-r--r--	include/asm-ia64/processor.h	10
-rw-r--r--	include/asm-ia64/sal.h	12
-rw-r--r--	include/asm-ia64/smp.h	5
-rw-r--r--	include/asm-ia64/sn/addrs.h	6
-rw-r--r--	include/asm-ia64/sn/bte.h	53
-rw-r--r--	include/asm-ia64/sn/geo.h	45
-rw-r--r--	include/asm-ia64/sn/nodepda.h	4
-rw-r--r--	include/asm-ia64/sn/pcibus_provider_defs.h (renamed from arch/ia64/sn/include/pci/pcibus_provider_defs.h)	13
-rw-r--r--	include/asm-ia64/sn/pcidev.h (renamed from arch/ia64/sn/include/pci/pcidev.h)	4
-rw-r--r--	include/asm-ia64/sn/pda.h	3
-rw-r--r--	include/asm-ia64/sn/shub_mmr.h	37
-rw-r--r--	include/asm-ia64/sn/sn_sal.h	46
-rw-r--r--	include/asm-ia64/sn/tioca.h	596
-rw-r--r--	include/asm-ia64/sn/tioca_provider.h	206
-rw-r--r--	include/asm-ia64/sn/tiocx.h	71
-rw-r--r--	include/asm-ia64/sn/types.h	3
-rw-r--r--	include/linux/sysctl.h	1
-rw-r--r--	include/net/pkt_sched.h	5
-rw-r--r--	include/net/sctp/sm.h	42
-rw-r--r--	include/net/sctp/structs.h	11
-rw-r--r--	kernel/time.c	8
-rw-r--r--	net/core/neighbour.c	2
-rw-r--r--	net/ipv4/route.c	4
-rw-r--r--	net/ipv6/route.c	2
-rw-r--r--	net/sctp/endpointola.c	1
-rw-r--r--	net/sctp/ipv6.c	4
-rw-r--r--	net/sctp/output.c	4
-rw-r--r--	net/sctp/protocol.c	7
-rw-r--r--	net/sctp/sm_make_chunk.c	19
-rw-r--r--	net/sctp/sm_statefuns.c	77
-rw-r--r--	net/sctp/socket.c	38
-rw-r--r--	net/sctp/sysctl.c	8
82 files changed, 5465 insertions, 597 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 33fcb205fcb7..468dbe8a6b9c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -329,7 +329,7 @@ menu "Power management and ACPI"
 
 config PM
 	bool "Power Management support"
-	depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB
+	depends on !IA64_HP_SIM
 	default y
 	help
 	  "Power Management" means that parts of your computer are shut
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index bfeb952fe8e2..6ff7107fee4d 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -574,6 +574,8 @@ CONFIG_SERIAL_NONSTANDARD=y
 # CONFIG_N_HDLC is not set
 # CONFIG_STALDRV is not set
 CONFIG_SGI_SNSC=y
+CONFIG_SGI_TIOCX=y
+CONFIG_SGI_MBCS=m
 
 #
 # Serial drivers
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 017c9ab5fc1b..6a8fcba7a853 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1,9 +1,9 @@
 /*
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-** (c) Copyright 2002-2004 Alex Williamson
+** (c) Copyright 2002-2005 Alex Williamson
 ** (c) Copyright 2002-2003 Grant Grundler
-** (c) Copyright 2002-2004 Hewlett-Packard Company
+** (c) Copyright 2002-2005 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size)
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @bits_wanted: number of entries we need.
+ * @use_hint: use res_hint to indicate where to start looking
  *
  * Find consecutive free bits in resource bitmap.
  * Each bit represents one entry in the IO Pdir.
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 {
-	unsigned long *res_ptr = ioc->res_hint;
+	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long pide = ~0UL;
+	unsigned long flags, pide = ~0UL;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	spin_lock_irqsave(&ioc->res_lock, flags);
+
+	/* Allow caller to force a search through the entire resource space */
+	if (likely(use_hint)) {
+		res_ptr = ioc->res_hint;
+	} else {
+		res_ptr = (ulong *)ioc->res_map;
+		ioc->res_bitshift = 0;
+	}
+
 	/*
 	 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
 	 * if a TLB entry is purged while in use. sba_mark_invalid()
@@ -570,10 +581,12 @@ not_found:
 	prefetch(ioc->res_map);
 	ioc->res_hint = (unsigned long *) ioc->res_map;
 	ioc->res_bitshift = 0;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 
 found_it:
 	ioc->res_hint = res_ptr;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 }
 
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	unsigned long itc_start;
 #endif
 	unsigned long pide;
-	unsigned long flags;
 
 	ASSERT(pages_needed);
 	ASSERT(0 == (size & ~iovp_mask));
 
-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef PDIR_SEARCH_TIMING
 	itc_start = ia64_get_itc();
 #endif
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed);
+	pide = sba_search_bitmap(ioc, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed);
+		pide = sba_search_bitmap(ioc, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
+			unsigned long flags;
+
 			/*
 			** With delayed resource freeing, we can give this one more shot. We're
 			** getting close to being in trouble here, so do what we can to make this
 			** one count.
 			*/
-			spin_lock(&ioc->saved_lock);
+			spin_lock_irqsave(&ioc->saved_lock, flags);
 			if (ioc->saved_cnt > 0) {
 				struct sba_dma_pair *d;
 				int cnt = ioc->saved_cnt;
 
-				d = &(ioc->saved[ioc->saved_cnt]);
+				d = &(ioc->saved[ioc->saved_cnt - 1]);
 
+				spin_lock(&ioc->res_lock);
 				while (cnt--) {
 					sba_mark_invalid(ioc, d->iova, d->size);
 					sba_free_range(ioc, d->iova, d->size);
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 				}
 				ioc->saved_cnt = 0;
 				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+				spin_unlock(&ioc->res_lock);
 			}
-			spin_unlock(&ioc->saved_lock);
+			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed);
+			pide = sba_search_bitmap(ioc, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	       (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 	       ioc->res_bitshift );
 
-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
 	return (pide);
 }
 
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	return SBA_IOVA(ioc, iovp, offset);
 }
 
+#ifdef ENABLE_MARK_CLEAN
+static SBA_INLINE void
+sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+	int off = PDIR_INDEX(iovp);
+	void *addr;
+
+	if (size <= iovp_size) {
+		addr = phys_to_virt(ioc->pdir_base[off] &
+				    ~0xE000000000000FFFULL);
+		mark_clean(addr, size);
+	} else {
+		do {
+			addr = phys_to_virt(ioc->pdir_base[off] &
+					    ~0xE000000000000FFFULL);
+			mark_clean(addr, min(size, iovp_size));
+			off++;
+			size -= iovp_size;
+		} while (size > 0);
+	}
+}
+#endif
+
 /**
  * sba_unmap_single - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	size += offset;
 	size = ROUNDUP(size, iovp_size);
 
+#ifdef ENABLE_MARK_CLEAN
+	if (dir == DMA_FROM_DEVICE)
+		sba_mark_clean(ioc, iova, size);
+#endif
 
 #if DELAYED_RESOURCE_CNT > 0
 	spin_lock_irqsave(&ioc->saved_lock, flags);
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (dir == DMA_FROM_DEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc,iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= iovp_size) {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-					    ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] &
-						    ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, iovp_size));
-				off++;
-				byte_cnt -= iovp_size;
-
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
 }
 
 
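
The sba_search_bitmap()/sba_alloc_range() change above is a standard two-pass allocator: search forward from the per-IOC hint first, and only when that fails rescan from the start of the resource map (use_hint == 0). A minimal user-space sketch of the same idea, simplified to a bit-at-a-time scan and without the res_lock the real code now takes inside the search (all names here are illustrative, not the driver's):

/* Illustrative sketch of the hint-then-full-scan allocation used above.
 * Simplified to a bit-at-a-time search; the real sba_search_bitmap()
 * scans word-at-a-time and holds ioc->res_lock around the search. */
#define MAP_BITS 1024

static unsigned char res_map[MAP_BITS / 8];
static unsigned long res_hint;		/* bit index just past the last allocation */

static int test_bit_(unsigned long i) { return res_map[i >> 3] >> (i & 7) & 1; }
static void set_bit_(unsigned long i) { res_map[i >> 3] |= 1 << (i & 7); }

/* Find 'want' consecutive clear bits starting at 'start'; -1 if none. */
static long find_range(unsigned long start, unsigned long want)
{
	unsigned long i, run = 0;

	for (i = start; i < MAP_BITS; i++) {
		run = test_bit_(i) ? 0 : run + 1;
		if (run == want)
			return i - want + 1;
	}
	return -1;
}

static long alloc_range(unsigned long want)
{
	/* Pass 1: honor the hint (cheap, usually succeeds). */
	long pide = find_range(res_hint, want);

	/* Pass 2: retry from the beginning of the resource map. */
	if (pide < 0)
		pide = find_range(0, want);
	if (pide < 0)
		return -1;	/* the driver panics at this point instead */

	for (unsigned long i = 0; i < want; i++)
		set_bit_(pide + i);
	res_hint = pide + want;
	return pide;
}
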
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0272c010a3ba..bd86fea49a0c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall)
 	mov f8=f0		// clear f8
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
-	mov.m ar.ssd=r0		// M2 clear ar.ssd
-	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-	;;
 	ld8 r25=[r3],16		// M0|1 load ar.unat
-	mov.m ar.csd=r0		// M2 clear ar.csd
-	mov r22=r0		// clear r22
+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 	;;
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall)
 	mov f7=f0		// clear f7
 	;;
 	ld8.fill r12=[r2]	// restore r12 (sp)
+	mov.m ar.ssd=r0		// M2 clear ar.ssd
+	mov r22=r0		// clear r22
+
 	ld8.fill r15=[r3]	// restore r15
+(pUStk)	st1 [r14]=r17
 	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
 	;;
-(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
-(pUStk)	st1 [r14]=r17
+(pUStk)	ld4 r17=[r3]		// r17 = cpu_data->phys_stacked_size_p8
+	mov.m ar.csd=r0		// M2 clear ar.csd
 	mov b6=r18		// I0 restore b6
 	;;
 	mov r14=r0		// clear r14
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index c15be5c38f56..88b014381df5 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -79,6 +79,7 @@
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/delay.h>
 #include <asm/hw_irq.h>
@@ -98,19 +99,30 @@
 #define DBG(fmt...)
 #endif
 
+#define NR_PREALLOCATE_RTE_ENTRIES	(PAGE_SIZE / sizeof(struct iosapic_rte_info))
+#define RTE_PREALLOCATED	(1)
+
 static DEFINE_SPINLOCK(iosapic_lock);
 
 /* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */
 
-static struct iosapic_intr_info {
+struct iosapic_rte_info {
+	struct list_head rte_list;	/* node in list of RTEs sharing the same vector */
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	u32	low32;		/* current value of low word of Redirection table entry */
 	unsigned int	gsi_base;	/* first GSI assigned to this IOSAPIC */
-	char	rte_index;	/* IOSAPIC RTE index (-1 => not an IOSAPIC interrupt) */
+	char		rte_index;	/* IOSAPIC RTE index */
+	int		refcnt;		/* reference counter */
+	unsigned int	flags;		/* flags */
+} ____cacheline_aligned;
+
+static struct iosapic_intr_info {
+	struct list_head rtes;		/* RTEs using this vector (empty => not an IOSAPIC interrupt) */
+	int		count;		/* # of RTEs that shares this vector */
+	u32		low32;		/* current value of low word of Redirection table entry */
+	unsigned int	dest;		/* destination CPU physical ID */
 	unsigned char dmode	: 3;	/* delivery mode (see iosapic.h) */
 	unsigned char polarity	: 1;	/* interrupt polarity (see iosapic.h) */
 	unsigned char trigger	: 1;	/* trigger mode (see iosapic.h) */
-	int refcnt;			/* reference counter */
 } iosapic_intr_info[IA64_NUM_VECTORS];
 
 static struct iosapic {
@@ -126,6 +138,8 @@ static int num_iosapic;
 
 static unsigned char pcat_compat __initdata;	/* 8259 compatibility flag */
 
+static int iosapic_kmalloc_ok;
+static LIST_HEAD(free_rte_list);
 
 /*
  * Find an IOSAPIC associated with a GSI
@@ -147,10 +161,12 @@ static inline int
 _gsi_to_vector (unsigned int gsi)
 {
 	struct iosapic_intr_info *info;
+	struct iosapic_rte_info *rte;
 
 	for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info)
-		if (info->gsi_base + info->rte_index == gsi)
-			return info - iosapic_intr_info;
+		list_for_each_entry(rte, &info->rtes, rte_list)
+			if (rte->gsi_base + rte->rte_index == gsi)
+				return info - iosapic_intr_info;
 	return -1;
 }
 
@@ -167,33 +183,52 @@ gsi_to_vector (unsigned int gsi)
 int
 gsi_to_irq (unsigned int gsi)
 {
+	unsigned long flags;
+	int irq;
 	/*
 	 * XXX fix me: this assumes an identity mapping vetween IA-64 vector and Linux irq
 	 * numbers...
 	 */
-	return _gsi_to_vector(gsi);
+	spin_lock_irqsave(&iosapic_lock, flags);
+	{
+		irq = _gsi_to_vector(gsi);
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+
+	return irq;
+}
+
+static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec)
+{
+	struct iosapic_rte_info *rte;
+
+	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+		if (rte->gsi_base + rte->rte_index == gsi)
+			return rte;
+	return NULL;
 }
 
 static void
-set_rte (unsigned int vector, unsigned int dest, int mask)
+set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
 {
 	unsigned long pol, trigger, dmode;
 	u32 low32, high32;
 	char __iomem *addr;
 	int rte_index;
 	char redir;
+	struct iosapic_rte_info *rte;
 
 	DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
 
-	rte_index = iosapic_intr_info[vector].rte_index;
-	if (rte_index < 0)
+	rte = gsi_vector_to_rte(gsi, vector);
+	if (!rte)
 		return;		/* not an IOSAPIC interrupt */
 
-	addr = iosapic_intr_info[vector].addr;
+	rte_index = rte->rte_index;
+	addr = rte->addr;
 	pol     = iosapic_intr_info[vector].polarity;
 	trigger = iosapic_intr_info[vector].trigger;
 	dmode   = iosapic_intr_info[vector].dmode;
-	vector &= (~IA64_IRQ_REDIRECTED);
 
 	redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
 
@@ -221,6 +256,7 @@ set_rte (unsigned int vector, unsigned int dest, int mask)
 	iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
 	iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
 	iosapic_intr_info[vector].low32 = low32;
+	iosapic_intr_info[vector].dest = dest;
 }
 
 static void
@@ -237,18 +273,20 @@ mask_irq (unsigned int irq)
 	u32 low32;
 	int rte_index;
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
-	addr = iosapic_intr_info[vec].addr;
-	rte_index = iosapic_intr_info[vec].rte_index;
-
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		/* set only the mask bit */
 		low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
@@ -261,16 +299,19 @@ unmask_irq (unsigned int irq)
 	u32 low32;
 	int rte_index;
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
-	addr = iosapic_intr_info[vec].addr;
-	rte_index = iosapic_intr_info[vec].rte_index;
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt! */
 
 	spin_lock_irqsave(&iosapic_lock, flags);
 	{
 		low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 }
@@ -286,6 +327,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 	char __iomem *addr;
 	int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
 	ia64_vector vec;
+	struct iosapic_rte_info *rte;
 
 	irq &= (~IA64_IRQ_REDIRECTED);
 	vec = irq_to_vector(irq);
@@ -295,10 +337,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	dest = cpu_physical_id(first_cpu(mask));
 
-	rte_index = iosapic_intr_info[vec].rte_index;
-	addr = iosapic_intr_info[vec].addr;
-
-	if (rte_index < 0)
+	if (list_empty(&iosapic_intr_info[vec].rtes))
 		return;			/* not an IOSAPIC interrupt */
 
 	set_irq_affinity_info(irq, dest, redir);
@@ -318,8 +357,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 		low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
 
 		iosapic_intr_info[vec].low32 = low32;
-		iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
-		iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		iosapic_intr_info[vec].dest = dest;
+		list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) {
+			addr = rte->addr;
+			rte_index = rte->rte_index;
+			iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+			iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
+		}
 	}
 	spin_unlock_irqrestore(&iosapic_lock, flags);
 #endif
@@ -340,9 +384,11 @@ static void
 iosapic_end_level_irq (unsigned int irq)
 {
 	ia64_vector vec = irq_to_vector(irq);
+	struct iosapic_rte_info *rte;
 
 	move_irq(irq);
-	iosapic_eoi(iosapic_intr_info[vec].addr, vec);
+	list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
+		iosapic_eoi(rte->addr, vec);
 }
 
 #define iosapic_shutdown_level_irq	mask_irq
@@ -422,6 +468,34 @@ iosapic_version (char __iomem *addr)
 	return iosapic_read(addr, IOSAPIC_VERSION);
 }
 
+static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol)
+{
+	int i, vector = -1, min_count = -1;
+	struct iosapic_intr_info *info;
+
+	/*
+	 * shared vectors for edge-triggered interrupts are not
+	 * supported yet
+	 */
+	if (trigger == IOSAPIC_EDGE)
+		return -1;
+
+	for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) {
+		info = &iosapic_intr_info[i];
+		if (info->trigger == trigger && info->polarity == pol &&
+		    (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) {
+			if (min_count == -1 || info->count < min_count) {
+				vector = i;
+				min_count = info->count;
+			}
+		}
+	}
+	if (vector < 0)
+		panic("%s: out of interrupt vectors!\n", __FUNCTION__);
+
+	return vector;
+}
+
 /*
  * if the given vector is already owned by other,
  * assign a new vector for the other and make the vector available
@@ -431,19 +505,63 @@ iosapic_reassign_vector (int vector)
 {
 	int new_vector;
 
-	if (iosapic_intr_info[vector].rte_index >= 0 || iosapic_intr_info[vector].addr
-	    || iosapic_intr_info[vector].gsi_base || iosapic_intr_info[vector].dmode
-	    || iosapic_intr_info[vector].polarity || iosapic_intr_info[vector].trigger)
-	{
+	if (!list_empty(&iosapic_intr_info[vector].rtes)) {
 		new_vector = assign_irq_vector(AUTO_ASSIGN);
 		printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector);
 		memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector],
 		       sizeof(struct iosapic_intr_info));
+		INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes);
+		list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes);
 		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
-		iosapic_intr_info[vector].rte_index = -1;
+		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
 	}
 }
 
+static struct iosapic_rte_info *iosapic_alloc_rte (void)
+{
+	int i;
+	struct iosapic_rte_info *rte;
+	int preallocated = 0;
+
+	if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
+		rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES);
+		if (!rte)
+			return NULL;
+		for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
+			list_add(&rte->rte_list, &free_rte_list);
+	}
+
+	if (!list_empty(&free_rte_list)) {
+		rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list);
+		list_del(&rte->rte_list);
+		preallocated++;
+	} else {
+		rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
+		if (!rte)
+			return NULL;
+	}
+
+	memset(rte, 0, sizeof(struct iosapic_rte_info));
+	if (preallocated)
+		rte->flags |= RTE_PREALLOCATED;
+
+	return rte;
+}
+
+static void iosapic_free_rte (struct iosapic_rte_info *rte)
+{
+	if (rte->flags & RTE_PREALLOCATED)
+		list_add_tail(&rte->rte_list, &free_rte_list);
+	else
+		kfree(rte);
+}
+
+static inline int vector_is_shared (int vector)
+{
+	return (iosapic_intr_info[vector].count > 1);
+}
+
 static void
 register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
@@ -454,6 +572,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	int index;
 	unsigned long gsi_base;
 	void __iomem *iosapic_address;
+	struct iosapic_rte_info *rte;
 
 	index = find_iosapic(gsi);
 	if (index < 0) {
@@ -464,14 +583,33 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 	iosapic_address = iosapic_lists[index].addr;
 	gsi_base = iosapic_lists[index].gsi_base;
 
-	rte_index = gsi - gsi_base;
-	iosapic_intr_info[vector].rte_index = rte_index;
+	rte = gsi_vector_to_rte(gsi, vector);
+	if (!rte) {
+		rte = iosapic_alloc_rte();
+		if (!rte) {
+			printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__);
+			return;
+		}
+
+		rte_index = gsi - gsi_base;
+		rte->rte_index = rte_index;
+		rte->addr = iosapic_address;
+		rte->gsi_base = gsi_base;
+		rte->refcnt++;
+		list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes);
+		iosapic_intr_info[vector].count++;
+	}
+	else if (vector_is_shared(vector)) {
+		struct iosapic_intr_info *info = &iosapic_intr_info[vector];
+		if (info->trigger != trigger || info->polarity != polarity) {
+			printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__);
+			return;
+		}
+	}
+
 	iosapic_intr_info[vector].polarity = polarity;
 	iosapic_intr_info[vector].dmode    = delivery;
-	iosapic_intr_info[vector].addr = iosapic_address;
-	iosapic_intr_info[vector].gsi_base = gsi_base;
 	iosapic_intr_info[vector].trigger  = trigger;
-	iosapic_intr_info[vector].refcnt++;
 
 	if (trigger == IOSAPIC_EDGE)
 		irq_type = &irq_type_iosapic_edge;
@@ -494,6 +632,13 @@ get_target_cpu (unsigned int gsi, int vector)
 	static int cpu = -1;
 
 	/*
+	 * In case of vector shared by multiple RTEs, all RTEs that
+	 * share the vector need to use the same destination CPU.
+	 */
+	if (!list_empty(&iosapic_intr_info[vector].rtes))
+		return iosapic_intr_info[vector].dest;
+
+	/*
 	 * If the platform supports redirection via XTP, let it
 	 * distribute interrupts.
 	 */
@@ -565,10 +710,12 @@ int
 iosapic_register_intr (unsigned int gsi,
 		       unsigned long polarity, unsigned long trigger)
 {
-	int vector;
+	int vector, mask = 1;
 	unsigned int dest;
 	unsigned long flags;
-
+	struct iosapic_rte_info *rte;
+	u32 low32;
+again:
 	/*
 	 * If this GSI has already been registered (i.e., it's a
 	 * shared interrupt, or we lost a race to register it),
@@ -578,19 +725,45 @@ iosapic_register_intr (unsigned int gsi,
 	{
 		vector = gsi_to_vector(gsi);
 		if (vector > 0) {
-			iosapic_intr_info[vector].refcnt++;
+			rte = gsi_vector_to_rte(gsi, vector);
+			rte->refcnt++;
 			spin_unlock_irqrestore(&iosapic_lock, flags);
 			return vector;
 		}
+	}
+	spin_unlock_irqrestore(&iosapic_lock, flags);
+
+	/* If vector is running out, we try to find a sharable vector */
+	vector = assign_irq_vector_nopanic(AUTO_ASSIGN);
+	if (vector < 0)
+		vector = iosapic_find_sharable_vector(trigger, polarity);
+
+	spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+	spin_lock(&iosapic_lock);
+	{
+		if (gsi_to_vector(gsi) > 0) {
+			if (list_empty(&iosapic_intr_info[vector].rtes))
+				free_irq_vector(vector);
+			spin_unlock(&iosapic_lock);
+			spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+			goto again;
+		}
 
-		vector = assign_irq_vector(AUTO_ASSIGN);
 		dest = get_target_cpu(gsi, vector);
 		register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
 			      polarity, trigger);
 
-		set_rte(vector, dest, 1);
+		/*
+		 * If the vector is shared and already unmasked for
+		 * other interrupt sources, don't mask it.
+		 */
+		low32 = iosapic_intr_info[vector].low32;
+		if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
+			mask = 0;
+		set_rte(gsi, vector, dest, mask);
 	}
-	spin_unlock_irqrestore(&iosapic_lock, flags);
+	spin_unlock(&iosapic_lock);
+	spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
 
 	printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -607,8 +780,10 @@ iosapic_unregister_intr (unsigned int gsi)
 	unsigned long flags;
 	int irq, vector;
 	irq_desc_t *idesc;
-	int rte_index;
+	u32 low32;
 	unsigned long trigger, polarity;
+	unsigned int dest;
+	struct iosapic_rte_info *rte;
 
 	/*
 	 * If the irq associated with the gsi is not found,
@@ -627,54 +802,56 @@ iosapic_unregister_intr (unsigned int gsi)
 	spin_lock_irqsave(&idesc->lock, flags);
 	spin_lock(&iosapic_lock);
 	{
-		rte_index = iosapic_intr_info[vector].rte_index;
-		if (rte_index < 0) {
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
+		if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) {
 			printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi);
 			WARN_ON(1);
-			return;
+			goto out;
 		}
 
-		if (--iosapic_intr_info[vector].refcnt > 0) {
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
-			return;
-		}
+		if (--rte->refcnt > 0)
+			goto out;
 
-		/*
-		 * If interrupt handlers still exist on the irq
-		 * associated with the gsi, don't unregister the
-		 * interrupt.
-		 */
-		if (idesc->action) {
-			iosapic_intr_info[vector].refcnt++;
-			spin_unlock(&iosapic_lock);
-			spin_unlock_irqrestore(&idesc->lock, flags);
-			printk(KERN_WARNING "Cannot unregister GSI. IRQ %u is still in use.\n", irq);
-			return;
-		}
+		/* Mask the interrupt */
+		low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK;
+		iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32);
 
-		/* Clear the interrupt controller descriptor. */
-		idesc->handler = &no_irq_type;
+		/* Remove the rte entry from the list */
+		list_del(&rte->rte_list);
+		iosapic_intr_info[vector].count--;
+		iosapic_free_rte(rte);
 
 		trigger  = iosapic_intr_info[vector].trigger;
 		polarity = iosapic_intr_info[vector].polarity;
+		dest = iosapic_intr_info[vector].dest;
+		printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
+		       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
+		       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
+		       cpu_logical_id(dest), dest, vector);
+
+		if (list_empty(&iosapic_intr_info[vector].rtes)) {
+			/* Sanity check */
+			BUG_ON(iosapic_intr_info[vector].count);
+
+			/* Clear the interrupt controller descriptor */
+			idesc->handler = &no_irq_type;
+
+			/* Clear the interrupt information */
+			memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
+			iosapic_intr_info[vector].low32 |= IOSAPIC_MASK;
+			INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);
+
+			if (idesc->action) {
+				printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq);
+				WARN_ON(1);
+			}
 
-		/* Clear the interrupt information. */
-		memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info));
-		iosapic_intr_info[vector].rte_index = -1;	/* mark as unused */
+			/* Free the interrupt vector */
+			free_irq_vector(vector);
+		}
 	}
+ out:
 	spin_unlock(&iosapic_lock);
 	spin_unlock_irqrestore(&idesc->lock, flags);
-
-	/* Free the interrupt vector */
-	free_irq_vector(vector);
-
-	printk(KERN_INFO "GSI %u (%s, %s) -> vector %d unregisterd.\n",
-	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
-	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
-	       vector);
 }
 #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
 
@@ -724,7 +901,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, vector);
 
-	set_rte(vector, dest, mask);
+	set_rte(gsi, vector, dest, mask);
 	return vector;
 }
 
@@ -750,7 +927,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 	       polarity == IOSAPIC_POL_HIGH ? "high" : "low",
 	       cpu_logical_id(dest), dest, vector);
 
-	set_rte(vector, dest, 1);
+	set_rte(gsi, vector, dest, 1);
 }
 
 void __init
@@ -758,8 +935,10 @@ iosapic_system_init (int system_pcat_compat)
 {
 	int vector;
 
-	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
-		iosapic_intr_info[vector].rte_index = -1;	/* mark as unused */
+	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) {
+		iosapic_intr_info[vector].low32 = IOSAPIC_MASK;
+		INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes);	/* mark as unused */
+	}
 
 	pcat_compat = system_pcat_compat;
 	if (pcat_compat) {
@@ -825,3 +1004,10 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
 		return;
 }
 #endif
+
+static int __init iosapic_enable_kmalloc (void)
+{
+	iosapic_kmalloc_ok = 1;
+	return 0;
+}
+core_initcall (iosapic_enable_kmalloc);
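
The core of this patch is the data-structure change: a vector no longer carries a single (addr, rte_index) pair, it owns a list of iosapic_rte_info entries, and every per-vector operation (mask, unmask, EOI, set_affinity) walks that list so all pins sharing the vector stay in sync. A minimal sketch of that shape, with a plain singly-linked list standing in for list_head and a hypothetical write_rte() standing in for iosapic_write():

/* Sketch of the one-vector/many-RTEs masking pattern introduced above. */
struct rte {
	struct rte *next;
	void *addr;		/* IOSAPIC base this pin lives on */
	int rte_index;		/* pin number within that IOSAPIC */
};

struct vector_info {
	struct rte *rtes;	/* all pins routed to this vector */
	unsigned int low32;	/* cached low word, shared by every pin */
};

#define IOSAPIC_MASK_BIT (1u << 16)

static void write_rte(void *addr, int rte_index, unsigned int low32)
{
	/* hypothetical stand-in for iosapic_write(addr, IOSAPIC_RTE_LOW(idx), low32) */
	(void)addr; (void)rte_index; (void)low32;
}

/* Masking a shared vector must mask every pin that feeds it. */
static void mask_vector(struct vector_info *v)
{
	struct rte *r;

	v->low32 |= IOSAPIC_MASK_BIT;
	for (r = v->rtes; r; r = r->next)
		write_rte(r->addr, r->rte_index, v->low32);
}
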
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5ba06ebe355b..4fe60c7a2e90 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
 
 int
-assign_irq_vector (int irq)
+assign_irq_vector_nopanic (int irq)
 {
 	int pos, vector;
  again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		/* XXX could look for sharable vectors instead of panic'ing... */
-		panic("assign_irq_vector: out of interrupt vectors!");
+		return -1;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }
 
+int
+assign_irq_vector (int irq)
+{
+	int vector = assign_irq_vector_nopanic(irq);
+
+	if (vector < 0)
+		panic("assign_irq_vector: out of interrupt vectors!");
+
+	return vector;
+}
+
 void
 free_irq_vector (int vector)
 {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71147be3279c..376fcbc3f8da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -480,14 +480,6 @@ typedef struct {
 #define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
 
 typedef struct {
-	int	debug;		/* turn on/off debugging via syslog */
-	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
-	int	expert_mode;	/* turn on/off value checking */
-	int	debug_pfm_read;
-} pfm_sysctl_t;
-
-typedef struct {
 	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
 	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
 	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t		*pmu_conf;
 
 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1576,7 +1568,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		goto abort_locked;
 	}
 
-	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
 
 	ret = -EFAULT;
 	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3687,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	pfm_sysctl.debug = m == 0 ? 0 : 1;
 
-	pfm_debug_var = pfm_sysctl.debug;
-
 	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
 
 	if (m == 0) {
@@ -4996,13 +4986,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
+/*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
 void
 pfm_handle_work(void)
 {
 	pfm_context_t *ctx;
 	struct pt_regs *regs;
-	unsigned long flags;
+	unsigned long flags, dummy_flags;
 	unsigned long ovfl_regs;
 	unsigned int reason;
 	int ret;
@@ -5039,18 +5037,15 @@ pfm_handle_work(void)
 	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
 	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
 
+	/*
+	 * restore interrupt mask to what it was on entry.
+	 * Could be enabled/diasbled.
+	 */
 	UNPROTECT_CTX(ctx, flags);
 
 	/*
-	 * pfm_handle_work() is currently called with interrupts disabled.
-	 * The down_interruptible call may sleep, therefore we
-	 * must re-enable interrupts to avoid deadlocks. It is
-	 * safe to do so because this function is called ONLY
-	 * when returning to user level (PUStk=1), in which case
-	 * there is no risk of kernel stack overflow due to deep
-	 * interrupt nesting.
+	 * force interrupt enable because of down_interruptible()
 	 */
-	BUG_ON(flags & IA64_PSR_I);
 	local_irq_enable();
 
 	DPRINT(("before block sleeping\n"));
@@ -5064,12 +5059,12 @@ pfm_handle_work(void)
 	DPRINT(("after block sleeping ret=%d\n", ret));
 
 	/*
-	 * disable interrupts to restore state we had upon entering
-	 * this function
+	 * lock context and mask interrupts again
+	 * We save flags into a dummy because we may have
+	 * altered interrupts mask compared to entry in this
+	 * function.
 	 */
-	local_irq_disable();
-
-	PROTECT_CTX(ctx, flags);
+	PROTECT_CTX(ctx, dummy_flags);
 
 	/*
 	 * we need to read the ovfl_regs only after wake-up
@@ -5095,7 +5090,9 @@ skip_blocking:
 		ctx->ctx_ovfl_regs[0] = 0UL;
 
 nothing_to_do:
-
+	/*
+	 * restore flags as they were upon entry
+	 */
 	UNPROTECT_CTX(ctx, flags);
 }
 
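
The pfm_handle_work() rework pins down a locking idiom: drop the context lock restoring the caller's interrupt state, force interrupts on before the call that may sleep, then re-acquire the lock into a throwaway flags variable so the entry state survives for the final unlock. A self-contained sketch with the kernel primitives reduced to stubs (only the ordering is the point):

static unsigned long irq_state = 1;	/* 1 = interrupts enabled */

#define spin_lock_irqsave(l, f)      do { (f) = irq_state; irq_state = 0; } while (0)
#define spin_unlock_irqrestore(l, f) do { irq_state = (f); } while (0)
#define local_irq_enable()           do { irq_state = 1; } while (0)

static int ctx_lock;			/* placeholder for the context lock */

static void sleeping_call(void) { }	/* stands for down_interruptible() */

void handle_work_sketch(void)
{
	unsigned long flags, dummy_flags;

	spin_lock_irqsave(&ctx_lock, flags);	  /* flags = irq state on entry */
	/* ... decide what work is pending ... */
	spin_unlock_irqrestore(&ctx_lock, flags); /* entry irq state restored */

	local_irq_enable();			  /* mandatory before sleeping */
	sleeping_call();

	/*
	 * Re-take the lock into dummy_flags: 'flags' must keep the entry
	 * state for the final unlock, as PROTECT_CTX(ctx, dummy_flags)
	 * does in pfm_handle_work() above.
	 */
	spin_lock_irqsave(&ctx_lock, dummy_flags);
	/* ... consume the overflow state ... */
	spin_unlock_irqrestore(&ctx_lock, flags); /* back to entry state */
}
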
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 965d29004555..344941db0a9e 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
 MODULE_DESCRIPTION("perfmon default sampling format");
 MODULE_LICENSE("GPL");
 
-MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "debug");
-
-MODULE_PARM(debug_ovfl, "i");
-MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
-
-
 #define DEFAULT_DEBUG 1
 
 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)
 
 #else
@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
 #define DPRINT_ovfl(a)
 #endif
 
-static int debug, debug_ovfl;
-
 static int
 default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
 {
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index f05650c801d2..b7e6b4cb374b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -4,10 +4,15 @@
  * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *	Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2000, 2004 Intel Corp
+ * 	Rohit Seth <rohit.seth@intel.com>
+ * 	Suresh Siddha <suresh.b.siddha@intel.com>
+ * 	Gordon Jin <gordon.jin@intel.com>
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
  *
+ * 12/26/04 S.Siddha, G.Jin, R.Seth
+ *			Add multi-threading and multi-core detection
  * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
  * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
  * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
@@ -296,6 +301,34 @@ mark_bsp_online (void)
 #endif
 }
 
+#ifdef CONFIG_SMP
+static void
+check_for_logical_procs (void)
+{
+	pal_logical_to_physical_t info;
+	s64 status;
+
+	status = ia64_pal_logical_to_phys(0, &info);
+	if (status == -1) {
+		printk(KERN_INFO "No logical to physical processor mapping "
+		       "available\n");
+		return;
+	}
+	if (status) {
+		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+		       status);
+		return;
+	}
+	/*
+	 * Total number of siblings that BSP has.  Though not all of them
+	 * may have booted successfully. The correct number of siblings
+	 * booted is in info.overview_num_log.
+	 */
+	smp_num_siblings = info.overview_tpc;
+	smp_num_cpucores = info.overview_cpp;
+}
+#endif
+
 void __init
 setup_arch (char **cmdline_p)
 {
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
+
+	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, cpu_core_map[0]);
+
+	check_for_logical_procs();
+	if (smp_num_cpucores > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Core capable: number of cores=%d\n",
+		       smp_num_cpucores);
+	if (smp_num_siblings > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
+		       smp_num_siblings);
 #endif
 
 	cpu_init();	/* initialize the bootstrap CPU */
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v)
459 "cpu regs : %u\n" 505 "cpu regs : %u\n"
460 "cpu MHz : %lu.%06lu\n" 506 "cpu MHz : %lu.%06lu\n"
461 "itc MHz : %lu.%06lu\n" 507 "itc MHz : %lu.%06lu\n"
462 "BogoMIPS : %lu.%02lu\n\n", 508 "BogoMIPS : %lu.%02lu\n",
463 cpunum, c->vendor, family, c->model, c->revision, c->archrev, 509 cpunum, c->vendor, family, c->model, c->revision, c->archrev,
464 features, c->ppn, c->number, 510 features, c->ppn, c->number,
465 c->proc_freq / 1000000, c->proc_freq % 1000000, 511 c->proc_freq / 1000000, c->proc_freq % 1000000,
466 c->itc_freq / 1000000, c->itc_freq % 1000000, 512 c->itc_freq / 1000000, c->itc_freq % 1000000,
467 lpj*HZ/500000, (lpj*HZ/5000) % 100); 513 lpj*HZ/500000, (lpj*HZ/5000) % 100);
514#ifdef CONFIG_SMP
515 seq_printf(m, "siblings : %u\n", c->num_log);
516 if (c->threads_per_core > 1 || c->cores_per_socket > 1)
517 seq_printf(m,
518 "physical id: %u\n"
519 "core id : %u\n"
520 "thread id : %u\n",
521 c->socket_id, c->core_id, c->thread_id);
522#endif
523 seq_printf(m,"\n");
524
468 return 0; 525 return 0;
469} 526}
470 527
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	memcpy(c->vendor, cpuid.field.vendor, 16);
 #ifdef CONFIG_SMP
 	c->cpu = smp_processor_id();
+
+	/* below default values will be overwritten by identify_siblings()
+	 * for Multi-Threading/Multi-Core capable cpu's
+	 */
+	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
+	c->socket_id = -1;
+
+	identify_siblings(c);
 #endif
 	c->ppn = cpuid.field.ppn;
 	c->number = cpuid.field.number;
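
setup.c now seeds the boot CPU's sibling/core masks and defaults every cpuinfo to one thread per core and one core per socket before identify_siblings() overwrites them from PAL data. The forward direction, building cpu_sibling_map/cpu_core_map from each CPU's (socket_id, core_id), lives in smpboot.c (its teardown half, clear_cpu_sibling_map(), appears in the truncated hunk below); a hedged sketch of that direction, assuming at most 64 CPUs and using hypothetical helper names:

/* Sketch of how (socket_id, core_id) pairs become sibling/core masks.
 * set_cpu_sibling_map() is a hypothetical name for the forward
 * counterpart of clear_cpu_sibling_map() shown below; cpumasks are
 * simplified to one unsigned long (<= 64 CPUs assumed). */
#define NR_CPUS 64

struct cpuinfo { int socket_id; int core_id; };

static struct cpuinfo cpu_data[NR_CPUS];
static unsigned long cpu_core_map[NR_CPUS];	/* bit i: same socket      */
static unsigned long cpu_sibling_map[NR_CPUS];	/* bit i: same core, too   */

static void set_cpu_sibling_map(int cpu, int online_cpus)
{
	for (int i = 0; i < online_cpus; i++) {
		if (cpu_data[i].socket_id != cpu_data[cpu].socket_id)
			continue;
		/* same physical package: cores of one socket */
		cpu_core_map[cpu] |= 1UL << i;
		cpu_core_map[i]   |= 1UL << cpu;
		if (cpu_data[i].core_id == cpu_data[cpu].core_id) {
			/* same core: hardware threads (siblings) */
			cpu_sibling_map[cpu] |= 1UL << i;
			cpu_sibling_map[i]   |= 1UL << cpu;
		}
	}
}
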
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index ca1536db3394..0d5ee57c9865 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -1,8 +1,13 @@
 /*
  * SMP boot-related support
  *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001, 2004-2005 Intel Corp
+ * 	Rohit Seth <rohit.seth@intel.com>
+ * 	Suresh Siddha <suresh.b.siddha@intel.com>
+ * 	Gordon Jin <gordon.jin@intel.com>
+ * 	Ashok Raj <ashok.raj@intel.com>
  *
  * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
  * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
@@ -10,6 +15,11 @@
10 * smp_boot_cpus()/smp_commence() is replaced by 15 * smp_boot_cpus()/smp_commence() is replaced by
11 * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). 16 * smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
12 * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support 17 * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
18 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
19 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
20 * Add multi-threading and multi-core detection
21 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
22 * Setup cpu_sibling_map and cpu_core_map
13 */ 23 */
14#include <linux/config.h> 24#include <linux/config.h>
15 25
@@ -122,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map);
122cpumask_t cpu_possible_map; 132cpumask_t cpu_possible_map;
123EXPORT_SYMBOL(cpu_possible_map); 133EXPORT_SYMBOL(cpu_possible_map);
124 134
135cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
136cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
137int smp_num_siblings = 1;
138int smp_num_cpucores = 1;
139
125/* which logical CPU number maps to which CPU (physical APIC ID) */ 140/* which logical CPU number maps to which CPU (physical APIC ID) */
126volatile int ia64_cpu_to_sapicid[NR_CPUS]; 141volatile int ia64_cpu_to_sapicid[NR_CPUS];
127EXPORT_SYMBOL(ia64_cpu_to_sapicid); 142EXPORT_SYMBOL(ia64_cpu_to_sapicid);
@@ -156,7 +171,8 @@ sync_master (void *arg)
156 local_irq_save(flags); 171 local_irq_save(flags);
157 { 172 {
158 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { 173 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
159 while (!go[MASTER]); 174 while (!go[MASTER])
175 cpu_relax();
160 go[MASTER] = 0; 176 go[MASTER] = 0;
161 go[SLAVE] = ia64_get_itc(); 177 go[SLAVE] = ia64_get_itc();
162 } 178 }
@@ -179,7 +195,8 @@ get_delta (long *rt, long *master)
179 for (i = 0; i < NUM_ITERS; ++i) { 195 for (i = 0; i < NUM_ITERS; ++i) {
180 t0 = ia64_get_itc(); 196 t0 = ia64_get_itc();
181 go[MASTER] = 1; 197 go[MASTER] = 1;
182 while (!(tm = go[SLAVE])); 198 while (!(tm = go[SLAVE]))
199 cpu_relax();
183 go[SLAVE] = 0; 200 go[SLAVE] = 0;
184 t1 = ia64_get_itc(); 201 t1 = ia64_get_itc();
185 202
@@ -258,7 +275,8 @@ ia64_sync_itc (unsigned int master)
258 return; 275 return;
259 } 276 }
260 277
261 while (go[MASTER]); /* wait for master to be ready */ 278 while (go[MASTER])
279 cpu_relax(); /* wait for master to be ready */
262 280
263 spin_lock_irqsave(&itc_sync_lock, flags); 281 spin_lock_irqsave(&itc_sync_lock, flags);
264 { 282 {
@@ -595,7 +613,68 @@ void __devinit smp_prepare_boot_cpu(void)
595 cpu_set(smp_processor_id(), cpu_callin_map); 613 cpu_set(smp_processor_id(), cpu_callin_map);
596} 614}
597 615
616/*
617 * mt_info[] is a temporary store for all info returned by
618 * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
 619 * corresponding cpu comes online.
620 */
621static struct {
622 __u32 socket_id;
623 __u16 core_id;
624 __u16 thread_id;
625 __u16 proc_fixed_addr;
626 __u8 valid;
 627} mt_info[NR_CPUS] __devinitdata;
628
598#ifdef CONFIG_HOTPLUG_CPU 629#ifdef CONFIG_HOTPLUG_CPU
630static inline void
631remove_from_mtinfo(int cpu)
632{
633 int i;
634
635 for_each_cpu(i)
636 if (mt_info[i].valid && mt_info[i].socket_id ==
637 cpu_data(cpu)->socket_id)
638 mt_info[i].valid = 0;
639}
640
641static inline void
642clear_cpu_sibling_map(int cpu)
643{
644 int i;
645
646 for_each_cpu_mask(i, cpu_sibling_map[cpu])
647 cpu_clear(cpu, cpu_sibling_map[i]);
648 for_each_cpu_mask(i, cpu_core_map[cpu])
649 cpu_clear(cpu, cpu_core_map[i]);
650
651 cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
652}
653
654static void
655remove_siblinginfo(int cpu)
656{
657 int last = 0;
658
659 if (cpu_data(cpu)->threads_per_core == 1 &&
660 cpu_data(cpu)->cores_per_socket == 1) {
661 cpu_clear(cpu, cpu_core_map[cpu]);
662 cpu_clear(cpu, cpu_sibling_map[cpu]);
663 return;
664 }
665
666 last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
667
 668 /* remove it from all sibling maps */
669 clear_cpu_sibling_map(cpu);
670
671 /* if this cpu is the last in the core group, remove all its info
 672 * from the mt_info structure
673 */
674 if (last)
675 remove_from_mtinfo(cpu);
676}
677
599extern void fixup_irqs(void); 678extern void fixup_irqs(void);
600/* must be called with cpucontrol mutex held */ 679/* must be called with cpucontrol mutex held */
601int __cpu_disable(void) 680int __cpu_disable(void)
@@ -608,6 +687,7 @@ int __cpu_disable(void)
608 if (cpu == 0) 687 if (cpu == 0)
609 return -EBUSY; 688 return -EBUSY;
610 689
690 remove_siblinginfo(cpu);
611 fixup_irqs(); 691 fixup_irqs();
612 local_flush_tlb_all(); 692 local_flush_tlb_all();
613 cpu_clear(cpu, cpu_callin_map); 693 cpu_clear(cpu, cpu_callin_map);
@@ -660,6 +740,23 @@ smp_cpus_done (unsigned int dummy)
660 (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); 740 (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
661} 741}
662 742
743static inline void __devinit
744set_cpu_sibling_map(int cpu)
745{
746 int i;
747
748 for_each_online_cpu(i) {
749 if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
750 cpu_set(i, cpu_core_map[cpu]);
751 cpu_set(cpu, cpu_core_map[i]);
752 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
753 cpu_set(i, cpu_sibling_map[cpu]);
754 cpu_set(cpu, cpu_sibling_map[i]);
755 }
756 }
757 }
758}
759
663int __devinit 760int __devinit
664__cpu_up (unsigned int cpu) 761__cpu_up (unsigned int cpu)
665{ 762{
@@ -682,6 +779,15 @@ __cpu_up (unsigned int cpu)
682 if (ret < 0) 779 if (ret < 0)
683 return ret; 780 return ret;
684 781
782 if (cpu_data(cpu)->threads_per_core == 1 &&
783 cpu_data(cpu)->cores_per_socket == 1) {
784 cpu_set(cpu, cpu_sibling_map[cpu]);
785 cpu_set(cpu, cpu_core_map[cpu]);
786 return 0;
787 }
788
789 set_cpu_sibling_map(cpu);
790
685 return 0; 791 return 0;
686} 792}
687 793
@@ -709,3 +815,106 @@ init_smp_config(void)
709 ia64_sal_strerror(sal_ret)); 815 ia64_sal_strerror(sal_ret));
710} 816}
711 817
818static inline int __devinit
819check_for_mtinfo_index(void)
820{
821 int i;
822
823 for_each_cpu(i)
824 if (!mt_info[i].valid)
825 return i;
826
827 return -1;
828}
829
830/*
 831 * Search mt_info to find out whether this socket's cid/tid information
 832 * is cached. If the socket exists, fill in the core_id and thread_id
 833 * in cpuinfo.
834 */
835static int __devinit
836check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
837{
838 int i;
839 __u32 sid = c->socket_id;
840
841 for_each_cpu(i) {
842 if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
843 && mt_info[i].socket_id == sid) {
844 c->core_id = mt_info[i].core_id;
845 c->thread_id = mt_info[i].thread_id;
846 return 1; /* not a new socket */
847 }
848 }
849 return 0;
850}
851
852/*
853 * identify_siblings(cpu) gets called from identify_cpu. This populates the
 854 * information related to logical execution units in the per_cpu_data structure.
855 */
856void __devinit
857identify_siblings(struct cpuinfo_ia64 *c)
858{
859 s64 status;
860 u16 pltid;
861 u64 proc_fixed_addr;
862 int count, i;
863 pal_logical_to_physical_t info;
864
865 if (smp_num_cpucores == 1 && smp_num_siblings == 1)
866 return;
867
868 if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
869 printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
870 status);
871 return;
872 }
873 if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
 874 printk(KERN_ERR "ia64_sal_physical_id_info failed with %ld\n", status);
875 return;
876 }
877 if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
878 printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
879 return;
880 }
881
882 c->socket_id = (pltid << 8) | info.overview_ppid;
883 c->cores_per_socket = info.overview_cpp;
884 c->threads_per_core = info.overview_tpc;
885 count = c->num_log = info.overview_num_log;
886
887 /* If the thread and core id information is already cached, then
888 * we will simply update cpu_info and return. Otherwise, we will
 889 * do the PAL calls and cache core and thread ids of all the siblings.
890 */
891 if (check_for_new_socket(proc_fixed_addr, c))
892 return;
893
894 for (i = 0; i < count; i++) {
895 int index;
896
897 if (i && (status = ia64_pal_logical_to_phys(i, &info))
898 != PAL_STATUS_SUCCESS) {
899 printk(KERN_ERR "ia64_pal_logical_to_phys failed"
900 " with %ld\n", status);
901 return;
902 }
903 if (info.log2_la == proc_fixed_addr) {
904 c->core_id = info.log1_cid;
905 c->thread_id = info.log1_tid;
906 }
907
908 index = check_for_mtinfo_index();
909 /* We will not do the mt_info caching optimization in this case.
910 */
911 if (index < 0)
912 continue;
913
914 mt_info[index].valid = 1;
915 mt_info[index].socket_id = c->socket_id;
916 mt_info[index].core_id = info.log1_cid;
917 mt_info[index].thread_id = info.log1_tid;
918 mt_info[index].proc_fixed_addr = info.log2_la;
919 }
920}
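A condensed sketch of the discovery flow added above, with the error paths and
the mt_info caching elided; the PAL/SAL helpers and field names are the ones
used in identify_siblings():

	/* Sketch only -- not part of the patch. */
	static void sketch_identify_siblings(struct cpuinfo_ia64 *c)
	{
		pal_logical_to_physical_t info;
		u16 pltid;

		ia64_pal_logical_to_phys(0, &info);	/* this thread's topology view */
		ia64_sal_physical_id_info(&pltid);	/* platform-level package id */

		/* A socket is identified by (platform id << 8) | physical package id. */
		c->socket_id = (pltid << 8) | info.overview_ppid;
		c->cores_per_socket = info.overview_cpp;
		c->threads_per_core = info.overview_tpc;
		c->num_log = info.overview_num_log;
	}
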
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index d494ff647cac..2776a074c6f1 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind);
1943int 1943int
1944unw_unwind_to_user (struct unw_frame_info *info) 1944unw_unwind_to_user (struct unw_frame_info *info)
1945{ 1945{
1946 unsigned long ip, sp; 1946 unsigned long ip, sp, pr = 0;
1947 1947
1948 while (unw_unwind(info) >= 0) { 1948 while (unw_unwind(info) >= 0) {
1949 if (unw_get_rp(info, &ip) < 0) {
1950 unw_get_ip(info, &ip);
1951 UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
1952 __FUNCTION__, ip);
1953 return -1;
1954 }
1955 unw_get_sp(info, &sp); 1949 unw_get_sp(info, &sp);
1956 if (sp >= (unsigned long)info->task + IA64_STK_OFFSET) 1950 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1951 < IA64_PT_REGS_SIZE) {
1952 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1953 __FUNCTION__);
1957 break; 1954 break;
1958 if (ip < FIXADDR_USER_END) 1955 }
1956 if (unw_is_intr_frame(info) &&
1957 (pr & (1UL << PRED_USER_STACK)))
1959 return 0; 1958 return 0;
1959 if (unw_get_pr (info, &pr) < 0) {
1960 unw_get_rp(info, &ip);
1961 UNW_DPRINT(0, "unwind.%s: failed to read "
1962 "predicate register (ip=0x%lx)\n",
1963 __FUNCTION__, ip);
1964 return -1;
1965 }
1960 } 1966 }
1961 unw_get_ip(info, &ip); 1967 unw_get_ip(info, &ip);
1962 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip); 1968 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1969 __FUNCTION__, ip);
1963 return -1; 1970 return -1;
1964} 1971}
1965EXPORT_SYMBOL(unw_unwind_to_user); 1972EXPORT_SYMBOL(unw_unwind_to_user);
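The new termination test is arithmetic rather than a plain bound check: a frame
is rejected once fewer than IA64_PT_REGS_SIZE bytes remain between sp and the
top of the kernel stack, since a valid pt_regs must still fit there. The
predicate in isolation, as a sketch:

	/* Sketch: does a struct pt_regs still fit between sp and the stack top? */
	static int off_top_of_kernel_stack(struct unw_frame_info *info, unsigned long sp)
	{
		unsigned long stack_top = (unsigned long)info->task + IA64_STK_OFFSET;

		return (long)(stack_top - sp) < IA64_PT_REGS_SIZE;
	}
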
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 6f26ef7cc236..3c2cd2f04db9 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -300,7 +300,7 @@ EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8)
300 add src_pre_mem=0,src0 // prefetch src pointer 300 add src_pre_mem=0,src0 // prefetch src pointer
301 add dst_pre_mem=0,dst0 // prefetch dest pointer 301 add dst_pre_mem=0,dst0 // prefetch dest pointer
302 and src0=-8,src0 // 1st src pointer 302 and src0=-8,src0 // 1st src pointer
303(p7) mov ar.lc = r21 303(p7) mov ar.lc = cnt
304(p8) mov ar.lc = r0 304(p8) mov ar.lc = r0
305 ;; 305 ;;
306 TEXT_ALIGN(32) 306 TEXT_ALIGN(32)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 6daf15ac8940..91a055f5731f 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -61,7 +61,8 @@ show_mem (void)
61 printk("%d reserved pages\n", reserved); 61 printk("%d reserved pages\n", reserved);
62 printk("%d pages shared\n", shared); 62 printk("%d pages shared\n", shared);
63 printk("%d pages swap cached\n", cached); 63 printk("%d pages swap cached\n", cached);
64 printk("%ld pages in page table cache\n", pgtable_cache_size); 64 printk("%ld pages in page table cache\n",
65 pgtable_quicklist_total_size());
65} 66}
66 67
67/* physical address where the bootmem map is located */ 68/* physical address where the bootmem map is located */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 3456a9b6971e..c00710929390 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -582,7 +582,8 @@ void show_mem(void)
582 printk("%d reserved pages\n", total_reserved); 582 printk("%d reserved pages\n", total_reserved);
583 printk("%d pages shared\n", total_shared); 583 printk("%d pages shared\n", total_shared);
584 printk("%d pages swap cached\n", total_cached); 584 printk("%d pages swap cached\n", total_cached);
585 printk("Total of %ld pages in page table cache\n", pgtable_cache_size); 585 printk("Total of %ld pages in page table cache\n",
586 pgtable_quicklist_total_size());
586 printk("%d free buffer pages\n", nr_free_buffer_pages()); 587 printk("%d free buffer pages\n", nr_free_buffer_pages());
587} 588}
588 589
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index da859125aaef..4174ec999dde 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -209,10 +209,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
209 } 209 }
210 210
211 no_context: 211 no_context:
212 if (isr & IA64_ISR_SP) { 212 if ((isr & IA64_ISR_SP)
213 || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
214 {
213 /* 215 /*
214 * This fault was due to a speculative load set the "ed" bit in the psr to 216 * This fault was due to a speculative load or lfetch.fault, set the "ed"
215 * ensure forward progress (target register will get a NaT). 217 * bit in the psr to ensure forward progress. (Target register will get a
218 * NaT for ld.s, lfetch will be canceled.)
216 */ 219 */
217 ia64_psr(regs)->ed = 1; 220 ia64_psr(regs)->ed = 1;
218 return; 221 return;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 65cf839573ea..547785e3cba2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -39,6 +39,9 @@
39 39
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
41 41
42DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
43DEFINE_PER_CPU(long, __pgtable_quicklist_size);
44
42extern void ia64_tlb_init (void); 45extern void ia64_tlb_init (void);
43 46
44unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; 47unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -50,27 +53,53 @@ struct page *vmem_map;
50EXPORT_SYMBOL(vmem_map); 53EXPORT_SYMBOL(vmem_map);
51#endif 54#endif
52 55
53static int pgt_cache_water[2] = { 25, 50 }; 56struct page *zero_page_memmap_ptr; /* map entry for zero page */
54
55struct page *zero_page_memmap_ptr; /* map entry for zero page */
56EXPORT_SYMBOL(zero_page_memmap_ptr); 57EXPORT_SYMBOL(zero_page_memmap_ptr);
57 58
59#define MIN_PGT_PAGES 25UL
60#define MAX_PGT_FREES_PER_PASS 16L
61#define PGT_FRACTION_OF_NODE_MEM 16
62
63static inline long
64max_pgt_pages(void)
65{
66 u64 node_free_pages, max_pgt_pages;
67
68#ifndef CONFIG_NUMA
69 node_free_pages = nr_free_pages();
70#else
71 node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
72#endif
73 max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
74 max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
75 return max_pgt_pages;
76}
77
78static inline long
79min_pages_to_free(void)
80{
81 long pages_to_free;
82
83 pages_to_free = pgtable_quicklist_size - max_pgt_pages();
84 pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
85 return pages_to_free;
86}
87
58void 88void
59check_pgt_cache (void) 89check_pgt_cache(void)
60{ 90{
61 int low, high; 91 long pages_to_free;
62 92
63 low = pgt_cache_water[0]; 93 if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
64 high = pgt_cache_water[1]; 94 return;
65 95
66 preempt_disable(); 96 preempt_disable();
67 if (pgtable_cache_size > (u64) high) { 97 while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
68 do { 98 while (pages_to_free--) {
69 if (pgd_quicklist) 99 free_page((unsigned long)pgtable_quicklist_alloc());
70 free_page((unsigned long)pgd_alloc_one_fast(NULL)); 100 }
71 if (pmd_quicklist) 101 preempt_enable();
72 free_page((unsigned long)pmd_alloc_one_fast(NULL, 0)); 102 preempt_disable();
73 } while (pgtable_cache_size > (u64) low);
74 } 103 }
75 preempt_enable(); 104 preempt_enable();
76} 105}
@@ -523,11 +552,14 @@ void
523mem_init (void) 552mem_init (void)
524{ 553{
525 long reserved_pages, codesize, datasize, initsize; 554 long reserved_pages, codesize, datasize, initsize;
526 unsigned long num_pgt_pages;
527 pg_data_t *pgdat; 555 pg_data_t *pgdat;
528 int i; 556 int i;
529 static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; 557 static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
530 558
559 BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
560 BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
561 BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
562
531#ifdef CONFIG_PCI 563#ifdef CONFIG_PCI
532 /* 564 /*
533 * This needs to be called _after_ the command line has been parsed but _before_ 565 * This needs to be called _after_ the command line has been parsed but _before_
@@ -564,18 +596,6 @@ mem_init (void)
564 num_physpages << (PAGE_SHIFT - 10), codesize >> 10, 596 num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
565 reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); 597 reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
566 598
567 /*
568 * Allow for enough (cached) page table pages so that we can map the entire memory
569 * at least once. Each task also needs a couple of page tables pages, so add in a
570 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
571 * Don't allow the cache to be more than 10% of total memory, though.
572 */
573# define NUM_TASKS 500 /* typical number of tasks */
574 num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
575 if (num_pgt_pages > nr_free_pages() / 10)
576 num_pgt_pages = nr_free_pages() / 10;
577 if (num_pgt_pages > (u64) pgt_cache_water[1])
578 pgt_cache_water[1] = num_pgt_pages;
579 599
580 /* 600 /*
 581 * For fsyscall entry points with no light-weight handler, use the ordinary 601
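A worked example of the new watermark math, with assumed numbers: on a node
with 64K free pages, max_pgt_pages() caps the quicklist at 64K/16 = 4096 pages
(never below MIN_PGT_PAGES = 25), and each trimming batch frees at most
MAX_PGT_FREES_PER_PASS = 16 pages before preemption is briefly re-enabled:

	/* Numbers assumed for illustration; quicklist holds 4130 pages. */
	long cap = max(64 * 1024L / PGT_FRACTION_OF_NODE_MEM, (long)MIN_PGT_PAGES); /* 4096 */
	long surplus = 4130 - cap;                          /* 34 pages over the cap */
	long batch = min(surplus, MAX_PGT_FREES_PER_PASS);  /* 16; passes free 16+16+2 */
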
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
index b1f05ffec70b..1cd291d8badd 100644
--- a/arch/ia64/sn/include/pci/pcibr_provider.h
+++ b/arch/ia64/sn/include/pci/pcibr_provider.h
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info)
123} 123}
124#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) 124#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
125 125
126extern int pcibr_init_provider(void);
126extern void *pcibr_bus_fixup(struct pcibus_bussoft *); 127extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
127extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int); 128extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
128extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int); 129extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
130extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
129 131
130/* 132/*
131 * prototypes for the bridge asic register access routines in pcibr_reg.c 133 * prototypes for the bridge asic register access routines in pcibr_reg.c
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index 6c7f4d9e8ea0..4f381fb25049 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -10,3 +10,4 @@
10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ 10obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
11 huberror.o io_init.o iomv.o klconflib.o sn2/ 11 huberror.o io_init.o iomv.o klconflib.o sn2/
12obj-$(CONFIG_IA64_GENERIC) += machvec.o 12obj-$(CONFIG_IA64_GENERIC) += machvec.o
13obj-$(CONFIG_SGI_TIOCX) += tiocx.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index ce0bc4085eae..647deae9bfcd 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9#include <linux/config.h> 9#include <linux/config.h>
@@ -170,10 +170,6 @@ retry_bteop:
170 /* Initialize the notification to a known value. */ 170 /* Initialize the notification to a known value. */
171 *bte->most_rcnt_na = BTE_WORD_BUSY; 171 *bte->most_rcnt_na = BTE_WORD_BUSY;
172 172
173 /* Set the status reg busy bit and transfer length */
174 BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
175 BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
176
177 /* Set the source and destination registers */ 173 /* Set the source and destination registers */
178 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); 174 BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
179 BTE_SRC_STORE(bte, TO_PHYS(src)); 175 BTE_SRC_STORE(bte, TO_PHYS(src));
@@ -188,7 +184,7 @@ retry_bteop:
188 184
189 /* Initiate the transfer */ 185 /* Initiate the transfer */
190 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); 186 BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
191 BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode)); 187 BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));
192 188
193 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); 189 itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
194 190
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
429 mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; 425 mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
430 426
431 for (i = 0; i < BTES_PER_NODE; i++) { 427 for (i = 0; i < BTES_PER_NODE; i++) {
428 u64 *base_addr;
429
432 /* Which link status register should we use? */ 430 /* Which link status register should we use? */
433 unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1); 431 base_addr = (u64 *)
434 mynodepda->bte_if[i].bte_base_addr = (u64 *) 432 REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
435 REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status); 433 mynodepda->bte_if[i].bte_base_addr = base_addr;
434 mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
435 mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
436 mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
437 mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
436 438
437 /* 439 /*
438 * Initialize the notification and spinlock 440 * Initialize the notification and spinlock
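The point of folding the busy/length store into BTE_START_TRANSFER is ordering:
on shub2 a single store sets the length and starts the transfer, so source and
destination must already be programmed when it lands. A sketch of the required
sequence (BTE_DEST_STORE is assumed to exist alongside the BTE_SRC_STORE shown
above):

	*bte->most_rcnt_na = BTE_WORD_BUSY;       /* 1. arm the notification word */
	BTE_SRC_STORE(bte, TO_PHYS(src));         /* 2. program source */
	BTE_DEST_STORE(bte, TO_PHYS(dest));       /* 3. program destination (assumed macro) */
	BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode)); /* 4. go, last */
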
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index fd104312c6bd..fcbc748ae433 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
33 * Wait until all BTE related CRBs are completed 33 * Wait until all BTE related CRBs are completed
34 * and then reset the interfaces. 34 * and then reset the interfaces.
35 */ 35 */
36void bte_error_handler(unsigned long _nodepda) 36void shub1_bte_error_handler(unsigned long _nodepda)
37{ 37{
38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; 38 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
39 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
40 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; 39 struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
41 nasid_t nasid; 40 nasid_t nasid;
42 int i; 41 int i;
43 int valid_crbs; 42 int valid_crbs;
44 unsigned long irq_flags;
45 volatile u64 *notify;
46 bte_result_t bh_error;
47 ii_imem_u_t imem; /* II IMEM Register */ 43 ii_imem_u_t imem; /* II IMEM Register */
48 ii_icrb0_d_u_t icrbd; /* II CRB Register D */ 44 ii_icrb0_d_u_t icrbd; /* II CRB Register D */
49 ii_ibcr_u_t ibcr; 45 ii_ibcr_u_t ibcr;
50 ii_icmr_u_t icmr; 46 ii_icmr_u_t icmr;
51 ii_ieclr_u_t ieclr; 47 ii_ieclr_u_t ieclr;
52 48
53 BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, 49 BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
54 smp_processor_id())); 50 smp_processor_id()));
55 51
56 spin_lock_irqsave(recovery_lock, irq_flags);
57
58 if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && 52 if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
59 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { 53 (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
60 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, 54 BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
61 smp_processor_id())); 55 smp_processor_id()));
62 spin_unlock_irqrestore(recovery_lock, irq_flags);
63 return; 56 return;
64 } 57 }
65 /*
66 * Lock all interfaces on this node to prevent new transfers
67 * from being queued.
68 */
69 for (i = 0; i < BTES_PER_NODE; i++) {
70 if (err_nodepda->bte_if[i].cleanup_active) {
71 continue;
72 }
73 spin_lock(&err_nodepda->bte_if[i].spinlock);
74 BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
75 smp_processor_id(), i));
76 err_nodepda->bte_if[i].cleanup_active = 1;
77 }
78 58
79 /* Determine information about our hub */ 59 /* Determine information about our hub */
80 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); 60 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
101 mod_timer(recovery_timer, HZ * 5); 81 mod_timer(recovery_timer, HZ * 5);
102 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, 82 BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
103 smp_processor_id())); 83 smp_processor_id()));
104 spin_unlock_irqrestore(recovery_lock, irq_flags);
105 return; 84 return;
106 } 85 }
107 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { 86 if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
120 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", 99 BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
121 err_nodepda, smp_processor_id(), 100 err_nodepda, smp_processor_id(),
122 i)); 101 i));
123 spin_unlock_irqrestore(recovery_lock,
124 irq_flags);
125 return; 102 return;
126 } 103 }
127 } 104 }
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
146 ibcr.ii_ibcr_fld_s.i_soft_reset = 1; 123 ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
147 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); 124 REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
148 125
126 del_timer(recovery_timer);
127}
128
129/*
130 * Wait until all BTE related CRBs are completed
131 * and then reset the interfaces.
132 */
133void bte_error_handler(unsigned long _nodepda)
134{
135 struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
136 spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
137 int i;
138 nasid_t nasid;
139 unsigned long irq_flags;
140 volatile u64 *notify;
141 bte_result_t bh_error;
142
143 BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
144 smp_processor_id()));
145
146 spin_lock_irqsave(recovery_lock, irq_flags);
147
148 /*
149 * Lock all interfaces on this node to prevent new transfers
150 * from being queued.
151 */
152 for (i = 0; i < BTES_PER_NODE; i++) {
153 if (err_nodepda->bte_if[i].cleanup_active) {
154 continue;
155 }
156 spin_lock(&err_nodepda->bte_if[i].spinlock);
157 BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
158 smp_processor_id(), i));
159 err_nodepda->bte_if[i].cleanup_active = 1;
160 }
161
162 if (is_shub1()) {
163 shub1_bte_error_handler(_nodepda);
164 } else {
165 nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
166
167 if (ia64_sn_bte_recovery(nasid))
168 panic("bte_error_handler(): Fatal BTE Error");
169 }
170
149 for (i = 0; i < BTES_PER_NODE; i++) { 171 for (i = 0; i < BTES_PER_NODE; i++) {
150 bh_error = err_nodepda->bte_if[i].bh_error; 172 bh_error = err_nodepda->bte_if[i].bh_error;
151 if (bh_error != BTE_SUCCESS) { 173 if (bh_error != BTE_SUCCESS) {
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
165 spin_unlock(&err_nodepda->bte_if[i].spinlock); 187 spin_unlock(&err_nodepda->bte_if[i].spinlock);
166 } 188 }
167 189
168 del_timer(recovery_timer);
169
170 spin_unlock_irqrestore(recovery_lock, irq_flags); 190 spin_unlock_irqrestore(recovery_lock, irq_flags);
171} 191}
172 192
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 2bdf684c5066..5c39b43ba3c0 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
38 if ((int)ret_stuff.v0) 38 if ((int)ret_stuff.v0)
39 panic("hubii_eint_handler(): Fatal TIO Error"); 39 panic("hubii_eint_handler(): Fatal TIO Error");
40 40
41 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ 41 if (is_shub1()) {
42 (void)hubiio_crb_error_handler(hubdev_info); 42 if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
43 (void)hubiio_crb_error_handler(hubdev_info);
44 } else
45 bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
43 46
44 return IRQ_HANDLED; 47 return IRQ_HANDLED;
45} 48}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 001880812b7c..18160a06a8c9 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -11,14 +11,15 @@
11#include <asm/sn/types.h> 11#include <asm/sn/types.h>
12#include <asm/sn/sn_sal.h> 12#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h> 13#include <asm/sn/addrs.h>
14#include "pci/pcibus_provider_defs.h" 14#include <asm/sn/pcibus_provider_defs.h>
15#include "pci/pcidev.h" 15#include <asm/sn/pcidev.h>
16#include "pci/pcibr_provider.h" 16#include "pci/pcibr_provider.h"
17#include "xtalk/xwidgetdev.h" 17#include "xtalk/xwidgetdev.h"
18#include <asm/sn/geo.h> 18#include <asm/sn/geo.h>
19#include "xtalk/hubdev.h" 19#include "xtalk/hubdev.h"
20#include <asm/sn/io.h> 20#include <asm/sn/io.h>
21#include <asm/sn/simulator.h> 21#include <asm/sn/simulator.h>
22#include <asm/sn/tioca_provider.h>
22 23
23char master_baseio_wid; 24char master_baseio_wid;
24nasid_t master_nasid = INVALID_NASID; /* Partition Master */ 25nasid_t master_nasid = INVALID_NASID; /* Partition Master */
@@ -34,6 +35,37 @@ struct brick {
34 35
35int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ 36int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
36 37
38struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
39
40/*
41 * Hooks and struct for unsupported pci providers
42 */
43
44static dma_addr_t
45sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
46{
47 return 0;
48}
49
50static void
51sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
52{
53 return;
54}
55
56static void *
57sn_default_pci_bus_fixup(struct pcibus_bussoft *soft)
58{
59 return NULL;
60}
61
62static struct sn_pcibus_provider sn_pci_default_provider = {
63 .dma_map = sn_default_pci_map,
64 .dma_map_consistent = sn_default_pci_map,
65 .dma_unmap = sn_default_pci_unmap,
66 .bus_fixup = sn_default_pci_bus_fixup,
67};
68
37/* 69/*
38 * Retrieve the DMA Flush List given nasid. This list is needed 70 * Retrieve the DMA Flush List given nasid. This list is needed
39 * to implement the WAR - Flush DMA data on PIO Reads. 71 * to implement the WAR - Flush DMA data on PIO Reads.
@@ -201,6 +233,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
201 struct sn_irq_info *sn_irq_info; 233 struct sn_irq_info *sn_irq_info;
202 struct pci_dev *host_pci_dev; 234 struct pci_dev *host_pci_dev;
203 int status = 0; 235 int status = 0;
236 struct pcibus_bussoft *bs;
204 237
205 dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); 238 dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
206 if (SN_PCIDEV_INFO(dev) <= 0) 239 if (SN_PCIDEV_INFO(dev) <= 0)
@@ -241,6 +274,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
241 } 274 }
242 275
243 /* set up host bus linkages */ 276 /* set up host bus linkages */
277 bs = SN_PCIBUS_BUSSOFT(dev->bus);
244 host_pci_dev = 278 host_pci_dev =
245 pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, 279 pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
246 SN_PCIDEV_INFO(dev)-> 280 SN_PCIDEV_INFO(dev)->
@@ -248,10 +282,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
248 SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = 282 SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
249 SN_PCIDEV_INFO(host_pci_dev); 283 SN_PCIDEV_INFO(host_pci_dev);
250 SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; 284 SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
251 SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus); 285 SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
286
287 if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
288 SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
289 } else {
290 SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
291 }
252 292
253 /* Only set up IRQ stuff if this device has a host bus context */ 293 /* Only set up IRQ stuff if this device has a host bus context */
254 if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) { 294 if (bs && sn_irq_info->irq_irq) {
255 SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; 295 SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
256 dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; 296 dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
257 sn_irq_fixup(dev, sn_irq_info); 297 sn_irq_fixup(dev, sn_irq_info);
@@ -271,6 +311,7 @@ static void sn_pci_controller_fixup(int segment, int busnum)
271 struct pcibus_bussoft *prom_bussoft_ptr; 311 struct pcibus_bussoft *prom_bussoft_ptr;
272 struct hubdev_info *hubdev_info; 312 struct hubdev_info *hubdev_info;
273 void *provider_soft; 313 void *provider_soft;
314 struct sn_pcibus_provider *provider;
274 315
275 status = 316 status =
276 sal_get_pcibus_info((u64) segment, (u64) busnum, 317 sal_get_pcibus_info((u64) segment, (u64) busnum,
@@ -291,16 +332,22 @@ static void sn_pci_controller_fixup(int segment, int busnum)
291 /* 332 /*
292 * Per-provider fixup. Copies the contents from prom to local 333 * Per-provider fixup. Copies the contents from prom to local
293 * area and links SN_PCIBUS_BUSSOFT(). 334 * area and links SN_PCIBUS_BUSSOFT().
294 *
295 * Note: Provider is responsible for ensuring that prom_bussoft_ptr
296 * represents an asic-type that it can handle.
297 */ 335 */
298 336
299 if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) { 337 if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
300 return; /* no further fixup necessary */ 338 return; /* unsupported asic type */
339 }
340
341 provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
342 if (provider == NULL) {
 343 return; /* no provider registered for this asic */
344 }
345
346 provider_soft = NULL;
347 if (provider->bus_fixup) {
348 provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr);
301 } 349 }
302 350
303 provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
304 if (provider_soft == NULL) { 351 if (provider_soft == NULL) {
305 return; /* fixup failed or not applicable */ 352 return; /* fixup failed or not applicable */
306 } 353 }
@@ -339,6 +386,17 @@ static int __init sn_pci_init(void)
339 return 0; 386 return 0;
340 387
341 /* 388 /*
 389 * prime sn_pci_provider[]. Individual provider init routines will
390 * override their respective default entries.
391 */
392
393 for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
394 sn_pci_provider[i] = &sn_pci_default_provider;
395
396 pcibr_init_provider();
397 tioca_init_provider();
398
399 /*
342 * This is needed to avoid bounce limit checks in the blk layer 400 * This is needed to avoid bounce limit checks in the blk layer
343 */ 401 */
344 ia64_max_iommu_merge_mask = ~PAGE_MASK; 402 ia64_max_iommu_merge_mask = ~PAGE_MASK;
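A sketch of how a provider claims its slot in the array primed above; the
asic-type constant and all my_* names are hypothetical stand-ins
(pcibr_init_provider() and tioca_init_provider() presumably do the equivalent
for their asics). The stubs mirror sn_pci_default_provider from this hunk:

	static dma_addr_t my_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
	{
		return 0;	/* stub */
	}

	static void my_dma_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
	{
	}

	static void *my_bus_fixup(struct pcibus_bussoft *soft)
	{
		return NULL;	/* stub */
	}

	static struct sn_pcibus_provider my_provider = {
		.dma_map            = my_dma_map,
		.dma_map_consistent = my_dma_map,
		.dma_unmap          = my_dma_unmap,
		.bus_fixup          = my_bus_fixup,
	};

	int my_init_provider(void)
	{
		sn_pci_provider[PCIIO_ASIC_TYPE_MYASIC] = &my_provider; /* assumed enum value */
		return 0;
	}
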
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 3be44724f6c8..0f4e8138658f 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -13,8 +13,8 @@
13#include <asm/sn/addrs.h> 13#include <asm/sn/addrs.h>
14#include <asm/sn/arch.h> 14#include <asm/sn/arch.h>
15#include "xtalk/xwidgetdev.h" 15#include "xtalk/xwidgetdev.h"
16#include "pci/pcibus_provider_defs.h" 16#include <asm/sn/pcibus_provider_defs.h>
17#include "pci/pcidev.h" 17#include <asm/sn/pcidev.h>
18#include "pci/pcibr_provider.h" 18#include "pci/pcibr_provider.h"
19#include <asm/sn/shub_mmr.h> 19#include <asm/sn/shub_mmr.h>
20#include <asm/sn/sn_sal.h> 20#include <asm/sn/sn_sal.h>
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq)
82 nasid = get_nasid(); 82 nasid = get_nasid();
83 event_occurred = 83 event_occurred =
84 HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED)); 84 HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
85 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { 85 mask = event_occurred & SH_ALL_INT_MASK;
86 mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
87 }
88 if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
89 mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
90 }
91 if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
92 mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
93 }
94 if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
95 mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
96 }
97 HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), 86 HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
98 mask); 87 mask);
99 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); 88 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
100 89
101 move_irq(irq); 90 move_irq(irq);
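The four per-source tests collapse into a single mask operation; presumably
SH_ALL_INT_MASK (added by this patch's shub_mmr.h change, not shown here) is
simply the OR of the individual event masks, along these lines:

	/* Assumed shape -- the actual definition lives in shub_mmr.h. */
	#define SH_ALL_INT_MASK \
		(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
		 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK)
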
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index f0306b516afb..d35f2a6f9c94 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/root_dev.h> 30#include <linux/root_dev.h>
31#include <linux/nodemask.h> 31#include <linux/nodemask.h>
32#include <linux/pm.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/sal.h> 35#include <asm/sal.h>
@@ -353,6 +354,14 @@ void __init sn_setup(char **cmdline_p)
353 screen_info = sn_screen_info; 354 screen_info = sn_screen_info;
354 355
355 sn_timer_init(); 356 sn_timer_init();
357
358 /*
359 * set pm_power_off to a SAL call to allow
360 * sn machines to power off. The SAL call can be replaced
361 * by an ACPI interface call when ACPI is fully implemented
362 * for sn.
363 */
364 pm_power_off = ia64_sn_power_down;
356} 365}
357 366
358/** 367/**
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 197356460ee1..833e700fdac9 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -28,6 +28,7 @@
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/miscdevice.h> 30#include <linux/miscdevice.h>
31#include <linux/utsname.h>
31#include <linux/cpumask.h> 32#include <linux/cpumask.h>
32#include <linux/smp_lock.h> 33#include <linux/smp_lock.h>
33#include <linux/nodemask.h> 34#include <linux/nodemask.h>
@@ -43,6 +44,7 @@
43#include <asm/sn/module.h> 44#include <asm/sn/module.h>
44#include <asm/sn/geo.h> 45#include <asm/sn/geo.h>
45#include <asm/sn/sn2/sn_hwperf.h> 46#include <asm/sn/sn2/sn_hwperf.h>
47#include <asm/sn/addrs.h>
46 48
47static void *sn_hwperf_salheap = NULL; 49static void *sn_hwperf_salheap = NULL;
48static int sn_hwperf_obj_cnt = 0; 50static int sn_hwperf_obj_cnt = 0;
@@ -81,26 +83,45 @@ out:
81 return e; 83 return e;
82} 84}
83 85
86static int sn_hwperf_location_to_bpos(char *location,
87 int *rack, int *bay, int *slot, int *slab)
88{
89 char type;
90
91 /* first scan for an old style geoid string */
92 if (sscanf(location, "%03d%c%02d#%d",
93 rack, &type, bay, slab) == 4)
94 *slot = 0;
95 else /* scan for a new bladed geoid string */
96 if (sscanf(location, "%03d%c%02d^%02d#%d",
97 rack, &type, bay, slot, slab) != 5)
98 return -1;
99 /* success */
100 return 0;
101}
102
84static int sn_hwperf_geoid_to_cnode(char *location) 103static int sn_hwperf_geoid_to_cnode(char *location)
85{ 104{
86 int cnode; 105 int cnode;
87 geoid_t geoid; 106 geoid_t geoid;
88 moduleid_t module_id; 107 moduleid_t module_id;
89 char type; 108 int rack, bay, slot, slab;
90 int rack, slot, slab; 109 int this_rack, this_bay, this_slot, this_slab;
91 int this_rack, this_slot, this_slab;
92 110
93 if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4) 111 if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
94 return -1; 112 return -1;
95 113
96 for (cnode = 0; cnode < numionodes; cnode++) { 114 for (cnode = 0; cnode < numionodes; cnode++) {
97 geoid = cnodeid_get_geoid(cnode); 115 geoid = cnodeid_get_geoid(cnode);
98 module_id = geo_module(geoid); 116 module_id = geo_module(geoid);
99 this_rack = MODULE_GET_RACK(module_id); 117 this_rack = MODULE_GET_RACK(module_id);
100 this_slot = MODULE_GET_BPOS(module_id); 118 this_bay = MODULE_GET_BPOS(module_id);
119 this_slot = geo_slot(geoid);
101 this_slab = geo_slab(geoid); 120 this_slab = geo_slab(geoid);
102 if (rack == this_rack && slot == this_slot && slab == this_slab) 121 if (rack == this_rack && bay == this_bay &&
122 slot == this_slot && slab == this_slab) {
103 break; 123 break;
124 }
104 } 125 }
105 126
106 return cnode < numionodes ? cnode : -1; 127 return cnode < numionodes ? cnode : -1;
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
153 return slabname; 174 return slabname;
154} 175}
155 176
177static void print_pci_topology(struct seq_file *s,
178 struct sn_hwperf_object_info *obj, int *ordinal,
179 u64 rack, u64 bay, u64 slot, u64 slab)
180{
181 char *p1;
182 char *p2;
183 char *pg;
184
185 if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
186 return; /* ignore */
187 if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
188 __pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
189 for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
190 if (!(p2 = strchr(p1, '\n')))
191 break;
192 *p2 = '\0';
193 seq_printf(s, "pcibus %d %s-%s\n",
194 *ordinal, obj->location, p1);
195 (*ordinal)++;
196 p1 = p2 + 1;
197 }
198 }
199 free_page((unsigned long)pg);
200}
201
156static int sn_topology_show(struct seq_file *s, void *d) 202static int sn_topology_show(struct seq_file *s, void *d)
157{ 203{
158 int sz; 204 int sz;
159 int pt; 205 int pt;
160 int e; 206 int e = 0;
161 int i; 207 int i;
162 int j; 208 int j;
163 const char *slabname; 209 const char *slabname;
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d)
169 struct sn_hwperf_object_info *p; 215 struct sn_hwperf_object_info *p;
170 struct sn_hwperf_object_info *obj = d; /* this object */ 216 struct sn_hwperf_object_info *obj = d; /* this object */
171 struct sn_hwperf_object_info *objs = s->private; /* all objects */ 217 struct sn_hwperf_object_info *objs = s->private; /* all objects */
218 int rack, bay, slot, slab;
219 u8 shubtype;
220 u8 system_size;
221 u8 sharing_size;
222 u8 partid;
223 u8 coher;
224 u8 nasid_shift;
225 u8 region_size;
226 u16 nasid_mask;
227 int nasid_msb;
228 int pci_bus_ordinal = 0;
172 229
173 if (obj == objs) { 230 if (obj == objs) {
174 seq_printf(s, "# sn_topology version 1\n"); 231 seq_printf(s, "# sn_topology version 2\n");
175 seq_printf(s, "# objtype ordinal location partition" 232 seq_printf(s, "# objtype ordinal location partition"
176 " [attribute value [, ...]]\n"); 233 " [attribute value [, ...]]\n");
234
235 if (ia64_sn_get_sn_info(0,
236 &shubtype, &nasid_mask, &nasid_shift, &system_size,
237 &sharing_size, &partid, &coher, &region_size))
238 BUG();
239 for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
240 if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
241 break;
242 }
243 seq_printf(s, "partition %u %s local "
244 "shubtype %s, "
245 "nasid_mask 0x%016lx, "
246 "nasid_bits %d:%d, "
247 "system_size %d, "
248 "sharing_size %d, "
249 "coherency_domain %d, "
250 "region_size %d\n",
251
252 partid, system_utsname.nodename,
253 shubtype ? "shub2" : "shub1",
254 (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
255 system_size, sharing_size, coher, region_size);
177 } 256 }
178 257
179 if (SN_HWPERF_FOREIGN(obj)) { 258 if (SN_HWPERF_FOREIGN(obj)) {
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
181 return 0; 260 return 0;
182 } 261 }
183 262
184 for (i = 0; obj->name[i]; i++) { 263 for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
185 if (obj->name[i] == ' ') 264 if (obj->name[i] == ' ')
186 obj->name[i] = '_'; 265 obj->name[i] = '_';
187 } 266 }
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d)
221 seq_putc(s, '\n'); 300 seq_putc(s, '\n');
222 } 301 }
223 } 302 }
303
304 /*
305 * PCI busses attached to this node, if any
306 */
307 if (sn_hwperf_location_to_bpos(obj->location,
308 &rack, &bay, &slot, &slab)) {
309 /* export pci bus info */
310 print_pci_topology(s, obj, &pci_bus_ordinal,
311 rack, bay, slot, slab);
312
313 }
224 } 314 }
225 315
226 if (obj->ports) { 316 if (obj->ports) {
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err)
397 break; 487 break;
398 488
399 case SN_HWPERF_OP_BUSY: 489 case SN_HWPERF_OP_BUSY:
490 e = -EBUSY;
491 break;
492
400 case SN_HWPERF_OP_RECONFIGURE: 493 case SN_HWPERF_OP_RECONFIGURE:
401 e = -EAGAIN; 494 e = -EAGAIN;
402 break; 495 break;
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
549 r = sn_hwperf_op_cpu(&op_info); 642 r = sn_hwperf_op_cpu(&op_info);
550 if (r) { 643 if (r) {
551 r = sn_hwperf_map_err(r); 644 r = sn_hwperf_map_err(r);
645 a.v0 = v0;
552 goto error; 646 goto error;
553 } 647 }
554 break; 648 break;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
new file mode 100644
index 000000000000..66190d7e492d
--- /dev/null
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -0,0 +1,548 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/version.h>
12#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/proc_fs.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <asm/uaccess.h>
18#include <asm/sn/sn_sal.h>
19#include <asm/sn/addrs.h>
20#include <asm/sn/io.h>
21#include <asm/sn/types.h>
22#include <asm/sn/shubio.h>
23#include <asm/sn/tiocx.h>
24#include "tio.h"
25#include "xtalk/xwidgetdev.h"
26#include "xtalk/hubdev.h"
27
28#define CX_DEV_NONE 0
29#define DEVICE_NAME "tiocx"
30#define WIDGET_ID 0
31#define TIOCX_DEBUG 0
32
33#if TIOCX_DEBUG
34#define DBG(fmt...) printk(KERN_ALERT fmt)
35#else
36#define DBG(fmt...)
37#endif
38
39struct device_attribute dev_attr_cxdev_control;
40
41/**
42 * tiocx_match - Try to match driver id list with device.
43 * @dev: device pointer
44 * @drv: driver pointer
45 *
46 * Returns 1 if match, 0 otherwise.
47 */
48static int tiocx_match(struct device *dev, struct device_driver *drv)
49{
50 struct cx_dev *cx_dev = to_cx_dev(dev);
51 struct cx_drv *cx_drv = to_cx_driver(drv);
52 const struct cx_device_id *ids = cx_drv->id_table;
53
54 if (!ids)
55 return 0;
56
57 while (ids->part_num) {
58 if (ids->part_num == cx_dev->cx_id.part_num)
59 return 1;
60 ids++;
61 }
62 return 0;
63
64}
65
66static int tiocx_hotplug(struct device *dev, char **envp, int num_envp,
67 char *buffer, int buffer_size)
68{
69 return -ENODEV;
70}
71
72static void tiocx_bus_release(struct device *dev)
73{
74 kfree(to_cx_dev(dev));
75}
76
77struct bus_type tiocx_bus_type = {
78 .name = "tiocx",
79 .match = tiocx_match,
80 .hotplug = tiocx_hotplug,
81};
82
83/**
84 * cx_device_match - Find cx_device in the id table.
85 * @ids: id table from driver
86 * @cx_device: part/mfg id for the device
87 *
88 */
89static const struct cx_device_id *cx_device_match(const struct cx_device_id
90 *ids,
91 struct cx_dev *cx_device)
92{
93 /*
94 * NOTES: We may want to check for CX_ANY_ID too.
95 * Do we want to match against nasid too?
96 * CX_DEV_NONE == 0, if the driver tries to register for
97 * part/mfg == 0 we should return no-match (NULL) here.
98 */
99 while (ids->part_num && ids->mfg_num) {
100 if (ids->part_num == cx_device->cx_id.part_num &&
101 ids->mfg_num == cx_device->cx_id.mfg_num)
102 return ids;
103 ids++;
104 }
105
106 return NULL;
107}
108
109/**
110 * cx_device_probe - Look for matching device.
111 * Call driver probe routine if found.
112 * @cx_driver: driver table (cx_drv struct) from driver
113 * @cx_device: part/mfg id for the device
114 */
115static int cx_device_probe(struct device *dev)
116{
117 const struct cx_device_id *id;
118 struct cx_drv *cx_drv = to_cx_driver(dev->driver);
119 struct cx_dev *cx_dev = to_cx_dev(dev);
120 int error = 0;
121
122 if (!cx_dev->driver && cx_drv->probe) {
123 id = cx_device_match(cx_drv->id_table, cx_dev);
124 if (id) {
125 if ((error = cx_drv->probe(cx_dev, id)) < 0)
126 return error;
127 else
128 cx_dev->driver = cx_drv;
129 }
130 }
131
132 return error;
133}
134
135/**
136 * cx_driver_remove - Remove driver from device struct.
137 * @dev: device
138 */
139static int cx_driver_remove(struct device *dev)
140{
141 struct cx_dev *cx_dev = to_cx_dev(dev);
142 struct cx_drv *cx_drv = cx_dev->driver;
143 if (cx_drv->remove)
144 cx_drv->remove(cx_dev);
145 cx_dev->driver = NULL;
146 return 0;
147}
148
149/**
150 * cx_driver_register - Register the driver.
151 * @cx_driver: driver table (cx_drv struct) from driver
152 *
153 * Called from the driver init routine to register a driver.
154 * The cx_drv struct contains the driver name, a pointer to
155 * a table of part/mfg numbers and a pointer to the driver's
156 * probe/attach routine.
157 */
158int cx_driver_register(struct cx_drv *cx_driver)
159{
160 cx_driver->driver.name = cx_driver->name;
161 cx_driver->driver.bus = &tiocx_bus_type;
162 cx_driver->driver.probe = cx_device_probe;
163 cx_driver->driver.remove = cx_driver_remove;
164
165 return driver_register(&cx_driver->driver);
166}
167
168/**
169 * cx_driver_unregister - Unregister the driver.
170 * @cx_driver: driver table (cx_drv struct) from driver
171 */
172int cx_driver_unregister(struct cx_drv *cx_driver)
173{
174 driver_unregister(&cx_driver->driver);
175 return 0;
176}
177
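/*
 * Usage sketch (not part of this file): a minimal tiocx client driver
 * built on the registration API above. All names and the part/mfg
 * numbers below are hypothetical.
 */
#if 0
static int my_probe(struct cx_dev *dev, const struct cx_device_id *id)
{
	return 0;
}

static int my_remove(struct cx_dev *dev)
{
	return 0;
}

static const struct cx_device_id my_ids[] = {
	{ .part_num = 0xd002, .mfg_num = 0x24 },
	{ 0, 0 }
};

static struct cx_drv my_cx_driver = {
	.name = "mycx",
	.id_table = my_ids,
	.probe = my_probe,
	.remove = my_remove,
};

/* From the client's module init: cx_driver_register(&my_cx_driver); */
#endif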
178/**
179 * cx_device_register - Register a device.
180 * @nasid: device's nasid
181 * @part_num: device's part number
182 * @mfg_num: device's manufacturer number
183 * @hubdev: hub info associated with this device
184 *
185 */
186int
187cx_device_register(nasid_t nasid, int part_num, int mfg_num,
188 struct hubdev_info *hubdev)
189{
190 struct cx_dev *cx_dev;
191
192 cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL);
193 DBG("cx_dev= 0x%p\n", cx_dev);
194 if (cx_dev == NULL)
195 return -ENOMEM;
196
197 cx_dev->cx_id.part_num = part_num;
198 cx_dev->cx_id.mfg_num = mfg_num;
199 cx_dev->cx_id.nasid = nasid;
200 cx_dev->hubdev = hubdev;
201
202 cx_dev->dev.parent = NULL;
203 cx_dev->dev.bus = &tiocx_bus_type;
204 cx_dev->dev.release = tiocx_bus_release;
205 snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x",
206 cx_dev->cx_id.nasid, cx_dev->cx_id.part_num);
207 device_register(&cx_dev->dev);
208 get_device(&cx_dev->dev);
209
210 device_create_file(&cx_dev->dev, &dev_attr_cxdev_control);
211
212 return 0;
213}
214
215/**
216 * cx_device_unregister - Unregister a device.
217 * @cx_dev: part/mfg id for the device
218 */
219int cx_device_unregister(struct cx_dev *cx_dev)
220{
221 put_device(&cx_dev->dev);
222 device_unregister(&cx_dev->dev);
223 return 0;
224}
225
226/**
227 * cx_device_reload - Reload the device.
228 * @nasid: device's nasid
229 * @part_num: device's part number
230 * @mfg_num: device's manufacturer number
231 *
232 * Remove the device associated with 'nasid' from device list and then
233 * call device-register with the given part/mfg numbers.
234 */
235static int cx_device_reload(struct cx_dev *cx_dev)
236{
237 device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control);
238 cx_device_unregister(cx_dev);
239 return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
240 cx_dev->cx_id.mfg_num, cx_dev->hubdev);
241}
242
243static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
244 u64 sn_irq_info,
245 int req_irq, nasid_t req_nasid,
246 int req_slice)
247{
248 struct ia64_sal_retval rv;
249 rv.status = 0;
250 rv.v0 = 0;
251
252 ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
253 SAL_INTR_ALLOC, nasid,
254 widget, sn_irq_info, req_irq,
255 req_nasid, req_slice);
256 return rv.status;
257}
258
259static inline void tiocx_intr_free(nasid_t nasid, int widget,
260 struct sn_irq_info *sn_irq_info)
261{
262 struct ia64_sal_retval rv;
263 rv.status = 0;
264 rv.v0 = 0;
265
266 ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT,
267 SAL_INTR_FREE, nasid,
268 widget, sn_irq_info->irq_irq,
269 sn_irq_info->irq_cookie, 0, 0);
270}
271
272struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
273 nasid_t req_nasid, int slice)
274{
275 struct sn_irq_info *sn_irq_info;
276 int status;
277 int sn_irq_size = sizeof(struct sn_irq_info);
278
279 if ((nasid & 1) == 0)
280 return NULL;
281
282 sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL);
283 if (sn_irq_info == NULL)
284 return NULL;
285
286 memset(sn_irq_info, 0x0, sn_irq_size);
287
288 status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq,
289 req_nasid, slice);
290 if (status) {
291 kfree(sn_irq_info);
292 return NULL;
293 } else {
294 return sn_irq_info;
295 }
296}
297
298void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
299{
300 uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
301 nasid_t nasid = NASID_GET(bridge);
302 int widget;
303
304 if (nasid & 1) {
305 widget = TIO_SWIN_WIDGETNUM(bridge);
306 tiocx_intr_free(nasid, widget, sn_irq_info);
307 kfree(sn_irq_info);
308 }
309}
310
311uint64_t
312tiocx_dma_addr(uint64_t addr)
313{
314 return PHYS_TO_TIODMA(addr);
315}
316
317uint64_t
318tiocx_swin_base(int nasid)
319{
320 return TIO_SWIN_BASE(nasid, TIOCX_CORELET);
321}
322
323EXPORT_SYMBOL(cx_driver_register);
324EXPORT_SYMBOL(cx_driver_unregister);
325EXPORT_SYMBOL(cx_device_register);
326EXPORT_SYMBOL(cx_device_unregister);
327EXPORT_SYMBOL(tiocx_irq_alloc);
328EXPORT_SYMBOL(tiocx_irq_free);
329EXPORT_SYMBOL(tiocx_bus_type);
330EXPORT_SYMBOL(tiocx_dma_addr);
331EXPORT_SYMBOL(tiocx_swin_base);
332
333static uint64_t tiocx_get_hubdev_info(u64 handle, u64 address)
334{
335
336 struct ia64_sal_retval ret_stuff;
337 ret_stuff.status = 0;
338 ret_stuff.v0 = 0;
339
340 ia64_sal_oemcall_nolock(&ret_stuff,
341 SN_SAL_IOIF_GET_HUBDEV_INFO,
342 handle, address, 0, 0, 0, 0, 0);
343 return ret_stuff.v0;
344}
345
346static void tio_conveyor_set(nasid_t nasid, int enable_flag)
347{
348 uint64_t ice_frz;
349 uint64_t disable_cb = (1ull << 61);
350
351 if (!(nasid & 1))
352 return;
353
354 ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG);
355 if (enable_flag) {
356 if (!(ice_frz & disable_cb)) /* already enabled */
357 return;
358 ice_frz &= ~disable_cb;
359 } else {
360 if (ice_frz & disable_cb) /* already disabled */
361 return;
362 ice_frz |= disable_cb;
363 }
 364 DBG("TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz);
365 REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz);
366}
367
368#define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1)
369#define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0)
370
371static void tio_corelet_reset(nasid_t nasid, int corelet)
372{
373 if (!(nasid & 1))
374 return;
375
376 REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet);
377 udelay(2000);
378 REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0);
379 udelay(2000);
380}
381
382static int fpga_attached(nasid_t nasid)
383{
384 uint64_t cx_credits;
385
386 cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3);
387 cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK;
388 DBG("cx_credits= 0x%lx\n", cx_credits);
389
390 return (cx_credits == 0xf) ? 1 : 0;
391}
392
393static int tiocx_reload(struct cx_dev *cx_dev)
394{
395 int part_num = CX_DEV_NONE;
396 int mfg_num = CX_DEV_NONE;
397 nasid_t nasid = cx_dev->cx_id.nasid;
398
399 if (fpga_attached(nasid)) {
400 uint64_t cx_id;
401
402 cx_id =
403 *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
404 WIDGET_ID);
405 part_num = XWIDGET_PART_NUM(cx_id);
406 mfg_num = XWIDGET_MFG_NUM(cx_id);
407 DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
408 /* just ignore it if it's a CE */
409 if (part_num == TIO_CE_ASIC_PARTNUM)
410 return 0;
411 }
412
413 cx_dev->cx_id.part_num = part_num;
414 cx_dev->cx_id.mfg_num = mfg_num;
415
416 /*
417 * Delete old device and register the new one. It's ok if
418 * part_num/mfg_num == CX_DEV_NONE. We want to register
419 * devices in the table even if a bitstream isn't loaded.
420 * That allows us to see that a bitstream isn't loaded via
421 * TIOCX_IOCTL_DEV_LIST.
422 */
423 return cx_device_reload(cx_dev);
424}
425
426static ssize_t show_cxdev_control(struct device *dev, char *buf)
427{
428 struct cx_dev *cx_dev = to_cx_dev(dev);
429
430 return sprintf(buf, "0x%x 0x%x 0x%x\n",
431 cx_dev->cx_id.nasid,
432 cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num);
433}
434
435static ssize_t store_cxdev_control(struct device *dev, const char *buf,
436 size_t count)
437{
438 int n;
439 struct cx_dev *cx_dev = to_cx_dev(dev);
440
441 if (!capable(CAP_SYS_ADMIN))
442 return -EPERM;
443
444 if (count <= 0)
445 return 0;
446
447 n = simple_strtoul(buf, NULL, 0);
448
449 switch (n) {
450 case 1:
451 tiocx_reload(cx_dev);
452 break;
453 case 3:
454 tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET);
455 break;
456 default:
457 break;
458 }
459
460 return count;
461}
462
463DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control);
464
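store_cxdev_control() gives root a two-command knob: writing 1 reloads the device entry (re-reading the FPGA's widget ID), writing 3 pulses a corelet reset. A hedged user-space sketch; the sysfs path depends on where the cx device lands in the device tree, so treat attr_path as a placeholder:

	#include <fcntl.h>
	#include <unistd.h>

	static int cxdev_send(const char *attr_path, const char *cmd)
	{
		int fd = open(attr_path, O_WRONLY);	/* ".../cxdev_control" */

		if (fd < 0)
			return -1;
		if (write(fd, cmd, 1) != 1) {	/* "1" = reload, "3" = reset */
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}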
465static int __init tiocx_init(void)
466{
467 cnodeid_t cnodeid;
468 int found_tiocx_device = 0;
469
470 bus_register(&tiocx_bus_type);
471
472 for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
473 nasid_t nasid;
474
475 if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
476 break; /* No more nasids .. bail out of loop */
477
478 if (nasid & 0x1) { /* TIO's are always odd */
479 struct hubdev_info *hubdev;
480 uint64_t status;
481 struct xwidget_info *widgetp;
482
483 DBG("Found TIO at nasid 0x%x\n", nasid);
484
485 hubdev =
486 (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo);
487 status =
488 tiocx_get_hubdev_info(nasid,
489 (uint64_t) __pa(hubdev));
490 if (status)
491 continue;
492
493 widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET];
494
495 /* The CE hangs off of the CX port but is not an FPGA */
496 if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM)
497 continue;
498
499 tio_corelet_reset(nasid, TIOCX_CORELET);
500 tio_conveyor_enable(nasid);
501
502 if (cx_device_register
503 (nasid, widgetp->xwi_hwid.part_num,
504 widgetp->xwi_hwid.mfg_num, hubdev) < 0)
505 return -ENXIO;
506 else
507 found_tiocx_device++;
508 }
509 }
510
511 /* It's ok if we find zero devices. */
512 DBG("found_tiocx_device= %d\n", found_tiocx_device);
513
514 return 0;
515}
516
517static void __exit tiocx_exit(void)
518{
519 struct device *dev;
520 struct device *tdev;
521
522 DBG("tiocx_exit\n");
523
524 /*
525 * Unregister devices.
526 */
527 list_for_each_entry_safe(dev, tdev, &tiocx_bus_type.devices.list,
528 bus_list) {
529 if (dev) {
530 struct cx_dev *cx_dev = to_cx_dev(dev);
531 device_remove_file(dev, &dev_attr_cxdev_control);
532 cx_device_unregister(cx_dev);
533 }
534 }
535
536 bus_unregister(&tiocx_bus_type);
537}
538
539module_init(tiocx_init);
540module_exit(tiocx_exit);
541
542/************************************************************************
543 * Module licensing and description
544 ************************************************************************/
545MODULE_LICENSE("GPL");
546MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
547MODULE_DESCRIPTION("TIOCX module");
548MODULE_SUPPORTED_DEVICE(DEVICE_NAME);
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
index b5dca0097a8e..2f915bce25f9 100644
--- a/arch/ia64/sn/pci/Makefile
+++ b/arch/ia64/sn/pci/Makefile
@@ -7,4 +7,4 @@
7# 7#
8# Makefile for the sn pci general routines. 8# Makefile for the sn pci general routines.
9 9
10obj-y := pci_dma.o pcibr/ 10obj-y := pci_dma.o tioca_provider.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index f680824f819d..5da9bdbde7cb 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -12,9 +12,8 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <asm/dma.h> 13#include <asm/dma.h>
14#include <asm/sn/sn_sal.h> 14#include <asm/sn/sn_sal.h>
15#include "pci/pcibus_provider_defs.h" 15#include <asm/sn/pcibus_provider_defs.h>
16#include "pci/pcidev.h" 16#include <asm/sn/pcidev.h>
17#include "pci/pcibr_provider.h"
18 17
19#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 18#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
20#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 19#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
@@ -79,7 +78,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
79{ 78{
80 void *cpuaddr; 79 void *cpuaddr;
81 unsigned long phys_addr; 80 unsigned long phys_addr;
82 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 81 struct pci_dev *pdev = to_pci_dev(dev);
82 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
83 83
84 BUG_ON(dev->bus != &pci_bus_type); 84 BUG_ON(dev->bus != &pci_bus_type);
85 85
@@ -102,8 +102,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
102 * resources. 102 * resources.
103 */ 103 */
104 104
105 *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size, 105 *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
106 SN_PCIDMA_CONSISTENT);
107 if (!*dma_handle) { 106 if (!*dma_handle) {
108 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 107 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
109 free_pages((unsigned long)cpuaddr, get_order(size)); 108 free_pages((unsigned long)cpuaddr, get_order(size));
@@ -127,11 +126,12 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
127void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 126void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
128 dma_addr_t dma_handle) 127 dma_addr_t dma_handle)
129{ 128{
130 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 129 struct pci_dev *pdev = to_pci_dev(dev);
130 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
131 131
132 BUG_ON(dev->bus != &pci_bus_type); 132 BUG_ON(dev->bus != &pci_bus_type);
133 133
134 pcibr_dma_unmap(pcidev_info, dma_handle, 0); 134 provider->dma_unmap(pdev, dma_handle, 0);
135 free_pages((unsigned long)cpu_addr, get_order(size)); 135 free_pages((unsigned long)cpu_addr, get_order(size));
136} 136}
137EXPORT_SYMBOL(sn_dma_free_coherent); 137EXPORT_SYMBOL(sn_dma_free_coherent);
@@ -159,12 +159,13 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
159{ 159{
160 dma_addr_t dma_addr; 160 dma_addr_t dma_addr;
161 unsigned long phys_addr; 161 unsigned long phys_addr;
162 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 162 struct pci_dev *pdev = to_pci_dev(dev);
163 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
163 164
164 BUG_ON(dev->bus != &pci_bus_type); 165 BUG_ON(dev->bus != &pci_bus_type);
165 166
166 phys_addr = __pa(cpu_addr); 167 phys_addr = __pa(cpu_addr);
167 dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0); 168 dma_addr = provider->dma_map(pdev, phys_addr, size);
168 if (!dma_addr) { 169 if (!dma_addr) {
169 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 170 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
170 return 0; 171 return 0;
@@ -187,10 +188,12 @@ EXPORT_SYMBOL(sn_dma_map_single);
187void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 188void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
188 int direction) 189 int direction)
189{ 190{
190 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 191 struct pci_dev *pdev = to_pci_dev(dev);
192 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
191 193
192 BUG_ON(dev->bus != &pci_bus_type); 194 BUG_ON(dev->bus != &pci_bus_type);
193 pcibr_dma_unmap(pcidev_info, dma_addr, direction); 195
196 provider->dma_unmap(pdev, dma_addr, direction);
194} 197}
195EXPORT_SYMBOL(sn_dma_unmap_single); 198EXPORT_SYMBOL(sn_dma_unmap_single);
196 199
@@ -207,12 +210,13 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
207 int nhwentries, int direction) 210 int nhwentries, int direction)
208{ 211{
209 int i; 212 int i;
210 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 213 struct pci_dev *pdev = to_pci_dev(dev);
214 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
211 215
212 BUG_ON(dev->bus != &pci_bus_type); 216 BUG_ON(dev->bus != &pci_bus_type);
213 217
214 for (i = 0; i < nhwentries; i++, sg++) { 218 for (i = 0; i < nhwentries; i++, sg++) {
215 pcibr_dma_unmap(pcidev_info, sg->dma_address, direction); 219 provider->dma_unmap(pdev, sg->dma_address, direction);
216 sg->dma_address = (dma_addr_t) NULL; 220 sg->dma_address = (dma_addr_t) NULL;
217 sg->dma_length = 0; 221 sg->dma_length = 0;
218 } 222 }
@@ -233,7 +237,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
233{ 237{
234 unsigned long phys_addr; 238 unsigned long phys_addr;
235 struct scatterlist *saved_sg = sg; 239 struct scatterlist *saved_sg = sg;
236 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); 240 struct pci_dev *pdev = to_pci_dev(dev);
241 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
237 int i; 242 int i;
238 243
239 BUG_ON(dev->bus != &pci_bus_type); 244 BUG_ON(dev->bus != &pci_bus_type);
@@ -243,8 +248,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
243 */ 248 */
244 for (i = 0; i < nhwentries; i++, sg++) { 249 for (i = 0; i < nhwentries; i++, sg++) {
245 phys_addr = SG_ENT_PHYS_ADDRESS(sg); 250 phys_addr = SG_ENT_PHYS_ADDRESS(sg);
246 sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr, 251 sg->dma_address = provider->dma_map(pdev,
247 sg->length, 0); 252 phys_addr, sg->length);
248 253
249 if (!sg->dma_address) { 254 if (!sg->dma_address) {
250 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); 255 printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index 9d6854666f9b..0e47bce85f2d 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -8,8 +8,8 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <asm/sn/sn_sal.h> 10#include <asm/sn/sn_sal.h>
11#include "pci/pcibus_provider_defs.h" 11#include <asm/sn/pcibus_provider_defs.h>
12#include "pci/pcidev.h" 12#include <asm/sn/pcidev.h>
13#include "pci/pcibr_provider.h" 13#include "pci/pcibr_provider.h"
14 14
15int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ 15int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index b1d66ac065c8..c90685985d81 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -12,8 +12,8 @@
12#include <asm/sn/geo.h> 12#include <asm/sn/geo.h>
13#include "xtalk/xwidgetdev.h" 13#include "xtalk/xwidgetdev.h"
14#include "xtalk/hubdev.h" 14#include "xtalk/hubdev.h"
15#include "pci/pcibus_provider_defs.h" 15#include <asm/sn/pcibus_provider_defs.h>
16#include "pci/pcidev.h" 16#include <asm/sn/pcidev.h>
17#include "pci/tiocp.h" 17#include "pci/tiocp.h"
18#include "pci/pic.h" 18#include "pci/pic.h"
19#include "pci/pcibr_provider.h" 19#include "pci/pcibr_provider.h"
@@ -40,7 +40,7 @@ extern int sn_ioif_inited;
40 * we do not have to allocate entries in the PMU. 40 * we do not have to allocate entries in the PMU.
41 */ 41 */
42 42
43static uint64_t 43static dma_addr_t
44pcibr_dmamap_ate32(struct pcidev_info *info, 44pcibr_dmamap_ate32(struct pcidev_info *info,
45 uint64_t paddr, size_t req_size, uint64_t flags) 45 uint64_t paddr, size_t req_size, uint64_t flags)
46{ 46{
@@ -109,7 +109,7 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
109 return pci_addr; 109 return pci_addr;
110} 110}
111 111
112static uint64_t 112static dma_addr_t
113pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, 113pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
114 uint64_t dma_attributes) 114 uint64_t dma_attributes)
115{ 115{
@@ -141,7 +141,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
141 141
142} 142}
143 143
144static uint64_t 144static dma_addr_t
145pcibr_dmatrans_direct32(struct pcidev_info * info, 145pcibr_dmatrans_direct32(struct pcidev_info * info,
146 uint64_t paddr, size_t req_size, uint64_t flags) 146 uint64_t paddr, size_t req_size, uint64_t flags)
147{ 147{
@@ -180,11 +180,11 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
180 * DMA mappings for Direct 64 and 32 do not have any DMA maps. 180 * DMA mappings for Direct 64 and 32 do not have any DMA maps.
181 */ 181 */
182void 182void
183pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle, 183pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
184 int direction)
185{ 184{
186 struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> 185 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
187 pdi_pcibus_info; 186 struct pcibus_info *pcibus_info =
187 (struct pcibus_info *)pcidev_info->pdi_pcibus_info;
188 188
189 if (IS_PCI32_MAPPED(dma_handle)) { 189 if (IS_PCI32_MAPPED(dma_handle)) {
190 int ate_index; 190 int ate_index;
@@ -316,64 +316,63 @@ void sn_dma_flush(uint64_t addr)
316} 316}
317 317
318/* 318/*
319 * Wrapper DMA interface. Called from pci_dma.c routines. 319 * DMA interfaces. Called from pci_dma.c routines.
320 */ 320 */
321 321
322uint64_t 322dma_addr_t
323pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr, 323pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
324 size_t size, unsigned int flags)
325{ 324{
326 dma_addr_t dma_handle; 325 dma_addr_t dma_handle;
327 struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev; 326 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
328
329 if (flags & SN_PCIDMA_CONSISTENT) {
330 /* sn_pci_alloc_consistent interfaces */
331 if (pcidev->dev.coherent_dma_mask == ~0UL) {
332 dma_handle =
333 pcibr_dmatrans_direct64(pcidev_info, phys_addr,
334 PCI64_ATTR_BAR);
335 } else {
336 dma_handle =
337 (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
338 phys_addr, size,
339 PCI32_ATE_BAR);
340 }
341 } else {
342 /* map_sg/map_single interfaces */
343 327
344 /* SN cannot support DMA addresses smaller than 32 bits. */ 328 /* SN cannot support DMA addresses smaller than 32 bits. */
345 if (pcidev->dma_mask < 0x7fffffff) { 329 if (hwdev->dma_mask < 0x7fffffff) {
346 return 0; 330 return 0;
347 } 331 }
348 332
349 if (pcidev->dma_mask == ~0UL) { 333 if (hwdev->dma_mask == ~0UL) {
334 /*
335 * Handle the most common case: 64 bit cards. This
336 * call should always succeed.
337 */
338
339 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
340 PCI64_ATTR_PREF);
341 } else {
342 /* Handle 32-63 bit cards via direct mapping */
343 dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
344 size, 0);
345 if (!dma_handle) {
350 /* 346 /*
351 * Handle the most common case: 64 bit cards. This 347 * It is a 32 bit card and we cannot do direct mapping,
352 * call should always succeed. 348 * so we use an ATE.
353 */ 349 */
354 350
355 dma_handle = 351 dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
356 pcibr_dmatrans_direct64(pcidev_info, phys_addr, 352 size, PCI32_ATE_PREF);
357 PCI64_ATTR_PREF);
358 } else {
359 /* Handle 32-63 bit cards via direct mapping */
360 dma_handle =
361 pcibr_dmatrans_direct32(pcidev_info, phys_addr,
362 size, 0);
363 if (!dma_handle) {
364 /*
365 * It is a 32 bit card and we cannot do direct mapping,
366 * so we use an ATE.
367 */
368
369 dma_handle =
370 pcibr_dmamap_ate32(pcidev_info, phys_addr,
371 size, PCI32_ATE_PREF);
372 }
373 } 353 }
374 } 354 }
375 355
376 return dma_handle; 356 return dma_handle;
377} 357}
378 358
359dma_addr_t
360pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
361 size_t size)
362{
363 dma_addr_t dma_handle;
364 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
365
366 if (hwdev->dev.coherent_dma_mask == ~0UL) {
367 dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
368 PCI64_ATTR_BAR);
369 } else {
370 dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
371 phys_addr, size,
372 PCI32_ATE_BAR);
373 }
374
375 return dma_handle;
376}
377
379EXPORT_SYMBOL(sn_dma_flush); 378EXPORT_SYMBOL(sn_dma_flush);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 92bd278cf7ff..3893999d23d8 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -13,8 +13,8 @@
13#include "xtalk/xwidgetdev.h" 13#include "xtalk/xwidgetdev.h"
14#include <asm/sn/geo.h> 14#include <asm/sn/geo.h>
15#include "xtalk/hubdev.h" 15#include "xtalk/hubdev.h"
16#include "pci/pcibus_provider_defs.h" 16#include <asm/sn/pcibus_provider_defs.h>
17#include "pci/pcidev.h" 17#include <asm/sn/pcidev.h>
18#include "pci/pcibr_provider.h" 18#include "pci/pcibr_provider.h"
19#include <asm/sn/addrs.h> 19#include <asm/sn/addrs.h>
20 20
@@ -168,3 +168,23 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
168 pcibr_force_interrupt(sn_irq_info); 168 pcibr_force_interrupt(sn_irq_info);
169 } 169 }
170} 170}
171
172/*
173 * Provider entries for PIC/CP
174 */
175
176struct sn_pcibus_provider pcibr_provider = {
177 .dma_map = pcibr_dma_map,
178 .dma_map_consistent = pcibr_dma_map_consistent,
179 .dma_unmap = pcibr_dma_unmap,
180 .bus_fixup = pcibr_bus_fixup,
181};
182
183int
184pcibr_init_provider(void)
185{
186 sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
187 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
188
189 return 0;
190}
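
pcibr_init_provider() shows the new dispatch scheme in miniature: each bridge ASIC installs a struct sn_pcibus_provider in sn_pci_provider[], and pci_dma.c calls through those ops instead of hard-coding pcibr. A hedged sketch of what a hypothetical new ASIC would supply (PCIIO_ASIC_TYPE_FOO and the foo_* functions are placeholders; the op signatures are inferred from the pcibr functions wired up above):

	static dma_addr_t foo_dma_map(struct pci_dev *pdev,
				      unsigned long paddr, size_t size);
	static dma_addr_t foo_dma_map_consistent(struct pci_dev *pdev,
						 unsigned long paddr, size_t size);
	static void foo_dma_unmap(struct pci_dev *pdev, dma_addr_t addr, int dir);
	static void *foo_bus_fixup(struct pcibus_bussoft *prom_bussoft);

	static struct sn_pcibus_provider foo_provider = {
		.dma_map	    = foo_dma_map,
		.dma_map_consistent = foo_dma_map_consistent,
		.dma_unmap	    = foo_dma_unmap,
		.bus_fixup	    = foo_bus_fixup,
	};

	int foo_init_provider(void)
	{
		sn_pci_provider[PCIIO_ASIC_TYPE_FOO] = &foo_provider;
		return 0;
	}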
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 74a74a7d2a13..865c11c3b50a 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,8 +8,8 @@
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include "pci/pcibus_provider_defs.h" 11#include <asm/sn/pcibus_provider_defs.h>
12#include "pci/pcidev.h" 12#include <asm/sn/pcidev.h>
13#include "pci/tiocp.h" 13#include "pci/tiocp.h"
14#include "pci/pic.h" 14#include "pci/pic.h"
15#include "pci/pcibr_provider.h" 15#include "pci/pcibr_provider.h"
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
new file mode 100644
index 000000000000..54a0dd447e76
--- /dev/null
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -0,0 +1,668 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */
8
9#include <linux/types.h>
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <asm/sn/sn_sal.h>
13#include <asm/sn/addrs.h>
14#include <asm/sn/pcidev.h>
15#include <asm/sn/pcibus_provider_defs.h>
16#include <asm/sn/tioca_provider.h>
17
18uint32_t tioca_gart_found;
19EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
20
21LIST_HEAD(tioca_list);
22EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
23
24static int tioca_gart_init(struct tioca_kernel *);
25
26/**
27 * tioca_gart_init - Initialize SGI TIOCA GART
28 * @tioca_kern: ptr to the kernel struct for the CA to initialize
29 *
30 * If the indicated tioca has devices present, initialize its associated
31 * GART MMR's and kernel memory.
32 */
33static int
34tioca_gart_init(struct tioca_kernel *tioca_kern)
35{
36 uint64_t ap_reg;
37 uint64_t offset;
38 struct page *tmp;
39 struct tioca_common *tioca_common;
40 volatile struct tioca *ca_base;
41
42 tioca_common = tioca_kern->ca_common;
43 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
44
45 if (list_empty(tioca_kern->ca_devices))
46 return 0;
47
48 ap_reg = 0;
49
50 /*
51 * Validate aperture size
52 */
53
54 switch (CA_APERATURE_SIZE >> 20) {
55 case 4:
56 ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
57 break;
58 case 8:
59 ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
60 break;
61 case 16:
62 ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
63 break;
64 case 32:
65 ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
66 break;
67 case 64:
68 ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
69 break;
70 case 128:
71 ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
72 break;
73 case 256:
74 ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
75 break;
76 case 512:
77 ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
78 break;
79 case 1024:
80 ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
81 break;
82 case 2048:
83 ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
84 break;
85 case 4096:
86 ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
87 break;
88 default:
89 printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
90 "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE);
91 return -1;
92 }
93
94 /*
95 * Set up other aperture parameters
96 */
97
98 if (PAGE_SIZE >= 16384) {
99 tioca_kern->ca_ap_pagesize = 16384;
100 ap_reg |= CA_GART_PAGE_SIZE;
101 } else {
102 tioca_kern->ca_ap_pagesize = 4096;
103 }
104
105 tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
106 tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
107 tioca_kern->ca_gart_entries =
108 tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
109
110 ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
111 ap_reg |= tioca_kern->ca_ap_bus_base;
112
113 /*
114 * Allocate and set up the GART
115 */
116
117 tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
118 tmp =
119 alloc_pages_node(tioca_kern->ca_closest_node,
120 GFP_KERNEL | __GFP_ZERO,
121 get_order(tioca_kern->ca_gart_size));
122
123 if (!tmp) {
124 printk(KERN_ERR "%s: Could not allocate "
125 "%lu bytes (order %d) for GART\n",
126 __FUNCTION__,
127 tioca_kern->ca_gart_size,
128 get_order(tioca_kern->ca_gart_size));
129 return -ENOMEM;
130 }
131
132 tioca_kern->ca_gart = page_address(tmp);
133 tioca_kern->ca_gart_coretalk_addr =
134 PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
135
136 /*
137 * Compute PCI/AGP convenience fields
138 */
139
140 offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
141 tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
142 tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
143 tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
144 tioca_kern->ca_pcigart_base =
145 tioca_kern->ca_gart_coretalk_addr + offset;
146 tioca_kern->ca_pcigart =
147 &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
148 tioca_kern->ca_pcigart_entries =
149 tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
150 tioca_kern->ca_pcigart_pagemap =
151 kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
152 if (!tioca_kern->ca_pcigart_pagemap) {
153 free_pages((unsigned long)tioca_kern->ca_gart,
154 get_order(tioca_kern->ca_gart_size));
155 return -1;
156 }
157
158 offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
159 tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
160 tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
161 tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
162 tioca_kern->ca_gfxgart_base =
163 tioca_kern->ca_gart_coretalk_addr + offset;
164 tioca_kern->ca_gfxgart =
165 &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
166 tioca_kern->ca_gfxgart_entries =
167 tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
168
169 /*
170 * various control settings:
171 * use agp op-combining
172 * use GET semantics to fetch memory
173 * participate in coherency domain
174 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
175 */
176
177 ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */
178 ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
179 ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
180 tioca_kern->ca_gart_iscoherent = 1;
181 ca_base->ca_control2 &=
182 ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
183
184 /*
185 * Unmask GART fetch error interrupts. Clear residual errors first.
186 */
187
188 ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
189 ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
190 ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
191
192 /*
193 * Program the aperture and GART registers in TIOCA
194 */
195
196 ca_base->ca_gart_aperature = ap_reg;
197 ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
198
199 return 0;
200}
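
The size switch above encodes each power-of-two aperture as a 10-bit field equal to 0x400 minus the size in 4 MB units (0x3ff for 4 MB down to 0x000 for 4 GB). A hedged closed-form equivalent, shown only to make the encoding explicit:

	/* Valid for power-of-two aperture sizes from 4 MB through 4 GB;
	 * reproduces the constants in the switch in tioca_gart_init().
	 */
	static inline uint64_t ca_gart_ap_size_bits(uint64_t ap_size)
	{
		return ((0x400 - (ap_size >> 22)) & 0x3ff) << CA_GART_AP_SIZE_SHFT;
	}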
201
202/**
203 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
204 * @tioca_kern: structure representing the CA
205 *
206 * Given a CA, scan all attached functions making sure they all support
207 * FastWrite. If so, enable FastWrite for all functions and the CA itself.
208 */
209
210void
211tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
212{
213 int cap_ptr;
214 uint64_t ca_control1;
215 uint32_t reg;
216 struct tioca *tioca_base;
217 struct pci_dev *pdev;
218 struct tioca_common *common;
219
220 common = tioca_kern->ca_common;
221
222 /*
223 * Scan all VGA controllers on this bus, making sure they all
224 * support FW. If not, return.
225 */
226
227 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
228 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
229 continue;
230
231 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
232 if (!cap_ptr)
233 return; /* no AGP CAP means no FW */
234
235 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
236 if (!(reg & PCI_AGP_STATUS_FW))
237 return; /* function doesn't support FW */
238 }
239
240 /*
241 * Set FW for all VGA functions
242 */
243
244 list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
245 if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
246 continue;
247
248 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
249 pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
250 reg |= PCI_AGP_COMMAND_FW;
251 pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
252 }
253
254 /*
255 * Set the CA's FW enable to match
256 */
257
258 tioca_base = (struct tioca *)common->ca_common.bs_base;
259 ca_control1 = tioca_base->ca_control1;
260 ca_control1 |= CA_AGP_FW_ENABLE;
261 tioca_base->ca_control1 = ca_control1;
262}
263
264EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
265
266/**
267 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
268 * @paddr: system physical address
269 *
270 * Map @paddr into 64-bit CA bus space. No device context is necessary.
271 * Bits 53:0 come from the coretalk address. We just need to mask in the
272 * following optional bits of the 64-bit pci address:
273 *
274 * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent)
275 * 0x2 for PIO (non-coherent)
276 * We will always use 0x1
277 * 55:55 - Swap bytes Currently unused
278 */
279static uint64_t
280tioca_dma_d64(unsigned long paddr)
281{
282 dma_addr_t bus_addr;
283
284 bus_addr = PHYS_TO_TIODMA(paddr);
285
286 BUG_ON(!bus_addr);
287 BUG_ON(bus_addr >> 54);
288
289 /* Set upper nibble to Cache Coherent Memory op */
290 bus_addr |= (1UL << 60);
291
292 return bus_addr;
293}
294
295/**
296 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
297 * @pdev: linux pci_dev representing the function
298 * @paddr: system physical address
299 *
300 * Map @paddr into 48-bit direct bus space of the CA associated with @pdev.
301 *
302 * The CA agp 48 bit direct address falls out as follows:
303 *
304 * When direct mapping AGP addresses, the 48 bit AGP address is
305 * constructed as follows:
306 *
307 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
308 * address [47:40]. The upper 8 node bits are fixed
309 * and come from the xxx register bits [5:0]
310 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
311 * [37:00] - node offset extracted from coretalk address [37:00]
312 *
313 * Since the node id in general will be non-zero, and the chiplet id
314 * will always be non-zero, it follows that the device must support
315 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
316 * and in general should be 0xffffffffffff (48 bits) to target nodes
317 * up to 255. Nodes above 255 need the support of the xxx register,
318 * and so a given CA can only directly target nodes in the range
319 * xxx - xxx+255.
320 */
321static uint64_t
322tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
323{
324 struct tioca_common *tioca_common;
325 struct tioca *ca_base;
326 uint64_t ct_addr;
327 dma_addr_t bus_addr;
328 uint32_t node_upper;
329 uint64_t agp_dma_extn;
330 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
331
332 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
333 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
334
335 ct_addr = PHYS_TO_TIODMA(paddr);
336 if (!ct_addr)
337 return 0;
338
339 bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff);
340 node_upper = ct_addr >> 48;
341
342 if (node_upper > 64) {
343 printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
344 "of range\n", __FUNCTION__, (void *)ct_addr);
345 return 0;
346 }
347
348 agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
349 if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
350 printk(KERN_ERR "%s: coretalk upper node (%u) "
351 "mismatch with ca_agp_dma_addr_extn (%lu)\n",
352 __FUNCTION__,
353 node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
354 return 0;
355 }
356
357 return bus_addr;
358}
359
360/**
361 * tioca_dma_mapped - create a DMA mapping using a CA GART
362 * @pdev: linux pci_dev representing the function
363 * @paddr: host physical address to map
364 * @req_size: len (bytes) to map
365 *
366 * Map @paddr into CA address space using the GART mechanism. The mapped
367 * dma_addr_t is guaranteed to be contiguous in CA bus space.
368 */
369static dma_addr_t
370tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
371{
372 int i, ps, ps_shift, entry, entries, mapsize, last_entry;
373 uint64_t xio_addr, end_xio_addr;
374 struct tioca_common *tioca_common;
375 struct tioca_kernel *tioca_kern;
376 dma_addr_t bus_addr = 0;
377 struct tioca_dmamap *ca_dmamap;
378 void *map;
379 unsigned long flags;
380 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
381
382 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
383 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
384
385 xio_addr = PHYS_TO_TIODMA(paddr);
386 if (!xio_addr)
387 return 0;
388
389 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
390
391 /*
392 * allocate a map struct
393 */
394
395 ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC);
396 if (!ca_dmamap)
397 goto map_return;
398
399 /*
400 * Locate free entries that can hold req_size. Account for
401 * unaligned start/length when allocating.
402 */
403
404 ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
405 ps_shift = ffs(ps) - 1;
406 end_xio_addr = xio_addr + req_size - 1;
407
408 entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
409
410 map = tioca_kern->ca_pcigart_pagemap;
411 mapsize = tioca_kern->ca_pcigart_entries;
412
413 entry = find_first_zero_bit(map, mapsize);
414 while (entry < mapsize) {
415 last_entry = find_next_bit(map, mapsize, entry);
416
417 if (last_entry - entry >= entries)
418 break;
419
420 entry = find_next_zero_bit(map, mapsize, last_entry);
421 }
422
423 if (entry >= mapsize) {
424 kfree(ca_dmamap); /* no free run found -- don't leak the map */
425 goto map_return;
426 }
425
426 for (i = 0; i < entries; i++)
427 set_bit(entry + i, map);
428
429 bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
430
431 ca_dmamap->cad_dma_addr = bus_addr;
432 ca_dmamap->cad_gart_size = entries;
433 ca_dmamap->cad_gart_entry = entry;
434 list_add(&ca_dmamap->cad_list, &tioca_kern->ca_list);
435
436 if (xio_addr % ps) {
437 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
438 bus_addr += xio_addr & (ps - 1);
439 xio_addr &= ~(ps - 1);
440 xio_addr += ps;
441 entry++;
442 }
443
444 while (xio_addr < end_xio_addr) {
445 tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
446 xio_addr += ps;
447 entry++;
448 }
449
450 tioca_tlbflush(tioca_kern);
451
452map_return:
453 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
454 return bus_addr;
455}
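
The entry search above is a first-fit scan for a run of clear bits: find_first_zero_bit()/find_next_zero_bit() locate candidate run starts and find_next_bit() measures how long each run is. All three primitives return mapsize when the search fails, so entry == mapsize after the loop means no run was found. The same scan as a hedged standalone helper:

	/* First-fit search for a run of `entries` clear bits.
	 * Returns the start of the run, or mapsize on failure.
	 */
	static int find_zero_run(void *map, int mapsize, int entries)
	{
		int entry = find_first_zero_bit(map, mapsize);

		while (entry < mapsize) {
			int last = find_next_bit(map, mapsize, entry);

			if (last - entry >= entries)
				return entry;	/* run is big enough */
			entry = find_next_zero_bit(map, mapsize, last);
		}
		return mapsize;		/* no suitable run */
	}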
456
457/**
458 * tioca_dma_unmap - release CA mapping resources
459 * @pdev: linux pci_dev representing the function
460 * @bus_addr: bus address returned by an earlier tioca_dma_map
461 * @dir: mapping direction (unused)
462 *
463 * Locate mapping resources associated with @bus_addr and release them.
464 * For mappings created using the direct modes (64 or 48) there are no
465 * resources to release.
466 */
467void
468tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
469{
470 int i, entry;
471 struct tioca_common *tioca_common;
472 struct tioca_kernel *tioca_kern;
473 struct tioca_dmamap *map;
474 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
475 unsigned long flags;
476
477 tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
478 tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
479
480 /* return straight away if this isn't a GART-mapped address */
481
482 if (bus_addr < tioca_kern->ca_pciap_base ||
483 bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
484 return;
485
486 spin_lock_irqsave(&tioca_kern->ca_lock, flags);
487
488 list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
489 if (map->cad_dma_addr == bus_addr)
490 break;
491
492 BUG_ON(map == NULL);
493
494 entry = map->cad_gart_entry;
495
496 for (i = 0; i < map->cad_gart_size; i++, entry++) {
497 clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
498 tioca_kern->ca_pcigart[entry] = 0;
499 }
500 tioca_tlbflush(tioca_kern);
501
502 list_del(&map->cad_list);
503 spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
504 kfree(map);
505}
506
507/**
508 * tioca_dma_map - map pages for PCI DMA
509 * @pdev: linux pci_dev representing the function
510 * @paddr: host physical address to map
511 * @byte_count: bytes to map
512 *
513 * This is the main wrapper for mapping host physical pages to CA PCI space.
514 * The mapping mode used is based on the device's dma_mask. As a last resort,
515 * use the GART mapped mode.
516 */
517uint64_t
518tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
519{
520 uint64_t mapaddr;
521
522 /*
523 * If the card is 64 or 48 bit addressable, use a direct mapping. 32
524 * bit direct is so restrictive w.r.t. where the memory resides that
525 * we don't use it even though CA has some support.
526 */
527
528 if (pdev->dma_mask == ~0UL)
529 mapaddr = tioca_dma_d64(paddr);
530 else if (pdev->dma_mask == 0xffffffffffffUL)
531 mapaddr = tioca_dma_d48(pdev, paddr);
532 else
533 mapaddr = 0;
534
535 /* Last resort ... use PCI portion of CA GART */
536
537 if (mapaddr == 0)
538 mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
539
540 return mapaddr;
541}
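
From the driver's side, which of the three paths fires is decided entirely by the DMA mask it negotiates at probe time. A hedged sketch of the usual sequence (error handling trimmed):

	static int my_set_dma_mask(struct pci_dev *pdev)
	{
		if (!pci_set_dma_mask(pdev, ~0UL))
			return 0;	/* direct 64-bit: tioca_dma_d64() */
		if (!pci_set_dma_mask(pdev, 0xffffffffffffUL))
			return 0;	/* direct 48-bit: tioca_dma_d48() */
		/* anything narrower falls through to the GART path */
		return pci_set_dma_mask(pdev, 0xffffffffUL);
	}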
542
543/**
544 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
545 * @irq: unused
546 * @arg: pointer to tioca_common struct for the given CA
547 * @pt: unused
548 *
549 * Handle a CA error interrupt. Simply a wrapper around a SAL call which
550 * defers processing to the SGI prom.
551 */
552static irqreturn_t
553tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
554{
555 struct tioca_common *soft = arg;
556 struct ia64_sal_retval ret_stuff;
557 uint64_t segment;
558 uint64_t busnum;
559 ret_stuff.status = 0;
560 ret_stuff.v0 = 0;
561
562 segment = 0;
563 busnum = soft->ca_common.bs_persist_busnum;
564
565 SAL_CALL_NOLOCK(ret_stuff,
566 (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
567 segment, busnum, 0, 0, 0, 0, 0);
568
569 return IRQ_HANDLED;
570}
571
572/**
573 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
574 * @prom_bussoft: Common prom/kernel struct representing the bus
575 *
576 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
577 * space. Allocates and initializes a kernel-only area for a given CA,
578 * and sets up an irq for handling CA error interrupts.
579 *
580 * On successful setup, returns the kernel version of tioca_common back to
581 * the caller.
582 */
583void *
584tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft)
585{
586 struct tioca_common *tioca_common;
587 struct tioca_kernel *tioca_kern;
588 struct pci_bus *bus;
589
590 /* sanity check prom rev */
591
592 if (sn_sal_rev_major() < 4 ||
593 (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) {
594 printk
595 (KERN_ERR "%s: SGI prom rev 4.06 or greater required "
596 "for tioca support\n", __FUNCTION__);
597 return NULL;
598 }
599
600 /*
601 * Allocate kernel bus soft and copy from prom.
602 */
603
604 tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL);
605 if (!tioca_common)
606 return NULL;
607
608 memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
609 tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET;
610
611 /* init kernel-private area */
612
613 tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL);
614 if (!tioca_kern) {
615 kfree(tioca_common);
616 return NULL;
617 }
618
619 tioca_kern->ca_common = tioca_common;
620 spin_lock_init(&tioca_kern->ca_lock);
621 INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
622 tioca_kern->ca_closest_node =
623 nasid_to_cnodeid(tioca_common->ca_closest_nasid);
624 tioca_common->ca_kernel_private = (uint64_t) tioca_kern;
625
626 bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum);
627 BUG_ON(!bus);
628 tioca_kern->ca_devices = &bus->devices;
629
630 /* init GART */
631
632 if (tioca_gart_init(tioca_kern) < 0) {
633 kfree(tioca_kern);
634 kfree(tioca_common);
635 return NULL;
636 }
637
638 tioca_gart_found++;
639 list_add(&tioca_kern->ca_list, &tioca_list);
640
641 if (request_irq(SGI_TIOCA_ERROR,
642 tioca_error_intr_handler,
643 SA_SHIRQ, "TIOCA error", (void *)tioca_common))
644 printk(KERN_WARNING
645 "%s: Unable to get irq %d. "
646 "Error interrupts won't be routed for TIOCA bus %d\n",
647 __FUNCTION__, SGI_TIOCA_ERROR,
648 (int)tioca_common->ca_common.bs_persist_busnum);
649
650 return tioca_common;
651}
652
653static struct sn_pcibus_provider tioca_pci_interfaces = {
654 .dma_map = tioca_dma_map,
655 .dma_map_consistent = tioca_dma_map,
656 .dma_unmap = tioca_dma_unmap,
657 .bus_fixup = tioca_bus_fixup,
658};
659
660/**
661 * tioca_init_provider - init SN PCI provider ops for TIO CA
662 */
663int
664tioca_init_provider(void)
665{
666 sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
667 return 0;
668}
diff --git a/arch/ppc64/kernel/vdso32/cacheflush.S b/arch/ppc64/kernel/vdso32/cacheflush.S
index c74fddb6afd4..0ed7ea721715 100644
--- a/arch/ppc64/kernel/vdso32/cacheflush.S
+++ b/arch/ppc64/kernel/vdso32/cacheflush.S
@@ -47,6 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
47 addi r6,r6,128 47 addi r6,r6,128
48 bdnz 1b 48 bdnz 1b
49 isync 49 isync
50 li r3,0
50 blr 51 blr
51 .cfi_endproc 52 .cfi_endproc
52V_FUNCTION_END(__kernel_sync_dicache) 53V_FUNCTION_END(__kernel_sync_dicache)
@@ -59,6 +60,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
59 .cfi_startproc 60 .cfi_startproc
60 sync 61 sync
61 isync 62 isync
63 li r3,0
62 blr 64 blr
63 .cfi_endproc 65 .cfi_endproc
64V_FUNCTION_END(__kernel_sync_dicache_p5) 66V_FUNCTION_END(__kernel_sync_dicache_p5)
diff --git a/arch/ppc64/kernel/vdso32/gettimeofday.S b/arch/ppc64/kernel/vdso32/gettimeofday.S
index ca7f415195c4..2b48bf1fb109 100644
--- a/arch/ppc64/kernel/vdso32/gettimeofday.S
+++ b/arch/ppc64/kernel/vdso32/gettimeofday.S
@@ -58,6 +58,7 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
58 stw r5,TZONE_TZ_DSTTIME(r11) 58 stw r5,TZONE_TZ_DSTTIME(r11)
59 59
601: mtlr r12 601: mtlr r12
61 li r3,0
61 blr 62 blr
62 63
632: mr r3,r10 642: mr r3,r10
diff --git a/arch/ppc64/kernel/vdso64/cacheflush.S b/arch/ppc64/kernel/vdso64/cacheflush.S
index d9696ffcf334..e0725b7b7003 100644
--- a/arch/ppc64/kernel/vdso64/cacheflush.S
+++ b/arch/ppc64/kernel/vdso64/cacheflush.S
@@ -47,6 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
47 addi r6,r6,128 47 addi r6,r6,128
48 bdnz 1b 48 bdnz 1b
49 isync 49 isync
50 li r3,0
50 blr 51 blr
51 .cfi_endproc 52 .cfi_endproc
52V_FUNCTION_END(__kernel_sync_dicache) 53V_FUNCTION_END(__kernel_sync_dicache)
@@ -59,6 +60,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
59 .cfi_startproc 60 .cfi_startproc
60 sync 61 sync
61 isync 62 isync
63 li r3,0
62 blr 64 blr
63 .cfi_endproc 65 .cfi_endproc
64V_FUNCTION_END(__kernel_sync_dicache_p5) 66V_FUNCTION_END(__kernel_sync_dicache_p5)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 096a1202ea07..97ac4edf4655 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -399,6 +399,20 @@ config SGI_SNSC
399 controller communication from user space (you want this!), 399 controller communication from user space (you want this!),
400 say Y. Otherwise, say N. 400 say Y. Otherwise, say N.
401 401
402config SGI_TIOCX
403 bool "SGI TIO CX driver support"
404 depends on (IA64_SGI_SN2 || IA64_GENERIC)
405 help
406 If you have an SGI Altix and you have fpga devices attached
407 to your TIO, say Y here, otherwise say N.
408
409config SGI_MBCS
410 tristate "SGI FPGA Core Services driver support"
411 depends on (IA64_SGI_SN2 || IA64_GENERIC)
412 help
413 If you have an SGI Altix with an attached SABrick
414 say Y or M here, otherwise say N.
415
402source "drivers/serial/Kconfig" 416source "drivers/serial/Kconfig"
403 417
404config UNIX98_PTYS 418config UNIX98_PTYS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 54ed76af1a47..e3f5c32aac55 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,11 +42,12 @@ obj-$(CONFIG_SX) += sx.o generic_serial.o
42obj-$(CONFIG_RIO) += rio/ generic_serial.o 42obj-$(CONFIG_RIO) += rio/ generic_serial.o
43obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o hvsi.o 43obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o hvsi.o
44obj-$(CONFIG_RAW_DRIVER) += raw.o 44obj-$(CONFIG_RAW_DRIVER) += raw.o
45obj-$(CONFIG_SGI_SNSC) += snsc.o 45obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
46obj-$(CONFIG_MMTIMER) += mmtimer.o 46obj-$(CONFIG_MMTIMER) += mmtimer.o
47obj-$(CONFIG_VIOCONS) += viocons.o 47obj-$(CONFIG_VIOCONS) += viocons.o
48obj-$(CONFIG_VIOTAPE) += viotape.o 48obj-$(CONFIG_VIOTAPE) += viotape.o
49obj-$(CONFIG_HVCS) += hvcs.o 49obj-$(CONFIG_HVCS) += hvcs.o
50obj-$(CONFIG_SGI_MBCS) += mbcs.o
50 51
51obj-$(CONFIG_PRINTER) += lp.o 52obj-$(CONFIG_PRINTER) += lp.o
52obj-$(CONFIG_TIPAR) += tipar.o 53obj-$(CONFIG_TIPAR) += tipar.o
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
new file mode 100644
index 000000000000..ec7100556c50
--- /dev/null
+++ b/drivers/char/mbcs.c
@@ -0,0 +1,849 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9/*
10 * MOATB Core Services driver.
11 */
12
13#include <linux/config.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/types.h>
18#include <linux/ioport.h>
19#include <linux/notifier.h>
20#include <linux/reboot.h>
21#include <linux/init.h>
22#include <linux/fs.h>
23#include <linux/delay.h>
24#include <linux/device.h>
25#include <linux/mm.h>
26#include <linux/uio.h>
27#include <asm/io.h>
28#include <asm/uaccess.h>
29#include <asm/system.h>
30#include <asm/pgtable.h>
31#include <asm/sn/addrs.h>
32#include <asm/sn/intr.h>
33#include <asm/sn/tiocx.h>
34#include "mbcs.h"
35
36#define MBCS_DEBUG 0
37#if MBCS_DEBUG
38#define DBG(fmt...) printk(KERN_ALERT fmt)
39#else
40#define DBG(fmt...)
41#endif
42int mbcs_major;
43
44LIST_HEAD(soft_list);
45
46/*
47 * file operations
48 */
49struct file_operations mbcs_ops = {
50 .open = mbcs_open,
51 .llseek = mbcs_sram_llseek,
52 .read = mbcs_sram_read,
53 .write = mbcs_sram_write,
54 .mmap = mbcs_gscr_mmap,
55};
56
57struct mbcs_callback_arg {
58 int minor;
59 struct cx_dev *cx_dev;
60};
61
62static inline void mbcs_getdma_init(struct getdma *gdma)
63{
64 memset(gdma, 0, sizeof(struct getdma));
65 gdma->DoneIntEnable = 1;
66}
67
68static inline void mbcs_putdma_init(struct putdma *pdma)
69{
70 memset(pdma, 0, sizeof(struct putdma));
71 pdma->DoneIntEnable = 1;
72}
73
74static inline void mbcs_algo_init(struct algoblock *algo_soft)
75{
76 memset(algo_soft, 0, sizeof(struct algoblock));
77}
78
79static inline void mbcs_getdma_set(void *mmr,
80 uint64_t hostAddr,
81 uint64_t localAddr,
82 uint64_t localRamSel,
83 uint64_t numPkts,
84 uint64_t amoEnable,
85 uint64_t intrEnable,
86 uint64_t peerIO,
87 uint64_t amoHostDest,
88 uint64_t amoModType, uint64_t intrHostDest,
89 uint64_t intrVector)
90{
91 union dma_control rdma_control;
92 union dma_amo_dest amo_dest;
93 union intr_dest intr_dest;
94 union dma_localaddr local_addr;
95 union dma_hostaddr host_addr;
96
97 rdma_control.dma_control_reg = 0;
98 amo_dest.dma_amo_dest_reg = 0;
99 intr_dest.intr_dest_reg = 0;
100 local_addr.dma_localaddr_reg = 0;
101 host_addr.dma_hostaddr_reg = 0;
102
103 host_addr.dma_sys_addr = hostAddr;
104 MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
105
106 local_addr.dma_ram_addr = localAddr;
107 local_addr.dma_ram_sel = localRamSel;
108 MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
109
110 rdma_control.dma_op_length = numPkts;
111 rdma_control.done_amo_en = amoEnable;
112 rdma_control.done_int_en = intrEnable;
113 rdma_control.pio_mem_n = peerIO;
114 MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
115
116 amo_dest.dma_amo_sys_addr = amoHostDest;
117 amo_dest.dma_amo_mod_type = amoModType;
118 MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
119
120 intr_dest.address = intrHostDest;
121 intr_dest.int_vector = intrVector;
122 MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
123
124}
125
126static inline void mbcs_putdma_set(void *mmr,
127 uint64_t hostAddr,
128 uint64_t localAddr,
129 uint64_t localRamSel,
130 uint64_t numPkts,
131 uint64_t amoEnable,
132 uint64_t intrEnable,
133 uint64_t peerIO,
134 uint64_t amoHostDest,
135 uint64_t amoModType,
136 uint64_t intrHostDest, uint64_t intrVector)
137{
138 union dma_control wdma_control;
139 union dma_amo_dest amo_dest;
140 union intr_dest intr_dest;
141 union dma_localaddr local_addr;
142 union dma_hostaddr host_addr;
143
144 wdma_control.dma_control_reg = 0;
145 amo_dest.dma_amo_dest_reg = 0;
146 intr_dest.intr_dest_reg = 0;
147 local_addr.dma_localaddr_reg = 0;
148 host_addr.dma_hostaddr_reg = 0;
149
150 host_addr.dma_sys_addr = hostAddr;
151 MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
152
153 local_addr.dma_ram_addr = localAddr;
154 local_addr.dma_ram_sel = localRamSel;
155 MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
156
157 wdma_control.dma_op_length = numPkts;
158 wdma_control.done_amo_en = amoEnable;
159 wdma_control.done_int_en = intrEnable;
160 wdma_control.pio_mem_n = peerIO;
161 MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
162
163 amo_dest.dma_amo_sys_addr = amoHostDest;
164 amo_dest.dma_amo_mod_type = amoModType;
165 MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
166
167 intr_dest.address = intrHostDest;
168 intr_dest.int_vector = intrVector;
169 MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
170
171}
172
173static inline void mbcs_algo_set(void *mmr,
174 uint64_t amoHostDest,
175 uint64_t amoModType,
176 uint64_t intrHostDest,
177 uint64_t intrVector, uint64_t algoStepCount)
178{
179 union dma_amo_dest amo_dest;
180 union intr_dest intr_dest;
181 union algo_step step;
182
183 step.algo_step_reg = 0;
184 intr_dest.intr_dest_reg = 0;
185 amo_dest.dma_amo_dest_reg = 0;
186
187 amo_dest.dma_amo_sys_addr = amoHostDest;
188 amo_dest.dma_amo_mod_type = amoModType;
189 MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
190
191 intr_dest.address = intrHostDest;
192 intr_dest.int_vector = intrVector;
193 MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
194
195 step.alg_step_cnt = algoStepCount;
196 MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
197}
198
199static inline int mbcs_getdma_start(struct mbcs_soft *soft)
200{
201 void *mmr_base;
202 struct getdma *gdma;
203 uint64_t numPkts;
204 union cm_control cm_control;
205
206 mmr_base = soft->mmr_base;
207 gdma = &soft->getdma;
208
209 /* check that host address got setup */
210 if (!gdma->hostAddr)
211 return -1;
212
213 numPkts =
214 (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
215
216 /* program engine */
217 mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
218 gdma->localAddr,
219 (gdma->localAddr < MB2) ? 0 :
220 (gdma->localAddr < MB4) ? 1 :
221 (gdma->localAddr < MB6) ? 2 : 3,
222 numPkts,
223 gdma->DoneAmoEnable,
224 gdma->DoneIntEnable,
225 gdma->peerIO,
226 gdma->amoHostDest,
227 gdma->amoModType,
228 gdma->intrHostDest, gdma->intrVector);
229
230 /* start engine */
231 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
232 cm_control.rd_dma_go = 1;
233 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
234
235 return 0;
236
237}
238
239static inline int mbcs_putdma_start(struct mbcs_soft *soft)
240{
241 void *mmr_base;
242 struct putdma *pdma;
243 uint64_t numPkts;
244 union cm_control cm_control;
245
246 mmr_base = soft->mmr_base;
247 pdma = &soft->putdma;
248
249 /* check that host address got setup */
250 if (!pdma->hostAddr)
251 return -1;
252
253 numPkts =
254 (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
255
256 /* program engine */
257 mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
258 pdma->localAddr,
259 (pdma->localAddr < MB2) ? 0 :
260 (pdma->localAddr < MB4) ? 1 :
261 (pdma->localAddr < MB6) ? 2 : 3,
262 numPkts,
263 pdma->DoneAmoEnable,
264 pdma->DoneIntEnable,
265 pdma->peerIO,
266 pdma->amoHostDest,
267 pdma->amoModType,
268 pdma->intrHostDest, pdma->intrVector);
269
270 /* start engine */
271 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
272 cm_control.wr_dma_go = 1;
273 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
274
275 return 0;
276
277}
278
279static inline int mbcs_algo_start(struct mbcs_soft *soft)
280{
281 struct algoblock *algo_soft = &soft->algo;
282 void *mmr_base = soft->mmr_base;
283 union cm_control cm_control;
284
285 if (down_interruptible(&soft->algolock))
286 return -ERESTARTSYS;
287
288 atomic_set(&soft->algo_done, 0);
289
290 mbcs_algo_set(mmr_base,
291 algo_soft->amoHostDest,
292 algo_soft->amoModType,
293 algo_soft->intrHostDest,
294 algo_soft->intrVector, algo_soft->algoStepCount);
295
296 /* start algorithm */
297 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
298 cm_control.alg_done_int_en = 1;
299 cm_control.alg_go = 1;
300 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
301
302 up(&soft->algolock);
303
304 return 0;
305}
306
307static inline ssize_t
308do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
309 size_t len, loff_t * off)
310{
311 int rv = 0;
312
313 if (down_interruptible(&soft->dmawritelock))
314 return -ERESTARTSYS;
315
316 atomic_set(&soft->dmawrite_done, 0);
317
318 soft->putdma.hostAddr = hostAddr;
319 soft->putdma.localAddr = *off;
320 soft->putdma.bytes = len;
321
322 if (mbcs_putdma_start(soft) < 0) {
323 DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
324 "mbcs_putdma_start failed\n");
325 rv = -EAGAIN;
326 goto dmawrite_exit;
327 }
328
329 if (wait_event_interruptible(soft->dmawrite_queue,
330 atomic_read(&soft->dmawrite_done))) {
331 rv = -ERESTARTSYS;
332 goto dmawrite_exit;
333 }
334
335 rv = len;
336 *off += len;
337
338dmawrite_exit:
339 up(&soft->dmawritelock);
340
341 return rv;
342}
343
344static inline ssize_t
345do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
346 size_t len, loff_t * off)
347{
348 int rv = 0;
349
350 if (down_interruptible(&soft->dmareadlock))
351 return -ERESTARTSYS;
352
353 atomic_set(&soft->dmaread_done, 0);
354
355 soft->getdma.hostAddr = hostAddr;
356 soft->getdma.localAddr = *off;
357 soft->getdma.bytes = len;
358
359 if (mbcs_getdma_start(soft) < 0) {
360 DBG(KERN_ALERT "do_mbcs_sram_dmaread: mbcs_getdma_start failed\n");
361 rv = -EAGAIN;
362 goto dmaread_exit;
363 }
364
365 if (wait_event_interruptible(soft->dmaread_queue,
366 atomic_read(&soft->dmaread_done))) {
367 rv = -ERESTARTSYS;
368 goto dmaread_exit;
369 }
370
371 rv = len;
372 *off += len;
373
374dmaread_exit:
375 up(&soft->dmareadlock);
376
377 return rv;
378}
379
380int mbcs_open(struct inode *ip, struct file *fp)
381{
382 struct mbcs_soft *soft;
383 int minor;
384
385 minor = iminor(ip);
386
387 list_for_each_entry(soft, &soft_list, list) {
388 if (soft->nasid == minor) {
389 fp->private_data = soft->cxdev;
390 return 0;
391 }
392 }
393
394 return -ENODEV;
395}
396
397ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
398{
399 struct cx_dev *cx_dev = fp->private_data;
400 struct mbcs_soft *soft = cx_dev->soft;
401 uint64_t hostAddr;
402 int rv = 0;
403
404 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
405 if (hostAddr == 0)
406 return -ENOMEM;
407
408 rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
409 if (rv < 0)
410 goto exit;
411
412 if (copy_to_user(buf, (void *)hostAddr, len))
413 rv = -EFAULT;
414
415 exit:
416 free_pages(hostAddr, get_order(len));
417
418 return rv;
419}
420
421ssize_t
422mbcs_sram_write(struct file * fp, const char *buf, size_t len, loff_t * off)
423{
424 struct cx_dev *cx_dev = fp->private_data;
425 struct mbcs_soft *soft = cx_dev->soft;
426 uint64_t hostAddr;
427 int rv = 0;
428
429 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
430 if (hostAddr == 0)
431 return -ENOMEM;
432
433 if (copy_from_user((void *)hostAddr, buf, len)) {
434 rv = -EFAULT;
435 goto exit;
436 }
437
438 rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
439
440 exit:
441 free_pages(hostAddr, get_order(len));
442
443 return rv;
444}
445
446loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
447{
448 loff_t newpos;
449
450 switch (whence) {
451 case 0: /* SEEK_SET */
452 newpos = off;
453 break;
454
455 case 1: /* SEEK_CUR */
456 newpos = filp->f_pos + off;
457 break;
458
459 case 2: /* SEEK_END */
460 newpos = MBCS_SRAM_SIZE + off;
461 break;
462
463 default: /* can't happen */
464 return -EINVAL;
465 }
466
467 if (newpos < 0)
468 return -EINVAL;
469
470 filp->f_pos = newpos;
471
472 return newpos;
473}
474
475static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
476{
477 uint64_t mmr_base;
478
479 mmr_base = (uint64_t) (soft->mmr_base + offset);
480
481 return mmr_base;
482}
483
484static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
485{
486 soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
487}
488
489static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
490{
491 soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
492}
493
494int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
495{
496 struct cx_dev *cx_dev = fp->private_data;
497 struct mbcs_soft *soft = cx_dev->soft;
498
499 if (vma->vm_pgoff != 0)
500 return -EINVAL;
501
502 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
503
504 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
505 if (remap_pfn_range(vma,
506 vma->vm_start,
507 __pa(soft->gscr_addr) >> PAGE_SHIFT,
508 PAGE_SIZE,
509 vma->vm_page_prot))
510 return -EAGAIN;
511
512 return 0;
513}
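
Putting the fops together: a process seeks within device SRAM, reads and writes through the DMA engines, and can mmap a single page of GSCR registers at offset 0. A hedged user-space sketch; the device node name is a placeholder since the driver registers a dynamic major:

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void mbcs_example(const char *devnode)	/* a /dev node for mbcs */
	{
		char buf[4096];
		void *gscr;
		int fd = open(devnode, O_RDWR);

		if (fd < 0)
			return;
		lseek(fd, 0x1000, SEEK_SET);	/* position within SRAM */
		read(fd, buf, sizeof(buf));	/* DMA from SRAM into buf */

		/* one page of GSCR space; vm_pgoff must be 0 */
		gscr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (gscr != MAP_FAILED)
			munmap(gscr, 4096);
		close(fd);
	}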
514
515/**
516 * mbcs_completion_intr_handler - Primary completion handler.
517 * @irq: irq
518 * @arg: soft struct for device
519 * @ep: regs
520 *
521 */
522static irqreturn_t
523mbcs_completion_intr_handler(int irq, void *arg, struct pt_regs *ep)
524{
525 struct mbcs_soft *soft = (struct mbcs_soft *)arg;
526 void *mmr_base;
527 union cm_status cm_status;
528 union cm_control cm_control;
529
530 mmr_base = soft->mmr_base;
531 cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
532
533 if (cm_status.rd_dma_done) {
534 /* stop dma-read engine, clear status */
535 cm_control.cm_control_reg =
536 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
537 cm_control.rd_dma_clr = 1;
538 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
539 cm_control.cm_control_reg);
540 atomic_set(&soft->dmaread_done, 1);
541 wake_up(&soft->dmaread_queue);
542 }
543 if (cm_status.wr_dma_done) {
544 /* stop dma-write engine, clear status */
545 cm_control.cm_control_reg =
546 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
547 cm_control.wr_dma_clr = 1;
548 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
549 cm_control.cm_control_reg);
550 atomic_set(&soft->dmawrite_done, 1);
551 wake_up(&soft->dmawrite_queue);
552 }
553 if (cm_status.alg_done) {
554 /* clear status */
555 cm_control.cm_control_reg =
556 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
557 cm_control.alg_done_clr = 1;
558 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
559 cm_control.cm_control_reg);
560 atomic_set(&soft->algo_done, 1);
561 wake_up(&soft->algo_queue);
562 }
563
564 return IRQ_HANDLED;
565}
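
Each branch above follows the same three steps: latch CM_CONTROL, set that engine's clear bit, and write it back, and only then set the done flag and wake the sleeper. Acknowledging in the device before the wake_up() matters because the woken thread may immediately program the next transfer against the same engine.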
566
567/**
568 * mbcs_intr_alloc - Allocate interrupts.
569 * @dev: device pointer
570 *
571 */
572static int mbcs_intr_alloc(struct cx_dev *dev)
573{
574 struct sn_irq_info *sn_irq;
575 struct mbcs_soft *soft;
576 struct getdma *getdma;
577 struct putdma *putdma;
578 struct algoblock *algo;
579
580 soft = dev->soft;
581 getdma = &soft->getdma;
582 putdma = &soft->putdma;
583 algo = &soft->algo;
584
585 soft->get_sn_irq = NULL;
586 soft->put_sn_irq = NULL;
587 soft->algo_sn_irq = NULL;
588
589 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
590 if (sn_irq == NULL)
591 return -EAGAIN;
592 soft->get_sn_irq = sn_irq;
593 getdma->intrHostDest = sn_irq->irq_xtalkaddr;
594 getdma->intrVector = sn_irq->irq_irq;
595 if (request_irq(sn_irq->irq_irq,
596 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
597 "MBCS get intr", (void *)soft)) {
598 tiocx_irq_free(soft->get_sn_irq);
599 return -EAGAIN;
600 }
601
602 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
603 if (sn_irq == NULL) {
604 free_irq(soft->get_sn_irq->irq_irq, soft);
605 tiocx_irq_free(soft->get_sn_irq);
606 return -EAGAIN;
607 }
608 soft->put_sn_irq = sn_irq;
609 putdma->intrHostDest = sn_irq->irq_xtalkaddr;
610 putdma->intrVector = sn_irq->irq_irq;
611 if (request_irq(sn_irq->irq_irq,
612 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
613 "MBCS put intr", (void *)soft)) {
614 tiocx_irq_free(soft->put_sn_irq);
615 free_irq(soft->get_sn_irq->irq_irq, soft);
616 tiocx_irq_free(soft->get_sn_irq);
617 return -EAGAIN;
618 }
619
620 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
621 if (sn_irq == NULL) {
622 free_irq(soft->put_sn_irq->irq_irq, soft);
623 tiocx_irq_free(soft->put_sn_irq);
624 free_irq(soft->get_sn_irq->irq_irq, soft);
625 tiocx_irq_free(soft->get_sn_irq);
626 return -EAGAIN;
627 }
628 soft->algo_sn_irq = sn_irq;
629 algo->intrHostDest = sn_irq->irq_xtalkaddr;
630 algo->intrVector = sn_irq->irq_irq;
631 if (request_irq(sn_irq->irq_irq,
632 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
633 "MBCS algo intr", (void *)soft)) {
634 tiocx_irq_free(soft->algo_sn_irq);
635 free_irq(soft->put_sn_irq->irq_irq, soft);
636 tiocx_irq_free(soft->put_sn_irq);
637 free_irq(soft->get_sn_irq->irq_irq, soft);
638 tiocx_irq_free(soft->get_sn_irq);
639 return -EAGAIN;
640 }
641
642 return 0;
643}
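
The error paths above repeat the same teardown with one more step each time. The usual kernel idiom for that shape is a goto ladder; a minimal sketch of the first two allocations restructured that way (illustrative only, not the code as merged):

	soft->get_sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET,
					   -1, -1, -1);
	if (soft->get_sn_irq == NULL)
		return -EAGAIN;
	if (request_irq(soft->get_sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, SA_SHIRQ,
			"MBCS get intr", (void *)soft))
		goto err_free_get;

	soft->put_sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET,
					   -1, -1, -1);
	if (soft->put_sn_irq == NULL)
		goto err_irq_get;
	/* ... the put and algo request_irq calls follow the same shape ... */
	return 0;

err_irq_get:
	free_irq(soft->get_sn_irq->irq_irq, soft);
err_free_get:
	tiocx_irq_free(soft->get_sn_irq);
	return -EAGAIN;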
644
645/**
646 * mbcs_intr_dealloc - Remove interrupts.
647 * @dev: device pointer
648 *
649 */
650static void mbcs_intr_dealloc(struct cx_dev *dev)
651{
652 struct mbcs_soft *soft;
653
654 soft = dev->soft;
655
656 free_irq(soft->get_sn_irq->irq_irq, soft);
657 tiocx_irq_free(soft->get_sn_irq);
658 free_irq(soft->put_sn_irq->irq_irq, soft);
659 tiocx_irq_free(soft->put_sn_irq);
660 free_irq(soft->algo_sn_irq->irq_irq, soft);
661 tiocx_irq_free(soft->algo_sn_irq);
662}
663
664static inline int mbcs_hw_init(struct mbcs_soft *soft)
665{
666 void *mmr_base = soft->mmr_base;
667 union cm_control cm_control;
668 union cm_req_timeout cm_req_timeout;
669 uint64_t err_stat;
670
671 cm_req_timeout.cm_req_timeout_reg =
672 MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
673
674 cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
675 MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
676 cm_req_timeout.cm_req_timeout_reg);
677
678 mbcs_gscr_pioaddr_set(soft);
679 mbcs_debug_pioaddr_set(soft);
680
681 /* clear errors */
682 err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
683 MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
684 MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
685
686 /* enable interrupts */
687 /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
688 MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
689
690 /* arm status regs and clear engines */
691 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
692 cm_control.rearm_stat_regs = 1;
693 cm_control.alg_clr = 1;
694 cm_control.wr_dma_clr = 1;
695 cm_control.rd_dma_clr = 1;
696
697 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
698
699 return 0;
700}
701
702static ssize_t show_algo(struct device *dev, char *buf)
703{
704 struct cx_dev *cx_dev = to_cx_dev(dev);
705 struct mbcs_soft *soft = cx_dev->soft;
706 uint64_t debug0;
707
708 /*
709 * By convention, the first debug register contains the
710 * algorithm number and revision.
711 */
712 debug0 = *(uint64_t *) soft->debug_addr;
713
714 return sprintf(buf, "0x%lx 0x%lx\n",
715 (debug0 >> 32), (debug0 & 0xffffffff));
716}
717
718static ssize_t store_algo(struct device *dev, const char *buf, size_t count)
719{
720 int n;
721 struct cx_dev *cx_dev = to_cx_dev(dev);
722 struct mbcs_soft *soft = cx_dev->soft;
723
724 if (count <= 0)
725 return 0;
726
727 n = simple_strtoul(buf, NULL, 0);
728
729 if (n == 1) {
730 mbcs_algo_start(soft);
731 if (wait_event_interruptible(soft->algo_queue,
732 atomic_read(&soft->algo_done)))
733 return -ERESTARTSYS;
734 }
735
736 return count;
737}
738
739DEVICE_ATTR(algo, 0644, show_algo, store_algo);
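
With this attribute registered (see device_create_file() in mbcs_probe() below), writing the string "1" to the algo file in the device's sysfs directory runs mbcs_algo_start() and blocks the writer until the completion interrupt sets algo_done; reading the file reports the algorithm number and revision taken from the first debug register.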
740
741/**
742 * mbcs_probe - Initialize for device
743 * @dev: device pointer
744 * @device_id: id table pointer
745 *
746 */
747static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
748{
749 struct mbcs_soft *soft;
750
751 dev->soft = NULL;
752
753 soft = kcalloc(1, sizeof(struct mbcs_soft), GFP_KERNEL);
754 if (soft == NULL)
755 return -ENOMEM;
756
757 soft->nasid = dev->cx_id.nasid;
758 list_add(&soft->list, &soft_list);
759 soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
760 dev->soft = soft;
761 soft->cxdev = dev;
762
763 init_waitqueue_head(&soft->dmawrite_queue);
764 init_waitqueue_head(&soft->dmaread_queue);
765 init_waitqueue_head(&soft->algo_queue);
766
767 init_MUTEX(&soft->dmawritelock);
768 init_MUTEX(&soft->dmareadlock);
769 init_MUTEX(&soft->algolock);
770
771 mbcs_getdma_init(&soft->getdma);
772 mbcs_putdma_init(&soft->putdma);
773 mbcs_algo_init(&soft->algo);
774
775 mbcs_hw_init(soft);
776
777 /* Allocate interrupts */
778 mbcs_intr_alloc(dev);
779
780 device_create_file(&dev->dev, &dev_attr_algo);
781
782 return 0;
783}
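
Note that mbcs_probe() ignores the return values of mbcs_intr_alloc() and device_create_file(). A more defensive probe might unwind on failure, roughly along these lines (a sketch, not the code above):

	if (mbcs_intr_alloc(dev) < 0) {
		list_del(&soft->list);
		kfree(soft);
		dev->soft = NULL;
		return -EAGAIN;
	}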
784
785static int mbcs_remove(struct cx_dev *dev)
786{
787 if (dev->soft) {
788 mbcs_intr_dealloc(dev);
789 kfree(dev->soft);
790 }
791
792 device_remove_file(&dev->dev, &dev_attr_algo);
793
794 return 0;
795}
796
797const struct cx_device_id __devinitdata mbcs_id_table[] = {
798 {
799 .part_num = MBCS_PART_NUM,
800 .mfg_num = MBCS_MFG_NUM,
801 },
802 {
803 .part_num = MBCS_PART_NUM_ALG0,
804 .mfg_num = MBCS_MFG_NUM,
805 },
806 {0, 0}
807};
808
809MODULE_DEVICE_TABLE(cx, mbcs_id_table);
810
811struct cx_drv mbcs_driver = {
812 .name = DEVICE_NAME,
813 .id_table = mbcs_id_table,
814 .probe = mbcs_probe,
815 .remove = mbcs_remove,
816};
817
818static void __exit mbcs_exit(void)
819{
820 int rv;
821
822 rv = unregister_chrdev(mbcs_major, DEVICE_NAME);
823 if (rv < 0)
824 DBG(KERN_ALERT "Error in unregister_chrdev: %d\n", rv);
825
826 cx_driver_unregister(&mbcs_driver);
827}
828
829static int __init mbcs_init(void)
830{
831 int rv;
832
833 // Put driver into chrdevs[]. Get major number.
834 rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
835 if (rv < 0) {
836 DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
837 return rv;
838 }
839 mbcs_major = rv;
840
841 return cx_driver_register(&mbcs_driver);
842}
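
One subtlety here: register_chrdev() returns the allocated major only when called with major 0; with a fixed nonzero major it returns 0 on success. The unconditional mbcs_major = rv assignment therefore only works if mbcs_major starts out 0 (dynamic allocation). A guarded form would be (sketch):

	rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
	if (rv < 0)
		return rv;
	if (mbcs_major == 0)
		mbcs_major = rv;	/* a dynamic major was requested */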
843
844module_init(mbcs_init);
845module_exit(mbcs_exit);
846
847MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
848MODULE_DESCRIPTION("Driver for MOATB Core Services");
849MODULE_LICENSE("GPL");
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
new file mode 100644
index 000000000000..844644d201c5
--- /dev/null
+++ b/drivers/char/mbcs.h
@@ -0,0 +1,553 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef __MBCS_H__
10#define __MBCS_H__
11
12/*
13 * General macros
14 */
15#define MB (1024*1024)
16#define MB2 (2*MB)
17#define MB4 (4*MB)
18#define MB6 (6*MB)
19
20/*
21 * Offsets and masks
22 */
23#define MBCS_CM_ID 0x0000 /* Identification */
24#define MBCS_CM_STATUS 0x0008 /* Status */
25#define MBCS_CM_ERROR_DETAIL1 0x0010 /* Error Detail1 */
26#define MBCS_CM_ERROR_DETAIL2 0x0018 /* Error Detail2 */
27#define MBCS_CM_CONTROL 0x0020 /* Control */
28#define MBCS_CM_REQ_TOUT 0x0028 /* Request Time-out */
29#define MBCS_CM_ERR_INT_DEST 0x0038 /* Error Interrupt Destination */
30#define MBCS_CM_TARG_FL 0x0050 /* Target Flush */
31#define MBCS_CM_ERR_STAT 0x0060 /* Error Status */
32#define MBCS_CM_CLR_ERR_STAT 0x0068 /* Clear Error Status */
33#define MBCS_CM_ERR_INT_EN 0x0070 /* Error Interrupt Enable */
34#define MBCS_RD_DMA_SYS_ADDR 0x0100 /* Read DMA System Address */
35#define MBCS_RD_DMA_LOC_ADDR 0x0108 /* Read DMA Local Address */
36#define MBCS_RD_DMA_CTRL 0x0110 /* Read DMA Control */
37#define MBCS_RD_DMA_AMO_DEST 0x0118 /* Read DMA AMO Destination */
38#define MBCS_RD_DMA_INT_DEST 0x0120 /* Read DMA Interrupt Destination */
39#define MBCS_RD_DMA_AUX_STAT 0x0130 /* Read DMA Auxiliary Status */
40#define MBCS_WR_DMA_SYS_ADDR 0x0200 /* Write DMA System Address */
41#define MBCS_WR_DMA_LOC_ADDR 0x0208 /* Write DMA Local Address */
42#define MBCS_WR_DMA_CTRL 0x0210 /* Write DMA Control */
43#define MBCS_WR_DMA_AMO_DEST 0x0218 /* Write DMA AMO Destination */
44#define MBCS_WR_DMA_INT_DEST 0x0220 /* Write DMA Interrupt Destination */
45#define MBCS_WR_DMA_AUX_STAT 0x0230 /* Write DMA Auxiliary Status */
46#define MBCS_ALG_AMO_DEST 0x0300 /* Algorithm AMO Destination */
47#define MBCS_ALG_INT_DEST 0x0308 /* Algorithm Interrupt Destination */
48#define MBCS_ALG_OFFSETS 0x0310
49#define MBCS_ALG_STEP 0x0318 /* Algorithm Step */
50
51#define MBCS_GSCR_START 0x0000000
52#define MBCS_DEBUG_START 0x0100000
53#define MBCS_RAM0_START 0x0200000
54#define MBCS_RAM1_START 0x0400000
55#define MBCS_RAM2_START 0x0600000
56
57#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL
58//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL
59
60#define MBCS_SRAM_SIZE (1024*1024)
61#define MBCS_CACHELINE_SIZE 128
62
63/*
64 * MMR gets and puts
65 */
66#define MBCS_MMR_ADDR(mmr_base, offset) ((uint64_t *)(mmr_base + offset))
67#define MBCS_MMR_SET(mmr_base, offset, value) { \
68 uint64_t *mbcs_mmr_set_u64p, readback; \
69 mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset); \
70 *mbcs_mmr_set_u64p = value; \
71 readback = *mbcs_mmr_set_u64p; \
72}
73#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset)
74#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0)
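
The throwaway readback in MBCS_MMR_SET is deliberate: reading the register back forces the posted PIO write out to the device before the macro returns. An equivalent written with the kernel's MMIO accessors might look like this (a sketch; the driver itself uses raw uncached pointer dereferences as above):

	static inline void mbcs_mmr_write(void __iomem *mmr_base,
					  unsigned long offset, uint64_t value)
	{
		writeq(value, mmr_base + offset);
		(void)readq(mmr_base + offset);	/* flush the posted write */
	}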
75
76/*
77 * MBCS mmr structures
78 */
79union cm_id {
80 uint64_t cm_id_reg;
81 struct {
82 uint64_t always_one:1, // 0
83 mfg_id:11, // 11:1
84 part_num:16, // 27:12
85 bitstream_rev:8, // 35:28
86 :28; // 63:36
87 };
88};
89
90union cm_status {
91 uint64_t cm_status_reg;
92 struct {
93 uint64_t pending_reads:8, // 7:0
94 pending_writes:8, // 15:8
95 ice_rsp_credits:8, // 23:16
96 ice_req_credits:8, // 31:24
97 cm_req_credits:8, // 39:32
98 :1, // 40
99 rd_dma_in_progress:1, // 41
100 rd_dma_done:1, // 42
101 :1, // 43
102 wr_dma_in_progress:1, // 44
103 wr_dma_done:1, // 45
104 alg_waiting:1, // 46
105 alg_pipe_running:1, // 47
106 alg_done:1, // 48
107 :3, // 51:49
108 pending_int_reqs:8, // 59:52
109 :3, // 62:60
110 alg_half_speed_sel:1; // 63
111 };
112};
113
114union cm_error_detail1 {
115 uint64_t cm_error_detail1_reg;
116 struct {
117 uint64_t packet_type:4, // 3:0
118 source_id:2, // 5:4
119 data_size:2, // 7:6
120 tnum:8, // 15:8
121 byte_enable:8, // 23:16
122 gfx_cred:8, // 31:24
123 read_type:2, // 33:32
124 pio_or_memory:1, // 34
125 head_cw_error:1, // 35
126 :12, // 47:36
127 head_error_bit:1, // 48
128 data_error_bit:1, // 49
129 :13, // 62:50
130 valid:1; // 63
131 };
132};
133
134union cm_error_detail2 {
135 uint64_t cm_error_detail2_reg;
136 struct {
137 uint64_t address:56, // 55:0
138 :8; // 63:56
139 };
140};
141
142union cm_control {
143 uint64_t cm_control_reg;
144 struct {
145 uint64_t cm_id:2, // 1:0
146 :2, // 3:2
147 max_trans:5, // 8:4
148 :3, // 11:9
149 address_mode:1, // 12
150 :7, // 19:13
151 credit_limit:8, // 27:20
152 :5, // 32:28
153 rearm_stat_regs:1, // 33
154 prescalar_byp:1, // 34
155 force_gap_war:1, // 35
156 rd_dma_go:1, // 36
157 wr_dma_go:1, // 37
158 alg_go:1, // 38
159 rd_dma_clr:1, // 39
160 wr_dma_clr:1, // 40
161 alg_clr:1, // 41
162 :2, // 43:42
163 alg_wait_step:1, // 44
164 alg_done_amo_en:1, // 45
165 alg_done_int_en:1, // 46
166 :1, // 47
167 alg_sram0_locked:1, // 48
168 alg_sram1_locked:1, // 49
169 alg_sram2_locked:1, // 50
170 alg_done_clr:1, // 51
171 :12; // 63:52
172 };
173};
174
175union cm_req_timeout {
176 uint64_t cm_req_timeout_reg;
177 struct {
178 uint64_t time_out:24, // 23:0
179 :40; // 63:24
180 };
181};
182
183union intr_dest {
184 uint64_t intr_dest_reg;
185 struct {
186 uint64_t address:56, // 55:0
187 int_vector:8; // 63:56
188 };
189};
190
191union cm_error_status {
192 uint64_t cm_error_status_reg;
193 struct {
194 uint64_t ecc_sbe:1, // 0
195 ecc_mbe:1, // 1
196 unsupported_req:1, // 2
197 unexpected_rsp:1, // 3
198 bad_length:1, // 4
199 bad_datavalid:1, // 5
200 buffer_overflow:1, // 6
201 request_timeout:1, // 7
202 :8, // 15:8
203 head_inv_data_size:1, // 16
204 rsp_pactype_inv:1, // 17
205 head_sb_err:1, // 18
206 missing_head:1, // 19
207 head_inv_rd_type:1, // 20
208 head_cmd_err_bit:1, // 21
209 req_addr_align_inv:1, // 22
210 pio_req_addr_inv:1, // 23
211 req_range_dsize_inv:1, // 24
212 early_term:1, // 25
213 early_tail:1, // 26
214 missing_tail:1, // 27
215 data_flit_sb_err:1, // 28
216 cm2hcm_req_cred_of:1, // 29
217 cm2hcm_rsp_cred_of:1, // 30
218 rx_bad_didn:1, // 31
219 rd_dma_err_rsp:1, // 32
220 rd_dma_tnum_tout:1, // 33
221 rd_dma_multi_tnum_tou:1, // 34
222 wr_dma_err_rsp:1, // 35
223 wr_dma_tnum_tout:1, // 36
224 wr_dma_multi_tnum_tou:1, // 37
225 alg_data_overflow:1, // 38
226 alg_data_underflow:1, // 39
227 ram0_access_conflict:1, // 40
228 ram1_access_conflict:1, // 41
229 ram2_access_conflict:1, // 42
230 ram0_perr:1, // 43
231 ram1_perr:1, // 44
232 ram2_perr:1, // 45
233 int_gen_rsp_err:1, // 46
234 int_gen_tnum_tout:1, // 47
235 rd_dma_prog_err:1, // 48
236 wr_dma_prog_err:1, // 49
237 :14; // 63:50
238 };
239};
240
241union cm_clr_error_status {
242 uint64_t cm_clr_error_status_reg;
243 struct {
244 uint64_t clr_ecc_sbe:1, // 0
245 clr_ecc_mbe:1, // 1
246 clr_unsupported_req:1, // 2
247 clr_unexpected_rsp:1, // 3
248 clr_bad_length:1, // 4
249 clr_bad_datavalid:1, // 5
250 clr_buffer_overflow:1, // 6
251 clr_request_timeout:1, // 7
252 :8, // 15:8
253 clr_head_inv_data_siz:1, // 16
254 clr_rsp_pactype_inv:1, // 17
255 clr_head_sb_err:1, // 18
256 clr_missing_head:1, // 19
257 clr_head_inv_rd_type:1, // 20
258 clr_head_cmd_err_bit:1, // 21
259 clr_req_addr_align_in:1, // 22
260 clr_pio_req_addr_inv:1, // 23
261 clr_req_range_dsize_i:1, // 24
262 clr_early_term:1, // 25
263 clr_early_tail:1, // 26
264 clr_missing_tail:1, // 27
265 clr_data_flit_sb_err:1, // 28
266 clr_cm2hcm_req_cred_o:1, // 29
267 clr_cm2hcm_rsp_cred_o:1, // 30
268 clr_rx_bad_didn:1, // 31
269 clr_rd_dma_err_rsp:1, // 32
270 clr_rd_dma_tnum_tout:1, // 33
271 clr_rd_dma_multi_tnum:1, // 34
272 clr_wr_dma_err_rsp:1, // 35
273 clr_wr_dma_tnum_tout:1, // 36
274 clr_wr_dma_multi_tnum:1, // 37
275 clr_alg_data_overflow:1, // 38
276 clr_alg_data_underflo:1, // 39
277 clr_ram0_access_confl:1, // 40
278 clr_ram1_access_confl:1, // 41
279 clr_ram2_access_confl:1, // 42
280 clr_ram0_perr:1, // 43
281 clr_ram1_perr:1, // 44
282 clr_ram2_perr:1, // 45
283 clr_int_gen_rsp_err:1, // 46
284 clr_int_gen_tnum_tout:1, // 47
285 clr_rd_dma_prog_err:1, // 48
286 clr_wr_dma_prog_err:1, // 49
287 :14; // 63:50
288 };
289};
290
291union cm_error_intr_enable {
292 uint64_t cm_error_intr_enable_reg;
293 struct {
294 uint64_t int_en_ecc_sbe:1, // 0
295 int_en_ecc_mbe:1, // 1
296 int_en_unsupported_re:1, // 2
297 int_en_unexpected_rsp:1, // 3
298 int_en_bad_length:1, // 4
299 int_en_bad_datavalid:1, // 5
300 int_en_buffer_overflo:1, // 6
301 int_en_request_timeou:1, // 7
302 :8, // 15:8
303 int_en_head_inv_data_:1, // 16
304 int_en_rsp_pactype_in:1, // 17
305 int_en_head_sb_err:1, // 18
306 int_en_missing_head:1, // 19
307 int_en_head_inv_rd_ty:1, // 20
308 int_en_head_cmd_err_b:1, // 21
309 int_en_req_addr_align:1, // 22
310 int_en_pio_req_addr_i:1, // 23
311 int_en_req_range_dsiz:1, // 24
312 int_en_early_term:1, // 25
313 int_en_early_tail:1, // 26
314 int_en_missing_tail:1, // 27
315 int_en_data_flit_sb_e:1, // 28
316 int_en_cm2hcm_req_cre:1, // 29
317 int_en_cm2hcm_rsp_cre:1, // 30
318 int_en_rx_bad_didn:1, // 31
319 int_en_rd_dma_err_rsp:1, // 32
320 int_en_rd_dma_tnum_to:1, // 33
321 int_en_rd_dma_multi_t:1, // 34
322 int_en_wr_dma_err_rsp:1, // 35
323 int_en_wr_dma_tnum_to:1, // 36
324 int_en_wr_dma_multi_t:1, // 37
325 int_en_alg_data_overf:1, // 38
326 int_en_alg_data_under:1, // 39
327 int_en_ram0_access_co:1, // 40
328 int_en_ram1_access_co:1, // 41
329 int_en_ram2_access_co:1, // 42
330 int_en_ram0_perr:1, // 43
331 int_en_ram1_perr:1, // 44
332 int_en_ram2_perr:1, // 45
333 int_en_int_gen_rsp_er:1, // 46
334 int_en_int_gen_tnum_t:1, // 47
335 int_en_rd_dma_prog_er:1, // 48
336 int_en_wr_dma_prog_er:1, // 49
337 :14; // 63:50
338 };
339};
340
341struct cm_mmr {
342 union cm_id id;
343 union cm_status status;
344 union cm_error_detail1 err_detail1;
345 union cm_error_detail2 err_detail2;
346 union cm_control control;
347 union cm_req_timeout req_timeout;
348 uint64_t reserved1[1];
349 union intr_dest int_dest;
350 uint64_t reserved2[2];
351 uint64_t targ_flush;
352 uint64_t reserved3[1];
353 union cm_error_status err_status;
354 union cm_clr_error_status clr_err_status;
355 union cm_error_intr_enable int_enable;
356};
357
358union dma_hostaddr {
359 uint64_t dma_hostaddr_reg;
360 struct {
361 uint64_t dma_sys_addr:56, // 55:0
362 :8; // 63:56
363 };
364};
365
366union dma_localaddr {
367 uint64_t dma_localaddr_reg;
368 struct {
369 uint64_t dma_ram_addr:21, // 20:0
370 dma_ram_sel:2, // 22:21
371 :41; // 63:23
372 };
373};
374
375union dma_control {
376 uint64_t dma_control_reg;
377 struct {
378 uint64_t dma_op_length:16, // 15:0
379 :18, // 33:16
380 done_amo_en:1, // 34
381 done_int_en:1, // 35
382 :1, // 36
383 pio_mem_n:1, // 37
384 :26; // 63:38
385 };
386};
387
388union dma_amo_dest {
389 uint64_t dma_amo_dest_reg;
390 struct {
391 uint64_t dma_amo_sys_addr:56, // 55:0
392 dma_amo_mod_type:3, // 58:56
393 :5; // 63:59
394 };
395};
396
397union rdma_aux_status {
398 uint64_t rdma_aux_status_reg;
399 struct {
400 uint64_t op_num_pacs_left:17, // 16:0
401 :5, // 21:17
402 lrsp_buff_empty:1, // 22
403 :17, // 39:23
404 pending_reqs_left:6, // 45:40
405 :18; // 63:46
406 };
407};
408
409struct rdma_mmr {
410 union dma_hostaddr host_addr;
411 union dma_localaddr local_addr;
412 union dma_control control;
413 union dma_amo_dest amo_dest;
414 union intr_dest intr_dest;
415 union rdma_aux_status aux_status;
416};
417
418union wdma_aux_status {
419 uint64_t wdma_aux_status_reg;
420 struct {
421 uint64_t op_num_pacs_left:17, // 16:0
422 :4, // 20:17
423 lreq_buff_empty:1, // 21
424 :18, // 39:22
425 pending_reqs_left:6, // 45:40
426 :18; // 63:46
427 };
428};
429
430struct wdma_mmr {
431 union dma_hostaddr host_addr;
432 union dma_localaddr local_addr;
433 union dma_control control;
434 union dma_amo_dest amo_dest;
435 union intr_dest intr_dest;
436 union wdma_aux_status aux_status;
437};
438
439union algo_step {
440 uint64_t algo_step_reg;
441 struct {
442 uint64_t alg_step_cnt:16, // 15:0
443 :48; // 63:16
444 };
445};
446
447struct algo_mmr {
448 union dma_amo_dest amo_dest;
449 union intr_dest intr_dest;
450 union {
451 uint64_t algo_offset_reg;
452 struct {
453 uint64_t sram0_offset:7, // 6:0
454 reserved0:1, // 7
455 sram1_offset:7, // 14:8
456 reserved1:1, // 15
457 sram2_offset:7, // 22:16
458 reserved2:14; // 63:23
459 };
460 } sram_offset;
461 union algo_step step;
462};
463
464struct mbcs_mmr {
465 struct cm_mmr cm;
466 uint64_t reserved1[17];
467 struct rdma_mmr rdDma;
468 uint64_t reserved2[25];
469 struct wdma_mmr wrDma;
470 uint64_t reserved3[25];
471 struct algo_mmr algo;
472 uint64_t reserved4[156];
473};
474
475/*
476 * defines
477 */
478#define DEVICE_NAME "mbcs"
479#define MBCS_PART_NUM 0xfff0
480#define MBCS_PART_NUM_ALG0 0xf001
481#define MBCS_MFG_NUM 0x1
482
483struct algoblock {
484 uint64_t amoHostDest;
485 uint64_t amoModType;
486 uint64_t intrHostDest;
487 uint64_t intrVector;
488 uint64_t algoStepCount;
489};
490
491struct getdma {
492 uint64_t hostAddr;
493 uint64_t localAddr;
494 uint64_t bytes;
495 uint64_t DoneAmoEnable;
496 uint64_t DoneIntEnable;
497 uint64_t peerIO;
498 uint64_t amoHostDest;
499 uint64_t amoModType;
500 uint64_t intrHostDest;
501 uint64_t intrVector;
502};
503
504struct putdma {
505 uint64_t hostAddr;
506 uint64_t localAddr;
507 uint64_t bytes;
508 uint64_t DoneAmoEnable;
509 uint64_t DoneIntEnable;
510 uint64_t peerIO;
511 uint64_t amoHostDest;
512 uint64_t amoModType;
513 uint64_t intrHostDest;
514 uint64_t intrVector;
515};
516
517struct mbcs_soft {
518 struct list_head list;
519 struct cx_dev *cxdev;
520 int major;
521 int nasid;
522 void *mmr_base;
523 wait_queue_head_t dmawrite_queue;
524 wait_queue_head_t dmaread_queue;
525 wait_queue_head_t algo_queue;
526 struct sn_irq_info *get_sn_irq;
527 struct sn_irq_info *put_sn_irq;
528 struct sn_irq_info *algo_sn_irq;
529 struct getdma getdma;
530 struct putdma putdma;
531 struct algoblock algo;
532 uint64_t gscr_addr; // pio addr
533 uint64_t ram0_addr; // pio addr
534 uint64_t ram1_addr; // pio addr
535 uint64_t ram2_addr; // pio addr
536 uint64_t debug_addr; // pio addr
537 atomic_t dmawrite_done;
538 atomic_t dmaread_done;
539 atomic_t algo_done;
540 struct semaphore dmawritelock;
541 struct semaphore dmareadlock;
542 struct semaphore algolock;
543};
544
545extern int mbcs_open(struct inode *ip, struct file *fp);
546extern ssize_t mbcs_sram_read(struct file *fp, char *buf, size_t len,
547 loff_t * off);
548extern ssize_t mbcs_sram_write(struct file *fp, const char *buf, size_t len,
549 loff_t * off);
550extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
551extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
552
553#endif // __MBCS_H__
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index ffb9143376bb..e3c0b52d943f 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -374,6 +374,7 @@ scdrv_init(void)
374 void *salbuf; 374 void *salbuf;
375 struct class_simple *snsc_class; 375 struct class_simple *snsc_class;
376 dev_t first_dev, dev; 376 dev_t first_dev, dev;
377 nasid_t event_nasid = ia64_sn_get_console_nasid();
377 378
378 if (alloc_chrdev_region(&first_dev, 0, numionodes, 379 if (alloc_chrdev_region(&first_dev, 0, numionodes,
379 SYSCTL_BASENAME) < 0) { 380 SYSCTL_BASENAME) < 0) {
@@ -441,6 +442,13 @@ scdrv_init(void)
441 ia64_sn_irtr_intr_enable(scd->scd_nasid, 442 ia64_sn_irtr_intr_enable(scd->scd_nasid,
442 0 /*ignored */ , 443 0 /*ignored */ ,
443 SAL_IROUTER_INTR_RECV); 444 SAL_IROUTER_INTR_RECV);
445
446 /* on the console nasid, prepare to receive
447 * system controller environmental events
448 */
449 if (scd->scd_nasid == event_nasid) {
450 scdrv_event_init(scd);
451 }
444 } 452 }
445 return 0; 453 return 0;
446} 454}
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
index c22c6c55e254..a9efc13cc858 100644
--- a/drivers/char/snsc.h
+++ b/drivers/char/snsc.h
@@ -47,4 +47,44 @@ struct sysctl_data_s {
47 nasid_t scd_nasid; /* Node on which subchannels are opened. */ 47 nasid_t scd_nasid; /* Node on which subchannels are opened. */
48}; 48};
49 49
50
51/* argument types */
52#define IR_ARG_INT 0x00 /* 4-byte integer (big-endian) */
53#define IR_ARG_ASCII 0x01 /* null-terminated ASCII string */
54#define IR_ARG_UNKNOWN 0x80 /* unknown data type. The low
55 * 7 bits will contain the data
56 * length. */
57#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f
58
59
60/* system controller event codes */
61#define EV_CLASS_MASK 0xf000ul
62#define EV_SEVERITY_MASK 0x0f00ul
63#define EV_COMPONENT_MASK 0x00fful
64
65#define EV_CLASS_POWER 0x1000ul
66#define EV_CLASS_FAN 0x2000ul
67#define EV_CLASS_TEMP 0x3000ul
68#define EV_CLASS_ENV 0x4000ul
69#define EV_CLASS_TEST_FAULT 0x5000ul
70#define EV_CLASS_TEST_WARNING 0x6000ul
71#define EV_CLASS_PWRD_NOTIFY 0x8000ul
72
73#define EV_SEVERITY_POWER_STABLE 0x0000ul
74#define EV_SEVERITY_POWER_LOW_WARNING 0x0100ul
75#define EV_SEVERITY_POWER_HIGH_WARNING 0x0200ul
76#define EV_SEVERITY_POWER_HIGH_FAULT 0x0300ul
77#define EV_SEVERITY_POWER_LOW_FAULT 0x0400ul
78
79#define EV_SEVERITY_FAN_STABLE 0x0000ul
80#define EV_SEVERITY_FAN_WARNING 0x0100ul
81#define EV_SEVERITY_FAN_FAULT 0x0200ul
82
83#define EV_SEVERITY_TEMP_STABLE 0x0000ul
84#define EV_SEVERITY_TEMP_ADVISORY 0x0100ul
85#define EV_SEVERITY_TEMP_CRITICAL 0x0200ul
86#define EV_SEVERITY_TEMP_FAULT 0x0300ul
87
88void scdrv_event_init(struct sysctl_data_s *);
89
50#endif /* _SN_SYSCTL_H_ */ 90#endif /* _SN_SYSCTL_H_ */
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
new file mode 100644
index 000000000000..d692af57213a
--- /dev/null
+++ b/drivers/char/snsc_event.c
@@ -0,0 +1,304 @@
1/*
2 * SN Platform system controller communication support
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
9 */
10
11/*
12 * System controller event handler
13 *
14 * These routines deal with environmental events arriving from the
15 * system controllers.
16 */
17
18#include <linux/interrupt.h>
19#include <linux/sched.h>
20#include <linux/byteorder/generic.h>
21#include <asm/sn/sn_sal.h>
22#include "snsc.h"
23
24static struct subch_data_s *event_sd;
25
26void scdrv_event(unsigned long);
27DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
28
29/*
30 * scdrv_event_interrupt
31 *
32 * Pull incoming environmental events off the physical link to the
33 * system controller and put them in a temporary holding area in SAL.
34 * Schedule scdrv_event() to move them along to their ultimate
35 * destination.
36 */
37static irqreturn_t
38scdrv_event_interrupt(int irq, void *subch_data, struct pt_regs *regs)
39{
40 struct subch_data_s *sd = subch_data;
41 unsigned long flags;
42 int status;
43
44 spin_lock_irqsave(&sd->sd_rlock, flags);
45 status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
46
47 if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
48 tasklet_schedule(&sn_sysctl_event);
49 }
50 spin_unlock_irqrestore(&sd->sd_rlock, flags);
51 return IRQ_HANDLED;
52}
53
54
55/*
56 * scdrv_parse_event
57 *
58 * Break an event (as read from SAL) into useful pieces so we can decide
59 * what to do with it.
60 */
61static int
62scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
63{
64 char *desc_end;
65
66 /* record event source address */
67 *src = be32_to_cpup((__be32 *)event);
68 event += 4; /* move on to event code */
69
70 /* record the system controller's event code */
71 *code = be32_to_cpup((__be32 *)event);
72 event += 4; /* move on to event arguments */
73
74 /* how many arguments are in the packet? */
75 if (*event++ != 2) {
76 /* if not 2, give up */
77 return -1;
78 }
79
80 /* parse out the ESP code */
81 if (*event++ != IR_ARG_INT) {
82 /* not an integer argument, so give up */
83 return -1;
84 }
85 *esp_code = be32_to_cpup((__be32 *)event);
86 event += 4;
87
88 /* parse out the event description */
89 if (*event++ != IR_ARG_ASCII) {
90 /* not an ASCII string, so give up */
91 return -1;
92 }
93 event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */
94 event += 2; /* skip leading CR/LF */
95 desc_end = desc + sprintf(desc, "%s", event);
96
97 /* strip trailing CR/LF (if any) */
98 for (desc_end--;
99 (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
100 desc_end--) {
101 *desc_end = '\0';
102 }
103
104 return 0;
105}
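
Pieced together from the parser above, the event packet as read from SAL is laid out like this (multi-byte fields big-endian, per the be32_to_cpup() calls):

	bytes  0- 3  event source address
	bytes  4- 7  system controller event code
	byte   8     argument count (must be 2)
	byte   9     IR_ARG_INT type tag
	bytes 10-13  ESP code
	byte  14     IR_ARG_ASCII type tag
	bytes 15-    description string, led by a CR/LF pair that the
	             parser skips, and NUL-terminated defensively above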
106
107
108/*
109 * scdrv_event_severity
110 *
111 * Figure out how urgent a message we should write to the console/syslog
112 * via printk.
113 */
114static char *
115scdrv_event_severity(int code)
116{
117 int ev_class = (code & EV_CLASS_MASK);
118 int ev_severity = (code & EV_SEVERITY_MASK);
119 char *pk_severity = KERN_NOTICE;
120
121 switch (ev_class) {
122 case EV_CLASS_POWER:
123 switch (ev_severity) {
124 case EV_SEVERITY_POWER_LOW_WARNING:
125 case EV_SEVERITY_POWER_HIGH_WARNING:
126 pk_severity = KERN_WARNING;
127 break;
128 case EV_SEVERITY_POWER_HIGH_FAULT:
129 case EV_SEVERITY_POWER_LOW_FAULT:
130 pk_severity = KERN_ALERT;
131 break;
132 }
133 break;
134 case EV_CLASS_FAN:
135 switch (ev_severity) {
136 case EV_SEVERITY_FAN_WARNING:
137 pk_severity = KERN_WARNING;
138 break;
139 case EV_SEVERITY_FAN_FAULT:
140 pk_severity = KERN_CRIT;
141 break;
142 }
143 break;
144 case EV_CLASS_TEMP:
145 switch (ev_severity) {
146 case EV_SEVERITY_TEMP_ADVISORY:
147 pk_severity = KERN_WARNING;
148 break;
149 case EV_SEVERITY_TEMP_CRITICAL:
150 pk_severity = KERN_CRIT;
151 break;
152 case EV_SEVERITY_TEMP_FAULT:
153 pk_severity = KERN_ALERT;
154 break;
155 }
156 break;
157 case EV_CLASS_ENV:
158 pk_severity = KERN_ALERT;
159 break;
160 case EV_CLASS_TEST_FAULT:
161 pk_severity = KERN_ALERT;
162 break;
163 case EV_CLASS_TEST_WARNING:
164 pk_severity = KERN_WARNING;
165 break;
166 case EV_CLASS_PWRD_NOTIFY:
167 pk_severity = KERN_ALERT;
168 break;
169 }
170
171 return pk_severity;
172}
173
174
175/*
176 * scdrv_dispatch_event
177 *
178 * Do the right thing with an incoming event. That's often nothing
179 * more than printing it to the system log. For power-down notifications
180 * we start a graceful shutdown.
181 */
182static void
183scdrv_dispatch_event(char *event, int len)
184{
185 int code, esp_code, src;
186 char desc[CHUNKSIZE];
187 char *severity;
188
189 if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
190 /* ignore uninterpretable event */
191 return;
192 }
193
194 /* how urgent is the message? */
195 severity = scdrv_event_severity(code);
196
197 if ((code & EV_CLASS_MASK) == EV_CLASS_PWRD_NOTIFY) {
198 struct task_struct *p;
199
200 /* give a SIGPWR signal to init proc */
201
202 /* first find init's task */
203 read_lock(&tasklist_lock);
204 for_each_process(p) {
205 if (p->pid == 1)
206 break;
207 }
208 if (p) { /* we found init's task */
209 printk(KERN_EMERG "Power off indication received. Initiating power fail sequence...\n");
210 force_sig(SIGPWR, p);
211 } else { /* failed to find init's task - just give message(s) */
212 printk(KERN_WARNING "Failed to find init proc to handle power off!\n");
213 printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
214 }
215 read_unlock(&tasklist_lock);
216 } else {
217 /* print to system log */
218 printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
219 }
220}
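
Two observations on the power-down branch: for_each_process() never leaves p NULL (if pid 1 is not found, the cursor ends back at the boot idle task), so the else branch above appears unreachable, and the lookup could be done directly. A sketch using the pid hash, assuming find_task_by_pid() in this kernel generation:

	read_lock(&tasklist_lock);
	p = find_task_by_pid(1);
	if (p)
		force_sig(SIGPWR, p);
	read_unlock(&tasklist_lock);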
221
222
223/*
224 * scdrv_event
225 *
226 * Called as a tasklet when an event arrives from the L1. Read the event
227 * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
228 * to send it on its way. Keep trying to read events until SAL indicates
229 * that there are no more immediately available.
230 */
231void
232scdrv_event(unsigned long dummy)
233{
234 int status;
235 int len;
236 unsigned long flags;
237 struct subch_data_s *sd = event_sd;
238
239 /* anything to read? */
240 len = CHUNKSIZE;
241 spin_lock_irqsave(&sd->sd_rlock, flags);
242 status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
243 sd->sd_rb, &len);
244
245 while (!(status < 0)) {
246 spin_unlock_irqrestore(&sd->sd_rlock, flags);
247 scdrv_dispatch_event(sd->sd_rb, len);
248 len = CHUNKSIZE;
249 spin_lock_irqsave(&sd->sd_rlock, flags);
250 status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
251 sd->sd_rb, &len);
252 }
253 spin_unlock_irqrestore(&sd->sd_rlock, flags);
254}
255
256
257/*
258 * scdrv_event_init
259 *
260 * Sets up a system controller subchannel to begin receiving event
261 * messages. This is sort of a specialized version of scdrv_open()
262 * in drivers/char/snsc.c.
263 */
264void
265scdrv_event_init(struct sysctl_data_s *scd)
266{
267 int rv;
268
269 event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL);
270 if (event_sd == NULL) {
271 printk(KERN_WARNING "%s: couldn't allocate subchannel info"
272 " for event monitoring\n", __FUNCTION__);
273 return;
274 }
275
276 /* initialize subch_data_s fields */
277 memset(event_sd, 0, sizeof (struct subch_data_s));
278 event_sd->sd_nasid = scd->scd_nasid;
279 spin_lock_init(&event_sd->sd_rlock);
280
281 /* ask the system controllers to send events to this node */
282 event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
283
284 if (event_sd->sd_subch < 0) {
285 kfree(event_sd);
286 printk(KERN_WARNING "%s: couldn't open event subchannel\n",
287 __FUNCTION__);
288 return;
289 }
290
291 /* hook event subchannel up to the system controller interrupt */
292 rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
293 SA_SHIRQ | SA_INTERRUPT,
294 "system controller events", event_sd);
295 if (rv) {
296 printk(KERN_WARNING "%s: irq request failed (%d)\n",
297 __FUNCTION__, rv);
298 ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
299 kfree(event_sd);
300 return;
301 }
302}
303
304
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 903d0ced7ddb..058c70c6f1ac 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -5830,7 +5830,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
5830 free_irq(tp->pdev->irq, dev); 5830 free_irq(tp->pdev->irq, dev);
5831 5831
5832 err = request_irq(tp->pdev->irq, tg3_test_isr, 5832 err = request_irq(tp->pdev->irq, tg3_test_isr,
5833 SA_SHIRQ, dev->name, dev); 5833 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5834 if (err) 5834 if (err)
5835 return err; 5835 return err;
5836 5836
@@ -5852,10 +5852,10 @@ static int tg3_test_interrupt(struct tg3 *tp)
5852 5852
5853 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 5853 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5854 err = request_irq(tp->pdev->irq, tg3_msi, 5854 err = request_irq(tp->pdev->irq, tg3_msi,
5855 0, dev->name, dev); 5855 SA_SAMPLE_RANDOM, dev->name, dev);
5856 else 5856 else
5857 err = request_irq(tp->pdev->irq, tg3_interrupt, 5857 err = request_irq(tp->pdev->irq, tg3_interrupt,
5858 SA_SHIRQ, dev->name, dev); 5858 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5859 5859
5860 if (err) 5860 if (err)
5861 return err; 5861 return err;
@@ -5908,7 +5908,7 @@ static int tg3_test_msi(struct tg3 *tp)
5908 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 5908 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
5909 5909
5910 err = request_irq(tp->pdev->irq, tg3_interrupt, 5910 err = request_irq(tp->pdev->irq, tg3_interrupt,
5911 SA_SHIRQ, dev->name, dev); 5911 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5912 5912
5913 if (err) 5913 if (err)
5914 return err; 5914 return err;
@@ -5965,10 +5965,10 @@ static int tg3_open(struct net_device *dev)
5965 } 5965 }
5966 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) 5966 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
5967 err = request_irq(tp->pdev->irq, tg3_msi, 5967 err = request_irq(tp->pdev->irq, tg3_msi,
5968 0, dev->name, dev); 5968 SA_SAMPLE_RANDOM, dev->name, dev);
5969 else 5969 else
5970 err = request_irq(tp->pdev->irq, tg3_interrupt, 5970 err = request_irq(tp->pdev->irq, tg3_interrupt,
5971 SA_SHIRQ, dev->name, dev); 5971 SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
5972 5972
5973 if (err) { 5973 if (err) {
5974 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { 5974 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6ae62cbf7c2e..ce9423bb2de3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -945,7 +945,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
945 retval = arch_setup_additional_pages(bprm, executable_stack); 945 retval = arch_setup_additional_pages(bprm, executable_stack);
946 if (retval < 0) { 946 if (retval < 0) {
947 send_sig(SIGKILL, current, 0); 947 send_sig(SIGKILL, current, 0);
948 goto out_free_dentry; 948 goto out;
949 } 949 }
950#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ 950#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
951 951
diff --git a/fs/compat.c b/fs/compat.c
index 67c0b94d1148..728cd8365384 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -809,7 +809,7 @@ static void *do_smb_super_data_conv(void *raw_data)
809 809
810struct compat_nfs_string { 810struct compat_nfs_string {
811 compat_uint_t len; 811 compat_uint_t len;
812 compat_uptr_t __user data; 812 compat_uptr_t data;
813}; 813};
814 814
815static inline void compat_nfs_string(struct nfs_string *dst, 815static inline void compat_nfs_string(struct nfs_string *dst,
@@ -834,10 +834,10 @@ struct compat_nfs4_mount_data_v1 {
834 struct compat_nfs_string mnt_path; 834 struct compat_nfs_string mnt_path;
835 struct compat_nfs_string hostname; 835 struct compat_nfs_string hostname;
836 compat_uint_t host_addrlen; 836 compat_uint_t host_addrlen;
837 compat_uptr_t __user host_addr; 837 compat_uptr_t host_addr;
838 compat_int_t proto; 838 compat_int_t proto;
839 compat_int_t auth_flavourlen; 839 compat_int_t auth_flavourlen;
840 compat_uptr_t __user auth_flavours; 840 compat_uptr_t auth_flavours;
841}; 841};
842 842
843static int do_nfs4_super_data_conv(void *raw_data) 843static int do_nfs4_super_data_conv(void *raw_data)
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 041ab8c51a64..cd4e06b74ab6 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -81,6 +81,7 @@ extern __u8 isa_irq_to_vector_map[16];
81 81
82extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ 82extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
83 83
84extern int assign_irq_vector_nopanic (int irq); /* allocate a free vector without panic */
84extern int assign_irq_vector (int irq); /* allocate a free vector */ 85extern int assign_irq_vector (int irq); /* allocate a free vector */
85extern void free_irq_vector (int vector); 86extern void free_irq_vector (int vector);
86extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); 87extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 5dd477ffb88e..2303a10ee595 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -67,6 +67,7 @@
67#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/ 67#define PAL_REGISTER_INFO 39 /* return AR and CR register information*/
68#define PAL_SHUTDOWN 40 /* enter processor shutdown state */ 68#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
69#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */ 69#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
70#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
70 71
71#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ 72#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
72#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ 73#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@@ -1559,6 +1560,73 @@ ia64_pal_prefetch_visibility (s64 trans_type)
1559 return iprv.status; 1560 return iprv.status;
1560} 1561}
1561 1562
1563/* data structure for getting information on logical to physical mappings */
1564typedef union pal_log_overview_u {
1565 struct {
1566 u64 num_log :16, /* Total number of logical
1567 * processors on this die
1568 */
1569 tpc :8, /* Threads per core */
1570 reserved3 :8, /* Reserved */
1571 cpp :8, /* Cores per processor */
1572 reserved2 :8, /* Reserved */
1573 ppid :8, /* Physical processor ID */
1574 reserved1 :8; /* Reserved */
1575 } overview_bits;
1576 u64 overview_data;
1577} pal_log_overview_t;
1578
1579typedef union pal_proc_n_log_info1_u{
1580 struct {
1581 u64 tid :16, /* Thread id */
1582 reserved2 :16, /* Reserved */
1583 cid :16, /* Core id */
1584 reserved1 :16; /* Reserved */
1585 } ppli1_bits;
1586 u64 ppli1_data;
1587} pal_proc_n_log_info1_t;
1588
1589typedef union pal_proc_n_log_info2_u {
1590 struct {
1591 u64 la :16, /* Logical address */
1592 reserved :48; /* Reserved */
1593 } ppli2_bits;
1594 u64 ppli2_data;
1595} pal_proc_n_log_info2_t;
1596
1597typedef struct pal_logical_to_physical_s
1598{
1599 pal_log_overview_t overview;
1600 pal_proc_n_log_info1_t ppli1;
1601 pal_proc_n_log_info2_t ppli2;
1602} pal_logical_to_physical_t;
1603
1604#define overview_num_log overview.overview_bits.num_log
1605#define overview_tpc overview.overview_bits.tpc
1606#define overview_cpp overview.overview_bits.cpp
1607#define overview_ppid overview.overview_bits.ppid
1608#define log1_tid ppli1.ppli1_bits.tid
1609#define log1_cid ppli1.ppli1_bits.cid
1610#define log2_la ppli2.ppli2_bits.la
1611
1612/* Get information on logical to physical processor mappings. */
1613static inline s64
1614ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
1615{
1616 struct ia64_pal_retval iprv;
1617
1618 PAL_CALL(iprv, PAL_LOGICAL_TO_PHYSICAL, proc_number, 0, 0);
1619
1620 if (iprv.status == PAL_STATUS_SUCCESS)
1621 {
1622 if (proc_number == 0)
1623 mapping->overview.overview_data = iprv.v0;
1624 mapping->ppli1.ppli1_data = iprv.v1;
1625 mapping->ppli2.ppli2_data = iprv.v2;
1626 }
1627
1628 return iprv.status;
1629}
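
A usage sketch for the mapping call, using the accessor macros defined above (illustrative; error handling beyond the status check omitted):

	pal_logical_to_physical_t map;

	if (ia64_pal_logical_to_phys(0, &map) == PAL_STATUS_SUCCESS)
		printk(KERN_INFO "socket %d: %d logical cpus, "
		       "%d cores x %d threads\n",
		       (int)map.overview_ppid, (int)map.overview_num_log,
		       (int)map.overview_cpp, (int)map.overview_tpc);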
1562#endif /* __ASSEMBLY__ */ 1630#endif /* __ASSEMBLY__ */
1563 1631
1564#endif /* _ASM_IA64_PAL_H */ 1632#endif /* _ASM_IA64_PAL_H */
diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h
index 136c60e6bfcc..ed5416c5b1ac 100644
--- a/include/asm-ia64/perfmon.h
+++ b/include/asm-ia64/perfmon.h
@@ -254,6 +254,18 @@ extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int
254#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */ 254#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
255#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */ 255#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
256 256
257/*
258 * sysctl control structure. visible to sampling formats
259 */
260typedef struct {
261 int debug; /* turn on/off debugging via syslog */
262 int debug_ovfl; /* turn on/off debug printk in overflow handler */
263 int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
264 int expert_mode; /* turn on/off value checking */
265} pfm_sysctl_t;
266extern pfm_sysctl_t pfm_sysctl;
267
268
257#endif /* __KERNEL__ */ 269#endif /* __KERNEL__ */
258 270
259#endif /* _ASM_IA64_PERFMON_H */ 271#endif /* _ASM_IA64_PERFMON_H */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 0f05dc8bd460..a5f214554afd 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -22,146 +22,124 @@
22 22
23#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
24 24
25/* 25DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
26 * Very stupidly, we used to get new pgd's and pmd's, init their contents 26#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
27 * to point to the NULL versions of the next level page table, later on 27DECLARE_PER_CPU(long, __pgtable_quicklist_size);
28 * completely re-init them the same way, then free them up. This wasted 28#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
29 * a lot of work and caused unnecessary memory traffic. How broken...
30 * We fix this by caching them.
31 */
32#define pgd_quicklist (local_cpu_data->pgd_quick)
33#define pmd_quicklist (local_cpu_data->pmd_quick)
34#define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)
35 29
36static inline pgd_t* 30static inline long pgtable_quicklist_total_size(void)
37pgd_alloc_one_fast (struct mm_struct *mm) 31{
32 long ql_size = 0;
33 int cpuid;
34
35 for_each_online_cpu(cpuid) {
36 ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
37 }
38 return ql_size;
39}
40
41static inline void *pgtable_quicklist_alloc(void)
38{ 42{
39 unsigned long *ret = NULL; 43 unsigned long *ret = NULL;
40 44
41 preempt_disable(); 45 preempt_disable();
42 46
43 ret = pgd_quicklist; 47 ret = pgtable_quicklist;
44 if (likely(ret != NULL)) { 48 if (likely(ret != NULL)) {
45 pgd_quicklist = (unsigned long *)(*ret); 49 pgtable_quicklist = (unsigned long *)(*ret);
46 ret[0] = 0; 50 ret[0] = 0;
47 --pgtable_cache_size; 51 --pgtable_quicklist_size;
48 } else 52 preempt_enable();
49 ret = NULL; 53 } else {
50 54 preempt_enable();
51 preempt_enable(); 55 ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
56 }
52 57
53 return (pgd_t *) ret; 58 return ret;
54} 59}
55 60
56static inline pgd_t* 61static inline void pgtable_quicklist_free(void *pgtable_entry)
57pgd_alloc (struct mm_struct *mm)
58{ 62{
59 /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */ 63#ifdef CONFIG_NUMA
60 pgd_t *pgd = pgd_alloc_one_fast(mm); 64 unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
61 65
62 if (unlikely(pgd == NULL)) { 66 if (unlikely(nid != numa_node_id())) {
63 pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 67 free_page((unsigned long)pgtable_entry);
68 return;
64 } 69 }
65 return pgd; 70#endif
66}
67 71
68static inline void
69pgd_free (pgd_t *pgd)
70{
71 preempt_disable(); 72 preempt_disable();
72 *(unsigned long *)pgd = (unsigned long) pgd_quicklist; 73 *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
73 pgd_quicklist = (unsigned long *) pgd; 74 pgtable_quicklist = (unsigned long *)pgtable_entry;
74 ++pgtable_cache_size; 75 ++pgtable_quicklist_size;
75 preempt_enable(); 76 preempt_enable();
76} 77}
77 78
78static inline void 79static inline pgd_t *pgd_alloc(struct mm_struct *mm)
79pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd)
80{ 80{
81 pud_val(*pud_entry) = __pa(pmd); 81 return pgtable_quicklist_alloc();
82} 82}
83 83
84static inline pmd_t* 84static inline void pgd_free(pgd_t * pgd)
85pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
86{ 85{
87 unsigned long *ret = NULL; 86 pgtable_quicklist_free(pgd);
88
89 preempt_disable();
90
91 ret = (unsigned long *)pmd_quicklist;
92 if (likely(ret != NULL)) {
93 pmd_quicklist = (unsigned long *)(*ret);
94 ret[0] = 0;
95 --pgtable_cache_size;
96 }
97
98 preempt_enable();
99
100 return (pmd_t *)ret;
101} 87}
102 88
103static inline pmd_t* 89static inline void
104pmd_alloc_one (struct mm_struct *mm, unsigned long addr) 90pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
105{ 91{
106 pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 92 pud_val(*pud_entry) = __pa(pmd);
93}
107 94
108 return pmd; 95static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
96{
97 return pgtable_quicklist_alloc();
109} 98}
110 99
111static inline void 100static inline void pmd_free(pmd_t * pmd)
112pmd_free (pmd_t *pmd)
113{ 101{
114 preempt_disable(); 102 pgtable_quicklist_free(pmd);
115 *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
116 pmd_quicklist = (unsigned long *) pmd;
117 ++pgtable_cache_size;
118 preempt_enable();
119} 103}
120 104
121#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd) 105#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
122 106
123static inline void 107static inline void
124pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte) 108pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
125{ 109{
126 pmd_val(*pmd_entry) = page_to_phys(pte); 110 pmd_val(*pmd_entry) = page_to_phys(pte);
127} 111}
128 112
129static inline void 113static inline void
130pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte) 114pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
131{ 115{
132 pmd_val(*pmd_entry) = __pa(pte); 116 pmd_val(*pmd_entry) = __pa(pte);
133} 117}
134 118
135static inline struct page * 119static inline struct page *pte_alloc_one(struct mm_struct *mm,
136pte_alloc_one (struct mm_struct *mm, unsigned long addr) 120 unsigned long addr)
137{ 121{
138 struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); 122 return virt_to_page(pgtable_quicklist_alloc());
139
140 return pte;
141} 123}
142 124
143static inline pte_t * 125static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
144pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr) 126 unsigned long addr)
145{ 127{
146 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 128 return pgtable_quicklist_alloc();
147
148 return pte;
149} 129}
150 130
151static inline void 131static inline void pte_free(struct page *pte)
152pte_free (struct page *pte)
153{ 132{
154 __free_page(pte); 133 pgtable_quicklist_free(page_address(pte));
155} 134}
156 135
157static inline void 136static inline void pte_free_kernel(pte_t * pte)
158pte_free_kernel (pte_t *pte)
159{ 137{
160 free_page((unsigned long) pte); 138 pgtable_quicklist_free(pte);
161} 139}
162 140
163#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) 141#define __pte_free_tlb(tlb, pte) pte_free(pte)
164 142
165extern void check_pgt_cache (void); 143extern void check_pgt_cache(void);
166 144
167#endif /* _ASM_IA64_PGALLOC_H */ 145#endif /* _ASM_IA64_PGALLOC_H */
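
The quicklist on the new side of this diff is an intrusive free list threaded through the free pages themselves: the first word of each free page points at the next one, and that word is re-zeroed on allocation, so every page leaving the list is fully zeroed. Conceptually (a sketch, not the diff's exact lines):

	/* free: push, using the page's first word as the link */
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;

	/* alloc: pop, then clear the link word so the page is all-zero again */
	ret = pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)(*ret);
	ret[0] = 0;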
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 2807f8d766d4..9e1ba8b7fb68 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -137,9 +137,6 @@ struct cpuinfo_ia64 {
137 __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ 137 __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
138 __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ 138 __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
139 __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ 139 __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
140 __u64 *pgd_quick;
141 __u64 *pmd_quick;
142 __u64 pgtable_cache_sz;
143 __u64 itc_freq; /* frequency of ITC counter */ 140 __u64 itc_freq; /* frequency of ITC counter */
144 __u64 proc_freq; /* frequency of processor */ 141 __u64 proc_freq; /* frequency of processor */
145 __u64 cyc_per_usec; /* itc_freq/1000000 */ 142 __u64 cyc_per_usec; /* itc_freq/1000000 */
@@ -151,6 +148,13 @@ struct cpuinfo_ia64 {
151#ifdef CONFIG_SMP 148#ifdef CONFIG_SMP
152 __u64 loops_per_jiffy; 149 __u64 loops_per_jiffy;
153 int cpu; 150 int cpu;
151 __u32 socket_id; /* physical processor socket id */
152 __u16 core_id; /* core id */
153 __u16 thread_id; /* thread id */
154 __u16 num_log; /* Total number of logical processors on
155 * this socket that were successfully booted */
156 __u8 cores_per_socket; /* Cores per processor socket */
157 __u8 threads_per_core; /* Threads per core */
154#endif 158#endif
155 159
156 /* CPUID-derived information: */ 160 /* CPUID-derived information: */
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 240676f75390..29df88bdd2bc 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -91,6 +91,7 @@ extern spinlock_t sal_lock;
91#define SAL_PCI_CONFIG_READ 0x01000010 91#define SAL_PCI_CONFIG_READ 0x01000010
92#define SAL_PCI_CONFIG_WRITE 0x01000011 92#define SAL_PCI_CONFIG_WRITE 0x01000011
93#define SAL_FREQ_BASE 0x01000012 93#define SAL_FREQ_BASE 0x01000012
94#define SAL_PHYSICAL_ID_INFO 0x01000013
94 95
95#define SAL_UPDATE_PAL 0x01000020 96#define SAL_UPDATE_PAL 0x01000020
96 97
@@ -815,6 +816,17 @@ ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size,
815 return isrv.status; 816 return isrv.status;
816} 817}
817 818
819/* Get physical processor die mapping in the platform. */
820static inline s64
821ia64_sal_physical_id_info(u16 *splid)
822{
823 struct ia64_sal_retval isrv;
824 SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
825 if (splid)
826 *splid = isrv.v0;
827 return isrv.status;
828}
829
818extern unsigned long sal_platform_features; 830extern unsigned long sal_platform_features;
819 831
820extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *); 832extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *);
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index c4a227acfeb0..3ba1a061e4ae 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -56,6 +56,10 @@ extern struct smp_boot_data {
56extern char no_int_routing __devinitdata; 56extern char no_int_routing __devinitdata;
57 57
58extern cpumask_t cpu_online_map; 58extern cpumask_t cpu_online_map;
59extern cpumask_t cpu_core_map[NR_CPUS];
60extern cpumask_t cpu_sibling_map[NR_CPUS];
61extern int smp_num_siblings;
62extern int smp_num_cpucores;
59extern void __iomem *ipi_base_addr; 63extern void __iomem *ipi_base_addr;
60extern unsigned char smp_int_redirect; 64extern unsigned char smp_int_redirect;
61 65
@@ -124,6 +128,7 @@ extern int smp_call_function_single (int cpuid, void (*func) (void *info), void
124extern void smp_send_reschedule (int cpu); 128extern void smp_send_reschedule (int cpu);
125extern void lock_ipi_calllock(void); 129extern void lock_ipi_calllock(void);
126extern void unlock_ipi_calllock(void); 130extern void unlock_ipi_calllock(void);
131extern void identify_siblings (struct cpuinfo_ia64 *);
127 132
128#else 133#else
129 134
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index c916bd22767a..960d626ee589 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -154,8 +154,9 @@
154 * the chiplet id is zero. If we implement TIO-TIO dma, we might need 154 * the chiplet id is zero. If we implement TIO-TIO dma, we might need
155 * to insert a chiplet id into this macro. However, it is our belief 155 * to insert a chiplet id into this macro. However, it is our belief
156 * right now that this chiplet id will be ICE, which is also zero. 156 * right now that this chiplet id will be ICE, which is also zero.
157 * Nasid starts on bit 40.
157 */ 158 */
158#define PHYS_TO_TIODMA(x) ( (((u64)(x) & NASID_MASK) << 2) | NODE_OFFSET(x)) 159#define PHYS_TO_TIODMA(x) ( (((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
159#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) 160#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
160 161
161 162
@@ -168,7 +169,10 @@
168#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */ 169#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
169#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \ 170#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
170 : RAW_NODE_SWIN_BASE(n, w)) 171 : RAW_NODE_SWIN_BASE(n, w))
172#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
173 ((u64) (w) << TIO_SWIN_SIZE_BITS))
171#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n)) 174#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
175#define TIO_IO_BASE(n) (UNCACHED | NASID_SPACE(n))
172#define BWIN_SIZE (1UL << BWIN_SIZE_BITS) 176#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
173#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE) 177#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
174#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS)) 178#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index 0ec27f99c181..f50da3d91d07 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 6 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
7 */ 7 */
8 8
9 9
@@ -13,8 +13,12 @@
13#include <linux/timer.h> 13#include <linux/timer.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/cache.h> 15#include <linux/cache.h>
16#include <asm/sn/pda.h>
16#include <asm/sn/types.h> 17#include <asm/sn/types.h>
18#include <asm/sn/shub_mmr.h>
17 19
20#define IBCT_NOTIFY (0x1UL << 4)
21#define IBCT_ZFIL_MODE (0x1UL << 0)
18 22
19/* #define BTE_DEBUG */ 23/* #define BTE_DEBUG */
20/* #define BTE_DEBUG_VERBOSE */ 24/* #define BTE_DEBUG_VERBOSE */
@@ -39,8 +43,36 @@
39 43
40 44
41/* Define hardware */ 45/* Define hardware */
42#define BTES_PER_NODE 2 46#define BTES_PER_NODE (is_shub2() ? 4 : 2)
47#define MAX_BTES_PER_NODE 4
43 48
49#define BTE2OFF_CTRL (0)
50#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
51#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
52#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
53
54#define BTE_BASE_ADDR(interface) \
55 (is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 : \
56 (interface == 1) ? SH2_BT_ENG_CSR_1 : \
57 (interface == 2) ? SH2_BT_ENG_CSR_2 : \
58 SH2_BT_ENG_CSR_3 \
59 : (interface == 0) ? IIO_IBLS0 : IIO_IBLS1)
60
61#define BTE_SOURCE_ADDR(base) \
62 (is_shub2() ? base + (BTE2OFF_SRC/8) \
63 : base + (BTEOFF_SRC/8))
64
65#define BTE_DEST_ADDR(base) \
66 (is_shub2() ? base + (BTE2OFF_DEST/8) \
67 : base + (BTEOFF_DEST/8))
68
69#define BTE_CTRL_ADDR(base) \
70 (is_shub2() ? base + (BTE2OFF_CTRL/8) \
71 : base + (BTEOFF_CTRL/8))
72
73#define BTE_NOTIF_ADDR(base) \
74 (is_shub2() ? base + (BTE2OFF_NOTIFY/8) \
75 : base + (BTEOFF_NOTIFY/8))
44 76
45/* Define hardware modes */ 77/* Define hardware modes */
46#define BTE_NOTIFY (IBCT_NOTIFY) 78#define BTE_NOTIFY (IBCT_NOTIFY)
@@ -68,14 +100,18 @@
68#define BTE_LNSTAT_STORE(_bte, _x) \ 100#define BTE_LNSTAT_STORE(_bte, _x) \
69 HUB_S(_bte->bte_base_addr, (_x)) 101 HUB_S(_bte->bte_base_addr, (_x))
70#define BTE_SRC_STORE(_bte, _x) \ 102#define BTE_SRC_STORE(_bte, _x) \
71 HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x)) 103 HUB_S(_bte->bte_source_addr, (_x))
72#define BTE_DEST_STORE(_bte, _x) \ 104#define BTE_DEST_STORE(_bte, _x) \
73 HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x)) 105 HUB_S(_bte->bte_destination_addr, (_x))
74#define BTE_CTRL_STORE(_bte, _x) \ 106#define BTE_CTRL_STORE(_bte, _x) \
75 HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x)) 107 HUB_S(_bte->bte_control_addr, (_x))
76#define BTE_NOTIF_STORE(_bte, _x) \ 108#define BTE_NOTIF_STORE(_bte, _x) \
77 HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x)) 109 HUB_S(_bte->bte_notify_addr, (_x))
78 110
111#define BTE_START_TRANSFER(_bte, _len, _mode) \
112 is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
113 : BTE_LNSTAT_STORE(_bte, _len); \
114 BTE_CTRL_STORE(_bte, _mode)
79 115
80/* Possible results from bte_copy and bte_unaligned_copy */ 116/* Possible results from bte_copy and bte_unaligned_copy */
81/* The following error codes map into the BTE hardware codes 117/* The following error codes map into the BTE hardware codes
@@ -110,6 +146,10 @@ typedef enum {
110struct bteinfo_s { 146struct bteinfo_s {
111 volatile u64 notify ____cacheline_aligned; 147 volatile u64 notify ____cacheline_aligned;
112 u64 *bte_base_addr ____cacheline_aligned; 148 u64 *bte_base_addr ____cacheline_aligned;
149 u64 *bte_source_addr;
150 u64 *bte_destination_addr;
151 u64 *bte_control_addr;
152 u64 *bte_notify_addr;
113 spinlock_t spinlock; 153 spinlock_t spinlock;
114 cnodeid_t bte_cnode; /* cnode */ 154 cnodeid_t bte_cnode; /* cnode */
115 int bte_error_count; /* Number of errors encountered */ 155 int bte_error_count; /* Number of errors encountered */
@@ -117,6 +157,7 @@ struct bteinfo_s {
117 int cleanup_active; /* Interface is locked for cleanup */ 157 int cleanup_active; /* Interface is locked for cleanup */
118 volatile bte_result_t bh_error; /* error while processing */ 158 volatile bte_result_t bh_error; /* error while processing */
119 volatile u64 *most_rcnt_na; 159 volatile u64 *most_rcnt_na;
160 struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE];
120}; 161};
121 162
122 163
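
The new bte_source_addr/bte_destination_addr/bte_control_addr/bte_notify_addr pointers exist because the register spacing differs between shub1 and shub2, so the per-register addresses are now precomputed per interface. The shub2 offsets derive from the SH2 MMR addresses added in the shub_mmr.h hunk later in this patch; a standalone sketch of that arithmetic (constants copied from that hunk):

#include <stdio.h>
#include <stdint.h>

/* SHUB2 BTE MMR addresses as added by this patch (shub_mmr.h) */
#define SH2_BT_ENG_CSR_0	0x0000000030040000UL
#define SH2_BT_ENG_SRC_ADDR_0	0x0000000030040080UL
#define SH2_BT_ENG_DEST_ADDR_0	0x0000000030040100UL
#define SH2_BT_ENG_NOTIF_ADDR_0	0x0000000030040180UL

#define BTE2OFF_CTRL	(0)
#define BTE2OFF_SRC	(SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_DEST	(SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_NOTIFY	(SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)

int main(void)
{
	/* bte_base_addr is a u64 *, so byte offsets are divided by 8 */
	printf("src   +%lu u64 words\n", BTE2OFF_SRC / 8);    /* 16 */
	printf("dest  +%lu u64 words\n", BTE2OFF_DEST / 8);   /* 32 */
	printf("notif +%lu u64 words\n", BTE2OFF_NOTIFY / 8); /* 48 */
	return 0;
}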
diff --git a/include/asm-ia64/sn/geo.h b/include/asm-ia64/sn/geo.h
index f566343d25f8..84b254603b8d 100644
--- a/include/asm-ia64/sn/geo.h
+++ b/include/asm-ia64/sn/geo.h
@@ -18,32 +18,34 @@
18#define GEOID_SIZE 8 /* Would 16 be better? The size can 18#define GEOID_SIZE 8 /* Would 16 be better? The size can
19 be different on different platforms. */ 19 be different on different platforms. */
20 20
21#define MAX_SLABS 0xe /* slabs per module */ 21#define MAX_SLOTS 0xf /* slots per module */
22#define MAX_SLABS 0xf /* slabs per slot */
22 23
23typedef unsigned char geo_type_t; 24typedef unsigned char geo_type_t;
24 25
25/* Fields common to all substructures */ 26/* Fields common to all substructures */
26typedef struct geo_any_s { 27typedef struct geo_common_s {
27 moduleid_t module; /* The module (box) this h/w lives in */ 28 moduleid_t module; /* The module (box) this h/w lives in */
28 geo_type_t type; /* What type of h/w is named by this geoid_t */ 29 geo_type_t type; /* What type of h/w is named by this geoid_t */
29 slabid_t slab; /* The logical assembly within the module */ 30 slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
30} geo_any_t; 31 slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
32} geo_common_t;
31 33
32/* Additional fields for particular types of hardware */ 34/* Additional fields for particular types of hardware */
33typedef struct geo_node_s { 35typedef struct geo_node_s {
34 geo_any_t any; /* No additional fields needed */ 36 geo_common_t common; /* No additional fields needed */
35} geo_node_t; 37} geo_node_t;
36 38
37typedef struct geo_rtr_s { 39typedef struct geo_rtr_s {
38 geo_any_t any; /* No additional fields needed */ 40 geo_common_t common; /* No additional fields needed */
39} geo_rtr_t; 41} geo_rtr_t;
40 42
41typedef struct geo_iocntl_s { 43typedef struct geo_iocntl_s {
42 geo_any_t any; /* No additional fields needed */ 44 geo_common_t common; /* No additional fields needed */
43} geo_iocntl_t; 45} geo_iocntl_t;
44 46
45typedef struct geo_pcicard_s { 47typedef struct geo_pcicard_s {
46 geo_iocntl_t any; 48 geo_iocntl_t common;
47 char bus; /* Bus/widget number */ 49 char bus; /* Bus/widget number */
48 char slot; /* PCI slot number */ 50 char slot; /* PCI slot number */
49} geo_pcicard_t; 51} geo_pcicard_t;
@@ -62,14 +64,14 @@ typedef struct geo_mem_s {
62 64
63 65
64typedef union geoid_u { 66typedef union geoid_u {
65 geo_any_t any; 67 geo_common_t common;
66 geo_node_t node; 68 geo_node_t node;
67 geo_iocntl_t iocntl; 69 geo_iocntl_t iocntl;
68 geo_pcicard_t pcicard; 70 geo_pcicard_t pcicard;
69 geo_rtr_t rtr; 71 geo_rtr_t rtr;
70 geo_cpu_t cpu; 72 geo_cpu_t cpu;
71 geo_mem_t mem; 73 geo_mem_t mem;
72 char padsize[GEOID_SIZE]; 74 char padsize[GEOID_SIZE];
73} geoid_t; 75} geoid_t;
74 76
75 77
@@ -104,19 +106,26 @@ typedef union geoid_u {
104#define INVALID_CNODEID ((cnodeid_t)-1) 106#define INVALID_CNODEID ((cnodeid_t)-1)
105#define INVALID_PNODEID ((pnodeid_t)-1) 107#define INVALID_PNODEID ((pnodeid_t)-1)
106#define INVALID_SLAB (slabid_t)-1 108#define INVALID_SLAB (slabid_t)-1
109#define INVALID_SLOT (slotid_t)-1
107#define INVALID_MODULE ((moduleid_t)-1) 110#define INVALID_MODULE ((moduleid_t)-1)
108#define INVALID_PARTID ((partid_t)-1) 111#define INVALID_PARTID ((partid_t)-1)
109 112
110static inline slabid_t geo_slab(geoid_t g) 113static inline slabid_t geo_slab(geoid_t g)
111{ 114{
112 return (g.any.type == GEO_TYPE_INVALID) ? 115 return (g.common.type == GEO_TYPE_INVALID) ?
113 INVALID_SLAB : g.any.slab; 116 INVALID_SLAB : g.common.slab;
117}
118
119static inline slotid_t geo_slot(geoid_t g)
120{
121 return (g.common.type == GEO_TYPE_INVALID) ?
122 INVALID_SLOT : g.common.slot;
114} 123}
115 124
116static inline moduleid_t geo_module(geoid_t g) 125static inline moduleid_t geo_module(geoid_t g)
117{ 126{
118 return (g.any.type == GEO_TYPE_INVALID) ? 127 return (g.common.type == GEO_TYPE_INVALID) ?
119 INVALID_MODULE : g.any.module; 128 INVALID_MODULE : g.common.module;
120} 129}
121 130
122extern geoid_t cnodeid_get_geoid(cnodeid_t cnode); 131extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
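
geo_common_t packs slab and slot into one byte as two 4-bit fields, which is what keeps GEOID_SIZE at 8 while adding the slot. A standalone mock of the accessors; the GEO_TYPE_INVALID value and the bitfield base type here are illustrative assumptions:

#include <stdio.h>

typedef unsigned char geo_type_t;
typedef unsigned char slabid_t;
typedef unsigned char slotid_t;
typedef unsigned int moduleid_t;

#define GEO_TYPE_INVALID	0	/* assumed value, for illustration */
#define INVALID_SLAB	(slabid_t)-1
#define INVALID_SLOT	(slotid_t)-1

struct geo_common_s {
	moduleid_t module;
	geo_type_t type;
	unsigned char slab:4;	/* slab (ASIC), 0 .. 15 within slot */
	unsigned char slot:4;	/* slot (blade), 0 .. 15 within module */
};

static slabid_t geo_slab(struct geo_common_s g)
{
	return (g.type == GEO_TYPE_INVALID) ? INVALID_SLAB : g.slab;
}

static slotid_t geo_slot(struct geo_common_s g)
{
	return (g.type == GEO_TYPE_INVALID) ? INVALID_SLOT : g.slot;
}

int main(void)
{
	struct geo_common_s g = { .module = 1, .type = 1,
				  .slab = 0xe, .slot = 0x3 };

	printf("slab=%d slot=%d\n", geo_slab(g), geo_slot(g)); /* 14 3 */
	return 0;
}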
diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h
index 2fbde33656e6..13cc1002b294 100644
--- a/include/asm-ia64/sn/nodepda.h
+++ b/include/asm-ia64/sn/nodepda.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8#ifndef _ASM_IA64_SN_NODEPDA_H 8#ifndef _ASM_IA64_SN_NODEPDA_H
9#define _ASM_IA64_SN_NODEPDA_H 9#define _ASM_IA64_SN_NODEPDA_H
@@ -43,7 +43,7 @@ struct nodepda_s {
43 /* 43 /*
44 * The BTEs on this node are shared by the local cpus 44 * The BTEs on this node are shared by the local cpus
45 */ 45 */
46 struct bteinfo_s bte_if[BTES_PER_NODE]; /* Virtual Interface */ 46 struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
47 struct timer_list bte_recovery_timer; 47 struct timer_list bte_recovery_timer;
48 spinlock_t bte_recovery_lock; 48 spinlock_t bte_recovery_lock;
49 49
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/include/asm-ia64/sn/pcibus_provider_defs.h
index 07065615bbea..04e27d5b3820 100644
--- a/arch/ia64/sn/include/pci/pcibus_provider_defs.h
+++ b/include/asm-ia64/sn/pcibus_provider_defs.h
@@ -17,6 +17,9 @@
17#define PCIIO_ASIC_TYPE_PPB 1 17#define PCIIO_ASIC_TYPE_PPB 1
18#define PCIIO_ASIC_TYPE_PIC 2 18#define PCIIO_ASIC_TYPE_PIC 2
19#define PCIIO_ASIC_TYPE_TIOCP 3 19#define PCIIO_ASIC_TYPE_TIOCP 3
20#define PCIIO_ASIC_TYPE_TIOCA 4
21
22#define PCIIO_ASIC_MAX_TYPES 5
20 23
21/* 24/*
22 * Common pciio bus provider data. There should be one of these as the 25 * Common pciio bus provider data. There should be one of these as the
@@ -35,9 +38,15 @@ struct pcibus_bussoft {
35}; 38};
36 39
37/* 40/*
38 * DMA mapping flags 41 * SN pci bus indirection
39 */ 42 */
40 43
41#define SN_PCIDMA_CONSISTENT 0x0001 44struct sn_pcibus_provider {
45 dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t);
46 dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
47 void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
48 void * (*bus_fixup)(struct pcibus_bussoft *);
49};
42 50
51extern struct sn_pcibus_provider *sn_pci_provider[];
43#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ 52#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
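
The new struct sn_pcibus_provider turns per-ASIC DMA handling into an ops table indexed by the bussoft's ASIC type. A minimal standalone sketch of that dispatch, with a stub PIC provider standing in for the real implementations:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;
struct pci_dev;		/* opaque here */

#define PCIIO_ASIC_TYPE_PIC	2
#define PCIIO_ASIC_MAX_TYPES	5

struct sn_pcibus_provider {
	dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t);
};

static dma_addr_t pic_dma_map(struct pci_dev *dev, unsigned long phys,
			      size_t size)
{
	(void)dev; (void)size;
	return phys | (1UL << 63);	/* fake bus address, stub only */
}

static struct sn_pcibus_provider pic_provider = { .dma_map = pic_dma_map };
static struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];

int main(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pic_provider;

	int asic = PCIIO_ASIC_TYPE_PIC;	/* would come from the bussoft */
	dma_addr_t h = sn_pci_provider[asic]->dma_map(NULL, 0x1000, 64);

	printf("bus addr 0x%lx\n", h);
	return 0;
}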
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/include/asm-ia64/sn/pcidev.h
index 81eb95d3bf47..ed4031d80811 100644
--- a/arch/ia64/sn/include/pci/pcidev.h
+++ b/include/asm-ia64/sn/pcidev.h
@@ -32,6 +32,9 @@ extern struct sn_irq_info **sn_irq;
32#define SN_PCIDEV_BUSSOFT(pci_dev) \ 32#define SN_PCIDEV_BUSSOFT(pci_dev) \
33 (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info) 33 (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
34 34
35#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
36 (SN_PCIDEV_INFO(pci_dev)->pdi_provider)
37
35#define PCIIO_BUS_NONE 255 /* bus 255 reserved */ 38#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
36#define PCIIO_SLOT_NONE 255 39#define PCIIO_SLOT_NONE 255
37#define PCIIO_FUNC_NONE 255 40#define PCIIO_FUNC_NONE 255
@@ -46,6 +49,7 @@ struct pcidev_info {
46 struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */ 49 struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
47 50
48 struct sn_irq_info *pdi_sn_irq_info; 51 struct sn_irq_info *pdi_sn_irq_info;
52 struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
49}; 53};
50 54
51extern void sn_irq_fixup(struct pci_dev *pci_dev, 55extern void sn_irq_fixup(struct pci_dev *pci_dev,
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
index e940d3647c80..cd19f17bf91a 100644
--- a/include/asm-ia64/sn/pda.h
+++ b/include/asm-ia64/sn/pda.h
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
7 */ 7 */
8#ifndef _ASM_IA64_SN_PDA_H 8#ifndef _ASM_IA64_SN_PDA_H
9#define _ASM_IA64_SN_PDA_H 9#define _ASM_IA64_SN_PDA_H
@@ -11,7 +11,6 @@
11#include <linux/cache.h> 11#include <linux/cache.h>
12#include <asm/percpu.h> 12#include <asm/percpu.h>
13#include <asm/system.h> 13#include <asm/system.h>
14#include <asm/sn/bte.h>
15 14
16 15
17/* 16/*
diff --git a/include/asm-ia64/sn/shub_mmr.h b/include/asm-ia64/sn/shub_mmr.h
index 5c2fcf13d5ce..2f885088e095 100644
--- a/include/asm-ia64/sn/shub_mmr.h
+++ b/include/asm-ia64/sn/shub_mmr.h
@@ -4,7 +4,7 @@
4 * License. See the file "COPYING" in the main directory of this archive 4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details. 5 * for more details.
6 * 6 *
7 * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. 7 * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
8 */ 8 */
9 9
10#ifndef _ASM_IA64_SN_SHUB_MMR_H 10#ifndef _ASM_IA64_SN_SHUB_MMR_H
@@ -129,6 +129,23 @@
129#define SH_EVENT_OCCURRED_II_INT1_SHFT 30 129#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
130#define SH_EVENT_OCCURRED_II_INT1_MASK 0x0000000040000000 130#define SH_EVENT_OCCURRED_II_INT1_MASK 0x0000000040000000
131 131
132/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
133/* Description: Pending SHUB 2 EXT IO INT2 */
134#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
135#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK 0x0000000200000000
136
137/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
138/* Description: Pending SHUB 2 EXT IO INT3 */
139#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
140#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK 0x0000000400000000
141
142#define SH_ALL_INT_MASK \
143 (SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
144 SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
145	SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \
146 SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
147
148
132/* ==================================================================== */ 149/* ==================================================================== */
133/* LEDS */ 150/* LEDS */
134/* ==================================================================== */ 151/* ==================================================================== */
@@ -438,4 +455,22 @@
438#define SH_INT_CMPC shubmmr(SH, INT_CMPC) 455#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
439#define SH_INT_CMPD shubmmr(SH, INT_CMPD) 456#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
440 457
458/* ========================================================================== */
459/* Register "SH2_BT_ENG_CSR_0" */
460/* Engine 0 Control and Status Register */
461/* ========================================================================== */
462
463#define SH2_BT_ENG_CSR_0 0x0000000030040000
464#define SH2_BT_ENG_SRC_ADDR_0 0x0000000030040080
465#define SH2_BT_ENG_DEST_ADDR_0 0x0000000030040100
466#define SH2_BT_ENG_NOTIF_ADDR_0 0x0000000030040180
467
468/* ========================================================================== */
469/* BTE interfaces 1-3 */
470/* ========================================================================== */
471
472#define SH2_BT_ENG_CSR_1 0x0000000030050000
473#define SH2_BT_ENG_CSR_2 0x0000000030060000
474#define SH2_BT_ENG_CSR_3 0x0000000030070000
475
441#endif /* _ASM_IA64_SN_SHUB_MMR_H */ 476#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h
index 88c31b53dc09..f914f6da077c 100644
--- a/include/asm-ia64/sn/sn_sal.h
+++ b/include/asm-ia64/sn/sn_sal.h
@@ -8,7 +8,7 @@
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 * 10 *
11 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All rights reserved. 11 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All rights reserved.
12 */ 12 */
13 13
14 14
@@ -35,8 +35,8 @@
35#define SN_SAL_PRINT_ERROR 0x02000012 35#define SN_SAL_PRINT_ERROR 0x02000012
36#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant 36#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant
37#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant 37#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant
38#define SN_SAL_GET_SN_INFO 0x0200001c
39#define SN_SAL_GET_SAPIC_INFO 0x0200001d 38#define SN_SAL_GET_SAPIC_INFO 0x0200001d
39#define SN_SAL_GET_SN_INFO 0x0200001e
40#define SN_SAL_CONSOLE_PUTC 0x02000021 40#define SN_SAL_CONSOLE_PUTC 0x02000021
41#define SN_SAL_CONSOLE_GETC 0x02000022 41#define SN_SAL_CONSOLE_GETC 0x02000022
42#define SN_SAL_CONSOLE_PUTS 0x02000023 42#define SN_SAL_CONSOLE_PUTS 0x02000023
@@ -64,6 +64,7 @@
64 64
65#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant 65#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
66#define SN_SAL_IROUTER_OP 0x02000043 66#define SN_SAL_IROUTER_OP 0x02000043
67#define SN_SAL_SYSCTL_EVENT 0x02000044
67#define SN_SAL_IOIF_INTERRUPT 0x0200004a 68#define SN_SAL_IOIF_INTERRUPT 0x0200004a
68#define SN_SAL_HWPERF_OP 0x02000050 // lock 69#define SN_SAL_HWPERF_OP 0x02000050 // lock
69#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051 70#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
@@ -76,7 +77,8 @@
76#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 77#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058
77 78
78#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060 79#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
79 80#define SN_SAL_BTE_RECOVER 0x02000061
81#define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000062
80 82
81/* 83/*
82 * Service-specific constants 84 * Service-specific constants
@@ -849,6 +851,19 @@ ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr)
849 return (int) rv.v0; 851 return (int) rv.v0;
850} 852}
851 853
854/*
855 * Set up a node as the point of contact for system controller
856 * environmental event delivery.
857 */
858static inline int
859ia64_sn_sysctl_event_init(nasid_t nasid)
860{
861 struct ia64_sal_retval rv;
862 SAL_CALL_REENTRANT(rv, SN_SAL_SYSCTL_EVENT, (u64) nasid,
863 0, 0, 0, 0, 0, 0);
864 return (int) rv.v0;
865}
866
852/** 867/**
853 * ia64_sn_get_fit_compt - read a FIT entry from the PROM header 868 * ia64_sn_get_fit_compt - read a FIT entry from the PROM header
854 * @nasid: NASID of node to read 869 * @nasid: NASID of node to read
@@ -1012,4 +1027,29 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2,
1012 return (int) rv.status; 1027 return (int) rv.status;
1013} 1028}
1014 1029
1030static inline int
1031ia64_sn_ioif_get_pci_topology(u64 rack, u64 bay, u64 slot, u64 slab,
1032 u64 buf, u64 len)
1033{
1034 struct ia64_sal_retval rv;
1035 SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY,
1036 rack, bay, slot, slab, buf, len, 0);
1037 return (int) rv.status;
1038}
1039
1040/*
1041 * BTE error recovery is implemented in SAL
1042 */
1043static inline int
1044ia64_sn_bte_recovery(nasid_t nasid)
1045{
1046 struct ia64_sal_retval rv;
1047
1048 rv.status = 0;
1049 SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, 0, 0, 0, 0, 0, 0, 0);
1050 if (rv.status == SALRET_NOT_IMPLEMENTED)
1051 return 0;
1052 return (int) rv.status;
1053}
1054
1015#endif /* _ASM_IA64_SN_SN_SAL_H */ 1055#endif /* _ASM_IA64_SN_SN_SAL_H */
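
ia64_sn_bte_recovery() above deliberately maps SALRET_NOT_IMPLEMENTED to success, so callers running on PROMs without SN_SAL_BTE_RECOVER quietly fall back to local recovery. A standalone sketch of that calling contract; the stub SAL call and the -1 error value are illustrative assumptions:

#include <stdio.h>

#define SALRET_NOT_IMPLEMENTED	(-1)	/* assumed value */

static long sal_bte_recover_stub(void)
{
	return SALRET_NOT_IMPLEMENTED;	/* pretend: PROM too old */
}

static int sn_bte_recovery(void)
{
	long status = sal_bte_recover_stub();

	if (status == SALRET_NOT_IMPLEMENTED)
		return 0;	/* not an error: caller recovers locally */
	return (int)status;
}

int main(void)
{
	printf("recovery status: %d\n", sn_bte_recovery());	/* 0 */
	return 0;
}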
diff --git a/include/asm-ia64/sn/tioca.h b/include/asm-ia64/sn/tioca.h
new file mode 100644
index 000000000000..bc1aacfb9483
--- /dev/null
+++ b/include/asm-ia64/sn/tioca.h
@@ -0,0 +1,596 @@
1#ifndef _ASM_IA64_SN_TIO_TIOCA_H
2#define _ASM_IA64_SN_TIO_TIOCA_H
3
4/*
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 *
9 * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
10 */
11
12
13#define TIOCA_PART_NUM 0xE020
14#define TIOCA_MFGR_NUM 0x24
15#define TIOCA_REV_A 0x1
16
17/*
18 * Register layout for TIO:CA. See below for bitmasks for each register.
19 */
20
21struct tioca {
22 uint64_t ca_id; /* 0x000000 */
23 uint64_t ca_control1; /* 0x000008 */
24 uint64_t ca_control2; /* 0x000010 */
25 uint64_t ca_status1; /* 0x000018 */
26 uint64_t ca_status2; /* 0x000020 */
27 uint64_t ca_gart_aperature; /* 0x000028 */
28 uint64_t ca_gfx_detach; /* 0x000030 */
29 uint64_t ca_inta_dest_addr; /* 0x000038 */
30 uint64_t ca_intb_dest_addr; /* 0x000040 */
31 uint64_t ca_err_int_dest_addr; /* 0x000048 */
32 uint64_t ca_int_status; /* 0x000050 */
33 uint64_t ca_int_status_alias; /* 0x000058 */
34 uint64_t ca_mult_error; /* 0x000060 */
35 uint64_t ca_mult_error_alias; /* 0x000068 */
36 uint64_t ca_first_error; /* 0x000070 */
37 uint64_t ca_int_mask; /* 0x000078 */
38 uint64_t ca_crm_pkterr_type; /* 0x000080 */
39 uint64_t ca_crm_pkterr_type_alias; /* 0x000088 */
40 uint64_t ca_crm_ct_error_detail_1; /* 0x000090 */
41 uint64_t ca_crm_ct_error_detail_2; /* 0x000098 */
42 uint64_t ca_crm_tnumto; /* 0x0000A0 */
43 uint64_t ca_gart_err; /* 0x0000A8 */
44 uint64_t ca_pcierr_type; /* 0x0000B0 */
45 uint64_t ca_pcierr_addr; /* 0x0000B8 */
46
47 uint64_t ca_pad_0000C0[3]; /* 0x0000{C0..D0} */
48
49 uint64_t ca_pci_rd_buf_flush; /* 0x0000D8 */
50 uint64_t ca_pci_dma_addr_extn; /* 0x0000E0 */
51 uint64_t ca_agp_dma_addr_extn; /* 0x0000E8 */
52 uint64_t ca_force_inta; /* 0x0000F0 */
53 uint64_t ca_force_intb; /* 0x0000F8 */
54 uint64_t ca_debug_vector_sel; /* 0x000100 */
55 uint64_t ca_debug_mux_core_sel; /* 0x000108 */
56 uint64_t ca_debug_mux_pci_sel; /* 0x000110 */
57 uint64_t ca_debug_domain_sel; /* 0x000118 */
58
59 uint64_t ca_pad_000120[28]; /* 0x0001{20..F8} */
60
61 uint64_t ca_gart_ptr_table; /* 0x200 */
62 uint64_t ca_gart_tlb_addr[8]; /* 0x2{08..40} */
63};
64
65/*
66 * Mask/shift definitions for TIO:CA registers. The convention here is
67 * to mainly use the names as they appear in the "TIO AEGIS Programmers'
68 * Reference" with a CA_ prefix added. Some exceptions were made to fix
69 * duplicate field names or to generalize fields that are common to
70 * different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
71 * example).
72 *
73 * Fields consisting of a single bit have a single macro declaration
74 * to mask the bit. Fields consisting of multiple bits
75 * have two declarations: one to mask the proper bits in a register, and
76 * a second with the suffix "_SHFT" to identify how far the mask needs to
77 * be shifted right to get its base value.
78 */
79
80/* ==== ca_control1 */
81#define CA_SYS_BIG_END (1ull << 0)
82#define CA_DMA_AGP_SWAP (1ull << 1)
83#define CA_DMA_PCI_SWAP (1ull << 2)
84#define CA_PIO_IO_SWAP (1ull << 3)
85#define CA_PIO_MEM_SWAP (1ull << 4)
86#define CA_GFX_WR_SWAP (1ull << 5)
87#define CA_AGP_FW_ENABLE (1ull << 6)
88#define CA_AGP_CAL_CYCLE (0x7ull << 7)
89#define CA_AGP_CAL_CYCLE_SHFT 7
90#define CA_AGP_CAL_PRSCL_BYP (1ull << 10)
91#define CA_AGP_INIT_CAL_ENB (1ull << 11)
92#define CA_INJ_ADDR_PERR (1ull << 12)
93#define CA_INJ_DATA_PERR (1ull << 13)
94 /* bits 15:14 unused */
95#define CA_PCIM_IO_NBE_AD (0x7ull << 16)
96#define CA_PCIM_IO_NBE_AD_SHFT 16
97#define CA_PCIM_FAST_BTB_ENB (1ull << 19)
98 /* bits 23:20 unused */
99#define CA_PIO_ADDR_OFFSET (0xffull << 24)
100#define CA_PIO_ADDR_OFFSET_SHFT 24
101 /* bits 35:32 unused */
102#define CA_AGPDMA_OP_COMBDELAY (0x1full << 36)
103#define CA_AGPDMA_OP_COMBDELAY_SHFT 36
104 /* bit 41 unused */
105#define CA_AGPDMA_OP_ENB_COMBDELAY (1ull << 42)
106#define CA_PCI_INT_LPCNT (0xffull << 44)
107#define CA_PCI_INT_LPCNT_SHFT 44
108 /* bits 63:52 unused */
109
110/* ==== ca_control2 */
111#define CA_AGP_LATENCY_TO (0xffull << 0)
112#define CA_AGP_LATENCY_TO_SHFT 0
113#define CA_PCI_LATENCY_TO (0xffull << 8)
114#define CA_PCI_LATENCY_TO_SHFT 8
115#define CA_PCI_MAX_RETRY (0x3ffull << 16)
116#define CA_PCI_MAX_RETRY_SHFT 16
117 /* bits 27:26 unused */
118#define CA_RT_INT_EN (0x3ull << 28)
119#define CA_RT_INT_EN_SHFT 28
120#define CA_MSI_INT_ENB (1ull << 30)
121#define CA_PCI_ARB_ERR_ENB (1ull << 31)
122#define CA_GART_MEM_PARAM (0x3ull << 32)
123#define CA_GART_MEM_PARAM_SHFT 32
124#define CA_GART_RD_PREFETCH_ENB (1ull << 34)
125#define CA_GART_WR_PREFETCH_ENB (1ull << 35)
126#define CA_GART_FLUSH_TLB (1ull << 36)
127 /* bits 39:37 unused */
128#define CA_CRM_TNUMTO_PERIOD (0x1fffull << 40)
129#define CA_CRM_TNUMTO_PERIOD_SHFT 40
130 /* bits 55:53 unused */
131#define CA_CRM_TNUMTO_ENB (1ull << 56)
132#define CA_CRM_PRESCALER_BYP (1ull << 57)
133 /* bits 59:58 unused */
134#define CA_CRM_MAX_CREDIT (0x7ull << 60)
135#define CA_CRM_MAX_CREDIT_SHFT 60
136 /* bit 63 unused */
137
138/* ==== ca_status1 */
139#define CA_CORELET_ID (0x3ull << 0)
140#define CA_CORELET_ID_SHFT 0
141#define CA_INTA_N (1ull << 2)
142#define CA_INTB_N (1ull << 3)
143#define CA_CRM_CREDIT_AVAIL (0x7ull << 4)
144#define CA_CRM_CREDIT_AVAIL_SHFT 4
145 /* bit 7 unused */
146#define CA_CRM_SPACE_AVAIL (0x7full << 8)
147#define CA_CRM_SPACE_AVAIL_SHFT 8
148 /* bit 15 unused */
149#define CA_GART_TLB_VAL (0xffull << 16)
150#define CA_GART_TLB_VAL_SHFT 16
151 /* bits 63:24 unused */
152
153/* ==== ca_status2 */
154#define CA_GFX_CREDIT_AVAIL (0xffull << 0)
155#define CA_GFX_CREDIT_AVAIL_SHFT 0
156#define CA_GFX_OPQ_AVAIL (0xffull << 8)
157#define CA_GFX_OPQ_AVAIL_SHFT 8
158#define CA_GFX_WRBUFF_AVAIL (0xffull << 16)
159#define CA_GFX_WRBUFF_AVAIL_SHFT 16
160#define CA_ADMA_OPQ_AVAIL (0xffull << 24)
161#define CA_ADMA_OPQ_AVAIL_SHFT 24
162#define CA_ADMA_WRBUFF_AVAIL (0xffull << 32)
163#define CA_ADMA_WRBUFF_AVAIL_SHFT 32
164#define CA_ADMA_RDBUFF_AVAIL (0x7full << 40)
165#define CA_ADMA_RDBUFF_AVAIL_SHFT 40
166#define CA_PCI_PIO_OP_STAT (1ull << 47)
167#define CA_PDMA_OPQ_AVAIL (0xfull << 48)
168#define CA_PDMA_OPQ_AVAIL_SHFT 48
169#define CA_PDMA_WRBUFF_AVAIL (0xfull << 52)
170#define CA_PDMA_WRBUFF_AVAIL_SHFT 52
171#define CA_PDMA_RDBUFF_AVAIL (0x3ull << 56)
172#define CA_PDMA_RDBUFF_AVAIL_SHFT 56
173 /* bits 63:58 unused */
174
175/* ==== ca_gart_aperature */
176#define CA_GART_AP_ENB_AGP (1ull << 0)
177#define CA_GART_PAGE_SIZE (1ull << 1)
178#define CA_GART_AP_ENB_PCI (1ull << 2)
179 /* bits 11:3 unused */
180#define CA_GART_AP_SIZE (0x3ffull << 12)
181#define CA_GART_AP_SIZE_SHFT 12
182#define CA_GART_AP_BASE (0x3ffffffffffull << 22)
183#define CA_GART_AP_BASE_SHFT 22
184
185/* ==== ca_inta_dest_addr
186 ==== ca_intb_dest_addr
187 ==== ca_err_int_dest_addr */
188 /* bits 2:0 unused */
189#define CA_INT_DEST_ADDR (0x7ffffffffffffull << 3)
190#define CA_INT_DEST_ADDR_SHFT 3
191 /* bits 55:54 unused */
192#define CA_INT_DEST_VECT (0xffull << 56)
193#define CA_INT_DEST_VECT_SHFT 56
194
195/* ==== ca_int_status */
196/* ==== ca_int_status_alias */
197/* ==== ca_mult_error */
198/* ==== ca_mult_error_alias */
199/* ==== ca_first_error */
200/* ==== ca_int_mask */
201#define CA_PCI_ERR (1ull << 0)
202 /* bits 3:1 unused */
203#define CA_GART_FETCH_ERR (1ull << 4)
204#define CA_GFX_WR_OVFLW (1ull << 5)
205#define CA_PIO_REQ_OVFLW (1ull << 6)
206#define CA_CRM_PKTERR (1ull << 7)
207#define CA_CRM_DVERR (1ull << 8)
208#define CA_TNUMTO (1ull << 9)
209#define CA_CXM_RSP_CRED_OVFLW (1ull << 10)
210#define CA_CXM_REQ_CRED_OVFLW (1ull << 11)
211#define CA_PIO_INVALID_ADDR (1ull << 12)
212#define CA_PCI_ARB_TO (1ull << 13)
213#define CA_AGP_REQ_OFLOW (1ull << 14)
214#define CA_SBA_TYPE1_ERR (1ull << 15)
215 /* bit 16 unused */
216#define CA_INTA (1ull << 17)
217#define CA_INTB (1ull << 18)
218#define CA_MULT_INTA (1ull << 19)
219#define CA_MULT_INTB (1ull << 20)
220#define CA_GFX_CREDIT_OVFLW (1ull << 21)
221 /* bits 63:22 unused */
222
223/* ==== ca_crm_pkterr_type */
224/* ==== ca_crm_pkterr_type_alias */
225#define CA_CRM_PKTERR_SBERR_HDR (1ull << 0)
226#define CA_CRM_PKTERR_DIDN (1ull << 1)
227#define CA_CRM_PKTERR_PACTYPE (1ull << 2)
228#define CA_CRM_PKTERR_INV_TNUM (1ull << 3)
229#define CA_CRM_PKTERR_ADDR_RNG (1ull << 4)
230#define CA_CRM_PKTERR_ADDR_ALGN (1ull << 5)
231#define CA_CRM_PKTERR_HDR_PARAM (1ull << 6)
232#define CA_CRM_PKTERR_CW_ERR (1ull << 7)
233#define CA_CRM_PKTERR_SBERR_NH (1ull << 8)
234#define CA_CRM_PKTERR_EARLY_TERM (1ull << 9)
235#define CA_CRM_PKTERR_EARLY_TAIL (1ull << 10)
236#define CA_CRM_PKTERR_MSSNG_TAIL (1ull << 11)
237#define CA_CRM_PKTERR_MSSNG_HDR (1ull << 12)
238 /* bits 15:13 unused */
239#define CA_FIRST_CRM_PKTERR_SBERR_HDR (1ull << 16)
240#define CA_FIRST_CRM_PKTERR_DIDN (1ull << 17)
241#define CA_FIRST_CRM_PKTERR_PACTYPE (1ull << 18)
242#define CA_FIRST_CRM_PKTERR_INV_TNUM (1ull << 19)
243#define CA_FIRST_CRM_PKTERR_ADDR_RNG (1ull << 20)
244#define CA_FIRST_CRM_PKTERR_ADDR_ALGN (1ull << 21)
245#define CA_FIRST_CRM_PKTERR_HDR_PARAM (1ull << 22)
246#define CA_FIRST_CRM_PKTERR_CW_ERR (1ull << 23)
247#define CA_FIRST_CRM_PKTERR_SBERR_NH (1ull << 24)
248#define CA_FIRST_CRM_PKTERR_EARLY_TERM (1ull << 25)
249#define CA_FIRST_CRM_PKTERR_EARLY_TAIL (1ull << 26)
250#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL (1ull << 27)
251#define CA_FIRST_CRM_PKTERR_MSSNG_HDR (1ull << 28)
252 /* bits 63:29 unused */
253
254/* ==== ca_crm_ct_error_detail_1 */
255#define CA_PKT_TYPE (0xfull << 0)
256#define CA_PKT_TYPE_SHFT 0
257#define CA_SRC_ID (0x3ull << 4)
258#define CA_SRC_ID_SHFT 4
259#define CA_DATA_SZ (0x3ull << 6)
260#define CA_DATA_SZ_SHFT 6
261#define CA_TNUM (0xffull << 8)
262#define CA_TNUM_SHFT 8
263#define CA_DW_DATA_EN (0xffull << 16)
264#define CA_DW_DATA_EN_SHFT 16
265#define CA_GFX_CRED (0xffull << 24)
266#define CA_GFX_CRED_SHFT 24
267#define CA_MEM_RD_PARAM (0x3ull << 32)
268#define CA_MEM_RD_PARAM_SHFT 32
269#define CA_PIO_OP (1ull << 34)
270#define CA_CW_ERR (1ull << 35)
271 /* bits 62:36 unused */
272#define CA_VALID (1ull << 63)
273
274/* ==== ca_crm_ct_error_detail_2 */
275 /* bits 2:0 unused */
276#define CA_PKT_ADDR (0x1fffffffffffffull << 3)
277#define CA_PKT_ADDR_SHFT 3
278 /* bits 63:56 unused */
279
280/* ==== ca_crm_tnumto */
281#define CA_CRM_TNUMTO_VAL (0xffull << 0)
282#define CA_CRM_TNUMTO_VAL_SHFT 0
283#define CA_CRM_TNUMTO_WR (1ull << 8)
284 /* bits 63:9 unused */
285
286/* ==== ca_gart_err */
287#define CA_GART_ERR_SOURCE (0x3ull << 0)
288#define CA_GART_ERR_SOURCE_SHFT 0
289 /* bits 3:2 unused */
290#define CA_GART_ERR_ADDR (0xfffffffffull << 4)
291#define CA_GART_ERR_ADDR_SHFT 4
292 /* bits 63:40 unused */
293
294/* ==== ca_pcierr_type */
295#define CA_PCIERR_DATA (0xffffffffull << 0)
296#define CA_PCIERR_DATA_SHFT 0
297#define CA_PCIERR_ENB (0xfull << 32)
298#define CA_PCIERR_ENB_SHFT 32
299#define CA_PCIERR_CMD (0xfull << 36)
300#define CA_PCIERR_CMD_SHFT 36
301#define CA_PCIERR_A64 (1ull << 40)
302#define CA_PCIERR_SLV_SERR (1ull << 41)
303#define CA_PCIERR_SLV_WR_PERR (1ull << 42)
304#define CA_PCIERR_SLV_RD_PERR (1ull << 43)
305#define CA_PCIERR_MST_SERR (1ull << 44)
306#define CA_PCIERR_MST_WR_PERR (1ull << 45)
307#define CA_PCIERR_MST_RD_PERR (1ull << 46)
308#define CA_PCIERR_MST_MABT (1ull << 47)
309#define CA_PCIERR_MST_TABT (1ull << 48)
310#define CA_PCIERR_MST_RETRY_TOUT (1ull << 49)
311
312#define CA_PCIERR_TYPES \
313 (CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \
314 CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \
315 CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \
316 CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT)
317
318 /* bits 63:50 unused */
319
320/* ==== ca_pci_dma_addr_extn */
321#define CA_UPPER_NODE_OFFSET (0x3full << 0)
322#define CA_UPPER_NODE_OFFSET_SHFT 0
323 /* bits 7:6 unused */
324#define CA_CHIPLET_ID (0x3ull << 8)
325#define CA_CHIPLET_ID_SHFT 8
326 /* bits 11:10 unused */
327#define CA_PCI_DMA_NODE_ID (0xffffull << 12)
328#define CA_PCI_DMA_NODE_ID_SHFT 12
329 /* bits 27:26 unused */
330#define CA_PCI_DMA_PIO_MEM_TYPE (1ull << 28)
331 /* bits 63:29 unused */
332
333
334/* ==== ca_agp_dma_addr_extn */
335 /* bits 19:0 unused */
336#define CA_AGP_DMA_NODE_ID (0xffffull << 20)
337#define CA_AGP_DMA_NODE_ID_SHFT 20
338 /* bits 27:26 unused */
339#define CA_AGP_DMA_PIO_MEM_TYPE (1ull << 28)
340 /* bits 63:29 unused */
341
342/* ==== ca_debug_vector_sel */
343#define CA_DEBUG_MN_VSEL (0xfull << 0)
344#define CA_DEBUG_MN_VSEL_SHFT 0
345#define CA_DEBUG_PP_VSEL (0xfull << 4)
346#define CA_DEBUG_PP_VSEL_SHFT 4
347#define CA_DEBUG_GW_VSEL (0xfull << 8)
348#define CA_DEBUG_GW_VSEL_SHFT 8
349#define CA_DEBUG_GT_VSEL (0xfull << 12)
350#define CA_DEBUG_GT_VSEL_SHFT 12
351#define CA_DEBUG_PD_VSEL (0xfull << 16)
352#define CA_DEBUG_PD_VSEL_SHFT 16
353#define CA_DEBUG_AD_VSEL (0xfull << 20)
354#define CA_DEBUG_AD_VSEL_SHFT 20
355#define CA_DEBUG_CX_VSEL (0xfull << 24)
356#define CA_DEBUG_CX_VSEL_SHFT 24
357#define CA_DEBUG_CR_VSEL (0xfull << 28)
358#define CA_DEBUG_CR_VSEL_SHFT 28
359#define CA_DEBUG_BA_VSEL (0xfull << 32)
360#define CA_DEBUG_BA_VSEL_SHFT 32
361#define CA_DEBUG_PE_VSEL (0xfull << 36)
362#define CA_DEBUG_PE_VSEL_SHFT 36
363#define CA_DEBUG_BO_VSEL (0xfull << 40)
364#define CA_DEBUG_BO_VSEL_SHFT 40
365#define CA_DEBUG_BI_VSEL (0xfull << 44)
366#define CA_DEBUG_BI_VSEL_SHFT 44
367#define CA_DEBUG_AS_VSEL (0xfull << 48)
368#define CA_DEBUG_AS_VSEL_SHFT 48
369#define CA_DEBUG_PS_VSEL (0xfull << 52)
370#define CA_DEBUG_PS_VSEL_SHFT 52
371#define CA_DEBUG_PM_VSEL (0xfull << 56)
372#define CA_DEBUG_PM_VSEL_SHFT 56
373 /* bits 63:60 unused */
374
375/* ==== ca_debug_mux_core_sel */
376/* ==== ca_debug_mux_pci_sel */
377#define CA_DEBUG_MSEL0 (0x7ull << 0)
378#define CA_DEBUG_MSEL0_SHFT 0
379 /* bit 3 unused */
380#define CA_DEBUG_NSEL0 (0x7ull << 4)
381#define CA_DEBUG_NSEL0_SHFT 4
382 /* bit 7 unused */
383#define CA_DEBUG_MSEL1 (0x7ull << 8)
384#define CA_DEBUG_MSEL1_SHFT 8
385 /* bit 11 unused */
386#define CA_DEBUG_NSEL1 (0x7ull << 12)
387#define CA_DEBUG_NSEL1_SHFT 12
388 /* bit 15 unused */
389#define CA_DEBUG_MSEL2 (0x7ull << 16)
390#define CA_DEBUG_MSEL2_SHFT 16
391 /* bit 19 unused */
392#define CA_DEBUG_NSEL2 (0x7ull << 20)
393#define CA_DEBUG_NSEL2_SHFT 20
394 /* bit 23 unused */
395#define CA_DEBUG_MSEL3 (0x7ull << 24)
396#define CA_DEBUG_MSEL3_SHFT 24
397 /* bit 27 unused */
398#define CA_DEBUG_NSEL3 (0x7ull << 28)
399#define CA_DEBUG_NSEL3_SHFT 28
400 /* bit 31 unused */
401#define CA_DEBUG_MSEL4 (0x7ull << 32)
402#define CA_DEBUG_MSEL4_SHFT 32
403 /* bit 35 unused */
404#define CA_DEBUG_NSEL4 (0x7ull << 36)
405#define CA_DEBUG_NSEL4_SHFT 36
406 /* bit 39 unused */
407#define CA_DEBUG_MSEL5 (0x7ull << 40)
408#define CA_DEBUG_MSEL5_SHFT 40
409 /* bit 43 unused */
410#define CA_DEBUG_NSEL5 (0x7ull << 44)
411#define CA_DEBUG_NSEL5_SHFT 44
412 /* bit 47 unused */
413#define CA_DEBUG_MSEL6 (0x7ull << 48)
414#define CA_DEBUG_MSEL6_SHFT 48
415 /* bit 51 unused */
416#define CA_DEBUG_NSEL6 (0x7ull << 52)
417#define CA_DEBUG_NSEL6_SHFT 52
418 /* bit 55 unused */
419#define CA_DEBUG_MSEL7 (0x7ull << 56)
420#define CA_DEBUG_MSEL7_SHFT 56
421 /* bit 59 unused */
422#define CA_DEBUG_NSEL7 (0x7ull << 60)
423#define CA_DEBUG_NSEL7_SHFT 60
424 /* bit 63 unused */
425
426
427/* ==== ca_debug_domain_sel */
428#define CA_DEBUG_DOMAIN_L (1ull << 0)
429#define CA_DEBUG_DOMAIN_H (1ull << 1)
430 /* bits 63:2 unused */
431
432/* ==== ca_gart_ptr_table */
433#define CA_GART_PTR_VAL (1ull << 0)
434 /* bits 11:1 unused */
435#define CA_GART_PTR_ADDR (0xfffffffffffull << 12)
436#define CA_GART_PTR_ADDR_SHFT 12
437 /* bits 63:56 unused */
438
439/* ==== ca_gart_tlb_addr[0-7] */
440#define CA_GART_TLB_ADDR (0xffffffffffffffull << 0)
441#define CA_GART_TLB_ADDR_SHFT 0
442 /* bits 62:56 unused */
443#define CA_GART_TLB_ENTRY_VAL (1ull << 63)
444
445/*
446 * PIO address space ranges for TIO:CA
447 */
448
449/* CA internal registers */
450#define CA_PIO_ADMIN 0x00000000
451#define CA_PIO_ADMIN_LEN 0x00010000
452
453/* GFX Write Buffer - Diagnostics */
454#define CA_PIO_GFX 0x00010000
455#define CA_PIO_GFX_LEN 0x00010000
456
457/* AGP DMA Write Buffer - Diagnostics */
458#define CA_PIO_AGP_DMAWRITE 0x00020000
459#define CA_PIO_AGP_DMAWRITE_LEN 0x00010000
460
461/* AGP DMA READ Buffer - Diagnostics */
462#define CA_PIO_AGP_DMAREAD 0x00030000
463#define CA_PIO_AGP_DMAREAD_LEN 0x00010000
464
465/* PCI Config Type 0 */
466#define CA_PIO_PCI_TYPE0_CONFIG 0x01000000
467#define CA_PIO_PCI_TYPE0_CONFIG_LEN 0x01000000
468
469/* PCI Config Type 1 */
470#define CA_PIO_PCI_TYPE1_CONFIG 0x02000000
471#define CA_PIO_PCI_TYPE1_CONFIG_LEN 0x01000000
472
473/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
474#define CA_PIO_PCI_IO 0x03000000
475#define CA_PIO_PCI_IO_LEN 0x05000000
476
477/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
478/* use Fast Write if enabled and coretalk packet type is a GFX request */
479#define CA_PIO_PCI_MEM_OFFSET 0x08000000
480#define CA_PIO_PCI_MEM_OFFSET_LEN 0x08000000
481
482/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
483/* use Fast Write if enabled and coretalk packet type is a GFX request */
484#define CA_PIO_PCI_MEM 0x40000000
485#define CA_PIO_PCI_MEM_LEN 0xc0000000
486
487/*
488 * DMA space
489 *
490 * The CA aperture (i.e. bus address range) mapped by the GART is segmented into
491 * two parts. The lower portion of the aperture is used for mapping 32 bit
492 * PCI addresses which are managed by the dma interfaces in this file. The
493 * upper portion of the aperture is used for mapping 48 bit AGP addresses.
494 * The AGP portion of the aperture is managed by the agpgart_be.c driver
495 * in drivers/linux/agp. There are ca-specific hooks in that driver to
496 * manipulate the gart, but management of the AGP portion of the aperture
497 * is the responsibility of that driver.
498 *
499 * CA allows three main types of DMA mapping:
500 *
501 * PCI 64-bit Managed by this driver
502 * PCI 32-bit Managed by this driver
503 * AGP 48-bit Managed by hooks in the /dev/agpgart driver
504 *
505 * All of the above can optionally be remapped through the GART. The following
506 * table lists the combinations of addressing types and GART remapping that
507 * are currently supported by the driver (h/w supports all, s/w limits this):
508 *
509 * PCI64 PCI32 AGP48
510 * GART no yes yes
511 * Direct yes yes no
512 *
513 * GART remapping of PCI64 is not done because there is no need to. The
514 * 64 bit PCI address holds all of the information necessary to target any
515 * memory in the system.
516 *
517 * AGP48 is always mapped through the GART. Management of the AGP48 portion
518 * of the aperature is the responsibility of code in the agpgart_be driver.
519 *
520 * The non-64 bit bus address space will currently be partitioned like this:
521 *
522 * 0xffff_ffff_ffff +--------
523 * | AGP48 direct
524 * | Space managed by this driver
525 * CA_AGP_DIRECT_BASE +--------
526 *	| AGP GART mapped (gfx aperture)
527 *	| Space managed by /dev/agpgart driver
528 *	| This range is exposed to the agpgart
529 *	| driver as the "graphics aperture"
530 * CA_AGP_MAPPED_BASE +-----
531 * | PCI GART mapped
532 * | Space managed by this driver
533 * CA_PCI32_MAPPED_BASE +----
534 * | PCI32 direct
535 * | Space managed by this driver
536 * 0xC000_0000 +--------
537 * (CA_PCI32_DIRECT_BASE)
538 *
539 * The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE
540 * is what we call the CA aperture. Addresses falling in this range will
541 * be remapped using the GART.
542 *
543 * The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE
544 * is what we call the graphics aperture. This is a subset of the CA
545 * aperture and is under the control of the agpgart_be driver.
546 *
547 * CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are
548 * somewhat arbitrary values. The known constraints on choosing these is:
549 *
550 *	1) CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperture size)
551 *	   must be one of the values supported by the ca_gart_aperature register.
552 *	   Currently valid values are: 4MB through 4096MB in power-of-2 increments
553 *
554 *	2) CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperture size)
555 * must be in MB units since that's what the agpgart driver assumes.
556 */
557
558/*
559 * Define Bus DMA ranges. These are configurable (see constraints above)
560 * and will probably need tuning based on experience.
561 */
562
563
564/*
565 * 11/24/03
566 * CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it
567 * generally unusable. The problem is that for PCI direct 32
568 * DMA's, all 32 bits of the bus address are used to form the lower 32 bits
569 * of the coretalk address, and coretalk bits 38:32 come from a register.
570 * Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available
571 * for DMA (the rest is allocated to PIO), host node addresses need to be
572 * such that their lower 32 bits fall in the 0xC0000000-0xffffffff range
573 * as well. So there can be no PCI32 direct DMA below 3GB!! For this
574 * reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes
575 * tioca_dma_direct32() a noop but preserves the code flow should this issue
576 * be fixed in a respin.
577 *
578 * For now, all PCI32 DMA's must be mapped through the GART.
579 */
580
581#define CA_PCI32_DIRECT_BASE 0xC0000000UL /* BASE not configurable */
582#define CA_PCI32_DIRECT_SIZE 0x00000000UL /* 0 MB */
583
584#define CA_PCI32_MAPPED_BASE 0xC0000000UL
585#define CA_PCI32_MAPPED_SIZE	0x40000000UL	/* 1GB */
586
587#define CA_AGP_MAPPED_BASE	0x80000000UL
588#define CA_AGP_MAPPED_SIZE	0x40000000UL	/* 1GB */
589
590#define CA_AGP_DIRECT_BASE	0x40000000UL	/* 1GB */
591#define CA_AGP_DIRECT_SIZE 0x40000000UL
592
593#define CA_APERATURE_BASE (CA_AGP_MAPPED_BASE)
594#define CA_APERATURE_SIZE (CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE)
595
596#endif /* _ASM_IA64_SN_TIO_TIOCA_H */
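
The constraints stated in the comment block above can be checked against the values this file picks: the CA aperture (PCI32 mapped plus AGP mapped) must be a power of 2 between 4MB and 4096MB, and the gfx aperture must be a whole number of MB. A standalone check (assumes 64-bit long, as on ia64):

#include <stdio.h>

#define CA_PCI32_MAPPED_SIZE	0x40000000UL	/* 1GB */
#define CA_AGP_MAPPED_SIZE	0x40000000UL	/* 1GB */
#define CA_APERATURE_SIZE	(CA_AGP_MAPPED_SIZE + CA_PCI32_MAPPED_SIZE)

#define MB	(1024UL * 1024UL)

int main(void)
{
	unsigned long ap = CA_APERATURE_SIZE;

	/* 1) CA aperture: power of 2, 4MB .. 4096MB */
	int pow2 = (ap & (ap - 1)) == 0;
	int in_range = ap >= 4 * MB && ap <= 4096 * MB;

	printf("CA aperture %luMB: pow2=%d in_range=%d\n",
	       ap / MB, pow2, in_range);		/* 2048MB 1 1 */

	/* 2) gfx aperture must be a whole number of MB (agpgart assumes it) */
	printf("gfx aperture MB-aligned: %d\n",
	       (CA_AGP_MAPPED_SIZE % MB) == 0);		/* 1 */
	return 0;
}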
diff --git a/include/asm-ia64/sn/tioca_provider.h b/include/asm-ia64/sn/tioca_provider.h
new file mode 100644
index 000000000000..b6acc22ab239
--- /dev/null
+++ b/include/asm-ia64/sn/tioca_provider.h
@@ -0,0 +1,206 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
10#define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
11
12#include <asm/sn/tioca.h>
13
14/*
15 * WAR enables
16 * Defines for individual WARs. Each is a bitmask of applicable
17 * part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
18 * (3 << 1) == (rev A or rev B), etc
19 */
20
21#define TIOCA_WAR_ENABLED(pv, tioca_common) \
22 ((1 << tioca_common->ca_rev) & pv)
23
24 /* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */
25#define PV907908 (1 << 1)
26 /* ATI config space problems after BIOS execution starts */
27#define PV908234 (1 << 1)
28 /* CA:AGPDMA write request data mismatch with ABC1CL merge */
29#define PV895469 (1 << 1)
30	/* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA */
31#define PV910244 (1 << 1)
32
33struct tioca_dmamap{
34 struct list_head cad_list; /* headed by ca_list */
35
36 dma_addr_t cad_dma_addr; /* Linux dma handle */
37 uint cad_gart_entry; /* start entry in ca_gart_pagemap */
38 uint cad_gart_size; /* #entries for this map */
39};
40
41/*
42 * Kernel only fields. Prom may look at this stuff for debugging only.
43 * Access this structure through the ca_kernel_private ptr.
44 */
45
46struct tioca_common;
47
48struct tioca_kernel {
49 struct tioca_common *ca_common; /* tioca this belongs to */
50 struct list_head ca_list; /* list of all ca's */
51 struct list_head ca_dmamaps;
52 spinlock_t ca_lock; /* Kernel lock */
53 cnodeid_t ca_closest_node;
54 struct list_head *ca_devices; /* bus->devices */
55
56 /*
57 * General GART stuff
58 */
59	uint64_t	ca_ap_size;	/* size of aperture in bytes */
60	uint32_t	ca_gart_entries; /* # uint64_t entries in gart */
61	uint32_t	ca_ap_pagesize;	/* aperture page size in bytes */
62	uint64_t	ca_ap_bus_base;	/* bus address of CA aperture */
63 uint64_t ca_gart_size; /* gart size in bytes */
64 uint64_t *ca_gart; /* gart table vaddr */
65 uint64_t ca_gart_coretalk_addr; /* gart coretalk addr */
66 uint8_t ca_gart_iscoherent; /* used in tioca_tlbflush */
67
68 /* PCI GART convenience values */
69	uint64_t	ca_pciap_base;	/* pci aperture bus base address */
70	uint64_t	ca_pciap_size;	/* pci aperture size (bytes) */
71	uint64_t	ca_pcigart_base; /* pci GART bus base address */
72	uint64_t	*ca_pcigart;	/* pci GART vm address */
73 uint32_t ca_pcigart_entries;
74 uint32_t ca_pcigart_start; /* PCI start index in ca_gart */
75 void *ca_pcigart_pagemap;
76
77 /* AGP GART convenience values */
78	uint64_t	ca_gfxap_base;	/* gfx aperture bus base address */
79	uint64_t	ca_gfxap_size;	/* gfx aperture size (bytes) */
80 uint64_t ca_gfxgart_base; /* gfx GART bus base address */
81 uint64_t *ca_gfxgart; /* gfx GART vm address */
82 uint32_t ca_gfxgart_entries;
83 uint32_t ca_gfxgart_start; /* agpgart start index in ca_gart */
84};
85
86/*
87 * Common tioca info shared between kernel and prom
88 *
89 * DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES
90 * TO THE PROM VERSION.
91 */
92
93struct tioca_common {
94 struct pcibus_bussoft ca_common; /* common pciio header */
95
96 uint32_t ca_rev;
97 uint32_t ca_closest_nasid;
98
99 uint64_t ca_prom_private;
100 uint64_t ca_kernel_private;
101};
102
103/**
104 * tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry
105 * @paddr: page address to convert
106 *
107 * Convert a system [coretalk] address to a GART entry. GART entries are
108 * formed using the following:
109 *
110 * data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) |
111 * (REMAP_SYS_ADDR) ) >> 12 )
112 *
113 * DATA written to 1 GART TABLE Entry in system memory is remapped system
114 * addr for 1 page
115 *
116 * The data is for coretalk address format right shifted 12 bits with a
117 * valid bit.
118 *
119 * GART_TABLE_ENTRY [ 25:0 ] -- REMAP_SYS_ADDRESS[37:12].
120 * GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id.
121 * GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID.
122 * GART_TABLE_ENTRY [ 63 ] -- Valid Bit
123 */
124static inline u64
125tioca_paddr_to_gart(unsigned long paddr)
126{
127 /*
128 * We are assuming right now that paddr already has the correct
129 * format since the address from xtalk_dmaXXX should already have
130 * NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations.
131 */
132
133 return ((paddr) >> 12) | (1UL << 63);
134}
135
136/**
137 * tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA
138 * @page_addr: system page address to map
139 */
140
141static inline unsigned long
142tioca_physpage_to_gart(uint64_t page_addr)
143{
144 uint64_t coretalk_addr;
145
146 coretalk_addr = PHYS_TO_TIODMA(page_addr);
147 if (!coretalk_addr) {
148 return 0;
149 }
150
151 return tioca_paddr_to_gart(coretalk_addr);
152}
153
154/**
155 * tioca_tlbflush - invalidate cached SGI CA GART TLB entries
156 * @tioca_kernel: CA context
157 *
158 * Invalidate TLB entries for a given CA GART. Main complexity is to account
159 * for the revA bug.
160 */
161static inline void
162tioca_tlbflush(struct tioca_kernel *tioca_kernel)
163{
164 volatile uint64_t tmp;
165 volatile struct tioca *ca_base;
166 struct tioca_common *tioca_common;
167
168 tioca_common = tioca_kernel->ca_common;
169 ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
170
171 /*
172 * Explicit flushes not needed if GART is in cached mode
173 */
174 if (tioca_kernel->ca_gart_iscoherent) {
175 if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) {
176 /*
177 * PV910244: RevA CA needs explicit flushes.
178 * Need to put GART into uncached mode before
179 * flushing otherwise the explicit flush is ignored.
180 *
181 * Alternate WAR would be to leave GART cached and
182 * touch every CL aligned GART entry.
183 */
184
185 ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
186 ca_base->ca_control2 |= CA_GART_FLUSH_TLB;
187 ca_base->ca_control2 |=
188 (0x2ull << CA_GART_MEM_PARAM_SHFT);
189 tmp = ca_base->ca_control2;
190 }
191
192 return;
193 }
194
195 /*
196 * Gart in uncached mode ... need an explicit flush.
197 */
198
199 ca_base->ca_control2 |= CA_GART_FLUSH_TLB;
200 tmp = ca_base->ca_control2;
201}
202
203extern uint32_t tioca_gart_found;
204extern int tioca_init_provider(void);
205extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
206#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */
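
A worked example of the tioca_paddr_to_gart() bit layout documented above: build a coretalk address with known node, chiplet and system-address fields, form the entry, and decode the documented GART fields back out. The field positions for the input address follow the docbook comment and are used here for illustration only:

#include <stdio.h>
#include <stdint.h>

static uint64_t tioca_paddr_to_gart(uint64_t paddr)
{
	return (paddr >> 12) | (1UL << 63);	/* shift + valid bit */
}

int main(void)
{
	uint64_t node = 0x7, chiplet = 0x2, sysaddr = 0xabcd000;
	/* assumed input layout: node at bit 40, chiplet at 38, sys 37:12 */
	uint64_t paddr = (node << 40) | (chiplet << 38) | sysaddr;
	uint64_t e = tioca_paddr_to_gart(paddr);

	/* GART_TABLE_ENTRY[63]=valid, [41:28]=node, [27:26]=chiplet,
	 * [25:0]=REMAP_SYS_ADDRESS[37:12] */
	printf("valid=%llu node=0x%llx chiplet=0x%llx sys[25:0]=0x%llx\n",
	       (unsigned long long)(e >> 63),
	       (unsigned long long)((e >> 28) & 0x3fff),	/* 0x7 */
	       (unsigned long long)((e >> 26) & 0x3),		/* 0x2 */
	       (unsigned long long)(e & 0x3ffffff));		/* 0xabcd */
	return 0;
}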
diff --git a/include/asm-ia64/sn/tiocx.h b/include/asm-ia64/sn/tiocx.h
new file mode 100644
index 000000000000..c5447a504509
--- /dev/null
+++ b/include/asm-ia64/sn/tiocx.h
@@ -0,0 +1,71 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef _ASM_IA64_SN_TIO_TIOCX_H
10#define _ASM_IA64_SN_TIO_TIOCX_H
11
12#ifdef __KERNEL__
13
14struct cx_id_s {
15 unsigned int part_num;
16 unsigned int mfg_num;
17 int nasid;
18};
19
20struct cx_dev {
21 struct cx_id_s cx_id;
22 void *soft; /* driver specific */
23 struct hubdev_info *hubdev;
24 struct device dev;
25 struct cx_drv *driver;
26};
27
28struct cx_device_id {
29 unsigned int part_num;
30 unsigned int mfg_num;
31};
32
33struct cx_drv {
34 char *name;
35 const struct cx_device_id *id_table;
36 struct device_driver driver;
37 int (*probe) (struct cx_dev * dev, const struct cx_device_id * id);
38 int (*remove) (struct cx_dev * dev);
39};
40
41/* create DMA address by stripping AS bits */
42#define TIOCX_DMA_ADDR(a) (uint64_t)((uint64_t)(a) & 0xffffcfffffffffUL)
43
44#define TIOCX_TO_TIOCX_DMA_ADDR(a) (uint64_t)(((uint64_t)(a) & 0xfffffffff) | \
45 ((((uint64_t)(a)) & 0xffffc000000000UL) <<2))
46
47#define TIO_CE_ASIC_PARTNUM 0xce00
48#define TIOCX_CORELET 3
49
50/* These are taken from tio_mmr_as.h */
51#define TIO_ICE_FRZ_CFG TIO_MMR_ADDR_MOD(0x00000000b0008100UL)
52#define TIO_ICE_PMI_TX_CFG TIO_MMR_ADDR_MOD(0x00000000b000b100UL)
53#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL)
54#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL
55
56#define to_cx_dev(n) container_of(n, struct cx_dev, dev)
57#define to_cx_driver(drv) container_of(drv, struct cx_drv, driver)
58
59extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int);
60extern void tiocx_irq_free(struct sn_irq_info *);
61extern int cx_device_unregister(struct cx_dev *);
62extern int cx_device_register(nasid_t, int, int, struct hubdev_info *);
63extern int cx_driver_unregister(struct cx_drv *);
64extern int cx_driver_register(struct cx_drv *);
65extern uint64_t tiocx_dma_addr(uint64_t addr);
66extern uint64_t tiocx_swin_base(int nasid);
67extern void tiocx_mmr_store(int nasid, uint64_t offset, uint64_t value);
68extern uint64_t tiocx_mmr_load(int nasid, uint64_t offset);
69
70#endif // __KERNEL__
71#endif // _ASM_IA64_SN_TIO_TIOCX_H
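
TIOCX_DMA_ADDR's constant keeps bits 55:38 and 35:0 of an address and clears bits 37:36 -- the AS bits the comment refers to. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

#define TIOCX_DMA_ADDR(a)	((uint64_t)(a) & 0xffffcfffffffffUL)

int main(void)
{
	/* AS bits (37:36) set to 0x3, a nasid-like field at bit 38 */
	uint64_t a = (0x3UL << 36) | (0x5UL << 38) | 0x1000;

	printf("in  0x%llx\nout 0x%llx\n",
	       (unsigned long long)a,			/* 0x17000001000 */
	       (unsigned long long)TIOCX_DMA_ADDR(a));	/* 0x14000001000 */
	/* bits 37:36 are stripped; the bit-38 field and offset remain */
	return 0;
}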
diff --git a/include/asm-ia64/sn/types.h b/include/asm-ia64/sn/types.h
index 586ed47cae9c..8e04ee211e59 100644
--- a/include/asm-ia64/sn/types.h
+++ b/include/asm-ia64/sn/types.h
@@ -16,7 +16,8 @@ typedef signed short nasid_t; /* node id in numa-as-id space */
16typedef signed char partid_t; /* partition ID type */ 16typedef signed char partid_t; /* partition ID type */
17typedef unsigned int moduleid_t; /* user-visible module number type */ 17typedef unsigned int moduleid_t; /* user-visible module number type */
18typedef unsigned int cmoduleid_t; /* kernel compact module id type */ 18typedef unsigned int cmoduleid_t; /* kernel compact module id type */
19typedef signed char slabid_t; 19typedef unsigned char slotid_t; /* slot (blade) within module */
20typedef unsigned char slabid_t; /* slab (asic) within slot */
20typedef u64 nic_t; 21typedef u64 nic_t;
21typedef unsigned long iopaddr_t; 22typedef unsigned long iopaddr_t;
22typedef unsigned long paddr_t; 23typedef unsigned long paddr_t;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 358d52b0c445..772998147e3e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -643,6 +643,7 @@ enum {
643 NET_SCTP_MAX_BURST = 12, 643 NET_SCTP_MAX_BURST = 12,
644 NET_SCTP_ADDIP_ENABLE = 13, 644 NET_SCTP_ADDIP_ENABLE = 13,
645 NET_SCTP_PRSCTP_ENABLE = 14, 645 NET_SCTP_PRSCTP_ENABLE = 14,
646 NET_SCTP_SNDBUF_POLICY = 15,
646}; 647};
647 648
648/* /proc/sys/net/bridge */ 649/* /proc/sys/net/bridge */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 87496e3aa330..7352e455053c 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -140,7 +140,7 @@ psched_tod_diff(int delta_sec, int bound)
140 if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1) 140 if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1)
141 return bound; 141 return bound;
142 delta = delta_sec * 1000000; 142 delta = delta_sec * 1000000;
143 if (delta > bound) 143 if (delta > bound || delta < 0)
144 delta = bound; 144 delta = bound;
145 return delta; 145 return delta;
146} 146}
@@ -156,7 +156,8 @@ psched_tod_diff(int delta_sec, int bound)
156 __delta += 1000000; \ 156 __delta += 1000000; \
157 case 1: \ 157 case 1: \
158 __delta += 1000000; \ 158 __delta += 1000000; \
159 case 0: ; \ 159 case 0: \
160 __delta = abs(__delta); \
160 } \ 161 } \
161 __delta; \ 162 __delta; \
162}) 163})
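
The "|| delta < 0" guard added above matters when delta_sec is negative (wall time stepped backwards): the old code would return a negative delta instead of clamping. A standalone sketch of the fixed helper:

#include <stdio.h>

static int psched_tod_diff(int delta_sec, int bound)
{
	int delta;

	if (bound <= 1000000 || delta_sec > (0x7FFFFFFF / 1000000) - 1)
		return bound;
	delta = delta_sec * 1000000;
	if (delta > bound || delta < 0)	/* clamp backwards steps too */
		delta = bound;
	return delta;
}

int main(void)
{
	/* time went backwards by 3 seconds; result clamps to bound */
	printf("%d\n", psched_tod_diff(-3, 2000000));	/* 2000000 */
	return 0;
}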
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5576db56324d..f4fcee104707 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -407,32 +407,38 @@ sctp_vtag_verify(const struct sctp_chunk *chunk,
 	return 0;
 }
 
-/* Check VTAG of the packet matches the sender's own tag OR its peer's
- * tag and the T bit is set in the Chunk Flags.
+/* Check VTAG of the packet matches the sender's own tag and the T bit is
+ * not set, OR its peer's tag and the T bit is set in the Chunk Flags.
  */
 static inline int
 sctp_vtag_verify_either(const struct sctp_chunk *chunk,
 			const struct sctp_association *asoc)
 {
-	/* RFC 2960 Section 8.5.1, sctpimpguide-06 Section 2.13.2
+	/* RFC 2960 Section 8.5.1, sctpimpguide Section 2.41
 	 *
-	 * B) The receiver of a ABORT shall accept the packet if the
-	 * Verification Tag field of the packet matches its own tag OR it
-	 * is set to its peer's tag and the T bit is set in the Chunk
-	 * Flags. Otherwise, the receiver MUST silently discard the packet
-	 * and take no further action.
-	 *
-	 * (C) The receiver of a SHUTDOWN COMPLETE shall accept the
-	 * packet if the Verification Tag field of the packet
-	 * matches its own tag OR it is set to its peer's tag and
-	 * the T bit is set in the Chunk Flags. Otherwise, the
-	 * receiver MUST silently discard the packet and take no
-	 * further action....
+	 * B) The receiver of a ABORT MUST accept the packet
+	 * if the Verification Tag field of the packet matches its own tag
+	 * and the T bit is not set
+	 * OR
+	 * it is set to its peer's tag and the T bit is set in the Chunk
+	 * Flags.
+	 * Otherwise, the receiver MUST silently discard the packet
+	 * and take no further action.
 	 *
+	 * C) The receiver of a SHUTDOWN COMPLETE shall accept the packet
+	 * if the Verification Tag field of the packet matches its own tag
+	 * and the T bit is not set
+	 * OR
+	 * it is set to its peer's tag and the T bit is set in the Chunk
+	 * Flags.
+	 * Otherwise, the receiver MUST silently discard the packet
+	 * and take no further action. An endpoint MUST ignore the
+	 * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
 	 */
-	if ((ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag) ||
-	    (sctp_test_T_bit(chunk) && (ntohl(chunk->sctp_hdr->vtag)
-	    == asoc->c.peer_vtag))) {
+	if ((!sctp_test_T_bit(chunk) &&
+	     (ntohl(chunk->sctp_hdr->vtag) == asoc->c.my_vtag)) ||
+	    (sctp_test_T_bit(chunk) &&
+	     (ntohl(chunk->sctp_hdr->vtag) == asoc->c.peer_vtag))) {
 		return 1;
 	}
 
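The tightened predicate is easiest to read as a pure function over (tag match, T bit): each tag is now only valid for one polarity of T. A minimal sketch with illustrative names (the kernel applies ntohl() to the big-endian header field first):

    #include <stdbool.h>
    #include <stdint.h>

    /* accept iff (vtag == my_vtag and T clear) or (vtag == peer_vtag and T set) */
    static bool vtag_verify_either_sketch(uint32_t vtag, bool t_bit,
    				      uint32_t my_vtag, uint32_t peer_vtag)
    {
    	return (!t_bit && vtag == my_vtag) ||
    	       (t_bit && vtag == peer_vtag);
    }

    int main(void)
    {
    	/* reflected ABORT: tag equals the peer's and T is set -> accept */
    	return vtag_verify_either_sketch(42, true, 7, 42) ? 0 : 1;
    }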
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 7e64cf6bda1e..6c24d9cd3d66 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -154,6 +154,13 @@ extern struct sctp_globals {
 	int max_retrans_path;
 	int max_retrans_init;
 
+	/*
+	 * Policy for preforming sctp/socket accounting
+	 * 0 - do socket level accounting, all assocs share sk_sndbuf
+	 * 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
+	 */
+	int sndbuf_policy;
+
 	/* HB.interval - 30 seconds */
 	int hb_interval;
 
@@ -207,6 +214,7 @@ extern struct sctp_globals {
 #define sctp_valid_cookie_life		(sctp_globals.valid_cookie_life)
 #define sctp_cookie_preserve_enable	(sctp_globals.cookie_preserve_enable)
 #define sctp_max_retrans_association	(sctp_globals.max_retrans_association)
+#define sctp_sndbuf_policy		(sctp_globals.sndbuf_policy)
 #define sctp_max_retrans_path		(sctp_globals.max_retrans_path)
 #define sctp_max_retrans_init		(sctp_globals.max_retrans_init)
 #define sctp_hb_interval		(sctp_globals.hb_interval)
@@ -1212,7 +1220,8 @@ struct sctp_endpoint {
 	/* Default timeouts. */
 	int timeouts[SCTP_NUM_TIMEOUT_TYPES];
 
-	/* Various thresholds. */
+	/* sendbuf acct. policy. */
+	__u32 sndbuf_policy;
 
 	/* Name for debugging output... */
 	char *debug_name;
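Together with the endpointola.c and sysctl.c hunks below, the new field selects between two send-space formulas. A standalone sketch of the semantics, with illustrative names (the kernel computes this in sctp_wspace(), shown later in this patch):

    #include <stdio.h>

    /* 0: all associations share the socket's sndbuf (socket accounting);
     * 1: each association may consume up to sndbuf on its own */
    static int wspace_sketch(int sndbuf_policy, int sk_sndbuf,
    			 int assoc_sndbuf_used, int sk_wmem_alloc)
    {
    	int amt;

    	if (sndbuf_policy)
    		amt = sk_sndbuf - assoc_sndbuf_used;
    	else
    		amt = sk_sndbuf - sk_wmem_alloc;
    	return amt < 0 ? 0 : amt;
    }

    int main(void)
    {
    	printf("%d\n", wspace_sketch(1, 65536, 4096, 0));	/* 61440 */
    	printf("%d\n", wspace_sketch(0, 65536, 4096, 70000));	/* 0 */
    	return 0;
    }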
diff --git a/kernel/time.c b/kernel/time.c
index 96fd0f499631..d4335c1c884c 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -516,14 +516,6 @@ int do_settimeofday (struct timespec *tv)
 
 	write_seqlock_irq(&xtime_lock);
 	{
-		/*
-		 * This is revolting. We need to set "xtime" correctly. However, the value
-		 * in this location is the value at the most recent update of wall time.
-		 * Discover what correction gettimeofday would have done, and then undo
-		 * it!
-		 */
-		nsec -= time_interpolator_get_offset();
-
 		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
 		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
 
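The surviving lines keep the usual invariant: whatever is added to xtime is subtracted from wall_to_monotonic, so their sum stays fixed. A sketch with illustrative types (not the kernel's timekeeping structures):

    struct ts { long sec, nsec; };

    static void settime_sketch(struct ts *xtime, struct ts *wtm,
    			   long sec, long nsec)
    {
    	wtm->sec  += xtime->sec - sec;		/* wtm_sec in the hunk above */
    	wtm->nsec += xtime->nsec - nsec;	/* wtm_nsec */
    	xtime->sec  = sec;
    	xtime->nsec = nsec;
    }

    int main(void)
    {
    	struct ts xtime = { 100, 0 }, wtm = { -100, 0 };
    	settime_sketch(&xtime, &wtm, 90, 0);
    	/* sum preserved: 90 + (-90) == 100 + (-100) */
    	return (xtime.sec + wtm.sec == 0) ? 0 : 1;
    }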
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a2f67bbef2e..43bdc521e20d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1953,7 +1953,7 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
 	struct neigh_statistics *st = v;
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs forced_gc_goal_miss\n");
+		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
 		return 0;
 	}
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bb90a0c3a91e..199311746932 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -397,7 +397,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
 	struct rt_cache_stat *st = v;
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "entries in_hit in_slow_tot in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
+		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
 		return 0;
 	}
 
@@ -2843,7 +2843,7 @@ ctl_table ipv4_route_table[] = {
 		.procname	= "flush",
 		.data		= &flush_delay,
 		.maxlen		= sizeof(int),
-		.mode		= 0644,
+		.mode		= 0200,
 		.proc_handler	= &ipv4_sysctl_rtcache_flush,
 		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
 	},
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 183802902c02..3bf8a0254f81 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2005,7 +2005,7 @@ ctl_table ipv6_route_table[] = {
 		.procname	= "flush",
 		.data		= &flush_delay,
 		.maxlen		= sizeof(int),
-		.mode		= 0644,
+		.mode		= 0200,
 		.proc_handler	= &ipv6_sysctl_rtcache_flush
 	},
 	{
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 544b75077dbd..334f61773e6d 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -125,6 +125,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		sp->autoclose * HZ;
 
 	/* Use SCTP specific send buffer space queues.  */
+	ep->sndbuf_policy = sctp_sndbuf_policy;
 	sk->sk_write_space = sctp_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e42c74e3ec1e..c9d9ea064734 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -496,9 +496,7 @@ static void sctp_v6_inaddr_any(union sctp_addr *addr, unsigned short port)
 /* Is this a wildcard address? */
 static int sctp_v6_is_any(const union sctp_addr *addr)
 {
-	int type;
-	type = ipv6_addr_type((struct in6_addr *)&addr->v6.sin6_addr);
-	return IPV6_ADDR_ANY == type;
+	return ipv6_addr_any(&addr->v6.sin6_addr);
 }
 
 /* Should this be available for binding? */
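The replacement helper reduces the wildcard test to "all 128 address bits are zero", instead of classifying the address and comparing types. A userspace equivalent using struct in6_addr from <netinet/in.h>:

    #include <netinet/in.h>
    #include <stdbool.h>

    static bool addr_is_any_sketch(const struct in6_addr *a)
    {
    	for (int i = 0; i < 16; i++)
    		if (a->s6_addr[i])
    			return false;
    	return true;
    }

    int main(void)
    {
    	struct in6_addr a = IN6ADDR_ANY_INIT;	/* :: */
    	return addr_is_any_sketch(&a) ? 0 : 1;
    }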
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 9013f64f5219..84b5b370b09d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -313,12 +313,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	sk = chunk->skb->sk;
 
 	/* Allocate the new skb.  */
-	nskb = dev_alloc_skb(packet->size);
+	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
 	if (!nskb)
 		goto nomem;
 
 	/* Make sure the outbound skb has enough header room reserved. */
-	skb_reserve(nskb, packet->overhead);
+	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
 
 	/* Set the owning socket so that we know where to get the
 	 * destination IP address.
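The extra LL_MAX_HEADER matters because lower layers prepend their headers in front of the payload; without the reserve, that prepend would force a costly reallocation. A userspace sketch of the reserve-then-fill pattern with a plain buffer (alloc_skb()/skb_reserve() are the kernel primitives named in the hunk; everything else is illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *head, *data; size_t size; };

    static struct buf *buf_alloc(size_t size, size_t headroom)
    {
    	struct buf *b = malloc(sizeof(*b));
    	if (!b)
    		return NULL;
    	b->head = malloc(size + headroom);
    	if (!b->head) { free(b); return NULL; }
    	b->size = size + headroom;
    	b->data = b->head + headroom;	/* the skb_reserve() step */
    	return b;
    }

    int main(void)
    {
    	struct buf *b = buf_alloc(1500, 128);
    	if (!b)
    		return 1;
    	memcpy(b->data, "payload", 7);	/* headers can later go before b->data */
    	free(b->head);
    	free(b);
    	return 0;
    }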
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b9813cf3d91c..2e1f9c3556f5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1043,6 +1043,9 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_max_retrans_path = 5;
 	sctp_max_retrans_init = 8;
 
+	/* Sendbuffer growth - do per-socket accounting */
+	sctp_sndbuf_policy = 0;
+
 	/* HB.interval - 30 seconds */
 	sctp_hb_interval = 30 * HZ;
 
@@ -1159,8 +1162,6 @@ SCTP_STATIC __init int sctp_init(void)
 	status = 0;
 out:
 	return status;
-err_add_protocol:
-	proto_unregister(&sctp_prot);
 err_ctl_sock_init:
 	sctp_v6_exit();
 err_v6_init:
@@ -1188,6 +1189,8 @@ err_bucket_cachep:
 	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);
 	inet_unregister_protosw(&sctp_seqpacket_protosw);
 	inet_unregister_protosw(&sctp_stream_protosw);
+err_add_protocol:
+	proto_unregister(&sctp_prot);
 	goto out;
 }
 
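Moving err_add_protocol to the bottom of the label chain restores the usual kernel unwind order: each failure label undoes only the steps that succeeded before it, in reverse. A generic, self-contained sketch of the pattern (names are illustrative):

    #include <stdio.h>

    static int step_a(void)  { puts("A up");   return 0; }
    static int step_b(void)  { puts("B fail"); return -1; }
    static void undo_a(void) { puts("A down"); }

    static int init_sketch(void)
    {
    	int err;

    	err = step_a();
    	if (err)
    		goto err_a;
    	err = step_b();
    	if (err)
    		goto err_b;
    	return 0;

    err_b:
    	undo_a();	/* unwind only what succeeded, in reverse order */
    err_a:
    	return err;
    }

    int main(void) { return init_sketch() ? 1 : 0; }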
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 1db12cc18cf7..33ac8bf47b0e 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -710,7 +710,9 @@ struct sctp_chunk *sctp_make_shutdown_complete(
 	struct sctp_chunk *retval;
 	__u8 flags = 0;
 
-	/* Maybe set the T-bit if we have no association. */
+	/* Set the T-bit if we have no association (vtag will be
+	 * reflected)
+	 */
 	flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
 
 	retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
@@ -732,7 +734,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
 }
 
 /* Create an ABORT.  Note that we set the T bit if we have no
- * association.
+ * association, except when responding to an INIT (sctpimpguide 2.41).
  */
 struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
 				   const struct sctp_chunk *chunk,
@@ -741,8 +743,16 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
 	struct sctp_chunk *retval;
 	__u8 flags = 0;
 
-	/* Maybe set the T-bit if we have no association. */
-	flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
+	/* Set the T-bit if we have no association and 'chunk' is not
+	 * an INIT (vtag will be reflected).
+	 */
+	if (!asoc) {
+		if (chunk && chunk->chunk_hdr &&
+		    chunk->chunk_hdr->type == SCTP_CID_INIT)
+			flags = 0;
+		else
+			flags = SCTP_CHUNK_FLAG_T;
+	}
 
 	retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint);
 
@@ -2744,7 +2754,6 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 
 	hint = (nstreams + 1) * sizeof(__u32);
 
-	/* Maybe set the T-bit if we have no association. */
 	retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
 
 	if (!retval)
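The new sctp_make_abort() decision is a three-way rule: an established association never sets T; no association normally sets T (the vtag is a reflection); but an ABORT of an INIT uses the tag taken from the INIT itself, which is not a reflection, so T stays clear. As a pure function with illustrative names:

    #include <stdbool.h>

    #define CHUNK_FLAG_T	0x01
    #define CID_INIT	1

    static unsigned char abort_flags_sketch(bool have_assoc, int in_chunk_type)
    {
    	if (have_assoc)
    		return 0;
    	/* no TCB: T set, unless the tag comes from the peer's INIT */
    	return in_chunk_type == CID_INIT ? 0 : CHUNK_FLAG_T;
    }

    int main(void)
    {
    	/* an OOTB ABORT not answering an INIT carries the T bit */
    	return abort_flags_sketch(false, 0) == CHUNK_FLAG_T ? 0 : 1;
    }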
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 278c56a2d076..8e01b8f09ac2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -126,15 +126,18 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
  * should stop the T2-shutdown timer and remove all knowledge of the
  * association (and thus the association enters the CLOSED state).
  *
- * Verification Tag: 8.5.1(C)
+ * Verification Tag: 8.5.1(C), sctpimpguide 2.41.
  * C) Rules for packet carrying SHUTDOWN COMPLETE:
  * ...
- * - The receiver of a SHUTDOWN COMPLETE shall accept the packet if the
- *   Verification Tag field of the packet matches its own tag OR it is
- *   set to its peer's tag and the T bit is set in the Chunk Flags.
- *   Otherwise, the receiver MUST silently discard the packet and take
- *   no further action. An endpoint MUST ignore the SHUTDOWN COMPLETE if
- *   it is not in the SHUTDOWN-ACK-SENT state.
+ * - The receiver of a SHUTDOWN COMPLETE shall accept the packet
+ *   if the Verification Tag field of the packet matches its own tag and
+ *   the T bit is not set
+ *   OR
+ *   it is set to its peer's tag and the T bit is set in the Chunk
+ *   Flags.
+ *   Otherwise, the receiver MUST silently discard the packet
+ *   and take no further action. An endpoint MUST ignore the
+ *   SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
  *
  * Inputs
  * (endpoint, asoc, chunk)
@@ -2858,16 +2861,16 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
 /*
  * Generate an ABORT in response to a packet.
  *
- * Section: 8.4 Handle "Out of the blue" Packets
+ * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
  *
- * 8) The receiver should respond to the sender of the OOTB packet
- *    with an ABORT. When sending the ABORT, the receiver of the
- *    OOTB packet MUST fill in the Verification Tag field of the
- *    outbound packet with the value found in the Verification Tag
- *    field of the OOTB packet and set the T-bit in the Chunk Flags
- *    to indicate that no TCB was found. After sending this ABORT,
- *    the receiver of the OOTB packet shall discard the OOTB packet
- *    and take no further action.
+ * 8) The receiver should respond to the sender of the OOTB packet with
+ *    an ABORT. When sending the ABORT, the receiver of the OOTB packet
+ *    MUST fill in the Verification Tag field of the outbound packet
+ *    with the value found in the Verification Tag field of the OOTB
+ *    packet and set the T-bit in the Chunk Flags to indicate that the
+ *    Verification Tag is reflected. After sending this ABORT, the
+ *    receiver of the OOTB packet shall discard the OOTB packet and take
+ *    no further action.
  *
  * Verification Tag:
  *
@@ -2895,6 +2898,10 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep,
 		return SCTP_DISPOSITION_NOMEM;
 	}
 
+	/* Reflect vtag if T-Bit is set */
+	if (sctp_test_T_bit(abort))
+		packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
 	/* Set the skb to the belonging sock for accounting.  */
 	abort->skb->sk = ep->base.sk;
 
@@ -3026,22 +3033,24 @@ nomem:
 }
 
 /*
- * RFC 2960, 8.4 - Handle "Out of the blue" Packets
+ * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
+ *
  * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
  *    respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
  *    When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
  *    packet must fill in the Verification Tag field of the outbound
  *    packet with the Verification Tag received in the SHUTDOWN ACK and
- *    set the T-bit in the Chunk Flags to indicate that no TCB was
- *    found. Otherwise,
+ *    set the T-bit in the Chunk Flags to indicate that the Verification
+ *    Tag is reflected.
  *
  * 8) The receiver should respond to the sender of the OOTB packet with
  *    an ABORT. When sending the ABORT, the receiver of the OOTB packet
  *    MUST fill in the Verification Tag field of the outbound packet
  *    with the value found in the Verification Tag field of the OOTB
- *    packet and set the T-bit in the Chunk Flags to indicate that no
- *    TCB was found. After sending this ABORT, the receiver of the OOTB
- *    packet shall discard the OOTB packet and take no further action.
+ *    packet and set the T-bit in the Chunk Flags to indicate that the
+ *    Verification Tag is reflected. After sending this ABORT, the
+ *    receiver of the OOTB packet shall discard the OOTB packet and take
+ *    no further action.
  */
 sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 				const struct sctp_association *asoc,
@@ -3090,13 +3099,15 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
 /*
  * Handle an "Out of the blue" SHUTDOWN ACK.
  *
- * Section: 8.4 5)
+ * Section: 8.4 5, sctpimpguide 2.41.
+ *
  * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
  *    respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
- *    When sending the SHUTDOWN COMPLETE, the receiver of the OOTB packet
- *    must fill in the Verification Tag field of the outbound packet with
- *    the Verification Tag received in the SHUTDOWN ACK and set the
- *    T-bit in the Chunk Flags to indicate that no TCB was found.
+ *    When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
+ *    packet must fill in the Verification Tag field of the outbound
+ *    packet with the Verification Tag received in the SHUTDOWN ACK and
+ *    set the T-bit in the Chunk Flags to indicate that the Verification
+ *    Tag is reflected.
  *
  * Inputs
  * (endpoint, asoc, type, arg, commands)
@@ -3128,6 +3139,10 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep,
 		return SCTP_DISPOSITION_NOMEM;
 	}
 
+	/* Reflect vtag if T-Bit is set */
+	if (sctp_test_T_bit(shut))
+		packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
 	/* Set the skb to the belonging sock for accounting.  */
 	shut->skb->sk = ep->base.sk;
 
@@ -3591,7 +3606,6 @@ sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep,
  *
  * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
  *    silently discard the OOTB packet and take no further action.
- *    Otherwise,
  *
  * Verification Tag: No verification necessary
  *
@@ -4961,6 +4975,11 @@ static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep,
 		sctp_ootb_pkt_free(packet);
 		return NULL;
 	}
+
+	/* Reflect vtag if T-Bit is set */
+	if (sctp_test_T_bit(abort))
+		packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+
 	/* Add specified error causes, i.e., payload, to the
 	 * end of the chunk.
 	 */
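The three insertions in this file share one pattern: whenever the outbound chunk carries the T bit, the packet's vtag is taken verbatim from the inbound header rather than from a (nonexistent) association. A sketch with illustrative types; ntohl()/htonl() are the standard byte-order helpers:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct pkt_sketch { uint32_t vtag; };

    static void maybe_reflect_vtag(struct pkt_sketch *packet,
    			       uint32_t inbound_vtag_be, bool t_bit)
    {
    	if (t_bit)
    		packet->vtag = ntohl(inbound_vtag_be);
    }

    int main(void)
    {
    	struct pkt_sketch p = { 0 };
    	maybe_reflect_vtag(&p, htonl(0xdeadbeef), true);
    	return p.vtag == 0xdeadbeef ? 0 : 1;
    }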
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e8c210182571..0b338eca6dc0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -115,9 +115,17 @@ static inline int sctp_wspace(struct sctp_association *asoc)
 	struct sock *sk = asoc->base.sk;
 	int amt = 0;
 
-	amt = sk->sk_sndbuf - asoc->sndbuf_used;
+	if (asoc->ep->sndbuf_policy) {
+		/* make sure that no association uses more than sk_sndbuf */
+		amt = sk->sk_sndbuf - asoc->sndbuf_used;
+	} else {
+		/* do socket level accounting */
+		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+	}
+
 	if (amt < 0)
 		amt = 0;
+
 	return amt;
 }
 
@@ -138,12 +146,21 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 	/* The sndbuf space is tracked per association.  */
 	sctp_association_hold(asoc);
 
+	skb_set_owner_w(chunk->skb, sk);
+
 	chunk->skb->destructor = sctp_wfree;
 	/* Save the chunk pointer in skb for sctp_wfree to use later.  */
 	*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
 
-	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
-	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+	asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
+				sizeof(struct sk_buff) +
+				sizeof(struct sctp_chunk);
+
+	sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk) +
+				sizeof(struct sk_buff) +
+				sizeof(struct sctp_chunk);
+
+	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 }
 
 /* Verify that this is a valid address.  */
@@ -3473,7 +3490,7 @@ static int sctp_getsockopt_associnfo(struct sock *sk, int len,
 		return -EINVAL;
 
 	/* Values correspoinding to the specific association */
-	if (assocparams.sasoc_assoc_id != 0) {
+	if (asoc) {
 		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
 		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
 		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
@@ -4422,8 +4439,17 @@ static void sctp_wfree(struct sk_buff *skb)
 	chunk = *((struct sctp_chunk **)(skb->cb));
 	asoc = chunk->asoc;
 	sk = asoc->base.sk;
-	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
-	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
+	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
+				sizeof(struct sk_buff) +
+				sizeof(struct sctp_chunk);
+
+	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk) +
+				sizeof(struct sk_buff) +
+				sizeof(struct sctp_chunk);
+
+	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+
+	sock_wfree(skb);
 	__sctp_write_space(asoc);
 
 	sctp_association_put(asoc);
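The symmetric += in sctp_set_owner_w() and -= in sctp_wfree() now charge each queued chunk for its true footprint: payload plus the sk_buff and sctp_chunk metadata. A sketch of the bookkeeping with illustrative stand-in sizes (the kernel uses the real sizeof values):

    #include <stdio.h>

    #define SKB_OVERHEAD	192	/* stand-in for sizeof(struct sk_buff) */
    #define CHUNK_OVERHEAD	256	/* stand-in for sizeof(struct sctp_chunk) */

    static int sndbuf_used;

    static void charge(int data_len)
    {
    	sndbuf_used += data_len + SKB_OVERHEAD + CHUNK_OVERHEAD;
    }

    static void release(int data_len)
    {
    	sndbuf_used -= data_len + SKB_OVERHEAD + CHUNK_OVERHEAD;
    }

    int main(void)
    {
    	charge(1000);
    	printf("%d\n", sndbuf_used);	/* 1448 */
    	release(1000);
    	printf("%d\n", sndbuf_used);	/* back to 0 */
    	return 0;
    }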
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 89fa20c73a5c..7fc31849312b 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -110,6 +110,14 @@ static ctl_table sctp_table[] = {
 		.proc_handler	= &proc_dointvec
 	},
 	{
+		.ctl_name	= NET_SCTP_SNDBUF_POLICY,
+		.procname	= "sndbuf_policy",
+		.data		= &sctp_sndbuf_policy,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec
+	},
+	{
 		.ctl_name	= NET_SCTP_PATH_MAX_RETRANS,
 		.procname	= "path_max_retrans",
 		.data		= &sctp_max_retrans_path,