Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c  326
1 files changed, 303 insertions, 23 deletions
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index e52831ed93eb..fa073cc4b565 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -15,6 +15,124 @@
 #include <asm/sn/pcidev.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/tioce_provider.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+
+/*
+ * 1/26/2006
+ *
+ * WAR for SGI PV 944642.  For revA TIOCE, need to use the following recipe
+ * (taken from the above PV) before and after accessing tioce internal MMR's
+ * to avoid tioce lockups.
+ *
+ * The recipe as taken from the PV:
+ *
+ *	if(mmr address < 0x45000) {
+ *		if(mmr address == 0 or 0x80)
+ *			mmr wrt or read address 0xc0
+ *		else if(mmr address == 0x148 or 0x200)
+ *			mmr wrt or read address 0x28
+ *		else
+ *			mmr wrt or read address 0x158
+ *
+ *		do desired mmr access (rd or wrt)
+ *
+ *		if(mmr address == 0x100)
+ *			mmr wrt or read address 0x38
+ *		mmr wrt or read address 0xb050
+ *	} else
+ *		do desired mmr access
+ *
+ * According to hw, we can use reads instead of writes to the above addresses.
+ *
+ * Note this WAR can only be used for accessing internal MMR's in the
+ * TIOCE Coretalk Address Range 0x0 - 0x07ff_ffff.  This includes the
+ * "Local CE Registers and Memories" and "PCI Compatible Config Space" address
+ * spaces from table 2-1 of the "CE Programmer's Reference Overview" document.
+ *
+ * All registers defined in struct tioce will meet that criteria.
+ */
+
+static void inline
+tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
+{
+	u64 mmr_base;
+	u64 mmr_offset;
+
+	if (kern->ce_common->ce_rev != TIOCE_REV_A)
+		return;
+
+	mmr_base = kern->ce_common->ce_pcibus.bs_base;
+	mmr_offset = (u64)mmr_addr - mmr_base;
+
+	if (mmr_offset < 0x45000) {
+		u64 mmr_war_offset;
+
+		if (mmr_offset == 0 || mmr_offset == 0x80)
+			mmr_war_offset = 0xc0;
+		else if (mmr_offset == 0x148 || mmr_offset == 0x200)
+			mmr_war_offset = 0x28;
+		else
+			mmr_war_offset = 0x158;
+
+		readq_relaxed((void *)(mmr_base + mmr_war_offset));
+	}
+}
+
+static void inline
+tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
+{
+	u64 mmr_base;
+	u64 mmr_offset;
+
+	if (kern->ce_common->ce_rev != TIOCE_REV_A)
+		return;
+
+	mmr_base = kern->ce_common->ce_pcibus.bs_base;
+	mmr_offset = (u64)mmr_addr - mmr_base;
+
+	if (mmr_offset < 0x45000) {
+		if (mmr_offset == 0x100)
+			readq_relaxed((void *)(mmr_base + 0x38));
+		readq_relaxed((void *)(mmr_base + 0xb050));
+	}
+}
+
+/* load mmr contents into a variable */
+#define tioce_mmr_load(kern, mmrp, varp) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	*(varp) = readq_relaxed(mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store variable contents into mmr */
+#define tioce_mmr_store(kern, mmrp, varp) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	writeq(*varp, mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* store immediate value into mmr */
+#define tioce_mmr_storei(kern, mmrp, val) do {\
+	tioce_mmr_war_pre(kern, mmrp); \
+	writeq(val, mmrp); \
+	tioce_mmr_war_post(kern, mmrp); \
+} while (0)
+
+/* set bits (immediate value) into mmr */
+#define tioce_mmr_seti(kern, mmrp, bits) do {\
+	u64 tmp; \
+	tioce_mmr_load(kern, mmrp, &tmp); \
+	tmp |= (bits); \
+	tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
+
+/* clear bits (immediate value) in mmr */
+#define tioce_mmr_clri(kern, mmrp, bits) do { \
+	u64 tmp; \
+	tioce_mmr_load(kern, mmrp, &tmp); \
+	tmp &= ~(bits); \
+	tioce_mmr_store(kern, mmrp, &tmp); \
+} while (0)
 
 /**
  * Bus address ranges for the 5 flavors of TIOCE DMA
@@ -62,9 +180,9 @@
 #define TIOCE_ATE_M40	2
 #define TIOCE_ATE_M40S	3
 
-#define KB(x)	((x) << 10)
-#define MB(x)	((x) << 20)
-#define GB(x)	((x) << 30)
+#define KB(x)	((u64)(x) << 10)
+#define MB(x)	((u64)(x) << 20)
+#define GB(x)	((u64)(x) << 30)
 
 /**
  * tioce_dma_d64 - create a DMA mapping using 64-bit direct mode
@@ -151,7 +269,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 	int last;
 	int entries;
 	int nates;
-	int pagesize;
+	u64 pagesize;
 	u64 *ate_shadow;
 	u64 *ate_reg;
 	u64 addr;
@@ -228,7 +346,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 
 		ate = ATE_MAKE(addr, pagesize);
 		ate_shadow[i + j] = ate;
-		writeq(ate, &ate_reg[i + j]);
+		tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
 		addr += pagesize;
 	}
 
@@ -272,7 +390,8 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
 		u64 tmp;
 
 		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
-		writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
+		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+				 ct_upper);
 		tmp = ce_mmr->ce_ure_dir_map[port];
 		dma_ok = 1;
 	} else
@@ -344,7 +463,8 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	if (TIOCE_D32_ADDR(bus_addr)) {
 		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
 			ce_kern->ce_port[port].dirmap_shadow = 0;
-			writeq(0, &ce_mmr->ce_ure_dir_map[port]);
+			tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_dir_map[port],
+					 0);
 		}
 	} else {
 		struct tioce_dmamap *map;
@@ -365,7 +485,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	} else if (--map->refcnt == 0) {
 		for (i = 0; i < map->ate_count; i++) {
 			map->ate_shadow[i] = 0;
-			map->ate_hw[i] = 0;
+			tioce_mmr_storei(ce_kern, &map->ate_hw[i], 0);
 		}
 
 		list_del(&map->ce_dmamap_list);
@@ -486,7 +606,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
 	spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
 
 dma_map_done:
-	if (mapaddr & barrier)
+	if (mapaddr && barrier)
 		mapaddr = tioce_dma_barrier(mapaddr, 1);
 
 	return mapaddr;
@@ -541,17 +661,61 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
 			soft->ce_pcibus.bs_persist_segment,
 			soft->ce_pcibus.bs_persist_busnum, 0, 0, 0, 0, 0);
 
+	if (ret_stuff.v0)
+		panic("tioce_error_intr_handler: Fatal TIOCE error");
+
 	return IRQ_HANDLED;
 }
 
 /**
+ * tioce_reserve_m32 - reserve M32 ate's for the indicated address range
+ * @tioce_kernel: TIOCE context to reserve ate's for
+ * @base: starting bus address to reserve
+ * @limit: last bus address to reserve
+ *
+ * If base/limit falls within the range of bus space mapped through the
+ * M32 space, reserve the resources corresponding to the range.
+ */
+static void
+tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
+{
+	int ate_index, last_ate, ps;
+	struct tioce *ce_mmr;
+
+	if (!TIOCE_M32_ADDR(base))
+		return;
+
+	ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
+	ps = ce_kern->ce_ate3240_pagesize;
+	ate_index = ATE_PAGE(base, ps);
+	last_ate = ate_index + ATE_NPAGES(base, limit-base+1, ps) - 1;
+
+	if (ate_index < 64)
+		ate_index = 64;
+
+	while (ate_index <= last_ate) {
+		u64 ate;
+
+		ate = ATE_MAKE(0xdeadbeef, ps);
+		ce_kern->ce_ate3240_shadow[ate_index] = ate;
+		tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
+				 ate);
+		ate_index++;
+	}
+}
+
+/**
  * tioce_kern_init - init kernel structures related to a given TIOCE
  * @tioce_common: ptr to a cached tioce_common struct that originated in prom
- */ static struct tioce_kernel *
+ */
+static struct tioce_kernel *
 tioce_kern_init(struct tioce_common *tioce_common)
 {
 	int i;
+	int ps;
+	int dev;
 	u32 tmp;
+	unsigned int seg, bus;
 	struct tioce *tioce_mmr;
 	struct tioce_kernel *tioce_kern;
 
@@ -572,9 +736,10 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 * here to use pci_read_config_xxx() so use the raw_pci_ops vector.
 	 */
 
-	raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment,
-			  tioce_common->ce_pcibus.bs_persist_busnum,
-			  PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
+	seg = tioce_common->ce_pcibus.bs_persist_segment;
+	bus = tioce_common->ce_pcibus.bs_persist_busnum;
+
+	raw_pci_ops->read(seg, bus, PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp);
 	tioce_kern->ce_port1_secondary = (u8) tmp;
 
 	/*
@@ -583,18 +748,76 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 */
 
 	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
-	__sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK);
-	__sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE);
-	tioce_kern->ce_ate3240_pagesize = KB(256);
+	tioce_mmr_clri(tioce_kern, &tioce_mmr->ce_ure_page_map,
+		       CE_URE_PAGESIZE_MASK);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_ure_page_map,
+		       CE_URE_256K_PAGESIZE);
+	ps = tioce_kern->ce_ate3240_pagesize = KB(256);
 
 	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
 		tioce_kern->ce_ate40_shadow[i] = 0;
-		writeq(0, &tioce_mmr->ce_ure_ate40[i]);
+		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate40[i], 0);
 	}
 
 	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
 		tioce_kern->ce_ate3240_shadow[i] = 0;
-		writeq(0, &tioce_mmr->ce_ure_ate3240[i]);
+		tioce_mmr_storei(tioce_kern, &tioce_mmr->ce_ure_ate3240[i], 0);
+	}
+
+	/*
+	 * Reserve ATE's corresponding to reserved address ranges.  These
+	 * include:
+	 *
+	 *	Memory space covered by each PPB mem base/limit register
+	 *	Memory space covered by each PPB prefetch base/limit register
+	 *
+	 * These bus ranges are for pio (downstream) traffic only, and so
+	 * cannot be used for DMA.
+	 */
+
+	for (dev = 1; dev <= 2; dev++) {
+		u64 base, limit;
+
+		/* mem base/limit */
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_MEMORY_BASE, 2, &tmp);
+		base = (u64)tmp << 16;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_MEMORY_LIMIT, 2, &tmp);
+		limit = (u64)tmp << 16;
+		limit |= 0xfffffUL;
+
+		if (base < limit)
+			tioce_reserve_m32(tioce_kern, base, limit);
+
+		/*
+		 * prefetch mem base/limit.  The tioce ppb's have 64-bit
+		 * decoders, so read the upper portions w/o checking the
+		 * attributes.
+		 */
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_MEMORY_BASE, 2, &tmp);
+		base = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_BASE_UPPER32, 4, &tmp);
+		base |= (u64)tmp << 32;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_MEMORY_LIMIT, 2, &tmp);
+
+		limit = ((u64)tmp & PCI_PREF_RANGE_MASK) << 16;
+		limit |= 0xfffffUL;
+
+		raw_pci_ops->read(seg, bus, PCI_DEVFN(dev, 0),
+				  PCI_PREF_LIMIT_UPPER32, 4, &tmp);
+		limit |= (u64)tmp << 32;
+
+		if ((base < limit) && TIOCE_M32_ADDR(base))
+			tioce_reserve_m32(tioce_kern, base, limit);
 	}
 
 	return tioce_kern;
@@ -614,6 +837,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 {
 	struct pcidev_info *pcidev_info;
 	struct tioce_common *ce_common;
+	struct tioce_kernel *ce_kern;
 	struct tioce *ce_mmr;
 	u64 force_int_val;
 
@@ -629,6 +853,29 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 
 	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
 	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
+
+	/*
+	 * TIOCE Rev A workaround (PV 945826), force an interrupt by writing
+	 * the TIO_INTx register directly (1/26/2006)
+	 */
+	if (ce_common->ce_rev == TIOCE_REV_A) {
+		u64 int_bit_mask = (1ULL << sn_irq_info->irq_int_bit);
+		u64 status;
+
+		tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_status, &status);
+		if (status & int_bit_mask) {
+			u64 force_irq = (1 << 8) | sn_irq_info->irq_irq;
+			u64 ctalk = sn_irq_info->irq_xtalkaddr;
+			u64 nasid, offset;
+
+			nasid = (ctalk & CTALK_NASID_MASK) >> CTALK_NASID_SHFT;
+			offset = (ctalk & CTALK_NODE_OFFSET);
+			HUB_S(TIO_IOSPACE_ADDR(nasid, offset), force_irq);
+		}
+
+		return;
+	}
 
 	/*
 	 * irq_int_bit is originally set up by prom, and holds the interrupt
@@ -666,7 +913,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 	default:
 		return;
 	}
-	writeq(force_int_val, &ce_mmr->ce_adm_force_int);
+	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_force_int, force_int_val);
 }
 
 /**
@@ -685,6 +932,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 {
 	struct pcidev_info *pcidev_info;
 	struct tioce_common *ce_common;
+	struct tioce_kernel *ce_kern;
 	struct tioce *ce_mmr;
 	int bit;
 	u64 vector;
@@ -695,14 +943,15 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 
 	ce_common = (struct tioce_common *)pcidev_info->pdi_pcibus_info;
 	ce_mmr = (struct tioce *)ce_common->ce_pcibus.bs_base;
+	ce_kern = (struct tioce_kernel *)ce_common->ce_kernel_private;
 
 	bit = sn_irq_info->irq_int_bit;
 
-	__sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
 	vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
 	vector |= sn_irq_info->irq_xtalkaddr;
-	writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
-	__sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	tioce_mmr_storei(ce_kern, &ce_mmr->ce_adm_int_dest[bit], vector);
+	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
 
 	tioce_force_interrupt(sn_irq_info);
 }
@@ -721,7 +970,11 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 static void *
 tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
 {
+	int my_nasid;
+	cnodeid_t my_cnode, mem_cnode;
 	struct tioce_common *tioce_common;
+	struct tioce_kernel *tioce_kern;
+	struct tioce *tioce_mmr;
 
 	/*
 	 * Allocate kernel bus soft and copy from prom.
@@ -734,11 +987,23 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 	memcpy(tioce_common, prom_bussoft, sizeof(struct tioce_common));
 	tioce_common->ce_pcibus.bs_base |= __IA64_UNCACHED_OFFSET;
 
-	if (tioce_kern_init(tioce_common) == NULL) {
+	tioce_kern = tioce_kern_init(tioce_common);
+	if (tioce_kern == NULL) {
 		kfree(tioce_common);
 		return NULL;
 	}
 
+	/*
+	 * Clear out any transient errors before registering the error
+	 * interrupt handler.
+	 */
+
+	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
+		       ~0ULL);
+	tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, ~0ULL);
+
 	if (request_irq(SGI_PCIASIC_ERROR,
 			tioce_error_intr_handler,
 			SA_SHIRQ, "TIOCE error", (void *)tioce_common))
@@ -750,6 +1015,21 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
 		tioce_common->ce_pcibus.bs_persist_segment,
 		tioce_common->ce_pcibus.bs_persist_busnum);
 
+	/*
+	 * identify closest nasid for memory allocations
+	 */
+
+	my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
+	my_cnode = nasid_to_cnodeid(my_nasid);
+
+	if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
+		printk(KERN_WARNING "tioce_bus_fixup: failed to find "
+		       "closest node with MEM to TIO node %d\n", my_cnode);
+		mem_cnode = (cnodeid_t)-1; /* use any node */
+	}
+
+	controller->node = mem_cnode;
+
 	return tioce_common;
 }
 
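
Editorial note, not part of the patch: the sketch below illustrates how a caller would use the WAR-wrapped accessor macros this change introduces (tioce_mmr_load/store/storei/seti/clri) in place of raw readq/writeq, so the Rev A pre/post MMR reads happen automatically. The function name example_mask_unmask_int is hypothetical; the structures and the ce_adm_int_mask register field are the ones used throughout tioce_provider.c.

/*
 * Illustrative sketch only -- not part of the patch.  A hypothetical caller
 * masks and unmasks an interrupt bit through the Rev A workaround wrappers
 * instead of touching the MMR directly.
 */
static void
example_mask_unmask_int(struct tioce_kernel *ce_kern, struct tioce *ce_mmr,
			int bit)
{
	u64 mask;

	/* read-modify-write via the load/store pair */
	tioce_mmr_load(ce_kern, &ce_mmr->ce_adm_int_mask, &mask);
	mask |= (1UL << bit);
	tioce_mmr_store(ce_kern, &ce_mmr->ce_adm_int_mask, &mask);

	/* or use the one-shot set/clear helpers */
	tioce_mmr_seti(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
	tioce_mmr_clri(ce_kern, &ce_mmr->ce_adm_int_mask, (1UL << bit));
}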