author    | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-16 23:56:49 -0500
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-01-16 23:56:49 -0500
commit    | a1bc5cdf9f9550bd7e9e5d8400e95b164165b275 (patch)
tree      | 3d4e80bb369675765e39450c55c6140e1213da81 /arch
parent    | 8dca6f33f026dc8a7fc2710b78a7521e899bd611 (diff)
parent    | 859538763155814d217054eb6e3cdff71bdb4d89 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/kernel/perfmon.c | 2
-rw-r--r-- | arch/ia64/kernel/perfmon_montecito.h | 269
-rw-r--r-- | arch/ia64/mm/init.c | 36
-rw-r--r-- | arch/ia64/pci/pci.c | 19
-rw-r--r-- | arch/ia64/sn/include/xtalk/hubdev.h | 10
-rw-r--r-- | arch/ia64/sn/include/xtalk/xbow.h | 206
-rw-r--r-- | arch/ia64/sn/include/xtalk/xwidgetdev.h | 46
-rw-r--r-- | arch/ia64/sn/kernel/io_init.c | 10
-rw-r--r-- | arch/ia64/sn/kernel/irq.c | 10
-rw-r--r-- | arch/ia64/sn/kernel/tiocx.c | 18
-rw-r--r-- | arch/ia64/sn/pci/pcibr/pcibr_ate.c | 16
-rw-r--r-- | arch/ia64/sn/pci/pcibr/pcibr_dma.c | 44
-rw-r--r-- | arch/ia64/sn/pci/pcibr/pcibr_provider.c | 12
-rw-r--r-- | arch/ia64/sn/pci/pcibr/pcibr_reg.c | 28
-rw-r--r-- | arch/ia64/sn/pci/tioca_provider.c | 36
-rw-r--r-- | arch/ia64/sn/pci/tioce_provider.c | 68
16 files changed, 578 insertions, 252 deletions
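To reproduce this listing locally, a minimal sketch (assuming a kernel tree that contains this merge and a standard git client; the hashes are the commit and first-parent IDs shown in the header above):

    # Diffstat of the merge against its first parent, limited to arch/
    git diff --stat 8dca6f33f026 a1bc5cdf9f95 -- arch/

    # Full patch text for one of the files touched by the merge
    git diff 8dca6f33f026 a1bc5cdf9f95 -- arch/ia64/kernel/perfmon_montecito.h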
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index bd87cb6b7a81..2ea4b39efffa 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -628,9 +628,11 @@ static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count,
628 | 628 | ||
629 | #include "perfmon_itanium.h" | 629 | #include "perfmon_itanium.h" |
630 | #include "perfmon_mckinley.h" | 630 | #include "perfmon_mckinley.h" |
631 | #include "perfmon_montecito.h" | ||
631 | #include "perfmon_generic.h" | 632 | #include "perfmon_generic.h" |
632 | 633 | ||
633 | static pmu_config_t *pmu_confs[]={ | 634 | static pmu_config_t *pmu_confs[]={ |
635 | &pmu_conf_mont, | ||
634 | &pmu_conf_mck, | 636 | &pmu_conf_mck, |
635 | &pmu_conf_ita, | 637 | &pmu_conf_ita, |
636 | &pmu_conf_gen, /* must be last */ | 638 | &pmu_conf_gen, /* must be last */ |
diff --git a/arch/ia64/kernel/perfmon_montecito.h b/arch/ia64/kernel/perfmon_montecito.h
new file mode 100644
index 000000000000..cd06ac6a686c
--- /dev/null
+++ b/arch/ia64/kernel/perfmon_montecito.h
@@ -0,0 +1,269 @@
1 | /* | ||
2 | * This file contains the Montecito PMU register description tables | ||
3 | * and pmc checker used by perfmon.c. | ||
4 | * | ||
5 | * Copyright (c) 2005-2006 Hewlett-Packard Development Company, L.P. | ||
6 | * Contributed by Stephane Eranian <eranian@hpl.hp.com> | ||
7 | */ | ||
8 | static int pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); | ||
9 | |||
10 | #define RDEP_MONT_ETB (RDEP(38)|RDEP(39)|RDEP(48)|RDEP(49)|RDEP(50)|RDEP(51)|RDEP(52)|RDEP(53)|RDEP(54)|\ | ||
11 | RDEP(55)|RDEP(56)|RDEP(57)|RDEP(58)|RDEP(59)|RDEP(60)|RDEP(61)|RDEP(62)|RDEP(63)) | ||
12 | #define RDEP_MONT_DEAR (RDEP(32)|RDEP(33)|RDEP(36)) | ||
13 | #define RDEP_MONT_IEAR (RDEP(34)|RDEP(35)) | ||
14 | |||
15 | static pfm_reg_desc_t pfm_mont_pmc_desc[PMU_MAX_PMCS]={ | ||
16 | /* pmc0 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
17 | /* pmc1 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
18 | /* pmc2 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
19 | /* pmc3 */ { PFM_REG_CONTROL , 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
20 | /* pmc4 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(4),0, 0, 0}, {0,0, 0, 0}}, | ||
21 | /* pmc5 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(5),0, 0, 0}, {0,0, 0, 0}}, | ||
22 | /* pmc6 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(6),0, 0, 0}, {0,0, 0, 0}}, | ||
23 | /* pmc7 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(7),0, 0, 0}, {0,0, 0, 0}}, | ||
24 | /* pmc8 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(8),0, 0, 0}, {0,0, 0, 0}}, | ||
25 | /* pmc9 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(9),0, 0, 0}, {0,0, 0, 0}}, | ||
26 | /* pmc10 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(10),0, 0, 0}, {0,0, 0, 0}}, | ||
27 | /* pmc11 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(11),0, 0, 0}, {0,0, 0, 0}}, | ||
28 | /* pmc12 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(12),0, 0, 0}, {0,0, 0, 0}}, | ||
29 | /* pmc13 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(13),0, 0, 0}, {0,0, 0, 0}}, | ||
30 | /* pmc14 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(14),0, 0, 0}, {0,0, 0, 0}}, | ||
31 | /* pmc15 */ { PFM_REG_COUNTING, 6, 0x2000000, 0x7c7fff7f, NULL, pfm_mont_pmc_check, {RDEP(15),0, 0, 0}, {0,0, 0, 0}}, | ||
32 | /* pmc16 */ { PFM_REG_NOTIMPL, }, | ||
33 | /* pmc17 */ { PFM_REG_NOTIMPL, }, | ||
34 | /* pmc18 */ { PFM_REG_NOTIMPL, }, | ||
35 | /* pmc19 */ { PFM_REG_NOTIMPL, }, | ||
36 | /* pmc20 */ { PFM_REG_NOTIMPL, }, | ||
37 | /* pmc21 */ { PFM_REG_NOTIMPL, }, | ||
38 | /* pmc22 */ { PFM_REG_NOTIMPL, }, | ||
39 | /* pmc23 */ { PFM_REG_NOTIMPL, }, | ||
40 | /* pmc24 */ { PFM_REG_NOTIMPL, }, | ||
41 | /* pmc25 */ { PFM_REG_NOTIMPL, }, | ||
42 | /* pmc26 */ { PFM_REG_NOTIMPL, }, | ||
43 | /* pmc27 */ { PFM_REG_NOTIMPL, }, | ||
44 | /* pmc28 */ { PFM_REG_NOTIMPL, }, | ||
45 | /* pmc29 */ { PFM_REG_NOTIMPL, }, | ||
46 | /* pmc30 */ { PFM_REG_NOTIMPL, }, | ||
47 | /* pmc31 */ { PFM_REG_NOTIMPL, }, | ||
48 | /* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
49 | /* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
50 | /* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
51 | /* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
52 | /* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
53 | /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}}, | ||
54 | /* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
55 | /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, | ||
56 | /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}}, | ||
57 | /* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, | ||
58 | /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, | ||
59 | { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */ | ||
60 | }; | ||
61 | |||
62 | static pfm_reg_desc_t pfm_mont_pmd_desc[PMU_MAX_PMDS]={ | ||
63 | /* pmd0 */ { PFM_REG_NOTIMPL, }, | ||
64 | /* pmd1 */ { PFM_REG_NOTIMPL, }, | ||
65 | /* pmd2 */ { PFM_REG_NOTIMPL, }, | ||
66 | /* pmd3 */ { PFM_REG_NOTIMPL, }, | ||
67 | /* pmd4 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(4),0, 0, 0}}, | ||
68 | /* pmd5 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(5),0, 0, 0}}, | ||
69 | /* pmd6 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(6),0, 0, 0}}, | ||
70 | /* pmd7 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(7),0, 0, 0}}, | ||
71 | /* pmd8 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(8),0, 0, 0}}, | ||
72 | /* pmd9 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(9),0, 0, 0}}, | ||
73 | /* pmd10 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(10),0, 0, 0}}, | ||
74 | /* pmd11 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(11),0, 0, 0}}, | ||
75 | /* pmd12 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(12),0, 0, 0}}, | ||
76 | /* pmd13 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(13),0, 0, 0}}, | ||
77 | /* pmd14 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(14),0, 0, 0}}, | ||
78 | /* pmd15 */ { PFM_REG_COUNTING, 0, 0x0, -1, NULL, NULL, {0,0, 0, 0}, {RDEP(15),0, 0, 0}}, | ||
79 | /* pmd16 */ { PFM_REG_NOTIMPL, }, | ||
80 | /* pmd17 */ { PFM_REG_NOTIMPL, }, | ||
81 | /* pmd18 */ { PFM_REG_NOTIMPL, }, | ||
82 | /* pmd19 */ { PFM_REG_NOTIMPL, }, | ||
83 | /* pmd20 */ { PFM_REG_NOTIMPL, }, | ||
84 | /* pmd21 */ { PFM_REG_NOTIMPL, }, | ||
85 | /* pmd22 */ { PFM_REG_NOTIMPL, }, | ||
86 | /* pmd23 */ { PFM_REG_NOTIMPL, }, | ||
87 | /* pmd24 */ { PFM_REG_NOTIMPL, }, | ||
88 | /* pmd25 */ { PFM_REG_NOTIMPL, }, | ||
89 | /* pmd26 */ { PFM_REG_NOTIMPL, }, | ||
90 | /* pmd27 */ { PFM_REG_NOTIMPL, }, | ||
91 | /* pmd28 */ { PFM_REG_NOTIMPL, }, | ||
92 | /* pmd29 */ { PFM_REG_NOTIMPL, }, | ||
93 | /* pmd30 */ { PFM_REG_NOTIMPL, }, | ||
94 | /* pmd31 */ { PFM_REG_NOTIMPL, }, | ||
95 | /* pmd32 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(33)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}}, | ||
96 | /* pmd33 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(36),0, 0, 0}, {RDEP(40),0, 0, 0}}, | ||
97 | /* pmd34 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(35),0, 0, 0}, {RDEP(37),0, 0, 0}}, | ||
98 | /* pmd35 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(34),0, 0, 0}, {RDEP(37),0, 0, 0}}, | ||
99 | /* pmd36 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP(32)|RDEP(33),0, 0, 0}, {RDEP(40),0, 0, 0}}, | ||
100 | /* pmd37 */ { PFM_REG_NOTIMPL, }, | ||
101 | /* pmd38 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
102 | /* pmd39 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
103 | /* pmd40 */ { PFM_REG_NOTIMPL, }, | ||
104 | /* pmd41 */ { PFM_REG_NOTIMPL, }, | ||
105 | /* pmd42 */ { PFM_REG_NOTIMPL, }, | ||
106 | /* pmd43 */ { PFM_REG_NOTIMPL, }, | ||
107 | /* pmd44 */ { PFM_REG_NOTIMPL, }, | ||
108 | /* pmd45 */ { PFM_REG_NOTIMPL, }, | ||
109 | /* pmd46 */ { PFM_REG_NOTIMPL, }, | ||
110 | /* pmd47 */ { PFM_REG_NOTIMPL, }, | ||
111 | /* pmd48 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
112 | /* pmd49 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
113 | /* pmd50 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
114 | /* pmd51 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
115 | /* pmd52 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
116 | /* pmd53 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
117 | /* pmd54 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
118 | /* pmd55 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
119 | /* pmd56 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
120 | /* pmd57 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
121 | /* pmd58 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
122 | /* pmd59 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
123 | /* pmd60 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
124 | /* pmd61 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
125 | /* pmd62 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
126 | /* pmd63 */ { PFM_REG_BUFFER, 0, 0x0, -1, NULL, NULL, {RDEP_MONT_ETB,0, 0, 0}, {RDEP(39),0, 0, 0}}, | ||
127 | { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */ | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * PMC reserved fields must have their power-up values preserved | ||
132 | */ | ||
133 | static int | ||
134 | pfm_mont_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs) | ||
135 | { | ||
136 | unsigned long tmp1, tmp2, ival = *val; | ||
137 | |||
138 | /* remove reserved areas from user value */ | ||
139 | tmp1 = ival & PMC_RSVD_MASK(cnum); | ||
140 | |||
141 | /* get reserved fields values */ | ||
142 | tmp2 = PMC_DFL_VAL(cnum) & ~PMC_RSVD_MASK(cnum); | ||
143 | |||
144 | *val = tmp1 | tmp2; | ||
145 | |||
146 | DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n", | ||
147 | cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val)); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * task can be NULL if the context is unloaded | ||
153 | */ | ||
154 | static int | ||
155 | pfm_mont_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs) | ||
156 | { | ||
157 | int ret = 0; | ||
158 | unsigned long val32 = 0, val38 = 0, val41 = 0; | ||
159 | unsigned long tmpval; | ||
160 | int check_case1 = 0; | ||
161 | int is_loaded; | ||
162 | |||
163 | /* first preserve the reserved fields */ | ||
164 | pfm_mont_reserved(cnum, val, regs); | ||
165 | |||
166 | tmpval = *val; | ||
167 | |||
168 | /* sanity check */ | ||
169 | if (ctx == NULL) return -EINVAL; | ||
170 | |||
171 | is_loaded = ctx->ctx_state == PFM_CTX_LOADED || ctx->ctx_state == PFM_CTX_MASKED; | ||
172 | |||
173 | /* | ||
174 | * we must clear the debug registers if pmc41 has a value which enable | ||
175 | * memory pipeline event constraints. In this case we need to clear the | ||
176 | * the debug registers if they have not yet been accessed. This is required | ||
177 | * to avoid picking stale state. | ||
178 | * PMC41 is "active" if: | ||
179 | * one of the pmc41.cfg_dtagXX field is different from 0x3 | ||
180 | * AND | ||
181 | * at the corresponding pmc41.en_dbrpXX is set. | ||
182 | * AND | ||
183 | * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used) | ||
184 | */ | ||
185 | DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded)); | ||
186 | |||
187 | if (cnum == 41 && is_loaded | ||
188 | && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) { | ||
189 | |||
190 | DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval)); | ||
191 | |||
192 | /* don't mix debug with perfmon */ | ||
193 | if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; | ||
194 | |||
195 | /* | ||
196 | * a count of 0 will mark the debug registers if: | ||
197 | * AND | ||
198 | */ | ||
199 | ret = pfm_write_ibr_dbr(PFM_DATA_RR, ctx, NULL, 0, regs); | ||
200 | if (ret) return ret; | ||
201 | } | ||
202 | /* | ||
203 | * we must clear the (instruction) debug registers if: | ||
204 | * pmc38.ig_ibrpX is 0 (enabled) | ||
205 | * AND | ||
206 | * ctx_fl_using_dbreg == 0 (i.e., dbr not yet used) | ||
207 | */ | ||
208 | if (cnum == 38 && is_loaded && ((tmpval & 0x492UL) != 0x492UL) && ctx->ctx_fl_using_dbreg == 0) { | ||
209 | |||
210 | DPRINT(("pmc38=0x%lx has active pmc38 settings, clearing ibr\n", tmpval)); | ||
211 | |||
212 | /* don't mix debug with perfmon */ | ||
213 | if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL; | ||
214 | |||
215 | /* | ||
216 | * a count of 0 will mark the debug registers as in use and also | ||
217 | * ensure that they are properly cleared. | ||
218 | */ | ||
219 | ret = pfm_write_ibr_dbr(PFM_CODE_RR, ctx, NULL, 0, regs); | ||
220 | if (ret) return ret; | ||
221 | |||
222 | } | ||
223 | switch(cnum) { | ||
224 | case 32: val32 = *val; | ||
225 | val38 = ctx->ctx_pmcs[38]; | ||
226 | val41 = ctx->ctx_pmcs[41]; | ||
227 | check_case1 = 1; | ||
228 | break; | ||
229 | case 38: val38 = *val; | ||
230 | val32 = ctx->ctx_pmcs[32]; | ||
231 | val41 = ctx->ctx_pmcs[41]; | ||
232 | check_case1 = 1; | ||
233 | break; | ||
234 | case 41: val41 = *val; | ||
235 | val32 = ctx->ctx_pmcs[32]; | ||
236 | val38 = ctx->ctx_pmcs[38]; | ||
237 | check_case1 = 1; | ||
238 | break; | ||
239 | } | ||
240 | /* check illegal configuration which can produce inconsistencies in tagging | ||
241 | * i-side events in L1D and L2 caches | ||
242 | */ | ||
243 | if (check_case1) { | ||
244 | ret = (((val41 >> 45) & 0xf) == 0 && ((val32>>57) & 0x1) == 0) | ||
245 | && ((((val38>>1) & 0x3) == 0x2 || ((val38>>1) & 0x3) == 0) | ||
246 | || (((val38>>4) & 0x3) == 0x2 || ((val38>>4) & 0x3) == 0)); | ||
247 | if (ret) { | ||
248 | DPRINT(("invalid config pmc38=0x%lx pmc41=0x%lx pmc32=0x%lx\n", val38, val41, val32)); | ||
249 | return -EINVAL; | ||
250 | } | ||
251 | } | ||
252 | *val = tmpval; | ||
253 | return 0; | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * impl_pmcs, impl_pmds are computed at runtime to minimize errors! | ||
258 | */ | ||
259 | static pmu_config_t pmu_conf_mont={ | ||
260 | .pmu_name = "Montecito", | ||
261 | .pmu_family = 0x20, | ||
262 | .flags = PFM_PMU_IRQ_RESEND, | ||
263 | .ovfl_val = (1UL << 47) - 1, | ||
264 | .pmd_desc = pfm_mont_pmd_desc, | ||
265 | .pmc_desc = pfm_mont_pmc_desc, | ||
266 | .num_ibrs = 8, | ||
267 | .num_dbrs = 8, | ||
268 | .use_rr_dbregs = 1 /* debug register are use for range retrictions */ | ||
269 | }; | ||
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index e3215ba64ffd..b38b6d213c15 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -635,3 +635,39 @@ mem_init (void)
635 | ia32_mem_init(); | 635 | ia32_mem_init(); |
636 | #endif | 636 | #endif |
637 | } | 637 | } |
638 | |||
639 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
640 | void online_page(struct page *page) | ||
641 | { | ||
642 | ClearPageReserved(page); | ||
643 | set_page_count(page, 1); | ||
644 | __free_page(page); | ||
645 | totalram_pages++; | ||
646 | num_physpages++; | ||
647 | } | ||
648 | |||
649 | int add_memory(u64 start, u64 size) | ||
650 | { | ||
651 | pg_data_t *pgdat; | ||
652 | struct zone *zone; | ||
653 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
654 | unsigned long nr_pages = size >> PAGE_SHIFT; | ||
655 | int ret; | ||
656 | |||
657 | pgdat = NODE_DATA(0); | ||
658 | |||
659 | zone = pgdat->node_zones + ZONE_NORMAL; | ||
660 | ret = __add_pages(zone, start_pfn, nr_pages); | ||
661 | |||
662 | if (ret) | ||
663 | printk("%s: Problem encountered in __add_pages() as ret=%d\n", | ||
664 | __FUNCTION__, ret); | ||
665 | |||
666 | return ret; | ||
667 | } | ||
668 | |||
669 | int remove_memory(u64 start, u64 size) | ||
670 | { | ||
671 | return -EINVAL; | ||
672 | } | ||
673 | #endif | ||
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 30dbc98bf0b3..d27ecdcb6fca 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -454,14 +454,13 @@ static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
454 | return 0; | 454 | return 0; |
455 | } | 455 | } |
456 | 456 | ||
457 | static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) | 457 | static void __devinit |
458 | pcibios_fixup_resources(struct pci_dev *dev, int start, int limit) | ||
458 | { | 459 | { |
459 | struct pci_bus_region region; | 460 | struct pci_bus_region region; |
460 | int i; | 461 | int i; |
461 | int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ? \ | ||
462 | PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES; | ||
463 | 462 | ||
464 | for (i = 0; i < limit; i++) { | 463 | for (i = start; i < limit; i++) { |
465 | if (!dev->resource[i].flags) | 464 | if (!dev->resource[i].flags) |
466 | continue; | 465 | continue; |
467 | region.start = dev->resource[i].start; | 466 | region.start = dev->resource[i].start; |
@@ -472,6 +471,16 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
472 | } | 471 | } |
473 | } | 472 | } |
474 | 473 | ||
474 | static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) | ||
475 | { | ||
476 | pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES); | ||
477 | } | ||
478 | |||
479 | static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev) | ||
480 | { | ||
481 | pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES); | ||
482 | } | ||
483 | |||
475 | /* | 484 | /* |
476 | * Called after each bus is probed, but before its children are examined. | 485 | * Called after each bus is probed, but before its children are examined. |
477 | */ | 486 | */ |
@@ -482,7 +491,7 @@ pcibios_fixup_bus (struct pci_bus *b)
482 | 491 | ||
483 | if (b->self) { | 492 | if (b->self) { |
484 | pci_read_bridge_bases(b); | 493 | pci_read_bridge_bases(b); |
485 | pcibios_fixup_device_resources(b->self); | 494 | pcibios_fixup_bridge_resources(b->self); |
486 | } | 495 | } |
487 | list_for_each_entry(dev, &b->devices, bus_list) | 496 | list_for_each_entry(dev, &b->devices, bus_list) |
488 | pcibios_fixup_device_resources(dev); | 497 | pcibios_fixup_device_resources(dev); |
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 4d417c301201..7c88e9a58516 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -40,8 +40,8 @@ struct sn_flush_device_common {
40 | unsigned long sfdl_force_int_addr; | 40 | unsigned long sfdl_force_int_addr; |
41 | unsigned long sfdl_flush_value; | 41 | unsigned long sfdl_flush_value; |
42 | volatile unsigned long *sfdl_flush_addr; | 42 | volatile unsigned long *sfdl_flush_addr; |
43 | uint32_t sfdl_persistent_busnum; | 43 | u32 sfdl_persistent_busnum; |
44 | uint32_t sfdl_persistent_segment; | 44 | u32 sfdl_persistent_segment; |
45 | struct pcibus_info *sfdl_pcibus_info; | 45 | struct pcibus_info *sfdl_pcibus_info; |
46 | }; | 46 | }; |
47 | 47 | ||
@@ -56,7 +56,7 @@ struct sn_flush_device_kernel {
56 | */ | 56 | */ |
57 | struct sn_flush_nasid_entry { | 57 | struct sn_flush_nasid_entry { |
58 | struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num | 58 | struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num |
59 | uint64_t iio_itte[8]; | 59 | u64 iio_itte[8]; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | struct hubdev_info { | 62 | struct hubdev_info { |
@@ -70,8 +70,8 @@ struct hubdev_info {
70 | 70 | ||
71 | void *hdi_nodepda; | 71 | void *hdi_nodepda; |
72 | void *hdi_node_vertex; | 72 | void *hdi_node_vertex; |
73 | uint32_t max_segment_number; | 73 | u32 max_segment_number; |
74 | uint32_t max_pcibus_number; | 74 | u32 max_pcibus_number; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | extern void hubdev_init_node(nodepda_t *, cnodeid_t); | 77 | extern void hubdev_init_node(nodepda_t *, cnodeid_t); |
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h
index ec56b3432f17..90f37a4133d0 100644
--- a/arch/ia64/sn/include/xtalk/xbow.h
+++ b/arch/ia64/sn/include/xtalk/xbow.h
@@ -3,7 +3,8 @@
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights |
7 | * Reserved. | ||
7 | */ | 8 | */ |
8 | #ifndef _ASM_IA64_SN_XTALK_XBOW_H | 9 | #ifndef _ASM_IA64_SN_XTALK_XBOW_H |
9 | #define _ASM_IA64_SN_XTALK_XBOW_H | 10 | #define _ASM_IA64_SN_XTALK_XBOW_H |
@@ -21,94 +22,94 @@
21 | 22 | ||
22 | /* Register set for each xbow link */ | 23 | /* Register set for each xbow link */ |
23 | typedef volatile struct xb_linkregs_s { | 24 | typedef volatile struct xb_linkregs_s { |
24 | /* | 25 | /* |
25 | * we access these through synergy unswizzled space, so the address | 26 | * we access these through synergy unswizzled space, so the address |
26 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) | 27 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) |
27 | * That's why we put the register first and filler second. | 28 | * That's why we put the register first and filler second. |
28 | */ | 29 | */ |
29 | uint32_t link_ibf; | 30 | u32 link_ibf; |
30 | uint32_t filler0; /* filler for proper alignment */ | 31 | u32 filler0; /* filler for proper alignment */ |
31 | uint32_t link_control; | 32 | u32 link_control; |
32 | uint32_t filler1; | 33 | u32 filler1; |
33 | uint32_t link_status; | 34 | u32 link_status; |
34 | uint32_t filler2; | 35 | u32 filler2; |
35 | uint32_t link_arb_upper; | 36 | u32 link_arb_upper; |
36 | uint32_t filler3; | 37 | u32 filler3; |
37 | uint32_t link_arb_lower; | 38 | u32 link_arb_lower; |
38 | uint32_t filler4; | 39 | u32 filler4; |
39 | uint32_t link_status_clr; | 40 | u32 link_status_clr; |
40 | uint32_t filler5; | 41 | u32 filler5; |
41 | uint32_t link_reset; | 42 | u32 link_reset; |
42 | uint32_t filler6; | 43 | u32 filler6; |
43 | uint32_t link_aux_status; | 44 | u32 link_aux_status; |
44 | uint32_t filler7; | 45 | u32 filler7; |
45 | } xb_linkregs_t; | 46 | } xb_linkregs_t; |
46 | 47 | ||
47 | typedef volatile struct xbow_s { | 48 | typedef volatile struct xbow_s { |
48 | /* standard widget configuration 0x000000-0x000057 */ | 49 | /* standard widget configuration 0x000000-0x000057 */ |
49 | struct widget_cfg xb_widget; /* 0x000000 */ | 50 | struct widget_cfg xb_widget; /* 0x000000 */ |
50 | 51 | ||
51 | /* helper fieldnames for accessing bridge widget */ | 52 | /* helper fieldnames for accessing bridge widget */ |
52 | 53 | ||
53 | #define xb_wid_id xb_widget.w_id | 54 | #define xb_wid_id xb_widget.w_id |
54 | #define xb_wid_stat xb_widget.w_status | 55 | #define xb_wid_stat xb_widget.w_status |
55 | #define xb_wid_err_upper xb_widget.w_err_upper_addr | 56 | #define xb_wid_err_upper xb_widget.w_err_upper_addr |
56 | #define xb_wid_err_lower xb_widget.w_err_lower_addr | 57 | #define xb_wid_err_lower xb_widget.w_err_lower_addr |
57 | #define xb_wid_control xb_widget.w_control | 58 | #define xb_wid_control xb_widget.w_control |
58 | #define xb_wid_req_timeout xb_widget.w_req_timeout | 59 | #define xb_wid_req_timeout xb_widget.w_req_timeout |
59 | #define xb_wid_int_upper xb_widget.w_intdest_upper_addr | 60 | #define xb_wid_int_upper xb_widget.w_intdest_upper_addr |
60 | #define xb_wid_int_lower xb_widget.w_intdest_lower_addr | 61 | #define xb_wid_int_lower xb_widget.w_intdest_lower_addr |
61 | #define xb_wid_err_cmdword xb_widget.w_err_cmd_word | 62 | #define xb_wid_err_cmdword xb_widget.w_err_cmd_word |
62 | #define xb_wid_llp xb_widget.w_llp_cfg | 63 | #define xb_wid_llp xb_widget.w_llp_cfg |
63 | #define xb_wid_stat_clr xb_widget.w_tflush | 64 | #define xb_wid_stat_clr xb_widget.w_tflush |
64 | 65 | ||
65 | /* | 66 | /* |
66 | * we access these through synergy unswizzled space, so the address | 67 | * we access these through synergy unswizzled space, so the address |
67 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) | 68 | * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.) |
68 | * That's why we put the register first and filler second. | 69 | * That's why we put the register first and filler second. |
69 | */ | 70 | */ |
70 | /* xbow-specific widget configuration 0x000058-0x0000FF */ | 71 | /* xbow-specific widget configuration 0x000058-0x0000FF */ |
71 | uint32_t xb_wid_arb_reload; /* 0x00005C */ | 72 | u32 xb_wid_arb_reload; /* 0x00005C */ |
72 | uint32_t _pad_000058; | 73 | u32 _pad_000058; |
73 | uint32_t xb_perf_ctr_a; /* 0x000064 */ | 74 | u32 xb_perf_ctr_a; /* 0x000064 */ |
74 | uint32_t _pad_000060; | 75 | u32 _pad_000060; |
75 | uint32_t xb_perf_ctr_b; /* 0x00006c */ | 76 | u32 xb_perf_ctr_b; /* 0x00006c */ |
76 | uint32_t _pad_000068; | 77 | u32 _pad_000068; |
77 | uint32_t xb_nic; /* 0x000074 */ | 78 | u32 xb_nic; /* 0x000074 */ |
78 | uint32_t _pad_000070; | 79 | u32 _pad_000070; |
79 | 80 | ||
80 | /* Xbridge only */ | 81 | /* Xbridge only */ |
81 | uint32_t xb_w0_rst_fnc; /* 0x00007C */ | 82 | u32 xb_w0_rst_fnc; /* 0x00007C */ |
82 | uint32_t _pad_000078; | 83 | u32 _pad_000078; |
83 | uint32_t xb_l8_rst_fnc; /* 0x000084 */ | 84 | u32 xb_l8_rst_fnc; /* 0x000084 */ |
84 | uint32_t _pad_000080; | 85 | u32 _pad_000080; |
85 | uint32_t xb_l9_rst_fnc; /* 0x00008c */ | 86 | u32 xb_l9_rst_fnc; /* 0x00008c */ |
86 | uint32_t _pad_000088; | 87 | u32 _pad_000088; |
87 | uint32_t xb_la_rst_fnc; /* 0x000094 */ | 88 | u32 xb_la_rst_fnc; /* 0x000094 */ |
88 | uint32_t _pad_000090; | 89 | u32 _pad_000090; |
89 | uint32_t xb_lb_rst_fnc; /* 0x00009c */ | 90 | u32 xb_lb_rst_fnc; /* 0x00009c */ |
90 | uint32_t _pad_000098; | 91 | u32 _pad_000098; |
91 | uint32_t xb_lc_rst_fnc; /* 0x0000a4 */ | 92 | u32 xb_lc_rst_fnc; /* 0x0000a4 */ |
92 | uint32_t _pad_0000a0; | 93 | u32 _pad_0000a0; |
93 | uint32_t xb_ld_rst_fnc; /* 0x0000ac */ | 94 | u32 xb_ld_rst_fnc; /* 0x0000ac */ |
94 | uint32_t _pad_0000a8; | 95 | u32 _pad_0000a8; |
95 | uint32_t xb_le_rst_fnc; /* 0x0000b4 */ | 96 | u32 xb_le_rst_fnc; /* 0x0000b4 */ |
96 | uint32_t _pad_0000b0; | 97 | u32 _pad_0000b0; |
97 | uint32_t xb_lf_rst_fnc; /* 0x0000bc */ | 98 | u32 xb_lf_rst_fnc; /* 0x0000bc */ |
98 | uint32_t _pad_0000b8; | 99 | u32 _pad_0000b8; |
99 | uint32_t xb_lock; /* 0x0000c4 */ | 100 | u32 xb_lock; /* 0x0000c4 */ |
100 | uint32_t _pad_0000c0; | 101 | u32 _pad_0000c0; |
101 | uint32_t xb_lock_clr; /* 0x0000cc */ | 102 | u32 xb_lock_clr; /* 0x0000cc */ |
102 | uint32_t _pad_0000c8; | 103 | u32 _pad_0000c8; |
103 | /* end of Xbridge only */ | 104 | /* end of Xbridge only */ |
104 | uint32_t _pad_0000d0[12]; | 105 | u32 _pad_0000d0[12]; |
105 | 106 | ||
106 | /* Link Specific Registers, port 8..15 0x000100-0x000300 */ | 107 | /* Link Specific Registers, port 8..15 0x000100-0x000300 */ |
107 | xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS]; | 108 | xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS]; |
108 | #define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)] | ||
109 | |||
110 | } xbow_t; | 109 | } xbow_t; |
111 | 110 | ||
111 | #define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)] | ||
112 | |||
112 | #define XB_FLAGS_EXISTS 0x1 /* device exists */ | 113 | #define XB_FLAGS_EXISTS 0x1 /* device exists */ |
113 | #define XB_FLAGS_MASTER 0x2 | 114 | #define XB_FLAGS_MASTER 0x2 |
114 | #define XB_FLAGS_SLAVE 0x0 | 115 | #define XB_FLAGS_SLAVE 0x0 |
@@ -160,7 +161,7 @@ typedef volatile struct xbow_s {
160 | /* End of Xbridge only */ | 161 | /* End of Xbridge only */ |
161 | 162 | ||
162 | /* used only in ide, but defined here within the reserved portion */ | 163 | /* used only in ide, but defined here within the reserved portion */ |
163 | /* of the widget0 address space (before 0xf4) */ | 164 | /* of the widget0 address space (before 0xf4) */ |
164 | #define XBOW_WID_UNDEF 0xe4 | 165 | #define XBOW_WID_UNDEF 0xe4 |
165 | 166 | ||
166 | /* xbow link register set base, legal value for x is 0x8..0xf */ | 167 | /* xbow link register set base, legal value for x is 0x8..0xf */ |
@@ -179,29 +180,37 @@ typedef volatile struct xbow_s {
179 | 180 | ||
180 | /* link_control(x) */ | 181 | /* link_control(x) */ |
181 | #define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */ | 182 | #define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */ |
182 | /* reserved: 0x40000000 */ | 183 | /* reserved: 0x40000000 */ |
183 | #define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */ | 184 | #define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */ |
184 | #define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */ | 185 | #define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer |
185 | #define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */ | 186 | level */ |
186 | #define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */ | 187 | #define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 |
187 | #define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */ | 188 | bit mode */ |
188 | #define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */ | 189 | #define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP |
189 | #define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */ | 190 | packet */ |
190 | #define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */ | 191 | #define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit |
191 | /* reserved: 0x0000fe00 */ | 192 | mask */ |
193 | #define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit | ||
194 | shift */ | ||
195 | #define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination | ||
196 | */ | ||
197 | #define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input | ||
198 | buffer */ | ||
199 | /* reserved: 0x0000fe00 */ | ||
192 | #define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */ | 200 | #define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */ |
193 | #define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */ | 201 | #define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */ |
194 | #define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */ | 202 | #define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */ |
195 | #define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */ | 203 | #define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */ |
196 | #define XB_CTRL_RCV_IE 0x00000010 /* receive */ | 204 | #define XB_CTRL_RCV_IE 0x00000010 /* receive */ |
197 | #define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */ | 205 | #define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */ |
198 | /* reserved: 0x00000004 */ | 206 | /* reserved: 0x00000004 */ |
199 | #define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */ | 207 | #define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request |
208 | timeout */ | ||
200 | #define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */ | 209 | #define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */ |
201 | 210 | ||
202 | /* link_status(x) */ | 211 | /* link_status(x) */ |
203 | #define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE | 212 | #define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE |
204 | /* reserved: 0x7ff80000 */ | 213 | /* reserved: 0x7ff80000 */ |
205 | #define XB_STAT_MULTI_ERR 0x00040000 /* multi error */ | 214 | #define XB_STAT_MULTI_ERR 0x00040000 /* multi error */ |
206 | #define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE | 215 | #define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE |
207 | #define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE | 216 | #define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE |
@@ -211,7 +220,7 @@ typedef volatile struct xbow_s {
211 | #define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE | 220 | #define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE |
212 | #define XB_STAT_RCV_ERR XB_CTRL_RCV_IE | 221 | #define XB_STAT_RCV_ERR XB_CTRL_RCV_IE |
213 | #define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE | 222 | #define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE |
214 | /* reserved: 0x00000004 */ | 223 | /* reserved: 0x00000004 */ |
215 | #define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE | 224 | #define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE |
216 | #define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE | 225 | #define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE |
217 | 226 | ||
@@ -222,7 +231,7 @@ typedef volatile struct xbow_s {
222 | #define XB_AUX_LINKFAIL_RST_BAD 0x00000040 | 231 | #define XB_AUX_LINKFAIL_RST_BAD 0x00000040 |
223 | #define XB_AUX_STAT_PRESENT 0x00000020 | 232 | #define XB_AUX_STAT_PRESENT 0x00000020 |
224 | #define XB_AUX_STAT_PORT_WIDTH 0x00000010 | 233 | #define XB_AUX_STAT_PORT_WIDTH 0x00000010 |
225 | /* reserved: 0x0000000f */ | 234 | /* reserved: 0x0000000f */ |
226 | 235 | ||
227 | /* | 236 | /* |
228 | * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper | 237 | * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper |
@@ -238,7 +247,8 @@ typedef volatile struct xbow_s {
238 | /* XBOW_WID_STAT */ | 247 | /* XBOW_WID_STAT */ |
239 | #define XB_WID_STAT_LINK_INTR_SHFT (24) | 248 | #define XB_WID_STAT_LINK_INTR_SHFT (24) |
240 | #define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT) | 249 | #define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT) |
241 | #define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT)) | 250 | #define XB_WID_STAT_LINK_INTR(x) \ |
251 | (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT)) | ||
242 | #define XB_WID_STAT_WIDGET0_INTR 0x00800000 | 252 | #define XB_WID_STAT_WIDGET0_INTR 0x00800000 |
243 | #define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */ | 253 | #define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */ |
244 | #define XB_WID_STAT_REG_ACC_ERR 0x00000020 | 254 | #define XB_WID_STAT_REG_ACC_ERR 0x00000020 |
@@ -264,7 +274,7 @@ typedef volatile struct xbow_s {
264 | #define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */ | 274 | #define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */ |
265 | #define XBOW_WIDGET_MFGR_NUM 0x0 | 275 | #define XBOW_WIDGET_MFGR_NUM 0x0 |
266 | #define XXBOW_WIDGET_MFGR_NUM 0x0 | 276 | #define XXBOW_WIDGET_MFGR_NUM 0x0 |
267 | #define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */ | 277 | #define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */ |
268 | 278 | ||
269 | #define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */ | 279 | #define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */ |
270 | #define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */ | 280 | #define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */ |
@@ -279,13 +289,13 @@ typedef volatile struct xbow_s {
279 | #define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */ | 289 | #define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */ |
280 | 290 | ||
281 | #define IS_XBRIDGE_XBOW(wid) \ | 291 | #define IS_XBRIDGE_XBOW(wid) \ |
282 | (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \ | 292 | (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \ |
283 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) | 293 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) |
284 | 294 | ||
285 | #define IS_PIC_XBOW(wid) \ | 295 | #define IS_PIC_XBOW(wid) \ |
286 | (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \ | 296 | (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \ |
287 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) | 297 | XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM) |
288 | 298 | ||
289 | #define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv) | 299 | #define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv) |
290 | 300 | ||
291 | #endif /* _ASM_IA64_SN_XTALK_XBOW_H */ | 301 | #endif /* _ASM_IA64_SN_XTALK_XBOW_H */ |
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h
index c5f4bc5cc033..2800eda0fd68 100644
--- a/arch/ia64/sn/include/xtalk/xwidgetdev.h
+++ b/arch/ia64/sn/include/xtalk/xwidgetdev.h
@@ -25,28 +25,28 @@
25 | 25 | ||
26 | /* widget configuration registers */ | 26 | /* widget configuration registers */ |
27 | struct widget_cfg{ | 27 | struct widget_cfg{ |
28 | uint32_t w_id; /* 0x04 */ | 28 | u32 w_id; /* 0x04 */ |
29 | uint32_t w_pad_0; /* 0x00 */ | 29 | u32 w_pad_0; /* 0x00 */ |
30 | uint32_t w_status; /* 0x0c */ | 30 | u32 w_status; /* 0x0c */ |
31 | uint32_t w_pad_1; /* 0x08 */ | 31 | u32 w_pad_1; /* 0x08 */ |
32 | uint32_t w_err_upper_addr; /* 0x14 */ | 32 | u32 w_err_upper_addr; /* 0x14 */ |
33 | uint32_t w_pad_2; /* 0x10 */ | 33 | u32 w_pad_2; /* 0x10 */ |
34 | uint32_t w_err_lower_addr; /* 0x1c */ | 34 | u32 w_err_lower_addr; /* 0x1c */ |
35 | uint32_t w_pad_3; /* 0x18 */ | 35 | u32 w_pad_3; /* 0x18 */ |
36 | uint32_t w_control; /* 0x24 */ | 36 | u32 w_control; /* 0x24 */ |
37 | uint32_t w_pad_4; /* 0x20 */ | 37 | u32 w_pad_4; /* 0x20 */ |
38 | uint32_t w_req_timeout; /* 0x2c */ | 38 | u32 w_req_timeout; /* 0x2c */ |
39 | uint32_t w_pad_5; /* 0x28 */ | 39 | u32 w_pad_5; /* 0x28 */ |
40 | uint32_t w_intdest_upper_addr; /* 0x34 */ | 40 | u32 w_intdest_upper_addr; /* 0x34 */ |
41 | uint32_t w_pad_6; /* 0x30 */ | 41 | u32 w_pad_6; /* 0x30 */ |
42 | uint32_t w_intdest_lower_addr; /* 0x3c */ | 42 | u32 w_intdest_lower_addr; /* 0x3c */ |
43 | uint32_t w_pad_7; /* 0x38 */ | 43 | u32 w_pad_7; /* 0x38 */ |
44 | uint32_t w_err_cmd_word; /* 0x44 */ | 44 | u32 w_err_cmd_word; /* 0x44 */ |
45 | uint32_t w_pad_8; /* 0x40 */ | 45 | u32 w_pad_8; /* 0x40 */ |
46 | uint32_t w_llp_cfg; /* 0x4c */ | 46 | u32 w_llp_cfg; /* 0x4c */ |
47 | uint32_t w_pad_9; /* 0x48 */ | 47 | u32 w_pad_9; /* 0x48 */ |
48 | uint32_t w_tflush; /* 0x54 */ | 48 | u32 w_tflush; /* 0x54 */ |
49 | uint32_t w_pad_10; /* 0x50 */ | 49 | u32 w_pad_10; /* 0x50 */ |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -63,7 +63,7 @@ struct xwidget_info{
63 | struct xwidget_hwid xwi_hwid; /* Widget Identification */ | 63 | struct xwidget_hwid xwi_hwid; /* Widget Identification */ |
64 | char xwi_masterxid; /* Hub's Widget Port Number */ | 64 | char xwi_masterxid; /* Hub's Widget Port Number */ |
65 | void *xwi_hubinfo; /* Hub's provider private info */ | 65 | void *xwi_hubinfo; /* Hub's provider private info */ |
66 | uint64_t *xwi_hub_provider; /* prom provider functions */ | 66 | u64 *xwi_hub_provider; /* prom provider functions */ |
67 | void *xwi_vertex; | 67 | void *xwi_vertex; |
68 | }; | 68 | }; |
69 | 69 | ||
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 258d9d7aff98..233d55115d33 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -132,8 +132,8 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
132 | * Retrieve the pci device information given the bus and device|function number. | 132 | * Retrieve the pci device information given the bus and device|function number. |
133 | */ | 133 | */ |
134 | static inline u64 | 134 | static inline u64 |
135 | sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, | 135 | sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, |
136 | u64 sn_irq_info) | 136 | u64 sn_irq_info) |
137 | { | 137 | { |
138 | struct ia64_sal_retval ret_stuff; | 138 | struct ia64_sal_retval ret_stuff; |
139 | ret_stuff.status = 0; | 139 | ret_stuff.status = 0; |
@@ -141,7 +141,7 @@ sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
141 | 141 | ||
142 | SAL_CALL_NOLOCK(ret_stuff, | 142 | SAL_CALL_NOLOCK(ret_stuff, |
143 | (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, | 143 | (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, |
144 | (u64) segment, (u64) bus_number, (u64) devfn, | 144 | (u64) segment, (u64) bus_number, (u64) devfn, |
145 | (u64) pci_dev, | 145 | (u64) pci_dev, |
146 | sn_irq_info, 0, 0); | 146 | sn_irq_info, 0, 0); |
147 | return ret_stuff.v0; | 147 | return ret_stuff.v0; |
@@ -268,7 +268,7 @@ static void sn_fixup_ionodes(void)
268 | */ | 268 | */ |
269 | static void | 269 | static void |
270 | sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, | 270 | sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, |
271 | int64_t * pci_addrs) | 271 | s64 * pci_addrs) |
272 | { | 272 | { |
273 | struct pci_controller *controller = PCI_CONTROLLER(dev->bus); | 273 | struct pci_controller *controller = PCI_CONTROLLER(dev->bus); |
274 | unsigned int i; | 274 | unsigned int i; |
@@ -328,7 +328,7 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
328 | struct pci_bus *host_pci_bus; | 328 | struct pci_bus *host_pci_bus; |
329 | struct pci_dev *host_pci_dev; | 329 | struct pci_dev *host_pci_dev; |
330 | struct pcidev_info *pcidev_info; | 330 | struct pcidev_info *pcidev_info; |
331 | int64_t pci_addrs[PCI_ROM_RESOURCE + 1]; | 331 | s64 pci_addrs[PCI_ROM_RESOURCE + 1]; |
332 | struct sn_irq_info *sn_irq_info; | 332 | struct sn_irq_info *sn_irq_info; |
333 | unsigned long size; | 333 | unsigned long size; |
334 | unsigned int bus_no, devfn; | 334 | unsigned int bus_no, devfn; |
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 01d18b7b5bb3..ec37084bdc17 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -28,7 +28,7 @@ extern int sn_ioif_inited;
28 | static struct list_head **sn_irq_lh; | 28 | static struct list_head **sn_irq_lh; |
29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ | 29 | static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */ |
30 | 30 | ||
31 | static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget, | 31 | static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, |
32 | u64 sn_irq_info, | 32 | u64 sn_irq_info, |
33 | int req_irq, nasid_t req_nasid, | 33 | int req_irq, nasid_t req_nasid, |
34 | int req_slice) | 34 | int req_slice) |
@@ -123,7 +123,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
123 | 123 | ||
124 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, | 124 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
125 | sn_irq_lh[irq], list) { | 125 | sn_irq_lh[irq], list) { |
126 | uint64_t bridge; | 126 | u64 bridge; |
127 | int local_widget, status; | 127 | int local_widget, status; |
128 | nasid_t local_nasid; | 128 | nasid_t local_nasid; |
129 | struct sn_irq_info *new_irq_info; | 129 | struct sn_irq_info *new_irq_info; |
@@ -134,7 +134,7 @@ static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
134 | break; | 134 | break; |
135 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); | 135 | memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); |
136 | 136 | ||
137 | bridge = (uint64_t) new_irq_info->irq_bridge; | 137 | bridge = (u64) new_irq_info->irq_bridge; |
138 | if (!bridge) { | 138 | if (!bridge) { |
139 | kfree(new_irq_info); | 139 | kfree(new_irq_info); |
140 | break; /* irq is not a device interrupt */ | 140 | break; /* irq is not a device interrupt */ |
@@ -349,10 +349,10 @@ static void force_interrupt(int irq)
349 | */ | 349 | */ |
350 | static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) | 350 | static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) |
351 | { | 351 | { |
352 | uint64_t regval; | 352 | u64 regval; |
353 | int irr_reg_num; | 353 | int irr_reg_num; |
354 | int irr_bit; | 354 | int irr_bit; |
355 | uint64_t irr_reg; | 355 | u64 irr_reg; |
356 | struct pcidev_info *pcidev_info; | 356 | struct pcidev_info *pcidev_info; |
357 | struct pcibus_info *pcibus_info; | 357 | struct pcibus_info *pcibus_info; |
358 | 358 | ||
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 6a7939b16a1c..d263d3e8fbb9 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -245,7 +245,7 @@ static int cx_device_reload(struct cx_dev *cx_dev)
245 | cx_dev->bt); | 245 | cx_dev->bt); |
246 | } | 246 | } |
247 | 247 | ||
248 | static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget, | 248 | static inline u64 tiocx_intr_alloc(nasid_t nasid, int widget, |
249 | u64 sn_irq_info, | 249 | u64 sn_irq_info, |
250 | int req_irq, nasid_t req_nasid, | 250 | int req_irq, nasid_t req_nasid, |
251 | int req_slice) | 251 | int req_slice) |
@@ -302,7 +302,7 @@ struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq,
302 | 302 | ||
303 | void tiocx_irq_free(struct sn_irq_info *sn_irq_info) | 303 | void tiocx_irq_free(struct sn_irq_info *sn_irq_info) |
304 | { | 304 | { |
305 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | 305 | u64 bridge = (u64) sn_irq_info->irq_bridge; |
306 | nasid_t nasid = NASID_GET(bridge); | 306 | nasid_t nasid = NASID_GET(bridge); |
307 | int widget; | 307 | int widget; |
308 | 308 | ||
@@ -313,12 +313,12 @@ void tiocx_irq_free(struct sn_irq_info *sn_irq_info)
313 | } | 313 | } |
314 | } | 314 | } |
315 | 315 | ||
316 | uint64_t tiocx_dma_addr(uint64_t addr) | 316 | u64 tiocx_dma_addr(u64 addr) |
317 | { | 317 | { |
318 | return PHYS_TO_TIODMA(addr); | 318 | return PHYS_TO_TIODMA(addr); |
319 | } | 319 | } |
320 | 320 | ||
321 | uint64_t tiocx_swin_base(int nasid) | 321 | u64 tiocx_swin_base(int nasid) |
322 | { | 322 | { |
323 | return TIO_SWIN_BASE(nasid, TIOCX_CORELET); | 323 | return TIO_SWIN_BASE(nasid, TIOCX_CORELET); |
324 | } | 324 | } |
@@ -335,8 +335,8 @@ EXPORT_SYMBOL(tiocx_swin_base);
335 | 335 | ||
336 | static void tio_conveyor_set(nasid_t nasid, int enable_flag) | 336 | static void tio_conveyor_set(nasid_t nasid, int enable_flag) |
337 | { | 337 | { |
338 | uint64_t ice_frz; | 338 | u64 ice_frz; |
339 | uint64_t disable_cb = (1ull << 61); | 339 | u64 disable_cb = (1ull << 61); |
340 | 340 | ||
341 | if (!(nasid & 1)) | 341 | if (!(nasid & 1)) |
342 | return; | 342 | return; |
@@ -388,7 +388,7 @@ static int is_fpga_tio(int nasid, int *bt)
388 | 388 | ||
389 | static int bitstream_loaded(nasid_t nasid) | 389 | static int bitstream_loaded(nasid_t nasid) |
390 | { | 390 | { |
391 | uint64_t cx_credits; | 391 | u64 cx_credits; |
392 | 392 | ||
393 | cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3); | 393 | cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3); |
394 | cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK; | 394 | cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK; |
@@ -404,14 +404,14 @@ static int tiocx_reload(struct cx_dev *cx_dev)
404 | nasid_t nasid = cx_dev->cx_id.nasid; | 404 | nasid_t nasid = cx_dev->cx_id.nasid; |
405 | 405 | ||
406 | if (bitstream_loaded(nasid)) { | 406 | if (bitstream_loaded(nasid)) { |
407 | uint64_t cx_id; | 407 | u64 cx_id; |
408 | int rv; | 408 | int rv; |
409 | 409 | ||
410 | rv = ia64_sn_sysctl_tio_clock_reset(nasid); | 410 | rv = ia64_sn_sysctl_tio_clock_reset(nasid); |
411 | if (rv) { | 411 | if (rv) { |
412 | printk(KERN_ALERT "CX port JTAG reset failed.\n"); | 412 | printk(KERN_ALERT "CX port JTAG reset failed.\n"); |
413 | } else { | 413 | } else { |
414 | cx_id = *(volatile uint64_t *) | 414 | cx_id = *(volatile u64 *) |
415 | (TIO_SWIN_BASE(nasid, TIOCX_CORELET) + | 415 | (TIO_SWIN_BASE(nasid, TIOCX_CORELET) + |
416 | WIDGET_ID); | 416 | WIDGET_ID); |
417 | part_num = XWIDGET_PART_NUM(cx_id); | 417 | part_num = XWIDGET_PART_NUM(cx_id); |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
index d1647b863e61..aa3fa5152a32 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -18,10 +18,10 @@ int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
18 | * mark_ate: Mark the ate as either free or inuse. | 18 | * mark_ate: Mark the ate as either free or inuse. |
19 | */ | 19 | */ |
20 | static void mark_ate(struct ate_resource *ate_resource, int start, int number, | 20 | static void mark_ate(struct ate_resource *ate_resource, int start, int number, |
21 | uint64_t value) | 21 | u64 value) |
22 | { | 22 | { |
23 | 23 | ||
24 | uint64_t *ate = ate_resource->ate; | 24 | u64 *ate = ate_resource->ate; |
25 | int index; | 25 | int index; |
26 | int length = 0; | 26 | int length = 0; |
27 | 27 | ||
@@ -38,7 +38,7 @@ static int find_free_ate(struct ate_resource *ate_resource, int start,
38 | int count) | 38 | int count) |
39 | { | 39 | { |
40 | 40 | ||
41 | uint64_t *ate = ate_resource->ate; | 41 | u64 *ate = ate_resource->ate; |
42 | int index; | 42 | int index; |
43 | int start_free; | 43 | int start_free; |
44 | 44 | ||
@@ -119,7 +119,7 @@ static inline int alloc_ate_resource(struct ate_resource *ate_resource,
119 | int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) | 119 | int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count) |
120 | { | 120 | { |
121 | int status = 0; | 121 | int status = 0; |
122 | uint64_t flag; | 122 | u64 flag; |
123 | 123 | ||
124 | flag = pcibr_lock(pcibus_info); | 124 | flag = pcibr_lock(pcibus_info); |
125 | status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); | 125 | status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count); |
@@ -139,7 +139,7 @@ int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
139 | * Setup an Address Translation Entry as specified. Use either the Bridge | 139 | * Setup an Address Translation Entry as specified. Use either the Bridge |
140 | * internal maps or the external map RAM, as appropriate. | 140 | * internal maps or the external map RAM, as appropriate. |
141 | */ | 141 | */ |
142 | static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info, | 142 | static inline u64 *pcibr_ate_addr(struct pcibus_info *pcibus_info, |
143 | int ate_index) | 143 | int ate_index) |
144 | { | 144 | { |
145 | if (ate_index < pcibus_info->pbi_int_ate_size) { | 145 | if (ate_index < pcibus_info->pbi_int_ate_size) { |
@@ -153,7 +153,7 @@ static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
153 | */ | 153 | */ |
154 | void inline | 154 | void inline |
155 | ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, | 155 | ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, |
156 | volatile uint64_t ate) | 156 | volatile u64 ate) |
157 | { | 157 | { |
158 | while (count-- > 0) { | 158 | while (count-- > 0) { |
159 | if (ate_index < pcibus_info->pbi_int_ate_size) { | 159 | if (ate_index < pcibus_info->pbi_int_ate_size) { |
@@ -171,9 +171,9 @@ ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
171 | void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) | 171 | void pcibr_ate_free(struct pcibus_info *pcibus_info, int index) |
172 | { | 172 | { |
173 | 173 | ||
174 | volatile uint64_t ate; | 174 | volatile u64 ate; |
175 | int count; | 175 | int count; |
176 | uint64_t flags; | 176 | u64 flags; |
177 | 177 | ||
178 | if (pcibr_invalidate_ate) { | 178 | if (pcibr_invalidate_ate) { |
179 | /* For debugging purposes, clear the valid bit in the ATE */ | 179 | /* For debugging purposes, clear the valid bit in the ATE */ |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index e68332d93171..54ce5b7ceed2 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,21 +41,21 @@ extern int sn_ioif_inited;
41 | 41 | ||
42 | static dma_addr_t | 42 | static dma_addr_t |
43 | pcibr_dmamap_ate32(struct pcidev_info *info, | 43 | pcibr_dmamap_ate32(struct pcidev_info *info, |
44 | uint64_t paddr, size_t req_size, uint64_t flags) | 44 | u64 paddr, size_t req_size, u64 flags) |
45 | { | 45 | { |
46 | 46 | ||
47 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; | 47 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; |
48 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | 48 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> |
49 | pdi_pcibus_info; | 49 | pdi_pcibus_info; |
50 | uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> | 50 | u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> |
51 | pdi_linux_pcidev->devfn)) - 1; | 51 | pdi_linux_pcidev->devfn)) - 1; |
52 | int ate_count; | 52 | int ate_count; |
53 | int ate_index; | 53 | int ate_index; |
54 | uint64_t ate_flags = flags | PCI32_ATE_V; | 54 | u64 ate_flags = flags | PCI32_ATE_V; |
55 | uint64_t ate; | 55 | u64 ate; |
56 | uint64_t pci_addr; | 56 | u64 pci_addr; |
57 | uint64_t xio_addr; | 57 | u64 xio_addr; |
58 | uint64_t offset; | 58 | u64 offset; |
59 | 59 | ||
60 | /* PIC in PCI-X mode does not supports 32bit PageMap mode */ | 60 | /* PIC in PCI-X mode does not supports 32bit PageMap mode */ |
61 | if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { | 61 | if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { |
@@ -109,12 +109,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
109 | } | 109 | } |
110 | 110 | ||
111 | static dma_addr_t | 111 | static dma_addr_t |
112 | pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, | 112 | pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, |
113 | uint64_t dma_attributes) | 113 | u64 dma_attributes) |
114 | { | 114 | { |
115 | struct pcibus_info *pcibus_info = (struct pcibus_info *) | 115 | struct pcibus_info *pcibus_info = (struct pcibus_info *) |
116 | ((info->pdi_host_pcidev_info)->pdi_pcibus_info); | 116 | ((info->pdi_host_pcidev_info)->pdi_pcibus_info); |
117 | uint64_t pci_addr; | 117 | u64 pci_addr; |
118 | 118 | ||
119 | /* Translate to Crosstalk View of Physical Address */ | 119 | /* Translate to Crosstalk View of Physical Address */ |
120 | pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : | 120 | pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : |
@@ -127,7 +127,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, | |||
127 | /* Handle Bridge Chipset differences */ | 127 | /* Handle Bridge Chipset differences */ |
128 | if (IS_PIC_SOFT(pcibus_info)) { | 128 | if (IS_PIC_SOFT(pcibus_info)) { |
129 | pci_addr |= | 129 | pci_addr |= |
130 | ((uint64_t) pcibus_info-> | 130 | ((u64) pcibus_info-> |
131 | pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); | 131 | pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); |
132 | } else | 132 | } else |
133 | pci_addr |= TIOCP_PCI64_CMDTYPE_MEM; | 133 | pci_addr |= TIOCP_PCI64_CMDTYPE_MEM; |
@@ -142,17 +142,17 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, | |||
142 | 142 | ||
143 | static dma_addr_t | 143 | static dma_addr_t |
144 | pcibr_dmatrans_direct32(struct pcidev_info * info, | 144 | pcibr_dmatrans_direct32(struct pcidev_info * info, |
145 | uint64_t paddr, size_t req_size, uint64_t flags) | 145 | u64 paddr, size_t req_size, u64 flags) |
146 | { | 146 | { |
147 | 147 | ||
148 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; | 148 | struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; |
149 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | 149 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> |
150 | pdi_pcibus_info; | 150 | pdi_pcibus_info; |
151 | uint64_t xio_addr; | 151 | u64 xio_addr; |
152 | 152 | ||
153 | uint64_t xio_base; | 153 | u64 xio_base; |
154 | uint64_t offset; | 154 | u64 offset; |
155 | uint64_t endoff; | 155 | u64 endoff; |
156 | 156 | ||
157 | if (IS_PCIX(pcibus_info)) { | 157 | if (IS_PCIX(pcibus_info)) { |
158 | return 0; | 158 | return 0; |
@@ -209,14 +209,14 @@ pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction) | |||
209 | * unlike the PIC Device(x) Write Request Buffer Flush register. | 209 | * unlike the PIC Device(x) Write Request Buffer Flush register. |
210 | */ | 210 | */ |
211 | 211 | ||
212 | void sn_dma_flush(uint64_t addr) | 212 | void sn_dma_flush(u64 addr) |
213 | { | 213 | { |
214 | nasid_t nasid; | 214 | nasid_t nasid; |
215 | int is_tio; | 215 | int is_tio; |
216 | int wid_num; | 216 | int wid_num; |
217 | int i, j; | 217 | int i, j; |
218 | uint64_t flags; | 218 | u64 flags; |
219 | uint64_t itte; | 219 | u64 itte; |
220 | struct hubdev_info *hubinfo; | 220 | struct hubdev_info *hubinfo; |
221 | volatile struct sn_flush_device_kernel *p; | 221 | volatile struct sn_flush_device_kernel *p; |
222 | volatile struct sn_flush_device_common *common; | 222 | volatile struct sn_flush_device_common *common; |
@@ -299,8 +299,8 @@ void sn_dma_flush(uint64_t addr) | |||
299 | * If CE ever needs the sn_dma_flush mechanism, we will have | 299 | * If CE ever needs the sn_dma_flush mechanism, we will have |
300 | * to account for that here and in tioce_bus_fixup(). | 300 | * to account for that here and in tioce_bus_fixup(). |
301 | */ | 301 | */ |
302 | uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); | 302 | u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); |
303 | uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id); | 303 | u32 revnum = XWIDGET_PART_REV_NUM(tio_id); |
304 | 304 | ||
305 | /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ | 305 | /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ |
306 | if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { | 306 | if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { |
@@ -315,7 +315,7 @@ void sn_dma_flush(uint64_t addr) | |||
315 | *common->sfdl_flush_addr = 0; | 315 | *common->sfdl_flush_addr = 0; |
316 | 316 | ||
317 | /* force an interrupt. */ | 317 | /* force an interrupt. */ |
318 | *(volatile uint32_t *)(common->sfdl_force_int_addr) = 1; | 318 | *(volatile u32 *)(common->sfdl_force_int_addr) = 1; |
319 | 319 | ||
320 | /* wait for the interrupt to come back. */ | 320 | /* wait for the interrupt to come back. */ |
321 | while (*(common->sfdl_flush_addr) != 0x10f) | 321 | while (*(common->sfdl_flush_addr) != 0x10f) |
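sn_dma_flush() above clears a completion word, writes the bridge's force-interrupt address, and then spins until the flush interrupt path stores the 0x10f completion value it polls for. A condensed, self-contained sketch of that flush-and-wait step; the parameter names are hypothetical stand-ins for the sfdl_* fields:

#include <linux/types.h>
#include <asm/processor.h>   /* cpu_relax() */

static void example_flush_wait(volatile u32 *force_int, volatile u64 *flush_addr)
{
	*flush_addr = 0;              /* clear the completion word             */
	*force_int = 1;               /* ask the bridge to flush and interrupt */
	while (*flush_addr != 0x10f)  /* wait until the handler reports that   */
		cpu_relax();          /* the flushed data has landed           */
}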
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index e328e948175d..77a1262751d3 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
@@ -23,7 +23,7 @@ int | |||
23 | sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) | 23 | sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp) |
24 | { | 24 | { |
25 | struct ia64_sal_retval ret_stuff; | 25 | struct ia64_sal_retval ret_stuff; |
26 | uint64_t busnum; | 26 | u64 busnum; |
27 | 27 | ||
28 | ret_stuff.status = 0; | 28 | ret_stuff.status = 0; |
29 | ret_stuff.v0 = 0; | 29 | ret_stuff.v0 = 0; |
@@ -40,7 +40,7 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, | |||
40 | void *resp) | 40 | void *resp) |
41 | { | 41 | { |
42 | struct ia64_sal_retval ret_stuff; | 42 | struct ia64_sal_retval ret_stuff; |
43 | uint64_t busnum; | 43 | u64 busnum; |
44 | 44 | ||
45 | ret_stuff.status = 0; | 45 | ret_stuff.status = 0; |
46 | ret_stuff.v0 = 0; | 46 | ret_stuff.v0 = 0; |
@@ -56,7 +56,7 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action, | |||
56 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) | 56 | static int sal_pcibr_error_interrupt(struct pcibus_info *soft) |
57 | { | 57 | { |
58 | struct ia64_sal_retval ret_stuff; | 58 | struct ia64_sal_retval ret_stuff; |
59 | uint64_t busnum; | 59 | u64 busnum; |
60 | int segment; | 60 | int segment; |
61 | ret_stuff.status = 0; | 61 | ret_stuff.status = 0; |
62 | ret_stuff.v0 = 0; | 62 | ret_stuff.v0 = 0; |
@@ -159,9 +159,9 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
159 | /* Setup the PMU ATE map */ | 159 | /* Setup the PMU ATE map */ |
160 | soft->pbi_int_ate_resource.lowest_free_index = 0; | 160 | soft->pbi_int_ate_resource.lowest_free_index = 0; |
161 | soft->pbi_int_ate_resource.ate = | 161 | soft->pbi_int_ate_resource.ate = |
162 | kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL); | 162 | kmalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL); |
163 | memset(soft->pbi_int_ate_resource.ate, 0, | 163 | memset(soft->pbi_int_ate_resource.ate, 0, |
164 | (soft->pbi_int_ate_size * sizeof(uint64_t))); | 164 | (soft->pbi_int_ate_size * sizeof(u64))); |
165 | 165 | ||
166 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { | 166 | if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) { |
167 | /* TIO PCI Bridge: find nearest node with CPUs */ | 167 | /* TIO PCI Bridge: find nearest node with CPUs */ |
@@ -203,7 +203,7 @@ void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info) | |||
203 | struct pcidev_info *pcidev_info; | 203 | struct pcidev_info *pcidev_info; |
204 | struct pcibus_info *pcibus_info; | 204 | struct pcibus_info *pcibus_info; |
205 | int bit = sn_irq_info->irq_int_bit; | 205 | int bit = sn_irq_info->irq_int_bit; |
206 | uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr; | 206 | u64 xtalk_addr = sn_irq_info->irq_xtalkaddr; |
207 | 207 | ||
208 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | 208 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; |
209 | if (pcidev_info) { | 209 | if (pcidev_info) { |
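pcibr_bus_fixup() above allocates the internal ATE shadow array with kmalloc() and immediately memset()s it to zero (the hunk shows no allocation-failure check before the memset()). The same result can be had in one call with kzalloc(); an illustrative sketch, not part of the patch, with nr_entries standing in for soft->pbi_int_ate_size:

#include <linux/slab.h>
#include <linux/types.h>

/* Illustration only: zeroed allocation of the ATE shadow array in one call. */
static u64 *example_alloc_ate_shadow(size_t nr_entries)
{
	return kzalloc(nr_entries * sizeof(u64), GFP_KERNEL);
}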
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c index 79fdb91d7259..8b8bbd51d433 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c | |||
@@ -23,7 +23,7 @@ union br_ptr { | |||
23 | /* | 23 | /* |
24 | * Control Register Access -- Read/Write 0000_0020 | 24 | * Control Register Access -- Read/Write 0000_0020 |
25 | */ | 25 | */ |
26 | void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) | 26 | void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits) |
27 | { | 27 | { |
28 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 28 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
29 | 29 | ||
@@ -43,7 +43,7 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) | |||
43 | } | 43 | } |
44 | } | 44 | } |
45 | 45 | ||
46 | void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) | 46 | void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits) |
47 | { | 47 | { |
48 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 48 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
49 | 49 | ||
@@ -66,10 +66,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) | |||
66 | /* | 66 | /* |
67 | * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050 | 67 | * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050 |
68 | */ | 68 | */ |
69 | uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info) | 69 | u64 pcireg_tflush_get(struct pcibus_info *pcibus_info) |
70 | { | 70 | { |
71 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 71 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
72 | uint64_t ret = 0; | 72 | u64 ret = 0; |
73 | 73 | ||
74 | if (pcibus_info) { | 74 | if (pcibus_info) { |
75 | switch (pcibus_info->pbi_bridge_type) { | 75 | switch (pcibus_info->pbi_bridge_type) { |
@@ -96,10 +96,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info) | |||
96 | /* | 96 | /* |
97 | * Interrupt Status Register Access -- Read Only 0000_0100 | 97 | * Interrupt Status Register Access -- Read Only 0000_0100 |
98 | */ | 98 | */ |
99 | uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info) | 99 | u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info) |
100 | { | 100 | { |
101 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 101 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
102 | uint64_t ret = 0; | 102 | u64 ret = 0; |
103 | 103 | ||
104 | if (pcibus_info) { | 104 | if (pcibus_info) { |
105 | switch (pcibus_info->pbi_bridge_type) { | 105 | switch (pcibus_info->pbi_bridge_type) { |
@@ -121,7 +121,7 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info) | |||
121 | /* | 121 | /* |
122 | * Interrupt Enable Register Access -- Read/Write 0000_0108 | 122 | * Interrupt Enable Register Access -- Read/Write 0000_0108 |
123 | */ | 123 | */ |
124 | void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) | 124 | void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits) |
125 | { | 125 | { |
126 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 126 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
127 | 127 | ||
@@ -141,7 +141,7 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits) | |||
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
144 | void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) | 144 | void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits) |
145 | { | 145 | { |
146 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 146 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
147 | 147 | ||
@@ -165,7 +165,7 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits) | |||
165 | * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168 | 165 | * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168 |
166 | */ | 166 | */ |
167 | void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, | 167 | void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, |
168 | uint64_t addr) | 168 | u64 addr) |
169 | { | 169 | { |
170 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 170 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
171 | 171 | ||
@@ -217,10 +217,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n) | |||
217 | /* | 217 | /* |
218 | * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258 | 218 | * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258 |
219 | */ | 219 | */ |
220 | uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) | 220 | u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) |
221 | { | 221 | { |
222 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 222 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
223 | uint64_t ret = 0; | 223 | u64 ret = 0; |
224 | 224 | ||
225 | if (pcibus_info) { | 225 | if (pcibus_info) { |
226 | switch (pcibus_info->pbi_bridge_type) { | 226 | switch (pcibus_info->pbi_bridge_type) { |
@@ -242,7 +242,7 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) | |||
242 | } | 242 | } |
243 | 243 | ||
244 | void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, | 244 | void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, |
245 | uint64_t val) | 245 | u64 val) |
246 | { | 246 | { |
247 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 247 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
248 | 248 | ||
@@ -262,10 +262,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, | |||
262 | } | 262 | } |
263 | } | 263 | } |
264 | 264 | ||
265 | uint64_t __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) | 265 | u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) |
266 | { | 266 | { |
267 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; | 267 | union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; |
268 | uint64_t __iomem *ret = NULL; | 268 | u64 __iomem *ret = NULL; |
269 | 269 | ||
270 | if (pcibus_info) { | 270 | if (pcibus_info) { |
271 | switch (pcibus_info->pbi_bridge_type) { | 271 | switch (pcibus_info->pbi_bridge_type) { |
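Every pcireg_*() accessor in this file follows the same pattern: overlay the bridge's MMIO base with union br_ptr, then switch on pbi_bridge_type to pick the PIC or TIOCP register layout. A reduced, self-contained sketch of that pattern; all structure, field, and enum names here are hypothetical, and readq() stands in for whatever MMIO access the real accessors use:

#include <linux/types.h>
#include <asm/io.h>

struct example_pic_regs   { u64 p_wid_tflush; };   /* hypothetical PIC layout   */
struct example_tiocp_regs { u64 cp_wid_tflush; };  /* hypothetical TIOCP layout */

union example_br_ptr {
	struct example_pic_regs   pic;
	struct example_tiocp_regs tio;
};

enum example_bridge_type { EXAMPLE_BRIDGE_PIC, EXAMPLE_BRIDGE_TIOCP };

static u64 example_tflush_get(union example_br_ptr __iomem *ptr,
			      enum example_bridge_type type)
{
	u64 ret = 0;

	switch (type) {
	case EXAMPLE_BRIDGE_PIC:
		ret = readq(&ptr->pic.p_wid_tflush);
		break;
	case EXAMPLE_BRIDGE_TIOCP:
		ret = readq(&ptr->tio.cp_wid_tflush);
		break;
	}
	return ret;
}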
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 27aa1842dacc..7571a4025529 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
17 | #include <asm/sn/tioca_provider.h> | 17 | #include <asm/sn/tioca_provider.h> |
18 | 18 | ||
19 | uint32_t tioca_gart_found; | 19 | u32 tioca_gart_found; |
20 | EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ | 20 | EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ |
21 | 21 | ||
22 | LIST_HEAD(tioca_list); | 22 | LIST_HEAD(tioca_list); |
@@ -34,8 +34,8 @@ static int tioca_gart_init(struct tioca_kernel *); | |||
34 | static int | 34 | static int |
35 | tioca_gart_init(struct tioca_kernel *tioca_kern) | 35 | tioca_gart_init(struct tioca_kernel *tioca_kern) |
36 | { | 36 | { |
37 | uint64_t ap_reg; | 37 | u64 ap_reg; |
38 | uint64_t offset; | 38 | u64 offset; |
39 | struct page *tmp; | 39 | struct page *tmp; |
40 | struct tioca_common *tioca_common; | 40 | struct tioca_common *tioca_common; |
41 | struct tioca __iomem *ca_base; | 41 | struct tioca __iomem *ca_base; |
@@ -214,7 +214,7 @@ void | |||
214 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) | 214 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) |
215 | { | 215 | { |
216 | int cap_ptr; | 216 | int cap_ptr; |
217 | uint32_t reg; | 217 | u32 reg; |
218 | struct tioca __iomem *tioca_base; | 218 | struct tioca __iomem *tioca_base; |
219 | struct pci_dev *pdev; | 219 | struct pci_dev *pdev; |
220 | struct tioca_common *common; | 220 | struct tioca_common *common; |
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ | |||
276 | * We will always use 0x1 | 276 | * We will always use 0x1 |
277 | * 55:55 - Swap bytes Currently unused | 277 | * 55:55 - Swap bytes Currently unused |
278 | */ | 278 | */ |
279 | static uint64_t | 279 | static u64 |
280 | tioca_dma_d64(unsigned long paddr) | 280 | tioca_dma_d64(unsigned long paddr) |
281 | { | 281 | { |
282 | dma_addr_t bus_addr; | 282 | dma_addr_t bus_addr; |
@@ -318,15 +318,15 @@ tioca_dma_d64(unsigned long paddr) | |||
318 | * and so a given CA can only directly target nodes in the range | 318 | * and so a given CA can only directly target nodes in the range |
319 | * xxx - xxx+255. | 319 | * xxx - xxx+255. |
320 | */ | 320 | */ |
321 | static uint64_t | 321 | static u64 |
322 | tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) | 322 | tioca_dma_d48(struct pci_dev *pdev, u64 paddr) |
323 | { | 323 | { |
324 | struct tioca_common *tioca_common; | 324 | struct tioca_common *tioca_common; |
325 | struct tioca __iomem *ca_base; | 325 | struct tioca __iomem *ca_base; |
326 | uint64_t ct_addr; | 326 | u64 ct_addr; |
327 | dma_addr_t bus_addr; | 327 | dma_addr_t bus_addr; |
328 | uint32_t node_upper; | 328 | u32 node_upper; |
329 | uint64_t agp_dma_extn; | 329 | u64 agp_dma_extn; |
330 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | 330 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); |
331 | 331 | ||
332 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | 332 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; |
@@ -367,10 +367,10 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) | |||
367 | * dma_addr_t is guaranteed to be contiguous in CA bus space. | 367 | * dma_addr_t is guaranteed to be contiguous in CA bus space. |
368 | */ | 368 | */ |
369 | static dma_addr_t | 369 | static dma_addr_t |
370 | tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) | 370 | tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size) |
371 | { | 371 | { |
372 | int i, ps, ps_shift, entry, entries, mapsize, last_entry; | 372 | int i, ps, ps_shift, entry, entries, mapsize, last_entry; |
373 | uint64_t xio_addr, end_xio_addr; | 373 | u64 xio_addr, end_xio_addr; |
374 | struct tioca_common *tioca_common; | 374 | struct tioca_common *tioca_common; |
375 | struct tioca_kernel *tioca_kern; | 375 | struct tioca_kernel *tioca_kern; |
376 | dma_addr_t bus_addr = 0; | 376 | dma_addr_t bus_addr = 0; |
@@ -514,10 +514,10 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
514 | * The mapping mode used is based on the device's dma_mask. As a last resort | 514 | * The mapping mode used is based on the device's dma_mask. As a last resort |
515 | * use the GART mapped mode. | 515 | * use the GART mapped mode. |
516 | */ | 516 | */ |
517 | static uint64_t | 517 | static u64 |
518 | tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | 518 | tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count) |
519 | { | 519 | { |
520 | uint64_t mapaddr; | 520 | u64 mapaddr; |
521 | 521 | ||
522 | /* | 522 | /* |
523 | * If card is 64 or 48 bit addressable, use a direct mapping. 32 | 523 | * If card is 64 or 48 bit addressable, use a direct mapping. 32 |
@@ -554,8 +554,8 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | |||
554 | { | 554 | { |
555 | struct tioca_common *soft = arg; | 555 | struct tioca_common *soft = arg; |
556 | struct ia64_sal_retval ret_stuff; | 556 | struct ia64_sal_retval ret_stuff; |
557 | uint64_t segment; | 557 | u64 segment; |
558 | uint64_t busnum; | 558 | u64 busnum; |
559 | ret_stuff.status = 0; | 559 | ret_stuff.status = 0; |
560 | ret_stuff.v0 = 0; | 560 | ret_stuff.v0 = 0; |
561 | 561 | ||
@@ -620,7 +620,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
620 | INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); | 620 | INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); |
621 | tioca_kern->ca_closest_node = | 621 | tioca_kern->ca_closest_node = |
622 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); | 622 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); |
623 | tioca_common->ca_kernel_private = (uint64_t) tioca_kern; | 623 | tioca_common->ca_kernel_private = (u64) tioca_kern; |
624 | 624 | ||
625 | bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, | 625 | bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment, |
626 | tioca_common->ca_common.bs_persist_busnum); | 626 | tioca_common->ca_common.bs_persist_busnum); |
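tioca_dma_map() above selects a mapping mode from the device's dma_mask: a fully 64-bit capable card gets the direct D64 encoding, a 48-bit capable card gets D48, and everything else falls back to the GART mapped mode. A hedged reduction of that dispatch using the helpers defined in this file; the exact mask tests and error handling of the real function may differ:

#include <linux/pci.h>
#include <linux/types.h>

static u64 example_tioca_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
{
	if (pdev->dma_mask == ~0UL)                       /* 64-bit addressable */
		return tioca_dma_d64(paddr);

	if (pdev->dma_mask >= 0xffffffffffffUL)           /* 48-bit addressable */
		return tioca_dma_d48(pdev, paddr);

	return tioca_dma_mapped(pdev, paddr, byte_count); /* GART, last resort  */
}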
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index dda196c9e324..e52831ed93eb 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -81,10 +81,10 @@ | |||
81 | * 61 - 0 since this is not an MSI transaction | 81 | * 61 - 0 since this is not an MSI transaction |
82 | * 60:54 - reserved, MBZ | 82 | * 60:54 - reserved, MBZ |
83 | */ | 83 | */ |
84 | static uint64_t | 84 | static u64 |
85 | tioce_dma_d64(unsigned long ct_addr) | 85 | tioce_dma_d64(unsigned long ct_addr) |
86 | { | 86 | { |
87 | uint64_t bus_addr; | 87 | u64 bus_addr; |
88 | 88 | ||
89 | bus_addr = ct_addr | (1UL << 63); | 89 | bus_addr = ct_addr | (1UL << 63); |
90 | 90 | ||
@@ -141,9 +141,9 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base, | |||
141 | * length, and if enough resources exist, fill in the ATE's and construct a | 141 | * length, and if enough resources exist, fill in the ATE's and construct a |
142 | * tioce_dmamap struct to track the mapping. | 142 | * tioce_dmamap struct to track the mapping. |
143 | */ | 143 | */ |
144 | static uint64_t | 144 | static u64 |
145 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | 145 | tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, |
146 | uint64_t ct_addr, int len) | 146 | u64 ct_addr, int len) |
147 | { | 147 | { |
148 | int i; | 148 | int i; |
149 | int j; | 149 | int j; |
@@ -152,11 +152,11 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
152 | int entries; | 152 | int entries; |
153 | int nates; | 153 | int nates; |
154 | int pagesize; | 154 | int pagesize; |
155 | uint64_t *ate_shadow; | 155 | u64 *ate_shadow; |
156 | uint64_t *ate_reg; | 156 | u64 *ate_reg; |
157 | uint64_t addr; | 157 | u64 addr; |
158 | struct tioce *ce_mmr; | 158 | struct tioce *ce_mmr; |
159 | uint64_t bus_base; | 159 | u64 bus_base; |
160 | struct tioce_dmamap *map; | 160 | struct tioce_dmamap *map; |
161 | 161 | ||
162 | ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; | 162 | ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base; |
@@ -224,7 +224,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
224 | 224 | ||
225 | addr = ct_addr; | 225 | addr = ct_addr; |
226 | for (j = 0; j < nates; j++) { | 226 | for (j = 0; j < nates; j++) { |
227 | uint64_t ate; | 227 | u64 ate; |
228 | 228 | ||
229 | ate = ATE_MAKE(addr, pagesize); | 229 | ate = ATE_MAKE(addr, pagesize); |
230 | ate_shadow[i + j] = ate; | 230 | ate_shadow[i + j] = ate; |
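The loop above encodes one ATE per page with ATE_MAKE() and mirrors it into the ate_shadow[] array. A self-contained sketch of how such a fill loop typically completes, programming an assumed hardware ATE array and stepping the Coretalk address by one page; this is an illustration under assumed names, not a claim about the lines outside this hunk's context:

#include <linux/types.h>
#include <asm/io.h>

static void example_fill_ates(u64 __iomem *ate_hw, u64 *ate_shadow,
			      int first, int nates, u64 addr, int pagesize)
{
	int j;

	for (j = 0; j < nates; j++) {
		u64 ate = ATE_MAKE(addr, pagesize);  /* encode address + page size */

		ate_shadow[first + j] = ate;         /* software shadow copy       */
		writeq(ate, &ate_hw[first + j]);     /* assumed hardware write     */
		addr += pagesize;
	}
}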
@@ -252,15 +252,15 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port, | |||
252 | * | 252 | * |
253 | * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. | 253 | * Map @paddr into 32-bit bus space of the CE associated with @pcidev_info. |
254 | */ | 254 | */ |
255 | static uint64_t | 255 | static u64 |
256 | tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | 256 | tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr) |
257 | { | 257 | { |
258 | int dma_ok; | 258 | int dma_ok; |
259 | int port; | 259 | int port; |
260 | struct tioce *ce_mmr; | 260 | struct tioce *ce_mmr; |
261 | struct tioce_kernel *ce_kern; | 261 | struct tioce_kernel *ce_kern; |
262 | uint64_t ct_upper; | 262 | u64 ct_upper; |
263 | uint64_t ct_lower; | 263 | u64 ct_lower; |
264 | dma_addr_t bus_addr; | 264 | dma_addr_t bus_addr; |
265 | 265 | ||
266 | ct_upper = ct_addr & ~0x3fffffffUL; | 266 | ct_upper = ct_addr & ~0x3fffffffUL; |
@@ -269,7 +269,7 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | |||
269 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); | 269 | pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port); |
270 | 270 | ||
271 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { | 271 | if (ce_kern->ce_port[port].dirmap_refcnt == 0) { |
272 | uint64_t tmp; | 272 | u64 tmp; |
273 | 273 | ||
274 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; | 274 | ce_kern->ce_port[port].dirmap_shadow = ct_upper; |
275 | writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]); | 275 | writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]); |
@@ -295,10 +295,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr) | |||
295 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier | 295 | * Given a TIOCE bus address, set the appropriate bit to indicate barrier |
296 | * attributes. | 296 | * attributes. |
297 | */ | 297 | */ |
298 | static uint64_t | 298 | static u64 |
299 | tioce_dma_barrier(uint64_t bus_addr, int on) | 299 | tioce_dma_barrier(u64 bus_addr, int on) |
300 | { | 300 | { |
301 | uint64_t barrier_bit; | 301 | u64 barrier_bit; |
302 | 302 | ||
303 | /* barrier not supported in M40/M40S mode */ | 303 | /* barrier not supported in M40/M40S mode */ |
304 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) | 304 | if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr)) |
@@ -351,7 +351,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
351 | 351 | ||
352 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, | 352 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, |
353 | ce_dmamap_list) { | 353 | ce_dmamap_list) { |
354 | uint64_t last; | 354 | u64 last; |
355 | 355 | ||
356 | last = map->pci_start + map->nbytes - 1; | 356 | last = map->pci_start + map->nbytes - 1; |
357 | if (bus_addr >= map->pci_start && bus_addr <= last) | 357 | if (bus_addr >= map->pci_start && bus_addr <= last) |
@@ -385,17 +385,17 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
385 | * This is the main wrapper for mapping host physical pages to CE PCI space. | 385 | * This is the main wrapper for mapping host physical pages to CE PCI space. |
386 | * The mapping mode used is based on the device's dma_mask. | 386 | * The mapping mode used is based on the device's dma_mask. |
387 | */ | 387 | */ |
388 | static uint64_t | 388 | static u64 |
389 | tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, | 389 | tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, |
390 | int barrier) | 390 | int barrier) |
391 | { | 391 | { |
392 | unsigned long flags; | 392 | unsigned long flags; |
393 | uint64_t ct_addr; | 393 | u64 ct_addr; |
394 | uint64_t mapaddr = 0; | 394 | u64 mapaddr = 0; |
395 | struct tioce_kernel *ce_kern; | 395 | struct tioce_kernel *ce_kern; |
396 | struct tioce_dmamap *map; | 396 | struct tioce_dmamap *map; |
397 | int port; | 397 | int port; |
398 | uint64_t dma_mask; | 398 | u64 dma_mask; |
399 | 399 | ||
400 | dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; | 400 | dma_mask = (barrier) ? pdev->dev.coherent_dma_mask : pdev->dma_mask; |
401 | 401 | ||
@@ -425,7 +425,7 @@ tioce_do_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count, | |||
425 | * address bits than this device can support. | 425 | * address bits than this device can support. |
426 | */ | 426 | */ |
427 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { | 427 | list_for_each_entry(map, &ce_kern->ce_dmamap_list, ce_dmamap_list) { |
428 | uint64_t last; | 428 | u64 last; |
429 | 429 | ||
430 | last = map->ct_start + map->nbytes - 1; | 430 | last = map->ct_start + map->nbytes - 1; |
431 | if (ct_addr >= map->ct_start && | 431 | if (ct_addr >= map->ct_start && |
@@ -501,8 +501,8 @@ dma_map_done: | |||
501 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear | 501 | * Simply call tioce_do_dma_map() to create a map with the barrier bit clear |
502 | * in the address. | 502 | * in the address. |
503 | */ | 503 | */ |
504 | static uint64_t | 504 | static u64 |
505 | tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | 505 | tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count) |
506 | { | 506 | { |
507 | return tioce_do_dma_map(pdev, paddr, byte_count, 0); | 507 | return tioce_do_dma_map(pdev, paddr, byte_count, 0); |
508 | } | 508 | } |
@@ -515,8 +515,8 @@ tioce_dma(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | |||
515 | * | 515 | * |
516 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set | 516 | * Simply call tioce_do_dma_map() to create a map with the barrier bit set |
517 | * in the address. | 517 | * in the address. |
518 | */ static uint64_t | 518 | */ static u64 |
519 | tioce_dma_consistent(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | 519 | tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count) |
520 | { | 520 | { |
521 | return tioce_do_dma_map(pdev, paddr, byte_count, 1); | 521 | return tioce_do_dma_map(pdev, paddr, byte_count, 1); |
522 | } | 522 | } |
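tioce_dma() and tioce_dma_consistent() above differ only in the barrier flag they pass to tioce_do_dma_map(); that flag is ultimately folded into the returned bus address by tioce_dma_barrier(), except in M40/M40S mode where barriers are unsupported. A minimal sketch of that barrier handling; the bit position is hypothetical, while the mode-check macros are the ones shown earlier in this file:

#include <linux/types.h>

static u64 example_barrier(u64 bus_addr, int on)
{
	u64 barrier_bit = 1UL << 62;   /* hypothetical position of the barrier bit */

	/* barrier attributes are not supported in M40/M40S mode */
	if (TIOCE_M40_ADDR(bus_addr) || TIOCE_M40S_ADDR(bus_addr))
		return bus_addr;

	return on ? (bus_addr | barrier_bit) : (bus_addr & ~barrier_bit);
}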
@@ -551,7 +551,7 @@ tioce_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | |||
551 | tioce_kern_init(struct tioce_common *tioce_common) | 551 | tioce_kern_init(struct tioce_common *tioce_common) |
552 | { | 552 | { |
553 | int i; | 553 | int i; |
554 | uint32_t tmp; | 554 | u32 tmp; |
555 | struct tioce *tioce_mmr; | 555 | struct tioce *tioce_mmr; |
556 | struct tioce_kernel *tioce_kern; | 556 | struct tioce_kernel *tioce_kern; |
557 | 557 | ||
@@ -563,7 +563,7 @@ tioce_kern_init(struct tioce_common *tioce_common) | |||
563 | tioce_kern->ce_common = tioce_common; | 563 | tioce_kern->ce_common = tioce_common; |
564 | spin_lock_init(&tioce_kern->ce_lock); | 564 | spin_lock_init(&tioce_kern->ce_lock); |
565 | INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); | 565 | INIT_LIST_HEAD(&tioce_kern->ce_dmamap_list); |
566 | tioce_common->ce_kernel_private = (uint64_t) tioce_kern; | 566 | tioce_common->ce_kernel_private = (u64) tioce_kern; |
567 | 567 | ||
568 | /* | 568 | /* |
569 | * Determine the secondary bus number of the port2 logical PPB. | 569 | * Determine the secondary bus number of the port2 logical PPB. |
@@ -575,7 +575,7 @@ tioce_kern_init(struct tioce_common *tioce_common) | |||
575 | raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, | 575 | raw_pci_ops->read(tioce_common->ce_pcibus.bs_persist_segment, |
576 | tioce_common->ce_pcibus.bs_persist_busnum, | 576 | tioce_common->ce_pcibus.bs_persist_busnum, |
577 | PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); | 577 | PCI_DEVFN(2, 0), PCI_SECONDARY_BUS, 1, &tmp); |
578 | tioce_kern->ce_port1_secondary = (uint8_t) tmp; | 578 | tioce_kern->ce_port1_secondary = (u8) tmp; |
579 | 579 | ||
580 | /* | 580 | /* |
581 | * Set PMU pagesize to the largest size available, and zero out | 581 | * Set PMU pagesize to the largest size available, and zero out |
@@ -615,7 +615,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info) | |||
615 | struct pcidev_info *pcidev_info; | 615 | struct pcidev_info *pcidev_info; |
616 | struct tioce_common *ce_common; | 616 | struct tioce_common *ce_common; |
617 | struct tioce *ce_mmr; | 617 | struct tioce *ce_mmr; |
618 | uint64_t force_int_val; | 618 | u64 force_int_val; |
619 | 619 | ||
620 | if (!sn_irq_info->irq_bridge) | 620 | if (!sn_irq_info->irq_bridge) |
621 | return; | 621 | return; |
@@ -687,7 +687,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | |||
687 | struct tioce_common *ce_common; | 687 | struct tioce_common *ce_common; |
688 | struct tioce *ce_mmr; | 688 | struct tioce *ce_mmr; |
689 | int bit; | 689 | int bit; |
690 | uint64_t vector; | 690 | u64 vector; |
691 | 691 | ||
692 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; | 692 | pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; |
693 | if (!pcidev_info) | 693 | if (!pcidev_info) |
@@ -699,7 +699,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info) | |||
699 | bit = sn_irq_info->irq_int_bit; | 699 | bit = sn_irq_info->irq_int_bit; |
700 | 700 | ||
701 | __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); | 701 | __sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); |
702 | vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; | 702 | vector = (u64)sn_irq_info->irq_irq << INTR_VECTOR_SHFT; |
703 | vector |= sn_irq_info->irq_xtalkaddr; | 703 | vector |= sn_irq_info->irq_xtalkaddr; |
704 | writeq(vector, &ce_mmr->ce_adm_int_dest[bit]); | 704 | writeq(vector, &ce_mmr->ce_adm_int_dest[bit]); |
705 | __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); | 705 | __sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit)); |