author    Ralf Baechle <ralf@linux-mips.org>    2007-10-11 18:46:15 -0400
committer Ralf Baechle <ralf@linux-mips.org>    2007-10-11 18:46:15 -0400
commit    10cc3529072d5415fb040018a8a99aa7a60190b6 (patch)
tree      fe07fb5112c9c34c2aecfac982155307bc168f07 /arch
parent    aeffdbbaff133b0c3989e20af5baa091d3d0b409 (diff)
[MIPS] Allow hardwiring of the CPU type to a single type for optimization.
This saves a few k on systems which only ever ship with a single CPU type.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
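The change works by routing every CPU-type test through a small accessor, current_cpu_type(), instead of reading current_cpu_data.cputype directly, as the hunks below show. The sketch that follows is only an illustration of the idea, not the header added by this patch; CONFIG_CPU_TYPE_HARDWIRED and HARDWIRED_CPU_TYPE are placeholder names. When the kernel is configured for exactly one CPU type, such an accessor can return a compile-time constant, so the compiler folds every switch (current_cpu_type()) or if (current_cpu_type() == ...) test and drops the branches for CPUs that can never be present, which is where the size saving comes from.

/*
 * Illustrative sketch only -- CONFIG_CPU_TYPE_HARDWIRED and
 * HARDWIRED_CPU_TYPE are placeholder names, not the symbols this
 * patch introduces.
 */
static inline int current_cpu_type(void)
{
#ifdef CONFIG_CPU_TYPE_HARDWIRED
	/* Single supported CPU: a constant the compiler can fold. */
	return HARDWIRED_CPU_TYPE;
#else
	/* Generic kernels keep using the value probed at boot. */
	return current_cpu_data.cputype;
#endif
}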
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/dec/ecc-berr.c              2
-rw-r--r--  arch/mips/dec/kn02xa-berr.c           2
-rw-r--r--  arch/mips/dec/prom/init.c             8
-rw-r--r--  arch/mips/kernel/traps.c              6
-rw-r--r--  arch/mips/mm/c-r4k.c                 12
-rw-r--r--  arch/mips/mm/c-tx39.c                 6
-rw-r--r--  arch/mips/mm/dma-default.c            4
-rw-r--r--  arch/mips/mm/pg-r4k.c                 2
-rw-r--r--  arch/mips/mm/tlbex.c                 10
-rw-r--r--  arch/mips/oprofile/common.c           2
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c  4
-rw-r--r--  arch/mips/pci/pci-vr41xx.c            2
-rw-r--r--  arch/mips/vr41xx/common/bcu.c         8
-rw-r--r--  arch/mips/vr41xx/common/cmu.c        16
-rw-r--r--  arch/mips/vr41xx/common/giu.c         2
-rw-r--r--  arch/mips/vr41xx/common/icu.c        76
-rw-r--r--  arch/mips/vr41xx/common/pmu.c         4
-rw-r--r--  arch/mips/vr41xx/common/rtc.c         2
-rw-r--r--  arch/mips/vr41xx/common/siu.c         2
19 files changed, 85 insertions, 85 deletions
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index 6d55e8aab668..6a17c9b508ea 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -263,7 +263,7 @@ static inline void dec_kn03_be_init(void)
 	 */
 	*mcr = (*mcr & ~(KN03_MCR_DIAGCHK | KN03_MCR_DIAGGEN)) |
 	       KN03_MCR_CORRECT;
-	if (current_cpu_data.cputype == CPU_R4400SC)
+	if (current_cpu_type() == CPU_R4400SC)
 		*mbcs |= KN4K_MB_CSR_EE;
 	fast_iob();
 }
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c
index 7a053aadcd3a..5f04545c3606 100644
--- a/arch/mips/dec/kn02xa-berr.c
+++ b/arch/mips/dec/kn02xa-berr.c
@@ -132,7 +132,7 @@ void __init dec_kn02xa_be_init(void)
 	volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);

 	/* For KN04 we need to make sure EE (?) is enabled in the MB. */
-	if (current_cpu_data.cputype == CPU_R4000SC)
+	if (current_cpu_type() == CPU_R4000SC)
 		*mbcs |= KN4K_MB_CSR_EE;
 	fast_iob();

diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 808c182fd3fa..93f1239af524 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -108,8 +108,8 @@ void __init prom_init(void)

 	/* Were we compiled with the right CPU option? */
 #if defined(CONFIG_CPU_R3000)
-	if ((current_cpu_data.cputype == CPU_R4000SC) ||
-	    (current_cpu_data.cputype == CPU_R4400SC)) {
+	if ((current_cpu_type() == CPU_R4000SC) ||
+	    (current_cpu_type() == CPU_R4400SC)) {
 		static char r4k_msg[] __initdata =
 			"Please recompile with \"CONFIG_CPU_R4x00 = y\".\n";
 		printk(cpu_msg);
@@ -119,8 +119,8 @@ void __init prom_init(void)
 #endif

 #if defined(CONFIG_CPU_R4X00)
-	if ((current_cpu_data.cputype == CPU_R3000) ||
-	    (current_cpu_data.cputype == CPU_R3000A)) {
+	if ((current_cpu_type() == CPU_R3000) ||
+	    (current_cpu_type() == CPU_R3000A)) {
 		static char r3k_msg[] __initdata =
 			"Please recompile with \"CONFIG_CPU_R3000 = y\".\n";
 		printk(cpu_msg);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d96f8218a91e..ac762d8d802d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -954,7 +954,7 @@ asmlinkage void do_reserved(struct pt_regs *regs)
  */
 static inline void parity_protection_init(void)
 {
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_5KC:
@@ -1549,8 +1549,8 @@ void __init trap_init(void)
 	set_except_vector(12, handle_ov);
 	set_except_vector(13, handle_tr);

-	if (current_cpu_data.cputype == CPU_R6000 ||
-	    current_cpu_data.cputype == CPU_R6000A) {
+	if (current_cpu_type() == CPU_R6000 ||
+	    current_cpu_type() == CPU_R6000A) {
 		/*
 		 * The R6000 is the only R-series CPU that features a machine
 		 * check exception (similar to the R4000 cache error) and
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cf48371e5690..8b7b7c57baca 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -328,7 +328,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 	r4k_blast_dcache();
 	r4k_blast_icache();

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -377,10 +377,10 @@ static inline void local_r4k_flush_cache_mm(void * args)
 	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
 	 * caches, so we can bail out early.
 	 */
-	if (current_cpu_data.cputype == CPU_R4000SC ||
-	    current_cpu_data.cputype == CPU_R4000MC ||
-	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC) {
+	if (current_cpu_type() == CPU_R4000SC ||
+	    current_cpu_type() == CPU_R4000MC ||
+	    current_cpu_type() == CPU_R4400SC ||
+	    current_cpu_type() == CPU_R4400MC) {
 		r4k_blast_scache();
 		return;
 	}
@@ -1197,7 +1197,7 @@ static void __init coherency_setup(void)
 	 * this bit and; some wire it to zero, others like Toshiba had the
 	 * silly idea of putting something else there ...
 	 */
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 560a6de96556..9ea121e8cdce 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -69,7 +69,7 @@ static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 /* TX39H2,TX39H3 */
 static inline void tx39_blast_dcache_page(unsigned long addr)
 {
-	if (current_cpu_data.cputype != CPU_TX3912)
+	if (current_cpu_type() != CPU_TX3912)
 		blast_dcache16_page(addr);
 }

@@ -307,7 +307,7 @@ static __init void tx39_probe_cache(void)
 			  TX39_CONF_DCS_SHIFT));

 	current_cpu_data.icache.linesz = 16;
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_TX3912:
 		current_cpu_data.icache.ways = 1;
 		current_cpu_data.dcache.ways = 1;
@@ -341,7 +341,7 @@ void __init tx39_cache_init(void)

 	tx39_probe_cache();

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_TX3912:
 		/* TX39/H core (writethru direct-map cache) */
 		flush_cache_all = tx39h_flush_icache_all;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index f60b3dc0fc62..98b5e5bac02e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -35,8 +35,8 @@ static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
 static inline int cpu_is_noncoherent_r10000(struct device *dev)
 {
 	return !plat_device_is_coherent(dev) &&
-	       (current_cpu_data.cputype == CPU_R10000 ||
-	        current_cpu_data.cputype == CPU_R12000);
+	       (current_cpu_type() == CPU_R10000 ||
+	        current_cpu_type() == CPU_R12000);
 }

 void *dma_alloc_noncoherent(struct device *dev, size_t size,
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index c0294541d295..4f770ac885ce 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -354,7 +354,7 @@ void __init build_clear_page(void)
 	store_offset = 0;

 	if (cpu_has_prefetch) {
-		switch (current_cpu_data.cputype) {
+		switch (current_cpu_type()) {
 		case CPU_TX49XX:
 			/* TX49 supports only Pref_Load */
 			pref_offset_clear = 0;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 73250741a0f9..c3da4fefbcb4 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -827,7 +827,7 @@ static __initdata u32 final_handler[64];
  */
 static __init void __maybe_unused build_tlb_probe_entry(u32 **p)
 {
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	/* Found by experiment: R4600 v2.0 needs this, too. */
 	case CPU_R4600:
 	case CPU_R5000:
@@ -860,7 +860,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
 	case tlb_indexed: tlbw = i_tlbwi; break;
 	}

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1158,7 +1158,7 @@ static __init void build_adjust_context(u32 **p, unsigned int ctx)
 	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
 	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR41XX:
 	case CPU_VR4111:
 	case CPU_VR4121:
@@ -1188,7 +1188,7 @@ static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	 * in a different cacheline or a load instruction, probably any
 	 * memory reference, is between them.
 	 */
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_NEVADA:
 		i_LW(p, ptr, 0, ptr);
 		GET_CONTEXT(p, tmp); /* get context reg */
@@ -1872,7 +1872,7 @@ void __init build_tlb_refill_handler(void)
 	 */
 	static int run_once = 0;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_R2000:
 	case CPU_R3000:
 	case CPU_R3000A:
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 4e0a90b3916b..aa52aa146cea 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -74,7 +74,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	struct op_mips_model *lmodel = NULL;
 	int res;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_5KC:
 	case CPU_20KC:
 	case CPU_24K:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1ea5c9c1010b..6383000422ab 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -222,7 +222,7 @@ static inline int n_counters(void)
 {
 	int counters;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_R10000:
 		counters = 2;
 		break;
@@ -274,7 +274,7 @@ static int __init mipsxx_init(void)
 #endif

 	op_model_mipsxx_ops.num_counters = counters;
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_20KC:
 		op_model_mipsxx_ops.cpu_type = "mips/20K";
 		break;
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
index 9885fa403603..240df9e33813 100644
--- a/arch/mips/pci/pci-vr41xx.c
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -228,7 +228,7 @@ static int __init vr41xx_pciu_init(void)
 	else
 		pciu_write(PCIEXACCREG, 0);

-	if (current_cpu_data.cputype == CPU_VR4122)
+	if (current_cpu_type() == CPU_VR4122)
 		pciu_write(PCITRDYVREG, TRDYV(setup->wait_time_limit_from_irdy_to_trdy));

 	pciu_write(LATTIMEREG, MLTIM(setup->master_latency_timer));
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
index ff272b2e8395..d77c330a0d59 100644
--- a/arch/mips/vr41xx/common/bcu.c
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(vr41xx_get_tclock_frequency);

 static inline uint16_t read_clkspeed(void)
 {
-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121: return readw(CLKSPEEDREG_TYPE1);
 	case CPU_VR4122:
@@ -88,7 +88,7 @@ static inline unsigned long calculate_pclock(uint16_t clkspeed)
 {
 	unsigned long pclock = 0;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		pclock = 18432000 * 64;
@@ -138,7 +138,7 @@ static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long p
 {
 	unsigned long vtclock = 0;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 		/* The NEC VR4111 doesn't have the VTClock. */
 		break;
@@ -180,7 +180,7 @@ static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pc
 {
 	unsigned long tclock = 0;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 		if (!(clkspeed & DIV2B))
 			tclock = pclock / 2;
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 657c5133c933..ad0e8e3409d9 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -95,8 +95,8 @@ void vr41xx_supply_clock(vr41xx_clock_t clock)
 		cmuclkmsk |= MSKFIR | MSKFFIR;
 		break;
 	case DSIU_CLOCK:
-		if (current_cpu_data.cputype == CPU_VR4111 ||
-		    current_cpu_data.cputype == CPU_VR4121)
+		if (current_cpu_type() == CPU_VR4111 ||
+		    current_cpu_type() == CPU_VR4121)
 			cmuclkmsk |= MSKDSIU;
 		else
 			cmuclkmsk |= MSKSIU | MSKDSIU;
@@ -146,8 +146,8 @@ void vr41xx_mask_clock(vr41xx_clock_t clock)
 		cmuclkmsk &= ~MSKPIU;
 		break;
 	case SIU_CLOCK:
-		if (current_cpu_data.cputype == CPU_VR4111 ||
-		    current_cpu_data.cputype == CPU_VR4121) {
+		if (current_cpu_type() == CPU_VR4111 ||
+		    current_cpu_type() == CPU_VR4121) {
 			cmuclkmsk &= ~(MSKSIU | MSKSSIU);
 		} else {
 			if (cmuclkmsk & MSKDSIU)
@@ -166,8 +166,8 @@ void vr41xx_mask_clock(vr41xx_clock_t clock)
 		cmuclkmsk &= ~(MSKFIR | MSKFFIR);
 		break;
 	case DSIU_CLOCK:
-		if (current_cpu_data.cputype == CPU_VR4111 ||
-		    current_cpu_data.cputype == CPU_VR4121) {
+		if (current_cpu_type() == CPU_VR4111 ||
+		    current_cpu_type() == CPU_VR4121) {
 			cmuclkmsk &= ~MSKDSIU;
 		} else {
 			if (cmuclkmsk & MSKSSIU)
@@ -216,7 +216,7 @@ static int __init vr41xx_cmu_init(void)
 {
 	unsigned long start, size;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		start = CMU_TYPE1_BASE;
@@ -246,7 +246,7 @@ static int __init vr41xx_cmu_init(void)
 	}

 	cmuclkmsk = cmu_read(CMUCLKMSK);
-	if (current_cpu_data.cputype == CPU_VR4133)
+	if (current_cpu_type() == CPU_VR4133)
 		cmuclkmsk2 = cmu_read(CMUCLKMSK2);

 	spin_lock_init(&cmu_lock);
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
index d21f6f2d22a3..2b272f1496fe 100644
--- a/arch/mips/vr41xx/common/giu.c
+++ b/arch/mips/vr41xx/common/giu.c
@@ -81,7 +81,7 @@ static int __init vr41xx_giu_add(void)
 	if (!pdev)
 		return -ENOMEM;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		pdev->id = GPIO_50PINS_PULLUPDOWN;
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index adabc6bad440..1899601e5862 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -157,8 +157,8 @@ void vr41xx_enable_piuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + PIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MPIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -172,8 +172,8 @@ void vr41xx_disable_piuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + PIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MPIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -187,8 +187,8 @@ void vr41xx_enable_aiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + AIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MAIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -202,8 +202,8 @@ void vr41xx_disable_aiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + AIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MAIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -217,8 +217,8 @@ void vr41xx_enable_kiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + KIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_set(MKIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -232,8 +232,8 @@ void vr41xx_disable_kiuint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + KIU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4111 ||
-	    current_cpu_data.cputype == CPU_VR4121) {
+	if (current_cpu_type() == CPU_VR4111 ||
+	    current_cpu_type() == CPU_VR4121) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu1_clear(MKIUINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -319,9 +319,9 @@ void vr41xx_enable_pciint(void)
 	struct irq_desc *desc = irq_desc + PCI_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, PCIINT0);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -335,9 +335,9 @@ void vr41xx_disable_pciint(void)
 	struct irq_desc *desc = irq_desc + PCI_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MPCIINTREG, 0);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -351,9 +351,9 @@ void vr41xx_enable_scuint(void)
 	struct irq_desc *desc = irq_desc + SCU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, SCUINT0);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -367,9 +367,9 @@ void vr41xx_disable_scuint(void)
 	struct irq_desc *desc = irq_desc + SCU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MSCUINTREG, 0);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -383,9 +383,9 @@ void vr41xx_enable_csiint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + CSI_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_set(MCSIINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -399,9 +399,9 @@ void vr41xx_disable_csiint(uint16_t mask)
 	struct irq_desc *desc = irq_desc + CSI_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_clear(MCSIINTREG, mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -415,9 +415,9 @@ void vr41xx_enable_bcuint(void)
 	struct irq_desc *desc = irq_desc + BCU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, BCUINTR);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -431,9 +431,9 @@ void vr41xx_disable_bcuint(void)
 	struct irq_desc *desc = irq_desc + BCU_IRQ;
 	unsigned long flags;

-	if (current_cpu_data.cputype == CPU_VR4122 ||
-	    current_cpu_data.cputype == CPU_VR4131 ||
-	    current_cpu_data.cputype == CPU_VR4133) {
+	if (current_cpu_type() == CPU_VR4122 ||
+	    current_cpu_type() == CPU_VR4131 ||
+	    current_cpu_type() == CPU_VR4133) {
 		spin_lock_irqsave(&desc->lock, flags);
 		icu2_write(MBCUINTREG, 0);
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -608,7 +608,7 @@ int vr41xx_set_intassign(unsigned int irq, unsigned char intassign)
 {
 	int retval = -EINVAL;

-	if (current_cpu_data.cputype != CPU_VR4133)
+	if (current_cpu_type() != CPU_VR4133)
 		return -EINVAL;

 	if (intassign > INTASSIGN_MAX)
@@ -665,7 +665,7 @@ static int __init vr41xx_icu_init(void)
 	unsigned long icu1_start, icu2_start;
 	int i;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		icu1_start = ICU1_TYPE1_BASE;
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index ad5b6db53396..028aaf75eb21 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -62,7 +62,7 @@ static inline void software_reset(void)
 {
 	uint16_t pmucnt2;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4122:
 	case CPU_VR4131:
 	case CPU_VR4133:
@@ -98,7 +98,7 @@ static int __init vr41xx_pmu_init(void)
 {
 	unsigned long start, size;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		start = PMU_TYPE1_BASE;
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
index cce605b3d688..9f26c14edcac 100644
--- a/arch/mips/vr41xx/common/rtc.c
+++ b/arch/mips/vr41xx/common/rtc.c
@@ -82,7 +82,7 @@ static int __init vr41xx_rtc_add(void)
 	if (!pdev)
 		return -ENOMEM;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		res = rtc_type1_resource;
diff --git a/arch/mips/vr41xx/common/siu.c b/arch/mips/vr41xx/common/siu.c
index a1e774142163..b735f45b25f0 100644
--- a/arch/mips/vr41xx/common/siu.c
+++ b/arch/mips/vr41xx/common/siu.c
@@ -83,7 +83,7 @@ static int __init vr41xx_siu_add(void)
 	if (!pdev)
 		return -ENOMEM;

-	switch (current_cpu_data.cputype) {
+	switch (current_cpu_type()) {
 	case CPU_VR4111:
 	case CPU_VR4121:
 		pdev->dev.platform_data = siu_type1_ports;