Diffstat (limited to 'arch/x86/mach-voyager')
-rw-r--r--  arch/x86/mach-voyager/setup.c            32
-rw-r--r--  arch/x86/mach-voyager/voyager_basic.c   132
-rw-r--r--  arch/x86/mach-voyager/voyager_cat.c     601
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c     672
-rw-r--r--  arch/x86/mach-voyager/voyager_thread.c   52
5 files changed, 718 insertions, 771 deletions
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 3bef977cb29b..81257a861984 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -44,7 +44,7 @@ void __init trap_init_hook(void)
44{ 44{
45} 45}
46 46
47static struct irqaction irq0 = { 47static struct irqaction irq0 = {
48 .handler = timer_interrupt, 48 .handler = timer_interrupt,
49 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL, 49 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL,
50 .mask = CPU_MASK_NONE, 50 .mask = CPU_MASK_NONE,
@@ -59,44 +59,47 @@ void __init time_init_hook(void)
59 59
60/* Hook for machine specific memory setup. */ 60/* Hook for machine specific memory setup. */
61 61
62char * __init machine_specific_memory_setup(void) 62char *__init machine_specific_memory_setup(void)
63{ 63{
64 char *who; 64 char *who;
65 65
66 who = "NOT VOYAGER"; 66 who = "NOT VOYAGER";
67 67
68 if(voyager_level == 5) { 68 if (voyager_level == 5) {
69 __u32 addr, length; 69 __u32 addr, length;
70 int i; 70 int i;
71 71
72 who = "Voyager-SUS"; 72 who = "Voyager-SUS";
73 73
74 e820.nr_map = 0; 74 e820.nr_map = 0;
75 for(i=0; voyager_memory_detect(i, &addr, &length); i++) { 75 for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
76 add_memory_region(addr, length, E820_RAM); 76 add_memory_region(addr, length, E820_RAM);
77 } 77 }
78 return who; 78 return who;
79 } else if(voyager_level == 4) { 79 } else if (voyager_level == 4) {
80 __u32 tom; 80 __u32 tom;
81 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8; 81 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
82 /* select the DINO config space */ 82 /* select the DINO config space */
83 outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT); 83 outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
84 /* Read DINO top of memory register */ 84 /* Read DINO top of memory register */
85 tom = ((inb(catbase + 0x4) & 0xf0) << 16) 85 tom = ((inb(catbase + 0x4) & 0xf0) << 16)
86 + ((inb(catbase + 0x5) & 0x7f) << 24); 86 + ((inb(catbase + 0x5) & 0x7f) << 24);
87 87
88 if(inb(catbase) != VOYAGER_DINO) { 88 if (inb(catbase) != VOYAGER_DINO) {
89 printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n"); 89 printk(KERN_ERR
90 tom = (boot_params.screen_info.ext_mem_k)<<10; 90 "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
91 tom = (boot_params.screen_info.ext_mem_k) << 10;
91 } 92 }
92 who = "Voyager-TOM"; 93 who = "Voyager-TOM";
93 add_memory_region(0, 0x9f000, E820_RAM); 94 add_memory_region(0, 0x9f000, E820_RAM);
94 /* map from 1M to top of memory */ 95 /* map from 1M to top of memory */
95 add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM); 96 add_memory_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
97 E820_RAM);
96 /* FIXME: Should check the ASICs to see if I need to 98 /* FIXME: Should check the ASICs to see if I need to
97 * take out the 8M window. Just do it at the moment 99 * take out the 8M window. Just do it at the moment
98 * */ 100 * */
99 add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED); 101 add_memory_region(8 * 1024 * 1024, 8 * 1024 * 1024,
102 E820_RESERVED);
100 return who; 103 return who;
101 } 104 }
102 105
@@ -114,8 +117,7 @@ char * __init machine_specific_memory_setup(void)
114 unsigned long mem_size; 117 unsigned long mem_size;
115 118
116 /* compare results from other methods and take the greater */ 119 /* compare results from other methods and take the greater */
117 if (boot_params.alt_mem_k 120 if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
118 < boot_params.screen_info.ext_mem_k) {
119 mem_size = boot_params.screen_info.ext_mem_k; 121 mem_size = boot_params.screen_info.ext_mem_k;
120 who = "BIOS-88"; 122 who = "BIOS-88";
121 } else { 123 } else {
@@ -126,6 +128,6 @@ char * __init machine_specific_memory_setup(void)
126 e820.nr_map = 0; 128 e820.nr_map = 0;
127 add_memory_region(0, LOWMEMSIZE(), E820_RAM); 129 add_memory_region(0, LOWMEMSIZE(), E820_RAM);
128 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); 130 add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
129 } 131 }
130 return who; 132 return who;
131} 133}
diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c
index 9b77b39b71a6..6a949e4edde8 100644
--- a/arch/x86/mach-voyager/voyager_basic.c
+++ b/arch/x86/mach-voyager/voyager_basic.c
@@ -35,7 +35,7 @@
35/* 35/*
36 * Power off function, if any 36 * Power off function, if any
37 */ 37 */
38void (*pm_power_off)(void); 38void (*pm_power_off) (void);
39EXPORT_SYMBOL(pm_power_off); 39EXPORT_SYMBOL(pm_power_off);
40 40
41int voyager_level = 0; 41int voyager_level = 0;
@@ -43,39 +43,38 @@ int voyager_level = 0;
43struct voyager_SUS *voyager_SUS = NULL; 43struct voyager_SUS *voyager_SUS = NULL;
44 44
45#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
46static void 46static void voyager_dump(int dummy1, struct tty_struct *dummy3)
47voyager_dump(int dummy1, struct tty_struct *dummy3)
48{ 47{
49 /* get here via a sysrq */ 48 /* get here via a sysrq */
50 voyager_smp_dump(); 49 voyager_smp_dump();
51} 50}
52 51
53static struct sysrq_key_op sysrq_voyager_dump_op = { 52static struct sysrq_key_op sysrq_voyager_dump_op = {
54 .handler = voyager_dump, 53 .handler = voyager_dump,
55 .help_msg = "Voyager", 54 .help_msg = "Voyager",
56 .action_msg = "Dump Voyager Status", 55 .action_msg = "Dump Voyager Status",
57}; 56};
58#endif 57#endif
59 58
60void 59void voyager_detect(struct voyager_bios_info *bios)
61voyager_detect(struct voyager_bios_info *bios)
62{ 60{
63 if(bios->len != 0xff) { 61 if (bios->len != 0xff) {
64 int class = (bios->class_1 << 8) 62 int class = (bios->class_1 << 8)
65 | (bios->class_2 & 0xff); 63 | (bios->class_2 & 0xff);
66 64
67 printk("Voyager System detected.\n" 65 printk("Voyager System detected.\n"
68 " Class %x, Revision %d.%d\n", 66 " Class %x, Revision %d.%d\n",
69 class, bios->major, bios->minor); 67 class, bios->major, bios->minor);
70 if(class == VOYAGER_LEVEL4) 68 if (class == VOYAGER_LEVEL4)
71 voyager_level = 4; 69 voyager_level = 4;
72 else if(class < VOYAGER_LEVEL5_AND_ABOVE) 70 else if (class < VOYAGER_LEVEL5_AND_ABOVE)
73 voyager_level = 3; 71 voyager_level = 3;
74 else 72 else
75 voyager_level = 5; 73 voyager_level = 5;
76 printk(" Architecture Level %d\n", voyager_level); 74 printk(" Architecture Level %d\n", voyager_level);
77 if(voyager_level < 4) 75 if (voyager_level < 4)
78 printk("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n"); 76 printk
77 ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
79 /* install the power off handler */ 78 /* install the power off handler */
80 pm_power_off = voyager_power_off; 79 pm_power_off = voyager_power_off;
81#ifdef CONFIG_SMP 80#ifdef CONFIG_SMP
@@ -86,15 +85,13 @@ voyager_detect(struct voyager_bios_info *bios)
86 } 85 }
87} 86}
88 87
89void 88void voyager_system_interrupt(int cpl, void *dev_id)
90voyager_system_interrupt(int cpl, void *dev_id)
91{ 89{
92 printk("Voyager: detected system interrupt\n"); 90 printk("Voyager: detected system interrupt\n");
93} 91}
94 92
95/* Routine to read information from the extended CMOS area */ 93/* Routine to read information from the extended CMOS area */
96__u8 94__u8 voyager_extended_cmos_read(__u16 addr)
97voyager_extended_cmos_read(__u16 addr)
98{ 95{
99 outb(addr & 0xff, 0x74); 96 outb(addr & 0xff, 0x74);
100 outb((addr >> 8) & 0xff, 0x75); 97 outb((addr >> 8) & 0xff, 0x75);
@@ -108,12 +105,11 @@ voyager_extended_cmos_read(__u16 addr)
108 105
109typedef struct ClickMap { 106typedef struct ClickMap {
110 struct Entry { 107 struct Entry {
111 __u32 Address; 108 __u32 Address;
112 __u32 Length; 109 __u32 Length;
113 } Entry[CLICK_ENTRIES]; 110 } Entry[CLICK_ENTRIES];
114} ClickMap_t; 111} ClickMap_t;
115 112
116
117/* This routine is pretty much an awful hack to read the bios clickmap by 113/* This routine is pretty much an awful hack to read the bios clickmap by
118 * mapping it into page 0. There are usually three regions in the map: 114 * mapping it into page 0. There are usually three regions in the map:
119 * Base Memory 115 * Base Memory
@@ -122,8 +118,7 @@ typedef struct ClickMap {
122 * 118 *
123 * Returns are 0 for failure and 1 for success on extracting region. 119 * Returns are 0 for failure and 1 for success on extracting region.
124 */ 120 */
125int __init 121int __init voyager_memory_detect(int region, __u32 * start, __u32 * length)
126voyager_memory_detect(int region, __u32 *start, __u32 *length)
127{ 122{
128 int i; 123 int i;
129 int retval = 0; 124 int retval = 0;
@@ -132,13 +127,14 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
132 unsigned long map_addr; 127 unsigned long map_addr;
133 unsigned long old; 128 unsigned long old;
134 129
135 if(region >= CLICK_ENTRIES) { 130 if (region >= CLICK_ENTRIES) {
136 printk("Voyager: Illegal ClickMap region %d\n", region); 131 printk("Voyager: Illegal ClickMap region %d\n", region);
137 return 0; 132 return 0;
138 } 133 }
139 134
140 for(i = 0; i < sizeof(cmos); i++) 135 for (i = 0; i < sizeof(cmos); i++)
141 cmos[i] = voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i); 136 cmos[i] =
137 voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
142 138
143 map_addr = *(unsigned long *)cmos; 139 map_addr = *(unsigned long *)cmos;
144 140
@@ -147,10 +143,10 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
147 pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); 143 pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
148 local_flush_tlb(); 144 local_flush_tlb();
149 /* now clear everything out but page 0 */ 145 /* now clear everything out but page 0 */
150 map = (ClickMap_t *)(map_addr & (~PAGE_MASK)); 146 map = (ClickMap_t *) (map_addr & (~PAGE_MASK));
151 147
152 /* zero length is the end of the clickmap */ 148 /* zero length is the end of the clickmap */
153 if(map->Entry[region].Length != 0) { 149 if (map->Entry[region].Length != 0) {
154 *length = map->Entry[region].Length * CLICK_SIZE; 150 *length = map->Entry[region].Length * CLICK_SIZE;
155 *start = map->Entry[region].Address; 151 *start = map->Entry[region].Address;
156 retval = 1; 152 retval = 1;
@@ -165,10 +161,9 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
165/* voyager specific handling code for timer interrupts. Used to hand 161/* voyager specific handling code for timer interrupts. Used to hand
166 * off the timer tick to the SMP code, since the VIC doesn't have an 162 * off the timer tick to the SMP code, since the VIC doesn't have an
167 * internal timer (The QIC does, but that's another story). */ 163 * internal timer (The QIC does, but that's another story). */
168void 164void voyager_timer_interrupt(void)
169voyager_timer_interrupt(void)
170{ 165{
171 if((jiffies & 0x3ff) == 0) { 166 if ((jiffies & 0x3ff) == 0) {
172 167
173 /* There seems to be something flaky in either 168 /* There seems to be something flaky in either
174 * hardware or software that is resetting the timer 0 169 * hardware or software that is resetting the timer 0
@@ -186,18 +181,20 @@ voyager_timer_interrupt(void)
186 __u16 val; 181 __u16 val;
187 182
188 spin_lock(&i8253_lock); 183 spin_lock(&i8253_lock);
189 184
190 outb_p(0x00, 0x43); 185 outb_p(0x00, 0x43);
191 val = inb_p(0x40); 186 val = inb_p(0x40);
192 val |= inb(0x40) << 8; 187 val |= inb(0x40) << 8;
193 spin_unlock(&i8253_lock); 188 spin_unlock(&i8253_lock);
194 189
195 if(val > LATCH) { 190 if (val > LATCH) {
196 printk("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", val); 191 printk
192 ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n",
193 val);
197 spin_lock(&i8253_lock); 194 spin_lock(&i8253_lock);
198 outb(0x34,0x43); 195 outb(0x34, 0x43);
199 outb_p(LATCH & 0xff , 0x40); /* LSB */ 196 outb_p(LATCH & 0xff, 0x40); /* LSB */
200 outb(LATCH >> 8 , 0x40); /* MSB */ 197 outb(LATCH >> 8, 0x40); /* MSB */
201 spin_unlock(&i8253_lock); 198 spin_unlock(&i8253_lock);
202 } 199 }
203 } 200 }
@@ -206,14 +203,13 @@ voyager_timer_interrupt(void)
206#endif 203#endif
207} 204}
208 205
209void 206void voyager_power_off(void)
210voyager_power_off(void)
211{ 207{
212 printk("VOYAGER Power Off\n"); 208 printk("VOYAGER Power Off\n");
213 209
214 if(voyager_level == 5) { 210 if (voyager_level == 5) {
215 voyager_cat_power_off(); 211 voyager_cat_power_off();
216 } else if(voyager_level == 4) { 212 } else if (voyager_level == 4) {
217 /* This doesn't apparently work on most L4 machines, 213 /* This doesn't apparently work on most L4 machines,
218 * but the specs say to do this to get automatic power 214 * but the specs say to do this to get automatic power
219 * off. Unfortunately, if it doesn't power off the 215 * off. Unfortunately, if it doesn't power off the
@@ -222,10 +218,8 @@ voyager_power_off(void)
222#if 0 218#if 0
223 int port; 219 int port;
224 220
225
226 /* enable the voyager Configuration Space */ 221 /* enable the voyager Configuration Space */
227 outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, 222 outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP);
228 VOYAGER_MC_SETUP);
229 /* the port for the power off flag is an offset from the 223 /* the port for the power off flag is an offset from the
230 floating base */ 224 floating base */
231 port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21; 225 port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
@@ -235,62 +229,57 @@ voyager_power_off(void)
235 } 229 }
236 /* and wait for it to happen */ 230 /* and wait for it to happen */
237 local_irq_disable(); 231 local_irq_disable();
238 for(;;) 232 for (;;)
239 halt(); 233 halt();
240} 234}
241 235
242/* copied from process.c */ 236/* copied from process.c */
243static inline void 237static inline void kb_wait(void)
244kb_wait(void)
245{ 238{
246 int i; 239 int i;
247 240
248 for (i=0; i<0x10000; i++) 241 for (i = 0; i < 0x10000; i++)
249 if ((inb_p(0x64) & 0x02) == 0) 242 if ((inb_p(0x64) & 0x02) == 0)
250 break; 243 break;
251} 244}
252 245
253void 246void machine_shutdown(void)
254machine_shutdown(void)
255{ 247{
256 /* Architecture specific shutdown needed before a kexec */ 248 /* Architecture specific shutdown needed before a kexec */
257} 249}
258 250
259void 251void machine_restart(char *cmd)
260machine_restart(char *cmd)
261{ 252{
262 printk("Voyager Warm Restart\n"); 253 printk("Voyager Warm Restart\n");
263 kb_wait(); 254 kb_wait();
264 255
265 if(voyager_level == 5) { 256 if (voyager_level == 5) {
266 /* write magic values to the RTC to inform system that 257 /* write magic values to the RTC to inform system that
267 * shutdown is beginning */ 258 * shutdown is beginning */
268 outb(0x8f, 0x70); 259 outb(0x8f, 0x70);
269 outb(0x5 , 0x71); 260 outb(0x5, 0x71);
270 261
271 udelay(50); 262 udelay(50);
272 outb(0xfe,0x64); /* pull reset low */ 263 outb(0xfe, 0x64); /* pull reset low */
273 } else if(voyager_level == 4) { 264 } else if (voyager_level == 4) {
274 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8; 265 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
275 __u8 basebd = inb(VOYAGER_MC_SETUP); 266 __u8 basebd = inb(VOYAGER_MC_SETUP);
276 267
277 outb(basebd | 0x08, VOYAGER_MC_SETUP); 268 outb(basebd | 0x08, VOYAGER_MC_SETUP);
278 outb(0x02, catbase + 0x21); 269 outb(0x02, catbase + 0x21);
279 } 270 }
280 local_irq_disable(); 271 local_irq_disable();
281 for(;;) 272 for (;;)
282 halt(); 273 halt();
283} 274}
284 275
285void 276void machine_emergency_restart(void)
286machine_emergency_restart(void)
287{ 277{
288 /*for now, just hook this to a warm restart */ 278 /*for now, just hook this to a warm restart */
289 machine_restart(NULL); 279 machine_restart(NULL);
290} 280}
291 281
292void 282void mca_nmi_hook(void)
293mca_nmi_hook(void)
294{ 283{
295 __u8 dumpval __maybe_unused = inb(0xf823); 284 __u8 dumpval __maybe_unused = inb(0xf823);
296 __u8 swnmi __maybe_unused = inb(0xf813); 285 __u8 swnmi __maybe_unused = inb(0xf813);
@@ -301,8 +290,8 @@ mca_nmi_hook(void)
301 /* clear swnmi */ 290 /* clear swnmi */
302 outb(0xff, 0xf813); 291 outb(0xff, 0xf813);
303 /* tell SUS to ignore dump */ 292 /* tell SUS to ignore dump */
304 if(voyager_level == 5 && voyager_SUS != NULL) { 293 if (voyager_level == 5 && voyager_SUS != NULL) {
305 if(voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) { 294 if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
306 voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND; 295 voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
307 voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS; 296 voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
308 udelay(1000); 297 udelay(1000);
@@ -310,15 +299,14 @@ mca_nmi_hook(void)
310 voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS; 299 voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
311 } 300 }
312 } 301 }
313 printk(KERN_ERR "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", smp_processor_id()); 302 printk(KERN_ERR
303 "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n",
304 smp_processor_id());
314 show_stack(NULL, NULL); 305 show_stack(NULL, NULL);
315 show_state(); 306 show_state();
316} 307}
317 308
318 309void machine_halt(void)
319
320void
321machine_halt(void)
322{ 310{
323 /* treat a halt like a power off */ 311 /* treat a halt like a power off */
324 machine_power_off(); 312 machine_power_off();
diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
index 2132ca652df1..17a7904f75b1 100644
--- a/arch/x86/mach-voyager/voyager_cat.c
+++ b/arch/x86/mach-voyager/voyager_cat.c
@@ -39,34 +39,32 @@
39#define CAT_DATA (sspb + 0xd) 39#define CAT_DATA (sspb + 0xd)
40 40
41/* the internal cat functions */ 41/* the internal cat functions */
42static void cat_pack(__u8 *msg, __u16 start_bit, __u8 *data, 42static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits);
43 __u16 num_bits); 43static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data,
44static void cat_unpack(__u8 *msg, __u16 start_bit, __u8 *data,
45 __u16 num_bits); 44 __u16 num_bits);
46static void cat_build_header(__u8 *header, const __u16 len, 45static void cat_build_header(__u8 * header, const __u16 len,
47 const __u16 smallest_reg_bits, 46 const __u16 smallest_reg_bits,
48 const __u16 longest_reg_bits); 47 const __u16 longest_reg_bits);
49static int cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, 48static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp,
50 __u8 reg, __u8 op); 49 __u8 reg, __u8 op);
51static int cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, 50static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp,
52 __u8 reg, __u8 *value); 51 __u8 reg, __u8 * value);
53static int cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, 52static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes,
54 __u8 pad_bits); 53 __u8 pad_bits);
55static int cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 54static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
56 __u8 value); 55 __u8 value);
57static int cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 56static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
58 __u8 *value); 57 __u8 * value);
59static int cat_subread(voyager_module_t *modp, voyager_asic_t *asicp, 58static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp,
60 __u16 offset, __u16 len, void *buf); 59 __u16 offset, __u16 len, void *buf);
61static int cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp, 60static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
62 __u8 reg, __u8 value); 61 __u8 reg, __u8 value);
63static int cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp); 62static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp);
64static int cat_connect(voyager_module_t *modp, voyager_asic_t *asicp); 63static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp);
65 64
66static inline const char * 65static inline const char *cat_module_name(int module_id)
67cat_module_name(int module_id)
68{ 66{
69 switch(module_id) { 67 switch (module_id) {
70 case 0x10: 68 case 0x10:
71 return "Processor Slot 0"; 69 return "Processor Slot 0";
72 case 0x11: 70 case 0x11:
@@ -105,14 +103,14 @@ voyager_module_t *voyager_cat_list;
105 103
106/* the I/O port assignments for the VIC and QIC */ 104/* the I/O port assignments for the VIC and QIC */
107static struct resource vic_res = { 105static struct resource vic_res = {
108 .name = "Voyager Interrupt Controller", 106 .name = "Voyager Interrupt Controller",
109 .start = 0xFC00, 107 .start = 0xFC00,
110 .end = 0xFC6F 108 .end = 0xFC6F
111}; 109};
112static struct resource qic_res = { 110static struct resource qic_res = {
113 .name = "Quad Interrupt Controller", 111 .name = "Quad Interrupt Controller",
114 .start = 0xFC70, 112 .start = 0xFC70,
115 .end = 0xFCFF 113 .end = 0xFCFF
116}; 114};
117 115
118/* This function is used to pack a data bit stream inside a message. 116/* This function is used to pack a data bit stream inside a message.
@@ -120,7 +118,7 @@ static struct resource qic_res = {
120 * Note: This function assumes that any unused bit in the data stream 118 * Note: This function assumes that any unused bit in the data stream
121 * is set to zero so that the ors will work correctly */ 119 * is set to zero so that the ors will work correctly */
122static void 120static void
123cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits) 121cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
124{ 122{
125 /* compute initial shift needed */ 123 /* compute initial shift needed */
126 const __u16 offset = start_bit % BITS_PER_BYTE; 124 const __u16 offset = start_bit % BITS_PER_BYTE;
@@ -130,7 +128,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
130 int i; 128 int i;
131 129
132 /* adjust if we have more than a byte of residue */ 130 /* adjust if we have more than a byte of residue */
133 if(residue >= BITS_PER_BYTE) { 131 if (residue >= BITS_PER_BYTE) {
134 residue -= BITS_PER_BYTE; 132 residue -= BITS_PER_BYTE;
135 len++; 133 len++;
136 } 134 }
@@ -138,24 +136,25 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
138 /* clear out the bits. We assume here that if len==0 then 136 /* clear out the bits. We assume here that if len==0 then
139 * residue >= offset. This is always true for the catbus 137 * residue >= offset. This is always true for the catbus
140 * operations */ 138 * operations */
141 msg[byte] &= 0xff << (BITS_PER_BYTE - offset); 139 msg[byte] &= 0xff << (BITS_PER_BYTE - offset);
142 msg[byte++] |= data[0] >> offset; 140 msg[byte++] |= data[0] >> offset;
143 if(len == 0) 141 if (len == 0)
144 return; 142 return;
145 for(i = 1; i < len; i++) 143 for (i = 1; i < len; i++)
146 msg[byte++] = (data[i-1] << (BITS_PER_BYTE - offset)) 144 msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset))
147 | (data[i] >> offset); 145 | (data[i] >> offset);
148 if(residue != 0) { 146 if (residue != 0) {
149 __u8 mask = 0xff >> residue; 147 __u8 mask = 0xff >> residue;
150 __u8 last_byte = data[i-1] << (BITS_PER_BYTE - offset) 148 __u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset)
151 | (data[i] >> offset); 149 | (data[i] >> offset);
152 150
153 last_byte &= ~mask; 151 last_byte &= ~mask;
154 msg[byte] &= mask; 152 msg[byte] &= mask;
155 msg[byte] |= last_byte; 153 msg[byte] |= last_byte;
156 } 154 }
157 return; 155 return;
158} 156}
157
159/* unpack the data again (same arguments as cat_pack()). data buffer 158/* unpack the data again (same arguments as cat_pack()). data buffer
160 * must be zero populated. 159 * must be zero populated.
161 * 160 *
@@ -163,7 +162,7 @@ cat_pack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
163 * data (starting at bit 0 in data). 162 * data (starting at bit 0 in data).
164 */ 163 */
165static void 164static void
166cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits) 165cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
167{ 166{
168 /* compute initial shift needed */ 167 /* compute initial shift needed */
169 const __u16 offset = start_bit % BITS_PER_BYTE; 168 const __u16 offset = start_bit % BITS_PER_BYTE;
@@ -172,97 +171,97 @@ cat_unpack(__u8 *msg, const __u16 start_bit, __u8 *data, const __u16 num_bits)
172 __u16 byte = start_bit / BITS_PER_BYTE; 171 __u16 byte = start_bit / BITS_PER_BYTE;
173 int i; 172 int i;
174 173
175 if(last_bits != 0) 174 if (last_bits != 0)
176 len++; 175 len++;
177 176
178 /* special case: want < 8 bits from msg and we can get it from 177 /* special case: want < 8 bits from msg and we can get it from
179 * a single byte of the msg */ 178 * a single byte of the msg */
180 if(len == 0 && BITS_PER_BYTE - offset >= num_bits) { 179 if (len == 0 && BITS_PER_BYTE - offset >= num_bits) {
181 data[0] = msg[byte] << offset; 180 data[0] = msg[byte] << offset;
182 data[0] &= 0xff >> (BITS_PER_BYTE - num_bits); 181 data[0] &= 0xff >> (BITS_PER_BYTE - num_bits);
183 return; 182 return;
184 } 183 }
185 for(i = 0; i < len; i++) { 184 for (i = 0; i < len; i++) {
186 /* this annoying if has to be done just in case a read of 185 /* this annoying if has to be done just in case a read of
187 * msg one beyond the array causes a panic */ 186 * msg one beyond the array causes a panic */
188 if(offset != 0) { 187 if (offset != 0) {
189 data[i] = msg[byte++] << offset; 188 data[i] = msg[byte++] << offset;
190 data[i] |= msg[byte] >> (BITS_PER_BYTE - offset); 189 data[i] |= msg[byte] >> (BITS_PER_BYTE - offset);
191 } 190 } else {
192 else {
193 data[i] = msg[byte++]; 191 data[i] = msg[byte++];
194 } 192 }
195 } 193 }
196 /* do we need to truncate the final byte */ 194 /* do we need to truncate the final byte */
197 if(last_bits != 0) { 195 if (last_bits != 0) {
198 data[i-1] &= 0xff << (BITS_PER_BYTE - last_bits); 196 data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits);
199 } 197 }
200 return; 198 return;
201} 199}
202 200
203static void 201static void
204cat_build_header(__u8 *header, const __u16 len, const __u16 smallest_reg_bits, 202cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits,
205 const __u16 longest_reg_bits) 203 const __u16 longest_reg_bits)
206{ 204{
207 int i; 205 int i;
208 __u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE; 206 __u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE;
209 __u8 *last_byte = &header[len - 1]; 207 __u8 *last_byte = &header[len - 1];
210 208
211 if(start_bit == 0) 209 if (start_bit == 0)
212 start_bit = 1; /* must have at least one bit in the hdr */ 210 start_bit = 1; /* must have at least one bit in the hdr */
213 211
214 for(i=0; i < len; i++) 212 for (i = 0; i < len; i++)
215 header[i] = 0; 213 header[i] = 0;
216 214
217 for(i = start_bit; i > 0; i--) 215 for (i = start_bit; i > 0; i--)
218 *last_byte = ((*last_byte) << 1) + 1; 216 *last_byte = ((*last_byte) << 1) + 1;
219 217
220} 218}
221 219
222static int 220static int
223cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op) 221cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op)
224{ 222{
225 __u8 parity, inst, inst_buf[4] = { 0 }; 223 __u8 parity, inst, inst_buf[4] = { 0 };
226 __u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE]; 224 __u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE];
227 __u16 ibytes, hbytes, padbits; 225 __u16 ibytes, hbytes, padbits;
228 int i; 226 int i;
229 227
230 /* 228 /*
231 * Parity is the parity of the register number + 1 (READ_REGISTER 229 * Parity is the parity of the register number + 1 (READ_REGISTER
232 * and WRITE_REGISTER always add '1' to the number of bits == 1) 230 * and WRITE_REGISTER always add '1' to the number of bits == 1)
233 */ 231 */
234 parity = (__u8)(1 + (reg & 0x01) + 232 parity = (__u8) (1 + (reg & 0x01) +
235 ((__u8)(reg & 0x02) >> 1) + 233 ((__u8) (reg & 0x02) >> 1) +
236 ((__u8)(reg & 0x04) >> 2) + 234 ((__u8) (reg & 0x04) >> 2) +
237 ((__u8)(reg & 0x08) >> 3)) % 2; 235 ((__u8) (reg & 0x08) >> 3)) % 2;
238 236
239 inst = ((parity << 7) | (reg << 2) | op); 237 inst = ((parity << 7) | (reg << 2) | op);
240 238
241 outb(VOYAGER_CAT_IRCYC, CAT_CMD); 239 outb(VOYAGER_CAT_IRCYC, CAT_CMD);
242 if(!modp->scan_path_connected) { 240 if (!modp->scan_path_connected) {
243 if(asicp->asic_id != VOYAGER_CAT_ID) { 241 if (asicp->asic_id != VOYAGER_CAT_ID) {
244 printk("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n"); 242 printk
243 ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
245 return 1; 244 return 1;
246 } 245 }
247 outb(VOYAGER_CAT_HEADER, CAT_DATA); 246 outb(VOYAGER_CAT_HEADER, CAT_DATA);
248 outb(inst, CAT_DATA); 247 outb(inst, CAT_DATA);
249 if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) { 248 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
250 CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n")); 249 CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n"));
251 return 1; 250 return 1;
252 } 251 }
253 return 0; 252 return 0;
254 } 253 }
255 ibytes = modp->inst_bits / BITS_PER_BYTE; 254 ibytes = modp->inst_bits / BITS_PER_BYTE;
256 if((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) { 255 if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
257 padbits = BITS_PER_BYTE - padbits; 256 padbits = BITS_PER_BYTE - padbits;
258 ibytes++; 257 ibytes++;
259 } 258 }
260 hbytes = modp->largest_reg / BITS_PER_BYTE; 259 hbytes = modp->largest_reg / BITS_PER_BYTE;
261 if(modp->largest_reg % BITS_PER_BYTE) 260 if (modp->largest_reg % BITS_PER_BYTE)
262 hbytes++; 261 hbytes++;
263 CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes)); 262 CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes));
264 /* initialise the instruction sequence to 0xff */ 263 /* initialise the instruction sequence to 0xff */
265 for(i=0; i < ibytes + hbytes; i++) 264 for (i = 0; i < ibytes + hbytes; i++)
266 iseq[i] = 0xff; 265 iseq[i] = 0xff;
267 cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg); 266 cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg);
268 cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE); 267 cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE);
@@ -271,11 +270,11 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
271 cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length); 270 cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length);
272#ifdef VOYAGER_CAT_DEBUG 271#ifdef VOYAGER_CAT_DEBUG
273 printk("ins = 0x%x, iseq: ", inst); 272 printk("ins = 0x%x, iseq: ", inst);
274 for(i=0; i< ibytes + hbytes; i++) 273 for (i = 0; i < ibytes + hbytes; i++)
275 printk("0x%x ", iseq[i]); 274 printk("0x%x ", iseq[i]);
276 printk("\n"); 275 printk("\n");
277#endif 276#endif
278 if(cat_shiftout(iseq, ibytes, hbytes, padbits)) { 277 if (cat_shiftout(iseq, ibytes, hbytes, padbits)) {
279 CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n")); 278 CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n"));
280 return 1; 279 return 1;
281 } 280 }
@@ -284,72 +283,74 @@ cat_sendinst(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 op)
284} 283}
285 284
286static int 285static int
287cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 286cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
288 __u8 *value) 287 __u8 * value)
289{ 288{
290 if(!modp->scan_path_connected) { 289 if (!modp->scan_path_connected) {
291 if(asicp->asic_id != VOYAGER_CAT_ID) { 290 if (asicp->asic_id != VOYAGER_CAT_ID) {
292 CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n")); 291 CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n"));
293 return 1; 292 return 1;
294 } 293 }
295 if(reg > VOYAGER_SUBADDRHI) 294 if (reg > VOYAGER_SUBADDRHI)
296 outb(VOYAGER_CAT_RUN, CAT_CMD); 295 outb(VOYAGER_CAT_RUN, CAT_CMD);
297 outb(VOYAGER_CAT_DRCYC, CAT_CMD); 296 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
298 outb(VOYAGER_CAT_HEADER, CAT_DATA); 297 outb(VOYAGER_CAT_HEADER, CAT_DATA);
299 *value = inb(CAT_DATA); 298 *value = inb(CAT_DATA);
300 outb(0xAA, CAT_DATA); 299 outb(0xAA, CAT_DATA);
301 if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) { 300 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
302 CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n")); 301 CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n"));
303 return 1; 302 return 1;
304 } 303 }
305 return 0; 304 return 0;
306 } 305 } else {
307 else { 306 __u16 sbits = modp->num_asics - 1 + asicp->ireg_length;
308 __u16 sbits = modp->num_asics -1 + asicp->ireg_length;
309 __u16 sbytes = sbits / BITS_PER_BYTE; 307 __u16 sbytes = sbits / BITS_PER_BYTE;
310 __u16 tbytes; 308 __u16 tbytes;
311 __u8 string[VOYAGER_MAX_SCAN_PATH], trailer[VOYAGER_MAX_REG_SIZE]; 309 __u8 string[VOYAGER_MAX_SCAN_PATH],
310 trailer[VOYAGER_MAX_REG_SIZE];
312 __u8 padbits; 311 __u8 padbits;
313 int i; 312 int i;
314 313
315 outb(VOYAGER_CAT_DRCYC, CAT_CMD); 314 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
316 315
317 if((padbits = sbits % BITS_PER_BYTE) != 0) { 316 if ((padbits = sbits % BITS_PER_BYTE) != 0) {
318 padbits = BITS_PER_BYTE - padbits; 317 padbits = BITS_PER_BYTE - padbits;
319 sbytes++; 318 sbytes++;
320 } 319 }
321 tbytes = asicp->ireg_length / BITS_PER_BYTE; 320 tbytes = asicp->ireg_length / BITS_PER_BYTE;
322 if(asicp->ireg_length % BITS_PER_BYTE) 321 if (asicp->ireg_length % BITS_PER_BYTE)
323 tbytes++; 322 tbytes++;
324 CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n", 323 CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n",
325 tbytes, sbytes, padbits)); 324 tbytes, sbytes, padbits));
326 cat_build_header(trailer, tbytes, 1, asicp->ireg_length); 325 cat_build_header(trailer, tbytes, 1, asicp->ireg_length);
327 326
328 327 for (i = tbytes - 1; i >= 0; i--) {
329 for(i = tbytes - 1; i >= 0; i--) {
330 outb(trailer[i], CAT_DATA); 328 outb(trailer[i], CAT_DATA);
331 string[sbytes + i] = inb(CAT_DATA); 329 string[sbytes + i] = inb(CAT_DATA);
332 } 330 }
333 331
334 for(i = sbytes - 1; i >= 0; i--) { 332 for (i = sbytes - 1; i >= 0; i--) {
335 outb(0xaa, CAT_DATA); 333 outb(0xaa, CAT_DATA);
336 string[i] = inb(CAT_DATA); 334 string[i] = inb(CAT_DATA);
337 } 335 }
338 *value = 0; 336 *value = 0;
339 cat_unpack(string, padbits + (tbytes * BITS_PER_BYTE) + asicp->asic_location, value, asicp->ireg_length); 337 cat_unpack(string,
338 padbits + (tbytes * BITS_PER_BYTE) +
339 asicp->asic_location, value, asicp->ireg_length);
340#ifdef VOYAGER_CAT_DEBUG 340#ifdef VOYAGER_CAT_DEBUG
341 printk("value=0x%x, string: ", *value); 341 printk("value=0x%x, string: ", *value);
342 for(i=0; i< tbytes+sbytes; i++) 342 for (i = 0; i < tbytes + sbytes; i++)
343 printk("0x%x ", string[i]); 343 printk("0x%x ", string[i]);
344 printk("\n"); 344 printk("\n");
345#endif 345#endif
346 346
347 /* sanity check the rest of the return */ 347 /* sanity check the rest of the return */
348 for(i=0; i < tbytes; i++) { 348 for (i = 0; i < tbytes; i++) {
349 __u8 input = 0; 349 __u8 input = 0;
350 350
351 cat_unpack(string, padbits + (i * BITS_PER_BYTE), &input, BITS_PER_BYTE); 351 cat_unpack(string, padbits + (i * BITS_PER_BYTE),
352 if(trailer[i] != input) { 352 &input, BITS_PER_BYTE);
353 if (trailer[i] != input) {
353 CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i])); 354 CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i]));
354 return 1; 355 return 1;
355 } 356 }
@@ -360,14 +361,14 @@ cat_getdata(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
360} 361}
361 362
362static int 363static int
363cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits) 364cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
364{ 365{
365 int i; 366 int i;
366 367
367 for(i = data_bytes + header_bytes - 1; i >= header_bytes; i--) 368 for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
368 outb(data[i], CAT_DATA); 369 outb(data[i], CAT_DATA);
369 370
370 for(i = header_bytes - 1; i >= 0; i--) { 371 for (i = header_bytes - 1; i >= 0; i--) {
371 __u8 header = 0; 372 __u8 header = 0;
372 __u8 input; 373 __u8 input;
373 374
@@ -376,7 +377,7 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
376 CDEBUG(("cat_shiftout: returned 0x%x\n", input)); 377 CDEBUG(("cat_shiftout: returned 0x%x\n", input));
377 cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits, 378 cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits,
378 &header, BITS_PER_BYTE); 379 &header, BITS_PER_BYTE);
379 if(input != header) { 380 if (input != header) {
380 CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header)); 381 CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header));
381 return 1; 382 return 1;
382 } 383 }
@@ -385,57 +386,57 @@ cat_shiftout(__u8 *data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
385} 386}
386 387
387static int 388static int
388cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp, 389cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
389 __u8 reg, __u8 value) 390 __u8 reg, __u8 value)
390{ 391{
391 outb(VOYAGER_CAT_DRCYC, CAT_CMD); 392 outb(VOYAGER_CAT_DRCYC, CAT_CMD);
392 if(!modp->scan_path_connected) { 393 if (!modp->scan_path_connected) {
393 if(asicp->asic_id != VOYAGER_CAT_ID) { 394 if (asicp->asic_id != VOYAGER_CAT_ID) {
394 CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n")); 395 CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n"));
395 return 1; 396 return 1;
396 } 397 }
397 outb(VOYAGER_CAT_HEADER, CAT_DATA); 398 outb(VOYAGER_CAT_HEADER, CAT_DATA);
398 outb(value, CAT_DATA); 399 outb(value, CAT_DATA);
399 if(inb(CAT_DATA) != VOYAGER_CAT_HEADER) { 400 if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
400 CDEBUG(("cat_senddata: failed to get correct header response to sent data\n")); 401 CDEBUG(("cat_senddata: failed to get correct header response to sent data\n"));
401 return 1; 402 return 1;
402 } 403 }
403 if(reg > VOYAGER_SUBADDRHI) { 404 if (reg > VOYAGER_SUBADDRHI) {
404 outb(VOYAGER_CAT_RUN, CAT_CMD); 405 outb(VOYAGER_CAT_RUN, CAT_CMD);
405 outb(VOYAGER_CAT_END, CAT_CMD); 406 outb(VOYAGER_CAT_END, CAT_CMD);
406 outb(VOYAGER_CAT_RUN, CAT_CMD); 407 outb(VOYAGER_CAT_RUN, CAT_CMD);
407 } 408 }
408 409
409 return 0; 410 return 0;
410 } 411 } else {
411 else {
412 __u16 hbytes = asicp->ireg_length / BITS_PER_BYTE; 412 __u16 hbytes = asicp->ireg_length / BITS_PER_BYTE;
413 __u16 dbytes = (modp->num_asics - 1 + asicp->ireg_length)/BITS_PER_BYTE; 413 __u16 dbytes =
414 __u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH], 414 (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE;
415 hseq[VOYAGER_MAX_REG_SIZE]; 415 __u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH],
416 hseq[VOYAGER_MAX_REG_SIZE];
416 int i; 417 int i;
417 418
418 if((padbits = (modp->num_asics - 1 419 if ((padbits = (modp->num_asics - 1
419 + asicp->ireg_length) % BITS_PER_BYTE) != 0) { 420 + asicp->ireg_length) % BITS_PER_BYTE) != 0) {
420 padbits = BITS_PER_BYTE - padbits; 421 padbits = BITS_PER_BYTE - padbits;
421 dbytes++; 422 dbytes++;
422 } 423 }
423 if(asicp->ireg_length % BITS_PER_BYTE) 424 if (asicp->ireg_length % BITS_PER_BYTE)
424 hbytes++; 425 hbytes++;
425 426
426 cat_build_header(hseq, hbytes, 1, asicp->ireg_length); 427 cat_build_header(hseq, hbytes, 1, asicp->ireg_length);
427 428
428 for(i = 0; i < dbytes + hbytes; i++) 429 for (i = 0; i < dbytes + hbytes; i++)
429 dseq[i] = 0xff; 430 dseq[i] = 0xff;
430 CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n", 431 CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n",
431 dbytes, hbytes, padbits)); 432 dbytes, hbytes, padbits));
432 cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length, 433 cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length,
433 hseq, hbytes * BITS_PER_BYTE); 434 hseq, hbytes * BITS_PER_BYTE);
434 cat_pack(dseq, asicp->asic_location, &value, 435 cat_pack(dseq, asicp->asic_location, &value,
435 asicp->ireg_length); 436 asicp->ireg_length);
436#ifdef VOYAGER_CAT_DEBUG 437#ifdef VOYAGER_CAT_DEBUG
437 printk("dseq "); 438 printk("dseq ");
438 for(i=0; i<hbytes+dbytes; i++) { 439 for (i = 0; i < hbytes + dbytes; i++) {
439 printk("0x%x ", dseq[i]); 440 printk("0x%x ", dseq[i]);
440 } 441 }
441 printk("\n"); 442 printk("\n");
@@ -445,121 +446,125 @@ cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
445} 446}
446 447
447static int 448static int
448cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 449cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 value)
449 __u8 value)
450{ 450{
451 if(cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG)) 451 if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
452 return 1; 452 return 1;
453 return cat_senddata(modp, asicp, reg, value); 453 return cat_senddata(modp, asicp, reg, value);
454} 454}
455 455
456static int 456static int
457cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, 457cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
458 __u8 *value) 458 __u8 * value)
459{ 459{
460 if(cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG)) 460 if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
461 return 1; 461 return 1;
462 return cat_getdata(modp, asicp, reg, value); 462 return cat_getdata(modp, asicp, reg, value);
463} 463}
464 464
465static int 465static int
466cat_subaddrsetup(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset, 466cat_subaddrsetup(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
467 __u16 len) 467 __u16 len)
468{ 468{
469 __u8 val; 469 __u8 val;
470 470
471 if(len > 1) { 471 if (len > 1) {
472 /* set auto increment */ 472 /* set auto increment */
473 __u8 newval; 473 __u8 newval;
474 474
475 if(cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) { 475 if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
476 CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n")); 476 CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n"));
477 return 1; 477 return 1;
478 } 478 }
479 CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n", val)); 479 CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n",
480 val));
480 newval = val | VOYAGER_AUTO_INC; 481 newval = val | VOYAGER_AUTO_INC;
481 if(newval != val) { 482 if (newval != val) {
482 if(cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) { 483 if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, val)) {
483 CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n")); 484 CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n"));
484 return 1; 485 return 1;
485 } 486 }
486 } 487 }
487 } 488 }
488 if(cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8)(offset &0xff))) { 489 if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) {
489 CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n")); 490 CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n"));
490 return 1; 491 return 1;
491 } 492 }
492 if(asicp->subaddr > VOYAGER_SUBADDR_LO) { 493 if (asicp->subaddr > VOYAGER_SUBADDR_LO) {
493 if(cat_write(modp, asicp, VOYAGER_SUBADDRHI, (__u8)(offset >> 8))) { 494 if (cat_write
495 (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) {
494 CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n")); 496 CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n"));
495 return 1; 497 return 1;
496 } 498 }
497 cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val); 499 cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val);
498 CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset, val)); 500 CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset,
501 val));
499 } 502 }
500 cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val); 503 cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val);
501 CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val)); 504 CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val));
502 return 0; 505 return 0;
503} 506}
504 507
505static int 508static int
506cat_subwrite(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset, 509cat_subwrite(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
507 __u16 len, void *buf) 510 __u16 len, void *buf)
508{ 511{
509 int i, retval; 512 int i, retval;
510 513
511 /* FIXME: need special actions for VOYAGER_CAT_ID here */ 514 /* FIXME: need special actions for VOYAGER_CAT_ID here */
512 if(asicp->asic_id == VOYAGER_CAT_ID) { 515 if (asicp->asic_id == VOYAGER_CAT_ID) {
513 CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n")); 516 CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n"));
514 /* FIXME -- This is supposed to be handled better 517 /* FIXME -- This is supposed to be handled better
515 * There is a problem writing to the cat asic in the 518 * There is a problem writing to the cat asic in the
516 * PSI. The 30us delay seems to work, though */ 519 * PSI. The 30us delay seems to work, though */
517 udelay(30); 520 udelay(30);
518 } 521 }
519 522
520 if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) { 523 if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
521 printk("cat_subwrite: cat_subaddrsetup FAILED\n"); 524 printk("cat_subwrite: cat_subaddrsetup FAILED\n");
522 return retval; 525 return retval;
523 } 526 }
524 527
525 if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) { 528 if (cat_sendinst
529 (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
526 printk("cat_subwrite: cat_sendinst FAILED\n"); 530 printk("cat_subwrite: cat_sendinst FAILED\n");
527 return 1; 531 return 1;
528 } 532 }
529 for(i = 0; i < len; i++) { 533 for (i = 0; i < len; i++) {
530 if(cat_senddata(modp, asicp, 0xFF, ((__u8 *)buf)[i])) { 534 if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) {
531 printk("cat_subwrite: cat_sendata element at %d FAILED\n", i); 535 printk
536 ("cat_subwrite: cat_sendata element at %d FAILED\n",
537 i);
532 return 1; 538 return 1;
533 } 539 }
534 } 540 }
535 return 0; 541 return 0;
536} 542}
537static int 543static int
538cat_subread(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset, 544cat_subread(voyager_module_t * modp, voyager_asic_t * asicp, __u16 offset,
539 __u16 len, void *buf) 545 __u16 len, void *buf)
540{ 546{
541 int i, retval; 547 int i, retval;
542 548
543 if((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) { 549 if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
544 CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n")); 550 CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n"));
545 return retval; 551 return retval;
546 } 552 }
547 553
548 if(cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) { 554 if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
549 CDEBUG(("cat_subread: cat_sendinst failed\n")); 555 CDEBUG(("cat_subread: cat_sendinst failed\n"));
550 return 1; 556 return 1;
551 } 557 }
552 for(i = 0; i < len; i++) { 558 for (i = 0; i < len; i++) {
553 if(cat_getdata(modp, asicp, 0xFF, 559 if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) {
554 &((__u8 *)buf)[i])) { 560 CDEBUG(("cat_subread: cat_getdata element %d failed\n",
555 CDEBUG(("cat_subread: cat_getdata element %d failed\n", i)); 561 i));
556 return 1; 562 return 1;
557 } 563 }
558 } 564 }
559 return 0; 565 return 0;
560} 566}
561 567
562
563/* buffer for storing EPROM data read in during initialisation */ 568/* buffer for storing EPROM data read in during initialisation */
564static __initdata __u8 eprom_buf[0xFFFF]; 569static __initdata __u8 eprom_buf[0xFFFF];
565static voyager_module_t *voyager_initial_module; 570static voyager_module_t *voyager_initial_module;
@@ -568,8 +573,7 @@ static voyager_module_t *voyager_initial_module;
568 * boot cpu *after* all memory initialisation has been done (so we can 573 * boot cpu *after* all memory initialisation has been done (so we can
569 * use kmalloc) but before smp initialisation, so we can probe the SMP 574 * use kmalloc) but before smp initialisation, so we can probe the SMP
570 * configuration and pick up necessary information. */ 575 * configuration and pick up necessary information. */
571void __init 576void __init voyager_cat_init(void)
572voyager_cat_init(void)
573{ 577{
574 voyager_module_t **modpp = &voyager_initial_module; 578 voyager_module_t **modpp = &voyager_initial_module;
575 voyager_asic_t **asicpp; 579 voyager_asic_t **asicpp;
@@ -578,27 +582,29 @@ voyager_cat_init(void)
578 unsigned long qic_addr = 0; 582 unsigned long qic_addr = 0;
579 __u8 qabc_data[0x20]; 583 __u8 qabc_data[0x20];
580 __u8 num_submodules, val; 584 __u8 num_submodules, val;
581 voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *)&eprom_buf[0]; 585 voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *) & eprom_buf[0];
582 586
583 __u8 cmos[4]; 587 __u8 cmos[4];
584 unsigned long addr; 588 unsigned long addr;
585 589
586 /* initiallise the SUS mailbox */ 590 /* initiallise the SUS mailbox */
587 for(i=0; i<sizeof(cmos); i++) 591 for (i = 0; i < sizeof(cmos); i++)
588 cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i); 592 cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i);
589 addr = *(unsigned long *)cmos; 593 addr = *(unsigned long *)cmos;
590 if((addr & 0xff000000) != 0xff000000) { 594 if ((addr & 0xff000000) != 0xff000000) {
591 printk(KERN_ERR "Voyager failed to get SUS mailbox (addr = 0x%lx\n", addr); 595 printk(KERN_ERR
596 "Voyager failed to get SUS mailbox (addr = 0x%lx\n",
597 addr);
592 } else { 598 } else {
593 static struct resource res; 599 static struct resource res;
594 600
595 res.name = "voyager SUS"; 601 res.name = "voyager SUS";
596 res.start = addr; 602 res.start = addr;
597 res.end = addr+0x3ff; 603 res.end = addr + 0x3ff;
598 604
599 request_resource(&iomem_resource, &res); 605 request_resource(&iomem_resource, &res);
600 voyager_SUS = (struct voyager_SUS *) 606 voyager_SUS = (struct voyager_SUS *)
601 ioremap(addr, 0x400); 607 ioremap(addr, 0x400);
602 printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n", 608 printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n",
603 voyager_SUS->SUS_version); 609 voyager_SUS->SUS_version);
604 voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION; 610 voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION;
@@ -609,8 +615,6 @@ voyager_cat_init(void)
609 voyager_extended_vic_processors = 0; 615 voyager_extended_vic_processors = 0;
610 voyager_quad_processors = 0; 616 voyager_quad_processors = 0;
611 617
612
613
614 printk("VOYAGER: beginning CAT bus probe\n"); 618 printk("VOYAGER: beginning CAT bus probe\n");
615 /* set up the SuperSet Port Block which tells us where the 619 /* set up the SuperSet Port Block which tells us where the
616 * CAT communication port is */ 620 * CAT communication port is */
@@ -618,14 +622,14 @@ voyager_cat_init(void)
618 VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb)); 622 VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb));
619 623
620 /* now find out if were 8 slot or normal */ 624 /* now find out if were 8 slot or normal */
621 if((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER) 625 if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
622 == EIGHT_SLOT_IDENTIFIER) { 626 == EIGHT_SLOT_IDENTIFIER) {
623 voyager_8slot = 1; 627 voyager_8slot = 1;
624 printk(KERN_NOTICE "Voyager: Eight slot 51xx configuration detected\n"); 628 printk(KERN_NOTICE
629 "Voyager: Eight slot 51xx configuration detected\n");
625 } 630 }
626 631
627 for(i = VOYAGER_MIN_MODULE; 632 for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) {
628 i <= VOYAGER_MAX_MODULE; i++) {
629 __u8 input; 633 __u8 input;
630 int asic; 634 int asic;
631 __u16 eprom_size; 635 __u16 eprom_size;
@@ -643,21 +647,21 @@ voyager_cat_init(void)
643 outb(0xAA, CAT_DATA); 647 outb(0xAA, CAT_DATA);
644 input = inb(CAT_DATA); 648 input = inb(CAT_DATA);
645 outb(VOYAGER_CAT_END, CAT_CMD); 649 outb(VOYAGER_CAT_END, CAT_CMD);
646 if(input != VOYAGER_CAT_HEADER) { 650 if (input != VOYAGER_CAT_HEADER) {
647 continue; 651 continue;
648 } 652 }
649 CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i, 653 CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i,
650 cat_module_name(i))); 654 cat_module_name(i)));
651 *modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++];*/ 655 *modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++]; */
652 if(*modpp == NULL) { 656 if (*modpp == NULL) {
653 printk("**WARNING** kmalloc failure in cat_init\n"); 657 printk("**WARNING** kmalloc failure in cat_init\n");
654 continue; 658 continue;
655 } 659 }
656 memset(*modpp, 0, sizeof(voyager_module_t)); 660 memset(*modpp, 0, sizeof(voyager_module_t));
657 /* need temporary asic for cat_subread. It will be 661 /* need temporary asic for cat_subread. It will be
658 * filled in correctly later */ 662 * filled in correctly later */
659 (*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count];*/ 663 (*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count]; */
660 if((*modpp)->asic == NULL) { 664 if ((*modpp)->asic == NULL) {
661 printk("**WARNING** kmalloc failure in cat_init\n"); 665 printk("**WARNING** kmalloc failure in cat_init\n");
662 continue; 666 continue;
663 } 667 }
@@ -666,47 +670,52 @@ voyager_cat_init(void)
666 (*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI; 670 (*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI;
667 (*modpp)->module_addr = i; 671 (*modpp)->module_addr = i;
668 (*modpp)->scan_path_connected = 0; 672 (*modpp)->scan_path_connected = 0;
669 if(i == VOYAGER_PSI) { 673 if (i == VOYAGER_PSI) {
670 /* Exception leg for modules with no EEPROM */ 674 /* Exception leg for modules with no EEPROM */
671 printk("Module \"%s\"\n", cat_module_name(i)); 675 printk("Module \"%s\"\n", cat_module_name(i));
672 continue; 676 continue;
673 } 677 }
674 678
675 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET)); 679 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
676 outb(VOYAGER_CAT_RUN, CAT_CMD); 680 outb(VOYAGER_CAT_RUN, CAT_CMD);
677 cat_disconnect(*modpp, (*modpp)->asic); 681 cat_disconnect(*modpp, (*modpp)->asic);
678 if(cat_subread(*modpp, (*modpp)->asic, 682 if (cat_subread(*modpp, (*modpp)->asic,
679 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size), 683 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
680 &eprom_size)) { 684 &eprom_size)) {
681 printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i); 685 printk
686 ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
687 i);
682 outb(VOYAGER_CAT_END, CAT_CMD); 688 outb(VOYAGER_CAT_END, CAT_CMD);
683 continue; 689 continue;
684 } 690 }
685 if(eprom_size > sizeof(eprom_buf)) { 691 if (eprom_size > sizeof(eprom_buf)) {
686 printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n", i, eprom_size); 692 printk
693 ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n",
694 i, eprom_size);
687 outb(VOYAGER_CAT_END, CAT_CMD); 695 outb(VOYAGER_CAT_END, CAT_CMD);
688 continue; 696 continue;
689 } 697 }
690 outb(VOYAGER_CAT_END, CAT_CMD); 698 outb(VOYAGER_CAT_END, CAT_CMD);
691 outb(VOYAGER_CAT_RUN, CAT_CMD); 699 outb(VOYAGER_CAT_RUN, CAT_CMD);
692 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size)); 700 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
693 if(cat_subread(*modpp, (*modpp)->asic, 0, 701 eprom_size));
694 eprom_size, eprom_buf)) { 702 if (cat_subread
703 (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
695 outb(VOYAGER_CAT_END, CAT_CMD); 704 outb(VOYAGER_CAT_END, CAT_CMD);
696 continue; 705 continue;
697 } 706 }
698 outb(VOYAGER_CAT_END, CAT_CMD); 707 outb(VOYAGER_CAT_END, CAT_CMD);
699 printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n", 708 printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n",
700 cat_module_name(i), eprom_hdr->version_id, 709 cat_module_name(i), eprom_hdr->version_id,
701 *((__u32 *)eprom_hdr->tracer), eprom_hdr->num_asics); 710 *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics);
702 (*modpp)->ee_size = eprom_hdr->ee_size; 711 (*modpp)->ee_size = eprom_hdr->ee_size;
703 (*modpp)->num_asics = eprom_hdr->num_asics; 712 (*modpp)->num_asics = eprom_hdr->num_asics;
704 asicpp = &((*modpp)->asic); 713 asicpp = &((*modpp)->asic);
705 sp_offset = eprom_hdr->scan_path_offset; 714 sp_offset = eprom_hdr->scan_path_offset;
706 /* All we really care about are the Quad cards. We 715 /* All we really care about are the Quad cards. We
707 * identify them because they are in a processor slot 716 * identify them because they are in a processor slot
708 * and have only four asics */ 717 * and have only four asics */
709 if((i < 0x10 || (i>=0x14 && i < 0x1c) || i>0x1f)) { 718 if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) {
710 modpp = &((*modpp)->next); 719 modpp = &((*modpp)->next);
711 continue; 720 continue;
712 } 721 }
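
For reference, a stand-alone sketch (not part of the patch) of the module-address test the hunk above reindents: only CAT module addresses 0x10-0x13 and 0x1c-0x1f are processor slots that can carry a Quad card, every other module is skipped. The helper name is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* mirror of the skip test: i < 0x10 || (0x14 <= i < 0x1c) || i > 0x1f */
static int is_quad_candidate(uint8_t i)
{
	return !(i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f);
}

int main(void)
{
	for (int i = 0; i <= 0x20; i++)
		if (is_quad_candidate((uint8_t)i))
			printf("module 0x%02x may be a Quad card\n", i);
	return 0;	/* prints 0x10-0x13 and 0x1c-0x1f */
}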
@@ -717,16 +726,17 @@ voyager_cat_init(void)
717 &num_submodules); 726 &num_submodules);
718 /* lowest two bits, active low */ 727 /* lowest two bits, active low */
719 num_submodules = ~(0xfc | num_submodules); 728 num_submodules = ~(0xfc | num_submodules);
720 CDEBUG(("VOYAGER CAT: %d submodules present\n", num_submodules)); 729 CDEBUG(("VOYAGER CAT: %d submodules present\n",
721 if(num_submodules == 0) { 730 num_submodules));
731 if (num_submodules == 0) {
722 /* fill in the dyadic extended processors */ 732 /* fill in the dyadic extended processors */
723 __u8 cpu = i & 0x07; 733 __u8 cpu = i & 0x07;
724 734
725 printk("Module \"%s\": Dyadic Processor Card\n", 735 printk("Module \"%s\": Dyadic Processor Card\n",
726 cat_module_name(i)); 736 cat_module_name(i));
727 voyager_extended_vic_processors |= (1<<cpu); 737 voyager_extended_vic_processors |= (1 << cpu);
728 cpu += 4; 738 cpu += 4;
729 voyager_extended_vic_processors |= (1<<cpu); 739 voyager_extended_vic_processors |= (1 << cpu);
730 outb(VOYAGER_CAT_END, CAT_CMD); 740 outb(VOYAGER_CAT_END, CAT_CMD);
731 continue; 741 continue;
732 } 742 }
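
A minimal illustration of the two decodes in the hunk above: the submodule count comes from the two lowest, active-low bits of the SUBMODPRESENT readout, and a dyadic card marks two VIC processors, the slot's CPU and the same CPU plus four. The register values and module address below are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* two lowest bits, active low: 0xff -> 0, 0xfe -> 1, 0xfc -> 3 */
	uint8_t readouts[] = { 0xff, 0xfe, 0xfc };
	for (unsigned k = 0; k < 3; k++) {
		uint8_t n = (uint8_t)~(0xfc | readouts[k]);
		printf("0x%02x -> %u submodules\n", readouts[k], n);
	}

	/* dyadic card in module 0x02: both VIC CPUs of the slot become extended */
	uint8_t cpu = 0x02 & 0x07;
	uint32_t extended_vic = (1u << cpu) | (1u << (cpu + 4));
	printf("dyadic extended VIC mask: 0x%x\n", extended_vic);	/* 0x44 */
	return 0;
}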
@@ -740,28 +750,32 @@ voyager_cat_init(void)
740 cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val); 750 cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val);
741 751
742 outb(VOYAGER_CAT_END, CAT_CMD); 752 outb(VOYAGER_CAT_END, CAT_CMD);
743
744 753
745 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET)); 754 CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
746 outb(VOYAGER_CAT_RUN, CAT_CMD); 755 outb(VOYAGER_CAT_RUN, CAT_CMD);
747 cat_disconnect(*modpp, (*modpp)->asic); 756 cat_disconnect(*modpp, (*modpp)->asic);
748 if(cat_subread(*modpp, (*modpp)->asic, 757 if (cat_subread(*modpp, (*modpp)->asic,
749 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size), 758 VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
750 &eprom_size)) { 759 &eprom_size)) {
751 printk("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n", i); 760 printk
761 ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
762 i);
752 outb(VOYAGER_CAT_END, CAT_CMD); 763 outb(VOYAGER_CAT_END, CAT_CMD);
753 continue; 764 continue;
754 } 765 }
755 if(eprom_size > sizeof(eprom_buf)) { 766 if (eprom_size > sizeof(eprom_buf)) {
756 printk("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n", i, eprom_size); 767 printk
768 ("**WARNING**: Voyager insufficient size to read EPROM data, module 0x%x. Need %d\n",
769 i, eprom_size);
757 outb(VOYAGER_CAT_END, CAT_CMD); 770 outb(VOYAGER_CAT_END, CAT_CMD);
758 continue; 771 continue;
759 } 772 }
760 outb(VOYAGER_CAT_END, CAT_CMD); 773 outb(VOYAGER_CAT_END, CAT_CMD);
761 outb(VOYAGER_CAT_RUN, CAT_CMD); 774 outb(VOYAGER_CAT_RUN, CAT_CMD);
762 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i, eprom_size)); 775 CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
763 if(cat_subread(*modpp, (*modpp)->asic, 0, 776 eprom_size));
764 eprom_size, eprom_buf)) { 777 if (cat_subread
778 (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
765 outb(VOYAGER_CAT_END, CAT_CMD); 779 outb(VOYAGER_CAT_END, CAT_CMD);
766 continue; 780 continue;
767 } 781 }
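
Both EEPROM hunks keep the same two-stage read pattern: fetch the stored image size from the XSUM end offset, refuse anything larger than the local buffer, then fetch the whole image. A sketch of that control flow, with the CAT-bus transfer replaced by a memcpy stand-in; eeprom_subread(), the offset and the sizes are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XSUM_END_OFFSET 0x2a		/* stand-in for VOYAGER_XSUM_END_OFFSET */

static uint8_t fake_eeprom[0x800];	/* pretend module EEPROM contents */

/* stand-in for cat_subread(): copy 'len' bytes starting at 'offset' */
static int eeprom_subread(uint16_t offset, uint16_t len, void *buf)
{
	if (offset + len > sizeof(fake_eeprom))
		return 1;		/* non-zero means failure, as in the driver */
	memcpy(buf, fake_eeprom + offset, len);
	return 0;
}

int main(void)
{
	static uint8_t eprom_buf[0x800];
	uint16_t eprom_size = 0;

	fake_eeprom[XSUM_END_OFFSET] = 0x00;	/* little-endian size 0x0200 */
	fake_eeprom[XSUM_END_OFFSET + 1] = 0x02;

	/* stage 1: how big is the image? */
	if (eeprom_subread(XSUM_END_OFFSET, sizeof(eprom_size), &eprom_size))
		return 1;
	/* stage 2: bounds check, then pull the whole image in one go */
	if (eprom_size > sizeof(eprom_buf))
		return 1;
	if (eeprom_subread(0, eprom_size, eprom_buf))
		return 1;
	printf("read %u byte EEPROM image\n", eprom_size);
	return 0;
}

Reading the size first is what lets the oversize case fail with a warning instead of overrunning eprom_buf.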
@@ -773,30 +787,35 @@ voyager_cat_init(void)
773 sp_offset = eprom_hdr->scan_path_offset; 787 sp_offset = eprom_hdr->scan_path_offset;
774 /* get rid of the dummy CAT asic and read the real one */ 788 /* get rid of the dummy CAT asic and read the real one */
775 kfree((*modpp)->asic); 789 kfree((*modpp)->asic);
776 for(asic=0; asic < (*modpp)->num_asics; asic++) { 790 for (asic = 0; asic < (*modpp)->num_asics; asic++) {
777 int j; 791 int j;
778 voyager_asic_t *asicp = *asicpp 792 voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++]; */
779 = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
780 voyager_sp_table_t *sp_table; 793 voyager_sp_table_t *sp_table;
781 voyager_at_t *asic_table; 794 voyager_at_t *asic_table;
782 voyager_jtt_t *jtag_table; 795 voyager_jtt_t *jtag_table;
783 796
784 if(asicp == NULL) { 797 if (asicp == NULL) {
785 printk("**WARNING** kmalloc failure in cat_init\n"); 798 printk
799 ("**WARNING** kmalloc failure in cat_init\n");
786 continue; 800 continue;
787 } 801 }
788 asicpp = &(asicp->next); 802 asicpp = &(asicp->next);
789 asicp->asic_location = asic; 803 asicp->asic_location = asic;
790 sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset); 804 sp_table =
805 (voyager_sp_table_t *) (eprom_buf + sp_offset);
791 asicp->asic_id = sp_table->asic_id; 806 asicp->asic_id = sp_table->asic_id;
792 asic_table = (voyager_at_t *)(eprom_buf + sp_table->asic_data_offset); 807 asic_table =
793 for(j=0; j<4; j++) 808 (voyager_at_t *) (eprom_buf +
809 sp_table->asic_data_offset);
810 for (j = 0; j < 4; j++)
794 asicp->jtag_id[j] = asic_table->jtag_id[j]; 811 asicp->jtag_id[j] = asic_table->jtag_id[j];
795 jtag_table = (voyager_jtt_t *)(eprom_buf + asic_table->jtag_offset); 812 jtag_table =
813 (voyager_jtt_t *) (eprom_buf +
814 asic_table->jtag_offset);
796 asicp->ireg_length = jtag_table->ireg_len; 815 asicp->ireg_length = jtag_table->ireg_len;
797 asicp->bit_location = (*modpp)->inst_bits; 816 asicp->bit_location = (*modpp)->inst_bits;
798 (*modpp)->inst_bits += asicp->ireg_length; 817 (*modpp)->inst_bits += asicp->ireg_length;
799 if(asicp->ireg_length > (*modpp)->largest_reg) 818 if (asicp->ireg_length > (*modpp)->largest_reg)
800 (*modpp)->largest_reg = asicp->ireg_length; 819 (*modpp)->largest_reg = asicp->ireg_length;
801 if (asicp->ireg_length < (*modpp)->smallest_reg || 820 if (asicp->ireg_length < (*modpp)->smallest_reg ||
802 (*modpp)->smallest_reg == 0) 821 (*modpp)->smallest_reg == 0)
@@ -804,15 +823,13 @@ voyager_cat_init(void)
804 CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n", 823 CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n",
805 asicp->asic_id, asicp->ireg_length, 824 asicp->asic_id, asicp->ireg_length,
806 asicp->bit_location)); 825 asicp->bit_location));
807 if(asicp->asic_id == VOYAGER_QUAD_QABC) { 826 if (asicp->asic_id == VOYAGER_QUAD_QABC) {
808 CDEBUG(("VOYAGER CAT: QABC ASIC found\n")); 827 CDEBUG(("VOYAGER CAT: QABC ASIC found\n"));
809 qabc_asic = asicp; 828 qabc_asic = asicp;
810 } 829 }
811 sp_offset += sizeof(voyager_sp_table_t); 830 sp_offset += sizeof(voyager_sp_table_t);
812 } 831 }
813 CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", 832 CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg));
814 (*modpp)->inst_bits, (*modpp)->largest_reg,
815 (*modpp)->smallest_reg));
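
The per-ASIC loop above is mostly instruction-register bookkeeping for the scan path; a compact model of that accumulation, using a made-up list of register lengths:

#include <stdio.h>

int main(void)
{
	int ireg_len[] = { 8, 4, 16, 4 };	/* invented per-ASIC IR lengths */
	int inst_bits = 0, largest = 0, smallest = 0;

	for (int i = 0; i < 4; i++) {
		int bit_location = inst_bits;	/* where this ASIC sits in the chain */
		inst_bits += ireg_len[i];
		if (ireg_len[i] > largest)
			largest = ireg_len[i];
		if (ireg_len[i] < smallest || smallest == 0)
			smallest = ireg_len[i];
		printf("asic %d: bits %d..%d\n", i,
		       bit_location, bit_location + ireg_len[i] - 1);
	}
	printf("inst_bits=%d largest=%d smallest=%d\n",
	       inst_bits, largest, smallest);
	return 0;
}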
816 /* OK, now we have the QUAD ASICs set up, use them. 833 /* OK, now we have the QUAD ASICs set up, use them.
817 * we need to: 834 * we need to:
818 * 835 *
@@ -828,10 +845,11 @@ voyager_cat_init(void)
828 qic_addr = qabc_data[5] << 8; 845 qic_addr = qabc_data[5] << 8;
829 qic_addr = (qic_addr | qabc_data[6]) << 8; 846 qic_addr = (qic_addr | qabc_data[6]) << 8;
830 qic_addr = (qic_addr | qabc_data[7]) << 8; 847 qic_addr = (qic_addr | qabc_data[7]) << 8;
831 printk("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n", 848 printk
832 cat_module_name(i), qic_addr, qabc_data[8]); 849 ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
850 cat_module_name(i), qic_addr, qabc_data[8]);
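
The CPI base address printed above is assembled from three bytes of the QABC ASIC data; a one-function model of that shift chain, with invented byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t qabc_data[9] = { 0 };
	qabc_data[5] = 0xfe;	/* example bytes read back from the QABC */
	qabc_data[6] = 0xc0;
	qabc_data[7] = 0x00;

	/* same chain as the driver: result is b5<<24 | b6<<16 | b7<<8 */
	uint32_t qic_addr = (uint32_t)qabc_data[5] << 8;
	qic_addr = (qic_addr | qabc_data[6]) << 8;
	qic_addr = (qic_addr | qabc_data[7]) << 8;
	printf("QIC CPI base 0x%08x\n", qic_addr);	/* 0xfec00000 */
	return 0;
}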
833#if 0 /* plumbing fails---FIXME */ 851#if 0 /* plumbing fails---FIXME */
834 if((qabc_data[8] & 0xf0) == 0) { 852 if ((qabc_data[8] & 0xf0) == 0) {
835 /* FIXME: 32 way 8 CPU slot monster cannot be 853 /* FIXME: 32 way 8 CPU slot monster cannot be
836 * plumbed this way---need to check for it */ 854 * plumbed this way---need to check for it */
837 855
@@ -842,94 +860,97 @@ voyager_cat_init(void)
842#ifdef VOYAGER_CAT_DEBUG 860#ifdef VOYAGER_CAT_DEBUG
843 /* verify plumbing */ 861 /* verify plumbing */
844 cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]); 862 cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
845 if((qabc_data[8] & 0xf0) == 0) { 863 if ((qabc_data[8] & 0xf0) == 0) {
846 CDEBUG(("PLUMBING FAILED: 0x%x\n", qabc_data[8])); 864 CDEBUG(("PLUMBING FAILED: 0x%x\n",
865 qabc_data[8]));
847 } 866 }
848#endif 867#endif
849 } 868 }
850#endif 869#endif
851 870
852 { 871 {
853 struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL); 872 struct resource *res =
873 kzalloc(sizeof(struct resource), GFP_KERNEL);
854 res->name = kmalloc(128, GFP_KERNEL); 874 res->name = kmalloc(128, GFP_KERNEL);
855 sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i)); 875 sprintf((char *)res->name, "Voyager %s Quad CPI",
876 cat_module_name(i));
856 res->start = qic_addr; 877 res->start = qic_addr;
857 res->end = qic_addr + 0x3ff; 878 res->end = qic_addr + 0x3ff;
858 request_resource(&iomem_resource, res); 879 request_resource(&iomem_resource, res);
859 } 880 }
860 881
861 qic_addr = (unsigned long)ioremap(qic_addr, 0x400); 882 qic_addr = (unsigned long)ioremap(qic_addr, 0x400);
862 883
863 for(j = 0; j < 4; j++) { 884 for (j = 0; j < 4; j++) {
864 __u8 cpu; 885 __u8 cpu;
865 886
866 if(voyager_8slot) { 887 if (voyager_8slot) {
867 /* 8 slot has a different mapping, 888 /* 8 slot has a different mapping,
868 * each slot has only one vic line, so 889 * each slot has only one vic line, so
869 * 1 cpu in each slot must be < 8 */ 890 * 1 cpu in each slot must be < 8 */
870 cpu = (i & 0x07) + j*8; 891 cpu = (i & 0x07) + j * 8;
871 } else { 892 } else {
872 cpu = (i & 0x03) + j*4; 893 cpu = (i & 0x03) + j * 4;
873 } 894 }
874 if( (qabc_data[8] & (1<<j))) { 895 if ((qabc_data[8] & (1 << j))) {
875 voyager_extended_vic_processors |= (1<<cpu); 896 voyager_extended_vic_processors |= (1 << cpu);
876 } 897 }
877 if(qabc_data[8] & (1<<(j+4)) ) { 898 if (qabc_data[8] & (1 << (j + 4))) {
878 /* Second SET register plumbed: Quad 899 /* Second SET register plumbed: Quad
879 * card has two VIC connected CPUs. 900 * card has two VIC connected CPUs.
880 * Secondary cannot be booted as a VIC 901 * Secondary cannot be booted as a VIC
881 * CPU */ 902 * CPU */
882 voyager_extended_vic_processors |= (1<<cpu); 903 voyager_extended_vic_processors |= (1 << cpu);
883 voyager_allowed_boot_processors &= (~(1<<cpu)); 904 voyager_allowed_boot_processors &=
905 (~(1 << cpu));
884 } 906 }
885 907
886 voyager_quad_processors |= (1<<cpu); 908 voyager_quad_processors |= (1 << cpu);
887 voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *) 909 voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
888 (qic_addr+(j<<8)); 910 (qic_addr + (j << 8));
889 CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu, 911 CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
890 (unsigned long)voyager_quad_cpi_addr[cpu])); 912 (unsigned long)voyager_quad_cpi_addr[cpu]));
891 } 913 }
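
A stand-alone model of the loop above: each of the four CPUs on a Quad card gets a VIC CPU number derived from the module address (one formula for the 8-slot chassis, another for the 4-slot), plus a 256-byte slice of the ioremapped QIC window. The SET-register bit tests are reduced to comments, and the module address and base address are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int eight_slot = 0;			/* pretend 4-slot chassis */
	uint8_t module_addr = 0x11;		/* second processor slot */
	uintptr_t qic_base = 0xfec00000;	/* pretend ioremap() result */

	for (int j = 0; j < 4; j++) {
		/* 8-slot: one VIC line per slot, so CPUs are slot + j*8;
		 * 4-slot: slot's two low bits + j*4 */
		unsigned cpu = eight_slot ? (module_addr & 0x07) + j * 8
					  : (module_addr & 0x03) + j * 4;
		/* qabc_data[8] bit j   -> CPU is VIC-visible (extended)
		 * qabc_data[8] bit j+4 -> second SET register: CPU must not VIC-boot */
		uintptr_t cpi = qic_base + ((uintptr_t)j << 8);
		printf("quad cpu %u: CPI window at 0x%lx\n", cpu,
		       (unsigned long)cpi);
	}
	return 0;
}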
892 outb(VOYAGER_CAT_END, CAT_CMD); 914 outb(VOYAGER_CAT_END, CAT_CMD);
893 915
894
895
896 *asicpp = NULL; 916 *asicpp = NULL;
897 modpp = &((*modpp)->next); 917 modpp = &((*modpp)->next);
898 } 918 }
899 *modpp = NULL; 919 *modpp = NULL;
900 printk("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n", voyager_extended_vic_processors, voyager_quad_processors, voyager_allowed_boot_processors); 920 printk
921 ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n",
922 voyager_extended_vic_processors, voyager_quad_processors,
923 voyager_allowed_boot_processors);
901 request_resource(&ioport_resource, &vic_res); 924 request_resource(&ioport_resource, &vic_res);
902 if(voyager_quad_processors) 925 if (voyager_quad_processors)
903 request_resource(&ioport_resource, &qic_res); 926 request_resource(&ioport_resource, &qic_res);
904 /* set up the front power switch */ 927 /* set up the front power switch */
905} 928}
906 929
907int 930int voyager_cat_readb(__u8 module, __u8 asic, int reg)
908voyager_cat_readb(__u8 module, __u8 asic, int reg)
909{ 931{
910 return 0; 932 return 0;
911} 933}
912 934
913static int 935static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp)
914cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp)
915{ 936{
916 __u8 val; 937 __u8 val;
917 int err = 0; 938 int err = 0;
918 939
919 if(!modp->scan_path_connected) 940 if (!modp->scan_path_connected)
920 return 0; 941 return 0;
921 if(asicp->asic_id != VOYAGER_CAT_ID) { 942 if (asicp->asic_id != VOYAGER_CAT_ID) {
922 CDEBUG(("cat_disconnect: ASIC is not CAT\n")); 943 CDEBUG(("cat_disconnect: ASIC is not CAT\n"));
923 return 1; 944 return 1;
924 } 945 }
925 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val); 946 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
926 if(err) { 947 if (err) {
927 CDEBUG(("cat_disconnect: failed to read SCANPATH\n")); 948 CDEBUG(("cat_disconnect: failed to read SCANPATH\n"));
928 return err; 949 return err;
929 } 950 }
930 val &= VOYAGER_DISCONNECT_ASIC; 951 val &= VOYAGER_DISCONNECT_ASIC;
931 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val); 952 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
932 if(err) { 953 if (err) {
933 CDEBUG(("cat_disconnect: failed to write SCANPATH\n")); 954 CDEBUG(("cat_disconnect: failed to write SCANPATH\n"));
934 return err; 955 return err;
935 } 956 }
@@ -940,27 +961,26 @@ cat_disconnect(voyager_module_t *modp, voyager_asic_t *asicp)
940 return 0; 961 return 0;
941} 962}
942 963
943static int 964static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp)
944cat_connect(voyager_module_t *modp, voyager_asic_t *asicp)
945{ 965{
946 __u8 val; 966 __u8 val;
947 int err = 0; 967 int err = 0;
948 968
949 if(modp->scan_path_connected) 969 if (modp->scan_path_connected)
950 return 0; 970 return 0;
951 if(asicp->asic_id != VOYAGER_CAT_ID) { 971 if (asicp->asic_id != VOYAGER_CAT_ID) {
952 CDEBUG(("cat_connect: ASIC is not CAT\n")); 972 CDEBUG(("cat_connect: ASIC is not CAT\n"));
953 return 1; 973 return 1;
954 } 974 }
955 975
956 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val); 976 err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
957 if(err) { 977 if (err) {
958 CDEBUG(("cat_connect: failed to read SCANPATH\n")); 978 CDEBUG(("cat_connect: failed to read SCANPATH\n"));
959 return err; 979 return err;
960 } 980 }
961 val |= VOYAGER_CONNECT_ASIC; 981 val |= VOYAGER_CONNECT_ASIC;
962 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val); 982 err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
963 if(err) { 983 if (err) {
964 CDEBUG(("cat_connect: failed to write SCANPATH\n")); 984 CDEBUG(("cat_connect: failed to write SCANPATH\n"));
965 return err; 985 return err;
966 } 986 }
@@ -971,11 +991,10 @@ cat_connect(voyager_module_t *modp, voyager_asic_t *asicp)
971 return 0; 991 return 0;
972} 992}
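
cat_disconnect() and cat_connect() above are both read-modify-write cycles on the CAT ASIC's SCANPATH register; a toy version over an in-memory register, with CONNECT_BIT standing in for the real VOYAGER_CONNECT_ASIC / VOYAGER_DISCONNECT_ASIC masks:

#include <stdint.h>
#include <stdio.h>

#define CONNECT_BIT	0x01		/* stand-in for the real scan-path bit(s) */

static uint8_t scanpath_reg = 0x81;	/* pretend hardware register */

static uint8_t reg_read(void)    { return scanpath_reg; }
static void reg_write(uint8_t v) { scanpath_reg = v; }

static void scanpath_disconnect(void)
{
	uint8_t val = reg_read();
	val &= (uint8_t)~CONNECT_BIT;	/* clear the connect bit, keep the rest */
	reg_write(val);
}

static void scanpath_connect(void)
{
	uint8_t val = reg_read();
	val |= CONNECT_BIT;		/* set the connect bit, keep the rest */
	reg_write(val);
}

int main(void)
{
	scanpath_disconnect();
	printf("after disconnect: 0x%02x\n", reg_read());	/* 0x80 */
	scanpath_connect();
	printf("after connect:    0x%02x\n", reg_read());	/* 0x81 */
	return 0;
}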
973 993
974void 994void voyager_cat_power_off(void)
975voyager_cat_power_off(void)
976{ 995{
977 /* Power the machine off by writing to the PSI over the CAT 996 /* Power the machine off by writing to the PSI over the CAT
978 * bus */ 997 * bus */
979 __u8 data; 998 __u8 data;
980 voyager_module_t psi = { 0 }; 999 voyager_module_t psi = { 0 };
981 voyager_asic_t psi_asic = { 0 }; 1000 voyager_asic_t psi_asic = { 0 };
@@ -1009,8 +1028,7 @@ voyager_cat_power_off(void)
1009 1028
1010struct voyager_status voyager_status = { 0 }; 1029struct voyager_status voyager_status = { 0 };
1011 1030
1012void 1031void voyager_cat_psi(__u8 cmd, __u16 reg, __u8 * data)
1013voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
1014{ 1032{
1015 voyager_module_t psi = { 0 }; 1033 voyager_module_t psi = { 0 };
1016 voyager_asic_t psi_asic = { 0 }; 1034 voyager_asic_t psi_asic = { 0 };
@@ -1027,7 +1045,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
1027 outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT); 1045 outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
1028 outb(VOYAGER_CAT_RUN, CAT_CMD); 1046 outb(VOYAGER_CAT_RUN, CAT_CMD);
1029 cat_disconnect(&psi, &psi_asic); 1047 cat_disconnect(&psi, &psi_asic);
1030 switch(cmd) { 1048 switch (cmd) {
1031 case VOYAGER_PSI_READ: 1049 case VOYAGER_PSI_READ:
1032 cat_read(&psi, &psi_asic, reg, data); 1050 cat_read(&psi, &psi_asic, reg, data);
1033 break; 1051 break;
@@ -1047,8 +1065,7 @@ voyager_cat_psi(__u8 cmd, __u16 reg, __u8 *data)
1047 outb(VOYAGER_CAT_END, CAT_CMD); 1065 outb(VOYAGER_CAT_END, CAT_CMD);
1048} 1066}
1049 1067
1050void 1068void voyager_cat_do_common_interrupt(void)
1051voyager_cat_do_common_interrupt(void)
1052{ 1069{
1053 /* This is caused either by a memory parity error or something 1070 /* This is caused either by a memory parity error or something
1054 * in the PSI */ 1071 * in the PSI */
@@ -1057,7 +1074,7 @@ voyager_cat_do_common_interrupt(void)
1057 voyager_asic_t psi_asic = { 0 }; 1074 voyager_asic_t psi_asic = { 0 };
1058 struct voyager_psi psi_reg; 1075 struct voyager_psi psi_reg;
1059 int i; 1076 int i;
1060 re_read: 1077 re_read:
1061 psi.asic = &psi_asic; 1078 psi.asic = &psi_asic;
1062 psi.asic->asic_id = VOYAGER_CAT_ID; 1079 psi.asic->asic_id = VOYAGER_CAT_ID;
1063 psi.asic->subaddr = VOYAGER_SUBADDR_HI; 1080 psi.asic->subaddr = VOYAGER_SUBADDR_HI;
@@ -1072,43 +1089,45 @@ voyager_cat_do_common_interrupt(void)
1072 cat_disconnect(&psi, &psi_asic); 1089 cat_disconnect(&psi, &psi_asic);
1073 /* Read the status. NOTE: Need to read *all* the PSI regs here 1090 /* Read the status. NOTE: Need to read *all* the PSI regs here
1074 * otherwise the cmn int will be reasserted */ 1091 * otherwise the cmn int will be reasserted */
1075 for(i = 0; i < sizeof(psi_reg.regs); i++) { 1092 for (i = 0; i < sizeof(psi_reg.regs); i++) {
1076 cat_read(&psi, &psi_asic, i, &((__u8 *)&psi_reg.regs)[i]); 1093 cat_read(&psi, &psi_asic, i, &((__u8 *) & psi_reg.regs)[i]);
1077 } 1094 }
1078 outb(VOYAGER_CAT_END, CAT_CMD); 1095 outb(VOYAGER_CAT_END, CAT_CMD);
1079 if((psi_reg.regs.checkbit & 0x02) == 0) { 1096 if ((psi_reg.regs.checkbit & 0x02) == 0) {
1080 psi_reg.regs.checkbit |= 0x02; 1097 psi_reg.regs.checkbit |= 0x02;
1081 cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit); 1098 cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit);
1082 printk("VOYAGER RE-READ PSI\n"); 1099 printk("VOYAGER RE-READ PSI\n");
1083 goto re_read; 1100 goto re_read;
1084 } 1101 }
1085 outb(VOYAGER_CAT_RUN, CAT_CMD); 1102 outb(VOYAGER_CAT_RUN, CAT_CMD);
1086 for(i = 0; i < sizeof(psi_reg.subregs); i++) { 1103 for (i = 0; i < sizeof(psi_reg.subregs); i++) {
1087 /* This looks strange, but the PSI doesn't do auto increment 1104 /* This looks strange, but the PSI doesn't do auto increment
1088 * correctly */ 1105 * correctly */
1089 cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i, 1106 cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i,
1090 1, &((__u8 *)&psi_reg.subregs)[i]); 1107 1, &((__u8 *) & psi_reg.subregs)[i]);
1091 } 1108 }
1092 outb(VOYAGER_CAT_END, CAT_CMD); 1109 outb(VOYAGER_CAT_END, CAT_CMD);
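
The sequence above reads the whole PSI register file byte by byte (the PSI does not auto-increment, hence one subread per byte) and re-reads everything if the checkbit is still clear. A compact model of that control flow, with a fake register file in place of the CAT bus and invented register contents:

#include <stdint.h>
#include <stdio.h>

#define NREGS 8
#define CHECKBIT_REG 5

static uint8_t psi_hw[NREGS] = { 0x10, 0, 0, 0, 0, 0x00, 0, 0 };

static void psi_read_reg(int i, uint8_t *out) { *out = psi_hw[i]; }
static void psi_write_reg(int i, uint8_t v)   { psi_hw[i] = v; }

int main(void)
{
	uint8_t regs[NREGS];

re_read:
	/* one read per register: the device will not auto-increment for us */
	for (int i = 0; i < NREGS; i++)
		psi_read_reg(i, &regs[i]);
	if ((regs[CHECKBIT_REG] & 0x02) == 0) {
		/* checkbit clear: set it and go round again, as the driver does */
		regs[CHECKBIT_REG] |= 0x02;
		psi_write_reg(CHECKBIT_REG, regs[CHECKBIT_REG]);
		printf("re-reading PSI\n");
		goto re_read;
	}
	printf("PSI snapshot:");
	for (int i = 0; i < NREGS; i++)
		printf(" %02x", regs[i]);
	printf("\n");
	return 0;
}

Reading every register in one pass matters because a partial read leaves the common interrupt asserted.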
1093#ifdef VOYAGER_CAT_DEBUG 1110#ifdef VOYAGER_CAT_DEBUG
1094 printk("VOYAGER PSI: "); 1111 printk("VOYAGER PSI: ");
1095 for(i=0; i<sizeof(psi_reg.regs); i++) 1112 for (i = 0; i < sizeof(psi_reg.regs); i++)
1096 printk("%02x ", ((__u8 *)&psi_reg.regs)[i]); 1113 printk("%02x ", ((__u8 *) & psi_reg.regs)[i]);
1097 printk("\n "); 1114 printk("\n ");
1098 for(i=0; i<sizeof(psi_reg.subregs); i++) 1115 for (i = 0; i < sizeof(psi_reg.subregs); i++)
1099 printk("%02x ", ((__u8 *)&psi_reg.subregs)[i]); 1116 printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]);
1100 printk("\n"); 1117 printk("\n");
1101#endif 1118#endif
1102 if(psi_reg.regs.intstatus & PSI_MON) { 1119 if (psi_reg.regs.intstatus & PSI_MON) {
1103 /* switch off or power fail */ 1120 /* switch off or power fail */
1104 1121
1105 if(psi_reg.subregs.supply & PSI_SWITCH_OFF) { 1122 if (psi_reg.subregs.supply & PSI_SWITCH_OFF) {
1106 if(voyager_status.switch_off) { 1123 if (voyager_status.switch_off) {
1107 printk(KERN_ERR "Voyager front panel switch turned off again---Immediate power off!\n"); 1124 printk(KERN_ERR
1125 "Voyager front panel switch turned off again---Immediate power off!\n");
1108 voyager_cat_power_off(); 1126 voyager_cat_power_off();
1109 /* not reached */ 1127 /* not reached */
1110 } else { 1128 } else {
1111 printk(KERN_ERR "Voyager front panel switch turned off\n"); 1129 printk(KERN_ERR
1130 "Voyager front panel switch turned off\n");
1112 voyager_status.switch_off = 1; 1131 voyager_status.switch_off = 1;
1113 voyager_status.request_from_kernel = 1; 1132 voyager_status.request_from_kernel = 1;
1114 wake_up_process(voyager_thread); 1133 wake_up_process(voyager_thread);
@@ -1127,7 +1146,7 @@ voyager_cat_do_common_interrupt(void)
1127 1146
1128 VDEBUG(("Voyager ac fail reg 0x%x\n", 1147 VDEBUG(("Voyager ac fail reg 0x%x\n",
1129 psi_reg.subregs.ACfail)); 1148 psi_reg.subregs.ACfail));
1130 if((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) { 1149 if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
1131 /* No further update */ 1150 /* No further update */
1132 return; 1151 return;
1133 } 1152 }
@@ -1135,20 +1154,20 @@ voyager_cat_do_common_interrupt(void)
1135 /* Don't bother trying to find out who failed. 1154 /* Don't bother trying to find out who failed.
1136 * FIXME: This probably makes the code incorrect on 1155 * FIXME: This probably makes the code incorrect on
1137 * anything other than a 345x */ 1156 * anything other than a 345x */
1138 for(i=0; i< 5; i++) { 1157 for (i = 0; i < 5; i++) {
1139 if( psi_reg.subregs.ACfail &(1<<i)) { 1158 if (psi_reg.subregs.ACfail & (1 << i)) {
1140 break; 1159 break;
1141 } 1160 }
1142 } 1161 }
1143 printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i); 1162 printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i);
1144#endif 1163#endif
1145 /* DON'T do this: it shuts down the AC PSI 1164 /* DON'T do this: it shuts down the AC PSI
1146 outb(VOYAGER_CAT_RUN, CAT_CMD); 1165 outb(VOYAGER_CAT_RUN, CAT_CMD);
1147 data = PSI_MASK_MASK | i; 1166 data = PSI_MASK_MASK | i;
1148 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK, 1167 cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
1149 1, &data); 1168 1, &data);
1150 outb(VOYAGER_CAT_END, CAT_CMD); 1169 outb(VOYAGER_CAT_END, CAT_CMD);
1151 */ 1170 */
1152 printk(KERN_ERR "Voyager AC power failure\n"); 1171 printk(KERN_ERR "Voyager AC power failure\n");
1153 outb(VOYAGER_CAT_RUN, CAT_CMD); 1172 outb(VOYAGER_CAT_RUN, CAT_CMD);
1154 data = PSI_COLD_START; 1173 data = PSI_COLD_START;
@@ -1159,16 +1178,16 @@ voyager_cat_do_common_interrupt(void)
1159 voyager_status.request_from_kernel = 1; 1178 voyager_status.request_from_kernel = 1;
1160 wake_up_process(voyager_thread); 1179 wake_up_process(voyager_thread);
1161 } 1180 }
1162 1181
1163 1182 } else if (psi_reg.regs.intstatus & PSI_FAULT) {
1164 } else if(psi_reg.regs.intstatus & PSI_FAULT) {
1165 /* Major fault! */ 1183 /* Major fault! */
1166 printk(KERN_ERR "Voyager PSI Detected major fault, immediate power off!\n"); 1184 printk(KERN_ERR
1185 "Voyager PSI Detected major fault, immediate power off!\n");
1167 voyager_cat_power_off(); 1186 voyager_cat_power_off();
1168 /* not reached */ 1187 /* not reached */
1169 } else if(psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM 1188 } else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
1170 | PSI_CURRENT | PSI_DVM 1189 | PSI_CURRENT | PSI_DVM
1171 | PSI_PSCFAULT | PSI_STAT_CHG)) { 1190 | PSI_PSCFAULT | PSI_STAT_CHG)) {
1172 /* other psi fault */ 1191 /* other psi fault */
1173 1192
1174 printk(KERN_WARNING "Voyager PSI status 0x%x\n", data); 1193 printk(KERN_WARNING "Voyager PSI status 0x%x\n", data);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 88124dd35406..73c435ce10fd 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -32,7 +32,8 @@
32DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 }; 32DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
33 33
34/* CPU IRQ affinity -- set to all ones initially */ 34/* CPU IRQ affinity -- set to all ones initially */
35static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL }; 35static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
36 {[0 ... NR_CPUS-1] = ~0UL };
36 37
37/* per CPU data structure (for /proc/cpuinfo et al), visible externally 38/* per CPU data structure (for /proc/cpuinfo et al), visible externally
38 * indexed physically */ 39 * indexed physically */
@@ -76,7 +77,6 @@ EXPORT_SYMBOL(cpu_online_map);
76 * by scheduler but indexed physically */ 77 * by scheduler but indexed physically */
77cpumask_t phys_cpu_present_map = CPU_MASK_NONE; 78cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
78 79
79
80/* The internal functions */ 80/* The internal functions */
81static void send_CPI(__u32 cpuset, __u8 cpi); 81static void send_CPI(__u32 cpuset, __u8 cpi);
82static void ack_CPI(__u8 cpi); 82static void ack_CPI(__u8 cpi);
@@ -101,94 +101,86 @@ int hard_smp_processor_id(void);
101int safe_smp_processor_id(void); 101int safe_smp_processor_id(void);
102 102
103/* Inline functions */ 103/* Inline functions */
104static inline void 104static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
105send_one_QIC_CPI(__u8 cpu, __u8 cpi)
106{ 105{
107 voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi = 106 voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
108 (smp_processor_id() << 16) + cpi; 107 (smp_processor_id() << 16) + cpi;
109} 108}
110 109
111static inline void 110static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
112send_QIC_CPI(__u32 cpuset, __u8 cpi)
113{ 111{
114 int cpu; 112 int cpu;
115 113
116 for_each_online_cpu(cpu) { 114 for_each_online_cpu(cpu) {
117 if(cpuset & (1<<cpu)) { 115 if (cpuset & (1 << cpu)) {
118#ifdef VOYAGER_DEBUG 116#ifdef VOYAGER_DEBUG
119 if(!cpu_isset(cpu, cpu_online_map)) 117 if (!cpu_isset(cpu, cpu_online_map))
120 VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu)); 118 VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
119 "cpu_online_map\n",
120 hard_smp_processor_id(), cpi, cpu));
121#endif 121#endif
122 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); 122 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
123 } 123 }
124 } 124 }
125} 125}
126 126
127static inline void 127static inline void wrapper_smp_local_timer_interrupt(void)
128wrapper_smp_local_timer_interrupt(void)
129{ 128{
130 irq_enter(); 129 irq_enter();
131 smp_local_timer_interrupt(); 130 smp_local_timer_interrupt();
132 irq_exit(); 131 irq_exit();
133} 132}
134 133
135static inline void 134static inline void send_one_CPI(__u8 cpu, __u8 cpi)
136send_one_CPI(__u8 cpu, __u8 cpi)
137{ 135{
138 if(voyager_quad_processors & (1<<cpu)) 136 if (voyager_quad_processors & (1 << cpu))
139 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET); 137 send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
140 else 138 else
141 send_CPI(1<<cpu, cpi); 139 send_CPI(1 << cpu, cpi);
142} 140}
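
The helpers above are the heart of CPI delivery: a CPU on a Quad card is kicked by writing (sender << 16) plus the CPI number into its per-CPU QIC mailbox, anything else goes through the VIC with a one-bit CPU mask. A userspace model of the encoding and the quad-or-VIC decision; the mailboxes, masks and the offset value are simulated, not the real ones.

#include <stdint.h>
#include <stdio.h>

#define QIC_CPI_OFFSET 0x60		/* illustrative value only */

static uint32_t qic_mailbox[8];		/* stands in for voyager_quad_cpi_addr[] */
static uint32_t quad_processors = 0x0f;	/* pretend CPUs 0-3 sit on a Quad card */

static void send_one_QIC_CPI(unsigned sender, unsigned cpu, uint8_t cpi)
{
	/* sender id in the high half, CPI number in the low half */
	qic_mailbox[cpu] = (sender << 16) + cpi;
}

static void send_one_CPI(unsigned sender, unsigned cpu, uint8_t cpi)
{
	if (quad_processors & (1u << cpu))
		send_one_QIC_CPI(sender, cpu, cpi - QIC_CPI_OFFSET);
	else
		printf("VIC broadcast of CPI %u to mask 0x%x\n", cpi, 1u << cpu);
}

int main(void)
{
	send_one_CPI(0, 2, QIC_CPI_OFFSET + 3);	/* quad CPU: mailbox write */
	send_one_CPI(0, 6, 0x21);		/* non-quad CPU: VIC path */
	printf("mailbox[2] = 0x%08x\n", qic_mailbox[2]);	/* 0x00000003 */
	return 0;
}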
143 141
144static inline void 142static inline void send_CPI_allbutself(__u8 cpi)
145send_CPI_allbutself(__u8 cpi)
146{ 143{
147 __u8 cpu = smp_processor_id(); 144 __u8 cpu = smp_processor_id();
148 __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu); 145 __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
149 send_CPI(mask, cpi); 146 send_CPI(mask, cpi);
150} 147}
151 148
152static inline int 149static inline int is_cpu_quad(void)
153is_cpu_quad(void)
154{ 150{
155 __u8 cpumask = inb(VIC_PROC_WHO_AM_I); 151 __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
156 return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER); 152 return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
157} 153}
158 154
159static inline int 155static inline int is_cpu_extended(void)
160is_cpu_extended(void)
161{ 156{
162 __u8 cpu = hard_smp_processor_id(); 157 __u8 cpu = hard_smp_processor_id();
163 158
164 return(voyager_extended_vic_processors & (1<<cpu)); 159 return (voyager_extended_vic_processors & (1 << cpu));
165} 160}
166 161
167static inline int 162static inline int is_cpu_vic_boot(void)
168is_cpu_vic_boot(void)
169{ 163{
170 __u8 cpu = hard_smp_processor_id(); 164 __u8 cpu = hard_smp_processor_id();
171 165
172 return(voyager_extended_vic_processors 166 return (voyager_extended_vic_processors
173 & voyager_allowed_boot_processors & (1<<cpu)); 167 & voyager_allowed_boot_processors & (1 << cpu));
174} 168}
175 169
176 170static inline void ack_CPI(__u8 cpi)
177static inline void
178ack_CPI(__u8 cpi)
179{ 171{
180 switch(cpi) { 172 switch (cpi) {
181 case VIC_CPU_BOOT_CPI: 173 case VIC_CPU_BOOT_CPI:
182 if(is_cpu_quad() && !is_cpu_vic_boot()) 174 if (is_cpu_quad() && !is_cpu_vic_boot())
183 ack_QIC_CPI(cpi); 175 ack_QIC_CPI(cpi);
184 else 176 else
185 ack_VIC_CPI(cpi); 177 ack_VIC_CPI(cpi);
186 break; 178 break;
187 case VIC_SYS_INT: 179 case VIC_SYS_INT:
188 case VIC_CMN_INT: 180 case VIC_CMN_INT:
189 /* These are slightly strange. Even on the Quad card, 181 /* These are slightly strange. Even on the Quad card,
190 * They are vectored as VIC CPIs */ 182 * They are vectored as VIC CPIs */
191 if(is_cpu_quad()) 183 if (is_cpu_quad())
192 ack_special_QIC_CPI(cpi); 184 ack_special_QIC_CPI(cpi);
193 else 185 else
194 ack_VIC_CPI(cpi); 186 ack_VIC_CPI(cpi);
@@ -205,11 +197,11 @@ ack_CPI(__u8 cpi)
205 * 8259 IRQs except that masks and things must be kept per processor 197 * 8259 IRQs except that masks and things must be kept per processor
206 */ 198 */
207static struct irq_chip vic_chip = { 199static struct irq_chip vic_chip = {
208 .name = "VIC", 200 .name = "VIC",
209 .startup = startup_vic_irq, 201 .startup = startup_vic_irq,
210 .mask = mask_vic_irq, 202 .mask = mask_vic_irq,
211 .unmask = unmask_vic_irq, 203 .unmask = unmask_vic_irq,
212 .set_affinity = set_vic_irq_affinity, 204 .set_affinity = set_vic_irq_affinity,
213}; 205};
214 206
215/* used to count up as CPUs are brought on line (starts at 0) */ 207/* used to count up as CPUs are brought on line (starts at 0) */
@@ -223,7 +215,7 @@ static __u32 trampoline_base;
223/* The per cpu profile stuff - used in smp_local_timer_interrupt */ 215/* The per cpu profile stuff - used in smp_local_timer_interrupt */
224static DEFINE_PER_CPU(int, prof_multiplier) = 1; 216static DEFINE_PER_CPU(int, prof_multiplier) = 1;
225static DEFINE_PER_CPU(int, prof_old_multiplier) = 1; 217static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
226static DEFINE_PER_CPU(int, prof_counter) = 1; 218static DEFINE_PER_CPU(int, prof_counter) = 1;
227 219
228/* the map used to check if a CPU has booted */ 220/* the map used to check if a CPU has booted */
229static __u32 cpu_booted_map; 221static __u32 cpu_booted_map;
@@ -246,9 +238,9 @@ static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
246static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 }; 238static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
247 239
248/* Lock for enable/disable of VIC interrupts */ 240/* Lock for enable/disable of VIC interrupts */
249static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock); 241static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
250 242
251/* The boot processor is correctly set up in PC mode when it 243/* The boot processor is correctly set up in PC mode when it
252 * comes up, but the secondaries need their master/slave 8259 244 * comes up, but the secondaries need their master/slave 8259
253 * pairs initializing correctly */ 245 * pairs initializing correctly */
254 246
@@ -262,8 +254,7 @@ static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
262static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned; 254static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
263 255
264/* debugging routine to read the isr of the cpu's pic */ 256/* debugging routine to read the isr of the cpu's pic */
265static inline __u16 257static inline __u16 vic_read_isr(void)
266vic_read_isr(void)
267{ 258{
268 __u16 isr; 259 __u16 isr;
269 260
@@ -275,17 +266,16 @@ vic_read_isr(void)
275 return isr; 266 return isr;
276} 267}
277 268
278static __init void 269static __init void qic_setup(void)
279qic_setup(void)
280{ 270{
281 if(!is_cpu_quad()) { 271 if (!is_cpu_quad()) {
282 /* not a quad, no setup */ 272 /* not a quad, no setup */
283 return; 273 return;
284 } 274 }
285 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); 275 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
286 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); 276 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
287 277
288 if(is_cpu_extended()) { 278 if (is_cpu_extended()) {
289 /* the QIC duplicate of the VIC base register */ 279 /* the QIC duplicate of the VIC base register */
290 outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER); 280 outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
291 outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER); 281 outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
@@ -295,8 +285,7 @@ qic_setup(void)
295 } 285 }
296} 286}
297 287
298static __init void 288static __init void vic_setup_pic(void)
299vic_setup_pic(void)
300{ 289{
301 outb(1, VIC_REDIRECT_REGISTER_1); 290 outb(1, VIC_REDIRECT_REGISTER_1);
302 /* clear the claim registers for dynamic routing */ 291 /* clear the claim registers for dynamic routing */
@@ -333,7 +322,7 @@ vic_setup_pic(void)
333 322
334 /* ICW2: slave vector base */ 323 /* ICW2: slave vector base */
335 outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1); 324 outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
336 325
337 /* ICW3: slave ID */ 326 /* ICW3: slave ID */
338 outb(0x02, 0xA1); 327 outb(0x02, 0xA1);
339 328
@@ -341,19 +330,18 @@ vic_setup_pic(void)
341 outb(0x01, 0xA1); 330 outb(0x01, 0xA1);
342} 331}
343 332
344static void 333static void do_quad_bootstrap(void)
345do_quad_bootstrap(void)
346{ 334{
347 if(is_cpu_quad() && is_cpu_vic_boot()) { 335 if (is_cpu_quad() && is_cpu_vic_boot()) {
348 int i; 336 int i;
349 unsigned long flags; 337 unsigned long flags;
350 __u8 cpuid = hard_smp_processor_id(); 338 __u8 cpuid = hard_smp_processor_id();
351 339
352 local_irq_save(flags); 340 local_irq_save(flags);
353 341
354 for(i = 0; i<4; i++) { 342 for (i = 0; i < 4; i++) {
355 /* FIXME: this would be >>3 &0x7 on the 32 way */ 343 /* FIXME: this would be >>3 &0x7 on the 32 way */
356 if(((cpuid >> 2) & 0x03) == i) 344 if (((cpuid >> 2) & 0x03) == i)
357 /* don't lower our own mask! */ 345 /* don't lower our own mask! */
358 continue; 346 continue;
359 347
@@ -368,12 +356,10 @@ do_quad_bootstrap(void)
368 } 356 }
369} 357}
370 358
371
372/* Set up all the basic stuff: read the SMP config and make all the 359/* Set up all the basic stuff: read the SMP config and make all the
373 * SMP information reflect only the boot cpu. All others will be 360 * SMP information reflect only the boot cpu. All others will be
374 * brought on-line later. */ 361 * brought on-line later. */
375void __init 362void __init find_smp_config(void)
376find_smp_config(void)
377{ 363{
378 int i; 364 int i;
379 365
@@ -382,24 +368,31 @@ find_smp_config(void)
382 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id); 368 printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
383 369
384 /* initialize the CPU structures (moved from smp_boot_cpus) */ 370 /* initialize the CPU structures (moved from smp_boot_cpus) */
385 for(i=0; i<NR_CPUS; i++) { 371 for (i = 0; i < NR_CPUS; i++) {
386 cpu_irq_affinity[i] = ~0; 372 cpu_irq_affinity[i] = ~0;
387 } 373 }
388 cpu_online_map = cpumask_of_cpu(boot_cpu_id); 374 cpu_online_map = cpumask_of_cpu(boot_cpu_id);
389 375
390 /* The boot CPU must be extended */ 376 /* The boot CPU must be extended */
391 voyager_extended_vic_processors = 1<<boot_cpu_id; 377 voyager_extended_vic_processors = 1 << boot_cpu_id;
392 /* initially, all of the first 8 CPUs can boot */ 378 /* initially, all of the first 8 CPUs can boot */
393 voyager_allowed_boot_processors = 0xff; 379 voyager_allowed_boot_processors = 0xff;
394 /* set up everything for just this CPU, we can alter 380 /* set up everything for just this CPU, we can alter
395 * this as we start the other CPUs later */ 381 * this as we start the other CPUs later */
396 /* now get the CPU disposition from the extended CMOS */ 382 /* now get the CPU disposition from the extended CMOS */
397 cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK); 383 cpus_addr(phys_cpu_present_map)[0] =
398 cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; 384 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
399 cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16; 385 cpus_addr(phys_cpu_present_map)[0] |=
400 cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; 386 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
387 cpus_addr(phys_cpu_present_map)[0] |=
388 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
389 2) << 16;
390 cpus_addr(phys_cpu_present_map)[0] |=
391 voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
392 3) << 24;
401 cpu_possible_map = phys_cpu_present_map; 393 cpu_possible_map = phys_cpu_present_map;
402 printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); 394 printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
395 cpus_addr(phys_cpu_present_map)[0]);
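
phys_cpu_present_map is assembled above from four consecutive bytes of extended CMOS, least significant byte first. A trivial model of that assembly with a stand-in cmos_read() and invented contents:

#include <stdint.h>
#include <stdio.h>

/* pretend extended-CMOS bytes at the processor-present-mask offset */
static uint8_t cmos[4] = { 0x0f, 0x00, 0x00, 0x00 };	/* CPUs 0-3 present */

static uint8_t cmos_read(unsigned offset) { return cmos[offset]; }

int main(void)
{
	uint32_t present = cmos_read(0);
	present |= (uint32_t)cmos_read(1) << 8;
	present |= (uint32_t)cmos_read(2) << 16;
	present |= (uint32_t)cmos_read(3) << 24;
	printf("phys_cpu_present_map = 0x%08x\n", present);	/* 0x0000000f */
	return 0;
}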
403 /* Here we set up the VIC to enable SMP */ 396 /* Here we set up the VIC to enable SMP */
404 /* enable the CPIs by writing the base vector to their register */ 397 /* enable the CPIs by writing the base vector to their register */
405 outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER); 398 outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
@@ -427,8 +420,7 @@ find_smp_config(void)
427/* 420/*
428 * The bootstrap kernel entry code has set these up. Save them 421 * The bootstrap kernel entry code has set these up. Save them
429 * for a given CPU, id is physical */ 422 * for a given CPU, id is physical */
430void __init 423void __init smp_store_cpu_info(int id)
431smp_store_cpu_info(int id)
432{ 424{
433 struct cpuinfo_x86 *c = &cpu_data(id); 425 struct cpuinfo_x86 *c = &cpu_data(id);
434 426
@@ -438,21 +430,19 @@ smp_store_cpu_info(int id)
438} 430}
439 431
440/* set up the trampoline and return the physical address of the code */ 432/* set up the trampoline and return the physical address of the code */
441static __u32 __init 433static __u32 __init setup_trampoline(void)
442setup_trampoline(void)
443{ 434{
444 /* these two are global symbols in trampoline.S */ 435 /* these two are global symbols in trampoline.S */
445 extern const __u8 trampoline_end[]; 436 extern const __u8 trampoline_end[];
446 extern const __u8 trampoline_data[]; 437 extern const __u8 trampoline_data[];
447 438
448 memcpy((__u8 *)trampoline_base, trampoline_data, 439 memcpy((__u8 *) trampoline_base, trampoline_data,
449 trampoline_end - trampoline_data); 440 trampoline_end - trampoline_data);
450 return virt_to_phys((__u8 *)trampoline_base); 441 return virt_to_phys((__u8 *) trampoline_base);
451} 442}
452 443
453/* Routine initially called when a non-boot CPU is brought online */ 444/* Routine initially called when a non-boot CPU is brought online */
454static void __init 445static void __init start_secondary(void *unused)
455start_secondary(void *unused)
456{ 446{
457 __u8 cpuid = hard_smp_processor_id(); 447 __u8 cpuid = hard_smp_processor_id();
458 /* external functions not defined in the headers */ 448 /* external functions not defined in the headers */
@@ -464,17 +454,18 @@ start_secondary(void *unused)
464 ack_CPI(VIC_CPU_BOOT_CPI); 454 ack_CPI(VIC_CPU_BOOT_CPI);
465 455
466 /* setup the 8259 master slave pair belonging to this CPU --- 456 /* setup the 8259 master slave pair belonging to this CPU ---
467 * we won't actually receive any until the boot CPU 457 * we won't actually receive any until the boot CPU
468 * relinquishes its static routing mask */ 458 * relinquishes its static routing mask */
469 vic_setup_pic(); 459 vic_setup_pic();
470 460
471 qic_setup(); 461 qic_setup();
472 462
473 if(is_cpu_quad() && !is_cpu_vic_boot()) { 463 if (is_cpu_quad() && !is_cpu_vic_boot()) {
474 /* clear the boot CPI */ 464 /* clear the boot CPI */
475 __u8 dummy; 465 __u8 dummy;
476 466
477 dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi; 467 dummy =
468 voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
478 printk("read dummy %d\n", dummy); 469 printk("read dummy %d\n", dummy);
479 } 470 }
480 471
@@ -516,7 +507,6 @@ start_secondary(void *unused)
516 cpu_idle(); 507 cpu_idle();
517} 508}
518 509
519
520/* Routine to kick start the given CPU and wait for it to report ready 510/* Routine to kick start the given CPU and wait for it to report ready
521 * (or timeout in startup). When this routine returns, the requested 511 * (or timeout in startup). When this routine returns, the requested
522 * CPU is either fully running and configured or known to be dead. 512 * CPU is either fully running and configured or known to be dead.
@@ -524,15 +514,14 @@ start_secondary(void *unused)
524 * We call this routine sequentially 1 CPU at a time, so no need for 514 * We call this routine sequentially 1 CPU at a time, so no need for
525 * locking */ 515 * locking */
526 516
527static void __init 517static void __init do_boot_cpu(__u8 cpu)
528do_boot_cpu(__u8 cpu)
529{ 518{
530 struct task_struct *idle; 519 struct task_struct *idle;
531 int timeout; 520 int timeout;
532 unsigned long flags; 521 unsigned long flags;
533 int quad_boot = (1<<cpu) & voyager_quad_processors 522 int quad_boot = (1 << cpu) & voyager_quad_processors
534 & ~( voyager_extended_vic_processors 523 & ~(voyager_extended_vic_processors
535 & voyager_allowed_boot_processors); 524 & voyager_allowed_boot_processors);
536 525
537 /* This is an area in head.S which was used to set up the 526 /* This is an area in head.S which was used to set up the
538 * initial kernel stack. We need to alter this to give the 527 * initial kernel stack. We need to alter this to give the
@@ -543,10 +532,10 @@ do_boot_cpu(__u8 cpu)
543 } stack_start; 532 } stack_start;
544 /* This is the format of the CPI IDT gate (in real mode) which 533 /* This is the format of the CPI IDT gate (in real mode) which
545 * we're hijacking to boot the CPU */ 534 * we're hijacking to boot the CPU */
546 union IDTFormat { 535 union IDTFormat {
547 struct seg { 536 struct seg {
548 __u16 Offset; 537 __u16 Offset;
549 __u16 Segment; 538 __u16 Segment;
550 } idt; 539 } idt;
551 __u32 val; 540 __u32 val;
552 } hijack_source; 541 } hijack_source;
@@ -565,19 +554,19 @@ do_boot_cpu(__u8 cpu)
565 alternatives_smp_switch(1); 554 alternatives_smp_switch(1);
566 555
567 idle = fork_idle(cpu); 556 idle = fork_idle(cpu);
568 if(IS_ERR(idle)) 557 if (IS_ERR(idle))
569 panic("failed fork for CPU%d", cpu); 558 panic("failed fork for CPU%d", cpu);
570 idle->thread.eip = (unsigned long) start_secondary; 559 idle->thread.eip = (unsigned long)start_secondary;
571 /* init_tasks (in sched.c) is indexed logically */ 560 /* init_tasks (in sched.c) is indexed logically */
572 stack_start.esp = (void *) idle->thread.esp; 561 stack_start.esp = (void *)idle->thread.esp;
573 562
574 init_gdt(cpu); 563 init_gdt(cpu);
575 per_cpu(current_task, cpu) = idle; 564 per_cpu(current_task, cpu) = idle;
576 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); 565 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
577 irq_ctx_init(cpu); 566 irq_ctx_init(cpu);
578 567
579 /* Note: Don't modify initial ss override */ 568 /* Note: Don't modify initial ss override */
580 VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu, 569 VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
581 (unsigned long)hijack_source.val, hijack_source.idt.Segment, 570 (unsigned long)hijack_source.val, hijack_source.idt.Segment,
582 hijack_source.idt.Offset, stack_start.esp)); 571 hijack_source.idt.Offset, stack_start.esp));
583 572
@@ -586,16 +575,23 @@ do_boot_cpu(__u8 cpu)
586 min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS)); 575 min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
587 flush_tlb_all(); 576 flush_tlb_all();
588 577
589 if(quad_boot) { 578 if (quad_boot) {
590 printk("CPU %d: non extended Quad boot\n", cpu); 579 printk("CPU %d: non extended Quad boot\n", cpu);
591 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4); 580 hijack_vector =
581 (__u32 *)
582 phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
592 *hijack_vector = hijack_source.val; 583 *hijack_vector = hijack_source.val;
593 } else { 584 } else {
594 printk("CPU%d: extended VIC boot\n", cpu); 585 printk("CPU%d: extended VIC boot\n", cpu);
595 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4); 586 hijack_vector =
587 (__u32 *)
588 phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
596 *hijack_vector = hijack_source.val; 589 *hijack_vector = hijack_source.val;
597 /* VIC errata, may also receive interrupt at this address */ 590 /* VIC errata, may also receive interrupt at this address */
598 hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4); 591 hijack_vector =
592 (__u32 *)
593 phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
594 VIC_DEFAULT_CPI_BASE) * 4);
599 *hijack_vector = hijack_source.val; 595 *hijack_vector = hijack_source.val;
600 } 596 }
601 /* All non-boot CPUs start with interrupts fully masked. Need 597 /* All non-boot CPUs start with interrupts fully masked. Need
@@ -603,73 +599,76 @@ do_boot_cpu(__u8 cpu)
603 * this in the VIC by masquerading as the processor we're 599 * this in the VIC by masquerading as the processor we're
604 * about to boot and lowering its interrupt mask */ 600 * about to boot and lowering its interrupt mask */
605 local_irq_save(flags); 601 local_irq_save(flags);
606 if(quad_boot) { 602 if (quad_boot) {
607 send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI); 603 send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
608 } else { 604 } else {
609 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID); 605 outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
610 /* here we're altering registers belonging to `cpu' */ 606 /* here we're altering registers belonging to `cpu' */
611 607
612 outb(VIC_BOOT_INTERRUPT_MASK, 0x21); 608 outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
613 /* now go back to our original identity */ 609 /* now go back to our original identity */
614 outb(boot_cpu_id, VIC_PROCESSOR_ID); 610 outb(boot_cpu_id, VIC_PROCESSOR_ID);
615 611
616 /* and boot the CPU */ 612 /* and boot the CPU */
617 613
618 send_CPI((1<<cpu), VIC_CPU_BOOT_CPI); 614 send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
619 } 615 }
620 cpu_booted_map = 0; 616 cpu_booted_map = 0;
621 local_irq_restore(flags); 617 local_irq_restore(flags);
622 618
623 /* now wait for it to become ready (or timeout) */ 619 /* now wait for it to become ready (or timeout) */
624 for(timeout = 0; timeout < 50000; timeout++) { 620 for (timeout = 0; timeout < 50000; timeout++) {
625 if(cpu_booted_map) 621 if (cpu_booted_map)
626 break; 622 break;
627 udelay(100); 623 udelay(100);
628 } 624 }
629 /* reset the page table */ 625 /* reset the page table */
630 zap_low_mappings(); 626 zap_low_mappings();
631 627
632 if (cpu_booted_map) { 628 if (cpu_booted_map) {
633 VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n", 629 VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
634 cpu, smp_processor_id())); 630 cpu, smp_processor_id()));
635 631
636 printk("CPU%d: ", cpu); 632 printk("CPU%d: ", cpu);
637 print_cpu_info(&cpu_data(cpu)); 633 print_cpu_info(&cpu_data(cpu));
638 wmb(); 634 wmb();
639 cpu_set(cpu, cpu_callout_map); 635 cpu_set(cpu, cpu_callout_map);
640 cpu_set(cpu, cpu_present_map); 636 cpu_set(cpu, cpu_present_map);
641 } 637 } else {
642 else {
643 printk("CPU%d FAILED TO BOOT: ", cpu); 638 printk("CPU%d FAILED TO BOOT: ", cpu);
644 if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5) 639 if (*
640 ((volatile unsigned char *)phys_to_virt(start_phys_address))
641 == 0xA5)
645 printk("Stuck.\n"); 642 printk("Stuck.\n");
646 else 643 else
647 printk("Not responding.\n"); 644 printk("Not responding.\n");
648 645
649 cpucount--; 646 cpucount--;
650 } 647 }
651} 648}
652 649
653void __init 650void __init smp_boot_cpus(void)
654smp_boot_cpus(void)
655{ 651{
656 int i; 652 int i;
657 653
658 /* CAT BUS initialisation must be done after the memory */ 654 /* CAT BUS initialisation must be done after the memory */
659 /* FIXME: The L4 has a catbus too, it just needs to be 655 /* FIXME: The L4 has a catbus too, it just needs to be
660 * accessed in a totally different way */ 656 * accessed in a totally different way */
661 if(voyager_level == 5) { 657 if (voyager_level == 5) {
662 voyager_cat_init(); 658 voyager_cat_init();
663 659
664 /* now that the cat has probed the Voyager System Bus, sanity 660 /* now that the cat has probed the Voyager System Bus, sanity
665 * check the cpu map */ 661 * check the cpu map */
666 if( ((voyager_quad_processors | voyager_extended_vic_processors) 662 if (((voyager_quad_processors | voyager_extended_vic_processors)
667 & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) { 663 & cpus_addr(phys_cpu_present_map)[0]) !=
664 cpus_addr(phys_cpu_present_map)[0]) {
668 /* should panic */ 665 /* should panic */
669 printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n"); 666 printk("\n\n***WARNING*** "
667 "Sanity check of CPU present map FAILED\n");
670 } 668 }
671 } else if(voyager_level == 4) 669 } else if (voyager_level == 4)
672 voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0]; 670 voyager_extended_vic_processors =
671 cpus_addr(phys_cpu_present_map)[0];
673 672
674 /* this sets up the idle task to run on the current cpu */ 673 /* this sets up the idle task to run on the current cpu */
675 voyager_extended_cpus = 1; 674 voyager_extended_cpus = 1;
@@ -678,14 +677,14 @@ smp_boot_cpus(void)
678 //global_irq_holder = boot_cpu_id; 677 //global_irq_holder = boot_cpu_id;
679 678
680 /* FIXME: Need to do something about this but currently only works 679 /* FIXME: Need to do something about this but currently only works
681 * on CPUs with a tsc which none of mine have. 680 * on CPUs with a tsc which none of mine have.
682 smp_tune_scheduling(); 681 smp_tune_scheduling();
683 */ 682 */
684 smp_store_cpu_info(boot_cpu_id); 683 smp_store_cpu_info(boot_cpu_id);
685 printk("CPU%d: ", boot_cpu_id); 684 printk("CPU%d: ", boot_cpu_id);
686 print_cpu_info(&cpu_data(boot_cpu_id)); 685 print_cpu_info(&cpu_data(boot_cpu_id));
687 686
688 if(is_cpu_quad()) { 687 if (is_cpu_quad()) {
689 /* booting on a Quad CPU */ 688 /* booting on a Quad CPU */
690 printk("VOYAGER SMP: Boot CPU is Quad\n"); 689 printk("VOYAGER SMP: Boot CPU is Quad\n");
691 qic_setup(); 690 qic_setup();
@@ -697,11 +696,11 @@ smp_boot_cpus(void)
697 696
698 cpu_set(boot_cpu_id, cpu_online_map); 697 cpu_set(boot_cpu_id, cpu_online_map);
699 cpu_set(boot_cpu_id, cpu_callout_map); 698 cpu_set(boot_cpu_id, cpu_callout_map);
700 699
701 /* loop over all the extended VIC CPUs and boot them. The 700 /* loop over all the extended VIC CPUs and boot them. The
702 * Quad CPUs must be bootstrapped by their extended VIC cpu */ 701 * Quad CPUs must be bootstrapped by their extended VIC cpu */
703 for(i = 0; i < NR_CPUS; i++) { 702 for (i = 0; i < NR_CPUS; i++) {
704 if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) 703 if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
705 continue; 704 continue;
706 do_boot_cpu(i); 705 do_boot_cpu(i);
707 /* This udelay seems to be needed for the Quad boots 706 /* This udelay seems to be needed for the Quad boots
@@ -715,25 +714,26 @@ smp_boot_cpus(void)
715 for (i = 0; i < NR_CPUS; i++) 714 for (i = 0; i < NR_CPUS; i++)
716 if (cpu_isset(i, cpu_online_map)) 715 if (cpu_isset(i, cpu_online_map))
717 bogosum += cpu_data(i).loops_per_jiffy; 716 bogosum += cpu_data(i).loops_per_jiffy;
718 printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 717 printk(KERN_INFO "Total of %d processors activated "
719 cpucount+1, 718 "(%lu.%02lu BogoMIPS).\n",
720 bogosum/(500000/HZ), 719 cpucount + 1, bogosum / (500000 / HZ),
721 (bogosum/(5000/HZ))%100); 720 (bogosum / (5000 / HZ)) % 100);
722 } 721 }
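
The summary printk above scales the summed loops_per_jiffy into an "NN.NN BogoMIPS" figure; the two divisors simply split it into an integer part and a two-digit fraction. For example, with HZ assumed to be 100 and an invented per-CPU value:

#include <stdio.h>

#define HZ 100

int main(void)
{
	unsigned long bogosum = 2 * 2394837UL;	/* two CPUs' loops_per_jiffy, invented */
	printf("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       2, bogosum / (500000 / HZ), (bogosum / (5000 / HZ)) % 100);
	return 0;
}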
723 voyager_extended_cpus = hweight32(voyager_extended_vic_processors); 722 voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
724 printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus); 723 printk("VOYAGER: Extended (interrupt handling CPUs): "
724 "%d, non-extended: %d\n", voyager_extended_cpus,
725 num_booting_cpus() - voyager_extended_cpus);
725 /* that's it, switch to symmetric mode */ 726 /* that's it, switch to symmetric mode */
726 outb(0, VIC_PRIORITY_REGISTER); 727 outb(0, VIC_PRIORITY_REGISTER);
727 outb(0, VIC_CLAIM_REGISTER_0); 728 outb(0, VIC_CLAIM_REGISTER_0);
728 outb(0, VIC_CLAIM_REGISTER_1); 729 outb(0, VIC_CLAIM_REGISTER_1);
729 730
730 VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus())); 731 VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
731} 732}
732 733
733/* Reload the secondary CPUs task structure (this function does not 734/* Reload the secondary CPUs task structure (this function does not
734 * return ) */ 735 * return ) */
735void __init 736void __init initialize_secondary(void)
736initialize_secondary(void)
737{ 737{
738#if 0 738#if 0
739 // AC kernels only 739 // AC kernels only
@@ -745,11 +745,9 @@ initialize_secondary(void)
745 * basically just the stack pointer and the eip. 745 * basically just the stack pointer and the eip.
746 */ 746 */
747 747
748 asm volatile( 748 asm volatile ("movl %0,%%esp\n\t"
749 "movl %0,%%esp\n\t" 749 "jmp *%1"::"r" (current->thread.esp),
750 "jmp *%1" 750 "r"(current->thread.eip));
751 :
752 :"r" (current->thread.esp),"r" (current->thread.eip));
753} 751}
754 752
755/* handle a Voyager SYS_INT -- If we don't, the base board will 753/* handle a Voyager SYS_INT -- If we don't, the base board will
@@ -758,25 +756,23 @@ initialize_secondary(void)
758 * System interrupts occur because some problem was detected on the 756 * System interrupts occur because some problem was detected on the
759 * various busses. To find out what you have to probe all the 757 * various busses. To find out what you have to probe all the
760 * hardware via the CAT bus. FIXME: At the moment we do nothing. */ 758 * hardware via the CAT bus. FIXME: At the moment we do nothing. */
761fastcall void 759fastcall void smp_vic_sys_interrupt(struct pt_regs *regs)
762smp_vic_sys_interrupt(struct pt_regs *regs)
763{ 760{
764 ack_CPI(VIC_SYS_INT); 761 ack_CPI(VIC_SYS_INT);
765 printk("Voyager SYSTEM INTERRUPT\n"); 762 printk("Voyager SYSTEM INTERRUPT\n");
766} 763}
767 764
768/* Handle a voyager CMN_INT; These interrupts occur either because of 765/* Handle a voyager CMN_INT; These interrupts occur either because of
769 * a system status change or because a single bit memory error 766 * a system status change or because a single bit memory error
770 * occurred. FIXME: At the moment, ignore all this. */ 767 * occurred. FIXME: At the moment, ignore all this. */
771fastcall void 768fastcall void smp_vic_cmn_interrupt(struct pt_regs *regs)
772smp_vic_cmn_interrupt(struct pt_regs *regs)
773{ 769{
774 static __u8 in_cmn_int = 0; 770 static __u8 in_cmn_int = 0;
775 static DEFINE_SPINLOCK(cmn_int_lock); 771 static DEFINE_SPINLOCK(cmn_int_lock);
776 772
777 /* common ints are broadcast, so make sure we only do this once */ 773 /* common ints are broadcast, so make sure we only do this once */
778 _raw_spin_lock(&cmn_int_lock); 774 _raw_spin_lock(&cmn_int_lock);
779 if(in_cmn_int) 775 if (in_cmn_int)
780 goto unlock_end; 776 goto unlock_end;
781 777
782 in_cmn_int++; 778 in_cmn_int++;
@@ -784,12 +780,12 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
784 780
785 VDEBUG(("Voyager COMMON INTERRUPT\n")); 781 VDEBUG(("Voyager COMMON INTERRUPT\n"));
786 782
787 if(voyager_level == 5) 783 if (voyager_level == 5)
788 voyager_cat_do_common_interrupt(); 784 voyager_cat_do_common_interrupt();
789 785
790 _raw_spin_lock(&cmn_int_lock); 786 _raw_spin_lock(&cmn_int_lock);
791 in_cmn_int = 0; 787 in_cmn_int = 0;
792 unlock_end: 788 unlock_end:
793 _raw_spin_unlock(&cmn_int_lock); 789 _raw_spin_unlock(&cmn_int_lock);
794 ack_CPI(VIC_CMN_INT); 790 ack_CPI(VIC_CMN_INT);
795} 791}
@@ -797,26 +793,24 @@ smp_vic_cmn_interrupt(struct pt_regs *regs)
797/* 793/*
798 * Reschedule call back. Nothing to do, all the work is done 794 * Reschedule call back. Nothing to do, all the work is done
799 * automatically when we return from the interrupt. */ 795 * automatically when we return from the interrupt. */
800static void 796static void smp_reschedule_interrupt(void)
801smp_reschedule_interrupt(void)
802{ 797{
803 /* do nothing */ 798 /* do nothing */
804} 799}
805 800
806static struct mm_struct * flush_mm; 801static struct mm_struct *flush_mm;
807static unsigned long flush_va; 802static unsigned long flush_va;
808static DEFINE_SPINLOCK(tlbstate_lock); 803static DEFINE_SPINLOCK(tlbstate_lock);
809#define FLUSH_ALL 0xffffffff 804#define FLUSH_ALL 0xffffffff
810 805
811/* 806/*
812 * We cannot call mmdrop() because we are in interrupt context, 807 * We cannot call mmdrop() because we are in interrupt context,
813 * instead update mm->cpu_vm_mask. 808 * instead update mm->cpu_vm_mask.
814 * 809 *
815 * We need to reload %cr3 since the page tables may be going 810 * We need to reload %cr3 since the page tables may be going
816 * away from under us.. 811 * away from under us..
817 */ 812 */
818static inline void 813static inline void leave_mm(unsigned long cpu)
819leave_mm (unsigned long cpu)
820{ 814{
821 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) 815 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
822 BUG(); 816 BUG();
@@ -824,12 +818,10 @@ leave_mm (unsigned long cpu)
824 load_cr3(swapper_pg_dir); 818 load_cr3(swapper_pg_dir);
825} 819}
826 820
827
828/* 821/*
829 * Invalidate call-back 822 * Invalidate call-back
830 */ 823 */
831static void 824static void smp_invalidate_interrupt(void)
832smp_invalidate_interrupt(void)
833{ 825{
834 __u8 cpu = smp_processor_id(); 826 __u8 cpu = smp_processor_id();
835 827
@@ -837,9 +829,9 @@ smp_invalidate_interrupt(void)
837 return; 829 return;
838 /* This will flood messages. Don't uncomment unless you see 830 /* This will flood messages. Don't uncomment unless you see
839 * Problems with cross cpu invalidation 831 * Problems with cross cpu invalidation
840 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n", 832 VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
841 smp_processor_id())); 833 smp_processor_id()));
842 */ 834 */
843 835
844 if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) { 836 if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
845 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) { 837 if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
@@ -857,11 +849,10 @@ smp_invalidate_interrupt(void)
857 849
858/* All the new flush operations for 2.4 */ 850/* All the new flush operations for 2.4 */
859 851
860
861/* This routine is called with a physical cpu mask */ 852/* This routine is called with a physical cpu mask */
862static void 853static void
863voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm, 854voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
864 unsigned long va) 855 unsigned long va)
865{ 856{
866 int stuck = 50000; 857 int stuck = 50000;
867 858
@@ -875,7 +866,7 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
875 BUG(); 866 BUG();
876 867
877 spin_lock(&tlbstate_lock); 868 spin_lock(&tlbstate_lock);
878 869
879 flush_mm = mm; 870 flush_mm = mm;
880 flush_va = va; 871 flush_va = va;
881 atomic_set_mask(cpumask, &smp_invalidate_needed); 872 atomic_set_mask(cpumask, &smp_invalidate_needed);
@@ -887,23 +878,23 @@ voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
887 878
888 while (smp_invalidate_needed) { 879 while (smp_invalidate_needed) {
889 mb(); 880 mb();
890 if(--stuck == 0) { 881 if (--stuck == 0) {
891 printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id()); 882 printk("***WARNING*** Stuck doing invalidate CPI "
883 "(CPU%d)\n", smp_processor_id());
892 break; 884 break;
893 } 885 }
894 } 886 }
895 887
896 /* Uncomment only to debug invalidation problems 888 /* Uncomment only to debug invalidation problems
897 VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu)); 889 VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
898 */ 890 */
899 891
900 flush_mm = NULL; 892 flush_mm = NULL;
901 flush_va = 0; 893 flush_va = 0;
902 spin_unlock(&tlbstate_lock); 894 spin_unlock(&tlbstate_lock);
903} 895}
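
The routine above is the sender half of Voyager's cross-CPU TLB shootdown: under tlbstate_lock it publishes flush_mm and flush_va, ORs the target CPUs into smp_invalidate_needed, fires the invalidate CPI, and then busy-waits with a bounded counter (50000 iterations) so an unresponsive CPU costs only a warning rather than a hang. Below is a minimal userspace sketch of that bookkeeping, with C11 atomics standing in for the kernel's atomic_set_mask()/mb() and the remote CPUs simulated inline; the names here are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_ALL 0xffffffffu

static _Atomic uint32_t smp_invalidate_needed;
static uint32_t flush_va;               /* published before the CPI is sent */

/* receiver side: flush locally, then clear our bit so the sender stops waiting */
static void invalidate_interrupt(int cpu)
{
        if (flush_va == FLUSH_ALL)
                printf("CPU%d: full TLB flush\n", cpu);  /* __flush_tlb() in the kernel */
        else
                printf("CPU%d: flush page at 0x%x\n", cpu, (unsigned)flush_va);
        atomic_fetch_and(&smp_invalidate_needed, ~(1u << cpu));
}

/* sender side: publish the arguments, kick the targets, wait with a bound */
static void flush_tlb_others(uint32_t cpumask, uint32_t va)
{
        int stuck = 50000;

        flush_va = va;
        atomic_fetch_or(&smp_invalidate_needed, cpumask);
        /* send_CPI(cpumask, VIC_INVALIDATE_CPI) would go here; the sketch
         * lets the "remote CPUs" respond inline instead */
        for (int cpu = 0; cpu < 32; cpu++)
                if (cpumask & (1u << cpu))
                        invalidate_interrupt(cpu);

        while (atomic_load(&smp_invalidate_needed)) {
                if (--stuck == 0) {
                        printf("***WARNING*** Stuck doing invalidate CPI\n");
                        break;
                }
        }
}

int main(void)
{
        flush_tlb_others(0x6, FLUSH_ALL);       /* "CPUs" 1 and 2 */
        printf("pending mask after shootdown: 0x%x\n",
               (unsigned)atomic_load(&smp_invalidate_needed));
        return 0;
}

The bounded wait is the part worth noting: a shootdown that occasionally warns is preferable to one that can never terminate.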
904 896
905void 897void flush_tlb_current_task(void)
906flush_tlb_current_task(void)
907{ 898{
908 struct mm_struct *mm = current->mm; 899 struct mm_struct *mm = current->mm;
909 unsigned long cpu_mask; 900 unsigned long cpu_mask;
@@ -918,9 +909,7 @@ flush_tlb_current_task(void)
918 preempt_enable(); 909 preempt_enable();
919} 910}
920 911
921 912void flush_tlb_mm(struct mm_struct *mm)
922void
923flush_tlb_mm (struct mm_struct * mm)
924{ 913{
925 unsigned long cpu_mask; 914 unsigned long cpu_mask;
926 915
@@ -940,7 +929,7 @@ flush_tlb_mm (struct mm_struct * mm)
940 preempt_enable(); 929 preempt_enable();
941} 930}
942 931
943void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) 932void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
944{ 933{
945 struct mm_struct *mm = vma->vm_mm; 934 struct mm_struct *mm = vma->vm_mm;
946 unsigned long cpu_mask; 935 unsigned long cpu_mask;
@@ -949,10 +938,10 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
949 938
950 cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id()); 939 cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
951 if (current->active_mm == mm) { 940 if (current->active_mm == mm) {
952 if(current->mm) 941 if (current->mm)
953 __flush_tlb_one(va); 942 __flush_tlb_one(va);
954 else 943 else
955 leave_mm(smp_processor_id()); 944 leave_mm(smp_processor_id());
956 } 945 }
957 946
958 if (cpu_mask) 947 if (cpu_mask)
@@ -960,21 +949,21 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
960 949
961 preempt_enable(); 950 preempt_enable();
962} 951}
952
963EXPORT_SYMBOL(flush_tlb_page); 953EXPORT_SYMBOL(flush_tlb_page);
964 954
965/* enable the requested IRQs */ 955/* enable the requested IRQs */
966static void 956static void smp_enable_irq_interrupt(void)
967smp_enable_irq_interrupt(void)
968{ 957{
969 __u8 irq; 958 __u8 irq;
970 __u8 cpu = get_cpu(); 959 __u8 cpu = get_cpu();
971 960
972 VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu, 961 VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
973 vic_irq_enable_mask[cpu])); 962 vic_irq_enable_mask[cpu]));
974 963
975 spin_lock(&vic_irq_lock); 964 spin_lock(&vic_irq_lock);
976 for(irq = 0; irq < 16; irq++) { 965 for (irq = 0; irq < 16; irq++) {
977 if(vic_irq_enable_mask[cpu] & (1<<irq)) 966 if (vic_irq_enable_mask[cpu] & (1 << irq))
978 enable_local_vic_irq(irq); 967 enable_local_vic_irq(irq);
979 } 968 }
980 vic_irq_enable_mask[cpu] = 0; 969 vic_irq_enable_mask[cpu] = 0;
@@ -982,17 +971,16 @@ smp_enable_irq_interrupt(void)
982 971
983 put_cpu_no_resched(); 972 put_cpu_no_resched();
984} 973}
985 974
986/* 975/*
987 * CPU halt call-back 976 * CPU halt call-back
988 */ 977 */
989static void 978static void smp_stop_cpu_function(void *dummy)
990smp_stop_cpu_function(void *dummy)
991{ 979{
992 VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id())); 980 VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
993 cpu_clear(smp_processor_id(), cpu_online_map); 981 cpu_clear(smp_processor_id(), cpu_online_map);
994 local_irq_disable(); 982 local_irq_disable();
995 for(;;) 983 for (;;)
996 halt(); 984 halt();
997} 985}
998 986
@@ -1006,14 +994,13 @@ struct call_data_struct {
1006 int wait; 994 int wait;
1007}; 995};
1008 996
1009static struct call_data_struct * call_data; 997static struct call_data_struct *call_data;
1010 998
1011/* execute a thread on a new CPU. The function to be called must be 999/* execute a thread on a new CPU. The function to be called must be
1012 * previously set up. This is used to schedule a function for 1000 * previously set up. This is used to schedule a function for
1013 * execution on all CPUs - set up the function then broadcast a 1001 * execution on all CPUs - set up the function then broadcast a
1014 * function_interrupt CPI to come here on each CPU */ 1002 * function_interrupt CPI to come here on each CPU */
1015static void 1003static void smp_call_function_interrupt(void)
1016smp_call_function_interrupt(void)
1017{ 1004{
1018 void (*func) (void *info) = call_data->func; 1005 void (*func) (void *info) = call_data->func;
1019 void *info = call_data->info; 1006 void *info = call_data->info;
@@ -1027,16 +1014,17 @@ smp_call_function_interrupt(void)
1027 * about to execute the function 1014 * about to execute the function
1028 */ 1015 */
1029 mb(); 1016 mb();
1030 if(!test_and_clear_bit(cpu, &call_data->started)) { 1017 if (!test_and_clear_bit(cpu, &call_data->started)) {
1031 /* If the bit wasn't set, this could be a replay */ 1018 /* If the bit wasn't set, this could be a replay */
1032 		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu); 1019 		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function"
1020 " with no call pending\n", cpu);
1033 return; 1021 return;
1034 } 1022 }
1035 /* 1023 /*
1036 * At this point the info structure may be out of scope unless wait==1 1024 * At this point the info structure may be out of scope unless wait==1
1037 */ 1025 */
1038 irq_enter(); 1026 irq_enter();
1039 (*func)(info); 1027 (*func) (info);
1040 __get_cpu_var(irq_stat).irq_call_count++; 1028 __get_cpu_var(irq_stat).irq_call_count++;
1041 irq_exit(); 1029 irq_exit();
1042 if (wait) { 1030 if (wait) {
@@ -1046,14 +1034,13 @@ smp_call_function_interrupt(void)
1046} 1034}
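
The handler above is the receiver half of the call-function CPI: each CPU test-and-clears its bit in call_data->started (rejecting replayed CPIs), runs the function, and, when the caller asked to wait, clears its bit in call_data->finished so the sender can tell when everyone is done. A minimal single-address-space sketch of that bitmask handshake, with C11 atomics in place of the kernel's test_and_clear_bit() and illustrative names throughout:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct call_data {
        void (*func)(void *info);
        void *info;
        _Atomic uint32_t started;       /* bit per CPU that must run func */
        _Atomic uint32_t finished;      /* bit per CPU the caller waits on */
        int wait;
};

static int test_and_clear_bit32(int bit, _Atomic uint32_t *word)
{
        uint32_t old = atomic_fetch_and(word, ~(1u << bit));
        return (old >> bit) & 1;
}

/* receiver side, one invocation per CPU that got the CPI */
static void call_function_interrupt(struct call_data *cd, int cpu)
{
        if (!test_and_clear_bit32(cpu, &cd->started)) {
                printf("CPU %d: no call pending (replayed CPI?)\n", cpu);
                return;
        }
        cd->func(cd->info);
        if (cd->wait)
                test_and_clear_bit32(cpu, &cd->finished);
}

static void say_hello(void *info)
{
        printf("hello from %s\n", (const char *)info);
}

int main(void)
{
        struct call_data cd = {
                .func = say_hello, .info = "a remote CPU",
                .started = 0x6, .finished = 0x6, .wait = 1,     /* CPUs 1 and 2 */
        };

        call_function_interrupt(&cd, 1);
        call_function_interrupt(&cd, 2);
        call_function_interrupt(&cd, 2);        /* replay: bit already clear */
        printf("finished mask now 0x%x\n", (unsigned)atomic_load(&cd.finished));
        return 0;
}

In the kernel the caller side (voyager_smp_call_function_mask, just below) spins on these same two masks; the sketch only exercises the receiver.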
1047 1035
1048static int 1036static int
1049voyager_smp_call_function_mask (cpumask_t cpumask, 1037voyager_smp_call_function_mask(cpumask_t cpumask,
1050 void (*func) (void *info), void *info, 1038 void (*func) (void *info), void *info, int wait)
1051 int wait)
1052{ 1039{
1053 struct call_data_struct data; 1040 struct call_data_struct data;
1054 u32 mask = cpus_addr(cpumask)[0]; 1041 u32 mask = cpus_addr(cpumask)[0];
1055 1042
1056 mask &= ~(1<<smp_processor_id()); 1043 mask &= ~(1 << smp_processor_id());
1057 1044
1058 if (!mask) 1045 if (!mask)
1059 return 0; 1046 return 0;
@@ -1093,7 +1080,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
1093 * so we use the system clock to interrupt one processor, which in 1080 * so we use the system clock to interrupt one processor, which in
1094 * turn, broadcasts a timer CPI to all the others --- we receive that 1081 * turn, broadcasts a timer CPI to all the others --- we receive that
1095 * CPI here. We don't use this actually for counting so losing 1082 * CPI here. We don't use this actually for counting so losing
1096 * ticks doesn't matter 1083 * ticks doesn't matter
1097 * 1084 *
1098 * FIXME: For those CPUs which actually have a local APIC, we could 1085 * FIXME: For those CPUs which actually have a local APIC, we could
1099 * try to use it to trigger this interrupt instead of having to 1086 * try to use it to trigger this interrupt instead of having to
@@ -1101,8 +1088,7 @@ voyager_smp_call_function_mask (cpumask_t cpumask,
1101 * no local APIC, so I can't do this 1088 * no local APIC, so I can't do this
1102 * 1089 *
1103 * This function is currently a placeholder and is unused in the code */ 1090 * This function is currently a placeholder and is unused in the code */
1104fastcall void 1091fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
1105smp_apic_timer_interrupt(struct pt_regs *regs)
1106{ 1092{
1107 struct pt_regs *old_regs = set_irq_regs(regs); 1093 struct pt_regs *old_regs = set_irq_regs(regs);
1108 wrapper_smp_local_timer_interrupt(); 1094 wrapper_smp_local_timer_interrupt();
@@ -1110,8 +1096,7 @@ smp_apic_timer_interrupt(struct pt_regs *regs)
1110} 1096}
1111 1097
1112/* All of the QUAD interrupt GATES */ 1098/* All of the QUAD interrupt GATES */
1113fastcall void 1099fastcall void smp_qic_timer_interrupt(struct pt_regs *regs)
1114smp_qic_timer_interrupt(struct pt_regs *regs)
1115{ 1100{
1116 struct pt_regs *old_regs = set_irq_regs(regs); 1101 struct pt_regs *old_regs = set_irq_regs(regs);
1117 ack_QIC_CPI(QIC_TIMER_CPI); 1102 ack_QIC_CPI(QIC_TIMER_CPI);
@@ -1119,60 +1104,54 @@ smp_qic_timer_interrupt(struct pt_regs *regs)
1119 set_irq_regs(old_regs); 1104 set_irq_regs(old_regs);
1120} 1105}
1121 1106
1122fastcall void 1107fastcall void smp_qic_invalidate_interrupt(struct pt_regs *regs)
1123smp_qic_invalidate_interrupt(struct pt_regs *regs)
1124{ 1108{
1125 ack_QIC_CPI(QIC_INVALIDATE_CPI); 1109 ack_QIC_CPI(QIC_INVALIDATE_CPI);
1126 smp_invalidate_interrupt(); 1110 smp_invalidate_interrupt();
1127} 1111}
1128 1112
1129fastcall void 1113fastcall void smp_qic_reschedule_interrupt(struct pt_regs *regs)
1130smp_qic_reschedule_interrupt(struct pt_regs *regs)
1131{ 1114{
1132 ack_QIC_CPI(QIC_RESCHEDULE_CPI); 1115 ack_QIC_CPI(QIC_RESCHEDULE_CPI);
1133 smp_reschedule_interrupt(); 1116 smp_reschedule_interrupt();
1134} 1117}
1135 1118
1136fastcall void 1119fastcall void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
1137smp_qic_enable_irq_interrupt(struct pt_regs *regs)
1138{ 1120{
1139 ack_QIC_CPI(QIC_ENABLE_IRQ_CPI); 1121 ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
1140 smp_enable_irq_interrupt(); 1122 smp_enable_irq_interrupt();
1141} 1123}
1142 1124
1143fastcall void 1125fastcall void smp_qic_call_function_interrupt(struct pt_regs *regs)
1144smp_qic_call_function_interrupt(struct pt_regs *regs)
1145{ 1126{
1146 ack_QIC_CPI(QIC_CALL_FUNCTION_CPI); 1127 ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
1147 smp_call_function_interrupt(); 1128 smp_call_function_interrupt();
1148} 1129}
1149 1130
1150fastcall void 1131fastcall void smp_vic_cpi_interrupt(struct pt_regs *regs)
1151smp_vic_cpi_interrupt(struct pt_regs *regs)
1152{ 1132{
1153 struct pt_regs *old_regs = set_irq_regs(regs); 1133 struct pt_regs *old_regs = set_irq_regs(regs);
1154 __u8 cpu = smp_processor_id(); 1134 __u8 cpu = smp_processor_id();
1155 1135
1156 if(is_cpu_quad()) 1136 if (is_cpu_quad())
1157 ack_QIC_CPI(VIC_CPI_LEVEL0); 1137 ack_QIC_CPI(VIC_CPI_LEVEL0);
1158 else 1138 else
1159 ack_VIC_CPI(VIC_CPI_LEVEL0); 1139 ack_VIC_CPI(VIC_CPI_LEVEL0);
1160 1140
1161 if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu])) 1141 if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
1162 wrapper_smp_local_timer_interrupt(); 1142 wrapper_smp_local_timer_interrupt();
1163 if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu])) 1143 if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
1164 smp_invalidate_interrupt(); 1144 smp_invalidate_interrupt();
1165 if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu])) 1145 if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
1166 smp_reschedule_interrupt(); 1146 smp_reschedule_interrupt();
1167 if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu])) 1147 if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
1168 smp_enable_irq_interrupt(); 1148 smp_enable_irq_interrupt();
1169 if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) 1149 if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
1170 smp_call_function_interrupt(); 1150 smp_call_function_interrupt();
1171 set_irq_regs(old_regs); 1151 set_irq_regs(old_regs);
1172} 1152}
1173 1153
1174static void 1154static void do_flush_tlb_all(void *info)
1175do_flush_tlb_all(void* info)
1176{ 1155{
1177 unsigned long cpu = smp_processor_id(); 1156 unsigned long cpu = smp_processor_id();
1178 1157
@@ -1181,65 +1160,56 @@ do_flush_tlb_all(void* info)
1181 leave_mm(cpu); 1160 leave_mm(cpu);
1182} 1161}
1183 1162
1184
1185/* flush the TLB of every active CPU in the system */ 1163/* flush the TLB of every active CPU in the system */
1186void 1164void flush_tlb_all(void)
1187flush_tlb_all(void)
1188{ 1165{
1189 on_each_cpu(do_flush_tlb_all, 0, 1, 1); 1166 on_each_cpu(do_flush_tlb_all, 0, 1, 1);
1190} 1167}
1191 1168
1192/* used to set up the trampoline for other CPUs when the memory manager 1169/* used to set up the trampoline for other CPUs when the memory manager
1193 * is sorted out */ 1170 * is sorted out */
1194void __init 1171void __init smp_alloc_memory(void)
1195smp_alloc_memory(void)
1196{ 1172{
1197 trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE); 1173 trampoline_base = (__u32) alloc_bootmem_low_pages(PAGE_SIZE);
1198 if(__pa(trampoline_base) >= 0x93000) 1174 if (__pa(trampoline_base) >= 0x93000)
1199 BUG(); 1175 BUG();
1200} 1176}
1201 1177
1202/* send a reschedule CPI to one CPU by physical CPU number*/ 1178/* send a reschedule CPI to one CPU by physical CPU number*/
1203static void 1179static void voyager_smp_send_reschedule(int cpu)
1204voyager_smp_send_reschedule(int cpu)
1205{ 1180{
1206 send_one_CPI(cpu, VIC_RESCHEDULE_CPI); 1181 send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
1207} 1182}
1208 1183
1209 1184int hard_smp_processor_id(void)
1210int
1211hard_smp_processor_id(void)
1212{ 1185{
1213 __u8 i; 1186 __u8 i;
1214 __u8 cpumask = inb(VIC_PROC_WHO_AM_I); 1187 __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
1215 if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER) 1188 if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
1216 return cpumask & 0x1F; 1189 return cpumask & 0x1F;
1217 1190
1218 for(i = 0; i < 8; i++) { 1191 for (i = 0; i < 8; i++) {
1219 if(cpumask & (1<<i)) 1192 if (cpumask & (1 << i))
1220 return i; 1193 return i;
1221 } 1194 }
1222 printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask); 1195 printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
1223 return 0; 1196 return 0;
1224} 1197}
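
The routine above decodes the VIC "who am I" register: on a quad board the byte carries the quad identifier plus a 5-bit CPU number, otherwise it is a one-hot mask and the CPU number is the index of the set bit. A standalone sketch of just that decoding; the QUAD_IDENTIFIER value is assumed for illustration, the real constant lives in the Voyager headers:

#include <stdio.h>

#define QUAD_IDENTIFIER 0xC0    /* assumed value, for the sketch only */

/* decode the byte read from VIC_PROC_WHO_AM_I */
static int decode_who_am_i(unsigned char cpumask)
{
        if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;          /* quad: low 5 bits are the id */

        for (int i = 0; i < 8; i++)             /* VIC: one-hot bit is the id */
                if (cpumask & (1 << i))
                        return i;

        printf("** WARNING ** illegal cpuid mask: 0x%x\n", cpumask);
        return 0;
}

int main(void)
{
        printf("one-hot 0x08 -> CPU %d\n", decode_who_am_i(0x08));
        printf("quad 0xC3    -> CPU %d\n", decode_who_am_i(0xC3));
        return 0;
}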
1225 1198
1226int 1199int safe_smp_processor_id(void)
1227safe_smp_processor_id(void)
1228{ 1200{
1229 return hard_smp_processor_id(); 1201 return hard_smp_processor_id();
1230} 1202}
1231 1203
1232/* broadcast a halt to all other CPUs */ 1204/* broadcast a halt to all other CPUs */
1233static void 1205static void voyager_smp_send_stop(void)
1234voyager_smp_send_stop(void)
1235{ 1206{
1236 smp_call_function(smp_stop_cpu_function, NULL, 1, 1); 1207 smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
1237} 1208}
1238 1209
1239/* this function is triggered in time.c when a clock tick fires 1210/* this function is triggered in time.c when a clock tick fires
1240 * we need to re-broadcast the tick to all CPUs */ 1211 * we need to re-broadcast the tick to all CPUs */
1241void 1212void smp_vic_timer_interrupt(void)
1242smp_vic_timer_interrupt(void)
1243{ 1213{
1244 send_CPI_allbutself(VIC_TIMER_CPI); 1214 send_CPI_allbutself(VIC_TIMER_CPI);
1245 smp_local_timer_interrupt(); 1215 smp_local_timer_interrupt();
@@ -1253,8 +1223,7 @@ smp_vic_timer_interrupt(void)
1253 * multiplier is 1 and it can be changed by writing the new multiplier 1223 * multiplier is 1 and it can be changed by writing the new multiplier
1254 * value into /proc/profile. 1224 * value into /proc/profile.
1255 */ 1225 */
1256void 1226void smp_local_timer_interrupt(void)
1257smp_local_timer_interrupt(void)
1258{ 1227{
1259 int cpu = smp_processor_id(); 1228 int cpu = smp_processor_id();
1260 long weight; 1229 long weight;
@@ -1269,18 +1238,18 @@ smp_local_timer_interrupt(void)
1269 * 1238 *
1270 * Interrupts are already masked off at this point. 1239 * Interrupts are already masked off at this point.
1271 */ 1240 */
1272 per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu); 1241 per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
1273 if (per_cpu(prof_counter, cpu) != 1242 if (per_cpu(prof_counter, cpu) !=
1274 per_cpu(prof_old_multiplier, cpu)) { 1243 per_cpu(prof_old_multiplier, cpu)) {
1275 /* FIXME: need to update the vic timer tick here */ 1244 /* FIXME: need to update the vic timer tick here */
1276 per_cpu(prof_old_multiplier, cpu) = 1245 per_cpu(prof_old_multiplier, cpu) =
1277 per_cpu(prof_counter, cpu); 1246 per_cpu(prof_counter, cpu);
1278 } 1247 }
1279 1248
1280 update_process_times(user_mode_vm(get_irq_regs())); 1249 update_process_times(user_mode_vm(get_irq_regs()));
1281 } 1250 }
1282 1251
1283 if( ((1<<cpu) & voyager_extended_vic_processors) == 0) 1252 if (((1 << cpu) & voyager_extended_vic_processors) == 0)
1284 /* only extended VIC processors participate in 1253 /* only extended VIC processors participate in
1285 * interrupt distribution */ 1254 * interrupt distribution */
1286 return; 1255 return;
@@ -1296,12 +1265,12 @@ smp_local_timer_interrupt(void)
1296 * we can take more than 100K local irqs per second on a 100 MHz P5. 1265 * we can take more than 100K local irqs per second on a 100 MHz P5.
1297 */ 1266 */
1298 1267
1299 if((++vic_tick[cpu] & 0x7) != 0) 1268 if ((++vic_tick[cpu] & 0x7) != 0)
1300 return; 1269 return;
1301 /* get here every 16 ticks (about every 1/6 of a second) */ 1270 /* get here every 16 ticks (about every 1/6 of a second) */
1302 1271
1303 /* Change our priority to give someone else a chance at getting 1272 /* Change our priority to give someone else a chance at getting
1304 * the IRQ. The algorithm goes like this: 1273 * the IRQ. The algorithm goes like this:
1305 * 1274 *
1306 * In the VIC, the dynamically routed interrupt is always 1275 * In the VIC, the dynamically routed interrupt is always
1307 * handled by the lowest priority eligible (i.e. receiving 1276 * handled by the lowest priority eligible (i.e. receiving
@@ -1325,18 +1294,18 @@ smp_local_timer_interrupt(void)
1325 * affinity code since we now try to even up the interrupt 1294 * affinity code since we now try to even up the interrupt
1326 * counts when an affinity binding is keeping them on a 1295 * counts when an affinity binding is keeping them on a
1327 * particular CPU*/ 1296 * particular CPU*/
1328 weight = (vic_intr_count[cpu]*voyager_extended_cpus 1297 weight = (vic_intr_count[cpu] * voyager_extended_cpus
1329 - vic_intr_total) >> 4; 1298 - vic_intr_total) >> 4;
1330 weight += 4; 1299 weight += 4;
1331 if(weight > 7) 1300 if (weight > 7)
1332 weight = 7; 1301 weight = 7;
1333 if(weight < 0) 1302 if (weight < 0)
1334 weight = 0; 1303 weight = 0;
1335 1304
1336 outb((__u8)weight, VIC_PRIORITY_REGISTER); 1305 outb((__u8) weight, VIC_PRIORITY_REGISTER);
1337 1306
1338#ifdef VOYAGER_DEBUG 1307#ifdef VOYAGER_DEBUG
1339 if((vic_tick[cpu] & 0xFFF) == 0) { 1308 if ((vic_tick[cpu] & 0xFFF) == 0) {
1340 /* print this message roughly every 25 secs */ 1309 /* print this message roughly every 25 secs */
1341 printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n", 1310 printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
1342 cpu, vic_tick[cpu], weight); 1311 cpu, vic_tick[cpu], weight);
@@ -1345,15 +1314,14 @@ smp_local_timer_interrupt(void)
1345} 1314}
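
The comment block above describes how interrupt load is levelled: every 16 ticks each extended CPU derives a 3-bit weight from how far its own interrupt count sits above or below the system average and writes it to the VIC priority register, so that relatively idle CPUs end up collecting more of the dynamically routed interrupts. A worked sketch of just that weight computation, mirroring the shift, bias and clamping above (the example counts are invented, and the negative right shift behaves arithmetically on the compilers Linux supports, as in the kernel expression):

#include <stdio.h>

/* mirror of: weight = (count * cpus - total) >> 4; weight += 4; clamp to 0..7 */
static long vic_priority_weight(long vic_intr_count, long extended_cpus,
                                long vic_intr_total)
{
        long weight = (vic_intr_count * extended_cpus - vic_intr_total) >> 4;

        weight += 4;            /* bias so an average CPU lands mid-range */
        if (weight > 7)
                weight = 7;     /* cap: the busiest CPUs get the largest weight */
        if (weight < 0)
                weight = 0;     /* floor: the idlest CPUs get the smallest weight */
        return weight;
}

int main(void)
{
        /* 4 extended CPUs, 4000 interrupts seen system-wide */
        printf("idle CPU    (800 irqs): weight %ld\n", vic_priority_weight(800, 4, 4000));
        printf("average CPU (1000 irqs): weight %ld\n", vic_priority_weight(1000, 4, 4000));
        printf("busy CPU    (1400 irqs): weight %ld\n", vic_priority_weight(1400, 4, 4000));
        return 0;
}

With these numbers a lightly loaded CPU asks for weight 0, an average one for 4 and an overloaded one for the ceiling of 7, which is exactly the evening-out the comment above is after.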
1346 1315
1347/* setup the profiling timer */ 1316/* setup the profiling timer */
1348int 1317int setup_profiling_timer(unsigned int multiplier)
1349setup_profiling_timer(unsigned int multiplier)
1350{ 1318{
1351 int i; 1319 int i;
1352 1320
1353 if ( (!multiplier)) 1321 if ((!multiplier))
1354 return -EINVAL; 1322 return -EINVAL;
1355 1323
1356 /* 1324 /*
1357 * Set the new multiplier for each CPU. CPUs don't start using the 1325 * Set the new multiplier for each CPU. CPUs don't start using the
1358 * new values until the next timer interrupt in which they do process 1326 * new values until the next timer interrupt in which they do process
1359 * accounting. 1327 * accounting.
@@ -1367,15 +1335,13 @@ setup_profiling_timer(unsigned int multiplier)
1367/* This is a bit of a mess, but forced on us by the genirq changes 1335/* This is a bit of a mess, but forced on us by the genirq changes
1368 * there's no genirq handler that really does what voyager wants 1336 * there's no genirq handler that really does what voyager wants
1369 * so hack it up with the simple IRQ handler */ 1337 * so hack it up with the simple IRQ handler */
1370static void fastcall 1338static void fastcall handle_vic_irq(unsigned int irq, struct irq_desc *desc)
1371handle_vic_irq(unsigned int irq, struct irq_desc *desc)
1372{ 1339{
1373 before_handle_vic_irq(irq); 1340 before_handle_vic_irq(irq);
1374 handle_simple_irq(irq, desc); 1341 handle_simple_irq(irq, desc);
1375 after_handle_vic_irq(irq); 1342 after_handle_vic_irq(irq);
1376} 1343}
1377 1344
1378
1379/* The CPIs are handled in the per cpu 8259s, so they must be 1345/* The CPIs are handled in the per cpu 8259s, so they must be
1380 * enabled to be received: FIX: enabling the CPIs in the early 1346 * enabled to be received: FIX: enabling the CPIs in the early
1381 * boot sequence interferes with bug checking; enable them later 1347 * boot sequence interferes with bug checking; enable them later
@@ -1385,13 +1351,12 @@ handle_vic_irq(unsigned int irq, struct irq_desc *desc)
1385#define QIC_SET_GATE(cpi, vector) \ 1351#define QIC_SET_GATE(cpi, vector) \
1386 set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector)) 1352 set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
1387 1353
1388void __init 1354void __init smp_intr_init(void)
1389smp_intr_init(void)
1390{ 1355{
1391 int i; 1356 int i;
1392 1357
1393 /* initialize the per cpu irq mask to all disabled */ 1358 /* initialize the per cpu irq mask to all disabled */
1394 for(i = 0; i < NR_CPUS; i++) 1359 for (i = 0; i < NR_CPUS; i++)
1395 vic_irq_mask[i] = 0xFFFF; 1360 vic_irq_mask[i] = 0xFFFF;
1396 1361
1397 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt); 1362 VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
@@ -1404,42 +1369,40 @@ smp_intr_init(void)
1404 QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt); 1369 QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
1405 QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt); 1370 QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
1406 QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt); 1371 QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
1407
1408 1372
1409 /* now put the VIC descriptor into the first 48 IRQs 1373 /* now put the VIC descriptor into the first 48 IRQs
1410 * 1374 *
1411 * This is for later: first 16 correspond to PC IRQs; next 16 1375 * This is for later: first 16 correspond to PC IRQs; next 16
1412 * are Primary MC IRQs and final 16 are Secondary MC IRQs */ 1376 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
1413 for(i = 0; i < 48; i++) 1377 for (i = 0; i < 48; i++)
1414 set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq); 1378 set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
1415} 1379}
1416 1380
1417/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per 1381/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
1418 * processor to receive CPI */ 1382 * processor to receive CPI */
1419static void 1383static void send_CPI(__u32 cpuset, __u8 cpi)
1420send_CPI(__u32 cpuset, __u8 cpi)
1421{ 1384{
1422 int cpu; 1385 int cpu;
1423 __u32 quad_cpuset = (cpuset & voyager_quad_processors); 1386 __u32 quad_cpuset = (cpuset & voyager_quad_processors);
1424 1387
1425 if(cpi < VIC_START_FAKE_CPI) { 1388 if (cpi < VIC_START_FAKE_CPI) {
1426 /* fake CPI are only used for booting, so send to the 1389 /* fake CPI are only used for booting, so send to the
1427 * extended quads as well---Quads must be VIC booted */ 1390 * extended quads as well---Quads must be VIC booted */
1428 outb((__u8)(cpuset), VIC_CPI_Registers[cpi]); 1391 outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
1429 return; 1392 return;
1430 } 1393 }
1431 if(quad_cpuset) 1394 if (quad_cpuset)
1432 send_QIC_CPI(quad_cpuset, cpi); 1395 send_QIC_CPI(quad_cpuset, cpi);
1433 cpuset &= ~quad_cpuset; 1396 cpuset &= ~quad_cpuset;
1434 	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */ 1397 	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
1435 if(cpuset == 0) 1398 if (cpuset == 0)
1436 return; 1399 return;
1437 for_each_online_cpu(cpu) { 1400 for_each_online_cpu(cpu) {
1438 if(cpuset & (1<<cpu)) 1401 if (cpuset & (1 << cpu))
1439 set_bit(cpi, &vic_cpi_mailbox[cpu]); 1402 set_bit(cpi, &vic_cpi_mailbox[cpu]);
1440 } 1403 }
1441 if(cpuset) 1404 if (cpuset)
1442 outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]); 1405 outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
1443} 1406}
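
The sender above partitions the requested CPU set: fake (boot-time) CPIs go straight to the VIC CPI register for every target, while real CPIs are delivered through the QIC for quad processors and through the per-CPU mailbox plus a level-0 VIC CPI for the remaining (first eight) VIC processors. A small sketch of just that partitioning arithmetic, with the masks invented for the example:

#include <stdint.h>
#include <stdio.h>

/* split a CPU set the way send_CPI() does: quad CPUs vs. VIC CPUs */
static void partition_cpuset(uint32_t cpuset, uint32_t quad_processors,
                             uint32_t *quad_out, uint32_t *vic_out)
{
        uint32_t quad_cpuset = cpuset & quad_processors;

        cpuset &= ~quad_cpuset;         /* quads are reached through the QIC */
        cpuset &= 0xff;                 /* only the first 8 CPUs take VIC CPIs */
        *quad_out = quad_cpuset;
        *vic_out = cpuset;
}

int main(void)
{
        uint32_t quads, vics;

        /* example: CPUs 0-3 are VIC processors, CPUs 8-11 sit on a quad board */
        partition_cpuset(0x00000f0f, 0x00000f00, &quads, &vics);
        printf("QIC delivery mask: 0x%08x\n", quads);   /* 0x00000f00 */
        printf("VIC delivery mask: 0x%08x\n", vics);    /* 0x0000000f */
        return 0;
}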
1444 1407
1445/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and 1408/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
@@ -1448,20 +1411,19 @@ send_CPI(__u32 cpuset, __u8 cpi)
1448 * DON'T make this inline otherwise the cache line read will be 1411 * DON'T make this inline otherwise the cache line read will be
1449 * optimised away 1412 * optimised away
1450 * */ 1413 * */
1451static int 1414static int ack_QIC_CPI(__u8 cpi)
1452ack_QIC_CPI(__u8 cpi) { 1415{
1453 __u8 cpu = hard_smp_processor_id(); 1416 __u8 cpu = hard_smp_processor_id();
1454 1417
1455 cpi &= 7; 1418 cpi &= 7;
1456 1419
1457 outb(1<<cpi, QIC_INTERRUPT_CLEAR1); 1420 outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
1458 return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi; 1421 return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
1459} 1422}
1460 1423
1461static void 1424static void ack_special_QIC_CPI(__u8 cpi)
1462ack_special_QIC_CPI(__u8 cpi)
1463{ 1425{
1464 switch(cpi) { 1426 switch (cpi) {
1465 case VIC_CMN_INT: 1427 case VIC_CMN_INT:
1466 outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0); 1428 outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
1467 break; 1429 break;
@@ -1474,8 +1436,7 @@ ack_special_QIC_CPI(__u8 cpi)
1474} 1436}
1475 1437
1476/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */ 1438/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
1477static void 1439static void ack_VIC_CPI(__u8 cpi)
1478ack_VIC_CPI(__u8 cpi)
1479{ 1440{
1480#ifdef VOYAGER_DEBUG 1441#ifdef VOYAGER_DEBUG
1481 unsigned long flags; 1442 unsigned long flags;
@@ -1484,17 +1445,17 @@ ack_VIC_CPI(__u8 cpi)
1484 1445
1485 local_irq_save(flags); 1446 local_irq_save(flags);
1486 isr = vic_read_isr(); 1447 isr = vic_read_isr();
1487 if((isr & (1<<(cpi &7))) == 0) { 1448 if ((isr & (1 << (cpi & 7))) == 0) {
1488 printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi); 1449 printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
1489 } 1450 }
1490#endif 1451#endif
1491 /* send specific EOI; the two system interrupts have 1452 /* send specific EOI; the two system interrupts have
1492 * bit 4 set for a separate vector but behave as the 1453 * bit 4 set for a separate vector but behave as the
1493 * corresponding 3 bit intr */ 1454 * corresponding 3 bit intr */
1494 outb_p(0x60|(cpi & 7),0x20); 1455 outb_p(0x60 | (cpi & 7), 0x20);
1495 1456
1496#ifdef VOYAGER_DEBUG 1457#ifdef VOYAGER_DEBUG
1497 if((vic_read_isr() & (1<<(cpi &7))) != 0) { 1458 if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
1498 printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi); 1459 printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
1499 } 1460 }
1500 local_irq_restore(flags); 1461 local_irq_restore(flags);
@@ -1502,12 +1463,11 @@ ack_VIC_CPI(__u8 cpi)
1502} 1463}
1503 1464
1504/* cribbed with thanks from irq.c */ 1465/* cribbed with thanks from irq.c */
1505#define __byte(x,y) (((unsigned char *)&(y))[x]) 1466#define __byte(x,y) (((unsigned char *)&(y))[x])
1506#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu])) 1467#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu]))
1507#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu])) 1468#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu]))
1508 1469
1509static unsigned int 1470static unsigned int startup_vic_irq(unsigned int irq)
1510startup_vic_irq(unsigned int irq)
1511{ 1471{
1512 unmask_vic_irq(irq); 1472 unmask_vic_irq(irq);
1513 1473
@@ -1535,13 +1495,12 @@ startup_vic_irq(unsigned int irq)
1535 * broadcast an Interrupt enable CPI which causes all other CPUs to 1495 * broadcast an Interrupt enable CPI which causes all other CPUs to
1536 * adjust their masks accordingly. */ 1496 * adjust their masks accordingly. */
1537 1497
1538static void 1498static void unmask_vic_irq(unsigned int irq)
1539unmask_vic_irq(unsigned int irq)
1540{ 1499{
1541 	/* linux doesn't do processor-irq affinity, so enable on 1500 	/* linux doesn't do processor-irq affinity, so enable on
1542 * all CPUs we know about */ 1501 * all CPUs we know about */
1543 int cpu = smp_processor_id(), real_cpu; 1502 int cpu = smp_processor_id(), real_cpu;
1544 __u16 mask = (1<<irq); 1503 __u16 mask = (1 << irq);
1545 __u32 processorList = 0; 1504 __u32 processorList = 0;
1546 unsigned long flags; 1505 unsigned long flags;
1547 1506
@@ -1549,78 +1508,72 @@ unmask_vic_irq(unsigned int irq)
1549 irq, cpu, cpu_irq_affinity[cpu])); 1508 irq, cpu, cpu_irq_affinity[cpu]));
1550 spin_lock_irqsave(&vic_irq_lock, flags); 1509 spin_lock_irqsave(&vic_irq_lock, flags);
1551 for_each_online_cpu(real_cpu) { 1510 for_each_online_cpu(real_cpu) {
1552 if(!(voyager_extended_vic_processors & (1<<real_cpu))) 1511 if (!(voyager_extended_vic_processors & (1 << real_cpu)))
1553 continue; 1512 continue;
1554 if(!(cpu_irq_affinity[real_cpu] & mask)) { 1513 if (!(cpu_irq_affinity[real_cpu] & mask)) {
1555 /* irq has no affinity for this CPU, ignore */ 1514 /* irq has no affinity for this CPU, ignore */
1556 continue; 1515 continue;
1557 } 1516 }
1558 if(real_cpu == cpu) { 1517 if (real_cpu == cpu) {
1559 enable_local_vic_irq(irq); 1518 enable_local_vic_irq(irq);
1560 } 1519 } else if (vic_irq_mask[real_cpu] & mask) {
1561 else if(vic_irq_mask[real_cpu] & mask) {
1562 vic_irq_enable_mask[real_cpu] |= mask; 1520 vic_irq_enable_mask[real_cpu] |= mask;
1563 processorList |= (1<<real_cpu); 1521 processorList |= (1 << real_cpu);
1564 } 1522 }
1565 } 1523 }
1566 spin_unlock_irqrestore(&vic_irq_lock, flags); 1524 spin_unlock_irqrestore(&vic_irq_lock, flags);
1567 if(processorList) 1525 if (processorList)
1568 send_CPI(processorList, VIC_ENABLE_IRQ_CPI); 1526 send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
1569} 1527}
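
The unmask path above shows the interrupt-enable protocol: the local CPU unmasks its own 8259 pair immediately, while every other extended CPU whose cached mask still has the line masked gets the line recorded in vic_irq_enable_mask and its bit added to processorList, after which a single VIC_ENABLE_IRQ_CPI is sent to the collected set. A compact sketch of building that processor list; the array sizes and example masks are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static uint16_t vic_irq_mask[NR_CPUS];          /* 1 bit = line currently masked */
static uint16_t vic_irq_enable_mask[NR_CPUS];   /* lines to unmask when the CPI lands */

/* return the set of CPUs that must be sent an enable-IRQ CPI */
static uint32_t collect_enable_targets(int irq, int this_cpu,
                                       uint32_t extended, const uint32_t affinity[])
{
        uint16_t mask = 1 << irq;
        uint32_t processor_list = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(extended & (1u << cpu)))
                        continue;               /* non-extended CPUs take no IRQs */
                if (!(affinity[cpu] & mask))
                        continue;               /* irq has no affinity for this CPU */
                if (cpu == this_cpu)
                        continue;               /* local CPU unmasks its PIC directly */
                if (vic_irq_mask[cpu] & mask) {
                        vic_irq_enable_mask[cpu] |= mask;
                        processor_list |= 1u << cpu;
                }
        }
        return processor_list;
}

int main(void)
{
        uint32_t affinity[NR_CPUS];

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                vic_irq_mask[cpu] = 0xFFFF;     /* everything starts masked */
                affinity[cpu] = 0xFFFFFFFFu;    /* the irq is allowed everywhere */
        }
        /* CPUs 0-3 are extended; CPU 0 is asking to unmask irq 5 */
        printf("send enable CPI to: 0x%x\n", collect_enable_targets(5, 0, 0x0F, affinity));
        return 0;
}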
1570 1528
1571static void 1529static void mask_vic_irq(unsigned int irq)
1572mask_vic_irq(unsigned int irq)
1573{ 1530{
1574 /* lazy disable, do nothing */ 1531 /* lazy disable, do nothing */
1575} 1532}
1576 1533
1577static void 1534static void enable_local_vic_irq(unsigned int irq)
1578enable_local_vic_irq(unsigned int irq)
1579{ 1535{
1580 __u8 cpu = smp_processor_id(); 1536 __u8 cpu = smp_processor_id();
1581 __u16 mask = ~(1 << irq); 1537 __u16 mask = ~(1 << irq);
1582 __u16 old_mask = vic_irq_mask[cpu]; 1538 __u16 old_mask = vic_irq_mask[cpu];
1583 1539
1584 vic_irq_mask[cpu] &= mask; 1540 vic_irq_mask[cpu] &= mask;
1585 if(vic_irq_mask[cpu] == old_mask) 1541 if (vic_irq_mask[cpu] == old_mask)
1586 return; 1542 return;
1587 1543
1588 VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n", 1544 VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
1589 irq, cpu)); 1545 irq, cpu));
1590 1546
1591 if (irq & 8) { 1547 if (irq & 8) {
1592 outb_p(cached_A1(cpu),0xA1); 1548 outb_p(cached_A1(cpu), 0xA1);
1593 (void)inb_p(0xA1); 1549 (void)inb_p(0xA1);
1594 } 1550 } else {
1595 else { 1551 outb_p(cached_21(cpu), 0x21);
1596 outb_p(cached_21(cpu),0x21);
1597 (void)inb_p(0x21); 1552 (void)inb_p(0x21);
1598 } 1553 }
1599} 1554}
1600 1555
1601static void 1556static void disable_local_vic_irq(unsigned int irq)
1602disable_local_vic_irq(unsigned int irq)
1603{ 1557{
1604 __u8 cpu = smp_processor_id(); 1558 __u8 cpu = smp_processor_id();
1605 __u16 mask = (1 << irq); 1559 __u16 mask = (1 << irq);
1606 __u16 old_mask = vic_irq_mask[cpu]; 1560 __u16 old_mask = vic_irq_mask[cpu];
1607 1561
1608 if(irq == 7) 1562 if (irq == 7)
1609 return; 1563 return;
1610 1564
1611 vic_irq_mask[cpu] |= mask; 1565 vic_irq_mask[cpu] |= mask;
1612 if(old_mask == vic_irq_mask[cpu]) 1566 if (old_mask == vic_irq_mask[cpu])
1613 return; 1567 return;
1614 1568
1615 VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n", 1569 VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
1616 irq, cpu)); 1570 irq, cpu));
1617 1571
1618 if (irq & 8) { 1572 if (irq & 8) {
1619 outb_p(cached_A1(cpu),0xA1); 1573 outb_p(cached_A1(cpu), 0xA1);
1620 (void)inb_p(0xA1); 1574 (void)inb_p(0xA1);
1621 } 1575 } else {
1622 else { 1576 outb_p(cached_21(cpu), 0x21);
1623 outb_p(cached_21(cpu),0x21);
1624 (void)inb_p(0x21); 1577 (void)inb_p(0x21);
1625 } 1578 }
1626} 1579}
@@ -1631,8 +1584,7 @@ disable_local_vic_irq(unsigned int irq)
1631 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If 1584 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If
1632 * this interrupt actually comes in, then we mask and ack here to push 1585 * this interrupt actually comes in, then we mask and ack here to push
1633 * the interrupt off to another CPU */ 1586 * the interrupt off to another CPU */
1634static void 1587static void before_handle_vic_irq(unsigned int irq)
1635before_handle_vic_irq(unsigned int irq)
1636{ 1588{
1637 irq_desc_t *desc = irq_desc + irq; 1589 irq_desc_t *desc = irq_desc + irq;
1638 __u8 cpu = smp_processor_id(); 1590 __u8 cpu = smp_processor_id();
@@ -1641,16 +1593,16 @@ before_handle_vic_irq(unsigned int irq)
1641 vic_intr_total++; 1593 vic_intr_total++;
1642 vic_intr_count[cpu]++; 1594 vic_intr_count[cpu]++;
1643 1595
1644 if(!(cpu_irq_affinity[cpu] & (1<<irq))) { 1596 if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
1645 /* The irq is not in our affinity mask, push it off 1597 /* The irq is not in our affinity mask, push it off
1646 * onto another CPU */ 1598 * onto another CPU */
1647 VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n", 1599 VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
1648 irq, cpu)); 1600 "on cpu %d\n", irq, cpu));
1649 disable_local_vic_irq(irq); 1601 disable_local_vic_irq(irq);
1650 /* set IRQ_INPROGRESS to prevent the handler in irq.c from 1602 /* set IRQ_INPROGRESS to prevent the handler in irq.c from
1651 * actually calling the interrupt routine */ 1603 * actually calling the interrupt routine */
1652 desc->status |= IRQ_REPLAY | IRQ_INPROGRESS; 1604 desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
1653 } else if(desc->status & IRQ_DISABLED) { 1605 } else if (desc->status & IRQ_DISABLED) {
1654 /* Damn, the interrupt actually arrived, do the lazy 1606 /* Damn, the interrupt actually arrived, do the lazy
1655 * disable thing. The interrupt routine in irq.c will 1607 * disable thing. The interrupt routine in irq.c will
1656 	 * not handle an IRQ_DISABLED interrupt, so nothing more 1608 	 * not handle an IRQ_DISABLED interrupt, so nothing more
@@ -1667,8 +1619,7 @@ before_handle_vic_irq(unsigned int irq)
1667} 1619}
1668 1620
1669/* Finish the VIC interrupt: basically mask */ 1621/* Finish the VIC interrupt: basically mask */
1670static void 1622static void after_handle_vic_irq(unsigned int irq)
1671after_handle_vic_irq(unsigned int irq)
1672{ 1623{
1673 irq_desc_t *desc = irq_desc + irq; 1624 irq_desc_t *desc = irq_desc + irq;
1674 1625
@@ -1685,11 +1636,11 @@ after_handle_vic_irq(unsigned int irq)
1685#ifdef VOYAGER_DEBUG 1636#ifdef VOYAGER_DEBUG
1686 /* DEBUG: before we ack, check what's in progress */ 1637 /* DEBUG: before we ack, check what's in progress */
1687 isr = vic_read_isr(); 1638 isr = vic_read_isr();
1688 if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) { 1639 if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
1689 int i; 1640 int i;
1690 __u8 cpu = smp_processor_id(); 1641 __u8 cpu = smp_processor_id();
1691 __u8 real_cpu; 1642 __u8 real_cpu;
1692 int mask; /* Um... initialize me??? --RR */ 1643 int mask; /* Um... initialize me??? --RR */
1693 1644
1694 printk("VOYAGER SMP: CPU%d lost interrupt %d\n", 1645 printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
1695 cpu, irq); 1646 cpu, irq);
@@ -1698,9 +1649,10 @@ after_handle_vic_irq(unsigned int irq)
1698 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu, 1649 outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
1699 VIC_PROCESSOR_ID); 1650 VIC_PROCESSOR_ID);
1700 isr = vic_read_isr(); 1651 isr = vic_read_isr();
1701 if(isr & (1<<irq)) { 1652 if (isr & (1 << irq)) {
1702 printk("VOYAGER SMP: CPU%d ack irq %d\n", 1653 printk
1703 real_cpu, irq); 1654 ("VOYAGER SMP: CPU%d ack irq %d\n",
1655 real_cpu, irq);
1704 ack_vic_irq(irq); 1656 ack_vic_irq(irq);
1705 } 1657 }
1706 outb(cpu, VIC_PROCESSOR_ID); 1658 outb(cpu, VIC_PROCESSOR_ID);
@@ -1711,7 +1663,7 @@ after_handle_vic_irq(unsigned int irq)
1711 * receipt by another CPU so everything must be in 1663 * receipt by another CPU so everything must be in
1712 * order here */ 1664 * order here */
1713 ack_vic_irq(irq); 1665 ack_vic_irq(irq);
1714 if(status & IRQ_REPLAY) { 1666 if (status & IRQ_REPLAY) {
1715 /* replay is set if we disable the interrupt 1667 /* replay is set if we disable the interrupt
1716 * in the before_handle_vic_irq() routine, so 1668 * in the before_handle_vic_irq() routine, so
1717 * clear the in progress bit here to allow the 1669 * clear the in progress bit here to allow the
@@ -1720,9 +1672,9 @@ after_handle_vic_irq(unsigned int irq)
1720 } 1672 }
1721#ifdef VOYAGER_DEBUG 1673#ifdef VOYAGER_DEBUG
1722 isr = vic_read_isr(); 1674 isr = vic_read_isr();
1723 if((isr & (1<<irq)) != 0) 1675 if ((isr & (1 << irq)) != 0)
1724 printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n", 1676 printk("VOYAGER SMP: after_handle_vic_irq() after "
1725 irq, isr); 1677 "ack irq=%d, isr=0x%x\n", irq, isr);
1726#endif /* VOYAGER_DEBUG */ 1678#endif /* VOYAGER_DEBUG */
1727 } 1679 }
1728 _raw_spin_unlock(&vic_irq_lock); 1680 _raw_spin_unlock(&vic_irq_lock);
@@ -1731,7 +1683,6 @@ after_handle_vic_irq(unsigned int irq)
1731 * may be intercepted by another CPU if reasserted */ 1683 * may be intercepted by another CPU if reasserted */
1732} 1684}
1733 1685
1734
1735/* Linux processor - interrupt affinity manipulations. 1686/* Linux processor - interrupt affinity manipulations.
1736 * 1687 *
1737 * For each processor, we maintain a 32 bit irq affinity mask. 1688 * For each processor, we maintain a 32 bit irq affinity mask.
@@ -1748,8 +1699,7 @@ after_handle_vic_irq(unsigned int irq)
1748 * change the mask and then do an interrupt enable CPI to re-enable on 1699 * change the mask and then do an interrupt enable CPI to re-enable on
1749 * the selected processors */ 1700 * the selected processors */
1750 1701
1751void 1702void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1752set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1753{ 1703{
1754 /* Only extended processors handle interrupts */ 1704 /* Only extended processors handle interrupts */
1755 unsigned long real_mask; 1705 unsigned long real_mask;
@@ -1757,13 +1707,13 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1757 int cpu; 1707 int cpu;
1758 1708
1759 real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors; 1709 real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
1760 1710
1761 if(cpus_addr(mask)[0] == 0) 1711 if (cpus_addr(mask)[0] == 0)
1762 /* can't have no CPUs to accept the interrupt -- extremely 1712 /* can't have no CPUs to accept the interrupt -- extremely
1763 * bad things will happen */ 1713 * bad things will happen */
1764 return; 1714 return;
1765 1715
1766 if(irq == 0) 1716 if (irq == 0)
1767 /* can't change the affinity of the timer IRQ. This 1717 /* can't change the affinity of the timer IRQ. This
1768 * is due to the constraint in the voyager 1718 * is due to the constraint in the voyager
1769 	 * architecture that the CPI also comes in on an IRQ 1719 	 * architecture that the CPI also comes in on an IRQ
@@ -1772,7 +1722,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1772 	 * will no longer be able to accept VIC CPIs */ 1722 	 * will no longer be able to accept VIC CPIs */
1773 return; 1723 return;
1774 1724
1775 if(irq >= 32) 1725 if (irq >= 32)
1776 /* You can only have 32 interrupts in a voyager system 1726 /* You can only have 32 interrupts in a voyager system
1777 * (and 32 only if you have a secondary microchannel 1727 * (and 32 only if you have a secondary microchannel
1778 * bus) */ 1728 * bus) */
@@ -1780,8 +1730,8 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1780 1730
1781 for_each_online_cpu(cpu) { 1731 for_each_online_cpu(cpu) {
1782 unsigned long cpu_mask = 1 << cpu; 1732 unsigned long cpu_mask = 1 << cpu;
1783 1733
1784 if(cpu_mask & real_mask) { 1734 if (cpu_mask & real_mask) {
1785 /* enable the interrupt for this cpu */ 1735 /* enable the interrupt for this cpu */
1786 cpu_irq_affinity[cpu] |= irq_mask; 1736 cpu_irq_affinity[cpu] |= irq_mask;
1787 } else { 1737 } else {
@@ -1800,25 +1750,23 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
1800 unmask_vic_irq(irq); 1750 unmask_vic_irq(irq);
1801} 1751}
1802 1752
1803static void 1753static void ack_vic_irq(unsigned int irq)
1804ack_vic_irq(unsigned int irq)
1805{ 1754{
1806 if (irq & 8) { 1755 if (irq & 8) {
1807 outb(0x62,0x20); /* Specific EOI to cascade */ 1756 outb(0x62, 0x20); /* Specific EOI to cascade */
1808 outb(0x60|(irq & 7),0xA0); 1757 outb(0x60 | (irq & 7), 0xA0);
1809 } else { 1758 } else {
1810 outb(0x60 | (irq & 7),0x20); 1759 outb(0x60 | (irq & 7), 0x20);
1811 } 1760 }
1812} 1761}
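
The EOI helper above relies on the 8259's "specific EOI" command: OCW2 value 0x60 plus the 3-bit IRQ level, written to the master PIC at port 0x20 for IRQs 0-7, and, for IRQs 8-15, a specific EOI to the slave at 0xA0 plus a specific EOI for the cascade line (IRQ 2, hence 0x62) on the master. A tiny sketch that only computes the bytes and target ports; no port I/O is performed, it is purely illustrative:

#include <stdio.h>

/* compute the specific-EOI writes ack_vic_irq() would issue for an IRQ */
static void show_specific_eoi(unsigned int irq)
{
        if (irq & 8) {
                /* slave line: EOI the cascade on the master, then the slave itself */
                printf("irq %2u: outb(0x%02x, 0x20); outb(0x%02x, 0xA0)\n",
                       irq, 0x62, 0x60 | (irq & 7));
        } else {
                printf("irq %2u: outb(0x%02x, 0x20)\n", irq, 0x60 | (irq & 7));
        }
}

int main(void)
{
        show_specific_eoi(3);   /* master line -> outb(0x63, 0x20)            */
        show_specific_eoi(12);  /* slave line  -> 0x62 to 0x20, 0x64 to 0xA0  */
        return 0;
}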
1813 1762
1814/* enable the CPIs. In the VIC, the CPIs are delivered by the 8259 1763/* enable the CPIs. In the VIC, the CPIs are delivered by the 8259
1815 * but are not vectored by it. This means that the 8259 mask must be 1764 * but are not vectored by it. This means that the 8259 mask must be
1816 * lowered to receive them */ 1765 * lowered to receive them */
1817static __init void 1766static __init void vic_enable_cpi(void)
1818vic_enable_cpi(void)
1819{ 1767{
1820 __u8 cpu = smp_processor_id(); 1768 __u8 cpu = smp_processor_id();
1821 1769
1822 /* just take a copy of the current mask (nop for boot cpu) */ 1770 /* just take a copy of the current mask (nop for boot cpu) */
1823 vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id]; 1771 vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
1824 1772
@@ -1827,7 +1775,7 @@ vic_enable_cpi(void)
1827 /* for sys int and cmn int */ 1775 /* for sys int and cmn int */
1828 enable_local_vic_irq(7); 1776 enable_local_vic_irq(7);
1829 1777
1830 if(is_cpu_quad()) { 1778 if (is_cpu_quad()) {
1831 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0); 1779 outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
1832 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1); 1780 outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
1833 VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n", 1781 VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
@@ -1838,8 +1786,7 @@ vic_enable_cpi(void)
1838 cpu, vic_irq_mask[cpu])); 1786 cpu, vic_irq_mask[cpu]));
1839} 1787}
1840 1788
1841void 1789void voyager_smp_dump()
1842voyager_smp_dump()
1843{ 1790{
1844 int old_cpu = smp_processor_id(), cpu; 1791 int old_cpu = smp_processor_id(), cpu;
1845 1792
@@ -1865,10 +1812,10 @@ voyager_smp_dump()
1865 cpu, vic_irq_mask[cpu], imr, irr, isr); 1812 cpu, vic_irq_mask[cpu], imr, irr, isr);
1866#if 0 1813#if 0
1867 /* These lines are put in to try to unstick an un ack'd irq */ 1814 /* These lines are put in to try to unstick an un ack'd irq */
1868 if(isr != 0) { 1815 if (isr != 0) {
1869 int irq; 1816 int irq;
1870 for(irq=0; irq<16; irq++) { 1817 for (irq = 0; irq < 16; irq++) {
1871 if(isr & (1<<irq)) { 1818 if (isr & (1 << irq)) {
1872 printk("\tCPU%d: ack irq %d\n", 1819 printk("\tCPU%d: ack irq %d\n",
1873 cpu, irq); 1820 cpu, irq);
1874 local_irq_save(flags); 1821 local_irq_save(flags);
@@ -1884,17 +1831,15 @@ voyager_smp_dump()
1884 } 1831 }
1885} 1832}
1886 1833
1887void 1834void smp_voyager_power_off(void *dummy)
1888smp_voyager_power_off(void *dummy)
1889{ 1835{
1890 if(smp_processor_id() == boot_cpu_id) 1836 if (smp_processor_id() == boot_cpu_id)
1891 voyager_power_off(); 1837 voyager_power_off();
1892 else 1838 else
1893 smp_stop_cpu_function(NULL); 1839 smp_stop_cpu_function(NULL);
1894} 1840}
1895 1841
1896static void __init 1842static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
1897voyager_smp_prepare_cpus(unsigned int max_cpus)
1898{ 1843{
1899 /* FIXME: ignore max_cpus for now */ 1844 /* FIXME: ignore max_cpus for now */
1900 smp_boot_cpus(); 1845 smp_boot_cpus();
@@ -1911,8 +1856,7 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
1911 cpu_set(smp_processor_id(), cpu_present_map); 1856 cpu_set(smp_processor_id(), cpu_present_map);
1912} 1857}
1913 1858
1914static int __cpuinit 1859static int __cpuinit voyager_cpu_up(unsigned int cpu)
1915voyager_cpu_up(unsigned int cpu)
1916{ 1860{
1917 /* This only works at boot for x86. See "rewrite" above. */ 1861 /* This only works at boot for x86. See "rewrite" above. */
1918 if (cpu_isset(cpu, smp_commenced_mask)) 1862 if (cpu_isset(cpu, smp_commenced_mask))
@@ -1928,14 +1872,12 @@ voyager_cpu_up(unsigned int cpu)
1928 return 0; 1872 return 0;
1929} 1873}
1930 1874
1931static void __init 1875static void __init voyager_smp_cpus_done(unsigned int max_cpus)
1932voyager_smp_cpus_done(unsigned int max_cpus)
1933{ 1876{
1934 zap_low_mappings(); 1877 zap_low_mappings();
1935} 1878}
1936 1879
1937void __init 1880void __init smp_setup_processor_id(void)
1938smp_setup_processor_id(void)
1939{ 1881{
1940 current_thread_info()->cpu = hard_smp_processor_id(); 1882 current_thread_info()->cpu = hard_smp_processor_id();
1941 x86_write_percpu(cpu_number, hard_smp_processor_id()); 1883 x86_write_percpu(cpu_number, hard_smp_processor_id());
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
index 50f9366c411e..c69c931818ed 100644
--- a/arch/x86/mach-voyager/voyager_thread.c
+++ b/arch/x86/mach-voyager/voyager_thread.c
@@ -30,12 +30,10 @@
30#include <asm/mtrr.h> 30#include <asm/mtrr.h>
31#include <asm/msr.h> 31#include <asm/msr.h>
32 32
33
34struct task_struct *voyager_thread; 33struct task_struct *voyager_thread;
35static __u8 set_timeout; 34static __u8 set_timeout;
36 35
37static int 36static int execute(const char *string)
38execute(const char *string)
39{ 37{
40 int ret; 38 int ret;
41 39
@@ -52,48 +50,48 @@ execute(const char *string)
52 NULL, 50 NULL,
53 }; 51 };
54 52
55 if ((ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) { 53 if ((ret =
56 printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", 54 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
57 string, ret); 55 printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string,
56 ret);
58 } 57 }
59 return ret; 58 return ret;
60} 59}
61 60
62static void 61static void check_from_kernel(void)
63check_from_kernel(void)
64{ 62{
65 if(voyager_status.switch_off) { 63 if (voyager_status.switch_off) {
66 64
67 /* FIXME: This should be configurable via proc */ 65 /* FIXME: This should be configurable via proc */
68 execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1"); 66 execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
69 } else if(voyager_status.power_fail) { 67 } else if (voyager_status.power_fail) {
70 VDEBUG(("Voyager daemon detected AC power failure\n")); 68 VDEBUG(("Voyager daemon detected AC power failure\n"));
71 69
72 		/* FIXME: This should be configurable via proc */ 70 		/* FIXME: This should be configurable via proc */
73 execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1"); 71 execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1");
74 set_timeout = 1; 72 set_timeout = 1;
75 } 73 }
76} 74}
77 75
78static void 76static void check_continuing_condition(void)
79check_continuing_condition(void)
80{ 77{
81 if(voyager_status.power_fail) { 78 if (voyager_status.power_fail) {
82 __u8 data; 79 __u8 data;
83 voyager_cat_psi(VOYAGER_PSI_SUBREAD, 80 voyager_cat_psi(VOYAGER_PSI_SUBREAD,
84 VOYAGER_PSI_AC_FAIL_REG, &data); 81 VOYAGER_PSI_AC_FAIL_REG, &data);
85 if((data & 0x1f) == 0) { 82 if ((data & 0x1f) == 0) {
86 /* all power restored */ 83 /* all power restored */
87 printk(KERN_NOTICE "VOYAGER AC power restored, cancelling shutdown\n"); 84 printk(KERN_NOTICE
85 "VOYAGER AC power restored, cancelling shutdown\n");
88 		/* FIXME: should be user configurable */ 86 		/* FIXME: should be user configurable */
89 execute("umask 600; echo O > /etc/powerstatus; kill -PWR 1"); 87 execute
88 ("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
90 set_timeout = 0; 89 set_timeout = 0;
91 } 90 }
92 } 91 }
93} 92}
94 93
95static int 94static int thread(void *unused)
96thread(void *unused)
97{ 95{
98 printk(KERN_NOTICE "Voyager starting monitor thread\n"); 96 printk(KERN_NOTICE "Voyager starting monitor thread\n");
99 97
@@ -102,7 +100,7 @@ thread(void *unused)
102 schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT); 100 schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
103 101
104 VDEBUG(("Voyager Daemon awoken\n")); 102 VDEBUG(("Voyager Daemon awoken\n"));
105 if(voyager_status.request_from_kernel == 0) { 103 if (voyager_status.request_from_kernel == 0) {
106 /* probably awoken from timeout */ 104 /* probably awoken from timeout */
107 check_continuing_condition(); 105 check_continuing_condition();
108 } else { 106 } else {
@@ -112,20 +110,18 @@ thread(void *unused)
112 } 110 }
113} 111}
114 112
115static int __init 113static int __init voyager_thread_start(void)
116voyager_thread_start(void)
117{ 114{
118 voyager_thread = kthread_run(thread, NULL, "kvoyagerd"); 115 voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
119 if (IS_ERR(voyager_thread)) { 116 if (IS_ERR(voyager_thread)) {
120 printk(KERN_ERR "Voyager: Failed to create system monitor thread.\n"); 117 printk(KERN_ERR
118 "Voyager: Failed to create system monitor thread.\n");
121 return PTR_ERR(voyager_thread); 119 return PTR_ERR(voyager_thread);
122 } 120 }
123 return 0; 121 return 0;
124} 122}
125 123
126 124static void __exit voyager_thread_stop(void)
127static void __exit
128voyager_thread_stop(void)
129{ 125{
130 kthread_stop(voyager_thread); 126 kthread_stop(voyager_thread);
131} 127}