Diffstat (limited to 'arch/mips/sgi-ip27')
-rw-r--r--	arch/mips/sgi-ip27/ip27-init.c		22
-rw-r--r--	arch/mips/sgi-ip27/ip27-memory.c	117
-rw-r--r--	arch/mips/sgi-ip27/ip27-smp.c		5
-rw-r--r--	arch/mips/sgi-ip27/ip27-timer.c		27
4 files changed, 39 insertions, 132 deletions
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 7093e7c573a4..4a500e8cd3cc 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -161,27 +161,6 @@ cnodeid_t get_compact_nodeid(void)
 	return NASID_TO_COMPACT_NODEID(get_nasid());
 }
 
-/* Extracted from the IOC3 meta driver. FIXME. */
-static inline void ioc3_sio_init(void)
-{
-	struct ioc3 *ioc3;
-	nasid_t nid;
-	long loops;
-
-	nid = get_nasid();
-	ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
-
-	ioc3->sscr_a = 0;	/* PIO mode for uarta. */
-	ioc3->sscr_b = 0;	/* PIO mode for uartb. */
-	ioc3->sio_iec = ~0;
-	ioc3->sio_ies = (SIO_IR_SA_INT | SIO_IR_SB_INT);
-
-	loops=1000000; while(loops--);
-	ioc3->sregs.uarta.iu_fcr = 0;
-	ioc3->sregs.uartb.iu_fcr = 0;
-	loops=1000000; while(loops--);
-}
-
 static inline void ioc3_eth_init(void)
 {
 	struct ioc3 *ioc3;
@@ -234,7 +213,6 @@ void __init plat_mem_setup(void)
 	panic("Kernel compiled for N mode.");
 #endif
 
-	ioc3_sio_init();
 	ioc3_eth_init();
 	per_cpu_init();
 
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index bf438d02366e..42cd10956306 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -33,10 +33,6 @@
 #define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
 #define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)
 
-#define SLOT_IGNORED		0xffff
-
-static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
-static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
 static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];
 
 struct node_data *__node_data[MAX_COMPACT_NODES];
@@ -267,51 +263,6 @@ static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
 	return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-/*
- * Return the number of pages of memory provided by the given slot
- * on the specified node.
- */
-static pfn_t __init slot_getsize(cnodeid_t node, int slot)
-{
-	return (pfn_t) slot_psize_cache[node][slot];
-}
-
-/*
- * Return highest slot filled
- */
-static int __init node_getlastslot(cnodeid_t node)
-{
-	return (int) slot_lastfilled_cache[node];
-}
-
-/*
- * Return the pfn of the last free page of memory on a node.
- */
-static pfn_t __init node_getmaxclick(cnodeid_t node)
-{
-	pfn_t slot_psize;
-	int slot;
-
-	/*
-	 * Start at the top slot. When we find a slot with memory in it,
-	 * that's the winner.
-	 */
-	for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
-		if ((slot_psize = slot_getsize(node, slot))) {
-			if (slot_psize == SLOT_IGNORED)
-				continue;
-			/* Return the basepfn + the slot size, minus 1. */
-			return slot_getbasepfn(node, slot) + slot_psize - 1;
-		}
-	}
-
-	/*
-	 * If there's no memory on the node, return 0. This is likely
-	 * to cause problems.
-	 */
-	return 0;
-}
-
 static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
 {
 	nasid_t nasid;
@@ -404,13 +355,13 @@ static void __init mlreset(void)
 static void __init szmem(void)
 {
 	pfn_t slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
-	int slot, ignore;
+	int slot;
 	cnodeid_t node;
 
 	num_physpages = 0;
 
 	for_each_online_node(node) {
-		ignore = nodebytes = 0;
+		nodebytes = 0;
 		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
 			slot_psize = slot_psize_compute(node, slot);
 			if (slot == 0)
@@ -420,21 +371,20 @@ static void __init szmem(void)
 			 * kernel text.
 			 */
 			nodebytes += (1LL << SLOT_SHIFT);
+
+			if (!slot_psize)
+				continue;
+
 			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
-			    (slot0sz << PAGE_SHIFT))
-				ignore = 1;
-			if (ignore && slot_psize) {
+			    (slot0sz << PAGE_SHIFT)) {
 				printk("Ignoring slot %d onwards on node %d\n",
 						slot, node);
-				slot_psize_cache[node][slot] = SLOT_IGNORED;
 				slot = MAX_MEM_SLOTS;
 				continue;
 			}
 			num_physpages += slot_psize;
-			slot_psize_cache[node][slot] =
-				(unsigned short) slot_psize;
-			if (slot_psize)
-				slot_lastfilled_cache[node] = slot;
+			add_active_range(node, slot_getbasepfn(node, slot),
+					 slot_getbasepfn(node, slot) + slot_psize);
 		}
 	}
 }
@@ -442,18 +392,20 @@ static void __init szmem(void)
 static void __init node_mem_init(cnodeid_t node)
 {
 	pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-	pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
 	pfn_t slot_freepfn = node_getfirstfree(node);
-	struct pglist_data *pd;
 	unsigned long bootmap_size;
+	pfn_t start_pfn, end_pfn;
+
+	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
 	/*
 	 * Allocate the node data structures on the node first.
 	 */
 	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
 
-	pd = NODE_DATA(node);
-	pd->bdata = &plat_node_bdata[node];
+	NODE_DATA(node)->bdata = &plat_node_bdata[node];
+	NODE_DATA(node)->node_start_pfn = start_pfn;
+	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
 	cpus_clear(hub_data(node)->h_cpus);
 
@@ -461,12 +413,12 @@ static void __init node_mem_init(cnodeid_t node)
 			 sizeof(struct hub_data));
 
 	bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
-				slot_firstpfn, slot_lastpfn);
-	free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
-			(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
+				start_pfn, end_pfn);
+	free_bootmem_with_active_regions(node, end_pfn);
 	reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
 		((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
 		BOOTMEM_DEFAULT);
+	sparse_memory_present_with_active_regions(node);
 }
 
 /*
@@ -515,16 +467,15 @@ void __init paging_init(void)
 	pagetable_init();
 
 	for_each_online_node(node) {
-		pfn_t start_pfn = slot_getbasepfn(node, 0);
-		pfn_t end_pfn = node_getmaxclick(node) + 1;
+		pfn_t start_pfn, end_pfn;
 
-		zones_size[ZONE_NORMAL] = end_pfn - start_pfn;
-		free_area_init_node(node, NODE_DATA(node),
-				zones_size, start_pfn, NULL);
+		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
 		if (end_pfn > max_low_pfn)
 			max_low_pfn = end_pfn;
 	}
+	zones_size[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(zones_size);
 }
 
 void __init mem_init(void)
@@ -535,34 +486,10 @@ void __init mem_init(void)
 	high_memory = (void *) __va(num_physpages << PAGE_SHIFT);
 
 	for_each_online_node(node) {
-		unsigned slot, numslots;
-		struct page *end, *p;
-
 		/*
 		 * This will free up the bootmem, ie, slot 0 memory.
 		 */
 		totalram_pages += free_all_bootmem_node(NODE_DATA(node));
-
-		/*
-		 * We need to manually do the other slots.
-		 */
-		numslots = node_getlastslot(node);
-		for (slot = 1; slot <= numslots; slot++) {
-			p = nid_page_nr(node, slot_getbasepfn(node, slot) -
-					      slot_getbasepfn(node, 0));
-
-			/*
-			 * Free valid memory in current slot.
-			 */
-			for (end = p + slot_getsize(node, slot); p < end; p++) {
-				/* if (!page_is_ram(pgnr)) continue; */
-				/* commented out until page_is_ram works */
-				ClearPageReserved(p);
-				init_page_count(p);
-				__free_page(p);
-				totalram_pages++;
-			}
-		}
 	}
 
 	totalram_pages -= setup_zero_pages();	/* This comes from node 0 */
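
The ip27-memory.c changes above drop the private slot bookkeeping in favour of the generic early node map: szmem() now reports every usable slot through add_active_range(), while node_mem_init() and paging_init() read a node's span back with get_pfn_range_for_nid() and hand zone sizing to free_area_init_nodes(). A minimal sketch of that producer/consumer pattern follows; it is not part of the patch, it assumes the 2.6.24-era early-node-map API, and the node ids and pfn ranges are made up:

/*
 * Illustrative sketch only -- not part of the patch.  Shows the
 * register-then-query pattern the new szmem()/node_mem_init() rely on.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>

static void __init example_register_node_memory(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* Producer: the platform reports each usable physical range. */
	add_active_range(0, 0x01000, 0x05000);	/* hypothetical node 0 */
	add_active_range(1, 0x08000, 0x0a000);	/* hypothetical node 1 */

	/* Consumer: the core VM hands back the span covering a node ... */
	get_pfn_range_for_nid(0, &start_pfn, &end_pfn);
	printk("node 0 spans pfns %lx-%lx\n", start_pfn, end_pfn);

	/* ... and sizes every node's zones from the same shared map. */
	max_zone_pfns[ZONE_NORMAL] = 0x0a000;
	free_area_init_nodes(max_zone_pfns);
}

The point of the pattern is that the platform only reports ranges; node spans, zone sizes and the memory map all fall out of the one shared early node map, which is what lets slot_psize_cache, node_getmaxclick() and the manual page freeing in mem_init() go away.
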
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f15fc93d6b35..ba5cdebeaf0d 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -176,11 +176,14 @@ static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
 static void __cpuinit ip27_init_secondary(void)
 {
 	per_cpu_init();
-	local_irq_enable();
 }
 
 static void __cpuinit ip27_smp_finish(void)
 {
+	extern void hub_rt_clock_event_init(void);
+
+	hub_rt_clock_event_init();
+	local_irq_enable();
 }
 
 static void __init ip27_cpus_done(void)
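
The ip27-smp.c hunk is purely about ordering: local_irq_enable() moves out of ip27_init_secondary() and into ip27_smp_finish(), after the now per-CPU hub_rt_clock_event_init() has registered this CPU's tick device, so a secondary CPU can no longer take its first RT-counter interrupt before a clock_event_device exists for it. A stripped-down sketch of that bring-up order (a restatement of the patch, with example_* names and placeholder comments):

/* Sketch of the secondary-CPU bring-up order implied by the patch. */
static void __cpuinit example_init_secondary(void)
{
	per_cpu_init();			/* hub/IRQ setup; interrupts stay masked */
}

static void __cpuinit example_smp_finish(void)
{
	hub_rt_clock_event_init();	/* register this CPU's tick device first ... */
	local_irq_enable();		/* ... only then start taking interrupts */
}
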
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 9cebc9e7da63..8b4e854af925 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -160,10 +160,13 @@ static void rt_set_mode(enum clock_event_mode mode,
 
 int rt_timer_irq;
 
+static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
+static DEFINE_PER_CPU(char [11], hub_rt_name);
+
 static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
 {
-	struct clock_event_device *cd = dev_id;
 	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
 	int slice = cputoslice(cpu);
 
 	/*
@@ -192,10 +195,7 @@ struct irqaction hub_rt_irqaction = {
 #define NSEC_PER_CYCLE		800
 #define CYCLES_PER_SEC		(NSEC_PER_SEC / NSEC_PER_CYCLE)
 
-static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
-static DEFINE_PER_CPU(char [11], hub_rt_name);
-
-static void __cpuinit hub_rt_clock_event_init(void)
+void __cpuinit hub_rt_clock_event_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
@@ -203,17 +203,16 @@ static void __cpuinit hub_rt_clock_event_init(void)
 	int irq = rt_timer_irq;
 
 	sprintf(name, "hub-rt %d", cpu);
-	cd->name = "HUB-RT",
-	cd->features = CLOCK_EVT_FEAT_ONESHOT,
+	cd->name = name;
+	cd->features = CLOCK_EVT_FEAT_ONESHOT;
 	clockevent_set_clock(cd, CYCLES_PER_SEC);
 	cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
 	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
-	cd->rating = 200,
-	cd->irq = irq,
-	cd->cpumask = cpumask_of_cpu(cpu),
-	cd->rating = 300,
-	cd->set_next_event = rt_next_event,
-	cd->set_mode = rt_set_mode,
+	cd->rating = 200;
+	cd->irq = irq;
+	cd->cpumask = cpumask_of_cpu(cpu);
+	cd->set_next_event = rt_next_event;
+	cd->set_mode = rt_set_mode;
 	clockevents_register_device(cd);
 }
 
@@ -261,6 +260,7 @@ void __init plat_time_init(void)
 {
 	hub_rt_clocksource_init();
 	hub_rt_clock_event_global_init();
+	hub_rt_clock_event_init();
 }
 
 void __cpuinit cpu_time_init(void)
@@ -281,7 +281,6 @@ void __cpuinit cpu_time_init(void)
 
 	printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
 
-	hub_rt_clock_event_init();
 	set_c0_status(SRB_TIMOCLK);
 
 
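
In ip27-timer.c the per-CPU clock_event_device and its name buffer move above the interrupt handler, the handler looks its device up via per_cpu() instead of trusting dev_id, and hub_rt_clock_event_init() loses its static so ip27_smp_finish() can call it. The same hunk replaces the comma operators that had fused the field assignments into one expression statement and left a duplicate cd->rating. Below is a hedged sketch of per-CPU clockevent registration in that era's style; it is not the patch itself, the example_* names, rating, count rate and hardware programming are placeholders, and div_sc() stands in for the clockevent_set_clock() helper the real code uses:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/time.h>

static DEFINE_PER_CPU(struct clock_event_device, example_clockevent);
static DEFINE_PER_CPU(char [16], example_clockevent_name);

static int example_next_event(unsigned long delta, struct clock_event_device *cd)
{
	/* program the per-CPU comparator 'delta' cycles into the future */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *cd)
{
	/* nothing to do for a one-shot-only timer */
}

void __cpuinit example_clock_event_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd = &per_cpu(example_clockevent, cpu);
	char *name = per_cpu(example_clockevent_name, cpu);

	sprintf(name, "example %d", cpu);
	cd->name = name;			/* per-CPU name; note ';', not ',' */
	cd->features = CLOCK_EVT_FEAT_ONESHOT;
	cd->shift = 32;
	cd->mult = div_sc(1000000, NSEC_PER_SEC, 32);	/* assumed 1 MHz count rate */
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
	cd->rating = 200;
	cd->cpumask = cpumask_of_cpu(cpu);
	cd->set_next_event = example_next_event;
	cd->set_mode = example_set_mode;
	clockevents_register_device(cd);
}

Calling such a routine once on each processor from the SMP finish path, as the patch does with hub_rt_clock_event_init(), is what gives every CPU its own tick device instead of one shared one.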