aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRoman Zippel <zippel@linux-m68k.org>2007-05-01 16:32:45 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-04 20:59:06 -0400
commitd6713b4091a99fa2af2fabdcd2f3fb97f32ecf2e (patch)
tree7cabd0ff35f9ec4413ba936ddb203d13dffb1550
parentf8744bc95dac461cef40df7143756d1bfa393991 (diff)
m68k: early parameter support
Add early parameter support and convert current users to it. Signed-off-by: Roman Zippel <zippel@linux-m68k.org> Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/m68k/amiga/config.c41
-rw-r--r--arch/m68k/atari/config.c17
-rw-r--r--arch/m68k/atari/debug.c24
-rw-r--r--arch/m68k/kernel/setup.c39
-rw-r--r--arch/m68k/mac/config.c8
-rw-r--r--arch/m68k/mac/debug.c20
-rw-r--r--arch/m68k/q40/config.c23
-rw-r--r--arch/m68k/sun3x/prom.c11
8 files changed, 77 insertions, 106 deletions
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index c3b69b1b0c..3574853132 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -79,8 +79,6 @@ static char *amiga_models[] __initdata = {
79 79
80static char amiga_model_name[13] = "Amiga "; 80static char amiga_model_name[13] = "Amiga ";
81 81
82extern char m68k_debug_device[];
83
84static void amiga_sched_init(irq_handler_t handler); 82static void amiga_sched_init(irq_handler_t handler);
85/* amiga specific irq functions */ 83/* amiga specific irq functions */
86extern void amiga_init_IRQ(void); 84extern void amiga_init_IRQ(void);
@@ -95,12 +93,10 @@ static unsigned int amiga_get_ss(void);
95extern void amiga_mksound(unsigned int count, unsigned int ticks); 93extern void amiga_mksound(unsigned int count, unsigned int ticks);
96static void amiga_reset(void); 94static void amiga_reset(void);
97extern void amiga_init_sound(void); 95extern void amiga_init_sound(void);
98static void amiga_savekmsg_init(void);
99static void amiga_mem_console_write(struct console *co, const char *b, 96static void amiga_mem_console_write(struct console *co, const char *b,
100 unsigned int count); 97 unsigned int count);
101void amiga_serial_console_write(struct console *co, const char *s, 98void amiga_serial_console_write(struct console *co, const char *s,
102 unsigned int count); 99 unsigned int count);
103static void amiga_debug_init(void);
104#ifdef CONFIG_HEARTBEAT 100#ifdef CONFIG_HEARTBEAT
105static void amiga_heartbeat(int on); 101static void amiga_heartbeat(int on);
106#endif 102#endif
@@ -370,7 +366,6 @@ void __init config_amiga(void)
370{ 366{
371 int i; 367 int i;
372 368
373 amiga_debug_init();
374 amiga_identify(); 369 amiga_identify();
375 370
376 /* Yuk, we don't have PCI memory */ 371 /* Yuk, we don't have PCI memory */
@@ -458,17 +453,6 @@ void __init config_amiga(void)
458 /* initialize chipram allocator */ 453 /* initialize chipram allocator */
459 amiga_chip_init(); 454 amiga_chip_init();
460 455
461 /* debugging using chipram */
462 if (!strcmp(m68k_debug_device, "mem")) {
463 if (!AMIGAHW_PRESENT(CHIP_RAM))
464 printk("Warning: no chipram present for debugging\n");
465 else {
466 amiga_savekmsg_init();
467 amiga_console_driver.write = amiga_mem_console_write;
468 register_console(&amiga_console_driver);
469 }
470 }
471
472 /* our beloved beeper */ 456 /* our beloved beeper */
473 if (AMIGAHW_PRESENT(AMI_AUDIO)) 457 if (AMIGAHW_PRESENT(AMI_AUDIO))
474 amiga_init_sound(); 458 amiga_init_sound();
@@ -787,17 +771,33 @@ static void amiga_mem_console_write(struct console *co, const char *s,
787 } 771 }
788} 772}
789 773
790static void amiga_savekmsg_init(void) 774static int __init amiga_savekmsg_setup(char *arg)
791{ 775{
792 static struct resource debug_res = { .name = "Debug" }; 776 static struct resource debug_res = { .name = "Debug" };
793 777
778 if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
779 goto done;
780
781 if (!AMIGAHW_PRESENT(CHIP_RAM)) {
782 printk("Warning: no chipram present for debugging\n");
783 goto done;
784 }
785
794 savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res); 786 savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res);
795 savekmsg->magic1 = SAVEKMSG_MAGIC1; 787 savekmsg->magic1 = SAVEKMSG_MAGIC1;
796 savekmsg->magic2 = SAVEKMSG_MAGIC2; 788 savekmsg->magic2 = SAVEKMSG_MAGIC2;
797 savekmsg->magicptr = ZTWO_PADDR(savekmsg); 789 savekmsg->magicptr = ZTWO_PADDR(savekmsg);
798 savekmsg->size = 0; 790 savekmsg->size = 0;
791
792 amiga_console_driver.write = amiga_mem_console_write;
793 register_console(&amiga_console_driver);
794
795done:
796 return 0;
799} 797}
800 798
799early_param("debug", amiga_savekmsg_setup);
800
801static void amiga_serial_putc(char c) 801static void amiga_serial_putc(char c)
802{ 802{
803 amiga_custom.serdat = (unsigned char)c | 0x100; 803 amiga_custom.serdat = (unsigned char)c | 0x100;
@@ -872,15 +872,18 @@ void amiga_serial_gets(struct console *co, char *s, int len)
872} 872}
873#endif 873#endif
874 874
875static void __init amiga_debug_init(void) 875static int __init amiga_debug_setup(char *arg)
876{ 876{
877 if (!strcmp(m68k_debug_device, "ser" )) { 877 if (MACH_IS_AMIGA && !strcmp(arg, "ser")) {
878 /* no initialization required (?) */ 878 /* no initialization required (?) */
879 amiga_console_driver.write = amiga_serial_console_write; 879 amiga_console_driver.write = amiga_serial_console_write;
880 register_console(&amiga_console_driver); 880 register_console(&amiga_console_driver);
881 } 881 }
882 return 0;
882} 883}
883 884
885early_param("debug", amiga_debug_setup);
886
884#ifdef CONFIG_HEARTBEAT 887#ifdef CONFIG_HEARTBEAT
885static void amiga_heartbeat(int on) 888static void amiga_heartbeat(int on)
886{ 889{
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index b10e7addae..e40e5dcaa3 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -69,9 +69,6 @@ extern int atari_tt_hwclk (int, struct rtc_time *);
69extern int atari_mste_set_clock_mmss (unsigned long); 69extern int atari_mste_set_clock_mmss (unsigned long);
70extern int atari_tt_set_clock_mmss (unsigned long); 70extern int atari_tt_set_clock_mmss (unsigned long);
71 71
72/* atari specific debug functions (in debug.c) */
73extern void atari_debug_init(void);
74
75 72
76/* ++roman: This is a more elaborate test for an SCC chip, since the plain 73/* ++roman: This is a more elaborate test for an SCC chip, since the plain
77 * Medusa board generates DTACK at the SCC's standard addresses, but a SCC 74 * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -137,15 +134,18 @@ int __init atari_parse_bootinfo(const struct bi_record *record)
137 134
138 135
139/* Parse the Atari-specific switches= option. */ 136/* Parse the Atari-specific switches= option. */
140void __init atari_switches_setup(const char *str, unsigned len) 137static int __init atari_switches_setup(char *str)
141{ 138{
142 char switches[len+1]; 139 char switches[strlen(str) + 1];
143 char *p; 140 char *p;
144 int ovsc_shift; 141 int ovsc_shift;
145 char *args = switches; 142 char *args = switches;
146 143
144 if (!MACH_IS_ATARI)
145 return 0;
146
147 /* copy string to local array, strsep works destructively... */ 147 /* copy string to local array, strsep works destructively... */
/*
 * NOTE(review): everything from here to the end of the file appears to be a
 * whitespace-collapsed copy of a log-structured filesystem's garbage-collector
 * source (rb-tree candidate lists, segment scanning, wear leveling), fused
 * mid-line onto the tail of the preceding patch text.  The first visible
 * fragment ("n>list->rb_tree.rb_node; ...") is the interior of an rb-tree
 * insertion helper whose signature and the declaration of 'p' were destroyed
 * at the seam -- do not attempt to rebuild it without the upstream original.
 * Also beware: one physical line below contains a '//' comment, which (because
 * the code was collapsed onto single lines) comments out the remainder of that
 * physical line; the text is preserved here exactly as found.
 *
 * Complete definitions visible in this region (several functions share one
 * physical line, and single functions straddle line breaks):
 *  - remove_from_list()/free_candidate(): detach a gc_candidate from its
 *    list's rb-tree / remove it from the 32-bit btree and kfree() it.
 *  - get_best_cand(): pop the first (best) candidate of a list, return its
 *    segment number and, optionally via *ec, its erase count.
 *  - __add_candidate()/add_candidate(): allocate a candidate and file it on
 *    the reserve/free/low/ec lists according to its valid-byte count.
 *  - remove_segment_from_lists()/scan_segment(): refresh one segment's
 *    candidate entry from logfs_valid_bytes().
 *  - first_in_list()/get_candidate()/__logfs_gc_once()/logfs_gc_once():
 *    pick and collect the cheapest segment on the lowest available level.
 *  - logfs_scan_some()/__logfs_gc_pass(): bounded scan/GC loop that also
 *    writes back object aliases (the in-code comments explain the goto flow).
 *  - wl_ratelimit()/logfs_wl_pass()/logfs_journal_wl_pass(): wear leveling
 *    for ostore and journal segments, rate-limited via s_gec.
 *  - logfs_gc_pass(): apparent public entry point; journal write-out, GC
 *    pass, then both wear-leveling passes.
 *  - check_area()/logfs_check_areas(): GC open areas whose write buffer may
 *    be incomplete after a crash.
 *  - logfs_init_candlist()/logfs_init_gc()/logfs_cleanup_list()/
 *    logfs_cleanup_gc(): candidate-list setup and teardown.
 */
148 strlcpy(sn>list->rb_tree.rb_node; struct rb_node *parent = NULL; struct gc_candidate *cur; int comp; cand->list = list; while (*p) { parent = *p; cur = rb_entry(parent, struct gc_candidate, rb_node); if (list->sort_by_ec) comp = cand->erase_count < cur->erase_count; else comp = cand->valid < cur->valid; if (comp) p = &parent->rb_left; else p = &parent->rb_right; } rb_link_node(&cand->rb_node, parent, p); rb_insert_color(&cand->rb_node, &list->rb_tree); if (list->count <= list->maxcount) { list->count++; return NULL; } cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node); rb_erase(&cand->rb_node, &list->rb_tree); cand->list = NULL; return cand; } static void remove_from_list(struct gc_candidate *cand) { struct candidate_list *list = cand->list; rb_erase(&cand->rb_node, &list->rb_tree); list->count--; } static void free_candidate(struct super_block *sb, struct gc_candidate *cand) { struct logfs_super *super = logfs_super(sb); btree_remove32(&super->s_cand_tree, cand->segno); kfree(cand); } u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec) { struct gc_candidate *cand; u32 segno; BUG_ON(list->count == 0); cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); remove_from_list(cand); segno = cand->segno; if (ec) *ec = cand->erase_count; free_candidate(sb, cand); return segno; } /* * We have several lists to manage segments with. The reserve_list is used to * deal with bad blocks. We try to keep the best (lowest ec) segments on this * list. * The free_list contains free segments for normal usage. It usually gets the * second pick after the reserve_list. But when the free_list is running short * it is more important to keep the free_list full than to keep a reserve. * * Segments that are not free are put onto a per-level low_list. If we have * to run garbage collection, we pick a candidate from there. All segments on * those lists should have at least some free space so GC will make progress. 
* * And last we have the ec_list, which is used to pick segments for wear * leveling. * * If all appropriate lists are full, we simply free the candidate and forget * about that segment for a while. We have better candidates for each purpose. */ static void __add_candidate(struct super_block *sb, struct gc_candidate *cand) { struct logfs_super *super = logfs_super(sb); u32 full = super->s_segsize - LOGFS_SEGMENT_RESERVE; if (cand->valid == 0) { /* 100% free segments */ log_gc_noisy("add reserve segment %x (ec %x) at %llx\n", cand->segno, cand->erase_count, dev_ofs(sb, cand->segno, 0)); cand = add_list(cand, &super->s_reserve_list); if (cand) { log_gc_noisy("add free segment %x (ec %x) at %llx\n", cand->segno, cand->erase_count, dev_ofs(sb, cand->segno, 0)); cand = add_list(cand, &super->s_free_list); } } else { /* good candidates for Garbage Collection */ if (cand->valid < full) cand = add_list(cand, &super->s_low_list[cand->dist]); /* good candidates for wear leveling, * segments that were recently written get ignored */ if (cand) cand = add_list(cand, &super->s_ec_list); } if (cand) free_candidate(sb, cand); } static int add_candidate(struct super_block *sb, u32 segno, u32 valid, u32 ec, u8 dist) { struct logfs_super *super = logfs_super(sb); struct gc_candidate *cand; cand = kmalloc(sizeof(*cand), GFP_NOFS); if (!cand) return -ENOMEM; cand->segno = segno; cand->valid = valid; cand->erase_count = ec; cand->dist = dist; btree_insert32(&super->s_cand_tree, segno, cand, GFP_NOFS); __add_candidate(sb, cand); return 0; } static void remove_segment_from_lists(struct super_block *sb, u32 segno) { struct logfs_super *super = logfs_super(sb); struct gc_candidate *cand; cand = btree_lookup32(&super->s_cand_tree, segno); if (cand) { remove_from_list(cand); free_candidate(sb, cand); } } static void scan_segment(struct super_block *sb, u32 segno) { u32 valid, ec = 0; gc_level_t gc_level = 0; u8 dist; if (segment_is_reserved(sb, segno)) return; remove_segment_from_lists(sb, 
/*
 * NOTE(review): scan_segment()'s call to remove_segment_from_lists(sb, segno)
 * straddles this physical line break; the argument list closes on the next
 * line.
 */
segno); valid = logfs_valid_bytes(sb, segno, &ec, &gc_level); if (valid == RESERVED) return; dist = root_distance(sb, gc_level); add_candidate(sb, segno, valid, ec, dist); } static struct gc_candidate *first_in_list(struct candidate_list *list) { if (list->count == 0) return NULL; return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); } /* * Find the best segment for garbage collection. Main criterion is * the segment requiring the least effort to clean. Secondary * criterion is to GC on the lowest level available. * * So we search the least effort segment on the lowest level first, * then move up and pick another segment iff is requires significantly * less effort. Hence the LOGFS_MAX_OBJECTSIZE in the comparison. */ static struct gc_candidate *get_candidate(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int i, max_dist; struct gc_candidate *cand = NULL, *this; max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS); for (i = max_dist; i >= 0; i--) { this = first_in_list(&super->s_low_list[i]); if (!this) continue; if (!cand) cand = this; if (this->valid + LOGFS_MAX_OBJECTSIZE <= cand->valid) cand = this; } return cand; } static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand) { struct logfs_super *super = logfs_super(sb); gc_level_t gc_level; u32 cleaned, valid, segno, ec; u8 dist; if (!cand) { log_gc("GC attempted, but no candidate found\n"); return 0; } segno = cand->segno; dist = cand->dist; valid = logfs_valid_bytes(sb, segno, &ec, &gc_level); free_candidate(sb, cand); log_gc("GC segment #%02x at %llx, %x required, %x free, %x valid, %llx free\n", segno, (u64)segno << super->s_segshift, dist, no_free_segments(sb), valid, super->s_free_bytes); cleaned = logfs_gc_segment(sb, segno); log_gc("GC segment #%02x complete - now %x valid\n", segno, valid - cleaned); BUG_ON(cleaned != valid); return 1; } static int logfs_gc_once(struct super_block *sb) { struct gc_candidate *cand; cand = get_candidate(sb); if 
/*
 * NOTE(review): logfs_gc_once() likewise straddles the break: the "if" above
 * continues with "(cand) remove_from_list(cand); ..." on the next line.
 */
(cand) remove_from_list(cand); return __logfs_gc_once(sb, cand); } /* returns 1 if a wrap occurs, 0 otherwise */ static int logfs_scan_some(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); u32 segno; int i, ret = 0; segno = super->s_sweeper; for (i = SCAN_RATIO; i > 0; i--) { segno++; if (segno >= super->s_no_segs) { segno = 0; ret = 1; /* Break out of the loop. We want to read a single * block from the segment size on next invocation if * SCAN_RATIO is set to match block size */ break; } scan_segment(sb, segno); } super->s_sweeper = segno; return ret; } /* * In principle, this function should loop forever, looking for GC candidates * and moving data. LogFS is designed in such a way that this loop is * guaranteed to terminate. * * Limiting the loop to some iterations serves purely to catch cases when * these guarantees have failed. An actual endless loop is an obvious bug * and should be reported as such. */ static void __logfs_gc_pass(struct super_block *sb, int target) { struct logfs_super *super = logfs_super(sb); struct logfs_block *block; int round, progress, last_progress = 0; /* * Doing too many changes to the segfile at once would result * in a large number of aliases. Write the journal before * things get out of hand. */ if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES) logfs_write_anchor(sb); if (no_free_segments(sb) >= target && super->s_no_object_aliases < MAX_OBJ_ALIASES) return; log_gc("__logfs_gc_pass(%x)\n", target); for (round = 0; round < SCAN_ROUNDS; ) { if (no_free_segments(sb) >= target) goto write_alias; /* Sync in-memory state with on-medium state in case they * diverged */ logfs_write_anchor(sb); round += logfs_scan_some(sb); if (no_free_segments(sb) >= target) goto write_alias; progress = logfs_gc_once(sb); if (progress) last_progress = round; else if (round - last_progress > 2) break; continue; /* * The goto logic is nasty, I just don't know a better way to * code it. 
GC is supposed to ensure two things: * 1. Enough free segments are available. * 2. The number of aliases is bounded. * When 1. is achieved, we take a look at 2. and write back * some alias-containing blocks, if necessary. However, after * each such write we need to go back to 1., as writes can * consume free segments. */ write_alias: if (super->s_no_object_aliases < MAX_OBJ_ALIASES) return; if (list_empty(&super->s_object_alias)) { /* All aliases are still in btree */ return; } log_gc("Write back one alias\n"); block = list_entry(super->s_object_alias.next, struct logfs_block, alias_list); block->ops->write_block(block); /* * To round off the nasty goto logic, we reset round here. It * is a safety-net for GC not making any progress and limited * to something reasonably small. If incremented it for every * single alias, the loop could terminate rather quickly. */ round = 0; } LOGFS_BUG(sb); } static int wl_ratelimit(struct super_block *sb, u64 *next_event) { struct logfs_super *super = logfs_super(sb); if (*next_event < super->s_gec) { *next_event = super->s_gec + WL_RATELIMIT; return 0; } return 1; } static void logfs_wl_pass(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct gc_candidate *wl_cand, *free_cand; if (wl_ratelimit(sb, &super->s_wl_gec_ostore)) return; wl_cand = first_in_list(&super->s_ec_list); if (!wl_cand) return; free_cand = first_in_list(&super->s_free_list); if (!free_cand) return; if (wl_cand->erase_count < free_cand->erase_count + WL_DELTA) { remove_from_list(wl_cand); __logfs_gc_once(sb, wl_cand); } } /* * The journal needs wear leveling as well. But moving the journal is an * expensive operation so we try to avoid it as much as possible. And if we * have to do it, we move the whole journal, not individual segments. * * Ratelimiting is not strictly necessary here, it mainly serves to avoid the * calculations. First we check whether moving the journal would be a * significant improvement. 
That means that a) the current journal segments * have more wear than the future journal segments and b) the current journal * segments have more wear than normal ostore segments. * Rationale for b) is that we don't have to move the journal if it is aging * less than the ostore, even if the reserve segments age even less (they are * excluded from wear leveling, after all). * Next we check that the superblocks have less wear than the journal. Since * moving the journal requires writing the superblocks, we have to protect the * superblocks even more than the journal. * * Also we double the acceptable wear difference, compared to ostore wear * leveling. Journal data is read and rewritten rapidly, comparatively. So * soft errors have much less time to accumulate and we allow the journal to * be a bit worse than the ostore. */ static void logfs_journal_wl_pass(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct gc_candidate *cand; u32 min_journal_ec = -1, max_reserve_ec = 0; int i; if (wl_ratelimit(sb, &super->s_wl_gec_journal)) return; if (super->s_reserve_list.count < super->s_no_journal_segs) { /* Reserve is not full enough to move complete journal */ return; } journal_for_each(i) if (super->s_journal_seg[i]) min_journal_ec = min(min_journal_ec, super->s_journal_ec[i]); cand = rb_entry(rb_first(&super->s_free_list.rb_tree), struct gc_candidate, rb_node); max_reserve_ec = cand->erase_count; for (i = 0; i < 2; i++) { struct logfs_segment_entry se; u32 segno = seg_no(sb, super->s_sb_ofs[i]); u32 ec; logfs_get_segment_entry(sb, segno, &se); ec = be32_to_cpu(se.ec_level) >> 4; max_reserve_ec = max(max_reserve_ec, ec); } if (min_journal_ec > max_reserve_ec + 2 * WL_DELTA) { do_logfs_journal_wl_pass(sb); } } void logfs_gc_pass(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); //BUG_ON(mutex_trylock(&logfs_super(sb)->s_w_mutex)); /* Write journal before free space is getting saturated with dirty * objects. 
*/ if (super->s_dirty_used_bytes + super->s_dirty_free_bytes + LOGFS_MAX_OBJECTSIZE >= super->s_free_bytes) logfs_write_anchor(sb); __logfs_gc_pass(sb, super->s_total_levels); logfs_wl_pass(sb); logfs_journal_wl_pass(sb); } static int check_area(struct super_block *sb, int i) { struct logfs_super *super = logfs_super(sb); struct logfs_area *area = super->s_area[i]; gc_level_t gc_level; u32 cleaned, valid, ec; u32 segno = area->a_segno; u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes); if (!area->a_is_open) return 0; if (super->s_devops->can_write_buf(sb, ofs) == 0) return 0; printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs); /* * The device cannot write back the write buffer. Most likely the * wbuf was already written out and the system crashed at some point * before the journal commit happened. In that case we wouldn't have * to do anything. But if the crash happened before the wbuf was * written out correctly, we must GC this segment. So assume the * worst and always do the GC run. 
*/ area->a_is_open = 0; valid = logfs_valid_bytes(sb, segno, &ec, &gc_level); cleaned = logfs_gc_segment(sb, segno); if (cleaned != valid) return -EIO; return 0; } int logfs_check_areas(struct super_block *sb) { int i, err; for_each_area(i) { err = check_area(sb, i); if (err) return err; } return 0; } static void logfs_init_candlist(struct candidate_list *list, int maxcount, int sort_by_ec) { list->count = 0; list->maxcount = maxcount; list->sort_by_ec = sort_by_ec; list->rb_tree = RB_ROOT; } int logfs_init_gc(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int i; btree_init_mempool32(&super->s_cand_tree, super->s_btree_pool); logfs_init_candlist(&super->s_free_list, LIST_SIZE + SCAN_RATIO, 1); logfs_init_candlist(&super->s_reserve_list, super->s_bad_seg_reserve, 1); for_each_area(i) logfs_init_candlist(&super->s_low_list[i], LIST_SIZE, 0); logfs_init_candlist(&super->s_ec_list, LIST_SIZE, 1); return 0; } static void logfs_cleanup_list(struct super_block *sb, struct candidate_list *list) { struct gc_candidate *cand; while (list->count) { cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate, rb_node); remove_from_list(cand); free_candidate(sb, cand); } BUG_ON(list->rb_tree.rb_node); } void logfs_cleanup_gc(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); int i; if (!super->s_free_list.count) return; /* * FIXME: The btree may still contain a single empty node. So we * call the grim visitor to clean up that mess. Btree code should * do it for us, really. */ btree_grim_visitor32(&super->s_cand_tree, 0, NULL); logfs_cleanup_list(sb, &super->s_free_list); logfs_cleanup_list(sb, &super->s_reserve_list); for_each_area(i) logfs_cleanup_list(sb, &super->s_low_list[i]); logfs_cleanup_list(sb, &super->s_ec_list); }