Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                   1
-rw-r--r--  arch/m32r/boot/compressed/misc.c   37
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c       3
-rw-r--r--  arch/m32r/kernel/smp.c            132
-rw-r--r--  arch/m32r/kernel/traps.c            3
-rw-r--r--  arch/m32r/mm/discontig.c           10
-rw-r--r--  arch/m32r/mm/init.c                42
7 files changed, 23 insertions, 205 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index de153de2ea9f..a5f864c445b2 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -296,6 +296,7 @@ config PREEMPT
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 600d40e33495..d394292498c0 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -70,8 +70,6 @@ static unsigned outcnt = 0; /* bytes in output buffer */
 static int fill_inbuf(void);
 static void flush_window(void);
 static void error(char *m);
-static void gzip_mark(void **);
-static void gzip_release(void **);
 
 static unsigned char *input_data;
 static int input_len;
@@ -82,9 +80,6 @@ static unsigned long output_ptr = 0;
 
 #include "m32r_sio.c"
 
-static void *malloc(int size);
-static void free(void *where);
-
 static unsigned long free_mem_ptr;
 static unsigned long free_mem_end_ptr;
 
@@ -92,38 +87,6 @@ static unsigned long free_mem_end_ptr;
 
 #include "../../../../lib/inflate.c"
 
-static void *malloc(int size)
-{
-	void *p;
-
-	if (size <0) error("Malloc error");
-	if (free_mem_ptr == 0) error("Memory error");
-
-	free_mem_ptr = (free_mem_ptr + 3) & ~3;	/* Align */
-
-	p = (void *)free_mem_ptr;
-	free_mem_ptr += size;
-
-	if (free_mem_ptr >= free_mem_end_ptr)
-		error("Out of memory");
-
-	return p;
-}
-
-static void free(void *where)
-{	/* Don't care */
-}
-
-static void gzip_mark(void **ptr)
-{
-	*ptr = (void *) free_mem_ptr;
-}
-
-static void gzip_release(void **ptr)
-{
-	free_mem_ptr = (long) *ptr;
-}
-
 void* memset(void* s, int c, size_t n)
 {
 	int i;
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index e6709fe950ba..16bcb189a383 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -43,9 +43,6 @@ EXPORT_SYMBOL(dcache_dummy);
 #endif
 EXPORT_SYMBOL(cpu_data);
 
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
 /* TLB flushing */
 EXPORT_SYMBOL(smp_flush_tlb_page);
 #endif
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc13b015..7577f971ea4e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -35,22 +35,6 @@
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-static struct call_data_struct *call_data;
-
-/*
  * For flush_cache_all()
  */
 static DEFINE_SPINLOCK(flushcache_lock);
@@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void);
 void smp_send_stop(void);
 static void stop_this_cpu(void *);
 
-int smp_call_function(void (*) (void *), void *, int, int);
-void smp_call_function_interrupt(void);
-
 void smp_send_timer(void);
 void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
@@ -231,7 +212,7 @@ void smp_flush_tlb_all(void)
 	local_irq_save(flags);
 	__flush_tlb_all();
 	local_irq_restore(flags);
-	smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+	smp_call_function(flush_tlb_all_ipi, NULL, 1);
 	preempt_enable();
 }
 
@@ -524,7 +505,7 @@ void smp_invalidate_interrupt(void)
  *==========================================================================*/
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /*==========================================================================*
@@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routines                                                    */
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-
-/*==========================================================================*
- * Name:         smp_call_function
- *
- * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
- *               in the system.
- *
- * Born on Date: 2002.02.05
- *
- * Arguments:    *func - The function to run. This must be fast and
- *                       non-blocking.
- *               *info - An arbitrary pointer to pass to the function.
- *               nonatomic - currently unused.
- *               wait - If true, wait (atomically) until function has
- *                      completed on other CPUs.
- *
- * Returns:      0 on success, else a negative status code. Does not return
- *               until remote CPUs are nearly ready to execute <<func>> or
- *               are or have executed.
- *
- * Cautions:     You must not call this function with disabled interrupts or
- *               from a hardware interrupt handler, you may call it from a
- *               bottom half handler.
- *
- * Modification log:
- * Date       Who Description
- * ---------- --- --------------------------------------------------------
- *
- *==========================================================================*/
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-	int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-#ifdef DEBUG_SMP
-	unsigned long flags;
-	__save_flags(flags);
-	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
-		BUG();
-#endif /* DEBUG_SMP */
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	spin_unlock(&call_lock);
+	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
 
-	return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
  *==========================================================================*/
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_interrupt();
 	irq_exit();
+}
 
-	if (wait) {
-		mb();
-		atomic_inc(&call_data->finished);
-	}
+void smp_call_function_single_interrupt(void)
+{
+	irq_enter();
+	generic_smp_call_function_single_interrupt();
+	irq_exit();
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
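Note: for readers of the hunks above, here is a minimal caller-side sketch of the generic cross-call API this file now relies on, assuming the smp_call_function()/smp_call_function_single() signatures provided by kernel/smp.c once USE_GENERIC_SMP_HELPERS is selected; do_remote_work() and example() are illustrative names, not part of the patch.

	#include <linux/smp.h>

	/* Runs on the other CPUs in IPI context: must be fast and must not sleep. */
	static void do_remote_work(void *info)
	{
	}

	static void example(void)
	{
		/* all other online CPUs, wait for completion
		 * (the old, unused 'nonatomic' argument is gone) */
		smp_call_function(do_remote_work, NULL, 1);

		/* one specific CPU (CPU 1 here), without waiting */
		smp_call_function_single(1, do_remote_work, NULL, 0);
	}

The two arch_send_call_function*() hooks and the two *_interrupt() handlers added above are the only architecture-specific glue the generic implementation needs.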
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 89ba4a0b5d51..46159a4e644b 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void);
 extern void smp_call_function_interrupt(void);
 extern void smp_ipi_timer_interrupt(void);
 extern void smp_flush_cache_all_interrupt(void);
+extern void smp_call_function_single_interrupt(void);
 
 /*
  * for Boot AP function
@@ -103,7 +104,7 @@ void set_eit_vector_entries(void)
 	eit_vector[186] = (unsigned long)smp_call_function_interrupt;
 	eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
 	eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
-	eit_vector[189] = 0;
+	eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;
 	eit_vector[190] = 0;
 	eit_vector[191] = 0;
 #endif
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 07c1af7dc0e2..cbc3c4c54566 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -20,7 +20,6 @@ extern char _end[];
 
 struct pglist_data *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
-static bootmem_data_t node_bdata[MAX_NUMNODES] __initdata;
 
 pg_data_t m32r_node_data[MAX_NUMNODES];
 
@@ -81,7 +80,7 @@ unsigned long __init setup_memory(void)
 	for_each_online_node(nid) {
 		mp = &mem_prof[nid];
 		NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid];
-		NODE_DATA(nid)->bdata = &node_bdata[nid];
+		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
 		min_pfn = mp->start_pfn;
 		max_pfn = mp->start_pfn + mp->pages;
 		bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
@@ -124,8 +123,7 @@ unsigned long __init setup_memory(void)
 	return max_low_pfn;
 }
 
-#define START_PFN(nid)		\
-	(NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN(nid)		(NODE_DATA(nid)->bdata->node_min_pfn)
 #define MAX_LOW_PFN(nid)	(NODE_DATA(nid)->bdata->node_low_pfn)
 
 unsigned long __init zone_sizes_init(void)
@@ -148,8 +146,7 @@ unsigned long __init zone_sizes_init(void)
 		zholes_size[ZONE_DMA] = mp->holes;
 		holes += zholes_size[ZONE_DMA];
 
-		free_area_init_node(nid, NODE_DATA(nid), zones_size,
-			start_pfn, zholes_size);
+		free_area_init_node(nid, zones_size, start_pfn, zholes_size);
 	}
 
 	/*
@@ -163,4 +160,3 @@ unsigned long __init zone_sizes_init(void)
 
 	return holes;
 }
-
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index bbd97c85bc5d..24d429f9358a 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -36,42 +36,6 @@ pgd_t swapper_pg_dir[1024];
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-void show_mem(void)
-{
-	int total = 0, reserved = 0;
-	int shared = 0, cached = 0;
-	int highmem = 0;
-	struct page *page;
-	pg_data_t *pgdat;
-	unsigned long i;
-
-	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-	for_each_online_pgdat(pgdat) {
-		unsigned long flags;
-		pgdat_resize_lock(pgdat, &flags);
-		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			page = pgdat_page_nr(pgdat, i);
-			total++;
-			if (PageHighMem(page))
-				highmem++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (page_count(page))
-				shared += page_count(page) - 1;
-		}
-		pgdat_resize_unlock(pgdat, &flags);
-	}
-	printk("%d pages of RAM\n", total);
-	printk("%d pages of HIGHMEM\n",highmem);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
-	printk("%d pages swap cached\n",cached);
-}
-
 /*
  * Cache of MMU context last used.
  */
@@ -93,8 +57,7 @@ void free_initrd_mem(unsigned long, unsigned long);
 #endif
 
 /* It'd be good if these lines were in the standard header file. */
-#define START_PFN(nid)	\
-	(NODE_DATA(nid)->bdata->node_boot_start >> PAGE_SHIFT)
+#define START_PFN(nid)	(NODE_DATA(nid)->bdata->node_min_pfn)
 #define MAX_LOW_PFN(nid)	(NODE_DATA(nid)->bdata->node_low_pfn)
 
 #ifndef CONFIG_DISCONTIGMEM
@@ -123,7 +86,7 @@ unsigned long __init zone_sizes_init(void)
 	start_pfn = __MEMORY_START >> PAGE_SHIFT;
 #endif /* CONFIG_MMU */
 
-	free_area_init_node(0, NODE_DATA(0), zones_size, start_pfn, 0);
+	free_area_init_node(0, zones_size, start_pfn, 0);
 
 	return 0;
 }
@@ -252,4 +215,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 }
 #endif
-
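Note: the START_PFN() rewrites in discontig.c and init.c above follow the bootmem change that replaced the byte-address field node_boot_start with the PFN field node_min_pfn. A standalone sketch of the equivalence follows; PAGE_SHIFT = 12 and the 0x08000000 base are illustrative assumptions, and the struct is a toy stand-in rather than the kernel's bootmem_data.

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

	struct toy_bdata {
		unsigned long node_boot_start;	/* old field: byte address      */
		unsigned long node_min_pfn;	/* new field: page frame number */
	};

	int main(void)
	{
		struct toy_bdata bdata = {
			.node_boot_start = 0x08000000UL,
			.node_min_pfn    = 0x08000000UL >> PAGE_SHIFT,
		};

		/* old macro body: (bdata->node_boot_start >> PAGE_SHIFT) */
		unsigned long old_start_pfn = bdata.node_boot_start >> PAGE_SHIFT;
		/* new macro body: (bdata->node_min_pfn) */
		unsigned long new_start_pfn = bdata.node_min_pfn;

		assert(old_start_pfn == new_start_pfn);
		printf("START_PFN = %lu either way\n", old_start_pfn);
		return 0;
	}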