author     David S. Miller <davem@sunset.davemloft.net>   2007-08-08 20:32:33 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-08-08 20:33:52 -0400
commit     b434e71933aa0519ee042c01419db76b7dcc058e (patch)
tree       0aa4506224aea925a95631f4b1fe7c2c2d07de4f /arch/sparc64/kernel
parent     6c70b6fc7b6fc321636a014082d9e32333da1f80 (diff)
[SPARC64]: Fix memory leak when cpu hotplugging.
Every time a cpu is added via hotplug, we allocate the per-cpu MONDO
queues but we never free them up. Freeing isn't easy since the first
cpu gets this memory from bootmem.
Therefore, the simplest thing to do to fix this bug is to allocate the
queues for all possible cpus at boot time.
Signed-off-by: David S. Miller <davem@davemloft.net>
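
The shape of the fix, condensed into a short sketch from the irq.c hunks below (this is a paraphrase for orientation, not the literal patch text; the remaining error-queue and kernel-buffer allocations elided here follow the same pattern):

/* Allocate every possible cpu's queues from bootmem once, at boot,
 * so a later hotplug add never allocates (or leaks) them again. */
static void __init sun4v_init_mondo_queues(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
		/* ... error queues and kernel buffers likewise ... */

		init_cpu_send_mondo_info(tb);
	}

	/* Only the boot cpu registers its queues with the hypervisor
	 * here; secondary cpus call sun4v_register_mondo_queues()
	 * from their startup trampolines. */
	sun4v_register_mondo_queues(hard_smp_processor_id());
}

The trade-off is a little boot-time memory spent on cpus that may never come online; in exchange, nothing bootmem-backed ever needs to be freed on hot-unplug.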
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/hvtramp.S      7
-rw-r--r--  arch/sparc64/kernel/irq.c         74
-rw-r--r--  arch/sparc64/kernel/smp.c          5
-rw-r--r--  arch/sparc64/kernel/trampoline.S   7
4 files changed, 28 insertions, 65 deletions
diff --git a/arch/sparc64/kernel/hvtramp.S b/arch/sparc64/kernel/hvtramp.S
index a55c252e18cc..b692e044a463 100644
--- a/arch/sparc64/kernel/hvtramp.S
+++ b/arch/sparc64/kernel/hvtramp.S
@@ -115,11 +115,8 @@ hv_cpu_startup:
 	call		hard_smp_processor_id
 	 nop
 
-	mov		%o0, %o1
-	mov		0, %o0
-	mov		0, %o2
-	call		sun4v_init_mondo_queues
-	 mov		1, %o3
+	call		sun4v_register_mondo_queues
+	 nop
 
 	call		init_cur_cpu_trap
 	 mov		%g6, %o0
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index db31bf6b42db..384abf410cf0 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -929,7 +929,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
 	}
 }
 
-static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
+void __cpuinit sun4v_register_mondo_queues(int this_cpu)
 {
 	struct trap_per_cpu *tb = &trap_block[this_cpu];
 
@@ -943,20 +943,10 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
 			   tb->nonresum_qmask);
 }
 
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
 {
 	unsigned long size = PAGE_ALIGN(qmask + 1);
-	unsigned long order = get_order(size);
-	void *p = NULL;
-
-	if (use_bootmem) {
-		p = __alloc_bootmem_low(size, size, 0);
-	} else {
-		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
-		if (page)
-			p = page_address(page);
-	}
-
+	void *p = __alloc_bootmem_low(size, size, 0);
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
 		prom_halt();
@@ -965,19 +955,10 @@ static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask
 	*pa_ptr = __pa(p);
 }
 
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
+static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
 {
 	unsigned long size = PAGE_ALIGN(qmask + 1);
-	unsigned long order = get_order(size);
-	void *p = NULL;
-
-	if (use_bootmem) {
-		p = __alloc_bootmem_low(size, size, 0);
-	} else {
-		struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
-		if (page)
-			p = page_address(page);
-	}
+	void *p = __alloc_bootmem_low(size, size, 0);
 
 	if (!p) {
 		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
@@ -987,18 +968,14 @@ static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask,
 	*pa_ptr = __pa(p);
 }
 
-static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
+static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	void *page;
 
 	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-	if (use_bootmem)
-		page = alloc_bootmem_low_pages(PAGE_SIZE);
-	else
-		page = (void *) get_zeroed_page(GFP_ATOMIC);
-
+	page = alloc_bootmem_low_pages(PAGE_SIZE);
 	if (!page) {
 		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
 		prom_halt();
@@ -1009,30 +986,27 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_
 #endif
 }
 
-/* Allocate and register the mondo and error queues for this cpu. */
-void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
+/* Allocate mondo and error queues for all possible cpus. */
+static void __init sun4v_init_mondo_queues(void)
 {
-	struct trap_per_cpu *tb = &trap_block[cpu];
+	int cpu;
 
-	if (alloc) {
-		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
-		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
-		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
-		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
-		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
-		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
+	for_each_possible_cpu(cpu) {
+		struct trap_per_cpu *tb = &trap_block[cpu];
 
-		init_cpu_send_mondo_info(tb, use_bootmem);
-	}
+		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
+		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
+			       tb->nonresum_qmask);
 
-	if (load) {
-		if (cpu != hard_smp_processor_id()) {
-			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
-				    cpu, hard_smp_processor_id());
-			prom_halt();
-		}
-		sun4v_register_mondo_queues(cpu);
+		init_cpu_send_mondo_info(tb);
 	}
+
+	/* Load up the boot cpu's entries. */
+	sun4v_register_mondo_queues(hard_smp_processor_id());
 }
 
 static struct irqaction timer_irq_action = {
@@ -1047,7 +1021,7 @@ void __init init_IRQ(void)
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
 
 	if (tlb_type == hypervisor)
-		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
+		sun4v_init_mondo_queues();
 
 	/* We need to clear any IRQ's pending in the soft interrupt
 	 * registers, a spurious one could be left around from the
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b448d33321c6..b84c49e3697c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -334,8 +334,6 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 }
 #endif
 
-extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
-
 extern unsigned long sparc64_cpu_startup;
 
 /* The OBP cpu startup callback truncates the 3rd arg cookie to
@@ -359,9 +357,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 	cpu_new_thread = task_thread_info(p);
 
 	if (tlb_type == hypervisor) {
-		/* Alloc the mondo queues, cpu will load them. */
-		sun4v_init_mondo_queues(0, cpu, 1, 0);
-
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
 		if (ldom_domaining_enabled)
 			ldom_startcpu_cpuid(cpu,
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index a4dc01a3d238..9448595f9063 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -366,11 +366,8 @@ after_lock_tlb:
 	call		hard_smp_processor_id
 	 nop
 
-	mov		%o0, %o1
-	mov		0, %o0
-	mov		0, %o2
-	call		sun4v_init_mondo_queues
-	 mov		1, %o3
+	call		sun4v_register_mondo_queues
+	 nop
 
 1:	call		init_cur_cpu_trap
 	 ldx		[%l0], %o0