author     David S. Miller <davem@davemloft.net>    2009-06-25 22:00:47 -0400
committer  David S. Miller <davem@davemloft.net>    2009-06-25 22:00:47 -0400
commit     14a2ff6ed28931f796d2c2c8a440227a5d90f441
tree       a5e7bd83dd3a4a4fc60b8815a99bfda8aefb9c0f /arch/sparc/kernel
parent     28d0325ce6e0a52f53d8af687e6427fee59004d3
sparc64: Don't use alloc_bootmem() in init_IRQ() code paths.
The page allocator and SLAB are available at this point now, and if we
still try to use bootmem allocations here the kernel spits out warnings.

Signed-off-by: David S. Miller <davem@davemloft.net>
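In outline, each bootmem call is swapped for the equivalent regular allocator call: the size-aligned queue regions now come from __get_free_pages(), and ivector_table from kzalloc(). A minimal sketch of the pattern (illustrative helper names, not the patch itself):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch only: the two replacements this patch makes.  Both are safe
 * because init_IRQ() runs late enough in start_kernel() that the page
 * allocator and SLAB are initialized, so GFP_KERNEL allocations work
 * where bootmem would now trigger warnings.
 */

/* was: void *p = __alloc_bootmem(size, size, 0); */
static unsigned long sketch_alloc_queue(unsigned long size)
{
	return __get_free_pages(GFP_KERNEL, get_order(size));
}

/* was: ivector_table = alloc_bootmem(size);
 * Bootmem memory comes back zeroed, so kzalloc() is the matching
 * replacement.
 */
static void *sketch_alloc_table(unsigned long size)
{
	return kzalloc(size, GFP_KERNEL);
}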
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/irq_64.c  45
1 file changed, 19 insertions(+), 26 deletions(-)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index bd075054942b..f0ee79055409 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
@@ -914,25 +913,19 @@ void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
 			   tb->nonresum_qmask);
 }
 
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
-	unsigned long size = PAGE_ALIGN(qmask + 1);
-	void *p = __alloc_bootmem(size, size, 0);
-	if (!p) {
-		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
-		prom_halt();
-	}
-
-	*pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size.  The base real address must be aligned to the size of the
+ * region.  Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
 {
 	unsigned long size = PAGE_ALIGN(qmask + 1);
-	void *p = __alloc_bootmem(size, size, 0);
+	unsigned long order = get_order(size);
+	unsigned long p;
 
+	p = __get_free_pages(GFP_KERNEL, order);
 	if (!p) {
-		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+		prom_printf("SUN4V: Error, cannot allocate queue.\n");
 		prom_halt();
 	}
 
@@ -942,11 +935,11 @@ static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
-	void *page;
+	unsigned long page;
 
 	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-	page = alloc_bootmem_pages(PAGE_SIZE);
+	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
 		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
 		prom_halt();
@@ -965,13 +958,13 @@ static void __init sun4v_init_mondo_queues(void)
 	for_each_possible_cpu(cpu) {
 		struct trap_per_cpu *tb = &trap_block[cpu];
 
-		alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
-		alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
-		alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
-		alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
-		alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
-		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
-			       tb->nonresum_qmask);
+		alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+		alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+		alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+		alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+		alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+		alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+				tb->nonresum_qmask);
 	}
 }
 
@@ -999,7 +992,7 @@ void __init init_IRQ(void)
 	kill_prom_timer();
 
 	size = sizeof(struct ino_bucket) * NUM_IVECS;
-	ivector_table = alloc_bootmem(size);
+	ivector_table = kzalloc(size, GFP_KERNEL);
 	if (!ivector_table) {
 		prom_printf("Fatal error, cannot allocate ivector_table\n");
 		prom_halt();