author		Huang, Ying <ying.huang@intel.com>	2008-01-30 07:33:44 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:44 -0500
commit		4716e79c9946044a53a65418cfba04836f6a5c36 (patch)
tree		b29941081f48496a08110351a67de11ca8f6be47 /arch/x86
parent		0947b2f31ca1ea1211d3cde2dbd8fcec579ef395 (diff)
x86: replace boot_ioremap() with enhanced bt_ioremap() - remove boot_ioremap()
This patch replaces boot_ioremap() invocations with bt_ioremap() and removes the boot_ioremap() implementation.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/Kconfig	6
-rw-r--r--	arch/x86/kernel/srat_32.c	8
-rw-r--r--	arch/x86/mm/Makefile_32	1
-rw-r--r--	arch/x86/mm/boot_ioremap_32.c	100
4 files changed, 3 insertions(+), 112 deletions(-)
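
The conversion itself is mechanical: bt_ioremap() takes the same (physical address, size) pair that boot_ioremap() took, so each call site is a one-identifier change. A minimal sketch of the post-patch idiom, assuming the bt_ioremap()/bt_iounmap() prototypes from this era of the tree; the function below is hypothetical and only illustrates the calling pattern:

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/io.h>		/* assumed home of bt_ioremap()/bt_iounmap() */

/* Hypothetical early-boot caller: map a firmware table before the
 * final kernel page tables are up, read it, then drop the mapping.
 * boot_ioremap() offered no unmap; bt_ioremap() pairs with
 * bt_iounmap(). */
static int __init example_read_table(unsigned long phys, unsigned long len)
{
	void *va = bt_ioremap(phys, len);

	if (!va)
		return -ENOMEM;
	/* ... parse the table through va ... */
	bt_iounmap(va, len);
	return 0;
}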
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 92dc919c7640..fb3eea3e38ee 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1009,12 +1009,6 @@ config IRQBALANCE
 	  The default yes will allow the kernel to do irq load balancing.
 	  Saying no will keep the kernel from doing irq load balancing.
 
-# turning this on wastes a bunch of space.
-# Summit needs it only when NUMA is on
-config BOOT_IOREMAP
-	def_bool y
-	depends on X86_32 && (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
-
 config SECCOMP
 	def_bool y
 	prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c
index 2a8713ec0f9a..24bfd4a9e62a 100644
--- a/arch/x86/kernel/srat_32.c
+++ b/arch/x86/kernel/srat_32.c
@@ -57,8 +57,6 @@ static struct node_memory_chunk_s node_memory_chunk[MAXCHUNKS];
 static int num_memory_chunks;		/* total number of memory chunks */
 static u8 __initdata apicid_to_pxm[MAX_APICID];
 
-extern void * boot_ioremap(unsigned long, unsigned long);
-
 /* Identify CPU proximity domains */
 static void __init parse_cpu_affinity_structure(char *p)
 {
@@ -299,7 +297,7 @@ int __init get_memcfg_from_srat(void)
 	}
 
 	rsdt = (struct acpi_table_rsdt *)
-		boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
+		bt_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
 
 	if (!rsdt) {
 		printk(KERN_WARNING
@@ -339,11 +337,11 @@ int __init get_memcfg_from_srat(void)
 	for (i = 0; i < tables; i++) {
 		/* Map in header, then map in full table length. */
 		header = (struct acpi_table_header *)
-			boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
+			bt_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
 		if (!header)
 			break;
 		header = (struct acpi_table_header *)
-			boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
+			bt_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
 		if (!header)
 			break;
 
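
The SRAT parser keeps its existing two-pass idiom, just through the new mapper: map the fixed-size ACPI header first, read the table's advertised length from it, then map again at full length. Restated as a standalone sketch (the helper name is hypothetical; the two calls mirror the hunk above):

/* Hypothetical helper condensing the loop body above: map the
 * header to learn the full table length, then remap at that
 * length. Returns NULL if either mapping fails. */
static struct acpi_table_header * __init map_whole_table(unsigned long phys)
{
	struct acpi_table_header *header;

	header = (struct acpi_table_header *)
		bt_ioremap(phys, sizeof(struct acpi_table_header));
	if (!header)
		return NULL;
	return (struct acpi_table_header *)
		bt_ioremap(phys, header->length);
}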
diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
index be2f55160bf8..1aeba3bf34bd 100644
--- a/arch/x86/mm/Makefile_32
+++ b/arch/x86/mm/Makefile_32
@@ -8,4 +8,3 @@ obj-$(CONFIG_CPA_DEBUG) += pageattr-test.o
 obj-$(CONFIG_NUMA)		+= discontig_32.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
-obj-$(CONFIG_BOOT_IOREMAP)	+= boot_ioremap_32.o
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
deleted file mode 100644
index 7ede1e3e1f42..000000000000
--- a/arch/x86/mm/boot_ioremap_32.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * arch/i386/mm/boot_ioremap.c
- *
- * Re-map functions for early boot-time before paging_init() when the
- * boot-time pagetables are still in use
- *
- * Written by Dave Hansen <haveblue@us.ibm.com>
- */
-
-
-/*
- * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
- * keeps that from happening.  If anyone has a better way, I'm listening.
- *
- * boot_pte_t is defined only if this all works correctly
- */
-
-#undef CONFIG_X86_PAE
-#undef CONFIG_PARAVIRT
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include <linux/init.h>
-#include <linux/stddef.h>
-
-/*
- * I'm cheating here.  It is known that the two boot PTE pages are
- * allocated next to each other.  I'm pretending that they're just
- * one big array.
- */
-
-#define BOOT_PTE_PTRS (PTRS_PER_PTE*2)
-
-static unsigned long boot_pte_index(unsigned long vaddr)
-{
-	return __pa(vaddr) >> PAGE_SHIFT;
-}
-
-static inline boot_pte_t* boot_vaddr_to_pte(void *address)
-{
-	boot_pte_t* boot_pg = (boot_pte_t*)pg0;
-	return &boot_pg[boot_pte_index((unsigned long)address)];
-}
-
-/*
- * This is only for a caller who is clever enough to page-align
- * phys_addr and virtual_source, and who also has a preference
- * about which virtual address from which to steal ptes
- */
-static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
-		    void* virtual_source)
-{
-	boot_pte_t* pte;
-	int i;
-	char *vaddr = virtual_source;
-
-	pte = boot_vaddr_to_pte(virtual_source);
-	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
-		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
-		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
-	}
-}
-
-/* the virtual space we're going to remap comes from this array */
-#define BOOT_IOREMAP_PAGES 4
-#define BOOT_IOREMAP_SIZE (BOOT_IOREMAP_PAGES*PAGE_SIZE)
-static __initdata char boot_ioremap_space[BOOT_IOREMAP_SIZE]
-		__attribute__ ((aligned (PAGE_SIZE)));
-
-/*
- * This only applies to things which need to ioremap before paging_init()
- * bt_ioremap() and plain ioremap() are both useless at this point.
- *
- * When used, we're still using the boot-time pagetables, which only
- * have 2 PTE pages mapping the first 8MB
- *
- * There is no unmap.  The boot-time PTE pages aren't used after boot.
- * If you really want the space back, just remap it yourself.
- * boot_ioremap(&ioremap_space-PAGE_OFFSET, BOOT_IOREMAP_SIZE)
- */
-__init void* boot_ioremap(unsigned long phys_addr, unsigned long size)
-{
-	unsigned long last_addr, offset;
-	unsigned int nrpages;
-
-	last_addr = phys_addr + size - 1;
-
-	/* page align the requested address */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr) - phys_addr;
-
-	nrpages = size >> PAGE_SHIFT;
-	if (nrpages > BOOT_IOREMAP_PAGES)
-		return NULL;
-
-	__boot_ioremap(phys_addr, nrpages, boot_ioremap_space);
-
-	return &boot_ioremap_space[offset];
-}
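
The rounding arithmetic at the bottom of the deleted function is the standard page-alignment idiom. A worked example, assuming 4 KB pages (the input values are chosen purely for illustration):

/* boot_ioremap(0x12345678, 0x100) with PAGE_SIZE = 4096:
 *   last_addr = 0x12345678 + 0x100 - 1             = 0x12345777
 *   offset    = 0x12345678 & ~PAGE_MASK            = 0x678
 *   phys_addr = 0x12345678 &  PAGE_MASK            = 0x12345000
 *   size      = PAGE_ALIGN(0x12345777) - phys_addr = 0x1000
 *   nrpages   = 0x1000 >> PAGE_SHIFT               = 1
 * One PTE of the 4-page __initdata window is repointed and the
 * caller gets boot_ioremap_space + 0x678; any request spanning
 * more than BOOT_IOREMAP_PAGES (4) pages returns NULL. */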