author     GuanXuetao <gxt@mprc.pku.edu.cn>   2011-01-15 05:16:59 -0500
committer  GuanXuetao <gxt@mprc.pku.edu.cn>   2011-03-16 21:19:08 -0400
commit     b50f1704e9c441c58cf6dc05e72953ca30e1d4d2 (patch)
tree       bfd7f81c849aa42f6355d9fb383f2167c5f0e087 /arch/unicore32
parent     f73670e8a55c11d47c28dca35dc4bc7dfbd4e6eb (diff)
unicore32 core architecture: mm related: generic codes
This patch includes generic code for memory management.

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/unicore32')
-rw-r--r--  arch/unicore32/include/asm/cache.h    |  27
-rw-r--r--  arch/unicore32/include/asm/memblock.h |  46
-rw-r--r--  arch/unicore32/include/asm/memory.h   | 123
-rw-r--r--  arch/unicore32/include/asm/page.h     |  80
-rw-r--r--  arch/unicore32/include/asm/tlb.h      |  98
-rw-r--r--  arch/unicore32/include/mach/map.h     |  20
-rw-r--r--  arch/unicore32/include/mach/memory.h  |  58
-rw-r--r--  arch/unicore32/mm/Kconfig             |  50
-rw-r--r--  arch/unicore32/mm/Makefile            |  15
-rw-r--r--  arch/unicore32/mm/init.c              | 517
-rw-r--r--  arch/unicore32/mm/iomap.c             |  56
-rw-r--r--  arch/unicore32/mm/ioremap.c           | 261
-rw-r--r--  arch/unicore32/mm/mm.h                |  39
13 files changed, 1390 insertions(+), 0 deletions(-)
diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
new file mode 100644
index 000000000000..ad8f795d86ca
--- /dev/null
+++ b/arch/unicore32/include/asm/cache.h
@@ -0,0 +1,27 @@
+/*
+ * linux/arch/unicore32/include/asm/cache.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CACHE_H__
+#define __UNICORE_CACHE_H__
+
+#define L1_CACHE_SHIFT          (5)
+#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN       L1_CACHE_BYTES
+
+#endif
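
A quick illustration of what these constants imply: with L1_CACHE_SHIFT = 5 a cache
line is 32 bytes, so a DMA buffer should occupy whole lines to avoid sharing one with
unrelated data. A minimal sketch of driver-side usage (hypothetical code, not part of
this patch; buf_len is an assumed variable):

        /* Round the length up to a whole number of cache lines (e.g. 100 -> 128)
         * so a DMA transfer never shares a line with unrelated data; kmalloc()
         * itself returns ARCH_DMA_MINALIGN-aligned memory. */
        size_t dma_len = ALIGN(buf_len, ARCH_DMA_MINALIGN);
        void *buf = kmalloc(dma_len, GFP_KERNEL);
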
diff --git a/arch/unicore32/include/asm/memblock.h b/arch/unicore32/include/asm/memblock.h
new file mode 100644
index 000000000000..a8a5d8d0a26e
--- /dev/null
+++ b/arch/unicore32/include/asm/memblock.h
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/unicore32/include/asm/memblock.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_MEMBLOCK_H__
+#define __UNICORE_MEMBLOCK_H__
+
+/*
+ * Memory map description
+ */
+#define NR_BANKS        8
+
+struct membank {
+        unsigned long start;
+        unsigned long size;
+        unsigned int highmem;
+};
+
+struct meminfo {
+        int nr_banks;
+        struct membank bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+
+#define for_each_bank(iter, mi)                         \
+        for (iter = 0; iter < (mi)->nr_banks; iter++)
+
+#define bank_pfn_start(bank)    __phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank)      __phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank)     ((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank)   ((bank)->start)
+#define bank_phys_end(bank)     ((bank)->start + (bank)->size)
+#define bank_phys_size(bank)    ((bank)->size)
+
+extern void uc32_memblock_init(struct meminfo *);
+
+#endif
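
The bank accessors above reduce "walk every RAM bank" loops to one line each. A
sketch of how a caller might total the physical pages described by a meminfo
(illustrative only; the same loop shape appears in mm/init.c below):

        unsigned long total_pfns = 0;
        int i;

        for_each_bank(i, &meminfo)
                total_pfns += bank_pfn_size(&meminfo.bank[i]);
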
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
new file mode 100644
index 000000000000..5eddb997defe
--- /dev/null
+++ b/arch/unicore32/include/asm/memory.h
@@ -0,0 +1,123 @@
+/*
+ * linux/arch/unicore32/include/asm/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: this file should not be included by non-asm/.h files
+ */
+#ifndef __UNICORE_MEMORY_H__
+#define __UNICORE_MEMORY_H__
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <asm/sizes.h>
+#include <mach/memory.h>
+
+/*
+ * Allow constants defined here to be used from assembly code
+ * by appending the UL suffix only when compiling C code.
+ */
+#define UL(x) _AC(x, UL)
+
+/*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#define PAGE_OFFSET             UL(0xC0000000)
+#define TASK_SIZE               (PAGE_OFFSET - UL(0x41000000))
+#define TASK_UNMAPPED_BASE      (PAGE_OFFSET / 3)
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULES_VADDR           (PAGE_OFFSET - 16*1024*1024)
+#if TASK_SIZE > MODULES_VADDR
+#error Top of user space clashes with start of module space
+#endif
+
+#define MODULES_END             (PAGE_OFFSET)
+
+/*
+ * Allow 16MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER       24
+
+/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#define __virt_to_phys(x)       ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x)       ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr)    ((paddr) >> PAGE_SHIFT)
+#define __pfn_to_phys(pfn)      ((pfn) << PAGE_SHIFT)
+
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page)      (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys)      (pfn_to_page(__phys_to_pfn(phys)))
+
+#ifndef __ASSEMBLY__
+
+#ifndef arch_adjust_zones
+#define arch_adjust_zones(size, holes) do { } while (0)
+#endif
+
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET         (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x)                 __virt_to_phys((unsigned long)(x))
+#define __va(x)                 ((void *)__phys_to_virt((unsigned long)(x)))
+#define pfn_to_kaddr(pfn)       __va((pfn) << PAGE_SHIFT)
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * page_to_pfn(page)    convert a struct page * to a PFN number
+ * pfn_to_page(pfn)     convert a _valid_ PFN number to struct page *
+ *
+ * virt_to_page(k)      convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k)   indicates whether a virtual address is valid
+ */
+#define ARCH_PFN_OFFSET         PHYS_PFN_OFFSET
+
+#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)  ((unsigned long)(kaddr) >= PAGE_OFFSET && \
+                (unsigned long)(kaddr) < (unsigned long)high_memory)
+
+#endif
+
+#include <asm-generic/memory_model.h>
+
+#endif
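
With PHYS_OFFSET = 0 (from mach/memory.h below) and PAGE_OFFSET = 0xC0000000, each
conversion is a single constant offset. A worked example under those values:

        __pa(0xC0100000)          /* == 0xC0100000 - 0xC0000000 + 0x0 == 0x00100000 */
        __phys_to_pfn(0x00100000) /* == 0x00100000 >> 12 == 0x100, i.e. PFN 256 */
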
diff --git a/arch/unicore32/include/asm/page.h b/arch/unicore32/include/asm/page.h
new file mode 100644
index 000000000000..594b3226250e
--- /dev/null
+++ b/arch/unicore32/include/asm/page.h
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/unicore32/include/asm/page.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PAGE_H__
+#define __UNICORE_PAGE_H__
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT      12
+#define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK       (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+struct vm_area_struct;
+
+#define clear_page(page)        memset((void *)(page), 0, PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#define clear_user_page(page, vaddr, pg)        clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)      ((x).pte)
+#define pgd_val(x)      ((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)      (x)
+#define pgd_val(x)      (x)
+#define pgprot_val(x)   (x)
+
+#define __pte(x)        (x)
+#define __pgd(x)        (x)
+#define __pgprot(x)     (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+typedef struct page *pgtable_t;
+
+extern int pfn_valid(unsigned long);
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS \
+        (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/getorder.h>
+
+#endif
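
STRICT_MM_TYPECHECKS trades a little compiler convenience for type safety: with it
defined, a pte_t cannot be silently mixed with a plain unsigned long. A small
illustrative round trip (hypothetical values):

        pte_t pte = __pte(0x00100000 | 0x3);    /* wrap raw bits in the typed form */
        unsigned long raw = pte_val(pte);       /* unwrap again */
        /* raw + pte;  -- would not compile with STRICT_MM_TYPECHECKS defined */
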
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
new file mode 100644
index 000000000000..02ee40e47a0d
--- /dev/null
+++ b/arch/unicore32/include/asm/tlb.h
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/include/asm/tlb.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TLB_H__
+#define __UNICORE_TLB_H__
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+/*
+ * TLB handling. This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+        struct mm_struct        *mm;
+        unsigned int            fullmm;
+        unsigned long           range_start;
+        unsigned long           range_end;
+};
+
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+        tlb->mm = mm;
+        tlb->fullmm = full_mm_flush;
+
+        return tlb;
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+        if (tlb->fullmm)
+                flush_tlb_mm(tlb->mm);
+
+        /* keep the page table cache within bounds */
+        check_pgt_cache();
+
+        put_cpu_var(mmu_gathers);
+}
+
+/*
+ * Memorize the range for the TLB flush.
+ */
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+{
+        if (!tlb->fullmm) {
+                if (addr < tlb->range_start)
+                        tlb->range_start = addr;
+                if (addr + PAGE_SIZE > tlb->range_end)
+                        tlb->range_end = addr + PAGE_SIZE;
+        }
+}
+
+/*
+ * For VMA handling we can optimise these away when doing a
+ * full MM flush. When doing a munmap, the vmas are adjusted
+ * to cover only the region being torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm) {
+                flush_cache_range(vma, vma->vm_start, vma->vm_end);
+                tlb->range_start = TASK_SIZE;
+                tlb->range_end = 0;
+        }
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm && tlb->range_end > 0)
+                flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+}
+
+#define tlb_remove_page(tlb, page)      free_page_and_swap_cache(page)
+#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, x, addr)      do { } while (0)
+
+#define tlb_migrate_finish(mm)          do { } while (0)
+
+#endif
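
These helpers implement the usual three-phase teardown protocol driven by the generic
mm code. A hedged sketch of the caller side (mm, vma, ptep, addr, start and end are
assumed variables; the page-table walk between start and end is elided):

        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0); /* 0: partial flush, track a range */

        tlb_start_vma(tlb, vma);                /* flush caches, reset the range */
        tlb_remove_tlb_entry(tlb, ptep, addr);  /* widen [range_start, range_end) */
        tlb_end_vma(tlb, vma);                  /* one flush_tlb_range() for the lot */

        tlb_finish_mmu(tlb, start, end);        /* full-MM flush only if requested */
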
diff --git a/arch/unicore32/include/mach/map.h b/arch/unicore32/include/mach/map.h
new file mode 100644
index 000000000000..55c936573741
--- /dev/null
+++ b/arch/unicore32/include/mach/map.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/unicore32/include/mach/map.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Page table mapping constructs and function prototypes
+ */
+#define MT_DEVICE               0
+#define MT_DEVICE_CACHED        2
+#define MT_KUSER                7
+#define MT_HIGH_VECTORS         8
+#define MT_MEMORY               9
+#define MT_ROM                  10
+
diff --git a/arch/unicore32/include/mach/memory.h b/arch/unicore32/include/mach/memory.h
new file mode 100644
index 000000000000..541949dfa5b4
--- /dev/null
+++ b/arch/unicore32/include/mach/memory.h
@@ -0,0 +1,58 @@
+/*
+ * linux/arch/unicore32/include/mach/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_MEMORY_H__
+#define __MACH_PUV3_MEMORY_H__
+
+#include <mach/hardware.h>
+
+/* Physical DRAM offset. */
+#define PHYS_OFFSET     UL(0x00000000)
+/* The base address of exception vectors. */
+#define VECTORS_BASE    UL(0xffff0000)
+/* The base address of kuser area. */
+#define KUSER_BASE      UL(0x80000000)
+
+#ifdef __ASSEMBLY__
+/* The byte offset of the kernel image in RAM from the start of RAM. */
+#define KERNEL_IMAGE_START      0x00408000
+#endif
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
+
+void puv3_pci_adjust_zones(unsigned long *size, unsigned long *holes);
+
+#define arch_adjust_zones(size, holes) \
+        puv3_pci_adjust_zones(size, holes)
+
+#endif
+
+/*
+ * The PCI controller in PKUnity-3 masks the highest 5 bits on the
+ * upstream channel, so DMA allocations must be kept within the first
+ * 128MB of physical memory to support PCI devices.
+ */
+#define PCI_DMA_THRESHOLD       (PHYS_OFFSET + SZ_128M - 1)
+
+#define is_pcibus_device(dev)   (dev && \
+                                (strncmp(dev->bus->name, "pci", 3) == 0))
+
+#define __virt_to_pcibus(x)     (__virt_to_phys(x) + PKUNITY_PCIAHB_BASE)
+#define __pcibus_to_virt(x)     __phys_to_virt((x) - PKUNITY_PCIAHB_BASE)
+
+/* kuser area */
+#define KUSER_VECPAGE_BASE      (KUSER_BASE + UL(0x3fff0000))
+#define KUSER_UNIGFX_BASE       (KUSER_BASE + PKUNITY_UNIGFX_MMAP_BASE)
+/* kuser_vecpage (0xbfff0000) is ro, and vectors page (0xffff0000) is rw */
+#define kuser_vecpage_to_vectors(x)     ((x) - (KUSER_VECPAGE_BASE) \
+                                        + (VECTORS_BASE))
+
+#endif
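
The kuser arithmetic checks out against the comment above: KUSER_VECPAGE_BASE =
0x80000000 + 0x3fff0000 = 0xbfff0000. A worked example of the vector-page
translation, using only the constants defined here:

        kuser_vecpage_to_vectors(0xbfff0020)
                == 0xbfff0020 - 0xbfff0000 + 0xffff0000
                == 0xffff0020
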
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
new file mode 100644
index 000000000000..5f77fb3c63be
--- /dev/null
+++ b/arch/unicore32/mm/Kconfig
@@ -0,0 +1,50 @@
+comment "Processor Type"
+
+# Select CPU types depending on the architecture selected. This selects
+# which CPUs we support in the kernel image, and the compiler instruction
+# optimiser behaviour.
+
+config CPU_UCV2
+        def_bool y
+
+comment "Processor Features"
+
+config CPU_ICACHE_DISABLE
+        bool "Disable I-Cache (I-bit)"
+        help
+          Say Y here to disable the processor instruction cache. Unless
+          you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_DISABLE
+        bool "Disable D-Cache (D-bit)"
+        help
+          Say Y here to disable the processor data cache. Unless
+          you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_WRITETHROUGH
+        bool "Force write through D-cache"
+        help
+          Say Y here to use the data cache in writethrough mode. Unless you
+          specifically require this or are unsure, say N.
+
+config CPU_DCACHE_LINE_DISABLE
+        bool "Disable D-cache line ops"
+        default y
+        help
+          Say Y here to disable the data cache line operations.
+
+config CPU_TLB_SINGLE_ENTRY_DISABLE
+        bool "Disable TLB single entry ops"
+        default y
+        help
+          Say Y here to disable the TLB single entry operations.
+
+config SWIOTLB
+        def_bool y
+
+config IOMMU_HELPER
+        def_bool SWIOTLB
+
+config NEED_SG_DMA_LENGTH
+        def_bool SWIOTLB
+
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
new file mode 100644
index 000000000000..f3ff41039f51
--- /dev/null
+++ b/arch/unicore32/mm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the linux unicore-specific parts of the memory manager.
+#
+
+obj-y                           := extable.o fault.o init.o pgd.o mmu.o
+obj-y                           += iomap.o flush.o ioremap.o
+
+obj-$(CONFIG_SWIOTLB)           += dma-swiotlb.o
+
+obj-$(CONFIG_MODULES)           += proc-syms.o
+
+obj-$(CONFIG_ALIGNMENT_TRAP)    += alignment.o
+
+obj-$(CONFIG_CPU_UCV2)          += cache-ucv2.o tlb-ucv2.o proc-ucv2.o
+
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
new file mode 100644
index 000000000000..3dbe3709b69d
--- /dev/null
+++ b/arch/unicore32/mm/init.c
@@ -0,0 +1,517 @@
+/*
+ * linux/arch/unicore32/mm/init.c
+ *
+ * Copyright (C) 2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+#include <mach/map.h>
+
+#include "mm.h"
+
+static unsigned long phys_initrd_start __initdata = 0x01000000;
+static unsigned long phys_initrd_size __initdata = SZ_8M;
+
+static int __init early_initrd(char *p)
+{
+        unsigned long start, size;
+        char *endp;
+
+        start = memparse(p, &endp);
+        if (*endp == ',') {
+                size = memparse(endp + 1, NULL);
+
+                phys_initrd_start = start;
+                phys_initrd_size = size;
+        }
+        return 0;
+}
+early_param("initrd", early_initrd);
+
+/*
+ * This keeps memory configuration data used by a couple of memory
+ * initialization functions, as well as by show_mem() to skip holes
+ * in the memory map. It is populated by uc32_add_memory().
+ */
+struct meminfo meminfo;
+
+void show_mem(void)
+{
+        int free = 0, total = 0, reserved = 0;
+        int shared = 0, cached = 0, slab = 0, i;
+        struct meminfo *mi = &meminfo;
+
+        printk(KERN_DEFAULT "Mem-info:\n");
+        show_free_areas();
+
+        for_each_bank(i, mi) {
+                struct membank *bank = &mi->bank[i];
+                unsigned int pfn1, pfn2;
+                struct page *page, *end;
+
+                pfn1 = bank_pfn_start(bank);
+                pfn2 = bank_pfn_end(bank);
+
+                page = pfn_to_page(pfn1);
+                end = pfn_to_page(pfn2 - 1) + 1;
+
+                do {
+                        total++;
+                        if (PageReserved(page))
+                                reserved++;
+                        else if (PageSwapCache(page))
+                                cached++;
+                        else if (PageSlab(page))
+                                slab++;
+                        else if (!page_count(page))
+                                free++;
+                        else
+                                shared += page_count(page) - 1;
+                        page++;
+                } while (page < end);
+        }
+
+        printk(KERN_DEFAULT "%d pages of RAM\n", total);
+        printk(KERN_DEFAULT "%d free pages\n", free);
+        printk(KERN_DEFAULT "%d reserved pages\n", reserved);
+        printk(KERN_DEFAULT "%d slab pages\n", slab);
+        printk(KERN_DEFAULT "%d pages shared\n", shared);
+        printk(KERN_DEFAULT "%d pages swap cached\n", cached);
+}
+
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+        unsigned long *max_high)
+{
+        struct meminfo *mi = &meminfo;
+        int i;
+
+        *min = -1UL;
+        *max_low = *max_high = 0;
+
+        for_each_bank(i, mi) {
+                struct membank *bank = &mi->bank[i];
+                unsigned long start, end;
+
+                start = bank_pfn_start(bank);
+                end = bank_pfn_end(bank);
+
+                if (*min > start)
+                        *min = start;
+                if (*max_high < end)
+                        *max_high = end;
+                if (bank->highmem)
+                        continue;
+                if (*max_low < end)
+                        *max_low = end;
+        }
+}
+
+static void __init uc32_bootmem_init(unsigned long start_pfn,
+        unsigned long end_pfn)
+{
+        struct memblock_region *reg;
+        unsigned int boot_pages;
+        phys_addr_t bitmap;
+        pg_data_t *pgdat;
+
+        /*
+         * Allocate the bootmem bitmap page. This must be in a region
+         * of memory which has already been mapped.
+         */
+        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+                                __pfn_to_phys(end_pfn));
+
+        /*
+         * Initialise the bootmem allocator, handing the
+         * memory banks over to bootmem.
+         */
+        node_set_online(0);
+        pgdat = NODE_DATA(0);
+        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
+
+        /* Free the lowmem regions from memblock into bootmem. */
+        for_each_memblock(memory, reg) {
+                unsigned long start = memblock_region_memory_base_pfn(reg);
+                unsigned long end = memblock_region_memory_end_pfn(reg);
+
+                if (end >= end_pfn)
+                        end = end_pfn;
+                if (start >= end)
+                        break;
+
+                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
+        }
+
+        /* Reserve the lowmem memblock reserved regions in bootmem. */
+        for_each_memblock(reserved, reg) {
+                unsigned long start = memblock_region_reserved_base_pfn(reg);
+                unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+                if (end >= end_pfn)
+                        end = end_pfn;
+                if (start >= end)
+                        break;
+
+                reserve_bootmem(__pfn_to_phys(start),
+                        (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
+        }
+}
+
+static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
+        unsigned long max_high)
+{
+        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+        struct memblock_region *reg;
+
+        /*
+         * initialise the zones.
+         */
+        memset(zone_size, 0, sizeof(zone_size));
+
+        /*
+         * The memory size has already been determined. If we need
+         * to do anything fancy with the allocation of this memory
+         * to the zones, now is the time to do it.
+         */
+        zone_size[0] = max_low - min;
+
+        /*
+         * Calculate the size of the holes.
+         * holes = node_size - sum(bank_sizes)
+         */
+        memcpy(zhole_size, zone_size, sizeof(zhole_size));
+        for_each_memblock(memory, reg) {
+                unsigned long start = memblock_region_memory_base_pfn(reg);
+                unsigned long end = memblock_region_memory_end_pfn(reg);
+
+                if (start < max_low) {
+                        unsigned long low_end = min(end, max_low);
+                        zhole_size[0] -= low_end - start;
+                }
+        }
+
+        /*
+         * Adjust the sizes according to any special requirements for
+         * this machine type.
+         */
+        arch_adjust_zones(zone_size, zhole_size);
+
+        free_area_init_node(0, zone_size, min, zhole_size);
+}
+
+int pfn_valid(unsigned long pfn)
+{
+        return memblock_is_memory(pfn << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(pfn_valid);
+
+static void uc32_memory_present(void)
+{
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+        const struct membank *a = _a, *b = _b;
+        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+void __init uc32_memblock_init(struct meminfo *mi)
+{
+        int i;
+
+        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
+                meminfo_cmp, NULL);
+
+        memblock_init();
+        for (i = 0; i < mi->nr_banks; i++)
+                memblock_add(mi->bank[i].start, mi->bank[i].size);
+
+        /* Register the kernel text, kernel data and initrd with memblock. */
+        memblock_reserve(__pa(_text), _end - _text);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+        if (phys_initrd_size) {
+                memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+                /* Now convert initrd to virtual addresses */
+                initrd_start = __phys_to_virt(phys_initrd_start);
+                initrd_end = initrd_start + phys_initrd_size;
+        }
+#endif
+
+        uc32_mm_memblock_reserve();
+
+        memblock_analyze();
+        memblock_dump_all();
+}
+
+void __init bootmem_init(void)
+{
+        unsigned long min, max_low, max_high;
+
+        max_low = max_high = 0;
+
+        find_limits(&min, &max_low, &max_high);
+
+        uc32_bootmem_init(min, max_low);
+
+#ifdef CONFIG_SWIOTLB
+        swiotlb_init(1);
+#endif
+        /*
+         * Sparsemem tries to allocate bootmem in memory_present(),
+         * so it must be done after the fixed reservations.
+         */
+        uc32_memory_present();
+
+        /*
+         * sparse_init() needs the bootmem allocator up and running.
+         */
+        sparse_init();
+
+        /*
+         * Now free the memory - free_area_init_node needs
+         * the sparse mem_map arrays initialized by sparse_init()
+         * for memmap_init_zone(), otherwise all PFNs are invalid.
+         */
+        uc32_bootmem_free(min, max_low, max_high);
+
+        high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+
+        /*
+         * This doesn't seem to be used by the Linux memory manager any
+         * more, but is used by ll_rw_block. If we can get rid of it, we
+         * can also get rid of some of the stuff above.
+         *
+         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+         * the system, not the maximum PFN.
+         */
+        max_low_pfn = max_low - PHYS_PFN_OFFSET;
+        max_pfn = max_high - PHYS_PFN_OFFSET;
+}
+
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+{
+        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
+
+        for (; pfn < end; pfn++) {
+                struct page *page = pfn_to_page(pfn);
+                ClearPageReserved(page);
+                init_page_count(page);
+                __free_page(page);
+                pages++;
+        }
+
+        if (size && s)
+                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+        return pages;
+}
+
+static inline void
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+        struct page *start_pg, *end_pg;
+        unsigned long pg, pgend;
+
+        /*
+         * Convert start_pfn/end_pfn to a struct page pointer.
+         */
+        start_pg = pfn_to_page(start_pfn - 1) + 1;
+        end_pg = pfn_to_page(end_pfn);
+
+        /*
+         * Convert to physical addresses, and
+         * round start upwards and end downwards.
+         */
+        pg = PAGE_ALIGN(__pa(start_pg));
+        pgend = __pa(end_pg) & PAGE_MASK;
+
+        /*
+         * If there are free pages between these,
+         * free the section of the memmap array.
+         */
+        if (pg < pgend)
+                free_bootmem(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(struct meminfo *mi)
+{
+        unsigned long bank_start, prev_bank_end = 0;
+        unsigned int i;
+
+        /*
+         * This relies on each bank being in address order.
+         * The banks are sorted previously in bootmem_init().
+         */
+        for_each_bank(i, mi) {
+                struct membank *bank = &mi->bank[i];
+
+                bank_start = bank_pfn_start(bank);
+
+                /*
+                 * If we had a previous bank, and there is a space
+                 * between the current bank and the previous, free it.
+                 */
+                if (prev_bank_end && prev_bank_end < bank_start)
+                        free_memmap(prev_bank_end, bank_start);
+
+                /*
+                 * Align up here since the VM subsystem insists that the
+                 * memmap entries are valid from the bank end aligned to
+                 * MAX_ORDER_NR_PAGES.
+                 */
+                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+        }
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+        unsigned long reserved_pages, free_pages;
+        struct memblock_region *reg;
+        int i;
+
+        max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+        /* this will put all unused low memory onto the freelists */
+        free_unused_memmap(&meminfo);
+
+        totalram_pages += free_all_bootmem();
+
+        reserved_pages = free_pages = 0;
+
+        for_each_bank(i, &meminfo) {
+                struct membank *bank = &meminfo.bank[i];
+                unsigned int pfn1, pfn2;
+                struct page *page, *end;
+
+                pfn1 = bank_pfn_start(bank);
+                pfn2 = bank_pfn_end(bank);
+
+                page = pfn_to_page(pfn1);
+                end = pfn_to_page(pfn2 - 1) + 1;
+
+                do {
+                        if (PageReserved(page))
+                                reserved_pages++;
+                        else if (!page_count(page))
+                                free_pages++;
+                        page++;
+                } while (page < end);
+        }
+
+        /*
+         * Since our memory may not be contiguous, calculate the
+         * real number of pages we have in this system.
+         */
+        printk(KERN_INFO "Memory:");
+        num_physpages = 0;
+        for_each_memblock(memory, reg) {
+                unsigned long pages = memblock_region_memory_end_pfn(reg) -
+                        memblock_region_memory_base_pfn(reg);
+                num_physpages += pages;
+                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
+        }
+        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+                nr_free_pages() << (PAGE_SHIFT-10),
+                free_pages << (PAGE_SHIFT-10),
+                reserved_pages << (PAGE_SHIFT-10),
+                totalhigh_pages << (PAGE_SHIFT-10));
+
+        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+                "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+                "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+                "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+                "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+                "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+                VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
+                DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
+                VMALLOC_START, VMALLOC_END,
+                DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
+                PAGE_OFFSET, (unsigned long)high_memory,
+                DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
+                MODULES_VADDR, MODULES_END,
+                DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
+
+                __init_begin, __init_end,
+                DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
+                _stext, _etext,
+                DIV_ROUND_UP((_etext - _stext), SZ_1K),
+                _sdata, _edata,
+                DIV_ROUND_UP((_edata - _sdata), SZ_1K));
+
+        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+        BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+                /*
+                 * On a machine this small we won't get
+                 * anywhere without overcommit, so turn
+                 * it on by default.
+                 */
+                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
+        }
+}
+
+void free_initmem(void)
+{
+        totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+                                __phys_to_pfn(__pa(__init_end)),
+                                "init");
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int keep_initrd;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+        if (!keep_initrd)
+                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+                                        __phys_to_pfn(__pa(end)),
+                                        "initrd");
+}
+
+static int __init keepinitrd_setup(char *__unused)
+{
+        keep_initrd = 1;
+        return 1;
+}
+
+__setup("keepinitrd", keepinitrd_setup);
+#endif
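
early_initrd() parses the usual start,size syntax via memparse(), so hex values and
K/M/G suffixes both work. Illustrative boot parameters (hypothetical values; the
second line matches the built-in defaults above):

        initrd=0x02000000,0x400000      (4MB initrd at physical 32MB)
        initrd=0x01000000,8M            (8MB initrd at physical 16MB)
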
diff --git a/arch/unicore32/mm/iomap.c b/arch/unicore32/mm/iomap.c
new file mode 100644
index 000000000000..a7e1a3d2e069
--- /dev/null
+++ b/arch/unicore32/mm/iomap.c
@@ -0,0 +1,56 @@
+/*
+ * linux/arch/unicore32/mm/iomap.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Map IO port and PCI memory spaces so that {read,write}[bwl] can
+ * be used to access this memory.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+        /* we map the PC legacy 64K I/O port space to PCI I/O space at 0x80030000 */
+        return (void __iomem *) (unsigned long)
+                io_p2v((port & 0xffff) + PKUNITY_PCILIO_BASE);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+        resource_size_t start = pci_resource_start(dev, bar);
+        resource_size_t len = pci_resource_len(dev, bar);
+        unsigned long flags = pci_resource_flags(dev, bar);
+
+        if (!len || !start)
+                return NULL;
+        if (maxlen && len > maxlen)
+                len = maxlen;
+        if (flags & IORESOURCE_IO)
+                return ioport_map(start, len);
+        if (flags & IORESOURCE_MEM) {
+                if (flags & IORESOURCE_CACHEABLE)
+                        return ioremap(start, len);
+                return ioremap_nocache(start, len);
+        }
+        return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+#endif
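
pci_iomap() dispatches on the BAR's resource flags, so a driver need not care whether
a BAR is port- or memory-mapped. A minimal caller-side sketch (hypothetical driver
probe; pdev, BAR 0 and the register offset are assumptions):

        void __iomem *regs = pci_iomap(pdev, 0, 0);     /* maxlen 0: map the whole BAR */
        if (!regs)
                return -ENOMEM;
        writel(0x1, regs + 0x10);       /* same call works for I/O and memory BARs */
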
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
new file mode 100644
index 000000000000..b7a605597b08
--- /dev/null
+++ b/arch/unicore32/mm/ioremap.c
@@ -0,0 +1,261 @@
+/*
+ * linux/arch/unicore32/mm/ioremap.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * This allows a driver to remap an arbitrary region of bus memory into
+ * virtual space. One should *only* use readl, writel, memcpy_toio and
+ * so on with such remapped areas.
+ *
+ * Because UniCore only has a 32-bit address space we can't address the
+ * whole of the (physical) PCI space at once. PCI huge-mode addressing
+ * allows us to circumvent this restriction by splitting PCI space into
+ * two 2GB chunks and mapping only one at a time into processor memory.
+ * We use MMU protection domains to trap any attempt to access the bank
+ * that is not currently mapped. (This isn't fully implemented yet.)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+#include <mach/map.h>
+#include "mm.h"
+
+/*
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in vm_struct->flags field.
+ */
+#define VM_UNICORE_SECTION_MAPPING      0x80000000
+
+int ioremap_page(unsigned long virt, unsigned long phys,
+                const struct mem_type *mtype)
+{
+        return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+                                __pgprot(mtype->prot_pte));
+}
+EXPORT_SYMBOL(ioremap_page);
+
+/*
+ * Section support is unsafe on SMP - if you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
+ * mask the size back to a 4MB boundary or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+        unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
+        pgd_t *pgd;
+
+        flush_cache_vunmap(addr, end);
+        pgd = pgd_offset_k(addr);
+        do {
+                pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);
+
+                pmd = *pmdp;
+                if (!pmd_none(pmd)) {
+                        /*
+                         * Clear the PMD from the page table, and
+                         * increment the kvm sequence so others
+                         * notice this change.
+                         *
+                         * Note: this is still racy on SMP machines.
+                         */
+                        pmd_clear(pmdp);
+
+                        /*
+                         * Free the page table, if there was one.
+                         */
+                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+                                pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
+                }
+
+                addr += PGDIR_SIZE;
+                pgd++;
+        } while (addr < end);
+
+        flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+                size_t size, const struct mem_type *type)
+{
+        unsigned long addr = virt, end = virt + size;
+        pgd_t *pgd;
+
+        /*
+         * Remove and free any PTE-based mapping, and
+         * sync the current kernel mapping.
+         */
+        unmap_area_sections(virt, size);
+
+        pgd = pgd_offset_k(addr);
+        do {
+                pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
+
+                set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
+                pfn += SZ_4M >> PAGE_SHIFT;
+                flush_pmd_entry(pmd);
+
+                addr += PGDIR_SIZE;
+                pgd++;
+        } while (addr < end);
+
+        return 0;
+}
+
+void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
+        unsigned long offset, size_t size, unsigned int mtype, void *caller)
+{
+        const struct mem_type *type;
+        int err;
+        unsigned long addr;
+        struct vm_struct *area;
+
+        /*
+         * High mappings must be section aligned
+         */
+        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
+                return NULL;
+
+        /*
+         * Don't allow RAM to be mapped
+         */
+        if (pfn_valid(pfn)) {
+                printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
+                        "system memory. This leads to architecturally\n"
+                        "unpredictable behaviour, and ioremap() will fail in\n"
+                        "the next kernel release. Please fix your driver.\n");
+                WARN_ON(1);
+        }
+
+        type = get_mem_type(mtype);
+        if (!type)
+                return NULL;
+
+        /*
+         * Page align the mapping size, taking account of any offset.
+         */
+        size = PAGE_ALIGN(offset + size);
+
+        area = get_vm_area_caller(size, VM_IOREMAP, caller);
+        if (!area)
+                return NULL;
+        addr = (unsigned long)area->addr;
+
+        if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+                area->flags |= VM_UNICORE_SECTION_MAPPING;
+                err = remap_area_sections(addr, pfn, size, type);
+        } else
+                err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+                                        __pgprot(type->prot_pte));
+
+        if (err) {
+                vunmap((void *)addr);
+                return NULL;
+        }
+
+        flush_cache_vmap(addr, addr + size);
+        return (void __iomem *) (offset + addr);
+}
+
+void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
+        unsigned int mtype, void *caller)
+{
+        unsigned long last_addr;
+        unsigned long offset = phys_addr & ~PAGE_MASK;
+        unsigned long pfn = __phys_to_pfn(phys_addr);
+
+        /*
+         * Don't allow wraparound or zero size
+         */
+        last_addr = phys_addr + size - 1;
+        if (!size || last_addr < phys_addr)
+                return NULL;
+
+        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+        unsigned int mtype)
+{
+        return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
+                        __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_pfn);
+
+void __iomem *
+__uc32_ioremap(unsigned long phys_addr, size_t size)
+{
+        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
+                        __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap);
+
+void __iomem *
+__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
+{
+        return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
+                        __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_cached);
+
+void __uc32_iounmap(volatile void __iomem *io_addr)
+{
+        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+        struct vm_struct **p, *tmp;
+
+        /*
+         * If this is a section based mapping we need to handle it
+         * specially as the VM subsystem does not know how to handle
+         * such a beast. We need the lock here because we need to clear
+         * all the mappings before the area can be reclaimed
+         * by someone else.
+         */
+        write_lock(&vmlist_lock);
+        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+                        if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
+                                unmap_area_sections((unsigned long)tmp->addr,
+                                                tmp->size);
+                        }
+                        break;
+                }
+        }
+        write_unlock(&vmlist_lock);
+
+        vunmap(addr);
+}
+EXPORT_SYMBOL(__uc32_iounmap);
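
The exported entry points pick the mapping granularity internally: 4MB sections when
the physical address, size, and allocated virtual address are all section-aligned,
individual pages otherwise. A hedged usage sketch (hypothetical device address;
error handling trimmed):

        void __iomem *base = __uc32_ioremap(0x90000000UL, SZ_4K);  /* assumed MMIO base */
        if (!base)
                return -ENOMEM;
        writel(0xff, base + 0x04);      /* access only via readl/writel and friends */
        __uc32_iounmap(base);
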
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
new file mode 100644
index 000000000000..3296bca0f1f7
--- /dev/null
+++ b/arch/unicore32/mm/mm.h
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/unicore32/mm/mm.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* the upper-most page table pointer */
+extern pmd_t *top_pmd;
+extern int sysctl_overcommit_memory;
+
+#define TOP_PTE(x)      pte_offset_kernel(top_pmd, x)
+
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+        return pmd_offset((pud_t *)pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+        return pmd_off(pgd_offset_k(virt), virt);
+}
+
+struct mem_type {
+        unsigned int prot_pte;
+        unsigned int prot_l1;
+        unsigned int prot_sect;
+};
+
+const struct mem_type *get_mem_type(unsigned int type);
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+void __init bootmem_init(void);
+void uc32_mm_memblock_reserve(void);
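
pmd_off_k() collapses the generic four-level page-table walk down to the two levels
this architecture really has. A short illustrative use (kernel-internal, hypothetical):

        /* Find the PMD entry covering the start of the kernel direct map. */
        pmd_t *pmd = pmd_off_k(PAGE_OFFSET);
        if (pmd_none(*pmd))
                printk(KERN_WARNING "no mapping at PAGE_OFFSET?\n");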