-rw-r--r--   arch/sh64/mm/Makefile      |  44
-rw-r--r--   arch/sh64/mm/consistent.c  |  53
-rw-r--r--   arch/sh64/mm/hugetlbpage.c | 105
-rw-r--r--   arch/sh64/mm/init.c        | 189
4 files changed, 0 insertions, 391 deletions
diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile
deleted file mode 100644
index d0e813632480..000000000000
--- a/arch/sh64/mm/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 2000, 2001 Paolo Alberelli
-# Copyright (C) 2003, 2004 Paul Mundt
-#
-# Makefile for the sh64-specific parts of the Linux memory manager.
-#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-
-obj-y := cache.o consistent.o extable.o fault.o init.o ioremap.o \
-        tlbmiss.o tlb.o
-
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-
-# Special flags for tlbmiss.o. This puts restrictions on the number of
-# caller-save registers that the compiler can target when building this file.
-# This is required because the code is called from a context in entry.S where
-# very few registers have been saved in the exception handler (for speed
-# reasons).
-# The caller save registers that have been saved and which can be used are
-# r2,r3,r4,r5 : argument passing
-# r15, r18 : SP and LINK
-# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
-#         use of them, so it's probably beneficial to performance to save them
-#         and have them available for it.
-#
-# The resources not listed below are callee save, i.e. the compiler is free to
-# use any of them and will spill them to the stack itself.
-
-CFLAGS_tlbmiss.o += -ffixed-r7 \
-        -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
-        -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
-        -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
-        -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
-        -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
-        -ffixed-r41 -ffixed-r42 -ffixed-r43 \
-        -ffixed-r60 -ffixed-r61 -ffixed-r62 \
-        -fomit-frame-pointer
diff --git a/arch/sh64/mm/consistent.c b/arch/sh64/mm/consistent.c
deleted file mode 100644
index c439620402cb..000000000000
--- a/arch/sh64/mm/consistent.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org)
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Dynamic DMA mapping support.
- */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-void *consistent_alloc(struct pci_dev *hwdev, size_t size,
-                       dma_addr_t *dma_handle)
-{
-        void *ret;
-        int gfp = GFP_ATOMIC;
-        void *vp;
-
-        if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
-                gfp |= GFP_DMA;
-
-        ret = (void *)__get_free_pages(gfp, get_order(size));
-
-        /* now call our friend ioremap_nocache to give us an uncached area */
-        vp = ioremap_nocache(virt_to_phys(ret), size);
-
-        if (vp != NULL) {
-                memset(vp, 0, size);
-                *dma_handle = virt_to_phys(ret);
-                dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
-        }
-
-        return vp;
-}
-EXPORT_SYMBOL(consistent_alloc);
-
-void consistent_free(struct pci_dev *hwdev, size_t size,
-                     void *vaddr, dma_addr_t dma_handle)
-{
-        void *alloc;
-
-        alloc = phys_to_virt((unsigned long)dma_handle);
-        free_pages((unsigned long)alloc, get_order(size));
-
-        iounmap(vaddr);
-}
-EXPORT_SYMBOL(consistent_free);
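
The zone-selection test at the top of the removed consistent_alloc() is the part most easily misread: a NULL device, or any device whose dma_mask does not cover the full 32-bit range, is served from the DMA zone. Below is a minimal user-space sketch of just that decision; the flag values, device masks, and the pick_gfp() helper are hypothetical stand-ins, not the kernel's GFP flags or struct pci_dev.

/*
 * Illustrative, user-space-only sketch of the zone-selection test in
 * consistent_alloc() above.  All names and values here are hypothetical.
 */
#include <stdio.h>

#define SKETCH_GFP_ATOMIC 0x01u
#define SKETCH_GFP_DMA    0x02u

/* dma_mask == NULL models "no device"; a mask narrower than 32 bits
 * means the device cannot reach all of low memory, so allocate from
 * the DMA zone. */
static unsigned int pick_gfp(const unsigned long *dma_mask)
{
        unsigned int gfp = SKETCH_GFP_ATOMIC;

        if (dma_mask == NULL || *dma_mask != 0xffffffffUL)
                gfp |= SKETCH_GFP_DMA;
        return gfp;
}

int main(void)
{
        unsigned long full   = 0xffffffffUL; /* 32-bit capable device  */
        unsigned long narrow = 0x00ffffffUL; /* 24-bit ISA-style device */

        printf("full mask   -> gfp %#x\n", pick_gfp(&full));
        printf("narrow mask -> gfp %#x\n", pick_gfp(&narrow));
        printf("no device   -> gfp %#x\n", pick_gfp(NULL));
        return 0;
}

After choosing the zone, the removed function remaps the freshly allocated pages uncached via ioremap_nocache() and returns that uncached mapping, while *dma_handle carries the physical address of the backing pages.
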
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
deleted file mode 100644
index fa66daa2dfa9..000000000000
--- a/arch/sh64/mm/hugetlbpage.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * arch/sh64/mm/hugetlbpage.c
- *
- * SuperH HugeTLB page support.
- *
- * Cloned from sparc64 by Paul Mundt.
- *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
- */
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/sysctl.h>
-
-#include <asm/mman.h>
-#include <asm/pgalloc.h>
-#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
-{
-        pgd_t *pgd;
-        pmd_t *pmd;
-        pte_t *pte = NULL;
-
-        pgd = pgd_offset(mm, addr);
-        if (pgd) {
-                pmd = pmd_alloc(mm, pgd, addr);
-                if (pmd)
-                        pte = pte_alloc_map(mm, pmd, addr);
-        }
-        return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-        pgd_t *pgd;
-        pmd_t *pmd;
-        pte_t *pte = NULL;
-
-        pgd = pgd_offset(mm, addr);
-        if (pgd) {
-                pmd = pmd_offset(pgd, addr);
-                if (pmd)
-                        pte = pte_offset_map(pmd, addr);
-        }
-        return pte;
-}
-
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
-        return 0;
-}
-
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t entry)
-{
-        int i;
-
-        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                set_pte_at(mm, addr, ptep, entry);
-                ptep++;
-                addr += PAGE_SIZE;
-                pte_val(entry) += PAGE_SIZE;
-        }
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
-{
-        pte_t entry;
-        int i;
-
-        entry = *ptep;
-
-        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                pte_clear(mm, addr, ptep);
-                addr += PAGE_SIZE;
-                ptep++;
-        }
-
-        return entry;
-}
-
-struct page *follow_huge_addr(struct mm_struct *mm,
-                              unsigned long address, int write)
-{
-        return ERR_PTR(-EINVAL);
-}
-
-int pmd_huge(pmd_t pmd)
-{
-        return 0;
-}
-
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-                             pmd_t *pmd, int write)
-{
-        return NULL;
-}
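
The two loops above are the heart of the removed file: a huge page has no dedicated page-table format here, it is simply backed by (1 << HUGETLB_PAGE_ORDER) consecutive base PTEs, each advanced by PAGE_SIZE in both virtual and physical space. Here is a small user-space sketch of that layout; PAGE_SIZE, the order, and the starting addresses are hypothetical stand-ins, not the values the sh64 kernel actually used.

/*
 * Illustrative, user-space-only sketch of the layout set_huge_pte_at()
 * above establishes.  All constants and addresses are hypothetical.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE          4096UL
#define SKETCH_HUGETLB_PAGE_ORDER 4        /* 16 base pages per huge page */

int main(void)
{
        unsigned long addr  = 0x00400000UL;  /* hypothetical huge-page VA */
        unsigned long entry = 0x08000000UL;  /* hypothetical huge-page PA */
        int i;

        /* Mirrors the kernel loop: each step would be a set_pte_at() call. */
        for (i = 0; i < (1 << SKETCH_HUGETLB_PAGE_ORDER); i++) {
                printf("pte[%2d]: va %#010lx -> pa %#010lx\n", i, addr, entry);
                addr  += SKETCH_PAGE_SIZE;
                entry += SKETCH_PAGE_SIZE;
        }
        return 0;
}

huge_ptep_get_and_clear() walks the same range in the same order, clearing each base PTE and returning the value of the first one.
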
diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c
deleted file mode 100644
index 21cf42de23e2..000000000000
--- a/arch/sh64/mm/init.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/mm/init.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- */
-
-#include <linux/init.h>
-#include <linux/rwsem.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/bootmem.h>
-
-#include <asm/mmu_context.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache;
-pgd_t * mmu_pdtp_cache;
-int after_bootmem = 0;
-
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-
-extern unsigned char empty_zero_page[PAGE_SIZE];
-extern unsigned char empty_bad_page[PAGE_SIZE];
-extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
-/* It'd be good if these lines were in the standard header file. */
-#define START_PFN       (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
-#define MAX_LOW_PFN     (NODE_DATA(0)->bdata->node_low_pfn)
-
-
-void show_mem(void)
-{
-        int i, total = 0, reserved = 0;
-        int shared = 0, cached = 0;
-
-        printk("Mem-info:\n");
-        show_free_areas();
-        printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-        i = max_mapnr;
-        while (i-- > 0) {
-                total++;
-                if (PageReserved(mem_map+i))
-                        reserved++;
-                else if (PageSwapCache(mem_map+i))
-                        cached++;
-                else if (page_count(mem_map+i))
-                        shared += page_count(mem_map+i) - 1;
-        }
-        printk("%d pages of RAM\n",total);
-        printk("%d reserved pages\n",reserved);
-        printk("%d pages shared\n",shared);
-        printk("%d pages swap cached\n",cached);
-        printk("%ld pages in page table cache\n", quicklist_total_size());
-}
-
-/*
- * paging_init() sets up the page tables.
- *
- * head.S already did a lot to set up address translation for the kernel.
- * Here we comes with:
- * . MMU enabled
- * . ASID set (SR)
- * . some 512MB regions being mapped of which the most relevant here is:
- *   . CACHED segment (ASID 0 [irrelevant], shared AND NOT user)
- * . possible variable length regions being mapped as:
- *   . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user)
- * . All of the memory regions are placed, independently from the platform
- *   on high addresses, above 0x80000000.
- * . swapper_pg_dir is already cleared out by the .space directive
- *   in any case swapper does not require a real page directory since
- *   it's all kernel contained.
- *
- * Those pesky NULL-reference errors in the kernel are then
- * dealt with by not mapping address 0x00000000 at all.
- *
- */
-void __init paging_init(void)
-{
-        unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
-        pgd_init((unsigned long)swapper_pg_dir);
-        pgd_init((unsigned long)swapper_pg_dir +
-                 sizeof(pgd_t) * USER_PTRS_PER_PGD);
-
-        mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
-
-        zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
-        NODE_DATA(0)->node_mem_map = NULL;
-        free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
-}
-
-void __init mem_init(void)
-{
-        int codesize, reservedpages, datasize, initsize;
-        int tmp;
-
-        max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
-        high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
-
-        /*
-         * Clear the zero-page.
-         * This is not required but we might want to re-use
-         * this very page to pass boot parameters, one day.
-         */
-        memset(empty_zero_page, 0, PAGE_SIZE);
-
-        /* this will put all low memory onto the freelists */
-        totalram_pages += free_all_bootmem_node(NODE_DATA(0));
-        reservedpages = 0;
-        for (tmp = 0; tmp < num_physpages; tmp++)
-                /*
-                 * Only count reserved RAM pages
-                 */
-                if (PageReserved(mem_map+tmp))
-                        reservedpages++;
-
-        after_bootmem = 1;
-
-        codesize = (unsigned long) &_etext - (unsigned long) &_text;
-        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
-        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
-               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-               max_mapnr << (PAGE_SHIFT-10),
-               codesize >> 10,
-               reservedpages << (PAGE_SHIFT-10),
-               datasize >> 10,
-               initsize >> 10);
-}
-
-void free_initmem(void)
-{
-        unsigned long addr;
-
-        addr = (unsigned long)(&__init_begin);
-        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(addr));
-                init_page_count(virt_to_page(addr));
-                free_page(addr);
-                totalram_pages++;
-        }
-        printk ("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-        unsigned long p;
-        for (p = start; p < end; p += PAGE_SIZE) {
-                ClearPageReserved(virt_to_page(p));
-                init_page_count(virt_to_page(p));
-                free_page(p);
-                totalram_pages++;
-        }
-        printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-}
-#endif
-