author     David S. Miller <davem@davemloft.net>    2005-09-02 00:51:26 -0400
committer  David S. Miller <davem@davemloft.net>    2005-09-02 00:51:26 -0400
commit     a7a6cac204147634aba7487e4d618b028ff54c0d (patch)
tree       8c68a738984d823a830e95c72fd3df4d6c4dc6f6 /arch/sparc
parent     8a36895c0ddac143b7f0e87d46153f4f75d9fff7 (diff)
[SPARC]: Kill io_remap_page_range()
It's been deprecated long enough and there are no in-tree
users any longer.
Signed-off-by: David S. Miller <davem@davemloft.net>
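
Note: out-of-tree drivers that still carry the removed call can switch to io_remap_pfn_range(), which remains exported above. A minimal, hypothetical conversion sketch for a driver mmap handler follows; example_mmap and EXAMPLE_MMIO_PHYS are illustrative names, not part of this commit, and the old sparc-specific I/O space argument is assumed to be folded into the pfn by the caller.

/*
 * Hypothetical sketch (not from this commit): map a device's MMIO
 * region into userspace with io_remap_pfn_range() instead of the
 * removed io_remap_page_range().
 */
#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed physical base of the device's MMIO region. */
#define EXAMPLE_MMIO_PHYS 0x10000000UL

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * io_remap_pfn_range() takes a page frame number, so shift the
         * physical address down by PAGE_SHIFT; the removed call took the
         * raw physical offset plus a separate I/O space argument.
         */
        return io_remap_pfn_range(vma, vma->vm_start,
                                  EXAMPLE_MMIO_PHYS >> PAGE_SHIFT,
                                  size, vma->vm_page_prot);
}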
Diffstat (limited to 'arch/sparc')
 -rw-r--r--  arch/sparc/kernel/sparc_ksyms.c |  1
 -rw-r--r--  arch/sparc/mm/generic.c         | 57
 2 files changed, 0 insertions, 58 deletions
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 8faa8dc4de43..5d974a2b735a 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -175,7 +175,6 @@ EXPORT_SYMBOL(set_auxio);
 EXPORT_SYMBOL(get_auxio);
 #endif
 EXPORT_SYMBOL(request_fast_irq);
-EXPORT_SYMBOL(io_remap_page_range);
 EXPORT_SYMBOL(io_remap_pfn_range);
 /* P3: iounit_xxx may be needed, sun4d users */
 /* EXPORT_SYMBOL(iounit_map_dma_init); */
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index db27eee3bda1..20ccb957fb77 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -16,31 +16,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static inline void forget_pte(pte_t page)
-{
-#if 0 /* old 2.4 code */
-        if (pte_none(page))
-                return;
-        if (pte_present(page)) {
-                unsigned long pfn = pte_pfn(page);
-                struct page *ptpage;
-                if (!pfn_valid(pfn))
-                        return;
-                ptpage = pfn_to_page(pfn);
-                if (PageReserved(ptpage))
-                        return;
-                page_cache_release(ptpage);
-                return;
-        }
-        swap_free(pte_to_swp_entry(page));
-#else
-        if (!pte_none(page)) {
-                printk("forget_pte: old mapping existed!\n");
-                BUG();
-        }
-#endif
-}
-
 /* Remap IO memory, the same way as remap_pfn_range(), but use
  * the obio memory space.
  *
@@ -60,7 +35,6 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigne
                 pte_t oldpage = *pte;
                 pte_clear(mm, address, pte);
                 set_pte(pte, mk_pte_io(offset, prot, space));
-                forget_pte(oldpage);
                 address += PAGE_SIZE;
                 offset += PAGE_SIZE;
                 pte++;
@@ -88,37 +62,6 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned
         return 0;
 }
 
-int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
-{
-        int error = 0;
-        pgd_t * dir;
-        unsigned long beg = from;
-        unsigned long end = from + size;
-        struct mm_struct *mm = vma->vm_mm;
-
-        prot = __pgprot(pg_iobits);
-        offset -= from;
-        dir = pgd_offset(mm, from);
-        flush_cache_range(vma, beg, end);
-
-        spin_lock(&mm->page_table_lock);
-        while (from < end) {
-                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
-                error = -ENOMEM;
-                if (!pmd)
-                        break;
-                error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
-                if (error)
-                        break;
-                from = (from + PGDIR_SIZE) & PGDIR_MASK;
-                dir++;
-        }
-        spin_unlock(&mm->page_table_lock);
-
-        flush_tlb_range(vma, beg, end);
-        return error;
-}
-
 int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                        unsigned long pfn, unsigned long size, pgprot_t prot)
 {