Diffstat (limited to 'arch/sh64/mm')
-rw-r--r--  arch/sh64/mm/cache.c         68
-rw-r--r--  arch/sh64/mm/hugetlbpage.c  188
-rw-r--r--  arch/sh64/mm/ioremap.c        4
3 files changed, 43 insertions(+), 217 deletions(-)
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
index 3b87e25ea773..c0c1b21350d8 100644
--- a/arch/sh64/mm/cache.c
+++ b/arch/sh64/mm/cache.c
@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
         }
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+                                unsigned long addr, unsigned long end)
 {
         pgd_t *pgd;
         pmd_t *pmd;
         pte_t *pte;
         pte_t entry;
+        spinlock_t *ptl;
         unsigned long paddr;
 
-        /* NOTE : all the callers of this have mm->page_table_lock held, so the
-           following page table traversal is safe even on SMP/pre-emptible. */
+        if (!mm)
+                return; /* No way to find physical address of page */
 
-        if (!mm) return; /* No way to find physical address of page */
-        pgd = pgd_offset(mm, eaddr);
-        if (pgd_bad(*pgd)) return;
+        pgd = pgd_offset(mm, addr);
+        if (pgd_bad(*pgd))
+                return;
 
-        pmd = pmd_offset(pgd, eaddr);
-        if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-        pte = pte_offset_kernel(pmd, eaddr);
-        entry = *pte;
-        if (pte_none(entry) || !pte_present(entry)) return;
-
-        paddr = pte_val(entry) & PAGE_MASK;
-
-        sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+        pmd = pmd_offset(pgd, addr);
+        if (pmd_none(*pmd) || pmd_bad(*pmd))
+                return;
+
+        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+        do {
+                entry = *pte;
+                if (pte_none(entry) || !pte_present(entry))
+                        continue;
+                paddr = pte_val(entry) & PAGE_MASK;
+                sh64_dcache_purge_coloured_phy_page(paddr, addr);
+        } while (pte++, addr += PAGE_SIZE, addr != end);
+        pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
 
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
         int n_pages;
 
         n_pages = ((end - start) >> PAGE_SHIFT);
-        if (n_pages >= 64) {
+        if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
                 sh64_dcache_purge_all();
 #else
@@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                 }
 #endif
         } else {
-                /* 'Small' range */
-                unsigned long aligned_start;
-                unsigned long eaddr;
-                unsigned long last_page_start;
-
-                aligned_start = start & PAGE_MASK;
-                /* 'end' is 1 byte beyond the end of the range */
-                last_page_start = (end - 1) & PAGE_MASK;
-
-                eaddr = aligned_start;
-                while (eaddr <= last_page_start) {
-                        sh64_dcache_purge_user_page(mm, eaddr);
-                        eaddr += PAGE_SIZE;
-                }
+                /* Small range, covered by a single page table page */
+                start &= PAGE_MASK;     /* should already be so */
+                end = PAGE_ALIGN(end);  /* should already be so */
+                sh64_dcache_purge_user_pages(mm, start, end);
         }
         return;
 }
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
            addresses from the user address space specified by mm, after writing
            back any dirty data.
 
-           Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-           Note(2), this is called with mm->page_table_lock held.*/
+           Note, 'end' is 1 byte beyond the end of the range to flush. */
 
         sh64_dcache_purge_user_range(mm, start, end);
         sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
            the I-cache must be searched too in case the page in question is
            both writable and being executed from (e.g. stack trampolines.)
 
-           Note(1), this is called with mm->page_table_lock held.
+           Note, this is called with pte lock held.
         */
 
         sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
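
The rewritten purge path above replaces a per-page helper, which relied on its callers holding mm->page_table_lock, with a range walk that takes the pte lock itself via pte_offset_map_lock()/pte_unmap_unlock(). One such walk can only cover PTEs that share a single page-table page, which is why the caller now falls back to sh64_dcache_purge_all() whenever the range crosses a PMD boundary (the new (start ^ (end - 1)) & PMD_MASK test). The fragment below is a minimal sketch of that locking pattern, not part of the patch; the function name and comment placeholders are illustrative.

/*
 * Illustrative sketch only: the per-PMD PTE walk used by
 * sh64_dcache_purge_user_pages() above.  The lock taken by
 * pte_offset_map_lock() replaces the old reliance on the caller
 * holding mm->page_table_lock.
 */
static void example_purge_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long addr, unsigned long end)
{
        spinlock_t *ptl;
        pte_t *pte;

        /* addr..end must lie within the range mapped by this one pmd */
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                pte_t entry = *pte;

                if (pte_none(entry) || !pte_present(entry))
                        continue;       /* nothing usable mapped at this slot */
                /* act on the physical page: pte_val(entry) & PAGE_MASK */
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl); /* pte has advanced one past the last slot */
}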
diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c
index dcd9c8a8baf8..ed6a505b3ee2 100644
--- a/arch/sh64/mm/hugetlbpage.c
+++ b/arch/sh64/mm/hugetlbpage.c
@@ -54,41 +54,31 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
         return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-                         struct page *page, pte_t * page_table, int write_access)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *ptep, pte_t entry)
 {
-        unsigned long i;
-        pte_t entry;
-
-        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-
-        if (write_access)
-                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-                                                       vma->vm_page_prot)));
-        else
-                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-        entry = pte_mkyoung(entry);
-        mk_pte_huge(entry);
+        int i;
 
         for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                set_pte(page_table, entry);
-                page_table++;
-
+                set_pte_at(mm, addr, ptep, entry);
+                ptep++;
+                addr += PAGE_SIZE;
                 pte_val(entry) += PAGE_SIZE;
         }
 }
 
-pte_t huge_ptep_get_and_clear(pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep)
 {
         pte_t entry;
+        int i;
 
         entry = *ptep;
 
         for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                pte_clear(pte);
-                pte++;
+                pte_clear(mm, addr, ptep);
+                addr += PAGE_SIZE;
+                ptep++;
         }
 
         return entry;
@@ -106,79 +96,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
         return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-                            struct vm_area_struct *vma)
-{
-        pte_t *src_pte, *dst_pte, entry;
-        struct page *ptepage;
-        unsigned long addr = vma->vm_start;
-        unsigned long end = vma->vm_end;
-        int i;
-
-        while (addr < end) {
-                dst_pte = huge_pte_alloc(dst, addr);
-                if (!dst_pte)
-                        goto nomem;
-                src_pte = huge_pte_offset(src, addr);
-                BUG_ON(!src_pte || pte_none(*src_pte));
-                entry = *src_pte;
-                ptepage = pte_page(entry);
-                get_page(ptepage);
-                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                        set_pte(dst_pte, entry);
-                        pte_val(entry) += PAGE_SIZE;
-                        dst_pte++;
-                }
-                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-                addr += HPAGE_SIZE;
-        }
-        return 0;
-
-nomem:
-        return -ENOMEM;
-}
-
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                        struct page **pages, struct vm_area_struct **vmas,
-                        unsigned long *position, int *length, int i)
-{
-        unsigned long vaddr = *position;
-        int remainder = *length;
-
-        WARN_ON(!is_vm_hugetlb_page(vma));
-
-        while (vaddr < vma->vm_end && remainder) {
-                if (pages) {
-                        pte_t *pte;
-                        struct page *page;
-
-                        pte = huge_pte_offset(mm, vaddr);
-
-                        /* hugetlb should be locked, and hence, prefaulted */
-                        BUG_ON(!pte || pte_none(*pte));
-
-                        page = pte_page(*pte);
-
-                        WARN_ON(!PageCompound(page));
-
-                        get_page(page);
-                        pages[i] = page;
-                }
-
-                if (vmas)
-                        vmas[i] = vma;
-
-                vaddr += PAGE_SIZE;
-                --remainder;
-                ++i;
-        }
-
-        *length = remainder;
-        *position = vaddr;
-
-        return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
                               unsigned long address, int write)
 {
@@ -195,84 +112,3 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 {
         return NULL;
 }
-
-void unmap_hugepage_range(struct vm_area_struct *vma,
-                          unsigned long start, unsigned long end)
-{
-        struct mm_struct *mm = vma->vm_mm;
-        unsigned long address;
-        pte_t *pte;
-        struct page *page;
-        int i;
-
-        BUG_ON(start & (HPAGE_SIZE - 1));
-        BUG_ON(end & (HPAGE_SIZE - 1));
-
-        for (address = start; address < end; address += HPAGE_SIZE) {
-                pte = huge_pte_offset(mm, address);
-                BUG_ON(!pte);
-                if (pte_none(*pte))
-                        continue;
-                page = pte_page(*pte);
-                put_page(page);
-                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                        pte_clear(mm, address+(i*PAGE_SIZE), pte);
-                        pte++;
-                }
-        }
-        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-        flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-        struct mm_struct *mm = current->mm;
-        unsigned long addr;
-        int ret = 0;
-
-        BUG_ON(vma->vm_start & ~HPAGE_MASK);
-        BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-        spin_lock(&mm->page_table_lock);
-        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-                unsigned long idx;
-                pte_t *pte = huge_pte_alloc(mm, addr);
-                struct page *page;
-
-                if (!pte) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                if (!pte_none(*pte))
-                        continue;
-
-                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-                page = find_get_page(mapping, idx);
-                if (!page) {
-                        /* charge the fs quota first */
-                        if (hugetlb_get_quota(mapping)) {
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        page = alloc_huge_page();
-                        if (!page) {
-                                hugetlb_put_quota(mapping);
-                                ret = -ENOMEM;
-                                goto out;
-                        }
-                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-                        if (! ret) {
-                                unlock_page(page);
-                        } else {
-                                hugetlb_put_quota(mapping);
-                                free_huge_page(page);
-                                goto out;
-                        }
-                }
-                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-        }
-out:
-        spin_unlock(&mm->page_table_lock);
-        return ret;
-}
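
On sh64 a huge page is simply (1 << HUGETLB_PAGE_ORDER) consecutive ordinary PTEs, which is why both helpers above advance ptep, addr and the physical address held in the pte by PAGE_SIZE for every slot. With the arch-private set_huge_pte()/mk_pte_huge() and the copy/follow/unmap/prefault routines removed from this file, the remaining entry points are just the mm/addr-taking pair shown in the first hunk. The snippet below is an illustrative sketch of driving that pair; the wrapper is hypothetical, and building a valid huge-page pte (e.g. one carrying _PAGE_SZHUGE) is assumed to happen elsewhere.

/*
 * Illustrative sketch only, not part of the patch: tear down and
 * reinstall one huge mapping through the helpers defined above.
 */
static int example_replace_huge_mapping(struct mm_struct *mm,
                                        unsigned long addr, pte_t entry)
{
        pte_t *ptep = huge_pte_offset(mm, addr);  /* defined earlier in this file */
        pte_t old;

        if (!ptep)
                return -EINVAL;

        /* clears every 4K slot of the huge page, returns the first pte */
        old = huge_ptep_get_and_clear(mm, addr, ptep);

        /* refills the same slots, stepping the pte by PAGE_SIZE each time */
        set_huge_pte_at(mm, addr, ptep, entry);

        return pte_none(old) ? 0 : 1;   /* 1 if something was mapped before */
}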
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
index f4003da556bc..fb1866fa2c9d 100644
--- a/arch/sh64/mm/ioremap.c
+++ b/arch/sh64/mm/ioremap.c
@@ -79,7 +79,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
                 BUG();
 
         do {
-                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+                pte_t * pte = pte_alloc_kernel(pmd, address);
                 if (!pte)
                         return -ENOMEM;
                 remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -101,7 +101,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
         flush_cache_all();
         if (address >= end)
                 BUG();
-        spin_lock(&init_mm.page_table_lock);
         do {
                 pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                 error = -ENOMEM;
@@ -115,7 +114,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         } while (address && (address < end));
-        spin_unlock(&init_mm.page_table_lock);
         flush_tlb_all();
         return 0;
 }
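
The ioremap changes are mechanical: pte_alloc_kernel() loses its &init_mm argument, and the explicit spin_lock/spin_unlock of init_mm.page_table_lock around the pmd/pte population loop is dropped, presumably because kernel page-table allocation now serialises itself. A condensed sketch of the resulting loop shape follows; it is illustrative only, and the helper name and the PMD stepping are placeholders rather than code from this file.

/*
 * Illustrative sketch only: the post-patch shape of a
 * remap_area_pmd() style loop -- two-argument pte_alloc_kernel()
 * and no caller-side init_mm.page_table_lock.
 */
static int example_alloc_kernel_ptes(pmd_t *pmd, unsigned long address,
                                     unsigned long end)
{
        do {
                pte_t *pte = pte_alloc_kernel(pmd, address);

                if (!pte)
                        return -ENOMEM;
                /* ... fill the ptes covering this pmd here ... */
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && address < end);

        return 0;
}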