author    Russell King <rmk+kernel@arm.linux.org.uk>    2009-11-20 13:19:52 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2009-11-24 12:41:36 -0500
commit    acaac256b3a14a09ab278409a72d119f2d75b02b (patch)
tree      f9e3c66fddc3e40e86d4b03fec166101da1fdb04 /arch/arm/mm
parent    31ebf94435f74294523683867fe0b89000e61521 (diff)
ARM: dma-mapping: get rid of setting/clearing the reserved page bit
It's unnecessary; x86 doesn't do it, and ALSA doesn't require it anymore.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Greg Ungerer <gerg@uclinux.org>
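To make the removed bookkeeping concrete: before this patch, the free path had to translate every cleared PTE back into its struct page purely so it could undo the SetPageReserved() done at allocation time. A minimal sketch of that lookup chain, factored out of the pre-patch __dma_free_remap() loop (the helper name is illustrative, not from the file; pte_pfn/pfn_valid/pfn_to_page are the standard kernel helpers):

/*
 * Sketch only: the pte -> pfn -> struct page walk the pre-patch code
 * performed for each page, solely to clear the reserved bit again.
 */
static void clear_reserved_from_pte(pte_t pte)
{
	if (!pte_none(pte) && pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
		}
	}
}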
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- arch/arm/mm/dma-mapping.c | 23 +++--------------------
1 file changed, 3 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 707d81247630..6fac793329c6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -209,10 +209,6 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	do {
 		BUG_ON(!pte_none(*pte));
 
-		/*
-		 * x86 does not mark the pages reserved...
-		 */
-		SetPageReserved(page);
 		set_pte_ext(pte, mk_pte(page, prot), 0);
 		page++;
 		pte++;
@@ -257,7 +253,6 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 	addr = c->vm_start;
 	do {
 		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-		unsigned long pfn;
 
 		ptep++;
 		addr += PAGE_SIZE;
@@ -267,21 +262,9 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
 			ptep = consistent_pte[++idx];
 		}
 
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				/*
-				 * x86 does not mark the pages reserved...
-				 */
-				ClearPageReserved(page);
-				continue;
-			}
-		}
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
+		if (pte_none(pte) || !pte_present(pte))
+			printk(KERN_CRIT "%s: bad page in kernel page table\n",
+			       __func__);
 	} while (size -= PAGE_SIZE);
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
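As for why the bit can go at all: DMA-coherent pages reach userspace through remap_pfn_range()-style mappings (on ARM, typically via dma_mmap_coherent()), which do not consult PG_reserved, and per the commit message ALSA no longer does its own PageReserved checks either. A hedged sketch of a driver mmap handler under that assumption (the mydev structure and its fields are hypothetical):

/*
 * Illustrative only: a driver handing DMA-coherent memory to userspace
 * via dma_mmap_coherent(). Nothing in this path looks at PG_reserved,
 * which is why setting/clearing it in dma-mapping.c buys nothing.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;	/* hypothetical driver state */

	return dma_mmap_coherent(md->dev, vma, md->cpu_addr, md->dma_handle,
				 vma->vm_end - vma->vm_start);
}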