 Documentation/cachetlb.txt   |  2 +-
 arch/powerpc/mm/imalloc.c    |  3 ++-
 arch/powerpc/mm/pgtable_64.c |  1 -
 include/linux/vmalloc.h      |  3 ++-
 mm/vmalloc.c                 | 13 +++++++++----
 5 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index debf6813934a..866b76139420 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -253,7 +253,7 @@ Here are the routines, one by one:
 
 	The first of these two routines is invoked after map_vm_area()
 	has installed the page table entries.  The second is invoked
-	before unmap_vm_area() deletes the page table entries.
+	before unmap_kernel_range() deletes the page table entries.
 
 There exists another whole class of cpu cache issues which currently
 require a whole different set of interfaces to handle properly.
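
Note (not part of the patch): the "two routines" referred to above are, assuming the surrounding cachetlb.txt text, the flush_cache_vmap()/flush_cache_vunmap() hooks. A minimal sketch of the documented ordering on the teardown side, using a hypothetical helper name, illustration only and not code from this tree:

    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>

    /*
     * Sketch of the documented ordering: flush the cache for the kernel
     * virtual range first, then let unmap_kernel_range() delete the page
     * table entries.  example_teardown() is hypothetical.
     */
    static void example_teardown(struct vm_struct *area)
    {
            unsigned long start = (unsigned long)area->addr;
            unsigned long end = start + area->size;

            flush_cache_vunmap(start, end);         /* before the PTEs are removed */
            unmap_kernel_range(start, area->size);  /* removes the PTEs */
    }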
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index c831815c31f0..9eddf37303d7 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -301,7 +301,8 @@ void im_free(void * addr)
 	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
 			*p = tmp->next;
-			unmap_vm_area(tmp);
+			unmap_kernel_range((unsigned long)tmp->addr,
+					   tmp->size);
 			kfree(tmp);
 			mutex_unlock(&imlist_mutex);
 			return;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ad6e135bf212..fa5c828d3876 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -240,7 +240,6 @@ int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4b7ee83787c1..132b260aef1e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -65,9 +65,10 @@ extern struct vm_struct *get_vm_area_node(unsigned long size,
 					  unsigned long flags, int node,
 					  gfp_t gfp_mask);
 extern struct vm_struct *remove_vm_area(void *addr);
+
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
-extern void unmap_vm_area(struct vm_struct *area);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /*
  * Internals.  Dont't use..
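
Note (not part of the patch): with the header change above, a caller of the old unmap_vm_area(area) would now pass the start address and size explicitly, mirroring the imalloc.c conversion earlier in this diff. A hypothetical example:

    #include <linux/vmalloc.h>

    /* Hypothetical out-of-tree caller converted to the new interface. */
    static void example_unmap(struct vm_struct *area)
    {
            /* old: unmap_vm_area(area); */
            unmap_kernel_range((unsigned long)area->addr, area->size);
    }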
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d3a9c5368257..ddf87145cc49 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -68,12 +68,12 @@ static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
 	} while (pud++, addr = next, addr != end);
 }
 
-void unmap_vm_area(struct vm_struct *area)
+void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long addr = (unsigned long) area->addr;
-	unsigned long end = addr + area->size;
+	unsigned long start = addr;
+	unsigned long end = addr + size;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
@@ -84,7 +84,12 @@ void unmap_vm_area(struct vm_struct *area)
 			continue;
 		vunmap_pud_range(pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range((unsigned long) area->addr, end);
+	flush_tlb_kernel_range(start, end);
+}
+
+static void unmap_vm_area(struct vm_struct *area)
+{
+	unmap_kernel_range((unsigned long)area->addr, area->size);
 }
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,