Diffstat (limited to 'mm')
-rw-r--r--	mm/nommu.c	8
-rw-r--r--	mm/vmalloc.c	16
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index b989cb928a7c..f3bfd015c40b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -167,7 +167,7 @@ EXPORT_SYMBOL(get_user_pages);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-void vfree(void *addr)
+void vfree(const void *addr)
 {
 	kfree(addr);
 }
@@ -183,13 +183,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 }
 EXPORT_SYMBOL(__vmalloc);
 
-struct page * vmalloc_to_page(void *addr)
+struct page *vmalloc_to_page(const void *addr)
 {
 	return virt_to_page(addr);
 }
 EXPORT_SYMBOL(vmalloc_to_page);
 
-unsigned long vmalloc_to_pfn(void *addr)
+unsigned long vmalloc_to_pfn(const void *addr)
 {
 	return page_to_pfn(virt_to_page(addr));
 }
@@ -267,7 +267,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_
 }
 EXPORT_SYMBOL(vmap);
 
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
 	BUG();
 }
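The nommu stubs above now take const pointers, matching the MMU implementations that follow. As a rough, hypothetical illustration of what the constified prototypes permit, a caller that only holds a read-only view of a vmalloc()'ed buffer can free it without casting away const (struct fw_blob and release_blob() are invented names, not part of this patch):

#include <linux/types.h>
#include <linux/vmalloc.h>

/* Hypothetical caller: the data is treated as read-only after load. */
struct fw_blob {
	const void *data;	/* allocated with vmalloc() */
	size_t size;
};

static void release_blob(struct fw_blob *blob)
{
	/* With vfree(const void *), no (void *) cast is needed here. */
	vfree(blob->data);
	blob->data = NULL;
}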
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e4c59a30835b..21abac2c3941 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(map_vm_area);
 /*
  * Map a vmalloc()-space virtual address to the physical page.
  */
-struct page *vmalloc_to_page(void *vmalloc_addr)
+struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
 	unsigned long addr = (unsigned long) vmalloc_addr;
 	struct page *page = NULL;
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(vmalloc_to_page);
 /*
  * Map a vmalloc()-space virtual address to the physical page frame number.
  */
-unsigned long vmalloc_to_pfn(void *vmalloc_addr)
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 {
 	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 }
@@ -306,7 +306,7 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
 }
 
 /* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(const void *addr)
 {
 	struct vm_struct *tmp;
 
@@ -319,7 +319,7 @@ static struct vm_struct *__find_vm_area(void *addr)
 }
 
 /* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(const void *addr)
 {
 	struct vm_struct **p, *tmp;
 
@@ -348,7 +348,7 @@ found:
  * This function returns the found VM area, but using it is NOT safe
  * on SMP machines, except for its size or flags.
  */
-struct vm_struct *remove_vm_area(void *addr)
+struct vm_struct *remove_vm_area(const void *addr)
 {
 	struct vm_struct *v;
 	write_lock(&vmlist_lock);
@@ -357,7 +357,7 @@ struct vm_struct *remove_vm_area(void *addr)
 	return v;
 }
 
-static void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -408,7 +408,7 @@ static void __vunmap(void *addr, int deallocate_pages)
  *
  *	Must not be called in interrupt context.
  */
-void vfree(void *addr)
+void vfree(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 1);
@@ -424,7 +424,7 @@ EXPORT_SYMBOL(vfree);
 *
 *	Must not be called in interrupt context.
 */
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	__vunmap(addr, 0);
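Taken together, the constified prototypes let lookup and teardown helpers accept read-only mappings directly. A minimal, hypothetical sketch of a caller that benefits (ro_buf_pfn() is an invented helper, not part of this patch):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical helper: report the page frame number backing the first
 * page of a read-only vmalloc mapping. Before this patch, the const
 * qualifier on 'ro_buf' would have had to be cast away before calling
 * vmalloc_to_page().
 */
static unsigned long ro_buf_pfn(const void *ro_buf)
{
	struct page *page = vmalloc_to_page(ro_buf);

	return page ? page_to_pfn(page) : 0;
}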