author     Dave Airlie <airlied@redhat.com>   2010-12-21 18:48:54 -0500
committer  Dave Airlie <airlied@redhat.com>   2010-12-21 18:48:54 -0500
commit     ae09f09e94d755ed45c58b695675636c0ec53f9e
tree       77cb9bac7d81f5b1250b8638a007e10c17b600af /mm/vmalloc.c
parent     1d99e5c57255d188773fb437391df24fe8faf575
parent     5909a77ac62cc042f94bd262016cf468a2f96022
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (771 commits)
drm/i915: Undo "Uncouple render/power ctx before suspending"
drm/i915: Allow the application to choose the constant addressing mode
drm/i915: dynamic render p-state support for Sandy Bridge
drm/i915: Enable EI mode for RCx decision making on Sandybridge
drm/i915/sdvo: Border and stall select became test bits in gen5
drm/i915: Add Guess-o-matic for pageflip timestamping.
drm/i915: Add support for precise vblank timestamping (v2)
drm/i915: Add frame buffer compression on Sandybridge
drm/i915: Add self-refresh support on Sandybridge
drm/i915: Wait for vblank before unpinning old fb
Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake"
drm/i915: Pass clock limits down to PLL matcher
drm/i915: Poll for seqno completion if IRQ is disabled
drm/i915/ringbuffer: Make IRQ refcnting atomic
agp/intel: Fix missed cached memory flags setting in i965_write_entry()
drm/i915/sdvo: Only use the SDVO pin if it is in the valid range
drm/i915: Enable RC6 autodownclocking on Sandybridge
drm/i915: Terminate the FORCE WAKE after we have finished reading
drm/i915/gtt: Clear the cachelines upon resume
drm/i915: Restore GTT mapping first upon resume
...
Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3d66b3dc5cb..eb5cc7d00c5a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,8 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-bool vmap_lazy_unmap __read_mostly = true;
-
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void)
 {
         unsigned int log;
 
-        if (!vmap_lazy_unmap)
-                return 0;
-
         log = fls(num_online_cpus());
 
         return log * (32UL * 1024 * 1024 / PAGE_SIZE);
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
                         if (va->va_end > *end)
                                 *end = va->va_end;
                         nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-                        unmap_vmap_area(va);
                         list_add_tail(&va->purge_list, &valist);
                         va->flags |= VM_LAZY_FREEING;
                         va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
  */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
 {
         va->flags |= VM_LAZY_FREE;
         atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 }
 
 /*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+        unmap_vmap_area(va);
+        free_vmap_area_noflush(va);
+}
+
+/*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb)
         spin_unlock(&vmap_block_tree_lock);
         BUG_ON(tmp != vb);
 
-        free_unmap_vmap_area_noflush(vb->va);
+        free_vmap_area_noflush(vb->va);
         call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size)
         rcu_read_unlock();
         BUG_ON(!vb);
 
+        vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
         spin_lock(&vb->lock);
         BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void)
 
                         s = vb->va->va_start + (i << PAGE_SHIFT);
                         e = vb->va->va_start + (j << PAGE_SHIFT);
-                        vunmap_page_range(s, e);
                         flush = 1;
 
                         if (s < start)
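Taken together, the vmalloc.c hunks split the old free_unmap_vmap_area_noflush() into a pure lazy-free step (free_vmap_area_noflush()) plus an unmap-then-lazy-free wrapper, and move the page-table clearing forward: vb_free() and the wrapper now unmap eagerly, instead of leaving stale entries in place until __purge_vmap_area_lazy() or vm_unmap_aliases() got around to it. Below is a minimal user-space sketch of that resulting call structure; the function names free_vmap_area_noflush(), free_unmap_vmap_area_noflush(), and unmap_vmap_area() come from the diff, while the stub struct, the page_mapped array, and main() are invented stand-ins for the real kernel machinery.

#include <stdio.h>
#include <string.h>

/* Toy model: a "page table" of 16 pages and a lazy-free counter. */
#define NPAGES 16
static int page_mapped[NPAGES];
static int vmap_lazy_nr;          /* models the kernel's atomic vmap_lazy_nr */

struct vmap_area {                /* trimmed-down stand-in for the real struct */
        int start, end;           /* page indexes rather than addresses */
        int lazy_free;            /* models the VM_LAZY_FREE flag */
};

/* Clear the ptes for a region (models unmap_vmap_area()). */
static void unmap_vmap_area(struct vmap_area *va)
{
        memset(&page_mapped[va->start], 0,
               (va->end - va->start) * sizeof(int));
}

/* After the patch: lazy-free only; the caller must already have unmapped. */
static void free_vmap_area_noflush(struct vmap_area *va)
{
        va->lazy_free = 1;
        vmap_lazy_nr += va->end - va->start;
}

/* Unmap eagerly, then queue the area for deferred (lazy) freeing. */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
        unmap_vmap_area(va);
        free_vmap_area_noflush(va);
}

int main(void)
{
        struct vmap_area va = { .start = 4, .end = 8 };

        memset(page_mapped, 1, sizeof(page_mapped));  /* "map" every page */
        free_unmap_vmap_area_noflush(&va);

        /* The ptes are already clear while the area waits to be purged. */
        printf("page 5 mapped: %d, lazy pages pending: %d\n",
               page_mapped[5], vmap_lazy_nr);
        return 0;
}

The observable design change, as read straight from the diff, is ordering: page-table entries for a vunmapped region are torn down at free time rather than at purge time, so only the TLB flush, not the unmap itself, remains deferred on the lazy purge list.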