diff options
-rw-r--r-- arch/ia64/kernel/sys_ia64.c | 37
-rw-r--r-- arch/ia64/mm/hugetlbpage.c | 20
2 files changed, 21 insertions(+), 36 deletions(-)
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index d9439ef2f661..41e33f84c185 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c | |||
| @@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |||
| 25 | unsigned long pgoff, unsigned long flags) | 25 | unsigned long pgoff, unsigned long flags) |
| 26 | { | 26 | { |
| 27 | long map_shared = (flags & MAP_SHARED); | 27 | long map_shared = (flags & MAP_SHARED); |
| 28 | unsigned long start_addr, align_mask = PAGE_SIZE - 1; | 28 | unsigned long align_mask = 0; |
| 29 | struct mm_struct *mm = current->mm; | 29 | struct mm_struct *mm = current->mm; |
| 30 | struct vm_area_struct *vma; | 30 | struct vm_unmapped_area_info info; |
| 31 | 31 | ||
| 32 | if (len > RGN_MAP_LIMIT) | 32 | if (len > RGN_MAP_LIMIT) |
| 33 | return -ENOMEM; | 33 | return -ENOMEM; |
| @@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |||
| 44 | addr = 0; | 44 | addr = 0; |
| 45 | #endif | 45 | #endif |
| 46 | if (!addr) | 46 | if (!addr) |
| 47 | addr = mm->free_area_cache; | 47 | addr = TASK_UNMAPPED_BASE; |
| 48 | 48 | ||
| 49 | if (map_shared && (TASK_SIZE > 0xfffffffful)) | 49 | if (map_shared && (TASK_SIZE > 0xfffffffful)) |
| 50 | /* | 50 | /* |
| @@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len | |||
| 53 | * tasks, we prefer to avoid exhausting the address space too quickly by | 53 | * tasks, we prefer to avoid exhausting the address space too quickly by |
| 54 | * limiting alignment to a single page. | 54 | * limiting alignment to a single page. |
| 55 | */ | 55 | */ |
| 56 | align_mask = SHMLBA - 1; | 56 | align_mask = PAGE_MASK & (SHMLBA - 1); |
| 57 | 57 | ||
| 58 | full_search: | 58 | info.flags = 0; |
| 59 | start_addr = addr = (addr + align_mask) & ~align_mask; | 59 | info.length = len; |
| 60 | 60 | info.low_limit = addr; | |
| 61 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | 61 | info.high_limit = TASK_SIZE; |
| 62 | /* At this point: (!vma || addr < vma->vm_end). */ | 62 | info.align_mask = align_mask; |
| 63 | if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { | 63 | info.align_offset = 0; |
| 64 | if (start_addr != TASK_UNMAPPED_BASE) { | 64 | return vm_unmapped_area(&info); |
| 65 | /* Start a new search --- just in case we missed some holes. */ | ||
| 66 | addr = TASK_UNMAPPED_BASE; | ||
| 67 | goto full_search; | ||
| 68 | } | ||
| 69 | return -ENOMEM; | ||
| 70 | } | ||
| 71 | if (!vma || addr + len <= vma->vm_start) { | ||
| 72 | /* Remember the address where we stopped this search: */ | ||
| 73 | mm->free_area_cache = addr + len; | ||
| 74 | return addr; | ||
| 75 | } | ||
| 76 | addr = (vma->vm_end + align_mask) & ~align_mask; | ||
| 77 | } | ||
| 78 | } | 65 | } |
| 79 | 66 | ||
| 80 | asmlinkage long | 67 | asmlinkage long |
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 5ca674b74737..76069c18ee42 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c | |||
| @@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |||
| 148 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, | 148 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, |
| 149 | unsigned long pgoff, unsigned long flags) | 149 | unsigned long pgoff, unsigned long flags) |
| 150 | { | 150 | { |
| 151 | struct vm_area_struct *vmm; | 151 | struct vm_unmapped_area_info info; |
| 152 | 152 | ||
| 153 | if (len > RGN_MAP_LIMIT) | 153 | if (len > RGN_MAP_LIMIT) |
| 154 | return -ENOMEM; | 154 | return -ENOMEM; |
| @@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u | |||
| 165 | /* This code assumes that RGN_HPAGE != 0. */ | 165 | /* This code assumes that RGN_HPAGE != 0. */ |
| 166 | if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) | 166 | if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) |
| 167 | addr = HPAGE_REGION_BASE; | 167 | addr = HPAGE_REGION_BASE; |
| 168 | else | 168 | |
| 169 | addr = ALIGN(addr, HPAGE_SIZE); | 169 | info.flags = 0; |
| 170 | for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { | 170 | info.length = len; |
| 171 | /* At this point: (!vmm || addr < vmm->vm_end). */ | 171 | info.low_limit = addr; |
| 172 | if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) | 172 | info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; |
| 173 | return -ENOMEM; | 173 | info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); |
| 174 | if (!vmm || (addr + len) <= vmm->vm_start) | 174 | info.align_offset = 0; |
| 175 | return addr; | 175 | return vm_unmapped_area(&info); |
| 176 | addr = ALIGN(vmm->vm_end, HPAGE_SIZE); | ||
| 177 | } | ||
| 178 | } | 176 | } |
| 179 | 177 | ||
| 180 | static int __init hugetlb_setup_sz(char *str) | 178 | static int __init hugetlb_setup_sz(char *str) |
