path: root/arch/mips
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:05:37 -0500
commit    608ff1a210ab0e8b969399039bf8e18693605910 (patch)
tree      faea7bb1764461c73d0953089bd5439d91733a03 /arch/mips
parent    414a6750e59b0b687034764c464e9ddecac0f7a6 (diff)
parent    74d42d8fe146e870c52bde3b1c692f86cc8ff844 (diff)
Merge branch 'akpm' (Andrew's patchbomb)
Merge misc updates from Andrew Morton:
 "About half of most of MM. Going very early this time due to uncertainty
  over the coreautounifiednumasched things. I'll send the other half of
  most of MM tomorrow. The rest of MM awaits a slab merge from Pekka."

* emailed patches from Andrew Morton: (71 commits)
  memory_hotplug: ensure every online node has NORMAL memory
  memory_hotplug: handle empty zone when online_movable/online_kernel
  mm, memory-hotplug: dynamic configure movable memory and portion memory
  drivers/base/node.c: cleanup node_state_attr[]
  bootmem: fix wrong call parameter for free_bootmem()
  avr32, kconfig: remove HAVE_ARCH_BOOTMEM
  mm: cma: remove watermark hacks
  mm: cma: skip watermarks check for already isolated blocks in split_free_page()
  mm, oom: fix race when specifying a thread as the oom origin
  mm, oom: change type of oom_score_adj to short
  mm: cleanup register_node()
  mm, mempolicy: remove duplicate code
  mm/vmscan.c: try_to_freeze() returns boolean
  mm: introduce putback_movable_pages()
  virtio_balloon: introduce migration primitives to balloon pages
  mm: introduce compaction and migration for ballooned pages
  mm: introduce a common interface for balloon pages mobility
  mm: redefine address_space.assoc_mapping
  mm: adjust address_space_operations.migratepage() return code
  arch/sparc/kernel/sys_sparc_64.c: s/COLOUR/COLOR/
  ...
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/uapi/asm/mman.h    11
-rw-r--r--  arch/mips/mm/mmap.c                 111
2 files changed, 28 insertions, 94 deletions
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 46d3da0d4b92..9a936ac9a942 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -87,4 +87,15 @@
 /* compatibility flags */
 #define MAP_FILE	0
 
+/*
+ * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
+ * This gives us 6 bits, which is enough until someone invents 128 bit address
+ * spaces.
+ *
+ * Assume these are all power of twos.
+ * When 0 use the default page size.
+ */
+#define MAP_HUGE_SHIFT	26
+#define MAP_HUGE_MASK	0x3f
+
 #endif /* _ASM_MMAN_H */
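MAP_HUGE_SHIFT and MAP_HUGE_MASK let a caller pass the log2 of the desired huge page size in the upper mmap() flag bits. A minimal user-space sketch of that encoding, using only the constants added above; the map_huge_flag() helper and the 2 MB example are illustrative, not part of the patch:

#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT		/* values taken from the hunk above */
#define MAP_HUGE_SHIFT	26
#define MAP_HUGE_MASK	0x3f
#endif

/* Encode log2(huge page size) into the mmap() flags; 0 keeps the
 * default huge page size.  A 2 MB page has log2 == 21. */
static inline int map_huge_flag(unsigned int page_shift)
{
	return (page_shift & MAP_HUGE_MASK) << MAP_HUGE_SHIFT;
}

/* e.g. an anonymous 2 MB hugetlb mapping:
 *   mmap(NULL, len, PROT_READ | PROT_WRITE,
 *        MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | map_huge_flag(21),
 *        -1, 0);
 */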
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 302d779d5b0d..d9be7540a6be 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -45,18 +45,6 @@ static unsigned long mmap_base(unsigned long rnd)
 	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
 }
 
-static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
-					      unsigned long pgoff)
-{
-	unsigned long base = addr & ~shm_align_mask;
-	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
-
-	if (base + off <= addr)
-		return base + off;
-
-	return base - off;
-}
-
 #define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
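The open-coded COLOUR_ALIGN_DOWN() helper goes away; only the COLOUR_ALIGN() macro kept above remains to express what "colour" means here: the chosen address must carry the same cache-alias bits as the file offset. A stand-alone sketch of that constraint, with made-up values for PAGE_SHIFT, shm_align_mask and the addresses:

#include <stdio.h>

#define PAGE_SHIFT	12
static unsigned long shm_align_mask = 0x7fff;	/* illustrative alias mask */

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

int main(void)
{
	unsigned long addr  = 0x2000aaa000UL;	/* arbitrary hint */
	unsigned long pgoff = 3;		/* file offset, in pages */
	unsigned long a = COLOUR_ALIGN(addr, pgoff);

	/* the result shares its colour bits with the file offset */
	printf("aligned=%#lx colour=%#lx\n", a, a & shm_align_mask);
	return 0;
}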
@@ -71,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 	struct vm_area_struct *vma;
 	unsigned long addr = addr0;
 	int do_color_align;
+	struct vm_unmapped_area_info info;
 
 	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
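The new info descriptor drives the generic gap allocator this series introduces. For context, the structure declared in include/linux/mm.h around this release looks roughly like the sketch below (reproduced here for orientation, not part of the MIPS diff):

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;		/* 0 = bottom-up, TOPDOWN = search down */
	unsigned long length;		/* size of the requested mapping */
	unsigned long low_limit;	/* lowest acceptable address */
	unsigned long high_limit;	/* highest acceptable end address */
	unsigned long align_mask;	/* bits that must match align_offset */
	unsigned long align_offset;	/* desired colour of the result */
};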
@@ -107,97 +96,31 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 			return addr;
 	}
 
-	if (dir == UP) {
-		addr = mm->mmap_base;
-		if (do_color_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-		else
-			addr = PAGE_ALIGN(addr);
+	info.length = len;
+	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
 
-		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-			/* At this point: (!vma || addr < vma->vm_end). */
-			if (TASK_SIZE - len < addr)
-				return -ENOMEM;
-			if (!vma || addr + len <= vma->vm_start)
-				return addr;
-			addr = vma->vm_end;
-			if (do_color_align)
-				addr = COLOUR_ALIGN(addr, pgoff);
-		}
-	} else {
-		/* check if free_area_cache is useful for us */
-		if (len <= mm->cached_hole_size) {
-			mm->cached_hole_size = 0;
-			mm->free_area_cache = mm->mmap_base;
-		}
+	if (dir == DOWN) {
+		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+		info.low_limit = PAGE_SIZE;
+		info.high_limit = mm->mmap_base;
+		addr = vm_unmapped_area(&info);
+
+		if (!(addr & ~PAGE_MASK))
+			return addr;
 
-		/*
-		 * either no address requested, or the mapping can't fit into
-		 * the requested address hole
-		 */
-		addr = mm->free_area_cache;
-		if (do_color_align) {
-			unsigned long base =
-				COLOUR_ALIGN_DOWN(addr - len, pgoff);
-			addr = base + len;
-		}
-
-		/* make sure it can fit in the remaining address space */
-		if (likely(addr > len)) {
-			vma = find_vma(mm, addr - len);
-			if (!vma || addr <= vma->vm_start) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr - len;
-			}
-		}
-
-		if (unlikely(mm->mmap_base < len))
-			goto bottomup;
-
-		addr = mm->mmap_base - len;
-		if (do_color_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-		do {
-			/*
-			 * Lookup failure means no vma is above this address,
-			 * else if new region fits below vma->vm_start,
-			 * return with success:
-			 */
-			vma = find_vma(mm, addr);
-			if (likely(!vma || addr + len <= vma->vm_start)) {
-				/* cache the address as a hint for next time */
-				return mm->free_area_cache = addr;
-			}
-
-			/* remember the largest hole we saw so far */
-			if (addr + mm->cached_hole_size < vma->vm_start)
-				mm->cached_hole_size = vma->vm_start - addr;
-
-			/* try just below the current vma->vm_start */
-			addr = vma->vm_start - len;
-			if (do_color_align)
-				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-		} while (likely(len < vma->vm_start));
-
-bottomup:
 		/*
 		 * A failed mmap() very likely causes application failure,
 		 * so fall back to the bottom-up function here. This scenario
 		 * can happen with large stack limits and large mmap()
 		 * allocations.
 		 */
-		mm->cached_hole_size = ~0UL;
-		mm->free_area_cache = TASK_UNMAPPED_BASE;
-		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-		/*
-		 * Restore the topdown base:
-		 */
-		mm->free_area_cache = mm->mmap_base;
-		mm->cached_hole_size = ~0UL;
-
-		return addr;
 	}
+
+	info.flags = 0;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
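After the conversion the colour constraint is no longer enforced by hand-rolled loops: vm_unmapped_area() finds a fitting gap between low_limit and high_limit (top-down when VM_UNMAPPED_AREA_TOPDOWN is set) and then nudges its start so the bits selected by align_mask match align_offset. A user-space sketch of that adjustment, assuming page-aligned inputs and illustrative mask values; the helper name is made up and the real allocator additionally walks the VMA tree for free gaps:

#include <assert.h>

/* Bump a gap start forward so its colour bits match align_offset,
 * i.e. (addr & align_mask) == (align_offset & align_mask). */
static unsigned long align_up_to_colour(unsigned long gap_start,
					unsigned long align_mask,
					unsigned long align_offset)
{
	return gap_start + ((align_offset - gap_start) & align_mask);
}

int main(void)
{
	unsigned long mask = 0x7fff & ~0xfffUL;	/* PAGE_MASK & shm_align_mask, sample values */
	unsigned long off  = 0x3000;		/* pgoff << PAGE_SHIFT */
	unsigned long addr = align_up_to_colour(0x2000ab0000UL, mask, off);

	/* same invariant the old COLOUR_ALIGN* helpers provided */
	assert((addr & mask) == (off & mask));
	return 0;
}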