diff options
Diffstat (limited to 'arch/i386')
-rw-r--r-- | arch/i386/boot/Makefile | 2 | ||||
-rw-r--r-- | arch/i386/kernel/Makefile | 2 | ||||
-rw-r--r-- | arch/i386/kernel/apm.c | 2 | ||||
-rw-r--r-- | arch/i386/kernel/cpuid.c | 22 | ||||
-rw-r--r-- | arch/i386/kernel/msr.c | 22 | ||||
-rw-r--r-- | arch/i386/kernel/syscall_table.S | 2 | ||||
-rw-r--r-- | arch/i386/kernel/traps.c | 2 | ||||
-rw-r--r-- | arch/i386/lib/delay.c | 2 | ||||
-rw-r--r-- | arch/i386/mm/hugetlbpage.c | 204 | ||||
-rw-r--r-- | arch/i386/mm/init.c | 1 |
10 files changed, 65 insertions(+), 196 deletions(-)
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile index aa7064a75e..43cd6220ee 100644 --- a/arch/i386/boot/Makefile +++ b/arch/i386/boot/Makefile | |||
@@ -48,7 +48,7 @@ cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/bootsect $(obj)/setup \ | |||
48 | $(obj)/zImage $(obj)/bzImage: $(obj)/bootsect $(obj)/setup \ | 48 | $(obj)/zImage $(obj)/bzImage: $(obj)/bootsect $(obj)/setup \ |
49 | $(obj)/vmlinux.bin $(obj)/tools/build FORCE | 49 | $(obj)/vmlinux.bin $(obj)/tools/build FORCE |
50 | $(call if_changed,image) | 50 | $(call if_changed,image) |
51 | @echo 'Kernel: $@ is ready' | 51 | @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' |
52 | 52 | ||
53 | $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE | 53 | $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE |
54 | $(call if_changed,objcopy) | 54 | $(call if_changed,objcopy) |
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 0fbcfe00dd..51ecd51260 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile | |||
@@ -43,7 +43,7 @@ obj-$(CONFIG_SCx200) += scx200.o | |||
43 | # Note: kbuild does not track this dependency due to usage of .incbin | 43 | # Note: kbuild does not track this dependency due to usage of .incbin |
44 | $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so | 44 | $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so |
45 | targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) | 45 | targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so) |
46 | targets += vsyscall.lds | 46 | targets += vsyscall-note.o vsyscall.lds |
47 | 47 | ||
48 | # The DSO images are built using a special linker script. | 48 | # The DSO images are built using a special linker script. |
49 | quiet_cmd_syscall = SYSCALL $@ | 49 | quiet_cmd_syscall = SYSCALL $@ |
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 45641a8725..0ff65abcd5 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c | |||
@@ -1222,6 +1222,7 @@ static int suspend(int vetoable) | |||
1222 | 1222 | ||
1223 | save_processor_state(); | 1223 | save_processor_state(); |
1224 | err = set_system_power_state(APM_STATE_SUSPEND); | 1224 | err = set_system_power_state(APM_STATE_SUSPEND); |
1225 | ignore_normal_resume = 1; | ||
1225 | restore_processor_state(); | 1226 | restore_processor_state(); |
1226 | 1227 | ||
1227 | local_irq_disable(); | 1228 | local_irq_disable(); |
@@ -1229,7 +1230,6 @@ static int suspend(int vetoable) | |||
1229 | spin_lock(&i8253_lock); | 1230 | spin_lock(&i8253_lock); |
1230 | reinit_timer(); | 1231 | reinit_timer(); |
1231 | set_time(); | 1232 | set_time(); |
1232 | ignore_normal_resume = 1; | ||
1233 | 1233 | ||
1234 | spin_unlock(&i8253_lock); | 1234 | spin_unlock(&i8253_lock); |
1235 | write_sequnlock(&xtime_lock); | 1235 | write_sequnlock(&xtime_lock); |
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 2e2756345b..4647db4ad6 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c | |||
@@ -45,7 +45,7 @@ | |||
45 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
46 | #include <asm/system.h> | 46 | #include <asm/system.h> |
47 | 47 | ||
48 | static struct class_simple *cpuid_class; | 48 | static struct class *cpuid_class; |
49 | 49 | ||
50 | #ifdef CONFIG_SMP | 50 | #ifdef CONFIG_SMP |
51 | 51 | ||
@@ -158,12 +158,12 @@ static struct file_operations cpuid_fops = { | |||
158 | .open = cpuid_open, | 158 | .open = cpuid_open, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static int cpuid_class_simple_device_add(int i) | 161 | static int cpuid_class_device_create(int i) |
162 | { | 162 | { |
163 | int err = 0; | 163 | int err = 0; |
164 | struct class_device *class_err; | 164 | struct class_device *class_err; |
165 | 165 | ||
166 | class_err = class_simple_device_add(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i); | 166 | class_err = class_device_create(cpuid_class, MKDEV(CPUID_MAJOR, i), NULL, "cpu%d",i); |
167 | if (IS_ERR(class_err)) | 167 | if (IS_ERR(class_err)) |
168 | err = PTR_ERR(class_err); | 168 | err = PTR_ERR(class_err); |
169 | return err; | 169 | return err; |
@@ -175,10 +175,10 @@ static int __devinit cpuid_class_cpu_callback(struct notifier_block *nfb, unsign | |||
175 | 175 | ||
176 | switch (action) { | 176 | switch (action) { |
177 | case CPU_ONLINE: | 177 | case CPU_ONLINE: |
178 | cpuid_class_simple_device_add(cpu); | 178 | cpuid_class_device_create(cpu); |
179 | break; | 179 | break; |
180 | case CPU_DEAD: | 180 | case CPU_DEAD: |
181 | class_simple_device_remove(MKDEV(CPUID_MAJOR, cpu)); | 181 | class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); |
182 | break; | 182 | break; |
183 | } | 183 | } |
184 | return NOTIFY_OK; | 184 | return NOTIFY_OK; |
@@ -200,13 +200,13 @@ static int __init cpuid_init(void) | |||
200 | err = -EBUSY; | 200 | err = -EBUSY; |
201 | goto out; | 201 | goto out; |
202 | } | 202 | } |
203 | cpuid_class = class_simple_create(THIS_MODULE, "cpuid"); | 203 | cpuid_class = class_create(THIS_MODULE, "cpuid"); |
204 | if (IS_ERR(cpuid_class)) { | 204 | if (IS_ERR(cpuid_class)) { |
205 | err = PTR_ERR(cpuid_class); | 205 | err = PTR_ERR(cpuid_class); |
206 | goto out_chrdev; | 206 | goto out_chrdev; |
207 | } | 207 | } |
208 | for_each_online_cpu(i) { | 208 | for_each_online_cpu(i) { |
209 | err = cpuid_class_simple_device_add(i); | 209 | err = cpuid_class_device_create(i); |
210 | if (err != 0) | 210 | if (err != 0) |
211 | goto out_class; | 211 | goto out_class; |
212 | } | 212 | } |
@@ -218,9 +218,9 @@ static int __init cpuid_init(void) | |||
218 | out_class: | 218 | out_class: |
219 | i = 0; | 219 | i = 0; |
220 | for_each_online_cpu(i) { | 220 | for_each_online_cpu(i) { |
221 | class_simple_device_remove(MKDEV(CPUID_MAJOR, i)); | 221 | class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i)); |
222 | } | 222 | } |
223 | class_simple_destroy(cpuid_class); | 223 | class_destroy(cpuid_class); |
224 | out_chrdev: | 224 | out_chrdev: |
225 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | 225 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); |
226 | out: | 226 | out: |
@@ -232,8 +232,8 @@ static void __exit cpuid_exit(void) | |||
232 | int cpu = 0; | 232 | int cpu = 0; |
233 | 233 | ||
234 | for_each_online_cpu(cpu) | 234 | for_each_online_cpu(cpu) |
235 | class_simple_device_remove(MKDEV(CPUID_MAJOR, cpu)); | 235 | class_device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); |
236 | class_simple_destroy(cpuid_class); | 236 | class_destroy(cpuid_class); |
237 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | 237 | unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); |
238 | unregister_cpu_notifier(&cpuid_class_cpu_notifier); | 238 | unregister_cpu_notifier(&cpuid_class_cpu_notifier); |
239 | } | 239 | } |
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 05d9f8f363..b2f03c39a6 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
45 | #include <asm/system.h> | 45 | #include <asm/system.h> |
46 | 46 | ||
47 | static struct class_simple *msr_class; | 47 | static struct class *msr_class; |
48 | 48 | ||
49 | /* Note: "err" is handled in a funny way below. Otherwise one version | 49 | /* Note: "err" is handled in a funny way below. Otherwise one version |
50 | of gcc or another breaks. */ | 50 | of gcc or another breaks. */ |
@@ -260,12 +260,12 @@ static struct file_operations msr_fops = { | |||
260 | .open = msr_open, | 260 | .open = msr_open, |
261 | }; | 261 | }; |
262 | 262 | ||
263 | static int msr_class_simple_device_add(int i) | 263 | static int msr_class_device_create(int i) |
264 | { | 264 | { |
265 | int err = 0; | 265 | int err = 0; |
266 | struct class_device *class_err; | 266 | struct class_device *class_err; |
267 | 267 | ||
268 | class_err = class_simple_device_add(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i); | 268 | class_err = class_device_create(msr_class, MKDEV(MSR_MAJOR, i), NULL, "msr%d",i); |
269 | if (IS_ERR(class_err)) | 269 | if (IS_ERR(class_err)) |
270 | err = PTR_ERR(class_err); | 270 | err = PTR_ERR(class_err); |
271 | return err; | 271 | return err; |
@@ -277,10 +277,10 @@ static int __devinit msr_class_cpu_callback(struct notifier_block *nfb, unsigned | |||
277 | 277 | ||
278 | switch (action) { | 278 | switch (action) { |
279 | case CPU_ONLINE: | 279 | case CPU_ONLINE: |
280 | msr_class_simple_device_add(cpu); | 280 | msr_class_device_create(cpu); |
281 | break; | 281 | break; |
282 | case CPU_DEAD: | 282 | case CPU_DEAD: |
283 | class_simple_device_remove(MKDEV(MSR_MAJOR, cpu)); | 283 | class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); |
284 | break; | 284 | break; |
285 | } | 285 | } |
286 | return NOTIFY_OK; | 286 | return NOTIFY_OK; |
@@ -302,13 +302,13 @@ static int __init msr_init(void) | |||
302 | err = -EBUSY; | 302 | err = -EBUSY; |
303 | goto out; | 303 | goto out; |
304 | } | 304 | } |
305 | msr_class = class_simple_create(THIS_MODULE, "msr"); | 305 | msr_class = class_create(THIS_MODULE, "msr"); |
306 | if (IS_ERR(msr_class)) { | 306 | if (IS_ERR(msr_class)) { |
307 | err = PTR_ERR(msr_class); | 307 | err = PTR_ERR(msr_class); |
308 | goto out_chrdev; | 308 | goto out_chrdev; |
309 | } | 309 | } |
310 | for_each_online_cpu(i) { | 310 | for_each_online_cpu(i) { |
311 | err = msr_class_simple_device_add(i); | 311 | err = msr_class_device_create(i); |
312 | if (err != 0) | 312 | if (err != 0) |
313 | goto out_class; | 313 | goto out_class; |
314 | } | 314 | } |
@@ -320,8 +320,8 @@ static int __init msr_init(void) | |||
320 | out_class: | 320 | out_class: |
321 | i = 0; | 321 | i = 0; |
322 | for_each_online_cpu(i) | 322 | for_each_online_cpu(i) |
323 | class_simple_device_remove(MKDEV(MSR_MAJOR, i)); | 323 | class_device_destroy(msr_class, MKDEV(MSR_MAJOR, i)); |
324 | class_simple_destroy(msr_class); | 324 | class_destroy(msr_class); |
325 | out_chrdev: | 325 | out_chrdev: |
326 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 326 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); |
327 | out: | 327 | out: |
@@ -332,8 +332,8 @@ static void __exit msr_exit(void) | |||
332 | { | 332 | { |
333 | int cpu = 0; | 333 | int cpu = 0; |
334 | for_each_online_cpu(cpu) | 334 | for_each_online_cpu(cpu) |
335 | class_simple_device_remove(MKDEV(MSR_MAJOR, cpu)); | 335 | class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); |
336 | class_simple_destroy(msr_class); | 336 | class_destroy(msr_class); |
337 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); | 337 | unregister_chrdev(MSR_MAJOR, "cpu/msr"); |
338 | unregister_cpu_notifier(&msr_class_cpu_notifier); | 338 | unregister_cpu_notifier(&msr_class_cpu_notifier); |
339 | } | 339 | } |
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 6cd1ed311f..d408afaf64 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S | |||
@@ -251,7 +251,7 @@ ENTRY(sys_call_table) | |||
251 | .long sys_io_submit | 251 | .long sys_io_submit |
252 | .long sys_io_cancel | 252 | .long sys_io_cancel |
253 | .long sys_fadvise64 /* 250 */ | 253 | .long sys_fadvise64 /* 250 */ |
254 | .long sys_ni_syscall | 254 | .long sys_set_zone_reclaim |
255 | .long sys_exit_group | 255 | .long sys_exit_group |
256 | .long sys_lookup_dcookie | 256 | .long sys_lookup_dcookie |
257 | .long sys_epoll_create | 257 | .long sys_epoll_create |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 00c63419c0..83c579e82a 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -306,7 +306,7 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
306 | }; | 306 | }; |
307 | static int die_counter; | 307 | static int die_counter; |
308 | 308 | ||
309 | if (die.lock_owner != _smp_processor_id()) { | 309 | if (die.lock_owner != raw_smp_processor_id()) { |
310 | console_verbose(); | 310 | console_verbose(); |
311 | spin_lock_irq(&die.lock); | 311 | spin_lock_irq(&die.lock); |
312 | die.lock_owner = smp_processor_id(); | 312 | die.lock_owner = smp_processor_id(); |
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c index 080639f262..eb0cdfe928 100644 --- a/arch/i386/lib/delay.c +++ b/arch/i386/lib/delay.c | |||
@@ -34,7 +34,7 @@ inline void __const_udelay(unsigned long xloops) | |||
34 | xloops *= 4; | 34 | xloops *= 4; |
35 | __asm__("mull %0" | 35 | __asm__("mull %0" |
36 | :"=d" (xloops), "=&a" (d0) | 36 | :"=d" (xloops), "=&a" (d0) |
37 | :"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4))); | 37 | :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); |
38 | __delay(++xloops); | 38 | __delay(++xloops); |
39 | } | 39 | } |
40 | 40 | ||
diff --git a/arch/i386/mm/hugetlbpage.c b/arch/i386/mm/hugetlbpage.c index 171fc925e1..3b099f32b9 100644 --- a/arch/i386/mm/hugetlbpage.c +++ b/arch/i386/mm/hugetlbpage.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/tlb.h> | 18 | #include <asm/tlb.h> |
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | 20 | ||
21 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | 21 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) |
22 | { | 22 | { |
23 | pgd_t *pgd; | 23 | pgd_t *pgd; |
24 | pud_t *pud; | 24 | pud_t *pud; |
@@ -30,7 +30,7 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | |||
30 | return (pte_t *) pmd; | 30 | return (pte_t *) pmd; |
31 | } | 31 | } |
32 | 32 | ||
33 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | 33 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
34 | { | 34 | { |
35 | pgd_t *pgd; | 35 | pgd_t *pgd; |
36 | pud_t *pud; | 36 | pud_t *pud; |
@@ -42,21 +42,6 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | |||
42 | return (pte_t *) pmd; | 42 | return (pte_t *) pmd; |
43 | } | 43 | } |
44 | 44 | ||
45 | static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access) | ||
46 | { | ||
47 | pte_t entry; | ||
48 | |||
49 | add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); | ||
50 | if (write_access) { | ||
51 | entry = | ||
52 | pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); | ||
53 | } else | ||
54 | entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); | ||
55 | entry = pte_mkyoung(entry); | ||
56 | mk_pte_huge(entry); | ||
57 | set_pte(page_table, entry); | ||
58 | } | ||
59 | |||
60 | /* | 45 | /* |
61 | * This function checks for proper alignment of input addr and len parameters. | 46 | * This function checks for proper alignment of input addr and len parameters. |
62 | */ | 47 | */ |
@@ -69,77 +54,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len) | |||
69 | return 0; | 54 | return 0; |
70 | } | 55 | } |
71 | 56 | ||
72 | int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | ||
73 | struct vm_area_struct *vma) | ||
74 | { | ||
75 | pte_t *src_pte, *dst_pte, entry; | ||
76 | struct page *ptepage; | ||
77 | unsigned long addr = vma->vm_start; | ||
78 | unsigned long end = vma->vm_end; | ||
79 | |||
80 | while (addr < end) { | ||
81 | dst_pte = huge_pte_alloc(dst, addr); | ||
82 | if (!dst_pte) | ||
83 | goto nomem; | ||
84 | src_pte = huge_pte_offset(src, addr); | ||
85 | entry = *src_pte; | ||
86 | ptepage = pte_page(entry); | ||
87 | get_page(ptepage); | ||
88 | set_pte(dst_pte, entry); | ||
89 | add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); | ||
90 | addr += HPAGE_SIZE; | ||
91 | } | ||
92 | return 0; | ||
93 | |||
94 | nomem: | ||
95 | return -ENOMEM; | ||
96 | } | ||
97 | |||
98 | int | ||
99 | follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | ||
100 | struct page **pages, struct vm_area_struct **vmas, | ||
101 | unsigned long *position, int *length, int i) | ||
102 | { | ||
103 | unsigned long vpfn, vaddr = *position; | ||
104 | int remainder = *length; | ||
105 | |||
106 | WARN_ON(!is_vm_hugetlb_page(vma)); | ||
107 | |||
108 | vpfn = vaddr/PAGE_SIZE; | ||
109 | while (vaddr < vma->vm_end && remainder) { | ||
110 | |||
111 | if (pages) { | ||
112 | pte_t *pte; | ||
113 | struct page *page; | ||
114 | |||
115 | pte = huge_pte_offset(mm, vaddr); | ||
116 | |||
117 | /* hugetlb should be locked, and hence, prefaulted */ | ||
118 | WARN_ON(!pte || pte_none(*pte)); | ||
119 | |||
120 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; | ||
121 | |||
122 | WARN_ON(!PageCompound(page)); | ||
123 | |||
124 | get_page(page); | ||
125 | pages[i] = page; | ||
126 | } | ||
127 | |||
128 | if (vmas) | ||
129 | vmas[i] = vma; | ||
130 | |||
131 | vaddr += PAGE_SIZE; | ||
132 | ++vpfn; | ||
133 | --remainder; | ||
134 | ++i; | ||
135 | } | ||
136 | |||
137 | *length = remainder; | ||
138 | *position = vaddr; | ||
139 | |||
140 | return i; | ||
141 | } | ||
142 | |||
143 | #if 0 /* This is just for testing */ | 57 | #if 0 /* This is just for testing */ |
144 | struct page * | 58 | struct page * |
145 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) | 59 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
@@ -204,83 +118,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, | |||
204 | } | 118 | } |
205 | #endif | 119 | #endif |
206 | 120 | ||
207 | void unmap_hugepage_range(struct vm_area_struct *vma, | 121 | void hugetlb_clean_stale_pgtable(pte_t *pte) |
208 | unsigned long start, unsigned long end) | ||
209 | { | 122 | { |
210 | struct mm_struct *mm = vma->vm_mm; | 123 | pmd_t *pmd = (pmd_t *) pte; |
211 | unsigned long address; | ||
212 | pte_t pte, *ptep; | ||
213 | struct page *page; | 124 | struct page *page; |
214 | 125 | ||
215 | BUG_ON(start & (HPAGE_SIZE - 1)); | 126 | page = pmd_page(*pmd); |
216 | BUG_ON(end & (HPAGE_SIZE - 1)); | 127 | pmd_clear(pmd); |
217 | 128 | dec_page_state(nr_page_table_pages); | |
218 | for (address = start; address < end; address += HPAGE_SIZE) { | 129 | page_cache_release(page); |
219 | ptep = huge_pte_offset(mm, address); | ||
220 | if (!ptep) | ||
221 | continue; | ||
222 | pte = ptep_get_and_clear(mm, address, ptep); | ||
223 | if (pte_none(pte)) | ||
224 | continue; | ||
225 | page = pte_page(pte); | ||
226 | put_page(page); | ||
227 | } | ||
228 | add_mm_counter(mm ,rss, -((end - start) >> PAGE_SHIFT)); | ||
229 | flush_tlb_range(vma, start, end); | ||
230 | } | ||
231 | |||
232 | int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) | ||
233 | { | ||
234 | struct mm_struct *mm = current->mm; | ||
235 | unsigned long addr; | ||
236 | int ret = 0; | ||
237 | |||
238 | BUG_ON(vma->vm_start & ~HPAGE_MASK); | ||
239 | BUG_ON(vma->vm_end & ~HPAGE_MASK); | ||
240 | |||
241 | spin_lock(&mm->page_table_lock); | ||
242 | for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { | ||
243 | unsigned long idx; | ||
244 | pte_t *pte = huge_pte_alloc(mm, addr); | ||
245 | struct page *page; | ||
246 | |||
247 | if (!pte) { | ||
248 | ret = -ENOMEM; | ||
249 | goto out; | ||
250 | } | ||
251 | |||
252 | if (!pte_none(*pte)) | ||
253 | continue; | ||
254 | |||
255 | idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) | ||
256 | + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); | ||
257 | page = find_get_page(mapping, idx); | ||
258 | if (!page) { | ||
259 | /* charge the fs quota first */ | ||
260 | if (hugetlb_get_quota(mapping)) { | ||
261 | ret = -ENOMEM; | ||
262 | goto out; | ||
263 | } | ||
264 | page = alloc_huge_page(); | ||
265 | if (!page) { | ||
266 | hugetlb_put_quota(mapping); | ||
267 | ret = -ENOMEM; | ||
268 | goto out; | ||
269 | } | ||
270 | ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); | ||
271 | if (! ret) { | ||
272 | unlock_page(page); | ||
273 | } else { | ||
274 | hugetlb_put_quota(mapping); | ||
275 | free_huge_page(page); | ||
276 | goto out; | ||
277 | } | ||
278 | } | ||
279 | set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); | ||
280 | } | ||
281 | out: | ||
282 | spin_unlock(&mm->page_table_lock); | ||
283 | return ret; | ||
284 | } | 130 | } |
285 | 131 | ||
286 | /* x86_64 also uses this file */ | 132 | /* x86_64 also uses this file */ |
@@ -294,7 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, | |||
294 | struct vm_area_struct *vma; | 140 | struct vm_area_struct *vma; |
295 | unsigned long start_addr; | 141 | unsigned long start_addr; |
296 | 142 | ||
297 | start_addr = mm->free_area_cache; | 143 | if (len > mm->cached_hole_size) { |
144 | start_addr = mm->free_area_cache; | ||
145 | } else { | ||
146 | start_addr = TASK_UNMAPPED_BASE; | ||
147 | mm->cached_hole_size = 0; | ||
148 | } | ||
298 | 149 | ||
299 | full_search: | 150 | full_search: |
300 | addr = ALIGN(start_addr, HPAGE_SIZE); | 151 | addr = ALIGN(start_addr, HPAGE_SIZE); |
@@ -308,6 +159,7 @@ full_search: | |||
308 | */ | 159 | */ |
309 | if (start_addr != TASK_UNMAPPED_BASE) { | 160 | if (start_addr != TASK_UNMAPPED_BASE) { |
310 | start_addr = TASK_UNMAPPED_BASE; | 161 | start_addr = TASK_UNMAPPED_BASE; |
162 | mm->cached_hole_size = 0; | ||
311 | goto full_search; | 163 | goto full_search; |
312 | } | 164 | } |
313 | return -ENOMEM; | 165 | return -ENOMEM; |
@@ -316,6 +168,8 @@ full_search: | |||
316 | mm->free_area_cache = addr + len; | 168 | mm->free_area_cache = addr + len; |
317 | return addr; | 169 | return addr; |
318 | } | 170 | } |
171 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
172 | mm->cached_hole_size = vma->vm_start - addr; | ||
319 | addr = ALIGN(vma->vm_end, HPAGE_SIZE); | 173 | addr = ALIGN(vma->vm_end, HPAGE_SIZE); |
320 | } | 174 | } |
321 | } | 175 | } |
@@ -327,12 +181,17 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, | |||
327 | struct mm_struct *mm = current->mm; | 181 | struct mm_struct *mm = current->mm; |
328 | struct vm_area_struct *vma, *prev_vma; | 182 | struct vm_area_struct *vma, *prev_vma; |
329 | unsigned long base = mm->mmap_base, addr = addr0; | 183 | unsigned long base = mm->mmap_base, addr = addr0; |
184 | unsigned long largest_hole = mm->cached_hole_size; | ||
330 | int first_time = 1; | 185 | int first_time = 1; |
331 | 186 | ||
332 | /* don't allow allocations above current base */ | 187 | /* don't allow allocations above current base */ |
333 | if (mm->free_area_cache > base) | 188 | if (mm->free_area_cache > base) |
334 | mm->free_area_cache = base; | 189 | mm->free_area_cache = base; |
335 | 190 | ||
191 | if (len <= largest_hole) { | ||
192 | largest_hole = 0; | ||
193 | mm->free_area_cache = base; | ||
194 | } | ||
336 | try_again: | 195 | try_again: |
337 | /* make sure it can fit in the remaining address space */ | 196 | /* make sure it can fit in the remaining address space */ |
338 | if (mm->free_area_cache < len) | 197 | if (mm->free_area_cache < len) |
@@ -353,13 +212,21 @@ try_again: | |||
353 | * vma->vm_start, use it: | 212 | * vma->vm_start, use it: |
354 | */ | 213 | */ |
355 | if (addr + len <= vma->vm_start && | 214 | if (addr + len <= vma->vm_start && |
356 | (!prev_vma || (addr >= prev_vma->vm_end))) | 215 | (!prev_vma || (addr >= prev_vma->vm_end))) { |
357 | /* remember the address as a hint for next time */ | 216 | /* remember the address as a hint for next time */ |
358 | return (mm->free_area_cache = addr); | 217 | mm->cached_hole_size = largest_hole; |
359 | else | 218 | return (mm->free_area_cache = addr); |
219 | } else { | ||
360 | /* pull free_area_cache down to the first hole */ | 220 | /* pull free_area_cache down to the first hole */ |
361 | if (mm->free_area_cache == vma->vm_end) | 221 | if (mm->free_area_cache == vma->vm_end) { |
362 | mm->free_area_cache = vma->vm_start; | 222 | mm->free_area_cache = vma->vm_start; |
223 | mm->cached_hole_size = largest_hole; | ||
224 | } | ||
225 | } | ||
226 | |||
227 | /* remember the largest hole we saw so far */ | ||
228 | if (addr + largest_hole < vma->vm_start) | ||
229 | largest_hole = vma->vm_start - addr; | ||
363 | 230 | ||
364 | /* try just below the current vma->vm_start */ | 231 | /* try just below the current vma->vm_start */ |
365 | addr = (vma->vm_start - len) & HPAGE_MASK; | 232 | addr = (vma->vm_start - len) & HPAGE_MASK; |
@@ -372,6 +239,7 @@ fail: | |||
372 | */ | 239 | */ |
373 | if (first_time) { | 240 | if (first_time) { |
374 | mm->free_area_cache = base; | 241 | mm->free_area_cache = base; |
242 | largest_hole = 0; | ||
375 | first_time = 0; | 243 | first_time = 0; |
376 | goto try_again; | 244 | goto try_again; |
377 | } | 245 | } |
@@ -382,6 +250,7 @@ fail: | |||
382 | * allocations. | 250 | * allocations. |
383 | */ | 251 | */ |
384 | mm->free_area_cache = TASK_UNMAPPED_BASE; | 252 | mm->free_area_cache = TASK_UNMAPPED_BASE; |
253 | mm->cached_hole_size = ~0UL; | ||
385 | addr = hugetlb_get_unmapped_area_bottomup(file, addr0, | 254 | addr = hugetlb_get_unmapped_area_bottomup(file, addr0, |
386 | len, pgoff, flags); | 255 | len, pgoff, flags); |
387 | 256 | ||
@@ -389,6 +258,7 @@ fail: | |||
389 | * Restore the topdown base: | 258 | * Restore the topdown base: |
390 | */ | 259 | */ |
391 | mm->free_area_cache = base; | 260 | mm->free_area_cache = base; |
261 | mm->cached_hole_size = ~0UL; | ||
392 | 262 | ||
393 | return addr; | 263 | return addr; |
394 | } | 264 | } |
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 7a7ea37372..8766c771bb 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
@@ -269,7 +269,6 @@ void __init one_highpage_init(struct page *page, int pfn, int bad_ppro) | |||
269 | { | 269 | { |
270 | if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) { | 270 | if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) { |
271 | ClearPageReserved(page); | 271 | ClearPageReserved(page); |
272 | set_bit(PG_highmem, &page->flags); | ||
273 | set_page_count(page, 1); | 272 | set_page_count(page, 1); |
274 | __free_page(page); | 273 | __free_page(page); |
275 | totalhigh_pages++; | 274 | totalhigh_pages++; |