 arch/x86/boot/a20.c             | 75
 arch/x86/include/asm/page.h     |  1
 arch/x86/include/asm/paravirt.h | 17
 arch/x86/kernel/hpet.c          |  2
 arch/x86/kernel/paravirt.c      | 26
 arch/x86/kernel/ptrace.c        | 16
 arch/x86/mm/ioremap.c           | 19
 arch/x86/mm/pageattr.c          | 15
 arch/x86/mm/pat.c               | 83
 include/linux/mm.h              |  1
 mm/mlock.c                      |  7
 11 files changed, 147 insertions(+), 115 deletions(-)
diff --git a/arch/x86/boot/a20.c b/arch/x86/boot/a20.c
index 4063d630deff..fba8e9c6a504 100644
--- a/arch/x86/boot/a20.c
+++ b/arch/x86/boot/a20.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ * Copyright 2009 Intel Corporation
  *
  * This file is part of the Linux kernel, and is made available under
  * the terms of the GNU General Public License version 2.
@@ -15,16 +16,23 @@
 #include "boot.h"
 
 #define MAX_8042_LOOPS	100000
+#define MAX_8042_FF	32
 
 static int empty_8042(void)
 {
 	u8 status;
 	int loops = MAX_8042_LOOPS;
+	int ffs   = MAX_8042_FF;
 
 	while (loops--) {
 		io_delay();
 
 		status = inb(0x64);
+		if (status == 0xff) {
+			/* FF is a plausible, but very unlikely status */
+			if (!--ffs)
+				return -1; /* Assume no KBC present */
+		}
 		if (status & 1) {
 			/* Read and discard input data */
 			io_delay();
@@ -118,44 +126,43 @@ static void enable_a20_fast(void)
 
 int enable_a20(void)
 {
-#if defined(CONFIG_X86_ELAN)
-	/* Elan croaks if we try to touch the KBC */
-	enable_a20_fast();
-	while (!a20_test_long())
-		;
-	return 0;
-#elif defined(CONFIG_X86_VOYAGER)
+#ifdef CONFIG_X86_VOYAGER
 	/* On Voyager, a20_test() is unsafe? */
 	enable_a20_kbc();
 	return 0;
 #else
 	int loops = A20_ENABLE_LOOPS;
-	while (loops--) {
-		/* First, check to see if A20 is already enabled
-		   (legacy free, etc.) */
-		if (a20_test_short())
-			return 0;
-
-		/* Next, try the BIOS (INT 0x15, AX=0x2401) */
-		enable_a20_bios();
-		if (a20_test_short())
-			return 0;
-
-		/* Try enabling A20 through the keyboard controller */
-		empty_8042();
-		if (a20_test_short())
-			return 0; /* BIOS worked, but with delayed reaction */
-
-		enable_a20_kbc();
-		if (a20_test_long())
-			return 0;
-
-		/* Finally, try enabling the "fast A20 gate" */
-		enable_a20_fast();
-		if (a20_test_long())
-			return 0;
-	}
-
-	return -1;
+	int kbc_err;
+
+	while (loops--) {
+		/* First, check to see if A20 is already enabled
+		   (legacy free, etc.) */
+		if (a20_test_short())
+			return 0;
+
+		/* Next, try the BIOS (INT 0x15, AX=0x2401) */
+		enable_a20_bios();
+		if (a20_test_short())
+			return 0;
+
+		/* Try enabling A20 through the keyboard controller */
+		kbc_err = empty_8042();
+
+		if (a20_test_short())
+			return 0; /* BIOS worked, but with delayed reaction */
+
+		if (!kbc_err) {
+			enable_a20_kbc();
+			if (a20_test_long())
+				return 0;
+		}
+
+		/* Finally, try enabling the "fast A20 gate" */
+		enable_a20_fast();
+		if (a20_test_long())
+			return 0;
+	}
+
+	return -1;
 #endif
 }
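
The two changes above work together: empty_8042() now returns -1 when port 0x64 keeps reading 0xff, and enable_a20() uses that result to skip enable_a20_kbc() on machines with no keyboard controller. The 0xff heuristic works because a read from an undriven ISA port typically floats high, so a legacy-free box with no 8042 returns 0xff from the status port indefinitely. A minimal standalone sketch of the same floating-bus probe, with hypothetical names and thresholds rather than the actual boot code:

	/* Sketch: detect an absent 8042 by watching for a floating bus.
	 * Assumes inb() and u8 as provided by boot.h; limits are made up. */
	static int kbc_absent(void)
	{
		int ffs = 32;			/* 0xff reads tolerated */
		int loops = 100000;		/* overall poll budget */

		while (loops--) {
			u8 status = inb(0x64);	/* 8042 status port */
			if (status != 0xff)
				return 0;	/* real status byte: KBC exists */
			if (!--ffs)
				return 1;	/* bus floats high: no KBC */
		}
		return 0;			/* inconclusive: assume present */
	}
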
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 40226999cbf8..05f2da7f387a 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 1c244b64573f..b788dfd20483 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -1431,14 +1431,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_cpu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-}
-
+void arch_flush_lazy_cpu_mode(void);
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -1451,13 +1444,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_mmu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-}
+void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				unsigned long phys, pgprot_t flags)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 388254f69a2a..a00545fe5cdd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
 		now = hpet_readl(HPET_COUNTER);
 		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
+		/* Make sure we use edge triggered interrupts */
+		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
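
The two added lines fix a read-modify-write hazard: cfg is read back from HPET_Tn_CFG, so a level-trigger bit left set by firmware or an earlier driver would survive the plain OR of the enable bits below it. Clearing HPET_TN_LEVEL first guarantees the timer interrupt is edge triggered. Schematically, with made-up bit values rather than the real HPET register layout:

	unsigned long cfg = 0x0002;	/* suppose the level bit was left set */
	cfg &= ~0x0002;			/* clear it first ... */
	cfg |= 0x0104;			/* ... then OR in enable/periodic bits */
	/* result 0x0104: the level-trigger bit is reliably clear */
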
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cea11c8e3049..6dc4dca255e4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -286,6 +286,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
+void arch_flush_lazy_mmu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_mmu_mode();
+		arch_enter_lazy_mmu_mode();
+	}
+
+	preempt_enable();
+}
+
+void arch_flush_lazy_cpu_mode(void)
+{
+	preempt_disable();
+
+	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+		WARN_ON(preempt_count() == 1);
+		arch_leave_lazy_cpu_mode();
+		arch_enter_lazy_cpu_mode();
+	}
+
+	preempt_enable();
+}
+
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
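
These are the out-of-line replacements for the inlines removed from paravirt.h above. Making them real functions lets each flush bracket the leave/enter pair with preempt_disable()/preempt_enable(), so the per-CPU lazy-mode state is tested and flushed on one CPU throughout. The WARN_ON(preempt_count() == 1) appears intended to catch callers that were preemptible while a lazy mode was active: after the preempt_disable() here, a well-behaved lazy-mode section should hold at least one other preempt reference. A hedged usage sketch, with check_pte() as a hypothetical reader:

	arch_enter_lazy_mmu_mode();
	set_pte(ptep, new_pte);		/* may be queued by the pv backend */
	arch_flush_lazy_mmu_mode();	/* force queued updates to apply ... */
	check_pte(ptep);		/* hypothetical: now reads fresh state */
	arch_leave_lazy_mmu_mode();
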
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 7ec39ab37a2d..d2f7cd5b2c83 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -805,12 +805,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace() which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}
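
The new comment carries the reasoning: ptrace_bts_detach() can race with the untrace path when the child dies and is reaped by another thread, so detach is reduced to undoing the locked-memory accounting. Judging from the removed lines, the heavier teardown stays with ptrace_bts_untrace(), which runs later under tasklist_lock; roughly, as a sketch inferred from this diff rather than the full source:

	/* detach path: no tasklist_lock needed, accounting only */
	release_locked_buffer(child->bts_buffer, child->bts_size);

	/* untrace path, under tasklist_lock: the real teardown */
	ds_release_bts(child->bts);
	child->bts = NULL;
	ptrace_bts_free_buffer(child);
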
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1448bcb7f22f..433f7bd4648a 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
-int pagerange_is_ram(unsigned long start, unsigned long end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
-		if (page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 84ba74820ad6..8ca0d8566fc8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -575,7 +575,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
@@ -812,6 +811,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -854,6 +860,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }
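
change_page_attr_set_clr() now flushes lazy mmu mode at both ends: before the page-table walk, because under batched (paravirtualized) pte updates the in-memory ptes that lookup_address() reads can lag behind queued writes, and after the attribute change, so everything is committed before returning. A minimal sketch of the hazard the first flush closes, assuming a batching backend:

	arch_enter_lazy_mmu_mode();
	set_pte(ptep, pte);		/* queued, not yet in the page table */
	/* a page-table walk here would read the stale pte ... */
	arch_flush_lazy_mmu_mode();	/* ... so drain the queue first */
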
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9127e31c7268..05f9aef6818a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -220,6 +220,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 static struct memtype *cached_entry;
 static u64 cached_start;
 
+static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	     ++page_nr) {
+		/*
+		 * For legacy reasons, physical address range in the legacy ISA
+		 * region is tracked as non-RAM. This will allow users of
+		 * /dev/mem to map portions of legacy ISA region, even when
+		 * some of those portions are listed(or not even listed) with
+		 * different e820 types(RAM/reserved/..)
+		 */
+		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
+		    page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * For RAM pages, mark the pages as non WB memory type using
  * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@@ -345,20 +372,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type)
 		*new_type = actual_type;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return reserve_ram_pages_type(start, end, req_type,
-						      new_type);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type,
+					      new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -455,19 +474,11 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	/*
-	 * For legacy reasons, some parts of the physical address range in the
-	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
-	 * the e820 tables). So we will track the memory attributes of this
-	 * legacy 1MB region using the linear memtype_list always.
-	 */
-	if (end >= ISA_END_ADDRESS) {
-		is_range_ram = pagerange_is_ram(start, end);
-		if (is_range_ram == 1)
-			return free_ram_pages_type(start, end);
-		else if (is_range_ram < 0)
-			return -EINVAL;
-	}
+	is_range_ram = pat_pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
@@ -635,17 +646,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
-	if (is_ram != 0) {
-		/*
-		 * For mapping RAM pages, drivers need to call
-		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
-		 * setting up the PTE.
-		 */
-		WARN_ON_ONCE(1);
-		return 0;
-	}
+	/*
+	 * reserve_pfn_range() doesn't support RAM pages.
+	 */
+	if (is_ram != 0)
+		return -EINVAL;
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -702,7 +709,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
 {
 	int is_ram;
 
-	is_ram = pagerange_is_ram(paddr, paddr + size);
+	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 	if (is_ram == 0)
 		free_memtype(paddr, paddr + size);
 }
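
pat_pagerange_is_ram() is tri-state: 1 if every page in the range is RAM, 0 if none is, and -1 for a mixed range, which reserve_memtype() and free_memtype() reject with -EINVAL. Pages below ISA_END_ADDRESS are deliberately classified as non-RAM so /dev/mem mappings of the legacy region keep going through the memtype list. A worked example against a hypothetical e820 layout with RAM up to 2 GiB and reserved memory above:

	/* pfn 0x7ffff is RAM, pfn 0x80000 is reserved (hypothetical) */
	pat_pagerange_is_ram(0x7ffff000UL, 0x80001000UL);
	/* first page: ram_page = 1; second page: not_rampage = 1;
	 * ram_page == not_rampage -> returns -1 -> callers give -EINVAL */

	pat_pagerange_is_ram(0x000a0000UL, 0x000c0000UL);
	/* entirely below ISA_END_ADDRESS: every page counts as non-RAM,
	 * so this returns 0 and the range stays on the memtype list */
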
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 323561582c10..7dc04ff5ab89 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1304,5 +1304,6 @@ void vmemmap_populate_print_last(void);
 
 extern void *alloc_locked_buffer(size_t size);
 extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }
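
free_locked_buffer() is split so the locked_vm accounting can be undone separately from freeing the memory: release_locked_buffer() drops the pages from current->mm->locked_vm under mmap_sem and leaves the buffer intact, while free_locked_buffer() layers kfree() on top. For a caller like the ptrace BTS code above, the two paths look roughly like this (alternatives, not a sequence):

	void *buf = alloc_locked_buffer(size);	/* charges locked_vm */

	/* path A: undo the accounting now, free the memory later */
	release_locked_buffer(buf, size);
	/* ... buffer may still be referenced elsewhere ... */
	kfree(buf);

	/* path B: completely done with the buffer */
	free_locked_buffer(buf, size);		/* accounting + kfree */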