about summary refs log tree commit diff stats
path: root/include/asm-generic
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-10-21 16:47:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-10-21 16:47:29 -0400
commitc3b86a29429dac1033e3f602f51fa8d00006a8eb (patch)
treebcedd0a553ca2396eeb58318ef6ee6b426e83652 /include/asm-generic
parent8d8d2e9ccd331a1345c88b292ebee9d256fd8749 (diff)
parent2aeb66d3036dbafc297ac553a257a40283dadb3e (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32, percpu: Correct the ordering of the percpu readmostly section
  x86, mm: Enable ARCH_DMA_ADDR_T_64BIT with X86_64 || HIGHMEM64G
  x86: Spread tlb flush vector between nodes
  percpu: Introduce a read-mostly percpu API
  x86, mm: Fix incorrect data type in vmalloc_sync_all()
  x86, mm: Hold mm->page_table_lock while doing vmalloc_sync
  x86, mm: Fix bogus whitespace in sync_global_pgds()
  x86-32: Fix sparse warning for the __PHYSICAL_MASK calculation
  x86, mm: Add RESERVE_BRK_ARRAY() helper
  mm, x86: Saving vmcore with non-lazy freeing of vmas
  x86, kdump: Change copy_oldmem_page() to use cached addressing
  x86, mm: fix uninitialized addr in kernel_physical_mapping_init()
  x86, kmemcheck: Remove double test
  x86, mm: Make spurious_fault check explicitly check the PRESENT bit
  x86-64, mem: Update all PGDs for direct mapping and vmemmap mapping changes
  x86, mm: Separate x86_64 vmalloc_sync_all() into separate functions
  x86, mm: Avoid unnecessary TLB flush
Diffstat (limited to 'include/asm-generic')
-rw-r--r--include/asm-generic/pgtable.h4
-rw-r--r--include/asm-generic/vmlinux.lds.h4
2 files changed, 8 insertions, 0 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e2bd73e8f9c0..f4d4120e5128 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot)	(prot)
 #endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef2af9948eac..f4229fb315e1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -687,7 +687,9 @@
 	- LOAD_OFFSET) {					\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;		\
 		*(.data..percpu..first)				\
+		. = ALIGN(PAGE_SIZE);				\
 		*(.data..percpu..page_aligned)			\
+		*(.data..percpu..readmostly)			\
 		*(.data..percpu)				\
 		*(.data..percpu..shared_aligned)	 	\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;		\
@@ -713,7 +715,9 @@
 	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
 	*(.data..percpu..first)					\
+	. = ALIGN(PAGE_SIZE);					\
 	*(.data..percpu..page_aligned)				\
+	*(.data..percpu..readmostly)				\
 	*(.data..percpu)					\
 	*(.data..percpu..shared_aligned)			\
 	VMLINUX_SYMBOL(__per_cpu_end) = .;			\