author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:23:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:23:15 -0400
commit		9e2d8656f5e8aa214e66b462680cf86b210b74a8 (patch)
tree		f67d62e896cedf75599ea45f9ecf9999c6ad24cd /include/asm-generic
parent		1ea4f4f8405cc1ceec23f2d261bc3775785e6712 (diff)
parent		9e695d2ecc8451cc2c1603d60b5c8e7f5581923a (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge patches from Andrew Morton:
"A few misc things and very nearly all of the MM tree. A tremendous
amount of stuff (again), including a significant rbtree library
rework."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (160 commits)
sparc64: Support transparent huge pages.
mm: thp: Use more portable PMD clearing sequence in zap_huge_pmd().
mm: Add and use update_mmu_cache_pmd() in transparent huge page code.
sparc64: Document PGD and PMD layout.
sparc64: Eliminate PTE table memory wastage.
sparc64: Halve the size of PTE tables
sparc64: Only support 4MB huge pages and 8KB base pages.
memory-hotplug: suppress "Trying to free nonexistent resource <XXXXXXXXXXXXXXXX-YYYYYYYYYYYYYYYY>" warning
mm: memcg: clean up mm_match_cgroup() signature
mm: document PageHuge somewhat
mm: use %pK for /proc/vmallocinfo
mm, thp: fix mlock statistics
mm, thp: fix mapped pages avoiding unevictable list on mlock
memory-hotplug: update memory block's state and notify userspace
memory-hotplug: preparation to notify memory block's state at memory hot remove
mm: avoid section mismatch warning for memblock_type_name
make GFP_NOTRACK definition unconditional
cma: decrease cc.nr_migratepages after reclaiming pagelist
CMA: migrate mlocked pages
kpageflags: fix wrong KPF_THP on non-huge compound pages
...
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/pgtable.h	72
1 file changed, 48 insertions, 24 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ff4947b7a97..b36ce40bd1c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -87,7 +87,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 				       pmd_t *pmdp)
 {
 	pmd_t pmd = *pmdp;
-	pmd_clear(mm, address, pmdp);
+	pmd_clear(pmdp);
 	return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -162,6 +162,19 @@ extern void pmdp_splitting_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
 #endif
 
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+			    pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
@@ -381,48 +394,59 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 
 #ifndef __HAVE_PFNMAP_TRACKING
 /*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_new is called when a _new_ pfn mapping is being established
- * for physical range indicated by pfn and size.
+ * Interfaces that can be used by architecture code to keep track of
+ * memory type of pfn mappings specified by the remap_pfn_range,
+ * vm_insert_pfn.
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
-				    unsigned long pfn, unsigned long size)
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+				  unsigned long pfn, unsigned long addr,
+				  unsigned long size)
 {
 	return 0;
 }
 
 /*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
- * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vm_insert_pfn().
+ */
+static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+				   unsigned long pfn)
+{
+	return 0;
+}
+
+/*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
  * copied through copy_page_range().
  */
-static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+static inline int track_pfn_copy(struct vm_area_struct *vma)
 {
 	return 0;
 }
 
 /*
- * Interface that can be used by architecture code to keep track of
- * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
- *
  * untrack_pfn_vma is called while unmapping a pfnmap for a region.
  * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case size can be zero).
+ * can be for the entire vma (in which case pfn, size are zero).
  */
-static inline void untrack_pfn_vma(struct vm_area_struct *vma,
-				   unsigned long pfn, unsigned long size)
+static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
-			     unsigned long pfn, unsigned long size);
-extern int track_pfn_vma_copy(struct vm_area_struct *vma);
-extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
-			    unsigned long size);
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+			   unsigned long pfn, unsigned long addr,
+			   unsigned long size);
+extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+			    unsigned long pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size);
 #endif
 
 #ifdef CONFIG_MMU
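
As background for the interface rename in the last hunk, the following is a minimal user-space sketch (not kernel code) of how the pfnmap-tracking hooks are meant to be paired. The pgprot_t and vm_area_struct definitions and the remap_whole_vma() caller are simplified stand-ins invented here for illustration; only the hook names, parameters, and no-op bodies follow the !__HAVE_PFNMAP_TRACKING stubs in the diff above.

/*
 * Illustrative, user-space-only sketch.  pgprot_t and struct vm_area_struct
 * are simplified stand-ins for the kernel types; remap_whole_vma() is a
 * hypothetical caller, not a function from the kernel tree.
 */
#include <stdio.h>

typedef struct { unsigned long pgprot; } pgprot_t;		/* stand-in */
struct vm_area_struct { unsigned long vm_start, vm_end; };	/* stand-in */

/* No-op stubs mirroring the generic !__HAVE_PFNMAP_TRACKING branch above. */
static int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size)
{
	return 0;	/* an architecture implementation may fail here on a memtype conflict */
}

static int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			    unsigned long pfn)
{
	return 0;	/* single-pfn variant, used on the vm_insert_pfn() path */
}

static void untrack_pfn(struct vm_area_struct *vma,
			unsigned long pfn, unsigned long size)
{
	/* pfn == 0 && size == 0 means "untrack the entire vma" */
}

/* Hypothetical caller showing the intended pairing of the hooks. */
static int remap_whole_vma(struct vm_area_struct *vma, unsigned long pfn)
{
	pgprot_t prot = { 0 };
	unsigned long size = vma->vm_end - vma->vm_start;
	int err;

	/* A new pfn range is being established: track it first. */
	err = track_pfn_remap(vma, &prot, pfn, vma->vm_start, size);
	if (err)
		return err;

	/* ... the actual page-table setup would happen here ... */

	/* On unmap (or failure), drop the tracking for the whole vma. */
	untrack_pfn(vma, 0, 0);
	return 0;
}

int main(void)
{
	struct vm_area_struct vma = { .vm_start = 0x1000, .vm_end = 0x5000 };
	pgprot_t prot = { 0 };

	printf("remap:  %d\n", remap_whole_vma(&vma, 0x1234));
	printf("insert: %d\n", track_pfn_insert(&vma, &prot, 0x1234));
	return 0;
}

The design point visible in the diff is that the old track_pfn_vma_new() covered both remap_pfn_range() and vm_insert_pfn(); it is now split into track_pfn_remap(), which handles a whole physical range and so takes addr and size, and track_pfn_insert(), which handles a single pfn and takes neither, while untrack_pfn() keeps the pfn/size arguments and, per the updated comment, treats zero values as "the entire vma".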