Diffstat (limited to 'arch/x86/mm')

 arch/x86/mm/hugetlbpage.c | 21 ++++++++++++++++-----
 arch/x86/mm/pageattr.c    | 10 ++++------
 arch/x86/mm/srat.c        | 15 ++++++++-------
 3 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8ca..b91e48512425 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 }
 
 /*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
         struct vm_area_struct *vma = find_vma(mm, addr);
         struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         struct vm_area_struct *svma;
         unsigned long saddr;
         pte_t *spte = NULL;
+        pte_t *pte;
 
         if (!vma_shareable(vma, addr))
-                return;
+                return (pte_t *)pmd_alloc(mm, pud, addr);
 
         mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                 put_page(virt_to_page(spte));
         spin_unlock(&mm->page_table_lock);
 out:
+        pte = (pte_t *)pmd_alloc(mm, pud, addr);
         mutex_unlock(&mapping->i_mmap_mutex);
+        return pte;
 }
 
 /*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                 } else {
                         BUG_ON(sz != PMD_SIZE);
                         if (pud_none(*pud))
-                                huge_pmd_share(mm, addr, pud);
-                        pte = (pte_t *) pmd_alloc(mm, pud, addr);
+                                pte = huge_pmd_share(mm, addr, pud);
+                        else
+                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
                 }
         }
         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
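
The new comment in the first hunk is the heart of this change: the pud must be populated while i_mmap_mutex is still held, or a racing task can miss the sharing. Below is a minimal user-space sketch of that locking rule, not kernel code; pthread_mutex, table_entry and alloc_or_share() are hypothetical stand-ins for i_mmap_mutex, the pud slot and huge_pmd_share()/pmd_alloc().

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int *table_entry;                /* models the pud slot */

    static int *alloc_or_share(void)
    {
            int *entry;

            pthread_mutex_lock(&table_mutex);
            if (!table_entry)               /* models pud_none() */
                    table_entry = calloc(1, sizeof(*table_entry));
            entry = table_entry;            /* models pmd_alloc() */
            pthread_mutex_unlock(&table_mutex);  /* populate first, then unlock */
            return entry;
    }

    static void *task(void *arg)
    {
            (void)arg;
            return alloc_or_share();
    }

    int main(void)
    {
            pthread_t a, b;
            void *ra, *rb;

            pthread_create(&a, NULL, task, NULL);
            pthread_create(&b, NULL, task, NULL);
            pthread_join(a, &ra);
            pthread_join(b, &rb);

            /* Both tasks must observe the same entry. */
            printf("shared: %s\n", ra == rb ? "yes" : "no");
            free(table_entry);
            return 0;
    }

If the entry were installed only after the unlock, the second task could allocate a private copy; that is the race the patch closes by calling pmd_alloc() before dropping i_mmap_mutex.
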
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a96160..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
         /*
          * On success we use clflush, when the CPU supports it to
-         * avoid the wbindv. If the CPU does not support it, in the
-         * error case, and during early boot (for EFI) we fall back
-         * to cpa_flush_all (which uses wbinvd):
+         * avoid the wbindv. If the CPU does not support it and in the
+         * error case we fall back to cpa_flush_all (which uses
+         * wbindv):
          */
-        if (early_boot_irqs_disabled)
-                __cpa_flush_all((void *)(long)cache);
-        else if (!ret && cpu_has_clflush) {
+        if (!ret && cpu_has_clflush) {
                 if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                         cpa_flush_array(addr, numpages, cache,
                                         cpa.flags, pages);
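
With the early_boot_irqs_disabled special case removed, the remaining flush policy is a two-way choice: flush only the changed range with clflush on success when the CPU supports it, otherwise fall back to a full wbinvd flush. The sketch below is a compilable model of just that decision, not the kernel function; ret and cpu_has_clflush stand in for the variables in change_page_attr_set_clr().

    #include <stdbool.h>
    #include <stdio.h>

    enum flush_kind { FLUSH_RANGE_CLFLUSH, FLUSH_ALL_WBINVD };

    static enum flush_kind pick_flush(int ret, bool cpu_has_clflush)
    {
            if (!ret && cpu_has_clflush)
                    return FLUSH_RANGE_CLFLUSH;     /* targeted, cheap */
            return FLUSH_ALL_WBINVD;                /* error path or no clflush */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   pick_flush(0, true),     /* 0: success + clflush */
                   pick_flush(0, false),    /* 1: no clflush support */
                   pick_flush(-1, true));   /* 1: error path */
            return 0;
    }
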
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb6..4ddf497ca65b 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
 #endif
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
 acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
         u64 start, end;
         int node, pxm;
 
         if (srat_disabled())
-                return;
+                return -1;
         if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
                 bad_srat();
-                return;
+                return -1;
         }
         if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
-                return;
+                return -1;
 
         if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
-                return;
+                return -1;
         start = ma->base_address;
         end = start + ma->length;
         pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
         if (node < 0) {
                 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                 bad_srat();
-                return;
+                return -1;
         }
 
         if (numa_add_memblk(node, start, end) < 0) {
                 bad_srat();
-                return;
+                return -1;
         }
 
         node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
         printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
                node, pxm,
                (unsigned long long) start, (unsigned long long) end - 1);
+        return 0;
 }
 
 void __init acpi_numa_arch_fixup(void) {}
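
The srat.c change is purely a calling-convention change: every early exit now reports failure as -1 and the success path returns 0, so a caller can tell whether an affinity entry was accepted. A hypothetical user-space sketch of that convention follows; the struct and flag below are illustrative stand-ins, not the ACPI types.

    #include <stdint.h>
    #include <stdio.h>

    struct mem_affinity {
            uint64_t base_address;
            uint64_t length;
            uint32_t flags;
    };

    #define MEM_ENABLED 0x1

    /* Mirrors the new convention: 0 on success, -1 when the entry is
     * disabled or malformed. */
    static int parse_affinity(const struct mem_affinity *ma)
    {
            if (!(ma->flags & MEM_ENABLED))
                    return -1;
            if (ma->length == 0)
                    return -1;
            printf("mem [%#llx-%#llx]\n",
                   (unsigned long long)ma->base_address,
                   (unsigned long long)(ma->base_address + ma->length - 1));
            return 0;
    }

    int main(void)
    {
            struct mem_affinity ok = { 0x100000, 0x1000, MEM_ENABLED };
            struct mem_affinity bad = { 0, 0, 0 };

            /* A caller can now count or report rejected entries. */
            if (parse_affinity(&ok) < 0)
                    fprintf(stderr, "entry rejected\n");
            if (parse_affinity(&bad) < 0)
                    fprintf(stderr, "entry rejected\n");
            return 0;
    }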