author		Linus Torvalds <torvalds@linux-foundation.org>	2019-09-29 13:33:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-09-29 13:33:41 -0400
commit		a3c0e7b1fe1fc62bba5f591c4bc404eea96823b8 (patch)
tree		494409b373e8d43e72c88d228e77f3d3aaf46a91
parent		939ca9f1751d1d65424f80b9284b6c18e78c7f4e (diff)
parent		4c806b897d6075bfa5067e524fb058c57ab64e7b (diff)
Merge tag 'libnvdimm-fixes-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
More libnvdimm updates from Dan Williams:

 - Complete the reworks to interoperate with powerpc dynamic huge page
   sizes

 - Fix a crash due to missed accounting for the powerpc 'struct
   page'-memmap mapping granularity

 - Fix badblock initialization for volatile (DRAM emulated) pmem ranges

 - Stop triggering request_key() notifications to userspace when
   NVDIMM-security is disabled / not present

 - Miscellaneous small fixups

* tag 'libnvdimm-fixes-5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm/region: Enable MAP_SYNC for volatile regions
  libnvdimm: prevent nvdimm from requesting key when security is disabled
  libnvdimm/region: Initialize bad block for volatile namespaces
  libnvdimm/nfit_test: Fix acpi_handle redefinition
  libnvdimm/altmap: Track namespace boundaries in altmap
  libnvdimm: Fix endian conversion issues
  libnvdimm/dax: Pick the right alignment default when creating dax devices
  powerpc/book3s64: Export has_transparent_hugepage() related functions.
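The first entry in the shortlog, "libnvdimm/region: Enable MAP_SYNC for volatile regions", is visible to applications through the mmap() flag check on DAX files. A minimal userspace sketch of that check follows; the file path is a placeholder, and the fallback flag values are the asm-generic uapi ones, included only in case the libc headers predate them:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE     0x03            /* asm-generic uapi value */
#endif
#ifndef MAP_SYNC
#define MAP_SYNC                0x080000        /* asm-generic uapi value */
#endif

int main(void)
{
        /* hypothetical file on a DAX-mounted filesystem backed by a
         * volatile (DRAM-emulated) pmem namespace */
        int fd = open("/mnt/pmem/data", O_RDWR);
        if (fd < 0)
                return 1;

        /* MAP_SYNC must be paired with MAP_SHARED_VALIDATE; the kernel
         * rejects the combination with EOPNOTSUPP when it cannot make the
         * mapping synchronous, which is what volatile regions hit before
         * this series. */
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        if (p == MAP_FAILED)
                perror("mmap(MAP_SYNC)");

        close(fd);
        return p == MAP_FAILED;
}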
-rw-r--r--	arch/powerpc/include/asm/book3s/64/radix.h	8
-rw-r--r--	arch/powerpc/mm/book3s64/hash_pgtable.c	2
-rw-r--r--	arch/powerpc/mm/book3s64/radix_pgtable.c	7
-rw-r--r--	arch/powerpc/mm/init_64.c	17
-rw-r--r--	drivers/nvdimm/btt.c	8
-rw-r--r--	drivers/nvdimm/bus.c	2
-rw-r--r--	drivers/nvdimm/namespace_devs.c	7
-rw-r--r--	drivers/nvdimm/nd.h	6
-rw-r--r--	drivers/nvdimm/pfn_devs.c	77
-rw-r--r--	drivers/nvdimm/region.c	4
-rw-r--r--	drivers/nvdimm/region_devs.c	7
-rw-r--r--	drivers/nvdimm/security.c	4
-rw-r--r--	include/linux/huge_mm.h	7
-rw-r--r--	include/linux/memremap.h	1
-rw-r--r--	tools/testing/nvdimm/test/nfit_test.h	4
15 files changed, 110 insertions, 51 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 574eca33f893..d97db3ad9aae 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -254,7 +254,13 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                             unsigned long addr, pmd_t *pmdp);
-extern int radix__has_transparent_hugepage(void);
+static inline int radix__has_transparent_hugepage(void)
+{
+        /* For radix 2M at PMD level means thp */
+        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
+                return 1;
+        return 0;
+}
 #endif
 
 extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index d1f390ac9cdb..64733b9cb20a 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -406,6 +406,8 @@ int hash__has_transparent_hugepage(void)
 
         return 1;
 }
+EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 3a1fbf9cb8f8..6ee17d09649c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1027,13 +1027,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
         return old_pmd;
 }
 
-int radix__has_transparent_hugepage(void)
-{
-        /* For radix 2M at PMD level means thp */
-        if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
-                return 1;
-        return 0;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a44f6281ca3a..4e08246acd79 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -172,6 +172,21 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
         vmemmap_list = vmem_back;
 }
 
+static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
+                                  unsigned long page_size)
+{
+        unsigned long nr_pfn = page_size / sizeof(struct page);
+        unsigned long start_pfn = page_to_pfn((struct page *)start);
+
+        if ((start_pfn + nr_pfn) > altmap->end_pfn)
+                return true;
+
+        if (start_pfn < altmap->base_pfn)
+                return true;
+
+        return false;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                 struct vmem_altmap *altmap)
 {
@@ -194,7 +209,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                 * fail due to alignment issues when using 16MB hugepages, so
                 * fall back to system memory if the altmap allocation fail.
                 */
-               if (altmap) {
+               if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
                        p = altmap_alloc_block_buf(page_size, altmap);
                        if (!p)
                                pr_debug("altmap block allocation failed, falling back to system memory");
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index a8d56887ec88..3e9f45aec8d1 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -392,9 +392,9 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
         arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
         if (++(arena->freelist[lane].seq) == 4)
                 arena->freelist[lane].seq = 1;
-        if (ent_e_flag(ent->old_map))
+        if (ent_e_flag(le32_to_cpu(ent->old_map)))
                 arena->freelist[lane].has_err = 1;
-        arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));
+        arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
 
         return ret;
 }
@@ -560,8 +560,8 @@ static int btt_freelist_init(struct arena_info *arena)
                 * FIXME: if error clearing fails during init, we want to make
                 * the BTT read-only
                 */
-                if (ent_e_flag(log_new.old_map) &&
-                                !ent_normal(log_new.old_map)) {
+                if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
+                                !ent_normal(le32_to_cpu(log_new.old_map))) {
                         arena->freelist[i].has_err = 1;
                         ret = arena_clear_freelist_error(arena, i);
                         if (ret)
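The two btt.c hunks above are about ordering: the on-media fields are little-endian, so le32_to_cpu() has to run before the flag/LBA bits are extracted, not after. A small sketch of the pattern, assuming a hypothetical field layout (the example_* names and bit positions are illustrative, not btt.h's definitions):

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_ERR_MASK        (1U << 30)              /* hypothetical error-flag bit */
#define EXAMPLE_LBA_MASK        (EXAMPLE_ERR_MASK - 1)  /* hypothetical LBA field */

static inline bool example_err_flag(__le32 raw)
{
        u32 val = le32_to_cpu(raw);     /* convert the whole word first ... */

        return val & EXAMPLE_ERR_MASK;  /* ... then test the flag bit */
}

static inline u32 example_lba(__le32 raw)
{
        /* Masking the raw __le32 and converting afterwards picks up the
         * wrong bits on big-endian CPUs; convert first, mask second. */
        return le32_to_cpu(raw) & EXAMPLE_LBA_MASK;
}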
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 75a58a6e9615..d47412dcdf38 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -180,7 +180,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
         sector_t sector;
 
         /* make sure device is a region */
-        if (!is_nd_pmem(dev))
+        if (!is_memory(dev))
                 return 0;
 
         nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 43401325c874..cca0a3ba1d2c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1987,7 +1987,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
                 nd_mapping = &nd_region->mapping[i];
                 label_ent = list_first_entry_or_null(&nd_mapping->labels,
                                 typeof(*label_ent), list);
-                label0 = label_ent ? label_ent->label : 0;
+                label0 = label_ent ? label_ent->label : NULL;
 
                 if (!label0) {
                         WARN_ON(1);
@@ -2322,8 +2322,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
                         continue;
 
                 /* skip labels that describe extents outside of the region */
-                if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
-                        continue;
+                if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
+                                __le64_to_cpu(nd_label->dpa) > map_end)
+                        continue;
 
                 i = add_namespace_resource(nd_region, nd_label, devs, count);
                 if (i < 0)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index e89af4b2d8e9..ee5c04070ef9 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -289,11 +289,7 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
 struct nd_pfn *to_nd_pfn(struct device *dev);
 #if IS_ENABLED(CONFIG_NVDIMM_PFN)
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE
-#else
-#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE
-#endif
+#define MAX_NVDIMM_ALIGN	4
 
 int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
 bool is_nd_pfn(struct device *dev);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index bb9cc5cf0873..60d81fae06ee 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -103,39 +103,42 @@ static ssize_t align_show(struct device *dev,
         return sprintf(buf, "%ld\n", nd_pfn->align);
 }
 
-static const unsigned long *nd_pfn_supported_alignments(void)
+static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
 {
-        /*
-         * This needs to be a non-static variable because the *_SIZE
-         * macros aren't always constants.
-         */
-        const unsigned long supported_alignments[] = {
-                PAGE_SIZE,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                HPAGE_PMD_SIZE,
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-                HPAGE_PUD_SIZE,
-#endif
-#endif
-                0,
-        };
-        static unsigned long data[ARRAY_SIZE(supported_alignments)];
 
-        memcpy(data, supported_alignments, sizeof(data));
+        alignments[0] = PAGE_SIZE;
+
+        if (has_transparent_hugepage()) {
+                alignments[1] = HPAGE_PMD_SIZE;
+                if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+                        alignments[2] = HPAGE_PUD_SIZE;
+        }
+
+        return alignments;
+}
+
+/*
+ * Use pmd mapping if supported as default alignment
+ */
+static unsigned long nd_pfn_default_alignment(void)
+{
 
-        return data;
+        if (has_transparent_hugepage())
+                return HPAGE_PMD_SIZE;
+        return PAGE_SIZE;
 }
 
 static ssize_t align_store(struct device *dev,
                 struct device_attribute *attr, const char *buf, size_t len)
 {
         struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
+        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
         ssize_t rc;
 
         nd_device_lock(dev);
         nvdimm_bus_lock(dev);
         rc = nd_size_select_store(dev, buf, &nd_pfn->align,
-                        nd_pfn_supported_alignments());
+                        nd_pfn_supported_alignments(aligns));
         dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                         buf[len - 1] == '\n' ? "" : "\n");
         nvdimm_bus_unlock(dev);
@@ -259,7 +262,10 @@ static DEVICE_ATTR_RO(size);
 static ssize_t supported_alignments_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
 {
-        return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
+        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+        return nd_size_select_show(0,
+                        nd_pfn_supported_alignments(aligns), buf);
 }
 static DEVICE_ATTR_RO(supported_alignments);
 
@@ -302,7 +308,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                 return NULL;
 
         nd_pfn->mode = PFN_MODE_NONE;
-        nd_pfn->align = PFN_DEFAULT_ALIGNMENT;
+        nd_pfn->align = nd_pfn_default_alignment();
         dev = &nd_pfn->dev;
         device_initialize(&nd_pfn->dev);
         if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
@@ -412,6 +418,21 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
         return 0;
 }
 
+static bool nd_supported_alignment(unsigned long align)
+{
+        int i;
+        unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };
+
+        if (align == 0)
+                return false;
+
+        nd_pfn_supported_alignments(supported);
+        for (i = 0; supported[i]; i++)
+                if (align == supported[i])
+                        return true;
+        return false;
+}
+
 /**
  * nd_pfn_validate - read and validate info-block
  * @nd_pfn: fsdax namespace runtime state / properties
@@ -496,6 +517,18 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                 return -EOPNOTSUPP;
         }
 
+        /*
+         * Check whether the we support the alignment. For Dax if the
+         * superblock alignment is not matching, we won't initialize
+         * the device.
+         */
+        if (!nd_supported_alignment(align) &&
+                        !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
+                dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
+                                "%ld:%ld\n", nd_pfn->align, align);
+                return -EOPNOTSUPP;
+        }
+
         if (!nd_pfn->uuid) {
                 /*
                  * When probing a namepace via nd_pfn_probe() the uuid
@@ -639,9 +672,11 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
         struct nd_namespace_common *ndns = nd_pfn->ndns;
         struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
         resource_size_t base = nsio->res.start + start_pad;
+        resource_size_t end = nsio->res.end - end_trunc;
         struct vmem_altmap __altmap = {
                 .base_pfn = init_altmap_base(base),
                 .reserve = init_altmap_reserve(base),
+                .end_pfn = PHYS_PFN(end),
         };
 
         memcpy(res, &nsio->res, sizeof(*res));
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 37bf8719a2a4..0f6978e72e7c 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,7 +34,7 @@ static int nd_region_probe(struct device *dev)
         if (rc)
                 return rc;
 
-        if (is_nd_pmem(&nd_region->dev)) {
+        if (is_memory(&nd_region->dev)) {
                 struct resource ndr_res;
 
                 if (devm_init_badblocks(dev, &nd_region->bb))
@@ -123,7 +123,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
                 struct nd_region *nd_region = to_nd_region(dev);
                 struct resource res;
 
-                if (is_nd_pmem(&nd_region->dev)) {
+                if (is_memory(&nd_region->dev)) {
                         res.start = nd_region->ndr_start;
                         res.end = nd_region->ndr_start +
                                 nd_region->ndr_size - 1;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 3fd6b59abd33..ef423ba1a711 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -632,11 +632,11 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
         if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
                 return 0;
 
-        if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+        if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
                 return 0;
 
         if (a == &dev_attr_resource.attr) {
-                if (is_nd_pmem(dev))
+                if (is_memory(dev))
                         return 0400;
                 else
                         return 0;
@@ -1168,6 +1168,9 @@ EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
 bool is_nvdimm_sync(struct nd_region *nd_region)
 {
+        if (is_nd_volatile(&nd_region->dev))
+                return true;
+
         return is_nd_pmem(&nd_region->dev) &&
                 !test_bit(ND_REGION_ASYNC, &nd_region->flags);
 }
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 9e45b207ff01..89b85970912d 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -177,6 +177,10 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
                         || !nvdimm->sec.flags)
                 return -EIO;
 
+        /* No need to go further if security is disabled */
+        if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
+                return 0;
+
         if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
                 dev_dbg(dev, "Security operation in progress.\n");
                 return -EBUSY;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 61c9ffd89b05..93d5cf0bc716 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -108,7 +108,12 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 
         if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                 return true;
-
+        /*
+         * For dax vmas, try to always use hugepage mappings. If the kernel does
+         * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
+         * mappings, and device-dax namespaces, that try to guarantee a given
+         * mapping size, will fail to enable
+         */
         if (vma_is_dax(vma))
                 return true;
 
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index bef51e35d8d2..6fefb09af7c3 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -17,6 +17,7 @@ struct device;
  */
 struct vmem_altmap {
         const unsigned long base_pfn;
+        const unsigned long end_pfn;
         const unsigned long reserve;
         unsigned long free;
         unsigned long align;
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 448d686da8b1..0bf5640f1f07 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -4,6 +4,7 @@
  */
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
+#include <linux/acpi.h>
 #include <linux/list.h>
 #include <linux/uuid.h>
 #include <linux/ioport.h>
@@ -202,9 +203,6 @@ struct nd_intel_lss {
         __u32 status;
 } __packed;
 
-union acpi_object;
-typedef void *acpi_handle;
-
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
                 const guid_t *guid, u64 rev, u64 func,