author:    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-08 17:35:59 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2015-09-08 17:35:59 -0400
commit:    12f03ee606914317e7e6a0815e53a48205c31dae
tree:      f8579bf77d29b3921e1877e0ae12ec65b5ebc738
parent:    d9241b22b58e012f26dd2244508d9f4837402af0
parent:    004f1afbe199e6ab20805b95aefd83ccd24bc5c7
Merge tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
"This update has successfully completed a 0day-kbuild run and has
appeared in a linux-next release. The changes outside of the typical
drivers/nvdimm/ and drivers/acpi/nfit.[ch] paths are related to the
removal of IORESOURCE_CACHEABLE, the introduction of memremap(), and
the introduction of ZONE_DEVICE + devm_memremap_pages().
Summary:
- Introduce ZONE_DEVICE and devm_memremap_pages() as a generic
mechanism for adding device-driver-discovered memory regions to the
kernel's direct map.
This facility is used by the pmem driver to enable pfn_to_page()
operations on the page frames returned by DAX ('direct_access' in
'struct block_device_operations').
For now, the 'memmap' allocation for these "device" pages comes
from "System RAM". Support for allocating the memmap from device
memory will arrive in a later kernel.
- Introduce memremap() to replace usages of ioremap_cache() and
ioremap_wt(). memremap() drops the __iomem annotation for these
mappings, since the underlying memory has no I/O side effects. The
replacement of ioremap_cache() with memremap() is limited to the
pmem driver to ease merging the API change in v4.3.
Completion of the conversion is targeted for v4.4.
- Similar to the usage of memcpy_to_pmem() + wmb_pmem() in the pmem
driver, update the VFS DAX implementation and PMEM api to provide
persistence guarantees for kernel operations on a DAX mapping.
- Convert the ACPI NFIT 'BLK' driver to map the block apertures as
cacheable to improve performance.
- Miscellaneous updates and fixes to libnvdimm including support for
issuing "address range scrub" commands, clarifying the optimal
'sector size' of pmem devices, a clarification of the usage of the
ACPI '_STA' (status) property for DIMM devices, and other minor
fixes"
* tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (34 commits)
libnvdimm, pmem: direct map legacy pmem by default
libnvdimm, pmem: 'struct page' for pmem
libnvdimm, pfn: 'struct page' provider infrastructure
x86, pmem: clarify that ARCH_HAS_PMEM_API implies PMEM mapped WB
add devm_memremap_pages
mm: ZONE_DEVICE for "device memory"
mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h
dax: drop size parameter to ->direct_access()
nd_blk: change aperture mapping from WC to WB
nvdimm: change to use generic kvfree()
pmem, dax: have direct_access use __pmem annotation
dax: update I/O path to do proper PMEM flushing
pmem: add copy_from_iter_pmem() and clear_pmem()
pmem, x86: clean up conditional pmem includes
pmem: remove layer when calling arch_has_wmb_pmem()
pmem, x86: move x86 PMEM API to new pmem.h header
libnvdimm, e820: make CONFIG_X86_PMEM_LEGACY a tristate option
pmem: switch to devm_ allocations
devres: add devm_memremap
libnvdimm, btt: write and validate parent_uuid
...
92 files changed, 2142 insertions, 745 deletions
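Similarly, a hedged sketch of the new ZONE_DEVICE entry point described in
the first summary item (v4.3 signature assumed: devm_memremap_pages(dev, res);
the example_* name is hypothetical):

	#include <linux/err.h>
	#include <linux/io.h>	/* devm_memremap_pages() declaration in v4.3 */

	/*
	 * Hand a device-discovered memory range to the kernel's direct map
	 * so that pfn_to_page() works on its frames (ZONE_DEVICE pages).
	 */
	static void *example_enable_pages(struct device *dev, struct resource *res)
	{
		void *addr = devm_memremap_pages(dev, res);

		if (IS_ERR(addr))
			return addr;	/* e.g. ERR_PTR(-ENXIO) without ZONE_DEVICE */
		/* each pfn in 'res' now has a struct page for DAX/O_DIRECT use */
		return addr;
	}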
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 6a34a0f4d37c..06d443450f21 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -397,7 +397,8 @@ prototypes:
 	int (*release) (struct gendisk *, fmode_t);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
+	int (*direct_access) (struct block_device *, sector_t, void __pmem **,
+				unsigned long *);
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
diff --git a/MAINTAINERS b/MAINTAINERS
index 6dfc2242715d..82778382bef6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6229,6 +6229,7 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
 F:	drivers/nvdimm/pmem.c
 F:	include/linux/pmem.h
+F:	arch/*/include/asm/pmem.h
 
 LINUX FOR IBM pSERIES (RS/6000)
 M:	Paul Mackerras <paulus@au.ibm.com>
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b7f6fb462ea0..98d58bb04ac5 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -119,12 +119,6 @@
 #endif
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
-#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/arm/mach-clps711x/board-cdb89712.c b/arch/arm/mach-clps711x/board-cdb89712.c
index 1ec378c334e5..972abdb10028 100644
--- a/arch/arm/mach-clps711x/board-cdb89712.c
+++ b/arch/arm/mach-clps711x/board-cdb89712.c
@@ -95,7 +95,7 @@ static struct physmap_flash_data cdb89712_bootrom_pdata __initdata = {
 
 static struct resource cdb89712_bootrom_resources[] __initdata = {
 	DEFINE_RES_NAMED(CS7_PHYS_BASE, SZ_128, "BOOTROM", IORESOURCE_MEM |
-			 IORESOURCE_CACHEABLE | IORESOURCE_READONLY),
+			 IORESOURCE_READONLY),
 };
 
 static struct platform_device cdb89712_bootrom_pdev __initdata = {
diff --git a/arch/arm/mach-shmobile/pm-rcar.c b/arch/arm/mach-shmobile/pm-rcar.c
index 4092ad16e0a4..0af05d288b09 100644
--- a/arch/arm/mach-shmobile/pm-rcar.c
+++ b/arch/arm/mach-shmobile/pm-rcar.c
@@ -12,7 +12,7 @@
 #include <linux/err.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "pm-rcar.h"
 
 /* SYSC Common */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 44a59c20e773..6b4c3ad75a2a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -81,12 +81,6 @@
 #define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
-#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 80a7e34be009..9041bbe2b7b4 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -435,6 +435,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
 {
 	return ioremap(phys_addr, size);
 }
+#define ioremap_cache ioremap_cache
 
 
 /*
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 4826ff957a3d..5fa3848ba224 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -4,7 +4,7 @@
 #include <linux/errno.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /* IBM Summit (EXA) Cyclone counter code*/
 #define CYCLONE_CBAR_ADDR	0xFEB00CD0
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 97e48b0eefc7..1841ef69183d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -645,7 +645,7 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
@@ -656,7 +656,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL);
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 
 	if (ret)
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c8c62c7fc31c..2e710c15893f 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -102,7 +102,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
+			flags |= IORESOURCE_READONLY;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e1fe333da946..22d94c3e6fc4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -113,7 +113,7 @@ int memory_add_physaddr_to_nid(u64 start)
 }
 #endif
 
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata;
 	struct zone *zone;
@@ -128,7 +128,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	/* this should work for most non-highmem platforms */
 	zone = pgdata->node_zones +
-		zone_for_memory(nid, start, size, 0);
+		zone_for_memory(nid, start, size, 0, for_device);
 
 	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index f86250c48b53..d2b79bc336c1 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -141,13 +141,14 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void **kaddr, unsigned long *pfn, long size)
+		       void __pmem **kaddr, unsigned long *pfn)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
+	void *addr = (void *)(bank->ph_addr + offset);
 
-	*kaddr = (void *)(bank->ph_addr + offset);
-	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+	*kaddr = (void __pmem *)addr;
+	*pfn = virt_to_phys(addr) >> PAGE_SHIFT;
 
 	return bank->size - offset;
 }
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 2963b563621c..c3c07d3505ba 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -169,7 +169,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 93ec9066dbef..3280a6bfa503 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -342,6 +342,7 @@ ioremap_cache(phys_addr_t offset, unsigned long size)
 {
 	return __ioremap_mode(offset, size, PAGE_KERNEL);
 }
+#define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 static inline void __iomem *
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 17f486233db0..75491862d900 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -485,7 +485,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	pg_data_t *pgdat;
 	unsigned long start_pfn = PFN_DOWN(start);
@@ -496,7 +496,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
 	ret = __add_pages(nid, pgdat->node_zones +
-			zone_for_memory(nid, start, size, ZONE_NORMAL),
+			zone_for_memory(nid, start, size, ZONE_NORMAL,
+			for_device),
 			start_pfn, nr_pages);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 3a14a35592fe..b91d7f146175 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -231,8 +231,7 @@ static void pci_parse_of_addrs(struct platform_device *op,
 			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 		} else if (i == dev->rom_base_reg) {
 			res = &dev->resource[PCI_ROM_RESOURCE];
-			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
-				| IORESOURCE_SIZEALIGN;
+			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 		} else {
 			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 			continue;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 5bd252e3fdc5..d4e1fc41d06d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -863,7 +863,7 @@ void __init mem_init(void)
  * memory to the highmem for now.
  */
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int arch_add_memory(u64 start, u64 size)
+int arch_add_memory(u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata = &contig_page_data;
 	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
index debafc40200a..3bb0a29fd2d7 100644
--- a/arch/unicore32/include/asm/memory.h
+++ b/arch/unicore32/include/asm/memory.h
@@ -61,12 +61,6 @@
 #endif
 
 /*
- * Convert a physical address to a Page Frame Number and back
- */
-#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
-
-/*
  * Convert a page to/from a physical address
  */
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 117e2f373e50..cc0d73eac047 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,7 +27,8 @@ config X86
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_GCOV_PROFILE_ALL
-	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_PMEM_API		if X86_64
+	select ARCH_HAS_MMIO_FLUSH
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_MIGHT_HAVE_ACPI_PDC		if ACPI
@@ -1450,10 +1451,14 @@ config ILLEGAL_POINTER_VALUE
 
 source "mm/Kconfig"
 
+config X86_PMEM_LEGACY_DEVICE
+	bool
+
 config X86_PMEM_LEGACY
-	bool "Support non-standard NVDIMMs and ADR protected memory"
+	tristate "Support non-standard NVDIMMs and ADR protected memory"
 	depends on PHYS_ADDR_T_64BIT
 	depends on BLK_DEV
+	select X86_PMEM_LEGACY_DEVICE
 	select LIBNVDIMM
 	help
 	  Treat memory marked using the non-standard e820 type of 12 as used
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 9bf3ea14b9f0..e63aa38e85fb 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -89,6 +89,8 @@ int set_pages_rw(struct page *page, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
+#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
+
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 extern const int rodata_test_data;
@@ -109,75 +111,4 @@ static inline int rodata_test(void)
 }
 #endif
 
-#ifdef ARCH_HAS_NOCACHE_UACCESS
-
-/**
- * arch_memcpy_to_pmem - copy data to persistent memory
- * @dst: destination buffer for the copy
- * @src: source buffer for the copy
- * @n: length of the copy in bytes
- *
- * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
- */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
-{
-	int unwritten;
-
-	/*
-	 * We are copying between two kernel buffers, if
-	 * __copy_from_user_inatomic_nocache() returns an error (page
-	 * fault) we would have already reported a general protection fault
-	 * before the WARN+BUG.
-	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
-		BUG();
-}
-
-/**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-#ifdef CONFIG_X86_64
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
-#else
-	return false;
-#endif
-}
-#else /* ARCH_HAS_NOCACHE_UACCESS i.e. ARCH=um */
-extern void arch_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n);
-extern void arch_wmb_pmem(void);
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	return false;
-}
-#endif
-
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7cfc085b6879..de25aad07853 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -250,12 +250,6 @@ static inline void flush_write_buffers(void)
 #endif
 }
 
-static inline void __pmem *arch_memremap_pmem(resource_size_t offset,
-	unsigned long size)
-{
-	return (void __force __pmem *) ioremap_cache(offset, size);
-}
-
 #endif /* __KERNEL__ */
 
 extern void native_io_delay(void);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
new file mode 100644
index 000000000000..d8ce3ec816ab
--- /dev/null
+++ b/arch/x86/include/asm/pmem.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __ASM_X86_PMEM_H__
+#define __ASM_X86_PMEM_H__
+
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+/**
+ * arch_memcpy_to_pmem - copy data to persistent memory
+ * @dst: destination buffer for the copy
+ * @src: source buffer for the copy
+ * @n: length of the copy in bytes
+ *
+ * Copy data to persistent memory media via non-temporal stores so that
+ * a subsequent arch_wmb_pmem() can flush cpu and memory controller
+ * write buffers to guarantee durability.
+ */
+static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
+		size_t n)
+{
+	int unwritten;
+
+	/*
+	 * We are copying between two kernel buffers, if
+	 * __copy_from_user_inatomic_nocache() returns an error (page
+	 * fault) we would have already reported a general protection fault
+	 * before the WARN+BUG.
+	 */
+	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
+			(void __user *) src, n);
+	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
+				__func__, dst, src, unwritten))
+		BUG();
+}
+
+/**
+ * arch_wmb_pmem - synchronize writes to persistent memory
+ *
+ * After a series of arch_memcpy_to_pmem() operations this drains data
+ * from cpu write buffers and any platform (memory controller) buffers
+ * to ensure that written data is durable on persistent memory media.
+ */
+static inline void arch_wmb_pmem(void)
+{
+	/*
+	 * wmb() to 'sfence' all previous writes such that they are
+	 * architecturally visible to 'pcommit'.  Note, that we've
+	 * already arranged for pmem writes to avoid the cache via
+	 * arch_memcpy_to_pmem().
+	 */
+	wmb();
+	pcommit_sfence();
+}
+
+/**
+ * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction.  This function requires explicit ordering with an
+ * arch_wmb_pmem() call.  This API is internal to the x86 PMEM implementation.
+ */
+static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+{
+	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vend = vaddr + size;
+	void *p;
+
+	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	     p < vend; p += x86_clflush_size)
+		clwb(p);
+}
+
+/*
+ * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+ * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+ */
+static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+{
+	return iter_is_iovec(i) == false;
+}
+
+/**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	void *vaddr = (void __force *)addr;
+	size_t len;
+
+	/* TODO: skip the write-back by always using non-temporal stores */
+	len = copy_from_iter_nocache(vaddr, bytes, i);
+
+	if (__iter_needs_pmem_wb(i))
+		__arch_wb_cache_pmem(vaddr, bytes);
+
+	return len;
+}
+
+/**
+ * arch_clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+	void *vaddr = (void __force *)addr;
+
+	/* TODO: implement the zeroing via non-temporal writes */
+	if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+		clear_page(vaddr);
+	else
+		memset(vaddr, 0, size);
+
+	__arch_wb_cache_pmem(vaddr, size);
+}
+
+static inline bool __arch_has_wmb_pmem(void)
+{
+	/*
+	 * We require that wmb() be an 'sfence', that is only guaranteed on
+	 * 64-bit builds
+	 */
+	return static_cpu_has(X86_FEATURE_PCOMMIT);
+}
+#endif /* CONFIG_ARCH_HAS_PMEM_API */
+#endif /* __ASM_X86_PMEM_H__ */
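A usage note (a sketch, not part of the diff): drivers do not call these
arch_ helpers directly.  The generic wrappers in include/linux/pmem.h
dispatch to them when ARCH_HAS_PMEM_API is enabled, roughly along these
lines (the real wrappers also cover the fallback path):

	static inline void memcpy_to_pmem(void __pmem *dst, const void *src,
			size_t n)
	{
		if (arch_has_pmem_api())
			arch_memcpy_to_pmem(dst, src, n);	/* non-temporal stores */
		else
			memcpy((void __force *) dst, src, n);	/* cached fallback */
	}

	static inline void wmb_pmem(void)
	{
		if (arch_has_wmb_pmem())
			arch_wmb_pmem();	/* wmb() + pcommit_sfence() on x86 */
	}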
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index 0f457e6eab18..9dafe59cf6e2 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -37,7 +37,7 @@
 /*
  * This is a non-standardized way to represent ADR or NVDIMM regions that
  * persist over a reboot.  The kernel will ignore their special capabilities
- * unless the CONFIG_X86_PMEM_LEGACY=y option is set.
+ * unless the CONFIG_X86_PMEM_LEGACY option is set.
  *
  * ( Note that older platforms also used 6 for the same type of memory,
  *   but newer versions switched to 12 as 6 was assigned differently.  Some
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 3c3622176340..9ffdf25e5b86 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -94,7 +94,7 @@ obj-$(CONFIG_KVM_GUEST)	+= kvm.o kvmclock.o
 obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
-obj-$(CONFIG_X86_PMEM_LEGACY)	+= pmem.o
+obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
 
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
 
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 64f90f53bb85..4f00b63d7ff3 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -3,80 +3,17 @@
  * Copyright (c) 2015, Intel Corporation.
  */
 #include <linux/platform_device.h>
-#include <linux/libnvdimm.h>
 #include <linux/module.h>
-#include <asm/e820.h>
-
-static void e820_pmem_release(struct device *dev)
-{
-	struct nvdimm_bus *nvdimm_bus = dev->platform_data;
-
-	if (nvdimm_bus)
-		nvdimm_bus_unregister(nvdimm_bus);
-}
-
-static struct platform_device e820_pmem = {
-	.name = "e820_pmem",
-	.id = -1,
-	.dev = {
-		.release = e820_pmem_release,
-	},
-};
-
-static const struct attribute_group *e820_pmem_attribute_groups[] = {
-	&nvdimm_bus_attribute_group,
-	NULL,
-};
-
-static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
-	&nd_region_attribute_group,
-	&nd_device_attribute_group,
-	NULL,
-};
 
 static __init int register_e820_pmem(void)
 {
-	static struct nvdimm_bus_descriptor nd_desc;
-	struct device *dev = &e820_pmem.dev;
-	struct nvdimm_bus *nvdimm_bus;
-	int rc, i;
-
-	rc = platform_device_register(&e820_pmem);
-	if (rc)
-		return rc;
-
-	nd_desc.attr_groups = e820_pmem_attribute_groups;
-	nd_desc.provider_name = "e820";
-	nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
-	if (!nvdimm_bus)
-		goto err;
-	dev->platform_data = nvdimm_bus;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		struct e820entry *ei = &e820.map[i];
-		struct resource res = {
-			.flags = IORESOURCE_MEM,
-			.start = ei->addr,
-			.end = ei->addr + ei->size - 1,
-		};
-		struct nd_region_desc ndr_desc;
-
-		if (ei->type != E820_PRAM)
-			continue;
-
-		memset(&ndr_desc, 0, sizeof(ndr_desc));
-		ndr_desc.res = &res;
-		ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
-		ndr_desc.numa_node = NUMA_NO_NODE;
-		if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
-			goto err;
-	}
-
-	return 0;
-
-err:
-	dev_err(dev, "failed to register legacy persistent memory ranges\n");
-	platform_device_unregister(&e820_pmem);
-	return -ENXIO;
+	struct platform_device *pdev;
+
+	/*
+	 * See drivers/nvdimm/e820.c for the implementation, this is
+	 * simply here to trigger the module to load on demand.
+	 */
+	pdev = platform_device_alloc("e820_pmem", -1);
+	return platform_device_add(pdev);
 }
 device_initcall(register_e820_pmem);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 68aec42545c2..7562f42914b4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -823,11 +823,11 @@ void __init mem_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata = NODE_DATA(nid);
 	struct zone *zone = pgdata->node_zones +
-		zone_for_memory(nid, start, size, ZONE_HIGHMEM);
+		zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3fba623e3ba5..30564e2752d3 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -687,11 +687,11 @@ static void update_end_of_memory_vars(u64 start, u64 size)
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL);
+		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index c39bb6e61911..867840f5400f 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -57,6 +57,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	else
 		BUG();
 }
+#define ioremap_cache ioremap_cache
 
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 54e9729f9634..5d1015c26ff4 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -417,6 +417,7 @@ config ACPI_NFIT
 	tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
 	depends on PHYS_ADDR_T_64BIT
 	depends on BLK_DEV
+	depends on ARCH_HAS_MMIO_FLUSH
 	select LIBNVDIMM
 	help
 	  Infrastructure to probe ACPI 6 compliant platforms for
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index cf0fd96a7602..c1b8d03e262e 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -20,6 +20,7 @@
 #include <linux/sort.h>
 #include <linux/pmem.h>
 #include <linux/io.h>
+#include <asm/cacheflush.h>
 #include "nfit.h"
 
 /*
@@ -764,9 +765,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 	struct acpi_device *adev, *adev_dimm;
 	struct device *dev = acpi_desc->dev;
 	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
-	unsigned long long sta;
-	int i, rc = -ENODEV;
-	acpi_status status;
+	int i;
 
 	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
 	adev = to_acpi_dev(acpi_desc);
@@ -781,25 +780,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		return force_enable_dimms ? 0 : -ENODEV;
 	}
 
-	status = acpi_evaluate_integer(adev_dimm->handle, "_STA", NULL, &sta);
-	if (status == AE_NOT_FOUND) {
-		dev_dbg(dev, "%s missing _STA, assuming enabled...\n",
-				dev_name(&adev_dimm->dev));
-		rc = 0;
-	} else if (ACPI_FAILURE(status))
-		dev_err(dev, "%s failed to retrieve_STA, disabling...\n",
-				dev_name(&adev_dimm->dev));
-	else if ((sta & ACPI_STA_DEVICE_ENABLED) == 0)
-		dev_info(dev, "%s disabled by firmware\n",
-				dev_name(&adev_dimm->dev));
-	else
-		rc = 0;
-
 	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
 		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
 			set_bit(i, &nfit_mem->dsm_mask);
 
-	return force_enable_dimms ? 0 : rc;
+	return 0;
 }
 
 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
@@ -868,6 +853,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 	struct acpi_device *adev;
 	int i;
 
+	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
 	adev = to_acpi_dev(acpi_desc);
 	if (!adev)
 		return;
@@ -1032,7 +1018,7 @@ static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readl(mmio->base + offset);
+	return readl(mmio->addr.base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
@@ -1057,11 +1043,11 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	writeq(cmd, mmio->base + offset);
+	writeq(cmd, mmio->addr.base + offset);
 	wmb_blk(nfit_blk);
 
 	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
-		readq(mmio->base + offset);
+		readq(mmio->addr.base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1093,11 +1079,16 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 		}
 
 		if (rw)
-			memcpy_to_pmem(mmio->aperture + offset,
+			memcpy_to_pmem(mmio->addr.aperture + offset,
 					iobuf + copied, c);
-		else
+		else {
+			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+				mmio_flush_range((void __force *)
+					mmio->addr.aperture + offset, c);
+
 			memcpy_from_pmem(iobuf + copied,
-					mmio->aperture + offset, c);
+					mmio->addr.aperture + offset, c);
+		}
 
 		copied += c;
 		len -= c;
@@ -1144,7 +1135,10 @@ static void nfit_spa_mapping_release(struct kref *kref)
 
 	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
 	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
-	iounmap(spa_map->iomem);
+	if (spa_map->type == SPA_MAP_APERTURE)
+		memunmap((void __force *)spa_map->addr.aperture);
+	else
+		iounmap(spa_map->addr.base);
 	release_mem_region(spa->address, spa->length);
 	list_del(&spa_map->list);
 	kfree(spa_map);
@@ -1190,7 +1184,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
 	spa_map = find_spa_mapping(acpi_desc, spa);
 	if (spa_map) {
 		kref_get(&spa_map->kref);
-		return spa_map->iomem;
+		return spa_map->addr.base;
 	}
 
 	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
@@ -1206,20 +1200,19 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
 	if (!res)
 		goto err_mem;
 
-	if (type == SPA_MAP_APERTURE) {
-		/*
-		 * TODO: memremap_pmem() support, but that requires cache
-		 * flushing when the aperture is moved.
-		 */
-		spa_map->iomem = ioremap_wc(start, n);
-	} else
-		spa_map->iomem = ioremap_nocache(start, n);
+	spa_map->type = type;
+	if (type == SPA_MAP_APERTURE)
+		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
+							ARCH_MEMREMAP_PMEM);
+	else
+		spa_map->addr.base = ioremap_nocache(start, n);
+
 
-	if (!spa_map->iomem)
+	if (!spa_map->addr.base)
 		goto err_map;
 
 	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
-	return spa_map->iomem;
+	return spa_map->addr.base;
 
 err_map:
 	release_mem_region(start, n);
@@ -1282,7 +1275,7 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
 		nfit_blk->dimm_flags = flags.flags;
 	else if (rc == -ENOTTY) {
 		/* fall back to a conservative default */
-		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
 		rc = 0;
 	} else
 		rc = -ENXIO;
@@ -1322,9 +1315,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	/* map block aperture memory */
 	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
 	mmio = &nfit_blk->mmio[BDW];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
 			SPA_MAP_APERTURE);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1345,9 +1338,9 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
 	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
 	mmio = &nfit_blk->mmio[DCR];
-	mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
 			SPA_MAP_CONTROL);
-	if (!mmio->base) {
+	if (!mmio->addr.base) {
 		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
 				nvdimm_name(nvdimm));
 		return -ENOMEM;
@@ -1379,7 +1372,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
 		return -ENOMEM;
 	}
 
-	if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
 	if (mmio->line_size == 0)
@@ -1414,7 +1407,7 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
 	for (i = 0; i < 2; i++) {
 		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
 
-		if (mmio->base)
+		if (mmio->addr.base)
 			nfit_spa_unmap(acpi_desc, mmio->spa);
 	}
 	nd_blk_region_set_provider_data(ndbr, NULL);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 79b6d83875c1..7e740156b9c2 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -41,6 +41,7 @@ enum nfit_uuids {
 };
 
 enum {
+	ND_BLK_READ_FLUSH = 1,
 	ND_BLK_DCR_LATCH = 2,
 };
 
@@ -107,6 +108,7 @@ struct acpi_nfit_desc {
 	struct nvdimm_bus *nvdimm_bus;
 	struct device *dev;
 	unsigned long dimm_dsm_force_en;
+	unsigned long bus_dsm_force_en;
 	int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
 			void *iobuf, u64 len, int rw);
 };
@@ -116,12 +118,16 @@ enum nd_blk_mmio_selector {
 	DCR,
 };
 
+struct nd_blk_addr {
+	union {
+		void __iomem *base;
+		void __pmem *aperture;
+	};
+};
+
 struct nfit_blk {
 	struct nfit_blk_mmio {
-		union {
-			void __iomem *base;
-			void __pmem *aperture;
-		};
+		struct nd_blk_addr addr;
 		u64 size;
 		u64 base_offset;
 		u32 line_size;
@@ -148,7 +154,8 @@ struct nfit_spa_mapping {
 	struct acpi_nfit_system_address *spa;
 	struct list_head list;
 	struct kref kref;
-	void __iomem *iomem;
+	enum spa_map_type type;
+	struct nd_blk_addr addr;
 };
 
 static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f9ab74505e69..b9794aeeb878 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
 
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void **kaddr, unsigned long *pfn, long size)
+			void __pmem **kaddr, unsigned long *pfn)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
@@ -384,13 +384,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
 	page = brd_insert_page(brd, sector);
 	if (!page)
 		return -ENOSPC;
-	*kaddr = page_address(page);
+	*kaddr = (void __pmem *)page_address(page);
 	*pfn = page_to_pfn(page);
 
-	/*
-	 * TODO: If size > PAGE_SIZE, we could look to see if the next page in
-	 * the file happens to be mapped to the next page of physical RAM.
-	 */
 	return PAGE_SIZE;
 }
 #else
diff --git a/drivers/isdn/icn/icn.h b/drivers/isdn/icn/icn.h
index b713466997a0..f8f2e76d34bf 100644
--- a/drivers/isdn/icn/icn.h
+++ b/drivers/isdn/icn/icn.h
@@ -38,7 +38,7 @@ typedef struct icn_cdef {
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/major.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 2fc4957cbe7f..a70eb83e68f1 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -41,7 +41,7 @@
 #include <linux/fs.h>
 #include <linux/ioctl.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/mtd/mtd.h>
 
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 7da266a53979..0802158a3f75 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -24,7 +24,7 @@
 #include <linux/rslib.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 32a216d31141..ab7bda0bb245 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -18,7 +18,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/onenand.h>
 #include <linux/mtd/partitions.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 /*
  * Note: Driver name and platform data format have been updated!
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 72226acb5c0f..53c11621d5b1 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig | |||
@@ -21,6 +21,7 @@ config BLK_DEV_PMEM | |||
21 | default LIBNVDIMM | 21 | default LIBNVDIMM |
22 | depends on HAS_IOMEM | 22 | depends on HAS_IOMEM |
23 | select ND_BTT if BTT | 23 | select ND_BTT if BTT |
24 | select ND_PFN if NVDIMM_PFN | ||
24 | help | 25 | help |
25 | Memory ranges for PMEM are described by either an NFIT | 26 | Memory ranges for PMEM are described by either an NFIT |
26 | (NVDIMM Firmware Interface Table, see CONFIG_NFIT_ACPI), a | 27 | (NVDIMM Firmware Interface Table, see CONFIG_NFIT_ACPI), a |
@@ -47,12 +48,16 @@ config ND_BLK | |||
47 | (CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode | 48 | (CONFIG_ACPI_NFIT), or otherwise exposes BLK-mode |
48 | capabilities. | 49 | capabilities. |
49 | 50 | ||
51 | config ND_CLAIM | ||
52 | bool | ||
53 | |||
50 | config ND_BTT | 54 | config ND_BTT |
51 | tristate | 55 | tristate |
52 | 56 | ||
53 | config BTT | 57 | config BTT |
54 | bool "BTT: Block Translation Table (atomic sector updates)" | 58 | bool "BTT: Block Translation Table (atomic sector updates)" |
55 | default y if LIBNVDIMM | 59 | default y if LIBNVDIMM |
60 | select ND_CLAIM | ||
56 | help | 61 | help |
57 | The Block Translation Table (BTT) provides atomic sector | 62 | The Block Translation Table (BTT) provides atomic sector |
58 | update semantics for persistent memory devices, so that | 63 | update semantics for persistent memory devices, so that |
@@ -65,4 +70,22 @@ config BTT | |||
65 | 70 | ||
66 | Select Y if unsure | 71 | Select Y if unsure |
67 | 72 | ||
73 | config ND_PFN | ||
74 | tristate | ||
75 | |||
76 | config NVDIMM_PFN | ||
77 | bool "PFN: Map persistent (device) memory" | ||
78 | default LIBNVDIMM | ||
79 | depends on ZONE_DEVICE | ||
80 | select ND_CLAIM | ||
81 | help | ||
82 | Map persistent memory, i.e. advertise it to the memory | ||
83 | management sub-system. By default persistent memory does | ||
84 | not support direct I/O, RDMA, or any other usage that | ||
85 | requires a 'struct page' to mediate an I/O request. This | ||
86 | driver allocates and initializes the infrastructure needed | ||
87 | to support those use cases. | ||
88 | |||
89 | Select Y if unsure | ||
90 | |||
68 | endif | 91 | endif |
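The NVDIMM_PFN help text compresses the ZONE_DEVICE rationale: without a memmap, pfn_to_page() is undefined for persistent memory, so anything that pins pages (direct I/O, RDMA) cannot target it. What the option ultimately enables is, in sketch form (assuming the v4.3-era devm_memremap_pages(dev, res) signature; nsio->res stands in for the namespace's physical range):

    struct page *page;
    void *addr;

    /* allocate and initialize 'struct page' entries for device memory */
    addr = devm_memremap_pages(dev, &nsio->res);
    if (IS_ERR(addr))
            return PTR_ERR(addr);
    /* pfn_to_page() is now valid for pfns inside nsio->res */
    page = pfn_to_page(__phys_to_pfn(nsio->res.start));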
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile index 594bb97c867a..ea84d3c4e8e5 100644 --- a/drivers/nvdimm/Makefile +++ b/drivers/nvdimm/Makefile | |||
@@ -2,6 +2,7 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o | |||
2 | obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o | 2 | obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o |
3 | obj-$(CONFIG_ND_BTT) += nd_btt.o | 3 | obj-$(CONFIG_ND_BTT) += nd_btt.o |
4 | obj-$(CONFIG_ND_BLK) += nd_blk.o | 4 | obj-$(CONFIG_ND_BLK) += nd_blk.o |
5 | obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o | ||
5 | 6 | ||
6 | nd_pmem-y := pmem.o | 7 | nd_pmem-y := pmem.o |
7 | 8 | ||
@@ -9,6 +10,8 @@ nd_btt-y := btt.o | |||
9 | 10 | ||
10 | nd_blk-y := blk.o | 11 | nd_blk-y := blk.o |
11 | 12 | ||
13 | nd_e820-y := e820.o | ||
14 | |||
12 | libnvdimm-y := core.o | 15 | libnvdimm-y := core.o |
13 | libnvdimm-y += bus.o | 16 | libnvdimm-y += bus.o |
14 | libnvdimm-y += dimm_devs.o | 17 | libnvdimm-y += dimm_devs.o |
@@ -17,4 +20,6 @@ libnvdimm-y += region_devs.o | |||
17 | libnvdimm-y += region.o | 20 | libnvdimm-y += region.o |
18 | libnvdimm-y += namespace_devs.o | 21 | libnvdimm-y += namespace_devs.o |
19 | libnvdimm-y += label.o | 22 | libnvdimm-y += label.o |
23 | libnvdimm-$(CONFIG_ND_CLAIM) += claim.o | ||
20 | libnvdimm-$(CONFIG_BTT) += btt_devs.o | 24 | libnvdimm-$(CONFIG_BTT) += btt_devs.o |
25 | libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o | ||
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 341202ed32b4..254239746020 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
@@ -583,33 +583,6 @@ static void free_arenas(struct btt *btt) | |||
583 | } | 583 | } |
584 | 584 | ||
585 | /* | 585 | /* |
586 | * This function checks if the metadata layout is valid and error free | ||
587 | */ | ||
588 | static int arena_is_valid(struct arena_info *arena, struct btt_sb *super, | ||
589 | u8 *uuid, u32 lbasize) | ||
590 | { | ||
591 | u64 checksum; | ||
592 | |||
593 | if (memcmp(super->uuid, uuid, 16)) | ||
594 | return 0; | ||
595 | |||
596 | checksum = le64_to_cpu(super->checksum); | ||
597 | super->checksum = 0; | ||
598 | if (checksum != nd_btt_sb_checksum(super)) | ||
599 | return 0; | ||
600 | super->checksum = cpu_to_le64(checksum); | ||
601 | |||
602 | if (lbasize != le32_to_cpu(super->external_lbasize)) | ||
603 | return 0; | ||
604 | |||
605 | /* TODO: figure out action for this */ | ||
606 | if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0) | ||
607 | dev_info(to_dev(arena), "Found arena with an error flag\n"); | ||
608 | |||
609 | return 1; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * This function reads an existing valid btt superblock and | 586 | * This function reads an existing valid btt superblock and |
614 | * populates the corresponding arena_info struct | 587 | * populates the corresponding arena_info struct |
615 | */ | 588 | */ |
@@ -632,8 +605,9 @@ static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super, | |||
632 | arena->logoff = arena_off + le64_to_cpu(super->logoff); | 605 | arena->logoff = arena_off + le64_to_cpu(super->logoff); |
633 | arena->info2off = arena_off + le64_to_cpu(super->info2off); | 606 | arena->info2off = arena_off + le64_to_cpu(super->info2off); |
634 | 607 | ||
635 | arena->size = (super->nextoff > 0) ? (le64_to_cpu(super->nextoff)) : | 608 | arena->size = (le64_to_cpu(super->nextoff) > 0) |
636 | (arena->info2off - arena->infooff + BTT_PG_SIZE); | 609 | ? (le64_to_cpu(super->nextoff)) |
610 | : (arena->info2off - arena->infooff + BTT_PG_SIZE); | ||
637 | 611 | ||
638 | arena->flags = le32_to_cpu(super->flags); | 612 | arena->flags = le32_to_cpu(super->flags); |
639 | } | 613 | } |
@@ -665,8 +639,7 @@ static int discover_arenas(struct btt *btt) | |||
665 | if (ret) | 639 | if (ret) |
666 | goto out; | 640 | goto out; |
667 | 641 | ||
668 | if (!arena_is_valid(arena, super, btt->nd_btt->uuid, | 642 | if (!nd_btt_arena_is_valid(btt->nd_btt, super)) { |
669 | btt->lbasize)) { | ||
670 | if (remaining == btt->rawsize) { | 643 | if (remaining == btt->rawsize) { |
671 | btt->init_state = INIT_NOTFOUND; | 644 | btt->init_state = INIT_NOTFOUND; |
672 | dev_info(to_dev(arena), "No existing arenas\n"); | 645 | dev_info(to_dev(arena), "No existing arenas\n"); |
@@ -755,10 +728,13 @@ static int create_arenas(struct btt *btt) | |||
755 | * It is only called for an uninitialized arena when a write | 728 | * It is only called for an uninitialized arena when a write |
756 | * to that arena occurs for the first time. | 729 | * to that arena occurs for the first time. |
757 | */ | 730 | */ |
758 | static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid) | 731 | static int btt_arena_write_layout(struct arena_info *arena) |
759 | { | 732 | { |
760 | int ret; | 733 | int ret; |
734 | u64 sum; | ||
761 | struct btt_sb *super; | 735 | struct btt_sb *super; |
736 | struct nd_btt *nd_btt = arena->nd_btt; | ||
737 | const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev); | ||
762 | 738 | ||
763 | ret = btt_map_init(arena); | 739 | ret = btt_map_init(arena); |
764 | if (ret) | 740 | if (ret) |
@@ -773,7 +749,8 @@ static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid) | |||
773 | return -ENOMEM; | 749 | return -ENOMEM; |
774 | 750 | ||
775 | strncpy(super->signature, BTT_SIG, BTT_SIG_LEN); | 751 | strncpy(super->signature, BTT_SIG, BTT_SIG_LEN); |
776 | memcpy(super->uuid, uuid, 16); | 752 | memcpy(super->uuid, nd_btt->uuid, 16); |
753 | memcpy(super->parent_uuid, parent_uuid, 16); | ||
777 | super->flags = cpu_to_le32(arena->flags); | 754 | super->flags = cpu_to_le32(arena->flags); |
778 | super->version_major = cpu_to_le16(arena->version_major); | 755 | super->version_major = cpu_to_le16(arena->version_major); |
779 | super->version_minor = cpu_to_le16(arena->version_minor); | 756 | super->version_minor = cpu_to_le16(arena->version_minor); |
@@ -794,7 +771,8 @@ static int btt_arena_write_layout(struct arena_info *arena, u8 *uuid) | |||
794 | super->info2off = cpu_to_le64(arena->info2off - arena->infooff); | 771 | super->info2off = cpu_to_le64(arena->info2off - arena->infooff); |
795 | 772 | ||
796 | super->flags = 0; | 773 | super->flags = 0; |
797 | super->checksum = cpu_to_le64(nd_btt_sb_checksum(super)); | 774 | sum = nd_sb_checksum((struct nd_gen_sb *) super); |
775 | super->checksum = cpu_to_le64(sum); | ||
798 | 776 | ||
799 | ret = btt_info_write(arena, super); | 777 | ret = btt_info_write(arena, super); |
800 | 778 | ||
@@ -813,7 +791,7 @@ static int btt_meta_init(struct btt *btt) | |||
813 | 791 | ||
814 | mutex_lock(&btt->init_lock); | 792 | mutex_lock(&btt->init_lock); |
815 | list_for_each_entry(arena, &btt->arena_list, list) { | 793 | list_for_each_entry(arena, &btt->arena_list, list) { |
816 | ret = btt_arena_write_layout(arena, btt->nd_btt->uuid); | 794 | ret = btt_arena_write_layout(arena); |
817 | if (ret) | 795 | if (ret) |
818 | goto unlock; | 796 | goto unlock; |
819 | 797 | ||
@@ -1447,8 +1425,6 @@ static int __init nd_btt_init(void) | |||
1447 | { | 1425 | { |
1448 | int rc; | 1426 | int rc; |
1449 | 1427 | ||
1450 | BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K); | ||
1451 | |||
1452 | btt_major = register_blkdev(0, "btt"); | 1428 | btt_major = register_blkdev(0, "btt"); |
1453 | if (btt_major < 0) | 1429 | if (btt_major < 0) |
1454 | return btt_major; | 1430 | return btt_major; |
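The arena->size change in parse_arena_meta() above is an endianness fix, not just a reflow: the old expression tested the raw on-media value (super->nextoff is __le64), which happens to work on little-endian hosts but is wrong on big-endian ones. An unrolled form of the corrected logic (sketch only):

    u64 nextoff = le64_to_cpu(super->nextoff);

    /* a non-zero nextoff records this arena's size directly; otherwise
     * derive it from the info-block offsets */
    arena->size = nextoff ? nextoff
            : (arena->info2off - arena->infooff + BTT_PG_SIZE);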
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 75b0d80a6bd9..b2f8651e5395 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h | |||
@@ -182,4 +182,7 @@ struct btt { | |||
182 | int init_state; | 182 | int init_state; |
183 | int num_arenas; | 183 | int num_arenas; |
184 | }; | 184 | }; |
185 | |||
186 | bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super); | ||
187 | |||
185 | #endif | 188 | #endif |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 6ac8c0fea3ec..59ad54a63d9f 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
@@ -21,63 +21,13 @@ | |||
21 | #include "btt.h" | 21 | #include "btt.h" |
22 | #include "nd.h" | 22 | #include "nd.h" |
23 | 23 | ||
24 | static void __nd_btt_detach_ndns(struct nd_btt *nd_btt) | ||
25 | { | ||
26 | struct nd_namespace_common *ndns = nd_btt->ndns; | ||
27 | |||
28 | dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex) | ||
29 | || ndns->claim != &nd_btt->dev, | ||
30 | "%s: invalid claim\n", __func__); | ||
31 | ndns->claim = NULL; | ||
32 | nd_btt->ndns = NULL; | ||
33 | put_device(&ndns->dev); | ||
34 | } | ||
35 | |||
36 | static void nd_btt_detach_ndns(struct nd_btt *nd_btt) | ||
37 | { | ||
38 | struct nd_namespace_common *ndns = nd_btt->ndns; | ||
39 | |||
40 | if (!ndns) | ||
41 | return; | ||
42 | get_device(&ndns->dev); | ||
43 | device_lock(&ndns->dev); | ||
44 | __nd_btt_detach_ndns(nd_btt); | ||
45 | device_unlock(&ndns->dev); | ||
46 | put_device(&ndns->dev); | ||
47 | } | ||
48 | |||
49 | static bool __nd_btt_attach_ndns(struct nd_btt *nd_btt, | ||
50 | struct nd_namespace_common *ndns) | ||
51 | { | ||
52 | if (ndns->claim) | ||
53 | return false; | ||
54 | dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex) | ||
55 | || nd_btt->ndns, | ||
56 | "%s: invalid claim\n", __func__); | ||
57 | ndns->claim = &nd_btt->dev; | ||
58 | nd_btt->ndns = ndns; | ||
59 | get_device(&ndns->dev); | ||
60 | return true; | ||
61 | } | ||
62 | |||
63 | static bool nd_btt_attach_ndns(struct nd_btt *nd_btt, | ||
64 | struct nd_namespace_common *ndns) | ||
65 | { | ||
66 | bool claimed; | ||
67 | |||
68 | device_lock(&ndns->dev); | ||
69 | claimed = __nd_btt_attach_ndns(nd_btt, ndns); | ||
70 | device_unlock(&ndns->dev); | ||
71 | return claimed; | ||
72 | } | ||
73 | |||
74 | static void nd_btt_release(struct device *dev) | 24 | static void nd_btt_release(struct device *dev) |
75 | { | 25 | { |
76 | struct nd_region *nd_region = to_nd_region(dev->parent); | 26 | struct nd_region *nd_region = to_nd_region(dev->parent); |
77 | struct nd_btt *nd_btt = to_nd_btt(dev); | 27 | struct nd_btt *nd_btt = to_nd_btt(dev); |
78 | 28 | ||
79 | dev_dbg(dev, "%s\n", __func__); | 29 | dev_dbg(dev, "%s\n", __func__); |
80 | nd_btt_detach_ndns(nd_btt); | 30 | nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns); |
81 | ida_simple_remove(&nd_region->btt_ida, nd_btt->id); | 31 | ida_simple_remove(&nd_region->btt_ida, nd_btt->id); |
82 | kfree(nd_btt->uuid); | 32 | kfree(nd_btt->uuid); |
83 | kfree(nd_btt); | 33 | kfree(nd_btt); |
@@ -172,104 +122,15 @@ static ssize_t namespace_show(struct device *dev, | |||
172 | return rc; | 122 | return rc; |
173 | } | 123 | } |
174 | 124 | ||
175 | static int namespace_match(struct device *dev, void *data) | ||
176 | { | ||
177 | char *name = data; | ||
178 | |||
179 | return strcmp(name, dev_name(dev)) == 0; | ||
180 | } | ||
181 | |||
182 | static bool is_nd_btt_idle(struct device *dev) | ||
183 | { | ||
184 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
185 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
186 | |||
187 | if (nd_region->btt_seed == dev || nd_btt->ndns || dev->driver) | ||
188 | return false; | ||
189 | return true; | ||
190 | } | ||
191 | |||
192 | static ssize_t __namespace_store(struct device *dev, | ||
193 | struct device_attribute *attr, const char *buf, size_t len) | ||
194 | { | ||
195 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
196 | struct nd_namespace_common *ndns; | ||
197 | struct device *found; | ||
198 | char *name; | ||
199 | |||
200 | if (dev->driver) { | ||
201 | dev_dbg(dev, "%s: -EBUSY\n", __func__); | ||
202 | return -EBUSY; | ||
203 | } | ||
204 | |||
205 | name = kstrndup(buf, len, GFP_KERNEL); | ||
206 | if (!name) | ||
207 | return -ENOMEM; | ||
208 | strim(name); | ||
209 | |||
210 | if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0) | ||
211 | /* pass */; | ||
212 | else { | ||
213 | len = -EINVAL; | ||
214 | goto out; | ||
215 | } | ||
216 | |||
217 | ndns = nd_btt->ndns; | ||
218 | if (strcmp(name, "") == 0) { | ||
219 | /* detach the namespace and destroy / reset the btt device */ | ||
220 | nd_btt_detach_ndns(nd_btt); | ||
221 | if (is_nd_btt_idle(dev)) | ||
222 | nd_device_unregister(dev, ND_ASYNC); | ||
223 | else { | ||
224 | nd_btt->lbasize = 0; | ||
225 | kfree(nd_btt->uuid); | ||
226 | nd_btt->uuid = NULL; | ||
227 | } | ||
228 | goto out; | ||
229 | } else if (ndns) { | ||
230 | dev_dbg(dev, "namespace already set to: %s\n", | ||
231 | dev_name(&ndns->dev)); | ||
232 | len = -EBUSY; | ||
233 | goto out; | ||
234 | } | ||
235 | |||
236 | found = device_find_child(dev->parent, name, namespace_match); | ||
237 | if (!found) { | ||
238 | dev_dbg(dev, "'%s' not found under %s\n", name, | ||
239 | dev_name(dev->parent)); | ||
240 | len = -ENODEV; | ||
241 | goto out; | ||
242 | } | ||
243 | |||
244 | ndns = to_ndns(found); | ||
245 | if (__nvdimm_namespace_capacity(ndns) < SZ_16M) { | ||
246 | dev_dbg(dev, "%s too small to host btt\n", name); | ||
247 | len = -ENXIO; | ||
248 | goto out_attach; | ||
249 | } | ||
250 | |||
251 | WARN_ON_ONCE(!is_nvdimm_bus_locked(&nd_btt->dev)); | ||
252 | if (!nd_btt_attach_ndns(nd_btt, ndns)) { | ||
253 | dev_dbg(dev, "%s already claimed\n", | ||
254 | dev_name(&ndns->dev)); | ||
255 | len = -EBUSY; | ||
256 | } | ||
257 | |||
258 | out_attach: | ||
259 | put_device(&ndns->dev); /* from device_find_child */ | ||
260 | out: | ||
261 | kfree(name); | ||
262 | return len; | ||
263 | } | ||
264 | |||
265 | static ssize_t namespace_store(struct device *dev, | 125 | static ssize_t namespace_store(struct device *dev, |
266 | struct device_attribute *attr, const char *buf, size_t len) | 126 | struct device_attribute *attr, const char *buf, size_t len) |
267 | { | 127 | { |
128 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
268 | ssize_t rc; | 129 | ssize_t rc; |
269 | 130 | ||
270 | nvdimm_bus_lock(dev); | 131 | nvdimm_bus_lock(dev); |
271 | device_lock(dev); | 132 | device_lock(dev); |
272 | rc = __namespace_store(dev, attr, buf, len); | 133 | rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); |
273 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | 134 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, |
274 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | 135 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); |
275 | device_unlock(dev); | 136 | device_unlock(dev); |
@@ -324,7 +185,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, | |||
324 | dev->type = &nd_btt_device_type; | 185 | dev->type = &nd_btt_device_type; |
325 | dev->groups = nd_btt_attribute_groups; | 186 | dev->groups = nd_btt_attribute_groups; |
326 | device_initialize(&nd_btt->dev); | 187 | device_initialize(&nd_btt->dev); |
327 | if (ndns && !__nd_btt_attach_ndns(nd_btt, ndns)) { | 188 | if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) { |
328 | dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", | 189 | dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", |
329 | __func__, dev_name(ndns->claim)); | 190 | __func__, dev_name(ndns->claim)); |
330 | put_device(dev); | 191 | put_device(dev); |
@@ -342,30 +203,54 @@ struct device *nd_btt_create(struct nd_region *nd_region) | |||
342 | return dev; | 203 | return dev; |
343 | } | 204 | } |
344 | 205 | ||
345 | /* | 206 | static bool uuid_is_null(u8 *uuid) |
346 | * nd_btt_sb_checksum: compute checksum for btt info block | 207 | { |
208 | static const u8 null_uuid[16]; | ||
209 | |||
210 | return (memcmp(uuid, null_uuid, 16) == 0); | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * nd_btt_arena_is_valid - check if the metadata layout is valid | ||
215 | * @nd_btt: device with BTT geometry and backing device info | ||
216 | * @super: pointer to the arena's info block being tested | ||
217 | * | ||
218 | * Check consistency of the btt info block with itself by validating | ||
219 | * the checksum, and with the parent namespace by verifying that the | ||
220 | * parent_uuid recorded in the info block matches the namespace's uuid. | ||
347 | * | 221 | * |
348 | * Returns a fletcher64 checksum of everything in the given info block | 222 | * Returns: |
349 | * except the last field (since that's where the checksum lives). | 223 | * false for an invalid info block, true for a valid one |
350 | */ | 224 | */ |
351 | u64 nd_btt_sb_checksum(struct btt_sb *btt_sb) | 225 | bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super) |
352 | { | 226 | { |
353 | u64 sum; | 227 | const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev); |
354 | __le64 sum_save; | 228 | u64 checksum; |
355 | 229 | ||
356 | sum_save = btt_sb->checksum; | 230 | if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0) |
357 | btt_sb->checksum = 0; | 231 | return false; |
358 | sum = nd_fletcher64(btt_sb, sizeof(*btt_sb), 1); | 232 | |
359 | btt_sb->checksum = sum_save; | 233 | if (!uuid_is_null(super->parent_uuid)) |
360 | return sum; | 234 | if (memcmp(super->parent_uuid, parent_uuid, 16) != 0) |
235 | return false; | ||
236 | |||
237 | checksum = le64_to_cpu(super->checksum); | ||
238 | super->checksum = 0; | ||
239 | if (checksum != nd_sb_checksum((struct nd_gen_sb *) super)) | ||
240 | return false; | ||
241 | super->checksum = cpu_to_le64(checksum); | ||
242 | |||
243 | /* TODO: figure out action for this */ | ||
244 | if ((le32_to_cpu(super->flags) & IB_FLAG_ERROR_MASK) != 0) | ||
245 | dev_info(&nd_btt->dev, "Found arena with an error flag\n"); | ||
246 | |||
247 | return true; | ||
361 | } | 248 | } |
362 | EXPORT_SYMBOL(nd_btt_sb_checksum); | 249 | EXPORT_SYMBOL(nd_btt_arena_is_valid); |
363 | 250 | ||
364 | static int __nd_btt_probe(struct nd_btt *nd_btt, | 251 | static int __nd_btt_probe(struct nd_btt *nd_btt, |
365 | struct nd_namespace_common *ndns, struct btt_sb *btt_sb) | 252 | struct nd_namespace_common *ndns, struct btt_sb *btt_sb) |
366 | { | 253 | { |
367 | u64 checksum; | ||
368 | |||
369 | if (!btt_sb || !ndns || !nd_btt) | 254 | if (!btt_sb || !ndns || !nd_btt) |
370 | return -ENODEV; | 255 | return -ENODEV; |
371 | 256 | ||
@@ -375,14 +260,8 @@ static int __nd_btt_probe(struct nd_btt *nd_btt, | |||
375 | if (nvdimm_namespace_capacity(ndns) < SZ_16M) | 260 | if (nvdimm_namespace_capacity(ndns) < SZ_16M) |
376 | return -ENXIO; | 261 | return -ENXIO; |
377 | 262 | ||
378 | if (memcmp(btt_sb->signature, BTT_SIG, BTT_SIG_LEN) != 0) | 263 | if (!nd_btt_arena_is_valid(nd_btt, btt_sb)) |
379 | return -ENODEV; | ||
380 | |||
381 | checksum = le64_to_cpu(btt_sb->checksum); | ||
382 | btt_sb->checksum = 0; | ||
383 | if (checksum != nd_btt_sb_checksum(btt_sb)) | ||
384 | return -ENODEV; | 264 | return -ENODEV; |
385 | btt_sb->checksum = cpu_to_le64(checksum); | ||
386 | 265 | ||
387 | nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize); | 266 | nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize); |
388 | nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL); | 267 | nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL); |
@@ -416,7 +295,9 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata) | |||
416 | dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__, | 295 | dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__, |
417 | rc == 0 ? dev_name(dev) : "<none>"); | 296 | rc == 0 ? dev_name(dev) : "<none>"); |
418 | if (rc < 0) { | 297 | if (rc < 0) { |
419 | __nd_btt_detach_ndns(to_nd_btt(dev)); | 298 | struct nd_btt *nd_btt = to_nd_btt(dev); |
299 | |||
300 | __nd_detach_ndns(dev, &nd_btt->ndns); | ||
420 | put_device(dev); | 301 | put_device(dev); |
421 | } | 302 | } |
422 | 303 | ||
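nd_btt_arena_is_valid() above uses the same zero-then-restore idiom as the callers it replaces: the checksum field is defined to be computed as if it were zero, so it must be cleared before recomputing and put back afterwards. Factored out as a hypothetical helper (not part of this commit), the idiom reads:

    static bool info_block_checksum_ok(struct btt_sb *super)
    {
            u64 checksum = le64_to_cpu(super->checksum);
            bool ok;

            super->checksum = 0;    /* field counts as zero while summing */
            ok = (checksum == nd_sb_checksum((struct nd_gen_sb *) super));
            super->checksum = cpu_to_le64(checksum); /* restore media value */
            return ok;
    }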
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c new file mode 100644 index 000000000000..e8f03b0e95e4 --- /dev/null +++ b/drivers/nvdimm/claim.c | |||
@@ -0,0 +1,201 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of version 2 of the GNU General Public License as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | */ | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/sizes.h> | ||
15 | #include "nd-core.h" | ||
16 | #include "pfn.h" | ||
17 | #include "btt.h" | ||
18 | #include "nd.h" | ||
19 | |||
20 | void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns) | ||
21 | { | ||
22 | struct nd_namespace_common *ndns = *_ndns; | ||
23 | |||
24 | dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex) | ||
25 | || ndns->claim != dev, | ||
26 | "%s: invalid claim\n", __func__); | ||
27 | ndns->claim = NULL; | ||
28 | *_ndns = NULL; | ||
29 | put_device(&ndns->dev); | ||
30 | } | ||
31 | |||
32 | void nd_detach_ndns(struct device *dev, | ||
33 | struct nd_namespace_common **_ndns) | ||
34 | { | ||
35 | struct nd_namespace_common *ndns = *_ndns; | ||
36 | |||
37 | if (!ndns) | ||
38 | return; | ||
39 | get_device(&ndns->dev); | ||
40 | device_lock(&ndns->dev); | ||
41 | __nd_detach_ndns(dev, _ndns); | ||
42 | device_unlock(&ndns->dev); | ||
43 | put_device(&ndns->dev); | ||
44 | } | ||
45 | |||
46 | bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, | ||
47 | struct nd_namespace_common **_ndns) | ||
48 | { | ||
49 | if (attach->claim) | ||
50 | return false; | ||
51 | dev_WARN_ONCE(dev, !mutex_is_locked(&attach->dev.mutex) | ||
52 | || *_ndns, | ||
53 | "%s: invalid claim\n", __func__); | ||
54 | attach->claim = dev; | ||
55 | *_ndns = attach; | ||
56 | get_device(&attach->dev); | ||
57 | return true; | ||
58 | } | ||
59 | |||
60 | bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, | ||
61 | struct nd_namespace_common **_ndns) | ||
62 | { | ||
63 | bool claimed; | ||
64 | |||
65 | device_lock(&attach->dev); | ||
66 | claimed = __nd_attach_ndns(dev, attach, _ndns); | ||
67 | device_unlock(&attach->dev); | ||
68 | return claimed; | ||
69 | } | ||
70 | |||
71 | static int namespace_match(struct device *dev, void *data) | ||
72 | { | ||
73 | char *name = data; | ||
74 | |||
75 | return strcmp(name, dev_name(dev)) == 0; | ||
76 | } | ||
77 | |||
78 | static bool is_idle(struct device *dev, struct nd_namespace_common *ndns) | ||
79 | { | ||
80 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
81 | struct device *seed = NULL; | ||
82 | |||
83 | if (is_nd_btt(dev)) | ||
84 | seed = nd_region->btt_seed; | ||
85 | else if (is_nd_pfn(dev)) | ||
86 | seed = nd_region->pfn_seed; | ||
87 | |||
88 | if (seed == dev || ndns || dev->driver) | ||
89 | return false; | ||
90 | return true; | ||
91 | } | ||
92 | |||
93 | static void nd_detach_and_reset(struct device *dev, | ||
94 | struct nd_namespace_common **_ndns) | ||
95 | { | ||
96 | /* detach the namespace and destroy / reset the device */ | ||
97 | nd_detach_ndns(dev, _ndns); | ||
98 | if (is_idle(dev, *_ndns)) { | ||
99 | nd_device_unregister(dev, ND_ASYNC); | ||
100 | } else if (is_nd_btt(dev)) { | ||
101 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
102 | |||
103 | nd_btt->lbasize = 0; | ||
104 | kfree(nd_btt->uuid); | ||
105 | nd_btt->uuid = NULL; | ||
106 | } else if (is_nd_pfn(dev)) { | ||
107 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
108 | |||
109 | kfree(nd_pfn->uuid); | ||
110 | nd_pfn->uuid = NULL; | ||
111 | nd_pfn->mode = PFN_MODE_NONE; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | ssize_t nd_namespace_store(struct device *dev, | ||
116 | struct nd_namespace_common **_ndns, const char *buf, | ||
117 | size_t len) | ||
118 | { | ||
119 | struct nd_namespace_common *ndns; | ||
120 | struct device *found; | ||
121 | char *name; | ||
122 | |||
123 | if (dev->driver) { | ||
124 | dev_dbg(dev, "%s: -EBUSY\n", __func__); | ||
125 | return -EBUSY; | ||
126 | } | ||
127 | |||
128 | name = kstrndup(buf, len, GFP_KERNEL); | ||
129 | if (!name) | ||
130 | return -ENOMEM; | ||
131 | strim(name); | ||
132 | |||
133 | if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0) | ||
134 | /* pass */; | ||
135 | else { | ||
136 | len = -EINVAL; | ||
137 | goto out; | ||
138 | } | ||
139 | |||
140 | ndns = *_ndns; | ||
141 | if (strcmp(name, "") == 0) { | ||
142 | nd_detach_and_reset(dev, _ndns); | ||
143 | goto out; | ||
144 | } else if (ndns) { | ||
145 | dev_dbg(dev, "namespace already set to: %s\n", | ||
146 | dev_name(&ndns->dev)); | ||
147 | len = -EBUSY; | ||
148 | goto out; | ||
149 | } | ||
150 | |||
151 | found = device_find_child(dev->parent, name, namespace_match); | ||
152 | if (!found) { | ||
153 | dev_dbg(dev, "'%s' not found under %s\n", name, | ||
154 | dev_name(dev->parent)); | ||
155 | len = -ENODEV; | ||
156 | goto out; | ||
157 | } | ||
158 | |||
159 | ndns = to_ndns(found); | ||
160 | if (__nvdimm_namespace_capacity(ndns) < SZ_16M) { | ||
161 | dev_dbg(dev, "%s too small to host\n", name); | ||
162 | len = -ENXIO; | ||
163 | goto out_attach; | ||
164 | } | ||
165 | |||
166 | WARN_ON_ONCE(!is_nvdimm_bus_locked(dev)); | ||
167 | if (!nd_attach_ndns(dev, ndns, _ndns)) { | ||
168 | dev_dbg(dev, "%s already claimed\n", | ||
169 | dev_name(&ndns->dev)); | ||
170 | len = -EBUSY; | ||
171 | } | ||
172 | |||
173 | out_attach: | ||
174 | put_device(&ndns->dev); /* from device_find_child */ | ||
175 | out: | ||
176 | kfree(name); | ||
177 | return len; | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * nd_sb_checksum: compute checksum for a generic info block | ||
182 | * | ||
183 | * Returns a fletcher64 checksum of everything in the given info block | ||
184 | * except the last field (since that's where the checksum lives). | ||
185 | */ | ||
186 | u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb) | ||
187 | { | ||
188 | u64 sum; | ||
189 | __le64 sum_save; | ||
190 | |||
191 | BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K); | ||
192 | BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K); | ||
193 | BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K); | ||
194 | |||
195 | sum_save = nd_gen_sb->checksum; | ||
196 | nd_gen_sb->checksum = 0; | ||
197 | sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1); | ||
198 | nd_gen_sb->checksum = sum_save; | ||
199 | return sum; | ||
200 | } | ||
201 | EXPORT_SYMBOL(nd_sb_checksum); | ||
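nd_sb_checksum() is a thin wrapper around nd_fletcher64(), the libnvdimm core's Fletcher-style accumulator over little-endian 32-bit words. A standalone sketch of the algorithm, modeled on (not copied from) that helper:

    static u64 fletcher64(const void *addr, size_t len)
    {
            const __le32 *buf = addr;
            u32 lo32 = 0;           /* first-order running sum */
            u64 hi32 = 0;           /* second-order running sum */
            size_t i;

            for (i = 0; i < len / sizeof(u32); i++) {
                    lo32 += le32_to_cpu(buf[i]);
                    hi32 += lo32;
            }
            return hi32 << 32 | lo32;
    }

The second-order sum is what makes Fletcher checksums sensitive to word ordering, which a plain additive checksum is not.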
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index c05eb807d674..651b8d19d324 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
@@ -241,10 +241,7 @@ void nvdimm_drvdata_release(struct kref *kref) | |||
241 | nvdimm_free_dpa(ndd, res); | 241 | nvdimm_free_dpa(ndd, res); |
242 | nvdimm_bus_unlock(dev); | 242 | nvdimm_bus_unlock(dev); |
243 | 243 | ||
244 | if (ndd->data && is_vmalloc_addr(ndd->data)) | 244 | kvfree(ndd->data); |
245 | vfree(ndd->data); | ||
246 | else | ||
247 | kfree(ndd->data); | ||
248 | kfree(ndd); | 245 | kfree(ndd); |
249 | put_device(dev); | 246 | put_device(dev); |
250 | } | 247 | } |
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c new file mode 100644 index 000000000000..8282db2ef99e --- /dev/null +++ b/drivers/nvdimm/e820.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2015, Christoph Hellwig. | ||
3 | * Copyright (c) 2015, Intel Corporation. | ||
4 | */ | ||
5 | #include <linux/platform_device.h> | ||
6 | #include <linux/libnvdimm.h> | ||
7 | #include <linux/module.h> | ||
8 | |||
9 | static const struct attribute_group *e820_pmem_attribute_groups[] = { | ||
10 | &nvdimm_bus_attribute_group, | ||
11 | NULL, | ||
12 | }; | ||
13 | |||
14 | static const struct attribute_group *e820_pmem_region_attribute_groups[] = { | ||
15 | &nd_region_attribute_group, | ||
16 | &nd_device_attribute_group, | ||
17 | NULL, | ||
18 | }; | ||
19 | |||
20 | static int e820_pmem_remove(struct platform_device *pdev) | ||
21 | { | ||
22 | struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev); | ||
23 | |||
24 | nvdimm_bus_unregister(nvdimm_bus); | ||
25 | return 0; | ||
26 | } | ||
27 | |||
28 | static int e820_pmem_probe(struct platform_device *pdev) | ||
29 | { | ||
30 | static struct nvdimm_bus_descriptor nd_desc; | ||
31 | struct device *dev = &pdev->dev; | ||
32 | struct nvdimm_bus *nvdimm_bus; | ||
33 | struct resource *p; | ||
34 | |||
35 | nd_desc.attr_groups = e820_pmem_attribute_groups; | ||
36 | nd_desc.provider_name = "e820"; | ||
37 | nvdimm_bus = nvdimm_bus_register(dev, &nd_desc); | ||
38 | if (!nvdimm_bus) | ||
39 | goto err; | ||
40 | platform_set_drvdata(pdev, nvdimm_bus); | ||
41 | |||
42 | for (p = iomem_resource.child; p ; p = p->sibling) { | ||
43 | struct nd_region_desc ndr_desc; | ||
44 | |||
45 | if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0) | ||
46 | continue; | ||
47 | |||
48 | memset(&ndr_desc, 0, sizeof(ndr_desc)); | ||
49 | ndr_desc.res = p; | ||
50 | ndr_desc.attr_groups = e820_pmem_region_attribute_groups; | ||
51 | ndr_desc.numa_node = NUMA_NO_NODE; | ||
52 | set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); | ||
53 | if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) | ||
54 | goto err; | ||
55 | } | ||
56 | |||
57 | return 0; | ||
58 | |||
59 | err: | ||
60 | nvdimm_bus_unregister(nvdimm_bus); | ||
61 | dev_err(dev, "failed to register legacy persistent memory ranges\n"); | ||
62 | return -ENXIO; | ||
63 | } | ||
64 | |||
65 | static struct platform_driver e820_pmem_driver = { | ||
66 | .probe = e820_pmem_probe, | ||
67 | .remove = e820_pmem_remove, | ||
68 | .driver = { | ||
69 | .name = "e820_pmem", | ||
70 | }, | ||
71 | }; | ||
72 | |||
73 | static __init int e820_pmem_init(void) | ||
74 | { | ||
75 | return platform_driver_register(&e820_pmem_driver); | ||
76 | } | ||
77 | |||
78 | static __exit void e820_pmem_exit(void) | ||
79 | { | ||
80 | platform_driver_unregister(&e820_pmem_driver); | ||
81 | } | ||
82 | |||
83 | MODULE_ALIAS("platform:e820_pmem*"); | ||
84 | MODULE_LICENSE("GPL v2"); | ||
85 | MODULE_AUTHOR("Intel Corporation"); | ||
86 | module_init(e820_pmem_init); | ||
87 | module_exit(e820_pmem_exit); | ||
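Note that e820.c supplies only the driver half; the "e820_pmem" platform device it binds to is expected to be registered by arch code when the e820 map contains legacy persistent-memory ranges. Roughly, on the registration side (a sketch of the assumed arrangement, not code from this commit):

    static __init int register_e820_pmem(void)
    {
            struct platform_device *pdev;

            /* name must match e820_pmem_driver.driver.name above */
            pdev = platform_device_alloc("e820_pmem", -1);
            if (!pdev)
                    return -ENOMEM;
            return platform_device_add(pdev);
    }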
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index fef0dd80d4ad..0955b2cb10fe 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/pmem.h> | ||
16 | #include <linux/nd.h> | 17 | #include <linux/nd.h> |
17 | #include "nd-core.h" | 18 | #include "nd-core.h" |
18 | #include "nd.h" | 19 | #include "nd.h" |
@@ -76,22 +77,54 @@ static bool is_namespace_io(struct device *dev) | |||
76 | return dev ? dev->type == &namespace_io_device_type : false; | 77 | return dev ? dev->type == &namespace_io_device_type : false; |
77 | } | 78 | } |
78 | 79 | ||
80 | bool pmem_should_map_pages(struct device *dev) | ||
81 | { | ||
82 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
83 | |||
84 | if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) | ||
85 | return false; | ||
86 | |||
87 | if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags)) | ||
88 | return false; | ||
89 | |||
90 | if (is_nd_pfn(dev) || is_nd_btt(dev)) | ||
91 | return false; | ||
92 | |||
93 | #ifdef ARCH_MEMREMAP_PMEM | ||
94 | return ARCH_MEMREMAP_PMEM == MEMREMAP_WB; | ||
95 | #else | ||
96 | return false; | ||
97 | #endif | ||
98 | } | ||
99 | EXPORT_SYMBOL(pmem_should_map_pages); | ||
100 | |||
79 | const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, | 101 | const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, |
80 | char *name) | 102 | char *name) |
81 | { | 103 | { |
82 | struct nd_region *nd_region = to_nd_region(ndns->dev.parent); | 104 | struct nd_region *nd_region = to_nd_region(ndns->dev.parent); |
83 | const char *suffix = ""; | 105 | const char *suffix = NULL; |
84 | 106 | ||
85 | if (ndns->claim && is_nd_btt(ndns->claim)) | 107 | if (ndns->claim) { |
86 | suffix = "s"; | 108 | if (is_nd_btt(ndns->claim)) |
109 | suffix = "s"; | ||
110 | else if (is_nd_pfn(ndns->claim)) | ||
111 | suffix = "m"; | ||
112 | else | ||
113 | dev_WARN_ONCE(&ndns->dev, 1, | ||
114 | "unknown claim type by %s\n", | ||
115 | dev_name(ndns->claim)); | ||
116 | } | ||
87 | 117 | ||
88 | if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) | 118 | if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { |
89 | sprintf(name, "pmem%d%s", nd_region->id, suffix); | 119 | if (!suffix && pmem_should_map_pages(&ndns->dev)) |
90 | else if (is_namespace_blk(&ndns->dev)) { | 120 | suffix = "m"; |
121 | sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : ""); | ||
122 | } else if (is_namespace_blk(&ndns->dev)) { | ||
91 | struct nd_namespace_blk *nsblk; | 123 | struct nd_namespace_blk *nsblk; |
92 | 124 | ||
93 | nsblk = to_nd_namespace_blk(&ndns->dev); | 125 | nsblk = to_nd_namespace_blk(&ndns->dev); |
94 | sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix); | 126 | sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, |
127 | suffix ? suffix : ""); | ||
95 | } else { | 128 | } else { |
96 | return NULL; | 129 | return NULL; |
97 | } | 130 | } |
@@ -100,6 +133,26 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, | |||
100 | } | 133 | } |
101 | EXPORT_SYMBOL(nvdimm_namespace_disk_name); | 134 | EXPORT_SYMBOL(nvdimm_namespace_disk_name); |
102 | 135 | ||
136 | const u8 *nd_dev_to_uuid(struct device *dev) | ||
137 | { | ||
138 | static const u8 null_uuid[16]; | ||
139 | |||
140 | if (!dev) | ||
141 | return null_uuid; | ||
142 | |||
143 | if (is_namespace_pmem(dev)) { | ||
144 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | ||
145 | |||
146 | return nspm->uuid; | ||
147 | } else if (is_namespace_blk(dev)) { | ||
148 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | ||
149 | |||
150 | return nsblk->uuid; | ||
151 | } else | ||
152 | return null_uuid; | ||
153 | } | ||
154 | EXPORT_SYMBOL(nd_dev_to_uuid); | ||
155 | |||
103 | static ssize_t nstype_show(struct device *dev, | 156 | static ssize_t nstype_show(struct device *dev, |
104 | struct device_attribute *attr, char *buf) | 157 | struct device_attribute *attr, char *buf) |
105 | { | 158 | { |
@@ -1235,12 +1288,22 @@ static const struct attribute_group *nd_namespace_attribute_groups[] = { | |||
1235 | struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) | 1288 | struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) |
1236 | { | 1289 | { |
1237 | struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL; | 1290 | struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL; |
1291 | struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL; | ||
1238 | struct nd_namespace_common *ndns; | 1292 | struct nd_namespace_common *ndns; |
1239 | resource_size_t size; | 1293 | resource_size_t size; |
1240 | 1294 | ||
1241 | if (nd_btt) { | 1295 | if (nd_btt || nd_pfn) { |
1242 | ndns = nd_btt->ndns; | 1296 | struct device *host = NULL; |
1243 | if (!ndns) | 1297 | |
1298 | if (nd_btt) { | ||
1299 | host = &nd_btt->dev; | ||
1300 | ndns = nd_btt->ndns; | ||
1301 | } else if (nd_pfn) { | ||
1302 | host = &nd_pfn->dev; | ||
1303 | ndns = nd_pfn->ndns; | ||
1304 | } | ||
1305 | |||
1306 | if (!ndns || !host) | ||
1244 | return ERR_PTR(-ENODEV); | 1307 | return ERR_PTR(-ENODEV); |
1245 | 1308 | ||
1246 | /* | 1309 | /* |
@@ -1251,12 +1314,12 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) | |||
1251 | device_unlock(&ndns->dev); | 1314 | device_unlock(&ndns->dev); |
1252 | if (ndns->dev.driver) { | 1315 | if (ndns->dev.driver) { |
1253 | dev_dbg(&ndns->dev, "is active, can't bind %s\n", | 1316 | dev_dbg(&ndns->dev, "is active, can't bind %s\n", |
1254 | dev_name(&nd_btt->dev)); | 1317 | dev_name(host)); |
1255 | return ERR_PTR(-EBUSY); | 1318 | return ERR_PTR(-EBUSY); |
1256 | } | 1319 | } |
1257 | if (dev_WARN_ONCE(&ndns->dev, ndns->claim != &nd_btt->dev, | 1320 | if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host, |
1258 | "host (%s) vs claim (%s) mismatch\n", | 1321 | "host (%s) vs claim (%s) mismatch\n", |
1259 | dev_name(&nd_btt->dev), | 1322 | dev_name(host), |
1260 | dev_name(ndns->claim))) | 1323 | dev_name(ndns->claim))) |
1261 | return ERR_PTR(-ENXIO); | 1324 | return ERR_PTR(-ENXIO); |
1262 | } else { | 1325 | } else { |
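The nvdimm_namespace_disk_name() changes above also settle the block-device naming scheme, with the suffix encoding how the namespace is consumed:

    /* illustrative expected names, assuming region id 0 / blk ns 0.0: */
    /*   raw pmem namespace          -> "pmem0"                          */
    /*   btt-claimed (sector-atomic) -> "pmem0s"                         */
    /*   pfn-claimed or page-mapped  -> "pmem0m"                         */
    /*   blk namespace               -> "ndblk0.0" ("ndblk0.0s" w/ btt)  */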
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index e1970c71ad1c..159aed532042 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
@@ -80,4 +80,13 @@ struct resource *nsblk_add_resource(struct nd_region *nd_region, | |||
80 | int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); | 80 | int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); |
81 | void get_ndd(struct nvdimm_drvdata *ndd); | 81 | void get_ndd(struct nvdimm_drvdata *ndd); |
82 | resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); | 82 | resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); |
83 | void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns); | ||
84 | void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns); | ||
85 | bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, | ||
86 | struct nd_namespace_common **_ndns); | ||
87 | bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, | ||
88 | struct nd_namespace_common **_ndns); | ||
89 | ssize_t nd_namespace_store(struct device *dev, | ||
90 | struct nd_namespace_common **_ndns, const char *buf, | ||
91 | size_t len); | ||
83 | #endif /* __ND_CORE_H__ */ | 92 | #endif /* __ND_CORE_H__ */ |
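These declarations are the whole of the new claim API hoisted out of btt_devs.c. A hypothetical claimer (call it nd_foo, with an ndns back-pointer like nd_btt/nd_pfn) would use it as:

    /* take the namespace; fails if another device already claimed it */
    if (!nd_attach_ndns(&nd_foo->dev, ndns, &nd_foo->ndns))
            return -EBUSY;

    /* ... use the namespace ... */

    /* release: clears ndns->claim, NULLs the back-pointer, drops the ref */
    nd_detach_ndns(&nd_foo->dev, &nd_foo->ndns);

The double-pointer convention is what lets claim.c serve both btt and pfn: the helpers never need to know the claimer's type, only where its ndns field lives.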
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index c41f53e74277..417e521d299c 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
@@ -29,6 +29,13 @@ enum { | |||
29 | ND_MAX_LANES = 256, | 29 | ND_MAX_LANES = 256, |
30 | SECTOR_SHIFT = 9, | 30 | SECTOR_SHIFT = 9, |
31 | INT_LBASIZE_ALIGNMENT = 64, | 31 | INT_LBASIZE_ALIGNMENT = 64, |
32 | #if IS_ENABLED(CONFIG_NVDIMM_PFN) | ||
33 | ND_PFN_ALIGN = PAGES_PER_SECTION * PAGE_SIZE, | ||
34 | ND_PFN_MASK = ND_PFN_ALIGN - 1, | ||
35 | #else | ||
36 | ND_PFN_ALIGN = 0, | ||
37 | ND_PFN_MASK = 0, | ||
38 | #endif | ||
32 | }; | 39 | }; |
33 | 40 | ||
34 | struct nvdimm_drvdata { | 41 | struct nvdimm_drvdata { |
@@ -92,8 +99,11 @@ struct nd_region { | |||
92 | struct device dev; | 99 | struct device dev; |
93 | struct ida ns_ida; | 100 | struct ida ns_ida; |
94 | struct ida btt_ida; | 101 | struct ida btt_ida; |
102 | struct ida pfn_ida; | ||
103 | unsigned long flags; | ||
95 | struct device *ns_seed; | 104 | struct device *ns_seed; |
96 | struct device *btt_seed; | 105 | struct device *btt_seed; |
106 | struct device *pfn_seed; | ||
97 | u16 ndr_mappings; | 107 | u16 ndr_mappings; |
98 | u64 ndr_size; | 108 | u64 ndr_size; |
99 | u64 ndr_start; | 109 | u64 ndr_start; |
@@ -133,6 +143,22 @@ struct nd_btt { | |||
133 | int id; | 143 | int id; |
134 | }; | 144 | }; |
135 | 145 | ||
146 | enum nd_pfn_mode { | ||
147 | PFN_MODE_NONE, | ||
148 | PFN_MODE_RAM, | ||
149 | PFN_MODE_PMEM, | ||
150 | }; | ||
151 | |||
152 | struct nd_pfn { | ||
153 | int id; | ||
154 | u8 *uuid; | ||
155 | struct device dev; | ||
156 | unsigned long npfns; | ||
157 | enum nd_pfn_mode mode; | ||
158 | struct nd_pfn_sb *pfn_sb; | ||
159 | struct nd_namespace_common *ndns; | ||
160 | }; | ||
161 | |||
136 | enum nd_async_mode { | 162 | enum nd_async_mode { |
137 | ND_SYNC, | 163 | ND_SYNC, |
138 | ND_ASYNC, | 164 | ND_ASYNC, |
@@ -159,14 +185,19 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd); | |||
159 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, | 185 | int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, |
160 | void *buf, size_t len); | 186 | void *buf, size_t len); |
161 | struct nd_btt *to_nd_btt(struct device *dev); | 187 | struct nd_btt *to_nd_btt(struct device *dev); |
162 | struct btt_sb; | 188 | |
163 | u64 nd_btt_sb_checksum(struct btt_sb *btt_sb); | 189 | struct nd_gen_sb { |
190 | char reserved[SZ_4K - 8]; | ||
191 | __le64 checksum; | ||
192 | }; | ||
193 | |||
194 | u64 nd_sb_checksum(struct nd_gen_sb *sb); | ||
164 | #if IS_ENABLED(CONFIG_BTT) | 195 | #if IS_ENABLED(CONFIG_BTT) |
165 | int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata); | 196 | int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata); |
166 | bool is_nd_btt(struct device *dev); | 197 | bool is_nd_btt(struct device *dev); |
167 | struct device *nd_btt_create(struct nd_region *nd_region); | 198 | struct device *nd_btt_create(struct nd_region *nd_region); |
168 | #else | 199 | #else |
169 | static inline nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata) | 200 | static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata) |
170 | { | 201 | { |
171 | return -ENODEV; | 202 | return -ENODEV; |
172 | } | 203 | } |
@@ -180,8 +211,36 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region) | |||
180 | { | 211 | { |
181 | return NULL; | 212 | return NULL; |
182 | } | 213 | } |
214 | #endif | ||
183 | 215 | ||
216 | struct nd_pfn *to_nd_pfn(struct device *dev); | ||
217 | #if IS_ENABLED(CONFIG_NVDIMM_PFN) | ||
218 | int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata); | ||
219 | bool is_nd_pfn(struct device *dev); | ||
220 | struct device *nd_pfn_create(struct nd_region *nd_region); | ||
221 | int nd_pfn_validate(struct nd_pfn *nd_pfn); | ||
222 | #else | ||
223 | static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata) | ||
224 | { | ||
225 | return -ENODEV; | ||
226 | } | ||
227 | |||
228 | static inline bool is_nd_pfn(struct device *dev) | ||
229 | { | ||
230 | return false; | ||
231 | } | ||
232 | |||
233 | static inline struct device *nd_pfn_create(struct nd_region *nd_region) | ||
234 | { | ||
235 | return NULL; | ||
236 | } | ||
237 | |||
238 | static inline int nd_pfn_validate(struct nd_pfn *nd_pfn) | ||
239 | { | ||
240 | return -ENODEV; | ||
241 | } | ||
184 | #endif | 242 | #endif |
243 | |||
185 | struct nd_region *to_nd_region(struct device *dev); | 244 | struct nd_region *to_nd_region(struct device *dev); |
186 | int nd_region_to_nstype(struct nd_region *nd_region); | 245 | int nd_region_to_nstype(struct nd_region *nd_region); |
187 | int nd_region_register_namespaces(struct nd_region *nd_region, int *err); | 246 | int nd_region_register_namespaces(struct nd_region *nd_region, int *err); |
@@ -217,4 +276,6 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) | |||
217 | } | 276 | } |
218 | void nd_iostat_end(struct bio *bio, unsigned long start); | 277 | void nd_iostat_end(struct bio *bio, unsigned long start); |
219 | resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk); | 278 | resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk); |
279 | const u8 *nd_dev_to_uuid(struct device *dev); | ||
280 | bool pmem_should_map_pages(struct device *dev); | ||
220 | #endif /* __ND_H__ */ | 281 | #endif /* __ND_H__ */ |
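The struct nd_gen_sb cast works because every libnvdimm info block is defined as exactly 4K with the checksum in the final 8 bytes, so casting from struct btt_sb or struct nd_pfn_sb lands the checksum field at the same offset. Hypothetical compile-time checks making the invariant explicit (the SZ_4K size checks themselves really are enforced, in nd_sb_checksum()):

    BUILD_BUG_ON(offsetof(struct nd_gen_sb, checksum) != SZ_4K - 8);
    BUILD_BUG_ON(offsetof(struct btt_sb, checksum) !=
                    offsetof(struct nd_gen_sb, checksum));
    BUILD_BUG_ON(offsetof(struct nd_pfn_sb, checksum) !=
                    offsetof(struct nd_gen_sb, checksum));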
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h new file mode 100644 index 000000000000..cc243754acef --- /dev/null +++ b/drivers/nvdimm/pfn.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014-2015, Intel Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | */ | ||
13 | |||
14 | #ifndef __NVDIMM_PFN_H | ||
15 | #define __NVDIMM_PFN_H | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | |||
19 | #define PFN_SIG_LEN 16 | ||
20 | #define PFN_SIG "NVDIMM_PFN_INFO\0" | ||
21 | |||
22 | struct nd_pfn_sb { | ||
23 | u8 signature[PFN_SIG_LEN]; | ||
24 | u8 uuid[16]; | ||
25 | u8 parent_uuid[16]; | ||
26 | __le32 flags; | ||
27 | __le16 version_major; | ||
28 | __le16 version_minor; | ||
29 | __le64 dataoff; | ||
30 | __le64 npfns; | ||
31 | __le32 mode; | ||
32 | u8 padding[4012]; | ||
33 | __le64 checksum; | ||
34 | }; | ||
35 | #endif /* __NVDIMM_PFN_H */ | ||
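The 4012-byte pad is what pins the checksum to the end of a 4K block: the preceding fields sum to 16 + 16 + 16 + 4 + 2 + 2 + 8 + 8 + 4 = 76 bytes, and 4096 - 76 - 8 = 4012. nd_sb_checksum() enforces the total with BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K), so any field added here later must come out of the padding.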
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c new file mode 100644 index 000000000000..3fd7d0d81a47 --- /dev/null +++ b/drivers/nvdimm/pfn_devs.c | |||
@@ -0,0 +1,337 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of version 2 of the GNU General Public License as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | */ | ||
13 | #include <linux/blkdev.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/genhd.h> | ||
16 | #include <linux/sizes.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include "nd-core.h" | ||
21 | #include "pfn.h" | ||
22 | #include "nd.h" | ||
23 | |||
24 | static void nd_pfn_release(struct device *dev) | ||
25 | { | ||
26 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
27 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
28 | |||
29 | dev_dbg(dev, "%s\n", __func__); | ||
30 | nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns); | ||
31 | ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id); | ||
32 | kfree(nd_pfn->uuid); | ||
33 | kfree(nd_pfn); | ||
34 | } | ||
35 | |||
36 | static struct device_type nd_pfn_device_type = { | ||
37 | .name = "nd_pfn", | ||
38 | .release = nd_pfn_release, | ||
39 | }; | ||
40 | |||
41 | bool is_nd_pfn(struct device *dev) | ||
42 | { | ||
43 | return dev ? dev->type == &nd_pfn_device_type : false; | ||
44 | } | ||
45 | EXPORT_SYMBOL(is_nd_pfn); | ||
46 | |||
47 | struct nd_pfn *to_nd_pfn(struct device *dev) | ||
48 | { | ||
49 | struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev); | ||
50 | |||
51 | WARN_ON(!is_nd_pfn(dev)); | ||
52 | return nd_pfn; | ||
53 | } | ||
54 | EXPORT_SYMBOL(to_nd_pfn); | ||
55 | |||
56 | static ssize_t mode_show(struct device *dev, | ||
57 | struct device_attribute *attr, char *buf) | ||
58 | { | ||
59 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
60 | |||
61 | switch (nd_pfn->mode) { | ||
62 | case PFN_MODE_RAM: | ||
63 | return sprintf(buf, "ram\n"); | ||
64 | case PFN_MODE_PMEM: | ||
65 | return sprintf(buf, "pmem\n"); | ||
66 | default: | ||
67 | return sprintf(buf, "none\n"); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | static ssize_t mode_store(struct device *dev, | ||
72 | struct device_attribute *attr, const char *buf, size_t len) | ||
73 | { | ||
74 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
75 | ssize_t rc = 0; | ||
76 | |||
77 | device_lock(dev); | ||
78 | nvdimm_bus_lock(dev); | ||
79 | if (dev->driver) | ||
80 | rc = -EBUSY; | ||
81 | else { | ||
82 | size_t n = len - 1; | ||
83 | |||
84 | if (strncmp(buf, "pmem\n", n) == 0 | ||
85 | || strncmp(buf, "pmem", n) == 0) { | ||
86 | /* TODO: allocate from PMEM support */ | ||
87 | rc = -ENOTTY; | ||
88 | } else if (strncmp(buf, "ram\n", n) == 0 | ||
89 | || strncmp(buf, "ram", n) == 0) | ||
90 | nd_pfn->mode = PFN_MODE_RAM; | ||
91 | else if (strncmp(buf, "none\n", n) == 0 | ||
92 | || strncmp(buf, "none", n) == 0) | ||
93 | nd_pfn->mode = PFN_MODE_NONE; | ||
94 | else | ||
95 | rc = -EINVAL; | ||
96 | } | ||
97 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | ||
98 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | ||
99 | nvdimm_bus_unlock(dev); | ||
100 | device_unlock(dev); | ||
101 | |||
102 | return rc ? rc : len; | ||
103 | } | ||
104 | static DEVICE_ATTR_RW(mode); | ||
105 | |||
106 | static ssize_t uuid_show(struct device *dev, | ||
107 | struct device_attribute *attr, char *buf) | ||
108 | { | ||
109 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
110 | |||
111 | if (nd_pfn->uuid) | ||
112 | return sprintf(buf, "%pUb\n", nd_pfn->uuid); | ||
113 | return sprintf(buf, "\n"); | ||
114 | } | ||
115 | |||
116 | static ssize_t uuid_store(struct device *dev, | ||
117 | struct device_attribute *attr, const char *buf, size_t len) | ||
118 | { | ||
119 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
120 | ssize_t rc; | ||
121 | |||
122 | device_lock(dev); | ||
123 | rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); | ||
124 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | ||
125 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | ||
126 | device_unlock(dev); | ||
127 | |||
128 | return rc ? rc : len; | ||
129 | } | ||
130 | static DEVICE_ATTR_RW(uuid); | ||
131 | |||
132 | static ssize_t namespace_show(struct device *dev, | ||
133 | struct device_attribute *attr, char *buf) | ||
134 | { | ||
135 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
136 | ssize_t rc; | ||
137 | |||
138 | nvdimm_bus_lock(dev); | ||
139 | rc = sprintf(buf, "%s\n", nd_pfn->ndns | ||
140 | ? dev_name(&nd_pfn->ndns->dev) : ""); | ||
141 | nvdimm_bus_unlock(dev); | ||
142 | return rc; | ||
143 | } | ||
144 | |||
145 | static ssize_t namespace_store(struct device *dev, | ||
146 | struct device_attribute *attr, const char *buf, size_t len) | ||
147 | { | ||
148 | struct nd_pfn *nd_pfn = to_nd_pfn(dev); | ||
149 | ssize_t rc; | ||
150 | |||
151 | nvdimm_bus_lock(dev); | ||
152 | device_lock(dev); | ||
153 | rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); | ||
154 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | ||
155 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | ||
156 | device_unlock(dev); | ||
157 | nvdimm_bus_unlock(dev); | ||
158 | |||
159 | return rc; | ||
160 | } | ||
161 | static DEVICE_ATTR_RW(namespace); | ||
162 | |||
163 | static struct attribute *nd_pfn_attributes[] = { | ||
164 | &dev_attr_mode.attr, | ||
165 | &dev_attr_namespace.attr, | ||
166 | &dev_attr_uuid.attr, | ||
167 | NULL, | ||
168 | }; | ||
169 | |||
170 | static struct attribute_group nd_pfn_attribute_group = { | ||
171 | .attrs = nd_pfn_attributes, | ||
172 | }; | ||
173 | |||
174 | static const struct attribute_group *nd_pfn_attribute_groups[] = { | ||
175 | &nd_pfn_attribute_group, | ||
176 | &nd_device_attribute_group, | ||
177 | &nd_numa_attribute_group, | ||
178 | NULL, | ||
179 | }; | ||
180 | |||
181 | static struct device *__nd_pfn_create(struct nd_region *nd_region, | ||
182 | u8 *uuid, enum nd_pfn_mode mode, | ||
183 | struct nd_namespace_common *ndns) | ||
184 | { | ||
185 | struct nd_pfn *nd_pfn; | ||
186 | struct device *dev; | ||
187 | |||
188 | /* we can only create pages for contiguous ranges of pmem */ | ||
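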
189 | if (!is_nd_pmem(&nd_region->dev)) | ||
190 | return NULL; | ||
191 | |||
192 | nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL); | ||
193 | if (!nd_pfn) | ||
194 | return NULL; | ||
195 | |||
196 | nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL); | ||
197 | if (nd_pfn->id < 0) { | ||
198 | kfree(nd_pfn); | ||
199 | return NULL; | ||
200 | } | ||
201 | |||
202 | nd_pfn->mode = mode; | ||
203 | if (uuid) | ||
204 | uuid = kmemdup(uuid, 16, GFP_KERNEL); | ||
205 | nd_pfn->uuid = uuid; | ||
206 | dev = &nd_pfn->dev; | ||
207 | dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id); | ||
208 | dev->parent = &nd_region->dev; | ||
209 | dev->type = &nd_pfn_device_type; | ||
210 | dev->groups = nd_pfn_attribute_groups; | ||
211 | device_initialize(&nd_pfn->dev); | ||
212 | if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { | ||
213 | dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n", | ||
214 | __func__, dev_name(ndns->claim)); | ||
215 | put_device(dev); | ||
216 | return NULL; | ||
217 | } | ||
218 | return dev; | ||
219 | } | ||
220 | |||
221 | struct device *nd_pfn_create(struct nd_region *nd_region) | ||
222 | { | ||
223 | struct device *dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, | ||
224 | NULL); | ||
225 | |||
226 | if (dev) | ||
227 | __nd_device_register(dev); | ||
228 | return dev; | ||
229 | } | ||
230 | |||
231 | int nd_pfn_validate(struct nd_pfn *nd_pfn) | ||
232 | { | ||
233 | struct nd_namespace_common *ndns = nd_pfn->ndns; | ||
234 | struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; | ||
235 | struct nd_namespace_io *nsio; | ||
236 | u64 checksum, offset; | ||
237 | |||
238 | if (!pfn_sb || !ndns) | ||
239 | return -ENODEV; | ||
240 | |||
241 | if (!is_nd_pmem(nd_pfn->dev.parent)) | ||
242 | return -ENODEV; | ||
243 | |||
244 | /* section alignment for simple hotplug */ | ||
245 | if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN) | ||
246 | return -ENODEV; | ||
247 | |||
248 | if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb))) | ||
249 | return -ENXIO; | ||
250 | |||
251 | if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0) | ||
252 | return -ENODEV; | ||
253 | |||
254 | checksum = le64_to_cpu(pfn_sb->checksum); | ||
255 | pfn_sb->checksum = 0; | ||
256 | if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb)) | ||
257 | return -ENODEV; | ||
258 | pfn_sb->checksum = cpu_to_le64(checksum); | ||
259 | |||
260 | switch (le32_to_cpu(pfn_sb->mode)) { | ||
261 | case PFN_MODE_RAM: | ||
262 | break; | ||
263 | case PFN_MODE_PMEM: | ||
264 | /* TODO: allocate from PMEM support */ | ||
265 | return -ENOTTY; | ||
266 | default: | ||
267 | return -ENXIO; | ||
268 | } | ||
269 | |||
270 | if (!nd_pfn->uuid) { | ||
271 | /* from probe we allocate */ | ||
272 | nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL); | ||
273 | if (!nd_pfn->uuid) | ||
274 | return -ENOMEM; | ||
275 | } else { | ||
276 | /* from init we validate */ | ||
277 | if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) | ||
278 | return -EINVAL; | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * These warnings are verbose because they can only trigger in | ||
283 | * the case where the physical address alignment of the | ||
284 | * namespace has changed since the pfn superblock was | ||
285 | * established. | ||
286 | */ | ||
287 | offset = le64_to_cpu(pfn_sb->dataoff); | ||
288 | nsio = to_nd_namespace_io(&ndns->dev); | ||
289 | if (nsio->res.start & ND_PFN_MASK) { | ||
290 | dev_err(&nd_pfn->dev, | ||
291 | "init failed: %s not section aligned\n", | ||
292 | dev_name(&ndns->dev)); | ||
293 | return -EBUSY; | ||
294 | } else if (offset >= resource_size(&nsio->res)) { | ||
295 | dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", | ||
296 | dev_name(&ndns->dev)); | ||
297 | return -EBUSY; | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | EXPORT_SYMBOL(nd_pfn_validate); | ||
303 | |||
304 | int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata) | ||
305 | { | ||
306 | int rc; | ||
307 | struct device *dev; | ||
308 | struct nd_pfn *nd_pfn; | ||
309 | struct nd_pfn_sb *pfn_sb; | ||
310 | struct nd_region *nd_region = to_nd_region(ndns->dev.parent); | ||
311 | |||
312 | if (ndns->force_raw) | ||
313 | return -ENODEV; | ||
314 | |||
315 | nvdimm_bus_lock(&ndns->dev); | ||
316 | dev = __nd_pfn_create(nd_region, NULL, PFN_MODE_NONE, ndns); | ||
317 | nvdimm_bus_unlock(&ndns->dev); | ||
318 | if (!dev) | ||
319 | return -ENOMEM; | ||
320 | dev_set_drvdata(dev, drvdata); | ||
321 | pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL); | ||
322 | nd_pfn = to_nd_pfn(dev); | ||
323 | nd_pfn->pfn_sb = pfn_sb; | ||
324 | rc = nd_pfn_validate(nd_pfn); | ||
325 | nd_pfn->pfn_sb = NULL; | ||
326 | kfree(pfn_sb); | ||
327 | dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__, | ||
328 | rc == 0 ? dev_name(dev) : "<none>"); | ||
329 | if (rc < 0) { | ||
330 | __nd_detach_ndns(dev, &nd_pfn->ndns); | ||
331 | put_device(dev); | ||
332 | } else | ||
333 | __nd_device_register(&nd_pfn->dev); | ||
334 | |||
335 | return rc; | ||
336 | } | ||
337 | EXPORT_SYMBOL(nd_pfn_probe); | ||
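The nd_pfn_validate() hunk above verifies the on-media superblock with a zero-then-restore checksum pattern: the checksum field covers itself, so it is cleared before recomputing and restored afterwards. A minimal sketch of that pattern in isolation, assuming only the nd_sb_checksum() helper already used above (the name sb_checksum_ok is illustrative, not a libnvdimm symbol):

/*
 * Sketch: verify a superblock whose checksum field is itself part of
 * the checksummed region, as with struct nd_pfn_sb above. Assumes
 * nd_sb_checksum() from libnvdimm; sb_checksum_ok() is hypothetical.
 */
static bool sb_checksum_ok(struct nd_pfn_sb *pfn_sb)
{
	u64 stored = le64_to_cpu(pfn_sb->checksum);
	bool ok;

	pfn_sb->checksum = 0;	/* fold the field out of the sum */
	ok = (stored == nd_sb_checksum((struct nd_gen_sb *) pfn_sb));
	pfn_sb->checksum = cpu_to_le64(stored);	/* restore media layout */
	return ok;
}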
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 4c079d5cb539..b9525385c0dc 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -21,18 +21,24 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/memory_hotplug.h> | ||
24 | #include <linux/moduleparam.h> | 25 | #include <linux/moduleparam.h> |
26 | #include <linux/vmalloc.h> | ||
25 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
26 | #include <linux/pmem.h> | 28 | #include <linux/pmem.h> |
27 | #include <linux/nd.h> | 29 | #include <linux/nd.h> |
30 | #include "pfn.h" | ||
28 | #include "nd.h" | 31 | #include "nd.h" |
29 | 32 | ||
30 | struct pmem_device { | 33 | struct pmem_device { |
31 | struct request_queue *pmem_queue; | 34 | struct request_queue *pmem_queue; |
32 | struct gendisk *pmem_disk; | 35 | struct gendisk *pmem_disk; |
36 | struct nd_namespace_common *ndns; | ||
33 | 37 | ||
34 | /* One contiguous memory region per device */ | 38 | /* One contiguous memory region per device */ |
35 | phys_addr_t phys_addr; | 39 | phys_addr_t phys_addr; |
40 | /* when non-zero this device is hosting a 'pfn' instance */ | ||
41 | phys_addr_t data_offset; | ||
36 | void __pmem *virt_addr; | 42 | void __pmem *virt_addr; |
37 | size_t size; | 43 | size_t size; |
38 | }; | 44 | }; |
@@ -44,7 +50,7 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page, | |||
44 | sector_t sector) | 50 | sector_t sector) |
45 | { | 51 | { |
46 | void *mem = kmap_atomic(page); | 52 | void *mem = kmap_atomic(page); |
47 | size_t pmem_off = sector << 9; | 53 | phys_addr_t pmem_off = sector * 512 + pmem->data_offset; |
48 | void __pmem *pmem_addr = pmem->virt_addr + pmem_off; | 54 | void __pmem *pmem_addr = pmem->virt_addr + pmem_off; |
49 | 55 | ||
50 | if (rw == READ) { | 56 | if (rw == READ) { |
@@ -92,19 +98,26 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, | |||
92 | } | 98 | } |
93 | 99 | ||
94 | static long pmem_direct_access(struct block_device *bdev, sector_t sector, | 100 | static long pmem_direct_access(struct block_device *bdev, sector_t sector, |
95 | void **kaddr, unsigned long *pfn, long size) | 101 | void __pmem **kaddr, unsigned long *pfn) |
96 | { | 102 | { |
97 | struct pmem_device *pmem = bdev->bd_disk->private_data; | 103 | struct pmem_device *pmem = bdev->bd_disk->private_data; |
98 | size_t offset = sector << 9; | 104 | resource_size_t offset = sector * 512 + pmem->data_offset; |
99 | 105 | resource_size_t size; | |
100 | if (!pmem) | 106 | |
101 | return -ENODEV; | 107 | if (pmem->data_offset) { |
108 | /* | ||
109 | * Limit the direct_access() size to what is covered by | ||
110 | * the memmap | ||
111 | */ | ||
112 | size = (pmem->size - offset) & ~ND_PFN_MASK; | ||
113 | } else | ||
114 | size = pmem->size - offset; | ||
102 | 115 | ||
103 | /* FIXME convert DAX to comprehend that this mapping has a lifetime */ | 116 | /* FIXME convert DAX to comprehend that this mapping has a lifetime */ |
104 | *kaddr = (void __force *) pmem->virt_addr + offset; | 117 | *kaddr = pmem->virt_addr + offset; |
105 | *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT; | 118 | *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT; |
106 | 119 | ||
107 | return pmem->size - offset; | 120 | return size; |
108 | } | 121 | } |
109 | 122 | ||
110 | static const struct block_device_operations pmem_fops = { | 123 | static const struct block_device_operations pmem_fops = { |
@@ -119,27 +132,33 @@ static struct pmem_device *pmem_alloc(struct device *dev, | |||
119 | { | 132 | { |
120 | struct pmem_device *pmem; | 133 | struct pmem_device *pmem; |
121 | 134 | ||
122 | pmem = kzalloc(sizeof(*pmem), GFP_KERNEL); | 135 | pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL); |
123 | if (!pmem) | 136 | if (!pmem) |
124 | return ERR_PTR(-ENOMEM); | 137 | return ERR_PTR(-ENOMEM); |
125 | 138 | ||
126 | pmem->phys_addr = res->start; | 139 | pmem->phys_addr = res->start; |
127 | pmem->size = resource_size(res); | 140 | pmem->size = resource_size(res); |
128 | if (!arch_has_pmem_api()) | 141 | if (!arch_has_wmb_pmem()) |
129 | dev_warn(dev, "unable to guarantee persistence of writes\n"); | 142 | dev_warn(dev, "unable to guarantee persistence of writes\n"); |
130 | 143 | ||
131 | if (!request_mem_region(pmem->phys_addr, pmem->size, dev_name(dev))) { | 144 | if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size, |
145 | dev_name(dev))) { | ||
132 | dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", | 146 | dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", |
133 | &pmem->phys_addr, pmem->size); | 147 | &pmem->phys_addr, pmem->size); |
134 | kfree(pmem); | ||
135 | return ERR_PTR(-EBUSY); | 148 | return ERR_PTR(-EBUSY); |
136 | } | 149 | } |
137 | 150 | ||
138 | pmem->virt_addr = memremap_pmem(pmem->phys_addr, pmem->size); | 151 | if (pmem_should_map_pages(dev)) { |
139 | if (!pmem->virt_addr) { | 152 | void *addr = devm_memremap_pages(dev, res); |
140 | release_mem_region(pmem->phys_addr, pmem->size); | 153 | |
141 | kfree(pmem); | 154 | if (IS_ERR(addr)) |
142 | return ERR_PTR(-ENXIO); | 155 | return addr; |
156 | pmem->virt_addr = (void __pmem *) addr; | ||
157 | } else { | ||
158 | pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr, | ||
159 | pmem->size); | ||
160 | if (!pmem->virt_addr) | ||
161 | return ERR_PTR(-ENXIO); | ||
143 | } | 162 | } |
144 | 163 | ||
145 | return pmem; | 164 | return pmem; |
@@ -147,13 +166,16 @@ static struct pmem_device *pmem_alloc(struct device *dev, | |||
147 | 166 | ||
148 | static void pmem_detach_disk(struct pmem_device *pmem) | 167 | static void pmem_detach_disk(struct pmem_device *pmem) |
149 | { | 168 | { |
169 | if (!pmem->pmem_disk) | ||
170 | return; | ||
171 | |||
150 | del_gendisk(pmem->pmem_disk); | 172 | del_gendisk(pmem->pmem_disk); |
151 | put_disk(pmem->pmem_disk); | 173 | put_disk(pmem->pmem_disk); |
152 | blk_cleanup_queue(pmem->pmem_queue); | 174 | blk_cleanup_queue(pmem->pmem_queue); |
153 | } | 175 | } |
154 | 176 | ||
155 | static int pmem_attach_disk(struct nd_namespace_common *ndns, | 177 | static int pmem_attach_disk(struct device *dev, |
156 | struct pmem_device *pmem) | 178 | struct nd_namespace_common *ndns, struct pmem_device *pmem) |
157 | { | 179 | { |
158 | struct gendisk *disk; | 180 | struct gendisk *disk; |
159 | 181 | ||
@@ -162,6 +184,7 @@ static int pmem_attach_disk(struct nd_namespace_common *ndns, | |||
162 | return -ENOMEM; | 184 | return -ENOMEM; |
163 | 185 | ||
164 | blk_queue_make_request(pmem->pmem_queue, pmem_make_request); | 186 | blk_queue_make_request(pmem->pmem_queue, pmem_make_request); |
187 | blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE); | ||
165 | blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX); | 188 | blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX); |
166 | blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY); | 189 | blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY); |
167 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue); | 190 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue); |
@@ -179,8 +202,8 @@ static int pmem_attach_disk(struct nd_namespace_common *ndns, | |||
179 | disk->queue = pmem->pmem_queue; | 202 | disk->queue = pmem->pmem_queue; |
180 | disk->flags = GENHD_FL_EXT_DEVT; | 203 | disk->flags = GENHD_FL_EXT_DEVT; |
181 | nvdimm_namespace_disk_name(ndns, disk->disk_name); | 204 | nvdimm_namespace_disk_name(ndns, disk->disk_name); |
182 | disk->driverfs_dev = &ndns->dev; | 205 | disk->driverfs_dev = dev; |
183 | set_capacity(disk, pmem->size >> 9); | 206 | set_capacity(disk, (pmem->size - pmem->data_offset) / 512); |
184 | pmem->pmem_disk = disk; | 207 | pmem->pmem_disk = disk; |
185 | 208 | ||
186 | add_disk(disk); | 209 | add_disk(disk); |
@@ -209,11 +232,152 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns, | |||
209 | return 0; | 232 | return 0; |
210 | } | 233 | } |
211 | 234 | ||
212 | static void pmem_free(struct pmem_device *pmem) | 235 | static int nd_pfn_init(struct nd_pfn *nd_pfn) |
236 | { | ||
237 | struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL); | ||
238 | struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev); | ||
239 | struct nd_namespace_common *ndns = nd_pfn->ndns; | ||
240 | struct nd_region *nd_region; | ||
241 | unsigned long npfns; | ||
242 | phys_addr_t offset; | ||
243 | u64 checksum; | ||
244 | int rc; | ||
245 | |||
246 | if (!pfn_sb) | ||
247 | return -ENOMEM; | ||
248 | |||
249 | nd_pfn->pfn_sb = pfn_sb; | ||
250 | rc = nd_pfn_validate(nd_pfn); | ||
251 | if (rc == 0 || rc == -EBUSY) | ||
252 | return rc; | ||
253 | |||
254 | /* section alignment for simple hotplug */ | ||
255 | if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN | ||
256 | || pmem->phys_addr & ND_PFN_MASK) | ||
257 | return -ENODEV; | ||
258 | |||
259 | nd_region = to_nd_region(nd_pfn->dev.parent); | ||
260 | if (nd_region->ro) { | ||
261 | dev_info(&nd_pfn->dev, | ||
262 | "%s is read-only, unable to init metadata\n", | ||
263 | dev_name(&nd_region->dev)); | ||
264 | goto err; | ||
265 | } | ||
266 | |||
267 | memset(pfn_sb, 0, sizeof(*pfn_sb)); | ||
268 | npfns = (pmem->size - SZ_8K) / SZ_4K; | ||
269 | /* | ||
270 | * Note: we use 64 here as the standard size of struct page; | ||
271 | * debugging options may cause it to be larger, in which case the | ||
272 | * implementation will limit the pfns advertised through | ||
273 | * ->direct_access() to those that are included in the memmap. | ||
274 | */ | ||
275 | if (nd_pfn->mode == PFN_MODE_PMEM) | ||
276 | offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE); | ||
277 | else if (nd_pfn->mode == PFN_MODE_RAM) | ||
278 | offset = SZ_8K; | ||
279 | else | ||
280 | goto err; | ||
281 | |||
282 | npfns = (pmem->size - offset) / SZ_4K; | ||
283 | pfn_sb->mode = cpu_to_le32(nd_pfn->mode); | ||
284 | pfn_sb->dataoff = cpu_to_le64(offset); | ||
285 | pfn_sb->npfns = cpu_to_le64(npfns); | ||
286 | memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN); | ||
287 | memcpy(pfn_sb->uuid, nd_pfn->uuid, 16); | ||
288 | pfn_sb->version_major = cpu_to_le16(1); | ||
289 | checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); | ||
290 | pfn_sb->checksum = cpu_to_le64(checksum); | ||
291 | |||
292 | rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)); | ||
293 | if (rc) | ||
294 | goto err; | ||
295 | |||
296 | return 0; | ||
297 | err: | ||
298 | nd_pfn->pfn_sb = NULL; | ||
299 | kfree(pfn_sb); | ||
300 | return -ENXIO; | ||
301 | } | ||
302 | |||
303 | static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns) | ||
304 | { | ||
305 | struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim); | ||
306 | struct pmem_device *pmem; | ||
307 | |||
308 | /* free pmem disk */ | ||
309 | pmem = dev_get_drvdata(&nd_pfn->dev); | ||
310 | pmem_detach_disk(pmem); | ||
311 | |||
312 | /* release nd_pfn resources */ | ||
313 | kfree(nd_pfn->pfn_sb); | ||
314 | nd_pfn->pfn_sb = NULL; | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns) | ||
213 | { | 320 | { |
214 | memunmap_pmem(pmem->virt_addr); | 321 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); |
215 | release_mem_region(pmem->phys_addr, pmem->size); | 322 | struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim); |
216 | kfree(pmem); | 323 | struct device *dev = &nd_pfn->dev; |
324 | struct vmem_altmap *altmap; | ||
325 | struct nd_region *nd_region; | ||
326 | struct nd_pfn_sb *pfn_sb; | ||
327 | struct pmem_device *pmem; | ||
328 | phys_addr_t offset; | ||
329 | int rc; | ||
330 | |||
331 | if (!nd_pfn->uuid || !nd_pfn->ndns) | ||
332 | return -ENODEV; | ||
333 | |||
334 | nd_region = to_nd_region(dev->parent); | ||
335 | rc = nd_pfn_init(nd_pfn); | ||
336 | if (rc) | ||
337 | return rc; | ||
338 | |||
339 | if (PAGE_SIZE != SZ_4K) { | ||
340 | dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n"); | ||
341 | return -ENXIO; | ||
342 | } | ||
343 | if (nsio->res.start & ND_PFN_MASK) { | ||
344 | dev_err(dev, "%s not memory hotplug section aligned\n", | ||
345 | dev_name(&ndns->dev)); | ||
346 | return -ENXIO; | ||
347 | } | ||
348 | |||
349 | pfn_sb = nd_pfn->pfn_sb; | ||
350 | offset = le64_to_cpu(pfn_sb->dataoff); | ||
351 | nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode); | ||
352 | if (nd_pfn->mode == PFN_MODE_RAM) { | ||
353 | if (offset != SZ_8K) | ||
354 | return -EINVAL; | ||
355 | nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); | ||
356 | altmap = NULL; | ||
357 | } else { | ||
358 | rc = -ENXIO; | ||
359 | goto err; | ||
360 | } | ||
361 | |||
362 | /* establish pfn range for lookup, and switch to direct map */ | ||
363 | pmem = dev_get_drvdata(dev); | ||
364 | memunmap_pmem(dev, pmem->virt_addr); | ||
365 | pmem->virt_addr = (void __pmem *)devm_memremap_pages(dev, &nsio->res); | ||
366 | if (IS_ERR(pmem->virt_addr)) { | ||
367 | rc = PTR_ERR(pmem->virt_addr); | ||
368 | goto err; | ||
369 | } | ||
370 | |||
371 | /* attach pmem disk in "pfn-mode" */ | ||
372 | pmem->data_offset = offset; | ||
373 | rc = pmem_attach_disk(dev, ndns, pmem); | ||
374 | if (rc) | ||
375 | goto err; | ||
376 | |||
377 | return rc; | ||
378 | err: | ||
379 | nvdimm_namespace_detach_pfn(ndns); | ||
380 | return rc; | ||
217 | } | 381 | } |
218 | 382 | ||
219 | static int nd_pmem_probe(struct device *dev) | 383 | static int nd_pmem_probe(struct device *dev) |
@@ -222,7 +386,6 @@ static int nd_pmem_probe(struct device *dev) | |||
222 | struct nd_namespace_common *ndns; | 386 | struct nd_namespace_common *ndns; |
223 | struct nd_namespace_io *nsio; | 387 | struct nd_namespace_io *nsio; |
224 | struct pmem_device *pmem; | 388 | struct pmem_device *pmem; |
225 | int rc; | ||
226 | 389 | ||
227 | ndns = nvdimm_namespace_common_probe(dev); | 390 | ndns = nvdimm_namespace_common_probe(dev); |
228 | if (IS_ERR(ndns)) | 391 | if (IS_ERR(ndns)) |
@@ -233,18 +396,27 @@ static int nd_pmem_probe(struct device *dev) | |||
233 | if (IS_ERR(pmem)) | 396 | if (IS_ERR(pmem)) |
234 | return PTR_ERR(pmem); | 397 | return PTR_ERR(pmem); |
235 | 398 | ||
399 | pmem->ndns = ndns; | ||
236 | dev_set_drvdata(dev, pmem); | 400 | dev_set_drvdata(dev, pmem); |
237 | ndns->rw_bytes = pmem_rw_bytes; | 401 | ndns->rw_bytes = pmem_rw_bytes; |
402 | |||
238 | if (is_nd_btt(dev)) | 403 | if (is_nd_btt(dev)) |
239 | rc = nvdimm_namespace_attach_btt(ndns); | 404 | return nvdimm_namespace_attach_btt(ndns); |
240 | else if (nd_btt_probe(ndns, pmem) == 0) { | 405 | |
406 | if (is_nd_pfn(dev)) | ||
407 | return nvdimm_namespace_attach_pfn(ndns); | ||
408 | |||
409 | if (nd_btt_probe(ndns, pmem) == 0) { | ||
241 | /* we'll come back as btt-pmem */ | 410 | /* we'll come back as btt-pmem */ |
242 | rc = -ENXIO; | 411 | return -ENXIO; |
243 | } else | 412 | } |
244 | rc = pmem_attach_disk(ndns, pmem); | 413 | |
245 | if (rc) | 414 | if (nd_pfn_probe(ndns, pmem) == 0) { |
246 | pmem_free(pmem); | 415 | /* we'll come back as pfn-pmem */ |
247 | return rc; | 416 | return -ENXIO; |
417 | } | ||
418 | |||
419 | return pmem_attach_disk(dev, ndns, pmem); | ||
248 | } | 420 | } |
249 | 421 | ||
250 | static int nd_pmem_remove(struct device *dev) | 422 | static int nd_pmem_remove(struct device *dev) |
@@ -252,10 +424,11 @@ static int nd_pmem_remove(struct device *dev) | |||
252 | struct pmem_device *pmem = dev_get_drvdata(dev); | 424 | struct pmem_device *pmem = dev_get_drvdata(dev); |
253 | 425 | ||
254 | if (is_nd_btt(dev)) | 426 | if (is_nd_btt(dev)) |
255 | nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns); | 427 | nvdimm_namespace_detach_btt(pmem->ndns); |
428 | else if (is_nd_pfn(dev)) | ||
429 | nvdimm_namespace_detach_pfn(pmem->ndns); | ||
256 | else | 430 | else |
257 | pmem_detach_disk(pmem); | 431 | pmem_detach_disk(pmem); |
258 | pmem_free(pmem); | ||
259 | 432 | ||
260 | return 0; | 433 | return 0; |
261 | } | 434 | } |
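The recurring sector * 512 + pmem->data_offset arithmetic above is the whole 'pfn' mechanism from the block layer's point of view: when data_offset is non-zero, a memmap reservation sits at the start of the namespace, the advertised capacity shrinks by that amount, and every sector address is shifted past it. A hedged sketch of the translation (the helper name is illustrative):

/*
 * Sketch: translate a 512-byte sector to a namespace offset, skipping
 * any pfn metadata reserved up front. Mirrors pmem_do_bvec() and
 * pmem_direct_access() above; pmem_sector_to_offset() is hypothetical.
 */
static phys_addr_t pmem_sector_to_offset(struct pmem_device *pmem,
		sector_t sector)
{
	/* capacity was set to (pmem->size - pmem->data_offset) / 512 */
	return sector * 512 + pmem->data_offset;
}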
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index f28f78ccff19..7da63eac78ee 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c | |||
@@ -53,6 +53,7 @@ static int nd_region_probe(struct device *dev) | |||
53 | return -ENODEV; | 53 | return -ENODEV; |
54 | 54 | ||
55 | nd_region->btt_seed = nd_btt_create(nd_region); | 55 | nd_region->btt_seed = nd_btt_create(nd_region); |
56 | nd_region->pfn_seed = nd_pfn_create(nd_region); | ||
56 | if (err == 0) | 57 | if (err == 0) |
57 | return 0; | 58 | return 0; |
58 | 59 | ||
@@ -84,6 +85,7 @@ static int nd_region_remove(struct device *dev) | |||
84 | nvdimm_bus_lock(dev); | 85 | nvdimm_bus_lock(dev); |
85 | nd_region->ns_seed = NULL; | 86 | nd_region->ns_seed = NULL; |
86 | nd_region->btt_seed = NULL; | 87 | nd_region->btt_seed = NULL; |
88 | nd_region->pfn_seed = NULL; | ||
87 | dev_set_drvdata(dev, NULL); | 89 | dev_set_drvdata(dev, NULL); |
88 | nvdimm_bus_unlock(dev); | 90 | nvdimm_bus_unlock(dev); |
89 | 91 | ||
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 7384455792bf..529f3f02e7b2 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
@@ -345,6 +345,23 @@ static ssize_t btt_seed_show(struct device *dev, | |||
345 | } | 345 | } |
346 | static DEVICE_ATTR_RO(btt_seed); | 346 | static DEVICE_ATTR_RO(btt_seed); |
347 | 347 | ||
348 | static ssize_t pfn_seed_show(struct device *dev, | ||
349 | struct device_attribute *attr, char *buf) | ||
350 | { | ||
351 | struct nd_region *nd_region = to_nd_region(dev); | ||
352 | ssize_t rc; | ||
353 | |||
354 | nvdimm_bus_lock(dev); | ||
355 | if (nd_region->pfn_seed) | ||
356 | rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); | ||
357 | else | ||
358 | rc = sprintf(buf, "\n"); | ||
359 | nvdimm_bus_unlock(dev); | ||
360 | |||
361 | return rc; | ||
362 | } | ||
363 | static DEVICE_ATTR_RO(pfn_seed); | ||
364 | |||
348 | static ssize_t read_only_show(struct device *dev, | 365 | static ssize_t read_only_show(struct device *dev, |
349 | struct device_attribute *attr, char *buf) | 366 | struct device_attribute *attr, char *buf) |
350 | { | 367 | { |
@@ -373,6 +390,7 @@ static struct attribute *nd_region_attributes[] = { | |||
373 | &dev_attr_nstype.attr, | 390 | &dev_attr_nstype.attr, |
374 | &dev_attr_mappings.attr, | 391 | &dev_attr_mappings.attr, |
375 | &dev_attr_btt_seed.attr, | 392 | &dev_attr_btt_seed.attr, |
393 | &dev_attr_pfn_seed.attr, | ||
376 | &dev_attr_read_only.attr, | 394 | &dev_attr_read_only.attr, |
377 | &dev_attr_set_cookie.attr, | 395 | &dev_attr_set_cookie.attr, |
378 | &dev_attr_available_size.attr, | 396 | &dev_attr_available_size.attr, |
@@ -740,10 +758,12 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, | |||
740 | nd_region->provider_data = ndr_desc->provider_data; | 758 | nd_region->provider_data = ndr_desc->provider_data; |
741 | nd_region->nd_set = ndr_desc->nd_set; | 759 | nd_region->nd_set = ndr_desc->nd_set; |
742 | nd_region->num_lanes = ndr_desc->num_lanes; | 760 | nd_region->num_lanes = ndr_desc->num_lanes; |
761 | nd_region->flags = ndr_desc->flags; | ||
743 | nd_region->ro = ro; | 762 | nd_region->ro = ro; |
744 | nd_region->numa_node = ndr_desc->numa_node; | 763 | nd_region->numa_node = ndr_desc->numa_node; |
745 | ida_init(&nd_region->ns_ida); | 764 | ida_init(&nd_region->ns_ida); |
746 | ida_init(&nd_region->btt_ida); | 765 | ida_init(&nd_region->btt_ida); |
766 | ida_init(&nd_region->pfn_ida); | ||
747 | dev = &nd_region->dev; | 767 | dev = &nd_region->dev; |
748 | dev_set_name(dev, "region%d", nd_region->id); | 768 | dev_set_name(dev, "region%d", nd_region->id); |
749 | dev->parent = &nvdimm_bus->dev; | 769 | dev->parent = &nvdimm_bus->dev; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8177f3b04491..0b2be174d981 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -326,8 +326,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
326 | struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; | 326 | struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; |
327 | dev->rom_base_reg = rom; | 327 | dev->rom_base_reg = rom; |
328 | res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | | 328 | res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | |
329 | IORESOURCE_READONLY | IORESOURCE_CACHEABLE | | 329 | IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; |
330 | IORESOURCE_SIZEALIGN; | ||
331 | __pci_read_base(dev, pci_bar_mem32, res, rom); | 330 | __pci_read_base(dev, pci_bar_mem32, res, rom); |
332 | } | 331 | } |
333 | } | 332 | } |
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c index 9357aa779048..7ad3295752ef 100644 --- a/drivers/pnp/manager.c +++ b/drivers/pnp/manager.c | |||
@@ -97,8 +97,6 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx) | |||
97 | /* ??? rule->flags restricted to 8 bits, all tests bogus ??? */ | 97 | /* ??? rule->flags restricted to 8 bits, all tests bogus ??? */ |
98 | if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) | 98 | if (!(rule->flags & IORESOURCE_MEM_WRITEABLE)) |
99 | res->flags |= IORESOURCE_READONLY; | 99 | res->flags |= IORESOURCE_READONLY; |
100 | if (rule->flags & IORESOURCE_MEM_CACHEABLE) | ||
101 | res->flags |= IORESOURCE_CACHEABLE; | ||
102 | if (rule->flags & IORESOURCE_MEM_RANGELENGTH) | 100 | if (rule->flags & IORESOURCE_MEM_RANGELENGTH) |
103 | res->flags |= IORESOURCE_RANGELENGTH; | 101 | res->flags |= IORESOURCE_RANGELENGTH; |
104 | if (rule->flags & IORESOURCE_MEM_SHADOWABLE) | 102 | if (rule->flags & IORESOURCE_MEM_SHADOWABLE) |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2b744fbba68e..5ed44fe21380 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -29,7 +29,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); | |||
29 | static void dcssblk_release(struct gendisk *disk, fmode_t mode); | 29 | static void dcssblk_release(struct gendisk *disk, fmode_t mode); |
30 | static void dcssblk_make_request(struct request_queue *q, struct bio *bio); | 30 | static void dcssblk_make_request(struct request_queue *q, struct bio *bio); |
31 | static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, | 31 | static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, |
32 | void **kaddr, unsigned long *pfn, long size); | 32 | void __pmem **kaddr, unsigned long *pfn); |
33 | 33 | ||
34 | static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; | 34 | static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; |
35 | 35 | ||
@@ -881,18 +881,20 @@ fail: | |||
881 | 881 | ||
882 | static long | 882 | static long |
883 | dcssblk_direct_access (struct block_device *bdev, sector_t secnum, | 883 | dcssblk_direct_access (struct block_device *bdev, sector_t secnum, |
884 | void **kaddr, unsigned long *pfn, long size) | 884 | void __pmem **kaddr, unsigned long *pfn) |
885 | { | 885 | { |
886 | struct dcssblk_dev_info *dev_info; | 886 | struct dcssblk_dev_info *dev_info; |
887 | unsigned long offset, dev_sz; | 887 | unsigned long offset, dev_sz; |
888 | void *addr; | ||
888 | 889 | ||
889 | dev_info = bdev->bd_disk->private_data; | 890 | dev_info = bdev->bd_disk->private_data; |
890 | if (!dev_info) | 891 | if (!dev_info) |
891 | return -ENODEV; | 892 | return -ENODEV; |
892 | dev_sz = dev_info->end - dev_info->start; | 893 | dev_sz = dev_info->end - dev_info->start; |
893 | offset = secnum * 512; | 894 | offset = secnum * 512; |
894 | *kaddr = (void *) (dev_info->start + offset); | 895 | addr = (void *) (dev_info->start + offset); |
895 | *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; | 896 | *pfn = virt_to_phys(addr) >> PAGE_SHIFT; |
897 | *kaddr = (void __pmem *) addr; | ||
896 | 898 | ||
897 | return dev_sz - offset; | 899 | return dev_sz - offset; |
898 | } | 900 | } |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 31e8576cbaab..f6c336b05d5b 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -100,12 +100,7 @@ static int asd_map_memio(struct asd_ha_struct *asd_ha) | |||
100 | pci_name(asd_ha->pcidev)); | 100 | pci_name(asd_ha->pcidev)); |
101 | goto Err; | 101 | goto Err; |
102 | } | 102 | } |
103 | if (io_handle->flags & IORESOURCE_CACHEABLE) | 103 | io_handle->addr = ioremap(io_handle->start, io_handle->len); |
104 | io_handle->addr = ioremap(io_handle->start, | ||
105 | io_handle->len); | ||
106 | else | ||
107 | io_handle->addr = ioremap_nocache(io_handle->start, | ||
108 | io_handle->len); | ||
109 | if (!io_handle->addr) { | 104 | if (!io_handle->addr) { |
110 | asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, | 105 | asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, |
111 | pci_name(asd_ha->pcidev)); | 106 | pci_name(asd_ha->pcidev)); |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 6ac74fb4ea9a..333db5953607 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -259,10 +259,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb) | |||
259 | addr = (unsigned long)pci_resource_start(pdev, 0); | 259 | addr = (unsigned long)pci_resource_start(pdev, 0); |
260 | range = pci_resource_len(pdev, 0); | 260 | range = pci_resource_len(pdev, 0); |
261 | flags = pci_resource_flags(pdev, 0); | 261 | flags = pci_resource_flags(pdev, 0); |
262 | if (flags & IORESOURCE_CACHEABLE) | 262 | mem_base0 = ioremap(addr, range); |
263 | mem_base0 = ioremap(addr, range); | ||
264 | else | ||
265 | mem_base0 = ioremap_nocache(addr, range); | ||
266 | if (!mem_base0) { | 263 | if (!mem_base0) { |
267 | pr_notice("arcmsr%d: memory mapping region fail\n", | 264 | pr_notice("arcmsr%d: memory mapping region fail\n", |
268 | acb->host->host_no); | 265 | acb->host->host_no); |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index f466a6aa8830..e2d555c1bffc 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -324,13 +324,9 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) | |||
324 | goto err_out; | 324 | goto err_out; |
325 | 325 | ||
326 | res_flag_ex = pci_resource_flags(pdev, bar_ex); | 326 | res_flag_ex = pci_resource_flags(pdev, bar_ex); |
327 | if (res_flag_ex & IORESOURCE_MEM) { | 327 | if (res_flag_ex & IORESOURCE_MEM) |
328 | if (res_flag_ex & IORESOURCE_CACHEABLE) | 328 | mvi->regs_ex = ioremap(res_start, res_len); |
329 | mvi->regs_ex = ioremap(res_start, res_len); | 329 | else |
330 | else | ||
331 | mvi->regs_ex = ioremap_nocache(res_start, | ||
332 | res_len); | ||
333 | } else | ||
334 | mvi->regs_ex = (void *)res_start; | 330 | mvi->regs_ex = (void *)res_start; |
335 | if (!mvi->regs_ex) | 331 | if (!mvi->regs_ex) |
336 | goto err_out; | 332 | goto err_out; |
@@ -345,10 +341,7 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) | |||
345 | } | 341 | } |
346 | 342 | ||
347 | res_flag = pci_resource_flags(pdev, bar); | 343 | res_flag = pci_resource_flags(pdev, bar); |
348 | if (res_flag & IORESOURCE_CACHEABLE) | 344 | mvi->regs = ioremap(res_start, res_len); |
349 | mvi->regs = ioremap(res_start, res_len); | ||
350 | else | ||
351 | mvi->regs = ioremap_nocache(res_start, res_len); | ||
352 | 345 | ||
353 | if (!mvi->regs) { | 346 | if (!mvi->regs) { |
354 | if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) | 347 | if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) |
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index e26e81de7c45..d50c5ed8f428 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c | |||
@@ -12,9 +12,9 @@ | |||
12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
13 | #include <linux/dma-mapping.h> | 13 | #include <linux/dma-mapping.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | ||
15 | 16 | ||
16 | #include <asm/sun3x.h> | 17 | #include <asm/sun3x.h> |
17 | #include <asm/io.h> | ||
18 | #include <asm/dma.h> | 18 | #include <asm/dma.h> |
19 | #include <asm/dvma.h> | 19 | #include <asm/dvma.h> |
20 | 20 | ||
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c index 0768bc42a5db..14ef1f67dd42 100644 --- a/drivers/staging/comedi/drivers/ii_pci20kc.c +++ b/drivers/staging/comedi/drivers/ii_pci20kc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/io.h> | ||
31 | #include "../comedidev.h" | 32 | #include "../comedidev.h" |
32 | 33 | ||
33 | /* | 34 | /* |
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c index 6da7e49a6627..2693c46afdc0 100644 --- a/drivers/staging/unisys/visorbus/visorchannel.c +++ b/drivers/staging/unisys/visorbus/visorchannel.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/uuid.h> | 22 | #include <linux/uuid.h> |
23 | #include <linux/io.h> | ||
23 | 24 | ||
24 | #include "version.h" | 25 | #include "version.h" |
25 | #include "visorbus.h" | 26 | #include "visorbus.h" |
@@ -35,7 +36,7 @@ static const uuid_le spar_video_guid = SPAR_CONSOLEVIDEO_CHANNEL_PROTOCOL_GUID; | |||
35 | struct visorchannel { | 36 | struct visorchannel { |
36 | u64 physaddr; | 37 | u64 physaddr; |
37 | ulong nbytes; | 38 | ulong nbytes; |
38 | void __iomem *mapped; | 39 | void *mapped; |
39 | bool requested; | 40 | bool requested; |
40 | struct channel_header chan_hdr; | 41 | struct channel_header chan_hdr; |
41 | uuid_le guid; | 42 | uuid_le guid; |
@@ -92,7 +93,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, | |||
92 | } | 93 | } |
93 | } | 94 | } |
94 | 95 | ||
95 | channel->mapped = ioremap_cache(physaddr, size); | 96 | channel->mapped = memremap(physaddr, size, MEMREMAP_WB); |
96 | if (!channel->mapped) { | 97 | if (!channel->mapped) { |
97 | release_mem_region(physaddr, size); | 98 | release_mem_region(physaddr, size); |
98 | goto cleanup; | 99 | goto cleanup; |
@@ -112,7 +113,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, | |||
112 | if (uuid_le_cmp(guid, NULL_UUID_LE) == 0) | 113 | if (uuid_le_cmp(guid, NULL_UUID_LE) == 0) |
113 | guid = channel->chan_hdr.chtype; | 114 | guid = channel->chan_hdr.chtype; |
114 | 115 | ||
115 | iounmap(channel->mapped); | 116 | memunmap(channel->mapped); |
116 | if (channel->requested) | 117 | if (channel->requested) |
117 | release_mem_region(channel->physaddr, channel->nbytes); | 118 | release_mem_region(channel->physaddr, channel->nbytes); |
118 | channel->mapped = NULL; | 119 | channel->mapped = NULL; |
@@ -125,7 +126,8 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes, | |||
125 | } | 126 | } |
126 | } | 127 | } |
127 | 128 | ||
128 | channel->mapped = ioremap_cache(channel->physaddr, channel_bytes); | 129 | channel->mapped = memremap(channel->physaddr, channel_bytes, |
130 | MEMREMAP_WB); | ||
129 | if (!channel->mapped) { | 131 | if (!channel->mapped) { |
130 | release_mem_region(channel->physaddr, channel_bytes); | 132 | release_mem_region(channel->physaddr, channel_bytes); |
131 | goto cleanup; | 133 | goto cleanup; |
@@ -166,7 +168,7 @@ visorchannel_destroy(struct visorchannel *channel) | |||
166 | if (!channel) | 168 | if (!channel) |
167 | return; | 169 | return; |
168 | if (channel->mapped) { | 170 | if (channel->mapped) { |
169 | iounmap(channel->mapped); | 171 | memunmap(channel->mapped); |
170 | if (channel->requested) | 172 | if (channel->requested) |
171 | release_mem_region(channel->physaddr, channel->nbytes); | 173 | release_mem_region(channel->physaddr, channel->nbytes); |
172 | } | 174 | } |
@@ -240,7 +242,7 @@ visorchannel_read(struct visorchannel *channel, ulong offset, | |||
240 | if (offset + nbytes > channel->nbytes) | 242 | if (offset + nbytes > channel->nbytes) |
241 | return -EIO; | 243 | return -EIO; |
242 | 244 | ||
243 | memcpy_fromio(local, channel->mapped + offset, nbytes); | 245 | memcpy(local, channel->mapped + offset, nbytes); |
244 | 246 | ||
245 | return 0; | 247 | return 0; |
246 | } | 248 | } |
@@ -262,7 +264,7 @@ visorchannel_write(struct visorchannel *channel, ulong offset, | |||
262 | local, copy_size); | 264 | local, copy_size); |
263 | } | 265 | } |
264 | 266 | ||
265 | memcpy_toio(channel->mapped + offset, local, nbytes); | 267 | memcpy(channel->mapped + offset, local, nbytes); |
266 | 268 | ||
267 | return 0; | 269 | return 0; |
268 | } | 270 | } |
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c index 4b76cb441ed4..94419c36d2af 100644 --- a/drivers/staging/unisys/visorbus/visorchipset.c +++ b/drivers/staging/unisys/visorbus/visorchipset.c | |||
@@ -118,7 +118,7 @@ static struct visorchannel *controlvm_channel; | |||
118 | 118 | ||
119 | /* Manages the request payload in the controlvm channel */ | 119 | /* Manages the request payload in the controlvm channel */ |
120 | struct visor_controlvm_payload_info { | 120 | struct visor_controlvm_payload_info { |
121 | u8 __iomem *ptr; /* pointer to base address of payload pool */ | 121 | u8 *ptr; /* pointer to base address of payload pool */ |
122 | u64 offset; /* offset from beginning of controlvm | 122 | u64 offset; /* offset from beginning of controlvm |
123 | * channel to beginning of payload * pool */ | 123 | * channel to beginning of payload * pool */ |
124 | u32 bytes; /* number of bytes in payload pool */ | 124 | u32 bytes; /* number of bytes in payload pool */ |
@@ -400,21 +400,22 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry) | |||
400 | p = __va((unsigned long) (addr)); | 400 | p = __va((unsigned long) (addr)); |
401 | memcpy(ctx->data, p, bytes); | 401 | memcpy(ctx->data, p, bytes); |
402 | } else { | 402 | } else { |
403 | void __iomem *mapping; | 403 | void *mapping; |
404 | 404 | ||
405 | if (!request_mem_region(addr, bytes, "visorchipset")) { | 405 | if (!request_mem_region(addr, bytes, "visorchipset")) { |
406 | rc = NULL; | 406 | rc = NULL; |
407 | goto cleanup; | 407 | goto cleanup; |
408 | } | 408 | } |
409 | 409 | ||
410 | mapping = ioremap_cache(addr, bytes); | 410 | mapping = memremap(addr, bytes, MEMREMAP_WB); |
411 | if (!mapping) { | 411 | if (!mapping) { |
412 | release_mem_region(addr, bytes); | 412 | release_mem_region(addr, bytes); |
413 | rc = NULL; | 413 | rc = NULL; |
414 | goto cleanup; | 414 | goto cleanup; |
415 | } | 415 | } |
416 | memcpy_fromio(ctx->data, mapping, bytes); | 416 | memcpy(ctx->data, mapping, bytes); |
417 | release_mem_region(addr, bytes); | 417 | release_mem_region(addr, bytes); |
418 | memunmap(mapping); | ||
418 | } | 419 | } |
419 | 420 | ||
420 | ctx->byte_stream = true; | 421 | ctx->byte_stream = true; |
@@ -1327,7 +1328,7 @@ static int | |||
1327 | initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, | 1328 | initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, |
1328 | struct visor_controlvm_payload_info *info) | 1329 | struct visor_controlvm_payload_info *info) |
1329 | { | 1330 | { |
1330 | u8 __iomem *payload = NULL; | 1331 | u8 *payload = NULL; |
1331 | int rc = CONTROLVM_RESP_SUCCESS; | 1332 | int rc = CONTROLVM_RESP_SUCCESS; |
1332 | 1333 | ||
1333 | if (!info) { | 1334 | if (!info) { |
@@ -1339,7 +1340,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, | |||
1339 | rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; | 1340 | rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID; |
1340 | goto cleanup; | 1341 | goto cleanup; |
1341 | } | 1342 | } |
1342 | payload = ioremap_cache(phys_addr + offset, bytes); | 1343 | payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB); |
1343 | if (!payload) { | 1344 | if (!payload) { |
1344 | rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; | 1345 | rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED; |
1345 | goto cleanup; | 1346 | goto cleanup; |
@@ -1352,7 +1353,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes, | |||
1352 | cleanup: | 1353 | cleanup: |
1353 | if (rc < 0) { | 1354 | if (rc < 0) { |
1354 | if (payload) { | 1355 | if (payload) { |
1355 | iounmap(payload); | 1356 | memunmap(payload); |
1356 | payload = NULL; | 1357 | payload = NULL; |
1357 | } | 1358 | } |
1358 | } | 1359 | } |
@@ -1363,7 +1364,7 @@ static void | |||
1363 | destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info) | 1364 | destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info) |
1364 | { | 1365 | { |
1365 | if (info->ptr) { | 1366 | if (info->ptr) { |
1366 | iounmap(info->ptr); | 1367 | memunmap(info->ptr); |
1367 | info->ptr = NULL; | 1368 | info->ptr = NULL; |
1368 | } | 1369 | } |
1369 | memset(info, 0, sizeof(struct visor_controlvm_payload_info)); | 1370 | memset(info, 0, sizeof(struct visor_controlvm_payload_info)); |
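The visorbus conversion is representative of the memremap() API change as a whole: a write-back mapping of RAM-like memory carries no __iomem qualifier, so plain memcpy() replaces memcpy_fromio()/memcpy_toio(), and memunmap() pairs with memremap(). A minimal usage sketch under those semantics (phys, len, and the helper name are placeholders):

#include <linux/io.h>
#include <linux/string.h>

/*
 * Sketch: map a cacheable RAM region, copy from it with ordinary
 * string ops, then unmap. copy_from_region() is hypothetical.
 */
static int copy_from_region(resource_size_t phys, size_t len, void *dst)
{
	void *va = memremap(phys, len, MEMREMAP_WB);

	if (!va)
		return -ENOMEM;
	memcpy(dst, va, len);	/* no __iomem, so no memcpy_fromio() */
	memunmap(va);
	return 0;
}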
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index cfbb9d728e31..271d12137649 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -36,11 +36,11 @@ | |||
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/uaccess.h> | 37 | #include <linux/uaccess.h> |
38 | #include <linux/pm_runtime.h> | 38 | #include <linux/pm_runtime.h> |
39 | #include <linux/io.h> | ||
39 | #ifdef CONFIG_SPARC | 40 | #ifdef CONFIG_SPARC |
40 | #include <linux/sunserialcore.h> | 41 | #include <linux/sunserialcore.h> |
41 | #endif | 42 | #endif |
42 | 43 | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/irq.h> | 44 | #include <asm/irq.h> |
45 | 45 | ||
46 | #include "8250.h" | 46 | #include "8250.h" |
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c index de9819660ca0..c9293aea8ec3 100644 --- a/drivers/video/fbdev/ocfb.c +++ b/drivers/video/fbdev/ocfb.c | |||
@@ -325,7 +325,6 @@ static int ocfb_probe(struct platform_device *pdev) | |||
325 | dev_err(&pdev->dev, "I/O resource request failed\n"); | 325 | dev_err(&pdev->dev, "I/O resource request failed\n"); |
326 | return -ENXIO; | 326 | return -ENXIO; |
327 | } | 327 | } |
328 | res->flags &= ~IORESOURCE_CACHEABLE; | ||
329 | fbdev->regs = devm_ioremap_resource(&pdev->dev, res); | 328 | fbdev->regs = devm_ioremap_resource(&pdev->dev, res); |
330 | if (IS_ERR(fbdev->regs)) | 329 | if (IS_ERR(fbdev->regs)) |
331 | return PTR_ERR(fbdev->regs); | 330 | return PTR_ERR(fbdev->regs); |
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c index 83433cb0dfba..96aa46dc696c 100644 --- a/drivers/video/fbdev/s1d13xxxfb.c +++ b/drivers/video/fbdev/s1d13xxxfb.c | |||
@@ -32,8 +32,7 @@ | |||
32 | #include <linux/spinlock_types.h> | 32 | #include <linux/spinlock_types.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | #include <linux/io.h> | |
36 | #include <asm/io.h> | ||
37 | 36 | ||
38 | #include <video/s1d13xxxfb.h> | 37 | #include <video/s1d13xxxfb.h> |
39 | 38 | ||
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 735355b0e023..7df4228e25f0 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <linux/fb.h> | 64 | #include <linux/fb.h> |
65 | #include <linux/init.h> | 65 | #include <linux/init.h> |
66 | #include <linux/ioport.h> | 66 | #include <linux/ioport.h> |
67 | #include <linux/io.h> | ||
67 | 68 | ||
68 | #include <asm/grfioctl.h> /* for HP-UX compatibility */ | 69 | #include <asm/grfioctl.h> /* for HP-UX compatibility */ |
69 | #include <asm/uaccess.h> | 70 | #include <asm/uaccess.h> |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 33b813e04f79..f77da0ec0e64 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(bdev_write_page); | |||
441 | * accessible at this address. | 441 | * accessible at this address. |
442 | */ | 442 | */ |
443 | long bdev_direct_access(struct block_device *bdev, sector_t sector, | 443 | long bdev_direct_access(struct block_device *bdev, sector_t sector, |
444 | void **addr, unsigned long *pfn, long size) | 444 | void __pmem **addr, unsigned long *pfn, long size) |
445 | { | 445 | { |
446 | long avail; | 446 | long avail; |
447 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 447 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
@@ -462,7 +462,7 @@ long bdev_direct_access(struct block_device *bdev, sector_t sector, | |||
462 | sector += get_start_sect(bdev); | 462 | sector += get_start_sect(bdev); |
463 | if (sector % (PAGE_SIZE / 512)) | 463 | if (sector % (PAGE_SIZE / 512)) |
464 | return -EINVAL; | 464 | return -EINVAL; |
465 | avail = ops->direct_access(bdev, sector, addr, pfn, size); | 465 | avail = ops->direct_access(bdev, sector, addr, pfn); |
466 | if (!avail) | 466 | if (!avail) |
467 | return -ERANGE; | 467 | return -ERANGE; |
468 | return min(avail, size); | 468 | return min(avail, size); |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/memcontrol.h> | 23 | #include <linux/memcontrol.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/mutex.h> | 25 | #include <linux/mutex.h> |
26 | #include <linux/pmem.h> | ||
26 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
27 | #include <linux/uio.h> | 28 | #include <linux/uio.h> |
28 | #include <linux/vmstat.h> | 29 | #include <linux/vmstat.h> |
@@ -34,7 +35,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size) | |||
34 | 35 | ||
35 | might_sleep(); | 36 | might_sleep(); |
36 | do { | 37 | do { |
37 | void *addr; | 38 | void __pmem *addr; |
38 | unsigned long pfn; | 39 | unsigned long pfn; |
39 | long count; | 40 | long count; |
40 | 41 | ||
@@ -46,10 +47,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size) | |||
46 | unsigned pgsz = PAGE_SIZE - offset_in_page(addr); | 47 | unsigned pgsz = PAGE_SIZE - offset_in_page(addr); |
47 | if (pgsz > count) | 48 | if (pgsz > count) |
48 | pgsz = count; | 49 | pgsz = count; |
49 | if (pgsz < PAGE_SIZE) | 50 | clear_pmem(addr, pgsz); |
50 | memset(addr, 0, pgsz); | ||
51 | else | ||
52 | clear_page(addr); | ||
53 | addr += pgsz; | 51 | addr += pgsz; |
54 | size -= pgsz; | 52 | size -= pgsz; |
55 | count -= pgsz; | 53 | count -= pgsz; |
@@ -59,26 +57,29 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size) | |||
59 | } | 57 | } |
60 | } while (size); | 58 | } while (size); |
61 | 59 | ||
60 | wmb_pmem(); | ||
62 | return 0; | 61 | return 0; |
63 | } | 62 | } |
64 | EXPORT_SYMBOL_GPL(dax_clear_blocks); | 63 | EXPORT_SYMBOL_GPL(dax_clear_blocks); |
65 | 64 | ||
66 | static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits) | 65 | static long dax_get_addr(struct buffer_head *bh, void __pmem **addr, |
66 | unsigned blkbits) | ||
67 | { | 67 | { |
68 | unsigned long pfn; | 68 | unsigned long pfn; |
69 | sector_t sector = bh->b_blocknr << (blkbits - 9); | 69 | sector_t sector = bh->b_blocknr << (blkbits - 9); |
70 | return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size); | 70 | return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos, | 73 | /* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */ |
74 | loff_t end) | 74 | static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first, |
75 | loff_t pos, loff_t end) | ||
75 | { | 76 | { |
76 | loff_t final = end - pos + first; /* The final byte of the buffer */ | 77 | loff_t final = end - pos + first; /* The final byte of the buffer */ |
77 | 78 | ||
78 | if (first > 0) | 79 | if (first > 0) |
79 | memset(addr, 0, first); | 80 | clear_pmem(addr, first); |
80 | if (final < size) | 81 | if (final < size) |
81 | memset(addr + final, 0, size - final); | 82 | clear_pmem(addr + final, size - final); |
82 | } | 83 | } |
83 | 84 | ||
84 | static bool buffer_written(struct buffer_head *bh) | 85 | static bool buffer_written(struct buffer_head *bh) |
@@ -106,14 +107,15 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, | |||
106 | loff_t pos = start; | 107 | loff_t pos = start; |
107 | loff_t max = start; | 108 | loff_t max = start; |
108 | loff_t bh_max = start; | 109 | loff_t bh_max = start; |
109 | void *addr; | 110 | void __pmem *addr; |
110 | bool hole = false; | 111 | bool hole = false; |
112 | bool need_wmb = false; | ||
111 | 113 | ||
112 | if (iov_iter_rw(iter) != WRITE) | 114 | if (iov_iter_rw(iter) != WRITE) |
113 | end = min(end, i_size_read(inode)); | 115 | end = min(end, i_size_read(inode)); |
114 | 116 | ||
115 | while (pos < end) { | 117 | while (pos < end) { |
116 | unsigned len; | 118 | size_t len; |
117 | if (pos == max) { | 119 | if (pos == max) { |
118 | unsigned blkbits = inode->i_blkbits; | 120 | unsigned blkbits = inode->i_blkbits; |
119 | sector_t block = pos >> blkbits; | 121 | sector_t block = pos >> blkbits; |
@@ -145,19 +147,23 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, | |||
145 | retval = dax_get_addr(bh, &addr, blkbits); | 147 | retval = dax_get_addr(bh, &addr, blkbits); |
146 | if (retval < 0) | 148 | if (retval < 0) |
147 | break; | 149 | break; |
148 | if (buffer_unwritten(bh) || buffer_new(bh)) | 150 | if (buffer_unwritten(bh) || buffer_new(bh)) { |
149 | dax_new_buf(addr, retval, first, pos, | 151 | dax_new_buf(addr, retval, first, pos, |
150 | end); | 152 | end); |
153 | need_wmb = true; | ||
154 | } | ||
151 | addr += first; | 155 | addr += first; |
152 | size = retval - first; | 156 | size = retval - first; |
153 | } | 157 | } |
154 | max = min(pos + size, end); | 158 | max = min(pos + size, end); |
155 | } | 159 | } |
156 | 160 | ||
157 | if (iov_iter_rw(iter) == WRITE) | 161 | if (iov_iter_rw(iter) == WRITE) { |
158 | len = copy_from_iter_nocache(addr, max - pos, iter); | 162 | len = copy_from_iter_pmem(addr, max - pos, iter); |
159 | else if (!hole) | 163 | need_wmb = true; |
160 | len = copy_to_iter(addr, max - pos, iter); | 164 | } else if (!hole) |
165 | len = copy_to_iter((void __force *)addr, max - pos, | ||
166 | iter); | ||
161 | else | 167 | else |
162 | len = iov_iter_zero(max - pos, iter); | 168 | len = iov_iter_zero(max - pos, iter); |
163 | 169 | ||
@@ -168,6 +174,9 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, | |||
168 | addr += len; | 174 | addr += len; |
169 | } | 175 | } |
170 | 176 | ||
177 | if (need_wmb) | ||
178 | wmb_pmem(); | ||
179 | |||
171 | return (pos == start) ? retval : pos - start; | 180 | return (pos == start) ? retval : pos - start; |
172 | } | 181 | } |
173 | 182 | ||
@@ -260,11 +269,13 @@ static int dax_load_hole(struct address_space *mapping, struct page *page, | |||
260 | static int copy_user_bh(struct page *to, struct buffer_head *bh, | 269 | static int copy_user_bh(struct page *to, struct buffer_head *bh, |
261 | unsigned blkbits, unsigned long vaddr) | 270 | unsigned blkbits, unsigned long vaddr) |
262 | { | 271 | { |
263 | void *vfrom, *vto; | 272 | void __pmem *vfrom; |
273 | void *vto; | ||
274 | |||
264 | if (dax_get_addr(bh, &vfrom, blkbits) < 0) | 275 | if (dax_get_addr(bh, &vfrom, blkbits) < 0) |
265 | return -EIO; | 276 | return -EIO; |
266 | vto = kmap_atomic(to); | 277 | vto = kmap_atomic(to); |
267 | copy_user_page(vto, vfrom, vaddr, to); | 278 | copy_user_page(vto, (void __force *)vfrom, vaddr, to); |
268 | kunmap_atomic(vto); | 279 | kunmap_atomic(vto); |
269 | return 0; | 280 | return 0; |
270 | } | 281 | } |
@@ -275,7 +286,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, | |||
275 | struct address_space *mapping = inode->i_mapping; | 286 | struct address_space *mapping = inode->i_mapping; |
276 | sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); | 287 | sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); |
277 | unsigned long vaddr = (unsigned long)vmf->virtual_address; | 288 | unsigned long vaddr = (unsigned long)vmf->virtual_address; |
278 | void *addr; | 289 | void __pmem *addr; |
279 | unsigned long pfn; | 290 | unsigned long pfn; |
280 | pgoff_t size; | 291 | pgoff_t size; |
281 | int error; | 292 | int error; |
@@ -303,8 +314,10 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, | |||
303 | goto out; | 314 | goto out; |
304 | } | 315 | } |
305 | 316 | ||
306 | if (buffer_unwritten(bh) || buffer_new(bh)) | 317 | if (buffer_unwritten(bh) || buffer_new(bh)) { |
307 | clear_page(addr); | 318 | clear_pmem(addr, PAGE_SIZE); |
319 | wmb_pmem(); | ||
320 | } | ||
308 | 321 | ||
309 | error = vm_insert_mixed(vma, vaddr, pfn); | 322 | error = vm_insert_mixed(vma, vaddr, pfn); |
310 | 323 | ||
@@ -548,11 +561,12 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, | |||
548 | if (err < 0) | 561 | if (err < 0) |
549 | return err; | 562 | return err; |
550 | if (buffer_written(&bh)) { | 563 | if (buffer_written(&bh)) { |
551 | void *addr; | 564 | void __pmem *addr; |
552 | err = dax_get_addr(&bh, &addr, inode->i_blkbits); | 565 | err = dax_get_addr(&bh, &addr, inode->i_blkbits); |
553 | if (err < 0) | 566 | if (err < 0) |
554 | return err; | 567 | return err; |
555 | memset(addr + offset, 0, length); | 568 | clear_pmem(addr + offset, length); |
569 | wmb_pmem(); | ||
556 | } | 570 | } |
557 | 571 | ||
558 | return 0; | 572 | return 0; |
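The dax_io() and dax_insert_mapping() changes follow a single ordering discipline: stores to a __pmem mapping go through the flushing primitives (copy_from_iter_pmem(), clear_pmem()), and one wmb_pmem() fence at the end of the operation makes the whole batch durable, tracked by the need_wmb flag above. A condensed sketch of the write side, assuming the PMEM API calls shown in this hunk (pmem_write_batch() is an illustrative name):

/*
 * Sketch: batch stores to persistent memory, then fence once, as the
 * need_wmb bookkeeping in dax_io() does. pmem_write_batch() is
 * hypothetical; copy_from_iter_pmem()/wmb_pmem() are from the hunk.
 */
static size_t pmem_write_batch(void __pmem *dst, size_t bytes,
		struct iov_iter *iter)
{
	size_t copied = copy_from_iter_pmem(dst, bytes, iter);

	wmb_pmem();	/* make the copied range durable */
	return copied;
}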
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 14909b0b9cae..f20f407ce45d 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h | |||
@@ -69,6 +69,12 @@ | |||
69 | }) | 69 | }) |
70 | #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ | 70 | #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ |
71 | 71 | ||
72 | /* | ||
73 | * Convert a physical address to a Page Frame Number and back | ||
74 | */ | ||
75 | #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) | ||
76 | #define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) | ||
77 | |||
72 | #define page_to_pfn __page_to_pfn | 78 | #define page_to_pfn __page_to_pfn |
73 | #define pfn_to_page __pfn_to_page | 79 | #define pfn_to_page __pfn_to_page |
74 | 80 | ||
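The new __phys_to_pfn()/__pfn_to_phys() helpers are plain shifts by PAGE_SHIFT, so with 4K pages (PAGE_SHIFT == 12) physical address 0x1000 is pfn 1, and the conversion round-trips for page-aligned addresses. A trivial, purely illustrative check:

/*
 * Sketch: round-trip a page-aligned physical address through the new
 * helpers. pfn_helpers_example() is hypothetical.
 */
static void pfn_helpers_example(void)
{
	phys_addr_t paddr = 0x1000;	/* page aligned, PAGE_SHIFT == 12 */
	unsigned long pfn = __phys_to_pfn(paddr);	/* == 1 */

	WARN_ON(__pfn_to_phys(pfn) != paddr);
}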
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index a622f270f09e..708923b9b623 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -1569,8 +1569,8 @@ struct block_device_operations { | |||
1569 | int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); | 1569 | int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); |
1570 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1570 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
1571 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1571 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
1572 | long (*direct_access)(struct block_device *, sector_t, | 1572 | long (*direct_access)(struct block_device *, sector_t, void __pmem **, |
1573 | void **, unsigned long *pfn, long size); | 1573 | unsigned long *pfn); |
1574 | unsigned int (*check_events) (struct gendisk *disk, | 1574 | unsigned int (*check_events) (struct gendisk *disk, |
1575 | unsigned int clearing); | 1575 | unsigned int clearing); |
1576 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ | 1576 | /* ->media_changed() is DEPRECATED, use ->check_events() instead */ |
@@ -1588,8 +1588,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | |||
1588 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); | 1588 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); |
1589 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, | 1589 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, |
1590 | struct writeback_control *); | 1590 | struct writeback_control *); |
1591 | extern long bdev_direct_access(struct block_device *, sector_t, void **addr, | 1591 | extern long bdev_direct_access(struct block_device *, sector_t, |
1592 | unsigned long *pfn, long size); | 1592 | void __pmem **addr, unsigned long *pfn, long size); |
1593 | #else /* CONFIG_BLOCK */ | 1593 | #else /* CONFIG_BLOCK */ |
1594 | 1594 | ||
1595 | struct block_device; | 1595 | struct block_device; |
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index c27dde7215b5..e399029b68c5 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/bug.h> | 23 | #include <linux/bug.h> |
24 | #include <asm/io.h> | 24 | #include <linux/io.h> |
25 | #include <asm/page.h> | 25 | #include <asm/page.h> |
26 | 26 | ||
27 | /* | 27 | /* |
diff --git a/include/linux/io.h b/include/linux/io.h index fb5a99800e77..de64c1e53612 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -20,10 +20,13 @@ | |||
20 | 20 | ||
21 | #include <linux/types.h> | 21 | #include <linux/types.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bug.h> | ||
24 | #include <linux/err.h> | ||
23 | #include <asm/io.h> | 25 | #include <asm/io.h> |
24 | #include <asm/page.h> | 26 | #include <asm/page.h> |
25 | 27 | ||
26 | struct device; | 28 | struct device; |
29 | struct resource; | ||
27 | 30 | ||
28 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); | 31 | __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); |
29 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); | 32 | void __iowrite64_copy(void __iomem *to, const void *from, size_t count); |
@@ -80,6 +83,27 @@ int check_signature(const volatile void __iomem *io_addr, | |||
80 | const unsigned char *signature, int length); | 83 | const unsigned char *signature, int length); |
81 | void devm_ioremap_release(struct device *dev, void *res); | 84 | void devm_ioremap_release(struct device *dev, void *res); |
82 | 85 | ||
86 | void *devm_memremap(struct device *dev, resource_size_t offset, | ||
87 | size_t size, unsigned long flags); | ||
88 | void devm_memunmap(struct device *dev, void *addr); | ||
89 | |||
90 | void *__devm_memremap_pages(struct device *dev, struct resource *res); | ||
91 | |||
92 | #ifdef CONFIG_ZONE_DEVICE | ||
93 | void *devm_memremap_pages(struct device *dev, struct resource *res); | ||
94 | #else | ||
95 | static inline void *devm_memremap_pages(struct device *dev, struct resource *res) | ||
96 | { | ||
97 | /* | ||
98 | * Fail attempts to call devm_memremap_pages() without | ||
99 | * ZONE_DEVICE support enabled; this requires callers to fall | ||
100 | * back to plain devm_memremap() based on config | ||
101 | */ | ||
102 | WARN_ON_ONCE(1); | ||
103 | return ERR_PTR(-ENXIO); | ||
104 | } | ||
105 | #endif | ||
106 | |||
83 | /* | 107 | /* |
84 | * Some systems do not have legacy ISA devices. | 108 | * Some systems do not have legacy ISA devices. |
85 | * /dev/port is not a valid interface on these systems. | 109 | * /dev/port is not a valid interface on these systems. |
@@ -121,4 +145,13 @@ static inline int arch_phys_wc_index(int handle) | |||
121 | #endif | 145 | #endif |
122 | #endif | 146 | #endif |
123 | 147 | ||
148 | enum { | ||
149 | /* See memremap() kernel-doc for usage description... */ | ||
150 | MEMREMAP_WB = 1 << 0, | ||
151 | MEMREMAP_WT = 1 << 1, | ||
152 | }; | ||
153 | |||
154 | void *memremap(resource_size_t offset, size_t size, unsigned long flags); | ||
155 | void memunmap(void *addr); | ||
156 | |||
124 | #endif /* _LINUX_IO_H */ | 157 | #endif /* _LINUX_IO_H */ |
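The WARN_ON_ONCE() stub above dictates a fallback pattern for callers built without ZONE_DEVICE. A minimal sketch of that pattern, assuming a hypothetical driver that prefers page-backed mappings:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* Sketch: try for 'struct page'-backed memory first; without
 * CONFIG_ZONE_DEVICE the stub returns ERR_PTR(-ENXIO) and we fall
 * back to a plain cacheable mapping. 'want_pages' is hypothetical. */
static void *map_device_memory(struct device *dev, struct resource *res,
		bool want_pages)
{
	if (want_pages) {
		void *addr = devm_memremap_pages(dev, res);

		if (!IS_ERR(addr))
			return addr;
	}
	return devm_memremap(dev, res->start, resource_size(res),
			MEMREMAP_WB);
}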
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 75e3af01ee32..3f021dc5da8c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
@@ -31,6 +31,9 @@ enum { | |||
31 | ND_CMD_ARS_STATUS_MAX = SZ_4K, | 31 | ND_CMD_ARS_STATUS_MAX = SZ_4K, |
32 | ND_MAX_MAPPINGS = 32, | 32 | ND_MAX_MAPPINGS = 32, |
33 | 33 | ||
34 | /* region flag requesting that persistent memory be direct-mapped by default */ | ||
35 | ND_REGION_PAGEMAP = 0, | ||
36 | |||
34 | /* mark newly adjusted resources as requiring a label update */ | 37 | /* mark newly adjusted resources as requiring a label update */ |
35 | DPA_RESOURCE_ADJUSTED = 1 << 0, | 38 | DPA_RESOURCE_ADJUSTED = 1 << 0, |
36 | }; | 39 | }; |
@@ -91,6 +94,7 @@ struct nd_region_desc { | |||
91 | void *provider_data; | 94 | void *provider_data; |
92 | int num_lanes; | 95 | int num_lanes; |
93 | int numa_node; | 96 | int numa_node; |
97 | unsigned long flags; | ||
94 | }; | 98 | }; |
95 | 99 | ||
96 | struct nvdimm_bus; | 100 | struct nvdimm_bus; |
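ND_REGION_PAGEMAP is a bit number within the new nd_region_desc.flags word, so a provider opts a region into direct-map ('struct page') semantics with set_bit(). A hypothetical registration sketch; nvdimm_pmem_region_create() is libnvdimm's existing registration entry point, the surrounding function is illustrative:

#include <linux/libnvdimm.h>

static struct nd_region *register_pagemap_region(struct nvdimm_bus *bus,
		struct resource *res)
{
	struct nd_region_desc ndr_desc = {
		.res = res,
		.numa_node = NUMA_NO_NODE,
	};

	/* ask for the region to be direct-mapped by default */
	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
	return nvdimm_pmem_region_create(bus, &ndr_desc);
}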
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 6ffa0ac7f7d6..8f60e899b33c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -266,8 +266,9 @@ static inline void remove_memory(int nid, u64 start, u64 size) {} | |||
266 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | 266 | extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, |
267 | void *arg, int (*func)(struct memory_block *, void *)); | 267 | void *arg, int (*func)(struct memory_block *, void *)); |
268 | extern int add_memory(int nid, u64 start, u64 size); | 268 | extern int add_memory(int nid, u64 start, u64 size); |
269 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default); | 269 | extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default, |
270 | extern int arch_add_memory(int nid, u64 start, u64 size); | 270 | bool for_device); |
271 | extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device); | ||
271 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); | 272 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); |
272 | extern bool is_memblock_offlined(struct memory_block *mem); | 273 | extern bool is_memblock_offlined(struct memory_block *mem); |
273 | extern void remove_memory(int nid, u64 start, u64 size); | 274 | extern void remove_memory(int nid, u64 start, u64 size); |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 8b257c43855b..1171a292e06e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -372,7 +372,14 @@ static inline int put_page_unless_one(struct page *page) | |||
372 | } | 372 | } |
373 | 373 | ||
374 | extern int page_is_ram(unsigned long pfn); | 374 | extern int page_is_ram(unsigned long pfn); |
375 | extern int region_is_ram(resource_size_t phys_addr, unsigned long size); | 375 | |
376 | enum { | ||
377 | REGION_INTERSECTS, | ||
378 | REGION_DISJOINT, | ||
379 | REGION_MIXED, | ||
380 | }; | ||
381 | |||
382 | int region_intersects(resource_size_t offset, size_t size, const char *type); | ||
376 | 383 | ||
377 | /* Support for virtually mapped pages */ | 384 | /* Support for virtually mapped pages */ |
378 | struct page *vmalloc_to_page(const void *addr); | 385 | struct page *vmalloc_to_page(const void *addr); |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ac00e2050943..d94347737292 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -319,7 +319,11 @@ enum zone_type { | |||
319 | ZONE_HIGHMEM, | 319 | ZONE_HIGHMEM, |
320 | #endif | 320 | #endif |
321 | ZONE_MOVABLE, | 321 | ZONE_MOVABLE, |
322 | #ifdef CONFIG_ZONE_DEVICE | ||
323 | ZONE_DEVICE, | ||
324 | #endif | ||
322 | __MAX_NR_ZONES | 325 | __MAX_NR_ZONES |
326 | |||
323 | }; | 327 | }; |
324 | 328 | ||
325 | #ifndef __GENERATING_BOUNDS_H | 329 | #ifndef __GENERATING_BOUNDS_H |
@@ -786,6 +790,25 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) | |||
786 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; | 790 | return !pgdat->node_start_pfn && !pgdat->node_spanned_pages; |
787 | } | 791 | } |
788 | 792 | ||
793 | static inline int zone_id(const struct zone *zone) | ||
794 | { | ||
795 | struct pglist_data *pgdat = zone->zone_pgdat; | ||
796 | |||
797 | return zone - pgdat->node_zones; | ||
798 | } | ||
799 | |||
800 | #ifdef CONFIG_ZONE_DEVICE | ||
801 | static inline bool is_dev_zone(const struct zone *zone) | ||
802 | { | ||
803 | return zone_id(zone) == ZONE_DEVICE; | ||
804 | } | ||
805 | #else | ||
806 | static inline bool is_dev_zone(const struct zone *zone) | ||
807 | { | ||
808 | return false; | ||
809 | } | ||
810 | #endif | ||
811 | |||
789 | #include <linux/memory_hotplug.h> | 812 | #include <linux/memory_hotplug.h> |
790 | 813 | ||
791 | extern struct mutex zonelists_mutex; | 814 | extern struct mutex zonelists_mutex; |
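is_dev_zone() gives generic mm code a cheap predicate for driver-owned memory; the memory-hotremove change later in this pull is the first user. A sketch of the typical shape of such a check, with the walker itself hypothetical:

#include <linux/mmzone.h>

/* Sketch: visit only RAM-backed zones of a node, skipping ZONE_DEVICE
 * zones whose pages belong to a device driver. */
static void walk_ram_zones(pg_data_t *pgdat)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (!populated_zone(zone) || is_dev_zone(zone))
			continue;
		/* ... operate on a page-allocator managed zone ... */
	}
}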
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 29975c73a953..366cf77953b5 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -27,9 +27,9 @@ | |||
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/bug.h> | 28 | #include <linux/bug.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/io.h> | ||
30 | 31 | ||
31 | #include <asm/unaligned.h> | 32 | #include <asm/unaligned.h> |
32 | #include <asm/io.h> | ||
33 | #include <asm/barrier.h> | 33 | #include <asm/barrier.h> |
34 | 34 | ||
35 | #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 | 35 | #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 |
diff --git a/include/linux/pmem.h b/include/linux/pmem.h index d2114045a6c4..85f810b33917 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h | |||
@@ -14,28 +14,42 @@ | |||
14 | #define __PMEM_H__ | 14 | #define __PMEM_H__ |
15 | 15 | ||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | #include <linux/uio.h> | ||
17 | 18 | ||
18 | #ifdef CONFIG_ARCH_HAS_PMEM_API | 19 | #ifdef CONFIG_ARCH_HAS_PMEM_API |
19 | #include <asm/cacheflush.h> | 20 | #define ARCH_MEMREMAP_PMEM MEMREMAP_WB |
21 | #include <asm/pmem.h> | ||
20 | #else | 22 | #else |
23 | #define ARCH_MEMREMAP_PMEM MEMREMAP_WT | ||
24 | /* | ||
25 | * These are simply here to enable compilation; all call sites gate | ||
26 | * calling these symbols with arch_has_pmem_api() and redirect to the | ||
27 | * implementation in asm/pmem.h. | ||
28 | */ | ||
29 | static inline bool __arch_has_wmb_pmem(void) | ||
30 | { | ||
31 | return false; | ||
32 | } | ||
33 | |||
21 | static inline void arch_wmb_pmem(void) | 34 | static inline void arch_wmb_pmem(void) |
22 | { | 35 | { |
23 | BUG(); | 36 | BUG(); |
24 | } | 37 | } |
25 | 38 | ||
26 | static inline bool __arch_has_wmb_pmem(void) | 39 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, |
40 | size_t n) | ||
27 | { | 41 | { |
28 | return false; | 42 | BUG(); |
29 | } | 43 | } |
30 | 44 | ||
31 | static inline void __pmem *arch_memremap_pmem(resource_size_t offset, | 45 | static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, |
32 | unsigned long size) | 46 | struct iov_iter *i) |
33 | { | 47 | { |
34 | return NULL; | 48 | BUG(); |
49 | return 0; | ||
35 | } | 50 | } |
36 | 51 | ||
37 | static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | 52 | static inline void arch_clear_pmem(void __pmem *addr, size_t size) |
38 | size_t n) | ||
39 | { | 53 | { |
40 | BUG(); | 54 | BUG(); |
41 | } | 55 | } |
@@ -43,18 +57,22 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, | |||
43 | 57 | ||
44 | /* | 58 | /* |
45 | * Architectures that define ARCH_HAS_PMEM_API must provide | 59 | * Architectures that define ARCH_HAS_PMEM_API must provide |
46 | * implementations for arch_memremap_pmem(), arch_memcpy_to_pmem(), | 60 | * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), |
47 | * arch_wmb_pmem(), and __arch_has_wmb_pmem(). | 61 | * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem(). |
48 | */ | 62 | */ |
49 | |||
50 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) | 63 | static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size) |
51 | { | 64 | { |
52 | memcpy(dst, (void __force const *) src, size); | 65 | memcpy(dst, (void __force const *) src, size); |
53 | } | 66 | } |
54 | 67 | ||
55 | static inline void memunmap_pmem(void __pmem *addr) | 68 | static inline void memunmap_pmem(struct device *dev, void __pmem *addr) |
69 | { | ||
70 | devm_memunmap(dev, (void __force *) addr); | ||
71 | } | ||
72 | |||
73 | static inline bool arch_has_pmem_api(void) | ||
56 | { | 74 | { |
57 | iounmap((void __force __iomem *) addr); | 75 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); |
58 | } | 76 | } |
59 | 77 | ||
60 | /** | 78 | /** |
@@ -68,14 +86,7 @@ static inline void memunmap_pmem(void __pmem *addr) | |||
68 | */ | 86 | */ |
69 | static inline bool arch_has_wmb_pmem(void) | 87 | static inline bool arch_has_wmb_pmem(void) |
70 | { | 88 | { |
71 | if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)) | 89 | return arch_has_pmem_api() && __arch_has_wmb_pmem(); |
72 | return __arch_has_wmb_pmem(); | ||
73 | return false; | ||
74 | } | ||
75 | |||
76 | static inline bool arch_has_pmem_api(void) | ||
77 | { | ||
78 | return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem(); | ||
79 | } | 90 | } |
80 | 91 | ||
81 | /* | 92 | /* |
@@ -85,16 +96,24 @@ static inline bool arch_has_pmem_api(void) | |||
85 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for | 96 | * default_memremap_pmem + default_memcpy_to_pmem is sufficient for |
86 | * making data durable relative to i/o completion. | 97 | * making data durable relative to i/o completion. |
87 | */ | 98 | */ |
88 | static void default_memcpy_to_pmem(void __pmem *dst, const void *src, | 99 | static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, |
89 | size_t size) | 100 | size_t size) |
90 | { | 101 | { |
91 | memcpy((void __force *) dst, src, size); | 102 | memcpy((void __force *) dst, src, size); |
92 | } | 103 | } |
93 | 104 | ||
94 | static void __pmem *default_memremap_pmem(resource_size_t offset, | 105 | static inline size_t default_copy_from_iter_pmem(void __pmem *addr, |
95 | unsigned long size) | 106 | size_t bytes, struct iov_iter *i) |
107 | { | ||
108 | return copy_from_iter_nocache((void __force *)addr, bytes, i); | ||
109 | } | ||
110 | |||
111 | static inline void default_clear_pmem(void __pmem *addr, size_t size) | ||
96 | { | 112 | { |
97 | return (void __pmem __force *)ioremap_wt(offset, size); | 113 | if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0) |
114 | clear_page((void __force *)addr); | ||
115 | else | ||
116 | memset((void __force *)addr, 0, size); | ||
98 | } | 117 | } |
99 | 118 | ||
100 | /** | 119 | /** |
@@ -109,12 +128,11 @@ static void __pmem *default_memremap_pmem(resource_size_t offset, | |||
109 | * wmb_pmem() arrange for the data to be written through the | 128 | * wmb_pmem() arrange for the data to be written through the |
110 | * cache to persistent media. | 129 | * cache to persistent media. |
111 | */ | 130 | */ |
112 | static inline void __pmem *memremap_pmem(resource_size_t offset, | 131 | static inline void __pmem *memremap_pmem(struct device *dev, |
113 | unsigned long size) | 132 | resource_size_t offset, unsigned long size) |
114 | { | 133 | { |
115 | if (arch_has_pmem_api()) | 134 | return (void __pmem *) devm_memremap(dev, offset, size, |
116 | return arch_memremap_pmem(offset, size); | 135 | ARCH_MEMREMAP_PMEM); |
117 | return default_memremap_pmem(offset, size); | ||
118 | } | 136 | } |
119 | 137 | ||
120 | /** | 138 | /** |
@@ -146,7 +164,42 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) | |||
146 | */ | 164 | */ |
147 | static inline void wmb_pmem(void) | 165 | static inline void wmb_pmem(void) |
148 | { | 166 | { |
149 | if (arch_has_pmem_api()) | 167 | if (arch_has_wmb_pmem()) |
150 | arch_wmb_pmem(); | 168 | arch_wmb_pmem(); |
169 | else | ||
170 | wmb(); | ||
171 | } | ||
172 | |||
173 | /** | ||
174 | * copy_from_iter_pmem - copy data from an iterator to PMEM | ||
175 | * @addr: PMEM destination address | ||
176 | * @bytes: number of bytes to copy | ||
177 | * @i: iterator with source data | ||
178 | * | ||
179 | * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. | ||
180 | * This function requires explicit ordering with a wmb_pmem() call. | ||
181 | */ | ||
182 | static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, | ||
183 | struct iov_iter *i) | ||
184 | { | ||
185 | if (arch_has_pmem_api()) | ||
186 | return arch_copy_from_iter_pmem(addr, bytes, i); | ||
187 | return default_copy_from_iter_pmem(addr, bytes, i); | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * clear_pmem - zero a PMEM memory range | ||
192 | * @addr: virtual start address | ||
193 | * @size: number of bytes to zero | ||
194 | * | ||
195 | * Write zeros into the memory range starting at 'addr' for 'size' bytes. | ||
196 | * This function requires explicit ordering with a wmb_pmem() call. | ||
197 | */ | ||
198 | static inline void clear_pmem(void __pmem *addr, size_t size) | ||
199 | { | ||
200 | if (arch_has_pmem_api()) | ||
201 | arch_clear_pmem(addr, size); | ||
202 | else | ||
203 | default_clear_pmem(addr, size); | ||
151 | } | 204 | } |
152 | #endif /* __PMEM_H__ */ | 205 | #endif /* __PMEM_H__ */ |
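Taken together, the helpers above form a stage-then-fence API: memcpy_to_pmem(), copy_from_iter_pmem() and clear_pmem() issue the stores, and a single wmb_pmem() makes everything since the previous fence durable. A minimal sketch, assuming a hypothetical page-sized slot with len <= PAGE_SIZE:

#include <linux/pmem.h>

/* Sketch: write 'len' bytes into a persistent-memory slot, zero the
 * remainder of the page, then fence once for durability. */
static void pmem_update_slot(void __pmem *slot, const void *buf, size_t len)
{
	memcpy_to_pmem(slot, buf, len);
	clear_pmem(slot + len, PAGE_SIZE - len);
	wmb_pmem();	/* both stores are durable once this returns */
}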
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 2b94ea2287bb..5b4a4be06e2b 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h | |||
@@ -87,7 +87,7 @@ struct nd_cmd_ars_status { | |||
87 | __u32 handle; | 87 | __u32 handle; |
88 | __u32 flags; | 88 | __u32 flags; |
89 | __u64 err_address; | 89 | __u64 err_address; |
90 | __u64 mask; | 90 | __u64 length; |
91 | } __packed records[0]; | 91 | } __packed records[0]; |
92 | } __packed; | 92 | } __packed; |
93 | 93 | ||
@@ -111,6 +111,11 @@ enum { | |||
111 | ND_CMD_VENDOR = 9, | 111 | ND_CMD_VENDOR = 9, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | enum { | ||
115 | ND_ARS_VOLATILE = 1, | ||
116 | ND_ARS_PERSISTENT = 2, | ||
117 | }; | ||
118 | |||
114 | static inline const char *nvdimm_bus_cmd_name(unsigned cmd) | 119 | static inline const char *nvdimm_bus_cmd_name(unsigned cmd) |
115 | { | 120 | { |
116 | static const char * const names[] = { | 121 | static const char * const names[] = { |
@@ -194,4 +199,9 @@ enum nd_driver_flags { | |||
194 | enum { | 199 | enum { |
195 | ND_MIN_NAMESPACE_SIZE = 0x00400000, | 200 | ND_MIN_NAMESPACE_SIZE = 0x00400000, |
196 | }; | 201 | }; |
202 | |||
203 | enum ars_masks { | ||
204 | ARS_STATUS_MASK = 0x0000FFFF, | ||
205 | ARS_EXT_STATUS_SHIFT = 16, | ||
206 | }; | ||
197 | #endif /* __NDCTL_H__ */ | 207 | #endif /* __NDCTL_H__ */ |
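The ars_masks enum encodes a split status word: the low 16 bits carry the command status and the high 16 bits the extended status, which for ND_CMD_ARS_CAP advertises the supported ND_ARS_* scrub types (the nfit test stub later in this section fills it exactly that way). A decoding sketch over a hypothetical response:

#include <linux/ndctl.h>

/* Sketch: does this ARS_CAP response report persistent-memory scrub? */
static bool ars_cap_supports_pmem(const struct nd_cmd_ars_cap *cap)
{
	if (cap->status & ARS_STATUS_MASK)
		return false;	/* the command itself failed */
	return (cap->status >> ARS_EXT_STATUS_SHIFT) & ND_ARS_PERSISTENT;
}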
diff --git a/include/video/vga.h b/include/video/vga.h index cac567f22e62..d334e64c1c19 100644 --- a/include/video/vga.h +++ b/include/video/vga.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #define __linux_video_vga_h__ | 18 | #define __linux_video_vga_h__ |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <asm/io.h> | 21 | #include <linux/io.h> |
22 | #include <asm/vga.h> | 22 | #include <asm/vga.h> |
23 | #include <asm/byteorder.h> | 23 | #include <asm/byteorder.h> |
24 | 24 | ||
diff --git a/kernel/Makefile b/kernel/Makefile index d25ebea0453a..e0d7587e7684 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -99,6 +99,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o | |||
99 | obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o | 99 | obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o |
100 | obj-$(CONFIG_TORTURE_TEST) += torture.o | 100 | obj-$(CONFIG_TORTURE_TEST) += torture.o |
101 | 101 | ||
102 | obj-$(CONFIG_HAS_IOMEM) += memremap.o | ||
103 | |||
102 | $(obj)/configs.o: $(obj)/config_data.h | 104 | $(obj)/configs.o: $(obj)/config_data.h |
103 | 105 | ||
104 | # config_data.h contains the same information as ikconfig.h but gzipped. | 106 | # config_data.h contains the same information as ikconfig.h but gzipped. |
diff --git a/kernel/memremap.c b/kernel/memremap.c new file mode 100644 index 000000000000..72b0c66628b6 --- /dev/null +++ b/kernel/memremap.c | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2015 Intel Corporation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of version 2 of the GNU General Public License as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | */ | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/memory_hotplug.h> | ||
18 | |||
19 | #ifndef ioremap_cache | ||
20 | /* temporary while we convert existing ioremap_cache users to memremap */ | ||
21 | __weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size) | ||
22 | { | ||
23 | return ioremap(offset, size); | ||
24 | } | ||
25 | #endif | ||
26 | |||
27 | /** | ||
28 | * memremap() - remap an iomem_resource as cacheable memory | ||
29 | * @offset: iomem resource start address | ||
30 | * @size: size of remap | ||
31 | * @flags: either MEMREMAP_WB or MEMREMAP_WT | ||
32 | * | ||
33 | * memremap() is "ioremap" for cases where it is known that the resource | ||
34 | * being mapped does not have i/o side effects and the __iomem | ||
35 | * annotation is not applicable. | ||
36 | * | ||
37 | * MEMREMAP_WB - matches the default mapping for "System RAM" on | ||
38 | * the architecture. This is usually a read-allocate write-back cache. | ||
39 | * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM, | ||
40 | * memremap() will bypass establishing a new mapping and instead return | ||
41 | * a pointer into the direct map. | ||
42 | * | ||
43 | * MEMREMAP_WT - establish a mapping whereby writes either bypass the | ||
44 | * cache or are written through to memory and never exist in a | ||
45 | * cache-dirty state with respect to program visibility. Attempts to | ||
46 | * map "System RAM" with this mapping type will fail. | ||
47 | */ | ||
48 | void *memremap(resource_size_t offset, size_t size, unsigned long flags) | ||
49 | { | ||
50 | int is_ram = region_intersects(offset, size, "System RAM"); | ||
51 | void *addr = NULL; | ||
52 | |||
53 | if (is_ram == REGION_MIXED) { | ||
54 | WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n", | ||
55 | &offset, (unsigned long) size); | ||
56 | return NULL; | ||
57 | } | ||
58 | |||
59 | /* Try all mapping types requested until one returns non-NULL */ | ||
60 | if (flags & MEMREMAP_WB) { | ||
61 | flags &= ~MEMREMAP_WB; | ||
62 | /* | ||
63 | * MEMREMAP_WB is special in that it can be satisfied | ||
64 | * from the direct map. Some archs depend on the | ||
65 | * capability of memremap() to autodetect cases where | ||
66 | * the requested range is potentially in "System RAM" | ||
67 | */ | ||
68 | if (is_ram == REGION_INTERSECTS) | ||
69 | addr = __va(offset); | ||
70 | else | ||
71 | addr = ioremap_cache(offset, size); | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * If we don't have a mapping yet and more request flags are | ||
76 | * pending then we will be attempting to establish a new virtual | ||
77 | * address mapping. Enforce that this mapping is not aliasing | ||
78 | * "System RAM" | ||
79 | */ | ||
80 | if (!addr && is_ram == REGION_INTERSECTS && flags) { | ||
81 | WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n", | ||
82 | &offset, (unsigned long) size); | ||
83 | return NULL; | ||
84 | } | ||
85 | |||
86 | if (!addr && (flags & MEMREMAP_WT)) { | ||
87 | flags &= ~MEMREMAP_WT; | ||
88 | addr = ioremap_wt(offset, size); | ||
89 | } | ||
90 | |||
91 | return addr; | ||
92 | } | ||
93 | EXPORT_SYMBOL(memremap); | ||
94 | |||
95 | void memunmap(void *addr) | ||
96 | { | ||
97 | if (is_vmalloc_addr(addr)) | ||
98 | iounmap((void __iomem *) addr); | ||
99 | } | ||
100 | EXPORT_SYMBOL(memunmap); | ||
101 | |||
102 | static void devm_memremap_release(struct device *dev, void *res) | ||
103 | { | ||
104 | memunmap(res); | ||
105 | } | ||
106 | |||
107 | static int devm_memremap_match(struct device *dev, void *res, void *match_data) | ||
108 | { | ||
109 | return *(void **)res == match_data; | ||
110 | } | ||
111 | |||
112 | void *devm_memremap(struct device *dev, resource_size_t offset, | ||
113 | size_t size, unsigned long flags) | ||
114 | { | ||
115 | void **ptr, *addr; | ||
116 | |||
117 | ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL); | ||
118 | if (!ptr) | ||
119 | return NULL; | ||
120 | |||
121 | addr = memremap(offset, size, flags); | ||
122 | if (addr) { | ||
123 | *ptr = addr; | ||
124 | devres_add(dev, ptr); | ||
125 | } else | ||
126 | devres_free(ptr); | ||
127 | |||
128 | return addr; | ||
129 | } | ||
130 | EXPORT_SYMBOL(devm_memremap); | ||
131 | |||
132 | void devm_memunmap(struct device *dev, void *addr) | ||
133 | { | ||
134 | WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match, | ||
135 | addr)); | ||
136 | memunmap(addr); | ||
137 | } | ||
138 | EXPORT_SYMBOL(devm_memunmap); | ||
139 | |||
140 | #ifdef CONFIG_ZONE_DEVICE | ||
141 | struct page_map { | ||
142 | struct resource res; | ||
143 | }; | ||
144 | |||
145 | static void devm_memremap_pages_release(struct device *dev, void *res) | ||
146 | { | ||
147 | struct page_map *page_map = res; | ||
148 | |||
149 | /* pages are dead and unused, undo the arch mapping */ | ||
150 | arch_remove_memory(page_map->res.start, resource_size(&page_map->res)); | ||
151 | } | ||
152 | |||
153 | void *devm_memremap_pages(struct device *dev, struct resource *res) | ||
154 | { | ||
155 | int is_ram = region_intersects(res->start, resource_size(res), | ||
156 | "System RAM"); | ||
157 | struct page_map *page_map; | ||
158 | int error, nid; | ||
159 | |||
160 | if (is_ram == REGION_MIXED) { | ||
161 | WARN_ONCE(1, "%s attempted on mixed region %pr\n", | ||
162 | __func__, res); | ||
163 | return ERR_PTR(-ENXIO); | ||
164 | } | ||
165 | |||
166 | if (is_ram == REGION_INTERSECTS) | ||
167 | return __va(res->start); | ||
168 | |||
169 | page_map = devres_alloc(devm_memremap_pages_release, | ||
170 | sizeof(*page_map), GFP_KERNEL); | ||
171 | if (!page_map) | ||
172 | return ERR_PTR(-ENOMEM); | ||
173 | |||
174 | memcpy(&page_map->res, res, sizeof(*res)); | ||
175 | |||
176 | nid = dev_to_node(dev); | ||
177 | if (nid < 0) | ||
178 | nid = 0; | ||
179 | |||
180 | error = arch_add_memory(nid, res->start, resource_size(res), true); | ||
181 | if (error) { | ||
182 | devres_free(page_map); | ||
183 | return ERR_PTR(error); | ||
184 | } | ||
185 | |||
186 | devres_add(dev, page_map); | ||
187 | return __va(res->start); | ||
188 | } | ||
189 | EXPORT_SYMBOL(devm_memremap_pages); | ||
190 | #endif /* CONFIG_ZONE_DEVICE */ | ||
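A probe-time sketch for devm_memremap_pages(): one call both maps the device range and hot-adds memmap coverage for it, so pfn_to_page() becomes valid across the range, and both are undone on driver detach. The driver and resource names are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static int example_probe(struct device *dev, struct resource *res)
{
	void *addr = devm_memremap_pages(dev, res);

	if (IS_ERR(addr))
		return PTR_ERR(addr);	/* -ENXIO without ZONE_DEVICE */

	/* every pfn in [res->start, res->end] now has a struct page */
	return 0;
}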
diff --git a/kernel/resource.c b/kernel/resource.c index fed052a1bc9f..f150dbbe6f62 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -492,40 +492,51 @@ int __weak page_is_ram(unsigned long pfn) | |||
492 | } | 492 | } |
493 | EXPORT_SYMBOL_GPL(page_is_ram); | 493 | EXPORT_SYMBOL_GPL(page_is_ram); |
494 | 494 | ||
495 | /* | 495 | /** |
496 | * Search for a resouce entry that fully contains the specified region. | 496 | * region_intersects() - determine intersection of region with known resources |
497 | * If found, return 1 if it is RAM, 0 if not. | 497 | * @start: region start address |
498 | * If not found, or region is not fully contained, return -1 | 498 | * @size: size of region |
499 | * @name: name of resource (in iomem_resource) | ||
499 | * | 500 | * |
500 | * Used by the ioremap functions to ensure the user is not remapping RAM and is | 501 | * Check if the specified region partially overlaps or fully eclipses a |
501 | * a vast speed up over walking through the resource table page by page. | 502 | * resource identified by @name. Return REGION_DISJOINT if the region |
503 | * does not overlap @name, return REGION_MIXED if the region overlaps | ||
504 | * @name and another resource, and return REGION_INTERSECTS if the | ||
505 | * region overlaps @name and no other defined resource. Note that | ||
506 | * REGION_INTERSECTS is also returned in the case when the specified | ||
507 | * region overlaps RAM and undefined memory holes. | ||
508 | * | ||
509 | * region_intersects() is used by memory remapping functions to ensure | ||
510 | * the user is not remapping RAM and is a vast speedup over walking | ||
511 | * through the resource table page by page. | ||
502 | */ | 512 | */ |
503 | int region_is_ram(resource_size_t start, unsigned long size) | 513 | int region_intersects(resource_size_t start, size_t size, const char *name) |
504 | { | 514 | { |
505 | struct resource *p; | ||
506 | resource_size_t end = start + size - 1; | ||
507 | unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 515 | unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
508 | const char *name = "System RAM"; | 516 | resource_size_t end = start + size - 1; |
509 | int ret = -1; | 517 | int type = 0, other = 0; |
518 | struct resource *p; | ||
510 | 519 | ||
511 | read_lock(&resource_lock); | 520 | read_lock(&resource_lock); |
512 | for (p = iomem_resource.child; p ; p = p->sibling) { | 521 | for (p = iomem_resource.child; p ; p = p->sibling) { |
513 | if (p->end < start) | 522 | bool is_type = strcmp(p->name, name) == 0 && p->flags == flags; |
514 | continue; | 523 | |
515 | 524 | if (start >= p->start && start <= p->end) | |
516 | if (p->start <= start && end <= p->end) { | 525 | is_type ? type++ : other++; |
517 | /* resource fully contains region */ | 526 | if (end >= p->start && end <= p->end) |
518 | if ((p->flags != flags) || strcmp(p->name, name)) | 527 | is_type ? type++ : other++; |
519 | ret = 0; | 528 | if (p->start >= start && p->end <= end) |
520 | else | 529 | is_type ? type++ : other++; |
521 | ret = 1; | ||
522 | break; | ||
523 | } | ||
524 | if (end < p->start) | ||
525 | break; /* not found */ | ||
526 | } | 530 | } |
527 | read_unlock(&resource_lock); | 531 | read_unlock(&resource_lock); |
528 | return ret; | 532 | |
533 | if (other == 0) | ||
534 | return type ? REGION_INTERSECTS : REGION_DISJOINT; | ||
535 | |||
536 | if (type) | ||
537 | return REGION_MIXED; | ||
538 | |||
539 | return REGION_DISJOINT; | ||
529 | } | 540 | } |
530 | 541 | ||
531 | void __weak arch_remove_reservations(struct resource *avail) | 542 | void __weak arch_remove_reservations(struct resource *avail) |
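region_intersects() returns a deliberate three-way answer so callers can pick among "use the direct map", "establish a new mapping" and "refuse". A dispatch sketch mirroring memremap() above; the surrounding function is hypothetical:

#include <linux/io.h>
#include <linux/mm.h>

static void *map_range_sketch(resource_size_t offset, size_t size)
{
	switch (region_intersects(offset, size, "System RAM")) {
	case REGION_INTERSECTS:
		return __va(offset);	/* all RAM: reuse the direct map */
	case REGION_DISJOINT:
		return (void __force *) ioremap_cache(offset, size);
	case REGION_MIXED:
	default:
		return NULL;		/* straddles RAM: refuse */
	}
}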
diff --git a/lib/Kconfig b/lib/Kconfig index 8a49ff9d1502..2e491ac15622 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -525,4 +525,7 @@ config ARCH_HAS_SG_CHAIN | |||
525 | config ARCH_HAS_PMEM_API | 525 | config ARCH_HAS_PMEM_API |
526 | bool | 526 | bool |
527 | 527 | ||
528 | config ARCH_HAS_MMIO_FLUSH | ||
529 | bool | ||
530 | |||
528 | endmenu | 531 | endmenu |
diff --git a/lib/devres.c b/lib/devres.c index fbe2aac522e6..f13a2468ff39 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap); | |||
119 | * @dev: generic device to handle the resource for | 119 | * @dev: generic device to handle the resource for |
120 | * @res: resource to be handled | 120 | * @res: resource to be handled |
121 | * | 121 | * |
122 | * Checks that a resource is a valid memory region, requests the memory region | 122 | * Checks that a resource is a valid memory region, requests the memory |
123 | * and ioremaps it either as cacheable or as non-cacheable memory depending on | 123 | * region and ioremaps it. All operations are managed and will be undone |
124 | * the resource's flags. All operations are managed and will be undone on | 124 | * on driver detach. |
125 | * driver detach. | ||
126 | * | 125 | * |
127 | * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code | 126 | * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code |
128 | * on failure. Usage example: | 127 | * on failure. Usage example: |
@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) | |||
153 | return IOMEM_ERR_PTR(-EBUSY); | 152 | return IOMEM_ERR_PTR(-EBUSY); |
154 | } | 153 | } |
155 | 154 | ||
156 | if (res->flags & IORESOURCE_CACHEABLE) | 155 | dest_ptr = devm_ioremap(dev, res->start, size); |
157 | dest_ptr = devm_ioremap(dev, res->start, size); | ||
158 | else | ||
159 | dest_ptr = devm_ioremap_nocache(dev, res->start, size); | ||
160 | |||
161 | if (!dest_ptr) { | 156 | if (!dest_ptr) { |
162 | dev_err(dev, "ioremap failed for resource %pR\n", res); | 157 | dev_err(dev, "ioremap failed for resource %pR\n", res); |
163 | devm_release_mem_region(dev, res->start, size); | 158 | devm_release_mem_region(dev, res->start, size); |
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index 5f5d24d1d53f..c10fba461454 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c | |||
@@ -41,11 +41,8 @@ void __iomem *pci_iomap_range(struct pci_dev *dev, | |||
41 | len = maxlen; | 41 | len = maxlen; |
42 | if (flags & IORESOURCE_IO) | 42 | if (flags & IORESOURCE_IO) |
43 | return __pci_ioport_map(dev, start, len); | 43 | return __pci_ioport_map(dev, start, len); |
44 | if (flags & IORESOURCE_MEM) { | 44 | if (flags & IORESOURCE_MEM) |
45 | if (flags & IORESOURCE_CACHEABLE) | 45 | return ioremap(start, len); |
46 | return ioremap(start, len); | ||
47 | return ioremap_nocache(start, len); | ||
48 | } | ||
49 | /* What? */ | 46 | /* What? */ |
50 | return NULL; | 47 | return NULL; |
51 | } | 48 | } |
diff --git a/mm/Kconfig b/mm/Kconfig index d4e6495a720f..3a4070f5ab79 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -648,3 +648,20 @@ config DEFERRED_STRUCT_PAGE_INIT | |||
648 | when kswapd starts. This has a potential performance impact on | 648 | when kswapd starts. This has a potential performance impact on |
649 | processes running early in the lifetime of the system until kswapd | 649 | processes running early in the lifetime of the system until kswapd |
650 | finishes the initialisation. | 650 | finishes the initialisation. |
651 | |||
652 | config ZONE_DEVICE | ||
653 | bool "Device memory (pmem, etc...) hotplug support" if EXPERT | ||
654 | default !ZONE_DMA | ||
655 | depends on !ZONE_DMA | ||
656 | depends on MEMORY_HOTPLUG | ||
657 | depends on MEMORY_HOTREMOVE | ||
658 | depends on X86_64 #arch_add_memory() comprehends device memory | ||
659 | |||
660 | help | ||
661 | Device memory hotplug support allows for establishing pmem, | ||
662 | or other device-driver-discovered memory regions, in the | ||
663 | memmap. This allows pfn_to_page() lookups of otherwise | ||
664 | "device-physical" addresses which is needed for using a DAX | ||
665 | mapping in an O_DIRECT operation, among other things. | ||
666 | |||
667 | If FS_DAX is enabled, then say Y. | ||
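The pfn_to_page() point in the help text, as a short sketch: 'pfn' is hypothetical and assumed to come from a pmem driver's ->direct_access() on a ZONE_DEVICE-backed namespace.

#include <linux/mm.h>

static void *pmem_pfn_to_kaddr(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);	/* valid once the memmap covers the pfn */

	return page_address(page);	/* pmem lives in the direct map */
}

Without ZONE_DEVICE such a pfn has no struct page behind it, which is what previously blocked O_DIRECT on a DAX mapping.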
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8fd97dac538a..aa992e2df58a 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -778,7 +778,10 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
778 | 778 | ||
779 | start = phys_start_pfn << PAGE_SHIFT; | 779 | start = phys_start_pfn << PAGE_SHIFT; |
780 | size = nr_pages * PAGE_SIZE; | 780 | size = nr_pages * PAGE_SIZE; |
781 | ret = release_mem_region_adjustable(&iomem_resource, start, size); | 781 | |
782 | /* in the ZONE_DEVICE case the device driver owns the memory region */ | ||
783 | if (!is_dev_zone(zone)) | ||
784 | ret = release_mem_region_adjustable(&iomem_resource, start, size); | ||
782 | if (ret) { | 785 | if (ret) { |
783 | resource_size_t endres = start + size - 1; | 786 | resource_size_t endres = start + size - 1; |
784 | 787 | ||
@@ -1215,8 +1218,13 @@ static int should_add_memory_movable(int nid, u64 start, u64 size) | |||
1215 | return 0; | 1218 | return 0; |
1216 | } | 1219 | } |
1217 | 1220 | ||
1218 | int zone_for_memory(int nid, u64 start, u64 size, int zone_default) | 1221 | int zone_for_memory(int nid, u64 start, u64 size, int zone_default, |
1222 | bool for_device) | ||
1219 | { | 1223 | { |
1224 | #ifdef CONFIG_ZONE_DEVICE | ||
1225 | if (for_device) | ||
1226 | return ZONE_DEVICE; | ||
1227 | #endif | ||
1220 | if (should_add_memory_movable(nid, start, size)) | 1228 | if (should_add_memory_movable(nid, start, size)) |
1221 | return ZONE_MOVABLE; | 1229 | return ZONE_MOVABLE; |
1222 | 1230 | ||
@@ -1265,7 +1273,7 @@ int __ref add_memory(int nid, u64 start, u64 size) | |||
1265 | } | 1273 | } |
1266 | 1274 | ||
1267 | /* call arch's memory hotadd */ | 1275 | /* call arch's memory hotadd */ |
1268 | ret = arch_add_memory(nid, start, size); | 1276 | ret = arch_add_memory(nid, start, size, false); |
1269 | 1277 | ||
1270 | if (ret < 0) | 1278 | if (ret < 0) |
1271 | goto error; | 1279 | goto error; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b5240b7f642..b401d40cb4fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -206,6 +206,9 @@ static char * const zone_names[MAX_NR_ZONES] = { | |||
206 | "HighMem", | 206 | "HighMem", |
207 | #endif | 207 | #endif |
208 | "Movable", | 208 | "Movable", |
209 | #ifdef CONFIG_ZONE_DEVICE | ||
210 | "Device", | ||
211 | #endif | ||
209 | }; | 212 | }; |
210 | 213 | ||
211 | int min_free_kbytes = 1024; | 214 | int min_free_kbytes = 1024; |
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild index f56914c7929b..38b00ecb2ed5 100644 --- a/tools/testing/nvdimm/Kbuild +++ b/tools/testing/nvdimm/Kbuild | |||
@@ -1,9 +1,12 @@ | |||
1 | ldflags-y += --wrap=ioremap_wt | ||
2 | ldflags-y += --wrap=ioremap_wc | 1 | ldflags-y += --wrap=ioremap_wc |
2 | ldflags-y += --wrap=memremap | ||
3 | ldflags-y += --wrap=devm_ioremap_nocache | 3 | ldflags-y += --wrap=devm_ioremap_nocache |
4 | ldflags-y += --wrap=ioremap_cache | 4 | ldflags-y += --wrap=devm_memremap |
5 | ldflags-y += --wrap=devm_memunmap | ||
5 | ldflags-y += --wrap=ioremap_nocache | 6 | ldflags-y += --wrap=ioremap_nocache |
6 | ldflags-y += --wrap=iounmap | 7 | ldflags-y += --wrap=iounmap |
8 | ldflags-y += --wrap=memunmap | ||
9 | ldflags-y += --wrap=__devm_request_region | ||
7 | ldflags-y += --wrap=__request_region | 10 | ldflags-y += --wrap=__request_region |
8 | ldflags-y += --wrap=__release_region | 11 | ldflags-y += --wrap=__release_region |
9 | 12 | ||
@@ -15,6 +18,7 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o | |||
15 | obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o | 18 | obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o |
16 | obj-$(CONFIG_ND_BTT) += nd_btt.o | 19 | obj-$(CONFIG_ND_BTT) += nd_btt.o |
17 | obj-$(CONFIG_ND_BLK) += nd_blk.o | 20 | obj-$(CONFIG_ND_BLK) += nd_blk.o |
21 | obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o | ||
18 | obj-$(CONFIG_ACPI_NFIT) += nfit.o | 22 | obj-$(CONFIG_ACPI_NFIT) += nfit.o |
19 | 23 | ||
20 | nfit-y := $(ACPI_SRC)/nfit.o | 24 | nfit-y := $(ACPI_SRC)/nfit.o |
@@ -29,6 +33,9 @@ nd_btt-y += config_check.o | |||
29 | nd_blk-y := $(NVDIMM_SRC)/blk.o | 33 | nd_blk-y := $(NVDIMM_SRC)/blk.o |
30 | nd_blk-y += config_check.o | 34 | nd_blk-y += config_check.o |
31 | 35 | ||
36 | nd_e820-y := $(NVDIMM_SRC)/e820.o | ||
37 | nd_e820-y += config_check.o | ||
38 | |||
32 | libnvdimm-y := $(NVDIMM_SRC)/core.o | 39 | libnvdimm-y := $(NVDIMM_SRC)/core.o |
33 | libnvdimm-y += $(NVDIMM_SRC)/bus.o | 40 | libnvdimm-y += $(NVDIMM_SRC)/bus.o |
34 | libnvdimm-y += $(NVDIMM_SRC)/dimm_devs.o | 41 | libnvdimm-y += $(NVDIMM_SRC)/dimm_devs.o |
@@ -37,7 +44,9 @@ libnvdimm-y += $(NVDIMM_SRC)/region_devs.o | |||
37 | libnvdimm-y += $(NVDIMM_SRC)/region.o | 44 | libnvdimm-y += $(NVDIMM_SRC)/region.o |
38 | libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o | 45 | libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o |
39 | libnvdimm-y += $(NVDIMM_SRC)/label.o | 46 | libnvdimm-y += $(NVDIMM_SRC)/label.o |
47 | libnvdimm-$(CONFIG_ND_CLAIM) += $(NVDIMM_SRC)/claim.o | ||
40 | libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o | 48 | libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o |
49 | libnvdimm-$(CONFIG_NVDIMM_PFN) += $(NVDIMM_SRC)/pfn_devs.o | ||
41 | libnvdimm-y += config_check.o | 50 | libnvdimm-y += config_check.o |
42 | 51 | ||
43 | obj-m += test/ | 52 | obj-m += test/ |
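The ldflags-y += --wrap=SYM lines lean on GNU ld symbol wrapping: undefined references to SYM in the wrapped objects resolve to __wrap_SYM, while __real_SYM still reaches the original definition. A generic sketch of the mechanism (test/iomap.c, next, follows this shape for the nvdimm test resources):

#include <linux/types.h>

void *__real_memremap(resource_size_t offset, size_t size,
		unsigned long flags);

void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	/* intercept test-owned ranges here, then forward the rest */
	return __real_memremap(offset, size, flags);
}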
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index 64bfaa50831c..b7251314bbc0 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c | |||
@@ -80,23 +80,52 @@ void __iomem *__wrap_devm_ioremap_nocache(struct device *dev, | |||
80 | } | 80 | } |
81 | EXPORT_SYMBOL(__wrap_devm_ioremap_nocache); | 81 | EXPORT_SYMBOL(__wrap_devm_ioremap_nocache); |
82 | 82 | ||
83 | void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size) | 83 | void *__wrap_devm_memremap(struct device *dev, resource_size_t offset, |
84 | size_t size, unsigned long flags) | ||
84 | { | 85 | { |
85 | return __nfit_test_ioremap(offset, size, ioremap_cache); | 86 | struct nfit_test_resource *nfit_res; |
87 | |||
88 | rcu_read_lock(); | ||
89 | nfit_res = get_nfit_res(offset); | ||
90 | rcu_read_unlock(); | ||
91 | if (nfit_res) | ||
92 | return nfit_res->buf + offset - nfit_res->res->start; | ||
93 | return devm_memremap(dev, offset, size, flags); | ||
86 | } | 94 | } |
87 | EXPORT_SYMBOL(__wrap_ioremap_cache); | 95 | EXPORT_SYMBOL(__wrap_devm_memremap); |
88 | 96 | ||
89 | void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size) | 97 | void *__wrap_memremap(resource_size_t offset, size_t size, |
98 | unsigned long flags) | ||
90 | { | 99 | { |
91 | return __nfit_test_ioremap(offset, size, ioremap_nocache); | 100 | struct nfit_test_resource *nfit_res; |
101 | |||
102 | rcu_read_lock(); | ||
103 | nfit_res = get_nfit_res(offset); | ||
104 | rcu_read_unlock(); | ||
105 | if (nfit_res) | ||
106 | return nfit_res->buf + offset - nfit_res->res->start; | ||
107 | return memremap(offset, size, flags); | ||
92 | } | 108 | } |
93 | EXPORT_SYMBOL(__wrap_ioremap_nocache); | 109 | EXPORT_SYMBOL(__wrap_memremap); |
110 | |||
111 | void __wrap_devm_memunmap(struct device *dev, void *addr) | ||
112 | { | ||
113 | struct nfit_test_resource *nfit_res; | ||
114 | |||
115 | rcu_read_lock(); | ||
116 | nfit_res = get_nfit_res((unsigned long) addr); | ||
117 | rcu_read_unlock(); | ||
118 | if (nfit_res) | ||
119 | return; | ||
120 | return devm_memunmap(dev, addr); | ||
121 | } | ||
122 | EXPORT_SYMBOL(__wrap_devm_memunmap); | ||
94 | 123 | ||
95 | void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size) | 124 | void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size) |
96 | { | 125 | { |
97 | return __nfit_test_ioremap(offset, size, ioremap_wt); | 126 | return __nfit_test_ioremap(offset, size, ioremap_nocache); |
98 | } | 127 | } |
99 | EXPORT_SYMBOL(__wrap_ioremap_wt); | 128 | EXPORT_SYMBOL(__wrap_ioremap_nocache); |
100 | 129 | ||
101 | void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size) | 130 | void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size) |
102 | { | 131 | { |
@@ -117,9 +146,22 @@ void __wrap_iounmap(volatile void __iomem *addr) | |||
117 | } | 146 | } |
118 | EXPORT_SYMBOL(__wrap_iounmap); | 147 | EXPORT_SYMBOL(__wrap_iounmap); |
119 | 148 | ||
120 | struct resource *__wrap___request_region(struct resource *parent, | 149 | void __wrap_memunmap(void *addr) |
121 | resource_size_t start, resource_size_t n, const char *name, | 150 | { |
122 | int flags) | 151 | struct nfit_test_resource *nfit_res; |
152 | |||
153 | rcu_read_lock(); | ||
154 | nfit_res = get_nfit_res((unsigned long) addr); | ||
155 | rcu_read_unlock(); | ||
156 | if (nfit_res) | ||
157 | return; | ||
158 | return memunmap(addr); | ||
159 | } | ||
160 | EXPORT_SYMBOL(__wrap_memunmap); | ||
161 | |||
162 | static struct resource *nfit_test_request_region(struct device *dev, | ||
163 | struct resource *parent, resource_size_t start, | ||
164 | resource_size_t n, const char *name, int flags) | ||
123 | { | 165 | { |
124 | struct nfit_test_resource *nfit_res; | 166 | struct nfit_test_resource *nfit_res; |
125 | 167 | ||
@@ -147,10 +189,29 @@ struct resource *__wrap___request_region(struct resource *parent, | |||
147 | return res; | 189 | return res; |
148 | } | 190 | } |
149 | } | 191 | } |
192 | if (dev) | ||
193 | return __devm_request_region(dev, parent, start, n, name); | ||
150 | return __request_region(parent, start, n, name, flags); | 194 | return __request_region(parent, start, n, name, flags); |
151 | } | 195 | } |
196 | |||
197 | struct resource *__wrap___request_region(struct resource *parent, | ||
198 | resource_size_t start, resource_size_t n, const char *name, | ||
199 | int flags) | ||
200 | { | ||
201 | return nfit_test_request_region(NULL, parent, start, n, name, flags); | ||
202 | } | ||
152 | EXPORT_SYMBOL(__wrap___request_region); | 203 | EXPORT_SYMBOL(__wrap___request_region); |
153 | 204 | ||
205 | struct resource *__wrap___devm_request_region(struct device *dev, | ||
206 | struct resource *parent, resource_size_t start, | ||
207 | resource_size_t n, const char *name) | ||
208 | { | ||
209 | if (!dev) | ||
210 | return NULL; | ||
211 | return nfit_test_request_region(dev, parent, start, n, name, 0); | ||
212 | } | ||
213 | EXPORT_SYMBOL(__wrap___devm_request_region); | ||
214 | |||
154 | void __wrap___release_region(struct resource *parent, resource_size_t start, | 215 | void __wrap___release_region(struct resource *parent, resource_size_t start, |
155 | resource_size_t n) | 216 | resource_size_t n) |
156 | { | 217 | { |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index d0bdae40ccc9..021e6f97f33e 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -147,75 +147,153 @@ static struct nfit_test *to_nfit_test(struct device *dev) | |||
147 | return container_of(pdev, struct nfit_test, pdev); | 147 | return container_of(pdev, struct nfit_test, pdev); |
148 | } | 148 | } |
149 | 149 | ||
150 | static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd, | ||
151 | unsigned int buf_len) | ||
152 | { | ||
153 | if (buf_len < sizeof(*nd_cmd)) | ||
154 | return -EINVAL; | ||
155 | |||
156 | nd_cmd->status = 0; | ||
157 | nd_cmd->config_size = LABEL_SIZE; | ||
158 | nd_cmd->max_xfer = SZ_4K; | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr | ||
164 | *nd_cmd, unsigned int buf_len, void *label) | ||
165 | { | ||
166 | unsigned int len, offset = nd_cmd->in_offset; | ||
167 | int rc; | ||
168 | |||
169 | if (buf_len < sizeof(*nd_cmd)) | ||
170 | return -EINVAL; | ||
171 | if (offset >= LABEL_SIZE) | ||
172 | return -EINVAL; | ||
173 | if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len) | ||
174 | return -EINVAL; | ||
175 | |||
176 | nd_cmd->status = 0; | ||
177 | len = min(nd_cmd->in_length, LABEL_SIZE - offset); | ||
178 | memcpy(nd_cmd->out_buf, label + offset, len); | ||
179 | rc = buf_len - sizeof(*nd_cmd) - len; | ||
180 | |||
181 | return rc; | ||
182 | } | ||
183 | |||
184 | static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd, | ||
185 | unsigned int buf_len, void *label) | ||
186 | { | ||
187 | unsigned int len, offset = nd_cmd->in_offset; | ||
188 | u32 *status; | ||
189 | int rc; | ||
190 | |||
191 | if (buf_len < sizeof(*nd_cmd)) | ||
192 | return -EINVAL; | ||
193 | if (offset >= LABEL_SIZE) | ||
194 | return -EINVAL; | ||
195 | if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len) | ||
196 | return -EINVAL; | ||
197 | |||
198 | status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd); | ||
199 | *status = 0; | ||
200 | len = min(nd_cmd->in_length, LABEL_SIZE - offset); | ||
201 | memcpy(label + offset, nd_cmd->in_buf, len); | ||
202 | rc = buf_len - sizeof(*nd_cmd) - (len + 4); | ||
203 | |||
204 | return rc; | ||
205 | } | ||
206 | |||
207 | static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, | ||
208 | unsigned int buf_len) | ||
209 | { | ||
210 | if (buf_len < sizeof(*nd_cmd)) | ||
211 | return -EINVAL; | ||
212 | |||
213 | nd_cmd->max_ars_out = 256; | ||
214 | nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd, | ||
220 | unsigned int buf_len) | ||
221 | { | ||
222 | if (buf_len < sizeof(*nd_cmd)) | ||
223 | return -EINVAL; | ||
224 | |||
225 | nd_cmd->status = 0; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd, | ||
231 | unsigned int buf_len) | ||
232 | { | ||
233 | if (buf_len < sizeof(*nd_cmd)) | ||
234 | return -EINVAL; | ||
235 | |||
236 | nd_cmd->out_length = 256; | ||
237 | nd_cmd->num_records = 0; | ||
238 | nd_cmd->status = 0; | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | |||
150 | static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, | 243 | static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, |
151 | struct nvdimm *nvdimm, unsigned int cmd, void *buf, | 244 | struct nvdimm *nvdimm, unsigned int cmd, void *buf, |
152 | unsigned int buf_len) | 245 | unsigned int buf_len) |
153 | { | 246 | { |
154 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | 247 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
155 | struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc); | 248 | struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc); |
156 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); | 249 | int i, rc = 0; |
157 | int i, rc; | ||
158 | 250 | ||
159 | if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask)) | 251 | if (nvdimm) { |
160 | return -ENOTTY; | 252 | struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); |
161 | 253 | ||
162 | /* lookup label space for the given dimm */ | 254 | if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask)) |
163 | for (i = 0; i < ARRAY_SIZE(handle); i++) | 255 | return -ENOTTY; |
164 | if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i]) | 256 | |
257 | /* lookup label space for the given dimm */ | ||
258 | for (i = 0; i < ARRAY_SIZE(handle); i++) | ||
259 | if (__to_nfit_memdev(nfit_mem)->device_handle == | ||
260 | handle[i]) | ||
261 | break; | ||
262 | if (i >= ARRAY_SIZE(handle)) | ||
263 | return -ENXIO; | ||
264 | |||
265 | switch (cmd) { | ||
266 | case ND_CMD_GET_CONFIG_SIZE: | ||
267 | rc = nfit_test_cmd_get_config_size(buf, buf_len); | ||
165 | break; | 268 | break; |
166 | if (i >= ARRAY_SIZE(handle)) | 269 | case ND_CMD_GET_CONFIG_DATA: |
167 | return -ENXIO; | 270 | rc = nfit_test_cmd_get_config_data(buf, buf_len, |
271 | t->label[i]); | ||
272 | break; | ||
273 | case ND_CMD_SET_CONFIG_DATA: | ||
274 | rc = nfit_test_cmd_set_config_data(buf, buf_len, | ||
275 | t->label[i]); | ||
276 | break; | ||
277 | default: | ||
278 | return -ENOTTY; | ||
279 | } | ||
280 | } else { | ||
281 | if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask)) | ||
282 | return -ENOTTY; | ||
168 | 283 | ||
169 | switch (cmd) { | 284 | switch (cmd) { |
170 | case ND_CMD_GET_CONFIG_SIZE: { | 285 | case ND_CMD_ARS_CAP: |
171 | struct nd_cmd_get_config_size *nd_cmd = buf; | 286 | rc = nfit_test_cmd_ars_cap(buf, buf_len); |
172 | 287 | break; | |
173 | if (buf_len < sizeof(*nd_cmd)) | 288 | case ND_CMD_ARS_START: |
174 | return -EINVAL; | 289 | rc = nfit_test_cmd_ars_start(buf, buf_len); |
175 | nd_cmd->status = 0; | 290 | break; |
176 | nd_cmd->config_size = LABEL_SIZE; | 291 | case ND_CMD_ARS_STATUS: |
177 | nd_cmd->max_xfer = SZ_4K; | 292 | rc = nfit_test_cmd_ars_status(buf, buf_len); |
178 | rc = 0; | 293 | break; |
179 | break; | 294 | default: |
180 | } | 295 | return -ENOTTY; |
181 | case ND_CMD_GET_CONFIG_DATA: { | 296 | } |
182 | struct nd_cmd_get_config_data_hdr *nd_cmd = buf; | ||
183 | unsigned int len, offset = nd_cmd->in_offset; | ||
184 | |||
185 | if (buf_len < sizeof(*nd_cmd)) | ||
186 | return -EINVAL; | ||
187 | if (offset >= LABEL_SIZE) | ||
188 | return -EINVAL; | ||
189 | if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len) | ||
190 | return -EINVAL; | ||
191 | |||
192 | nd_cmd->status = 0; | ||
193 | len = min(nd_cmd->in_length, LABEL_SIZE - offset); | ||
194 | memcpy(nd_cmd->out_buf, t->label[i] + offset, len); | ||
195 | rc = buf_len - sizeof(*nd_cmd) - len; | ||
196 | break; | ||
197 | } | ||
198 | case ND_CMD_SET_CONFIG_DATA: { | ||
199 | struct nd_cmd_set_config_hdr *nd_cmd = buf; | ||
200 | unsigned int len, offset = nd_cmd->in_offset; | ||
201 | u32 *status; | ||
202 | |||
203 | if (buf_len < sizeof(*nd_cmd)) | ||
204 | return -EINVAL; | ||
205 | if (offset >= LABEL_SIZE) | ||
206 | return -EINVAL; | ||
207 | if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len) | ||
208 | return -EINVAL; | ||
209 | |||
210 | status = buf + nd_cmd->in_length + sizeof(*nd_cmd); | ||
211 | *status = 0; | ||
212 | len = min(nd_cmd->in_length, LABEL_SIZE - offset); | ||
213 | memcpy(t->label[i] + offset, nd_cmd->in_buf, len); | ||
214 | rc = buf_len - sizeof(*nd_cmd) - (len + 4); | ||
215 | break; | ||
216 | } | ||
217 | default: | ||
218 | return -ENOTTY; | ||
219 | } | 297 | } |
220 | 298 | ||
221 | return rc; | 299 | return rc; |
@@ -876,6 +954,9 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
876 | set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en); | 954 | set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en); |
877 | set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); | 955 | set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); |
878 | set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); | 956 | set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); |
957 | set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en); | ||
958 | set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en); | ||
959 | set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en); | ||
879 | nd_desc = &acpi_desc->nd_desc; | 960 | nd_desc = &acpi_desc->nd_desc; |
880 | nd_desc->ndctl = nfit_test_ctl; | 961 | nd_desc->ndctl = nfit_test_ctl; |
881 | } | 962 | } |
@@ -948,9 +1029,13 @@ static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, | |||
948 | 1029 | ||
949 | lane = nd_region_acquire_lane(nd_region); | 1030 | lane = nd_region_acquire_lane(nd_region); |
950 | if (rw) | 1031 | if (rw) |
951 | memcpy(mmio->base + dpa, iobuf, len); | 1032 | memcpy(mmio->addr.base + dpa, iobuf, len); |
952 | else | 1033 | else { |
953 | memcpy(iobuf, mmio->base + dpa, len); | 1034 | memcpy(iobuf, mmio->addr.base + dpa, len); |
1035 | |||
1036 | /* give us some coverage of the mmio_flush_range() API */ | ||
1037 | mmio_flush_range(mmio->addr.base + dpa, len); | ||
1038 | } | ||
954 | nd_region_release_lane(nd_region, lane); | 1039 | nd_region_release_lane(nd_region, lane); |
955 | 1040 | ||
956 | return 0; | 1041 | return 0; |