author     Dan Williams <dan.j.williams@intel.com>   2016-03-09 20:15:43 -0500
committer  Dan Williams <dan.j.williams@intel.com>   2016-03-09 20:15:43 -0500
commit     489011652a2d5555901def04c24d68874e8ba9a1 (patch)
tree       f4d000e5ae63350016d12e6ccb52b0809bb814df /drivers/nvdimm
parent     59e6473980f321c16299e12db69d1fabc2644a6f (diff)
parent     ff8e92d5d94b99aab39f439d532cba435947dfc0 (diff)
Merge branch 'for-4.6/pfn' into libnvdimm-for-next
Diffstat (limited to 'drivers/nvdimm')
 drivers/nvdimm/blk.c            |  18
 drivers/nvdimm/btt.c            |  19
 drivers/nvdimm/e820.c           |   2
 drivers/nvdimm/namespace_devs.c |   7
 drivers/nvdimm/pfn.h            |  23
 drivers/nvdimm/pfn_devs.c       |  61
 drivers/nvdimm/pmem.c           | 164
7 files changed, 205 insertions(+), 89 deletions(-)
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 91a336ea8c4f..e9ff9229d942 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -31,8 +31,6 @@ struct nd_blk_device {
 	u32 internal_lbasize;
 };
 
-static int nd_blk_major;
-
 static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
 {
 	return blk_dev->nsblk->lbasize - blk_dev->sector_size;
@@ -264,7 +262,6 @@ static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
 	}
 
 	disk->driverfs_dev = &ndns->dev;
-	disk->major = nd_blk_major;
 	disk->first_minor = 0;
 	disk->fops = &nd_blk_fops;
 	disk->private_data = blk_dev;
@@ -358,25 +355,12 @@ static struct nd_device_driver nd_blk_driver = {
 
 static int __init nd_blk_init(void)
 {
-	int rc;
-
-	rc = register_blkdev(0, "nd_blk");
-	if (rc < 0)
-		return rc;
-
-	nd_blk_major = rc;
-	rc = nd_driver_register(&nd_blk_driver);
-
-	if (rc < 0)
-		unregister_blkdev(nd_blk_major, "nd_blk");
-
-	return rc;
+	return nd_driver_register(&nd_blk_driver);
 }
 
 static void __exit nd_blk_exit(void)
 {
 	driver_unregister(&nd_blk_driver.drv);
-	unregister_blkdev(nd_blk_major, "nd_blk");
 }
 
 MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
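Note: blk.c, btt.c, and pmem.c all drop their privately registered block major in this series. The sketch below is illustrative only (example_alloc_disk() is a hypothetical helper, not part of the patch): a gendisk allocated with zero minors and flagged GENHD_FL_EXT_DEVT receives a dynamically allocated extended dev_t (BLOCK_EXT_MAJOR) when add_disk() runs, so the register_blkdev()/unregister_blkdev() bookkeeping in module init/exit becomes unnecessary.

/* Illustrative sketch only; not taken from this patch. */
#include <linux/genhd.h>

static struct gendisk *example_alloc_disk(void)
{
	struct gendisk *disk = alloc_disk(0);	/* zero minors */

	if (!disk)
		return NULL;
	/* add_disk() hands out an extended devt; no disk->major assignment */
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->first_minor = 0;
	return disk;
}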
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index efb2c1ceef98..c32cbb593600 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -31,8 +31,6 @@ enum log_ent_request {
 	LOG_OLD_ENT
 };
 
-static int btt_major;
-
 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
 		void *buf, size_t n)
 {
@@ -1246,7 +1244,6 @@ static int btt_blk_init(struct btt *btt)
 
 	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
 	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
-	btt->btt_disk->major = btt_major;
 	btt->btt_disk->first_minor = 0;
 	btt->btt_disk->fops = &btt_fops;
 	btt->btt_disk->private_data = btt;
@@ -1423,22 +1420,11 @@ EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
 
 static int __init nd_btt_init(void)
 {
-	int rc;
-
-	btt_major = register_blkdev(0, "btt");
-	if (btt_major < 0)
-		return btt_major;
+	int rc = 0;
 
 	debugfs_root = debugfs_create_dir("btt", NULL);
-	if (IS_ERR_OR_NULL(debugfs_root)) {
+	if (IS_ERR_OR_NULL(debugfs_root))
 		rc = -ENXIO;
-		goto err_debugfs;
-	}
-
-	return 0;
-
-err_debugfs:
-	unregister_blkdev(btt_major, "btt");
 
 	return rc;
 }
@@ -1446,7 +1432,6 @@ static int __init nd_btt_init(void)
 static void __exit nd_btt_exit(void)
 {
 	debugfs_remove_recursive(debugfs_root);
-	unregister_blkdev(btt_major, "btt");
 }
 
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index b0045a505dc8..95825b38559a 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
 	for (p = iomem_resource.child; p ; p = p->sibling) {
 		struct nd_region_desc ndr_desc;
 
-		if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+		if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
 			continue;
 
 		memset(&ndr_desc, 0, sizeof(ndr_desc));
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 9edf7eb7d17c..f5cb88601359 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -133,6 +133,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
 bool pmem_should_map_pages(struct device *dev)
 {
 	struct nd_region *nd_region = to_nd_region(dev->parent);
+	struct nd_namespace_io *nsio;
 
 	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
 		return false;
@@ -143,6 +144,12 @@ bool pmem_should_map_pages(struct device *dev)
 	if (is_nd_pfn(dev) || is_nd_btt(dev))
 		return false;
 
+	nsio = to_nd_namespace_io(dev);
+	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
+				IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED)
+		return false;
+
 #ifdef ARCH_MEMREMAP_PMEM
 	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
 #else
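Note: the new pmem_should_map_pages() check above asks the resource tree whether the namespace range is partially backed by "System RAM". The sketch below is my reading of the region_intersects() return codes (the helper name is hypothetical); REGION_MIXED, meaning part of the range is System RAM and part is not, is the case that disqualifies page mapping here.

/* Illustrative sketch only; assumes the ~4.6-era region_intersects() API. */
#include <linux/ioport.h>
#include <linux/mm.h>

static bool range_overlaps_ram_partially(resource_size_t start, size_t size)
{
	int rc = region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
			IORES_DESC_NONE);

	/*
	 * REGION_DISJOINT:   no part of the range is System RAM
	 * REGION_INTERSECTS: the range lies within System RAM
	 * REGION_MIXED:      only part of the range is System RAM
	 */
	return rc == REGION_MIXED;
}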
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index cc243754acef..8e343a3ca873 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -15,6 +15,7 @@
 #define __NVDIMM_PFN_H
 
 #include <linux/types.h>
+#include <linux/mmzone.h>
 
 #define PFN_SIG_LEN 16
 #define PFN_SIG "NVDIMM_PFN_INFO\0"
@@ -26,10 +27,28 @@ struct nd_pfn_sb {
 	__le32 flags;
 	__le16 version_major;
 	__le16 version_minor;
-	__le64 dataoff;
+	__le64 dataoff; /* relative to namespace_base + start_pad */
 	__le64 npfns;
 	__le32 mode;
-	u8 padding[4012];
+	/* minor-version-1 additions for section alignment */
+	__le32 start_pad;
+	__le32 end_trunc;
+	u8 padding[4004];
 	__le64 checksum;
 };
+
+#ifdef CONFIG_SPARSEMEM
+#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
+#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
+#else
+/*
+ * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
+ * but we still want pmem to compile.
+ */
+#define PFN_SECTION_ALIGN_DOWN(x) (x)
+#define PFN_SECTION_ALIGN_UP(x) (x)
+#endif
+
+#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
+#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
 #endif /* __NVDIMM_PFN_H */
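Note: the new PHYS_SECTION_ALIGN_* helpers round a physical address down or up to a memory-hotplug section boundary. A worked example, assuming 4KiB pages and 128MiB sections (SECTION_SIZE_BITS == 27, the x86_64 default; other configurations differ) and using invented addresses:

/* Stand-alone re-derivation of the two macros under the assumptions above. */
#define EX_PAGE_SHIFT		12
#define EX_PAGES_PER_SECTION	(1ULL << (27 - EX_PAGE_SHIFT))	/* 0x8000 pfns */

static unsigned long long ex_phys_section_align_down(unsigned long long phys)
{
	unsigned long long pfn = phys >> EX_PAGE_SHIFT;

	return (pfn & ~(EX_PAGES_PER_SECTION - 1)) << EX_PAGE_SHIFT;
}

static unsigned long long ex_phys_section_align_up(unsigned long long phys)
{
	unsigned long long pfn = (phys >> EX_PAGE_SHIFT) + EX_PAGES_PER_SECTION - 1;

	return (pfn & ~(EX_PAGES_PER_SECTION - 1)) << EX_PAGE_SHIFT;
}

/*
 * ex_phys_section_align_down(0x10400c000) == 0x100000000 (4GiB boundary)
 * ex_phys_section_align_up(0x10400c000)   == 0x108000000 (next 128MiB section)
 */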
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index ae81a2f1da50..254d3bc13f70 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -205,11 +205,67 @@ static ssize_t namespace_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(namespace);
 
+static ssize_t resource_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	if (dev->driver) {
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+		struct nd_namespace_common *ndns = nd_pfn->ndns;
+		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+		rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+				+ start_pad + offset);
+	} else {
+		/* no address to convey if the pfn instance is disabled */
+		rc = -ENXIO;
+	}
+	device_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	ssize_t rc;
+
+	device_lock(dev);
+	if (dev->driver) {
+		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+		u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+		struct nd_namespace_common *ndns = nd_pfn->ndns;
+		u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+		u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+		rc = sprintf(buf, "%llu\n", (unsigned long long)
+				resource_size(&nsio->res) - start_pad
+				- end_trunc - offset);
+	} else {
+		/* no size to convey if the pfn instance is disabled */
+		rc = -ENXIO;
+	}
+	device_unlock(dev);
+
+	return rc;
+}
+static DEVICE_ATTR_RO(size);
+
 static struct attribute *nd_pfn_attributes[] = {
 	&dev_attr_mode.attr,
 	&dev_attr_namespace.attr,
 	&dev_attr_uuid.attr,
 	&dev_attr_align.attr,
+	&dev_attr_resource.attr,
+	&dev_attr_size.attr,
 	NULL,
 };
 
@@ -299,6 +355,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 	if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
 		return -ENODEV;
 
+	if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
+		pfn_sb->start_pad = 0;
+		pfn_sb->end_trunc = 0;
+	}
+
 	switch (le32_to_cpu(pfn_sb->mode)) {
 	case PFN_MODE_RAM:
 	case PFN_MODE_PMEM:
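Note: the two new read-only attributes expose where the usable data region actually starts and how large it is once the superblock fields are applied: resource = namespace start + start_pad + dataoff, and size = namespace size - start_pad - end_trunc - dataoff. The version_minor < 1 branch zeroes start_pad/end_trunc for pre-1.1 info blocks so the rest of the driver can use them unconditionally. A worked example with illustrative numbers (not taken from the patch):

/*
 * Namespace:  start = 0x10400c000, size = 0x7bff4000
 * pfn_sb:     start_pad = 0x3ff4000, end_trunc = 0, dataoff = 0x2000000
 *
 * resource = 0x10400c000 + 0x3ff4000 + 0x2000000 = 0x10a000000
 * size     = 0x7bff4000 - 0x3ff4000 - 0 - 0x2000000 = 0x76000000
 */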
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index adc387236fe7..ca5721c306bb 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -43,12 +43,13 @@ struct pmem_device {
 	phys_addr_t data_offset;
 	u64 pfn_flags;
 	void __pmem *virt_addr;
+	/* immutable base size of the namespace */
 	size_t size;
+	/* trim size when namespace capacity has been section aligned */
+	u32 pfn_pad;
 	struct badblocks bb;
 };
 
-static int pmem_major;
-
 static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
 {
 	if (bb->count) {
@@ -175,7 +176,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
-	return pmem->size - offset;
+	return pmem->size - pmem->pfn_pad - offset;
 }
 
 static const struct block_device_operations pmem_fops = {
@@ -258,15 +259,14 @@ static int pmem_attach_disk(struct device *dev,
 		return -ENOMEM;
 	}
 
-	disk->major = pmem_major;
-	disk->first_minor = 0;
 	disk->fops = &pmem_fops;
 	disk->private_data = pmem;
 	disk->queue = pmem->pmem_queue;
 	disk->flags = GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(ndns, disk->disk_name);
 	disk->driverfs_dev = dev;
-	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
+	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
+			/ 512);
 	pmem->pmem_disk = disk;
 	devm_exit_badblocks(dev, &pmem->bb);
 	if (devm_init_badblocks(dev, &pmem->bb))
@@ -309,6 +309,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
 	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -334,21 +337,56 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	}
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
-	npfns = (pmem->size - SZ_8K) / SZ_4K;
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
 	/*
 	 * Note, we use 64 here for the standard size of struct page,
 	 * debugging options may cause it to be larger in which case the
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
+	start += start_pad;
+	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
 	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
 	else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(SZ_8K, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
 	else
 		goto err;
 
-	npfns = (pmem->size - offset) / SZ_4K;
+	if (offset + start_pad + end_trunc >= pmem->size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		goto err;
+	}
+
+	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
@@ -356,6 +394,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
 	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
 	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
@@ -386,41 +427,56 @@ static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
 	return 0;
 }
 
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	struct device *dev = &nd_pfn->dev;
-	struct nd_region *nd_region;
-	struct vmem_altmap *altmap;
-	struct nd_pfn_sb *pfn_sb;
-	struct pmem_device *pmem;
-	struct request_queue *q;
-	phys_addr_t offset;
 	int rc;
+	struct resource res;
+	struct request_queue *q;
+	struct pmem_device *pmem;
+	struct vmem_altmap *altmap;
+	struct device *dev = &nd_pfn->dev;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
 	struct vmem_altmap __altmap = {
-		.base_pfn = __phys_to_pfn(nsio->res.start),
-		.reserve = __phys_to_pfn(SZ_8K),
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
 	};
 
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
-
-	nd_region = to_nd_region(dev->parent);
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return rc;
-
-	pfn_sb = nd_pfn->pfn_sb;
-	offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem = dev_get_drvdata(dev);
+	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem->pfn_pad = start_pad + end_trunc;
 	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (pmem->data_offset < SZ_8K)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
+		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
 			/ PAGE_SIZE;
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
@@ -428,7 +484,7 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 				le64_to_cpu(nd_pfn->pfn_sb->npfns),
 				nd_pfn->npfns);
 		altmap = & __altmap;
-		altmap->free = __phys_to_pfn(offset - SZ_8K);
+		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
 		altmap->alloc = 0;
 	} else {
 		rc = -ENXIO;
@@ -436,10 +492,12 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	}
 
 	/* establish pfn range for lookup, and switch to direct map */
-	pmem = dev_get_drvdata(dev);
 	q = pmem->pmem_queue;
+	memcpy(&res, &nsio->res, sizeof(res));
+	res.start += start_pad;
+	res.end -= end_trunc;
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
+	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
 			&q->q_usage_counter, altmap);
 	pmem->pfn_flags |= PFN_MAP;
 	if (IS_ERR(pmem->virt_addr)) {
@@ -448,7 +506,6 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	}
 
 	/* attach pmem disk in "pfn-mode" */
-	pmem->data_offset = offset;
 	rc = pmem_attach_disk(dev, ndns, pmem);
 	if (rc)
 		goto err;
@@ -457,6 +514,22 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 err:
 	nvdimm_namespace_detach_pfn(ndns);
 	return rc;
+
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return -ENODEV;
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return rc;
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_namespace_attach_pfn(nd_pfn);
 }
 
 static int nd_pmem_probe(struct device *dev)
@@ -547,26 +620,13 @@ static struct nd_device_driver nd_pmem_driver = {
 
 static int __init pmem_init(void)
 {
-	int error;
-
-	pmem_major = register_blkdev(0, "pmem");
-	if (pmem_major < 0)
-		return pmem_major;
-
-	error = nd_driver_register(&nd_pmem_driver);
-	if (error) {
-		unregister_blkdev(pmem_major, "pmem");
-		return error;
-	}
-
-	return 0;
+	return nd_driver_register(&nd_pmem_driver);
 }
 module_init(pmem_init);
 
 static void pmem_exit(void)
 {
 	driver_unregister(&nd_pmem_driver.drv);
-	unregister_blkdev(pmem_major, "pmem");
 }
 module_exit(pmem_exit);
 
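Note: to see how the section-collision trimming in nd_pfn_init() and the altmap reserve in __nvdimm_namespace_attach_pfn() interact, here is a worked example with an illustrative memory layout (128MiB sections as in pfn.h; the addresses are invented, not taken from the patch):

/*
 * Layout assumed for this example:
 *   e820 "System RAM"     ...          -- 0x103fff000
 *   pmem namespace        0x10400c000 -- 0x180000000
 *
 * nd_pfn_init():
 *   PHYS_SECTION_ALIGN_DOWN(0x10400c000) = 0x100000000, and that section
 *   also contains System RAM => REGION_MIXED, so
 *       start_pad = PHYS_SECTION_ALIGN_UP(0x10400c000) - 0x10400c000
 *                 = 0x108000000 - 0x10400c000 = 0x3ff4000  (~64MiB)
 *   The namespace end (0x180000000) is already section aligned and RAM-free,
 *   so end_trunc = 0; dataoff is then computed relative to the padded start.
 *
 * __nvdimm_namespace_attach_pfn():
 *   base = 0x10400c000 + start_pad = 0x108000000 is section aligned, so
 *   init_altmap_base() keeps pfn 0x108000 and init_altmap_reserve() reserves
 *   just PHYS_PFN(SZ_8K) = 2 pfns; had start_pad been 0, the reserve would
 *   also cover the pfns between the section base and the namespace start.
 */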