diff options
author | Dan Williams <dan.j.williams@intel.com> | 2015-05-01 13:34:01 -0400 |
---|---|---|
committer | Dan Williams <dan.j.williams@intel.com> | 2015-06-24 21:24:10 -0400 |
commit | 1b40e09a1232de537b193fa1b6b3ef16d3a1e397 (patch) | |
tree | 48111c53fa3125b7d1f46d0f01b9448acbee62d0 /drivers | |
parent | bf9bccc14c05dae8caba29df6187c731710f5380 (diff) |
libnvdimm: blk labels and namespace instantiation
A blk label set describes a namespace composed of one or more
discontiguous dpa ranges on a single dimm. These ranges may alias with
one or more pmem interleave sets that include the given dimm.
This is the runtime/volatile configuration infrastructure for sysfs
manipulation of 'alt_name', 'uuid', 'size', and 'sector_size'. A later
patch will make these settings persistent by writing back the label(s).
Unlike pmem namespaces, multiple blk namespaces can be created per
region. Once a blk namespace has been created, a new seed device
(an unconfigured child of the parent blk region) is instantiated. As long
as a region has 'available_size' != 0, new child namespaces may be created.
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Neil Brown <neilb@suse.de>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/nvdimm/core.c | 40 | ||||
-rw-r--r-- | drivers/nvdimm/dimm_devs.c | 36 | ||||
-rw-r--r-- | drivers/nvdimm/namespace_devs.c | 498 | ||||
-rw-r--r-- | drivers/nvdimm/nd-core.h | 8 | ||||
-rw-r--r-- | drivers/nvdimm/nd.h | 5 | ||||
-rw-r--r-- | drivers/nvdimm/region_devs.c | 17 |
6 files changed, 566 insertions, 38 deletions
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index cf99cce8ef33..dd824d7c2669 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c | |||
@@ -173,6 +173,46 @@ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, | |||
173 | return 0; | 173 | return 0; |
174 | } | 174 | } |
175 | 175 | ||
176 | ssize_t nd_sector_size_show(unsigned long current_lbasize, | ||
177 | const unsigned long *supported, char *buf) | ||
178 | { | ||
179 | ssize_t len = 0; | ||
180 | int i; | ||
181 | |||
182 | for (i = 0; supported[i]; i++) | ||
183 | if (current_lbasize == supported[i]) | ||
184 | len += sprintf(buf + len, "[%ld] ", supported[i]); | ||
185 | else | ||
186 | len += sprintf(buf + len, "%ld ", supported[i]); | ||
187 | len += sprintf(buf + len, "\n"); | ||
188 | return len; | ||
189 | } | ||
190 | |||
191 | ssize_t nd_sector_size_store(struct device *dev, const char *buf, | ||
192 | unsigned long *current_lbasize, const unsigned long *supported) | ||
193 | { | ||
194 | unsigned long lbasize; | ||
195 | int rc, i; | ||
196 | |||
197 | if (dev->driver) | ||
198 | return -EBUSY; | ||
199 | |||
200 | rc = kstrtoul(buf, 0, &lbasize); | ||
201 | if (rc) | ||
202 | return rc; | ||
203 | |||
204 | for (i = 0; supported[i]; i++) | ||
205 | if (lbasize == supported[i]) | ||
206 | break; | ||
207 | |||
208 | if (supported[i]) { | ||
209 | *current_lbasize = lbasize; | ||
210 | return 0; | ||
211 | } else { | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | } | ||
215 | |||
176 | static ssize_t commands_show(struct device *dev, | 216 | static ssize_t commands_show(struct device *dev, |
177 | struct device_attribute *attr, char *buf) | 217 | struct device_attribute *attr, char *buf) |
178 | { | 218 | { |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index b55acef179ba..101d3b76e405 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
@@ -290,6 +290,42 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, | |||
290 | EXPORT_SYMBOL_GPL(nvdimm_create); | 290 | EXPORT_SYMBOL_GPL(nvdimm_create); |
291 | 291 | ||
292 | /** | 292 | /** |
293 | * nd_blk_available_dpa - account the unused dpa of BLK region | ||
294 | * @nd_mapping: container of dpa-resource-root + labels | ||
295 | * | ||
296 | * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges. | ||
297 | */ | ||
298 | resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping) | ||
299 | { | ||
300 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
301 | resource_size_t map_end, busy = 0, available; | ||
302 | struct resource *res; | ||
303 | |||
304 | if (!ndd) | ||
305 | return 0; | ||
306 | |||
307 | map_end = nd_mapping->start + nd_mapping->size - 1; | ||
308 | for_each_dpa_resource(ndd, res) | ||
309 | if (res->start >= nd_mapping->start && res->start < map_end) { | ||
310 | resource_size_t end = min(map_end, res->end); | ||
311 | |||
312 | busy += end - res->start + 1; | ||
313 | } else if (res->end >= nd_mapping->start | ||
314 | && res->end <= map_end) { | ||
315 | busy += res->end - nd_mapping->start; | ||
316 | } else if (nd_mapping->start > res->start | ||
317 | && nd_mapping->start < res->end) { | ||
318 | /* total eclipse of the BLK region mapping */ | ||
319 | busy += nd_mapping->size; | ||
320 | } | ||
321 | |||
322 | available = map_end - nd_mapping->start + 1; | ||
323 | if (busy < available) | ||
324 | return available - busy; | ||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | /** | ||
293 | * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa | 329 | * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa |
294 | * @nd_mapping: container of dpa-resource-root + labels | 330 | * @nd_mapping: container of dpa-resource-root + labels |
295 | * @nd_region: constrain available space check to this reference region | 331 | * @nd_region: constrain available space check to this reference region |
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 5d81032fcfc5..ad0ec09ca40f 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
@@ -35,7 +35,15 @@ static void namespace_pmem_release(struct device *dev) | |||
35 | 35 | ||
36 | static void namespace_blk_release(struct device *dev) | 36 | static void namespace_blk_release(struct device *dev) |
37 | { | 37 | { |
38 | /* TODO: blk namespace support */ | 38 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
39 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
40 | |||
41 | if (nsblk->id >= 0) | ||
42 | ida_simple_remove(&nd_region->ns_ida, nsblk->id); | ||
43 | kfree(nsblk->alt_name); | ||
44 | kfree(nsblk->uuid); | ||
45 | kfree(nsblk->res); | ||
46 | kfree(nsblk); | ||
39 | } | 47 | } |
40 | 48 | ||
41 | static struct device_type namespace_io_device_type = { | 49 | static struct device_type namespace_io_device_type = { |
@@ -88,8 +96,9 @@ static ssize_t __alt_name_store(struct device *dev, const char *buf, | |||
88 | 96 | ||
89 | ns_altname = &nspm->alt_name; | 97 | ns_altname = &nspm->alt_name; |
90 | } else if (is_namespace_blk(dev)) { | 98 | } else if (is_namespace_blk(dev)) { |
91 | /* TODO: blk namespace support */ | 99 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
92 | return -ENXIO; | 100 | |
101 | ns_altname = &nsblk->alt_name; | ||
93 | } else | 102 | } else |
94 | return -ENXIO; | 103 | return -ENXIO; |
95 | 104 | ||
@@ -122,6 +131,24 @@ out: | |||
122 | return rc; | 131 | return rc; |
123 | } | 132 | } |
124 | 133 | ||
134 | static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk) | ||
135 | { | ||
136 | struct nd_region *nd_region = to_nd_region(nsblk->dev.parent); | ||
137 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
138 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
139 | struct nd_label_id label_id; | ||
140 | resource_size_t size = 0; | ||
141 | struct resource *res; | ||
142 | |||
143 | if (!nsblk->uuid) | ||
144 | return 0; | ||
145 | nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); | ||
146 | for_each_dpa_resource(ndd, res) | ||
147 | if (strcmp(res->name, label_id.id) == 0) | ||
148 | size += resource_size(res); | ||
149 | return size; | ||
150 | } | ||
151 | |||
125 | static ssize_t alt_name_store(struct device *dev, | 152 | static ssize_t alt_name_store(struct device *dev, |
126 | struct device_attribute *attr, const char *buf, size_t len) | 153 | struct device_attribute *attr, const char *buf, size_t len) |
127 | { | 154 | { |
@@ -148,8 +175,9 @@ static ssize_t alt_name_show(struct device *dev, | |||
148 | 175 | ||
149 | ns_altname = nspm->alt_name; | 176 | ns_altname = nspm->alt_name; |
150 | } else if (is_namespace_blk(dev)) { | 177 | } else if (is_namespace_blk(dev)) { |
151 | /* TODO: blk namespace support */ | 178 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
152 | return -ENXIO; | 179 | |
180 | ns_altname = nsblk->alt_name; | ||
153 | } else | 181 | } else |
154 | return -ENXIO; | 182 | return -ENXIO; |
155 | 183 | ||
@@ -195,6 +223,8 @@ static int scan_free(struct nd_region *nd_region, | |||
195 | new_start = res->start; | 223 | new_start = res->start; |
196 | 224 | ||
197 | rc = adjust_resource(res, new_start, resource_size(res) - n); | 225 | rc = adjust_resource(res, new_start, resource_size(res) - n); |
226 | if (rc == 0) | ||
227 | res->flags |= DPA_RESOURCE_ADJUSTED; | ||
198 | nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); | 228 | nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc); |
199 | break; | 229 | break; |
200 | } | 230 | } |
@@ -255,14 +285,15 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id, | |||
255 | return rc ? n : 0; | 285 | return rc ? n : 0; |
256 | } | 286 | } |
257 | 287 | ||
258 | static bool space_valid(bool is_pmem, struct nd_label_id *label_id, | 288 | static bool space_valid(bool is_pmem, bool is_reserve, |
259 | struct resource *res) | 289 | struct nd_label_id *label_id, struct resource *res) |
260 | { | 290 | { |
261 | /* | 291 | /* |
262 | * For BLK-space any space is valid, for PMEM-space, it must be | 292 | * For BLK-space any space is valid, for PMEM-space, it must be |
263 | * contiguous with an existing allocation. | 293 | * contiguous with an existing allocation unless we are |
294 | * reserving pmem. | ||
264 | */ | 295 | */ |
265 | if (!is_pmem) | 296 | if (is_reserve || !is_pmem) |
266 | return true; | 297 | return true; |
267 | if (!res || strcmp(res->name, label_id->id) == 0) | 298 | if (!res || strcmp(res->name, label_id->id) == 0) |
268 | return true; | 299 | return true; |
@@ -278,6 +309,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
278 | resource_size_t n) | 309 | resource_size_t n) |
279 | { | 310 | { |
280 | resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; | 311 | resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1; |
312 | bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; | ||
281 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; | 313 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; |
282 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 314 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
283 | const resource_size_t to_allocate = n; | 315 | const resource_size_t to_allocate = n; |
@@ -303,7 +335,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
303 | if (!first++ && res->start > nd_mapping->start) { | 335 | if (!first++ && res->start > nd_mapping->start) { |
304 | free_start = nd_mapping->start; | 336 | free_start = nd_mapping->start; |
305 | available = res->start - free_start; | 337 | available = res->start - free_start; |
306 | if (space_valid(is_pmem, label_id, NULL)) | 338 | if (space_valid(is_pmem, is_reserve, label_id, NULL)) |
307 | loc = ALLOC_BEFORE; | 339 | loc = ALLOC_BEFORE; |
308 | } | 340 | } |
309 | 341 | ||
@@ -311,7 +343,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
311 | if (!loc && next) { | 343 | if (!loc && next) { |
312 | free_start = res->start + resource_size(res); | 344 | free_start = res->start + resource_size(res); |
313 | free_end = min(mapping_end, next->start - 1); | 345 | free_end = min(mapping_end, next->start - 1); |
314 | if (space_valid(is_pmem, label_id, res) | 346 | if (space_valid(is_pmem, is_reserve, label_id, res) |
315 | && free_start < free_end) { | 347 | && free_start < free_end) { |
316 | available = free_end + 1 - free_start; | 348 | available = free_end + 1 - free_start; |
317 | loc = ALLOC_MID; | 349 | loc = ALLOC_MID; |
@@ -322,7 +354,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
322 | if (!loc && !next) { | 354 | if (!loc && !next) { |
323 | free_start = res->start + resource_size(res); | 355 | free_start = res->start + resource_size(res); |
324 | free_end = mapping_end; | 356 | free_end = mapping_end; |
325 | if (space_valid(is_pmem, label_id, res) | 357 | if (space_valid(is_pmem, is_reserve, label_id, res) |
326 | && free_start < free_end) { | 358 | && free_start < free_end) { |
327 | available = free_end + 1 - free_start; | 359 | available = free_end + 1 - free_start; |
328 | loc = ALLOC_AFTER; | 360 | loc = ALLOC_AFTER; |
@@ -336,7 +368,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
336 | case ALLOC_BEFORE: | 368 | case ALLOC_BEFORE: |
337 | if (strcmp(res->name, label_id->id) == 0) { | 369 | if (strcmp(res->name, label_id->id) == 0) { |
338 | /* adjust current resource up */ | 370 | /* adjust current resource up */ |
339 | if (is_pmem) | 371 | if (is_pmem && !is_reserve) |
340 | return n; | 372 | return n; |
341 | rc = adjust_resource(res, res->start - allocate, | 373 | rc = adjust_resource(res, res->start - allocate, |
342 | resource_size(res) + allocate); | 374 | resource_size(res) + allocate); |
@@ -347,7 +379,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
347 | case ALLOC_MID: | 379 | case ALLOC_MID: |
348 | if (strcmp(next->name, label_id->id) == 0) { | 380 | if (strcmp(next->name, label_id->id) == 0) { |
349 | /* adjust next resource up */ | 381 | /* adjust next resource up */ |
350 | if (is_pmem) | 382 | if (is_pmem && !is_reserve) |
351 | return n; | 383 | return n; |
352 | rc = adjust_resource(next, next->start | 384 | rc = adjust_resource(next, next->start |
353 | - allocate, resource_size(next) | 385 | - allocate, resource_size(next) |
@@ -373,7 +405,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
373 | /* BLK allocate bottom up */ | 405 | /* BLK allocate bottom up */ |
374 | if (!is_pmem) | 406 | if (!is_pmem) |
375 | free_start += available - allocate; | 407 | free_start += available - allocate; |
376 | else if (free_start != nd_mapping->start) | 408 | else if (!is_reserve && free_start != nd_mapping->start) |
377 | return n; | 409 | return n; |
378 | 410 | ||
379 | new_res = nvdimm_allocate_dpa(ndd, label_id, | 411 | new_res = nvdimm_allocate_dpa(ndd, label_id, |
@@ -384,6 +416,8 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
384 | /* adjust current resource down */ | 416 | /* adjust current resource down */ |
385 | rc = adjust_resource(res, res->start, resource_size(res) | 417 | rc = adjust_resource(res, res->start, resource_size(res) |
386 | + allocate); | 418 | + allocate); |
419 | if (rc == 0) | ||
420 | res->flags |= DPA_RESOURCE_ADJUSTED; | ||
387 | } | 421 | } |
388 | 422 | ||
389 | if (!new_res) | 423 | if (!new_res) |
@@ -409,11 +443,108 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
409 | return 0; | 443 | return 0; |
410 | } | 444 | } |
411 | 445 | ||
412 | if (is_pmem && n == to_allocate) | 446 | /* |
447 | * If we allocated nothing in the BLK case it may be because we are in | ||
448 | * an initial "pmem-reserve pass". Only do an initial BLK allocation | ||
449 | * when none of the DPA space is reserved. | ||
450 | */ | ||
451 | if ((is_pmem || !ndd->dpa.child) && n == to_allocate) | ||
413 | return init_dpa_allocation(label_id, nd_region, nd_mapping, n); | 452 | return init_dpa_allocation(label_id, nd_region, nd_mapping, n); |
414 | return n; | 453 | return n; |
415 | } | 454 | } |
416 | 455 | ||
456 | static int merge_dpa(struct nd_region *nd_region, | ||
457 | struct nd_mapping *nd_mapping, struct nd_label_id *label_id) | ||
458 | { | ||
459 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
460 | struct resource *res; | ||
461 | |||
462 | if (strncmp("pmem", label_id->id, 4) == 0) | ||
463 | return 0; | ||
464 | retry: | ||
465 | for_each_dpa_resource(ndd, res) { | ||
466 | int rc; | ||
467 | struct resource *next = res->sibling; | ||
468 | resource_size_t end = res->start + resource_size(res); | ||
469 | |||
470 | if (!next || strcmp(res->name, label_id->id) != 0 | ||
471 | || strcmp(next->name, label_id->id) != 0 | ||
472 | || end != next->start) | ||
473 | continue; | ||
474 | end += resource_size(next); | ||
475 | nvdimm_free_dpa(ndd, next); | ||
476 | rc = adjust_resource(res, res->start, end - res->start); | ||
477 | nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc); | ||
478 | if (rc) | ||
479 | return rc; | ||
480 | res->flags |= DPA_RESOURCE_ADJUSTED; | ||
481 | goto retry; | ||
482 | } | ||
483 | |||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static int __reserve_free_pmem(struct device *dev, void *data) | ||
488 | { | ||
489 | struct nvdimm *nvdimm = data; | ||
490 | struct nd_region *nd_region; | ||
491 | struct nd_label_id label_id; | ||
492 | int i; | ||
493 | |||
494 | if (!is_nd_pmem(dev)) | ||
495 | return 0; | ||
496 | |||
497 | nd_region = to_nd_region(dev); | ||
498 | if (nd_region->ndr_mappings == 0) | ||
499 | return 0; | ||
500 | |||
501 | memset(&label_id, 0, sizeof(label_id)); | ||
502 | strcat(label_id.id, "pmem-reserve"); | ||
503 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
504 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | ||
505 | resource_size_t n, rem = 0; | ||
506 | |||
507 | if (nd_mapping->nvdimm != nvdimm) | ||
508 | continue; | ||
509 | |||
510 | n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem); | ||
511 | if (n == 0) | ||
512 | return 0; | ||
513 | rem = scan_allocate(nd_region, nd_mapping, &label_id, n); | ||
514 | dev_WARN_ONCE(&nd_region->dev, rem, | ||
515 | "pmem reserve underrun: %#llx of %#llx bytes\n", | ||
516 | (unsigned long long) n - rem, | ||
517 | (unsigned long long) n); | ||
518 | return rem ? -ENXIO : 0; | ||
519 | } | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | static void release_free_pmem(struct nvdimm_bus *nvdimm_bus, | ||
525 | struct nd_mapping *nd_mapping) | ||
526 | { | ||
527 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | ||
528 | struct resource *res, *_res; | ||
529 | |||
530 | for_each_dpa_resource_safe(ndd, res, _res) | ||
531 | if (strcmp(res->name, "pmem-reserve") == 0) | ||
532 | nvdimm_free_dpa(ndd, res); | ||
533 | } | ||
534 | |||
535 | static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus, | ||
536 | struct nd_mapping *nd_mapping) | ||
537 | { | ||
538 | struct nvdimm *nvdimm = nd_mapping->nvdimm; | ||
539 | int rc; | ||
540 | |||
541 | rc = device_for_each_child(&nvdimm_bus->dev, nvdimm, | ||
542 | __reserve_free_pmem); | ||
543 | if (rc) | ||
544 | release_free_pmem(nvdimm_bus, nd_mapping); | ||
545 | return rc; | ||
546 | } | ||
547 | |||
417 | /** | 548 | /** |
418 | * grow_dpa_allocation - for each dimm allocate n bytes for @label_id | 549 | * grow_dpa_allocation - for each dimm allocate n bytes for @label_id |
419 | * @nd_region: the set of dimms to allocate @n more bytes from | 550 | * @nd_region: the set of dimms to allocate @n more bytes from |
@@ -430,13 +561,45 @@ static resource_size_t scan_allocate(struct nd_region *nd_region, | |||
430 | static int grow_dpa_allocation(struct nd_region *nd_region, | 561 | static int grow_dpa_allocation(struct nd_region *nd_region, |
431 | struct nd_label_id *label_id, resource_size_t n) | 562 | struct nd_label_id *label_id, resource_size_t n) |
432 | { | 563 | { |
564 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); | ||
565 | bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; | ||
433 | int i; | 566 | int i; |
434 | 567 | ||
435 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 568 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
436 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 569 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
437 | int rc; | 570 | resource_size_t rem = n; |
571 | int rc, j; | ||
572 | |||
573 | /* | ||
574 | * In the BLK case try once with all unallocated PMEM | ||
575 | * reserved, and once without | ||
576 | */ | ||
577 | for (j = is_pmem; j < 2; j++) { | ||
578 | bool blk_only = j == 0; | ||
579 | |||
580 | if (blk_only) { | ||
581 | rc = reserve_free_pmem(nvdimm_bus, nd_mapping); | ||
582 | if (rc) | ||
583 | return rc; | ||
584 | } | ||
585 | rem = scan_allocate(nd_region, nd_mapping, | ||
586 | label_id, rem); | ||
587 | if (blk_only) | ||
588 | release_free_pmem(nvdimm_bus, nd_mapping); | ||
438 | 589 | ||
439 | rc = scan_allocate(nd_region, nd_mapping, label_id, n); | 590 | /* try again and allow encroachments into PMEM */ |
591 | if (rem == 0) | ||
592 | break; | ||
593 | } | ||
594 | |||
595 | dev_WARN_ONCE(&nd_region->dev, rem, | ||
596 | "allocation underrun: %#llx of %#llx bytes\n", | ||
597 | (unsigned long long) n - rem, | ||
598 | (unsigned long long) n); | ||
599 | if (rem) | ||
600 | return -ENXIO; | ||
601 | |||
602 | rc = merge_dpa(nd_region, nd_mapping, label_id); | ||
440 | if (rc) | 603 | if (rc) |
441 | return rc; | 604 | return rc; |
442 | } | 605 | } |
@@ -472,8 +635,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
472 | 635 | ||
473 | uuid = nspm->uuid; | 636 | uuid = nspm->uuid; |
474 | } else if (is_namespace_blk(dev)) { | 637 | } else if (is_namespace_blk(dev)) { |
475 | /* TODO: blk namespace support */ | 638 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
476 | return -ENXIO; | 639 | |
640 | uuid = nsblk->uuid; | ||
641 | flags = NSLABEL_FLAG_LOCAL; | ||
477 | } | 642 | } |
478 | 643 | ||
479 | /* | 644 | /* |
@@ -528,6 +693,14 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) | |||
528 | 693 | ||
529 | nd_namespace_pmem_set_size(nd_region, nspm, | 694 | nd_namespace_pmem_set_size(nd_region, nspm, |
530 | val * nd_region->ndr_mappings); | 695 | val * nd_region->ndr_mappings); |
696 | } else if (is_namespace_blk(dev)) { | ||
697 | /* | ||
698 | * Try to delete the namespace if we deleted all of its | ||
699 | * allocation and this is not the seed device for the | ||
700 | * region. | ||
701 | */ | ||
702 | if (val == 0 && nd_region->ns_seed != dev) | ||
703 | nd_device_unregister(dev, ND_ASYNC); | ||
531 | } | 704 | } |
532 | 705 | ||
533 | return rc; | 706 | return rc; |
@@ -554,8 +727,9 @@ static ssize_t size_store(struct device *dev, | |||
554 | 727 | ||
555 | uuid = &nspm->uuid; | 728 | uuid = &nspm->uuid; |
556 | } else if (is_namespace_blk(dev)) { | 729 | } else if (is_namespace_blk(dev)) { |
557 | /* TODO: blk namespace support */ | 730 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
558 | rc = -ENXIO; | 731 | |
732 | uuid = &nsblk->uuid; | ||
559 | } | 733 | } |
560 | 734 | ||
561 | if (rc == 0 && val == 0 && uuid) { | 735 | if (rc == 0 && val == 0 && uuid) { |
@@ -576,21 +750,23 @@ static ssize_t size_store(struct device *dev, | |||
576 | static ssize_t size_show(struct device *dev, | 750 | static ssize_t size_show(struct device *dev, |
577 | struct device_attribute *attr, char *buf) | 751 | struct device_attribute *attr, char *buf) |
578 | { | 752 | { |
753 | unsigned long long size = 0; | ||
754 | |||
755 | nvdimm_bus_lock(dev); | ||
579 | if (is_namespace_pmem(dev)) { | 756 | if (is_namespace_pmem(dev)) { |
580 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); | 757 | struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); |
581 | 758 | ||
582 | return sprintf(buf, "%llu\n", (unsigned long long) | 759 | size = resource_size(&nspm->nsio.res); |
583 | resource_size(&nspm->nsio.res)); | ||
584 | } else if (is_namespace_blk(dev)) { | 760 | } else if (is_namespace_blk(dev)) { |
585 | /* TODO: blk namespace support */ | 761 | size = nd_namespace_blk_size(to_nd_namespace_blk(dev)); |
586 | return -ENXIO; | ||
587 | } else if (is_namespace_io(dev)) { | 762 | } else if (is_namespace_io(dev)) { |
588 | struct nd_namespace_io *nsio = to_nd_namespace_io(dev); | 763 | struct nd_namespace_io *nsio = to_nd_namespace_io(dev); |
589 | 764 | ||
590 | return sprintf(buf, "%llu\n", (unsigned long long) | 765 | size = resource_size(&nsio->res); |
591 | resource_size(&nsio->res)); | 766 | } |
592 | } else | 767 | nvdimm_bus_unlock(dev); |
593 | return -ENXIO; | 768 | |
769 | return sprintf(buf, "%llu\n", size); | ||
594 | } | 770 | } |
595 | static DEVICE_ATTR(size, S_IRUGO, size_show, size_store); | 771 | static DEVICE_ATTR(size, S_IRUGO, size_show, size_store); |
596 | 772 | ||
@@ -604,8 +780,9 @@ static ssize_t uuid_show(struct device *dev, | |||
604 | 780 | ||
605 | uuid = nspm->uuid; | 781 | uuid = nspm->uuid; |
606 | } else if (is_namespace_blk(dev)) { | 782 | } else if (is_namespace_blk(dev)) { |
607 | /* TODO: blk namespace support */ | 783 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
608 | return -ENXIO; | 784 | |
785 | uuid = nsblk->uuid; | ||
609 | } else | 786 | } else |
610 | return -ENXIO; | 787 | return -ENXIO; |
611 | 788 | ||
@@ -669,8 +846,9 @@ static ssize_t uuid_store(struct device *dev, | |||
669 | 846 | ||
670 | ns_uuid = &nspm->uuid; | 847 | ns_uuid = &nspm->uuid; |
671 | } else if (is_namespace_blk(dev)) { | 848 | } else if (is_namespace_blk(dev)) { |
672 | /* TODO: blk namespace support */ | 849 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
673 | return -ENXIO; | 850 | |
851 | ns_uuid = &nsblk->uuid; | ||
674 | } else | 852 | } else |
675 | return -ENXIO; | 853 | return -ENXIO; |
676 | 854 | ||
@@ -712,12 +890,48 @@ static ssize_t resource_show(struct device *dev, | |||
712 | } | 890 | } |
713 | static DEVICE_ATTR_RO(resource); | 891 | static DEVICE_ATTR_RO(resource); |
714 | 892 | ||
893 | static const unsigned long ns_lbasize_supported[] = { 512, 0 }; | ||
894 | |||
895 | static ssize_t sector_size_show(struct device *dev, | ||
896 | struct device_attribute *attr, char *buf) | ||
897 | { | ||
898 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | ||
899 | |||
900 | if (!is_namespace_blk(dev)) | ||
901 | return -ENXIO; | ||
902 | |||
903 | return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf); | ||
904 | } | ||
905 | |||
906 | static ssize_t sector_size_store(struct device *dev, | ||
907 | struct device_attribute *attr, const char *buf, size_t len) | ||
908 | { | ||
909 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); | ||
910 | ssize_t rc; | ||
911 | |||
912 | if (!is_namespace_blk(dev)) | ||
913 | return -ENXIO; | ||
914 | |||
915 | device_lock(dev); | ||
916 | nvdimm_bus_lock(dev); | ||
917 | rc = nd_sector_size_store(dev, buf, &nsblk->lbasize, | ||
918 | ns_lbasize_supported); | ||
919 | dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__, | ||
920 | rc, buf, buf[len - 1] == '\n' ? "" : "\n"); | ||
921 | nvdimm_bus_unlock(dev); | ||
922 | device_unlock(dev); | ||
923 | |||
924 | return rc ? rc : len; | ||
925 | } | ||
926 | static DEVICE_ATTR_RW(sector_size); | ||
927 | |||
715 | static struct attribute *nd_namespace_attributes[] = { | 928 | static struct attribute *nd_namespace_attributes[] = { |
716 | &dev_attr_nstype.attr, | 929 | &dev_attr_nstype.attr, |
717 | &dev_attr_size.attr, | 930 | &dev_attr_size.attr, |
718 | &dev_attr_uuid.attr, | 931 | &dev_attr_uuid.attr, |
719 | &dev_attr_resource.attr, | 932 | &dev_attr_resource.attr, |
720 | &dev_attr_alt_name.attr, | 933 | &dev_attr_alt_name.attr, |
934 | &dev_attr_sector_size.attr, | ||
721 | NULL, | 935 | NULL, |
722 | }; | 936 | }; |
723 | 937 | ||
@@ -735,6 +949,10 @@ static umode_t namespace_visible(struct kobject *kobj, | |||
735 | if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { | 949 | if (is_namespace_pmem(dev) || is_namespace_blk(dev)) { |
736 | if (a == &dev_attr_size.attr) | 950 | if (a == &dev_attr_size.attr) |
737 | return S_IWUSR | S_IRUGO; | 951 | return S_IWUSR | S_IRUGO; |
952 | |||
953 | if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr) | ||
954 | return 0; | ||
955 | |||
738 | return a->mode; | 956 | return a->mode; |
739 | } | 957 | } |
740 | 958 | ||
@@ -1022,6 +1240,176 @@ static struct device **create_namespace_pmem(struct nd_region *nd_region) | |||
1022 | return NULL; | 1240 | return NULL; |
1023 | } | 1241 | } |
1024 | 1242 | ||
1243 | struct resource *nsblk_add_resource(struct nd_region *nd_region, | ||
1244 | struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, | ||
1245 | resource_size_t start) | ||
1246 | { | ||
1247 | struct nd_label_id label_id; | ||
1248 | struct resource *res; | ||
1249 | |||
1250 | nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL); | ||
1251 | res = krealloc(nsblk->res, | ||
1252 | sizeof(void *) * (nsblk->num_resources + 1), | ||
1253 | GFP_KERNEL); | ||
1254 | if (!res) | ||
1255 | return NULL; | ||
1256 | nsblk->res = (struct resource **) res; | ||
1257 | for_each_dpa_resource(ndd, res) | ||
1258 | if (strcmp(res->name, label_id.id) == 0 | ||
1259 | && res->start == start) { | ||
1260 | nsblk->res[nsblk->num_resources++] = res; | ||
1261 | return res; | ||
1262 | } | ||
1263 | return NULL; | ||
1264 | } | ||
1265 | |||
1266 | static struct device *nd_namespace_blk_create(struct nd_region *nd_region) | ||
1267 | { | ||
1268 | struct nd_namespace_blk *nsblk; | ||
1269 | struct device *dev; | ||
1270 | |||
1271 | if (!is_nd_blk(&nd_region->dev)) | ||
1272 | return NULL; | ||
1273 | |||
1274 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); | ||
1275 | if (!nsblk) | ||
1276 | return NULL; | ||
1277 | |||
1278 | dev = &nsblk->dev; | ||
1279 | dev->type = &namespace_blk_device_type; | ||
1280 | nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL); | ||
1281 | if (nsblk->id < 0) { | ||
1282 | kfree(nsblk); | ||
1283 | return NULL; | ||
1284 | } | ||
1285 | dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id); | ||
1286 | dev->parent = &nd_region->dev; | ||
1287 | dev->groups = nd_namespace_attribute_groups; | ||
1288 | |||
1289 | return &nsblk->dev; | ||
1290 | } | ||
1291 | |||
1292 | void nd_region_create_blk_seed(struct nd_region *nd_region) | ||
1293 | { | ||
1294 | WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); | ||
1295 | nd_region->ns_seed = nd_namespace_blk_create(nd_region); | ||
1296 | /* | ||
1297 | * Seed creation failures are not fatal, provisioning is simply | ||
1298 | * disabled until memory becomes available | ||
1299 | */ | ||
1300 | if (!nd_region->ns_seed) | ||
1301 | dev_err(&nd_region->dev, "failed to create blk namespace\n"); | ||
1302 | else | ||
1303 | nd_device_register(nd_region->ns_seed); | ||
1304 | } | ||
1305 | |||
1306 | static struct device **create_namespace_blk(struct nd_region *nd_region) | ||
1307 | { | ||
1308 | struct nd_mapping *nd_mapping = &nd_region->mapping[0]; | ||
1309 | struct nd_namespace_label *nd_label; | ||
1310 | struct device *dev, **devs = NULL; | ||
1311 | struct nd_namespace_blk *nsblk; | ||
1312 | struct nvdimm_drvdata *ndd; | ||
1313 | int i, l, count = 0; | ||
1314 | struct resource *res; | ||
1315 | |||
1316 | if (nd_region->ndr_mappings == 0) | ||
1317 | return NULL; | ||
1318 | |||
1319 | ndd = to_ndd(nd_mapping); | ||
1320 | for_each_label(l, nd_label, nd_mapping->labels) { | ||
1321 | u32 flags = __le32_to_cpu(nd_label->flags); | ||
1322 | char *name[NSLABEL_NAME_LEN]; | ||
1323 | struct device **__devs; | ||
1324 | |||
1325 | if (flags & NSLABEL_FLAG_LOCAL) | ||
1326 | /* pass */; | ||
1327 | else | ||
1328 | continue; | ||
1329 | |||
1330 | for (i = 0; i < count; i++) { | ||
1331 | nsblk = to_nd_namespace_blk(devs[i]); | ||
1332 | if (memcmp(nsblk->uuid, nd_label->uuid, | ||
1333 | NSLABEL_UUID_LEN) == 0) { | ||
1334 | res = nsblk_add_resource(nd_region, ndd, nsblk, | ||
1335 | __le64_to_cpu(nd_label->dpa)); | ||
1336 | if (!res) | ||
1337 | goto err; | ||
1338 | nd_dbg_dpa(nd_region, ndd, res, "%s assign\n", | ||
1339 | dev_name(&nsblk->dev)); | ||
1340 | break; | ||
1341 | } | ||
1342 | } | ||
1343 | if (i < count) | ||
1344 | continue; | ||
1345 | __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); | ||
1346 | if (!__devs) | ||
1347 | goto err; | ||
1348 | memcpy(__devs, devs, sizeof(dev) * count); | ||
1349 | kfree(devs); | ||
1350 | devs = __devs; | ||
1351 | |||
1352 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); | ||
1353 | if (!nsblk) | ||
1354 | goto err; | ||
1355 | dev = &nsblk->dev; | ||
1356 | dev->type = &namespace_blk_device_type; | ||
1357 | dev->parent = &nd_region->dev; | ||
1358 | dev_set_name(dev, "namespace%d.%d", nd_region->id, count); | ||
1359 | devs[count++] = dev; | ||
1360 | nsblk->id = -1; | ||
1361 | nsblk->lbasize = __le64_to_cpu(nd_label->lbasize); | ||
1362 | nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN, | ||
1363 | GFP_KERNEL); | ||
1364 | if (!nsblk->uuid) | ||
1365 | goto err; | ||
1366 | memcpy(name, nd_label->name, NSLABEL_NAME_LEN); | ||
1367 | if (name[0]) | ||
1368 | nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN, | ||
1369 | GFP_KERNEL); | ||
1370 | res = nsblk_add_resource(nd_region, ndd, nsblk, | ||
1371 | __le64_to_cpu(nd_label->dpa)); | ||
1372 | if (!res) | ||
1373 | goto err; | ||
1374 | nd_dbg_dpa(nd_region, ndd, res, "%s assign\n", | ||
1375 | dev_name(&nsblk->dev)); | ||
1376 | } | ||
1377 | |||
1378 | dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n", | ||
1379 | __func__, count, count == 1 ? "" : "s"); | ||
1380 | |||
1381 | if (count == 0) { | ||
1382 | /* Publish a zero-sized namespace for userspace to configure. */ | ||
1383 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
1384 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | ||
1385 | |||
1386 | kfree(nd_mapping->labels); | ||
1387 | nd_mapping->labels = NULL; | ||
1388 | } | ||
1389 | |||
1390 | devs = kcalloc(2, sizeof(dev), GFP_KERNEL); | ||
1391 | if (!devs) | ||
1392 | goto err; | ||
1393 | nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL); | ||
1394 | if (!nsblk) | ||
1395 | goto err; | ||
1396 | dev = &nsblk->dev; | ||
1397 | dev->type = &namespace_blk_device_type; | ||
1398 | dev->parent = &nd_region->dev; | ||
1399 | devs[count++] = dev; | ||
1400 | } | ||
1401 | |||
1402 | return devs; | ||
1403 | |||
1404 | err: | ||
1405 | for (i = 0; i < count; i++) { | ||
1406 | nsblk = to_nd_namespace_blk(devs[i]); | ||
1407 | namespace_blk_release(&nsblk->dev); | ||
1408 | } | ||
1409 | kfree(devs); | ||
1410 | return NULL; | ||
1411 | } | ||
1412 | |||
1025 | static int init_active_labels(struct nd_region *nd_region) | 1413 | static int init_active_labels(struct nd_region *nd_region) |
1026 | { | 1414 | { |
1027 | int i; | 1415 | int i; |
@@ -1087,6 +1475,9 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) | |||
1087 | case ND_DEVICE_NAMESPACE_PMEM: | 1475 | case ND_DEVICE_NAMESPACE_PMEM: |
1088 | devs = create_namespace_pmem(nd_region); | 1476 | devs = create_namespace_pmem(nd_region); |
1089 | break; | 1477 | break; |
1478 | case ND_DEVICE_NAMESPACE_BLK: | ||
1479 | devs = create_namespace_blk(nd_region); | ||
1480 | break; | ||
1090 | default: | 1481 | default: |
1091 | break; | 1482 | break; |
1092 | } | 1483 | } |
@@ -1095,15 +1486,50 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) | |||
1095 | if (!devs) | 1486 | if (!devs) |
1096 | return -ENODEV; | 1487 | return -ENODEV; |
1097 | 1488 | ||
1098 | nd_region->ns_seed = devs[0]; | ||
1099 | for (i = 0; devs[i]; i++) { | 1489 | for (i = 0; devs[i]; i++) { |
1100 | struct device *dev = devs[i]; | 1490 | struct device *dev = devs[i]; |
1491 | int id; | ||
1101 | 1492 | ||
1102 | dev_set_name(dev, "namespace%d.%d", nd_region->id, i); | 1493 | if (type == ND_DEVICE_NAMESPACE_BLK) { |
1494 | struct nd_namespace_blk *nsblk; | ||
1495 | |||
1496 | nsblk = to_nd_namespace_blk(dev); | ||
1497 | id = ida_simple_get(&nd_region->ns_ida, 0, 0, | ||
1498 | GFP_KERNEL); | ||
1499 | nsblk->id = id; | ||
1500 | } else | ||
1501 | id = i; | ||
1502 | |||
1503 | if (id < 0) | ||
1504 | break; | ||
1505 | dev_set_name(dev, "namespace%d.%d", nd_region->id, id); | ||
1103 | dev->groups = nd_namespace_attribute_groups; | 1506 | dev->groups = nd_namespace_attribute_groups; |
1104 | nd_device_register(dev); | 1507 | nd_device_register(dev); |
1105 | } | 1508 | } |
1509 | if (i) | ||
1510 | nd_region->ns_seed = devs[0]; | ||
1511 | |||
1512 | if (devs[i]) { | ||
1513 | int j; | ||
1514 | |||
1515 | for (j = i; devs[j]; j++) { | ||
1516 | struct device *dev = devs[j]; | ||
1517 | |||
1518 | device_initialize(dev); | ||
1519 | put_device(dev); | ||
1520 | } | ||
1521 | *err = j - i; | ||
1522 | /* | ||
1523 | * All of the namespaces we tried to register failed, so | ||
1524 | * fail region activation. | ||
1525 | */ | ||
1526 | if (*err == 0) | ||
1527 | rc = -ENODEV; | ||
1528 | } | ||
1106 | kfree(devs); | 1529 | kfree(devs); |
1107 | 1530 | ||
1531 | if (rc == -ENODEV) | ||
1532 | return rc; | ||
1533 | |||
1108 | return i; | 1534 | return i; |
1109 | } | 1535 | } |
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index c6c889292bab..22489555a6f1 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/libnvdimm.h> | 17 | #include <linux/libnvdimm.h> |
18 | #include <linux/sizes.h> | 18 | #include <linux/sizes.h> |
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | #include <linux/nd.h> | ||
20 | 21 | ||
21 | extern struct list_head nvdimm_bus_list; | 22 | extern struct list_head nvdimm_bus_list; |
22 | extern struct mutex nvdimm_bus_list_mutex; | 23 | extern struct mutex nvdimm_bus_list_mutex; |
@@ -48,6 +49,8 @@ struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev); | |||
48 | int __init nvdimm_bus_init(void); | 49 | int __init nvdimm_bus_init(void); |
49 | void nvdimm_bus_exit(void); | 50 | void nvdimm_bus_exit(void); |
50 | void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev); | 51 | void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev); |
52 | struct nd_region; | ||
53 | void nd_region_create_blk_seed(struct nd_region *nd_region); | ||
51 | void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev); | 54 | void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev); |
52 | int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus); | 55 | int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus); |
53 | void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus); | 56 | void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus); |
@@ -64,8 +67,13 @@ struct nvdimm_drvdata; | |||
64 | struct nd_mapping; | 67 | struct nd_mapping; |
65 | resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, | 68 | resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, |
66 | struct nd_mapping *nd_mapping, resource_size_t *overlap); | 69 | struct nd_mapping *nd_mapping, resource_size_t *overlap); |
70 | resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping); | ||
67 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); | 71 | resource_size_t nd_region_available_dpa(struct nd_region *nd_region); |
68 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, | 72 | resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, |
69 | struct nd_label_id *label_id); | 73 | struct nd_label_id *label_id); |
74 | struct nd_mapping; | ||
75 | struct resource *nsblk_add_resource(struct nd_region *nd_region, | ||
76 | struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk, | ||
77 | resource_size_t start); | ||
70 | void get_ndd(struct nvdimm_drvdata *ndd); | 78 | void get_ndd(struct nvdimm_drvdata *ndd); |
71 | #endif /* __ND_CORE_H__ */ | 79 | #endif /* __ND_CORE_H__ */ |
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 03e610cd9f43..9b021b626202 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h | |||
@@ -73,6 +73,7 @@ static inline struct nd_namespace_index *to_next_namespace_index( | |||
73 | 73 | ||
74 | struct nd_region { | 74 | struct nd_region { |
75 | struct device dev; | 75 | struct device dev; |
76 | struct ida ns_ida; | ||
76 | struct device *ns_seed; | 77 | struct device *ns_seed; |
77 | u16 ndr_mappings; | 78 | u16 ndr_mappings; |
78 | u64 ndr_size; | 79 | u64 ndr_size; |
@@ -102,6 +103,10 @@ void nd_device_register(struct device *dev); | |||
102 | void nd_device_unregister(struct device *dev, enum nd_async_mode mode); | 103 | void nd_device_unregister(struct device *dev, enum nd_async_mode mode); |
103 | int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, | 104 | int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, |
104 | size_t len); | 105 | size_t len); |
106 | ssize_t nd_sector_size_show(unsigned long current_lbasize, | ||
107 | const unsigned long *supported, char *buf); | ||
108 | ssize_t nd_sector_size_store(struct device *dev, const char *buf, | ||
109 | unsigned long *current_lbasize, const unsigned long *supported); | ||
105 | int __init nvdimm_init(void); | 110 | int __init nvdimm_init(void); |
106 | int __init nd_region_init(void); | 111 | int __init nd_region_init(void); |
107 | void nvdimm_exit(void); | 112 | void nvdimm_exit(void); |
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index b45806f7176d..ac21ce419beb 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
@@ -118,7 +118,12 @@ static int is_uuid_busy(struct device *dev, void *data) | |||
118 | break; | 118 | break; |
119 | } | 119 | } |
120 | case ND_DEVICE_NAMESPACE_BLK: { | 120 | case ND_DEVICE_NAMESPACE_BLK: { |
121 | /* TODO: blk namespace support */ | 121 | struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); |
122 | |||
123 | if (!nsblk->uuid) | ||
124 | break; | ||
125 | if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0) | ||
126 | return -EBUSY; | ||
122 | break; | 127 | break; |
123 | } | 128 | } |
124 | default: | 129 | default: |
@@ -230,7 +235,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region) | |||
230 | goto retry; | 235 | goto retry; |
231 | } | 236 | } |
232 | } else if (is_nd_blk(&nd_region->dev)) { | 237 | } else if (is_nd_blk(&nd_region->dev)) { |
233 | /* TODO: BLK Namespace support */ | 238 | available += nd_blk_available_dpa(nd_mapping); |
234 | } | 239 | } |
235 | } | 240 | } |
236 | 241 | ||
@@ -360,6 +365,13 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, | |||
360 | nd_mapping->ndd = NULL; | 365 | nd_mapping->ndd = NULL; |
361 | atomic_dec(&nvdimm->busy); | 366 | atomic_dec(&nvdimm->busy); |
362 | } | 367 | } |
368 | } else if (dev->parent && is_nd_blk(dev->parent) && probe) { | ||
369 | struct nd_region *nd_region = to_nd_region(dev->parent); | ||
370 | |||
371 | nvdimm_bus_lock(dev); | ||
372 | if (nd_region->ns_seed == dev) | ||
373 | nd_region_create_blk_seed(nd_region); | ||
374 | nvdimm_bus_unlock(dev); | ||
363 | } | 375 | } |
364 | } | 376 | } |
365 | 377 | ||
@@ -533,6 +545,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, | |||
533 | nd_region->ndr_mappings = ndr_desc->num_mappings; | 545 | nd_region->ndr_mappings = ndr_desc->num_mappings; |
534 | nd_region->provider_data = ndr_desc->provider_data; | 546 | nd_region->provider_data = ndr_desc->provider_data; |
535 | nd_region->nd_set = ndr_desc->nd_set; | 547 | nd_region->nd_set = ndr_desc->nd_set; |
548 | ida_init(&nd_region->ns_ida); | ||
536 | dev = &nd_region->dev; | 549 | dev = &nd_region->dev; |
537 | dev_set_name(dev, "region%d", nd_region->id); | 550 | dev_set_name(dev, "region%d", nd_region->id); |
538 | dev->parent = &nvdimm_bus->dev; | 551 | dev->parent = &nvdimm_bus->dev; |