Diffstat (limited to 'drivers/acpi/nfit.c')
 -rw-r--r--  drivers/acpi/nfit.c | 134
 1 file changed, 120 insertions(+), 14 deletions(-)
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa178c8d..628a42c41ab1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/acpi.h>
 #include <linux/sort.h>
+#include <linux/pmem.h>
 #include <linux/io.h>
 #include "nfit.h"
 
@@ -305,6 +306,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
        return true;
 }
 
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+               struct acpi_nfit_flush_address *flush)
+{
+       struct device *dev = acpi_desc->dev;
+       struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+                       GFP_KERNEL);
+
+       if (!nfit_flush)
+               return false;
+       INIT_LIST_HEAD(&nfit_flush->list);
+       nfit_flush->flush = flush;
+       list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+       dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+                       flush->device_handle, flush->hint_count);
+       return true;
+}
+
 static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
                const void *end)
 {
@@ -338,7 +356,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
                        return err;
                break;
        case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
-               dev_dbg(dev, "%s: flush\n", __func__);
+               if (!add_flush(acpi_desc, table))
+                       return err;
                break;
        case ACPI_NFIT_TYPE_SMBIOS:
                dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
 {
        u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
        struct nfit_memdev *nfit_memdev;
+       struct nfit_flush *nfit_flush;
        struct nfit_dcr *nfit_dcr;
        struct nfit_bdw *nfit_bdw;
        struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
                        nfit_mem->idt_bdw = nfit_idt->idt;
                        break;
                }
+
+               list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+                       if (nfit_flush->flush->device_handle !=
+                                       nfit_memdev->memdev->device_handle)
+                               continue;
+                       nfit_mem->nfit_flush = nfit_flush;
+                       break;
+               }
                break;
        }
 
@@ -978,6 +1006,24 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
        return mmio->base_offset + line_offset + table_offset + sub_line_offset;
 }
 
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+
+       if (nfit_blk->nvdimm_flush) {
+               /*
+                * The first wmb() is needed to 'sfence' all previous writes
+                * such that they are architecturally visible for the platform
+                * buffer flush. Note that we've already arranged for pmem
+                * writes to avoid the cache via arch_memcpy_to_pmem().  The
+                * final wmb() ensures ordering for the NVDIMM flush write.
+                */
+               wmb();
+               writeq(1, nfit_blk->nvdimm_flush);
+               wmb();
+       } else
+               wmb_pmem();
+}
+
 static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
        struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
                offset = to_interleave_offset(offset, mmio);
 
        writeq(cmd, mmio->base + offset);
-       /* FIXME: conditionally perform read-back if mandated by firmware */
+       wmb_blk(nfit_blk);
+
+       if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+               readq(mmio->base + offset);
 }
 
 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
 
        base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
                + lane * mmio->size;
-       /* TODO: non-temporal access, flush hints, cache management etc... */
        write_blk_ctl(nfit_blk, lane, dpa, len, rw);
        while (len) {
                unsigned int c;
@@ -1045,13 +1093,19 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
                }
 
                if (rw)
-                       memcpy(mmio->aperture + offset, iobuf + copied, c);
+                       memcpy_to_pmem(mmio->aperture + offset,
+                                       iobuf + copied, c);
                else
-                       memcpy(iobuf + copied, mmio->aperture + offset, c);
+                       memcpy_from_pmem(iobuf + copied,
+                                       mmio->aperture + offset, c);
 
                copied += c;
                len -= c;
        }
+
+       if (rw)
+               wmb_blk(nfit_blk);
+
        rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
        return rc;
 }
@@ -1124,7 +1178,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
 }
 
 static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-               struct acpi_nfit_system_address *spa)
+               struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
        resource_size_t start = spa->address;
        resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
        if (!res)
                goto err_mem;
 
-       /* TODO: cacheability based on the spa type */
-       spa_map->iomem = ioremap_nocache(start, n);
+       if (type == SPA_MAP_APERTURE) {
+               /*
+                * TODO: memremap_pmem() support, but that requires cache
+                * flushing when the aperture is moved.
+                */
+               spa_map->iomem = ioremap_wc(start, n);
+       } else
+               spa_map->iomem = ioremap_nocache(start, n);
+
        if (!spa_map->iomem)
                goto err_map;
 
@@ -1171,6 +1232,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
  * @nvdimm_bus: NFIT-bus that provided the spa table entry
  * @nfit_spa: spa table to map
+ * @type: aperture or control region
  *
  * In the case where block-data-window apertures and
  * dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  * unbound.
  */
 static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
-               struct acpi_nfit_system_address *spa)
+               struct acpi_nfit_system_address *spa, enum spa_map_type type)
 {
        void __iomem *iomem;
 
        mutex_lock(&acpi_desc->spa_map_mutex);
-       iomem = __nfit_spa_map(acpi_desc, spa);
+       iomem = __nfit_spa_map(acpi_desc, spa, type);
        mutex_unlock(&acpi_desc->spa_map_mutex);
 
        return iomem;
@@ -1206,12 +1268,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
        return 0;
 }
 
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+               struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+       struct nd_cmd_dimm_flags flags;
+       int rc;
+
+       memset(&flags, 0, sizeof(flags));
+       rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+                       sizeof(flags));
+
+       if (rc >= 0 && flags.status == 0)
+               nfit_blk->dimm_flags = flags.flags;
+       else if (rc == -ENOTTY) {
+               /* fall back to a conservative default */
+               nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
+               rc = 0;
+       } else
+               rc = -ENXIO;
+
+       return rc;
+}
+
 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
                struct device *dev)
 {
        struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
        struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
        struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+       struct nfit_flush *nfit_flush;
        struct nfit_blk_mmio *mmio;
        struct nfit_blk *nfit_blk;
        struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
                dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
                                nfit_mem ? "" : " nfit_mem",
-                               nfit_mem->dcr ? "" : " dcr",
-                               nfit_mem->bdw ? "" : " bdw");
+                               (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+                               (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
                return -ENXIO;
        }
 
@@ -1237,7 +1322,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        /* map block aperture memory */
        nfit_blk->bdw_offset = nfit_mem->bdw->offset;
        mmio = &nfit_blk->mmio[BDW];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+                       SPA_MAP_APERTURE);
        if (!mmio->base) {
                dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
                                nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
        nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
        nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
        mmio = &nfit_blk->mmio[DCR];
-       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+       mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+                       SPA_MAP_CONTROL);
        if (!mmio->base) {
                dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
                                nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
                return rc;
        }
 
+       rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+       if (rc < 0) {
+               dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+                               __func__, nvdimm_name(nvdimm));
+               return rc;
+       }
+
+       nfit_flush = nfit_mem->nfit_flush;
+       if (nfit_flush && nfit_flush->flush->hint_count != 0) {
+               nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+                               nfit_flush->flush->hint_address[0], 8);
+               if (!nfit_blk->nvdimm_flush)
+                       return -ENOMEM;
+       }
+
+       if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+               dev_warn(dev, "unable to guarantee persistence of writes\n");
+
        if (mmio->line_size == 0)
                return 0;
 
@@ -1459,6 +1564,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
        INIT_LIST_HEAD(&acpi_desc->dcrs);
        INIT_LIST_HEAD(&acpi_desc->bdws);
        INIT_LIST_HEAD(&acpi_desc->idts);
+       INIT_LIST_HEAD(&acpi_desc->flushes);
        INIT_LIST_HEAD(&acpi_desc->memdevs);
        INIT_LIST_HEAD(&acpi_desc->dimms);
        mutex_init(&acpi_desc->spa_map_mutex);
