 mm/hmm.c | 196 +++----------------------
 1 file changed, 26 insertions(+), 170 deletions(-)

--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -986,17 +986,16 @@ static void hmm_devmem_ref_exit(void *data)
 	struct hmm_devmem *devmem;
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
+	wait_for_completion(&devmem->completion);
 	percpu_ref_exit(ref);
 }
 
-static void hmm_devmem_ref_kill(void *data)
+static void hmm_devmem_ref_kill(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct hmm_devmem *devmem;
 
 	devmem = container_of(ref, struct hmm_devmem, ref);
 	percpu_ref_kill(ref);
-	wait_for_completion(&devmem->completion);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
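
The signature change above is what lets hmm_devmem_ref_kill() be wired up as the dev_pagemap kill callback later in this patch, and moving wait_for_completion() from kill-time to exit-time means "kill" only stops new references while the devm exit action waits for the last one to drop. For reference, the two callbacks as they read after this hunk (reconstructed from the diff; hmm_devmem_ref_exit() keeps its void * devm-action signature, so the ref = data cast above the hunk is inferred):

	static void hmm_devmem_ref_exit(void *data)
	{
		struct percpu_ref *ref = data;
		struct hmm_devmem *devmem;

		devmem = container_of(ref, struct hmm_devmem, ref);
		/* wait for the last page reference to drop before teardown */
		wait_for_completion(&devmem->completion);
		percpu_ref_exit(ref);
	}

	static void hmm_devmem_ref_kill(struct percpu_ref *ref)
	{
		struct hmm_devmem *devmem;

		devmem = container_of(ref, struct hmm_devmem, ref);
		/* only stop new references; waiting happens in ref_exit() */
		percpu_ref_kill(ref);
	}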
@@ -1019,154 +1018,6 @@ static void hmm_devmem_free(struct page *page, void *data)
 	devmem->ops->free(devmem, page);
 }
 
-static DEFINE_MUTEX(hmm_devmem_lock);
-static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
-
-static void hmm_devmem_radix_release(struct resource *resource)
-{
-	resource_size_t key;
-
-	mutex_lock(&hmm_devmem_lock);
-	for (key = resource->start;
-	     key <= resource->end;
-	     key += PA_SECTION_SIZE)
-		radix_tree_delete(&hmm_devmem_radix, key >> PA_SECTION_SHIFT);
-	mutex_unlock(&hmm_devmem_lock);
-}
-
-static void hmm_devmem_release(void *data)
-{
-	struct hmm_devmem *devmem = data;
-	struct resource *resource = devmem->resource;
-	unsigned long start_pfn, npages;
-	struct zone *zone;
-	struct page *page;
-
-	/* pages are dead and unused, undo the arch mapping */
-	start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
-	npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
-
-	page = pfn_to_page(start_pfn);
-	zone = page_zone(page);
-
-	mem_hotplug_begin();
-	if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
-		__remove_pages(zone, start_pfn, npages, NULL);
-	else
-		arch_remove_memory(start_pfn << PAGE_SHIFT,
-				   npages << PAGE_SHIFT, NULL);
-	mem_hotplug_done();
-
-	hmm_devmem_radix_release(resource);
-}
-
-static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
-{
-	resource_size_t key, align_start, align_size, align_end;
-	struct device *device = devmem->device;
-	int ret, nid, is_ram;
-
-	align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
-	align_size = ALIGN(devmem->resource->start +
-			   resource_size(devmem->resource),
-			   PA_SECTION_SIZE) - align_start;
-
-	is_ram = region_intersects(align_start, align_size,
-				   IORESOURCE_SYSTEM_RAM,
-				   IORES_DESC_NONE);
-	if (is_ram == REGION_MIXED) {
-		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
-			  __func__, devmem->resource);
-		return -ENXIO;
-	}
-	if (is_ram == REGION_INTERSECTS)
-		return -ENXIO;
-
-	if (devmem->resource->desc == IORES_DESC_DEVICE_PUBLIC_MEMORY)
-		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
-	else
-		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-
-	devmem->pagemap.res = *devmem->resource;
-	devmem->pagemap.page_fault = hmm_devmem_fault;
-	devmem->pagemap.page_free = hmm_devmem_free;
-	devmem->pagemap.dev = devmem->device;
-	devmem->pagemap.ref = &devmem->ref;
-	devmem->pagemap.data = devmem;
-
-	mutex_lock(&hmm_devmem_lock);
-	align_end = align_start + align_size - 1;
-	for (key = align_start; key <= align_end; key += PA_SECTION_SIZE) {
-		struct hmm_devmem *dup;
-
-		dup = radix_tree_lookup(&hmm_devmem_radix,
-					key >> PA_SECTION_SHIFT);
-		if (dup) {
-			dev_err(device, "%s: collides with mapping for %s\n",
-				__func__, dev_name(dup->device));
-			mutex_unlock(&hmm_devmem_lock);
-			ret = -EBUSY;
-			goto error;
-		}
-		ret = radix_tree_insert(&hmm_devmem_radix,
-					key >> PA_SECTION_SHIFT,
-					devmem);
-		if (ret) {
-			dev_err(device, "%s: failed: %d\n", __func__, ret);
-			mutex_unlock(&hmm_devmem_lock);
-			goto error_radix;
-		}
-	}
-	mutex_unlock(&hmm_devmem_lock);
-
-	nid = dev_to_node(device);
-	if (nid < 0)
-		nid = numa_mem_id();
-
-	mem_hotplug_begin();
-	/*
-	 * For device private memory we call add_pages() as we only need to
-	 * allocate and initialize struct page for the device memory. More-
-	 * over the device memory is un-accessible thus we do not want to
-	 * create a linear mapping for the memory like arch_add_memory()
-	 * would do.
-	 *
-	 * For device public memory, which is accesible by the CPU, we do
-	 * want the linear mapping and thus use arch_add_memory().
-	 */
-	if (devmem->pagemap.type == MEMORY_DEVICE_PUBLIC)
-		ret = arch_add_memory(nid, align_start, align_size, NULL,
-				      false);
-	else
-		ret = add_pages(nid, align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT, NULL, false);
-	if (ret) {
-		mem_hotplug_done();
-		goto error_add_memory;
-	}
-	move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-			       align_start >> PAGE_SHIFT,
-			       align_size >> PAGE_SHIFT, NULL);
-	mem_hotplug_done();
-
-	/*
-	 * Initialization of the pages has been deferred until now in order
-	 * to allow us to do the work while not holding the hotplug lock.
-	 */
-	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-				align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT, &devmem->pagemap);
-
-	return 0;
-
-error_add_memory:
-	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
-error_radix:
-	hmm_devmem_radix_release(devmem->resource);
-error:
-	return ret;
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
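
The roughly 150 deleted lines above are not lost functionality: every step that hmm_devmem_pages_create() and hmm_devmem_release() open-coded is performed by devm_memremap_pages() as of this patch series. A rough correspondence (a sketch based on the 4.20-era kernel/memremap.c, not part of this diff):

	/*
	 * result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	 *
	 * - rejects ranges intersecting System RAM (region_intersects(),
	 *   as the deleted REGION_MIXED/REGION_INTERSECTS checks did)
	 * - tracks live pagemaps for collision detection, replacing the
	 *   private hmm_devmem_radix tree and hmm_devmem_lock
	 * - hotplugs the section-aligned range under mem_hotplug_begin(),
	 *   using add_pages() for private memory and arch_add_memory()
	 *   for CPU-addressable memory
	 * - initializes the ZONE_DEVICE struct pages via
	 *   memmap_init_zone_device() outside the hotplug lock
	 * - registers a devm release action that invokes pagemap.kill and
	 *   unplugs the range, replacing hmm_devmem_release()
	 */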
@@ -1190,6 +1041,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 {
 	struct hmm_devmem *devmem;
 	resource_size_t addr;
+	void *result;
 	int ret;
 
 	dev_pagemap_get_ops();
@@ -1244,14 +1096,18 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
 
-	ret = hmm_devmem_pages_create(devmem);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
-	if (ret)
-		return ERR_PTR(ret);
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.res = *devmem->resource;
+	devmem->pagemap.page_fault = hmm_devmem_fault;
+	devmem->pagemap.page_free = hmm_devmem_free;
+	devmem->pagemap.altmap_valid = false;
+	devmem->pagemap.ref = &devmem->ref;
+	devmem->pagemap.data = devmem;
+	devmem->pagemap.kill = hmm_devmem_ref_kill;
 
+	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
+	if (IS_ERR(result))
+		return result;
 	return devmem;
 }
 EXPORT_SYMBOL(hmm_devmem_add);
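
Nothing changes for callers: hmm_devmem_add() still returns the devmem handle or an ERR_PTR, and teardown remains devm-managed. A minimal caller sketch (the driver names, struct mydev, and the size are hypothetical, not from this patch):

	static int mydev_init_devmem(struct mydev *md, struct device *dev)
	{
		struct hmm_devmem *devmem;

		/* mydev_devmem_ops supplies the .fault and .free callbacks */
		devmem = hmm_devmem_add(&mydev_devmem_ops, dev, SZ_64M);
		if (IS_ERR(devmem))
			return PTR_ERR(devmem);

		/*
		 * Device pages span devmem->pfn_first .. devmem->pfn_last
		 * and are released automatically when @dev is unbound.
		 */
		md->devmem = devmem;
		return 0;
	}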
@@ -1261,6 +1117,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 				   struct resource *res)
 {
 	struct hmm_devmem *devmem;
+	void *result;
 	int ret;
 
 	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
@@ -1293,19 +1150,18 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
 
-	ret = hmm_devmem_pages_create(devmem);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = devm_add_action_or_reset(device, hmm_devmem_release, devmem);
-	if (ret)
-		return ERR_PTR(ret);
-
-	ret = devm_add_action_or_reset(device, hmm_devmem_ref_kill,
-				       &devmem->ref);
-	if (ret)
-		return ERR_PTR(ret);
+	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
+	devmem->pagemap.res = *devmem->resource;
+	devmem->pagemap.page_fault = hmm_devmem_fault;
+	devmem->pagemap.page_free = hmm_devmem_free;
+	devmem->pagemap.altmap_valid = false;
+	devmem->pagemap.ref = &devmem->ref;
+	devmem->pagemap.data = devmem;
+	devmem->pagemap.kill = hmm_devmem_ref_kill;
 
+	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
+	if (IS_ERR(result))
+		return result;
 	return devmem;
 }
 EXPORT_SYMBOL(hmm_devmem_add_resource);
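
After both conversions the two functions differ only in pagemap.type (MEMORY_DEVICE_PRIVATE vs. MEMORY_DEVICE_PUBLIC); the extra devm_add_action_or_reset(hmm_devmem_ref_kill) registration disappears because devm_memremap_pages() now owns invoking the kill callback. If a follow-up wanted to deduplicate the setup, it could look like this hypothetical helper (not part of this patch):

	static void *hmm_devmem_pages_remap(struct hmm_devmem *devmem,
					    enum memory_type type)
	{
		/* identical field setup to the two hunks above */
		devmem->pagemap.type = type;
		devmem->pagemap.res = *devmem->resource;
		devmem->pagemap.page_fault = hmm_devmem_fault;
		devmem->pagemap.page_free = hmm_devmem_free;
		devmem->pagemap.altmap_valid = false;
		devmem->pagemap.ref = &devmem->ref;
		devmem->pagemap.data = devmem;
		devmem->pagemap.kill = hmm_devmem_ref_kill;

		return devm_memremap_pages(devmem->device, &devmem->pagemap);
	}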