author    Dan Williams <dan.j.williams@intel.com>  2018-03-29 22:07:13 -0400
committer Dan Williams <dan.j.williams@intel.com>  2018-05-16 02:08:33 -0400
commit    5981690ddb8f72f9546a2d017a914cf56095fc1f (patch)
tree      0f7b9d0f919cf9eef997d8905bc79063ec30cc3e
parent    6d08b06e67cd117f6992c46611dfb4ce267cd71e (diff)
memremap: split devm_memremap_pages() and memremap() infrastructure
Currently, kernel/memremap.c contains generic code for supporting
memremap() (CONFIG_HAS_IOMEM) and devm_memremap_pages()
(CONFIG_ZONE_DEVICE). This causes ongoing build maintenance problems,
as additions to memremap.c, especially for the ZONE_DEVICE case, must
be carefully placed inside ifdef guards. Remove the need for these
ifdef guards by moving the ZONE_DEVICE support functions to their own
compilation unit.
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
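
To make the guard problem concrete: before this split, a helper used only
by the ZONE_DEVICE code had to be wrapped in kernel/memremap.c like the
following (the helper name is hypothetical, for illustration only):

#ifdef CONFIG_ZONE_DEVICE
static void pgmap_example_helper(struct dev_pagemap *pgmap)
{
        /* ZONE_DEVICE-only logic */
}
#endif /* CONFIG_ZONE_DEVICE */

After the split, the same helper can be added to kernel/memremap.c with no
guard at all, because kbuild compiles that file only when CONFIG_ZONE_DEVICE=y.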
-rw-r--r--  kernel/Makefile   |   3
-rw-r--r--  kernel/iomem.c    | 167
-rw-r--r--  kernel/memremap.c | 178
3 files changed, 171 insertions, 177 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index f85ae5dfa474..9b9241361311 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -112,7 +112,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
 
-obj-$(CONFIG_HAS_IOMEM) += memremap.o
+obj-$(CONFIG_HAS_IOMEM) += iomem.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 
 $(obj)/configs.o: $(obj)/config_data.h
 
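
The obj-$(CONFIG_...) idiom is what lets the new file layout replace the
ifdefs: kbuild substitutes each config symbol's value, so a file is compiled
only when its symbol is enabled. A sketch of how the two new lines expand for
one plausible configuration:

# With CONFIG_HAS_IOMEM=y and CONFIG_ZONE_DEVICE unset, kbuild sees:
#   obj-y += iomem.o      <- compiled and linked into vmlinux
#   obj-  += memremap.o   <- inert list, never built
obj-$(CONFIG_HAS_IOMEM) += iomem.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o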
diff --git a/kernel/iomem.c b/kernel/iomem.c
new file mode 100644
index 000000000000..f7525e14ebc6
--- /dev/null
+++ b/kernel/iomem.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#ifndef ioremap_cache
+/* temporary while we convert existing ioremap_cache users to memremap */
+__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
+{
+        return ioremap(offset, size);
+}
+#endif
+
+#ifndef arch_memremap_wb
+static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
+{
+        return (__force void *)ioremap_cache(offset, size);
+}
+#endif
+
+#ifndef arch_memremap_can_ram_remap
+static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+                                        unsigned long flags)
+{
+        return true;
+}
+#endif
+
+static void *try_ram_remap(resource_size_t offset, size_t size,
+                           unsigned long flags)
+{
+        unsigned long pfn = PHYS_PFN(offset);
+
+        /* In the simple case just return the existing linear address */
+        if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
+            arch_memremap_can_ram_remap(offset, size, flags))
+                return __va(offset);
+
+        return NULL; /* fallback to arch_memremap_wb */
+}
+
+/**
+ * memremap() - remap an iomem_resource as cacheable memory
+ * @offset: iomem resource start address
+ * @size: size of remap
+ * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
+ *         MEMREMAP_ENC, MEMREMAP_DEC
+ *
+ * memremap() is "ioremap" for cases where it is known that the resource
+ * being mapped does not have i/o side effects and the __iomem
+ * annotation is not applicable. In the case of multiple flags, the different
+ * mapping types will be attempted in the order listed below until one of
+ * them succeeds.
+ *
+ * MEMREMAP_WB - matches the default mapping for System RAM on
+ * the architecture. This is usually a read-allocate write-back cache.
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is
+ * RAM, memremap() will bypass establishing a new mapping and instead return
+ * a pointer into the direct map.
+ *
+ * MEMREMAP_WT - establish a mapping whereby writes either bypass the
+ * cache or are written through to memory and never exist in a
+ * cache-dirty state with respect to program visibility. Attempts to
+ * map System RAM with this mapping type will fail.
+ *
+ * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
+ * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
+ * uncached. Attempts to map System RAM with this mapping type will fail.
+ */
+void *memremap(resource_size_t offset, size_t size, unsigned long flags)
+{
+        int is_ram = region_intersects(offset, size,
+                                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+        void *addr = NULL;
+
+        if (!flags)
+                return NULL;
+
+        if (is_ram == REGION_MIXED) {
+                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
+                          &offset, (unsigned long) size);
+                return NULL;
+        }
+
+        /* Try all mapping types requested until one returns non-NULL */
+        if (flags & MEMREMAP_WB) {
+                /*
+                 * MEMREMAP_WB is special in that it can be satisfied
+                 * from the direct map. Some archs depend on the
+                 * capability of memremap() to autodetect cases where
+                 * the requested range is potentially in System RAM.
+                 */
+                if (is_ram == REGION_INTERSECTS)
+                        addr = try_ram_remap(offset, size, flags);
+                if (!addr)
+                        addr = arch_memremap_wb(offset, size);
+        }
+
+        /*
+         * If we don't have a mapping yet and other request flags are
+         * present then we will be attempting to establish a new virtual
+         * address mapping. Enforce that this mapping is not aliasing
+         * System RAM.
+         */
+        if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
+                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
+                          &offset, (unsigned long) size);
+                return NULL;
+        }
+
+        if (!addr && (flags & MEMREMAP_WT))
+                addr = ioremap_wt(offset, size);
+
+        if (!addr && (flags & MEMREMAP_WC))
+                addr = ioremap_wc(offset, size);
+
+        return addr;
+}
+EXPORT_SYMBOL(memremap);
+
+void memunmap(void *addr)
+{
+        if (is_vmalloc_addr(addr))
+                iounmap((void __iomem *) addr);
+}
+EXPORT_SYMBOL(memunmap);
+
+static void devm_memremap_release(struct device *dev, void *res)
+{
+        memunmap(*(void **)res);
+}
+
+static int devm_memremap_match(struct device *dev, void *res, void *match_data)
+{
+        return *(void **)res == match_data;
+}
+
+void *devm_memremap(struct device *dev, resource_size_t offset,
+                    size_t size, unsigned long flags)
+{
+        void **ptr, *addr;
+
+        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
+                                dev_to_node(dev));
+        if (!ptr)
+                return ERR_PTR(-ENOMEM);
+
+        addr = memremap(offset, size, flags);
+        if (addr) {
+                *ptr = addr;
+                devres_add(dev, ptr);
+        } else {
+                devres_free(ptr);
+                return ERR_PTR(-ENXIO);
+        }
+
+        return addr;
+}
+EXPORT_SYMBOL(devm_memremap);
+
+void devm_memunmap(struct device *dev, void *addr)
+{
+        WARN_ON(devres_release(dev, devm_memremap_release,
+                               devm_memremap_match, addr));
+}
+EXPORT_SYMBOL(devm_memunmap);
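
The kernel-doc for memremap() above spells out the flag fallback order (WB,
then WT, then WC). As a usage sketch, a driver that wants plain cacheable
access to a non-I/O range could combine that with the devres-managed variant;
EXAMPLE_PHYS_BASE and EXAMPLE_SIZE are placeholders, not real constants:

static int example_probe(struct device *dev)
{
        void *base;

        /*
         * Prefer a write-back mapping, accept write-combine as a
         * fallback; devres unmaps automatically at device teardown.
         */
        base = devm_memremap(dev, EXAMPLE_PHYS_BASE, EXAMPLE_SIZE,
                             MEMREMAP_WB | MEMREMAP_WC);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* Plain memory semantics: no readl()/writel() accessors needed. */
        memset(base, 0, EXAMPLE_SIZE);
        return 0;
}

Note that devm_memremap() reports failure with ERR_PTR() rather than NULL,
so the IS_ERR() check is required.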
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 895e6b76b25e..37a9604133f6 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -1,15 +1,5 @@
-/*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
 #include <linux/radix-tree.h>
 #include <linux/device.h>
 #include <linux/types.h>
@@ -20,169 +10,6 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 
-#ifndef ioremap_cache
-/* temporary while we convert existing ioremap_cache users to memremap */
-__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
-{
-        return ioremap(offset, size);
-}
-#endif
-
-#ifndef arch_memremap_wb
-static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
-{
-        return (__force void *)ioremap_cache(offset, size);
-}
-#endif
-
-#ifndef arch_memremap_can_ram_remap
-static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
-                                        unsigned long flags)
-{
-        return true;
-}
-#endif
-
-static void *try_ram_remap(resource_size_t offset, size_t size,
-                           unsigned long flags)
-{
-        unsigned long pfn = PHYS_PFN(offset);
-
-        /* In the simple case just return the existing linear address */
-        if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
-            arch_memremap_can_ram_remap(offset, size, flags))
-                return __va(offset);
-
-        return NULL; /* fallback to arch_memremap_wb */
-}
-
-/**
- * memremap() - remap an iomem_resource as cacheable memory
- * @offset: iomem resource start address
- * @size: size of remap
- * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
- *         MEMREMAP_ENC, MEMREMAP_DEC
- *
- * memremap() is "ioremap" for cases where it is known that the resource
- * being mapped does not have i/o side effects and the __iomem
- * annotation is not applicable. In the case of multiple flags, the different
- * mapping types will be attempted in the order listed below until one of
- * them succeeds.
- *
- * MEMREMAP_WB - matches the default mapping for System RAM on
- * the architecture. This is usually a read-allocate write-back cache.
- * Moreover, if MEMREMAP_WB is specified and the requested remap region is
- * RAM, memremap() will bypass establishing a new mapping and instead return
- * a pointer into the direct map.
- *
- * MEMREMAP_WT - establish a mapping whereby writes either bypass the
- * cache or are written through to memory and never exist in a
- * cache-dirty state with respect to program visibility. Attempts to
- * map System RAM with this mapping type will fail.
- *
- * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
- * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
- * uncached. Attempts to map System RAM with this mapping type will fail.
- */
-void *memremap(resource_size_t offset, size_t size, unsigned long flags)
-{
-        int is_ram = region_intersects(offset, size,
-                                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
-        void *addr = NULL;
-
-        if (!flags)
-                return NULL;
-
-        if (is_ram == REGION_MIXED) {
-                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
-                          &offset, (unsigned long) size);
-                return NULL;
-        }
-
-        /* Try all mapping types requested until one returns non-NULL */
-        if (flags & MEMREMAP_WB) {
-                /*
-                 * MEMREMAP_WB is special in that it can be satisfied
-                 * from the direct map. Some archs depend on the
-                 * capability of memremap() to autodetect cases where
-                 * the requested range is potentially in System RAM.
-                 */
-                if (is_ram == REGION_INTERSECTS)
-                        addr = try_ram_remap(offset, size, flags);
-                if (!addr)
-                        addr = arch_memremap_wb(offset, size);
-        }
-
-        /*
-         * If we don't have a mapping yet and other request flags are
-         * present then we will be attempting to establish a new virtual
-         * address mapping. Enforce that this mapping is not aliasing
-         * System RAM.
-         */
-        if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
-                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
-                          &offset, (unsigned long) size);
-                return NULL;
-        }
-
-        if (!addr && (flags & MEMREMAP_WT))
-                addr = ioremap_wt(offset, size);
-
-        if (!addr && (flags & MEMREMAP_WC))
-                addr = ioremap_wc(offset, size);
-
-        return addr;
-}
-EXPORT_SYMBOL(memremap);
-
-void memunmap(void *addr)
-{
-        if (is_vmalloc_addr(addr))
-                iounmap((void __iomem *) addr);
-}
-EXPORT_SYMBOL(memunmap);
-
-static void devm_memremap_release(struct device *dev, void *res)
-{
-        memunmap(*(void **)res);
-}
-
-static int devm_memremap_match(struct device *dev, void *res, void *match_data)
-{
-        return *(void **)res == match_data;
-}
-
-void *devm_memremap(struct device *dev, resource_size_t offset,
-                    size_t size, unsigned long flags)
-{
-        void **ptr, *addr;
-
-        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
-                                dev_to_node(dev));
-        if (!ptr)
-                return ERR_PTR(-ENOMEM);
-
-        addr = memremap(offset, size, flags);
-        if (addr) {
-                *ptr = addr;
-                devres_add(dev, ptr);
-        } else {
-                devres_free(ptr);
-                return ERR_PTR(-ENXIO);
-        }
-
-        return addr;
-}
-EXPORT_SYMBOL(devm_memremap);
-
-void devm_memunmap(struct device *dev, void *addr)
-{
-        WARN_ON(devres_release(dev, devm_memremap_release,
-                               devm_memremap_match, addr));
-}
-EXPORT_SYMBOL(devm_memunmap);
-
-#ifdef CONFIG_ZONE_DEVICE
 static DEFINE_MUTEX(pgmap_lock);
 static RADIX_TREE(pgmap_radix, GFP_KERNEL);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -473,7 +300,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 
 	return pgmap;
 }
-#endif /* CONFIG_ZONE_DEVICE */
 
 #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
 void put_zone_device_private_or_public_page(struct page *page)
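
The devm_memremap()/devm_memunmap() pair moved into kernel/iomem.c is a
standard instance of the devres pattern: devres_alloc_node() allocates a
release record that stores the mapped pointer, devres_add() ties it to the
device's lifetime, and devres_release() matches and releases it on explicit
unmap. A minimal sketch of the same pattern for an arbitrary resource, with
all foo_* names hypothetical:

#include <linux/device.h>
#include <linux/err.h>

struct foo;                             /* hypothetical resource type */
struct foo *foo_create(void);           /* hypothetical constructor */
void foo_destroy(struct foo *f);        /* hypothetical destructor */

static void devm_foo_release(struct device *dev, void *res)
{
        /* res points at the slot devres allocated for us */
        foo_destroy(*(struct foo **)res);
}

struct foo *devm_foo_create(struct device *dev)
{
        struct foo **ptr, *f;

        ptr = devres_alloc(devm_foo_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        f = foo_create();
        if (!f) {
                devres_free(ptr);       /* nothing registered yet */
                return ERR_PTR(-ENXIO);
        }

        *ptr = f;
        devres_add(dev, ptr);           /* destroyed at driver detach */
        return f;
}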