diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2008-06-26 15:27:56 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-06-27 04:12:16 -0400 |
commit | bd0e521158af407ec816aea070831d4ca7ae65e9 (patch) | |
tree | 9abdee17a693141f5dda48adefa73edb736f8082 /arch/x86/kernel/amd_iommu.c | |
parent | a19ae1eccfb2d97f4704b1a2b3d1d9905845dcac (diff) |
x86, AMD IOMMU: add functions to initialize unity mappings
This patch adds the functions which will initialize the unity mappings in the
device page tables.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 122 |
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index a24ee4a5203a..1d70f5e6f438 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -37,6 +37,9 @@ struct command { | |||
37 | u32 data[4]; | 37 | u32 data[4]; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | ||
41 | struct unity_map_entry *e); | ||
42 | |||
40 | static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd) | 43 | static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd) |
41 | { | 44 | { |
42 | u32 tail, head; | 45 | u32 tail, head; |
@@ -144,3 +147,122 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, | |||
144 | return 0; | 147 | return 0; |
145 | } | 148 | } |
146 | 149 | ||
150 | static int iommu_map(struct protection_domain *dom, | ||
151 | unsigned long bus_addr, | ||
152 | unsigned long phys_addr, | ||
153 | int prot) | ||
154 | { | ||
155 | u64 __pte, *pte, *page; | ||
156 | |||
157 | bus_addr = PAGE_ALIGN(bus_addr); | ||
158 | phys_addr = PAGE_ALIGN(bus_addr); | ||
159 | |||
160 | /* only support 512GB address spaces for now */ | ||
161 | if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) | ||
162 | return -EINVAL; | ||
163 | |||
164 | pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)]; | ||
165 | |||
166 | if (!IOMMU_PTE_PRESENT(*pte)) { | ||
167 | page = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
168 | if (!page) | ||
169 | return -ENOMEM; | ||
170 | *pte = IOMMU_L2_PDE(virt_to_phys(page)); | ||
171 | } | ||
172 | |||
173 | pte = IOMMU_PTE_PAGE(*pte); | ||
174 | pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)]; | ||
175 | |||
176 | if (!IOMMU_PTE_PRESENT(*pte)) { | ||
177 | page = (u64 *)get_zeroed_page(GFP_KERNEL); | ||
178 | if (!page) | ||
179 | return -ENOMEM; | ||
180 | *pte = IOMMU_L1_PDE(virt_to_phys(page)); | ||
181 | } | ||
182 | |||
183 | pte = IOMMU_PTE_PAGE(*pte); | ||
184 | pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)]; | ||
185 | |||
186 | if (IOMMU_PTE_PRESENT(*pte)) | ||
187 | return -EBUSY; | ||
188 | |||
189 | __pte = phys_addr | IOMMU_PTE_P; | ||
190 | if (prot & IOMMU_PROT_IR) | ||
191 | __pte |= IOMMU_PTE_IR; | ||
192 | if (prot & IOMMU_PROT_IW) | ||
193 | __pte |= IOMMU_PTE_IW; | ||
194 | |||
195 | *pte = __pte; | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static int iommu_for_unity_map(struct amd_iommu *iommu, | ||
201 | struct unity_map_entry *entry) | ||
202 | { | ||
203 | u16 bdf, i; | ||
204 | |||
205 | for (i = entry->devid_start; i <= entry->devid_end; ++i) { | ||
206 | bdf = amd_iommu_alias_table[i]; | ||
207 | if (amd_iommu_rlookup_table[bdf] == iommu) | ||
208 | return 1; | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int iommu_init_unity_mappings(struct amd_iommu *iommu) | ||
215 | { | ||
216 | struct unity_map_entry *entry; | ||
217 | int ret; | ||
218 | |||
219 | list_for_each_entry(entry, &amd_iommu_unity_map, list) { | ||
220 | if (!iommu_for_unity_map(iommu, entry)) | ||
221 | continue; | ||
222 | ret = dma_ops_unity_map(iommu->default_dom, entry); | ||
223 | if (ret) | ||
224 | return ret; | ||
225 | } | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, | ||
231 | struct unity_map_entry *e) | ||
232 | { | ||
233 | u64 addr; | ||
234 | int ret; | ||
235 | |||
236 | for (addr = e->address_start; addr < e->address_end; | ||
237 | addr += PAGE_SIZE) { | ||
238 | ret = iommu_map(&dma_dom->domain, addr, addr, e->prot); | ||
239 | if (ret) | ||
240 | return ret; | ||
241 | /* | ||
242 | * if unity mapping is in aperture range mark the page | ||
243 | * as allocated in the aperture | ||
244 | */ | ||
245 | if (addr < dma_dom->aperture_size) | ||
246 | __set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap); | ||
247 | } | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, | ||
253 | u16 devid) | ||
254 | { | ||
255 | struct unity_map_entry *e; | ||
256 | int ret; | ||
257 | |||
258 | list_for_each_entry(e, &amd_iommu_unity_map, list) { | ||
259 | if (!(devid >= e->devid_start && devid <= e->devid_end)) | ||
260 | continue; | ||
261 | ret = dma_ops_unity_map(dma_dom, e); | ||
262 | if (ret) | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | return 0; | ||
267 | } | ||
268 | |||