author	Alexey Kardashevskiy <aik@ozlabs.ru>	2015-06-05 02:35:03 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-06-11 01:14:56 -0400
commit	649354b75dca1ebcb55d8db41e1d6b59ef69ac77 (patch)
tree	972d11c3b8a4ab16c3e8f82febb459e4e8af8025 /drivers/vfio
parent	3c56e822f8fb0105949d04bda0e549e9d08713cd (diff)
vfio: powerpc/spapr: Moving pinning/unpinning to helpers
This is a pretty mechanical patch to make the next patches simpler.

The new tce_iommu_unuse_page() helper does put_page() now, but it may skip that once the memory registering patch is applied.

While here, this removes unnecessary checks on the value returned by pfn_to_page(), as it cannot possibly return NULL.

This moves tce_iommu_disable() later so that tce_iommu_clear() knows whether the container has been enabled; if it has not been, put_page() must not be called on TCEs from the TCE table. This situation is not yet possible, but it will be once the KVM acceleration patchset is applied.

This changes the code to work with physical addresses rather than linear mapping addresses, for better readability. Following patches will add an xchg() callback for an IOMMU table which will accept/return physical addresses (unlike the current tce_build()), eliminating the redundant conversions.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for the vfio related changes]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
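For the last paragraph above, an exchange-style table callback that accepts and returns host physical addresses might look roughly like the sketch below. This is an illustrative assumption about the follow-up series, not part of this patch: the struct name, callback name, and argument list are guesses for illustration only.

	/*
	 * Illustrative sketch only -- not part of this patch. The follow-up
	 * series is described as adding an xchg()-style callback that works
	 * directly in physical addresses; the exact shape is assumed here.
	 */
	struct iommu_table_ops_sketch {
		/*
		 * Install the TCE for *hpa at index and return the previous
		 * host physical address through the same pointer, so callers
		 * never convert to/from linear-mapping addresses.
		 */
		int (*xchg)(struct iommu_table *tbl, long index,
				unsigned long *hpa,
				enum dma_data_direction *direction);
	};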
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/vfio_iommu_spapr_tce.c	61
1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 5bbdf378fd59..cf5d4a159ce9 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -191,69 +191,90 @@ static void tce_iommu_release(void *iommu_data)
 	struct tce_container *container = iommu_data;
 
 	WARN_ON(container->tbl && !container->tbl->it_group);
-	tce_iommu_disable(container);
 
 	if (container->tbl && container->tbl->it_group)
 		tce_iommu_detach_group(iommu_data, container->tbl->it_group);
 
+	tce_iommu_disable(container);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
 }
 
+static void tce_iommu_unuse_page(struct tce_container *container,
+		unsigned long oldtce)
+{
+	struct page *page;
+
+	if (!(oldtce & (TCE_PCI_READ | TCE_PCI_WRITE)))
+		return;
+
+	page = pfn_to_page(oldtce >> PAGE_SHIFT);
+
+	if (oldtce & TCE_PCI_WRITE)
+		SetPageDirty(page);
+
+	put_page(page);
+}
+
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages)
 {
 	unsigned long oldtce;
-	struct page *page;
 
 	for ( ; pages; --pages, ++entry) {
 		oldtce = iommu_clear_tce(tbl, entry);
 		if (!oldtce)
 			continue;
 
-		page = pfn_to_page(oldtce >> PAGE_SHIFT);
-		WARN_ON(!page);
-		if (page) {
-			if (oldtce & TCE_PCI_WRITE)
-				SetPageDirty(page);
-			put_page(page);
-		}
+		tce_iommu_unuse_page(container, oldtce);
 	}
 
 	return 0;
 }
 
+static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
+{
+	struct page *page = NULL;
+	enum dma_data_direction direction = iommu_tce_direction(tce);
+
+	if (get_user_pages_fast(tce & PAGE_MASK, 1,
+			direction != DMA_TO_DEVICE, &page) != 1)
+		return -EFAULT;
+
+	*hpa = __pa((unsigned long) page_address(page));
+
+	return 0;
+}
+
 static long tce_iommu_build(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long tce, unsigned long pages)
 {
 	long i, ret = 0;
-	struct page *page = NULL;
-	unsigned long hva;
+	struct page *page;
+	unsigned long hpa;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	for (i = 0; i < pages; ++i) {
 		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
 
-		ret = get_user_pages_fast(tce & PAGE_MASK, 1,
-				direction != DMA_TO_DEVICE, &page);
-		if (unlikely(ret != 1)) {
-			ret = -EFAULT;
+		ret = tce_iommu_use_page(tce, &hpa);
+		if (ret)
 			break;
-		}
 
+		page = pfn_to_page(hpa >> PAGE_SHIFT);
 		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
 			ret = -EPERM;
 			break;
 		}
 
-		hva = (unsigned long) page_address(page) + offset;
-
-		ret = iommu_tce_build(tbl, entry + i, hva, direction);
+		hpa |= offset;
+		ret = iommu_tce_build(tbl, entry + i, (unsigned long) __va(hpa),
+				direction);
 		if (ret) {
-			put_page(page);
+			tce_iommu_unuse_page(container, hpa);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);