| author | Gerald Schaefer <gerald.schaefer@de.ibm.com> | 2015-08-27 09:33:03 -0400 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2015-10-06 06:20:24 -0400 |
| commit | 8128f23c436d0dd4f72412e1bf9256e424479dc3 (patch) | |
| tree | fa98a41a6b55c0108fa8f5724a8edc4d124a56ff /arch/s390/pci | |
| parent | 049e6dde7e57f0054fdc49102e7ef4830c698b46 (diff) | |
iommu/s390: Add iommu api for s390 pci devices
This adds an IOMMU API implementation for s390 PCI devices.
Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
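For context on how the exported helpers in the diff below are meant to be consumed: the patch un-statics the DMA translation-table routines and makes dma_update_cpu_trans() take an explicit table pointer, so an IOMMU driver can populate a per-domain table that is not tied to a single zpci_dev. The following is a minimal sketch of that usage; struct s390_domain, s390_domain_map_range and the header carrying the declarations are assumptions for illustration, not code from this patch.

```c
/*
 * Illustrative sketch only -- not part of this patch.  It assumes the
 * helpers exported below become visible to an IOMMU driver (e.g. via
 * asm/pci_dma.h) and that a domain carries its own root table.
 */
#include <linux/iommu.h>
#include <asm/pci_dma.h>

struct s390_domain {
	struct iommu_domain	domain;		/* generic IOMMU domain */
	unsigned long		*dma_table;	/* per-domain root table */
};

/*
 * Map a physically contiguous range into the domain's table; mirrors the
 * per-page loop in dma_update_trans(), but without any zpci_dev involved.
 */
static int s390_domain_map_range(struct s390_domain *s390_dom, unsigned long pa,
				 dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	void *page_addr = (void *)(pa & PAGE_MASK);
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		/* update one page-table entry in the domain's table */
		dma_update_cpu_trans(s390_dom->dma_table, page_addr,
				     dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}
	return 0;
}
```

The loop deliberately mirrors dma_update_trans() from the hunk below, with the domain's table standing in for zdev->dma_table.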
Diffstat (limited to 'arch/s390/pci')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/s390/pci/pci_dma.c | 37 |

1 file changed, 25 insertions(+), 12 deletions(-)
```diff
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37505b8b4093..37d10f74425a 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -24,7 +24,7 @@ static int zpci_refresh_global(struct zpci_dev *zdev)
 				  zdev->iommu_pages * PAGE_SIZE);
 }
 
-static unsigned long *dma_alloc_cpu_table(void)
+unsigned long *dma_alloc_cpu_table(void)
 {
 	unsigned long *table, *entry;
 
@@ -114,12 +114,12 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
 	return &pto[px];
 }
 
-static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
-				 dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
+			  dma_addr_t dma_addr, int flags)
 {
 	unsigned long *entry;
 
-	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+	entry = dma_walk_cpu_trans(dma_table, dma_addr);
 	if (!entry) {
 		WARN_ON_ONCE(1);
 		return;
@@ -156,7 +156,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 		goto no_refresh;
 
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
+		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
+				     flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -181,7 +182,7 @@ no_refresh:
 	return rc;
 }
 
-static void dma_free_seg_table(unsigned long entry)
+void dma_free_seg_table(unsigned long entry)
 {
 	unsigned long *sto = get_rt_sto(entry);
 	int sx;
@@ -193,21 +194,18 @@ static void dma_free_seg_table(unsigned long entry)
 	dma_free_cpu_table(sto);
 }
 
-static void dma_cleanup_tables(struct zpci_dev *zdev)
+void dma_cleanup_tables(unsigned long *table)
 {
-	unsigned long *table;
 	int rtx;
 
-	if (!zdev || !zdev->dma_table)
+	if (!table)
 		return;
 
-	table = zdev->dma_table;
 	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
 		if (reg_entry_isvalid(table[rtx]))
 			dma_free_seg_table(table[rtx]);
 
 	dma_free_cpu_table(table);
-	zdev->dma_table = NULL;
 }
 
 static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
@@ -416,6 +414,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 {
 	int rc;
 
+	/*
+	 * At this point, if the device is part of an IOMMU domain, this would
+	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
+	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+	 */
+	WARN_ON(zdev->s390_domain);
+
 	spin_lock_init(&zdev->iommu_bitmap_lock);
 	spin_lock_init(&zdev->dma_table_lock);
 
@@ -450,8 +455,16 @@ out_clean:
 
 void zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+	/*
+	 * At this point, if the device is part of an IOMMU domain, this would
+	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
+	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+	 */
+	WARN_ON(zdev->s390_domain);
+
 	zpci_unregister_ioat(zdev, 0);
-	dma_cleanup_tables(zdev);
+	dma_cleanup_tables(zdev->dma_table);
+	zdev->dma_table = NULL;
 	vfree(zdev->iommu_bitmap);
 	zdev->iommu_bitmap = NULL;
 	zdev->next_bit = 0;
```
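Likewise, dma_cleanup_tables() now takes the root table directly and leaves clearing zdev->dma_table to zpci_dma_exit_device(), so the same allocation and teardown helpers can manage a table owned by an IOMMU domain rather than by a device. Below is a rough sketch under that assumption, reusing the illustrative struct s390_domain from the earlier sketch; function names, locking and the eventual iommu_ops wiring are omitted or invented for illustration.

```c
/*
 * Illustrative sketch only -- not part of this patch.  Reuses the
 * struct s390_domain from the sketch above; error handling beyond
 * allocation failure is omitted.
 */
#include <linux/slab.h>
#include <asm/pci_dma.h>

static struct s390_domain *s390_domain_alloc_sketch(void)
{
	struct s390_domain *s390_dom;

	s390_dom = kzalloc(sizeof(*s390_dom), GFP_KERNEL);
	if (!s390_dom)
		return NULL;

	/* same allocator the DMA API path uses for zdev->dma_table */
	s390_dom->dma_table = dma_alloc_cpu_table();
	if (!s390_dom->dma_table) {
		kfree(s390_dom);
		return NULL;
	}
	return s390_dom;
}

static void s390_domain_free_sketch(struct s390_domain *s390_dom)
{
	/* frees every valid segment table, then the root table itself */
	dma_cleanup_tables(s390_dom->dma_table);
	kfree(s390_dom);
}
```

Because dma_cleanup_tables() still bails out on a NULL table, teardown stays safe even if the domain's table was never allocated. The WARN_ON(zdev->s390_domain) calls added in the patch then catch the remaining failure mode: a device still attached to such a domain while the DMA API tries to set up or tear down its own translation.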
