aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vfio
diff options
context:
space:
mode:
authorAlistair Popple <alistair@popple.id.au>2013-12-09 02:17:01 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2013-12-29 22:17:06 -0500
commite589a4404fa06730355de204d3d136ed9bbc7dea (patch)
treea7b4d1dad98a06d89e652194947735db7eec02b5 /drivers/vfio
parentfee26f6d5d68a8815b20c32d15dd70d5384eb937 (diff)
powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A future patch will use the existing names to support dynamic page sizes. Signed-off-by: Alistair Popple <alistair@popple.id.au> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers/vfio')
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
 	 * enforcing the limit based on the max that the guest can map.
 	 */
 	down_write(&current->mm->mmap_sem);
-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	locked = current->mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	down_write(&current->mm->mmap_sem);
 	current->mm->locked_vm -= (container->tbl->it_size <<
-			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	up_write(&current->mm->mmap_sem);
 }
 
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (info.argsz < minsz)
 			return -EINVAL;
 
-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
 		info.flags = 0;
 
 		if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
-		if ((param.size & ~IOMMU_PAGE_MASK) ||
-				(param.vaddr & ~IOMMU_PAGE_MASK))
+		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+				(param.vaddr & ~IOMMU_PAGE_MASK_4K))
 			return -EINVAL;
 
 		/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
 			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT) + i,
+					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
 					tce);
 			if (ret)
 				break;
-			tce += IOMMU_PAGE_SIZE;
+			tce += IOMMU_PAGE_SIZE_4K;
 		}
 		if (ret)
 			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT, i);
+					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
 		iommu_flush_tce(tbl);
 
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
-		if (param.size & ~IOMMU_PAGE_MASK)
+		if (param.size & ~IOMMU_PAGE_MASK_4K)
 			return -EINVAL;
 
 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		if (ret)
 			return ret;
 
 		ret = iommu_clear_tces_and_put_pages(tbl,
-				param.iova >> IOMMU_PAGE_SHIFT,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
 
 		return ret;