diff options
author | Alistair Popple <alistair@popple.id.au> | 2013-12-09 02:17:01 -0500 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2013-12-29 22:17:06 -0500 |
commit | e589a4404fa06730355de204d3d136ed9bbc7dea (patch) | |
tree | a7b4d1dad98a06d89e652194947735db7eec02b5 /drivers | |
parent | fee26f6d5d68a8815b20c32d15dd70d5384eb937 (diff) |
powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes
the names of the IOMMU_PAGE_* macros to reflect the hardcoded values. A
future patch will use the existing names to support dynamic page
sizes.
Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/net/ethernet/ibm/ibmveth.c | 9 | ||||
-rw-r--r-- | drivers/vfio/vfio_iommu_spapr_tce.c | 28 |
2 files changed, 19 insertions, 18 deletions
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 952d795230a4..f7d7538b6bd9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -1282,24 +1282,25 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) | |||
1282 | 1282 | ||
1283 | /* netdev inits at probe time along with the structures we need below*/ | 1283 | /* netdev inits at probe time along with the structures we need below*/ |
1284 | if (netdev == NULL) | 1284 | if (netdev == NULL) |
1285 | return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); | 1285 | return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT); |
1286 | 1286 | ||
1287 | adapter = netdev_priv(netdev); | 1287 | adapter = netdev_priv(netdev); |
1288 | 1288 | ||
1289 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; | 1289 | ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; |
1290 | ret += IOMMU_PAGE_ALIGN(netdev->mtu); | 1290 | ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu); |
1291 | 1291 | ||
1292 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 1292 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1293 | /* add the size of the active receive buffers */ | 1293 | /* add the size of the active receive buffers */ |
1294 | if (adapter->rx_buff_pool[i].active) | 1294 | if (adapter->rx_buff_pool[i].active) |
1295 | ret += | 1295 | ret += |
1296 | adapter->rx_buff_pool[i].size * | 1296 | adapter->rx_buff_pool[i].size * |
1297 | IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. | 1297 | IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i]. |
1298 | buff_size); | 1298 | buff_size); |
1299 | rxqentries += adapter->rx_buff_pool[i].size; | 1299 | rxqentries += adapter->rx_buff_pool[i].size; |
1300 | } | 1300 | } |
1301 | /* add the size of the receive queue entries */ | 1301 | /* add the size of the receive queue entries */ |
1302 | ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); | 1302 | ret += IOMMU_PAGE_ALIGN_4K( |
1303 | rxqentries * sizeof(struct ibmveth_rx_q_entry)); | ||
1303 | 1304 | ||
1304 | return ret; | 1305 | return ret; |
1305 | } | 1306 | } |
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index bdae7a04af75..a84788ba662c 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c | |||
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container) | |||
81 | * enforcing the limit based on the max that the guest can map. | 81 | * enforcing the limit based on the max that the guest can map. |
82 | */ | 82 | */ |
83 | down_write(¤t->mm->mmap_sem); | 83 | down_write(¤t->mm->mmap_sem); |
84 | npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; | 84 | npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; |
85 | locked = current->mm->locked_vm + npages; | 85 | locked = current->mm->locked_vm + npages; |
86 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | 86 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
87 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { | 87 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { |
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container) | |||
110 | 110 | ||
111 | down_write(¤t->mm->mmap_sem); | 111 | down_write(¤t->mm->mmap_sem); |
112 | current->mm->locked_vm -= (container->tbl->it_size << | 112 | current->mm->locked_vm -= (container->tbl->it_size << |
113 | IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; | 113 | IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; |
114 | up_write(¤t->mm->mmap_sem); | 114 | up_write(¤t->mm->mmap_sem); |
115 | } | 115 | } |
116 | 116 | ||
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data, | |||
174 | if (info.argsz < minsz) | 174 | if (info.argsz < minsz) |
175 | return -EINVAL; | 175 | return -EINVAL; |
176 | 176 | ||
177 | info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT; | 177 | info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K; |
178 | info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT; | 178 | info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K; |
179 | info.flags = 0; | 179 | info.flags = 0; |
180 | 180 | ||
181 | if (copy_to_user((void __user *)arg, &info, minsz)) | 181 | if (copy_to_user((void __user *)arg, &info, minsz)) |
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data, | |||
205 | VFIO_DMA_MAP_FLAG_WRITE)) | 205 | VFIO_DMA_MAP_FLAG_WRITE)) |
206 | return -EINVAL; | 206 | return -EINVAL; |
207 | 207 | ||
208 | if ((param.size & ~IOMMU_PAGE_MASK) || | 208 | if ((param.size & ~IOMMU_PAGE_MASK_4K) || |
209 | (param.vaddr & ~IOMMU_PAGE_MASK)) | 209 | (param.vaddr & ~IOMMU_PAGE_MASK_4K)) |
210 | return -EINVAL; | 210 | return -EINVAL; |
211 | 211 | ||
212 | /* iova is checked by the IOMMU API */ | 212 | /* iova is checked by the IOMMU API */ |
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data, | |||
220 | if (ret) | 220 | if (ret) |
221 | return ret; | 221 | return ret; |
222 | 222 | ||
223 | for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) { | 223 | for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) { |
224 | ret = iommu_put_tce_user_mode(tbl, | 224 | ret = iommu_put_tce_user_mode(tbl, |
225 | (param.iova >> IOMMU_PAGE_SHIFT) + i, | 225 | (param.iova >> IOMMU_PAGE_SHIFT_4K) + i, |
226 | tce); | 226 | tce); |
227 | if (ret) | 227 | if (ret) |
228 | break; | 228 | break; |
229 | tce += IOMMU_PAGE_SIZE; | 229 | tce += IOMMU_PAGE_SIZE_4K; |
230 | } | 230 | } |
231 | if (ret) | 231 | if (ret) |
232 | iommu_clear_tces_and_put_pages(tbl, | 232 | iommu_clear_tces_and_put_pages(tbl, |
233 | param.iova >> IOMMU_PAGE_SHIFT, i); | 233 | param.iova >> IOMMU_PAGE_SHIFT_4K, i); |
234 | 234 | ||
235 | iommu_flush_tce(tbl); | 235 | iommu_flush_tce(tbl); |
236 | 236 | ||
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data, | |||
256 | if (param.flags) | 256 | if (param.flags) |
257 | return -EINVAL; | 257 | return -EINVAL; |
258 | 258 | ||
259 | if (param.size & ~IOMMU_PAGE_MASK) | 259 | if (param.size & ~IOMMU_PAGE_MASK_4K) |
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | 261 | ||
262 | ret = iommu_tce_clear_param_check(tbl, param.iova, 0, | 262 | ret = iommu_tce_clear_param_check(tbl, param.iova, 0, |
263 | param.size >> IOMMU_PAGE_SHIFT); | 263 | param.size >> IOMMU_PAGE_SHIFT_4K); |
264 | if (ret) | 264 | if (ret) |
265 | return ret; | 265 | return ret; |
266 | 266 | ||
267 | ret = iommu_clear_tces_and_put_pages(tbl, | 267 | ret = iommu_clear_tces_and_put_pages(tbl, |
268 | param.iova >> IOMMU_PAGE_SHIFT, | 268 | param.iova >> IOMMU_PAGE_SHIFT_4K, |
269 | param.size >> IOMMU_PAGE_SHIFT); | 269 | param.size >> IOMMU_PAGE_SHIFT_4K); |
270 | iommu_flush_tce(tbl); | 270 | iommu_flush_tce(tbl); |
271 | 271 | ||
272 | return ret; | 272 | return ret; |