author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-28 00:11:26 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-28 00:11:26 -0500
commit    1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
tree      d223c79cc33ca1d890d264a202a1dd9c29655039 /drivers/vfio
parent    d12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent    7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "So here's my next branch for powerpc. A bit late as I was on vacation
  last week. It's mostly the same stuff that was in next already, I just
  added two patches today which are the wiring up of lockref for powerpc,
  which for some reason fell through the cracks last time and is trivial.

  The highlights are, in addition to a bunch of bug fixes:

   - Reworked Machine Check handling on kernels running without a
     hypervisor (or acting as a hypervisor). Provides hooks to handle
     some errors in real mode such as TLB errors, handle SLB errors,
     etc...

   - Support for retrieving memory error information from the service
     processor on IBM servers running without a hypervisor and routing
     them to the memory poison infrastructure.

   - _PAGE_NUMA support on server processors

   - 32-bit BookE relocatable kernel support

   - FSL e6500 hardware tablewalk support

   - A bunch of new/revived board support

   - FSL e6500 deeper idle states and altivec powerdown support

  You'll notice a generic mm change here, it has been acked by the
  relevant authorities and is a pre-req for our _PAGE_NUMA support"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits)
  powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked()
  powerpc: Add support for the optimised lockref implementation
  powerpc/powernv: Call OPAL sync before kexec'ing
  powerpc/eeh: Escalate error on non-existing PE
  powerpc/eeh: Handle multiple EEH errors
  powerpc: Fix transactional FP/VMX/VSX unavailable handlers
  powerpc: Don't corrupt transactional state when using FP/VMX in kernel
  powerpc: Reclaim two unused thread_info flag bits
  powerpc: Fix races with irq_work
  Move precessing of MCE queued event out from syscall exit path.
  pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines
  powerpc: Make add_system_ram_resources() __init
  powerpc: add SATA_MV to ppc64_defconfig
  powerpc/powernv: Increase candidate fw image size
  powerpc: Add debug checks to catch invalid cpu-to-node mappings
  powerpc: Fix the setup of CPU-to-Node mappings during CPU online
  powerpc/iommu: Don't detach device without IOMMU group
  powerpc/eeh: Hotplug improvement
  powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space
  powerpc/eeh: Add restore_config operation
  ...
Diffstat (limited to 'drivers/vfio')
-rw-r--r--    drivers/vfio/vfio_iommu_spapr_tce.c    28
1 file changed, 14 insertions(+), 14 deletions(-)
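
Every one of the 14 changed lines below is the same mechanical substitution: the generic IOMMU_PAGE_SHIFT/SIZE/MASK macros become their _4K-suffixed equivalents, making the hardcoded 4K TCE page size explicit. As a minimal compilable sketch, assuming definitions along these lines (hypothetical stand-ins; the real definitions live in arch/powerpc/include/asm/iommu.h):

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the renamed powerpc constants. */
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K  (1UL << IOMMU_PAGE_SHIFT_4K)   /* 4096 bytes */
#define IOMMU_PAGE_MASK_4K  (~(IOMMU_PAGE_SIZE_4K - 1))    /* low 12 bits clear */

int main(void)
{
	uint64_t size = 2 * IOMMU_PAGE_SIZE_4K;          /* two 4K IOMMU pages */
	assert((size & ~IOMMU_PAGE_MASK_4K) == 0);       /* aligned: passes the ioctl check */
	assert(((size + 1) & ~IOMMU_PAGE_MASK_4K) != 0); /* misaligned: would get -EINVAL */
	return 0;
}

The size-and-mask test at the end is the same alignment check the MAP/UNMAP ioctl hunks below apply to param.size and param.vaddr.
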
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
 	 * enforcing the limit based on the max that the guest can map.
 	 */
 	down_write(&current->mm->mmap_sem);
-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	locked = current->mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
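
This hunk is a unit conversion behind the RLIMIT_MEMLOCK accounting: it_size counts TCE (IOMMU) pages, so shifting left by IOMMU_PAGE_SHIFT_4K yields bytes and shifting right by PAGE_SHIFT yields system pages, the unit locked_vm is kept in. A sketch with made-up sizes (the 64K PAGE_SHIFT and the table size are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define IOMMU_PAGE_SHIFT_4K 12   /* hypothetical stand-in, as above */
#define PAGE_SHIFT          16   /* e.g. a 64K-page powerpc kernel */

int main(void)
{
	/* it_size counts TCE pages; << converts to bytes, >> PAGE_SHIFT to
	 * system pages, the unit of locked_vm and RLIMIT_MEMLOCK. */
	uint64_t it_size = 1UL << 19;  /* hypothetical table: 512K TCEs = 2G window */
	uint64_t npages  = (it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
	printf("%llu system pages charged to locked_vm\n",
	       (unsigned long long)npages);  /* prints 32768 */
	return 0;
}
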
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	down_write(&current->mm->mmap_sem);
 	current->mm->locked_vm -= (container->tbl->it_size <<
-			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	up_write(&current->mm->mmap_sem);
 }
 
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (info.argsz < minsz)
 			return -EINVAL;
 
-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
 		info.flags = 0;
 
 		if (copy_to_user((void __user *)arg, &info, minsz))
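
VFIO_IOMMU_SPAPR_TCE_GET_INFO reports the 32-bit DMA window to userspace in bytes, so it_offset and it_size (both counted in IOMMU pages) are shifted up. A sketch of the userspace view under assumed window values; the field names follow struct vfio_iommu_spapr_tce_info in linux/vfio.h:

#include <stdint.h>
#include <stdio.h>

#define IOMMU_PAGE_SHIFT_4K 12   /* hypothetical stand-in, as above */

int main(void)
{
	/* Hypothetical table: starts at IOMMU page 0, spans 512K TCEs. */
	uint64_t it_offset = 0, it_size = 1UL << 19;
	uint32_t dma32_window_start = it_offset << IOMMU_PAGE_SHIFT_4K;
	uint32_t dma32_window_size  = it_size << IOMMU_PAGE_SHIFT_4K;
	printf("DMA32 window [0x%x, +0x%x)\n",
	       dma32_window_start, dma32_window_size);  /* [0x0, +0x80000000) */
	return 0;
}
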
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
-		if ((param.size & ~IOMMU_PAGE_MASK) ||
-				(param.vaddr & ~IOMMU_PAGE_MASK))
+		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+				(param.vaddr & ~IOMMU_PAGE_MASK_4K))
 			return -EINVAL;
 
 		/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
 			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT) + i,
+					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
 					tce);
 			if (ret)
 				break;
-			tce += IOMMU_PAGE_SIZE;
+			tce += IOMMU_PAGE_SIZE_4K;
 		}
 		if (ret)
 			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT, i);
+					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
 		iommu_flush_tce(tbl);
 
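
The MAP_DMA hunk walks the request one 4K IOMMU page at a time, programming one TCE per page, advancing the user address by IOMMU_PAGE_SIZE_4K, and unwinding the partial mapping if any put fails. A standalone sketch of that loop shape, with hypothetical stubs in place of the kernel helpers:

#include <stdint.h>

#define IOMMU_PAGE_SHIFT_4K 12   /* hypothetical stand-in, as above */
#define IOMMU_PAGE_SIZE_4K  (1UL << IOMMU_PAGE_SHIFT_4K)

/* Hypothetical stubs standing in for iommu_put_tce_user_mode() and
 * iommu_clear_tces_and_put_pages(); they only model success here. */
static int put_tce(uint64_t entry, uint64_t tce) { (void)entry; (void)tce; return 0; }
static void clear_tces(uint64_t entry, uint64_t npages) { (void)entry; (void)npages; }

/* The loop shape from the hunk above: one TCE per 4K IOMMU page. */
static int map_range(uint64_t iova, uint64_t tce, uint64_t size)
{
	uint64_t i;
	int ret = 0;

	for (i = 0; i < (size >> IOMMU_PAGE_SHIFT_4K); ++i) {
		ret = put_tce((iova >> IOMMU_PAGE_SHIFT_4K) + i, tce);
		if (ret)
			break;
		tce += IOMMU_PAGE_SIZE_4K;   /* next user page */
	}
	if (ret)
		clear_tces(iova >> IOMMU_PAGE_SHIFT_4K, i);  /* unwind partial map */
	return ret;
}

int main(void)
{
	return map_range(0, 0x10000, 2 * IOMMU_PAGE_SIZE_4K);
}
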
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
-		if (param.size & ~IOMMU_PAGE_MASK)
+		if (param.size & ~IOMMU_PAGE_MASK_4K)
 			return -EINVAL;
 
 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		if (ret)
 			return ret;
 
 		ret = iommu_clear_tces_and_put_pages(tbl,
-				param.iova >> IOMMU_PAGE_SHIFT,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
 
 		return ret;