aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-28 00:11:26 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-28 00:11:26 -0500
commit1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
treed223c79cc33ca1d890d264a202a1dd9c29655039 /drivers
parentd12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt: "So here's my next branch for powerpc. A bit late as I was on vacation last week. It's mostly the same stuff that was in next already, I just added two patches today which are the wiring up of lockref for powerpc, which for some reason fell through the cracks last time and is trivial. The highlights are, in addition to a bunch of bug fixes: - Reworked Machine Check handling on kernels running without a hypervisor (or acting as a hypervisor). Provides hooks to handle some errors in real mode such as TLB errors, handle SLB errors, etc... - Support for retrieving memory error information from the service processor on IBM servers running without a hypervisor and routing them to the memory poison infrastructure. - _PAGE_NUMA support on server processors - 32-bit BookE relocatable kernel support - FSL e6500 hardware tablewalk support - A bunch of new/revived board support - FSL e6500 deeper idle states and altivec powerdown support You'll notice a generic mm change here, it has been acked by the relevant authorities and is a pre-req for our _PAGE_NUMA support" * 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits) powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked() powerpc: Add support for the optimised lockref implementation powerpc/powernv: Call OPAL sync before kexec'ing powerpc/eeh: Escalate error on non-existing PE powerpc/eeh: Handle multiple EEH errors powerpc: Fix transactional FP/VMX/VSX unavailable handlers powerpc: Don't corrupt transactional state when using FP/VMX in kernel powerpc: Reclaim two unused thread_info flag bits powerpc: Fix races with irq_work Move processing of MCE queued event out from syscall exit path. 
pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines powerpc: Make add_system_ram_resources() __init powerpc: add SATA_MV to ppc64_defconfig powerpc/powernv: Increase candidate fw image size powerpc: Add debug checks to catch invalid cpu-to-node mappings powerpc: Fix the setup of CPU-to-Node mappings during CPU online powerpc/iommu: Don't detach device without IOMMU group powerpc/eeh: Hotplug improvement powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space powerpc/eeh: Add restore_config operation ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c12
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c28
5 files changed, 25 insertions, 21 deletions
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 9ef32b3df91f..590214ba736c 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -133,7 +133,7 @@ static int wf_lm75_probe(struct i2c_client *client,
133 lm->inited = 0; 133 lm->inited = 0;
134 lm->ds1775 = ds1775; 134 lm->ds1775 = ds1775;
135 lm->i2c = client; 135 lm->i2c = client;
136 lm->sens.name = (char *)name; /* XXX fix constness in structure */ 136 lm->sens.name = name;
137 lm->sens.ops = &wf_lm75_ops; 137 lm->sens.ops = &wf_lm75_ops;
138 i2c_set_clientdata(client, lm); 138 i2c_set_clientdata(client, lm);
139 139
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 945a25b2f31e..87e439b10318 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -95,7 +95,7 @@ static int wf_max6690_probe(struct i2c_client *client,
95 } 95 }
96 96
97 max->i2c = client; 97 max->i2c = client;
98 max->sens.name = (char *)name; /* XXX fix constness in structure */ 98 max->sens.name = name;
99 max->sens.ops = &wf_max6690_ops; 99 max->sens.ops = &wf_max6690_ops;
100 i2c_set_clientdata(client, max); 100 i2c_set_clientdata(client, max);
101 101
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cde0fd941f0c..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1275,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1275{ 1275{
1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1277 struct ibmveth_adapter *adapter; 1277 struct ibmveth_adapter *adapter;
1278 struct iommu_table *tbl;
1278 unsigned long ret; 1279 unsigned long ret;
1279 int i; 1280 int i;
1280 int rxqentries = 1; 1281 int rxqentries = 1;
1281 1282
1283 tbl = get_iommu_table_base(&vdev->dev);
1284
1282 /* netdev inits at probe time along with the structures we need below*/ 1285 /* netdev inits at probe time along with the structures we need below*/
1283 if (netdev == NULL) 1286 if (netdev == NULL)
1284 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); 1287 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1285 1288
1286 adapter = netdev_priv(netdev); 1289 adapter = netdev_priv(netdev);
1287 1290
1288 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1291 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1289 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1292 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1290 1293
1291 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1294 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1292 /* add the size of the active receive buffers */ 1295 /* add the size of the active receive buffers */
@@ -1294,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1294 ret += 1297 ret +=
1295 adapter->rx_buff_pool[i].size * 1298 adapter->rx_buff_pool[i].size *
1296 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 1299 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1297 buff_size); 1300 buff_size, tbl);
1298 rxqentries += adapter->rx_buff_pool[i].size; 1301 rxqentries += adapter->rx_buff_pool[i].size;
1299 } 1302 }
1300 /* add the size of the receive queue entries */ 1303 /* add the size of the receive queue entries */
1301 ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); 1304 ret += IOMMU_PAGE_ALIGN(
1305 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1302 1306
1303 return ret; 1307 return ret;
1304} 1308}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 978db344bda0..b24aa010f68c 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -366,7 +366,7 @@ config TRACE_SINK
366 "Trace data router for MIPI P1149.7 cJTAG standard". 366 "Trace data router for MIPI P1149.7 cJTAG standard".
367 367
368config PPC_EPAPR_HV_BYTECHAN 368config PPC_EPAPR_HV_BYTECHAN
369 tristate "ePAPR hypervisor byte channel driver" 369 bool "ePAPR hypervisor byte channel driver"
370 depends on PPC 370 depends on PPC
371 select EPAPR_PARAVIRT 371 select EPAPR_PARAVIRT
372 help 372 help
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
81 * enforcing the limit based on the max that the guest can map. 81 * enforcing the limit based on the max that the guest can map.
82 */ 82 */
83 down_write(&current->mm->mmap_sem); 83 down_write(&current->mm->mmap_sem);
84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
85 locked = current->mm->locked_vm + npages; 85 locked = current->mm->locked_vm + npages;
86 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 86 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
87 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { 87 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
110 110
111 down_write(&current->mm->mmap_sem); 111 down_write(&current->mm->mmap_sem);
112 current->mm->locked_vm -= (container->tbl->it_size << 112 current->mm->locked_vm -= (container->tbl->it_size <<
113 IOMMU_PAGE_SHIFT) >> PAGE_SHIFT; 113 IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
114 up_write(&current->mm->mmap_sem); 114 up_write(&current->mm->mmap_sem);
115} 115}
116 116
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
174 if (info.argsz < minsz) 174 if (info.argsz < minsz)
175 return -EINVAL; 175 return -EINVAL;
176 176
177 info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT; 177 info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
178 info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT; 178 info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
179 info.flags = 0; 179 info.flags = 0;
180 180
181 if (copy_to_user((void __user *)arg, &info, minsz)) 181 if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
205 VFIO_DMA_MAP_FLAG_WRITE)) 205 VFIO_DMA_MAP_FLAG_WRITE))
206 return -EINVAL; 206 return -EINVAL;
207 207
208 if ((param.size & ~IOMMU_PAGE_MASK) || 208 if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
209 (param.vaddr & ~IOMMU_PAGE_MASK)) 209 (param.vaddr & ~IOMMU_PAGE_MASK_4K))
210 return -EINVAL; 210 return -EINVAL;
211 211
212 /* iova is checked by the IOMMU API */ 212 /* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
220 if (ret) 220 if (ret)
221 return ret; 221 return ret;
222 222
223 for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) { 223 for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
224 ret = iommu_put_tce_user_mode(tbl, 224 ret = iommu_put_tce_user_mode(tbl,
225 (param.iova >> IOMMU_PAGE_SHIFT) + i, 225 (param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
226 tce); 226 tce);
227 if (ret) 227 if (ret)
228 break; 228 break;
229 tce += IOMMU_PAGE_SIZE; 229 tce += IOMMU_PAGE_SIZE_4K;
230 } 230 }
231 if (ret) 231 if (ret)
232 iommu_clear_tces_and_put_pages(tbl, 232 iommu_clear_tces_and_put_pages(tbl,
233 param.iova >> IOMMU_PAGE_SHIFT, i); 233 param.iova >> IOMMU_PAGE_SHIFT_4K, i);
234 234
235 iommu_flush_tce(tbl); 235 iommu_flush_tce(tbl);
236 236
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
256 if (param.flags) 256 if (param.flags)
257 return -EINVAL; 257 return -EINVAL;
258 258
259 if (param.size & ~IOMMU_PAGE_MASK) 259 if (param.size & ~IOMMU_PAGE_MASK_4K)
260 return -EINVAL; 260 return -EINVAL;
261 261
262 ret = iommu_tce_clear_param_check(tbl, param.iova, 0, 262 ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
263 param.size >> IOMMU_PAGE_SHIFT); 263 param.size >> IOMMU_PAGE_SHIFT_4K);
264 if (ret) 264 if (ret)
265 return ret; 265 return ret;
266 266
267 ret = iommu_clear_tces_and_put_pages(tbl, 267 ret = iommu_clear_tces_and_put_pages(tbl,
268 param.iova >> IOMMU_PAGE_SHIFT, 268 param.iova >> IOMMU_PAGE_SHIFT_4K,
269 param.size >> IOMMU_PAGE_SHIFT); 269 param.size >> IOMMU_PAGE_SHIFT_4K);
270 iommu_flush_tce(tbl); 270 iommu_flush_tce(tbl);
271 271
272 return ret; 272 return ret;