aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/ibm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-28 00:11:26 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-28 00:11:26 -0500
commit1b17366d695c8ab03f98d0155357e97a427e1dce (patch)
treed223c79cc33ca1d890d264a202a1dd9c29655039 /drivers/net/ethernet/ibm
parentd12de1ef5eba3adb88f8e9dd81b6a60349466378 (diff)
parent7179ba52889bef7e5e23f72908270e1ab2b7fc6f (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt: "So here's my next branch for powerpc. A bit late as I was on vacation last week. It's mostly the same stuff that was in next already, I just added two patches today which are the wiring up of lockref for powerpc, which for some reason fell through the cracks last time and is trivial. The highlights are, in addition to a bunch of bug fixes: - Reworked Machine Check handling on kernels running without a hypervisor (or acting as a hypervisor). Provides hooks to handle some errors in real mode such as TLB errors, handle SLB errors, etc... - Support for retrieving memory error information from the service processor on IBM servers running without a hypervisor and routing them to the memory poison infrastructure. - _PAGE_NUMA support on server processors - 32-bit BookE relocatable kernel support - FSL e6500 hardware tablewalk support - A bunch of new/revived board support - FSL e6500 deeper idle states and altivec powerdown support You'll notice a generic mm change here, it has been acked by the relevant authorities and is a pre-req for our _PAGE_NUMA support" * 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (121 commits) powerpc: Implement arch_spin_is_locked() using arch_spin_value_unlocked() powerpc: Add support for the optimised lockref implementation powerpc/powernv: Call OPAL sync before kexec'ing powerpc/eeh: Escalate error on non-existing PE powerpc/eeh: Handle multiple EEH errors powerpc: Fix transactional FP/VMX/VSX unavailable handlers powerpc: Don't corrupt transactional state when using FP/VMX in kernel powerpc: Reclaim two unused thread_info flag bits powerpc: Fix races with irq_work Move processing of MCE queued event out from syscall exit path. 
pseries/cpuidle: Remove redundant call to ppc64_runlatch_off() in cpu idle routines powerpc: Make add_system_ram_resources() __init powerpc: add SATA_MV to ppc64_defconfig powerpc/powernv: Increase candidate fw image size powerpc: Add debug checks to catch invalid cpu-to-node mappings powerpc: Fix the setup of CPU-to-Node mappings during CPU online powerpc/iommu: Don't detach device without IOMMU group powerpc/eeh: Hotplug improvement powerpc/eeh: Call opal_pci_reinit() on powernv for restoring config space powerpc/eeh: Add restore_config operation ...
Diffstat (limited to 'drivers/net/ethernet/ibm')
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c12
1 file changed, 8 insertions, 4 deletions
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cde0fd941f0c..4be971590461 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1275,18 +1275,21 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1275{ 1275{
1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 1276 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1277 struct ibmveth_adapter *adapter; 1277 struct ibmveth_adapter *adapter;
1278 struct iommu_table *tbl;
1278 unsigned long ret; 1279 unsigned long ret;
1279 int i; 1280 int i;
1280 int rxqentries = 1; 1281 int rxqentries = 1;
1281 1282
1283 tbl = get_iommu_table_base(&vdev->dev);
1284
1282 /* netdev inits at probe time along with the structures we need below*/ 1285 /* netdev inits at probe time along with the structures we need below*/
1283 if (netdev == NULL) 1286 if (netdev == NULL)
1284 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT); 1287 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1285 1288
1286 adapter = netdev_priv(netdev); 1289 adapter = netdev_priv(netdev);
1287 1290
1288 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1291 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1289 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1292 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1290 1293
1291 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 1294 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1292 /* add the size of the active receive buffers */ 1295 /* add the size of the active receive buffers */
@@ -1294,11 +1297,12 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1294 ret += 1297 ret +=
1295 adapter->rx_buff_pool[i].size * 1298 adapter->rx_buff_pool[i].size *
1296 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i]. 1299 IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1297 buff_size); 1300 buff_size, tbl);
1298 rxqentries += adapter->rx_buff_pool[i].size; 1301 rxqentries += adapter->rx_buff_pool[i].size;
1299 } 1302 }
1300 /* add the size of the receive queue entries */ 1303 /* add the size of the receive queue entries */
1301 ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry)); 1304 ret += IOMMU_PAGE_ALIGN(
1305 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1302 1306
1303 return ret; 1307 return ret;
1304} 1308}