author     Linus Torvalds <torvalds@linux-foundation.org>  2010-05-25 15:05:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-25 15:05:17 -0400
commit     8e9815a0f8882aaa68645b001bb7538db8886802 (patch)
tree       5654a4f982ea681158185b21321acd470cb9b87c /drivers/net/mlx4
parent     702c0b04978ce316ec05f4d0a9c148fac124335b (diff)
parent     acdc30b56abc0db7d409a13e9b6c72ea23b6f90d (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/nes: Fix incorrect unlock in nes_process_mac_intr()
  RDMA/nes: Async event for closed QP causes crash
  RDMA/nes: Have ethtool read hardware registers for rx/tx stats
  RDMA/cxgb4: Only insert sq qid in lookup table
  RDMA/cxgb4: Support IB_WR_READ_WITH_INV opcode
  RDMA/cxgb4: Set fence flag for inv-local-stag work requests
  RDMA/cxgb4: Update some HW limits
  RDMA/cxgb4: Don't limit fastreg page list depth
  RDMA/cxgb4: Return proper errors in fastreg mr/pbl allocation
  RDMA/cxgb4: Fix overflow bug in CQ arm
  RDMA/cxgb4: Optimize CQ overflow detection
  RDMA/cxgb4: CQ size must be IQ size - 2
  RDMA/cxgb4: Register RDMA provider based on LLD state_change events
  RDMA/cxgb4: Detach from the LLD after unregistering RDMA device
  IB/ipath: Remove support for QLogic PCIe QLE devices
  IB/qib: Add new qib driver for QLogic PCIe InfiniBand adapters
  IB/mad: Make needlessly global mad_sendq_size/mad_recvq_size static
  IB/core: Allow device-specific per-port sysfs files
  mlx4_core: Clean up mlx4_alloc_icm() a bit
  mlx4_core: Fix possible chunk sg list overflow in mlx4_alloc_icm()
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--  drivers/net/mlx4/icm.c | 36
1 file changed, 19 insertions, 17 deletions
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 57288ca1395f..b07e4dee80aa 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -163,28 +163,30 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
 					   cur_order, gfp_mask);
 
-		if (!ret) {
-			++chunk->npages;
-
-			if (coherent)
-				++chunk->nsg;
-			else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-							chunk->npages,
-							PCI_DMA_BIDIRECTIONAL);
+		if (ret) {
+			if (--cur_order < 0)
+				goto fail;
+			else
+				continue;
+		}
 
-				if (chunk->nsg <= 0)
-					goto fail;
+		++chunk->npages;
 
-				chunk = NULL;
-			}
+		if (coherent)
+			++chunk->nsg;
+		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+						chunk->npages,
+						PCI_DMA_BIDIRECTIONAL);
 
-			npages -= 1 << cur_order;
-		} else {
-			--cur_order;
-			if (cur_order < 0)
+			if (chunk->nsg <= 0)
 				goto fail;
 		}
+
+		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+			chunk = NULL;
+
+		npages -= 1 << cur_order;
 	}
 
 	if (!coherent && chunk) {
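
The hunk above is the substance of the sg list overflow fix: previously, chunk = NULL (which forces a fresh chunk on the next iteration) was set only inside the non-coherent pci_map_sg() branch, so a run of coherent allocations could fill chunk->mem[] and then index past MLX4_ICM_CHUNK_LEN. The fix hoists the fullness check out so it runs after every successful allocation, for both paths. Below is a minimal userspace model of the fixed loop's rollover logic, not the kernel code itself; CHUNK_LEN, the page counts, and the always-succeeding "allocation" are stand-ins chosen for illustration.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_LEN 4			/* stands in for MLX4_ICM_CHUNK_LEN */

struct chunk {
	int npages;			/* sg entries used in this chunk */
};

int main(void)
{
	int npages = 11;		/* pages still to allocate */
	int cur_order = 2;		/* try blocks of 1 << cur_order pages */
	struct chunk *chunk = NULL;

	while (npages > 0) {
		if (!chunk) {		/* start a fresh chunk on demand */
			chunk = calloc(1, sizeof(*chunk));
			if (!chunk)
				return 1;
		}

		while (1 << cur_order > npages)
			--cur_order;	/* shrink the block for the tail */

		/* the real code can fail here and retry at a lower order;
		 * this model always "succeeds" and consumes one sg entry */
		++chunk->npages;

		/* the fix: fullness is checked once, after every successful
		 * allocation, so coherent mappings roll over too */
		if (chunk->npages == CHUNK_LEN) {
			printf("chunk full after %d entries, starting a new one\n",
			       chunk->npages);
			free(chunk);	/* the real driver keeps it linked on
					 * icm->chunk_list instead of freeing */
			chunk = NULL;
		}

		npages -= 1 << cur_order;
	}

	free(chunk);
	return 0;
}

With the old structure, the coherent path never reset chunk, so the next iteration would have written entry CHUNK_LEN of an already-full chunk; checking chunk->npages == MLX4_ICM_CHUNK_LEN once, after both branches, closes that hole.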