author     Roland Dreier <rolandd@cisco.com>   2008-05-06 18:03:38 -0400
committer  Roland Dreier <rolandd@cisco.com>   2008-05-06 18:03:38 -0400
commit     0e9913362a967377eb886bbdf305ec58aa07a878 (patch)
tree       4677a588e942eb198c9cdb15338bcbcfb51f6902 /drivers/infiniband
parent     a15306365a16380f3bafee9e181ba01231d4acd7 (diff)
RDMA/cxgb3: Don't add PBL memory to gen_pool in chunks
Current iw_cxgb3 code adds PBL memory to the driver's gen_pool in 2 MB chunks. This limits the largest single allocation that can be done to the same size, which means that with 4 KB pages, each of which takes 8 bytes of PBL memory, the largest memory region that can be allocated is 1 GB (256K PBL entries * 4 KB/entry).

Remove this limit by adding all the PBL memory in a single gen_pool chunk, if possible. Add code that falls back to smaller chunks if gen_pool_add() fails, which can happen if there is not sufficient contiguous lowmem for the internal gen_pool bitmap.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
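[Editor's note, not part of the commit message: a quick back-of-the-envelope check of the 1 GB figure above, written as a standalone userspace C snippet. A 2 MB PBL_CHUNK holds 2 MB / 8 B = 256K PBL entries, and each entry maps one 4 KB page.]

#include <stdio.h>

int main(void)
{
	unsigned long pbl_chunk  = 2UL * 1024 * 1024; /* old PBL_CHUNK: 2 MB */
	unsigned long entry_size = 8;                 /* bytes per PBL entry */
	unsigned long page_size  = 4096;              /* 4 KB pages          */

	unsigned long entries    = pbl_chunk / entry_size; /* 256K entries          */
	unsigned long max_region = entries * page_size;    /* largest mappable region */

	printf("max region with 2 MB chunks: %lu GB\n", max_region >> 30); /* prints 1 */
	return 0;
}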
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_resource.c  36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 45ed4f25ef78..bd233c087653 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -250,7 +250,6 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
  */
 
 #define MIN_PBL_SHIFT 8		/* 256B == min PBL size (32 entries) */
-#define PBL_CHUNK 2*1024*1024
 
 u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
 {
@@ -267,14 +266,35 @@ void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
 
 int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
 {
-	unsigned long i;
+	unsigned pbl_start, pbl_chunk;
+
 	rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
-	if (rdev_p->pbl_pool)
-		for (i = rdev_p->rnic_info.pbl_base;
-		     i <= rdev_p->rnic_info.pbl_top - PBL_CHUNK + 1;
-		     i += PBL_CHUNK)
-			gen_pool_add(rdev_p->pbl_pool, i, PBL_CHUNK, -1);
-	return rdev_p->pbl_pool ? 0 : -ENOMEM;
+	if (!rdev_p->pbl_pool)
+		return -ENOMEM;
+
+	pbl_start = rdev_p->rnic_info.pbl_base;
+	pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
+
+	while (pbl_start < rdev_p->rnic_info.pbl_top) {
+		pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
+				pbl_chunk);
+		if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
+			PDBG("%s failed to add PBL chunk (%x/%x)\n",
+			     __func__, pbl_start, pbl_chunk);
+			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
+				printk(KERN_WARNING MOD "%s: Failed to add all PBL chunks (%x/%x)\n",
+				       __func__, pbl_start, rdev_p->rnic_info.pbl_top - pbl_start);
+				return 0;
+			}
+			pbl_chunk >>= 1;
+		} else {
+			PDBG("%s added PBL chunk (%x/%x)\n",
+			     __func__, pbl_start, pbl_chunk);
+			pbl_start += pbl_chunk;
+		}
+	}
+
+	return 0;
 }
 
 void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
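[Editor's note, for context only and not part of this diff: allocations against the pool built above go through the kernel genalloc API. A minimal sketch of the alloc/free pair in the style of the driver's existing cxio_hal_pblpool_alloc()/cxio_hal_pblpool_free() helpers, assuming the rdev_p->pbl_pool field initialized in cxio_hal_pblpool_create():]

/* Sketch: hand out PBL space from the gen_pool built above. */
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
{
	/* gen_pool_alloc() returns 0 when no chunk can satisfy the request */
	return (u32)gen_pool_alloc(rdev_p->pbl_pool, size);
}

/* Sketch: return PBL space to the pool. */
void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
{
	gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
}

Because gen_pool_alloc() can only satisfy a request from within a single chunk, the halving fallback in the patch trades a slightly fragmented pool (when lowmem is tight) for the ability to serve much larger single allocations when one big chunk can be registered.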