| field | value | date |
|---|---|---|
| author | Roland Dreier <rolandd@cisco.com> | 2007-05-07 00:18:11 -0400 |
| committer | Roland Dreier <rolandd@cisco.com> | 2007-05-07 00:18:11 -0400 |
| commit | 1a70a05d9d2b30db3e56f8cfbebb175663b41bad | |
| tree | 908d74c0992c0f6bb5ea776503cd901d6f429469 | |
| parent | b7f008fdc92e498af34671048556fd17ddfe9be9 | |
IB/fmr_pool: Add prefix to all printks
Signed-off-by: Roland Dreier <rolandd@cisco.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/core/fmr_pool.c | 32 |

1 file changed, 17 insertions, 15 deletions
```diff
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 1d796e7c8199..a06bcc65a871 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -43,6 +43,8 @@
 
 #include "core_priv.h"
 
+#define PFX "fmr_pool: "
+
 enum {
         IB_FMR_MAX_REMAPS = 32,
 
@@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
         if (fmr->ref_count !=0) {
-                printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d",
+                printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d",
                        fmr, fmr->ref_count);
         }
 #endif
@@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
         ret = ib_unmap_fmr(&fmr_list);
         if (ret)
-                printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);
+                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret);
 
         spin_lock_irq(&pool->pool_lock);
         list_splice(&unmap_list, &pool->free_list);
@@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
         device = pd->device;
         if (!device->alloc_fmr || !device->dealloc_fmr ||
             !device->map_phys_fmr || !device->unmap_fmr) {
-                printk(KERN_WARNING "Device %s does not support fast memory regions",
+                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                        device->name);
                 return ERR_PTR(-ENOSYS);
         }
 
         attr = kmalloc(sizeof *attr, GFP_KERNEL);
         if (!attr) {
-                printk(KERN_WARNING "couldn't allocate device attr struct");
+                printk(KERN_WARNING PFX "couldn't allocate device attr struct");
                 return ERR_PTR(-ENOMEM);
         }
 
         ret = ib_query_device(device, attr);
         if (ret) {
-                printk(KERN_WARNING "couldn't query device");
+                printk(KERN_WARNING PFX "couldn't query device: %d", ret);
                 kfree(attr);
                 return ERR_PTR(ret);
         }
@@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
         pool = kmalloc(sizeof *pool, GFP_KERNEL);
         if (!pool) {
-                printk(KERN_WARNING "couldn't allocate pool struct");
+                printk(KERN_WARNING PFX "couldn't allocate pool struct");
                 return ERR_PTR(-ENOMEM);
         }
 
@@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                         kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                 GFP_KERNEL);
                 if (!pool->cache_bucket) {
-                        printk(KERN_WARNING "Failed to allocate cache in pool");
+                        printk(KERN_WARNING PFX "Failed to allocate cache in pool");
                         ret = -ENOMEM;
                         goto out_free_pool;
                 }
@@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                                    "ib_fmr(%s)",
                                    device->name);
         if (IS_ERR(pool->thread)) {
-                printk(KERN_WARNING "couldn't start cleanup thread");
+                printk(KERN_WARNING PFX "couldn't start cleanup thread");
                 ret = PTR_ERR(pool->thread);
                 goto out_free_pool;
         }
@@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                 fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                               GFP_KERNEL);
                 if (!fmr) {
-                        printk(KERN_WARNING "failed to allocate fmr struct "
-                               "for FMR %d", i);
+                        printk(KERN_WARNING PFX "failed to allocate fmr "
+                               "struct for FMR %d", i);
                         goto out_fail;
                 }
 
@@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
                 fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                 if (IS_ERR(fmr->fmr)) {
-                        printk(KERN_WARNING "fmr_create failed for FMR %d", i);
+                        printk(KERN_WARNING PFX "fmr_create failed "
+                               "for FMR %d", i);
                         kfree(fmr);
                         goto out_fail;
                 }
@@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
         }
 
         if (i < pool->pool_size)
-                printk(KERN_WARNING "pool still has %d regions registered",
+                printk(KERN_WARNING PFX "pool still has %d regions registered",
                        pool->pool_size - i);
 
         kfree(pool->cache_bucket);
@@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                 list_add(&fmr->list, &pool->free_list);
                 spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-                printk(KERN_WARNING "fmr_map returns %d\n",
-                       result);
+                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
 
                 return ERR_PTR(result);
         }
@@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
         if (fmr->ref_count < 0)
-                printk(KERN_WARNING "FMR %p has ref count %d < 0",
+                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0",
                        fmr, fmr->ref_count);
 #endif
```
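The whole patch leans on one C property: adjacent string literals are concatenated at compile time, so the log level, the new `PFX` macro, and the message text fuse into a single format string with no runtime cost. Below is a minimal userspace sketch of that idiom; it is a hypothetical illustration, not part of the patch: `printf` stands in for `printk`, and only the `PFX` value is copied from the change.

```c
#include <stdio.h>

#define PFX "fmr_pool: "   /* same prefix string the patch defines */

int main(void)
{
	int ret = -22;   /* example error code, illustrative only */

	/* Adjacent literals concatenate, so this call sees the single
	 * format string "fmr_pool: ib_unmap_fmr returned %d\n" -- the
	 * same mechanism printk(KERN_WARNING PFX "...", ...) uses. */
	printf(PFX "ib_unmap_fmr returned %d\n", ret);
	return 0;
}
```

Because the prefix is spliced in by the compiler, the patch can insert `PFX` between the log level and the message at every call site without touching any of the format arguments.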
