diff options
author | Leon Romanovsky <leon@kernel.org> | 2016-11-03 10:44:12 -0400 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-12-03 13:12:52 -0500 |
commit | 15d4626e498c09b66c0f74a107a83bd95abb175c (patch) | |
tree | dffbf3c0b11d9aab14dc32cf9a2bac9c8222ebd4 /drivers/infiniband | |
parent | aa6aae38f7fb2c030f326a6dd10b58fff1851dfa (diff) |
IB/mlx4: Remove debug prints after allocation failure
The prints after [k|v][m|z|c]alloc() functions are not needed,
because in case of failure, the allocator will print its own
internal error messages anyway.
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/mlx4/alias_GUID.c | 4 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/cm.c | 4 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/mad.c | 9 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/main.c | 16 | ||||
-rw-r--r-- | drivers/infiniband/hw/mlx4/mcg.c | 5 |
5 files changed, 8 insertions, 30 deletions
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 5e9939045852..06020c54db20 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c | |||
@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work) | |||
755 | struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); | 755 | struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov); |
756 | 756 | ||
757 | rec = kzalloc(sizeof *rec, GFP_KERNEL); | 757 | rec = kzalloc(sizeof *rec, GFP_KERNEL); |
758 | if (!rec) { | 758 | if (!rec) |
759 | pr_err("alias_guid_work: No Memory\n"); | ||
760 | return; | 759 | return; |
761 | } | ||
762 | 760 | ||
763 | pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1); | 761 | pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1); |
764 | ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); | 762 | ret = get_next_record_to_update(dev, sriov_alias_port->port, rec); |
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 39a488889fc7..d64845335e87 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c | |||
@@ -247,10 +247,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id) | |||
247 | struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; | 247 | struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; |
248 | 248 | ||
249 | ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL); | 249 | ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL); |
250 | if (!ent) { | 250 | if (!ent) |
251 | mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n"); | ||
252 | return ERR_PTR(-ENOMEM); | 251 | return ERR_PTR(-ENOMEM); |
253 | } | ||
254 | 252 | ||
255 | ent->sl_cm_id = sl_cm_id; | 253 | ent->sl_cm_id = sl_cm_id; |
256 | ent->slave_id = slave_id; | 254 | ent->slave_id = slave_id; |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 1672907ff219..b0cd66336fcb 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1102,10 +1102,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num, | |||
1102 | 1102 | ||
1103 | in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); | 1103 | in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL); |
1104 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | 1104 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); |
1105 | if (!in_mad || !out_mad) { | 1105 | if (!in_mad || !out_mad) |
1106 | mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n"); | ||
1107 | goto out; | 1106 | goto out; |
1108 | } | ||
1109 | 1107 | ||
1110 | guid_tbl_blk_num *= 4; | 1108 | guid_tbl_blk_num *= 4; |
1111 | 1109 | ||
@@ -1916,11 +1914,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port, | |||
1916 | 1914 | ||
1917 | *ret_ctx = NULL; | 1915 | *ret_ctx = NULL; |
1918 | ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); | 1916 | ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); |
1919 | if (!ctx) { | 1917 | if (!ctx) |
1920 | pr_err("failed allocating pv resource context " | ||
1921 | "for port %d, slave %d\n", port, slave); | ||
1922 | return -ENOMEM; | 1918 | return -ENOMEM; |
1923 | } | ||
1924 | 1919 | ||
1925 | ctx->ib_dev = &dev->ib_dev; | 1920 | ctx->ib_dev = &dev->ib_dev; |
1926 | ctx->port = port; | 1921 | ctx->port = port; |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b597e8227591..1b54786d13d0 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -2814,11 +2814,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
2814 | kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) * | 2814 | kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) * |
2815 | sizeof(long), | 2815 | sizeof(long), |
2816 | GFP_KERNEL); | 2816 | GFP_KERNEL); |
2817 | if (!ibdev->ib_uc_qpns_bitmap) { | 2817 | if (!ibdev->ib_uc_qpns_bitmap) |
2818 | dev_err(&dev->persist->pdev->dev, | ||
2819 | "bit map alloc failed\n"); | ||
2820 | goto err_steer_qp_release; | 2818 | goto err_steer_qp_release; |
2821 | } | ||
2822 | 2819 | ||
2823 | bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); | 2820 | bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); |
2824 | 2821 | ||
@@ -3055,15 +3052,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) | |||
3055 | first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); | 3052 | first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); |
3056 | 3053 | ||
3057 | dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); | 3054 | dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); |
3058 | if (!dm) { | 3055 | if (!dm) |
3059 | pr_err("failed to allocate memory for tunneling qp update\n"); | ||
3060 | return; | 3056 | return; |
3061 | } | ||
3062 | 3057 | ||
3063 | for (i = 0; i < ports; i++) { | 3058 | for (i = 0; i < ports; i++) { |
3064 | dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); | 3059 | dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); |
3065 | if (!dm[i]) { | 3060 | if (!dm[i]) { |
3066 | pr_err("failed to allocate memory for tunneling qp update work struct\n"); | ||
3067 | while (--i >= 0) | 3061 | while (--i >= 0) |
3068 | kfree(dm[i]); | 3062 | kfree(dm[i]); |
3069 | goto out; | 3063 | goto out; |
@@ -3223,8 +3217,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, | |||
3223 | ew->port = port; | 3217 | ew->port = port; |
3224 | ew->ib_dev = ibdev; | 3218 | ew->ib_dev = ibdev; |
3225 | queue_work(wq, &ew->work); | 3219 | queue_work(wq, &ew->work); |
3226 | } else { | ||
3227 | pr_err("failed to allocate memory for sl2vl update work\n"); | ||
3228 | } | 3220 | } |
3229 | } | 3221 | } |
3230 | 3222 | ||
@@ -3284,10 +3276,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
3284 | 3276 | ||
3285 | case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: | 3277 | case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: |
3286 | ew = kmalloc(sizeof *ew, GFP_ATOMIC); | 3278 | ew = kmalloc(sizeof *ew, GFP_ATOMIC); |
3287 | if (!ew) { | 3279 | if (!ew) |
3288 | pr_err("failed to allocate memory for events work\n"); | ||
3289 | break; | 3280 | break; |
3290 | } | ||
3291 | 3281 | ||
3292 | INIT_WORK(&ew->work, handle_port_mgmt_change_event); | 3282 | INIT_WORK(&ew->work, handle_port_mgmt_change_event); |
3293 | memcpy(&ew->ib_eqe, eqe, sizeof *eqe); | 3283 | memcpy(&ew->ib_eqe, eqe, sizeof *eqe); |
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index a21d37f02f35..e010fe459e67 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c | |||
@@ -1142,7 +1142,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq) | |||
1142 | work = kmalloc(sizeof *work, GFP_KERNEL); | 1142 | work = kmalloc(sizeof *work, GFP_KERNEL); |
1143 | if (!work) { | 1143 | if (!work) { |
1144 | ctx->flushing = 0; | 1144 | ctx->flushing = 0; |
1145 | mcg_warn("failed allocating work for cleanup\n"); | ||
1146 | return; | 1145 | return; |
1147 | } | 1146 | } |
1148 | 1147 | ||
@@ -1202,10 +1201,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave) | |||
1202 | return 0; | 1201 | return 0; |
1203 | 1202 | ||
1204 | req = kzalloc(sizeof *req, GFP_KERNEL); | 1203 | req = kzalloc(sizeof *req, GFP_KERNEL); |
1205 | if (!req) { | 1204 | if (!req) |
1206 | mcg_warn_group(group, "failed allocation - may leave stall groups\n"); | ||
1207 | return -ENOMEM; | 1205 | return -ENOMEM; |
1208 | } | ||
1209 | 1206 | ||
1210 | if (!list_empty(&group->func[slave].pending)) { | 1207 | if (!list_empty(&group->func[slave].pending)) { |
1211 | pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); | 1208 | pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); |