author     Ira Weiny <ira.weiny@intel.com>           2014-08-08 19:00:53 -0400
committer  Roland Dreier <roland@purestorage.com>    2014-08-10 23:35:39 -0400
commit     7ef5d4b0463c095a994890131918d3301d8404ee (patch)
tree       7c5df5a72dfa13da96685b0c2d3a3ec3a98b31b0
parent     f426a40eb695d315466f130618db30cafb27db90 (diff)
IB/mad: Update module to [pr|dev]_* style print messages
Use dev_* style print when struct device is available.
Also combine previously line-broken user-visible strings as per
Documentation/CodingStyle:
"However, never break user-visible strings such as printk messages,
because that breaks the ability to grep for them."
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
[ Remove PFX so the patch actually builds. - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
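The logging pattern being adopted is sketched below. This snippet is not part of the patch; the function, the allocation, and the message text are invented purely to illustrate the two helpers involved: dev_err(), used wherever a struct device is at hand so the message names the exact device, and pr_err(), which falls back to the module-name prefix supplied by pr_fmt().

/* Illustrative sketch only -- not from the patch; names are invented. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/device.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int example_alloc(struct device *dev, int port, size_t len)
{
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf) {
		if (dev) {
			/*
			 * A device is in scope: dev_err() names it in the
			 * log.  The user-visible string stays on one line,
			 * even past 80 columns, so it remains greppable.
			 */
			dev_err(dev, "No memory for example buffer on port %d (%zu bytes)\n",
				port, len);
		} else {
			/* No device handy: pr_err() adds the pr_fmt() prefix. */
			pr_err("No memory for example buffer on port %d (%zu bytes)\n",
			       port, len);
		}
		return -ENOMEM;
	}

	kfree(buf);
	return 0;
}

In the diff that follows, the dev_err()/dev_notice() conversions cover the paths where a device pointer is reachable, while method_in_use(), allocate_method_table(), validate_mad() and the module init path keep using the pr_err() form.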
-rw-r--r--   drivers/infiniband/core/agent.c     |  12
-rw-r--r--   drivers/infiniband/core/mad.c       | 147
-rw-r--r--   drivers/infiniband/core/mad_priv.h  |   3
3 files changed, 85 insertions, 77 deletions
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 2bc7f5af64f4..8e32c5abd09d 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -94,14 +94,14 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 	port_priv = ib_get_agent_port(device, port_num);
 
 	if (!port_priv) {
-		printk(KERN_ERR SPFX "Unable to find port agent\n");
+		dev_err(&device->dev, "Unable to find port agent\n");
 		return;
 	}
 
 	agent = port_priv->agent[qpn];
 	ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
 	if (IS_ERR(ah)) {
-		printk(KERN_ERR SPFX "ib_create_ah_from_wc error %ld\n",
-		       PTR_ERR(ah));
+		dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
+			PTR_ERR(ah));
 		return;
 	}
@@ -110,7 +110,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 				      GFP_KERNEL);
 	if (IS_ERR(send_buf)) {
-		printk(KERN_ERR SPFX "ib_create_send_mad error\n");
+		dev_err(&device->dev, "ib_create_send_mad error\n");
 		goto err1;
 	}
 
@@ -125,7 +125,7 @@ void agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
 	}
 
 	if (ib_post_send_mad(send_buf, NULL)) {
-		printk(KERN_ERR SPFX "ib_post_send_mad error\n");
+		dev_err(&device->dev, "ib_post_send_mad error\n");
 		goto err2;
 	}
 	return;
@@ -151,7 +151,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
+		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
@@ -202,7 +202,7 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_agent_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
-		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %d not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del(&port_priv->port_list);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ab31f136d04b..08f430319fc8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -33,6 +33,9 @@
  * SOFTWARE.
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -706,7 +709,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
 	    IB_SMI_DISCARD) {
 		ret = -EINVAL;
-		printk(KERN_ERR PFX "Invalid directed route\n");
+		dev_err(&device->dev, "Invalid directed route\n");
 		goto out;
 	}
 
@@ -718,7 +721,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for ib_mad_local_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
@@ -726,7 +729,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		printk(KERN_ERR PFX "No memory for local response MAD\n");
+		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}
@@ -837,9 +840,9 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			printk(KERN_ERR "alloc_send_rmpp_segs: RMPP mem "
-			       "alloc failed for len %zd, gfp %#x\n",
-			       sizeof (*seg) + seg_size, gfp_mask);
+			dev_err(&send_buf->mad_agent->device->dev,
+				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
+				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
@@ -1199,7 +1202,8 @@ EXPORT_SYMBOL(ib_redirect_mad_qp);
 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
 		      struct ib_wc *wc)
 {
-	printk(KERN_ERR PFX "ib_process_mad_wc() not implemented yet\n");
+	dev_err(&mad_agent->device->dev,
+		"ib_process_mad_wc() not implemented yet\n");
 	return 0;
 }
 EXPORT_SYMBOL(ib_process_mad_wc);
@@ -1211,7 +1215,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 
 	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
 		if ((*method)->agent[i]) {
-			printk(KERN_ERR PFX "Method %d already in use\n", i);
+			pr_err("Method %d already in use\n", i);
 			return -EINVAL;
 		}
 	}
@@ -1223,8 +1227,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
 	if (!*method) {
-		printk(KERN_ERR PFX "No memory for "
-		       "ib_mad_mgmt_method_table\n");
+		pr_err("No memory for ib_mad_mgmt_method_table\n");
 		return -ENOMEM;
 	}
 
@@ -1319,8 +1322,8 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
@@ -1386,8 +1389,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
 		if (!vendor) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class_table\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class_table\n");
 			goto error1;
 		}
 
@@ -1397,8 +1400,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
 		if (!vendor_class) {
-			printk(KERN_ERR PFX "No memory for "
-			       "ib_mad_mgmt_vendor_class\n");
+			dev_err(&agent_priv->agent.device->dev,
+				"No memory for ib_mad_mgmt_vendor_class\n");
 			goto error2;
 		}
 
@@ -1429,7 +1432,7 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			goto check_in_use;
 		}
 	}
-	printk(KERN_ERR PFX "All OUI slots in use\n");
+	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
 	goto error3;
 
 check_in_use:
@@ -1640,9 +1643,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 			if (mad_agent->agent.recv_handler)
 				atomic_inc(&mad_agent->refcount);
 			else {
-				printk(KERN_NOTICE PFX "No receive handler for client "
-				       "%p on port %d\n",
-				       &mad_agent->agent, port_priv->port_num);
+				dev_notice(&port_priv->device->dev,
+					   "No receive handler for client %p on port %d\n",
+					   &mad_agent->agent, port_priv->port_num);
 				mad_agent = NULL;
 			}
 		}
@@ -1658,8 +1661,8 @@ static int validate_mad(struct ib_mad *mad, u32 qp_num)
 
 	/* Make sure MAD base version is understood */
 	if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) {
-		printk(KERN_ERR PFX "MAD received with unsupported base "
-		       "version %d\n", mad->mad_hdr.base_version);
+		pr_err("MAD received with unsupported base version %d\n",
+		       mad->mad_hdr.base_version);
 		goto out;
 	}
 
@@ -1911,8 +1914,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response) {
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
+		dev_err(&port_priv->device->dev,
+			"ib_mad_recv_done_handler no memory for response buffer\n");
 		goto out;
 	}
 
@@ -2176,7 +2179,8 @@ retry:
 		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
 				   &bad_send_wr);
 		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
+			dev_err(&port_priv->device->dev,
+				"ib_post_send failed: %d\n", ret);
 			mad_send_wr = queued_send_wr;
 			wc->status = IB_WC_LOC_QP_OP_ERR;
 			goto retry;
@@ -2248,8 +2252,9 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 					   IB_QP_STATE | IB_QP_CUR_STATE);
 			kfree(attr);
 			if (ret)
-				printk(KERN_ERR PFX "mad_error_handler - "
-				       "ib_modify_qp to RTS : %d\n", ret);
+				dev_err(&port_priv->device->dev,
+					"mad_error_handler - ib_modify_qp to RTS : %d\n",
+					ret);
 			else
 				mark_sends_for_retry(qp_info);
 		}
@@ -2408,7 +2413,8 @@ static void local_completions(struct work_struct *work)
 		if (local->mad_priv) {
 			recv_mad_agent = local->recv_mad_agent;
 			if (!recv_mad_agent) {
-				printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+				dev_err(&mad_agent_priv->agent.device->dev,
+					"No receive MAD agent for local completion\n");
 				free_mad = 1;
 				goto local_send_completion;
 			}
@@ -2589,7 +2595,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		} else {
 			mad_priv = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 			if (!mad_priv) {
-				printk(KERN_ERR PFX "No memory for receive buffer\n");
+				dev_err(&qp_info->port_priv->device->dev,
+					"No memory for receive buffer\n");
 				ret = -ENOMEM;
 				break;
 			}
@@ -2625,7 +2632,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 					    sizeof mad_priv->header,
 					    DMA_FROM_DEVICE);
 			kmem_cache_free(ib_mad_cache, mad_priv);
-			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
+			dev_err(&qp_info->port_priv->device->dev,
+				"ib_post_recv failed: %d\n", ret);
 			break;
 		}
 	} while (post);
@@ -2681,7 +2689,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr) {
-		printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
+		dev_err(&port_priv->device->dev,
+			"Couldn't kmalloc ib_qp_attr\n");
 		return -ENOMEM;
 	}
 
@@ -2705,16 +2714,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
 					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "INIT: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to INIT: %d\n",
+				i, ret);
 			goto out;
 		}
 
 		attr->qp_state = IB_QPS_RTR;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTR: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTR: %d\n",
+				i, ret);
 			goto out;
 		}
 
@@ -2722,16 +2733,18 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 		attr->sq_psn = IB_MAD_SEND_Q_PSN;
 		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't change QP%d state to "
-			       "RTS: %d\n", i, ret);
+			dev_err(&port_priv->device->dev,
+				"Couldn't change QP%d state to RTS: %d\n",
+				i, ret);
 			goto out;
 		}
 	}
 
 	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 	if (ret) {
-		printk(KERN_ERR PFX "Failed to request completion "
-		       "notification: %d\n", ret);
+		dev_err(&port_priv->device->dev,
+			"Failed to request completion notification: %d\n",
+			ret);
 		goto out;
 	}
 
@@ -2741,7 +2754,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
-			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
+			dev_err(&port_priv->device->dev,
+				"Couldn't post receive WRs\n");
 			goto out;
 		}
 	}
@@ -2755,7 +2769,8 @@ static void qp_event_handler(struct ib_event *event, void *qp_context)
 	struct ib_mad_qp_info *qp_info = qp_context;
 
 	/* It's worse than that! He's dead, Jim! */
-	printk(KERN_ERR PFX "Fatal error (%d) on MAD QP (%d)\n",
-		event->event, qp_info->qp->qp_num);
+	dev_err(&qp_info->port_priv->device->dev,
+		"Fatal error (%d) on MAD QP (%d)\n",
+		event->event, qp_info->qp->qp_num);
 }
 
@@ -2801,8 +2816,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
 	qp_init_attr.event_handler = qp_event_handler;
 	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
 	if (IS_ERR(qp_info->qp)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad QP%d\n",
-		       get_spl_qp_index(qp_type));
+		dev_err(&qp_info->port_priv->device->dev,
+			"Couldn't create ib_mad QP%d\n",
+			get_spl_qp_index(qp_type));
 		ret = PTR_ERR(qp_info->qp);
 		goto error;
 	}
@@ -2840,7 +2856,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
+		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
 		return -ENOMEM;
 	}
 
@@ -2860,21 +2876,21 @@ static int ib_mad_port_open(struct ib_device *device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
 	if (IS_ERR(port_priv->cq)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
+		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
 		ret = PTR_ERR(port_priv->cq);
 		goto error3;
 	}
 
 	port_priv->pd = ib_alloc_pd(device);
 	if (IS_ERR(port_priv->pd)) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad PD\n");
+		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
 		ret = PTR_ERR(port_priv->pd);
 		goto error4;
 	}
 
 	port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(port_priv->mr)) {
-		printk(KERN_ERR PFX "Couldn't get ib_mad DMA MR\n");
+		dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
 		ret = PTR_ERR(port_priv->mr);
 		goto error5;
 	}
@@ -2902,7 +2918,7 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	ret = ib_mad_port_start(port_priv);
 	if (ret) {
-		printk(KERN_ERR PFX "Couldn't start port\n");
+		dev_err(&device->dev, "Couldn't start port\n");
 		goto error9;
 	}
 
@@ -2946,7 +2962,7 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 	port_priv = __ib_get_mad_port(device, port_num);
 	if (port_priv == NULL) {
 		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
-		printk(KERN_ERR PFX "Port %d not found\n", port_num);
+		dev_err(&device->dev, "Port %d not found\n", port_num);
 		return -ENODEV;
 	}
 	list_del_init(&port_priv->port_list);
@@ -2984,14 +3000,12 @@ static void ib_mad_init_device(struct ib_device *device)
 
 	for (i = start; i <= end; i++) {
 		if (ib_mad_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't open port %d\n", i);
 			goto error;
 		}
 		if (ib_agent_port_open(device, i)) {
-			printk(KERN_ERR PFX "Couldn't open %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't open port %d for agents\n", i);
 			goto error_agent;
 		}
 	}
@@ -2999,20 +3013,17 @@ static void ib_mad_init_device(struct ib_device *device)
 
 error_agent:
 	if (ib_mad_port_close(device, i))
-		printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-		       device->name, i);
+		dev_err(&device->dev, "Couldn't close port %d\n", i);
 
 error:
 	i--;
 
 	while (i >= start) {
 		if (ib_agent_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, i);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n", i);
 		if (ib_mad_port_close(device, i))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, i);
+			dev_err(&device->dev, "Couldn't close port %d\n", i);
 		i--;
 	}
 }
@@ -3033,12 +3044,12 @@ static void ib_mad_remove_device(struct ib_device *device)
 	}
 	for (i = 0; i < num_ports; i++, cur_port++) {
 		if (ib_agent_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d "
-			       "for agents\n",
-			       device->name, cur_port);
+			dev_err(&device->dev,
+				"Couldn't close port %d for agents\n",
+				cur_port);
 		if (ib_mad_port_close(device, cur_port))
-			printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-			       device->name, cur_port);
+			dev_err(&device->dev, "Couldn't close port %d\n",
+				cur_port);
 	}
 }
 
@@ -3064,7 +3075,7 @@ static int __init ib_mad_init_module(void)
 					 SLAB_HWCACHE_ALIGN,
 					 NULL);
 	if (!ib_mad_cache) {
-		printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+		pr_err("Couldn't create ib_mad cache\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
@@ -3072,7 +3083,7 @@ static int __init ib_mad_init_module(void)
 	INIT_LIST_HEAD(&ib_mad_port_list);
 
 	if (ib_register_client(&mad_client)) {
-		printk(KERN_ERR PFX "Couldn't register ib_mad client\n");
+		pr_err("Couldn't register ib_mad client\n");
 		ret = -EINVAL;
 		goto error2;
 	}
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 9430ab4969c5..d1a0b0ee9444 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -42,9 +42,6 @@
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
 
-
-#define PFX "ib_mad: "
-
 #define IB_MAD_QPS_CORE		2 /* Always QP0 and QP1 as a minimum */
 
 /* QP and CQ parameters */
