Diffstat (limited to 'drivers/infiniband/core/cm.c')
-rw-r--r--  drivers/infiniband/core/cm.c  |  306
1 file changed, 288 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 2e39236d189f..c0150147d347 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -37,12 +37,14 @@
 
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
+#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
+#include <linux/sysfs.h>
 #include <linux/workqueue.h>
 
 #include <rdma/ib_cache.h>
@@ -78,17 +80,94 @@ static struct ib_cm {
         struct workqueue_struct *wq;
 } cm;
 
+/* Counter indexes ordered by attribute ID */
+enum {
+        CM_REQ_COUNTER,
+        CM_MRA_COUNTER,
+        CM_REJ_COUNTER,
+        CM_REP_COUNTER,
+        CM_RTU_COUNTER,
+        CM_DREQ_COUNTER,
+        CM_DREP_COUNTER,
+        CM_SIDR_REQ_COUNTER,
+        CM_SIDR_REP_COUNTER,
+        CM_LAP_COUNTER,
+        CM_APR_COUNTER,
+        CM_ATTR_COUNT,
+        CM_ATTR_ID_OFFSET = 0x0010,
+};
+
+enum {
+        CM_XMIT,
+        CM_XMIT_RETRIES,
+        CM_RECV,
+        CM_RECV_DUPLICATES,
+        CM_COUNTER_GROUPS
+};
+
+static char const counter_group_names[CM_COUNTER_GROUPS]
+                                     [sizeof("cm_rx_duplicates")] = {
+        "cm_tx_msgs", "cm_tx_retries",
+        "cm_rx_msgs", "cm_rx_duplicates"
+};
+
+struct cm_counter_group {
+        struct kobject obj;
+        atomic_long_t counter[CM_ATTR_COUNT];
+};
+
+struct cm_counter_attribute {
+        struct attribute attr;
+        int index;
+};
+
+#define CM_COUNTER_ATTR(_name, _index) \
+struct cm_counter_attribute cm_##_name##_counter_attr = { \
+        .attr = { .name = __stringify(_name), .mode = 0444, .owner = THIS_MODULE }, \
+        .index = _index \
+}
+
+static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
+static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
+static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
+static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
+static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
+static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
+static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
+static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
+static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
+static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
+static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
+
+static struct attribute *cm_counter_default_attrs[] = {
+        &cm_req_counter_attr.attr,
+        &cm_mra_counter_attr.attr,
+        &cm_rej_counter_attr.attr,
+        &cm_rep_counter_attr.attr,
+        &cm_rtu_counter_attr.attr,
+        &cm_dreq_counter_attr.attr,
+        &cm_drep_counter_attr.attr,
+        &cm_sidr_req_counter_attr.attr,
+        &cm_sidr_rep_counter_attr.attr,
+        &cm_lap_counter_attr.attr,
+        &cm_apr_counter_attr.attr,
+        NULL
+};
+
 struct cm_port {
         struct cm_device *cm_dev;
         struct ib_mad_agent *mad_agent;
+        struct kobject port_obj;
         u8 port_num;
+        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
 };
 
 struct cm_device {
         struct list_head list;
         struct ib_device *device;
+        struct kobject dev_obj;
         u8 ack_delay;
-        struct cm_port port[0];
+        struct cm_port *port[0];
 };
 
 struct cm_av {
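
These structures feed sysfs: each port gets one kobject per counter group, and every group directory exposes one read-only file per CM attribute, named after the attribute ("req", "mra", ...). A minimal userspace sketch of reading one counter through the hierarchy this patch creates under the "infiniband_cm" class; the device name "mthca0" and port "1" are placeholders:

    /* Read the received-REQ count for one port; the path components follow
     * the kobject tree built by this patch: class/device/port/group/attr. */
    #include <stdio.h>

    int main(void)
    {
            long reqs;
            FILE *f = fopen("/sys/class/infiniband_cm/mthca0/1/cm_rx_msgs/req", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%ld", &reqs) == 1)
                    printf("REQs received: %ld\n", reqs);
            fclose(f);
            return 0;
    }
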
@@ -278,7 +357,7 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
         list_for_each_entry(cm_dev, &cm.device_list, list) {
                 if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                         &p, NULL)) {
-                        port = &cm_dev->port[p-1];
+                        port = cm_dev->port[p-1];
                         break;
                 }
         }
@@ -1270,6 +1349,9 @@ static void cm_dup_req_handler(struct cm_work *work,
         struct ib_mad_send_buf *msg = NULL;
         int ret;
 
+        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                        counter[CM_REQ_COUNTER]);
+
         /* Quick state check to discard duplicate REQs. */
         if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                 return;
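
This hunk and the handler hunks that follow each bump one CM_RECV_DUPLICATES slot with a single atomic_long_inc() and take no extra lock; the counters are independent of the cm_id state machine, so lock-free increments from concurrent handlers are safe. The same idea as a standalone userspace sketch, with C11 atomics standing in for the kernel's atomic_long_t:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for one counter_group[].counter[] slot. */
    static atomic_long rx_duplicates;

    int main(void)
    {
            /* Handlers on different CPUs may race here; the atomic add
             * keeps the count exact without a spinlock. */
            atomic_fetch_add(&rx_duplicates, 1);
            printf("duplicates: %ld\n", atomic_load(&rx_duplicates));
            return 0;
    }
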
@@ -1616,6 +1698,8 @@ static void cm_dup_rep_handler(struct cm_work *work)
         if (!cm_id_priv)
                 return;
 
+        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                        counter[CM_REP_COUNTER]);
         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
         if (ret)
                 goto deref;
@@ -1781,6 +1865,8 @@ static int cm_rtu_handler(struct cm_work *work)
         if (cm_id_priv->id.state != IB_CM_REP_SENT &&
             cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
                 spin_unlock_irq(&cm_id_priv->lock);
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_RTU_COUNTER]);
                 goto out;
         }
         cm_id_priv->id.state = IB_CM_ESTABLISHED;
@@ -1958,6 +2044,8 @@ static int cm_dreq_handler(struct cm_work *work)
         cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
                                    dreq_msg->local_comm_id);
         if (!cm_id_priv) {
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_DREQ_COUNTER]);
                 cm_issue_drep(work->port, work->mad_recv_wc);
                 return -EINVAL;
         }
@@ -1977,6 +2065,8 @@ static int cm_dreq_handler(struct cm_work *work)
         case IB_CM_MRA_REP_RCVD:
                 break;
         case IB_CM_TIMEWAIT:
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_DREQ_COUNTER]);
                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                         goto unlock;
 
@@ -1988,6 +2078,10 @@ static int cm_dreq_handler(struct cm_work *work)
                 if (ib_post_send_mad(msg, NULL))
                         cm_free_msg(msg);
                 goto deref;
+        case IB_CM_DREQ_RCVD:
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_DREQ_COUNTER]);
+                goto unlock;
         default:
                 goto unlock;
         }
@@ -2339,10 +2433,20 @@ static int cm_mra_handler(struct cm_work *work)
                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
                     cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
-                                  cm_id_priv->msg, timeout))
+                                  cm_id_priv->msg, timeout)) {
+                        if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+                                atomic_long_inc(&work->port->
+                                                counter_group[CM_RECV_DUPLICATES].
+                                                counter[CM_MRA_COUNTER]);
                         goto out;
+                }
                 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
                 break;
+        case IB_CM_MRA_REQ_RCVD:
+        case IB_CM_MRA_REP_RCVD:
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_MRA_COUNTER]);
+                /* fall through */
         default:
                 goto out;
         }
@@ -2502,6 +2606,8 @@ static int cm_lap_handler(struct cm_work *work)
         case IB_CM_LAP_IDLE:
                 break;
         case IB_CM_MRA_LAP_SENT:
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_LAP_COUNTER]);
                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
                         goto unlock;
 
@@ -2515,6 +2621,10 @@ static int cm_lap_handler(struct cm_work *work)
                 if (ib_post_send_mad(msg, NULL))
                         cm_free_msg(msg);
                 goto deref;
+        case IB_CM_LAP_RCVD:
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_LAP_COUNTER]);
+                goto unlock;
         default:
                 goto unlock;
         }
@@ -2796,6 +2906,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
         cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
         if (cur_cm_id_priv) {
                 spin_unlock_irq(&cm.lock);
+                atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
+                                counter[CM_SIDR_REQ_COUNTER]);
                 goto out; /* Duplicate message. */
         }
         cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
@@ -2990,6 +3102,27 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
                             struct ib_mad_send_wc *mad_send_wc)
 {
         struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
+        struct cm_port *port;
+        u16 attr_index;
+
+        port = mad_agent->context;
+        attr_index = be16_to_cpu(((struct ib_mad_hdr *)
+                                  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
+
+        /*
+         * If the send was in response to a received message (context[0] is not
+         * set to a cm_id), and is not a REJ, then it is a send that was
+         * manually retried.
+         */
+        if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
+                msg->retries = 1;
+
+        atomic_long_add(1 + msg->retries,
+                        &port->counter_group[CM_XMIT].counter[attr_index]);
+        if (msg->retries)
+                atomic_long_add(msg->retries,
+                                &port->counter_group[CM_XMIT_RETRIES].
+                                counter[attr_index]);
 
         switch (mad_send_wc->status) {
         case IB_WC_SUCCESS:
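
Two details above are worth spelling out: attr_index comes from subtracting CM_ATTR_ID_OFFSET (0x0010, the REQ attribute ID, where the contiguous CM attribute range begins) from the MAD header's attribute ID, and a completed send accounts for 1 + msg->retries transmissions in CM_XMIT while the retries alone also land in CM_XMIT_RETRIES. A self-contained sketch of that arithmetic; the helper name is illustrative:

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the driver's ordering: counter indexes follow the CM
     * attribute IDs, which start at 0x0010 for REQ. */
    enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER };
    #define CM_ATTR_ID_OFFSET 0x0010

    /* How much CM_XMIT grows for one completed send. */
    static long xmit_delta(long retries)
    {
            return 1 + retries;
    }

    int main(void)
    {
            uint16_t req_attr_id = 0x0010;  /* CM REQ attribute ID */
            uint16_t rej_attr_id = 0x0012;  /* CM REJ attribute ID */

            assert(req_attr_id - CM_ATTR_ID_OFFSET == CM_REQ_COUNTER);
            assert(rej_attr_id - CM_ATTR_ID_OFFSET == CM_REJ_COUNTER);
            assert(xmit_delta(2) == 3);     /* 1 send + 2 retries */
            return 0;
    }
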
@@ -3148,8 +3281,10 @@ EXPORT_SYMBOL(ib_cm_notify);
 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
 {
+        struct cm_port *port = mad_agent->context;
         struct cm_work *work;
         enum ib_cm_event_type event;
+        u16 attr_id;
         int paths = 0;
 
         switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
@@ -3194,6 +3329,10 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                 return;
         }
 
+        attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
+        atomic_long_inc(&port->counter_group[CM_RECV].
+                        counter[attr_id - CM_ATTR_ID_OFFSET]);
+
         work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
                        GFP_KERNEL);
         if (!work) {
@@ -3204,7 +3343,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
         INIT_DELAYED_WORK(&work->work, cm_work_handler);
         work->cm_event.event = event;
         work->mad_recv_wc = mad_recv_wc;
-        work->port = (struct cm_port *)mad_agent->context;
+        work->port = port;
         queue_delayed_work(cm.wq, &work->work, 0);
 }
 
@@ -3379,6 +3518,108 @@ static void cm_get_ack_delay(struct cm_device *cm_dev)
         cm_dev->ack_delay = attr.local_ca_ack_delay;
 }
 
+static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+                               char *buf)
+{
+        struct cm_counter_group *group;
+        struct cm_counter_attribute *cm_attr;
+
+        group = container_of(obj, struct cm_counter_group, obj);
+        cm_attr = container_of(attr, struct cm_counter_attribute, attr);
+
+        return sprintf(buf, "%ld\n",
+                       atomic_long_read(&group->counter[cm_attr->index]));
+}
+
+static struct sysfs_ops cm_counter_ops = {
+        .show = cm_show_counter
+};
+
+static struct kobj_type cm_counter_obj_type = {
+        .sysfs_ops = &cm_counter_ops,
+        .default_attrs = cm_counter_default_attrs
+};
+
+static void cm_release_port_obj(struct kobject *obj)
+{
+        struct cm_port *cm_port;
+
+        printk(KERN_ERR "free cm port\n");
+
+        cm_port = container_of(obj, struct cm_port, port_obj);
+        kfree(cm_port);
+}
+
+static struct kobj_type cm_port_obj_type = {
+        .release = cm_release_port_obj
+};
+
+static void cm_release_dev_obj(struct kobject *obj)
+{
+        struct cm_device *cm_dev;
+
+        printk(KERN_ERR "free cm dev\n");
+
+        cm_dev = container_of(obj, struct cm_device, dev_obj);
+        kfree(cm_dev);
+}
+
+static struct kobj_type cm_dev_obj_type = {
+        .release = cm_release_dev_obj
+};
+
+struct class cm_class = {
+        .name = "infiniband_cm",
+};
+EXPORT_SYMBOL(cm_class);
+
+static void cm_remove_fs_obj(struct kobject *obj)
+{
+        kobject_put(obj->parent);
+        kobject_put(obj);
+}
+
+static int cm_create_port_fs(struct cm_port *port)
+{
+        int i, ret;
+
+        ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
+                                   kobject_get(&port->cm_dev->dev_obj),
+                                   "%d", port->port_num);
+        if (ret) {
+                kfree(port);
+                return ret;
+        }
+
+        for (i = 0; i < CM_COUNTER_GROUPS; i++) {
+                ret = kobject_init_and_add(&port->counter_group[i].obj,
+                                           &cm_counter_obj_type,
+                                           kobject_get(&port->port_obj),
+                                           "%s", counter_group_names[i]);
+                if (ret)
+                        goto error;
+        }
+
+        return 0;
+
+error:
+        while (i--)
+                cm_remove_fs_obj(&port->counter_group[i].obj);
+        cm_remove_fs_obj(&port->port_obj);
+        return ret;
+
+}
+
+static void cm_remove_port_fs(struct cm_port *port)
+{
+        int i;
+
+        for (i = 0; i < CM_COUNTER_GROUPS; i++)
+                cm_remove_fs_obj(&port->counter_group[i].obj);
+
+        cm_remove_fs_obj(&port->port_obj);
+}
+
 static void cm_add_one(struct ib_device *device)
 {
         struct cm_device *cm_dev;
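
Note the reference discipline in the helpers above: cm_create_port_fs() takes an extra reference on the parent with kobject_get() before each kobject_init_and_add(), cm_remove_fs_obj() drops the parent and child references together, and the actual kfree() of the port and device structures runs only from the ktype release callbacks once the last reference is gone. A sketch that lists the per-port group directories the code creates; "mthca0" is a placeholder device name:

    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            /* Expect cm_tx_msgs, cm_tx_retries, cm_rx_msgs, cm_rx_duplicates. */
            DIR *d = opendir("/sys/class/infiniband_cm/mthca0/1");
            struct dirent *e;

            if (!d) {
                    perror("opendir");
                    return 1;
            }
            while ((e = readdir(d)))
                    printf("%s\n", e->d_name);
            closedir(d);
            return 0;
    }
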
@@ -3397,7 +3638,7 @@ static void cm_add_one(struct ib_device *device)
         if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                 return;
 
-        cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
+        cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
                          device->phys_port_cnt, GFP_KERNEL);
         if (!cm_dev)
                 return;
@@ -3405,11 +3646,27 @@ static void cm_add_one(struct ib_device *device)
         cm_dev->device = device;
         cm_get_ack_delay(cm_dev);
 
+        ret = kobject_init_and_add(&cm_dev->dev_obj, &cm_dev_obj_type,
+                                   &cm_class.subsys.kobj, "%s", device->name);
+        if (ret) {
+                kfree(cm_dev);
+                return;
+        }
+
         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
         for (i = 1; i <= device->phys_port_cnt; i++) {
-                port = &cm_dev->port[i-1];
+                port = kzalloc(sizeof *port, GFP_KERNEL);
+                if (!port)
+                        goto error1;
+
+                cm_dev->port[i-1] = port;
                 port->cm_dev = cm_dev;
                 port->port_num = i;
+
+                ret = cm_create_port_fs(port);
+                if (ret)
+                        goto error1;
+
                 port->mad_agent = ib_register_mad_agent(device, i,
                                                         IB_QPT_GSI,
                                                         &reg_req,
@@ -3418,11 +3675,11 @@ static void cm_add_one(struct ib_device *device)
                                                         cm_recv_handler,
                                                         port);
                 if (IS_ERR(port->mad_agent))
-                        goto error1;
+                        goto error2;
 
                 ret = ib_modify_port(device, i, 0, &port_modify);
                 if (ret)
-                        goto error2;
+                        goto error3;
         }
         ib_set_client_data(device, &cm_client, cm_dev);
 
@@ -3431,17 +3688,20 @@ static void cm_add_one(struct ib_device *device)
         write_unlock_irqrestore(&cm.device_lock, flags);
         return;
 
-error2:
+error3:
         ib_unregister_mad_agent(port->mad_agent);
+error2:
+        cm_remove_port_fs(port);
 error1:
         port_modify.set_port_cap_mask = 0;
         port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
         while (--i) {
-                port = &cm_dev->port[i-1];
+                port = cm_dev->port[i-1];
                 ib_modify_port(device, port->port_num, 0, &port_modify);
                 ib_unregister_mad_agent(port->mad_agent);
+                cm_remove_port_fs(port);
         }
-        kfree(cm_dev);
+        cm_remove_fs_obj(&cm_dev->dev_obj);
 }
 
 static void cm_remove_one(struct ib_device *device)
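
The relabeled error paths keep the usual kernel unwind convention: each label undoes exactly the steps that succeeded before the jump, in reverse order, with later labels falling through to earlier ones. Reduced to a self-contained sketch (the setup steps and names are illustrative, not the driver's):

    #include <stdio.h>

    static int alloc_port(void)      { return 0; }
    static int create_port_fs(void)  { return 0; }
    static int register_agent(void)  { return -1; /* simulate a failure */ }
    static void remove_port_fs(void) { printf("remove port fs\n"); }
    static void free_port(void)      { printf("free port\n"); }

    static int add_port(void)
    {
            if (alloc_port())
                    goto error1;
            if (create_port_fs())
                    goto error2;
            if (register_agent())
                    goto error3;
            return 0;

    error3:
            remove_port_fs();       /* undo create_port_fs() */
    error2:
            free_port();            /* undo alloc_port() */
    error1:
            return -1;
    }

    int main(void)
    {
            return add_port() ? 1 : 0;
    }
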
@@ -3463,11 +3723,12 @@ static void cm_remove_one(struct ib_device *device)
         write_unlock_irqrestore(&cm.device_lock, flags);
 
         for (i = 1; i <= device->phys_port_cnt; i++) {
-                port = &cm_dev->port[i-1];
+                port = cm_dev->port[i-1];
                 ib_modify_port(device, port->port_num, 0, &port_modify);
                 ib_unregister_mad_agent(port->mad_agent);
+                cm_remove_port_fs(port);
         }
-        kfree(cm_dev);
+        cm_remove_fs_obj(&cm_dev->dev_obj);
 }
 
 static int __init ib_cm_init(void)
@@ -3488,17 +3749,25 @@ static int __init ib_cm_init(void)
         idr_pre_get(&cm.local_id_table, GFP_KERNEL);
         INIT_LIST_HEAD(&cm.timewait_list);
 
-        cm.wq = create_workqueue("ib_cm");
-        if (!cm.wq)
-                return -ENOMEM;
+        ret = class_register(&cm_class);
+        if (ret)
+                return -ENOMEM;
 
+        cm.wq = create_workqueue("ib_cm");
+        if (!cm.wq) {
+                ret = -ENOMEM;
+                goto error1;
+        }
+
         ret = ib_register_client(&cm_client);
         if (ret)
-                goto error;
+                goto error2;
 
         return 0;
-error:
+error2:
         destroy_workqueue(cm.wq);
+error1:
+        class_unregister(&cm_class);
         return ret;
 }
 
@@ -3519,6 +3788,7 @@ static void __exit ib_cm_cleanup(void)
         }
 
         ib_unregister_client(&cm_client);
+        class_unregister(&cm_class);
         idr_destroy(&cm.local_id_table);
 }
 