author     Upinder Malhi <umalhi@cisco.com>        2014-01-09 17:48:08 -0500
committer  Roland Dreier <roland@purestorage.com>  2014-01-14 03:44:42 -0500
commit     8af94ac66a4d53a96278ecbb9ef2e8592f0d9ba3
tree       8be021e1102e43ca5fd253bbd1b69d12ffc2fcef
parent     2183b990b67b761f81c68a18f60df028e080cf05
IB/usnic: Port over main.c and verbs.c to the usnic_fwd.h
This patch ports usnic_ib_main.c, usnic_ib_verbs.c and usnic_ib.h to the
new interface of usnic_fwd.h.

Signed-off-by: Upinder Malhi <umalhi@cisco.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib.h        3
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c  38
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c 37
3 files changed, 39 insertions(+), 39 deletions(-)
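
The patch below drops the per-ibdev link_up/mac/mtu bookkeeping and instead keeps that state on struct usnic_fwd_dev, updated through usnic_fwd_* helpers. As orientation only, here is a minimal sketch of what those helpers plausibly look like, inferred purely from the call sites in this diff; usnic_fwd.h/usnic_fwd.c are not part of this patch, so the field layout and the spinlock shown here are assumptions.

/*
 * Sketch only: approximates the usnic_fwd.h state helpers that the diff
 * below starts calling.  Inferred from the call sites; the real header
 * keeps more fields (pdev, netdev, filters, ...) and its locking may
 * differ -- treat the spinlock and layout as assumptions.
 */
#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct usnic_fwd_dev {
	spinlock_t		lock;		/* assumed: guards mac/mtu/link_up */
	char			mac[ETH_ALEN];
	unsigned int		mtu;
	bool			link_up;
};

void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
{
	spin_lock(&ufdev->lock);
	memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu)
{
	spin_lock(&ufdev->lock);
	ufdev->mtu = mtu;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = true;
	spin_unlock(&ufdev->lock);
}

void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev)
{
	spin_lock(&ufdev->lock);
	ufdev->link_up = false;
	spin_unlock(&ufdev->lock);
}

With the state centralized on the forwarding device, the verbs code can read us_ibdev->ufdev->mac, ->mtu and ->link_up directly, which is what the usnic_ib_verbs.c hunks below switch to.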
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index 3511c8521f30..92d9d9a60b3b 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -57,13 +57,10 @@ struct usnic_ib_dev {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
 	struct usnic_fwd_dev *ufdev;
-	bool link_up;
 	struct list_head ib_dev_link;
 	struct list_head vf_dev_list;
 	struct list_head ctx_list;
 	struct mutex usdev_lock;
-	char mac[ETH_ALEN];
-	unsigned int mtu;
 
 	/* provisioning information */
 	struct kref vf_cnt;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index dc09c12435b9..6ab0b41be9c5 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -150,15 +150,17 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
 	case NETDEV_UP:
 	case NETDEV_DOWN:
 	case NETDEV_CHANGE:
-		if (!us_ibdev->link_up && netif_carrier_ok(netdev)) {
-			us_ibdev->link_up = true;
+		if (!us_ibdev->ufdev->link_up &&
+				netif_carrier_ok(netdev)) {
+			usnic_fwd_carrier_up(us_ibdev->ufdev);
 			usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
 			ib_event.event = IB_EVENT_PORT_ACTIVE;
 			ib_event.device = &us_ibdev->ib_dev;
 			ib_event.element.port_num = 1;
 			ib_dispatch_event(&ib_event);
-		} else if (us_ibdev->link_up && !netif_carrier_ok(netdev)) {
-			us_ibdev->link_up = false;
+		} else if (us_ibdev->ufdev->link_up &&
+				!netif_carrier_ok(netdev)) {
+			usnic_fwd_carrier_down(us_ibdev->ufdev);
 			usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
 			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
 			ib_event.event = IB_EVENT_PORT_ERR;
@@ -172,17 +174,16 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
 		}
 		break;
 	case NETDEV_CHANGEADDR:
-		if (!memcmp(us_ibdev->mac, netdev->dev_addr,
-				sizeof(us_ibdev->mac))) {
+		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
+				sizeof(us_ibdev->ufdev->mac))) {
 			usnic_dbg("Ignorning addr change on %s\n",
 					us_ibdev->ib_dev.name);
 		} else {
 			usnic_info(" %s old mac: %pM new mac: %pM\n",
 					us_ibdev->ib_dev.name,
-					us_ibdev->mac,
+					us_ibdev->ufdev->mac,
 					netdev->dev_addr);
-			memcpy(us_ibdev->mac, netdev->dev_addr,
-				sizeof(us_ibdev->mac));
+			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
 			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
 			ib_event.event = IB_EVENT_GID_CHANGE;
 			ib_event.device = &us_ibdev->ib_dev;
@@ -192,11 +193,11 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
 
 		break;
 	case NETDEV_CHANGEMTU:
-		if (us_ibdev->mtu != netdev->mtu) {
+		if (us_ibdev->ufdev->mtu != netdev->mtu) {
 			usnic_info("MTU Change on %s old: %u new: %u\n",
 					us_ibdev->ib_dev.name,
-					us_ibdev->mtu, netdev->mtu);
-			us_ibdev->mtu = netdev->mtu;
+					us_ibdev->ufdev->mtu, netdev->mtu);
+			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
 			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
 		} else {
 			usnic_dbg("Ignoring MTU change on %s\n",
@@ -320,18 +321,19 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 	if (ib_register_device(&us_ibdev->ib_dev, NULL))
 		goto err_fwd_dealloc;
 
-	us_ibdev->link_up = netif_carrier_ok(us_ibdev->netdev);
-	us_ibdev->mtu = us_ibdev->netdev->mtu;
-	memcpy(&us_ibdev->mac, us_ibdev->netdev->dev_addr,
-		sizeof(us_ibdev->mac));
-	usnic_mac_to_gid(us_ibdev->netdev->perm_addr, &gid.raw[0]);
+	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
+	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
+	if (netif_carrier_ok(us_ibdev->netdev))
+		usnic_fwd_carrier_up(us_ibdev->ufdev);
+
 	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
 		sizeof(gid.global.interface_id));
 	kref_init(&us_ibdev->vf_cnt);
 
 	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
 			us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
-			us_ibdev->mac, us_ibdev->link_up, us_ibdev->mtu);
+			us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
+			us_ibdev->ufdev->mtu);
 	return us_ibdev;
 
 err_fwd_dealloc:
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 8f8dfa2672b0..2217bc0a6bbe 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -47,6 +47,7 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 	struct pci_dev *pdev;
 	struct vnic_dev_bar *bar;
 	struct usnic_vnic_res_chunk *chunk;
+	struct usnic_ib_qp_grp_flow *default_flow;
 	int i, err;
 
 	memset(&resp, 0, sizeof(resp));
@@ -69,7 +70,6 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
 	resp.bar_bus_addr = bar->bus_addr;
 	resp.bar_len = bar->len;
-	resp.transport = qp_grp->transport;
 
 	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
 	if (IS_ERR_OR_NULL(chunk)) {
@@ -113,6 +113,10 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 	for (i = 0; i < chunk->cnt; i++)
 		resp.cq_idx[i] = chunk->res[i]->vnic_idx;
 
+	default_flow = list_first_entry(&qp_grp->flows_lst,
+					struct usnic_ib_qp_grp_flow, link);
+	resp.transport = default_flow->trans_type;
+
 	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
 	if (err) {
 		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
@@ -125,7 +129,7 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 static struct usnic_ib_qp_grp*
 find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 				struct usnic_ib_pd *pd,
-				enum usnic_transport_type transport,
+				struct usnic_transport_spec *trans_spec,
 				struct usnic_vnic_res_spec *res_spec)
 {
 	struct usnic_ib_vf *vf;
@@ -141,11 +145,6 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 		return NULL;
 	}
 
-	if (!us_ibdev->link_up) {
-		usnic_info("Cannot allocate qp b/c PF link is down\n");
-		return NULL;
-	}
-
 	if (usnic_ib_share_vf) {
 		/* Try to find resouces on a used vf which is in pd */
 		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
@@ -189,7 +188,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 	}
 
 	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
-						transport);
+						trans_spec);
 	spin_unlock(&vf->lock);
 	if (IS_ERR_OR_NULL(qp_grp)) {
 		usnic_err("Failed to allocate qp_grp\n");
@@ -253,7 +252,7 @@ int usnic_ib_query_device(struct ib_device *ibdev,
 	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
 	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
 	memset(props, 0, sizeof(*props));
-	usnic_mac_to_gid(us_ibdev->mac, &gid.raw[0]);
+	usnic_mac_to_gid(us_ibdev->ufdev->mac, &gid.raw[0]);
 	memcpy(&props->sys_image_guid, &gid.global.interface_id,
 		sizeof(gid.global.interface_id));
 	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
@@ -311,7 +310,7 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->sm_lid = 0;
 	props->sm_sl = 0;
 
-	if (us_ibdev->link_up) {
+	if (us_ibdev->ufdev->link_up) {
 		props->state = IB_PORT_ACTIVE;
 		props->phys_state = 5;
 	} else {
@@ -327,9 +326,9 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
 	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
 				&props->active_width);
 	props->max_mtu = IB_MTU_4096;
-	props->active_mtu = iboe_get_mtu(us_ibdev->mtu);
+	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
 	/* Userspace will adjust for hdrs */
-	props->max_msg_sz = us_ibdev->mtu;
+	props->max_msg_sz = us_ibdev->ufdev->mtu;
 	props->max_vl_num = 1;
 	mutex_unlock(&us_ibdev->usdev_lock);
 
@@ -386,7 +385,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 
 	mutex_lock(&us_ibdev->usdev_lock);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
-	usnic_mac_to_gid(us_ibdev->mac, &gid->raw[0]);
+	usnic_mac_to_gid(us_ibdev->ufdev->mac, &gid->raw[0]);
 	mutex_unlock(&us_ibdev->usdev_lock);
 
 	return 0;
@@ -445,6 +444,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 	struct usnic_ib_ucontext *ucontext;
 	int cq_cnt;
 	struct usnic_vnic_res_spec res_spec;
+	struct usnic_transport_spec trans_spec;
 
 	usnic_dbg("\n");
 
@@ -457,12 +457,14 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 	}
 
+	memset(&trans_spec, 0, sizeof(trans_spec));
+	trans_spec.trans_type = USNIC_TRANSPORT_ROCE_CUSTOM;
 	mutex_lock(&us_ibdev->usdev_lock);
-	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2,
-	res_spec = min_transport_spec[USNIC_DEFAULT_TRANSPORT];
+	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
+	res_spec = min_transport_spec[trans_spec.trans_type];
 	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
 	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
-						USNIC_DEFAULT_TRANSPORT,
+						&trans_spec,
 						&res_spec);
 	if (IS_ERR_OR_NULL(qp_grp)) {
 		err = (qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
@@ -522,8 +524,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	/* TODO: Future Support All States */
 	mutex_lock(&qp_grp->vf->pf->usdev_lock);
 	if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
-		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT,
-				&qp_grp->filters[DFLT_FILTER_IDX]);
+		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
 	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
 		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
 	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {